Code Example #1
 def datasets(self):
     """Returns 2 new OMERO Datasets with required fields set."""
     dataset = omero.model.DatasetI()
     dataset.name = rstring("A_%s" % self.uuid())
     dataset2 = omero.model.DatasetI()
     dataset2.name = rstring("B_%s" % self.uuid())
     return self.update.saveAndReturnArray([dataset, dataset2])
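A minimal usage sketch for the fixture above (hypothetical test code; it assumes the same ITest-style class and that saveAndReturnArray returns the saved objects with their ids populated):

 def test_datasets_fixture(self):
     # hypothetical test using the datasets() fixture above
     dataset_a, dataset_b = self.datasets()
     assert dataset_a.id.val != dataset_b.id.val
     assert dataset_a.name.val.startswith("A_")
     assert dataset_b.name.val.startswith("B_")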
Code Example #2
 def new_object(self, classname, name=None, description=None):
     obj = classname()
     if not name:
         name = self.uuid()
     obj.setName(rstring(name))
     obj.setDescription(rstring(description))
     return obj
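A short, hypothetical usage sketch for new_object(): the classname argument is the model class itself (not a string), so calls look like the following, assuming omero.model is imported and the method lives on the same test class:

 dataset = self.new_object(omero.model.DatasetI, name="my-dataset")
 project = self.new_object(omero.model.ProjectI)  # name defaults to self.uuid()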
Code Example #3
def externalInfo():
    o = ExternalInfoI()
    o.entityId = rlong(123L)
    o.entityType = rstring("test")
    o.lsid = rstring("ABCDEF")
    o.uuid = rstring("f90a1fd5-275c-4d14-82b3-87b5ef0f07de")
    return o
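One note on this fixture: the 123L long literal is Python 2 only syntax. A sketch of the same fixture for Python 3 environments (rlong accepts a plain int; everything else is unchanged):

def externalInfo():
    o = ExternalInfoI()
    o.entityId = rlong(123)  # no "L" suffix under Python 3
    o.entityType = rstring("test")
    o.lsid = rstring("ABCDEF")
    o.uuid = rstring("f90a1fd5-275c-4d14-82b3-87b5ef0f07de")
    return o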
Code Example #4
 def screens(self):
     """Returns 2 new OMERO Screens with required fields set."""
     screen = omero.model.ScreenI()
     screen.name = rstring("A_%s" % self.uuid())
     screen2 = omero.model.ScreenI()
     screen2.name = rstring("B_%s" % self.uuid())
     return self.update.saveAndReturnArray([screen, screen2])
Code Example #5
    def new_user(self, group=None, perms=None, admin=False, system=False):
        """
        admin: If user is to be an admin of the created group
        system: If user is to be a system admin
        """

        if not self.root:
            raise Exception("No root client. Cannot create user")

        adminService = self.root.getSession().getAdminService()
        name = self.uuid()

        # Create group if necessary
        if not group:
            g = self.new_group(perms=perms)
        else:
            g = group

        # Create user
        e = omero.model.ExperimenterI()
        e.omeName = rstring(name)
        e.firstName = rstring(name)
        e.lastName = rstring(name)
        e.ldap = rbool(False)
        uid = adminService.createExperimenterWithPassword(
            e, rstring(name), g, [g, adminService.lookupGroup("user")])
        e = adminService.lookupExperimenter(name)
        if admin:
            adminService.setGroupOwner(g, e)
        if system:
            adminService.addGroups(e, [omero.model.ExperimenterGroupI(0, False)])

        return adminService.getExperimenter(uid)
Code Example #6
    def testGetSetMetaMap(self):
        hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
        self.init(hdf, False)

        hdf.add_meta_map({'a': rint(1)})
        m1 = hdf.get_meta_map()
        assert len(m1) == 3
        assert m1['__initialized'].val > 0
        assert m1['__version'] == rstring('2')
        assert m1['a'] == rint(1)

        with pytest.raises(omero.ApiUsageException) as exc:
            hdf.add_meta_map({'b': rint(1), '__c': rint(2)})
        assert exc.value.message == 'Reserved attribute name: __c'
        assert hdf.get_meta_map() == m1

        with pytest.raises(omero.ValidationException) as exc:
            hdf.add_meta_map({'d': rint(None)})
        assert exc.value.serverStackTrace.startswith('Unsupported type:')
        assert hdf.get_meta_map() == m1

        hdf.add_meta_map({}, replace=True)
        m2 = hdf.get_meta_map()
        assert len(m2) == 2
        assert m2 == {
            '__initialized': m1['__initialized'], '__version': rstring('2')}

        hdf.add_meta_map({'__test': 1}, replace=True, init=True)
        m3 = hdf.get_meta_map()
        assert m3 == {'__test': rint(1)}

        hdf.cleanup()
Code Example #7
def runAsScript():
    """
    The main entry point of the script, as called by the client via the
    scripting service, passing the required parameters.
    """
    print_duration(False)    # start timer
    dataTypes = [rstring('Dataset'), rstring('Image')]

    client = scripts.client(
        'Images_From_ROIs.py',
        """Crop rectangular regions from slide scanner images.""",

        scripts.String(
            "Data_Type", optional=False, grouping="1",
            description="Choose Images via their 'Dataset' or directly by "
            " 'Image' IDs.", values=dataTypes, default="Image"),

        scripts.List(
            "IDs", optional=False, grouping="2",
            description="List of Dataset IDs or Image IDs to "
            " process.").ofType(rlong(0)),

        scripts.String(
            "Container_Name", grouping="3",
            description="Option: put Images in new Dataset with this name",
            default="From_ROIs"),

        scripts.Bool(
            "Email_Results", grouping="4", default=True,
            description="E-mail the results"),

        scripts.String(
            "Email_address", grouping="4.1",
            description="Specify e-mail address"),

        version="5.0.2",
        authors=["Daniel Matthews", "QBI"],
        institutions=["University of Queensland"],
        contact="*****@*****.**",
    )

    try:
        parameterMap = client.getInputs(unwrap=True)
        print parameterMap

        # create a wrapper so we can use the Blitz Gateway.
        conn = BlitzGateway(client_obj=client)
        
        if parameterMap['Email_Results'] and not validate_email(conn, parameterMap):
            client.setOutput("Message", rstring("No valid email address"))
            return

        robj, message = make_images_from_rois(conn, parameterMap)

        client.setOutput("Message", rstring(message))
        if robj is not None:
            client.setOutput("Result", robject(robj))

    finally:
        client.closeSession()
        print_duration()
Code Example #8
 def write_to_omero(self):
     sf = self.client.getSession()
     group = str(self.value_resolver.target_object.details.group.id.val)
     sr = sf.sharedResources()
     update_service = sf.getUpdateService()
     name = 'bulk_annotations'
     table = sr.newTable(1, name, {'omero.group': group})
     if table is None:
         raise MetadataError(
             "Unable to create table: %s" % name)
     original_file = table.getOriginalFile()
     log.info('Created new table OriginalFile:%d' % original_file.id.val)
     table.initialize(self.columns)
     log.info('Table initialized with %d columns.' % (len(self.columns)))
     table.addData(self.columns)
     log.info('Added data column data.')
     table.close()
     file_annotation = FileAnnotationI()
     file_annotation.ns = \
             rstring('openmicroscopy.org/omero/bulk_annotations')
     file_annotation.description = rstring(name)
     file_annotation.file = OriginalFileI(original_file.id.val, False)
     link = self.create_annotation_link()
     link.parent = self.target_object
     link.child = file_annotation
     update_service.saveObject(link, {'omero.group': group})
Code Example #9
File: conftest.py Project: knabar/omero-marshal
def externalInfo():
    o = ExternalInfoI()
    o.entityId = rlong(123L)
    o.entityType = rstring('test')
    o.lsid = rstring('ABCDEF')
    o.uuid = rstring('f90a1fd5-275c-4d14-82b3-87b5ef0f07de')
    return o
Code Example #10
    def test1175(self):
        uuid = self.root.sf.getAdminService().getEventContext().sessionUuid
        update = self.root.sf.getUpdateService()
        timeline = self.root.sf.getTimelineService()

        # create dataset
        ds = self.make_dataset(name='test1154-ds-%s' % (uuid),
                               client=self.root)
        ds.unload()

        # create tag
        ann = omero.model.TagAnnotationI()
        ann.textValue = rstring('tag-%s' % (uuid))
        ann.setDescription(rstring('tag-%s' % (uuid)))
        t_ann = omero.model.DatasetAnnotationLinkI()
        t_ann.setParent(ds)
        t_ann.setChild(ann)
        update.saveObject(t_ann)

        p = omero.sys.Parameters()
        p.map = {}
        f = omero.sys.Filter()
        f.ownerId = rlong(0)
        f.limit = rint(10)
        p.theFilter = f

        M = timeline.getMostRecentAnnotationLinks
        res = M(None, ['TagAnnotation'], None, p)
        assert len(res) > 0

        # And now for #9609
        res = M(None, ['TagAnnotation'], None, p, {"omero.group": "-1"})
        assert len(res) > 0
Code Example #11
def createTag(name, description=None):
    print "Create Tag:", name
    tag = TagAnnotationI()
    tag.textValue = rstring(name)
    if description is not None:
        tag.description = rstring(description)
    return tag
Code Example #12
    def create_original_file(self, content, client=None):
        """
        Create an original file and upload it onto the server
        """

        if client is None:
            update = self.update
            sf = self.sf
        else:
            update = client.sf.getUpdateService()
            sf = client.sf

        ofile = omero.model.OriginalFileI()
        ofile.name = rstring("")
        ofile.path = rstring("")
        ofile = update.saveAndReturnObject(ofile)

        rfs = sf.createRawFileStore()
        try:
            rfs.setFileId(ofile.id.val)
            rfs.write(content, 0, len(content))
            ofile = rfs.save()
            assert len(content) == ofile.size.val
            return ofile
        finally:
            rfs.close()
Code Example #13
File: Images_From_ROIs.py Project: sbesson/scripts
def runAsScript():
    """
    The main entry point of the script, as called by the client via the
    scripting service, passing the required parameters.
    """
    printDuration(False)    # start timer
    dataTypes = [rstring('Dataset'), rstring('Image')]

    client = scripts.client(
        'Images_From_ROIs.py',
        """Create new Images from the regions defined by Rectangle ROIs on \
other Images.
Designed to work with single-plane images (Z=1 T=1) with multiple ROIs per \
image.
If you choose to make an image stack from all the ROIs, this script \
assumes that all the ROIs on each Image are the same size.""",

        scripts.String(
            "Data_Type", optional=False, grouping="1",
            description="Choose Images via their 'Dataset' or directly by "
            " 'Image' IDs.", values=dataTypes, default="Image"),

        scripts.List(
            "IDs", optional=False, grouping="2",
            description="List of Dataset IDs or Image IDs to "
            " process.").ofType(rlong(0)),

        scripts.String(
            "Container_Name", grouping="3",
            description="Option: put Images in new Dataset with this name"
            " OR use this name for new Image stacks, if 'Make_Image_Stack')",
            default="From_ROIs"),

        scripts.Bool(
            "Make_Image_Stack", grouping="4", default=False,
            description="If true, make a single Image (stack) from all the"
            " ROIs of each parent Image"),

        version="4.2.0",
        authors=["William Moore", "OME Team"],
        institutions=["University of Dundee"],
        contact="*****@*****.**",
    )

    try:
        parameterMap = client.getInputs(unwrap=True)
        print parameterMap

        # create a wrapper so we can use the Blitz Gateway.
        conn = BlitzGateway(client_obj=client)

        robj, message = makeImagesFromRois(conn, parameterMap)

        client.setOutput("Message", rstring(message))
        if robj is not None:
            client.setOutput("Result", robject(robj))

    finally:
        client.closeSession()
        printDuration()
Code Example #14
 def setUp(self):
     AbstractPlateAnalysisCtx.DEFAULT_ORIGINAL_FILE_PROVIDER = \
         FromFileOriginalFileProvider
     original_files = list()
     # Create our container images and an original file image map
     images = list()
     n_images = 0
     for row in range(16):
         for column in range(24):
             well = WellI(n_images, True)
             well.column = rint(column)
             well.row = rint(row)
             well_sample = WellSampleI(n_images, True)
             well_sample.well = well
             image = ImageI(n_images, True)
             image.addWellSample(well_sample)
             images.append(image)
             n_images += 1  # give each well, well sample and image a unique id
     original_file_image_map = dict()
     # Our required original file format
     format = rstring('Companion/InCell')
     # Create original file representing the result file
     o = OriginalFileI(1L, True)
     o.name = rstring(self.RESULT_FILE)
     o.path = rstring(os.path.join(self.ROOT, self.RESULT_FILE))
     o.mimetype = format
     original_files.append(o)
     original_file_image_map[1L] = image
     sf = TestingServiceFactory()
     self.analysis_ctx = InCellPlateAnalysisCtx(
         images, original_files, original_file_image_map, 1L, sf)
Code Example #15
File: tag.py Project: stelfrich/openmicroscopy
    def create_tag(self, name, description, text="tag"):
        """
        Creates a new tag object. Returns the new tag object.

        If either the name or description parameter is None, the user will be
        prompted to input it.

        The "text" parameter should be the text description to use upon user
        input. For example, if we were creating a tag, this would be "tag"
        (the default). If we were creating a tagset, this could be "tag set".
        """
        if name is None:
            name = raw_input("Please enter a name for this %s: " % text)

        if description is None:
            description = raw_input("Please enter a description for this %s: "
                                    % text)

        if name is not None and description is not None and name != '':
            tag = TagAnnotationI()
            tag.textValue = rstring(name)
            if description is not None and len(description) > 0:
                tag.description = rstring(description)

            return tag
        else:
            self.ctx.err("Tag/tagset name cannot be 'None' or empty.")
            sys.exit(1)
Code Example #16
    def testCreateAndUpdatePublicGroup(self):
        # this is the test of creating public group and updating it
        # including changes in #1434
        uuid = self.uuid()
        admin = self.root.sf.getAdminService()

        # create group1
        new_gr1 = ExperimenterGroupI()
        new_gr1.name = rstring("group1_%s" % uuid)
        new_gr1.ldap = rbool(False)
        p = PermissionsI()
        p.setUserRead(True)
        p.setUserWrite(True)
        p.setGroupRead(True)
        p.setGroupWrite(True)
        p.setWorldRead(False)
        p.setWorldAnnotate(False)
        p.setWorldWrite(False)
        new_gr1.details.permissions = p
        g1_id = admin.createGroup(new_gr1)

        # update name of group1
        gr1 = admin.getGroup(g1_id)
        assert 'rwrw--' == str(gr1.details.permissions)
        new_name = "changed_name_group1_%s" % uuid
        gr1.name = rstring(new_name)
        admin.updateGroup(gr1)
        gr1_u = admin.getGroup(g1_id)
        assert new_name == gr1_u.name.val
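Side note on the permissions block above: the flag-by-flag setters build exactly the permissions the test later asserts as the string 'rwrw--', and PermissionsI can also be constructed directly from that string form, as Code Example #17 below does. A small equivalent sketch, assuming the same imports:

        p = PermissionsI("rwrw--")
        assert p.isUserRead() and p.isUserWrite()
        assert p.isGroupRead() and p.isGroupWrite()
        assert not p.isWorldRead() and not p.isWorldWrite()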
Code Example #17
    def test3136(self):
        """
        Calls to updateGroup were taking too long
        because the default value of permissions
        returned by the server was triggering a
        full changePermissions event.
        """
        admin = self.root.sf.getAdminService()
        group = self.new_group(perms="rw----")

        # Change the name but not the permissions
        group.name = rstring(self.uuid())
        elapsed1, rv = self.timeit(admin.updateGroup, group)

        # Now change the name and the permissions
        group.name = rstring(self.uuid())
        group.details.permissions = omero.model.PermissionsI("rwr---")
        elapsed2, rv = self.timeit(admin.updateGroup, group)

        # Locally this test always fails as the two times are
        # the same order of magnitude. This may be an indication that
        # the relevant ticket:
        # http://trac.openmicroscopy.org/ome/ticket/3136
        # is still valid. Does the ticket need re-opening
        # or does the test condition need relaxing?
        assert elapsed1 < (0.1 * elapsed2),\
            "elapsed1=%s, elapsed2=%s" % (elapsed1, elapsed2)
Code Example #18
File: library.py Project: DirkHaehnel/openmicroscopy
    def new_user(self, group = None, perms = None,
            admin = False, system = False):
        """
        admin: If user is to be an admin of the created group
        system: If user is to be a system admin
        """

        if not self.root:
            raise exceptions.Exception("No root client. Cannot create user")

        adminService = self.root.getSession().getAdminService()
        name = self.uuid()

        # Create group if necessary
        if not group:
            g = self.new_group(perms = perms)
            group = g.name.val
        else:
            g, group = self.group_and_name(group)

        # Create user
        e = omero.model.ExperimenterI()
        e.omeName = rstring(name)
        e.firstName = rstring(name)
        e.lastName = rstring(name)
        uid = adminService.createUser(e, group)
        e = adminService.lookupExperimenter(name)
        if admin:
            adminService.setGroupOwner(g, e)
        if system:
            adminService.addGroups(e, \
                    [omero.model.ExperimenterGroupI(0, False)])

        return adminService.getExperimenter(uid)
Code Example #19
    def new_user(self, group = None, perms = None, admin = False):

        if not self.root:
            raise exceptions.Exception("No root client. Cannot create user")

        # use a distinct name for the admin service so that the 'admin' flag
        # argument is not shadowed
        adminService = self.root.getSession().getAdminService()
        name = self.uuid()

        # Create group if necessary
        if not group:
            g = self.new_group(perms = perms)
            group = g.name.val
        else:
            g, group = self.group_and_name(group)

        # Create user
        e = omero.model.ExperimenterI()
        e.omeName = rstring(name)
        e.firstName = rstring(name)
        e.lastName = rstring(name)
        uid = adminService.createUser(e, group)
        e = adminService.lookupExperimenter(name)
        if admin:
            adminService.setGroupOwner(g, e)
        return adminService.getExperimenter(uid)
Code Example #20
    def testChgrp11109(self):
        """
        Place a plate in a single screen and attempt to move it.
        """
        # One user in two groups
        client, user = self.new_client_and_user(perms=PRIVATE)
        admin = client.sf.getAdminService()
        target_grp = self.new_group([user], perms=PRIVATE)
        target_gid = target_grp.id.val
        admin.getEventContext()  # Refresh

        update = client.sf.getUpdateService()
        plate = PlateI()
        plate.name = rstring("testChgrp11109")
        screen = ScreenI()
        screen.name = rstring("testChgrp11109")
        link = screen.linkPlate(plate)
        link = update.saveAndReturnObject(link)

        # Now chgrp, should succeed
        chgrp = Chgrp2(
            targetObjects={"Plate": [link.child.id.val]}, groupId=target_gid)
        self.doSubmit(chgrp, client)

        # Check that the links have been destroyed
        query = client.sf.getQueryService()
        with pytest.raises(omero.ValidationException):
            query.get("ScreenPlateLink", link.id.val, {"omero.group": "-1"})
Code Example #21
    def test_create_map_ann(self, ns, kw):
        sess = MockSession()
        self.mox.StubOutWithMock(sess.us, 'saveAndReturnObject')

        map = {'a': rstring('1'), 'bb': rstring('cc')}

        rid = 2
        r = omero.model.MapAnnotationI()
        if ns:
            r.setNs(rstring(ns))
        r.setMapValue(map)
        r.setId(rlong(rid))

        sess.us.saveAndReturnObject(mox.Func(
            lambda o: o.getNs() == wrap(ns) and
            o.getMapValue() == wrap(map).val)).AndReturn(r)

        self.mox.ReplayAll()

        ma = OmeroMetadata.MapAnnotations(sess, namespace=ns)
        if kw:
            assert ma.create_map_annkw(a='1', bb='cc') == rid
        else:
            assert ma.create_map_ann({'a': '1', 'bb': 'cc'}) == rid
        self.mox.VerifyAll()
Code Example #22
 def getDatasetMap(self):
     """
     Convert unique list of dataset names to a map
     (dataset_name, dataset_object).
     """
     dataset_map = {}
     params = omero.sys.ParametersI()
     params.add("pid", rlong(self.target_project_id))
     for name in self.target_dataset_names:
         params.add("dname", rstring(name))
         dataset = self.query_service.findByQuery(
             self.dataset_query, params)
         if dataset is None:
             print "Creating new datset"
             dataset = omero.model.DatasetI()
             dataset.setName(rstring(name))
             dataset = \
                 self.update_service.saveAndReturnObject(dataset)
             datasetId = dataset.getId().getValue()
             print "\tNew dataset ID:", datasetId
             link = omero.model.ProjectDatasetLinkI()
             print "\tLinking dataset to:", self.target_project_id
             link.parent = omero.model.ProjectI(
                 self.target_project_id, False)
             link.child = omero.model.DatasetI(datasetId, False)
             self.update_service.saveObject(link)
             dataset = self.query_service.findByQuery(
                 self.dataset_query, params)
         dataset_map[name] = dataset
     return dataset_map
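The method above binds :pid and :dname parameters against self.dataset_query, which is not shown in the snippet. A plausible sketch of that query (an assumption, not taken from the original source):

 dataset_query = (
     "select d from Dataset d "
     "join d.projectLinks as l "
     "where l.parent.id = :pid and d.name = :dname")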
Code Example #23
File: test_delete.py Project: emilroz/openmicroscopy
    def test5793(self):
        uuid = self.client.sf.getAdminService().getEventContext().sessionUuid
        query = self.client.sf.getQueryService()

        img = self.new_image(name="delete tagset test")

        tag = omero.model.TagAnnotationI()
        tag.textValue = rstring("tag %s" % uuid)
        tag = self.client.sf.getUpdateService().saveAndReturnObject(tag)

        img.linkAnnotation(tag)
        img = self.client.sf.getUpdateService().saveAndReturnObject(img)

        tagset = omero.model.TagAnnotationI()
        tagset.textValue = rstring("tagset %s" % uuid)
        tagset.linkAnnotation(tag)
        tagset = self.client.sf.getUpdateService().saveAndReturnObject(tagset)

        tag = tagset.linkedAnnotationList()[0]

        command = omero.cmd.Delete("/Annotation", tagset.id.val, None)
        handle = self.client.sf.submit(command)
        self.waitOnCmd(self.client, handle)

        assert not query.find("TagAnnotation", tagset.id.val)
        assert tag.id.val == query.find("TagAnnotation", tag.id.val).id.val
Code Example #24
File: test_show.py Project: stelfrich/openmicroscopy
 def screen_plate_run_well(self):
     """
     Returns a new OMERO Screen, linked Plate, linked Well, linked
     WellSample and linked Image populated by an
     L{test.integration.library.ITest} instance, plus a linked
     PlateAcquisition, with all required fields set.
     """
     screen = ScreenI()
     screen.name = rstring(self.uuid())
     plate = PlateI()
     plate.name = rstring(self.uuid())
     # Well A10 (will have two WellSamples)
     well_a = WellI()
     well_a.row = rint(0)
     well_a.column = rint(9)
     # Well A11 (will not have a WellSample)
     well_b = WellI()
     well_b.row = rint(0)
     well_b.column = rint(10)
     ws_a = WellSampleI()
     image_a = self.new_image(name=self.uuid())
     ws_a.image = image_a
     ws_b = WellSampleI()
     image_b = self.new_image(name=self.uuid())
     ws_b.image = image_b
     plate_acquisition = PlateAcquisitionI()
     plate_acquisition.plate = plate
     ws_a.plateAcquisition = plate_acquisition
     ws_b.plateAcquisition = plate_acquisition
     well_a.addWellSample(ws_a)
     well_a.addWellSample(ws_b)
     plate.addWell(well_a)
     plate.addWell(well_b)
     screen.linkPlate(plate)
     return self.update.saveAndReturnObject(screen)
Code Example #25
 def plates(self):
     """Returns 2 new OMERO Plates with required fields set."""
     plate = omero.model.PlateI()
     plate.name = rstring("A_%s" % self.uuid())
     plate2 = omero.model.PlateI()
     plate2.name = rstring("B_%s" % self.uuid())
     return self.update.saveAndReturnArray([plate, plate2])
Code Example #26
    def pix(self, x=10, y=10, z=10, c=3, t=50, client=None):
        """
        Creates an int8 Pixels object of the given size in the database.
        No pixel data is written.
        """
        image = self.new_image()
        pixels = PixelsI()
        pixels.sizeX = rint(x)
        pixels.sizeY = rint(y)
        pixels.sizeZ = rint(z)
        pixels.sizeC = rint(c)
        pixels.sizeT = rint(t)
        pixels.sha1 = rstring("")
        pixels.pixelsType = PixelsTypeI()
        pixels.pixelsType.value = rstring("int8")
        pixels.dimensionOrder = DimensionOrderI()
        pixels.dimensionOrder.value = rstring("XYZCT")
        image.addPixels(pixels)

        if client is None:
            client = self.client
        update = client.sf.getUpdateService()
        image = update.saveAndReturnObject(image)
        pixels = image.getPrimaryPixels()
        return pixels
Code Example #27
 def new_tag(self):
     """
     Returns a new Tag object.
     """
     tag = omero.model.TagAnnotationI()
     tag.textValue = rstring(self.uuid())
     tag.ns = rstring("pytest")
     return self.sf.getUpdateService().saveAndReturnObject(tag)
Code Example #28
File: test_csrf.py Project: sbesson/openmicroscopy
def new_tag(request, itest, client):
    """
    Returns a new Tag object.
    """
    tag = omero.model.TagAnnotationI()
    tag.textValue = rstring(itest.uuid())
    tag.ns = rstring("pytest")
    return client.getSession().getUpdateService().saveAndReturnObject(tag)
Code Example #29
def project_with_datasets(project):
    for dataset_id in range(1, 3):
        o = DatasetI()
        o.id = rlong(dataset_id)
        o.name = rstring('dataset_name_%d' % dataset_id)
        o.description = rstring('dataset_description_%d' % dataset_id)
        project.linkDataset(o)
    return project
Code Example #30
File: fs.py Project: kennethgillen/openmicroscopy
def prep_directory(client, mrepo):
    """
    Create an empty FS directory by performing an import and
    then deleting the created fileset.
    """

    from omero.cmd import Delete2, DoAll
    from omero.grid import ImportSettings

    from omero.model import ChecksumAlgorithmI
    from omero.model import FilesetI
    from omero.model import FilesetEntryI
    from omero.model import UploadJobI

    fs = FilesetI()
    fs.linkJob(UploadJobI())
    entry = FilesetEntryI()
    entry.clientPath = rstring("README.txt")
    fs.addFilesetEntry(entry)
    settings = ImportSettings()
    settings.checksumAlgorithm = ChecksumAlgorithmI()
    settings.checksumAlgorithm.value = rstring("SHA1-160")
    proc = mrepo.importFileset(fs, settings)
    try:

        tmp = create_path()
        prx = proc.getUploader(0)
        try:
            tmp.write_text("THIS IS A PLACEHOLDER")
            hash = client.sha1(tmp)
            with open(tmp, "r") as source:
                client.write_stream(source, prx)
        finally:
            prx.close()
        tmp.remove()

        handle = proc.verifyUpload([hash])
        try:
            req = handle.getRequest()
            fs = req.activity.parent
        finally:
            handle.close()

        dir = unwrap(mrepo.treeList(fs.templatePrefix.val))
        oid = dir.items()[0][1].get("id")
        ofile = client.sf.getQueryService().get("OriginalFile", oid)

        delete1 = Delete2(targetObjects={"Fileset": [fs.id.val]})
        delete2 = Delete2(targetObjects={"OriginalFile": [ofile.id.val]})
        doall = DoAll()
        doall.requests = [delete1, delete2]
        cb = client.submit(doall)
        cb.close(True)

    finally:
        proc.close()

    return fs.templatePrefix.val
Code Example #31
File: test_chgrp.py Project: patrick330602/fomf3dbd
 def dataset(self):
     """Returns a new OMERO Project with required fields set."""
     dataset = DatasetI()
     dataset.name = rstring(self.uuid())
     return self.update.saveAndReturnObject(dataset)
Code Example #32
    ids = scriptParams["IDs"]

    # dataType is 'Dataset' or 'Image' so we can use it directly in
    # getObjects()
    obs = conn.getObjects(dataType, ids)  # generator of images or datasets
    objects = list(obs)
    return objects


if __name__ == "__main__":
    """
    The main entry point of the script, as called by the client via the
    scripting service, passing the required parameters.
    """

    dataTypes = [rstring('Dataset')]  # only works on datasets

    # Here we define the script name and description.
    # Good practice to put url here to give users more guidance on how to run
    # your script.
    client = scripts.client(
        'Key_Val_from_Description.py',
        (" Adds key-value metadata pairs to images in a data set from "
         " the description for a dataset or collections of datasets"
         " k-v pairs taken from the dataset description"
         " and by parsing the filename"
         " Image IDs or by the Dataset IDs.\nSee"
         " http://www.openmicroscopy.org/site/support/omero5.2/developers/"
         "scripts/user-guide.html for the tutorial that uses this script."),
        scripts.String("Data_Type",
                       optional=False,
Code Example #33
def AddKeysToMatchingFiles(conn,
                           Id,
                           global_kv,
                           template,
                           file_keys,
                           spec_kv=None):
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    dataset = conn.getObject("Dataset", Id)

    # compile the regexp
    if (template is not None):
        template = "^{}$".format(template)
        template = template.replace("*", "([^\s_\/]+)")
        template = template.replace("?", "[^\s_\/]")
        regexp = re.compile(template)

    # add the metadata to the images
    nimg = dataset.countChildren()
    nimg_updated = 0
    nkv_added = 0
    for image in dataset.listChildren():
        if (not (image.canAnnotate() and image.canLink())):
            message = "You don't have permission to add annotations to {}".format(
                image.getName())
            client.setOutput("Message", rstring(message))
            return

        existing_kv = GetExistingMapAnnotions(image)
        updated_kv = copy.deepcopy(existing_kv)
        for key, vals in global_kv.items():
            if key not in updated_kv: updated_kv[key] = set()
            for val in vals:
                updated_kv[key].add(val)

        if (template is not None):
            # apply the template to the file name
            name = image.getName()

            # this adds directory path to filename
            # is probably better to extend name of file with path first
            #path = os.path.dirname(image.getImportedImageFilePaths()['client_paths'][0])
            #filename = path+"/"+name

            filename = name
            match = regexp.search(filename)

            if (match is not None):
                print("Match found", filename)
                for i, val in enumerate(match.groups()):
                    i1 = i
                    if (i1 in file_keys):
                        key = file_keys[i1]
                        if key not in updated_kv: updated_kv[key] = set()
                        updated_kv[key].add(val)

                if (spec_kv is not None):
                    for key, vals in spec_kv.items():
                        if key not in updated_kv: updated_kv[key] = set()
                        for val in vals:
                            updated_kv[key].add(val)

        nold_i = sum(map(len, existing_kv.values()))
        nnew_i = sum(map(len, updated_kv.values()))
        nkv_added = nkv_added + (nnew_i - nold_i)

        if (existing_kv != updated_kv):
            RemoveMapAnnotations(conn, 'image', image.getId())
            map_ann = omero.gateway.MapAnnotationWrapper(conn)
            namespace = omero.constants.metadata.NSCLIENTMAPANNOTATION
            # namespace = "openmicroscopy.org/mapr/gene"
            map_ann.setNs(namespace)
            # convert the ordered dict to a list of lists
            kv_list = []
            for k, vset in updated_kv.items():
                for v in vset:
                    kv_list.append([k, v])
            map_ann.setValue(kv_list)
            map_ann.save()
            image.linkAnnotation(map_ann)

            nimg_updated = nimg_updated + 1

    return nimg_updated, nkv_added
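AddKeysToMatchingFiles() relies on two helpers that are not shown here, GetExistingMapAnnotions and RemoveMapAnnotations. A plausible sketch of the first one, inferred from how its return value is used above as a dict of sets (this is an assumption, not the original implementation):

def GetExistingMapAnnotions(obj):
    """Collect the key/value pairs of all MapAnnotations on a wrapped object."""
    existing = {}
    for ann in obj.listAnnotations():
        if isinstance(ann, omero.gateway.MapAnnotationWrapper):
            for key, value in ann.getValue():
                existing.setdefault(key, set()).add(value)
    return existing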
Code Example #34
File: Combine_Images.py Project: cneves/scripts
def makeSingleImage(services, parameterMap, imageIds, dataset, colourMap):
    """
    This takes the images specified by imageIds, sorts them in to Z,C,T
    dimensions according to parameters in the parameterMap, assembles them
    into a new Image, which is saved in dataset.
    """

    if len(imageIds) == 0:
        return

    renderingEngine = services["renderingEngine"]
    queryService = services["queryService"]
    pixelsService = services["pixelsService"]
    rawPixelStore = services["rawPixelStore"]
    rawPixelStoreUpload = services["rawPixelStoreUpload"]
    updateService = services["updateService"]
    containerService = services["containerService"]

    print "makeSingleImage: imageId count:", len(imageIds)

    # Filter images by name if user has specified filter.
    idNameMap = None
    if "Filter_Names" in parameterMap:
        filterString = parameterMap["Filter_Names"]
        if len(filterString) > 0:
            print "Filtering images for names containing '%s'" % filterString
            idNameMap = getImageNames(queryService, imageIds)
            imageIds = [
                i for i in imageIds if idNameMap[i].find(filterString) > -1
            ]

    imageId = imageIds[0]

    # get pixels, with pixelsType, from the first image
    query_string = "select p from Pixels p join fetch p.image i join "\
        "fetch p.pixelsType pt where i.id='%d'" % imageId
    pixels = queryService.findByQuery(query_string, None)
    pixelsType = pixels.getPixelsType()  # use the pixels type object we
    # got from the first image.

    # combined image will have same X and Y sizes...
    sizeX = pixels.getSizeX().getValue()
    sizeY = pixels.getSizeY().getValue()
    sourceZ = pixels.getSizeZ().getValue()  # if we have a Z stack, use this
    # in new image (don't combine Z)

    # Now we need to find where our planes are coming from.
    # imageMap is a map of destination:source, defined as (newX, newY,
    # newZ):(imageId, z)
    if "Manually_Define_Dimensions" in parameterMap and \
            parameterMap["Manually_Define_Dimensions"]:
        sizeZ, sizeC, sizeT, imageMap = manuallyAssignImages(
            parameterMap, imageIds, sourceZ)
        cNames = {}
    else:
        sizeZ, cNames, sizeT, imageMap = assignImagesByRegex(
            parameterMap, imageIds, queryService, sourceZ, idNameMap)
        sizeC = len(cNames)

    print "sizeZ: %s  sizeC: %s  sizeT: %s" % (sizeZ, sizeC, sizeT)

    if "Channel_Names" in parameterMap:
        for c, name in enumerate(parameterMap["Channel_Names"]):
            cNames[c] = name

    imageName = "combinedImage"
    description = "created from image Ids: %s" % imageIds

    channelList = range(sizeC)
    iId = pixelsService.createImage(sizeX, sizeY, sizeZ, sizeT, channelList,
                                    pixelsType, imageName, description)
    image = containerService.getImages("Image", [iId.getValue()], None)[0]

    pixelsId = image.getPrimaryPixels().getId().getValue()
    rawPixelStoreUpload.setPixelsId(pixelsId, True)

    for theC in range(sizeC):
        minValue = 0
        maxValue = 0
        for theZ in range(sizeZ):
            for theT in range(sizeT):
                if (theZ, theC, theT) in imageMap:
                    imageId, planeZ = imageMap[(theZ, theC, theT)]
                    print "Getting plane from Image ID:", imageId
                    query_string = "select p from Pixels p join fetch "\
                        "p.image i join fetch p.pixelsType pt where "\
                        "i.id='%d'" % imageId
                    pixels = queryService.findByQuery(query_string, None)
                    plane2D = getPlane(rawPixelStore, pixels, planeZ, 0, 0)
                else:
                    print "Creating blank plane for theZ, theC, theT",\
                        theZ, theC, theT
                    plane2D = zeros((sizeY, sizeX))
                print "Uploading plane: theZ: %s, theC: %s, theT: %s"\
                    % (theZ, theC, theT)
                scriptUtil.uploadPlaneByRow(rawPixelStoreUpload, plane2D, theZ,
                                            theC, theT)
                minValue = min(minValue, plane2D.min())
                maxValue = max(maxValue, plane2D.max())
        print "Setting the min, max ", minValue, maxValue
        pixelsService.setChannelGlobalMinMax(pixelsId, theC, float(minValue),
                                             float(maxValue))
        rgba = COLOURS["White"]
        if theC in colourMap:
            rgba = colourMap[theC]
            print "Setting the Channel colour:", rgba
        scriptUtil.resetRenderingSettings(renderingEngine, pixelsId, theC,
                                          minValue, maxValue, rgba)

    # rename new channels
    pixels = renderingEngine.getPixels()  # has channels loaded - (getting
    # Pixels from image doesn't)
    i = 0
    for c in pixels.iterateChannels():  # c is an instance of
        # omero.model.ChannelI
        if i >= len(cNames):
            break
        lc = c.getLogicalChannel()  # returns omero.model.LogicalChannelI
        lc.setName(rstring(cNames[i]))
        updateService.saveObject(lc)
        i += 1

    # put the image in dataset, if specified.
    if dataset and dataset.canLink():
        link = omero.model.DatasetImageLinkI()
        link.parent = omero.model.DatasetI(dataset.getId(), False)
        link.child = omero.model.ImageI(image.id.val, False)
        updateService.saveAndReturnObject(link)
    else:
        link = None

    return image, link
Code Example #35
File: user.py Project: will-moore/omero-py
class UserControl(UserGroupControl):
    def _configure(self, parser):

        self.exc = ExceptionHandler()

        parser.add_login_arguments()
        sub = parser.sub()

        add = parser.add(sub, self.add, help="Add user")
        add.add_argument("--ignore-existing",
                         action="store_true",
                         default=False,
                         help="Do not fail if user already exists")
        add.add_argument("-m",
                         "--middlename",
                         help="Middle name, if available")
        add.add_argument("-e", "--email")
        add.add_argument("-i", "--institution")
        # Capitalized since conflict with main values
        add.add_argument("-a",
                         "--admin",
                         action="store_true",
                         help="Whether the user should be an admin")
        add.add_argument("username", help="User's login name")
        add.add_argument("firstname", help="User's first name")
        add.add_argument("lastname", help="User's last name")
        self.add_group_arguments(add, " to join")

        password_group = add.add_mutually_exclusive_group()
        password_group.add_argument("-P",
                                    "--userpassword",
                                    help="Password for user")
        password_group.add_argument("--no-password",
                                    action="store_true",
                                    default=False,
                                    help="Create user with empty password")

        list = parser.add(sub,
                          self.list,
                          help="List information about all users")

        info = parser.add(
            sub, self.info,
            "List information about the user(s). Default to the context user")
        self.add_user_arguments(info)

        for x in (list, info):
            x.add_style_argument()
            x.add_group_print_arguments()
            x.add_user_sorting_arguments()

        listgroups = parser.add(
            sub, self.listgroups,
            "List the groups of the user. Default to the context user")
        self.add_user_arguments(listgroups)
        listgroups.add_style_argument()
        listgroups.add_user_print_arguments()
        listgroups.add_group_sorting_arguments()

        password = parser.add(sub, self.password, help="Set user's password")
        password.add_argument("username",
                              nargs="?",
                              help="Username if not the current user")

        email = parser.add(sub, self.email, help="List users' email addresses")
        email.add_argument("-n",
                           "--names",
                           action="store_true",
                           default=False,
                           help="Print user names along with email addresses")
        email.add_argument("-1",
                           "--one",
                           action="store_true",
                           default=False,
                           help="Print one user per line")
        email.add_argument("-i",
                           "--ignore",
                           action="store_true",
                           default=False,
                           help="Ignore users without email addresses")
        email.add_argument(
            "--all",
            action="store_true",
            default=False,
            help="Include all users, including deactivated accounts")

        joingroup = parser.add(sub, self.joingroup, "Join one or more groups")
        self.add_id_name_arguments(joingroup,
                                   "user. Default to the current user")
        group = self.add_group_arguments(joingroup, " to join")
        group.add_argument("--as-owner",
                           action="store_true",
                           default=False,
                           help="Join the group(s) as an owner")

        leavegroup = parser.add(sub, self.leavegroup,
                                "Leave one or more groups")
        self.add_id_name_arguments(leavegroup,
                                   "user. Default to the current user")
        group = self.add_group_arguments(leavegroup, " to leave")
        group.add_argument("--as-owner",
                           action="store_true",
                           default=False,
                           help="Leave the owner list of the group(s)")

        for x in (email, password, list, add, joingroup, leavegroup):
            x.add_login_arguments()

    def format_name(self, exp):
        record = ""
        fn = _(exp.firstName)
        mn = " "
        if _(exp.middleName):
            mn = " %s " % _(exp.middleName)
        ln = _(exp.lastName)
        record += "%s%s%s" % (fn, mn, ln)
        return record

    def email(self, args):
        c = self.ctx.conn(args)
        a = c.sf.getAdminService()
        r = a.getSecurityRoles()

        skipped = []
        records = []
        for exp in a.lookupExperimenters():

            # Handle users without email
            if not _(exp.email):
                if not args.ignore:
                    skipped.append(exp)
                continue

            # Handle deactivated users
            if not args.all:
                groups = exp.linkedExperimenterGroupList()
                group_ids = [x.id.val for x in groups]
                if r.userGroupId not in group_ids:
                    continue

            record = ""
            if args.names:
                record += '"%s"' % self.format_name(exp)
                record += " <%s>" % _(exp.email)
            else:
                record += _(exp.email)

            records.append(record)

        if args.one:
            for record in records:
                self.ctx.out(record)
        else:
            self.ctx.out(", ".join(records))

        if skipped:
            self.ctx.err("Missing email addresses:")
            for s in skipped:
                self.ctx.err(self.format_name(s))

    def password(self, args):
        import omero
        from omero.rtypes import rstring
        client = self.ctx.conn(args)
        own_name = self.ctx.get_event_context().userName
        admin = client.sf.getAdminService()

        # tickets 3202, 5841
        own_pw = self._ask_for_password(" for your user (%s)" % own_name,
                                        strict=False)
        try:
            client.sf.setSecurityPassword(own_pw)
            self.ctx.out("Verified password.\n")
        except omero.SecurityViolation:
            import traceback
            self.ctx.dbg(traceback.format_exc())
            self.ctx.die(456, "SecurityViolation: Bad credentials")

        if args.username:
            try:
                e = admin.lookupExperimenter(args.username)
            except omero.ApiUsageException:
                self.ctx.die(457, "Unknown user: %s" % args.username)
                return  # Never reached
            self.ctx.out("Changing password for %s (id:%s)" %
                         (args.username, e.id.val))
        else:
            self.ctx.out("Changing password for %s" % own_name)

        pw = self._ask_for_password(" to be set")
        pw = rstring(pw)
        if args.username:
            admin.changeUserPassword(args.username, pw)
        else:
            admin.changePassword(pw)
        self.ctx.out("Password changed")
Code Example #36
 def __init__(self, roicoord=ROICoordinate(), pointsList=(0, 0)):
     warnings.warn("This module is deprecated as of OMERO 5.3.0",
                   DeprecationWarning)
     ShapeData.__init__(self)
     self.points = rstring(self.listToString(pointsList))
     self.setCoord(roicoord)
Code Example #37
def processImage(conn, imageId, parameterMap):
    """
    Process an image.
    If imageStack is True, we make a Z-stack using one tile from each ROI (c=0)
    Otherwise, we create a 5D image representing the ROI "cropping" the
    original image. Image is put in a dataset if specified.
    """

    createDataset = parameterMap['New_Dataset']
    datasetName = parameterMap['New_Dataset_Name']

    image = conn.getObject("Image", imageId)
    if image is None:
        return

    parentDataset = image.getParent()
    parentProject = parentDataset.getParent()

    dataset = None
    if not createDataset:
        dataset = parentDataset

    imageName = image.getName()
    updateService = conn.getUpdateService()

    pixels = image.getPrimaryPixels()
    W = image.getSizeX()
    H = image.getSizeY()

    # note pixel sizes (if available) to set for the new images
    physicalSizeX = pixels.getPhysicalSizeX()
    physicalSizeY = pixels.getPhysicalSizeY()
    physicalSizeZ = pixels.getPhysicalSizeZ()

    # Store original channel details
    cNames = []
    emWaves = []
    exWaves = []
    for index, c in enumerate(image.getChannels()):
        lc = c.getLogicalChannel()
        cNames.append(str(c.getLabel()))
        emWaves.append(lc.getEmissionWave())
        exWaves.append(lc.getExcitationWave())

    # x, y, w, h, zStart, zEnd, tStart, tEnd
    rois = getRectangles(conn, image)
    print "rois"
    print rois

    # Make a new 5D image per ROI
    iIds = []
    for index, r in enumerate(rois):
        x, y, w, h, z1, z2, t1, t2 = r
        # Bounding box
        if x < 0:
            x = 0
        if y < 0:
            y = 0
        if x + w > W:
            w = W - x
        if y + h > H:
            h = H - y

        if parameterMap['Entire_Stack']:
            if parameterMap['Z_Stack']:
                z1 = 0
                z2 = image.getSizeZ() - 1
            if parameterMap['T_Stack']:
                t1 = 0
                t2 = image.getSizeT() - 1

        print "  ROI x: %s y: %s w: %s h: %s z1: %s z2: %s t1: %s t2: %s" % (
            x, y, w, h, z1, z2, t1, t2)

        # need a tile generator to get all the planes within the ROI
        sizeZ = z2 - z1 + 1
        sizeT = t2 - t1 + 1
        sizeC = image.getSizeC()
        zctTileList = []
        tile = (x, y, w, h)
        print "zctTileList..."
        for z in range(z1, z2 + 1):
            for c in range(sizeC):
                for t in range(t1, t2 + 1):
                    zctTileList.append((z, c, t, tile))

        def tileGen():
            for i, t in enumerate(pixels.getTiles(zctTileList)):
                yield t

        print "sizeZ, sizeC, sizeT", sizeZ, sizeC, sizeT
        description = """\
Created from Image ID: %d
  Name: %s
  x: %d y: %d w: %d h: %d""" % (imageId, imageName, x, y, w, h)
        # make sure that script_utils creates a NEW rawPixelsStore
        serviceFactory = conn.c.sf  # noqa
        newI = conn.createImageFromNumpySeq(tileGen(),
                                            createImageName(imageName, index),
                                            sizeZ=sizeZ,
                                            sizeC=sizeC,
                                            sizeT=sizeT,
                                            description=description,
                                            dataset=dataset)
        iIds.append(newI.getId())

        # Apply colors from the original image to the new one
        if newI._prepareRenderingEngine():
            renderingEngine = newI._re

            # Apply the original channel names
            newPixels = renderingEngine.getPixels()

            for i, c in enumerate(newPixels.iterateChannels()):
                lc = c.getLogicalChannel()
                lc.setEmissionWave(emWaves[i])
                lc.setExcitationWave(exWaves[i])
                lc.setName(rstring(cNames[i]))
                updateService.saveObject(lc)

            renderingEngine.resetDefaultSettings(True)

        # Apply the original pixel size - Get the object again to refresh state
        newImg = conn.getObject("Image", newI.getId())
        newPixels = newImg.getPrimaryPixels()
        newPixels.setPhysicalSizeX(physicalSizeX)
        newPixels.setPhysicalSizeY(physicalSizeY)
        newPixels.setPhysicalSizeZ(physicalSizeZ)
        newPixels.save()

    if len(iIds) > 0 and createDataset:

        # create a new dataset for new images
        print "\nMaking Dataset '%s' of Images from ROIs of Image: %s" % (
            datasetName, imageId)
        dataset = omero.model.DatasetI()
        dataset.name = rstring(datasetName)
        desc = """\
Images in this Dataset are from ROIs of parent Image:
Name: %s
Image ID: %d""" % (imageName, imageId)
        dataset.description = rstring(desc)
        dataset = updateService.saveAndReturnObject(dataset)
        for iid in iIds:
            link = omero.model.DatasetImageLinkI()
            link.parent = omero.model.DatasetI(dataset.id.val, False)
            link.child = omero.model.ImageI(iid, False)
            updateService.saveObject(link)
        if parentProject:  # and put it in the current project
            link = omero.model.ProjectDatasetLinkI()
            link.parent = omero.model.ProjectI(parentProject.getId(), False)
            link.child = omero.model.DatasetI(dataset.id.val, False)
            updateService.saveAndReturnObject(link)

    return len(iIds)
Code Example #38
def processData(conn, scriptParams):
    """
    For each Dataset, process each Image adding the length of each ROI line to
    an OMERO.table.
    Also calculate the average of all lines for each Image and add this as a
    Double Annotation on Image.
    """

    datasetIds = scriptParams['IDs']
    for dataset in conn.getObjects("Dataset", datasetIds):

        # first create our table...
        # columns we want are: imageId, roiId, shapeId, theZ, theT,
        # lineLength, shapetext.
        columns = [
            omero.grid.LongColumn('imageId', '', []),
            omero.grid.RoiColumn('roiId', '', []),
            omero.grid.LongColumn('shapeId', '', []),
            omero.grid.LongColumn('theZ', '', []),
            omero.grid.LongColumn('theT', '', []),
            omero.grid.DoubleColumn('lineLength', '', []),
            omero.grid.StringColumn('shapeText', '', 64, [])
            ]
        # create and initialize the table
        table = conn.c.sf.sharedResources().newTable(
            1, "LineLengths%s" % str(random()))
        table.initialize(columns)

        # make a local array of our data (add it to table in one go)
        imageIds = []
        roiIds = []
        shapeIds = []
        theZs = []
        theTs = []
        lineLengths = []
        shapeTexts = []
        roiService = conn.getRoiService()
        lengthsForImage = []
        for image in dataset.listChildren():
            result = roiService.findByImage(image.getId(), None)
            for roi in result.rois:
                for s in roi.copyShapes():
                    if type(s) == omero.model.LineI:
                        imageIds.append(image.getId())
                        roiIds.append(roi.getId().getValue())
                        shapeIds.append(s.getId().getValue())
                        theZs.append(s.getTheZ().getValue())
                        theTs.append(s.getTheT().getValue())
                        x1 = s.getX1().getValue()
                        x2 = s.getX2().getValue()
                        y1 = s.getY1().getValue()
                        y2 = s.getY2().getValue()
                        x = x1 - x2
                        y = y1 - y2
                        length = math.sqrt(math.pow(x, 2) + math.pow(y, 2))
                        lineLengths.append(length)
                        lengthsForImage.append(length)
                        if s.getTextValue():
                            shapeTexts.append(s.getTextValue().getValue())
                        else:
                            shapeTexts.append("")
            if len(lengthsForImage) == 0:
                print "No lines found on Image:", image.getName()
                continue
            imgAverage = sum(lengthsForImage) / len(lengthsForImage)
            print ("Average length of line for Image: %s is %s"
                   % (image.getName(), imgAverage))

            # Add the average as an annotation on each image.
            lengthAnn = omero.model.DoubleAnnotationI()
            lengthAnn.setDoubleValue(rdouble(imgAverage))
            lengthAnn.setNs(
                rstring("imperial.training.demo.lineLengthAverage"))
            link = omero.model.ImageAnnotationLinkI()
            link.setParent(omero.model.ImageI(image.getId(), False))
            link.setChild(lengthAnn)
            conn.getUpdateService().saveAndReturnObject(link)
            lengthsForImage = []    # reset for next image.

        # Prepare data for adding to OMERO table.
        data = [
            omero.grid.LongColumn('imageId', '', imageIds),
            omero.grid.RoiColumn('roiId', '', roiIds),
            omero.grid.LongColumn('shapeId', '', shapeIds),
            omero.grid.LongColumn('theZ', '', theZs),
            omero.grid.LongColumn('theT', '', theTs),
            omero.grid.DoubleColumn('lineLength', '', lineLengths),
            omero.grid.StringColumn('shapeText', '', 64, shapeTexts),
            ]
        table.addData(data)

        # get the table as an original file & attach this data to Dataset
        orig_file = table.getOriginalFile()
        fileAnn = omero.model.FileAnnotationI()
        fileAnn.setFile(orig_file)
        link = omero.model.DatasetAnnotationLinkI()
        link.setParent(omero.model.DatasetI(dataset.getId(), False))
        link.setChild(fileAnn)
        # conn.getUpdateService().saveAndReturnObject(link)

        a = array(lineLengths)
        print "std", a.std()
        print "mean", a.mean()
        print "max", a.max()
        print "min", a.min()

        # lets retrieve all the lines that are longer than 2 standard
        # deviations above mean
        limit = a.mean() + (2 * a.std())
        print "Retrieving all lines longer than: ", limit
        rowCount = table.getNumberOfRows()
        queryRows = table.getWhereList(
            "lineLength > %s" % limit, variables={}, start=0, stop=rowCount,
            step=0)
        if len(queryRows) == 0:
            print "No lines found"
        else:
            data = table.readCoordinates(queryRows)
            for col in data.columns:
                print "Query Results for Column: ", col.name
                for v in col.values:
                    print "   ", v
Code Example #39
def attributes_by_attributes(conn,
                             name="Gene Symbol",
                             value="ASH2L",
                             ns="openmicroscopy.org/mapr/gene",
                             ns2="openmicroscopy.org/mapr/phenotype",
                             name2=None):
    """
    Return a list of neighbours attributes
    for given case insensitive attribute value.
    """
    from omero.rtypes import rstring, rlist, unwrap
    from omero.sys import ParametersI

    params = ParametersI()
    params.addString("value", value.lower())
    q = ("select distinct new map( mv.value as value) "
         "from Annotation as a "
         "join a.mapValue as mv "
         "where lower(mv.value) = :value {where_claus}")
    where_claus = []
    if name:
        params.addString("name", name)
        where_claus.append("and mv.name = :name")
    q = q.format(**{'where_claus': " ".join(where_claus)})

    values = [
        v[0]['value']
        for v in unwrap(conn.getQueryService().projection(q, params))
    ]

    params = ParametersI()
    valuelist = [rstring(unicode(v)) for v in values]
    params.add('values', rlist(valuelist))
    params.addString("ns", ns)
    params.addString("ns2", ns2)

    q = ("select distinct new map("
         "mv.name as name, "
         "mv.value as value, "
         "mv2.name as name2, "
         "mv2.value as value2) "
         "from Image as i "
         "join i.annotationLinks as ial "
         "join i.annotationLinks as ial2 "
         "join ial.child as a "
         "join a.mapValue as mv "
         "join ial2.child as a2 "
         "join a2.mapValue as mv2 "
         "where a.ns = :ns and a2.ns = :ns2 "
         "and mv.value in (:values) {where_claus}")

    where_claus = []
    if name:
        params.addString("name", name)
        where_claus.append("and mv.name = :name")
    if name2:
        params.addString("name2", name2)
        where_claus.append("and mv2.name = :name2")

    q = q.format(**{'where_claus': " ".join(where_claus)})

    res = {}
    for r in unwrap(conn.getQueryService().projection(q, params)):
        r = r[0]
        try:
            res[(r['name'], r['value'])].append((r['name2'], r['value2']))
        except KeyError:
            res[(r['name'], r['value'])] = [(r['name2'], r['value2'])]
    return res
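
A brief usage sketch for the function above; the host, credentials and the name2 key ("Phenotype") are placeholders, not values taken from the example:

# Usage sketch for attributes_by_attributes(); host, credentials and the
# 'Phenotype' key are placeholders.
from omero.gateway import BlitzGateway

conn = BlitzGateway("username", "password", host="localhost", port=4064)
conn.connect()
try:
    result = attributes_by_attributes(
        conn, name="Gene Symbol", value="ASH2L",
        ns="openmicroscopy.org/mapr/gene",
        ns2="openmicroscopy.org/mapr/phenotype",
        name2="Phenotype")
    for (name, value), neighbours in result.items():
        print("%s=%s -> %s" % (name, value, neighbours))
finally:
    conn.close()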
Code Example #40
0
File: ROIs.py Project: imcf/openmicroscopy
        (theZ, theT, x, y, width, height)

# create an ROI, link it to Image
roi = omero.model.RoiI()
# use the omero.model.ImageI that underlies the 'image' wrapper
roi.setImage(image._obj)

# create a rectangle shape and add to ROI
rect = omero.model.RectI()
rect.x = rdouble(x)
rect.y = rdouble(y)
rect.width = rdouble(width)
rect.height = rdouble(height)
rect.theZ = rint(theZ)
rect.theT = rint(theT)
rect.textValue = rstring("test-Rectangle")
roi.addShape(rect)

# create an Ellipse shape and add to ROI
ellipse = omero.model.EllipseI()
ellipse.cx = rdouble(y)
ellipse.cy = rdouble(x)
ellipse.rx = rdouble(width)
ellipse.ry = rdouble(height)
ellipse.theZ = rint(theZ)
ellipse.theT = rint(theT)
ellipse.textValue = rstring("test-Ellipse")
roi.addShape(ellipse)

# Save the ROI (saves any linked shapes too)
r = updateService.saveAndReturnObject(roi)
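
As a follow-up sketch (assuming a connected BlitzGateway named conn from the surrounding script), the saved ROI and its shapes can be read back with the ROI service:

# Sketch: read the ROI and its shapes back via the ROI service.
# Assumes conn is a connected BlitzGateway from the surrounding script.
roi_service = conn.getRoiService()
result = roi_service.findByImage(image.getId(), None)
for roi in result.rois:
    print "ROI:", roi.getId().getValue()
    for shape in roi.copyShapes():
        print "  shape:", shape.__class__.__name__, \
            "z =", shape.getTheZ().getValue(), "t =", shape.getTheT().getValue()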
Code Example #41
0
File: test_chgrp.py Project: patrick330602/fomf3dbd
 def project(self):
     """Returns a new OMERO Project with required fields set."""
     project = ProjectI()
     project.name = rstring(self.uuid())
     return self.update.saveAndReturnObject(project)
Code Example #42
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from omero.rtypes import rstring, rbool, rlong, rint

s = rstring("value")
b = rbool(True)
l = rlong(1)
i = rint(1)
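
The wrapped values can be turned back into plain Python values with getValue(), the val attribute, or omero.rtypes.unwrap(), which also handles nested collections; a small sketch:

# Sketch: converting rtypes back to plain Python values.
from omero.rtypes import rstring, rint, rlist, unwrap

assert rstring("value").getValue() == "value"
assert rint(1).val == 1
assert unwrap(rlist([rint(1), rstring("two")])) == [1, "two"]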
Code Example #43
0
File: user.py Project: will-moore/omero-py
    def add(self, args):
        email = args.email
        login = args.username
        first = args.firstname
        middle = args.middlename
        last = args.lastname
        inst = args.institution
        pasw = args.userpassword

        import omero
        from omero.rtypes import rbool, rstring
        from omero_model_ExperimenterI import ExperimenterI as Exp
        from omero_model_ExperimenterGroupI import ExperimenterGroupI as Grp
        c = self.ctx.conn(args)
        e = Exp()
        e.omeName = rstring(login)
        e.firstName = rstring(first)
        e.lastName = rstring(last)
        e.middleName = rstring(middle)
        e.email = rstring(email)
        e.institution = rstring(inst)
        e.ldap = rbool(False)

        # Fail-fast if no-password is passed and the server does not accept
        # empty passwords
        configService = c.getSession().getConfigService()
        password_required = configService.getConfigValue(
            "omero.security.password_required").lower()
        if args.no_password and password_required != 'false':
            self.ctx.die(
                502, "Server does not allow user creation with empty"
                " passwords")

        # Check user existence
        admin = c.getSession().getAdminService()
        try:
            usr = admin.lookupExperimenter(login)
            if usr:
                if args.ignore_existing:
                    self.ctx.out("User exists: %s (id=%s)" %
                                 (login, usr.id.val))
                    return
                else:
                    self.ctx.die(
                        3, "User exists: %s (id=%s)" % (login, usr.id.val))
        except omero.ApiUsageException:
            pass  # Apparently no such user exists

        [gid, groups] = self.list_groups(admin, args, use_context=False)

        roles = admin.getSecurityRoles()
        groups.append(Grp(roles.userGroupId, False))
        if args.admin:
            groups.append(Grp(roles.systemGroupId, False))

        group = groups.pop(0)

        try:
            if args.no_password:
                id = admin.createExperimenter(e, group, groups)
                self.ctx.out("Added user %s (id=%s) without password" %
                             (login, id))
            else:
                if pasw is None:
                    pasw = self._ask_for_password(" for your new user (%s)" %
                                                  login,
                                                  strict=True)
                id = admin.createExperimenterWithPassword(
                    e, rstring(pasw), group, groups)
                self.ctx.out("Added user %s (id=%s) with password" %
                             (login, id))
        except omero.ValidationException, ve:
            # Possible, though unlikely after previous check
            if self.exc.is_constraint_violation(ve):
                self.ctx.die(66, "User already exists: %s" % login)
            else:
                self.ctx.die(67,
                             "Unknown ValidationException: %s" % ve.message)
Code Example #44
0
 def setFillSettings(self, colour):
     warnings.warn("This module is deprecated as of OMERO 5.3.0",
                   DeprecationWarning)
     self.fillColour = rstring(colour)
Code Example #45
0
# code) into the point indicated below.
# A more complete template, for 'real-world' scripts, is also included in this
# folder
# This script takes an Image ID as a parameter from the scripting service.
from omero.rtypes import rlong, rstring, unwrap
from omero.gateway import BlitzGateway
import omero
import omero.scripts as scripts

# Script definition

# Script name, description and 2 parameters are defined here.
# These parameters will be recognised by the Insight and web clients and
# populated with the currently selected Image(s)

dataTypes = [rstring('Project')]
client = scripts.client(
    "Write_Data-4.py",
    """Downloads a named file annotation on a Project""",
    # first parameter
    scripts.String("Data_Type",
                   optional=False,
                   values=dataTypes,
                   default="Project"),
    # second parameter
    scripts.List("IDs", optional=False).ofType(rlong(0)),
    scripts.String("File_Name", optional=False),
)
# we can now create our Blitz Gateway by wrapping the client object
conn = BlitzGateway(client_obj=client)
Code Example #46
0
    def testObjectCreationEqualsAndHash(self):

        # RBool
        true1 = rbool(True)
        true2 = rbool(True)
        false1 = rbool(False)
        false2 = rbool(False)
        assert true1 == true2
        assert false1 == false2
        assert true1.getValue()
        assert not false1.getValue()
        assert true1 == true2
        assert true1 != false1

        # RDouble
        double_zero1 = rdouble(0.0)
        double_zero2 = rdouble(0.0)
        double_notzero1 = rdouble(1.1)
        double_notzero1b = rdouble(1.1)
        double_notzero2 = rdouble(2.2)
        assert double_zero1.getValue() == 0.0
        assert double_notzero1.getValue() == 1.1
        assert double_zero1 == double_zero2
        assert double_zero1 != double_notzero1
        assert double_notzero1 == double_notzero1b
        assert double_notzero1 != double_notzero2

        # RFloat
        float_zero1 = rfloat(0.0)
        float_zero2 = rfloat(0.0)
        float_notzero1 = rfloat(1.1)
        float_notzero1b = rfloat(1.1)
        float_notzero2 = rfloat(2.2)
        assert float_zero1.getValue() == 0.0
        assert float_notzero1.getValue() == 1.1
        assert float_zero1 == float_zero2
        assert float_zero1 != float_notzero1
        assert float_notzero1 == float_notzero1b
        assert float_notzero1 != float_notzero2

        # RInt
        int_zero1 = rint(0)
        int_zero2 = rint(0)
        int_notzero1 = rint(1)
        int_notzero1b = rint(1)
        int_notzero2 = rint(2)
        assert int_zero1.getValue() == 0
        assert int_notzero1.getValue() == 1
        assert int_zero1 == int_zero2
        assert int_zero1 != int_notzero1
        assert int_notzero1 == int_notzero1b
        assert int_notzero1 != int_notzero2

        # RLong
        long_zero1 = rlong(0)
        long_zero2 = rlong(0)
        long_notzero1 = rlong(1)
        long_notzero1b = rlong(1)
        long_notzero2 = rlong(2)
        assert long_zero1.getValue() == 0
        assert long_notzero1.getValue() == 1
        assert long_zero1 == long_zero2
        assert long_zero1 != long_notzero1
        assert long_notzero1 == long_notzero1b
        assert long_notzero1 != long_notzero2

        # RTime
        time_zero1 = rtime(0)
        time_zero2 = rtime(0)
        time_notzero1 = rtime(1)
        time_notzero1b = rtime(1)
        time_notzero2 = rtime(2)
        assert time_zero1.getValue() == 0
        assert time_notzero1.getValue() == 1
        assert time_zero1 == time_zero2
        assert time_zero1 != time_notzero1
        assert time_notzero1 == time_notzero1b
        assert time_notzero1 != time_notzero2

        # RInternal
        internal_null1 = rinternal(None)
        internal_null2 = rinternal(None)
        internal_notnull1 = rinternal(omero.grid.JobParams())
        internal_notnull2 = rinternal(omero.grid.JobParams())
        assert internal_null1 == internal_null2
        assert internal_null1 == internal_null2
        assert internal_null1 != internal_notnull2
        assert internal_notnull1 == internal_notnull1
        assert internal_notnull1 != internal_notnull2

        # RObject
        object_null1 = robject(None)
        object_null2 = robject(None)
        object_notnull1 = robject(omero.model.ImageI())
        object_notnull2 = robject(omero.model.ImageI())
        assert object_null1 == object_null2
        assert object_null1 == object_null2
        assert object_null1 != object_notnull2
        assert object_notnull1 == object_notnull1
        assert object_notnull1 != object_notnull2

        # RString
        string_null1 = rstring(None)
        string_null2 = rstring(None)
        string_notnull1 = rstring("str1")
        string_notnull1b = rstring("str1")
        string_notnull2 = rstring("str2")
        assert string_null1 == string_null2
        assert string_null1 == string_null2
        assert string_null1 != string_notnull2
        assert string_notnull1 == string_notnull1
        assert string_notnull1 != string_notnull2
        assert string_notnull1 == string_notnull1b

        # RClass
        class_null1 = rclass(None)
        class_null2 = rclass(None)
        class_notnull1 = rclass("str1")
        class_notnull1b = rclass("str1")
        class_notnull2 = rclass("str2")
        assert class_null1 == class_null2
        assert class_null1 == class_null2
        assert class_null1 != class_notnull2
        assert class_notnull1 == class_notnull1
        assert class_notnull1 != class_notnull2
        assert class_notnull1 == class_notnull1b
Code Example #47
0
def copy_image_and_metadata(im, conn):
    """
    Copy an image
    im: The image
    conn: The connection object, possibly on a different server
    """
    sizeZ = im.getSizeZ()
    sizeC = im.getSizeC()
    sizeT = im.getSizeT()
    zctList = [(z,c,t) for z in range(sizeZ) for c in range(sizeC)
               for t in range(sizeT)]

    def planeGen():
        planes = im.getPrimaryPixels().getPlanes(zctList)
        for p in planes:
            yield p

    d = im.getDescription()
    newim = conn.createImageFromNumpySeq(
        planeGen(), im.getName(), sizeZ=sizeZ, sizeC=sizeC, sizeT=sizeT)

    #qs = conn.getQueryService()
    #px = qs.get('Pixels', newim.getPrimaryPixels().id)

    #params = omero.sys.Parameters()
    #params.map = { 'id': wrap(px.id) }
    #channels = qs.findAllByQuery(
    #    "SELECT c from Channel c "
    #    "join fetch c.pixels as p "
    #    "where p.id = :id", params)

    us = conn.getUpdateService()

    px_exc = [
        'AnnotationLinksCountPerOwner',
        'Channel',
        'Image',
        'PixelsFileMapsCountPerOwner',
        'PixelsType',
        'PrimaryChannel',
        'RelatedTo',
        'Sha1',
        'SizeC',
        'SizeT',
        'SizeX',
        'SizeY',
        'SizeZ',
        ]

    newim = conn.getObject('Image', newim.id)
    newpx = newim.getPrimaryPixels()
    copy_set_get(im.getPrimaryPixels()._obj, newpx._obj, exclude=px_exc)
    newpx = us.saveAndReturnObject(newpx._obj)

    ch_exc = [
        'AnnotationLinksCountPerOwner',
        'LogicalChannel',
        'Pixels',
        'StatsInfo',
        ]
    lc_exc = ['DetectorSettings']

    for c in xrange(sizeC):
        newim = conn.getObject('Image', newim.id)
        chsrc = im.getChannels()[c]
        chdst = newim.getChannels()[c]
        copy_set_get(chsrc._obj, chdst._obj, exclude=ch_exc)
        us.saveAndReturnObject(chdst._obj)

        lchsrc = chsrc.getLogicalChannel()
        lchdst = chdst.getLogicalChannel()
        copy_set_get(lchsrc._obj, lchdst._obj, exclude=lc_exc)
        us.saveAndReturnObject(lchdst._obj)

    desc = add_source_to_description(im, im.getDescription())
    newim.setDescription(rstring(desc))
    us.saveAndReturnObject(newim._obj)

    return newim
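
The copy_set_get() helper used above is not part of this excerpt. The sketch below is a hypothetical reconstruction of such a helper, copying values between matching getX()/setX() accessor pairs while skipping the excluded field names; it is not the original implementation:

# Hypothetical copy_set_get(): copy values between two omero.model objects via
# their getX()/setX() accessor pairs, skipping names listed in exclude.
# Not the original implementation.
def copy_set_get(src, dst, exclude=()):
    skip = set(exclude) | set(['Id', 'Details', 'Version'])
    for attr in dir(src):
        if not attr.startswith('get'):
            continue
        field = attr[3:]                     # e.g. 'getSizeX' -> 'SizeX'
        if field in skip:
            continue
        setter = getattr(dst, 'set' + field, None)
        if setter is None:
            continue
        try:
            value = getattr(src, attr)()
        except TypeError:
            continue                         # getter requires arguments; skip
        if value is not None:
            setter(value)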
Code Example #48
0
 def testPassThroughNoneAndRTypes(self):
     """
     To prevent having to check for isintance(int,...) or
     isintance(RInt,...) all over the place, the static methods
     automatically check for acceptable
     types and simply pass them through. Similarly, the primitive types all
     check for None and return a null RType if necessary.
     """
     # Bool
     assert None == rbool(None)
     assert rbool(True) == rbool(rbool(True))
     assert rbool(True) == rbool(1)
     assert rbool(False) == rbool(0)
     # Double
     assert None == rdouble(None)
     assert rdouble(0.0) == rdouble(rdouble(0.0))
     assert rdouble(0.0) == rdouble(rdouble(0))
     assert rdouble(0.0) == rdouble(rdouble("0.0"))
     pytest.raises(ValueError, lambda: rdouble("string"))
     # Float
     assert None == rfloat(None)
     assert rfloat(0.0) == rfloat(rfloat(0.0))
     assert rfloat(0.0) == rfloat(rfloat(0))
     assert rfloat(0.0) == rfloat(rfloat("0.0"))
     pytest.raises(ValueError, lambda: rfloat("string"))
     # Long
     assert None == rlong(None)
     assert rlong(0) == rlong(rlong(0))
     assert rlong(0) == rlong(rlong(0.0))
     assert rlong(0) == rlong(rlong("0"))
     pytest.raises(ValueError, lambda: rlong("string"))
     # Time
     assert None == rtime(None)
     assert rtime(0) == rtime(rtime(0))
     assert rtime(0) == rtime(rtime(0.0))
     assert rtime(0) == rtime(rtime("0"))
     pytest.raises(ValueError, lambda: rtime("string"))
     # Int
     assert None == rint(None)
     assert rint(0) == rint(rint(0))
     assert rint(0) == rint(rint(0.0))
     assert rint(0) == rint(rint("0"))
     pytest.raises(ValueError, lambda: rint("string"))
     #
     # Starting here handling of null is different.
     #
     # String
     assert rstring("") == rstring(None)
     assert rstring("a") == rstring(rstring("a"))
     assert rstring("0") == rstring(0)
     # Class
     assert rclass("") == rclass(None)
     assert rclass("c") == rclass(rclass("c"))
     pytest.raises(ValueError, lambda: rclass(0))
     # Internal
     internal = omero.Internal()
     assert rinternal(None) == rinternal(None)
     assert rinternal(internal) == rinternal(rinternal(internal))
     pytest.raises(ValueError, lambda: rinternal("string"))
     # Object
     obj = omero.model.ImageI()
     assert robject(None) == robject(None)
     assert robject(obj) == robject(robject(obj))
     pytest.raises(ValueError, lambda: robject("string"))
     #
     # Same does not hold for collections
     #
     # Array
     assert rarray([]) == rarray(None)
     # assert rarray(obj) == rarray(rarray(obj))
     # pytest.raises(ValueError, lambda : rarray("string"))
     # List
     assert rlist([]) == rlist(None)
     # assert rlist(obj) == rlist(rlist(obj))
     # pytest.raises(ValueError, lambda : rlist("string"))
     # Set
     assert rset([]) == rset(None)
     # assert rset(obj) == rset(rset(obj))
     # pytest.raises(ValueError, lambda : rset("string"))
     # Map
     assert rmap({}) == rmap(None)
Code Example #49
0
            green = np_array[::, ::, 1]
            blue = np_array[::, ::, 2]
            plane_gen = iter([red, green, blue])
            plot_name = image.getName() + "_FRAP_plot"
            i = conn.createImageFromNumpySeq(plane_gen, plot_name, sizeC=3,
                                             dataset=image.getParent())
            frap_plots.append(i)
        else:
            # If not plot, simply return input image
            frap_plots.append(image)

    return frap_plots


if __name__ == "__main__":
    dataTypes = [rstring('Dataset'), rstring('Image')]
    client = scripts.client(
        'Simple FRAP.py',
        """
    This script does simple FRAP analysis using Ellipse ROIs previously
    saved on images. If matplotlib is installed, data is plotted and new
    OMERO images are created from the plots.
        """,
        scripts.String(
            "Data_Type", optional=False, grouping="1",
            description="Choose source of images",
            values=dataTypes, default="Dataset"),

        scripts.List(
            "IDs", optional=False, grouping="2",
            description="Dataset or Image IDs.").ofType(rlong(0)),
Code Example #50
0
File: ROI_utils.py Project: zfarooq2/openmicroscopy
 def __init__(self, roicoord=ROICoordinate(), pointsList=(0, 0)):
     ShapeData.__init__(self)
     self.points = rstring(self.listToString(pointsList))
     self.setCoord(roicoord)
Code Example #51
0
def AddMapAnnotations(conn, dtype, Id):
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ''' 
    * Reads information from the 'Dataset Details' field, 
    * constructs key-val data 
    * attaches it to the dataset and the images contained in it
    '''
    dataset = conn.getObject(dtype, int(Id))
    if (not (dataset.canAnnotate() and dataset.canLink())):
        message = "You don't have permission to add annotations to {}".format(
            dataset.getName())
        client.setOutput("Message", rstring(message))
        return

    description = dataset.getDescription().splitlines()

    modes = {
        "Summary": "default",
        "global key-value": "global",
        "filename key-value": "filename",
        "end key-value": "default"
    }
    mode = 'default'

    global_kv = OrderedDict()  # stores the global key value pairs
    file_keys = OrderedDict()  # stores the 'slot' and key for the file keys
    spec_kv = OrderedDict()  # stores the file specific kv's
    template = None

    nimg_updated = 0
    nkv_updated = 0
    for line in description:
        # 1. See if this is a mode string
        match = re.search("^#\s*(\S+\s+\S+)", line.lower())
        if (match is not None and match.group(1) in modes):
            value = modes[match.group(1)]

            # start a new filename block
            if (mode != 'filename' and value == 'filename'):
                file_keys = OrderedDict()
                spec_kv = OrderedDict()
                template = None

            # end a filename block
            if (mode == 'filename' and value != 'filename'):
                print("Trigger parse fileanames")
                print(spec_kv)
                nimg_up, nadd = AddKeysToMatchingFiles(conn, Id, OrderedDict(),
                                                       template, file_keys,
                                                       spec_kv)
                print("filename {} {}".format(nimg_up, nadd))
                nimg_updated = nimg_updated + nimg_up
                nkv_updated = nkv_updated + nadd

            # end a global block
            if (mode == 'global' and value != 'global'):
                # Add globals to all the images
                nimg_up, nadd = AddKeysToMatchingFiles(conn, Id, global_kv,
                                                       None, file_keys)
                print("Global:  {}  {}".format(nimg_up, nadd))
                nimg_updated = nimg_updated + nimg_up
                nkv_updated = nkv_updated + nadd

            mode = value

        if (mode == 'default'):
            pass

        if (mode == 'global'):
            # split the line for the kay value pair
            match = re.search("^\s*(\S+)\s*:\s*(.*)", line)
            if (match is not None):
                key = match.group(1)
                val = match.group(2)
                if (key not in global_kv): global_kv[key] = set()
                global_kv[key].add(val)

        if (mode == 'filename'):
            # the template
            match = re.search("^\s*(\S+)\s+(\S+)", line)
            if (match and (match.group(1).lower() == 'template')):
                template = match.group(2)
                print("New template {}".format(template))

            # file templated kvs
            # Start line
            #    | /----white space
            #    | |full stop|
            #    V V      V  V
            match = re.search("^\s*(\d)\.\s+(\S+)", line)
            #          ^        ^
            #       position   key
            if (match is not None):
                i = int(match.group(1)) - 1
                file_keys[i] = match.group(2)

            # file specific kvs
            match = re.search("^\s*(\S+)\s*:\s*(\S+)", line)
            if (match is not None):
                key = match.group(1)
                val = match.group(2)
                if (key not in spec_kv): spec_kv[key] = set()
                spec_kv[key].add(val)

    # now add the key value pairs to the dataset
    existing_kv = GetExistingMapAnnotions(dataset)
    if (existing_kv != global_kv):
        RemoveMapAnnotations(conn, 'dataset', dataset.getId())
        map_ann = omero.gateway.MapAnnotationWrapper(conn)
        namespace = omero.constants.metadata.NSCLIENTMAPANNOTATION
        # namespace = "openmicroscopy.org/mapr/gene"
        map_ann.setNs(namespace)
        # convert the ordered dict to a list of lists
        kv_list = []
        for k, vset in global_kv.items():
            for v in vset:
                kv_list.append([k, v])
        map_ann.setValue(kv_list)
        map_ann.save()
        dataset.linkAnnotation(map_ann)

    # add the metadata to the images
    if (True):
        #AddKeysToMatchingFiles( conn, Id, global_kv, template, file_keys )
        return "Added a total of {} kv pairs to {}/{} files  ".format(
            nkv_updated, nimg_updated, len(list(dataset.listChildren())))

    else:
        nimg = dataset.countChildren()
        nimg_updated = 0
        nkv_tot = 0
        for image in dataset.listChildren():
            if (not (image.canAnnotate() and image.canLink())):
                message = "You don't have permission to add annotations to {}".format(
                    image.getName())
                client.setOutput("Message", rstring(message))
                return

            existing_kv = GetExistingMapAnnotions(image)
            updated_kv = copy.deepcopy(global_kv)

            if (template is not None):
                # apply the template to the file name
                name = image.getName()
                path = os.path.dirname(
                    image.getImportedImageFilePaths()['client_paths'][0])
                filename = path + "/" + name
                match = regexp.search(filename)

                if (match is not None):
                    for i, val in enumerate(match.groups()):
                        i1 = i + 1
                        if (i1 in file_keys):
                            key = file_keys[i1]
                            if key not in updated_kv: updated_kv[key] = set()
                            updated_kv[key].add(val)

            print("existing_kv")
            for k, v in existing_kv.items():
                print("  {} : {}".format(k, v))
            print("updated_kv")
            for k, v in updated_kv.items():
                print("  {} : {}".format(k, v))
            print("Are they the same?", existing_kv == updated_kv)

            if (existing_kv != updated_kv):
                print("The key-values pairs are different")
                RemoveMapAnnotations(conn, 'image', image.getId())
                map_ann = omero.gateway.MapAnnotationWrapper(conn)
                # namespace ="openmicroscopy.org/mapr/gene"
                map_ann.setNs(namespace)
                print("Namespace")
                print(map_ann)
                # convert the ordered dict to a list of lists
                kv_list = []
                for k, vset in updated_kv.items():
                    for v in vset:
                        kv_list.append([k, v])
                map_ann.setValue(kv_list)
                map_ann.save()
                image.linkAnnotation(map_ann)

                nimg_updated = nimg_updated + 1
                nkv_tot = nkv_tot + len(updated_kv) - len(existing_kv)
        return "Added a total of {} kv pairs to {}/{} files  ".format(
            nkv_tot, nimg_updated, nimg)
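
GetExistingMapAnnotions() (also used in the csv example further down) is not included in this excerpt; the sketch below is a hypothetical helper with the same role, collecting key/value pairs of existing map annotations into an OrderedDict of sets:

# Hypothetical GetExistingMapAnnotions(): collect key/value pairs of all map
# annotations on an object into an OrderedDict of sets.
# Not the original implementation.
from collections import OrderedDict
import omero.gateway

def GetExistingMapAnnotions(obj):
    existing = OrderedDict()
    for ann in obj.listAnnotations():
        if isinstance(ann, omero.gateway.MapAnnotationWrapper):
            for key, value in ann.getValue():
                existing.setdefault(key, set()).add(value)
    return existing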
Code Example #52
0
File: ROI_utils.py Project: zfarooq2/openmicroscopy
 def setFillSettings(self, colour):
     self.fillColour = rstring(colour)
Code Example #53
0
def run_script():
    """
    The main entry point of the script, as called by the client via the
    scripting service, passing the required parameters.
    """
    ckeys = COLOURS.keys()
    ckeys.sort()
    c_options = [rstring(col) for col in ckeys]
    data_types = [rstring('Dataset'), rstring('Image')]
    first_dim = [rstring('Time'), rstring('Channel'), rstring('Z')]
    extra_dims = [
        rstring(''),
        rstring('Time'),
        rstring('Channel'),
        rstring('Z')
    ]
    channel_regs = [rstring(r) for r in channel_regexes.keys()]
    z_regs = [rstring(r) for r in z_regexes.keys()]
    t_regs = [rstring(r) for r in time_regexes.keys()]

    client = scripts.client(
        'Combine_Images.py',
        """Combine several single-plane images (or Z-stacks) into one with \
greater Z, C, T dimensions.
See http://help.openmicroscopy.org/scripts.html""",
        scripts.String(
            "Data_Type",
            optional=False,
            grouping="1",
            description="Use all the images in specified 'Datasets' or choose"
            " individual 'Images'.",
            values=data_types,
            default="Image"),
        scripts.List("IDs",
                     optional=False,
                     grouping="2",
                     description="List of Dataset IDs or Image IDs to "
                     "combine.").ofType(rlong(0)),
        scripts.String(
            "Filter_Names",
            grouping="2.1",
            description="Filter the images by names that contain this value"),
        scripts.Bool(
            "Auto_Define_Dimensions",
            grouping="3",
            default=True,
            description="""Choose new dimensions with respect to the order of"
            " the input images. See URL above."""),
        scripts.String(
            "Channel_Name_Pattern",
            grouping="3.1",
            default=DEFAULT_C_REGEX,
            values=channel_regs,
            description="""Auto-pick images by channel in the image name"""),
        scripts.String(
            "Z_Name_Pattern",
            grouping="3.2",
            default=DEFAULT_Z_REGEX,
            values=z_regs,
            description="""Auto-pick images by Z-index in the image name"""),
        scripts.String(
            "Time_Name_Pattern",
            grouping="3.3",
            default=DEFAULT_T_REGEX,
            values=t_regs,
            description="""Auto-pick images by T-index in the image name"""),
        scripts.Bool(
            "Manually_Define_Dimensions",
            grouping="4",
            default=False,
            description="""Choose new dimensions with respect to the order of"
            " the input images. See URL above."""),
        scripts.String("Dimension_1",
                       grouping="4.1",
                       description="The first Dimension to change",
                       values=first_dim),
        scripts.String(
            "Dimension_2",
            grouping="4.2",
            values=extra_dims,
            default="",
            description="The second Dimension to change. Only specify this if"
            " combining multiple dimensions."),
        scripts.String(
            "Dimension_3",
            grouping="4.3",
            values=extra_dims,
            default="",
            description="The third Dimension to change. Only specify this if"
            " combining multiple dimensions."),
        scripts.Int("Size_Z",
                    grouping="4.4",
                    description="Number of Z planes in new image",
                    min=1),
        scripts.Int("Size_C",
                    grouping="4.5",
                    description="Number of channels in new image",
                    min=1),
        scripts.Int("Size_T",
                    grouping="4.6",
                    description="Number of time-points in new image",
                    min=1),
        scripts.List("Channel_Colours",
                     grouping="7",
                     description="List of Colors for channels.",
                     default="White",
                     values=c_options).ofType(rstring("")),
        scripts.List(
            "Channel_Names",
            grouping="8",
            description="List of Names for channels in the new image."),
        version="4.2.0",
        authors=["William Moore", "OME Team"],
        institutions=["University of Dundee"],
        contact="*****@*****.**",
    )

    try:
        parameter_map = client.getInputs(unwrap=True)

        conn = BlitzGateway(client_obj=client)

        # create the combined image
        images, message = combine_images(conn, parameter_map)

        client.setOutput("Message", rstring(message))
        if images:
            if len(images) == 1:
                client.setOutput("Combined_Image", robject(images[0]))
            elif len(images) > 1:
                client.setOutput("First_Image", robject(images[0]))

    finally:
        client.closeSession()
Code Example #54
0
def make_single_image(services, parameter_map, image_ids, dataset, colour_map):
    """
    This takes the images specified by image_ids, sorts them in to Z,C,T
    dimensions according to parameters in the parameter_map, assembles them
    into a new Image, which is saved in dataset.
    """

    if len(image_ids) == 0:
        return

    rendering_engine = services["renderingEngine"]
    query_service = services["queryService"]
    pixels_service = services["pixelsService"]
    raw_pixel_store = services["rawPixelStore"]
    raw_pixel_store_upload = services["rawPixelStoreUpload"]
    update_service = services["updateService"]
    container_service = services["containerService"]

    # Filter images by name if user has specified filter.
    id_name_map = None
    if "Filter_Names" in parameter_map:
        filter_string = parameter_map["Filter_Names"]
        if len(filter_string) > 0:
            id_name_map = get_image_names(query_service, image_ids)
            image_ids = [
                i for i in image_ids if id_name_map[i].find(filter_string) > -1
            ]

    image_id = image_ids[0]

    # get pixels, with pixelsType, from the first image
    query_string = "select p from Pixels p join fetch p.image i join "\
        "fetch p.pixelsType pt where i.id='%d'" % image_id
    pixels = query_service.findByQuery(query_string, None)
    # use the pixels type object we got from the first image.
    pixels_type = pixels.getPixelsType()

    # combined image will have same X and Y sizes...
    size_x = pixels.getSizeX().getValue()
    size_y = pixels.getSizeY().getValue()
    # if we have a Z stack, use this in new image (don't combine Z)
    source_z = pixels.getSizeZ().getValue()

    # Now we need to find where our planes are coming from.
    # imageMap is a map of destination:source, defined as (newX, newY,
    # newZ):(imageId, z)
    if "Manually_Define_Dimensions" in parameter_map and \
            parameter_map["Manually_Define_Dimensions"]:
        size_z, size_c, size_t, image_map = manually_assign_images(
            parameter_map, image_ids, source_z)
        c_names = {}
    else:
        size_z, c_names, size_t, image_map = assign_images_by_regex(
            parameter_map, image_ids, query_service, source_z, id_name_map)
        size_c = len(c_names)

    if "Channel_Names" in parameter_map:
        for c, name in enumerate(parameter_map["Channel_Names"]):
            c_names[c] = name

    image_name = "combinedImage"
    description = "created from image Ids: %s" % image_ids

    channel_list = range(size_c)
    iid = pixels_service.createImage(size_x, size_y, size_z, size_t,
                                     channel_list, pixels_type, image_name,
                                     description)
    image = container_service.getImages("Image", [iid.getValue()], None)[0]

    pixels_id = image.getPrimaryPixels().getId().getValue()
    raw_pixel_store_upload.setPixelsId(pixels_id, True)

    pixel_sizes = {'x': [], 'y': []}
    for the_c in range(size_c):
        min_value = 0
        max_value = 0
        for the_z in range(size_z):
            for the_t in range(size_t):
                if (the_z, the_c, the_t) in image_map:
                    image_id, plane_z = image_map[(the_z, the_c, the_t)]
                    query_string = "select p from Pixels p join fetch "\
                        "p.image i join fetch p.pixelsType pt where "\
                        "i.id='%d'" % image_id
                    pixels = query_service.findByQuery(query_string, None)
                    plane_2d = get_plane(raw_pixel_store, pixels, plane_z, 0,
                                         0)
                    # Note pixels sizes (may be None)
                    pixel_sizes['x'].append(pixels.getPhysicalSizeX())
                    pixel_sizes['y'].append(pixels.getPhysicalSizeY())
                else:
                    plane_2d = zeros((size_y, size_x))
                script_utils.upload_plane_by_row(raw_pixel_store_upload,
                                                 plane_2d, the_z, the_c, the_t)
                min_value = min(min_value, plane_2d.min())
                max_value = max(max_value, plane_2d.max())
        pixels_service.setChannelGlobalMinMax(pixels_id,
                                              the_c, float(min_value),
                                              float(max_value))
        rgba = COLOURS["White"]
        if the_c in colour_map:
            rgba = colour_map[the_c]
        script_utils.reset_rendering_settings(rendering_engine, pixels_id,
                                              the_c, min_value, max_value,
                                              rgba)

    # rename new channels
    pixels = rendering_engine.getPixels()
    # has channels loaded - (getting Pixels from image doesn't)
    i = 0
    for c in pixels.iterateChannels():
        # c is an instance of omero.model.ChannelI
        if i >= len(c_names):
            break
        lc = c.getLogicalChannel()  # returns omero.model.LogicalChannelI
        lc.setName(rstring(c_names[i]))
        update_service.saveObject(lc)
        i += 1

    # Set pixel sizes if known
    pix_size_x = pick_pixel_sizes(pixel_sizes['x'])
    pix_size_y = pick_pixel_sizes(pixel_sizes['y'])
    if pix_size_x is not None or pix_size_y is not None:
        # reload to avoid OptimisticLockException
        pixels = services["queryService"].get('Pixels',
                                              pixels.getId().getValue())
        if pix_size_x is not None:
            pixels.setPhysicalSizeX(pix_size_x)
        if pix_size_y is not None:
            pixels.setPhysicalSizeY(pix_size_y)
        services["updateService"].saveObject(pixels)

    # put the image in dataset, if specified.
    if dataset and dataset.canLink():
        link = omero.model.DatasetImageLinkI()
        link.parent = omero.model.DatasetI(dataset.getId(), False)
        link.child = omero.model.ImageI(image.getId().getValue(), False)
        update_service.saveAndReturnObject(link)
    else:
        link = None

    return image, link
Code Example #55
0
    def add(self, args):
        email = args.email
        login = args.username
        first = args.firstname
        middle = args.middlename
        last = args.lastname
        inst = args.institution
        pasw = args.userpassword

        import omero
        from omero.rtypes import rstring
        from omero_model_ExperimenterI import ExperimenterI as Exp
        from omero_model_ExperimenterGroupI import ExperimenterGroupI as Grp
        c = self.ctx.conn(args)
        e = Exp()
        e.omeName = rstring(login)
        e.firstName = rstring(first)
        e.lastName = rstring(last)
        e.middleName = rstring(middle)
        e.email = rstring(email)
        e.institution = rstring(inst)
        admin = c.getSession().getAdminService()

        try:
            usr = admin.lookupExperimenter(login)
            if usr:
                if args.ignore_existing:
                    self.ctx.out("User exists: %s (id=%s)"
                                 % (login, usr.id.val))
                    return
                else:
                    self.ctx.die(3, "User exists: %s (id=%s)"
                                 % (login, usr.id.val))
        except omero.ApiUsageException:
            pass  # Apparently no such user exists

        groups = self.list_groups(admin, args)

        roles = admin.getSecurityRoles()
        groups.append(Grp(roles.userGroupId, False))
        if args.admin:
            groups.append(Grp(roles.systemGroupId, False))

        group = groups.pop(0)

        try:
            if args.no_password:
                id = admin.createExperimenter(e, group, groups)
                self.ctx.out("Added user %s (id=%s) without password"
                             % (login, id))
            else:
                if pasw is None:
                    self._ask_for_password(" for your new user (%s)"
                                           % login, strict=True)
                id = admin.createExperimenterWithPassword(e, rstring(pasw),
                                                          group, groups)
                self.ctx.out("Added user %s (id=%s) with password"
                             % (login, id))
        except omero.ValidationException, ve:
            # Possible, though unlikely after previous check
            if self.exc.is_constraint_violation(ve):
                self.ctx.die(66, "User already exists: %s" % login)
            else:
                self.ctx.die(67, "Unknown ValidationException: %s"
                             % ve.message)
Code Example #56
0
File: test_chgrp.py Project: patrick330602/fomf3dbd
    def test_chgrp_old_container(self, dataset, credentials):
        """
        Tests Admin moving user's Dataset to their Private group and
        linking it to an existing Project there.
        Bug from https://github.com/openmicroscopy/openmicroscopy/pull/3420
        """

        django_client = self.get_django_client(credentials)
        # user creates project in their target group
        project = ProjectI()
        projectName = "chgrp-target-%s" % self.client.getSessionId()
        project.name = rstring(projectName)
        ctx = {"omero.group": str(self.group2.id.val)}
        project = self.sf.getUpdateService().saveAndReturnObject(project, ctx)
        request_url = reverse('chgrp')

        data = {
            "group_id": self.group2.id.val,
            "Dataset": dataset.id.val,
            "target_id": "project-%s" % project.id.val,
        }
        rsp = post(django_client, request_url, data)
        data = json.loads(rsp.content)
        expected = {
            "update": {
                "childless": {
                    "project": [],
                    "orphaned": False,
                    "dataset": []
                },
                "remove": {
                    "project": [],
                    "plate": [],
                    "screen": [],
                    "image": [],
                    "dataset": [dataset.id.val]
                }
            }
        }
        assert data == expected

        activities_url = reverse('activities_json')

        data = get_json(django_client, activities_url)

        # Keep polling activities until no jobs in progress
        while data['inprogress'] > 0:
            time.sleep(0.5)
            data = get_json(django_client, activities_url)

        # individual activities/jobs are returned as dicts within json data
        for k, o in data.items():
            if hasattr(o, 'values'):  # a dict
                if 'report' in o:
                    print o['report']
                assert o['status'] == 'finished'
                assert o['job_name'] == 'Change group'
                assert o['to_group_id'] == self.group2.id.val

        # Dataset should now be in new group, contained in Project
        conn = BlitzGateway(client_obj=self.client)
        userId = conn.getUserId()
        conn.SERVICE_OPTS.setOmeroGroup('-1')
        d = conn.getObject("Dataset", dataset.id.val)
        assert d is not None
        assert d.getDetails().group.id.val == self.group2.id.val
        p = d.getParent()
        assert p is not None
        assert p.getName() == projectName
        # Project owner should be current user
        assert p.getDetails().owner.id.val == userId
        assert p.getId() == project.id.val
Code Example #57
0
def runAsScript():
    """
    The main entry point of the script, as called by the client via the
    scripting service, passing the required parameters.
    """
    printDuration(False)  # start timer

    ckeys = COLOURS.keys()
    ckeys.sort()
    cOptions = [rstring(col) for col in ckeys]

    dataTypes = [rstring('Screen')]

    client = scripts.client(
        'HCS_RenderSettings.py',
        """Sets the rendering settings for all images in a list of screens""",
        scripts.String(
            "Data_Type",
            optional=False,
            grouping="1",
            description="Choose container of images (only Screen supported)",
            values=dataTypes,
            default="Screen"),
        scripts.List("IDs",
                     optional=False,
                     grouping="2",
                     description="List of Screen IDs to process").ofType(
                         rlong(0)),
        scripts.Bool("Reset_To_Imported",
                     grouping="3",
                     default=False,
                     description="Reset all rendering settings to original"
                     " Imported values. Arguments below will be ignored"),
        scripts.List("Channel_Colours",
                     grouping="4",
                     description="List of Colours for channels.",
                     default="White",
                     values=cOptions).ofType(rstring("")),
        scripts.List(
            "Channel_Names",
            grouping="5",
            description="List of Names for channels in the new image."),
        version="0.0.1",
        authors=["Damir Sudar"],
        institutions=["Quantitative Imaging Systems LLC"],
        contact="*****@*****.**",
    )

    try:
        scriptParams = client.getInputs(unwrap=True)
        print scriptParams

        # wrap client to use the Blitz Gateway
        conn = BlitzGateway(client_obj=client)

        # set the desired rendering settings
        message = set_rendersettings(conn, scriptParams)

        client.setOutput("Message", rstring(message))

    finally:
        client.closeSession()
        printDuration()
Code Example #58
0
def run_script():
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    data_types = [rstring('Dataset')]
    client = scripts.client(
        'Create_Metadata_csv',
        """
    This script reads the metadata attached data set and creates
    a csv file attached to the Dataset
        """,
        scripts.String("Data_Type",
                       optional=False,
                       grouping="1",
                       description="Choose source of images",
                       values=data_types,
                       default="Dataset"),
        scripts.List("IDs",
                     optional=False,
                     grouping="2",
                     description="Plate or Screen ID.").ofType(rlong(0)),
        authors=["Christian Evenhuis"],
        institutions=["MIF UTS"],
        contact="*****@*****.**")

    try:
        # process the list of args above.
        script_params = {}
        for key in client.getInputKeys():
            if client.getInput(key):
                script_params[key] = client.getInput(key, unwrap=True)

        # wrap client to use the Blitz Gateway
        conn = BlitzGateway(client_obj=client)
        print("connection made")

        dataType = script_params["Data_Type"]
        print(dataType)
        ids = script_params["IDs"]
        datasets = list(conn.getObjects(
            dataType, ids))  # generator of images or datasets
        print(ids)
        print("datasets:")
        print(datasets)
        for ds in datasets:
            # name of the file
            csv_name = "{}_metadata_out.csv".format(ds.getName())
            print(csv_name)

            # remove the csv if it exists
            for ann in ds.listAnnotations():
                if (isinstance(ann, omero.gateway.FileAnnotationWrapper)):
                    if (ann.getFileName() == csv_name):
                        # if the name matches delete it
                        try:
                            delete = Delete2(targetObjects={
                                'FileAnnotation': [int(ann.getId())]
                            })
                            handle = conn.c.sf.submit(delete)
                            conn.c.waitOnCmd(handle,
                                             loops=10,
                                             ms=500,
                                             failonerror=True,
                                             failontimeout=False,
                                             closehandle=False)
                            print("Deleted existing csv")
                        except Exception, ex:
                            print("Failed to delete existing csv: {}".format(
                                ex.message))
                else:
                    print("No exisiting file")

            #                                 filename         key          multiple vals
            # assemble the metadata into an OrderedDict of ( OrderedDict of Sets          )
            file_names = [img.getName() for img in list(ds.listChildren())]
            kv_dict = OrderedDict()
            for img in ds.listChildren():
                fn = img.getName()
                kv_dict[fn] = GetExistingMapAnnotions(img)

            # attach the data
            mess = attach_csv_file(conn, ds, kv_dict)
            print(mess)
        mess = "done"
        client.setOutput("Message", rstring(mess))
Code Example #59
0
def save_web_figure(request, conn=None, **kwargs):
    """
    Saves 'figureJSON' in POST as an original file. If 'fileId' is specified
    in POST, then we update that file. Otherwise create a new one with
    name 'figureName' from POST.
    """

    update = conn.getUpdateService()
    if not request.method == 'POST':
        return HttpResponse("Need to use POST")

    figure_json = request.POST.get('figureJSON')
    if figure_json is None:
        return HttpResponse("No 'figureJSON' in POST")
    # See https://github.com/will-moore/figure/issues/16
    figure_json = figure_json.encode('utf8')

    image_ids = []
    first_img_id = None
    try:
        json_data = json.loads(figure_json)
        for panel in json_data['panels']:
            image_ids.append(panel['imageId'])
        if len(image_ids) > 0:
            first_img_id = long(image_ids[0])
        # remove duplicates
        image_ids = list(set(image_ids))
        # pretty-print json
        figure_json = json.dumps(json_data, sort_keys=True,
                                 indent=2, separators=(',', ': '))
    except Exception:
        pass

    file_id = request.POST.get('fileId')

    if 'figureName' in json_data and len(json_data['figureName']) > 0:
        figure_name = json_data['figureName']
    else:
        n = datetime.now()
        # time-stamp name by default: WebFigure_2013-10-29_22-43-53.json
        figure_name = "Figure_%s-%s-%s_%s-%s-%s.json" % \
            (n.year, n.month, n.day, n.hour, n.minute, n.second)

    # we store json in description field...
    description = {}
    if first_img_id is not None:
        # We duplicate the figure name here for quicker access when
        # listing files
        # (use this instead of file name because it supports unicode)
        description['name'] = figure_name
        description['imageId'] = first_img_id
        if 'baseUrl' in panel:
            description['baseUrl'] = panel['baseUrl']
    desc = json.dumps(description)

    if file_id is None:
        # Create new file
        # Try to set Group context to the same as first image
        curr_gid = conn.SERVICE_OPTS.getOmeroGroup()
        conn.SERVICE_OPTS.setOmeroGroup('-1')
        i = conn.getObject("Image", first_img_id)
        if i is not None:
            gid = i.getDetails().getGroup().getId()
            conn.SERVICE_OPTS.setOmeroGroup(gid)
        else:
            # Don't leave as -1
            conn.SERVICE_OPTS.setOmeroGroup(curr_gid)
        file_size = len(figure_json)
        f = StringIO()
        f.write(figure_json)
        # Can't use unicode for file name
        figure_name = unicodedata.normalize(
            'NFKD', figure_name).encode('ascii', 'ignore')
        orig_file = conn.createOriginalFileFromFileObj(
            f, '', figure_name, file_size, mimetype="application/json")
        fa = omero.model.FileAnnotationI()
        fa.setFile(omero.model.OriginalFileI(orig_file.getId(), False))
        fa.setNs(wrap(JSON_FILEANN_NS))
        fa.setDescription(wrap(desc))
        fa = update.saveAndReturnObject(fa, conn.SERVICE_OPTS)
        file_id = fa.getId().getValue()

    else:
        # Update existing Original File
        conn.SERVICE_OPTS.setOmeroGroup('-1')
        # Following seems to work OK with group -1 (regardless of group ctx)
        fa = conn.getObject("FileAnnotation", file_id)
        if fa is None:
            return Http404("Couldn't find FileAnnotation of ID: %s" % file_id)
        conn.SERVICE_OPTS.setOmeroGroup(fa.getDetails().group.id.val)
        # Update description
        fa._obj.setDescription(wrap(desc))
        update.saveAndReturnObject(fa._obj, conn.SERVICE_OPTS)
        orig_file = fa._obj.file
        # Update name and size
        orig_file.setName(rstring(figure_name))
        size = len(figure_json)
        orig_file.setSize(rlong(size))
        orig_file = update.saveAndReturnObject(
            orig_file, conn.SERVICE_OPTS)
        # upload file
        raw_file_store = conn.createRawFileStore()
        raw_file_store.setFileId(orig_file.getId().getValue(),
                                 conn.SERVICE_OPTS)
        raw_file_store.write(figure_json, 0, size, conn.SERVICE_OPTS)
        raw_file_store.truncate(size, conn.SERVICE_OPTS)     # ticket #11751
        # Once #11928 is fixed, these last 2 lines can be replaced with
        # rawFileStore.close(conn.SERVICE_OPTS)
        raw_file_store.save(conn.SERVICE_OPTS)
        raw_file_store.close()

    # Link file annotation to all images (remove from any others)
    link_to_images = False      # Disabled for now
    if link_to_images:
        current_links = conn.getAnnotationLinks("Image", ann_ids=[file_id])
        for l in current_links:
            if l.getParent().getId().getValue() not in image_ids:
                # remove old link
                update.deleteObject(l._obj, conn.SERVICE_OPTS)
            else:
                # we don't need to create links for these
                image_ids.remove(l.getParent().getId().getValue())

        # create new links if necessary
        links = []
        if len(image_ids) > 0:
            for i in conn.getObjects("Image", image_ids):
                if not i.canAnnotate():
                    continue
                link = omero.model.ImageAnnotationLinkI()
                link.parent = omero.model.ImageI(i.getId(), False)
                link.child = omero.model.FileAnnotationI(file_id, False)
                links.append(link)
            # Don't want to fail at this point due to strange permissions combo
            try:
                update.saveArray(links, conn.SERVICE_OPTS)
            except Exception:
                pass

    return HttpResponse(str(file_id))
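
For reference, a minimal sketch of reading the stored figure JSON back through a RawFileStore; it assumes a connected BlitzGateway named conn and a known file annotation ID:

# Sketch: read the saved figure JSON back via a RawFileStore.
# Assumes conn is a connected BlitzGateway and file_ann_id is known.
fa = conn.getObject("FileAnnotation", file_ann_id)
raw_file_store = conn.createRawFileStore()
try:
    raw_file_store.setFileId(fa.getFile().getId(), conn.SERVICE_OPTS)
    figure_json = raw_file_store.read(0, fa.getFileSize())
finally:
    raw_file_store.close()
print(figure_json)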
Code Example #60
0
class UserControl(UserGroupControl):

    def _configure(self, parser):

        self.exc = ExceptionHandler()

        parser.add_login_arguments()
        sub = parser.sub()

        add = parser.add(sub, self.add, help="Add user")
        add.add_argument(
            "--ignore-existing", action="store_true", default=False,
            help="Do not fail if user already exists")
        add.add_argument(
            "-m", "--middlename", help="Middle name, if available")
        add.add_argument("-e", "--email")
        add.add_argument("-i", "--institution")
        # Capitalized since conflict with main values
        add.add_argument(
            "-a", "--admin", action="store_true",
            help="Whether the user should be an admin")
        add.add_argument("username", help="User's login name")
        add.add_argument("firstname", help="User's given name")
        add.add_argument("lastname", help="User's surname name")
        self.add_group_arguments(add, "join", "the group as an owner")

        password_group = add.add_mutually_exclusive_group()
        password_group.add_argument(
            "-P", "--userpassword", help="Password for user")
        password_group.add_argument(
            "--no-password", action="store_true", default=False,
            help="Create user with empty password")

        list = parser.add(sub, self.list, help="List current users")
        printgroup = list.add_mutually_exclusive_group()
        printgroup.add_argument(
            "--long", action="store_true", default=True,
            help="Print comma-separated list of all groups (default)")
        printgroup.add_argument(
            "--count", action="store_true", default=False,
            help="Print count of all groups")

        sortgroup = list.add_mutually_exclusive_group()
        sortgroup.add_argument(
            "--sort-by-id", action="store_true", default=True,
            help="Sort users by ID (default)")
        sortgroup.add_argument(
            "--sort-by-login", action="store_true", default=False,
            help="Sort users by login")
        sortgroup.add_argument(
            "--sort-by-first-name", action="store_true", default=False,
            help="Sort users by first name")
        sortgroup.add_argument(
            "--sort-by-last-name", action="store_true", default=False,
            help="Sort users by last name")
        sortgroup.add_argument(
            "--sort-by-email", action="store_true", default=False,
            help="Sort users by email")

        password = parser.add(
            sub, self.password, help="Set user's password")
        password.add_argument(
            "username", nargs="?", help="Username if not the current user")

        email = parser.add(
            sub, self.email, help="List users' email addresses")
        email.add_argument(
            "-n", "--names", action="store_true", default=False,
            help="Print user names along with email addresses")
        email.add_argument(
            "-1", "--one", action="store_true", default=False,
            help="Print one user per line")
        email.add_argument(
            "-i", "--ignore", action="store_true", default=False,
            help="Ignore users without email addresses")

        joingroup = parser.add(
            sub, self.joingroup, help="Join one or more groups")
        self.add_user_arguments(joingroup)
        group = self.add_group_arguments(joingroup, "join")
        group.add_argument(
            "--as-owner", action="store_true", default=False,
            help="Join the group(s) as an owner")

        leavegroup = parser.add(
            sub, self.leavegroup, help="Leave one or more groups")
        self.add_user_arguments(leavegroup)
        group = self.add_group_arguments(leavegroup, "leave")
        group.add_argument(
            "--as-owner", action="store_true", default=False,
            help="Leave the owner list of the group(s)")

        for x in (email, password, list, add, joingroup, leavegroup):
            x.add_login_arguments()

    def add_user_arguments(self, parser):
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            "--id", help="ID of the user. Defaults to the current user")
        group.add_argument(
            "--name", help="Name of the user. Defaults to the current user")

    def add_group_arguments(self, parser, action="join", owner_desc=""):
        group = parser.add_argument_group('Group arguments')
        group.add_argument(
            "group_id_or_name",  metavar="group", nargs="*",
            help="ID or name of the group(s) to %s" % action)
        group.add_argument(
            "--group-id", metavar="group", nargs="+",
            help="ID  of the group(s) to %s" % action)
        group.add_argument(
            "--group-name", metavar="group", nargs="+",
            help="Name of the group(s) to %s" % action)
        return group

    def format_name(self, exp):
        record = ""
        fn = _(exp.firstName)
        mn = " "
        if _(exp.middleName):
            mn = " %s " % _(exp.middleName)
        ln = _(exp.lastName)
        record += "%s%s%s" % (fn, mn, ln)
        return record

    def email(self, args):
        c = self.ctx.conn(args)
        a = c.sf.getAdminService()

        skipped = []
        records = []
        for exp in a.lookupExperimenters():

            # Handle users without email
            if not _(exp.email):
                if not args.ignore:
                    skipped.append(exp)
                continue

            record = ""
            if args.names:
                record += '"%s"' % self.format_name(exp)
                record += " <%s>" % _(exp.email)
            else:
                record += _(exp.email)

            records.append(record)

        if args.one:
            for record in records:
                self.ctx.out(record)
        else:
            self.ctx.out(", ".join(records))

        if skipped:
            self.ctx.err("Missing email addresses:")
            for s in skipped:
                self.ctx.err(self.format_name(s))

    def password(self, args):
        import omero
        from omero.rtypes import rstring
        client = self.ctx.conn(args)
        own_name = self.ctx._event_context.userName
        admin = client.sf.getAdminService()

        # tickets 3202, 5841
        own_pw = self._ask_for_password(" for your user (%s)"
                                        % own_name, strict=False)
        try:
            client.sf.setSecurityPassword(own_pw)
            self.ctx.out("Verified password.\n")
        except omero.SecurityViolation:
            import traceback
            # log the full traceback before exiting, otherwise die() would
            # prevent the debug output from ever being written
            self.ctx.dbg(traceback.format_exc())
            self.ctx.die(456, "SecurityViolation: Bad credentials")

        if args.username:
            self.ctx.out("Changing password for %s" % args.username)
        else:
            self.ctx.out("Changing password for %s" % own_name)

        pw = self._ask_for_password(" to be set")
        pw = rstring(pw)
        if args.username:
            admin.changeUserPassword(args.username, pw)
        else:
            admin.changePassword(pw)
        self.ctx.out("Password changed")