Example #1
def stat_screens(query):

    tb = TableBuilder("Screen")
    tb.cols(["ID", "Plates", "Wells", "Images", "Planes", "Bytes"])

    plate_count = 0
    well_count = 0
    image_count = 0
    plane_count = 0
    byte_count = 0

    for study, screens in sorted(studies().items()):
        for screen, plates_expected in screens.items():
            params = ParametersI()
            params.addString("screen", screen)
            rv = unwrap(query.projection((
                "select s.id, count(distinct p.id), "
                "       count(distinct w.id), count(distinct i.id),"
                "       sum(cast(pix.sizeZ as long) * pix.sizeT * pix.sizeC), "
                "       sum(cast(pix.sizeZ as long) * pix.sizeT * pix.sizeC * "
                "           pix.sizeX * pix.sizeY * 2) "
                "from Screen s "
                "left outer join s.plateLinks spl "
                "left outer join spl.child as p "
                "left outer join p.wells as w "
                "left outer join w.wellSamples as ws "
                "left outer join ws.image as i "
                "left outer join i.pixels as pix "
                "where s.name = :screen "
                "group by s.id"), params))
            if not rv:
                tb.row(screen, "MISSING", "", "", "", "", "")
            else:
                for x in rv:
                    plate_id, plates, wells, images, planes, bytes = x
                    plate_count += plates
                    well_count += wells
                    image_count += images
                    if planes:
                        plane_count += planes
                    if bytes:
                        byte_count += bytes
                    else:
                        bytes = 0
                    if plates != len(plates_expected):
                        plates = "%s of %s" % (plates, len(plates_expected))
                    tb.row(screen, plate_id, plates, wells, images, planes,
                           filesizeformat(bytes))
    tb.row("Total", "", plate_count, well_count, image_count, plane_count,
           filesizeformat(byte_count))
    print str(tb.build())
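
For context, stat_screens relies on a studies() helper that is not part of this snippet. A minimal, hypothetical sketch of the mapping it is assumed to return (the study, screen and plate names below are placeholders):

def studies():
    # Hypothetical stand-in only: study name -> {screen name: [expected plates]}
    return {
        "study-1": {
            "study-1/screenA": ["plate-1", "plate-2"],
        },
    }
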
Example #2
    def _usage_report(self, req, rsp, status, args):
        """
        Output the total bytes used, or the error if one occurred,
        and optionally provide more details.
        """
        err = self.get_error(rsp)
        if err:
            self.ctx.err("Error: " + rsp.parameters['message'])
        else:
            size = sum(rsp.totalBytesUsed.values())
            if args.size_only:
                self.ctx.out(size)
            else:
                files = sum(rsp.totalFileCount.values())
                if args.units:
                    size = ("%s %siB"
                            % (self._to_units(size, args.units), args.units))
                elif args.human_readable:
                    size = filesizeformat(size)
                self.ctx.out(
                    "Total disk usage: %s bytes in %d files"
                    % (size, files))

            if args.report and not args.size_only and size > 0:
                self._detailed_usage_report(req, rsp, status, args)
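
The _to_units helper referenced above is defined elsewhere in the control; these examples only show that it takes a byte count plus a single-letter unit from "KMGTP". A minimal sketch of that assumed behaviour, written as a standalone function (not the actual implementation):

def _to_units(size, units):
    # Assumed behaviour: scale a byte count by the matching binary prefix
    # ("K" -> KiB, "M" -> MiB, ...); the rounding here is a guess.
    power = "KMGTP".index(units) + 1
    return round(size / float(1024 ** power), 2)
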
Example #3
def stat_screens(query):

    tb = TableBuilder("Container")
    tb.cols(["ID", "Set", "Wells", "Images", "Planes", "Bytes"])

    plate_count = 0
    well_count = 0
    image_count = 0
    plane_count = 0
    byte_count = 0

    for study, containers in sorted(studies().items()):
        for container, set_expected in sorted(containers.items()):
            params = ParametersI()
            params.addString("container", container)
            if "Plate" in set_expected:
                expected = set_expected["Plate"]
                rv = unwrap(query.projection(SPW_QUERY, params))
            elif "Dataset" in set_expected:
                expected = set_expected["Dataset"]
                rv = unwrap(query.projection(PDI_QUERY, params))
            else:
                raise Exception("unknown: %s" % set_expected.keys())

            if not rv:
                tb.row(container, "MISSING", "", "", "", "", "")
            else:
                for x in rv:
                    plate_id, plates, wells, images, planes, bytes = x
                    plate_count += plates
                    well_count += wells
                    image_count += images
                    if planes:
                        plane_count += planes
                    if bytes:
                        byte_count += bytes
                    else:
                        bytes = 0
                    if plates != len(expected):
                        plates = "%s of %s" % (plates, len(expected))
                    tb.row(container, plate_id, plates, wells, images, planes,
                           filesizeformat(bytes))
    tb.row("Total", "", plate_count, well_count, image_count, plane_count,
           filesizeformat(byte_count))
    print str(tb.build())
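
Neither SPW_QUERY nor PDI_QUERY appears in this snippet. Judging from the inline query in Example #1, SPW_QUERY is presumably a parameterised variant of the same aggregation, along these lines (hypothetical reconstruction, not the actual constant):

# Hypothetical reconstruction based on the inline HQL in Example #1; the
# real SPW_QUERY may select different columns or use different filters.
SPW_QUERY = (
    "select s.id, count(distinct p.id), "
    "       count(distinct w.id), count(distinct i.id), "
    "       sum(cast(pix.sizeZ as long) * pix.sizeT * pix.sizeC), "
    "       sum(cast(pix.sizeZ as long) * pix.sizeT * pix.sizeC * "
    "           pix.sizeX * pix.sizeY * 2) "
    "from Screen s "
    "left outer join s.plateLinks spl "
    "left outer join spl.child as p "
    "left outer join p.wells as w "
    "left outer join w.wellSamples as ws "
    "left outer join ws.image as i "
    "left outer join i.pixels as pix "
    "where s.name = :container "
    "group by s.id")
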
Example #6
    def _detailed_usage_report(self, req, rsp, status, args):
        """
        Print a breakdown of disk usage in table form, including user,
        group and component information according to the args.
        """
        from omero.util.text import TableBuilder

        sum_by = ("user", "group", "component")
        if args.sum_by is not None:
            sum_by = args.sum_by
        showCols = list(sum_by)
        showCols.extend(["size", "files"])

        align = 'l'*len(sum_by)
        align += 'rr'
        tb = TableBuilder(*showCols)
        tb.set_align(align)
        if args.style:
            tb.set_style(args.style)

        subtotals = {}
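        # Keys are tuples of the requested sum_by fields (e.g. (user, group,
        # component)); values are running [bytes, files] totals per key.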
        if "component" in sum_by:
            for userGroup in rsp.bytesUsedByReferer.keys():
                for (element, size) in rsp.bytesUsedByReferer[
                        userGroup].items():
                    files = rsp.fileCountByReferer[userGroup][element]
                    keyList = []
                    if "user" in sum_by:
                        keyList.append(userGroup.first)
                    if "group" in sum_by:
                        keyList.append(userGroup.second)
                    keyList.append(element)
                    key = tuple(keyList)
                    if key in subtotals.keys():
                        subtotals[key][0] += size
                        subtotals[key][1] += files
                    else:
                        subtotals[key] = [size, files]
        else:
            for userGroup in rsp.totalBytesUsed.keys():
                size = rsp.totalBytesUsed[userGroup]
                files = rsp.totalFileCount[userGroup]
                keyList = []
                if "user" in sum_by:
                    keyList.append(userGroup.first)
                if "group" in sum_by:
                    keyList.append(userGroup.second)
                key = tuple(keyList)
                if key in subtotals.keys():
                    subtotals[key][0] += size
                    subtotals[key][1] += files
                else:
                    subtotals[key] = [size, files]

        for key in subtotals.keys():
            row = list(key)
            row.extend(subtotals[key])
            tb.row(*tuple(row))

        # The order of the response is not guaranteed, so if no sort keys
        # are specified, sort by the first column at least.
        if args.sort_by:
            keys = []
            for col in args.sort_by:
                try:
                    pos = showCols.index(col)
                    keys.append(pos)
                except ValueError:
                    # Ignore sort keys that are not shown as columns.
                    pass
        else:
            keys = [0]
        tb.sort(cols=keys, reverse=args.reverse)

        # Format the size column after sorting.
        if args.units:
            col = tb.get_col("size")
            col = [self._to_units(val, args.units) for val in col]
            tb.replace_col("size", col)
            tb.replace_header("size", "size (%siB)" % args.units)
        elif args.human_readable:
            col = tb.get_col("size")
            col = [filesizeformat(val) for val in col]
            tb.replace_col("size", col)
        else:
            tb.replace_header("size", "size (bytes)")

        self.ctx.out(str(tb.build()))
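
All of these reports are rendered through omero.util.text.TableBuilder. A minimal usage sketch pieced together from the calls in the examples (argument semantics inferred from context; the column names and sample rows are made up for illustration):

from omero.util.text import TableBuilder, filesizeformat

tb = TableBuilder("user", "group", "size", "files")
tb.set_align("llrr")                    # left-align names, right-align numbers
tb.row("alice", "lab-a", 2048, 3)
tb.row("bob", "lab-a", 10485760, 12)
tb.sort(cols=[2], reverse=True)         # largest usage first
tb.replace_col("size", [filesizeformat(v) for v in tb.get_col("size")])
print str(tb.build())
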
Example #7
    def images(self, args):
        """List images, filtering for archives, etc.

This command is useful for showing pre-FS (i.e. OMERO 4.4
and before) images which have original data archived with
them. It *may* be possible to convert these to OMERO 5
filesets.

Examples:

    bin/omero fs images --archived       # List only OMERO4 images
    bin/omero fs images --order=newest   # Default
    bin/omero fs images --order=largest  # Most used space
    bin/omero fs images --limit=500      # Longer listings
    bin/omero fs images --extended       # More details
        """

        from omero.rtypes import unwrap
        from omero.sys import ParametersI

        select = (
            "select i.id, i.name, fs.id,"
            "count(f1.id)+count(f2.id), "
            "sum(coalesce(f1.size,0) + coalesce(f2.size, 0)) ")
        archived = "" if args.archived else "left outer "
        query1 = (
            "from Image i join i.pixels p "
            "%sjoin p.pixelsFileMaps m %sjoin m.parent f1 "
            "left outer join i.fileset as fs "
            "left outer join fs.usedFiles as uf "
            "left outer join uf.originalFile as f2 ") % \
            (archived, archived)
        query2 = (
            "group by i.id, i.name, fs.id ")

        if args.order == "newest":
            query3 = "order by i.id desc"
        elif args.order == "oldest":
            query3 = "order by i.id asc"
        elif args.order == "largest":
            query3 = "order by "
            query3 += "sum(coalesce(f1.size,0) + coalesce(f2.size, 0)) desc"

        client = self.ctx.conn(args)
        service = client.sf.getQueryService()

        count = unwrap(service.projection(
            "select count(i) " + query1,
            None, {"omero.group": "-1"}))[0][0]
        rows = unwrap(service.projection(
            select + query1 + query2 + query3,
            ParametersI().page(args.offset, args.limit),
            {"omero.group": "-1"}))

        # Formatting
        for row in rows:
            if row[2] is None:
                row[2] = ""
            bytes = row[4]
            row[4] = filesizeformat(bytes)

        cols = ["Image", "Name", "FS", "# Files", "Size"]
        if args.extended:
            cols.extend(["Pixels", "Pyramid"])

        tb = self._table(args)
        tb.page(args.offset, args.limit, count)
        tb.cols(cols)
        for idx, row in enumerate(rows):
            values = list(row)
            if args.extended:
                self._extended_info(client, row, values)
            tb.row(idx, *tuple(values))
        self.ctx.out(str(tb.build()))
Example #8
class FsControl(CmdControl):

    def _configure(self, parser):

        parser.add_login_arguments()
        sub = parser.sub()

        images = parser.add(sub, self.images)
        images.add_style_argument()
        images.add_limit_arguments()
        images.add_argument(
            "--order", default="newest",
            choices=("newest", "oldest", "largest"),
            help="order of the rows returned")
        images.add_argument(
            "--archived", action="store_true",
            help="list only images with archived data")

        mkdir = parser.add(sub, self.mkdir)
        mkdir.add_argument(
            "new_dir",
            help="directory to create in the repository")
        mkdir.add_argument(
            "--parents", action="store_true",
            help="ensure whole path exists")

        rename = parser.add(sub, self.rename)
        rename.add_argument(
            "fileset",
            type=ProxyStringType("Fileset"),
            help="Fileset which should be renamed: ID or Fileset:ID")
        rename.add_argument(
            "--no-move", action="store_true",
            help="do not move original files and import log")

        repos = parser.add(sub, self.repos)
        repos.add_style_argument()
        repos.add_argument(
            "--managed", action="store_true",
            help="repos only managed repositories")

        sets = parser.add(sub, self.sets)
        sets.add_style_argument()
        sets.add_limit_arguments()
        sets.add_argument(
            "--order", default="newest",
            choices=("newest", "oldest", "prefix"),
            help="order of the rows returned")
        sets.add_argument(
            "--without-images", action="store_true",
            help="list only sets without images (i.e. corrupt)")
        sets.add_argument(
            "--with-transfer", nargs="+", action="append",
            help="list sets by their in-place import method")
        sets.add_argument(
            "--check", action="store_true",
            help="verify the file checksums for each fileset (admins only)")

        ls = parser.add(sub, self.ls)
        ls.add_argument(
            "fileset",
            type=ProxyStringType("Fileset"))

        logfile = parser.add(sub, self.logfile)
        logfile.add_argument("fileset", type=ProxyStringType("Fileset"))
        logfile.add_argument(
            "filename",  nargs="?", default="-",
            help="Local filename to be saved to. '-' for stdout")
        logopts = logfile.add_mutually_exclusive_group()
        logopts.add_argument(
            "--name", action="store_true",
            help="return the path of the logfile within the ManagedRepository")
        logopts.add_argument(
            "--size", action="store_true",
            help="return the size of the logfile in bytes")

        usage = parser.add(sub, self.usage)
        usage.set_args_unsorted()
        usage.add_login_arguments()
        usage.add_style_argument()
        usage.add_argument(
            "--wait", type=long,
            help="Number of seconds to wait for the processing to complete "
            "(Indefinite < 0; No wait=0).", default=-1)
        usage.add_argument(
            "--size-only", action="store_true",
            help="Print total bytes used in bytes")
        usage.add_argument(
            "--report", action="store_true",
            help="Print detailed breakdown of disk usage")
        usage.add_argument(
            "--sum-by", nargs="+", choices=("user", "group", "component"),
            help=("Breakdown of disk usage by a combination of "
                  "user, group and component"))
        usage.add_argument(
            "--sort-by", nargs="+",
            choices=("user", "group", "component", "size", "files"),
            help=("Sort the report table by one or more of "
                  "user, group, component, size and files"))
        usage.add_argument(
            "--reverse", action="store_true",
            help="Reverse sort order")
        unit_group = usage.add_mutually_exclusive_group()
        unit_group.add_argument(
            "--units", choices="KMGTP",
            help="Units to use for disk usage")
        unit_group.add_argument(
            "--human-readable", action="store_true",
            help="Use most appropriate units")
        usage.add_argument(
            "--groups",  action="store_true",
            help="Print size for all current user's groups")
        usage.add_argument(
            "obj", nargs="*",
            help=("Objects to be queried in the form "
                  "'<Class>:<Id>[,<Id> ...]', or '<Class>:*' "
                  "to query all the objects of the given type "))

        for x in (images, sets):
            x.add_argument(
                "--extended", action="store_true",
                help="provide more details for each (slow)")

    def _table(self, args):
        """
        """
        from omero.util.text import TableBuilder
        tb = TableBuilder("#")
        if args.style:
            tb.set_style(args.style)
        return tb

    def _extended_info(self, client, row, values):

        from omero.cmd import ManageImageBinaries

        rsp = None
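        # Ask the server for pixel and pyramid sizes via ManageImageBinaries;
        # any failure leaves rsp as None and is reported as "ERR" below.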
        try:
            mib = ManageImageBinaries()
            mib.imageId = row[0]
            cb = client.submit(mib)
            try:
                rsp = cb.getResponse()
            finally:
                cb.close(True)
        except Exception, e:
            self.ctx.dbg("Error on MIB: %s" % e)

        if rsp is None:
            values.extend(["ERR", "ERR"])
            return  # Early exit!

        if rsp.pixelsPresent:
            values.append(filesizeformat(rsp.pixelSize))
        elif rsp.pixelSize == 0:
            values.append(filesizeformat(0))
        else:
            v = "%s (bak)" % filesizeformat(rsp.pixelSize)
            values.append(v)
        values.append(filesizeformat(rsp.pyramidSize))
Example #9
    def images(self, args):
        """List images, filtering for archives, etc.

This command is useful for showing pre-FS (i.e. OMERO 4.4
and before) images which have original data archived with
them. It *may* be possible to convert these to OMERO 5
filesets.

Examples:

    bin/omero fs images --archived       # List only OMERO4 images
    bin/omero fs images --order=newest   # Default
    bin/omero fs images --order=largest  # Most used space
    bin/omero fs images --limit=500      # Longer listings
    bin/omero fs images --extended       # More details
        """

        from omero.rtypes import unwrap
        from omero.sys import ParametersI
        from omero.util.text import filesizeformat

        select = (
            "select i.id, i.name, fs.id,"
            "count(f1.id)+count(f2.id), "
            "sum(coalesce(f1.size,0) + coalesce(f2.size, 0)) ")
        archived = "" if args.archived else "left outer "
        query1 = (
            "from Image i join i.pixels p "
            "%sjoin p.pixelsFileMaps m %sjoin m.parent f1 "
            "left outer join i.fileset as fs "
            "left outer join fs.usedFiles as uf "
            "left outer join uf.originalFile as f2 ") % \
            (archived, archived)
        query2 = (
            "group by i.id, i.name, fs.id ")

        if args.order == "newest":
            query3 = "order by i.id desc"
        elif args.order == "oldest":
            query3 = "order by i.id asc"
        elif args.order == "largest":
            query3 = "order by "
            query3 += "sum(coalesce(f1.size,0) + coalesce(f2.size, 0)) desc"

        client = self.ctx.conn(args)
        service = client.sf.getQueryService()

        count = unwrap(service.projection(
            "select count(i) " + query1,
            None, {"omero.group": "-1"}))[0][0]
        rows = unwrap(service.projection(
            select + query1 + query2 + query3,
            ParametersI().page(args.offset, args.limit),
            {"omero.group": "-1"}))

        # Formatting
        for row in rows:
            if row[2] is None:
                row[2] = ""
            bytes = row[4]
            row[4] = filesizeformat(bytes)

        cols = ["Image", "Name", "FS", "# Files", "Size"]
        if args.extended:
            cols.extend(["Pixels", "Pyramid"])

        tb = self._table(args)
        tb.page(args.offset, args.limit, count)
        tb.cols(cols)
        for idx, row in enumerate(rows):
            values = list(row)
            if args.extended:
                self._extended_info(client, row, values)
            tb.row(idx, *tuple(values))
        self.ctx.out(str(tb.build()))
Example #10
    def _detailed_usage_report(self, req, rsp, status, args):
        """
        Print a breakdown of disk usage in table form, including user,
        group and component information according to the args.
        """
        from omero.util.text import TableBuilder

        sum_by = ("user", "group", "component")
        if args.sum_by is not None:
            sum_by = args.sum_by
        showCols = list(sum_by)
        showCols.extend(["size", "files"])

        align = "l" * len(sum_by)
        align += "rr"
        tb = TableBuilder(*showCols)
        tb.set_align(align)
        if args.style:
            tb.set_style(args.style)

        subtotals = {}
        if "component" in sum_by:
            for userGroup in rsp.bytesUsedByReferer.keys():
                for (element, size) in rsp.bytesUsedByReferer[
                        userGroup].items():
                    files = rsp.fileCountByReferer[userGroup][element]
                    keyList = []
                    if "user" in sum_by:
                        keyList.append(userGroup.first)
                    if "group" in sum_by:
                        keyList.append(userGroup.second)
                    keyList.append(element)
                    key = tuple(keyList)
                    if key in subtotals.keys():
                        subtotals[key][0] += size
                        subtotals[key][1] += files
                    else:
                        subtotals[key] = [size, files]
        else:
            for userGroup in rsp.totalBytesUsed.keys():
                size = rsp.totalBytesUsed[userGroup]
                files = rsp.totalFileCount[userGroup]
                keyList = []
                if "user" in sum_by:
                    keyList.append(userGroup.first)
                if "group" in sum_by:
                    keyList.append(userGroup.second)
                key = tuple(keyList)
                if key in subtotals.keys():
                    subtotals[key][0] += size
                    subtotals[key][1] += files
                else:
                    subtotals[key] = [size, files]

        for key in subtotals.keys():
            row = list(key)
            row.extend(subtotals[key])
            tb.row(*tuple(row))

        # The order of the response is not guaranteed, so if no sort keys
        # are specified, sort by the first column at least.
        if args.sort_by:
            keys = []
            for col in args.sort_by:
                try:
                    pos = showCols.index(col)
                    keys.append(pos)
                except ValueError:
                    # Ignore sort keys that are not shown as columns.
                    pass
        else:
            keys = [0]
        tb.sort(cols=keys, reverse=args.reverse)

        # Format the size column after sorting.
        if args.units:
            col = tb.get_col("size")
            col = [self._to_units(val, args.units) for val in col]
            tb.replace_col("size", col)
            tb.replace_header("size", "size (%siB)" % args.units)
        elif args.human_readable:
            col = tb.get_col("size")
            col = [filesizeformat(val) for val in col]
            tb.replace_col("size", col)
        else:
            tb.replace_header("size", "size (bytes)")

        self.ctx.out(str(tb.build()))
Example #11
class FsControl(BaseControl):

    def _configure(self, parser):

        parser.add_login_arguments()
        sub = parser.sub()

        images = parser.add(sub, self.images)
        images.add_style_argument()
        images.add_limit_arguments()
        images.add_argument(
            "--order", default="newest",
            choices=("newest", "oldest", "largest"),
            help="order of the rows returned")
        images.add_argument(
            "--archived", action="store_true",
            help="list only images with archived data")

        rename = parser.add(sub, self.rename)
        rename.add_argument(
            "fileset",
            type=ProxyStringType("Fileset"),
            help="Fileset which should be renamed: ID or Fileset:ID")
        rename.add_argument(
            "--no-move", action="store_true",
            help="do not move original files and import log")

        repos = parser.add(sub, self.repos)
        repos.add_style_argument()
        repos.add_argument(
            "--managed", action="store_true",
            help="repos only managed repositories")

        sets = parser.add(sub, self.sets)
        sets.add_style_argument()
        sets.add_limit_arguments()
        sets.add_argument(
            "--order", default="newest",
            choices=("newest", "oldest", "prefix"),
            help="order of the rows returned")
        sets.add_argument(
            "--without-images", action="store_true",
            help="list only sets without images (i.e. corrupt)")
        sets.add_argument(
            "--with-transfer", nargs="+", action="append",
            help="list sets by their in-place import method")
        sets.add_argument(
            "--check", action="store_true",
            help="checks each fileset for validity (admins only)")

        for x in (images, sets):
            x.add_argument(
                "--extended", action="store_true",
                help="provide more details for each (slow)")

    def _table(self, args):
        """
        """
        from omero.util.text import TableBuilder
        tb = TableBuilder("#")
        if args.style:
            tb.set_style(args.style)
        return tb

    def _extended_info(self, client, row, values):

        from omero.cmd import ManageImageBinaries
        from omero.util.text import filesizeformat

        rsp = None
        try:
            mib = ManageImageBinaries()
            mib.imageId = row[0]
            cb = client.submit(mib)
            try:
                rsp = cb.getResponse()
            finally:
                cb.close(True)
        except Exception, e:
            self.ctx.dbg("Error on MIB: %s" % e)

        if rsp is None:
            values.extend(["ERR", "ERR"])
            return  # Early exit!

        if rsp.pixelsPresent:
            values.append(filesizeformat(rsp.pixelSize))
        elif rsp.pixelSize == 0:
            values.append(filesizeformat(0))
        else:
            v = "%s (bak)" % filesizeformat(rsp.pixelSize)
            values.append(v)
        values.append(filesizeformat(rsp.pyramidSize))
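
The common thread in all of these examples is that raw byte counts are only converted for display, via filesizeformat. A rough behavioural sketch of such a helper (the real omero.util.text.filesizeformat may differ in rounding and labels):

def filesizeformat_sketch(num_bytes):
    # Illustrative only: walk up the binary prefixes until the value is
    # small enough to read, then format with one decimal place.
    value = float(num_bytes or 0)
    for unit in ("bytes", "KiB", "MiB", "GiB", "TiB"):
        if value < 1024.0:
            return "%.1f %s" % (value, unit)
        value /= 1024.0
    return "%.1f PiB" % value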