Example #1
def test_get_objects(self):
    client = self.new_client()
    image = self.create_test_image(100, 100, 1, 1, 1, client.getSession())
    conn = BlitzGateway(client_obj=client)
    params = {}
    params["Data_Type"] = "Image"
    params["IDs"] = [image.id.val]
    objects, message = scriptUtil.get_objects(conn, params)
    assert objects[0].id == image.id.val
    assert message == ''
    conn.close()
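The test above also documents the contract every later example relies on: scriptUtil.get_objects (imported as script_utils in the remaining examples) takes an open connection plus a parameter dict keyed by "Data_Type" and "IDs", and returns the matching wrapped objects together with a log message. A minimal sketch of that dict; the IDs below are placeholders, not real objects:

# Minimal sketch of the parameter dict consumed by get_objects().
# "Data_Type" is "Image", "Dataset" or "Project"; the IDs are hypothetical.
script_params = {
    "Data_Type": "Image",
    "IDs": [101, 102],
}
# objects, message = script_utils.get_objects(conn, script_params)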
Example #2
def copy_to_remote_omero(client, local_conn, script_params):
    # TODO could maybe refactor to remove client
    data_type = script_params["Data_Type"]
    username = script_params["username"]
    # password = client.getInput("password", unwrap=True)
    password = keyring.get_password("omero", username)
    # The managed_dir is where the local images are stored.
    # TODO could pass this in instead of client?
    managed_dir = client.sf.getConfigService().getConfigValue(
        "omero.managed.dir")
    # Get the images or datasets
    message = ""
    objects, log_message = script_utils.get_objects(local_conn, script_params)
    message += log_message
    if not objects:
        return message

    try:
        # Connect to remote omero
        c, cli, remote_conn = connect_to_remote(password, username)

        images = []
        if data_type == 'Dataset':
            # TODO handle multiple datasets
            for ds in objects:
                dataset_name = ds.getName()
                target_dataset = "Dataset:name:" + dataset_name
                # create new remote dataset
                remote_ds = upload_dataset(cli, ds, remote_conn)

                images.extend(list(ds.listChildren()))
                if not images:
                    message += "No image found in dataset {}".format(
                        dataset_name)
                    return message

                print("Processing {} images, in dataset {}".format(
                    len(images), dataset_name))
                # TODO use remote_ds id, instead of target ds name
                uploaded_image_ids = upload_images(cli, images, managed_dir,
                                                   target_dataset, remote_conn)
        else:
            images = objects

            print("Processing %s images" % len(images))
            uploaded_image_ids = upload_images(cli, images, managed_dir, None,
                                               remote_conn)
    finally:
        close_remote_connection(c, cli, remote_conn)
    # End of transferring images

    message += "uploaded image ids: " + str(tuple(uploaded_image_ids))
    print(message)
    return message
Example #3
def find_fileannotation(conn, script_params):
    """
  Find the ilastik project file annotations either attached 
  to the image, dataset or project

  Args:
  conn (BlitzGateway): The OMERO connection
  script_params (dict): The script parameters

  Returns:
  The first ilastik project file annotation found

  Raises:
  Exception: If no ilastik file annotation was found
  """
    objects, log_message = script_utils.get_objects(conn, script_params)

    for obj in objects:
        for ann in obj.listAnnotations():
            if ann.OMERO_TYPE == omero.model.FileAnnotationI:
                if ann.getFile().getName().endswith(".ilp"):
                    return ann

    for obj in objects:
        if type(obj) == omero.gateway._ImageWrapper:
            ds = obj.getParent()
            for ann in ds.listAnnotations():
                if ann.OMERO_TYPE == omero.model.FileAnnotationI:
                    if ann.getFile().getName().endswith(".ilp"):
                        return ann
            pr = obj.getProject()
            for ann in pr.listAnnotations():
                if ann.OMERO_TYPE == omero.model.FileAnnotationI:
                    if ann.getFile().getName().endswith(".ilp"):
                        return ann

    raise Exception("No ilastik project found!")
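The same check (a FileAnnotation whose file name ends in ".ilp") is written out three times above, once each for the image, its parent dataset and its project. A possible refactor, shown only as a sketch and not part of the original script (it assumes the same omero import the script already uses), pulls that lookup into one helper:

def first_ilp_annotation(obj):
    # Return the first ilastik (.ilp) FileAnnotation on obj, or None.
    for ann in obj.listAnnotations():
        if ann.OMERO_TYPE == omero.model.FileAnnotationI and \
                ann.getFile().getName().endswith(".ilp"):
            return ann
    return None

find_fileannotation() could then call the helper on each object first and, for images, fall back to obj.getParent() and obj.getProject(), keeping the original search order.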
Example #4
def load_images(conn, script_params):
    """
    Load images specified by the script parameters

    Args:
    conn (BlitzGateway): The OMERO connection
    script_params (dict): The script parameters

    Returns:
    list(ImageWrapper): The images
    """
    objects, log_message = script_utils.get_objects(conn, script_params)
    data_type = script_params["Data_Type"]
    images = []
    if data_type == 'Dataset':
        for ds in objects:
            images.extend(list(ds.listChildren()))
    elif data_type == 'Project':
        for p in objects:
            for ds in p.listChildren():
                images.extend(list(ds.listChildren()))
    else:
        images = objects
    return images
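A short usage sketch for load_images(); it assumes an open BlitzGateway connection conn, and the dataset ID is a placeholder:

# Usage sketch; 42 is a hypothetical dataset ID.
script_params = {"Data_Type": "Dataset", "IDs": [42]}
images = load_images(conn, script_params)
print("Loaded %d images" % len(images))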
Example #5
def process_images(conn, script_params):
    """
    Process the script params to make a list of channel_offsets, then iterate
    through the images creating a new image from each with the specified
    channel offsets
    """

    message = ""

    # Get the images
    images, log_message = script_utils.get_objects(conn, script_params)
    message += log_message
    if not images:
        return None, None, message
    image_ids = [i.getId() for i in images]

    # Get the channel offsets
    channel_offsets = []
    for i in range(1, 5):
        p_name = "Channel_%s" % i
        if script_params[p_name]:
            index = i - 1  # UI channel index is 1-based - we want 0-based
            x = "Channel%s_X_shift" % i in script_params and \
                script_params["Channel%s_X_shift" % i] or 0
            y = "Channel%s_Y_shift" % i in script_params and \
                script_params["Channel%s_Y_shift" % i] or 0
            z = "Channel%s_Z_shift" % i in script_params and \
                script_params["Channel%s_Z_shift" % i] or 0
            channel_offsets.append({'index': index, 'x': x, 'y': y, 'z': z})

    dataset = None
    if "New_Dataset_Name" in script_params:
        # create new Dataset...
        new_dataset_name = script_params["New_Dataset_Name"]
        dataset = omero.gateway.DatasetWrapper(conn,
                                               obj=omero.model.DatasetI())
        dataset.setName(rstring(new_dataset_name))
        dataset.save()
        # add to parent Project
        parent_ds = images[0].getParent()
        project = parent_ds is not None and parent_ds.getParent() or None
        if project is not None and project.canLink():
            link = omero.model.ProjectDatasetLinkI()
            link.parent = omero.model.ProjectI(project.getId(), False)
            link.child = omero.model.DatasetI(dataset.getId(), False)
            conn.getUpdateService().saveAndReturnObject(link)

    # need to handle Datasets eventually - Just do images for now
    new_images = []
    links = []
    for iId in image_ids:
        new_img, link = new_image_with_channel_offsets(conn, iId,
                                                       channel_offsets,
                                                       dataset)
        if new_img is not None:
            new_images.append(new_img)
            if link is not None:
                links.append(link)

    if not new_images:
        message += "No image created."
    else:
        if len(new_images) == 1:
            if not links:
                link_message = " but could not be attached"
            else:
                link_message = ""
            message += "New image created%s: %s." % (link_message,
                                                     new_images[0].getName())
        elif len(new_images) > 1:
            message += "%s new images created" % len(new_images)
            if not len(links) == len(new_images):
                message += " but some of them could not be attached."
            else:
                message += "."

    return new_images, dataset, message
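The '"key" in dict and dict[key] or 0' pattern used for the channel shifts above is the old conditional idiom. A near-equivalent sketch using dict.get() (not the original code; it only differs if a stored shift value is falsy, e.g. None, and it assumes the caller has already checked the Channel_i flag as the loop above does):

def channel_offset(script_params, i):
    # Offsets default to 0 when the corresponding key is missing.
    return {
        'index': i - 1,  # UI channels are 1-based; image channels are 0-based
        'x': script_params.get("Channel%s_X_shift" % i, 0),
        'y': script_params.get("Channel%s_Y_shift" % i, 0),
        'z': script_params.get("Channel%s_Z_shift" % i, 0),
    }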
Example #6
def roi_figure(conn, command_args):
    """
    This processes the script parameters, adding defaults if needed.
    Then calls a method to make the figure, and finally uploads and attaches
    this to the primary image.

    @param: conn            The BlitzGateway connection
    @param: command_args    Map of String:Object parameters for the script.
                            Objects are not rtypes, since getValue() was
                            called when the map was processed below.
                            But, list and map objects may contain rtypes (need
                            to call getValue())

    @return:                the id of the originalFileLink child. (ID object,
                            not value)
    """

    log("ROI figure created by OMERO on %s" % date.today())
    log("")

    message = ""  # message to be returned to the client
    pixel_ids = []
    image_ids = []
    image_labels = []

    # function for getting image labels.
    def get_image_names(full_name, tags_list, pd_list):
        name = full_name.split("/")[-1]
        return [name]

    # default function for getting labels is getName (or use datasets / tags)
    if "Image_Labels" in command_args:
        if command_args["Image_Labels"] == "Datasets":

            def get_datasets(name, tags_list, pd_list):
                return [dataset for project, dataset in pd_list]

            get_labels = get_datasets
        elif command_args["Image_Labels"] == "Tags":

            def get_tags(name, tags_list, pd_list):
                return tags_list

            get_labels = get_tags
        else:
            get_labels = get_image_names
    else:
        get_labels = get_image_names

    # Get the images
    images, log_message = script_utils.get_objects(conn, command_args)
    message += log_message
    if not images:
        return None, message

    # Check for rectangular ROIs and filter images list
    images = [image for image in images if image.getROICount("Rectangle") > 0]
    if not images:
        message += "No rectangle ROI found."
        return None, message

    # Attach figure to the first image
    omero_image = images[0]

    # process the list of images. If image_ids is not set, script can't run.
    log("Image details:")
    for image in images:
        image_ids.append(image.getId())
        pixel_ids.append(image.getPrimaryPixels().getId())

    # a map of imageId : list of (project, dataset) names.
    pd_map = figUtil.getDatasetsProjectsFromImages(conn.getQueryService(),
                                                   image_ids)
    tag_map = figUtil.getTagsFromImages(conn.getMetadataService(), image_ids)
    # Build a legend entry for each image
    for image in images:
        name = image.getName()
        image_date = image.getAcquisitionDate()
        iid = image.getId()
        tags_list = tag_map[iid]
        pd_list = pd_map[iid]

        tags = ", ".join(tags_list)
        pd_string = ", ".join(["%s/%s" % pd for pd in pd_list])
        log(" Image: %s  ID: %d" % (name, iid))
        if image_date:
            log("  Date: %s" % image_date)
        else:
            log("  Date: not set")
        log("  Tags: %s" % tags)
        log("  Project/Datasets: %s" % pd_string)

        image_labels.append(get_labels(name, tags_list, pd_list))

    # use the first image to define dimensions, channel colours etc.
    size_x = omero_image.getSizeX()
    size_y = omero_image.getSizeY()
    size_z = omero_image.getSizeZ()
    size_c = omero_image.getSizeC()

    width = size_x
    if "Width" in command_args:
        w = command_args["Width"]
        try:
            width = int(w)
        except ValueError:
            log("Invalid width: %s Using default value: %d" % (str(w), size_x))

    height = size_y
    if "Height" in command_args:
        h = command_args["Height"]
        try:
            height = int(h)
        except ValueError:
            log("Invalid height: %s Using default value" % (str(h), size_y))

    log("Image dimensions for all panels (pixels): width: %d  height: %d" %
        (width, height))

    merged_indexes = []  # the channels in the combined image,
    merged_colours = {}
    if "Merged_Colours" in command_args:
        c_colour_map = command_args["Merged_Colours"]
        for c in c_colour_map:
            rgb = c_colour_map[c]
            try:
                rgb = int(rgb)
                c_index = int(c)
            except ValueError:
                continue
            rgba = image_utils.int_to_rgba(rgb)
            merged_colours[c_index] = rgba
            merged_indexes.append(c_index)
        merged_indexes.sort()
    # make sure we have some merged channels
    if len(merged_indexes) == 0:
        merged_indexes = list(range(size_c))  # list, so reverse() below works
    merged_indexes.reverse()

    merged_names = False
    if "Merged_Names" in command_args:
        merged_names = command_args["Merged_Names"]

    # Make channel-names map. If argument wasn't specified, name by index
    channel_names = {}
    if "Channel_Names" in command_args:
        c_name_map = command_args["Channel_Names"]
        for c in range(size_c):
            if str(c) in c_name_map:
                channel_names[c] = c_name_map[str(c)]
            else:
                channel_names[c] = str(c)
    else:
        for c in range(size_c):
            channel_names[c] = str(c)

    # Make split-indexes list. If no "Split_Indexes", show none:
    # http://www.openmicroscopy.org/community/viewtopic.php?f=4&t=940
    split_indexes = []
    if "Split_Indexes" in command_args:
        for index in command_args["Split_Indexes"]:
            split_indexes.append(index)

    colour_channels = True
    key = "Split_Panels_Grey"
    if key in command_args and command_args[key]:
        colour_channels = False

    algorithm = ProjectionType.MAXIMUMINTENSITY
    if "Algorithm" in command_args:
        a = command_args["Algorithm"]
        if (a == "Mean Intensity"):
            algorithm = ProjectionType.MEANINTENSITY

    stepping = 1
    if "Stepping" in command_args:
        s = command_args["Stepping"]
        if (0 < s < size_z):
            stepping = s

    scalebar = None
    if "Scalebar" in command_args:
        sb = command_args["Scalebar"]
        try:
            scalebar = int(sb)
            if scalebar <= 0:
                scalebar = None
            else:
                log("Scalebar is %d microns" % scalebar)
        except ValueError:
            log("Invalid value for scalebar: %s" % str(sb))
            scalebar = None

    overlay_colour = (255, 255, 255)
    if "Overlay_Colour" in command_args:
        r, g, b, a = OVERLAY_COLOURS[command_args["Overlay_Colour"]]
        overlay_colour = (r, g, b)

    roi_zoom = None
    if "ROI_Zoom" in command_args:
        roi_zoom = float(command_args["ROI_Zoom"])
        if roi_zoom == 0:
            roi_zoom = None

    roi_label = "FigureROI"
    if "ROI_Label" in command_args:
        roi_label = command_args["ROI_Label"]

    spacer = (width // 50) + 2  # integer division keeps the spacer an int

    fig = get_split_view(conn, image_ids, pixel_ids, split_indexes,
                         channel_names, merged_names, colour_channels,
                         merged_indexes, merged_colours, width, height,
                         image_labels, spacer, algorithm, stepping, scalebar,
                         overlay_colour, roi_zoom, roi_label)

    if fig is None:
        log_message = "No figure produced"
        log("\n" + log_message)
        message += log_message
        return None, message

    log("")
    fig_legend = "\n".join(log_strings)

    format = command_args["Format"]

    figure_name = "roi_figure"
    if "Figure_Name" in command_args:
        figure_name = command_args["Figure_Name"]
        figure_name = os.path.basename(figure_name)
    output = "localfile"
    if format == 'PNG':
        output = output + ".png"
        figure_name = figure_name + ".png"
        fig.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figure_name = figure_name + ".tiff"
        fig.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figure_name = figure_name + ".jpg"
        fig.save(output)
        mimetype = "image/jpeg"

    # Use util method to upload the figure 'output' to the server, attaching
    # it to the omeroImage, adding the
    # figLegend as the fileAnnotation description.
    # Returns the id of the originalFileLink child. (ID object, not value)
    namespace = NSCREATED + "/omero/figure_scripts/ROI_Split_Figure"
    file_annotation, fa_message = script_utils.create_link_file_annotation(
        conn,
        output,
        omero_image,
        output="ROI Split figure",
        mimetype=mimetype,
        namespace=namespace,
        description=fig_legend,
        orig_file_path_and_name=figure_name)
    message += fa_message

    return file_annotation, message
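The Format branch above (PNG / TIFF / default JPEG, each picking a file extension and mimetype) recurs almost verbatim in Examples #9, #11 and #12. One way to share it is sketched below; this is an assumption about how it could be factored, not code from the original scripts:

def figure_output_names(fig_format, base_name):
    # Map the script's "Format" parameter to output file names and a mimetype.
    if fig_format == 'PNG':
        ext, mimetype = ".png", "image/png"
    elif fig_format == 'TIFF':
        ext, mimetype = ".tiff", "image/tiff"
    else:
        ext, mimetype = ".jpg", "image/jpeg"
    return "localfile" + ext, base_name + ext, mimetype

The caller would still save the figure itself, e.g. fig.save(output, "PNG") for the non-JPEG formats.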
Example #7
def copy_to_remote_omero(client, local_conn, script_params):
    # TODO could maybe refactor to remove client
    data_type = script_params["Data_Type"]
    username = script_params["Username"]
    password = script_params["Password"]
    image_format = script_params["Image_Format"]

    global include_annos
    include_annos = script_params["Include_Annotations"]
    # The managed_dir is where the local images are stored.
    # TODO could pass this in instead of client?
    # This directory requires administrator privileges to access, which is
    # why the script fails when run by a non-admin user.
    managed_dir = None

    try:
        managed_dir = client.sf.getConfigService().getConfigValue(
            "omero.managed.dir")
    except Exception:
        from os.path import expanduser
        managed_dir = os.path.join(expanduser("~"), 'omero_data',
                                   'ManagedRepository')

    # Get the images or datasets
    message = ""
    objects, log_message = script_utils.get_objects(local_conn, script_params)
    message += log_message
    if not objects:
        return message

    try:
        # Connect to remote omero
        c, cli, remote_conn = connect_to_remote(password, username)

        images = []
        if data_type == 'Dataset':
            # TODO handle multiple datasets
            for ds in objects:
                dataset_name = ds.getName()
                target_dataset = "Dataset:name:" + dataset_name
                # create new remote dataset
                uploaded_dataset_id = upload_dataset(cli, ds, remote_conn,
                                                     local_conn)

                remote_ds = remote_conn.getObject("Dataset",
                                                  uploaded_dataset_id)
                images.extend(list(ds.listChildren()))
                if not images:
                    message += "No image found in dataset {}".format(
                        dataset_name)
                    return message

                print(("Processing {} images, in dataset {}".format(
                    len(images), dataset_name)))
                # TODO use remote_ds id, instead of target ds name
                uploaded_image_ids = upload_images(cli, images, managed_dir,
                                                   target_dataset, remote_conn,
                                                   local_conn, remote_ds,
                                                   image_format)
        else:
            images = objects

            print(("Processing %s images" % len(images)))
            uploaded_image_ids = upload_images(cli, images, managed_dir, None,
                                               remote_conn, local_conn, None,
                                               image_format)
    finally:
        close_remote_connection(c, cli, remote_conn)
    # End of transferring images

    message += "uploaded image ids: " + str(tuple(uploaded_image_ids))
    return message
Example #8
def make_images_from_rois(conn, parameter_map):
    """
    Processes the list of Image_IDs, either making a new image-stack or a new
    dataset from each image, with new image planes coming from the regions in
    Rectangular ROIs on the parent images.
    """

    data_type = parameter_map["Data_Type"]

    message = ""

    # Get the images
    objects, log_message = script_utils.get_objects(conn, parameter_map)
    message += log_message
    if not objects:
        return None, message

    # Concatenate images from datasets
    if data_type == 'Image':
        images = objects
    else:
        images = []
        for ds in objects:
            images += ds.listChildren()

    # Check for rectangular ROIs and filter images list
    images = [image for image in images if image.getROICount("Rectangle") > 0]
    if not images:
        message += "No rectangle ROI found."
        return None, message

    image_ids = [i.getId() for i in images]
    new_images = []
    new_datasets = []
    links = []
    for iid in image_ids:
        new_image, new_dataset, link = process_image(conn, iid, parameter_map)
        if new_image is not None:
            if isinstance(new_image, list):
                new_images.extend(new_image)
            else:
                new_images.append(new_image)
        if new_dataset is not None:
            new_datasets.append(new_dataset)
        if link is not None:
            if isinstance(link, list):
                links.extend(link)
            else:
                links.append(link)

    if new_images:
        if len(new_images) > 1:
            message += "Created %s new images" % len(new_images)
        else:
            message += "Created a new image"
    else:
        message += "No image created"

    if new_datasets:
        if len(new_datasets) > 1:
            message += " and %s new datasets" % len(new_datasets)
        else:
            message += " and a new dataset"

    if not links or not len(links) == len(new_images):
        message += " but some images could not be attached"
    message += "."

    robj = (len(new_images) > 0) and new_images[0]._obj or None
    return robj, message
Example #9
def split_view_figure(conn, script_params):
    """
    Processes the arguments, populating defaults if necessary, and prints the
    details to the log (fig-legend). It even handles missing arguments that
    are not optional (a legacy of when this ran from the command line with
    everything optional). It then calls make_split_view_figure() to make the
    figure and attaches it to the Image as an 'originalFile' annotation, with
    the fig-legend as the description.

    @return: the id of the originalFileLink child. (ID object, not value)
    """

    log("Split-View figure created by OMERO on %s" % date.today())
    log("")

    message = ""  # message to be returned to the client
    image_ids = []
    pixel_ids = []
    image_labels = []

    # function for getting image labels.
    def get_image_names(full_name, tags_list, pd_list):
        name = full_name.split("/")[-1]
        return [name.decode('utf8')]

    # default function for getting labels is getName (or use datasets / tags)
    if script_params["Image_Labels"] == "Datasets":

        def get_datasets(name, tags_list, pd_list):
            return [dataset.decode('utf8') for project, dataset in pd_list]

        get_labels = get_datasets
    elif script_params["Image_Labels"] == "Tags":

        def get_tags(name, tags_list, pd_list):
            return [t.decode('utf8') for t in tags_list]

        get_labels = get_tags
    else:
        get_labels = get_image_names

    # Get the images
    images, log_message = script_utils.get_objects(conn, script_params)
    message += log_message
    if not images:
        return None, message

    # Attach figure to the first image
    omero_image = images[0]

    # process the list of images
    log("Image details:")
    for image in images:
        image_ids.append(image.getId())
        pixel_ids.append(image.getPrimaryPixels().getId())

    # a map of imageId : list of (project, dataset) names.
    pd_map = figUtil.getDatasetsProjectsFromImages(conn.getQueryService(),
                                                   image_ids)
    tag_map = figUtil.getTagsFromImages(conn.getMetadataService(), image_ids)
    # Build a legend entry for each image
    for image in images:
        name = image.getName()
        image_date = image.getAcquisitionDate()
        iid = image.getId()
        tags_list = tag_map[iid]
        pd_list = pd_map[iid]

        tags = ", ".join(tags_list)
        pd_string = ", ".join(["%s/%s" % pd for pd in pd_list])
        log(" Image: %s  ID: %d" % (name, iid))
        if image_date:
            log("  Date: %s" % image_date)
        else:
            log("  Date: not set")
        log("  Tags: %s" % tags)
        log("  Project/Datasets: %s" % pd_string)

        image_labels.append(get_labels(name, tags_list, pd_list))

    # use the first image to define dimensions, channel colours etc.
    size_x = omero_image.getSizeX()
    size_y = omero_image.getSizeY()
    size_z = omero_image.getSizeZ()
    size_c = omero_image.getSizeC()

    # set image dimensions
    z_start = -1
    z_end = -1
    if "Z_Start" in script_params:
        z_start = script_params["Z_Start"]
    if "Z_End" in script_params:
        z_end = script_params["Z_End"]

    width = "Width" in script_params and script_params["Width"] or size_x
    height = "Height" in script_params and script_params["Height"] or size_y

    log("Image dimensions for all panels (pixels): width: %d  height: %d" %
        (width, height))

    # Make split-indexes list. If argument wasn't specified, include them all.
    split_indexes = []
    if "Split_Indexes" in script_params:
        split_indexes = script_params["Split_Indexes"]
    else:
        split_indexes = range(size_c)

    # Make channel-names map. If argument wasn't specified, name by index
    channel_names = {}
    for c in range(size_c):
        channel_names[c] = str(c)
    if "Channel_Names" in script_params:
        c_name_map = script_params["Channel_Names"]
        for c in c_name_map:
            index = int(c)
            channel_names[index] = c_name_map[c].decode('utf8')

    merged_indexes = []  # the channels in the combined image,
    merged_colours = {}
    if "Merged_Colours" in script_params:
        c_colour_map = script_params["Merged_Colours"]
        for c in c_colour_map:
            rgb = c_colour_map[c]
            try:
                rgb = int(rgb)
                c_index = int(c)
            except ValueError:
                continue
            rgba = image_utils.int_to_rgba(rgb)
            merged_colours[c_index] = rgba
            merged_indexes.append(c_index)
        merged_indexes.sort()
    else:
        merged_indexes = range(size_c)

    colour_channels = not script_params["Split_Panels_Grey"]

    algorithm = ProjectionType.MAXIMUMINTENSITY
    if "Mean Intensity" == script_params["Algorithm"]:
        algorithm = ProjectionType.MEANINTENSITY

    stepping = min(script_params["Stepping"], size_z)

    scalebar = None
    if "Scalebar" in script_params:
        scalebar = script_params["Scalebar"]
        log("Scalebar is %d microns" % scalebar)

    r, g, b, a = OVERLAY_COLOURS[script_params["Overlay_Colour"]]
    overlay_colour = (r, g, b)

    merged_names = script_params["Merged_Names"]

    fig = make_split_view_figure(conn, pixel_ids, z_start, z_end,
                                 split_indexes, channel_names, colour_channels,
                                 merged_indexes, merged_colours, merged_names,
                                 width, height, image_labels, algorithm,
                                 stepping, scalebar, overlay_colour)

    fig_legend = "\n".join(log_strings)

    figure_name = script_params["Figure_Name"]
    figure_name = os.path.basename(figure_name)
    output = "localfile"
    format = script_params["Format"]
    if format == 'PNG':
        output = output + ".png"
        figure_name = figure_name + ".png"
        fig.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figure_name = figure_name + ".tiff"
        fig.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figure_name = figure_name + ".jpg"
        fig.save(output)
        mimetype = "image/jpeg"

    # Upload the figure 'output' to the server, creating a file annotation and
    # attaching it to the omero_image, adding the
    # fig_legend as the fileAnnotation description.
    namespace = NSCREATED + "/omero/figure_scripts/Split_View_Figure"
    file_annotation, fa_message = script_utils.create_link_file_annotation(
        conn,
        output,
        omero_image,
        output="Split view figure",
        mimetype=mimetype,
        namespace=namespace,
        description=fig_legend,
        orig_file_path_and_name=figure_name)
    message += fa_message

    return file_annotation, message
Example #10
def process_images(conn, script_params):

    line_width = script_params['Line_Width']
    file_anns = []
    message = ""

    # Get the images
    images, log_message = script_utils.get_objects(conn, script_params)
    message += log_message
    if not images:
        return None, message

    # Check for line and polyline ROIs and filter images list
    images = [image for image in images if
              image.getROICount(["Polyline", "Line"]) > 0]
    if not images:
        message += "No ROI containing line or polyline was found."
        return None, message

    for image in images:

        c_names = []
        colors = []
        for ch in image.getChannels():
            c_names.append(ch.getLabel())
            colors.append(ch.getColor().getRGB())

        size_c = image.getSizeC()

        if 'Channels' in script_params:
            script_params['Channels'] = [i-1 for i in
                                         script_params['Channels']]
        else:
            script_params['Channels'] = range(size_c)

        roi_service = conn.getRoiService()
        result = roi_service.findByImage(image.getId(), None)

        lines = []
        polylines = []

        for roi in result.rois:
            roi_id = roi.getId().getValue()
            for s in roi.copyShapes():
                the_t = unwrap(s.getTheT())
                the_z = unwrap(s.getTheZ())
                z = 0
                t = 0
                if the_t is not None:
                    t = the_t
                if the_z is not None:
                    z = the_z
                # TODO: Add some filter of shapes e.g. text? / 'lines' only
                # etc.
                if type(s) == omero.model.LineI:
                    x1 = s.getX1().getValue()
                    x2 = s.getX2().getValue()
                    y1 = s.getY1().getValue()
                    y2 = s.getY2().getValue()
                    lines.append({'id': roi_id, 'theT': t, 'theZ': z,
                                  'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2})

                elif type(s) == omero.model.PolylineI:
                    v = s.getPoints().getValue()
                    points = roi_utils.points_string_to_xy_list(v)
                    polylines.append({'id': roi_id, 'theT': t, 'theZ': z,
                                      'points': points})

        if len(lines) == 0 and len(polylines) == 0:
            continue

        # prepare column headers, including line-id if we are going to output
        # raw data.
        line_id = script_params['Sum_or_Average'] == 'Average, with raw data' \
            and 'Line, ' or ""
        col_header = 'Image_ID, ROI_ID, Z, T, C, %sLine data %s of Line' \
            ' Width %s\n' % (line_id, script_params['Sum_or_Average'],
                             script_params['Line_Width'])

        # prepare a csv file to write our data to...
        file_name = "Plot_Profile_%s.csv" % image.getId()
        with open(file_name, 'w') as f:
            f.write(col_header)
            if len(lines) > 0:
                process_lines(conn, script_params, image, lines, line_width, f)
            if len(polylines) > 0:
                process_polylines(
                    conn, script_params, image, polylines, line_width, f)

        file_ann, fa_message = script_utils.create_link_file_annotation(
            conn, file_name, image, output="Line Plot csv (Excel) file",
            mimetype="text/csv", description=None)
        if file_ann:
            file_anns.append(file_ann)

    if not file_anns:
        fa_message = "No Analysis files created. See 'Info' or 'Error' for"\
            " more details"
    elif len(file_anns) > 1:
        fa_message = "Created %s csv (Excel) files" % len(file_anns)
    message += fa_message

    return file_anns, message
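roi_utils.points_string_to_xy_list() is relied on above to turn a polyline's points string into (x, y) pairs. A standalone sketch of that conversion, assuming the "x1,y1 x2,y2 ..." points format used by current OMERO shapes (an assumption about the helper's behaviour, not its actual code):

def points_string_to_xy_list(points):
    # "10,20 30,40" -> [(10.0, 20.0), (30.0, 40.0)]
    xy_list = []
    for pair in points.split():
        x, y = pair.split(",")
        xy_list.append((float(x), float(y)))
    return xy_list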
Example #11
def make_thumbnail_figure(conn, script_params):
    """
    Makes the figure using the parameters in @script_params, attaches the
    figure to the parent Project/Dataset, and returns the file-annotation ID

    @ returns       Returns the id of the originalFileLink child. (ID object,
                    not value)
    """

    log("Thumbnail figure created by OMERO")
    log("")

    message = ""

    # Get the objects (images or datasets)
    objects, log_message = script_utils.get_objects(conn, script_params)
    message += log_message
    if not objects:
        return None, message

    # Get parent
    parent = None
    if "Parent_ID" in script_params and len(script_params["IDs"]) > 1:
        if script_params["Data_Type"] == "Image":
            parent = conn.getObject("Dataset", script_params["Parent_ID"])
        else:
            parent = conn.getObject("Project", script_params["Parent_ID"])

    if parent is None:
        parent = objects[0]  # Attach figure to the first object

    parent_class = parent.OMERO_CLASS
    log("Figure will be linked to %s%s: %s" %
        (parent_class[0].lower(), parent_class[1:], parent.getName()))

    tag_ids = []
    if "Tag_IDs" in script_params:
        tag_ids = script_params['Tag_IDs']
    if len(tag_ids) == 0:
        tag_ids = None

    show_untagged = False
    if (tag_ids):
        show_untagged = script_params["Show_Untagged_Images"]

    thumb_size = script_params["Thumbnail_Size"]
    max_columns = script_params["Max_Columns"]

    fig_height = 0
    fig_width = 0
    ds_canvases = []

    if script_params["Data_Type"] == "Dataset":
        for dataset in objects:
            log("Dataset: %s     ID: %d" %
                (dataset.getName(), dataset.getId()))
            images = list(dataset.listChildren())
            title = dataset.getName().decode('utf8')
            ds_canvas = paint_dataset_canvas(conn,
                                             images,
                                             title,
                                             tag_ids,
                                             show_untagged,
                                             length=thumb_size,
                                             col_count=max_columns)
            if ds_canvas is None:
                continue
            ds_canvases.append(ds_canvas)
            fig_height += ds_canvas.size[1]
            fig_width = max(fig_width, ds_canvas.size[0])
    else:
        image_canvas = paint_dataset_canvas(conn,
                                            objects,
                                            "",
                                            tag_ids,
                                            show_untagged,
                                            length=thumb_size,
                                            col_count=max_columns)
        ds_canvases.append(image_canvas)
        fig_height += image_canvas.size[1]
        fig_width = max(fig_width, image_canvas.size[0])

    if len(ds_canvases) == 0:
        message += "No figure created"
        return None, message

    figure = Image.new("RGB", (fig_width, fig_height), WHITE)
    y = 0
    for ds in ds_canvases:
        image_utils.paste_image(ds, figure, 0, y)
        y += ds.size[1]

    log("")
    fig_legend = "\n".join(log_lines)

    format = script_params["Format"]
    figure_name = script_params["Figure_Name"]
    figure_name = os.path.basename(figure_name)
    output = "localfile"

    if format == 'PNG':
        output = output + ".png"
        figure_name = figure_name + ".png"
        figure.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figure_name = figure_name + ".tiff"
        figure.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figure_name = figure_name + ".jpg"
        figure.save(output)
        mimetype = "image/jpeg"

    namespace = NSCREATED + "/omero/figure_scripts/Thumbnail_Figure"
    file_annotation, fa_message = script_utils.create_link_file_annotation(
        conn,
        output,
        parent,
        output="Thumbnail figure",
        mimetype=mimetype,
        description=fig_legend,
        namespace=namespace,
        orig_file_path_and_name=figure_name)
    message += fa_message

    return file_annotation, message
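The canvas-stacking step above pastes each dataset canvas below the previous one. In plain PIL that amounts to roughly the following sketch (it assumes image_utils.paste_image simply pastes a canvas into the figure at the given x, y offset):

from PIL import Image

def stack_canvases(canvases, background=(255, 255, 255)):
    # Stack PIL images vertically; the figure is as wide as the widest canvas.
    width = max(c.size[0] for c in canvases)
    height = sum(c.size[1] for c in canvases)
    figure = Image.new("RGB", (width, height), background)
    y = 0
    for c in canvases:
        figure.paste(c, (0, y))
        y += c.size[1]
    return figure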
Example #12
def roi_figure(conn, command_args):
    """
    This processes the script parameters, adding defaults if needed.
    Then calls a method to make the figure, and finally uploads and attaches
    this to the primary image.

    @param: conn            The BlitzGateway connection
    @param: command_args    Map of String:Object parameters for the script.
                            Objects are not rtypes, since getValue() was
                            called when the map was processed below.
                            But, list and map objects may contain rtypes (need
                            to call getValue())

    @return:                the id of the originalFileLink child. (ID object,
                            not value)
    """

    log("ROI figure created by OMERO on %s" % date.today())
    log("")

    message = ""  # message to be returned to the client
    pixel_ids = []
    image_ids = []
    image_labels = []

    # function for getting image labels.
    def get_image_names(full_name, tags_list, pd_list):
        name = full_name.split("/")[-1]
        return [name]

    # default function for getting labels is getName (or use datasets / tags)
    if "Image_Labels" in command_args:
        if command_args["Image_Labels"] == "Datasets":

            def get_datasets(name, tags_list, pd_list):
                return [dataset for project, dataset in pd_list]

            get_labels = get_datasets
        elif command_args["Image_Labels"] == "Tags":

            def get_tags(name, tags_list, pd_list):
                return tags_list

            get_labels = get_tags
        else:
            get_labels = get_image_names
    else:
        get_labels = get_image_names

    # Get the images
    images, log_message = script_utils.get_objects(conn, command_args)
    message += log_message
    if not images:
        return None, message

    # Check for rectangular ROIs and filter images list
    images = [image for image in images if image.getROICount("Rectangle") > 0]
    if not images:
        message += "No rectangle ROI found."
        return None, message

    # Attach figure to the first image
    omero_image = images[0]

    # process the list of images
    log("Image details:")
    for image in images:
        image_ids.append(image.getId())
        pixel_ids.append(image.getPrimaryPixels().getId())

    # a map of imageId : list of (project, dataset) names.
    pd_map = figUtil.getDatasetsProjectsFromImages(conn.getQueryService(),
                                                   image_ids)
    tag_map = figUtil.getTagsFromImages(conn.getMetadataService(), image_ids)
    # Build a legend entry for each image
    for image in images:
        name = image.getName()
        iid = image.getId()
        image_date = image.getAcquisitionDate()
        tags_list = tag_map[iid]
        pd_list = pd_map[iid]

        tags = ", ".join(tags_list)
        pd_string = ", ".join(["%s/%s" % pd for pd in pd_list])
        log(" Image: %s  ID: %d" % (name, iid))
        if image_date:
            log("  Date: %s" % image_date)
        else:
            log("  Date: not set")
        log("  Tags: %s" % tags)
        log("  Project/Datasets: %s" % pd_string)

        image_labels.append(get_labels(name, tags_list, pd_list))

    # use the first image to define dimensions, channel colours etc.
    size_x = omero_image.getSizeX()
    size_y = omero_image.getSizeY()
    size_z = omero_image.getSizeZ()
    size_c = omero_image.getSizeC()

    width = size_x
    if "Width" in command_args:
        w = command_args["Width"]
        try:
            width = int(w)
        except ValueError:
            log("Invalid width: %s Using default value: %d" % (str(w), size_x))

    height = size_y
    if "Height" in command_args:
        h = command_args["Height"]
        try:
            height = int(h)
        except ValueError:
            log("Invalid height: %s Using default value" % (str(h), size_y))

    log("Image dimensions for all panels (pixels): width: %d  height: %d" %
        (width, height))

    # the channels in the combined image,
    if "Merged_Channels" in command_args:
        # convert to 0-based
        merged_indexes = [c - 1 for c in command_args["Merged_Channels"]]
    else:
        merged_indexes = list(range(size_c))  # show all
    merged_indexes.reverse()

    #  if no colours added, use existing rendering settings.
    merged_colours = {}
    # Actually, nicer to always use existing rendering settings.
    # if "Merged_Colours" in commandArgs:
    #     for i, c in enumerate(commandArgs["Merged_Colours"]):
    #         if c in COLOURS:
    #             mergedColours[i] = COLOURS[c]

    algorithm = ProjectionType.MAXIMUMINTENSITY
    if "Algorithm" in command_args:
        a = command_args["Algorithm"]
        if (a == "Mean Intensity"):
            algorithm = ProjectionType.MEANINTENSITY

    stepping = 1
    if "Stepping" in command_args:
        s = command_args["Stepping"]
        if (0 < s < size_z):
            stepping = s

    scalebar = None
    if "Scalebar" in command_args:
        sb = command_args["Scalebar"]
        try:
            scalebar = int(sb)
            if scalebar <= 0:
                scalebar = None
            else:
                log("Scalebar is %d microns" % scalebar)
        except ValueError:
            log("Invalid value for scalebar: %s" % str(sb))
            scalebar = None

    overlay_colour = (255, 255, 255)
    if "Scalebar_Colour" in command_args:
        if command_args["Scalebar_Colour"] in OVERLAY_COLOURS:
            r, g, b, a = OVERLAY_COLOURS[command_args["Scalebar_Colour"]]
            overlay_colour = (r, g, b)

    roi_zoom = None
    if "Roi_Zoom" in command_args:
        roi_zoom = float(command_args["Roi_Zoom"])
        if roi_zoom == 0:
            roi_zoom = None

    max_columns = None
    if "Max_Columns" in command_args:
        max_columns = command_args["Max_Columns"]

    show_roi_duration = False
    if "Show_ROI_Duration" in command_args:
        show_roi_duration = command_args["Show_ROI_Duration"]

    roi_label = "FigureROI"
    if "Roi_Selection_Label" in command_args:
        roi_label = command_args["Roi_Selection_Label"]

    spacer = (width // 50) + 2

    fig = get_split_view(conn, image_ids, pixel_ids, merged_indexes,
                         merged_colours, width, height, image_labels, spacer,
                         algorithm, stepping, scalebar, overlay_colour,
                         roi_zoom, max_columns, show_roi_duration, roi_label)

    if fig is None:
        log_message = "No figure produced"
        log("\n" + log_message)
        message += log_message
        return None, message
    fig_legend = "\n".join(log_strings)

    format = command_args["Format"]

    figure_name = "movieROIFigure"
    if "Figure_Name" in command_args:
        figure_name = command_args["Figure_Name"]
        figure_name = os.path.basename(figure_name)
    output = "localfile"
    if format == 'PNG':
        output = output + ".png"
        figure_name = figure_name + ".png"
        fig.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figure_name = figure_name + ".tiff"
        fig.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figure_name = figure_name + ".jpg"
        fig.save(output)
        mimetype = "image/jpeg"

    # Use util method to upload the figure 'output' to the server, attaching
    # it to the omeroImage, adding the
    # figLegend as the fileAnnotation description.
    # Returns the id of the originalFileLink child. (ID object, not value)
    namespace = NSCREATED + "/omero/figure_scripts/Movie_ROI_Figure"
    file_annotation, fa_message = script_utils.create_link_file_annotation(
        conn,
        output,
        omero_image,
        output="Movie ROI figure",
        mimetype=mimetype,
        namespace=namespace,
        description=fig_legend,
        orig_file_path_and_name=figure_name)
    message += fa_message

    return file_annotation, message
Example #13
def combine_images(conn, parameter_map):

    # get the services we need
    services = {}
    services["containerService"] = conn.getContainerService()
    services["renderingEngine"] = conn.createRenderingEngine()
    services["queryService"] = conn.getQueryService()
    services["pixelsService"] = conn.getPixelsService()
    services["rawPixelStore"] = conn.c.sf.createRawPixelsStore()
    services["rawPixelStoreUpload"] = conn.c.sf.createRawPixelsStore()
    services["updateService"] = conn.getUpdateService()
    services["rawFileStore"] = conn.createRawFileStore()

    query_service = services["queryService"]

    colour_map = {}
    if "Channel_Colours" in parameter_map:
        for c, colour in enumerate(parameter_map["Channel_Colours"]):
            if colour in COLOURS:
                colour_map[c] = COLOURS[colour]

    # Get images or datasets
    message = ""
    objects, log_message = script_utils.get_objects(conn, parameter_map)
    message += log_message
    if not objects:
        return None, message

    # get the images IDs from list (in order) or dataset (sorted by name)
    output_images = []
    links = []

    data_type = parameter_map["Data_Type"]
    if data_type == "Image":
        dataset = None
        objects.sort(key=lambda x: (x.getName()))  # Sort images by name
        image_ids = [image.id for image in objects]
        # get dataset from first image
        query_string = "select i from Image i join fetch i.datasetLinks idl"\
            " join fetch idl.parent where i.id in (%s)" % image_ids[0]
        image = query_service.findByQuery(query_string, None)
        if image:
            for link in image.iterateDatasetLinks():
                ds = link.parent
                dataset = conn.getObject("Dataset", ds.getId().getValue())
                break  # only use 1st dataset
        new_img, link = make_single_image(services, parameter_map, image_ids,
                                          dataset, colour_map)
        if new_img:
            output_images.append(new_img)
        if link:
            links.append(link)
    else:
        for dataset in objects:
            images = list(dataset.listChildren())
            if not images:
                continue
            images.sort(key=lambda x: (x.getName()))
            image_ids = [i.getId() for i in images]
            new_img, link = make_single_image(services, parameter_map,
                                              image_ids, dataset, colour_map)
            if new_img:
                output_images.append(new_img)
            if link:
                links.append(link)

    # try and close any stateful services
    for s in services.values():  # the values are the service objects
        try:
            s.close()
        except Exception:
            pass

    if output_images:
        if len(output_images) > 1:
            message += "%s new images created" % len(output_images)
        else:
            message += "New image created"
        if not links or not len(links) == len(output_images):
            message += " but could not be attached"
    else:
        message += "No image created"
    message += "."

    return output_images, message
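The services dict above mixes stateless and stateful services and simply tries close() on everything, ignoring failures. An alternative sketch (not the original script's approach) registers close() callbacks only for the stateful services as they are created, using contextlib.ExitStack:

from contextlib import ExitStack

def run_with_stateful_services(conn, work):
    # work() receives the stateful services; they are closed when the block exits.
    with ExitStack() as stack:
        rendering_engine = conn.createRenderingEngine()
        stack.callback(rendering_engine.close)
        raw_pixel_store = conn.c.sf.createRawPixelsStore()
        stack.callback(raw_pixel_store.close)
        return work(rendering_engine, raw_pixel_store)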
Example #14
def batch_image_export(conn, script_params):

    # for params with default values, we can get the value directly
    split_cs = script_params["Export_Individual_Channels"]
    merged_cs = script_params["Export_Merged_Image"]
    greyscale = script_params["Individual_Channels_Grey"]
    data_type = script_params["Data_Type"]
    folder_name = script_params["Folder_Name"]
    folder_name = os.path.basename(folder_name)
    format = script_params["Format"]
    project_z = "Choose_Z_Section" in script_params and \
        script_params["Choose_Z_Section"] == 'Max projection'

    if (not split_cs) and (not merged_cs):
        log("Not chosen to save Individual Channels OR Merged Image")
        return

    # check if we have these params
    channel_names = []
    if "Channel_Names" in script_params:
        channel_names = script_params["Channel_Names"]
    zoom_percent = None
    if "Zoom" in script_params and script_params["Zoom"] != "100%":
        zoom_percent = int(script_params["Zoom"][:-1])

    # functions used below for each image.
    def get_z_range(size_z, script_params):
        z_range = None
        if "Choose_Z_Section" in script_params:
            z_choice = script_params["Choose_Z_Section"]
            # NB: all Z indices in this script are 1-based
            if z_choice == 'ALL Z planes':
                z_range = (1, size_z + 1)
            elif "OR_specify_Z_index" in script_params:
                z_index = script_params["OR_specify_Z_index"]
                z_index = min(z_index, size_z)
                z_range = (z_index, )
            elif "OR_specify_Z_start_AND..." in script_params and \
                    "...specify_Z_end" in script_params:
                start = script_params["OR_specify_Z_start_AND..."]
                start = min(start, size_z)
                end = script_params["...specify_Z_end"]
                end = min(end, size_z)
                # in case user got z_start and z_end mixed up
                z_start = min(start, end)
                z_end = max(start, end)
                if z_start == z_end:
                    z_range = (z_start, )
                else:
                    z_range = (z_start, z_end + 1)
        return z_range

    def get_t_range(size_t, script_params):
        t_range = None
        if "Choose_T_Section" in script_params:
            t_choice = script_params["Choose_T_Section"]
            # NB: all T indices in this script are 1-based
            if t_choice == 'ALL T planes':
                t_range = (1, size_t + 1)
            elif "OR_specify_T_index" in script_params:
                t_index = script_params["OR_specify_T_index"]
                t_index = min(t_index, size_t)
                t_range = (t_index, )
            elif "OR_specify_T_start_AND..." in script_params and \
                    "...specify_T_end" in script_params:
                start = script_params["OR_specify_T_start_AND..."]
                start = min(start, size_t)
                end = script_params["...specify_T_end"]
                end = min(end, size_t)
                # in case user got t_start and t_end mixed up
                t_start = min(start, end)
                t_end = max(start, end)
                if t_start == t_end:
                    t_range = (t_start, )
                else:
                    t_range = (t_start, t_end + 1)
        return t_range

    # Get the images or datasets
    message = ""
    objects, log_message = script_utils.get_objects(conn, script_params)
    message += log_message
    if not objects:
        return None, message

    # Attach figure to the first image
    parent = objects[0]

    if data_type == 'Dataset':
        images = []
        for ds in objects:
            images.extend(list(ds.listChildren()))
        if not images:
            message += "No image found in dataset(s)"
            return None, message
    else:
        images = objects

    log("Processing %s images" % len(images))

    # somewhere to put images
    curr_dir = os.getcwd()
    exp_dir = os.path.join(curr_dir, folder_name)
    try:
        os.mkdir(exp_dir)
    except OSError:
        pass
    # max size (default 12kx12k)
    size = conn.getDownloadAsMaxSizeSetting()
    size = int(size)

    ids = []
    # do the saving to disk

    for img in images:
        log("Processing image: ID %s: %s" % (img.id, img.getName()))
        pixels = img.getPrimaryPixels()
        if (pixels.getId() in ids):
            continue
        ids.append(pixels.getId())

        if format == 'OME-TIFF':
            if img._prepareRE().requiresPixelsPyramid():
                log("  ** Can't export a 'Big' image to OME-TIFF. **")
                if len(images) == 1:
                    return None, "Can't export a 'Big' image to %s." % format
                continue
            else:
                save_as_ome_tiff(conn, img, folder_name)
        else:
            size_x = pixels.getSizeX()
            size_y = pixels.getSizeY()
            if size_x * size_y > size:
                msg = "Can't export image over %s pixels. " \
                      "See 'omero.client.download_as.max_size'" % size
                log("  ** %s. **" % msg)
                if len(images) == 1:
                    return None, msg
                continue
            else:
                log("Exporting image as %s: %s" % (format, img.getName()))

            log("\n----------- Saving planes from image: '%s' ------------" %
                img.getName())
            size_c = img.getSizeC()
            size_z = img.getSizeZ()
            size_t = img.getSizeT()
            z_range = get_z_range(size_z, script_params)
            t_range = get_t_range(size_t, script_params)
            log("Using:")
            if z_range is None:
                log("  Z-index: Last-viewed")
            elif len(z_range) == 1:
                log("  Z-index: %d" % z_range[0])
            else:
                log("  Z-range: %s-%s" % (z_range[0], z_range[1] - 1))
            if project_z:
                log("  Z-projection: ON")
            if t_range is None:
                log("  T-index: Last-viewed")
            elif len(t_range) == 1:
                log("  T-index: %d" % t_range[0])
            else:
                log("  T-range: %s-%s" % (t_range[0], t_range[1] - 1))
            log("  Format: %s" % format)
            if zoom_percent is None:
                log("  Image Zoom: 100%")
            else:
                log("  Image Zoom: %s" % zoom_percent)
            log("  Greyscale: %s" % greyscale)
            log("Channel Rendering Settings:")
            for ch in img.getChannels():
                log("  %s: %d-%d" %
                    (ch.getLabel(), ch.getWindowStart(), ch.getWindowEnd()))

            try:
                save_planes_for_image(conn,
                                      img,
                                      size_c,
                                      split_cs,
                                      merged_cs,
                                      channel_names,
                                      z_range,
                                      t_range,
                                      greyscale,
                                      zoom_percent,
                                      project_z=project_z,
                                      format=format,
                                      folder_name=folder_name)
            finally:
                # Make sure we close Rendering Engine
                img._re.close()

        # write log for exported images (not needed for ome-tiff)
        name = 'Batch_Image_Export.txt'
        with open(os.path.join(exp_dir, name), 'w') as log_file:
            for s in log_strings:
                log_file.write(s)
                log_file.write("\n")

    if len(os.listdir(exp_dir)) == 0:
        return None, "No files exported. See 'info' for more details"
    # zip everything up (unless we've only got a single ome-tiff)
    if format == 'OME-TIFF' and len(os.listdir(exp_dir)) == 1:
        ometiff_ids = [t.id for t in parent.listAnnotations(ns=NSOMETIFF)]
        conn.deleteObjects("Annotation", ometiff_ids)
        export_file = os.path.join(folder_name, os.listdir(exp_dir)[0])
        namespace = NSOMETIFF
        output_display_name = "OME-TIFF"
        mimetype = 'image/tiff'
    else:
        export_file = "%s.zip" % folder_name
        compress(export_file, folder_name)
        mimetype = 'application/zip'
        output_display_name = "Batch export zip"
        namespace = NSCREATED + "/omero/export_scripts/Batch_Image_Export"

    file_annotation, ann_message = script_utils.create_link_file_annotation(
        conn,
        export_file,
        parent,
        output=output_display_name,
        namespace=namespace,
        mimetype=mimetype)
    message += ann_message
    return file_annotation, message
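The compress() helper called just above is not part of this excerpt. A minimal sketch of such a helper, assuming it simply zips the export folder recursively (the signature matches the call above, but the body is an assumption, not the script's actual implementation):

import os
import zipfile


def compress(target_zip, base_folder):
    """Recursively zip base_folder into target_zip, storing relative paths."""
    with zipfile.ZipFile(target_zip, 'w', zipfile.ZIP_DEFLATED) as zf:
        for root, dirs, files in os.walk(base_folder):
            for name in files:
                full_path = os.path.join(root, name)
                # keep archive paths relative to the exported folder
                zf.write(full_path, os.path.relpath(full_path, base_folder))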
Example #15
0
def datasets_to_plates(conn, script_params):

    update_service = conn.getUpdateService()

    message = ""

    # Get the datasets
    datasets, log_message = script_utils.get_objects(conn, script_params)
    message += log_message

    def has_images_linked_to_well(dataset):
        params = omero.sys.ParametersI()
        query = "select count(well) from Well as well "\
                "left outer join well.wellSamples as ws " \
                "left outer join ws.image as img "\
                "where img.id in (:ids)"
        params.addIds([i.getId() for i in dataset.listChildren()])
        n_wells = unwrap(conn.getQueryService().projection(
            query, params, conn.SERVICE_OPTS)[0])[0]
        if n_wells > 0:
            return True
        else:
            return False

    # Exclude datasets containing images already linked to a well
    n_datasets = len(datasets)
    datasets = [x for x in datasets if not has_images_linked_to_well(x)]
    if len(datasets) < n_datasets:
        message += "Excluded %s out of %s dataset(s). " \
            % (n_datasets - len(datasets), n_datasets)

    # Return if all input datasets were excluded or not found
    if not datasets:
        return None, message

    # Filter dataset IDs by permissions
    ids = [ds.getId() for ds in datasets if ds.canLink()]
    if len(ids) != len(datasets):
        perm_ids = [str(ds.getId()) for ds in datasets if not ds.canLink()]
        message += "You do not have the permissions to add the images from"\
            " the dataset(s): %s." % ",".join(perm_ids)
    if not ids:
        return None, message

    # find or create Screen if specified
    screen = None
    newscreen = None
    if "Screen" in script_params and len(script_params["Screen"]) > 0:
        s = script_params["Screen"]
        # see if this is ID of existing screen
        try:
            screen_id = int(s)
            screen = conn.getObject("Screen", screen_id)
        except ValueError:
            pass
        # if not, create one
        if screen is None:
            newscreen = omero.model.ScreenI()
            newscreen.name = rstring(s)
            newscreen = update_service.saveAndReturnObject(newscreen)
            screen = conn.getObject("Screen", newscreen.getId().getValue())

    plates = []
    links = []
    deletes = []
    for dataset_id in ids:
        plate, link, delete_handle = dataset_to_plate(conn, script_params,
                                                      dataset_id, screen)
        if plate is not None:
            plates.append(plate)
        if link is not None:
            links.append(link)
        if delete_handle is not None:
            deletes.append(delete_handle)

    # wait for any deletes to finish
    for handle in deletes:
        cb = omero.callbacks.DeleteCallbackI(conn.c, handle)
        while True:  # ms
            if cb.block(100) is not None:
                break

    if newscreen:
        message += "New screen created: %s." % newscreen.getName().getValue()
        robj = newscreen
    elif plates:
        robj = plates[0]
    else:
        robj = None

    if plates:
        if len(plates) == 1:
            plate = plates[0]
            message += " New plate created: %s" % plate.getName().getValue()
        else:
            message += " %s plates created" % len(plates)
        if len(plates) == len(links):
            message += "."
        else:
            message += " but could not be attached."
    else:
        message += "No plate created."
    return robj, message
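A hedged usage sketch for datasets_to_plates(): only the parameter keys visible in this excerpt ("Data_Type", "IDs", "Screen") are shown, `conn` is assumed to be an already-connected BlitzGateway, and dataset_to_plate() very likely expects additional layout parameters that are not part of this snippet.

script_params = {
    "Data_Type": "Dataset",
    "IDs": [51, 52],               # dataset IDs to convert to plates
    "Screen": "Converted plates",  # existing Screen ID, or a name for a new one
}
# robj, message = datasets_to_plates(conn, script_params)
# print(message)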
Example #16
0
def process_images(conn, script_params):
    """Process each image passed to script, generating new Kymograph images."""
    line_width = script_params['Line_Width']
    new_kymographs = []
    message = ""

    # Get the images
    images, log_message = script_utils.get_objects(conn, script_params)
    message += log_message
    if not images:
        return None, message

    # Check for line and polyline ROIs and filter images list
    images = [
        image for image in images
        if image.getROICount(["Polyline", "Line"]) > 0
    ]
    if not images:
        message += "No ROI containing line or polyline was found."
        return None, message

    for image in images:
        if image.getSizeT() == 1:
            continue
        new_images = []  # kymographs derived from the current image.
        c_names = []
        colors = []
        for ch in image.getChannels():
            c_names.append(ch.getLabel())
            colors.append(ch.getColor().getRGB())

        size_t = image.getSizeT()
        pixels = image.getPrimaryPixels()

        dataset = image.getParent()
        if dataset is not None and not dataset.canLink():
            dataset = None

        roi_service = conn.getRoiService()
        result = roi_service.findByImage(image.getId(), None)

        # kymograph strategy - Using Line and Polyline ROIs:
        # NB: Use ALL time points unless >1 shape AND 'use_all_timepoints' =
        # False
        # If > 1 shape per time-point (per ROI), pick one!
        # 1 - Single line. Use this shape for all time points
        # 2 - Many lines. Use the first one to fix length. Subsequent lines to
        # update start and direction
        # 3 - Single polyline. Use this shape for all time points
        # 4 - Many polylines. Use the first one to fix length.
        for roi in result.rois:
            lines = {}  # map of theT: line
            polylines = {}  # map of theT: polyline
            for s in roi.copyShapes():
                if s is None:
                    continue
                the_t = unwrap(s.getTheT())
                the_z = unwrap(s.getTheZ())
                z = 0
                t = 0
                if the_t is not None:
                    t = the_t
                if the_z is not None:
                    z = the_z
                # TODO: Add some filter of shapes. E.g. text? / 'lines' only
                # etc.
                if type(s) == omero.model.LineI:
                    x1 = s.getX1().getValue()
                    x2 = s.getX2().getValue()
                    y1 = s.getY1().getValue()
                    y2 = s.getY2().getValue()
                    lines[t] = {
                        'theZ': z,
                        'x1': x1,
                        'y1': y1,
                        'x2': x2,
                        'y2': y2
                    }

                elif type(s) == omero.model.PolylineI:
                    v = s.getPoints().getValue()
                    points = points_string_to_xy_list(v)
                    polylines[t] = {'theZ': z, 'points': points}

            if len(lines) > 0:
                new_img = lines_kymograph(conn, script_params, image, lines,
                                          line_width, dataset)
                new_images.append(new_img)
                lines = []
            elif len(polylines) > 0:
                new_img = polyline_kymograph(conn, script_params, image,
                                             polylines, line_width, dataset)
                new_images.append(new_img)

        # look-up the interval for each time-point
        t_interval = None
        infos = list(pixels.copyPlaneInfo(theC=0, theT=size_t - 1, theZ=0))
        if len(infos) > 0 and infos[0].getDeltaT() is not None:
            duration = infos[0].getDeltaT(units="SECOND").getValue()
            if size_t == 1:
                t_interval = duration
            else:
                t_interval = duration / (size_t - 1)
        elif pixels.timeIncrement is not None:
            t_interval = pixels.timeIncrement
        elif "Time_Increment" in script_params:
            t_interval = script_params["Time_Increment"]

        pixel_size = None
        if pixels.physicalSizeX is not None:
            pixel_size = pixels.physicalSizeX
        elif "Pixel_Size" in script_params:
            pixel_size = script_params['Pixel_Size']

        # Save channel names and colors for each new image
        for img in new_images:
            for i, c in enumerate(img.getChannels()):
                lc = c.getLogicalChannel()
                lc.setName(c_names[i])
                lc.save()
                r, g, b = colors[i]
                # need to reload channels to avoid optimistic lock on update
                c_obj = conn.getQueryService().get("Channel", c.id)
                c_obj.red = omero.rtypes.rint(r)
                c_obj.green = omero.rtypes.rint(g)
                c_obj.blue = omero.rtypes.rint(b)
                c_obj.alpha = omero.rtypes.rint(255)
                conn.getUpdateService().saveObject(c_obj)
            img.resetRDefs()  # reset based on colors above

            # If we know pixel sizes, set them on the new image
            if pixel_size is not None or t_interval is not None:
                px = conn.getQueryService().get("Pixels", img.getPixelsId())
                microm = getattr(omero.model.enums.UnitsLength, "MICROMETER")
                if pixel_size is not None:
                    pixel_size = omero.model.LengthI(pixel_size, microm)
                    px.setPhysicalSizeX(pixel_size)
                if t_interval is not None:
                    t_per_pixel = t_interval / line_width
                    t_per_pixel = omero.model.LengthI(t_per_pixel, microm)
                    px.setPhysicalSizeY(t_per_pixel)
                conn.getUpdateService().saveObject(px)
        new_kymographs.extend(new_images)

    if not new_kymographs:
        message += "No kymograph created. See 'Error' or 'Info' for details."
    else:
        if not dataset:
            link_message = " but could not be attached"
        else:
            link_message = ""

        if len(new_kymographs) == 1:
            message += "New kymograph created%s: %s." \
                % (link_message, new_kymographs[0].getName())
        elif len(new_kymographs) > 1:
            message += "%s new kymographs created%s." \
                % (len(new_kymographs), link_message)

    return new_kymographs, message
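A small worked example of the time calibration applied to the kymographs above. The numbers are illustrative, and the reading that each time point contributes Line_Width pixel rows (so one row spans t_interval / line_width seconds) is inferred from the physicalSizeY assignment, not stated elsewhere in this excerpt.

size_t = 61          # number of time points in the source image (assumed)
duration = 120.0     # DeltaT of the last plane, in seconds (assumed)
line_width = 5       # the script's Line_Width parameter (assumed)

t_interval = duration / (size_t - 1)    # 2.0 seconds between frames
t_per_pixel = t_interval / line_width   # 0.4 seconds per kymograph pixel row
print(t_interval, t_per_pixel)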
Example #17
0
def process_images(conn, script_params):
    """Measure velocities from Line/Polyline ROIs on kymograph images.

    Results are written to a CSV file which is attached to the images.
    """

    file_anns = []
    message = ""
    # Get the images
    images, log_message = script_utils.get_objects(conn, script_params)
    message += log_message
    if not images:
        return None, message
    # Check for line and polyline ROIs and filter images list
    images = [
        image for image in images
        if image.getROICount(["Polyline", "Line"]) > 0
    ]
    if not images:
        message += "No ROI containing line or polyline was found."
        return None, message

    csv_data = []

    for image in images:

        if image.getSizeT() > 1:
            message += "%s ID: %s appears to be a time-lapse Image," \
                " not a kymograph." % (image.getName(), image.getId())
            continue

        roi_service = conn.getRoiService()
        result = roi_service.findByImage(image.getId(), None)

        secs_per_pixel_y = image.getPixelSizeY()
        microns_per_pixel_x = image.getPixelSizeX()
        if secs_per_pixel_y and microns_per_pixel_x:
            microns_per_sec = microns_per_pixel_x / secs_per_pixel_y
        else:
            microns_per_sec = None

        # for each line or polyline, create a row in csv table: y(t), x,
        # dy(dt), dx, x/t (line), x/t (average)
        col_names = "\nt_start (pixels), x_start (pixels), t_end (pixels)," \
            " x_end (pixels), dt (pixels), dx (pixels), x/t, speed(um/sec)," \
            "avg x/t, avg speed(um/sec)"
        table_data = ""
        for roi in result.rois:
            for s in roi.copyShapes():
                if s is None:
                    continue  # seems possible in some situations
                if type(s) == omero.model.LineI:
                    table_data += "\nLine ID: %s" % s.getId().getValue()
                    x1 = s.getX1().getValue()
                    x2 = s.getX2().getValue()
                    y1 = s.getY1().getValue()
                    y2 = s.getY2().getValue()
                    dx = abs(x1 - x2)
                    dy = abs(y1 - y2)
                    dx_per_y = float(dx) / dy
                    speed = ""
                    if microns_per_sec:
                        speed = dx_per_y * microns_per_sec
                    table_data += "\n"
                    table_data += ",".join([
                        str(x)
                        for x in (y1, x1, y2, x2, dy, dx, dx_per_y, speed)
                    ])

                elif type(s) == omero.model.PolylineI:
                    table_data += "\nPolyline ID: %s" % s.getId().getValue()
                    v = s.getPoints().getValue()
                    points = roi_utils.points_string_to_xy_list(v)
                    x_start, y_start = points[0]
                    for i in range(1, len(points)):
                        x1, y1 = points[i - 1]
                        x2, y2 = points[i]
                        dx = abs(x1 - x2)
                        dy = abs(y1 - y2)
                        dx_per_y = float(dx) / dy
                        av_x_per_y = abs(float(x2 - x_start) / (y2 - y_start))
                        speed = ""
                        avg_speed = ""
                        if microns_per_sec:
                            speed = dx_per_y * microns_per_sec
                            avg_speed = av_x_per_y * microns_per_sec
                        table_data += "\n"
                        table_data += ",".join([
                            str(x) for x in (y1, x1, y2, x2, dy, dx, dx_per_y,
                                             speed, av_x_per_y, avg_speed)
                        ])

        # write table data to csv...
        if len(table_data) > 0:
            table_string = "Image ID:, %s," % image.getId()
            table_string += "Name:, %s" % image.getName()
            table_string += "\nsecsPerPixelY: %s" % secs_per_pixel_y
            table_string += '\nmicronsPerPixelX: %s' % microns_per_pixel_x
            table_string += "\n"
            table_string += col_names
            table_string += table_data
            csv_data.append(table_string)

    iids = [str(i.getId()) for i in images]
    to_link_csv = [i.getId() for i in images if i.canAnnotate()]
    csv_file_name = 'kymograph_velocities_%s.csv' % "-".join(iids)
    with open(csv_file_name, 'w') as csv_file:
        csv_file.write("\n \n".join(csv_data))

    file_ann = conn.createFileAnnfromLocalFile(csv_file_name,
                                               mimetype="text/csv")
    fa_message = "Created Line Plot csv (Excel) file"

    links = []
    if len(to_link_csv) == 0:
        fa_message += " but could not attach to images."
    for iid in to_link_csv:
        link = ImageAnnotationLinkI()
        link.parent = ImageI(iid, False)
        link.child = file_ann._obj
        links.append(link)
    if len(links) > 0:
        links = conn.getUpdateService().saveAndReturnArray(links)

    if file_ann:
        file_anns.append(file_ann)

    if not file_anns:
        fa_message = "No Analysis files created. See 'Info' or 'Error'" \
            " for more details"
    elif len(file_anns) > 1:
        fa_message = "Created %s csv (Excel) files" % len(file_anns)
    message += fa_message
    return file_anns, message
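A worked example of the velocity calculation behind the CSV rows above, with illustrative numbers: on a kymograph the Y axis encodes time and the X axis encodes distance, so a line's slope converts directly to a speed once both calibrations are known.

microns_per_pixel_x = 0.1    # spatial calibration, microns per pixel (assumed)
secs_per_pixel_y = 0.4       # temporal calibration, seconds per pixel (assumed)
x1, y1, x2, y2 = 10.0, 5.0, 60.0, 105.0   # line endpoints in pixels

dx = abs(x2 - x1)            # 50 px  -> 5.0 microns travelled
dy = abs(y2 - y1)            # 100 px -> 40.0 seconds elapsed
microns_per_sec = microns_per_pixel_x / secs_per_pixel_y
speed = (dx / dy) * microns_per_sec       # 0.125 microns per second
print(speed)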
Example #18
0
def movie_figure(conn, command_args):
    """
    Makes the figure using the parameters in @command_args, attaches the figure
    to the parent Project/Dataset, and returns the file-annotation ID

    @param session      The OMERO session
    @param command_args Map of parameters for the script
    @ returns           Returns the id of the originalFileLink child. (ID
                        object, not value)
    """

    log("Movie figure created by OMERO on %s" % date.today())
    log("")

    time_labels = {
        "SECS_MILLIS": "seconds",
        "SECS": "seconds",
        "MINS": "minutes",
        "HOURS": "hours",
        "MINS_SECS": "mins:secs",
        "HOURS_MINS": "hours:mins"
    }
    time_units = "SECS"
    if "Time_Units" in command_args:
        time_units = command_args["Time_Units"]
        # convert from UI name to time_labels key
        time_units = time_units.replace(" ", "_")
    if time_units not in time_labels.keys():
        time_units = "SECS"
    log("Time units are in %s" % time_labels[time_units])

    pixel_ids = []
    image_ids = []
    image_labels = []
    message = ""  # message to be returned to the client

    # function for getting image labels.
    def get_image_names(full_name, tags_list, pd_list):
        name = full_name.split("/")[-1]
        return [name]

    # default function for getting labels is getName (or use datasets / tags)
    if "Image_Labels" in command_args:
        if command_args["Image_Labels"] == "Datasets":

            def get_datasets(name, tags_list, pd_list):
                return [dataset for project, dataset in pd_list]

            get_labels = get_datasets
        elif command_args["Image_Labels"] == "Tags":

            def get_tags(name, tags_list, pd_list):
                return tags_list

            get_labels = get_tags
        else:
            get_labels = get_image_names
    else:
        get_labels = get_image_names

    # Get the images
    images, log_message = script_utils.get_objects(conn, command_args)
    message += log_message
    if not images:
        return None, message

    # Attach figure to the first image
    omero_image = images[0]

    # process the list of images
    log("Image details:")
    for image in images:
        image_ids.append(image.getId())
        pixel_ids.append(image.getPrimaryPixels().getId())

    # a map of imageId : list of (project, dataset) names.
    pd_map = figUtil.getDatasetsProjectsFromImages(conn.getQueryService(),
                                                   image_ids)
    tag_map = figUtil.getTagsFromImages(conn.getMetadataService(), image_ids)
    # Build a legend entry for each image
    for image in images:
        name = image.getName()
        iid = image.getId()
        image_date = image.getAcquisitionDate()
        tags_list = tag_map[iid]
        pd_list = pd_map[iid]

        tags = ", ".join(tags_list)
        pd_string = ", ".join(["%s/%s" % pd for pd in pd_list])
        log(" Image: %s  ID: %d" % (name, iid))
        if image_date:
            log("  Date: %s" % image_date)
        else:
            log("  Date: not set")
        log("  Tags: %s" % tags)
        log("  Project/Datasets: %s" % pd_string)

        image_labels.append(get_labels(name, tags_list, pd_list))

    # use the first image to define dimensions, channel colours etc.
    size_x = omero_image.getSizeX()
    size_y = omero_image.getSizeY()
    size_z = omero_image.getSizeZ()
    size_t = omero_image.getSizeT()

    t_indexes = []
    if "T_Indexes" in command_args:
        for t in command_args["T_Indexes"]:
            t_indexes.append(t)
    if len(t_indexes) == 0:  # if no t-indexes given, use all t-indices
        t_indexes = range(size_t)

    z_start = -1
    z_end = -1
    if "Z_Start" in command_args:
        z_start = command_args["Z_Start"]
    if "Z_End" in command_args:
        z_end = command_args["Z_End"]

    width = size_x
    if "Width" in command_args:
        width = command_args["Width"]

    height = size_y
    if "Height" in command_args:
        height = command_args["Height"]

    spacer = (width / 25) + 2

    algorithm = ProjectionType.MAXIMUMINTENSITY
    if "Algorithm" in command_args:
        a = command_args["Algorithm"]
        if (a == "Mean Intensity"):
            algorithm = ProjectionType.MEANINTENSITY

    stepping = 1
    if "Stepping" in command_args:
        s = command_args["Stepping"]
        if (0 < s < size_z):
            stepping = s

    scalebar = None
    if "Scalebar" in command_args:
        sb = command_args["Scalebar"]
        try:
            scalebar = int(sb)
            if scalebar <= 0:
                scalebar = None
            else:
                log("Scalebar is %d microns" % scalebar)
        except ValueError:
            log("Invalid value for scalebar: %s" % str(sb))
            scalebar = None

    overlay_colour = (255, 255, 255)
    if "Scalebar_Colour" in command_args:
        r, g, b, a = OVERLAY_COLOURS[command_args["Scalebar_Colour"]]
        overlay_colour = (r, g, b)

    max_col_count = 10
    if "Max_Columns" in command_args:
        max_col_count = command_args["Max_Columns"]

    figure = createmovie_figure(conn, pixel_ids, t_indexes, z_start, z_end,
                                width, height, spacer, algorithm, stepping,
                                scalebar, overlay_colour, time_units,
                                image_labels, max_col_count)

    log("")
    fig_legend = "\n".join(log_lines)

    # print figLegend    # bug fixing only
    format = command_args["Format"]

    figure_name = "movie_figure"
    if "Figure_Name" in command_args:
        figure_name = str(command_args["Figure_Name"])
        figure_name = os.path.basename(figure_name)
    output = "localfile"
    if format == 'PNG':
        output = output + ".png"
        figure_name = figure_name + ".png"
        figure.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figure_name = figure_name + ".tiff"
        figure.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figure_name = figure_name + ".jpg"
        figure.save(output)
        mimetype = "image/jpeg"

    namespace = NSCREATED + "/omero/figure_scripts/Movie_Figure"
    file_annotation, fa_message = script_utils.create_link_file_annotation(
        conn,
        output,
        omero_image,
        output="Movie figure",
        mimetype=mimetype,
        namespace=namespace,
        description=fig_legend,
        orig_file_path_and_name=figure_name)
    message += fa_message

    return file_annotation, message
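A hedged usage sketch for movie_figure(): every key shown appears in the function above, `conn` is assumed to be a connected BlitzGateway, and optional keys whose allowed values depend on maps outside this excerpt (for example "Scalebar_Colour" and OVERLAY_COLOURS) are omitted.

command_args = {
    "Data_Type": "Image",
    "IDs": [101, 102],           # images to include in the figure
    "Format": "PNG",             # 'PNG', 'TIFF', anything else gives JPEG
    "Figure_Name": "movie_figure",
    "Time_Units": "MINS",
    "Image_Labels": "Datasets",
    "T_Indexes": [0, 5, 10],
    "Z_Start": 0,
    "Z_End": 4,
    "Scalebar": 10,              # length in microns
    "Max_Columns": 6,
}
# file_annotation, message = movie_figure(conn, command_args)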
Example #19
0
def write_movie(command_args, conn):
    """
    Makes the movie.

    @return        Returns the file annotation
    """
    log("Movie created by OMERO")
    log("")

    message = ""

    session = conn.c.sf
    update_service = session.getUpdateService()
    raw_file_store = session.createRawFileStore()

    # Get the images
    images, log_message = script_utils.get_objects(conn, command_args)
    message += log_message
    if not images:
        return None, message
    # Get the first valid image (should be expanded to process the list)
    omero_image = images[0]

    if command_args["RenderingDef_ID"] >= 0:
        rid = command_args["RenderingDef_ID"]
        omero_image._prepareRenderingEngine(rdid=rid)
    pixels = omero_image.getPrimaryPixels()
    pixels_id = pixels.getId()

    size_x = pixels.getSizeX()
    size_y = pixels.getSizeY()
    size_z = pixels.getSizeZ()
    size_c = pixels.getSizeC()
    size_t = pixels.getSizeT()

    if (size_x is None or size_y is None or size_z is None or size_t is None or
            size_c is None):
        return None, message

    if (pixels.getPhysicalSizeX() is None):
        command_args["Scalebar"] = 0

    c_range = range(0, size_c)
    c_windows = None
    c_colours = None
    if "ChannelsExtended" in command_args and \
            valid_channels(command_args["ChannelsExtended"], size_c):
        c_range = []
        c_windows = []
        c_colours = []
        for c in command_args["ChannelsExtended"]:
            m = re.match(r'^(?P<i>\d+)(\|(?P<ws>\d+)'
                         r':(?P<we>\d+))?(\$(?P<c>.+))?$', c)
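            # The pattern implies each entry looks like
            # "<index>|<winStart>:<winEnd>$<colour>", e.g. "2|100:255$00FF00",
            # with the window and colour parts optional; channel indices are
            # 1-based here (format inferred from the regex above).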
            if m is not None:
                c_range.append(int(m.group('i'))-1)
                c_windows.append([float(m.group('ws')), float(m.group('we'))])
                c_colours.append(m.group('c'))
    elif "Channels" in command_args and \
            valid_channels(command_args["Channels"], size_c):
        c_range = command_args["Channels"]

    tz_list = calculate_ranges(size_z, size_t, command_args)

    time_map = calculate_acquisition_time(conn, pixels_id, c_range, tz_list)
    if (time_map is None):
        command_args["Show_Time"] = False
    if (time_map is not None):
        if (len(time_map) == 0):
            command_args["Show_Time"] = False

    frame_no = 1
    omero_image.setActiveChannels([x+1 for x in c_range],
                                  c_windows, c_colours)

    overlay_colour = (255, 255, 255)
    if "Overlay_Colour" in command_args:
        r, g, b, a = COLOURS[command_args["Overlay_Colour"]]
        overlay_colour = (r, g, b)

    canvas_colour = tuple(COLOURS[command_args["Canvas_Colour"]][:3])
    mw = command_args["Min_Width"]
    if mw < size_x:
        mw = size_x
    mh = command_args["Min_Height"]
    if mh < size_y:
        mh = size_y
    ovlpos = None
    canvas = None
    if size_x < mw or size_y < mh:
        ovlpos = ((mw-size_x) / 2, (mh-size_y) / 2)
        canvas = Image.new("RGBA", (mw, mh), canvas_colour)

    format = command_args["Format"]
    file_names = []

    # add intro...
    if "Intro_Slide" in command_args and command_args["Intro_Slide"].id:
        intro_duration = command_args["Intro_Duration"]
        intro_file_id = command_args["Intro_Slide"].getId().getValue()
        intro_filenames = write_intro_end_slides(
            conn, command_args, intro_file_id, intro_duration, mw, mh)
        file_names.extend(intro_filenames)

    # prepare watermark
    if "Watermark" in command_args and command_args["Watermark"].id:
        watermark = prepare_watermark(conn, command_args, mw, mh)

    # add movie frames...
    for tz in tz_list:
        t = tz[0]
        z = tz[1]
        image = omero_image.renderImage(z, t)

        if ovlpos is not None:
            image2 = canvas.copy()
            image2.paste(image, ovlpos, image)
            image = image2

        if "Scalebar" in command_args and command_args["Scalebar"]:
            image = add_scalebar(
                command_args["Scalebar"], image, pixels, command_args)
        plane_info = "z:"+str(z)+"t:"+str(t)
        if "Show_Time" in command_args and command_args["Show_Time"]:
            time = time_map[plane_info]
            image = add_time_points(time, pixels, image, overlay_colour)
        if "Show_Plane_Info" in command_args and \
                command_args["Show_Plane_Info"]:
            image = add_plane_info(z, t, pixels, image, overlay_colour)
        if "Watermark" in command_args and command_args["Watermark"].id:
            image = paste_watermark(image, watermark)
        if format == QT:
            filename = str(frame_no) + '.png'
            image.save(filename, "PNG")
        else:
            filename = str(frame_no) + '.jpg'
            image.save(filename, "JPEG")
        file_names.append(filename)
        frame_no += 1

    # add ending slides... ("outro")
    if "Ending_Slide" in command_args and command_args["Ending_Slide"].id:
        end_duration = command_args["Ending_Duration"]
        end_file_id = command_args["Ending_Slide"].id.val
        end_filenames = write_intro_end_slides(
            conn, command_args, end_file_id, end_duration, mw, mh)
        file_names.extend(end_filenames)

    filelist = ",".join(file_names)

    ext = format_map[format]
    movie_name = "Movie"
    if "Movie_Name" in command_args:
        movie_name = command_args["Movie_Name"]
        movie_name = os.path.basename(movie_name)
    if not movie_name.endswith(".%s" % ext):
        movie_name = "%s.%s" % (movie_name, ext)

    # spaces etc in file name cause problems
    movie_name = re.sub(r"[$&;|()<>' ]", "", movie_name)
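    # e.g. "My Movie (1).avi" -> "MyMovie1.avi"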
    frames_per_sec = 2
    if "FPS" in command_args:
        frames_per_sec = command_args["FPS"]
    output = "localfile.%s" % ext
    build_avi(mw, mh, filelist, frames_per_sec, output, format)
    mimetype = format_mimetypes[format]
    omero_image._re.close()

    if not os.path.exists(output):
        return None, "Failed to create movie file: %s" % output
    if not command_args["Do_Link"]:
        original_file = script_utils.create_file(
            update_service, output, mimetype, movie_name)
        script_utils.upload_file(raw_file_store, original_file, movie_name)
        return original_file, message

    namespace = NSCREATED + "/omero/export_scripts/Make_Movie"
    file_annotation, ann_message = script_utils.create_link_file_annotation(
        conn, output, omero_image, namespace=namespace,
        mimetype=mimetype, orig_file_path_and_name=movie_name)
    message += ann_message
    return file_annotation._obj, message