Example #1

def aggregateProcess(parameters, tableBuilder, uid):

    # Make sure to initialize and store the results. We need to have them since
    # most likely the client will try to retrieve them again before the process
    # is finished.
    resultToStore = {}
    resultToStore["uid"] = uid
    resultToStore["success"] = True
    resultToStore["completed"] = False
    resultToStore["message"] = ""
    resultToStore["nCopiedFiles"] = ""
    resultToStore["relativeExpFolder"] = ""
    resultToStore["zipArchiveFileName"] = ""
    resultToStore["mode"] = ""
    LRCache.set(uid, resultToStore)

    # Get path to containing folder
    # __file__ does not work (reliably) in Jython
    dbPath = "../core-plugins/microscopy/4/dss/reporting-plugins/export_microscopy_datasets"

    # Path to the logs subfolder
    logPath = os.path.join(dbPath, "logs")

    # Make sure the logs subfolder exists
    if not os.path.exists(logPath):
        os.makedirs(logPath)

    # Path for the log file
    logFile = os.path.join(logPath, "log.txt")

    # Set up logging
    logging.basicConfig(filename=logFile, level=logging.DEBUG,
                        format='%(asctime)-15s %(levelname)s: %(message)s')
    logger = logging.getLogger()
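    # Note: logging.basicConfig() only configures the root logger if it has
    # no handlers yet; in a long-lived interpreter, later calls with a
    # different filename are silently ignored.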

    # Get parameters from plugin.properties
    properties = parsePropertiesFile()
    if properties is None:
        raise Exception("Could not process plugin.properties")

    # Get the COLLECTION experiment identifier
    experimentId = parameters.get("experimentId")

    # Get the MICROSCOPY_EXPERIMENT sample identifier
    expSamplePermId = parameters.get("expSamplePermId")

    # Get the MICROSCOPY_SAMPLE_TYPE sample identifier
    samplePermId = parameters.get("samplePermId")

    # Get the mode
    mode = parameters.get("mode")

    # Info
    logger.info("Aggregation plug-in called with following parameters:")
    logger.info("* COLLECTION experimentId              = " + experimentId)
    logger.info("* MICROSCOPY_EXPERIMENT sample permId  = " + expSamplePermId)
    logger.info("* MICROSCOPY_SAMPLE_TYPE sample permId = " + samplePermId)
    logger.info("* mode         = " + mode)
    logger.info("* userId       = " + userId)
    logger.info("Aggregation plugin properties:")
    logger.info(" * base_dir       = " + properties['base_dir'])
    logger.info(" * export_dir     = " + properties['export_dir'])
    logger.info(" * hrm_base_dir   = " + properties['hrm_base_dir'])
    logger.info(" * hrm_src_subdir = " + properties['hrm_src_subdir'])

    # Instantiate the Mover object - userId is a global variable
    # made available to the aggregation plug-in
    mover = Mover(experimentId, expSamplePermId, samplePermId, mode, userId, properties, logger)

    # Process
    success = mover.process()

    # Compress
    if mode == "zip":
        mover.compressIfNeeded()

    # Get some results info
    nCopiedFiles = mover.getNumberOfCopiedFiles()
    errorMessage = mover.getErrorMessage()
    relativeExpFolder = mover.getRelativeRootExperimentPath()
    zipFileName = mover.getZipArchiveFileName()

    # Update results and store them
    resultToStore["uid"] = uid
    resultToStore["completed"] = True
    resultToStore["success"] = success
    resultToStore["message"] = errorMessage
    resultToStore["nCopiedFiles"] = nCopiedFiles
    resultToStore["relativeExpFolder"] = relativeExpFolder
    resultToStore["zipArchiveFileName"] = zipFileName
    resultToStore["mode"] = mode
    LRCache.set(uid, resultToStore)

    # Email result to the user
    if success:

        subject = "Microscopy: successfully processed requested data"

        if nCopiedFiles == 1:
            snip = "One file was "
        else:
            snip = str(nCopiedFiles) + " files were "

        if mode == "normal":
            body = snip + "successfully exported to {...}/" + relativeExpFolder + "."
        elif mode == "hrm":
            body = snip + "successfully exported to your HRM source folder."
        else:
            body = snip + "successfully packaged for download: " + zipFileName

    else:
        subject = "Microscopy: error processing request!"
        body = "Sorry, there was an error processing your request. " + \
        "Please send your administrator the following report:\n\n" + \
        "\"" + errorMessage + "\"\n"

    # Send
    try:
        mailService.createEmailSender().withSubject(subject).withBody(body).send()
    except Exception:
        sys.stderr.write("export_microscopy_datasets: Failure sending email to user!\n")
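
Note: parsePropertiesFile() is defined elsewhere in the plug-in and is not part
of the example above. As a rough sketch only, assuming plugin.properties is a
plain key=value file in the plug-in folder (the parsing logic below is an
assumption, not the plug-in's actual implementation):

import os

def parsePropertiesFile():
    """Parse plugin.properties into a dictionary (illustrative sketch only)."""
    propertiesFile = os.path.join(
        "../core-plugins/microscopy/4/dss/reporting-plugins/export_microscopy_datasets",
        "plugin.properties")
    if not os.path.exists(propertiesFile):
        # Mirrors the example's contract: None signals a parsing problem.
        return None
    properties = {}
    f = open(propertiesFile, "r")
    try:
        for line in f:
            line = line.strip()
            # Skip blank lines and comments
            if line == "" or line.startswith("#"):
                continue
            # Keep everything after the first '=' as the value
            if "=" not in line:
                continue
            key, value = line.split("=", 1)
            properties[key.strip()] = value.strip()
    finally:
        f.close()
    return properties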
Example #2

def retrieveProcess(parameters, tableBuilder, uid):

    # Make sure to initialize and store the results. We need to have them since
    # most likely the client will try to retrieve them again before the process
    # is finished.
    resultToStore = {}
    resultToStore["uid"] = uid
    resultToStore["completed"] = False
    resultToStore["success"] = True
    resultToStore["message"] = ""
    resultToStore["data"] = ""

    # Get the parameters

    # Get the entity code
    code = parameters.get("code")
    resultToStore["code"] = code

    # Get the X-axis parameter
    paramX = parameters.get("paramX")
    resultToStore["paramX"] = paramX

    # Get the Y-axis parameter
    paramY = parameters.get("paramY")
    resultToStore["paramY"] = paramY

    # Get the X-axis scaling
    displayX = parameters.get("displayX")
    resultToStore["displayX"] = displayX

    # Get the Y-axis scaling
    displayY = parameters.get("displayY")
    resultToStore["displayY"] = displayY

    # Number of events known to be in the file
    numEvents = int(parameters.get("numEvents"))
    resultToStore["numEvents"] = numEvents

    # Maximum number of events to return
    maxNumEvents = int(parameters.get("maxNumEvents"))
    resultToStore["maxNumEvents"] = maxNumEvents

    # Sampling
    samplingMethod = parameters.get("samplingMethod")
    resultToStore["samplingMethod"] = samplingMethod

    # Node key
    nodeKey = parameters.get("nodeKey")
    resultToStore["nodeKey"] = nodeKey

    # Store them into the cache
    LRCache.set(uid, resultToStore)

    # Set up logging
    _logger = setUpLogging()

    # Log parameter info
    _logger.info("Requested events for dataset " + code + " and parameters (" +
                 paramX + ", " + paramY + ")")
    _logger.info("Requested scaling for parameter " + paramX + ": " + displayX)
    _logger.info("Requested scaling for parameter " + paramY + ": " + displayY)
    _logger.info("Requested sampling method: " + samplingMethod)
    _logger.info("Number of events in file: " + str(numEvents) +
                 "; maximum number of events to return: " + str(maxNumEvents))

    # Get the FCS file to process
    dataSetFiles = getFileForCode(code)

    # Prepare the data
    dataJSON = ""

    if len(dataSetFiles) != 1:

        # Build the error message
        message = "Could not retrieve the FCS file to process!"

        # Log the error
        _logger.error(message)

        # Store the results and set the completed flag
        resultToStore["completed"] = True
        resultToStore["success"] = False
        resultToStore["message"] = message

        # Return here
        return

    else:

        # Get the FCS file path
        fcsFile = dataSetFiles[0]

        # Log
        _logger.info("Dataset code " + code + " corresponds to FCS file " + \
                     fcsFile)

        # Open the FCS file
        reader = FCSReader(java.io.File(fcsFile), True)

        # Parse the file with data
        if not reader.parse():

            # Build the error message
            message = "Could not process file " + os.path.basename(fcsFile)

            # Log the error
            _logger.error(message)

            # Store the results and set the completed flag
            resultToStore["completed"] = True
            resultToStore["success"] = False
            resultToStore["message"] = message

            # Return here
            return

        # Preparation steps were successful
        parameterNames = reader.getParameterNames()

        # Find the indices of the requested parameters
        indxX = int(parameterNames.indexOf(paramX))
        indxY = int(parameterNames.indexOf(paramY))

        # Prepare the data arrays
        data = []

        # Actual number of events to be extracted
        actualNumEvents = min(maxNumEvents, numEvents)

        # Data sampling method.
        #
        # Method 1: to get the requested number of events, we sub-sample
        #           the file by skipping a certain number of rows ("step")
        #           in between the returned ones.
        # Method 2: to get the requested number of events, we just return
        #           the first N rows at the beginning of the file. This is
        #           faster and, according to the experts, should still be
        #           reasonably representative of the underlying population.
        if samplingMethod == "1":
            sample = True
        else:
            sample = False
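        # Illustrative only: if the reader did not implement the sampling
        # itself, Method 1 would amount to keeping every "step"-th row
        # (step and rowsToKeep are hypothetical names, not used below):
        #
        #   step = max(1, numEvents // actualNumEvents)
        #   rowsToKeep = range(0, step * actualNumEvents, step)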

        # Collect up to actualNumEvents rows for each requested parameter
        dataX = reader.getDataPerColumnIndex(indxX, actualNumEvents, sample)
        dataY = reader.getDataPerColumnIndex(indxY, actualNumEvents, sample)

        # Is the Hyperlog scaling requested?
        if displayX == "Hyperlog":
            params = Hyperlog.estimateParamHeuristic(dataX)
            Hx = Hyperlog(params[0], params[1], params[2], params[3])
            dataX = Hx.transform(dataX)
            dataX = Hyperlog.arrayMult(dataX, params[0])

        if displayY == "Hyperlog":
            params = Hyperlog.estimateParamHeuristic(dataY)
            Hy = Hyperlog(params[0], params[1], params[2], params[3])
            dataY = Hy.transform(dataY)
            dataY = Hyperlog.arrayMult(dataY, params[0])

        # Build array to JSONify and return to the client
        for i in range(actualNumEvents):
            data.append([float(dataX[i]), float(dataY[i])])

        # JSON encode the data array
        dataJSON = json.dumps(data)

        # Success message
        message = "Successfully processed file " + fcsFile

        # Log
        _logger.info(message)

        # Success
        success = True

        # Store the results and set the completed flag
        resultToStore["completed"] = True
        resultToStore["success"] = True
        resultToStore["message"] = message
        resultToStore["data"] = dataJSON
Example #3

def aggregateProcess(parameters, tableBuilder, uid):

    # Make sure to initialize and store the results. We need to have them since
    # most likely the client will try to retrieve them again before the process
    # is finished.
    resultToStore = {}
    resultToStore["uid"] = uid
    resultToStore["success"] = True
    resultToStore["completed"] = False
    resultToStore["message"] = ""
    resultToStore["nCopiedFiles"] = ""
    resultToStore["relativeExpFolder"] = ""
    resultToStore["zipArchiveFileName"] = ""
    resultToStore["mode"] = ""
    LRCache.set(uid, resultToStore)

    # Get path to containing folder
    # __file__ does not work (reliably) in Jython
    dbPath = "../core-plugins/microscopy/1/dss/reporting-plugins/export_microscopy_datasets"

    # Path to the logs subfolder
    logPath = os.path.join(dbPath, "logs")

    # Make sure the logs subfolder exists
    if not os.path.exists(logPath):
        os.makedirs(logPath)

    # Path for the log file
    logFile = os.path.join(logPath, "log.txt")

    # Set up logging
    logging.basicConfig(filename=logFile, level=logging.DEBUG, 
                        format='%(asctime)-15s %(levelname)s: %(message)s')
    logger = logging.getLogger()

    # Get parameters from plugin.properties
    properties = parsePropertiesFile()
    if properties is None:
        raise Exception("Could not process plugin.properties")

    # Get the experiment identifier
    experimentId = parameters.get("experimentId")

    # Get the sample identifier
    sampleId = parameters.get("sampleId")

    # Get the mode
    mode = parameters.get("mode")

    # Info
    logger.info("Aggregation plug-in called with following parameters:")
    logger.info("experimentId = " + experimentId)
    logger.info("sampleId     = " + sampleId)
    logger.info("mode         = " + mode)
    logger.info("userId       = " + userId)
    logger.info("Aggregation plugin properties:")
    logger.info("properties   = " + str(properties))

    # Instantiate the Mover object - userId is a global variable
    # made available to the aggregation plug-in
    mover = Mover(experimentId, sampleId, mode, userId, properties, logger)

    # Process
    success = mover.process()

    # Compress
    if mode == "zip":
        mover.compressIfNeeded()

    # Get some results info
    nCopiedFiles = mover.getNumberOfCopiedFiles()
    errorMessage = mover.getErrorMessage()
    relativeExpFolder = mover.getRelativeExperimentPath()
    zipFileName = mover.getZipArchiveFileName()

    # Update results and store them
    resultToStore["uid"] = uid
    resultToStore["completed"] = True
    resultToStore["success"] = success
    resultToStore["message"] = errorMessage
    resultToStore["nCopiedFiles"] = nCopiedFiles
    resultToStore["relativeExpFolder"] = relativeExpFolder
    resultToStore["zipArchiveFileName"] = zipFileName
    resultToStore["mode"] = mode
    LRCache.set(uid, resultToStore)

    # Email result to the user
    if success:

        subject = "Microscopy: successfully processed requested data"
        
        if nCopiedFiles == 1:
            snip = "One file was "
        else:
            snip = str(nCopiedFiles) + " files were "

        if mode == "normal":
            body = snip + "successfully exported to {...}/" + relativeExpFolder + "."
        elif mode == "hrm":
            body = snip + "successfully exported to your HRM source folder."
        else:
            body = snip + "successfully packaged for download: " + zipFileName
            
    else:
        subject = "Microscopy: error processing request!"
        body = "Sorry, there was an error processing your request. " + \
        "Please send your administrator the following report:\n\n" + \
        "\"" + errorMessage + "\"\n"

    # Send
    try:
        mailService.createEmailSender().withSubject(subject).withBody(body).send()
    except Exception:
        sys.stderr.write("export_microscopy_datasets: Failure sending email to user!\n")
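
All of these plug-ins pre-store a result dictionary in LRCache so that a
polling client sees a well-formed (if incomplete) result before the process
finishes. LRCache itself comes from the surrounding framework; as a rough
mental model only, a dict-backed stand-in might look like this (SimpleLRCache
is a hypothetical sketch, not the framework's implementation):

class SimpleLRCache:
    """Minimal in-memory key-value cache (illustrative stand-in for LRCache)."""

    def __init__(self):
        self._store = {}

    def set(self, key, value):
        # Store (or overwrite) the value under this key.
        self._store[key] = value

    def get(self, key):
        # Return the cached value, or None if the key is unknown.
        return self._store.get(key)

With a reference-storing cache like this sketch, later in-place updates to the
resultToStore dictionary are visible to readers even before any final
LRCache.set(uid, resultToStore) call, which would explain why retrieveProcess
can return early after only mutating the dictionary.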
Example #5

def aggregateProcess(parameters, tableBuilder, uid):

    # Make sure to initialize and store the results. We need to have them since
    # most likely the client will try to retrieve them again before the process
    # is finished.
    resultToStore = {}
    resultToStore["uid"] = uid
    resultToStore["success"] = True
    resultToStore["completed"] = False
    resultToStore["message"] = ""
    resultToStore["nCopiedFiles"] = ""
    resultToStore["relativeExpFolder"] = ""
    resultToStore["zipArchiveFileName"] = ""
    resultToStore["mode"] = ""
    LRCache.set(uid, resultToStore)

    # Get path to containing folder
    # __file__ does not work (reliably) in Jython
    dbPath = "../core-plugins/flow/3/dss/reporting-plugins/export_flow_datasets"

    # Path to the logs subfolder
    logPath = os.path.join(dbPath, "logs")

    # Make sure the logs subfolder exists
    if not os.path.exists(logPath):
        os.makedirs(logPath)

    # Path for the log file
    logFile = os.path.join(logPath, "log.txt")

    # Set up logging
    logging.basicConfig(filename=logFile,
                        level=logging.DEBUG,
                        format='%(asctime)-15s %(levelname)s: %(message)s')
    logger = logging.getLogger()

    # Get parameters from plugin.properties
    properties = parsePropertiesFile()
    if properties is None:
        msg = "Could not process plugin.properties"
        logger.error(msg)
        raise Exception(msg)

    if properties['base_dir'] == "" or properties['export_dir'] == "":
        msg = "Please set valid values for 'base_dir' and 'export_dir' in plugin.properties"
        logger.error(msg)
        raise Exception(msg)

    # Dump the parameters dictionary to the log
    logger.info(str(parameters))

    # Get the task
    task = parameters["task"]

    # Get the experiment identifier
    collectionId = parameters["collectionId"]

    # Get the experiment type
    collectionType = parameters["collectionType"]

    # Get the experiment sample identifier
    expSampleId = parameters.get("expSampleId")

    # Get the experiment sample perm identifier
    expSamplePermId = parameters.get("expSamplePermId")

    # Get the experiment sample type
    expSampleType = parameters.get("expSampleType")

    # Get the plate code
    platePermId = parameters.get("platePermId")

    # Get the plate type
    plateType = parameters.get("plateType")

    # Get the mode
    mode = parameters.get("mode")

    # Info
    logger.info("Aggregation plug-in called with following parameters:")
    logger.info("task            = " + task)
    logger.info("collectionId    = " + collectionId)
    logger.info("collectionType  = " + collectionType)
    logger.info("expSampleId     = " + expSampleId)
    logger.info("expSamplePermId = " + expSamplePermId)
    logger.info("expSampleType   = " + expSampleType)
    logger.info("platePermId     = " + platePermId)
    logger.info("plateType       = " + plateType)
    logger.info("mode            = " + mode)
    logger.info("userId          = " + userId)
    logger.info("Aggregation plugin properties:")
    logger.info("properties      = " + str(properties))

    # Consistency check: task must be one of a known set
    if task not in ("EXPERIMENT_SAMPLE", "ALL_PLATES", "PLATE", "TUBESET"):
        msg = "The requested task " + task + " is not known!"
        logger.error(msg)
        raise Exception(msg)

    logger.info("Requested task: " + task)

    # Instantiate the Mover object - userId is a global variable
    # made available to the aggregation plug-in
    mover = Mover(task, collectionId, collectionType, expSampleId,
                  expSamplePermId, expSampleType, platePermId, plateType, mode,
                  userId, properties, logger)

    # Process
    success = mover.process()
    if success:
        logger.info("Process ended successfully.")

    # Compress
    if mode == "zip":
        mover.compressIfNeeded()

    # Get some results info
    nCopiedFiles = mover.getNumberOfCopiedFiles()
    errorMessage = mover.getErrorMessage()
    relativeExpFolder = mover.getRelativeRootExperimentPath()
    zipFileName = mover.getZipArchiveFileName()

    # Update results and store them
    resultToStore["uid"] = uid
    resultToStore["completed"] = True
    resultToStore["success"] = success
    resultToStore["message"] = errorMessage
    resultToStore["nCopiedFiles"] = nCopiedFiles
    resultToStore["relativeExpFolder"] = relativeExpFolder
    resultToStore["zipArchiveFileName"] = zipFileName
    resultToStore["mode"] = mode
    LRCache.set(uid, resultToStore)

    # Email result to the user
    if success:

        subject = "Flow export: successfully processed requested data"

        if nCopiedFiles == 1:
            snip = "One file was "
        else:
            snip = str(nCopiedFiles) + " files were "

        if mode == "normal":
            body = snip + "successfully exported to {...}/" + relativeExpFolder + "."
        else:
            body = snip + "successfully packaged for download: " + zipFileName

    else:
        subject = "Flow export: error processing request!"
        body = "Sorry, there was an error processing your request. " + \
        "Please send your administrator the following report:\n\n" + \
        "\"" + errorMessage + "\"\n"

    # Send
    try:
        mailService.createEmailSender().withSubject(subject).withBody(
            body).send()
    except Exception:
        sys.stderr.write(
            "export_flow_datasets: Failure sending email to user!\n")
Example #6

def aggregateProcess(parameters, tableBuilder, uid):

    # Make sure to initialize and store the results. We need to have them since
    # most likely the client will try to retrieve them again before the process
    # is finished.
    resultToStore = {}
    resultToStore["uid"] = uid
    resultToStore["success"] = True
    resultToStore["completed"] = False
    resultToStore["message"] = ""
    resultToStore["nCopiedFiles"] = ""
    resultToStore["relativeExpFolder"] = ""
    resultToStore["zipArchiveFileName"] = ""
    resultToStore["mode"] = ""
    LRCache.set(uid, resultToStore)

    # Get parameters from plugin.properties
    properties = parsePropertiesFile()
    if properties is None:
        raise Exception("Could not process plugin.properties")

    # Get the experiment identifier
    experimentId = parameters.get("experimentId")

    # Get the experiment type
    experimentType = parameters.get("experimentType")

    # Get the entity type
    entityType = parameters.get("entityType")

    # Get the entity code
    entityId = parameters.get("entityId")

    # Get the specimen name
    specimen = parameters.get("specimen")
    
    # Get the mode
    mode = parameters.get("mode")

    # Instantiate the Mover object - userId is a global variable
    # made available to the aggregation plug-in
    mover = Mover(experimentId, experimentType, entityType, entityId, specimen,
                  mode, userId, properties)

    # Process
    success = mover.process()
    
    # Compress
    if mode == "zip":
        mover.compressIfNeeded()
        
    # Get some results info
    nCopiedFiles = mover.getNumberOfCopiedFiles()
    errorMessage = mover.getErrorMessage()
    relativeExpFolder = mover.getRelativeRootExperimentPath()
    zipFileName = mover.getZipArchiveFileName()

    # Update results and store them
    resultToStore["uid"] = uid
    resultToStore["completed"] = True
    resultToStore["success"] = success
    resultToStore["message"] = errorMessage
    resultToStore["nCopiedFiles"] = nCopiedFiles
    resultToStore["relativeExpFolder"] = relativeExpFolder
    resultToStore["zipArchiveFileName"] = zipFileName
    resultToStore["mode"] = mode
    LRCache.set(uid, resultToStore)

    # Email result to the user
    if success:

        subject = "BD FACS DIVA export: successfully processed requested data"
        
        if nCopiedFiles == 1:
            snip = "One file was "
        else:
            snip = str(nCopiedFiles) + " files were "

        if mode == "normal":
            body = snip + "successfully exported to {...}/" + relativeExpFolder + "."
        else:
            body = snip + "successfully packaged for download: " + zipFileName

    else:
        subject = "BD FACS DIVA export: error processing request!"
        body = "Sorry, there was an error processing your request. " + \
        "Please send your administrator the following report:\n\n" + \
        "\"" + errorMessage + "\"\n"

    # Send
    try:
        mailService.createEmailSender().withSubject(subject).withBody(body).send()
    except Exception:
        sys.stderr.write("export_bdfacsdiva_datasets: Failure sending email to user!\n")
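
These functions are invoked by the aggregation-service framework, which also
supplies the userId, mailService, and LRCache globals used above. Purely as a
sketch of how the pre-stored results are meant to be consumed, a client-side
poll might look like the following (pollResult is hypothetical, and the
LRCache.get call assumes a getter symmetric to the LRCache.set used above):

import time

def pollResult(uid):
    """Poll the cache until the background process marks itself completed."""
    while True:
        result = LRCache.get(uid)
        if result is not None and result["completed"]:
            return result
        # The process is still running; wait before asking again.
        time.sleep(1)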