Example #1
def export_skeleton_as_nrrd_async(skeleton_id,
                                  source_ref,
                                  target_ref,
                                  user_id,
                                  mirror=True,
                                  create_message=True):

    result = export_skeleton_as_nrrd(skeleton_id, source_ref, target_ref,
                                     user_id, mirror)
    if create_message:
        msg = Message()
        msg.user = User.objects.get(pk=int(user_id))
        msg.read = False
        if result['errors']:
            msg.title = "No NRRD file could be creaed for skeleton {}".format(
                skeleton_id)
            msg.text = "There was at least one error during the NRRD export: {}".format(
                '\n'.join(result['errors']))
            msg.action = ""
        else:
            url = urljoin(
                urljoin(settings.MEDIA_URL,
                        settings.MEDIA_EXPORT_SUBDIRECTORY),
                result['nrrd_name'])
            msg.title = "Exported skeleton {} as NRRD file".format(skeleton_id)
            msg.text = "The requested skeleton was exported as NRRD file. You " \
                    "can download it from this location: <a href='{}'>{}</a>".format(url, url)
            msg.action = url
        msg.save()

    return "Errors: {}".format('\n'.join(
        result['errors'])) if result['errors'] else result['nrrd_path']
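The download link above is built by chaining two urljoin calls. urljoin only appends a component when the base ends in a slash, so MEDIA_URL and MEDIA_EXPORT_SUBDIRECTORY must both carry trailing slashes for the NRRD file name to land inside the export directory. A minimal sketch of that behaviour (the URL and directory values are illustrative assumptions, not CATMAID settings):

from urllib.parse import urljoin

# Hypothetical stand-ins for settings.MEDIA_URL and settings.MEDIA_EXPORT_SUBDIRECTORY.
media_url = "https://example.org/media/"
export_subdir = "export/"
nrrd_name = "skeleton-123.nrrd"

# With trailing slashes each component nests under the previous path.
base = urljoin(media_url, export_subdir)  # https://example.org/media/export/
print(urljoin(base, nrrd_name))           # https://example.org/media/export/skeleton-123.nrrd

# Without the trailing slash the final path segment is replaced instead.
print(urljoin("https://example.org/media/export", nrrd_name))  # https://example.org/media/skeleton-123.nrrd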
Example #2
def process_crop_job(job, create_message=True):
    """ This method does the actual cropping. It controls the data extraction
    and the creation of the sub-stack. It can be executed as a Celery task.
    """
    try:
        # Create the sub-stack
        cropped_stack = extract_substack(job)

        # Create the output image
        outputImage = ImageList()
        for img in cropped_stack:
            outputImage.append(img)

        # Save the resulting micro_stack to a temporary location
        no_error_occured = True
        error_message = ""
        # Only produce an image if parts of the stack lie within the output
        if len(cropped_stack) > 0:
            outputImage.writeImages(job.output_path.encode('ascii', 'ignore'))
            # Add some meta data to the image
            addMetaData(job.output_path, job, cropped_stack)
        else:
            no_error_occured = False
            error_message = "A region outside the stack has been selected. " \
                    "Therefore, no image was produced."
    except (IOError, OSError, ValueError) as e:
        no_error_occured = False
        error_message = str(e)
        # Delete the file if parts of it have been written already
        if os.path.exists(job.output_path):
            os.remove(job.output_path)

    if create_message:
        # Create a notification message
        bb_text = "( %s, %s, %s ) -> ( %s, %s, %s )" % (job.x_min, job.y_min, \
                job.z_min, job.x_max, job.y_max, job.z_max)

        msg = Message()
        msg.user = User.objects.get(pk=int(job.user.id))
        msg.read = False
        if no_error_occured:
            file_name = os.path.basename(job.output_path)
            url = os.path.join(settings.CATMAID_URL,
                               "crop/download/" + file_name + "/")
            msg.title = "Microstack finished"
            msg.text = "The requested microstack %s is finished. You can " \
                    "download it from this location: <a href='%s'>%s</a>" % \
                    (bb_text, url, url)
            msg.action = url
        else:
            msg.title = "Microstack could not be created"
            msg.text = "The requested microstack %s could not be created due " \
                    "to an error while saving the result (%s)." % \
                    (bb_text, error_message)
            msg.action = ""
        msg.save()

    return job.output_path if no_error_occured else error_message
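The docstring says process_crop_job can be executed as a Celery task, but the examples only show the function body. A minimal sketch of how the dispatch might look, assuming a shared_task wrapper and that a job can be rebuilt from a small identifier on the worker side (both are assumptions, not code from the examples):

from celery import shared_task

@shared_task
def process_crop_job_task(job_id, create_message=True):
    # Hypothetical: rebuild the crop job on the worker so only a serializable
    # id travels through the broker; load_crop_job is a placeholder helper.
    job = load_crop_job(job_id)
    return process_crop_job(job, create_message)

# Callers enqueue the work instead of running it in the request cycle:
# process_crop_job_task.delay(job.id)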
Example #3
def process_crop_job(job, create_message=True):
    """ This method does the actual cropping. It controls the data extraction
    and the creation of the sub-stack. It can be executed as a Celery task.
    """
    try:
        # Create the sub-stack
        cropped_stack = extract_substack(job)

        # Create the output image
        outputImage = ImageList()
        for img in cropped_stack:
            outputImage.append( img )

        # Save the resulting micro_stack to a temporary location
        no_error_occured = True
        error_message = ""
        # Only produce an image if parts of the stack lie within the output
        if len( cropped_stack ) > 0:
            outputImage.writeImages( job.output_path.encode('ascii', 'ignore') )
            # Add some meta data to the image
            addMetaData( job.output_path, job, cropped_stack )
        else:
            no_error_occured = False
            error_message = "A region outside the stack has been selected. " \
                    "Therefore, no image was produced."
    except (IOError, OSError, ValueError) as e:
        no_error_occured = False
        error_message = str(e)
        # Delete the file if parts of it have been written already
        if os.path.exists( job.output_path ):
            os.remove( job.output_path )

    if create_message:
        # Create a notification message
        bb_text = "( %s, %s, %s ) -> ( %s, %s, %s )" % (job.x_min, job.y_min, \
                job.z_min, job.x_max, job.y_max, job.z_max)

        msg = Message()
        msg.user = User.objects.get(pk=int(job.user.id))
        msg.read = False
        if no_error_occured:
            file_name = os.path.basename( job.output_path )
            url = os.path.join( settings.CATMAID_URL, "crop/download/" + file_name + "/")
            msg.title = "Microstack finished"
            msg.text = "The requested microstack %s is finished. You can " \
                    "download it from this location: <a href='%s'>%s</a>" % \
                    (bb_text, url, url)
            msg.action = url
        else:
            msg.title = "Microstack could not be created"
            msg.text = "The requested microstack %s could not be created due " \
                    "to an error while saving the result (%s)." % \
                    (bb_text, error_message)
            msg.action = ""
        msg.save()

    return job.output_path if no_error_occured else error_message
Example #4
def process_crop_job(job: CropJob, create_message=True) -> str:
    """ This method does the actual cropping. It controls the data extraction
    and the creation of the sub-stack. It can be executed as a Celery task.
    """
    try:
        # Create the sub-stack
        cropped_stack = extract_substack(job)
        # Save the resulting micro_stack to a temporary location
        no_error_occured = True
        error_message = ""
        # Only produce an image if parts of the stack lie within the output
        if len(cropped_stack) > 0:
            metadata = job.create_tiff_metadata(len(cropped_stack))
            cropped_stack[0].save(job.output_path, compression="raw", save_all=True,
                    append_images=cropped_stack[1:], tiffinfo=metadata)
        else:
            no_error_occured = False
            error_message = "A region outside the stack has been selected. " \
                    "Therefore, no image was produced."
    except (IOError, OSError, ValueError) as e:
        no_error_occured = False
        error_message = str(e)
        # Delete the file if parts of it have been written already
        if os.path.exists( job.output_path ):
            os.remove( job.output_path )

    if create_message:
        # Create a notification message
        bb_text = "( %s, %s, %s ) -> ( %s, %s, %s )" % (job.x_min, job.y_min, \
                job.z_min, job.x_max, job.y_max, job.z_max)

        user = User.objects.get(pk=int(job.user.id))
        msg = Message()
        msg.user = user
        msg.read = False
        if no_error_occured:
            file_name = os.path.basename( job.output_path )
            url = os.path.join( settings.CATMAID_URL, "crop/download/" + file_name + "/")
            msg.title = "Microstack finished"
            msg.text = "The requested microstack %s is finished. You can " \
                    "download it from this location: <a href='%s'>%s</a>" % \
                    (bb_text, url, url)
            msg.action = url
        else:
            msg.title = "Microstack could not be created"
            msg.text = "The requested microstack %s could not be created due " \
                    "to an error while saving the result (%s)." % \
                    (bb_text, error_message)
            msg.action = ""
        msg.save()

        notify_user(user.id, msg.id, msg.title)

    return job.output_path if no_error_occured else error_message
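Example #4 drops the ImageList writer and saves the multi-page TIFF directly with Pillow: the first slice is written with save_all=True, the remaining slices go in append_images, and tiffinfo carries the per-job metadata. A self-contained sketch of that pattern (the slice sizes and the ImageDescription text are illustrative assumptions):

from PIL import Image
from PIL.TiffImagePlugin import ImageFileDirectory_v2

# Dummy grayscale slices standing in for the cropped stack.
slices = [Image.new("L", (64, 64), color=i * 40) for i in range(4)]

# Minimal TIFF metadata; tag 270 is the standard ImageDescription tag.
ifd = ImageFileDirectory_v2()
ifd[270] = "cropped sub-stack, 4 slices"

# One multi-page TIFF: the first image is saved, the rest are appended.
slices[0].save("stack.tif", compression="raw", save_all=True,
               append_images=slices[1:], tiffinfo=ifd)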
Example #5
 def create_message(self, title, message, url):
     msg = Message()
     msg.user = User.objects.get(pk=int(self.job.user.id))
     msg.read = False
     msg.title = title
     msg.text = message
     msg.action = url
     msg.save()
Example #6
 def create_message(self, title, message, url):
     msg = Message()
     msg.user = User.objects.get(pk=int(self.job.user.id))
     msg.read = False
     msg.title = title
     msg.text = message
     msg.action = url
     msg.save()
Example #7
File: nat.py Project: tomka/CATMAID
def export_skeleton_as_nrrd_async(skeleton_id, source_ref, target_ref, user_id,
                                  mirror=True, create_message=True):

    result = export_skeleton_as_nrrd(skeleton_id, source_ref, target_ref,
                                     user_id, mirror)
    if create_message:
        msg = Message()
        msg.user = User.objects.get(pk=int(user_id))
        msg.read = False
        if result['errors']:
            msg.title = "No NRRD file could be creaed for skeleton {}".format(skeleton_id)
            msg.text = "There was at least one error during the NRRD export: {}".format('\n'.join(result['errors']))
            msg.action = ""
        else:
            url = urljoin(urljoin(settings.MEDIA_URL, settings.MEDIA_EXPORT_SUBDIRECTORY), result['nrrd_name'])
            msg.title = "Exported skeleton {} as NRRD file".format(skeleton_id)
            msg.text = "The requested skeleton was exported as NRRD file. You " \
                    "can download it from this location: <a href='{}'>{}</a>".format(url, url)
            msg.action = url
        msg.save()

    return "Errors: {}".format('\n'.join(result['errors'])) if result['errors'] else result['nrrd_path']
Example #8
    if create_message:
        # Create a notification message
        bb_text = "( %s, %s, %s ) -> ( %s, %s, %s )" % (job.x_min, job.y_min, \
                job.z_min, job.x_max, job.y_max, job.z_max)

        msg = Message()
        msg.user = User.objects.get(pk=int(job.user.id))
        msg.read = False
        if no_error_occured:
            file_name = os.path.basename( job.output_path )
            url = os.path.join( settings.CATMAID_URL, "crop/download/" + file_name + "/")
            msg.title = "Microstack finished"
            msg.text = "The requested microstack %s is finished. You can " \
                    "download it from this location: <a href='%s'>%s</a>" % \
                    (bb_text, url, url)
            msg.action = url
        else:
            msg.title = "Microstack could not be created"
            msg.text = "The requested microstack %s could not be created due " \
                    "to an error while saving the result (%s)." % \
                    (bb_text, error_message)
            msg.action = ""
        msg.save()

    return None if no_error_occured else error_message

def start_asynch_process( job ):
    """ It launches the data extraction and sub-stack building as a seperate process.
    This process uses the addmessage command with manage.py to write a message for the
    user into the data base once the process is done.
    """
Example #9
def query_segmentation_async(
    result,
    project_id,
    user_id,
    ssh_key,
    ssh_user,
    local_temp_dir,
    segmentations_dir,
    server,
    job_name,
    job_type,
):
    result.status = "computing"
    result.save()
    msg_user(user_id, "autoproofreader-result-update", {"status": "computing"})

    # copy temp files from django local temp media storage to server temp storage
    setup = (
        "scp -i {ssh_key} -pr {local_dir} "
        + "{ssh_user}@{server_address}:{server_results_dir}/{job_dir}"
    ).format(
        **{
            "local_dir": local_temp_dir,
            "server_address": server["address"],
            "server_results_dir": server["results_dir"],
            "job_dir": job_name,
            "ssh_key": ssh_key,
            "ssh_user": ssh_user,
        }
    )
    files = {}
    for f in local_temp_dir.iterdir():
        files[f.name.split(".")[0]] = Path(
            "~/", server["results_dir"], job_name, f.name
        )

    if job_type == "diluvian":
        extra_parameters = (
            "--model-weights-file {model_file} "
            + "--model-training-config {model_config_file} "
            + "--model-job-config {job_config_file} "
            + "--volume-file {volume_file} "
        ).format(
            **{
                "model_file": server["model_file"],
                "model_config_file": files["model_config"],
                "job_config_file": files["diluvian_config"],
                "volume_file": files["volume"],
            }
        )
    elif job_type == "cached_lsd":
        extra_parameters = "--cached-lsd-config {} ".format(files["cached_lsd_config"])
    else:
        extra_parameters = ""

    # connect to the server and run the autoproofreader algorithm on the provided skeleton
    query_seg = (
        "ssh -i {ssh_key} {ssh_user}@{server}\n"
        + "source {server_ff_env_path}\n"
        + "sarbor-error-detector "
        + "--skeleton-csv {skeleton_file} "
        + "--sarbor-config {sarbor_config} "
        + "--output-file {output_file} "
        + "{segmentation_type} "
        + "{type_parameters}"
    ).format(
        **{
            "ssh_key": ssh_key,
            "ssh_user": ssh_user,
            "server": server["address"],
            "server_ff_env_path": server["env_source"],
            "skeleton_file": files["skeleton"],
            "sarbor_config": files["sarbor_config"],
            "output_file": Path(server["results_dir"], job_name, "outputs"),
            "segmentation_type": job_type.replace("_", "-"),
            "type_parameters": extra_parameters,
        }
    )

    # Copy the numpy file containing the volume mesh and the csv containing the node connections
    # predicted by the autoproofreader run.

    fetch_files = (
        "scp -i {ssh_key} -r {ssh_user}@{server}:"
        + "{server_results_dir}/{server_job_dir}/* {local_temp_dir}\n"
    ).format(
        **{
            "ssh_key": ssh_key,
            "ssh_user": ssh_user,
            "server": server["address"],
            "server_results_dir": server["results_dir"],
            "server_job_dir": job_name,
            "local_temp_dir": local_temp_dir,
        }
    )

    process = subprocess.Popen(
        "/bin/bash", stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding="utf8"
    )
    out, err = process.communicate(setup)
    logging.info(out)

    process = subprocess.Popen(
        "/bin/bash", stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding="utf8"
    )
    out, err = process.communicate(query_seg)
    logging.info(out)

    process = subprocess.Popen(
        "/bin/bash", stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding="utf8"
    )
    out, err = process.communicate(fetch_files)
    logging.info(out)

    nodes_path = Path(local_temp_dir, "outputs", "nodes.obj")
    Node = namedtuple("Node", ["node_id", "parent_id", "x", "y", "z"])
    # b is the branch score. c is the connectivity score
    Ranking = namedtuple(
        "Ranking", ["node_id", "parent_id", "c", "b", "b_dx", "b_dy", "b_dz"]
    )
    Combined = namedtuple(
        "Combined",
        ["node_id", "parent_id", "x", "y", "z", "b", "c", "b_dx", "b_dy", "b_dz"],
    )

    # Nodes are mandatory
    if nodes_path.exists():
        nodes = {row[0]: Node(*row) for row in pickle.load(nodes_path.open("rb"))}
    else:
        result.status = "failed"
        result.save()
        return "failed"

    rankings_path = Path(local_temp_dir, "outputs", "rankings.obj")
    # Rankings are mandatory
    if rankings_path.exists():
        with rankings_path.open("r") as f:
            rankings = {
                row[0]: Ranking(*row) for row in pickle.load(rankings_path.open("rb"))
            }
            node_data = [
                Combined(**{**nodes[nid]._asdict(), **rankings[nid]._asdict()})
                for nid in nodes.keys()
            ]
            proofread_nodes = [
                ProofreadTreeNodes(
                    node_id=row.node_id,
                    parent_id=row.parent_id,
                    x=row.x,
                    y=row.y,
                    z=row.z,
                    connectivity_score=row.c,
                    branch_score=row.b,
                    branch_dx=row.b_dx,
                    branch_dy=row.b_dy,
                    branch_dz=row.b_dz,
                    reviewed=False,
                    result=result,
                    user_id=user_id,
                    project_id=project_id,
                    editor_id=user_id,
                )
                for row in node_data
            ]
            ProofreadTreeNodes.objects.bulk_create(proofread_nodes)
    else:
        result.status = "failed"
        result.save()
        return "failed"

    mesh_path = Path(local_temp_dir, "outputs", "mesh.stl")
    # Mesh is optional
    if mesh_path.exists():
        with mesh_path.open("r") as f:
            stl_str = f.read()

            try:
                vertices, triangles = _stl_ascii_to_indexed_triangles(stl_str)
            except InvalidSTLError as e:
                raise ValueError("Invalid STL file ({})".format(str(e)))

            mesh = TriangleMeshVolume(
                project_id,
                user_id,
                {"type": "trimesh", "title": job_name, "mesh": [vertices, triangles]},
            )
            mesh_volume = mesh.save()
            result.volume = Volume.objects.get(id=mesh_volume)

    segmentation_path = Path(local_temp_dir, "outputs", "segmentations.n5")
    segmentation_dir = Path(segmentations_dir)
    if segmentation_path.exists():
        segmentation_dir.mkdir(parents=True, exist_ok=True)
        segmentation_path.rename(segmentation_dir / "segmentations.n5")

    cleanup = (
        "rm -r {local_temp_dir}\n"
        + "ssh -i {ssh_key} {ssh_user}@{server}\n"
        + "rm -r {server_results_dir}/{server_job_dir}"
    ).format(
        **{
            "ssh_key": ssh_key,
            "ssh_user": ssh_user,
            "server": server["address"],
            "server_results_dir": server["results_dir"],
            "server_job_dir": job_name,
            "local_temp_dir": local_temp_dir,
        }
    )

    process = subprocess.Popen(
        "/bin/bash", stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding="utf8"
    )
    out, err = process.communicate(cleanup)
    logging.info(out)

    msg = Message()
    msg.user = User.objects.get(pk=int(user_id))
    msg.read = False

    msg.title = "Job {} complete!"
    msg.text = "IM DOING SOME STUFF, CHECK IT OUT"
    msg.action = "localhost:8000"

    notify_user(user_id, msg.id, msg.title)

    result.completion_time = datetime.datetime.now(pytz.utc)
    result.status = "complete"
    result.save()

    msg_user(user_id, "autoproofreader-result-update", {"status": "completed"})

    return "complete"
Example #10
        # Create a notification message
        bb_text = "( %s, %s, %s ) -> ( %s, %s, %s )" % (job.x_min, job.y_min, \
                job.z_min, job.x_max, job.y_max, job.z_max)

        msg = Message()
        msg.user = User.objects.get(pk=int(job.user.id))
        msg.read = False
        if no_error_occured:
            file_name = os.path.basename(job.output_path)
            url = os.path.join(settings.CATMAID_URL,
                               "crop/download/" + file_name + "/")
            msg.title = "Microstack finished"
            msg.text = "The requested microstack %s is finished. You can " \
                    "download it from this location: <a href='%s'>%s</a>" % \
                    (bb_text, url, url)
            msg.action = url
        else:
            msg.title = "Microstack could not be created"
            msg.text = "The requested microstack %s could not be created due " \
                    "to an error while saving the result (%s)." % \
                    (bb_text, error_message)
            msg.action = ""
        msg.save()

    return None if no_error_occured else error_message


def start_asynch_process(job):
    """ It launches the data extraction and sub-stack building as a seperate process.
    This process uses the addmessage command with manage.py to write a message for the
    user into the data base once the process is done.