Code example #1
def create_track_from_slices(image,
                             slices,
                             depth2slice,
                             id_project,
                             track_prefix="object",
                             label=None,
                             upload_group_id=False,
                             depth="time"):
    """Create an annotation track from a list of AnnotationSlice
    Parameters
    ----------
    image: ImageInstance
        The image instance in which the track is added
    slices: iterable (of AnnotationSlice)
        The polygon slices of the objects to draw
    depth2slice: dict
        A dictionary mapping the depths of the image instance with their respective SliceInstance
    id_project: int
        Project identifier
    track_prefix: str (default: "object")
        A prefix for the track name
    label: int|str (default: None)
        A label for the track
    upload_group_id: bool
        True to upload the group identifier
    depth: str
        Which depth field to read in the AnnotationSlice if both are present. One of {'time', 'depth'}.

    Returns
    -------
    saved_track: Track
        The saved track object
    annotations: AnnotationCollection
        The annotations associated with the track. The collection is NOT saved.
    """
    if label is None and len(slices) > 0:
        label = slices[0].label
    track = Track(name="{}-{}".format(track_prefix, label),
                  id_image=image.id,
                  color=None if upload_group_id else DEFAULT_COLOR).save()

    if upload_group_id:
        Property(track, key="label", value=label).save()

    collection = AnnotationCollection()
    for _slice in slices:
        collection.append(
            Annotation(
                location=change_referential(p=_slice.polygon,
                                            height=image.height).wkt,
                id_image=image.id,
                id_project=id_project,
                id_tracks=[track.id],
                slice=depth2slice[_slice.depth if _slice.time is None
                                  or depth == "depth" else _slice.time].id))
    return track, collection
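
A note on change_referential: Cytomine stores annotation geometries with a bottom-left origin, while masks and slices use a top-left origin, so the polygon has to be mirrored vertically before upload. A minimal sketch of such a helper, assuming it does nothing more than the y-axis flip (the same transform appears as a commented-out affine_transform call in code example #5):

from shapely.affinity import affine_transform
from shapely.geometry import box

def change_referential(p, height):
    # transform matrix [a, b, d, e, xoff, yoff]: x' = x, y' = height - y
    return affine_transform(p, [1, 0, 0, -1, 0, height])

print(change_referential(box(0, 0, 10, 10), height=100).wkt)
# POLYGON ((10 100, 10 90, 0 90, 0 100, 10 100))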
Code example #2
        # We first add a point in (10,10) where (0,0) is bottom-left corner
        point = Point(10, 10)
        annotation_point = Annotation(
            location=point.wkt, id_image=params.id_image_instance).save()
        if params.id_term:
            AnnotationTerm(annotation_point.id, params.id_term).save()

        # Then, we add a rectangle as annotation
        rectangle = box(20, 20, 100, 100)
        annotation_rectangle = Annotation(
            location=rectangle.wkt, id_image=params.id_image_instance).save()
        if params.id_term:
            AnnotationTerm(annotation_rectangle.id, params.id_term).save()

        # We can also add a property (key-value pair) to an annotation
        Property(annotation_rectangle, key="my_property", value=10).save()

        # Print the list of annotations in the given image:
        annotations = AnnotationCollection()
        annotations.image = params.id_image_instance
        annotations.fetch()
        print(annotations)

        # We can also add multiple annotations in one request:
        annotations = AnnotationCollection()
        annotations.append(
            Annotation(location=point.wkt,
                       id_image=params.id_image_instance,
                       id_project=params.id_project))
        annotations.append(
            Annotation(location=rectangle.wkt,
                       id_image=params.id_image_instance,
                       id_project=params.id_project))
        annotations.save()
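
The location field is simply the WKT serialization of a shapely geometry. A quick self-contained sketch of what the strings sent above look like:

from shapely.geometry import Point, box

print(Point(10, 10).wkt)          # POINT (10 10)
print(box(20, 20, 100, 100).wkt)  # POLYGON ((100 20, 100 100, 20 100, 20 20, 100 20))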
Code example #3
def main():
    with CytomineJob.from_cli(sys.argv) as conn:
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        status_comment="Initialization of the training phase")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path

        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(conn.job.id))
        in_path = os.path.join(working_path, "in/")
        in_txt = os.path.join(in_path, 'txt/')
        out_path = os.path.join(working_path, "out/")
        gt_path = os.path.join(working_path, "ground_truth/")
        tmp_path = os.path.join(working_path, "tmp/")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)
            os.makedirs(out_path)
            os.makedirs(gt_path)
            os.makedirs(tmp_path)
            os.makedirs(in_txt)
        # 2. Download the images (first input, then ground truth image)
        conn.job.update(
            progress=10,
            statusComment="Downloading images (to {})...".format(in_path))
        print(conn.parameters)
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        xpos = {}
        ypos = {}
        terms = {}

        for image in images:
            image.dump(dest_pattern=in_path.rstrip('/') + '/%d.%s' %
                       (image.id, 'jpg'))

            annotations = AnnotationCollection()
            annotations.project = conn.parameters.cytomine_id_project
            annotations.showWKT = True
            annotations.showMeta = True
            annotations.showGIS = True
            annotations.showTerm = True
            annotations.image = image.id
            annotations.fetch()

            for ann in annotations:
                l = ann.location
                if l.rfind('POINT') == -1:
                    pol = shapely.wkt.loads(l)
                    poi = pol.centroid
                else:
                    poi = shapely.wkt.loads(l)
                (cx, cy) = poi.xy
                xpos[(ann.term[0], image.id)] = int(cx[0])
                ypos[(ann.term[0], image.id)] = image.height - int(cy[0])
                terms[ann.term[0]] = 1

        for image in images:
            F = open(in_txt + '%d.txt' % image.id, 'w')
            for t in terms.keys():
                if (t, image.id) in xpos:
                    F.write('%d %d %d %f %f\n' %
                            (t, xpos[(t, image.id)], ypos[(t, image.id)],
                             xpos[(t, image.id)] / float(image.width),
                             ypos[(t, image.id)] / float(image.height)))
            F.close()

        depths = 1. / (2.**np.arange(conn.parameters.model_depth))

        (xc, yc, xr, yr, ims, t_to_i, i_to_t) = getallcoords(in_txt)

        if conn.parameters.cytomine_id_terms == 'all':
            term_list = t_to_i.keys()
        else:
            term_list = [
                int(term)
                for term in conn.parameters.cytomine_id_terms.split(',')
            ]

        if conn.parameters.cytomine_training_images == 'all':
            tr_im = ims
        else:
            tr_im = [
                int(id_im) for id_im in
                conn.parameters.cytomine_training_images.split(',')
            ]

        DATA = None
        REP = None
        be = 0

        #leprogres = 10
        #pr_spacing = 90/len(term_list)
        #print(term_list)
        sfinal = ""
        for id_term in conn.monitor(term_list,
                                    start=10,
                                    end=90,
                                    period=0.05,
                                    prefix="Model building for terms..."):
            sfinal += "%d " % id_term

            (xc, yc, xr, yr) = getcoordsim(in_txt, id_term, tr_im)
            nimages = np.max(xc.shape)
            mx = np.mean(xr)
            my = np.mean(yr)
            P = np.zeros((2, nimages))
            P[0, :] = xr
            P[1, :] = yr
            cm = np.cov(P)
            passe = False
            # additional parameters
            feature_parameters = None
            if conn.parameters.model_feature_type.lower() == 'gaussian':
                std_matrix = np.eye(2) * (
                    conn.parameters.model_feature_gaussian_std**2)
                feature_parameters = np.round(
                    np.random.multivariate_normal(
                        [0, 0], std_matrix,
                        conn.parameters.model_feature_gaussian_n)).astype(int)
            elif conn.parameters.model_feature_type.lower() == 'haar':
                W = conn.parameters.model_wsize
                n = conn.parameters.model_feature_haar_n / (
                    5 * conn.parameters.model_depth)
                h2 = generate_2_horizontal(W, n)
                v2 = generate_2_vertical(W, n)
                h3 = generate_3_horizontal(W, n)
                v3 = generate_3_vertical(W, n)
                sq = generate_square(W, n)
                feature_parameters = (h2, v2, h3, v3, sq)

            for times in range(conn.parameters.model_ntimes):
                if times == 0:
                    rangrange = 0
                else:
                    rangrange = conn.parameters.model_angle

                T = build_datasets_rot_mp(
                    in_path, tr_im, xc, yc, conn.parameters.model_R,
                    conn.parameters.model_RMAX, conn.parameters.model_P,
                    conn.parameters.model_step, rangrange,
                    conn.parameters.model_wsize,
                    conn.parameters.model_feature_type, feature_parameters,
                    depths, nimages, 'jpg', conn.parameters.model_njobs)
                for i in range(len(T)):
                    (data, rep, img) = T[i]
                    (height, width) = data.shape
                    if not passe:
                        passe = True
                        DATA = np.zeros((height * (len(T) + 100) *
                                         conn.parameters.model_ntimes, width))
                        REP = np.zeros(height * (len(T) + 100) *
                                       conn.parameters.model_ntimes)
                        b = 0
                        be = height
                    DATA[b:be, :] = data
                    REP[b:be] = rep
                    b = be
                    be = be + height

            REP = REP[0:b]
            DATA = DATA[0:b, :]

            clf = ExtraTreesClassifier(
                n_jobs=conn.parameters.model_njobs,
                n_estimators=conn.parameters.model_ntrees)
            clf = clf.fit(DATA, REP)

            parameters_hash = {}

            parameters_hash[
                'cytomine_id_terms'] = conn.parameters.cytomine_id_terms
            parameters_hash['model_R'] = conn.parameters.model_R
            parameters_hash['model_RMAX'] = conn.parameters.model_RMAX
            parameters_hash['model_P'] = conn.parameters.model_P
            parameters_hash['model_npred'] = conn.parameters.model_npred
            parameters_hash['model_ntrees'] = conn.parameters.model_ntrees
            parameters_hash['model_ntimes'] = conn.parameters.model_ntimes
            parameters_hash['model_angle'] = conn.parameters.model_angle
            parameters_hash['model_depth'] = conn.parameters.model_depth
            parameters_hash['model_step'] = conn.parameters.model_step
            parameters_hash['window_size'] = conn.parameters.model_wsize
            parameters_hash[
                'feature_type'] = conn.parameters.model_feature_type
            parameters_hash[
                'feature_haar_n'] = conn.parameters.model_feature_haar_n
            parameters_hash[
                'feature_gaussian_n'] = conn.parameters.model_feature_gaussian_n
            parameters_hash[
                'feature_gaussian_std'] = conn.parameters.model_feature_gaussian_std

            model_filename = joblib.dump(clf,
                                         os.path.join(
                                             out_path,
                                             '%d_model.joblib' % (id_term)),
                                         compress=3)[0]
            cov_filename = joblib.dump([mx, my, cm],
                                       os.path.join(
                                           out_path,
                                           '%d_cov.joblib' % (id_term)),
                                       compress=3)[0]
            parameter_filename = joblib.dump(
                parameters_hash,
                os.path.join(out_path, '%d_parameters.joblib' % id_term),
                compress=3)[0]
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=model_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=cov_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=parameter_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            if conn.parameters.model_feature_type == 'haar' or conn.parameters.model_feature_type == 'gaussian':
                add_filename = joblib.dump(
                    feature_parameters,
                    out_path.rstrip('/') + '/' + '%d_fparameters.joblib' %
                    (id_term))[0]
                AttachedFile(
                    conn.job,
                    domainIdent=conn.job.id,
                    filename=add_filename,
                    domainClassName="be.cytomine.processing.Job").upload()

        Property(conn.job, key="id_terms", value=sfinal.rstrip(" ")).save()
        conn.job.update(progress=100,
                        status=Job.TERMINATED,
                        statusComment="Job terminated.")
Code example #4
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=model_filename,
                domainClassName="be.cytomine.processing.Job").upload()

        conn.job.update(status=Job.RUNNING,
                        progress=80,
                        statusComment="Computing the post-processing model...")
        xt = procrustes(Xc, Yc)
        (mu, P) = apply_pca(xt, conn.parameters.model_n_reduc)
        muP_filename = joblib.dump((mu, P), 'muP.joblib', compress=3)[0]
        features_filename = joblib.dump((h2, v2, h3, v3, sq),
                                        'features.joblib',
                                        compress=3)[0]
        coords_filename = joblib.dump((xc, yc), 'coords.joblib', compress=3)[0]
        AttachedFile(conn.job,
                     domainIdent=conn.job.id,
                     filename=muP_filename,
                     domainClassName="be.cytomine.processing.Job").upload()
        AttachedFile(conn.job,
                     domainIdent=conn.job.id,
                     filename=features_filename,
                     domainClassName="be.cytomine.processing.Job").upload()
        AttachedFile(conn.job,
                     domainIdent=conn.job.id,
                     filename=coords_filename,
                     domainClassName="be.cytomine.processing.Job").upload()
        Property(conn.job, key="id_terms", value=sfinal.rstrip(" ")).save()
        conn.job.update(progress=100,
                        status=Job.TERMINATED,
                        statusComment="Job terminated.")
Code example #5
def main(argv):
    # 0. Initialize Cytomine client and job
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialisation...")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path
        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(cj.job.id))
        in_path = os.path.join(working_path, "in")
        out_path = os.path.join(working_path, "out")
        gt_path = os.path.join(working_path, "ground_truth")
        tmp_path = os.path.join(working_path, "tmp")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)
            os.makedirs(out_path)
            os.makedirs(gt_path)
            os.makedirs(tmp_path)

        # 2. Download the images (first input, then ground truth image)
        cj.job.update(
            progress=1,
            statusComment="Downloading images (to {})...".format(in_path))
        image_instances = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)
        input_images = [
            i for i in image_instances if gt_suffix not in i.originalFilename
        ]
        gt_images = [
            i for i in image_instances if gt_suffix in i.originalFilename
        ]

        for input_image in input_images:
            input_image.download(os.path.join(in_path, "{id}.tif"))

        for gt_image in gt_images:
            related_name = gt_image.originalFilename.replace(gt_suffix, '')
            related_image = [
                i for i in input_images if related_name == i.originalFilename
            ]
            if len(related_image) == 1:
                gt_image.download(
                    os.path.join(gt_path,
                                 "{}.tif".format(related_image[0].id)))

        # 3. Call the image analysis workflow using the run script
        cj.job.update(progress=25, statusComment="Launching workflow...")
        cj.job.update(progress=30,
                      statusComment="Execution: download model...")
        model_job = Job().fetch(cj.parameters.model_job_id)
        model_path = load_model(model_job,
                                tmp_path,
                                model_filename="weights.hf5")
        height, width = load_property(model_job,
                                      "image_height"), load_property(
                                          model_job, "image_width")
        n_channels = load_property(model_job, "n_channels")
        train_mean = load_property(model_job, "train_mean")
        train_std = load_property(model_job, "train_std")

        # load data
        cj.job.update(progress=30,
                      statusComment="Execution: preparing data...")
        dims = height, width, n_channels

        # load input images
        images = load_data(
            cj, dims, in_path, **{
                "start": 35,
                "end": 45,
                "period": 0.1,
                "prefix": "Execution: load training input images"
            })
        images -= train_mean
        images /= train_std

        # load model
        cj.job.update(progress=45, statusComment="Execution: build model...")
        unet = create_unet(dims)
        unet.load_weights(model_path)

        # inference
        masks = np.zeros([len(images), 1, dims[0], dims[1]], dtype=np.uint8)
        for i, image_name in cj.monitor(enumerate(images),
                                        start=45,
                                        end=55,
                                        period=0.1,
                                        prefix="Execution: inference"):
            masks[i] = unet.predict([images[i]])[0]
            cv2.imwrite(os.path.join(out_path, image_name),
                        (masks[i] >= cj.parameters.threshold_probas).astype(
                            np.uint8))

        # 4. Upload the annotation and masks to Cytomine (annotations are extracted from the mask using
        # the AnnotationExporter module)
        # for image in cj.monitor(input_images, start=60, end=80, period=0.1, prefix="Extracting and uploading polygons from masks"):
        #     file = "{}.tif".format(image.id)
        #     path = os.path.join(out_path, file)
        #     data = io.imread(path)
        #
        #     # extract objects
        #     slices = mask_to_objects_2d(data)
        #
        #     print("Found {} polygons in this image {}.".format(len(slices), image.id))
        #
        #     # upload
        #     collection = AnnotationCollection()
        #     for obj_slice in slices:
        #         collection.append(Annotation(
        #             location=affine_transform(obj_slice.polygon, [1, 0, 0, -1, 0, image.height]).wkt,
        #             id_image=image.id, id_project=cj.parameters.cytomine_id_project, property=[
        #                 {"key": "index", "value": str(obj_slice.label)}
        #             ]
        #         ))
        #     collection.save()

        # 5. Compute and upload the metrics
        cj.job.update(progress=80,
                      statusComment="Computing and uploading metrics...")
        outfiles, reffiles = zip(
            *[(os.path.join(out_path, "{}.tif".format(image.id)),
               os.path.join(gt_path, "{}.tif".format(image.id)))
              for image in input_images])

        results = computemetrics_batch(outfiles, reffiles, "PixCla", tmp_path)

        for key, value in results.items():
            Property(cj.job, key=key, value=str(value)).save()
        Property(cj.job,
                 key="IMAGE_INSTANCES",
                 value=str([im.id for im in input_images])).save()

        # 6. End
        cj.job.update(status=Job.TERMINATED,
                      progress=100,
                      statusComment="Finished.")
Code example #6
def main():
    with NeubiasJob.from_cli(sys.argv) as conn:
        problem_cls = get_discipline(conn, default=CLASS_LNDDET)
        is_2d = True
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        statusComment="Initialization of the training phase")
        in_images, gt_images, in_path, gt_path, out_path, tmp_path = prepare_data(
            problem_cls, conn, is_2d=is_2d, **conn.flags)
        tmax = 1
        for f in os.listdir(gt_path):
            if f.endswith('.tif'):
                gt_img = imageio.imread(os.path.join(gt_path, f))
                tmax = np.max(gt_img)
                break

        term_list = range(1, tmax + 1)
        depths = 1. / (2.**np.arange(conn.parameters.model_depth))

        tr_im = [
            int(id_im)
            for id_im in conn.parameters.cytomine_training_images.split(',')
        ]

        DATA = None
        REP = None
        be = 0
        sfinal = ""
        for id_term in term_list:
            sfinal += "%d " % id_term
        sfinal = sfinal.rstrip(' ')
        for id_term in conn.monitor(term_list,
                                    start=10,
                                    end=90,
                                    period=0.05,
                                    prefix="Model building for terms..."):
            (xc, yc, xr, yr) = getcoordsim_neubias(gt_path, id_term, tr_im)
            nimages = np.max(xc.shape)
            mx = np.mean(xr)
            my = np.mean(yr)
            P = np.zeros((2, nimages))
            P[0, :] = xr
            P[1, :] = yr
            cm = np.cov(P)
            passe = False
            # additional parameters
            feature_parameters = None
            if conn.parameters.model_feature_type.lower() == 'gaussian':
                std_matrix = np.eye(2) * (
                    conn.parameters.model_feature_gaussian_std**2)
                feature_parameters = np.round(
                    np.random.multivariate_normal(
                        [0, 0], std_matrix,
                        conn.parameters.model_feature_gaussian_n)).astype(int)
            elif conn.parameters.model_feature_type.lower() == 'haar':
                W = conn.parameters.model_wsize
                n = conn.parameters.model_feature_haar_n / (
                    5 * conn.parameters.model_depth)
                h2 = generate_2_horizontal(W, n)
                v2 = generate_2_vertical(W, n)
                h3 = generate_3_horizontal(W, n)
                v3 = generate_3_vertical(W, n)
                sq = generate_square(W, n)
                feature_parameters = (h2, v2, h3, v3, sq)

            for times in range(conn.parameters.model_ntimes):
                if times == 0:
                    rangrange = 0
                else:
                    rangrange = conn.parameters.model_angle

                T = build_datasets_rot_mp(
                    in_path, tr_im, xc, yc, conn.parameters.model_R,
                    conn.parameters.model_RMAX, conn.parameters.model_P,
                    conn.parameters.model_step, rangrange,
                    conn.parameters.model_wsize,
                    conn.parameters.model_feature_type, feature_parameters,
                    depths, nimages, 'tif', conn.parameters.model_njobs)
                for i in range(len(T)):
                    (data, rep, img) = T[i]
                    (height, width) = data.shape
                    if not passe:
                        passe = True
                        DATA = np.zeros((height * (len(T) + 100) *
                                         conn.parameters.model_ntimes, width))
                        REP = np.zeros(height * (len(T) + 100) *
                                       conn.parameters.model_ntimes)
                        b = 0
                        be = height
                    DATA[b:be, :] = data
                    REP[b:be] = rep
                    b = be
                    be = be + height

            REP = REP[0:b]
            DATA = DATA[0:b, :]

            clf = ExtraTreesClassifier(
                n_jobs=conn.parameters.model_njobs,
                n_estimators=conn.parameters.model_ntrees)
            clf = clf.fit(DATA, REP)

            parameters_hash = {}
            parameters_hash['cytomine_id_terms'] = sfinal.replace(' ', ',')
            parameters_hash['model_R'] = conn.parameters.model_R
            parameters_hash['model_RMAX'] = conn.parameters.model_RMAX
            parameters_hash['model_P'] = conn.parameters.model_P
            parameters_hash['model_npred'] = conn.parameters.model_npred
            parameters_hash['model_ntrees'] = conn.parameters.model_ntrees
            parameters_hash['model_ntimes'] = conn.parameters.model_ntimes
            parameters_hash['model_angle'] = conn.parameters.model_angle
            parameters_hash['model_depth'] = conn.parameters.model_depth
            parameters_hash['model_step'] = conn.parameters.model_step
            parameters_hash['window_size'] = conn.parameters.model_wsize
            parameters_hash[
                'feature_type'] = conn.parameters.model_feature_type
            parameters_hash[
                'feature_haar_n'] = conn.parameters.model_feature_haar_n
            parameters_hash[
                'feature_gaussian_n'] = conn.parameters.model_feature_gaussian_n
            parameters_hash[
                'feature_gaussian_std'] = conn.parameters.model_feature_gaussian_std

            model_filename = joblib.dump(clf,
                                         os.path.join(
                                             out_path,
                                             '%d_model.joblib' % (id_term)),
                                         compress=3)[0]
            cov_filename = joblib.dump([mx, my, cm],
                                       os.path.join(
                                           out_path,
                                           '%d_cov.joblib' % (id_term)),
                                       compress=3)[0]
            parameter_filename = joblib.dump(
                parameters_hash,
                os.path.join(out_path, '%d_parameters.joblib' % id_term),
                compress=3)[0]
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=model_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=cov_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=parameter_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            if conn.parameters.model_feature_type == 'haar' or conn.parameters.model_feature_type == 'gaussian':
                add_filename = joblib.dump(
                    feature_parameters,
                    out_path.rstrip('/') + '/' + '%d_fparameters.joblib' %
                    (id_term))[0]
                AttachedFile(
                    conn.job,
                    domainIdent=conn.job.id,
                    filename=add_filename,
                    domainClassName="be.cytomine.processing.Job").upload()

        Property(conn.job, key="id_terms", value=sfinal.rstrip(" ")).save()
        conn.job.update(progress=100,
                        status=Job.TERMINATED,
                        statusComment="Job terminated.")
Code example #7
    def end_successful_import(self, path: Path, image: Image, *args, **kwargs):
        uf = self.get_uf(path)

        ai = AbstractImage()
        ai.uploadedFile = uf.id
        ai.originalFilename = uf.originalFilename
        ai.width = image.width
        ai.height = image.height
        ai.depth = image.depth
        ai.duration = image.duration
        ai.channels = image.n_intrinsic_channels
        ai.extrinsicChannels = image.n_channels
        if image.physical_size_x:
            ai.physicalSizeX = round(
                convert_quantity(image.physical_size_x, "micrometers"), 6)
        if image.physical_size_y:
            ai.physicalSizeY = round(
                convert_quantity(image.physical_size_y, "micrometers"), 6)
        if image.physical_size_z:
            ai.physicalSizeZ = round(
                convert_quantity(image.physical_size_z, "micrometers"), 6)
        if image.frame_rate:
            ai.fps = round(convert_quantity(image.frame_rate, "Hz"), 6)
        ai.magnification = parse_int(image.objective.nominal_magnification)
        ai.bitPerSample = dtype_to_bits(image.pixel_type)
        ai.samplePerPixel = image.n_channels / image.n_intrinsic_channels
        ai.save()
        self.abstract_images.append(ai)

        asc = AbstractSliceCollection()
        set_channel_names = image.n_intrinsic_channels == image.n_channels
        for c in range(image.n_intrinsic_channels):
            name = None
            color = None
            if set_channel_names:
                name = image.channels[c].suggested_name
                color = image.channels[c].hex_color
            for z in range(image.depth):
                for t in range(image.duration):
                    mime = "image/pyrtiff"  # TODO: remove
                    asc.append(
                        AbstractSlice(ai.id,
                                      uf.id,
                                      mime,
                                      c,
                                      z,
                                      t,
                                      channelName=name,
                                      channelColor=color))
        asc.save()

        properties = PropertyCollection(ai)
        for metadata in image.raw_metadata.values():
            if metadata.value is not None and str(metadata.value) != '':
                properties.append(
                    Property(ai, metadata.namespaced_key, str(metadata.value)))
        try:
            properties.save()
        except CollectionPartialUploadException:
            pass  # TODO: improve handling of this exception, but prevent to fail the import

        uf.status = UploadedFile.DEPLOYED
        uf.update()

        properties = PropertyCollection(ai)
        for k, v in self.user_properties:
            if v is not None and str(v) != '':
                properties.append(Property(ai, k, v))
        try:
            properties.save()
        except CollectionPartialUploadException:
            pass  # TODO: improve handling of this exception, but prevent to fail the import

        instances = []
        for p in self.projects:
            instances.append(ImageInstance(ai.id, p.id).save())
        self.images.append((ai, instances))

        # TODO: temporary add annotations for backwards compatibility.
        #  BUT it should be done by core when an image instance is created.
        if image.n_planes == 1 and len(instances) > 0:
            # TODO: currently only supports metadata annots on 2D images

            metadata_annots = image.annotations
            if len(metadata_annots) > 0:
                metadata_terms = [
                    ma.terms for ma in metadata_annots if len(ma.terms) > 0
                ]
                metadata_terms = set(flatten(metadata_terms))

                for instance in instances:
                    project_id = instance.project
                    project = self.projects.find_by_attribute('id', project_id)
                    ontology_id = project.ontology  # noqa
                    ontology_terms = TermCollection().fetch_with_filter(
                        "project", project_id)
                    terms_id_mapping = {t.name: t.id for t in ontology_terms}

                    for metadata_term in metadata_terms:
                        if metadata_term not in terms_id_mapping:
                            # TODO: user must have ontology rights !
                            term = Term(name=metadata_term,
                                        id_ontology=ontology_id,
                                        color="#AAAAAA").save()
                            terms_id_mapping[term.name] = term.id

                    annots = AnnotationCollection()
                    for metadata_annot in metadata_annots:
                        term_ids = [
                            terms_id_mapping[t] for t in metadata_annot.terms
                        ]
                        properties = [
                            dict(key=k, value=v)
                            for k, v in metadata_annot.properties.items()
                        ]
                        annots.append(
                            Annotation(location=metadata_annot.wkt,
                                       id_image=instance.id,
                                       id_terms=term_ids
                                       if len(term_ids) > 0 else None,
                                       properties=properties
                                       if len(properties) > 0 else None,
                                       user=uf.user))

                    try:
                        annots.save()
                    except CollectionPartialUploadException:
                        pass
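
dtype_to_bits is a project helper used to fill bitPerSample; assuming pixel_type is a numpy dtype, a hypothetical implementation could be as small as:

import numpy as np

def dtype_to_bits(dtype):
    # bits per sample = bytes per element * 8
    return np.dtype(dtype).itemsize * 8

print(dtype_to_bits(np.uint8), dtype_to_bits(np.uint16))  # 8 16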
Code example #8
def run(debug=False):
    """
    Gets a project image from Cytomine, processes it and uploads the resulting annotations and properties

    Args:
        debug (bool): If True, annotations are saved individually and any error is plotted

    Example:
      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'dadb7d7a-5822-48f7-ab42-59bce27750ae' --cytomine_private_key 'd73f4602-51d2-4d15-91e4-d4cc175d65fd' --cytomine_id_project 187 --cytomine_id_image_instance 375 --cytomine_id_software 228848

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'b6ebb23c-00ff-427b-be24-87b2a82490df' --cytomine_private_key '6812f09b-3f33-4938-82ca-b23032d377fd' --cytomine_id_project 154 --cytomine_id_image_instance 3643

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 2140 --cytomine_id_software 2633

      docker run --gpus all -it --rm --mount type=bind,source=/home/giussepi/Public/environments/Cytomine/cyto_CRLM/,target=/CRLM,bind-propagation=private --network=host ttt --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 31296 --cytomine_id_software 79732
    """

    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine connection parameters
    parser.add_argument('--cytomine_host',
                        dest='host',
                        default='demo.cytomine.be',
                        help="The Cytomine host")
    parser.add_argument('--cytomine_public_key',
                        dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key',
                        dest='private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_project',
                        dest='id_project',
                        help="The project from which we want the images")
    parser.add_argument('--cytomine_id_software',
                        dest='id_software',
                        help="The software to be used to process the image")
    parser.add_argument('--cytomine_id_image_instance',
                        dest='id_image_instance',
                        help="The image to which the annotation will be added")

    params, _ = parser.parse_known_args(sys.argv[1:])

    with CytomineJob.from_cli(sys.argv[1:]) as cytomine:
        # TODO: To be tested on TITANx
        img = ImageInstance().fetch(params.id_image_instance)
        download_image(img)
        process_wsi_and_save(get_container_image_path(img))
        new_annotations = generate_polygons(get_container_image_path(img),
                                            adapt_to_cytomine=True)
        annotation_collection = None

        for label_key in new_annotations:
            # Sending annotation batches to the server
            for sub_list in chunks(new_annotations[label_key],
                                   ANNOTATION_BATCH):
                if not debug:
                    annotation_collection = AnnotationCollection()

                for exterior_points in sub_list:
                    if debug:
                        annotation_collection = AnnotationCollection()

                    annotation_collection.append(
                        Annotation(location=Polygon(
                            exterior_points.astype(int).reshape(
                                exterior_points.shape[0],
                                exterior_points.shape[2]).tolist()).wkt,
                                   id_image=params.id_image_instance,
                                   id_project=params.id_project,
                                   id_terms=[CYTOMINE_LABELS[label_key]]))

                    if debug:
                        try:
                            annotation_collection.save()
                        except Exception as e:
                            print(
                                exterior_points.astype(int).reshape(
                                    exterior_points.shape[0],
                                    exterior_points.shape[2]).tolist())
                            plt.plot(*Polygon(
                                exterior_points.astype(int).reshape(
                                    exterior_points.shape[0], exterior_points.
                                    shape[2])).exterior.coords.xy)
                            plt.show()
                            # raise(e)
                            print(e)
                        finally:
                            time.sleep(1)

                if not debug:
                    annotation_collection.save()
                    time.sleep(ANNOTATION_SLEEP_TIME)

        # Adding pie chart labels data as image property
        # TODO: Change delete_results_file to True for final test on titanX
        num_pixels_per_label = get_pie_chart_data(
            get_container_image_path(img), delete_results_file=False)

        for percentage, label_ in zip(num_pixels_per_label, Label.names):
            Property(img, key=label_, value='{}%'.format(percentage)).save()

        remove_image_local_copy(img)

        cytomine.job.update(statusComment="Finished.")
Code example #9
def create_tracking_from_slice_group(image,
                                     slices,
                                     slice2point,
                                     depth2slice,
                                     id_project,
                                     upload_object=False,
                                     track_prefix="object",
                                     label=None,
                                     upload_group_id=False):
    """Create a set of tracks and annotations to represent a tracked element. A trackline is created to reflect the
    movement of the object in the image. Optionally the object's polygon can also be uploaded.

    Parameters
    ----------
    image: ImageInstance
        An ImageInstance
    slices: list of AnnotationSlice
        A list of AnnotationSlice of one object
    slice2point: callable
        A function that transforms a slice into its representative point, used for generating the tracking line
    depth2slice: dict
        Maps time step with corresponding SliceInstance
    id_project: int
        Project identifier
    upload_object: bool
        True if the object should be uploaded as well (the trackline is uploaded in any case)
    track_prefix: str
        A prefix for the track name
    label: int (default: None)
        The label of the tracked object
    upload_group_id: bool
        True for uploading the object label with the track

    Returns
    -------
    saved_tracks: TrackCollection
        The saved track objects
    annotations: AnnotationCollection
        The annotations associated with the tracked element. The collection is NOT saved.
    """
    if label is None and len(slices) > 0:
        label = slices[0].label

    # create tracks
    tracks = TrackCollection()
    object_track = Track(
        "{}-{}".format(track_prefix, label),
        image.id,
        color=None if upload_group_id else DEFAULT_COLOR).save()
    trackline_track = Track(
        "{}-{}-trackline".format(track_prefix, label),
        image.id,
        color=None if upload_group_id else DEFAULT_COLOR).save()
    tracks.extend([object_track, trackline_track])

    if upload_group_id:
        Property(object_track, key="label", value=int(label)).save()
        Property(trackline_track, key="label", value=int(label)).save()

    # create actual annotations
    annotations = AnnotationCollection()
    sorted_group = sorted(slices, key=lambda s: s.time)
    prev_line = []
    for _slice in sorted_group:
        point = slice2point(_slice)
        if point.is_empty:  # skip empty points
            continue
        if len(prev_line) == 0 or not prev_line[-1].equals(point):
            prev_line.append(point)

        if len(prev_line) == 1:
            polygon = slice2point(_slice)
        else:
            polygon = LineString(prev_line)

        depth = _slice.time if _slice.depth is None else _slice.depth
        annotations.append(
            Annotation(location=change_referential(polygon, image.height).wkt,
                       id_image=image.id,
                       slice=depth2slice[depth].id,
                       id_project=id_project,
                       id_tracks=[trackline_track.id]))

        if upload_object:
            annotations.append(
                Annotation(location=change_referential(_slice.polygon,
                                                       image.height).wkt,
                           id_image=image.id,
                           slice=depth2slice[depth].id,
                           id_project=id_project,
                           id_tracks=[object_track.id]))

    return tracks, annotations
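
A toy illustration of how the trackline is built: the representative points are accumulated, and from the second distinct point on they are joined into a LineString (values are hypothetical):

from shapely.geometry import Point, LineString

prev_line = []
for p in [Point(0, 0), Point(1, 1), Point(2, 1)]:
    if len(prev_line) == 0 or not prev_line[-1].equals(p):
        prev_line.append(p)
    geom = prev_line[0] if len(prev_line) == 1 else LineString(prev_line)
    print(geom.wkt)  # a POINT first, then a LINESTRING growing with each slice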
Code example #10
def extract_annotations_objtrk(out_path, in_image, project_id, track_prefix,
                               **kwargs):
    """
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    track_prefix: str
    kwargs: dict
    """
    image = in_image.object
    path = os.path.join(out_path, in_image.filename)
    data, dim_order, _ = imread(path, return_order=True)
    ndim = get_dimensionality(dim_order)

    if ndim < 3:
        raise ValueError(
            "Object tracking should be at least 3D (only {} spatial dimension(s) found)"
            .format(ndim))

    tracks = TrackCollection()
    annotations = AnnotationCollection()

    if ndim == 3:
        slices = mask_to_objects_3d(data, time=True, assume_unique_labels=True)
        time_to_image = get_depth_to_slice(image)

        for slice_group in slices:
            curr_tracks, curr_annots = create_tracking_from_slice_group(
                image,
                slice_group,
                slice2point=lambda _slice: _slice.polygon.centroid,
                depth2slice=time_to_image,
                id_project=project_id,
                upload_object=True,
                upload_group_id=True,
                track_prefix=track_prefix + "-object")
            tracks.extend(curr_tracks)
            annotations.extend(curr_annots)
    elif ndim == 4:
        objects = mask_to_objects_3dt(mask=data)
        depths_to_image = get_depth_to_slice(image, depth=("time", "depth"))
        # TODO add tracking lines one way or another
        for time_steps in objects:
            label = time_steps[0][0].label
            track = Track(name="{}-{}".format(track_prefix, label),
                          id_image=image.id,
                          color=DEFAULT_COLOR).save()
            Property(track, key="label", value=label).save()
            annotations.extend([
                Annotation(location=change_referential(
                    p=slice.polygon, height=image.height).wkt,
                           id_image=image.id,
                           id_project=project_id,
                           id_tracks=[track.id],
                           slice=depths_to_image[(slice.time, slice.depth)].id)
                for slices in time_steps for slice in slices
            ])

            tracks.append(track)

    else:
        raise ValueError(
            "Annotation extraction for object tracking does not support masks with more than 4 dims..."
        )

    return tracks, annotations
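
A hypothetical call site for this helper (object names and identifiers are made up): the Track objects are saved as they are created, but the returned AnnotationCollection still has to be saved by the caller.

tracks, annotations = extract_annotations_objtrk(out_path="/tmp/out",
                                                 in_image=my_input,  # a BiaflowsCytomineInput (hypothetical)
                                                 project_id=187,
                                                 track_prefix="job-42")
annotations.save()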
Code example #11
    parser.add_argument('--key', help="the property key")
    parser.add_argument('--value', help="the property value")

    parser.add_argument('--cytomine_id_project', dest='id_project', required=False,
                        help="The project to which the property will be added (optional)")
    parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance', required=False,
                        help="The image to which the property will be added (optional)")
    parser.add_argument('--cytomine_id_annotation', dest='id_annotation', required=False,
                        help="The annotation to which the property will be added (optional)")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:

        if params.id_project:
            prop = Property(Project().fetch(params.id_project), key=params.key, value=params.value).save()
            print(prop)

        if params.id_image_instance:
            prop = Property(ImageInstance().fetch(params.id_image_instance), key=params.key, value=params.value).save()
            print(prop)

        if params.id_annotation:
            prop = Property(Annotation().fetch(params.id_annotation), key=params.key, value=params.value).save()
            print(prop)

        """
        You can add a property to any Cytomine domain.
        You can also attach a file (see AttachedFile) or add a description (see Description) to any Cytomine domain.
        """
Code example #12
def main(argv):
    # 0. Initialize Cytomine client and job
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialisation...")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path
        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(cj.job.id))
        in_path = os.path.join(working_path, "in")
        out_path = os.path.join(working_path, "out")
        gt_path = os.path.join(working_path, "ground_truth")
        tmp_path = os.path.join(working_path, "tmp")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)
            os.makedirs(out_path)
            os.makedirs(gt_path)
            os.makedirs(tmp_path)

        # 2. Download the images (first input, then ground truth image)
        cj.job.update(
            progress=1,
            statusComment="Downloading images (to {})...".format(in_path))
        image_instances = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)
        input_images = [
            i for i in image_instances if gt_suffix not in i.originalFilename
        ]
        gt_images = [
            i for i in image_instances if gt_suffix in i.originalFilename
        ]

        for input_image in input_images:
            input_image.download(os.path.join(in_path, "{id}.tif"))

        for gt_image in gt_images:
            related_name = gt_image.originalFilename.replace(gt_suffix, '')
            related_image = [
                i for i in input_images if related_name == i.originalFilename
            ]
            if len(related_image) == 1:
                gt_image.download(
                    os.path.join(gt_path,
                                 "{}.tif".format(related_image[0].id)))

        # 3. Call the image analysis workflow using the run script
        cj.job.update(progress=25, statusComment="Launching workflow...")

        # load data
        cj.job.update(progress=30, statusComment="Workflow: preparing data...")
        dims = (cj.parameters.image_height, cj.parameters.image_width,
                cj.parameters.n_channels)
        mask_dims = (dims[0], dims[1], cj.parameters.n_classes)

        # load input images
        imgs = load_data(
            cj, dims, in_path, **{
                "start": 35,
                "end": 45,
                "period": 0.1,
                "prefix": "Workflow: load training input images"
            })
        train_mean = np.mean(imgs)
        train_std = np.std(imgs)
        imgs -= train_mean
        imgs /= train_std

        # load masks
        masks = load_data(cj,
                          mask_dims,
                          gt_path,
                          dtype=np.int,
                          is_masks=True,
                          n_classes=cj.parameters.n_classes,
                          **{
                              "start": 45,
                              "end": 55,
                              "period": 0.1,
                              "prefix": "Workflow: load training masks images"
                          })

        cj.job.update(progress=56, statusComment="Workflow: build model...")
        unet = create_unet(dims, n_classes=cj.parameters.n_classes)
        unet.compile(optimizer=Adam(lr=cj.parameters.learning_rate),
                     loss='binary_crossentropy')

        cj.job.update(progress=60,
                      statusComment="Workflow: prepare training...")
        datagen = ImageDataGenerator(
            rotation_range=cj.parameters.aug_rotation,
            width_shift_range=cj.parameters.aug_width_shift,
            height_shift_range=cj.parameters.aug_height_shift,
            shear_range=cj.parameters.aug_shear_range,
            horizontal_flip=cj.parameters.aug_hflip,
            vertical_flip=cj.parameters.aug_vflip)

        weight_filepath = os.path.join(tmp_path, 'weights.hdf5')
        callbacks = [
            ModelCheckpoint(weight_filepath,
                            monitor='loss',
                            save_best_only=True)
        ]

        cj.job.update(progress=65, statusComment="Workflow: train...")
        unet.fit_generator(datagen.flow(imgs,
                                        masks,
                                        batch_size=cj.parameters.batch_size,
                                        seed=42),
                           steps_per_epoch=math.ceil(imgs.shape[0] /
                                                     cj.parameters.batch_size),
                           epochs=cj.parameters.epochs,
                           callbacks=callbacks)

        # save model and metadata
        cj.job.update(progress=85, statusComment="Save model...")
        AttachedFile(cj.job,
                     domainIdent=cj.job.id,
                     filename=weight_filepath,
                     domainClassName="be.cytomine.processing.Job").upload()

        cj.job.update(progress=90, statusComment="Save metadata...")
        Property(cj.job, key="image_width",
                 value=cj.parameters.image_width).save()
        Property(cj.job, key="image_height",
                 value=cj.parameters.image_height).save()
        Property(cj.job, key="n_channels",
                 value=cj.parameters.n_channels).save()
        Property(cj.job, key="train_mean", value=float(train_mean)).save()
        Property(cj.job, key="image_width", value=float(train_std)).save()

        cj.job.update(status=Job.TERMINATED,
                      progress=100,
                      statusComment="Finished.")
Code example #13
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # use only images from the current project
        cj.job.update(
            progress=1,
            statusComment="Preparing execution (creating folders,...).")

        # hardcode parameter for setup classify to fetch alphamask instead of plain crop.
        cj.parameters.cytomine_download_alpha = True
        cj.parameters.cytomine_id_projects = "{}".format(cj.project.id)
        cj.job.update(progress=2, statusComment="Downloading crops.")
        base_path, downloaded = setup_classify(args=cj.parameters,
                                               logger=cj.job_logger(2, 25),
                                               dest_pattern=os.path.join(
                                                   "{term}",
                                                   "{image}_{id}.png"),
                                               root_path=str("tmp"),
                                               set_folder="train",
                                               showTerm=True)

        x = np.array(
            [f for annotation in downloaded for f in annotation.filenames])
        y = np.array([
            int(os.path.basename(os.path.dirname(filepath))) for filepath in x
        ])

        # transform classes
        cj.job.update(progress=25, statusComment="Transform classes...")
        positive_terms = parse_domain_list(
            cj.parameters.cytomine_id_positive_terms)
        selected_terms = parse_domain_list(cj.parameters.cytomine_id_terms)
        is_binary = len(selected_terms) > 0 and len(positive_terms) > 0
        foreground_terms = np.unique(y) if len(
            selected_terms) == 0 else np.array(selected_terms)
        if len(positive_terms) == 0:
            classes = np.hstack((np.zeros((1, ), dtype=int), foreground_terms))
        else:  # binary
            foreground_terms = np.array(positive_terms)
            classes = np.array([0, 1])
            # cast to binary
            fg_idx = np.in1d(y, list(foreground_terms))
            bg_idx = np.in1d(
                y, list(set(selected_terms).difference(foreground_terms)))
            y[fg_idx] = 1
            y[bg_idx] = 0

        n_classes = classes.shape[0]

        # filter unwanted terms
        cj.logger.info("Size before filtering:")
        cj.logger.info(" - x: {}".format(x.shape))
        cj.logger.info(" - y: {}".format(y.shape))
        keep = np.in1d(y, classes)
        x, y = x[keep], y[keep]
        cj.logger.info("Size after filtering:")
        cj.logger.info(" - x: {}".format(x.shape))
        cj.logger.info(" - y: {}".format(y.shape))

        if x.shape[0] == 0:
            raise ValueError("No training data")

        if is_binary:
            # 0 (background) vs 1 (classes in foreground)
            cj.logger.info("Binary segmentation:")
            cj.logger.info("> class '0': background & terms {}".format(
                set(selected_terms).difference(positive_terms)))
            cj.logger.info("> class '1': {}".format(set(foreground_terms)))
        else:
            # 0 (background) vs 1 vs 2 vs ... vs n (n classes from cytomine_id_terms)
            cj.logger.info("Multi-class segmentation:")
            cj.logger.info("> background class '0'")
            cj.logger.info("> term classes: {}".format(set(foreground_terms)))

        # build model
        cj.job.update(progress=27, statusComment="Build model...")
        et, pyxit = build_models(
            n_subwindows=cj.parameters.pyxit_n_subwindows,
            min_size=cj.parameters.pyxit_min_size,
            max_size=cj.parameters.pyxit_max_size,
            target_width=cj.parameters.pyxit_target_width,
            target_height=cj.parameters.pyxit_target_height,
            interpolation=cj.parameters.pyxit_interpolation,
            transpose=cj.parameters.pyxit_transpose,
            colorspace=cj.parameters.pyxit_colorspace,
            fixed_size=cj.parameters.pyxit_fixed_size,
            verbose=int(cj.logger.level == 10),
            random_state=cj.parameters.seed,
            n_estimators=cj.parameters.forest_n_estimators,
            min_samples_split=cj.parameters.forest_min_samples_split,
            max_features=cj.parameters.forest_max_features,
            n_jobs=cj.parameters.n_jobs)

        # to extract the classes from the mask
        pyxit.get_output = _get_output_from_mask

        # extract subwindows manually to avoid incorrect class handling
        cj.job.update(progress=30, statusComment="Extract subwindows...")
        _x, _y = pyxit.extract_subwindows(x, y)

        actual_classes = np.unique(_y)
        if actual_classes.shape[0] != classes.shape[0]:
            raise ValueError(
                "Some classes are missing from the dataset: actual='{}', expected='{}'"
                .format(",".join(map(str, actual_classes)),
                        ",".join(map(str, classes))))

        cj.logger.info("Size of actual training data:")
        cj.logger.info(" - x   : {}".format(_x.shape))
        cj.logger.info(" - y   : {}".format(_y.shape))
        cj.logger.info(" - dist: {}".format(", ".join([
            "{}: {}".format(v, c)
            for v, c in zip(*np.unique(_y, return_counts=True))
        ])))

        cj.job.update(progress=60, statusComment="Train model...")
        # "re-implement" pyxit.fit to avoid incorrect class handling
        pyxit.classes_ = classes
        pyxit.n_classes_ = n_classes
        pyxit.base_estimator.fit(_x, _y)

        cj.job.update(progress=90, statusComment="Save model....")
        model_filename = joblib.dump(pyxit,
                                     os.path.join(base_path, "model.joblib"),
                                     compress=3)[0]

        AttachedFile(cj.job,
                     domainIdent=cj.job.id,
                     filename=model_filename,
                     domainClassName="be.cytomine.processing.Job").upload()

        Property(cj.job, key="classes", value=stringify(classes)).save()
        Property(cj.job, key="binary", value=is_binary).save()

        cj.job.update(status=Job.TERMINATED,
                      statusComment="Finished.",
                      progress=100)
Code Example #14
0
def load_property(job, property_name):
    # Fetch a single job property by key and return its value.
    prop = Property(job, key=property_name).fetch()
    return prop.value
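
A hedged usage sketch (not part of the original snippet): reading back, with load_property, the "classes" and "binary" job properties saved at the end of Code Example #13. The job identifier below is hypothetical.

from cytomine.models import Job

# Assumption: a finished training job that saved "classes" and "binary" properties (see above).
training_job = Job().fetch(id=12345)  # hypothetical job id
classes = load_property(training_job, "classes")
is_binary = load_property(training_job, "binary")
print(classes, is_binary)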
Code Example #15
0
    def run(self):
        self.super_admin = Cytomine.get_instance().current_user
        connect_as(self.super_admin, True)

        users = UserCollection().fetch()
        users_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("user-collection")
        ][0]
        remote_users = UserCollection()
        for u in json.load(open(os.path.join(self.working_path, users_json))):
            remote_users.append(User().populate(u))

        roles = ["project_manager", "project_contributor", "ontology_creator"]
        if self.with_images:
            roles += ["image_creator", "image_reviewer"]

        if self.with_userannotations:
            roles += ["userannotation_creator", "userannotationterm_creator"]

        roles = set(roles)
        remote_users = [
            u for u in remote_users
            if len(roles.intersection(set(u.roles))) > 0
        ]

        for remote_user in remote_users:
            user = find_first(
                [u for u in users if u.username == remote_user.username])
            if not user:
                user = copy.copy(remote_user)
                if not user.password:
                    user.password = random_string(8)
                if not self.with_original_date:
                    user.created = None
                    user.updated = None
                user.save()
            self.id_mapping[remote_user.id] = user.id

        # --------------------------------------------------------------------------------------------------------------
        logging.info("1/ Import ontology and terms")
        """
        Import the ontology with terms and relation terms that are stored in JSON files in working_path.
        If the ontology exists (same name and same terms), the existing one is used.
        Otherwise, an ontology with an available name is created with new terms and corresponding relationships.
        """
        ontologies = OntologyCollection().fetch()
        ontology_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("ontology")
        ][0]
        remote_ontology = Ontology().populate(
            json.load(open(os.path.join(self.working_path, ontology_json))))
        remote_ontology.name = remote_ontology.name.strip()

        terms = TermCollection().fetch()
        terms_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("term-collection")
        ]
        remote_terms = TermCollection()
        if len(terms_json) > 0:
            for t in json.load(
                    open(os.path.join(self.working_path, terms_json[0]))):
                remote_terms.append(Term().populate(t))

        def ontology_exists():
            compatible_ontology = find_first([
                o for o in ontologies
                if o.name == remote_ontology.name.strip()
            ])
            if compatible_ontology:
                set1 = set((t.name, t.color) for t in terms
                           if t.ontology == compatible_ontology.id)
                difference = [
                    term for term in remote_terms
                    if (term.name, term.color) not in set1
                ]
                if len(difference) == 0:
                    return True, compatible_ontology
                return False, None
            else:
                return True, None

        i = 1
        remote_name = remote_ontology.name
        found, existing_ontology = ontology_exists()
        while not found:
            remote_ontology.name = "{} ({})".format(remote_name, i)
            found, existing_ontology = ontology_exists()
            i += 1

        # SWITCH to ontology creator user
        connect_as(User().fetch(self.id_mapping[remote_ontology.user]))
        if not existing_ontology:
            ontology = copy.copy(remote_ontology)
            ontology.user = self.id_mapping[remote_ontology.user]
            if not self.with_original_date:
                ontology.created = None
                ontology.updated = None
            ontology.save()
            self.id_mapping[remote_ontology.id] = ontology.id
            logging.info("Ontology imported: {}".format(ontology))

            for remote_term in remote_terms:
                logging.info("Importing term: {}".format(remote_term))
                term = copy.copy(remote_term)
                term.ontology = self.id_mapping[term.ontology]
                term.parent = None
                if not self.with_original_date:
                    term.created = None
                    term.updated = None
                term.save()
                self.id_mapping[remote_term.id] = term.id
                logging.info("Term imported: {}".format(term))

            remote_relation_terms = [(term.parent, term.id)
                                     for term in remote_terms]
            for relation in remote_relation_terms:
                parent, child = relation
                if parent:
                    rt = RelationTerm(self.id_mapping[parent],
                                      self.id_mapping[child]).save()
                    logging.info("Relation term imported: {}".format(rt))
        else:
            self.id_mapping[remote_ontology.id] = existing_ontology.id

            ontology_terms = [
                t for t in terms if t.ontology == existing_ontology.id
            ]
            for remote_term in remote_terms:
                self.id_mapping[remote_term.id] = find_first([
                    t for t in ontology_terms if t.name == remote_term.name
                ]).id

            logging.info(
                "Ontology already encoded: {}".format(existing_ontology))

        # SWITCH USER
        connect_as(self.super_admin, True)

        # --------------------------------------------------------------------------------------------------------------
        logging.info("2/ Import project")
        """
        Import the project (i.e. the Cytomine Project domain) stored in a JSON file in working_path.
        If a project with the same name already exists, append a (x) suffix where x is an increasing number.
        """
        projects = ProjectCollection().fetch()
        project_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("project")
        ][0]
        remote_project = Project().populate(
            json.load(open(os.path.join(self.working_path, project_json))))
        remote_project.name = remote_project.name.strip()

        def available_name():
            i = 1
            existing_names = [o.name for o in projects]
            new_name = project.name
            while new_name in existing_names:
                new_name = "{} ({})".format(project.name, i)
                i += 1
            return new_name

        project = copy.copy(remote_project)
        project.name = available_name()
        project.discipline = None
        project.ontology = self.id_mapping[project.ontology]
        project_contributors = [
            u for u in remote_users if "project_contributor" in u.roles
        ]
        project.users = [self.id_mapping[u.id] for u in project_contributors]
        project_managers = [
            u for u in remote_users if "project_manager" in u.roles
        ]
        project.admins = [self.id_mapping[u.id] for u in project_managers]
        if not self.with_original_date:
            project.created = None
            project.updated = None
        project.save()
        self.id_mapping[remote_project.id] = project.id
        logging.info("Project imported: {}".format(project))

        # --------------------------------------------------------------------------------------------------------------
        logging.info("3/ Import images")
        storages = StorageCollection().fetch()
        abstract_images = AbstractImageCollection().fetch()
        images_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("imageinstance-collection")
        ]
        remote_images = ImageInstanceCollection()
        if len(images_json) > 0:
            for i in json.load(
                    open(os.path.join(self.working_path, images_json[0]))):
                remote_images.append(ImageInstance().populate(i))

        remote_images_dict = {}

        for remote_image in remote_images:
            image = copy.copy(remote_image)

            # Fix old image name: strip non-ASCII characters (urllib3 limitation)
            remote_image.originalFilename = bytes(
                remote_image.originalFilename,
                'utf-8').decode('ascii', 'ignore')
            if remote_image.originalFilename not in remote_images_dict.keys():
                remote_images_dict[remote_image.originalFilename] = [
                    remote_image
                ]
            else:
                remote_images_dict[remote_image.originalFilename].append(
                    remote_image)
            logging.info("Importing image: {}".format(remote_image))

            # SWITCH user to image creator user
            connect_as(User().fetch(self.id_mapping[remote_image.user]))
            # Get its storage
            storage = find_first([
                s for s in storages
                if s.user == Cytomine.get_instance().current_user.id
            ])
            if not storage:
                storage = storages[0]

            # Check if image is already in its storage
            abstract_image = find_first([
                ai for ai in abstract_images
                if ai.originalFilename == remote_image.originalFilename and
                ai.width == remote_image.width and ai.height == remote_image.
                height and ai.resolution == remote_image.resolution
            ])
            if abstract_image:
                logging.info(
                    "== Found corresponding abstract image. Linking to project."
                )
                ImageInstance(abstract_image.id,
                              self.id_mapping[remote_project.id]).save()
            else:
                logging.info("== New image starting to upload & deploy")
                filename = os.path.join(
                    self.working_path, "images",
                    image.originalFilename.replace("/", "-"))
                Cytomine.get_instance().upload_image(
                    self.host_upload, filename, storage.id,
                    self.id_mapping[remote_project.id])
                time.sleep(0.8)

            # SWITCH USER
            connect_as(self.super_admin, True)

        # Waiting for all images...
        n_new_images = -1
        new_images = None
        count = 0
        while n_new_images != len(
                remote_images) and count < len(remote_images) * 5:
            new_images = ImageInstanceCollection().fetch_with_filter(
                "project", self.id_mapping[remote_project.id])
            n_new_images = len(new_images)
            if count > 0:
                time.sleep(5)
            count = count + 1
        print("All images have been deployed. Fixing image-instances...")

        # Fix image instances meta-data:
        for new_image in new_images:
            remote_image = remote_images_dict[new_image.originalFilename].pop()
            if self.with_original_date:
                new_image.created = remote_image.created
                new_image.updated = remote_image.updated
            new_image.reviewStart = remote_image.reviewStart if hasattr(
                remote_image, 'reviewStart') else None
            new_image.reviewStop = remote_image.reviewStop if hasattr(
                remote_image, 'reviewStop') else None
            new_image.reviewUser = self.id_mapping[
                remote_image.reviewUser] if hasattr(
                    remote_image,
                    'reviewUser') and remote_image.reviewUser else None
            new_image.instanceFilename = remote_image.instanceFilename
            new_image.update()
            self.id_mapping[remote_image.id] = new_image.id
            self.id_mapping[remote_image.baseImage] = new_image.baseImage

            new_abstract = AbstractImage().fetch(new_image.baseImage)
            if self.with_original_date:
                new_abstract.created = remote_image.created
                new_abstract.updated = remote_image.updated
            if new_abstract.resolution is None:
                new_abstract.resolution = remote_image.resolution
            if new_abstract.magnification is None:
                new_abstract.magnification = remote_image.magnification
            new_abstract.update()

        print("All image-instances have been fixed.")

        # --------------------------------------------------------------------------------------------------------------
        logging.info("4/ Import user annotations")
        annots_json = [
            f for f in os.listdir(self.working_path) if f.endswith(".json")
            and f.startswith("user-annotation-collection")
        ]
        remote_annots = AnnotationCollection()
        if len(annots_json) > 0:
            for a in json.load(
                    open(os.path.join(self.working_path, annots_json[0]))):
                remote_annots.append(Annotation().populate(a))

        def _add_annotation(remote_annotation, id_mapping, with_original_date):
            if remote_annotation.project not in id_mapping.keys() \
                    or remote_annotation.image not in id_mapping.keys():
                return

            annotation = copy.copy(remote_annotation)
            annotation.project = id_mapping[remote_annotation.project]
            annotation.image = id_mapping[remote_annotation.image]
            annotation.user = id_mapping[remote_annotation.user]
            annotation.term = [id_mapping[t] for t in remote_annotation.term]
            if not with_original_date:
                annotation.created = None
                annotation.updated = None
            annotation.save()

        for user in [
                u for u in remote_users if "userannotation_creator" in u.roles
        ]:
            remote_annots_for_user = [
                a for a in remote_annots if a.user == user.id
            ]
            # SWITCH to annotation creator user
            connect_as(User().fetch(self.id_mapping[user.id]))
            Parallel(n_jobs=-1, backend="threading")(
                delayed(_add_annotation)(remote_annotation, self.id_mapping,
                                         self.with_original_date)
                for remote_annotation in remote_annots_for_user)

            # SWITCH back to admin
            connect_as(self.super_admin, True)

        # --------------------------------------------------------------------------------------------------------------
        logging.info(
            "5/ Import metadata (properties, attached files, description)")
        obj = Model()
        obj.id = -1
        obj.class_ = ""

        properties_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("properties")
        ]
        for property_json in properties_json:
            for remote_prop in json.load(
                    open(os.path.join(self.working_path, property_json))):
                prop = Property(obj).populate(remote_prop)
                prop.domainIdent = self.id_mapping[prop.domainIdent]
                prop.save()

        attached_files_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("attached-files")
        ]
        for attached_file_json in attached_files_json:
            for remote_af in json.load(
                    open(os.path.join(self.working_path, attached_file_json))):
                af = AttachedFile(obj).populate(remote_af)
                af.domainIdent = self.id_mapping[af.domainIdent]
                af.filename = os.path.join(self.working_path, "attached_files",
                                           remote_af.filename)
                af.save()

        descriptions_json = [
            f for f in os.listdir(self.working_path)
            if f.endswith(".json") and f.startswith("description")
        ]
        for description_json in descriptions_json:
            desc = Description(obj).populate(
                json.load(
                    open(os.path.join(self.working_path, description_json))))
            desc.domainIdent = self.id_mapping[desc.domainIdent]
            desc._object.class_ = desc.domainClassName
            desc._object.id = desc.domainIdent
            desc.save()
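
The run() method above calls a find_first helper that is not shown in this excerpt. A minimal sketch consistent with how it is used (return the first element of a list, or None when the list is empty) could be:

def find_first(items):
    # Return the first element of a possibly empty list, or None when there is none.
    return items[0] if len(items) > 0 else None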
Code Example #16
0
        annotations.showGIS = True
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            print(
                "ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}"
                .format(annotation.id, annotation.image, annotation.project,
                        annotation.term, annotation.user, annotation.area,
                        annotation.perimeter, annotation.location))

            annot = Annotation().fetch(annotation.id)
            # All properties (collection) of the annotation
            properties = PropertyCollection(annot).fetch()
            # A property of the annotation with a specific key
            propert = Property(annot).fetch(key="ANNOTATION_GROUP_ID")

            image_id = str(annotation.image)

            if image_id in id2info:

                tissue, dye = id2info[image_id]

                path_patch = os.path.join(params.download_path,
                                          str(params.size), tissue, dye,
                                          str(propert.value) + ".jpg")

                if params.download_path and not os.path.exists(path_patch):
                    # default size is 300x300
                    annotation.dump(dest_pattern=path_patch,
                                    increase_area=params.size / 100)
Code Example #17
0
def main():
	with NeubiasJob.from_cli(sys.argv) as conn:
		problem_cls = get_discipline(conn, default=CLASS_LNDDET)
		conn.job.update(progress=0, status=Job.RUNNING, statusComment="Initialization of the training phase...")
		in_images, gt_images, in_path, gt_path, out_path, tmp_path = prepare_data(problem_cls, conn, is_2d=True, **conn.flags)

		tmax = 1
		for f in os.listdir(gt_path):
			if f.endswith('.tif'):
				gt_img = imageio.imread(os.path.join(gt_path, f))
				tmax = np.max(gt_img)
				break

		term_list = range(1, tmax + 1)
		tr_im = [int(id_im) for id_im in conn.parameters.cytomine_training_images.split(',')]
		(xc, yc, xr, yr) = get_neubias_coords(gt_path, tr_im)
		(nims, nldms) = xc.shape
		Xc = np.zeros((nims, len(term_list)))
		Yc = np.zeros(Xc.shape)
		for id_term in term_list:
			Xc[:, id_term - 1] = xc[:, id_term - 1]
			Yc[:, id_term - 1] = yc[:, id_term - 1]
		conn.job.update(progress=10, status=Job.RUNNING, statusComment="Building model for phase 1")
		(dataset, rep, img, feature_offsets_1) = build_phase_1_model(in_path, image_ids=tr_im, n_jobs=conn.parameters.model_njobs, F=conn.parameters.model_F_P1, R=conn.parameters.model_R_P1, sigma=conn.parameters.model_sigma, delta=conn.parameters.model_delta, P=conn.parameters.model_P, X=Xc, Y=Yc)
		clf = SeparateTrees(n_estimators=int(conn.parameters.model_NT_P1), n_jobs=int(conn.parameters.model_njobs))
		clf = clf.fit(dataset, rep)
		model_filename = joblib.dump(clf, os.path.join(out_path, 'model_phase1.joblib'), compress=3)[0]
		AttachedFile(
			conn.job,
			domainIdent=conn.job.id,
			filename=model_filename,
			domainClassName="be.cytomine.processing.Job"
		).upload()

		model_filename = joblib.dump((Xc, Yc), os.path.join(out_path, 'coords.joblib'), compress=3)[0]
		AttachedFile(
			conn.job,
			domainIdent=conn.job.id,
			filename=model_filename,
			domainClassName="be.cytomine.processing.Job"
		).upload()

		model_filename = joblib.dump(feature_offsets_1, os.path.join(out_path, 'offsets_phase1.joblib'), compress=3)[0]
		AttachedFile(
			conn.job,
			domainIdent=conn.job.id,
			filename=model_filename,
			domainClassName="be.cytomine.processing.Job"
		).upload()

		for id_term in conn.monitor(term_list, start=20, end=80, period=0.05,prefix="Visual model building for terms..."):
			(dataset, rep, number, feature_offsets_2) = build_phase_2_model(in_path, image_ids=tr_im, n_jobs=conn.parameters.model_njobs, NT=conn.parameters.model_NT_P2, F=conn.parameters.model_F_P2, R=conn.parameters.model_R_P2, N=conn.parameters.model_ns_P2, sigma=conn.parameters.model_sigma, delta=conn.parameters.model_delta, Xc = Xc[:, id_term-1], Yc = Yc[:, id_term-1])
			reg = SeparateTreesRegressor(n_estimators=int(conn.parameters.model_NT_P2), n_jobs=int(conn.parameters.model_njobs))
			reg.fit(dataset, rep)
			model_filename = joblib.dump(reg, os.path.join(out_path, 'reg_%d_phase2.joblib'%id_term), compress=3)[0]
			AttachedFile(
				conn.job,
				domainIdent=conn.job.id,
				filename=model_filename,
				domainClassName="be.cytomine.processing.Job"
			).upload()
			model_filename = joblib.dump(feature_offsets_2, os.path.join(out_path, 'offsets_%d_phase2.joblib' % id_term), compress=3)[0]
			AttachedFile(
				conn.job,
				domainIdent=conn.job.id,
				filename=model_filename,
				domainClassName="be.cytomine.processing.Job"
			).upload()

		conn.job.update(progress=90, status=Job.RUNNING, statusComment="Building model for phase 3")
		edges = build_edgematrix_phase_3(Xc, Yc, conn.parameters.model_sde, conn.parameters.model_delta, conn.parameters.model_T)
		model_filename = joblib.dump(edges, os.path.join(out_path, 'model_edges.joblib'), compress=3)[0]
		AttachedFile(
			conn.job,
			domainIdent=conn.job.id,
			filename=model_filename,
			domainClassName="be.cytomine.processing.Job"
		).upload()

		sfinal = ""
		for id_term in term_list:
			sfinal += "%d " % id_term
		sfinal = sfinal.rstrip(' ')
		Property(conn.job, key="id_terms", value=sfinal.rstrip(" ")).save()
		conn.job.update(progress=100, status=Job.TERMINATED, statusComment="Job terminated.")
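
A hedged sketch (not part of the original script) of how a later prediction job might read back the space-separated "id_terms" property saved above; the training job identifier is hypothetical.

from cytomine.models import Job, Property

# Assumption: the training job above finished and saved the "id_terms" property.
train_job = Job().fetch(id=6789)  # hypothetical job id
id_terms = Property(train_job, key="id_terms").fetch()
term_list = [int(t) for t in id_terms.value.split(" ")]
print(term_list)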