Example #1
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        model_path = os.path.join(str(Path.home()), "models", "thyroid-unet")
        model_filepath = pick_model(model_path, job.parameters.tile_size,
                                    job.parameters.cytomine_zoom_level)
        device = torch.device(job.parameters.device)
        unet = Unet(job.parameters.init_fmaps, n_classes=1)
        unet.load_state_dict(torch.load(model_filepath, map_location=device))
        unet.to(device)
        unet.eval()

        segmenter = UNetSegmenter(device=job.parameters.device,
                                  unet=unet,
                                  classes=[0, 1],
                                  threshold=job.parameters.threshold)

        working_path = os.path.join(str(Path.home()), "tmp")
        tile_builder = CytomineTileBuilder(working_path)
        builder = SSLWorkflowBuilder()
        builder.set_n_jobs(1)
        builder.set_overlap(job.parameters.tile_overlap)
        builder.set_tile_size(job.parameters.tile_size,
                              job.parameters.tile_size)
        builder.set_tile_builder(tile_builder)
        builder.set_border_tiles(Workflow.BORDER_TILES_EXTEND)
        builder.set_background_class(0)
        builder.set_distance_tolerance(1)
        builder.set_seg_batch_size(job.parameters.batch_size)
        builder.set_segmenter(segmenter)
        workflow = builder.get()

        slide = CytomineSlide(img_instance=ImageInstance().fetch(
            job.parameters.cytomine_id_image),
                              zoom_level=job.parameters.cytomine_zoom_level)
        results = workflow.process(slide)

        print("-------------------------")
        print(len(results))
        print("-------------------------")

        collection = AnnotationCollection()
        for obj in results:
            wkt = shift_poly(obj.polygon,
                             slide,
                             zoom_level=job.parameters.cytomine_zoom_level).wkt
            collection.append(
                Annotation(location=wkt,
                           id_image=job.parameters.cytomine_id_image,
                           id_terms=[154005477],
                           id_project=job.project.id))
        collection.save(n_workers=job.parameters.n_jobs)

        return {}
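The helpers pick_model and shift_poly are not shown in this listing. A minimal sketch of shift_poly, assuming it rescales a polygon detected at zoom_level back to full resolution and flips the y-axis into Cytomine's bottom-left referential (the same transform some later examples apply inline):

# Hypothetical sketch -- shift_poly is an assumption, not the actual helper.
from shapely.affinity import affine_transform

def shift_poly(polygon, slide, zoom_level=0):
    scale = 2 ** zoom_level
    # rescale from the working zoom level back to zoom level 0
    polygon = affine_transform(polygon, [scale, 0, 0, scale, 0, 0])
    # flip the y-axis: image origin is top-left, Cytomine's is bottom-left
    return affine_transform(polygon,
                            [1, 0, 0, -1, 0, slide.image_instance.height])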
Example #2
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        if not os.path.exists(job.parameters.working_path):
            os.makedirs(job.parameters.working_path)

        # create workflow component
        logger = StandardOutputLogger(Logger.INFO)
        random_state = check_random_state(int(job.parameters.rseed))
        tile_builder = CytomineTileBuilder(
            working_path=job.parameters.working_path)
        segmenter = DemoSegmenter(job.parameters.threshold)
        area_rule = ValidAreaRule(job.parameters.min_area)
        classifier = PyxitClassifierAdapter.build_from_pickle(
            job.parameters.pyxit_model_path,
            tile_builder,
            logger,
            random_state=random_state,
            n_jobs=job.parameters.n_jobs,
            working_path=job.parameters.working_path)

        builder = SLDCWorkflowBuilder()
        builder.set_n_jobs(job.parameters.n_jobs)
        builder.set_logger(logger)
        builder.set_overlap(job.parameters.sldc_tile_overlap)
        builder.set_tile_size(job.parameters.sldc_tile_width,
                              job.parameters.sldc_tile_height)
        builder.set_tile_builder(tile_builder)
        builder.set_segmenter(segmenter)
        builder.add_classifier(area_rule,
                               classifier,
                               dispatching_label="valid")
        workflow = builder.get()

        slide = CytomineSlide(job.parameters.cytomine_image_id)
        results = workflow.process(slide)

        # Upload results
        for polygon, label, proba, dispatch in results:
            if label is not None:
                # if image is a window, the polygon must be translated
                if isinstance(slide, ImageWindow):
                    polygon = translate(polygon, slide.abs_offset_x,
                                        slide.abs_offset_y)
                # upload the annotation
                polygon = affine_transform(
                    polygon, [1, 0, 0, -1, 0, slide.image_instance.height])
                annotation = Annotation(
                    location=polygon.wkt,
                    id_image=slide.image_instance.id).save()
                AlgoAnnotationTerm(id_annotation=annotation.id,
                                   id_term=label,
                                   rate=float(proba)).save()
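Cytomine stores annotation geometries in a cartesian referential whose origin (0, 0) is the bottom-left corner of the image, which is what the affine_transform(polygon, [1, 0, 0, -1, 0, height]) call above compensates for. A minimal check of that flip:

from shapely.geometry import Point
from shapely.affinity import affine_transform

point = Point(10, 30)  # image referential, origin at the top-left
flipped = affine_transform(point, [1, 0, 0, -1, 0, 1000])  # height = 1000
print(flipped)  # POINT (10 970): origin now at the bottom-left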
Example #3
def main():
    """
    Executes some code as a Cytomine Job

    Usage:
        # Execute run.py through your local python
        python run.py --cytomine_host 'localhost-core' --cytomine_public_key '9af03585-c162-464e-bbf9-9196ff084487' --cytomine_private_key 'fb14e576-c534-410d-8206-5e70b0d97d96' --cytomine_id_project 237 --cytomine_id_image_instance 1220 --cytomine_id_software 4882

        # Execute your image on your computer.
        # remember to first build your image: docker build -t cyto_soft-1 .
        docker run --gpus all -it --rm --network=host cyto_soft-1 --cytomine_host 'localhost-core' --cytomine_public_key '9af03585-c162-464e-bbf9-9196ff084487' --cytomine_private_key 'fb14e576-c534-410d-8206-5e70b0d97d96' --cytomine_id_project 237 --cytomine_id_image_instance 1220 --cytomine_id_software 4882

        # Execute your image on your computer sharing your project directory with your docker image
        # so that all changes made during development can be tested without re-building the image
        # remember to first build your image: docker build -t cyto_soft-1 .
        docker run --gpus all -it --rm --mount type=bind,source=/home/giussepi/Public/environments/cyto_soft-1/,target=/myapp,bind-propagation=private --network=host cyto_soft-1 --cytomine_host 'localhost-core' --cytomine_public_key '9af03585-c162-464e-bbf9-9196ff084487' --cytomine_private_key 'fb14e576-c534-410d-8206-5e70b0d97d96' --cytomine_id_project 237 --cytomine_id_image_instance 1220 --cytomine_id_software 4882
    """

    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine connection parameters
    parser.add_argument('--cytomine_host', dest='host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_project', dest='id_project',
                        help="The project from which we want the images")
    parser.add_argument('--cytomine_id_software', dest='id_software',
                        help="The software to be used to process the image")
    parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance',
                        help="The image to which the annotations will be added")

    params, _ = parser.parse_known_args(sys.argv[1:])

    with CytomineJob.from_cli(sys.argv[1:]) as cytomine:
        # Place your processing operations here
        print("Parameters received:")
        print('host: {}'.format(params.host))
        print('public_key: {}'.format(params.public_key))
        print('private_key: {}'.format(params.private_key))
        print('id_project: {}'.format(params.id_project))
        print('id_software: {}'.format(params.id_software))
        print('id_image_instance: {}'.format(params.id_image_instance))
        cytomine.job.update(statusComment="Finished.")
Example #4
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initializing...")

        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        tf_sess = tf.Session(config=tf_config)
        tf_sess.run(tf.global_variables_initializer())

        ray.init(num_cpus=os.cpu_count(), include_webui=False)

        cj.job.update(progress=1, statusComment="Fetching image...")
        image = cj.get_image_instance(cj.parameters.cytomine_id_image)
        image_path = os.path.join("/tmp", image.originalFilename)
        image.download(image_path)

        batch_size = cj.parameters.batch_size
        if batch_size is None:
            batch_size = os.cpu_count()

        slide_seg = SlideSegmentation(
            cj=cj,
            tf_sess=tf_sess,
            image_instance=image,
            image_path=image_path,
            batch_size=batch_size,
            num_slide_actor=cj.parameters.num_slide_actor,
            threshold=cj.parameters.threshold)
        predicted = slide_seg.predict()

        slide_seg.upload_annotation(
            predicted_data=predicted,
            project_id=cj.parameters.cytomine_id_project)

        # TODO: delete data saved on disk

        ray.shutdown()
        cj.job.update(status=Job.SUCCESS,
                      progress=100,
                      statusComment="Complete")
Example #5
def main(argv):
    # 0. Initialize Cytomine client and job
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialisation...")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path
        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(cj.job.id))
        in_path = os.path.join(working_path, "in")
        out_path = os.path.join(working_path, "out")
        gt_path = os.path.join(working_path, "ground_truth")
        tmp_path = os.path.join(working_path, "tmp")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)
            os.makedirs(out_path)
            os.makedirs(gt_path)
            os.makedirs(tmp_path)

        # 2. Download the images (first input, then ground truth image)
        cj.job.update(
            progress=1,
            statusComment="Downloading images (to {})...".format(in_path))
        image_instances = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)
        input_images = [
            i for i in image_instances if gt_suffix not in i.originalFilename
        ]
        gt_images = [
            i for i in image_instances if gt_suffix in i.originalFilename
        ]

        for input_image in input_images:
            input_image.download(os.path.join(in_path, "{id}.tif"))

        for gt_image in gt_images:
            related_name = gt_image.originalFilename.replace(gt_suffix, '')
            related_image = [
                i for i in input_images if related_name == i.originalFilename
            ]
            if len(related_image) == 1:
                gt_image.download(
                    os.path.join(gt_path,
                                 "{}.tif".format(related_image[0].id)))

        # 3. Call the image analysis workflow using the run script
        cj.job.update(progress=25, statusComment="Launching workflow...")

        # load data
        cj.job.update(progress=30, statusComment="Workflow: preparing data...")
        dims = (cj.parameters.image_height, cj.parameters.image_width,
                cj.parameters.n_channels)
        mask_dims = (dims[0], dims[1], cj.parameters.n_classes)

        # load input images
        imgs = load_data(
            cj, dims, in_path,
            start=35, end=45, period=0.1,
            prefix="Workflow: load training input images")
        train_mean = np.mean(imgs)
        train_std = np.std(imgs)
        imgs -= train_mean
        imgs /= train_std

        # load masks
        masks = load_data(cj,
                          mask_dims,
                          gt_path,
                          dtype=int,  # np.int was removed in NumPy 1.24
                          is_masks=True,
                          n_classes=cj.parameters.n_classes,
                          start=45, end=55, period=0.1,
                          prefix="Workflow: load training mask images")

        cj.job.update(progress=56, statusComment="Workflow: build model...")
        unet = create_unet(dims, n_classes=cj.parameters.n_classes)
        unet.compile(optimizer=Adam(lr=cj.parameters.learning_rate),
                     loss='binary_crossentropy')

        cj.job.update(progress=60,
                      statusComment="Workflow: prepare training...")
        datagen = ImageDataGenerator(
            rotation_range=cj.parameters.aug_rotation,
            width_shift_range=cj.parameters.aug_width_shift,
            height_shift_range=cj.parameters.aug_height_shift,
            shear_range=cj.parameters.aug_shear_range,
            horizontal_flip=cj.parameters.aug_hflip,
            vertical_flip=cj.parameters.aug_vflip)

        weight_filepath = os.path.join(tmp_path, 'weights.hdf5')
        callbacks = [
            ModelCheckpoint(weight_filepath,
                            monitor='loss',
                            save_best_only=True)
        ]

        cj.job.update(progress=65, statusComment="Workflow: train...")
        unet.fit_generator(datagen.flow(imgs,
                                        masks,
                                        batch_size=cj.parameters.batch_size,
                                        seed=42),
                           steps_per_epoch=math.ceil(imgs.shape[0] /
                                                     cj.parameters.batch_size),
                           epochs=cj.parameters.epochs,
                           callbacks=callbacks)

        # save model and metadata
        cj.job.update(progress=85, statusComment="Save model...")
        AttachedFile(cj.job,
                     domainIdent=cj.job.id,
                     filename=weight_filepath,
                     domainClassName="be.cytomine.processing.Job").upload()

        cj.job.update(progress=90, statusComment="Save metadata...")
        Property(cj.job, key="image_width",
                 value=cj.parameters.image_width).save()
        Property(cj.job, key="image_height",
                 value=cj.parameters.image_height).save()
        Property(cj.job, key="n_channels",
                 value=cj.parameters.n_channels).save()
        Property(cj.job, key="train_mean", value=float(train_mean)).save()
        Property(cj.job, key="image_width", value=float(train_std)).save()

        cj.job.update(status=Job.TERMINATED,
                      progress=100,
                      statusComment="Finished.")
Example #6
def main():
    with CytomineJob.from_cli(sys.argv) as conn:
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        status_comment="Initialization of the training phase")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path

        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(conn.job.id))
        in_path = os.path.join(working_path, "in/")
        in_txt = os.path.join(in_path, 'txt/')
        out_path = os.path.join(working_path, "out/")
        gt_path = os.path.join(working_path, "ground_truth/")
        tmp_path = os.path.join(working_path, "tmp/")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)
            os.makedirs(out_path)
            os.makedirs(gt_path)
            os.makedirs(tmp_path)
            os.makedirs(in_txt)
        # 2. Download the images (first input, then ground truth image)
        conn.job.update(
            progress=10,
            statusComment="Downloading images (to {})...".format(in_path))
        print(conn.parameters)
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        xpos = {}
        ypos = {}
        terms = {}

        for image in images:
            image.dump(dest_pattern=in_path.rstrip('/') + '/%d.%s' %
                       (image.id, 'jpg'))

            annotations = AnnotationCollection()
            annotations.project = conn.parameters.cytomine_id_project
            annotations.showWKT = True
            annotations.showMeta = True
            annotations.showGIS = True
            annotations.showTerm = True
            annotations.image = image.id
            annotations.fetch()

            for ann in annotations:
                loc = ann.location
                if 'POINT' not in loc:
                    # non-point annotation: use the centroid of the polygon
                    pol = shapely.wkt.loads(loc)
                    poi = pol.centroid
                else:
                    poi = shapely.wkt.loads(loc)
                (cx, cy) = poi.xy
                xpos[(ann.term[0], image.id)] = int(cx[0])
                ypos[(ann.term[0], image.id)] = image.height - int(cy[0])
                terms[ann.term[0]] = 1

        for image in images:
            with open(in_txt + '%d.txt' % image.id, 'w') as f:
                for t in terms.keys():
                    if (t, image.id) in xpos:
                        f.write('%d %d %d %f %f\n' %
                                (t, xpos[(t, image.id)], ypos[(t, image.id)],
                                 xpos[(t, image.id)] / float(image.width),
                                 ypos[(t, image.id)] / float(image.height)))

        depths = 1. / (2.**np.arange(conn.parameters.model_depth))

        (xc, yc, xr, yr, ims, t_to_i, i_to_t) = getallcoords(in_txt)

        if conn.parameters.cytomine_id_terms == 'all':
            term_list = t_to_i.keys()
        else:
            term_list = [
                int(term)
                for term in conn.parameters.cytomine_id_terms.split(',')
            ]

        if conn.parameters.cytomine_training_images == 'all':
            tr_im = ims
        else:
            tr_im = [
                int(id_im) for id_im in
                conn.parameters.cytomine_training_images.split(',')
            ]

        DATA = None
        REP = None
        be = 0

        #leprogres = 10
        #pr_spacing = 90/len(term_list)
        #print(term_list)
        sfinal = ""
        for id_term in conn.monitor(term_list,
                                    start=10,
                                    end=90,
                                    period=0.05,
                                    prefix="Model building for terms..."):
            sfinal += "%d " % id_term

            (xc, yc, xr, yr) = getcoordsim(in_txt, id_term, tr_im)
            nimages = np.max(xc.shape)
            mx = np.mean(xr)
            my = np.mean(yr)
            P = np.zeros((2, nimages))
            P[0, :] = xr
            P[1, :] = yr
            cm = np.cov(P)
            passe = False
            # additional parameters
            feature_parameters = None
            if conn.parameters.model_feature_type.lower() == 'gaussian':
                std_matrix = np.eye(2) * (
                    conn.parameters.model_feature_gaussian_std**2)
                feature_parameters = np.round(
                    np.random.multivariate_normal(
                        [0, 0], std_matrix,
                        conn.parameters.model_feature_gaussian_n)).astype(int)
            elif conn.parameters.model_feature_type.lower() == 'haar':
                W = conn.parameters.model_wsize
                # integer feature count per Haar type (this was integer
                # division under Python 2)
                n = conn.parameters.model_feature_haar_n // (
                    5 * conn.parameters.model_depth)
                h2 = generate_2_horizontal(W, n)
                v2 = generate_2_vertical(W, n)
                h3 = generate_3_horizontal(W, n)
                v3 = generate_3_vertical(W, n)
                sq = generate_square(W, n)
                feature_parameters = (h2, v2, h3, v3, sq)

            for times in range(conn.parameters.model_ntimes):
                if times == 0:
                    rangrange = 0
                else:
                    rangrange = conn.parameters.model_angle

                T = build_datasets_rot_mp(
                    in_path, tr_im, xc, yc, conn.parameters.model_R,
                    conn.parameters.model_RMAX, conn.parameters.model_P,
                    conn.parameters.model_step, rangrange,
                    conn.parameters.model_wsize,
                    conn.parameters.model_feature_type, feature_parameters,
                    depths, nimages, 'jpg', conn.parameters.model_njobs)
                for i in range(len(T)):
                    (data, rep, img) = T[i]
                    (height, width) = data.shape
                    if not passe:
                        passe = True
                        DATA = np.zeros((height * (len(T) + 100) *
                                         conn.parameters.model_ntimes, width))
                        REP = np.zeros(height * (len(T) + 100) *
                                       conn.parameters.model_ntimes)
                        b = 0
                        be = height
                    DATA[b:be, :] = data
                    REP[b:be] = rep
                    b = be
                    be = be + height

            REP = REP[0:b]
            DATA = DATA[0:b, :]

            clf = ExtraTreesClassifier(
                n_jobs=conn.parameters.model_njobs,
                n_estimators=conn.parameters.model_ntrees)
            clf = clf.fit(DATA, REP)

            parameters_hash = {
                'cytomine_id_terms': conn.parameters.cytomine_id_terms,
                'model_R': conn.parameters.model_R,
                'model_RMAX': conn.parameters.model_RMAX,
                'model_P': conn.parameters.model_P,
                'model_npred': conn.parameters.model_npred,
                'model_ntrees': conn.parameters.model_ntrees,
                'model_ntimes': conn.parameters.model_ntimes,
                'model_angle': conn.parameters.model_angle,
                'model_depth': conn.parameters.model_depth,
                'model_step': conn.parameters.model_step,
                'window_size': conn.parameters.model_wsize,
                'feature_type': conn.parameters.model_feature_type,
                'feature_haar_n': conn.parameters.model_feature_haar_n,
                'feature_gaussian_n': conn.parameters.model_feature_gaussian_n,
                'feature_gaussian_std': conn.parameters.model_feature_gaussian_std,
            }

            model_filename = joblib.dump(clf,
                                         os.path.join(
                                             out_path,
                                             '%d_model.joblib' % (id_term)),
                                         compress=3)[0]
            cov_filename = joblib.dump([mx, my, cm],
                                       os.path.join(
                                           out_path,
                                           '%d_cov.joblib' % (id_term)),
                                       compress=3)[0]
            parameter_filename = joblib.dump(
                parameters_hash,
                os.path.join(out_path, '%d_parameters.joblib' % id_term),
                compress=3)[0]
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=model_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=cov_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=parameter_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            if conn.parameters.model_feature_type in ('haar', 'gaussian'):
                add_filename = joblib.dump(
                    feature_parameters,
                    out_path.rstrip('/') + '/' + '%d_fparameters.joblib' %
                    (id_term))[0]
                AttachedFile(
                    conn.job,
                    domainIdent=conn.job.id,
                    filename=add_filename,
                    domainClassName="be.cytomine.processing.Job").upload()

        Property(conn.job, key="id_terms", value=sfinal.rstrip(" ")).save()
        conn.job.update(progress=100,
                        status=Job.TERMINATED,
                        statusComment="Job terminated.")
Example #7
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # annotation filtering
        cj.logger.info(str(cj.parameters))

        cj.job.update(progress=1, statusComment="Preparing execution (creating folders,...).")
        base_path, downloaded = setup_classify(
            args=cj.parameters, logger=cj.job_logger(1, 40),
            dest_pattern=os.path.join("{term}", "{image}_{id}.png"),
            root_path=Path.home(), set_folder="train", showTerm=True
        )

        x = np.array([f for annotation in downloaded for f in annotation.filenames])
        y = np.array([int(os.path.basename(os.path.dirname(filepath))) for filepath in x])

        # transform classes
        cj.job.update(progress=50, statusComment="Transform classes...")
        classes = parse_domain_list(cj.parameters.cytomine_id_terms)
        positive_classes = parse_domain_list(cj.parameters.cytomine_positive_terms)
        classes = np.array(classes) if len(classes) > 0 else np.unique(y)
        n_classes = classes.shape[0]

        # filter unwanted terms
        cj.logger.info("Size before filtering:")
        cj.logger.info(" - x: {}".format(x.shape))
        cj.logger.info(" - y: {}".format(y.shape))
        keep = np.in1d(y, classes)
        x, y = x[keep], y[keep]
        cj.logger.info("Size after filtering:")
        cj.logger.info(" - x: {}".format(x.shape))
        cj.logger.info(" - y: {}".format(y.shape))

        labels = np.array([int(os.path.basename(f).split("_", 1)[0]) for f in x])

        if cj.parameters.cytomine_binary:
            cj.logger.info("Will be training on 2 classes ({} classes before binarization).".format(n_classes))
            y = np.in1d(y, positive_classes).astype(int)
        else:
            cj.logger.info("Will be training on {} classes.".format(n_classes))
            y = np.searchsorted(classes, y)

        # build model
        random_state = check_random_state(cj.parameters.seed)
        cj.job.update(progress=55, statusComment="Build model...")
        _, pyxit = build_models(
            n_subwindows=cj.parameters.pyxit_n_subwindows,
            min_size=cj.parameters.pyxit_min_size,
            max_size=cj.parameters.pyxit_max_size,
            target_width=cj.parameters.pyxit_target_width,
            target_height=cj.parameters.pyxit_target_height,
            interpolation=cj.parameters.pyxit_interpolation,
            transpose=cj.parameters.pyxit_transpose,
            colorspace=cj.parameters.pyxit_colorspace,
            fixed_size=cj.parameters.pyxit_fixed_size,
            verbose=int(cj.logger.level == 10),
            create_svm=cj.parameters.svm,
            C=cj.parameters.svm_c,
            random_state=random_state,
            n_estimators=cj.parameters.forest_n_estimators,
            min_samples_split=cj.parameters.forest_min_samples_split,
            max_features=cj.parameters.forest_max_features,
            n_jobs=cj.parameters.n_jobs
        )

        cj.job.update(progress=60, statusComment="Start cross-validation...")
        n_splits = cj.parameters.eval_k
        cv = ShuffleSplit(n_splits, test_size=cj.parameters.eval_test_fraction)
        if cj.parameters.folds == "group":
            cv = GroupKFold(n_splits)
        elif cj.parameters.folds == "stratified":
            cv = StratifiedKFold(n_splits, shuffle=True, random_state=random_state)
        elif cj.parameters.folds != "shuffle":
            raise ValueError("Unknown folding policy '{}'.".format(cj.parameters.folds))

        # Fit
        accuracies = np.zeros(n_splits)
        test_sizes = np.zeros(n_splits)

        _x, _y = pyxit.extract_subwindows(x, y)

        # CV loop
        for i, (train, test) in cj.monitor(enumerate(cv.split(x, y, labels)), start=60, end=90, prefix="cross val. iteration"):
            _pyxit = clone(pyxit)
            w_train = window_indexes(x.shape[0], train, _pyxit.n_subwindows)
            w_test = window_indexes(x.shape[0], test, _pyxit.n_subwindows)
            _pyxit.fit(x[train], y[train], _X=_x[w_train], _y=_y[w_train])
            y_pred = _pyxit.predict(x[test], _x[w_test])
            accuracies[i] = accuracy_score(y[test], y_pred)
            test_sizes[i] = test.shape[0] / float(x.shape[0])
            del _pyxit

        pyxit.fit(x, y)

        accuracy = float(np.mean(test_sizes * accuracies))
        cj.job.update(progress=90, statusComment="Accuracy: {}".format(accuracy))
        cj.job.update(progress=90, statusComment="Save model...")

        model_filename = joblib.dump(pyxit, os.path.join(base_path, "model.joblib"), compress=3)[0]

        AttachedFile(
            cj.job,
            domainIdent=cj.job.id,
            filename=model_filename,
            domainClassName="be.cytomine.processing.Job"
        ).upload()

        Property(cj.job, key="classes", value=stringify(classes)).save()
        Property(cj.job, key="binary", value=cj.parameters.cytomine_binary).save()
        Property(cj.job, key="positive_classes", value=stringify(positive_classes)).save()
        Property(cj.job, key="accuracies", value=array2str(accuracies))
        Property(cj.job, key="test_sizes", value=array2str(test_sizes))
        Property(cj.job, key="accuracy", value=accuracy)

        cj.job.update(status=Job.TERMINATED, statusComment="Finish", progress=100)
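window_indexes is not shown. Since pyxit extracts a fixed number of subwindows per image and stores them contiguously, a plausible sketch is:

# Hypothetical sketch -- assumes image i owns subwindows [i*n, (i+1)*n).
import numpy as np

def window_indexes(n_images, image_indexes, n_subwindows):
    # n_images kept for signature compatibility with the calls above
    starts = np.asarray(image_indexes) * n_subwindows
    return np.concatenate([np.arange(s, s + n_subwindows) for s in starts])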
Example #8
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        # with Cytomine(argv) as conn:
        print(conn.parameters)

        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        statusComment="Initialization...")
        base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
        #           verbose=logging.INFO) as cytomine:

        # ontology = Ontology("classPNcells"+str(conn.parameters.cytomine_id_project)).save()
        # ontology_collection=OntologyCollection().fetch()
        # print(ontology_collection)
        # ontology = Ontology("CLASSPNCELLS").save()
        # terms = TermCollection().fetch_with_filter("ontology", ontology.id)
        terms = TermCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING,
                        progress=1,
                        statusComment="Terms collected...")
        print(terms)

        # term_P = Term("PositiveCell", ontology.id, "#FF0000").save()
        # term_N = Term("NegativeCell", ontology.id, "#00FF00").save()
        # term_P = Term("PositiveCell", ontology, "#FF0000").save()
        # term_N = Term("NegativeCell", ontology, "#00FF00").save()

        # Get all the terms of our ontology
        # terms = TermCollection().fetch_with_filter("ontology", ontology.id)
        # terms = TermCollection().fetch_with_filter("ontology", ontology)
        # print(terms)

        # #Loading pre-trained Stardist model
        # np.random.seed(17)
        # lbl_cmap = random_label_cmap()
        # #Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        # #Stardist H&E model downloaded from https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI
        # model = StarDist2D(None, name='2D_versatile_HE', basedir='/models/')   #use local model file in ~/models/2D_versatile_HE/

        #Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        conn.job.update(status=Job.RUNNING,
                        progress=2,
                        statusComment="Images gathered...")

        list_imgs = []
        if conn.parameters.cytomine_id_images == 'all':
            for image in images:
                list_imgs.append(int(image.id))
        else:
            list_imgs = [
                int(id_img)
                for id_img in conn.parameters.cytomine_id_images.split(',')
            ]
            print(list_imgs)

        #Go over images
        conn.job.update(status=Job.RUNNING,
                        progress=10,
                        statusComment="Running PN classification on image...")
        #for id_image in conn.monitor(list_imgs, prefix="Running PN classification on image", period=0.1):
        for id_image in list_imgs:

            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_cell_term
            roi_annotations.image = id_image  #conn.parameters.cytomine_id_image
            roi_annotations.job = conn.parameters.cytomine_id_annotation_job
            roi_annotations.user = conn.parameters.cytomine_id_user_job
            roi_annotations.showWKT = True
            roi_annotations.fetch()
            print(roi_annotations)

            #Go over ROI in this image
            #for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
            for roi in roi_annotations:
                #Get Cytomine ROI coordinates for remapping to whole-slide
                #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                print(
                    "----------------------------Cells------------------------------"
                )
                roi_geometry = wkt.loads(roi.location)
                # print("ROI Geometry from Shapely: {}".format(roi_geometry))
                #                 print("ROI Bounds")
                #                 print(roi_geometry.bounds)
                # shapely bounds are (minx, miny, maxx, maxy): index 3 is
                # maxy, taken because Cytomine's y-axis starts at the bottom
                minx = roi_geometry.bounds[0]
                miny = roi_geometry.bounds[3]
                #Dump ROI image into local PNG file
                # roi_path=os.path.join(working_path,str(roi_annotations.project)+'/'+str(roi_annotations.image)+'/'+str(roi.id))
                roi_path = os.path.join(
                    working_path,
                    str(roi_annotations.project) + '/' +
                    str(roi_annotations.image) + '/')
                #                 print(roi_path)
                roi_png_filename = os.path.join(roi_path + str(roi.id) +
                                                '.png')
                conn.job.update(status=Job.RUNNING,
                                progress=20,
                                statusComment=roi_png_filename)
                #                 print("roi_png_filename: %s" %roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, alpha=True)
                #roi.dump(dest_pattern=os.path.join(roi_path,"{id}.png"), mask=True, alpha=True)

                # im=Image.open(roi_png_filename)

                J = cv2.imread(roi_png_filename, cv2.IMREAD_UNCHANGED)
                J = cv2.cvtColor(J, cv2.COLOR_BGRA2RGBA)
                [r, c, h] = J.shape
                # print("J: ",J)

                if r < c:
                    blocksize = r
                else:
                    blocksize = c
                # print("blocksize:",blocksize)
                rr = np.zeros((blocksize, blocksize))
                cc = np.zeros((blocksize, blocksize))

                zz = [*range(1, blocksize + 1)]
                # print("zz:", zz)
                for i in zz:
                    rr[i - 1, :] = zz
                # print("rr shape:",rr.shape)

                zz = [*range(1, blocksize + 1)]
                for i in zz:
                    cc[:, i - 1] = zz
                # print("cc shape:",cc.shape)

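                # rr/cc hold 1-based row/column indexes; the lines below turn
                # them into a radial map: the distance of every pixel from the
                # fixed point (16.5, 16.5) (presumably the centre of a 32x32
                # block), so that weight2 = 1/distance favours pixels close to
                # that point when averaging hue and value further down.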
                cc1 = np.asarray(cc) - 16.5
                rr1 = np.asarray(rr) - 16.5
                cc2 = np.asarray(cc1)**2
                rr2 = np.asarray(rr1)**2
                rrcc = np.asarray(cc2) + np.asarray(rr2)

                weight = np.sqrt(rrcc)
                # print("weight: ",weight)
                weight2 = 1. / weight
                # print("weight2: ",weight2)
                #                 print("weight2 shape:",weight2.shape)
                coord = [c / 2, r / 2]
                halfblocksize = blocksize / 2

                y = round(coord[1])
                x = round(coord[0])

                # Convert the RGB image to HSV
                Jalpha = J[:, :, 3]
                Jalphaloc = Jalpha / 255
                Jrgb = cv2.cvtColor(J, cv2.COLOR_RGBA2RGB)
                Jhsv = cv2.cvtColor(Jrgb, cv2.COLOR_RGB2HSV_FULL)
                Jhsv = Jhsv / 255
                Jhsv[:, :, 0] = Jhsv[:, :, 0] * Jalphaloc
                Jhsv[:, :, 1] = Jhsv[:, :, 1] * Jalphaloc
                Jhsv[:, :, 2] = Jhsv[:, :, 2] * Jalphaloc
                # print("Jhsv: ",Jhsv)

                # print("Jhsv size:",Jhsv.shape)
                # print("Jhsv class:",Jhsv.dtype)

                currentblock = Jhsv[0:blocksize, 0:blocksize, :]
                # print("currentblock: ",currentblock)
                #                 print(currentblock.dtype)
                currentblockH = currentblock[:, :, 0]
                currentblockV = 1 - currentblock[:, :, 2]
                hue = sum(sum(currentblockH * weight2))
                val = sum(sum(currentblockV * weight2))
                #                 print("hue:", hue)
                #                 print("val:", val)

                if hue < 2:
                    cellclass = 1
                elif val < 15:
                    cellclass = 2
                else:
                    if hue < 30 or val > 40:
                        cellclass = 1
                    else:
                        cellclass = 2

                # tags = TagCollection().fetch()
                # tags = TagCollection()
                # print(tags)

                if cellclass == 1:
                    #                     print("Positive (H: ", str(hue), ", V: ", str(val), ")")
                    id_terms = conn.parameters.cytomine_id_positive_term
                    # tag = Tag("Positive (H: ", str(hue), ", V: ", str(val), ")").save()
                    # print(tag)
                    # id_terms=Term("PositiveCell", ontology.id, "#FF0000").save()
                elif cellclass == 2:
                    #                     print("Negative (H: ", str(hue), ", V: ", str(val), ")")
                    id_terms = conn.parameters.cytomine_id_negative_term
                    # for t in tags:
                    # tag = Tag("Negative (H: ", str(hue), ", V: ", str(val), ")").save()
                    # print(tag)
                    # id_terms=Term("NegativeCell", ontology.id, "#00FF00").save()

                    # First we create the required resources

                cytomine_annotations = AnnotationCollection()
                # property_collection = PropertyCollection(uri()).fetch("annotation",id_image)
                # property_collection = PropertyCollection().uri()
                # print(property_collection)
                # print(cytomine_annotations)

                # property_collection.append(Property(Annotation().fetch(id_image), key="Hue", value=str(hue)))
                # property_collection.append(Property(Annotation().fetch(id_image), key="Val", value=str(val)))
                # property_collection.save()

                # prop1 = Property(Annotation().fetch(id_image), key="Hue", value=str(hue)).save()
                # prop2 = Property(Annotation().fetch(id_image), key="Val", value=str(val)).save()

                # prop1.Property(Annotation().fetch(id_image), key="Hue", value=str(hue)).save()
                # prop2.Property(Annotation().fetch(id_image), key="Val", value=str(val)).save()

                # for pos, polygroup in enumerate(roi_geometry,start=1):
                #     points=list()
                #     for i in range(len(polygroup[0])):
                #         p=Point(minx+polygroup[1][i],miny-polygroup[0][i])
                #         points.append(p)

                annotation = roi_geometry

                # tags.append(TagDomainAssociation(Annotation().fetch(id_image, tag.id))).save()

                # association = append(TagDomainAssociation(Annotation().fetch(id_image, tag.id))).save()
                # print(association)

                cytomine_annotations.append(
                    Annotation(
                        location=annotation.wkt,  #location=roi_geometry,
                        id_image=id_image,  #conn.parameters.cytomine_id_image,
                        id_project=conn.parameters.cytomine_id_project,
                        id_terms=[id_terms]))
                print(".", end='', flush=True)

                #Send Annotation Collection (for this ROI) to Cytomine server in one http request
                ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED,
                        progress=100,
                        statusComment="Finished.")
Example #9
def run(argv):
    # CytomineJob.from_cli() uses the descriptor.json to automatically create the ArgumentParser
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(statusComment="Initialization...")
        id_project = cj.parameters.cytomine_id_project
        id_terms = cj.parameters.cytomine_id_terms
        id_tags_for_images = cj.parameters.cytomine_id_tags_for_images
        working_path = cj.parameters.working_path

        terms = TermCollection().fetch_with_filter("project", id_project)
        if id_terms:
            filtered_term_ids = [
                int(id_term) for id_term in id_terms.split(',')
            ]
            filtered_terms = TermCollection()
            for term in terms:
                if term.id in filtered_term_ids:
                    filtered_terms.append(term)
        else:
            filtered_terms = terms

        # Associate YOLO class index to Cytomine term
        classes_filename = os.path.join(working_path, CLASSES_FILENAME)
        with open(classes_filename, 'r') as f:
            classes = f.readlines()
            indexes_terms = {}
            for i, _class in enumerate(classes):
                _class = _class.strip()
                indexes_terms[i] = filtered_terms.find_by_attribute(
                    "name", _class)

        cj.job.update(statusComment="Open model...", progress=1)
        # TODO...

        cj.job.update(statusComment="Predictions...", progress=5)
        images = ImageInstanceCollection(
            tags=id_tags_for_images).fetch_with_filter("project", id_project)
        for image in images:
            print("Prediction for image {}".format(image.instanceFilename))
            # TODO: get predictions from YOLO
            # TODO: I suppose here for the sake of the demo that the output format is the same as input, which is not sure
            # <class> <x_center> <y_center> <width> <height> <proba>
            sample_predictions = [(0, 0.604000000000, 0.493846153846,
                                   0.105600000000, 0.461538461538, 0.9),
                                  (0, 0.409200000000, 0.606153846154,
                                   0.050400000000, 0.095384615385, 0.5)]

            ac = AnnotationCollection()
            for pred in sample_predictions:
                _class, xcenter, ycenter, width, height, proba = pred
                term_ids = ([indexes_terms[_class].id]
                            if _class in indexes_terms else None)
                if term_ids is None:
                    print("No term found for class {}".format(_class))
                geometry = yolo_to_geometry((xcenter, ycenter, width, height),
                                            image.width, image.height)
                properties = [{"key": "probability", "value": proba}]
                ac.append(
                    Annotation(id_image=image.id,
                               id_terms=term_ids,
                               location=geometry.wkt,
                               properties=properties))

            ac.save()

        cj.job.update(statusComment="Finished", progress=100)
Example #10
def main(argv):
    print(argv)
    with CytomineJob.from_cli(argv) as cj:

        images = ImageInstanceCollection().fetch_with_filter("project", cj.parameters.cytomine_id_project)
        for image in cj.monitor(images, prefix="Running detection on image", period=0.1):
            # Resize image if needed
            resize_ratio = max(image.width, image.height) / cj.parameters.max_image_size
            if resize_ratio < 1:
                resize_ratio = 1

            resized_width = int(image.width / resize_ratio)
            resized_height = int(image.height / resize_ratio)

            image.dump(dest_pattern="/tmp/{id}.jpg", max_size=max(resized_width, resized_height), bits=image.bitDepth)
            img = cv2.imread(image.filename, cv2.IMREAD_GRAYSCALE)

            max_value = 2 ** image.bitDepth - 1  # e.g. 255 for an 8-bit image
            thresholded_img = cv2.adaptiveThreshold(img, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                    cv2.THRESH_BINARY, cj.parameters.threshold_blocksize,
                                                    cj.parameters.threshold_constant)

            kernel = np.ones((5, 5), np.uint8)
            eroded_img = cv2.erode(thresholded_img, kernel, iterations=cj.parameters.erode_iterations)
            dilated_img = cv2.dilate(eroded_img, kernel, iterations=cj.parameters.dilate_iterations)

            extension = 10
            extended_img = cv2.copyMakeBorder(dilated_img, extension, extension, extension, extension,
                                              cv2.BORDER_CONSTANT, value=max_value)

            components = find_components(extended_img)
            zoom_factor = image.width / float(resized_width)
            for i, component in enumerate(components):
                converted = []
                for point in component[0]:
                    x = int((point[0] - extension) * zoom_factor)
                    y = int(image.height - ((point[1] - extension) * zoom_factor))
                    converted.append((x, y))

                components[i] = Polygon(converted)

            # Find largest component (whole image)
            largest = max(components, key=attrgetter('area'))
            components.remove(largest)

            # Only keep components greater than 5% of whole image
            min_area = int(0.05 * image.width * image.height)

            annotations = AnnotationCollection()
            for component in components:
                if component.area > min_area:
                    annotations.append(Annotation(location=component.wkt, id_image=image.id,
                                                  id_terms=[cj.parameters.cytomine_id_predicted_term],
                                                  id_project=cj.parameters.cytomine_id_project))

                    if len(annotations) % 100 == 0:
                        annotations.save()
                        annotations = AnnotationCollection()

            annotations.save()

        cj.job.update(statusComment="Finished.")
Example #11
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # use only images from the current project
        cj.job.update(progress=1, statusComment="Preparing execution")

        # extract images to process
        if cj.parameters.cytomine_zoom_level > 0 and (
                cj.parameters.cytomine_tile_size != 256
                or cj.parameters.cytomine_tile_overlap != 0):
            raise ValueError(
                "when using zoom_level > 0, tile size should be 256 "
                "(given {}) and overlap should be 0 (given {})".format(
                    cj.parameters.cytomine_tile_size,
                    cj.parameters.cytomine_tile_overlap))

        cj.job.update(
            progress=1,
            statusComment="Preparing execution (creating folders,...).")
        # working path
        root_path = str(Path.home())
        working_path = os.path.join(root_path, "images")
        os.makedirs(working_path, exist_ok=True)

        # load training information
        cj.job.update(progress=5,
                      statusComment="Extract properties from training job.")
        train_job = Job().fetch(cj.parameters.cytomine_id_job)
        properties = PropertyCollection(train_job).fetch().as_dict()
        binary = str2bool(properties["binary"].value)
        classes = parse_domain_list(properties["classes"].value)

        cj.job.update(progress=10, statusComment="Download the model file.")
        attached_files = AttachedFileCollection(train_job).fetch()
        model_file = attached_files.find_by_attribute("filename",
                                                      "model.joblib")
        model_filepath = os.path.join(root_path, "model.joblib")
        model_file.download(model_filepath, override=True)
        pyxit = joblib.load(model_filepath)

        # set n_jobs
        pyxit.base_estimator.n_jobs = cj.parameters.n_jobs
        pyxit.n_jobs = cj.parameters.n_jobs

        cj.job.update(progress=45, statusComment="Build workflow.")
        builder = SSLWorkflowBuilder()
        builder.set_tile_size(cj.parameters.cytomine_tile_size,
                              cj.parameters.cytomine_tile_size)
        builder.set_overlap(cj.parameters.cytomine_tile_overlap)
        builder.set_tile_builder(
            CytomineTileBuilder(working_path, n_jobs=cj.parameters.n_jobs))
        builder.set_logger(StandardOutputLogger(level=Logger.INFO))
        builder.set_n_jobs(1)
        builder.set_background_class(0)
        # value 0 will prevent merging but still requires to run the merging check
        # procedure (inefficient)
        builder.set_distance_tolerance(2 if cj.parameters.union_enabled else 0)
        builder.set_segmenter(
            ExtraTreesSegmenter(
                pyxit=pyxit,
                classes=classes,
                prediction_step=cj.parameters.pyxit_prediction_step,
                background=0,
                min_std=cj.parameters.tile_filter_min_stddev,
                max_mean=cj.parameters.tile_filter_max_mean))
        workflow = builder.get()

        area_checker = AnnotationAreaChecker(
            min_area=cj.parameters.min_annotation_area,
            max_area=cj.parameters.max_annotation_area)

        def get_term(label):
            if binary:
                if "cytomine_id_predict_term" not in cj.parameters:
                    return []
                else:
                    return [int(cj.parameters.cytomine_id_predict_term)]
            # multi-class
            return [label]

        zones = extract_images_or_rois(cj.parameters)
        for zone in cj.monitor(zones,
                               start=50,
                               end=90,
                               period=0.05,
                               prefix="Segmenting images/ROIs"):
            results = workflow.process(zone)

            annotations = AnnotationCollection()
            for obj in results:
                if not area_checker.check(obj.polygon):
                    continue
                polygon = obj.polygon
                if isinstance(zone, ImageWindow):
                    polygon = affine_transform(
                        polygon,
                        [1, 0, 0, 1, zone.abs_offset_x, zone.abs_offset_y])
                polygon = change_referential(polygon, zone.base_image.height)
                if cj.parameters.cytomine_zoom_level > 0:
                    zoom_mult = (2**cj.parameters.cytomine_zoom_level)
                    polygon = affine_transform(
                        polygon, [zoom_mult, 0, 0, zoom_mult, 0, 0])
                annotations.append(
                    Annotation(location=polygon.wkt,
                               id_terms=get_term(obj.label),
                               id_project=cj.project.id,
                               id_image=zone.base_image.image_instance.id))
            annotations.save()

        cj.job.update(status=Job.TERMINATED,
                      status_comment="Finish",
                      progress=100)
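change_referential, used here and in the next example, is not shown; it is presumably the same y-flip that example #2 performs inline:

# Hypothetical sketch, consistent with the inline transform in example #2.
from shapely.affinity import affine_transform

def change_referential(polygon, height):
    # flip the y-axis: image origin top-left -> Cytomine origin bottom-left
    return affine_transform(polygon, [1, 0, 0, -1, 0, height])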
Example #12
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(progress=1, statusComment="Initialisation")
        cj.log(str(cj.parameters))

        term_ids = [cj.parameters.cytomine_id_predicted_term] \
            if hasattr(cj.parameters, "cytomine_id_predicted_term") else None

        image_ids = [
            int(image_id)
            for image_id in cj.parameters.cytomine_id_images.split(",")
        ]
        images = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)
        images = [image for image in images if image.id in image_ids]

        tile_size = cj.parameters.tile_size
        tile_overlap = cj.parameters.tile_overlap
        filter_func = _get_filter(cj.parameters.filter)
        projection = cj.parameters.projection
        if projection not in ('min', 'max', 'average'):
            raise ValueError("Projection {} is not found".format(projection))

        cj.log("Filter: {}".format(cj.parameters.filter))
        cj.log("Projection: {}".format(projection))
        for image in cj.monitor(images,
                                prefix="Running detection on image",
                                start=5,
                                end=99):

            def worker_tile_func(tile):
                window = tile.np_image
                threshold = filter_func(window)
                return window, threshold

            cj.log("Get tiles for image {}".format(image.instanceFilename))
            sldc_image = CytomineProjectionSlide(image, projection)
            tile_builder = CytomineProjectionTileBuilder("/tmp")
            topology = sldc_image.tile_topology(tile_builder, tile_size,
                                                tile_size, tile_overlap)

            results = generic_parallel(topology, worker_tile_func)
            thresholds = list()
            for result in results:
                tile, output = result
                window, threshold = output
                thresholds.append(threshold)

            global_threshold = int(np.mean(thresholds))
            cj.log("Mean threshold is {}".format(global_threshold))

            def worker_annotations_func(tile):
                filtered = img_as_uint(tile.np_image > global_threshold)
                return mask_to_objects_2d(filtered, offset=tile.abs_offset)

            cj.log(
                "Extract annotations from filtered tiles for image {}".format(
                    image.instanceFilename))
            results = generic_parallel(topology, worker_annotations_func)
            ids, geometries = list(), list()
            for result in results:
                tile, tile_geometries = result
                # Workaround for slow SemanticMerger; ideally geometries shouldn't be filtered at this stage.
                tile_geometries = [
                    g for g in tile_geometries
                    if g.area > cj.parameters.min_area
                ]
                ids.append(tile.identifier)
                geometries.append(tile_geometries)

            cj.log("Merge annotations from filtered tiles for image {}".format(
                image.instanceFilename))
            merged_geometries = SemanticMerger(tolerance=1).merge(
                ids, geometries, topology)
            cj.log("{} merged geometries".format(len(merged_geometries)))

            if cj.parameters.annotation_slices == 'median':
                # By default, if no slice is given, an annotation is added to the median slice
                slice_ids = [None]
            else:
                slices = SliceInstanceCollection().fetch_with_filter(
                    "imageinstance", image.id)
                if cj.parameters.annotation_slices == 'first':
                    slice_ids = [slices[0].id]
                else:
                    slice_ids = [sl.id for sl in slices]

            ac = AnnotationCollection()
            for geometry in merged_geometries:
                if geometry.area > cj.parameters.min_area:
                    for slice_id in slice_ids:
                        ac.append(
                            Annotation(location=change_referential(
                                geometry, image.height).wkt,
                                       id_image=image.id,
                                       id_terms=term_ids,
                                       id_slice=slice_id))
            ac.save()

        cj.job.update(statusComment="Finished.", progress=100)
Ejemplo n.º 13
0
def main(argv):
    base_path = str(Path.home())

    #Available filters
    filters = {
        'binary': BinaryFilter(),
        'adaptive': AdaptiveThresholdFilter(),
        'otsu': OtsuFilter()
    }

    #Connect to Cytomine
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialisation...")

        working_path = os.path.join(base_path, "data", str(cj.job.id))
        if not os.path.exists(working_path):
            os.makedirs(working_path)

        filter = filters.get(cj.parameters.cytomine_filter)

        #Initialize the reader to browse the whole image
        whole_slide = WholeSlide(
            cj.get_image_instance(cj.parameters.cytomine_id_image, True))
        reader = CytomineReader(whole_slide,
                                window_position=Bounds(
                                    0, 0, cj.parameters.cytomine_tile_size,
                                    cj.parameters.cytomine_tile_size),
                                zoom=cj.parameters.cytomine_zoom_level,
                                overlap=cj.parameters.cytomine_tile_overlap)
        reader.window_position = Bounds(0, 0, reader.window_position.width,
                                        reader.window_position.height)

        #Browse the slide using reader
        i = 0
        geometries = []
        cj.job.update(progress=1, statusComment="Browsing big image...")

        while True:
            #Read next tile
            reader.read()
            image = reader.data
            #Saving tile image locally
            tile_filename = "%s/image-%d-zoom-%d-tile-%d-x-%d-y-%d.png" % (
                working_path, cj.parameters.cytomine_id_image,
                cj.parameters.cytomine_zoom_level, i, reader.window_position.x,
                reader.window_position.y)
            image.save(tile_filename, "PNG")
            #Apply filtering
            cv_image = np.array(reader.result())
            filtered_cv_image = filter.process(cv_image)
            i += 1
            #Detect connected components
            components = ObjectFinder(filtered_cv_image).find_components()
            #Convert local coordinates (from the tile image) to global coordinates (the whole slide)
            components = whole_slide.convert_to_real_coordinates(
                components, reader.window_position, reader.zoom)
            geometries.extend(
                get_geometries(components, cj.parameters.cytomine_min_area,
                               cj.parameters.cytomine_max_area))

            #Upload annotations (geometries corresponding to connected components) to Cytomine core
            #Upload each geometry and predicted term
            annotations = AnnotationCollection()
            for geometry in geometries:
                pol = shapely.wkt.loads(geometry)
                if pol.is_valid:
                    annotations.append(
                        Annotation(
                            location=geometry,
                            id_image=cj.parameters.cytomine_id_image,
                            id_project=cj.parameters.cytomine_id_project,
                            id_terms=[
                                cj.parameters.cytomine_id_predicted_term
                            ]))
                #Batches of 100 annotations
                if len(annotations) % 100 == 0:
                    annotations.save()
                    annotations = AnnotationCollection()

            annotations.save()
            geometries = []
            if not reader.next(): break

        cj.job.update(
            progress=50,
            statusComment="Detection done, starting Union over whole big image...")

        #Perform Union of geometries (because geometries are computed locally in each tile but objects (e.g. cell clusters) might overlap several tiles)
        host = cj.parameters.cytomine_host.replace("http://", "")
        unioncommand = "groovy -cp \"/lib/jars/*\" /app/union4.groovy http://%s %s %s %d %d %d %d %d %d %d %d %d %d" % (
            host,
            cj._public_key,
            cj._private_key,
            cj.parameters.cytomine_id_image,
            cj.job.userJob,
            cj.parameters.cytomine_id_predicted_term,  #union_term
            cj.parameters.cytomine_union_min_length,  #union_minlength,
            cj.parameters.cytomine_union_bufferoverlap,  #union_bufferoverlap,
            cj.parameters.
            cytomine_union_min_point_for_simplify,  #union_minPointForSimplify,
            cj.parameters.cytomine_union_min_point,  #union_minPoint,
            cj.parameters.cytomine_union_max_point,  #union_maxPoint,
            cj.parameters.cytomine_union_nb_zones_width,  #union_nbzonesWidth,
            cj.parameters.cytomine_union_nb_zones_height
        )  #union_nbzonesHeight)

        os.chdir(base_path)
        print(unioncommand)
        os.system(unioncommand)

    cj.job.update(status=Job.TERMINATED,
                  progress=100,
                  statusComment="Finished.")
Ejemplo n.º 14
0
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # use only images from the current project
        cj.job.update(
            progress=1,
            statusComment="Preparing execution (creating folders,...).")

        # hardcode parameter for setup classify to fetch alphamask instead of plain crop.
        cj.parameters.cytomine_download_alpha = True
        cj.parameters.cytomine_id_projects = "{}".format(cj.project.id)
        cj.job.update(progress=2, statusComment="Downloading crops.")
        base_path, downloaded = setup_classify(args=cj.parameters,
                                               logger=cj.job_logger(2, 25),
                                               dest_pattern=os.path.join(
                                                   "{term}",
                                                   "{image}_{id}.png"),
                                               root_path=str("tmp"),
                                               set_folder="train",
                                               showTerm=True)

        x = np.array(
            [f for annotation in downloaded for f in annotation.filenames])
        y = np.array([
            int(os.path.basename(os.path.dirname(filepath))) for filepath in x
        ])

        # transform classes
        cj.job.update(progress=25, statusComment="Transform classes...")
        positive_terms = parse_domain_list(
            cj.parameters.cytomine_id_positive_terms)
        selected_terms = parse_domain_list(cj.parameters.cytomine_id_terms)
        is_binary = len(selected_terms) > 0 and len(positive_terms) > 0
        foreground_terms = np.unique(y) if len(
            selected_terms) == 0 else np.array(selected_terms)
        if len(positive_terms) == 0:
            classes = np.hstack((np.zeros((1, ), dtype=int), foreground_terms))
        else:  # binary
            foreground_terms = np.array(positive_terms)
            classes = np.array([0, 1])
            # cast to binary
            fg_idx = np.in1d(y, list(foreground_terms))
            bg_idx = np.in1d(
                y, list(set(selected_terms).difference(foreground_terms)))
            y[fg_idx] = 1
            y[bg_idx] = 0

        n_classes = classes.shape[0]

        # filter unwanted terms
        cj.logger.info("Size before filtering:")
        cj.logger.info(" - x: {}".format(x.shape))
        cj.logger.info(" - y: {}".format(y.shape))
        keep = np.in1d(y, classes)
        x, y = x[keep], y[keep]
        cj.logger.info("Size after filtering:")
        cj.logger.info(" - x: {}".format(x.shape))
        cj.logger.info(" - y: {}".format(y.shape))

        if x.shape[0] == 0:
            raise ValueError("No training data")

        if is_binary:
            # 0 (background) vs 1 (classes in foreground)
            cj.logger.info("Binary segmentation:")
            cj.logger.info("> class '0': background & terms {}".format(
                set(selected_terms).difference(positive_terms)))
            cj.logger.info("> class '1': {}".format(set(foreground_terms)))
        else:
            # 0 (background) vs 1 vs 2 vs ... vs n (n classes from cytomine_id_terms)
            cj.logger.info("Multi-class segmentation:")
            cj.logger.info("> background class '0'")
            cj.logger.info("> term classes: {}".format(set(foreground_terms)))

        # build model
        cj.job.update(progress=27, statusComment="Build model...")
        et, pyxit = build_models(
            n_subwindows=cj.parameters.pyxit_n_subwindows,
            min_size=cj.parameters.pyxit_min_size,
            max_size=cj.parameters.pyxit_max_size,
            target_width=cj.parameters.pyxit_target_width,
            target_height=cj.parameters.pyxit_target_height,
            interpolation=cj.parameters.pyxit_interpolation,
            transpose=cj.parameters.pyxit_transpose,
            colorspace=cj.parameters.pyxit_colorspace,
            fixed_size=cj.parameters.pyxit_fixed_size,
            verbose=int(cj.logger.level == 10),
            random_state=cj.parameters.seed,
            n_estimators=cj.parameters.forest_n_estimators,
            min_samples_split=cj.parameters.forest_min_samples_split,
            max_features=cj.parameters.forest_max_features,
            n_jobs=cj.parameters.n_jobs)

        # to extract the classes from the mask
        pyxit.get_output = _get_output_from_mask

        # extract subwindows manually to avoid class problem
        cj.job.update(progress=30, statusComment="Extract subwindows...")
        _x, _y = pyxit.extract_subwindows(x, y)

        actual_classes = np.unique(_y)
        if actual_classes.shape[0] != classes.shape[0]:
            raise ValueError(
                "Some classes are missing from the dataset: actual='{}', expected='{}'"
                .format(",".join(map(str, actual_classes)),
                        ",".join(map(str, classes))))

        cj.logger.info("Size of actual training data:")
        cj.logger.info(" - x   : {}".format(_x.shape))
        cj.logger.info(" - y   : {}".format(_y.shape))
        cj.logger.info(" - dist: {}".format(", ".join([
            "{}: {}".format(v, c)
            for v, c in zip(*np.unique(_y, return_counts=True))
        ])))

        cj.job.update(progress=60, statusComment="Train model...")
        # "re-implement" pyxit.fit to avoid incorrect class handling
        pyxit.classes_ = classes
        pyxit.n_classes_ = n_classes
        pyxit.base_estimator.fit(_x, _y)

        cj.job.update(progress=90, statusComment="Save model...")
        model_filename = joblib.dump(pyxit,
                                     os.path.join(base_path, "model.joblib"),
                                     compress=3)[0]

        AttachedFile(cj.job,
                     domainIdent=cj.job.id,
                     filename=model_filename,
                     domainClassName="be.cytomine.processing.Job").upload()

        Property(cj.job, key="classes", value=stringify(classes)).save()
        Property(cj.job, key="binary", value=is_binary).save()

        cj.job.update(status=Job.TERMINATED,
                      statusComment="Finished.",
                      progress=100)
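The cast-to-binary step above relabels the positive terms to 1 and the remaining selected terms to 0; a toy illustration of the same np.in1d logic with made-up term ids:

import numpy as np

y = np.array([3, 7, 9, 3, 7])   # hypothetical term ids per sample
positive_terms = {7, 9}          # terms mapped to class 1
selected_terms = {3, 7, 9}       # all terms kept for training

fg_idx = np.in1d(y, list(positive_terms))
bg_idx = np.in1d(y, list(selected_terms - positive_terms))
y[fg_idx] = 1
y[bg_idx] = 0
print(y)  # [0 1 1 0 1]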
Ejemplo n.º 15
0
def train(argv):
    parser = ArgumentParser(prog="Extra-Trees Object Counter Model Builder")

    # Cytomine
    parser.add_argument('--cytomine_host',
                        dest='cytomine_host',
                        default='demo.cytomine.be',
                        help="The Cytomine host")
    parser.add_argument('--cytomine_public_key',
                        dest='cytomine_public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key',
                        dest='cytomine_private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_base_path',
                        dest='cytomine_base_path',
                        default='/api/',
                        help="The Cytomine base path")
    parser.add_argument('--cytomine_working_path',
                        dest='cytomine_working_path',
                        default=None,
                        help="The working directory (eg: /tmp)")
    parser.add_argument('--cytomine_id_software',
                        dest='cytomine_software',
                        type=int,
                        help="The Cytomine software identifier")
    parser.add_argument('--cytomine_id_project',
                        dest='cytomine_project',
                        type=int,
                        help="The Cytomine project identifier")
    parser.add_argument('--cytomine_force_download',
                        dest='cytomine_force_download',
                        type=str,
                        default=True,
                        help="Force download from Cytomine or not")

    # Objects
    parser.add_argument('--cytomine_object_term',
                        dest='cytomine_object_term',
                        type=int,
                        help="The Cytomine identifier of object term")
    parser.add_argument('--cytomine_object_user',
                        dest='cytomine_object_user',
                        type=str,
                        help="The Cytomine identifier of object owner")
    parser.add_argument('--cytomine_object_reviewed_only',
                        dest='cytomine_object_reviewed_only',
                        type=str,
                        help="Whether objects have to be reviewed or not")

    # ROI
    parser.add_argument(
        '--cytomine_roi_term',
        dest='cytomine_roi_term',
        type=int,
        default=None,
        help="The Cytomine identifier of region of interest term")
    parser.add_argument('--cytomine_roi_user',
                        dest='cytomine_roi_user',
                        type=str,
                        help="The Cytomine identifier of ROI owner")
    parser.add_argument('--cytomine_roi_reviewed_only',
                        dest='cytomine_roi_reviewed_only',
                        type=str,
                        help="Whether ROIs have to be reviewed or not")

    # Pre-processing
    parser.add_argument('--mean_radius',
                        dest='mean_radius',
                        type=int,
                        required=True,
                        help="The mean radius of object to detect")
    parser.add_argument(
        '--pre_transformer',
        dest='pre_transformer',
        default=None,
        choices=[
            'edt', 'euclidean_distance_transform', 'density', '', None, 'None'
        ],
        help=
        "Scoremap transformer (None, edt, euclidean_distance_transform, density)"
    )
    parser.add_argument('--pre_alpha',
                        dest='pre_alpha',
                        default=3,
                        help="Exponential decrease rate of distance (if EDT)")

    # Subwindows
    parser.add_argument('--sw_input_size',
                        dest='sw_input_size',
                        type=int,
                        default=8,
                        help="Size of input subwindow")
    parser.add_argument('--sw_output_size',
                        dest='sw_output_size',
                        type=int,
                        default=1,
                        help="Size of output subwindow (ignored for FCRN)")
    parser.add_argument(
        '--sw_extr_mode',
        dest='sw_extr_mode',
        choices=['random', 'sliding', 'scoremap_constrained'],
        default='random',
        help="Mode of extraction (random, scoremap_constrained)")
    parser.add_argument(
        '--sw_extr_score_thres',
        dest='sw_extr_score_thres',
        default=0.4,
        help="Minimum threshold to be foreground in subwindows extraction"
        "(if 'scoremap_constrained' mode)")
    parser.add_argument(
        '--sw_extr_ratio',
        dest='sw_extr_ratio',
        default=0.5,
        help="Ratio of background subwindows extracted in subwindows "
        "extraction (if 'scoremap_constrained' mode)")
    parser.add_argument(
        '--sw_extr_npi',
        dest="sw_extr_npi",
        default=100,
        help="Number of extracted subwindows per image (if 'random' mode)")
    parser.add_argument('--sw_colorspace',
                        dest="sw_colorspace",
                        type=str,
                        default='RGB__rgb',
                        help="List of colorspace features")

    # Forest
    parser.add_argument('--forest_method',
                        dest='forest_method',
                        type=str,
                        default='ET-regr',
                        choices=['ET-clf', 'ET-regr', 'RF-clf', 'RF-regr'],
                        help="Type of forest method")
    parser.add_argument('--forest_n_estimators',
                        dest='forest_n_estimators',
                        default=10,
                        type=int,
                        help="Number of trees in forest")
    parser.add_argument('--forest_min_samples_split',
                        dest='forest_min_samples_split',
                        default=10,
                        type=int,
                        help="Minimum number of samples for further splitting")
    parser.add_argument('--forest_max_features',
                        dest='forest_max_features',
                        default='sqrt',
                        help="Max features")

    # Dataset augmentation
    parser.add_argument('--augmentation',
                        dest='augmentation',
                        type=str,
                        default=False)
    parser.add_argument('--aug_rotation_range',
                        dest='rotation_range',
                        type=float,
                        default=0.)
    parser.add_argument('--aug_width_shift_range',
                        dest='width_shift_range',
                        type=float,
                        default=0.)
    parser.add_argument('--aug_height_shift_range',
                        dest='height_shift_range',
                        type=float,
                        default=0.)
    parser.add_argument('--aug_zoom_range',
                        dest='zoom_range',
                        type=float,
                        default=0.)
    parser.add_argument('--aug_fill_mode',
                        dest='fill_mode',
                        type=str,
                        default="reflect")
    parser.add_argument('--aug_horizontal_flip',
                        dest='horizontal_flip',
                        type=bool,
                        default=False)
    parser.add_argument('--aug_vertical_flip',
                        dest='vertical_flip',
                        type=bool,
                        default=False)
    parser.add_argument('--aug_featurewise_center',
                        dest='featurewise_center',
                        type=bool,
                        default=False)
    parser.add_argument('--aug_featurewise_std_normalization',
                        dest='featurewise_std_normalization',
                        type=bool,
                        default=False)

    # Execution
    parser.add_argument('--n_jobs',
                        dest='n_jobs',
                        type=int,
                        default=1,
                        help="Number of jobs")
    parser.add_argument('--verbose',
                        '-v',
                        dest='verbose',
                        default=0,
                        type=int,
                        help="Level of verbosity")

    params, other = parser.parse_known_args(argv)
    if params.cytomine_working_path is None:
        params.cytomine_working_path = os.path.join(tempfile.gettempdir(),
                                                    "cytomine")
    make_dirs(params.cytomine_working_path)

    params.cytomine_force_download = str2bool(params.cytomine_force_download)
    params.cytomine_object_reviewed_only = str2bool(
        params.cytomine_object_reviewed_only)
    params.cytomine_roi_reviewed_only = str2bool(
        params.cytomine_roi_reviewed_only)
    params.cytomine_object_user = str2int(params.cytomine_object_user)
    params.cytomine_roi_user = str2int(params.cytomine_roi_user)
    params.pre_alpha = str2int(params.pre_alpha)
    params.sw_extr_score_thres = str2float(params.sw_extr_score_thres)
    params.sw_extr_ratio = str2float(params.sw_extr_ratio)
    params.sw_extr_npi = str2int(params.sw_extr_npi)
    params.forest_max_features = check_max_features(
        params.forest_max_features)[0]
    params.sw_input_size = (params.sw_input_size, params.sw_input_size)
    params.sw_output_size = (params.sw_output_size, params.sw_output_size)
    params.sw_colorspace = params.sw_colorspace.split('+')

    # Initialize logger
    logger = StandardOutputLogger(params.verbose)
    for key, val in sorted(vars(params).items()):
        logger.info("[PARAMETER] {}: {}".format(key, val))

    # Start job
    with CytomineJob(params.cytomine_host,
                     params.cytomine_public_key,
                     params.cytomine_private_key,
                     params.cytomine_software,
                     params.cytomine_project,
                     parameters=vars(params),
                     working_path=params.cytomine_working_path,
                     base_path=params.cytomine_base_path,
                     verbose=(params.verbose >= Logger.DEBUG)) as job:
        cytomine = job
        cytomine.update_job_status(job.job,
                                   status_comment="Starting...",
                                   progress=0)
        logger.i("Starting...")

        cytomine.update_job_status(job.job,
                                   status_comment="Loading training set...",
                                   progress=1)
        logger.i("Loading training set...")
        X, y = get_dataset(
            cytomine, params.cytomine_working_path, params.cytomine_project,
            params.cytomine_object_term, params.cytomine_roi_term,
            params.cytomine_object_user, params.cytomine_object_reviewed_only,
            params.cytomine_roi_user, params.cytomine_roi_reviewed_only,
            params.cytomine_force_download)
        logger.d("X size: {} samples".format(len(X)))
        logger.d("y size: {} samples".format(len(y)))

        cytomine.update_job_status(job.job,
                                   status_comment="Training forest...",
                                   progress=5)
        logger.i("Training forest...")
        estimator = CellCountRandomizedTrees(logger=logger, **vars(params))
        estimator.fit(np.asarray(X), np.asarray(y))

        cytomine.update_job_status(job.job,
                                   status_comment="Saving (best) model",
                                   progress=95)
        logger.i("Saving model...")
        model_path = os.path.join(params.cytomine_working_path, "models",
                                  str(params.cytomine_software))
        model_file = os.path.join(model_path, "{}.pkl".format(job.job.id))
        make_dirs(model_path)
        estimator.save(model_file)

        cytomine.update_job_status(job.job,
                                   status_comment="Finished.",
                                   progress=100)
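train() leans on str2bool, str2int and str2float to coerce CLI strings; one plausible implementation of these helpers (assumed, not taken from the original module):

def str2bool(value):
    # Accept actual booleans as well as common textual forms.
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("1", "true", "yes", "y")

def str2int(value):
    # Return None for empty/None-like inputs, otherwise int(value).
    return None if value in (None, "", "None") else int(value)

def str2float(value):
    return None if value in (None, "", "None") else float(value)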
Ejemplo n.º 16
0
import sys

from cytomine import CytomineJob

from preprocessing import preprocess

if __name__ == '__main__':
    params = sys.argv[1:]

    # CytomineJob.from_cli() uses the descriptor.json to automatically create the ArgumentParser
    with CytomineJob.from_cli(params) as cj:
        cj.job.update(statusComment="Preprocessing...", progress=1)
        result = preprocess(cj, cj.parameters.working_path,
                            cj.parameters.cytomine_id_project,
                            cj.parameters.cytomine_id_terms,
                            cj.parameters.cytomine_id_tags_for_images)

        classes_filename, image_filenames, annotation_filenames = result
        print("The classes are in file: {}".format(classes_filename))
        print("There are {} images".format(len(image_filenames)))
        print("There are {} annotation files".format(len(annotation_filenames)))

        # Train YOLO
        cj.job.update(statusComment="Start to train YOLO", progress=5)
        # TODO

        # Save model
        cj.job.update(statusComment="Saving model", progress=95)
        # TODO

        cj.job.update(statusComment="Finished", progress=100)
Ejemplo n.º 17
0
def run(debug=False):
    """
    Gets a project image from Cytomine

    Args:
        debug (bool): If True, annotations are saved individually and any error is plotted

    Example:
      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'dadb7d7a-5822-48f7-ab42-59bce27750ae' --cytomine_private_key 'd73f4602-51d2-4d15-91e4-d4cc175d65fd' --cytomine_id_project 187 --cytomine_id_image_instance 375 --cytomine_id_software 228848

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'b6ebb23c-00ff-427b-be24-87b2a82490df' --cytomine_private_key '6812f09b-3f33-4938-82ca-b23032d377fd' --cytomine_id_project 154 --cytomine_id_image_instance 3643

      python main.py --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 2140 --cytomine_id_software 2633

      docker run --gpus all -it --rm --mount type=bind,source=/home/giussepi/Public/environments/Cytomine/cyto_CRLM/,target=/CRLM,bind-propagation=private --network=host ttt --cytomine_host 'localhost-core' --cytomine_public_key 'd2be8bd7-2b0b-40c3-9e81-5ad5765568f3' --cytomine_private_key '6dfe27d7-2ad1-4ca2-8ee9-6321ec3f1318' --cytomine_id_project 197 --cytomine_id_image_instance 31296 --cytomine_id_software 79732
    """

    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine connection parameters
    parser.add_argument('--cytomine_host',
                        dest='host',
                        default='demo.cytomine.be',
                        help="The Cytomine host")
    parser.add_argument('--cytomine_public_key',
                        dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key',
                        dest='private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_project',
                        dest='id_project',
                        help="The project from which we want the images")
    parser.add_argument('--cytomine_id_software',
                        dest='id_software',
                        help="The software to be used to process the image")
    parser.add_argument('--cytomine_id_image_instance',
                        dest='id_image_instance',
                        help="The image to which the annotation will be added")

    params, _ = parser.parse_known_args(sys.argv[1:])

    with CytomineJob.from_cli(sys.argv[1:]) as cytomine:
        # TODO: To be tested on TITANx
        img = ImageInstance().fetch(params.id_image_instance)
        download_image(img)
        process_wsi_and_save(get_container_image_path(img))
        new_annotations = generate_polygons(get_container_image_path(img),
                                            adapt_to_cytomine=True)
        annotation_collection = None

        for label_key in new_annotations:
            # Sending annotation batches to the server
            for sub_list in chunks(new_annotations[label_key],
                                   ANNOTATION_BATCH):
                if not debug:
                    annotation_collection = AnnotationCollection()

                for exterior_points in sub_list:
                    if debug:
                        annotation_collection = AnnotationCollection()

                    annotation_collection.append(
                        Annotation(location=Polygon(
                            exterior_points.astype(int).reshape(
                                exterior_points.shape[0],
                                exterior_points.shape[2]).tolist()).wkt,
                                   id_image=params.id_image_instance,
                                   id_project=params.id_project,
                                   id_terms=[CYTOMINE_LABELS[label_key]]))

                    if debug:
                        try:
                            annotation_collection.save()
                        except Exception as e:
                            print(
                                exterior_points.astype(int).reshape(
                                    exterior_points.shape[0],
                                    exterior_points.shape[2]).tolist())
                            plt.plot(*Polygon(
                                exterior_points.astype(int).reshape(
                                    exterior_points.shape[0], exterior_points.
                                    shape[2])).exterior.coords.xy)
                            plt.show()
                            # raise(e)
                            print(e)
                        finally:
                            time.sleep(1)

                if not debug:
                    annotation_collection.save()
                    time.sleep(ANNOTATION_SLEEP_TIME)

        # Adding pie chart labels data as image property
        # TODO: Change delete_results_file to True for final test on titanX
        num_pixels_per_label = get_pie_chart_data(
            get_container_image_path(img), delete_results_file=False)

        for percentage, label_ in zip(num_pixels_per_label, Label.names):
            Property(img, key=label_, value='{}%'.format(percentage)).save()

        remove_image_local_copy(img)

        cytomine.job.update(statusComment="Finished.")
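The chunks helper used above to batch annotations is not shown; a standard sketch, assuming it yields fixed-size slices of a list:

def chunks(items, size):
    """Yield successive slices of at most `size` elements."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

# Usage: three batches from seven elements.
list(chunks(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]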
Ejemplo n.º 18
0
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING, progress=0, statusComment='Initialization...')
        base_path = "{}".format(os.getenv('HOME'))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # Loading models from models directory
        with tf.device('/cpu:0'):
            h_model = load_model('/models/head_dice_sm_9976.hdf5', compile=False)  # head model
            h_model.compile(optimizer='adam', loss=dice_coef_loss,
                            metrics=['accuracy'])
            op_model = load_model('/models/op_ce_sm_9991.hdf5', compile=True)  # operculum model
            #op_model.compile(optimizer='adam', loss=dice_coef_loss,
                            #metrics=['accuracy'])

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter('project', conn.parameters.cytomine_id_project)
        if conn.parameters.cytomine_id_images != 'all':  # select only the given image instances
            images = [_ for _ in images if _.id
                      in map(lambda x: int(x.strip()),
                             conn.parameters.cytomine_id_images.split(','))]
        images_id = [image.id for image in images]

        # Download selected images into 'working_directory'
        img_path = os.path.join(working_path, 'images')
        # if not os.path.exists(img_path):
        os.makedirs(img_path)

        for image in conn.monitor(
                images, start=2, end=50, period=0.1,
                prefix='Downloading images into working directory...'):
            fname, fext = os.path.splitext(image.filename)
            if image.download(dest_pattern=os.path.join(
                    img_path,
                    "{}{}".format(image.id, fext))) is not True:  # images are downloaded with image_ids as names
                print('Failed to download image {}'.format(image.filename))

        # Prepare image file paths from image directory for execution
        conn.job.update(progress=50,
                        statusComment="Preparing data for execution...")
        image_paths = glob.glob(os.path.join(img_path, '*'))
        std_size = (1032, 1376)  # maximum size that the model can handle
        model_size = 256
        for i in range(len(image_paths)):

            org_img = Image.open(image_paths[i]) 
            
            filename = os.path.basename(image_paths[i])
            fname, fext = os.path.splitext(filename)
            fname = int(fname)
            org_img = img_to_array(org_img)
            img = org_img.copy()
            org_size = org_img.shape[:2]
            asp_ratio = org_size[0] / org_size[1]  #for cropping and upscaling to original size
            if org_size[1] > std_size[1]:
                img = tf.image.resize(img, (675,900), method='nearest')
                img = tf.image.resize_with_crop_or_pad(img, std_size[0],std_size[1])
                h_mask = predict_mask(img, h_model,model_size)
                h_mask = crop_to_aspect(h_mask, asp_ratio)
                h_mask = tf.image.resize(h_mask, std_size, method='nearest')
                h_up_mask = tf.image.resize_with_crop_or_pad(h_mask, 675,900)
                h_up_mask = tf.image.resize(h_up_mask, org_size, method='nearest')
                h_up_mask = np.asarray(h_up_mask).astype(np.uint8)
                _, h_up_mask = cv.threshold(h_up_mask, 0.001, 255, 0)
                kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (17, 17))
                h_up_mask = cv.morphologyEx(h_up_mask, cv.MORPH_OPEN, kernel, iterations=5)
                h_up_mask = cv.morphologyEx(h_up_mask, cv.MORPH_CLOSE, kernel, iterations=1)
                #h_up_mask = cv.erode(h_up_mask ,kernel,iterations = 3)
                #h_up_mask = cv.dilate(h_up_mask ,kernel,iterations = 3)
                h_up_mask = np.expand_dims(h_up_mask, axis=-1)
                
            else:
                h_mask = predict_mask(img, h_model, model_size)
                h_mask = crop_to_aspect(h_mask, asp_ratio)
                h_up_mask = tf.image.resize(h_mask, org_size, method='nearest')
                h_up_mask = np.asarray(h_up_mask).astype(np.uint8)
                _, h_up_mask = cv.threshold(h_up_mask, 0.001, 255, 0)
                kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))
                #kernel = np.ones((9,9),np.uint8)
                h_up_mask = cv.morphologyEx(h_up_mask, cv.MORPH_CLOSE, kernel, iterations=3)
                h_up_mask = np.expand_dims(h_up_mask, axis=-1)
        
            box = bb_pts(h_up_mask)  # bounding box points for operculum (x_min, y_min, x_max, y_max)
            w = box[0]  # x_min (horizontal offset)
            h = box[1]  # y_min (vertical offset)
            tr_h = box[3] - box[1]  # target height
            tr_w = box[2] - box[0]  # target width
            crop_op_img = tf.image.crop_to_bounding_box(org_img, h, w, tr_h, tr_w)

            op_asp_ratio = crop_op_img.shape[0] / crop_op_img.shape[1]
            op_mask = predict_mask(crop_op_img, op_model, model_size)
            op_mask = crop_to_aspect(op_mask, op_asp_ratio)
            op_mask = tf.image.resize(op_mask, (crop_op_img.shape[0], crop_op_img.shape[1]), method='nearest')
            op_up_mask = np.zeros((org_img.shape[0],org_img.shape[1],1)).astype(np.uint8) # array of zeros to be filled with op mask
            op_up_mask[box[1]:box[3], box[0]:box[2]] = op_mask # paste op_mask in org_img (reversing the crop operation)
            #op_up_mask = tf.image.resize_with_crop_or_pad(op_mask, org_size[0], org_size[1])
        

            h_polygon = h_make_polygon(h_up_mask)
            op_polygon = o_make_polygon(op_up_mask)

            conn.job.update(
                status=Job.RUNNING, progress=95,
                statusComment="Uploading new annotations to Cytomine server..")

            annotations = AnnotationCollection()
            annotations.append(Annotation(location=h_polygon[0].wkt, id_image=fname, id_terms=[143971108],
                                          id_project=conn.parameters.cytomine_id_project))
            annotations.append(Annotation(location=op_polygon[0].wkt, id_image=fname, id_terms=[143971084],
                                          id_project=conn.parameters.cytomine_id_project))
            annotations.save()

        conn.job.update(status=Job.TERMINATED, status_comment="Finish", progress=100)  # 524787186
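predict_mask is not defined in this snippet; a minimal sketch of what such a Keras helper typically does (resize to the model input, predict, binarize), assuming a single-channel sigmoid output and 0-255 inputs:

import numpy as np
import tensorflow as tf

def predict_mask(img, model, model_size, threshold=0.5):
    x = tf.image.resize(img, (model_size, model_size), method='nearest')
    x = tf.cast(x, tf.float32) / 255.0  # assumed input normalization
    x = tf.expand_dims(x, axis=0)       # add the batch dimension
    pred = model.predict(x)[0]          # (model_size, model_size, 1)
    return (pred > threshold).astype(np.uint8)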
Ejemplo n.º 19
0
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(progress=1, statusComment="Initialisation")
        cj.log(str(cj.parameters))

        term_ids = [int(term_id) for term_id in cj.parameters.cytomine_id_terms.split(",")]
        terms = TermCollection().fetch_with_filter("project", cj.parameters.cytomine_id_project)
        terms = [term for term in terms if term.id in term_ids]

        image_ids = [int(image_id) for image_id in cj.parameters.cytomine_id_images.split(",")]
        images = ImageInstanceCollection(light=True).fetch_with_filter("project", cj.parameters.cytomine_id_project)
        images = [image for image in images if image.id in image_ids]

        if hasattr(cj.parameters, "cytomine_id_users") and cj.parameters.cytomine_id_users is not None:
            user_ids = [int(user_id) for user_id in cj.parameters.cytomine_id_users.split(",")]
        else:
            user_ids = []

        if hasattr(cj.parameters, "cytomine_id_jobs") and cj.parameters.cytomine_id_jobs is not None:
            job_ids = [int(job_id) for job_id in cj.parameters.cytomine_id_jobs.split(",")]
            jobs = JobCollection(project=cj.parameters.cytomine_id_project).fetch()
            jobs = [job for job in jobs if job.id in job_ids]
        else:
            jobs = []

        userjobs_ids = [job.userJob for job in jobs]
        all_user_ids = user_ids + userjobs_ids

        cj.job.update(progress=20, statusComment="Collect data")
        ac = AnnotationCollection()
        ac.terms = term_ids
        ac.images = image_ids
        ac.showMeta = True
        ac.showGIS = True
        ac.showTerm = True
        ac.reviewed = True if cj.parameters.cytomine_reviewed_only else None
        ac.users = all_user_ids if len(all_user_ids) > 0 else None
        ac.fetch()

        cj.job.update(progress=55, statusComment="Compute statistics")
        data = dict()
        for image in images:
            d = dict()
            areas = [a.area for a in ac if a.image == image.id]
            total_area = np.sum(areas)
            d['total'] = total_area
            d['count'] = len(areas)
            d['ratio'] = 1.0
            for term in terms:
                annotations = [a for a in ac if a.image == image.id and term.id in a.term]
                areas = [a.area for a in annotations]
                d[term.name] = dict()
                d[term.name]['total'] = np.sum(areas)
                d[term.name]['count'] = len(annotations)
                d[term.name]['ratio'] = d[term.name]['total'] / float(total_area) if total_area > 0 else 0
                d[term.name]['mean'] = np.mean(areas)
                d[term.name]['annotations'] = [{"created": a.created, "area": a.area} for a in annotations]
            data[image.instanceFilename] = d

        cj.job.update(progress=90, statusComment="Write CSV report")
        with open("stat-area.csv", "w") as f:
            for l in write_csv(data, terms):
                f.write("{}\n".format(l))

        job_data = JobData(id_job=cj.job.id, key="Area CSV report", filename="stat-area.csv")
        job_data = job_data.save()
        job_data.upload("stat-area.csv")
        
        cj.job.update(statusComment="Finished.", progress=100)
Ejemplo n.º 20
0
def main(argv):
    # 0. Initialize Cytomine client and job
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialisation...")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(cj.job.id))
        in_path = os.path.join(working_path, "in")
        out_path = os.path.join(working_path, "out")
        gt_path = os.path.join(working_path, "ground_truth")

        if not os.path.exists(working_path):
            os.makedirs(working_path)
            os.makedirs(in_path)
            os.makedirs(out_path)
            os.makedirs(gt_path)

        # 2. Download the images (first input, then ground truth image)
        cj.job.update(
            progress=1,
            statusComment="Downloading images (to {})...".format(in_path))
        image_group = ImageGroupCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)

        input_images = [i for i in image_group if gt_suffix not in i.name]
        gt_images = [i for i in image_group if gt_suffix in i.name]

        for input_image in input_images:
            input_image.download(os.path.join(in_path, "{id}.tif"))

        for gt_image in gt_images:
            related_name = gt_image.name.replace(gt_suffix, '')
            related_image = [i for i in input_images if related_name == i.name]
            if len(related_image) == 1:
                gt_image.download(
                    os.path.join(gt_path,
                                 "{}.tif".format(related_image[0].id)))

        # 3. Call the image analysis workflow using the run script
        cj.job.update(progress=25, statusComment="Launching workflow...")
        #TODO: error handling
        workflow(in_path, out_path)

        #        if return_code != 0:
        #           err_desc = "Failed to execute the ImageJ macro (return code: {})".format(return_code)
        #           cj.job.update(progress=50, statusComment=err_desc)
        #            raise ValueError(err_desc)

        # 4. Upload .swc and attach to corresponding image
        # ! not needed if we compute directly the metric
        for image in cj.monitor(
                input_images,
                start=60,
                end=80,
                period=0.1,
                prefix="Extracting and uploading polygons from masks"):
            afile = "{}.swc".format(image.id)
            path = os.path.join(out_path, afile)
            AttachedFile(image, filename=path).upload()

        # 4. Upload the annotation and labels to Cytomine (annotations are
        # extracted from the mask using the AnnotationExporter module)
        # for image in cj.monitor(input_images, start=60, end=80, period=0.1,
        #                         prefix="Extracting and uploading polygons from masks"):
        #     file = "{}.tif".format(image.id)
        #     path = os.path.join(out_path, file)
        #     data = io.imread(path)
        #
        #     # extract objects
        #     slices = mask_to_objects_2d(data)
        #     print("Found {} polygons in this image {}.".format(len(slices), image.id))
        #
        #     # upload
        #     collection = AnnotationCollection()
        #     for obj_slice in slices:
        #         collection.append(Annotation(
        #             location=affine_transform(obj_slice.polygon, [1, 0, 0, -1, 0, image.height]).wkt,
        #             id_image=image.id, id_project=cj.parameters.cytomine_id_project, property=[
        #                 {"key": "index", "value": str(obj_slice.label)}
        #             ]
        #         ))
        #     collection.save()

        # 5. Compute the metrics
        cj.job.update(progress=80, statusComment="Computing metrics...")

        # TODO: compute metrics:
        # in /out: output files {id}.tiff
        # in /ground_truth: label files {id}.tiff

        cj.job.update(progress=99, statusComment="Cleaning...")
        for image in input_images:
            os.remove(os.path.join(in_path, "{}.tif".format(image.id)))

        cj.job.update(status=Job.TERMINATED,
                      progress=100,
                      statusComment="Finished.")
Ejemplo n.º 21
0
def predict(argv):
    parser = ArgumentParser(prog="Extra-Trees Object Counter Predictor")

    # Cytomine
    parser.add_argument('--cytomine_host', dest='cytomine_host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='cytomine_public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='cytomine_private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_base_path', dest='cytomine_base_path',
                        default='/api/', help="The Cytomine base path")
    parser.add_argument('--cytomine_working_path', dest='cytomine_working_path',
                        default=None, help="The working directory (eg: /tmp)")
    parser.add_argument('--cytomine_id_software', dest='cytomine_software', type=int,
                        help="The Cytomine software identifier")
    parser.add_argument('--cytomine_id_project', dest='cytomine_project', type=int,
                        help="The Cytomine project identifier")

    # Objects
    parser.add_argument('--cytomine_object_term', dest='cytomine_object_term', type=int,
                        help="The Cytomine identifier of object term")

    # Post-processing
    parser.add_argument('--post_threshold', dest='post_threshold', type=float,
                        help="Post-processing discarding threshold")
    parser.add_argument('--post_sigma', dest='post_sigma', type=float,
                        help="Std-dev of Gauss filter applied to smooth prediction")
    parser.add_argument('--post_min_dist', dest='post_min_dist', type=int,
                        help="Minimum distance between two peaks")

    # ROI
    parser.add_argument('--annotation', dest='annotation', type=str, action='append', default=[])
    parser.add_argument('--image', dest='image', type=str, action='append', default=[])

    # Execution
    parser.add_argument('--n_jobs', dest='n_jobs', type=int, default=1, help="Number of jobs")
    parser.add_argument('--verbose', '-v', dest='verbose', type=int, default=0, help="Level of verbosity")
    parser.add_argument('--model_id_job', dest='model_id_job', type=str, default=None, help="Model job ID")
    parser.add_argument('--model_file', dest="model_file", type=str, default=None, help="Model file")

    params, other = parser.parse_known_args(argv)
    if params.cytomine_working_path is None:
        params.cytomine_working_path = os.path.join(tempfile.gettempdir(), "cytomine")
    make_dirs(params.cytomine_working_path)

    params.model_id_job = str2int(params.model_id_job)
    params.image = [str2int(i) for i in params.image]
    params.annotation = [str2int(i) for i in params.annotation]

    # Initialize logger
    logger = StandardOutputLogger(params.verbose)
    for key, val in sorted(vars(params).items()):
        logger.info("[PARAMETER] {}: {}".format(key, val))

    # Start job
    with CytomineJob(params.cytomine_host,
                     params.cytomine_public_key,
                     params.cytomine_private_key,
                     params.cytomine_software,
                     params.cytomine_project,
                     parameters=vars(params),
                     working_path=params.cytomine_working_path,
                     base_path=params.cytomine_base_path,
                     verbose=(params.verbose >= Logger.DEBUG)) as job:
        cytomine = job
        cytomine.update_job_status(job.job, status_comment="Starting...", progress=0)

        cytomine.update_job_status(job.job, status_comment="Loading model...", progress=1)
        logger.i("Loading model...")
        if params.model_file:
            model_file = params.model_file
        else:
            model_job = cytomine.get_job(params.model_id_job)
            model_file = os.path.join(params.cytomine_working_path, "models", str(model_job.software),
                                      "{}.pkl".format(model_job.id))
        with open(model_file, 'rb') as f:
            estimator = pickle.load(f)
            predict_params = vars(params).copy()
            predict_params.pop("image", None)
            predict_params.pop("annotation", None)
            estimator.set_params(**predict_params)

        cytomine.update_job_status(job.job, status_comment="Dumping annotations/images to predict...", progress=3)
        logger.i("Dumping annotations/images to predict...")
        if params.annotation and params.annotation[0] is not None:
            annots = [cytomine.get_annotation(id) for id in params.annotation]
            annots_collection = AnnotationCollection()
            annots_collection._data = annots
            crops = cytomine.dump_annotations(annotations=annots_collection,
                                              dest_path=os.path.join(params.cytomine_working_path, "crops",
                                                                     str(params.cytomine_project)),
                                              desired_zoom=0,
                                              get_image_url_func=Annotation.get_annotation_alpha_crop_url)
            X = crops.data()
        elif params.image and params.image[0] is not None:
            image_instances = [cytomine.get_image_instance(id) for id in params.image]
            image_instances = cytomine.dump_project_images(id_project=params.cytomine_project,
                                                           dest_path="/imageinstances/",
                                                           image_instances=image_instances,
                                                           max_size=True)
            X = image_instances
        else:
            X = []

        logger.d("X size: {} samples".format(len(X)))

        for i, x in enumerate(X):
            logger.i("Predicting ID {}...".format(x.id))
            cytomine.update_job_status(job.job, status_comment="Predicting ID {}...".format(x.id),
                                       progress=5 + int(95 * i / len(X)))
            y = estimator.predict([x.filename])
            y = estimator.postprocessing([y], **estimator.filter_sk_params(estimator.postprocessing))

            logger.i("Uploading annotations...")
            cytomine.update_job_status(job.job, status_comment="Uploading annotations...")
            upload_annotations(cytomine, x, y, term=params.cytomine_object_term)

        logger.i("Finished.")
        cytomine.update_job_status(job.job, status_comment="Finished.", progress=100)
Ejemplo n.º 22
0
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # Implements your software here.

        cj.job.update(statusComment="Finished.")
Ejemplo n.º 23
0
                    default="")
parser.add_argument('--cytomine_private_key',
                    dest="cytomine_private_key",
                    default="")
parser.add_argument("--cytomine_id_project",
                    dest="cytomine_id_project",
                    default="5378")
parser.add_argument("--cytomine_id_software",
                    dest="cytomine_id_software",
                    default="")
parser.add_argument("--icy_scale3sensitivity", dest="scale3sens", default="40")
params, others = parser.parse_known_args(sys.argv)

with CytomineJob(params.cytomine_host,
                 params.cytomine_public_key,
                 params.cytomine_private_key,
                 params.cytomine_id_software,
                 params.cytomine_id_project,
                 verbose=logging.INFO) as cj:
    cj.job.update(status=Job.RUNNING,
                  progress=0,
                  statusComment="Initialisation...")

    working_path = os.path.join(base_path, str(cj.job.id))
    in_path = os.path.join(working_path, "in")
    makedirs(in_path)
    out_path = os.path.join(working_path, "out")
    makedirs(out_path)

    cj.job.update(progress=1, statusComment="Downloading images...")
    images = ImageInstanceCollection().fetch_with_filter(
        "project", params.cytomine_id_project)
Ejemplo n.º 24
0
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # annotation filtering
        cj.logger.info(str(cj.parameters))

        # use only images from the current project
        cj.parameters.cytomine_id_projects = "{}".format(cj.parameters.cytomine_id_project)

        cj.job.update(progress=1, statusComment="Preparing execution (creating folders,...).")
        root_path = "/data/" #Path.home()
        image_path, downloaded = setup_classify(
            args=cj.parameters, logger=cj.job_logger(1, 40),
            dest_pattern="{image}_{id}.png", root_path=root_path,
            set_folder="test", showWKT=True
        )

        annotations = [annotation for annotation in downloaded for f in annotation.filenames]  # one entry per crop file, aligned with x
        x = np.array([f for annotation in downloaded for f in annotation.filenames])

        # extract model data from previous job
        cj.job.update(progress=45, statusComment="Extract properties from training job.")
        train_job = Job().fetch(cj.parameters.cytomine_id_job)
        properties = PropertyCollection(train_job).fetch().as_dict()

        binary = str2bool(properties["binary"].value)
        if binary:
            classes = np.array([cj.parameters.cytomine_id_term_negative, cj.parameters.cytomine_id_term_positive])
        else:
            classes = np.array(parse_domain_list(properties["classes"].value))

        # extract model
        cj.job.update(progress=50, statusComment="Download the model file.")
        attached_files = AttachedFileCollection(train_job).fetch()
        model_file = attached_files.find_by_attribute("filename", "model.joblib")
        model_filepath = os.path.join(root_path, "model.joblib")
        model_file.download(model_filepath, override=True)
        pyxit = joblib.load(model_filepath)

        # set n_jobs
        pyxit.base_estimator.n_jobs = cj.parameters.n_jobs
        pyxit.n_jobs = cj.parameters.n_jobs

        cj.job.update(progress=55, statusComment="Predict...")
        if hasattr(pyxit, "predict_proba"):
            probas = pyxit.predict_proba(x)
            y_pred = np.argmax(probas, axis=1)
        else:
            probas = [None] * x.shape[0]
            y_pred = pyxit.predict(x)

        predicted_terms = classes.take(y_pred, axis=0)
        collection = AnnotationCollection()
        for i in cj.monitor(range(x.shape[0]), start=80, end=99, period=0.005, prefix="Uploading predicted terms"):
            annot, term, proba = annotations[i], predicted_terms[i], probas[i]

            parameters = {
                "location": annot.location,
                "id_image": annot.image,
                "id_project": cj.project.id,
                "id_terms": [int(term)]
            }
            if proba is not None:
                parameters["rate"] = float(np.max(proba))
            collection.append(Annotation(**parameters))
        collection.save()
        cj.job.update(status=Job.TERMINATED, status_comment="Finish", progress=100)
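parse_domain_list is used here and in the training example to split a Cytomine domain-list parameter; a plausible sketch, assuming comma- or semicolon-separated integer ids:

def parse_domain_list(s):
    """Parse '12,34;56'-style id lists into a list of ints."""
    if s is None or not s.strip():
        return []
    return [int(token) for token in s.replace(';', ',').split(',') if token]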
Ejemplo n.º 25
0
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        statusComment="Initialization...")
        # base_path = "{}".format(os.getenv("HOME")) # Mandatory for Singularity
        base_path = "/home/mmu/Desktop"
        working_path = os.path.join(base_path, str(conn.job.id))

        #Load pre-trained Stardist model
        np.random.seed(17)
        lbl_cmap = random_label_cmap()
        #Stardist H&E model downloaded from https://github.com/mpicbg-csbd/stardist/issues/46
        #(mirror: https://drive.switch.ch/index.php/s/LTYaIud7w6lCyuI)
        model = StarDist2D(
            None, name='2D_versatile_HE', basedir='/models/'
        )  #use local model file in /models/2D_versatile_HE/

        #Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        list_imgs = []
        if conn.parameters.cytomine_id_images == 'all':
            for image in images:
                list_imgs.append(int(image.id))
        else:
            list_imgs = [
                int(id_img)
                for id_img in conn.parameters.cytomine_id_images.split(',')
            ]

        #Go over images
        for id_image in conn.monitor(list_imgs,
                                     prefix="Running detection on image",
                                     period=0.1):
            #Dump ROI annotations in img from Cytomine server to local images
            #conn.job.update(status=Job.RUNNING, progress=0, statusComment="Fetching ROI annotations...")
            roi_annotations = AnnotationCollection()
            roi_annotations.project = conn.parameters.cytomine_id_project
            roi_annotations.term = conn.parameters.cytomine_id_roi_term
            roi_annotations.image = id_image  #conn.parameters.cytomine_id_image
            roi_annotations.showWKT = True
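            # showWKT=True makes fetch() return each annotation's geometry (WKT)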
            roi_annotations.fetch()
            print(roi_annotations)
            #Go over ROI in this image
            #for roi in conn.monitor(roi_annotations, prefix="Running detection on ROI", period=0.1):
            for roi in roi_annotations:
                #Get Cytomine ROI coordinates for remapping to whole-slide
                #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                print(
                    "----------------------------ROI------------------------------"
                )
                roi_geometry = wkt.loads(roi.location)
                print("ROI Geometry from Shapely: {}".format(roi_geometry))
                print("ROI Bounds")
                print(roi_geometry.bounds)
                minx = roi_geometry.bounds[0]
                # bounds are (minx, miny, maxx, maxy); the ROI top in Cytomine's
                # bottom-left coordinate system is maxy
                maxy = roi_geometry.bounds[3]
                #Dump ROI image into local PNG file
                roi_path = os.path.join(working_path,
                                        str(roi_annotations.project),
                                        str(roi_annotations.image),
                                        str(roi.id))
                roi_png_filename = os.path.join(roi_path,
                                                '{}.png'.format(roi.id))
                print("roi_png_filename: %s" % roi_png_filename)
                roi.dump(dest_pattern=roi_png_filename, mask=True, alpha=True)
                #roi.dump(dest_pattern=os.path.join(roi_path,"{id}.png"), mask=True, alpha=True)

                #Stardist works with TIFF images without alpha channel, flattening PNG alpha mask to TIFF RGB
                im = Image.open(roi_png_filename)
                bg = Image.new("RGB", im.size, (255, 255, 255))
                bg.paste(im, mask=im.split()[3])
                roi_tif_filename = os.path.join(roi_path,
                                                '{}.tif'.format(roi.id))
                bg.save(roi_tif_filename, quality=100)
                X_files = sorted(glob(os.path.join(roi_path,
                                                   '{}*.tif'.format(roi.id))))
                X = list(map(imread, X_files))
                n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
                # normalize channels independently; use (0, 1, 2) to normalize
                # channels jointly
                axis_norm = (0, 1)
                if n_channel > 1:
                    print("Normalizing image channels %s." %
                          ('jointly' if axis_norm is None or 2 in axis_norm
                           else 'independently'))

                #Going over ROI images in ROI directory (in our case: one ROI per directory)
                for x in range(0, len(X)):
                    print("------------------- Processing ROI file %d: %s" %
                          (x, roi_tif_filename))
                    img = normalize(X[x],
                                    conn.parameters.stardist_norm_perc_low,
                                    conn.parameters.stardist_norm_perc_high,
                                    axis=axis_norm)
                    #Stardist model prediction with thresholds
                    labels, details = model.predict_instances(
                        img,
                        prob_thresh=conn.parameters.stardist_prob_t,
                        nms_thresh=conn.parameters.stardist_nms_t)
                    print("Number of detected polygons: %d" %
                          len(details['coord']))
                    cytomine_annotations = AnnotationCollection()
                    #Go over detections in this ROI, convert and upload to Cytomine
                    for pos, polygroup in enumerate(details['coord'], start=1):
                        #Converting to Shapely annotation
                        points = list()
                        for i in range(len(polygroup[0])):
                            #Cytomine cartesian coordinate system, (0,0) is bottom left corner
                            #Mapping Stardist polygon detection coordinates to Cytomine ROI in whole slide image
                            p = Point(minx + polygroup[1][i],
                                      maxy - polygroup[0][i])
                            points.append(p)

                        annotation = Polygon(points)
                        #Append to Annotation collection
                        cytomine_annotations.append(
                            Annotation(
                                location=annotation.wkt,
                                id_image=id_image,  #conn.parameters.cytomine_id_image
                                id_project=conn.parameters.cytomine_id_project,
                                id_terms=[conn.parameters.cytomine_id_cell_term]))
                        print(".", end='', flush=True)

                    #Send Annotation Collection (for this ROI) to Cytomine server in one http request
                    ca = cytomine_annotations.save()

        conn.job.update(status=Job.TERMINATED,
                        progress=100,
                        statusComment="Finished.")
Example #26
def main():
    base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity

    with CytomineJob.from_cli(sys.argv[1:]) as cj:
        scale3sens = cj.parameters.icy_scale3sensitivity

        working_path = os.path.join(base_path, "data", str(cj.job.id))
        in_dir = os.path.join(working_path, "in")
        makedirs(in_dir)
        out_dir = os.path.join(working_path, "out")
        makedirs(out_dir)

        cj.job.update(
            progress=1,
            statusComment="Downloading images (to {})...".format(in_dir))
        image_instances = ImageInstanceCollection().fetch_with_filter(
            "project", cj.project.id)

        for image in image_instances:
            image.download(os.path.join(in_dir, "{id}.tif"))

        cj.job.update(progress=25, statusComment="Launching workflow...")
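        # the first call launches Icy headless (-hl) once for initialization;
        # the second executes the detection protocol on the downloaded images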
        call("java -cp /icy/lib/ -jar /icy/icy.jar -hl", shell=True)
        call(
            "cd /icy && java -cp /icy/lib/ -jar /icy/icy.jar -hl -x plugins.adufour.protocols.Protocols "
            "protocol=\"/icy/protocols/protocol.protocol\" inputFolder=\"{}\" extension=tif csvFileSuffix=_results "
            "scale3enable=true scale3sensitivity={}".format(
                in_dir, scale3sens),
            shell=True)

        # # remove existing annotations if any
        # for image in cj.monitor(image_instances, start=60, end=75, period=0.1, prefix="Delete previous annotations"):
        #     annotations = AnnotationCollection.fetch_with_filter({"image": image.id})
        #     for annotation in annotations:
        #         annotation.delete()

        cj.job.update(progress=75, statusComment="Extracting polygons...")
        annotations = AnnotationCollection()
        for image in cj.monitor(image_instances,
                                start=75,
                                end=95,
                                period=0.1,
                                prefix="Upload annotations"):
            file = str(image.id) + "_results.txt"
            path = os.path.join(in_dir, file)
            if os.path.isfile(path):
                (X, Y) = readcoords(path)
                for i in range(len(X)):
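                    # detector coordinates use a top-left origin; Cytomine's is
                    # bottom-left, hence the vertical flip on Y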
                    center = Point(X[i], image.height - Y[i])
                    annotations.append(
                        Annotation(
                            location=center.wkt,
                            id_image=image.id,
                            id_project=cj.parameters.cytomine_id_project))

                    # save in batches of 100 annotations to keep requests small
                    if len(annotations) % 100 == 0:
                        annotations.save()
                        annotations = AnnotationCollection()
            else:
                print("No output file at '{}' for image with id:{}.".format(
                    path, image.id),
                      file=sys.stderr)
        # Save last annotations
        annotations.save()

        # Launch the metrics computation here
        # TODO

        cj.job.update(progress=100,
                      status=Job.TERMINATED,
                      statusComment="Finished.")
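
Note: readcoords is a small parsing helper not shown in this listing; a minimal sketch, assuming the Icy protocol writes a delimited text file whose first two columns are the X and Y spot coordinates:

def readcoords(path):
    # Hypothetical sketch of the helper used above: parse the
    # "<image id>_results.txt" file produced by the Icy protocol.
    xs, ys = [], []
    with open(path) as f:
        for line in f:
            parts = line.replace(',', ' ').replace(';', ' ').split()
            if len(parts) >= 2:
                try:
                    xs.append(float(parts[0]))
                    ys.append(float(parts[1]))
                except ValueError:
                    continue  # skip header or malformed rows
    return xs, ys
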
def main(argv):
    # 0. Initialize Cytomine client and job
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialisation...")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path
        base_path = "{}".format(os.getenv("HOME"))
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(cj.job.id))
        in_path = os.path.join(working_path, "in")
        out_path = os.path.join(working_path, "out")
        gt_path = os.path.join(working_path, "ground_truth")
        tmp_path = os.path.join(working_path, "tmp")

        # exist_ok also covers the case where working_path already exists but
        # some of its subdirectories do not
        os.makedirs(in_path, exist_ok=True)
        os.makedirs(out_path, exist_ok=True)
        os.makedirs(gt_path, exist_ok=True)
        os.makedirs(tmp_path, exist_ok=True)

        # 2. Download the images (first input, then ground truth image)
        cj.job.update(
            progress=1,
            statusComment="Downloading images (to {})...".format(in_path))
        image_instances = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)
        input_images = [
            i for i in image_instances if gt_suffix not in i.originalFilename
        ]
        gt_images = [
            i for i in image_instances if gt_suffix in i.originalFilename
        ]

        for input_image in input_images:
            input_image.download(os.path.join(in_path, "{id}.tif"))

        for gt_image in gt_images:
            related_name = gt_image.originalFilename.replace(gt_suffix, '')
            related_image = [
                i for i in input_images if related_name == i.originalFilename
            ]
            if len(related_image) == 1:
                gt_image.download(
                    os.path.join(gt_path,
                                 "{}.tif".format(related_image[0].id)))

        # 3. Call the image analysis workflow using the run script
        cj.job.update(progress=25, statusComment="Launching workflow...")
        cj.job.update(progress=30,
                      statusComment="Execution: download model...")
        model_job = Job().fetch(cj.parameters.model_job_id)
        model_path = load_model(model_job, tmp_path, model_filename="weights.hf5")
        height = load_property(model_job, "image_height")
        width = load_property(model_job, "image_width")
        n_channels = load_property(model_job, "n_channels")
        train_mean = load_property(model_job, "train_mean")
        train_std = load_property(model_job, "train_std")

        # load data
        cj.job.update(progress=30,
                      statusComment="Execution: preparing data...")
        dims = height, width, n_channels

        # load input images
        images = load_data(
            cj, dims, in_path, **{
                "start": 35,
                "end": 45,
                "period": 0.1,
                "prefix": "Execution: load training input images"
            })
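        # standardize inputs with the statistics saved by the training job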
        images -= train_mean
        images /= train_std

        # load model
        cj.job.update(progress=45, statusComment="Execution: build model...")
        unet = create_unet(dims)
        unet.load_weights(model_path)

        # inference; image_names assumes load_data reads the "{id}.tif" files
        # in sorted filename order
        image_names = sorted(os.listdir(in_path))
        masks = np.zeros([len(images), dims[0], dims[1]], dtype=np.float32)
        for i, image_name in cj.monitor(enumerate(image_names),
                                        start=45,
                                        end=55,
                                        period=0.1,
                                        prefix="Execution: inference"):
            masks[i] = unet.predict(images[i:i + 1])[0].squeeze()
            cv2.imwrite(os.path.join(out_path, image_name),
                        (masks[i] >= cj.parameters.threshold_probas).astype(
                            np.uint8))

        # 4. Upload the annotation and masks to Cytomine (annotations are extracted from the mask using
        # the AnnotationExporter module)
        # for image in cj.monitor(input_images, start=60, end=80, period=0.1, prefix="Extracting and uploading polygons from masks"):
        #     file = "{}.tif".format(image.id)
        #     path = os.path.join(out_path, file)
        #     data = io.imread(path)
        #
        #     # extract objects
        #     slices = mask_to_objects_2d(data)
        #
        #     print("Found {} polygons in this image {}.".format(len(slices), image.id))
        #
        #     # upload
        #     collection = AnnotationCollection()
        #     for obj_slice in slices:
        #         collection.append(Annotation(
        #             location=affine_transform(obj_slice.polygon, [1, 0, 0, -1, 0, image.height]).wkt,
        #             id_image=image.id, id_project=cj.parameters.cytomine_id_project, property=[
        #                 {"key": "index", "value": str(obj_slice.label)}
        #             ]
        #         ))
        #     collection.save()

        # 5. Compute and upload the metrics
        cj.job.update(progress=80,
                      statusComment="Computing and uploading metrics...")
        outfiles, reffiles = zip(
            *[(os.path.join(out_path, "{}.tif".format(image.id)),
               os.path.join(gt_path, "{}.tif".format(image.id)))
              for image in input_images])

        results = computemetrics_batch(outfiles, reffiles, "PixCla", tmp_path)

        for key, value in results.items():
            Property(cj.job, key=key, value=str(value)).save()
        Property(cj.job,
                 key="IMAGE_INSTANCES",
                 value=str([im.id for im in input_images])).save()

        # 6. End
        cj.job.update(status=Job.TERMINATED,
                      progress=100,
                      statusComment="Finished.")
Example #28
def train(argv):
    parser = ArgumentParser(prog="CNN Object Counter Model Builder")

    # Cytomine
    parser.add_argument('--cytomine_host', dest='cytomine_host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='cytomine_public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='cytomine_private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_base_path', dest='cytomine_base_path',
                        default='/api/', help="The Cytomine base path")
    parser.add_argument('--cytomine_working_path', dest='cytomine_working_path',
                        default=None, help="The working directory (eg: /tmp)")
    parser.add_argument('--cytomine_id_software', dest='cytomine_software', type=int,
                        help="The Cytomine software identifier")
    parser.add_argument('--cytomine_id_project', dest='cytomine_project', type=int,
                        help="The Cytomine project identifier")
    parser.add_argument('--cytomine_force_download', dest='cytomine_force_download', type=str, default=True,
                        help="Force download from Cytomine or not")

    # Objects
    parser.add_argument('--cytomine_object_term', dest='cytomine_object_term', type=int,
                        help="The Cytomine identifier of object term")
    parser.add_argument('--cytomine_object_user', dest='cytomine_object_user', type=str,
                        help="The Cytomine identifier of object owner")
    parser.add_argument('--cytomine_object_reviewed_only', dest='cytomine_object_reviewed_only', type=str,
                        help="Whether objects have to be reviewed or not")

    # ROI
    parser.add_argument('--cytomine_roi_term', dest='cytomine_roi_term', type=int, default=None,
                        help="The Cytomine identifier of region of interest term")
    parser.add_argument('--cytomine_roi_user', dest='cytomine_roi_user', type=str,
                        help="The Cytomine identifier of ROI owner")
    parser.add_argument('--cytomine_roi_reviewed_only', dest='cytomine_roi_reviewed_only', type=str,
                        help="Whether ROIs have to be reviewed or not")

    # Pre-processing
    parser.add_argument('--pre_transformer', dest='pre_transformer',
                        default='density', choices=['edt', 'euclidean_distance_transform', 'density', None, 'None'],
                        help="Scoremap transformer (None, edt, euclidean_distance_transform, density)")
    parser.add_argument('--pre_alpha', dest='pre_alpha', type=int, default=3,
                        help="Exponential decrease rate of distance (if EDT)")

    # Subwindows for training
    parser.add_argument('--sw_input_size', dest='sw_input_size', type=int, default=128,
                        help="Size of input subwindow")
    parser.add_argument('--sw_colorspace', dest="sw_colorspace", type=str, default='RGB__rgb',
                        help="List of colorspace features")
    parser.add_argument('--sw_extr_npi', dest="sw_extr_npi", type=int, default=100,
                        help="Number of extracted subwindows per image (if 'random' mode)")

    # CNN
    parser.add_argument('--cnn_architecture', '--architecture', dest='cnn_architecture',
                        type=str, choices=['FCRN-A', 'FCRN-B'], default='FCRN-A')
    parser.add_argument('--cnn_initializer', '--initializer', dest='cnn_initializer', type=str, default='orthogonal')
    parser.add_argument('--cnn_batch_normalization', '--batch_normalization', dest='cnn_batch_normalization', type=str,
                        default=True)
    parser.add_argument('--cnn_learning_rate', '--learning_rate', '--lr', dest='cnn_learning_rate', type=float,
                        default=0.01)
    parser.add_argument('--cnn_momentum', '--momentum', dest='cnn_momentum', type=float, default=0.9)
    parser.add_argument('--cnn_nesterov', '--nesterov', dest='cnn_nesterov', type=str, default=True)
    parser.add_argument('--cnn_decay', '--decay', dest='cnn_decay', type=float, default=0.0)
    parser.add_argument('--cnn_epochs', '--epochs', dest='cnn_epochs', type=int, default=24)
    parser.add_argument('--cnn_batch_size', '--batch_size', dest='cnn_batch_size', type=int, default=16)

    # Dataset augmentation
    parser.add_argument('--augmentation', dest='augmentation', type=str, default=True)
    parser.add_argument('--aug_rotation_range', dest='rotation_range', type=float, default=0.)
    parser.add_argument('--aug_width_shift_range', dest='width_shift_range', type=float, default=0.)
    parser.add_argument('--aug_height_shift_range', dest='height_shift_range', type=float, default=0.)
    parser.add_argument('--aug_zoom_range', dest='zoom_range', type=float, default=0.)
    parser.add_argument('--aug_fill_mode', dest='fill_mode', type=str, default="reflect")
    # argparse's type=bool treats any non-empty string as True; parse these
    # flags with the same str2bool helper used for the other boolean options
    parser.add_argument('--aug_horizontal_flip', dest='horizontal_flip', type=str2bool, default=False)
    parser.add_argument('--aug_vertical_flip', dest='vertical_flip', type=str2bool, default=False)
    parser.add_argument('--aug_featurewise_center', dest='featurewise_center', type=str2bool, default=False)
    parser.add_argument('--aug_featurewise_std_normalization', dest='featurewise_std_normalization', type=str2bool,
                        default=False)

    # Execution
    parser.add_argument('--n_jobs', dest='n_jobs', type=int, default=1, help="Number of jobs")
    parser.add_argument('--verbose', '-v', dest='verbose', type=int, default=0, help="Level of verbosity")

    params, other = parser.parse_known_args(argv)
    if params.cytomine_working_path is None:
        params.cytomine_working_path = os.path.join(tempfile.gettempdir(), "cytomine")
    make_dirs(params.cytomine_working_path)

    params.cytomine_force_download = str2bool(params.cytomine_force_download)
    params.cytomine_object_reviewed_only = str2bool(params.cytomine_object_reviewed_only)
    params.cytomine_roi_reviewed_only = str2bool(params.cytomine_roi_reviewed_only)
    params.cnn_batch_normalization = str2bool(params.cnn_batch_normalization)
    params.cnn_nesterov = str2bool(params.cnn_nesterov)
    params.augmentation = str2bool(params.augmentation)

    # round the subwindow size up to a multiple of the network's total
    # downsampling factor (8 for FCRN-A, 4 for FCRN-B)
    d = 8. if params.cnn_architecture == 'FCRN-A' else 4.
    params.sw_size = (int(np.ceil(params.sw_input_size / d) * d),
                      int(np.ceil(params.sw_input_size / d) * d))
    params.sw_input_size = params.sw_size
    params.sw_output_size = params.sw_size
    params.sw_colorspace = params.sw_colorspace.split(' ')
    params.sw_extr_mode = 'random'
    params.cnn_regularizer = None
    params.mean_radius = 2
    params.k_factor = 100

    if params.augmentation:
        params.rotation_range = check_default(params.rotation_range, 30., return_list=False)
        params.width_shift_range = check_default(params.width_shift_range, 0.3, return_list=False)
        params.height_shift_range = check_default(params.height_shift_range, 0.3, return_list=False)
        params.zoom_range = check_default(params.zoom_range, 0.3, return_list=False)
        params.fill_mode = check_default(params.fill_mode, 'constant', return_list=False)
        params.horizontal_flip = check_default(params.horizontal_flip, True, return_list=False)
        params.vertical_flip = check_default(params.vertical_flip, True, return_list=False)
        params.featurewise_center = check_default(params.featurewise_center, False, return_list=False)
        params.featurewise_std_normalization = check_default(params.featurewise_std_normalization, False,
                                                             return_list=False)

    # Initialize logger
    logger = StandardOutputLogger(params.verbose)
    for key, val in sorted(vars(params).items()):
        logger.info("[PARAMETER] {}: {}".format(key, val))

    # Start job
    with CytomineJob(params.cytomine_host,
                     params.cytomine_public_key,
                     params.cytomine_private_key,
                     params.cytomine_software,
                     params.cytomine_project,
                     parameters=vars(params),
                     working_path=params.cytomine_working_path,
                     base_path=params.cytomine_base_path,
                     verbose=(params.verbose >= Logger.DEBUG)) as job:
        cytomine = job
        cytomine.update_job_status(job.job, status_comment="Starting...", progress=0)

        cytomine.update_job_status(job.job, status_comment="Loading training set...", progress=1)
        logger.i("Loading training set...")
        X, y = get_dataset(cytomine, params.cytomine_working_path, params.cytomine_project, params.cytomine_object_term,
                           params.cytomine_roi_term, params.cytomine_object_user, params.cytomine_object_reviewed_only,
                           params.cytomine_roi_user, params.cytomine_roi_reviewed_only, params.cytomine_force_download)
        logger.d("X size: {} samples".format(len(X)))
        logger.d("y size: {} samples".format(len(y)))

        # Rename parameters
        params.architecture = params.cnn_architecture
        params.initializer = params.cnn_initializer
        params.regularizer = params.cnn_regularizer
        params.batch_normalization = params.cnn_batch_normalization
        params.learning_rate = params.cnn_learning_rate
        params.momentum = params.cnn_momentum
        params.nesterov = params.cnn_nesterov
        params.decay = params.cnn_decay
        params.epochs = params.cnn_epochs
        params.batch_size = params.cnn_batch_size

        model_path = os.path.join(params.cytomine_working_path, "models", str(params.cytomine_software))
        model_file = os.path.join(model_path, "{}.h5".format(job.job.id))
        make_dirs(model_path)

        # Callbacks
        # checkpoint_callback = ModelCheckpoint(model_file, monitor='loss', save_best_only=True)
        lr_callback = LearningRateScheduler(lr_scheduler)
        callbacks = [lr_callback]

        logger.i("Training FCRN...")
        cytomine.update_job_status(job.job, status_comment="Training FCRN...", progress=5)
        estimator = FCRN(FCRN.build_fcrn, callbacks, **vars(params))
        estimator.fit(np.asarray(X), np.asarray(y))

        logger.i("Saving model...")
        cytomine.update_job_status(job.job, status_comment="Saving (best) model", progress=95)
        estimator.save(model_file)

        logger.i("Finished.")
        cytomine.update_job_status(job.job, status_comment="Finished.", progress=100)
Example #29
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # annotation filtering
        cj.logger.info(str(cj.parameters))

        # use only images from the current project
        cj.parameters.cytomine_id_projects = "{}".format(cj.parameters.cytomine_id_project)

        cj.job.update(progress=1, statusComment="Preparing execution (creating folders,...).")
        base_path, downloaded = setup_classify(
            args=cj.parameters, logger=cj.job_logger(1, 40),
            dest_pattern=os.path.join("{term}", "{image}_{id}.png"),
            root_path=Path.home(), set_folder="train", showTerm=True
        )

        x = np.array([f for annotation in downloaded for f in annotation.filenames])
        # dest_pattern encodes the term id as the crop's parent folder name
        y = np.array([int(os.path.basename(os.path.dirname(filepath))) for filepath in x])

        # transform classes
        cj.job.update(progress=50, statusComment="Transform classes...")
        classes = parse_domain_list(cj.parameters.cytomine_id_terms)
        positive_classes = parse_domain_list(cj.parameters.cytomine_positive_terms)
        classes = np.array(classes) if len(classes) > 0 else np.unique(y)
        n_classes = classes.shape[0]

        # filter unwanted terms
        cj.logger.info("Size before filtering:")
        cj.logger.info(" - x: {}".format(x.shape))
        cj.logger.info(" - y: {}".format(y.shape))
        keep = np.in1d(y, classes)
        x, y = x[keep], y[keep]
        cj.logger.info("Size after filtering:")
        cj.logger.info(" - x: {}".format(x.shape))
        cj.logger.info(" - y: {}".format(y.shape))

        if cj.parameters.cytomine_binary:
            cj.logger.info("Will be training on 2 classes ({} classes before binarization).".format(n_classes))
            y = np.in1d(y, positive_classes).astype(int)
        else:
            cj.logger.info("Will be training on {} classes.".format(n_classes))
            y = np.searchsorted(classes, y)

        # build model
        cj.job.update(progress=55, statusComment="Build model...")
        _, pyxit = build_models(
            n_subwindows=cj.parameters.pyxit_n_subwindows,
            min_size=cj.parameters.pyxit_min_size,
            max_size=cj.parameters.pyxit_max_size,
            target_width=cj.parameters.pyxit_target_width,
            target_height=cj.parameters.pyxit_target_height,
            interpolation=cj.parameters.pyxit_interpolation,
            transpose=cj.parameters.pyxit_transpose,
            colorspace=cj.parameters.pyxit_colorspace,
            fixed_size=cj.parameters.pyxit_fixed_size,
            verbose=int(cj.logger.level == 10),  # 10 == logging.DEBUG
            create_svm=cj.parameters.svm,
            C=cj.parameters.svm_c,
            random_state=cj.parameters.seed,
            n_estimators=cj.parameters.forest_n_estimators,
            min_samples_split=cj.parameters.forest_min_samples_split,
            max_features=cj.parameters.forest_max_features,
            n_jobs=cj.parameters.n_jobs
        )
        cj.job.update(progress=60, statusComment="Train model...")
        pyxit.fit(x, y)

        cj.job.update(progress=90, statusComment="Save model...")
        model_filename = joblib.dump(pyxit, os.path.join(base_path, "model.joblib"), compress=3)[0]

        AttachedFile(
            cj.job,
            domainIdent=cj.job.id,
            filename=model_filename,
            domainClassName="be.cytomine.processing.Job"
        ).upload()

        Property(cj.job, key="classes", value=stringify(classes)).save()
        Property(cj.job, key="binary", value=cj.parameters.cytomine_binary).save()
        Property(cj.job, key="positive_classes", value=stringify(positive_classes)).save()

        cj.job.update(status=Job.TERMINATED, statusComment="Finished.", progress=100)
Example #30
def main(argv):
    with CytomineJob.from_cli(argv) as conn:
        conn.job.update(progress=0, statusComment="Initialization..")
        base_path = "{}".format(os.getenv("HOME"))  # Mandatory for Singularity
        working_path = os.path.join(base_path, str(conn.job.id))

        # Load pretrained model (assume the best of all)
        conn.job.update(progress=0,
                        statusComment="Loading segmentation model..")

        with open("/models/resnet50b_fpn256/config.json") as f:
            config = json.load(f)
        model = FPN.build_resnet_fpn(
            name=config['name'],
            input_size=conn.parameters.dataset_patch_size,  # must be divisible by 16
            input_channels=1 if config['input']['mode'] == 'grayscale' else 3,
            output_channels=config['fpn']['out_channels'],
            num_classes=2,  # legacy
            in_features=config['fpn']['in_features'],
            out_features=config['fpn']['out_features'])
        model.to(_DEVICE)
        model_dict = torch.load(config['weights'],
                                map_location=torch.device(_DEVICE))
        model.load_state_dict(model_dict['model'])

        # Select images to process
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)

        if conn.parameters.cytomine_id_images != 'all':
            wanted_ids = {
                int(x.strip())
                for x in conn.parameters.cytomine_id_images.split(',')
            }
            images = [image for image in images if image.id in wanted_ids]
        images_id = [image.id for image in images]

        # Download selected images into "working_directory"
        img_path = os.path.join(working_path, "images")
        os.makedirs(img_path)
        for image in conn.monitor(
                images,
                start=2,
                end=50,
                period=0.1,
                prefix="Downloading images into working directory.."):
            fname, fext = os.path.splitext(image.filename)
            if not image.download(dest_pattern=os.path.join(
                    img_path, "{}{}".format(image.id, fext))):
                print("Failed to download image {}".format(image.filename))

        # create a file that lists all images (used by PatchBasedDataset)
        conn.job.update(progress=50,
                        statusComment="Preparing data for execution..")
        images = os.listdir(img_path)
        images = list(map(lambda x: x + '\n', images))
        with open(os.path.join(working_path, 'images.txt'), 'w') as f:
            f.writelines(images)

        # Prepare dataset and dataloader objects
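        # note: fext is the extension of the last downloaded image; this
        # assumes all selected images share the same format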
        ImgTypeBits = {'.dcm': 16}
        channel_bits = ImgTypeBits.get(fext.lower(), 8)
        mean, std = compute_mean_and_std(img_path, bits=channel_bits)

        dataset = InferencePatchBasedDataset(
            path=working_path,
            subset='images',
            patch_size=conn.parameters.dataset_patch_size,
            mode=config['input']['mode'],
            bits=channel_bits,
            mean=mean,
            std=std)

        dataloader = DataLoader(
            dataset=dataset,
            batch_size=conn.parameters.model_batch_size,
            drop_last=False,
            shuffle=False,
            num_workers=0,
            collate_fn=InferencePatchBasedDataset.collate_fn)

        # Go over images
        conn.job.update(status=Job.RUNNING,
                        progress=55,
                        statusComment="Running inference on images..")
        results = inference_on_segmentation(
            model, dataloader, conn.parameters.postprocess_p_threshold)

        for id_image in conn.monitor(
                images_id,
                start=90,
                end=95,
                prefix="Deleting old annotations on images..",
                period=0.1):
            # Delete old annotations
            del_annotations = AnnotationCollection()
            del_annotations.image = id_image
            del_annotations.user = conn.job.id
            del_annotations.project = conn.parameters.cytomine_id_project
            del_annotations.term = conn.parameters.cytomine_id_predict_term
            del_annotations.fetch()
            for annotation in del_annotations:
                annotation.delete()

        conn.job.update(
            status=Job.RUNNING,
            progress=95,
            statusComment="Uploading new annotations to Cytomine server..")
        annotations = AnnotationCollection()
        for instance in results:
            idx, _ = os.path.splitext(instance['filename'])
            width, height = instance['size']

            for box in instance['bbox']:
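                # boxes are (xmin, ymin, xmax, ymax) with a top-left origin;
                # flip y to Cytomine's bottom-left coordinate system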
                points = [
                    Point(box[0], height - 1 - box[1]),
                    Point(box[0], height - 1 - box[3]),
                    Point(box[2], height - 1 - box[3]),
                    Point(box[2], height - 1 - box[1])
                ]
                annotation = Polygon(points)

                annotations.append(
                    Annotation(
                        location=annotation.wkt,
                        id_image=int(idx),
                        id_terms=[conn.parameters.cytomine_id_predict_term],
                        id_project=conn.parameters.cytomine_id_project))
        annotations.save()

        conn.job.update(status=Job.TERMINATED,
                        statusComment="Finished.",
                        progress=100)