Code Example #1
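_load_rectangles converts the detections into rectangle annotations, appends them to an AnnotationCollection while advancing the job progress, and saves the whole collection in a single request.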
def _load_rectangles(job: Job, image_id: str, term: int,
                     detections: dict) -> None:

    progress = 10
    job.update(
        progress=progress,
        status=Job.RUNNING,
        statusComment=(f"Uploading detections of type rectangles "
                       f"to image {image_id} with term {term}"))

    rectangles = _generate_rectangles(detections)

    # Upload annotations to the server; the remaining 85% of the progress
    # bar is spread over the rectangles (guard against an empty list)
    delta = 85 / len(rectangles) if rectangles else 0
    annotations = AnnotationCollection()
    for rectangle in rectangles:
        annotations.append(
            Annotation(location=rectangle.wkt,
                       id_image=image_id,
                       id_terms=[term]))
        progress += delta
        job.update(progress=int(progress), status=Job.RUNNING)

    annotations.save()
    progress = 100
    job.update(progress=progress,
               status=Job.TERMINATED,
               statusComment="All detections have been uploaded")
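
A minimal, hypothetical call sketch (job_id and detections are placeholders for values a real detector run would provide; neither is defined in this excerpt):

job = Job().fetch(job_id)
_load_rectangles(job, image_id="1234", term=5678, detections=detections)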
Code Example #2
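_load_polygons is the polygon counterpart: all detections are uploaded as a single annotation built from the combined geometry's WKT, so progress simply steps from 10 to 85 to 100.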
def _load_polygons(job: Job, image_id: str, term: int,
                   detections: dict) -> None:

    progress = 10
    job.update(
        progress=progress,
        status=Job.RUNNING,
        statusComment=(f"Uploading detections of type polygons "
                       f"to image {image_id} with term {term}"))

    polygons = _generate_polygons(detections)

    Annotation(location=polygons.wkt,
               id_image=image_id,
               id_terms=[term]).save()

    progress = 85
    job.update(progress=progress, status=Job.RUNNING)

    progress = 100
    job.update(progress=progress,
               status=Job.TERMINATED,
               statusComment="All detections have been uploaded")
Code Example #3
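A full segmentation run: the job fetches a Pyxit model attached to an earlier training job, builds a tiled segmentation workflow, processes each image or ROI, filters the detected polygons by area, maps them back into the base-image referential (including the zoom-level correction), and saves them as annotations.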
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # use only images from the current project
        cj.job.update(progress=1, statusComment="Preparing execution")

        # extract images to process
        if cj.parameters.cytomine_zoom_level > 0 and (
                cj.parameters.cytomine_tile_size != 256
                or cj.parameters.cytomine_tile_overlap != 0):
            raise ValueError(
                "when using zoom_level > 0, tile size should be 256 "
                "(given {}) and overlap should be 0 (given {})".format(
                    cj.parameters.cytomine_tile_size,
                    cj.parameters.cytomine_tile_overlap))

        cj.job.update(
            progress=1,
            statusComment="Preparing execution (creating folders,...).")
        # working path
        root_path = str(Path.home())
        working_path = os.path.join(root_path, "images")
        os.makedirs(working_path, exist_ok=True)

        # load training information
        cj.job.update(progress=5,
                      statusComment="Extract properties from training job.")
        train_job = Job().fetch(cj.parameters.cytomine_id_job)
        properties = PropertyCollection(train_job).fetch().as_dict()
        binary = str2bool(properties["binary"].value)
        classes = parse_domain_list(properties["classes"].value)

        cj.job.update(progress=10, statusComment="Download the model file.")
        attached_files = AttachedFileCollection(train_job).fetch()
        model_file = attached_files.find_by_attribute("filename",
                                                      "model.joblib")
        model_filepath = os.path.join(root_path, "model.joblib")
        model_file.download(model_filepath, override=True)
        pyxit = joblib.load(model_filepath)

        # set n_jobs
        pyxit.base_estimator.n_jobs = cj.parameters.n_jobs
        pyxit.n_jobs = cj.parameters.n_jobs

        cj.job.update(progress=45, statusComment="Build workflow.")
        builder = SSLWorkflowBuilder()
        builder.set_tile_size(cj.parameters.cytomine_tile_size,
                              cj.parameters.cytomine_tile_size)
        builder.set_overlap(cj.parameters.cytomine_tile_overlap)
        builder.set_tile_builder(
            CytomineTileBuilder(working_path, n_jobs=cj.parameters.n_jobs))
        builder.set_logger(StandardOutputLogger(level=Logger.INFO))
        builder.set_n_jobs(1)
        builder.set_background_class(0)
        # a value of 0 prevents merging but still requires running the
        # merging check procedure (inefficient)
        builder.set_distance_tolerance(2 if cj.parameters.union_enabled else 0)
        builder.set_segmenter(
            ExtraTreesSegmenter(
                pyxit=pyxit,
                classes=classes,
                prediction_step=cj.parameters.pyxit_prediction_step,
                background=0,
                min_std=cj.parameters.tile_filter_min_stddev,
                max_mean=cj.parameters.tile_filter_max_mean))
        workflow = builder.get()

        area_checker = AnnotationAreaChecker(
            min_area=cj.parameters.min_annotation_area,
            max_area=cj.parameters.max_annotation_area)

        def get_term(label):
            if binary:
                if "cytomine_id_predict_term" not in cj.parameters:
                    return []
                else:
                    return [int(cj.parameters.cytomine_id_predict_term)]
            # multi-class
            return [label]

        zones = extract_images_or_rois(cj.parameters)
        for zone in cj.monitor(zones,
                               start=50,
                               end=90,
                               period=0.05,
                               prefix="Segmenting images/ROIs"):
            results = workflow.process(zone)

            annotations = AnnotationCollection()
            for obj in results:
                if not area_checker.check(obj.polygon):
                    continue
                polygon = obj.polygon
                if isinstance(zone, ImageWindow):
                    polygon = affine_transform(
                        polygon,
                        [1, 0, 0, 1, zone.abs_offset_x, zone.abs_offset_y])
                polygon = change_referential(polygon, zone.base_image.height)
                if cj.parameters.cytomine_zoom_level > 0:
                    zoom_mult = (2**cj.parameters.cytomine_zoom_level)
                    polygon = affine_transform(
                        polygon, [zoom_mult, 0, 0, zoom_mult, 0, 0])
                annotations.append(
                    Annotation(location=polygon.wkt,
                               id_terms=get_term(obj.label),
                               id_project=cj.project.id,
                               id_image=zone.base_image.image_instance.id))
            annotations.save()

        cj.job.update(status=Job.TERMINATED,
                      statusComment="Finished",
                      progress=100)
Code Example #4
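A U-Net inference job: it downloads input and ground-truth images, loads trained weights attached to an earlier job, normalises the inputs with the stored mean and standard deviation, writes thresholded prediction masks, and computes and uploads metrics (the annotation-upload step is kept as commented-out code).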
def main(argv):
    # 0. Initialize Cytomine client and job
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialisation...")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path
        base_path = os.getenv("HOME")
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(cj.job.id))
        in_path = os.path.join(working_path, "in")
        out_path = os.path.join(working_path, "out")
        gt_path = os.path.join(working_path, "ground_truth")
        tmp_path = os.path.join(working_path, "tmp")

        # create the working directories if they do not already exist
        for path in (working_path, in_path, out_path, gt_path, tmp_path):
            os.makedirs(path, exist_ok=True)

        # 2. Download the images (first input, then ground truth image)
        cj.job.update(
            progress=1,
            statusComment="Downloading images (to {})...".format(in_path))
        image_instances = ImageInstanceCollection().fetch_with_filter(
            "project", cj.parameters.cytomine_id_project)
        input_images = [
            i for i in image_instances if gt_suffix not in i.originalFilename
        ]
        gt_images = [
            i for i in image_instances if gt_suffix in i.originalFilename
        ]

        # "{id}" in the destination pattern is expanded by the client to the
        # identifier of each downloaded image
        for input_image in input_images:
            input_image.download(os.path.join(in_path, "{id}.tif"))

        for gt_image in gt_images:
            related_name = gt_image.originalFilename.replace(gt_suffix, '')
            related_image = [
                i for i in input_images if related_name == i.originalFilename
            ]
            if len(related_image) == 1:
                gt_image.download(
                    os.path.join(gt_path,
                                 "{}.tif".format(related_image[0].id)))

        # 3. Call the image analysis workflow using the run script
        cj.job.update(progress=25, statusComment="Launching workflow...")
        cj.job.update(progress=30,
                      statusComment="Execution: download model...")
        model_job = Job().fetch(cj.parameters.model_job_id)
        model_path = load_model(model_job,
                                tmp_path,
                                model_filename="weights.hf5")
        height = load_property(model_job, "image_height")
        width = load_property(model_job, "image_width")
        n_channels = load_property(model_job, "n_channels")
        train_mean = load_property(model_job, "train_mean")
        train_std = load_property(model_job, "train_std")

        # load data
        cj.job.update(progress=30,
                      statusComment="Execution: preparing data...")
        dims = height, width, n_channels

        # load input images
        images = load_data(
            cj, dims, in_path, **{
                "start": 35,
                "end": 45,
                "period": 0.1,
                "prefix": "Execution: load training input images"
            })
        images -= train_mean
        images /= train_std

        # load model
        cj.job.update(progress=45, statusComment="Execution: build model...")
        unet = create_unet(dims)
        unet.load_weights(model_path)

        # inference: masks are written as "<image id>.tif" so the metrics step
        # below can pair them with the ground truth (this assumes load_data
        # keeps the ordering of input_images)
        masks = np.zeros([len(images), 1, dims[0], dims[1]], dtype=np.uint8)
        for i, image in cj.monitor(enumerate(input_images),
                                   start=45,
                                   end=55,
                                   period=0.1,
                                   prefix="Execution: inference"):
            masks[i] = unet.predict([images[i]])[0]
            cv2.imwrite(os.path.join(out_path, "{}.tif".format(image.id)),
                        (masks[i] >= cj.parameters.threshold_probas).astype(
                            np.uint8))

        # 4. Upload the annotation and masks to Cytomine (annotations are extracted from the mask using
        # the AnnotationExporter module)
        # for image in cj.monitor(input_images, start=60, end=80, period=0.1, prefix="Extracting and uploading polygons from masks"):
        #     file = "{}.tif".format(image.id)
        #     path = os.path.join(out_path, file)
        #     data = io.imread(path)
        #
        #     # extract objects
        #     slices = mask_to_objects_2d(data)
        #
        #     print("Found {} polygons in this image {}.".format(len(slices), image.id))
        #
        #     # upload
        #     collection = AnnotationCollection()
        #     for obj_slice in slices:
        #         collection.append(Annotation(
        #             location=affine_transform(obj_slice.polygon, [1, 0, 0, -1, 0, image.height]).wkt,
        #             id_image=image.id, id_project=cj.parameters.cytomine_id_project, property=[
        #                 {"key": "index", "value": str(obj_slice.label)}
        #             ]
        #         ))
        #     collection.save()

        # 5. Compute and upload the metrics
        cj.job.update(progress=80,
                      statusComment="Computing and uploading metrics...")
        outfiles, reffiles = zip(
            *[(os.path.join(out_path, "{}.tif".format(image.id)),
               os.path.join(gt_path, "{}.tif".format(image.id)))
              for image in input_images])

        results = computemetrics_batch(outfiles, reffiles, "PixCla", tmp_path)

        for key, value in results.items():
            Property(cj.job, key=key, value=str(value)).save()
        Property(cj.job,
                 key="IMAGE_INSTANCES",
                 value=str([im.id for im in input_images])).save()

        # 6. End
        cj.job.update(status=Job.TERMINATED,
                      progress=100,
                      statusComment="Finished.")
Code Example #5
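A minimal variant that does nothing but mark the job as finished.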
def _load_multi_class_points(job: Job) -> None:

    progress = 100
    job.update(progress=progress,
               status=Job.TERMINATED,
               statusComment="Job finished")
Code Example #6
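A landmark-detection job: for each term it downloads the per-term model, covariance and parameter files attached to the training job, locates the landmark in every input image, rasterises the positions into one label image per input, and uploads the results and metrics.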
def main():
	with NeubiasJob.from_cli(sys.argv) as conn:
		problem_cls = get_discipline(conn, default=CLASS_LNDDET)
		is_2d = True
		conn.job.update(status=Job.RUNNING, progress=0, statusComment="Initialization of the prediction phase")
		in_images, gt_images, in_path, gt_path, out_path, tmp_path = prepare_data(problem_cls, conn, is_2d=is_2d, **conn.flags)
		list_imgs = [int(image.rstrip('.tif')) for image in os.listdir(in_path) if image.endswith('.tif')]

		train_job = Job().fetch(conn.parameters.model_to_use)
		properties = PropertyCollection(train_job).fetch()
		str_terms = ""
		for prop in properties:
			prop_terms = prop.fetch(key='id_terms')
			if prop_terms is not None:
				str_terms = prop_terms.value
		term_list = [int(x) for x in str_terms.split(' ')]
		attached_files = AttachedFileCollection(train_job).fetch()

		hash_pos = {}
		hash_size = {}
		for id_term in conn.monitor(term_list, start=10, end=70, period=0.05, prefix="Finding landmarks for terms..."):
			model_file = find_by_attribute(attached_files, "filename", "%d_model.joblib"%id_term)
			model_filepath = os.path.join(in_path, "%d_model.joblib"%id_term)
			model_file.download(model_filepath, override=True)
			cov_file = find_by_attribute(attached_files, 'filename', '%d_cov.joblib'%id_term)
			cov_filepath = os.path.join(in_path, "%d_cov.joblib"%id_term)
			cov_file.download(cov_filepath, override=True)
			parameters_file = find_by_attribute(attached_files, 'filename', '%d_parameters.joblib'%id_term)
			parameters_filepath = os.path.join(in_path, '%d_parameters.joblib'%id_term)
			parameters_file.download(parameters_filepath, override=True)

			model = joblib.load(model_filepath)
			[mx, my, cm] = joblib.load(cov_filepath)
			parameters_hash = joblib.load(parameters_filepath)
			feature_parameters = None
			if parameters_hash['feature_type'] in ['haar', 'gaussian']:
				fparameters_file = find_by_attribute(attached_files, 'filename', "%d_fparameters.joblib"%id_term)
				fparameters_filepath = os.path.join(in_path, "%d_fparameters.joblib"%id_term)
				fparameters_file.download(fparameters_filepath, override=True)
				feature_parameters = joblib.load(fparameters_filepath)
			for id_img in list_imgs:
				(x, y, height, width) = searchpoint_cytomine(
					in_path, id_img, model, mx, my, cm,
					1. / (2. ** np.arange(parameters_hash['model_depth'])),
					parameters_hash['window_size'], parameters_hash['feature_type'],
					feature_parameters, 'tif', parameters_hash['model_npred'])
				if id_img not in hash_size:
					hash_size[id_img] = (height, width)
					hash_pos[id_img] = []
				hash_pos[id_img].append((id_term, x, y))
		conn.job.update(status=Job.RUNNING, progress=95, statusComment="Uploading the results...")
		for id_img in list_imgs:
			(h, w) = hash_size[id_img]
			lbl_img = np.zeros((h, w), 'uint8')
			for (id_term, x, y) in hash_pos[id_img]:
				intx = int(x)
				inty = int(y)
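				# if the pixel is already occupied by another term, shift this
				# landmark to the nearest still-empty pixel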
				if lbl_img[inty, intx] > 0:
					(ys, xs) = np.where(lbl_img==0)
					dis = np.sqrt((ys-y)**2 + (xs-x)**2)
					j = np.argmin(dis)
					intx = int(xs[j])
					inty = int(ys[j])
				lbl_img[inty, intx] = id_term
			imwrite(path=os.path.join(out_path, '%d.tif'%id_img), image=lbl_img.astype(np.uint8), is_2d=is_2d)
		upload_data(problem_cls, conn, in_images, out_path, **conn.flags, is_2d=is_2d, monitor_params={"start": 70, "end": 90, "period": 0.1})
		conn.job.update(progress=90, statusComment="Computing and uploading metrics (if necessary)...")
		upload_metrics(problem_cls, conn, in_images, gt_path, out_path, tmp_path, **conn.flags)
		conn.job.update(status=Job.TERMINATED, progress=100, statusComment="Finished.")
Code Example #7
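The same landmark search as the previous example, except that the predicted positions are uploaded directly as point annotations instead of being rasterised into label images.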
def main():
    with CytomineJob.from_cli(sys.argv) as conn:
        base_path = os.getenv("HOME")
        working_path = os.path.join(base_path, str(conn.job.id))
        in_path = os.path.join(working_path, "in/")
        out_path = os.path.join(working_path, "out/")

        tr_working_path = os.path.join(base_path,
                                       str(conn.parameters.model_to_use))
        tr_out_path = os.path.join(tr_working_path, "out/")

        os.makedirs(working_path, exist_ok=True)
        os.makedirs(in_path, exist_ok=True)

        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        list_imgs = []
        if conn.parameters.images_to_predict == 'all':
            for image in images:
                list_imgs.append(int(image.id))
                image.dump(os.path.join(in_path, '%d.jpg' % (image.id)))
        else:
            list_imgs = [
                int(id_img)
                for id_img in conn.parameters.images_to_predict.split(',')
            ]
            for image in images:
                if image.id in list_imgs:
                    image.dump(os.path.join(in_path, '%d.jpg' % (image.id)))

        annotation_collection = AnnotationCollection()
        train_job = Job().fetch(conn.parameters.model_to_use)
        properties = PropertyCollection(train_job).fetch()
        str_terms = ""
        for prop in properties:
            prop_terms = prop.fetch(key='id_terms')
            if prop_terms is not None:
                str_terms = prop_terms.value
        term_list = [int(x) for x in str_terms.split(' ')]
        attached_files = AttachedFileCollection(train_job).fetch()

        for id_term in conn.monitor(term_list,
                                    start=10,
                                    end=90,
                                    period=0.05,
                                    prefix="Finding landmarks for terms..."):
            model_file = find_by_attribute(attached_files, "filename",
                                           "%d_model.joblib" % id_term)
            model_filepath = os.path.join(in_path, "%d_model.joblib" % id_term)
            model_file.download(model_filepath, override=True)
            cov_file = find_by_attribute(attached_files, 'filename',
                                         '%d_cov.joblib' % id_term)
            cov_filepath = os.path.join(in_path, "%d_cov.joblib" % id_term)
            cov_file.download(cov_filepath, override=True)
            parameters_file = find_by_attribute(
                attached_files, 'filename', '%d_parameters.joblib' % id_term)
            parameters_filepath = os.path.join(
                in_path, '%d_parameters.joblib' % id_term)
            parameters_file.download(parameters_filepath, override=True)

            model = joblib.load(model_filepath)
            [mx, my, cm] = joblib.load(cov_filepath)
            parameters_hash = joblib.load(parameters_filepath)
            feature_parameters = None
            if parameters_hash['feature_type'] in ['haar', 'gaussian']:
                fparameters_file = find_by_attribute(
                    attached_files, 'filename',
                    "%d_fparameters.joblib" % id_term)
                fparameters_filepath = os.path.join(
                    in_path, "%d_fparameters.joblib" % id_term)
                fparameters_file.download(fparameters_filepath, override=True)
                feature_parameters = joblib.load(fparameters_filepath)
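            # predict the landmark position for this term in every image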
            for id_img in list_imgs:
                (x, y) = searchpoint_cytomine(
                    in_path, id_img, model, mx, my, cm,
                    1. / (2.**np.arange(parameters_hash['model_depth'])),
                    parameters_hash['window_size'],
                    parameters_hash['feature_type'], feature_parameters, 'jpg',
                    parameters_hash['model_npred'])
                point = Point(x, y)
                annotation_collection.append(
                    Annotation(location=point.wkt,
                               id_image=id_img,
                               id_terms=[id_term],
                               id_project=conn.parameters.cytomine_id_project))

        annotation_collection.save()
Code Example #8
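A three-phase landmark pipeline: phase 1 computes a probability map per image, phase 2 refines it per term with dedicated regressors, and phase 3 combines the per-term maps into final coordinates that are written out as label images and uploaded together with metrics.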
def main():
	with NeubiasJob.from_cli(sys.argv) as conn:
		problem_cls = get_discipline(conn, default=CLASS_LNDDET)
		conn.job.update(status=Job.RUNNING, progress=0, statusComment="Initialization of the prediction phase")
		in_images, gt_images, in_path, gt_path, out_path, tmp_path = prepare_data(problem_cls, conn, is_2d=True, **conn.flags)
		train_job = Job().fetch(conn.parameters.model_to_use)
		properties = PropertyCollection(train_job).fetch()
		str_terms = ""
		for prop in properties:
			prop_terms = prop.fetch(key='id_terms')
			if prop_terms is not None:
				str_terms = prop_terms.value
		term_list = [int(x) for x in str_terms.split(' ')]
		attached_files = AttachedFileCollection(train_job).fetch()
		model_file = find_by_attribute(attached_files, "filename", "model_phase1.joblib")
		model_filepath = os.path.join(in_path, "model_phase1.joblib")
		model_file.download(model_filepath, override=True)
		clf = joblib.load(model_filepath)
		pr_ims = [int(p) for p in conn.parameters.cytomine_predict_images.split(',')]
		tifimg = readimage(in_path, pr_ims[0], image_type='tif')
		init_h = 100
		init_w = 100
		if len(tifimg.shape)==3:
			(init_h, init_w, init_d) = tifimg.shape
		else:
			(init_h, init_w) = tifimg.shape
		offset_file = find_by_attribute(attached_files, "filename", "offsets_phase1.joblib")
		offset_filepath = os.path.join(in_path, "offsets_phase1.joblib")
		offset_file.download(offset_filepath, override=True)
		feature_offsets_1 = joblib.load(offset_filepath)
		train_parameters = {}
		for hashmap in train_job.jobParameters:
			train_parameters[hashmap['name']] = hashmap['value']
		train_parameters['model_delta'] = float(train_parameters['model_delta'])
		train_parameters['model_sde'] = float(train_parameters['model_sde'])
		train_parameters['model_T'] = int(train_parameters['model_T'])
		for j in conn.monitor(pr_ims, start=10, end=33, period=0.05, prefix="Phase 1 for images..."):
			probability_map = probability_map_phase_1(in_path, j, clf, feature_offsets_1, float(train_parameters['model_delta']))
			filesave = os.path.join(out_path, 'pmap_%d.npy' % j)
			np.savez_compressed(filesave, probability_map)

		clf = None

		coords_file = find_by_attribute(attached_files, "filename", "coords.joblib")
		coords_filepath = os.path.join(in_path, "coords.joblib")
		coords_file.download(coords_filepath, override=True)
		(Xc, Yc) = joblib.load(coords_filepath)

		for j in conn.monitor(pr_ims, start=33, end=66, period=0.05, prefix="Phase 2 for images..."):
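			# np.savez_compressed appended '.npz' to the '.npy' name used in
			# phase 1, hence the double extension here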
			filesave = os.path.join(out_path, 'pmap_%d.npy.npz' % j)
			probability_map = np.load(filesave)['arr_0']
			for id_term in term_list:
				reg_file = find_by_attribute(attached_files, "filename", "reg_%d_phase2.joblib"%id_term)
				reg_filepath = os.path.join(in_path, "reg_%d_phase2.joblib"%id_term)
				reg_file.download(reg_filepath, override=True)
				reg = joblib.load(reg_filepath)
				off_file = find_by_attribute(attached_files, "filename", 'offsets_%d_phase2.joblib' % id_term)
				off_filepath = os.path.join(in_path, 'offsets_%d_phase2.joblib' % id_term)
				off_file.download(off_filepath, override=True)
				feature_offsets_2 = joblib.load(off_filepath)
				probability_map_phase_2 = agregation_phase_2(
					in_path, j, id_term-1, probability_map, reg,
					train_parameters['model_delta'], feature_offsets_2,
					conn.parameters.model_filter_size, conn.parameters.model_beta,
					conn.parameters.model_n_iterations)
				filesave = os.path.join(out_path, 'pmap2_%d_%d.npy' % (j, id_term))
				np.savez_compressed(filesave, probability_map_phase_2)

		edge_file = find_by_attribute(attached_files, "filename", "model_edges.joblib")
		edge_filepath = os.path.join(in_path, "model_edges.joblib")
		edge_file.download(edge_filepath, override=True)
		edges = joblib.load(edge_filepath)
		for j in conn.monitor(pr_ims, start=66, end=90, period=0.05, prefix="Phase 3 for images..."):
			filesave = os.path.join(out_path, 'pmap2_%d_%d.npy.npz' % (j, term_list[0]))
			probability_map = np.load(filesave)['arr_0']
			(hpmap, wpmap) = probability_map.shape
			probability_volume = np.zeros((hpmap, wpmap, len(term_list)))
			probability_volume[:, :, 0] = probability_map
			for i in range(1, len(term_list)):
				filesave = os.path.join(out_path, 'pmap2_%d_%d.npy.npz' % (j, term_list[i]))
				probability_volume[:, :, i] = np.load(filesave)['arr_0']
			x_final, y_final = compute_final_solution_phase_3(
				Xc, Yc, probability_volume, conn.parameters.model_n_candidates,
				train_parameters['model_sde'], train_parameters['model_delta'],
				train_parameters['model_T'], edges)
			lbl_img = np.zeros((init_h, init_w), 'uint8')
			for i in range(x_final.size):
				x = min(init_w-1, max(0, int(x_final[i])))
				y = min(init_h-1, max(0, int(y_final[i])))
				lbl_img[y, x] = term_list[i]
			imwrite(path=os.path.join(out_path, '%d.tif' % j), image=lbl_img.astype(np.uint8), is_2d=True)
		upload_data(problem_cls, conn, in_images, out_path, **conn.flags, is_2d=True, monitor_params={"start": 90, "end": 95, "period": 0.1})
		conn.job.update(progress=95, statusComment="Computing and uploading metrics (if necessary)...")
		upload_metrics(problem_cls, conn, in_images, gt_path, out_path, tmp_path, **conn.flags)
		conn.job.update(status=Job.TERMINATED, progress=100, statusComment="Finished.")
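Code Example #9
A classification job: it downloads a pickled model attached to a model job, dumps the crop of every annotation, extracts deep features with a pre-trained network, predicts a term per crop with the stored classifier, and re-uploads the annotations with the predicted terms.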
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        # prepare paths
        working_path = str(Path.home())
        data_path = os.path.join(working_path, "pred_data")
        if not os.path.exists(data_path):
            os.makedirs(data_path)

        model_filename = "model.pkl"

        cj.job.update(progress=5, statusComment="Download model ...")
        model_job = Job().fetch(cj.parameters.cytomine_model_job_id)
        attached_files = AttachedFileCollection(model_job).fetch_with_filter(
            "project", cj.project.id)
        if len(attached_files) != 1:
            raise ValueError(
                "More or less than 1 file attached to the Job (found {} file(s))."
                .format(len(attached_files)))
        attached_file = attached_files[0]
        if attached_file.filename != model_filename:
            raise ValueError(
                "Expected model file name is '{}' (found: '{}').".format(
                    model_filename, attached_file.filename))
        model_path = os.path.join(working_path, model_filename)
        attached_file.download(model_path)

        # load model
        with open(model_path, "rb") as file:
            data = pickle.load(file)
            model = data["model"]
            classifier = data["classifier"]
            network = data["network"]
            reduction = data["reduction"]

        # load and dump annotations
        cj.job.update(progress=10, statusComment="Download annotations.")
        annotations = get_annotations(
            project_id=cj.parameters.cytomine_project_id,
            images=parse_list_or_none(cj.parameters.cytomine_images_ids),
            users=parse_list_or_none(cj.parameters.cytomine_users_ids),
            showWKT=True)

        cj.job.update(statusComment="Fetch crops.", progress=15)
        n_samples = len(annotations)
        x = np.zeros([n_samples], dtype=object)  # np.object was removed in NumPy 1.24
        for i, annotation in cj.monitor(enumerate(annotations),
                                        start=15,
                                        end=40,
                                        prefix="Fetch crops",
                                        period=0.1):
            file_format = os.path.join(data_path, "{id}.png")
            if not annotation.dump(dest_pattern=file_format):
                raise ValueError("Download error for annotation '{}'.".format(
                    annotation.id))
            x[i] = file_format.format(id=annotation.id)

        available_nets = {
            MODEL_RESNET50, MODEL_VGG19, MODEL_VGG16, MODEL_INCEPTION_V3,
            MODEL_INCEPTION_RESNET_V2, MODEL_MOBILE, MODEL_DENSE_NET_201,
            MODEL_NASNET_LARGE, MODEL_NASNET_MOBILE
        }

        if network not in available_nets:
            raise ValueError(
                "Invalid value (='{}') for parameter 'network'.".format(
                    network))
        if reduction not in {"average_pooling"}:
            raise ValueError(
                "Invalid value (='{}') for parameter 'reduction'.".format(
                    reduction))
        if classifier not in {"svm"}:
            raise ValueError(
                "Invalid value (='{}') for parameter 'classifier'.".format(
                    classifier))

        # prepare network
        cj.job.update(statusComment="Load neural network '{}'".format(network),
                      progress=40)
        features = PretrainedModelFeatures(model=network,
                                           layer="last",
                                           reduction=reduction,
                                           weights="imagenet")
        height, width, _ = features._get_input_shape(network)
        loader = ImageLoader(load_size_range=(height, height),
                             crop_size=height,
                             random_crop=False)

        cj.job.update(statusComment="Transform features.", progress=50)
        x_feat = batch_transform(loader,
                                 features,
                                 x,
                                 logger=cj.logger(start=50, end=70,
                                                  period=0.1),
                                 batch_size=128)

        cj.job.update(statusComment="Prediction with '{}'.".format(classifier),
                      progress=70)
        if hasattr(model, "n_jobs"):
            model.n_jobs = cj.parameters.n_jobs

        probas = None
        if hasattr(model, "predict_proba"):
            probas = model.predict_proba(x_feat)
            y_pred = model.classes_.take(np.argmax(probas, axis=1), axis=0)
        else:
            y_pred = model.predict(x_feat)

        cj.job.update(statusComment="Upload annotations.", progress=80)
        annotation_collection = AnnotationCollection()
        for i, annotation in cj.monitor(enumerate(annotations),
                                        start=80,
                                        end=100,
                                        period=0.1,
                                        prefix="Upload annotations"):
            # append without calling save() on each annotation: the whole
            # collection is saved in one request below
            annotation_collection.append(
                Annotation(location=annotation.location,
                           id_image=annotation.image,
                           id_project=annotation.project,
                           term=[int(y_pred[i])],
                           rate=float(np.max(probas[i]))
                           if probas is not None else 1.0))
        annotation_collection.save()

        cj.job.update(statusComment="Finished.", progress=100)