        for row in f_csv:
            image_id = row[0]
            tissue = row[1]
            dye = row[2]

            id2info[image_id] = (tissue, dye)

    with Cytomine(host=params.host,
                  public_key=params.public_key,
                  private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:
        annotations = AnnotationCollection()
        annotations.project = params.id_project
        annotations.showWKT = True
        annotations.showMeta = True
        annotations.showGIS = True
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            print(
                "ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}"
                .format(annotation.id, annotation.image, annotation.project,
                        annotation.term, annotation.user, annotation.area,
                        annotation.perimeter, annotation.location))

            annot = Annotation().fetch(annotation.id)
            # All the properties (collection) of the annotation
            properties = PropertyCollection(annot).fetch()
            # A single property with a specific key on the annotation
            propert = Property(annot).fetch(key="ANNOTATION_GROUP_ID")
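            # Note: fetch() returns the Property object (its value is in
            # propert.value) when the key exists; the client returns False
            # when the request fails.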
Example #2
def main():
    with CytomineJob.from_cli(sys.argv[1:]) as conn:
        conn.job.update(status=Job.RUNNING,
                        progress=0,
                        statusComment="Initialization of the training phase")

        # 1. Create working directories on the machine:
        # - WORKING_PATH/in: input images
        # - WORKING_PATH/out: output images
        # - WORKING_PATH/ground_truth: ground truth images
        # - WORKING_PATH/tmp: temporary path

        base_path = os.getenv("HOME")
        gt_suffix = "_lbl"
        working_path = os.path.join(base_path, str(conn.job.id))
        in_path = os.path.join(working_path, "in/")
        in_txt = os.path.join(in_path, 'txt/')
        out_path = os.path.join(working_path, "out/")
        gt_path = os.path.join(working_path, "ground_truth/")
        tmp_path = os.path.join(working_path, "tmp/")

        # Create the working directories if they do not exist yet.
        for path in (working_path, in_path, out_path, gt_path, tmp_path,
                     in_txt):
            os.makedirs(path, exist_ok=True)
        # 2. Download the images (first input, then ground truth image)
        conn.job.update(
            progress=10,
            statusComment="Downloading images (to {})...".format(in_path))
        print(conn.parameters)
        images = ImageInstanceCollection().fetch_with_filter(
            "project", conn.parameters.cytomine_id_project)
        xpos = {}
        ypos = {}
        terms = {}

        for image in images:
            image.dump(dest_pattern=os.path.join(in_path, '%d.jpg' % image.id))

            annotations = AnnotationCollection()
            annotations.project = conn.parameters.cytomine_id_project
            annotations.showWKT = True
            annotations.showMeta = True
            annotations.showGIS = True
            annotations.showTerm = True
            annotations.image = image.id
            annotations.fetch()

            for ann in annotations:
                location = ann.location
                if location.rfind('POINT') == -1:
                    # Polygonal annotation: use its centroid as the landmark.
                    poi = shapely.wkt.loads(location).centroid
                else:
                    # Point annotation: use the point itself.
                    poi = shapely.wkt.loads(location)
                (cx, cy) = poi.xy
                xpos[(ann.term[0], image.id)] = int(cx[0])
                # Cytomine stores y with a bottom-left origin; flip to image rows.
                ypos[(ann.term[0], image.id)] = image.height - int(cy[0])
                terms[ann.term[0]] = 1

        for image in images:
            with open(os.path.join(in_txt, '%d.txt' % image.id), 'w') as f:
                for t in terms.keys():
                    if (t, image.id) in xpos:
                        f.write('%d %d %d %f %f\n' %
                                (t, xpos[(t, image.id)], ypos[(t, image.id)],
                                 xpos[(t, image.id)] / float(image.width),
                                 ypos[(t, image.id)] / float(image.height)))

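        # Multi-resolution factors: 1, 1/2, 1/4, ... down to 1/2^(model_depth-1).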
        depths = 1. / (2.**np.arange(conn.parameters.model_depth))

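        # getallcoords (a helper from the surrounding project, not shown here)
        # parses the txt files back into coordinate arrays and term mappings.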
        (xc, yc, xr, yr, ims, t_to_i, i_to_t) = getallcoords(in_txt)

        if conn.parameters.cytomine_id_terms == 'all':
            term_list = list(t_to_i.keys())
        else:
            term_list = [
                int(term)
                for term in conn.parameters.cytomine_id_terms.split(',')
            ]

        if conn.parameters.cytomine_training_images == 'all':
            tr_im = ims
        else:
            tr_im = [
                int(id_im) for id_im in
                conn.parameters.cytomine_training_images.split(',')
            ]

        DATA = None
        REP = None
        be = 0

        sfinal = ""
        for id_term in conn.monitor(term_list,
                                    start=10,
                                    end=90,
                                    period=0.05,
                                    prefix="Model building for terms..."):
            sfinal += "%d " % id_term

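            # Collect the coordinates of this term across the training images
            # and estimate the mean and covariance of the relative positions,
            # saved below for use at prediction time.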
            (xc, yc, xr, yr) = getcoordsim(in_txt, id_term, tr_im)
            nimages = np.max(xc.shape)
            mx = np.mean(xr)
            my = np.mean(yr)
            P = np.zeros((2, nimages))
            P[0, :] = xr
            P[1, :] = yr
            cm = np.cov(P)
            passe = False
            # Feature parameters: random 2-D offsets for 'gaussian' features,
            # or banks of random Haar-like filters for 'haar'.
            feature_parameters = None
            if conn.parameters.model_feature_type.lower() == 'gaussian':
                std_matrix = np.eye(2) * (
                    conn.parameters.model_feature_gaussian_std**2)
                feature_parameters = np.round(
                    np.random.multivariate_normal(
                        [0, 0], std_matrix,
                        conn.parameters.model_feature_gaussian_n)).astype(int)
            elif conn.parameters.model_feature_type.lower() == 'haar':
                W = conn.parameters.model_wsize
                n = int(conn.parameters.model_feature_haar_n /
                        (5 * conn.parameters.model_depth))
                h2 = generate_2_horizontal(W, n)
                v2 = generate_2_vertical(W, n)
                h3 = generate_3_horizontal(W, n)
                v3 = generate_3_vertical(W, n)
                sq = generate_square(W, n)
                feature_parameters = (h2, v2, h3, v3, sq)

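            # model_ntimes passes over the training images: the first without
            # rotation, later ones with random rotations up to model_angle.
            # DATA/REP are preallocated with slack and trimmed afterwards.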
            for times in range(conn.parameters.model_ntimes):
                if times == 0:
                    rangrange = 0
                else:
                    rangrange = conn.parameters.model_angle

                T = build_datasets_rot_mp(
                    in_path, tr_im, xc, yc, conn.parameters.model_R,
                    conn.parameters.model_RMAX, conn.parameters.model_P,
                    conn.parameters.model_step, rangrange,
                    conn.parameters.model_wsize,
                    conn.parameters.model_feature_type, feature_parameters,
                    depths, nimages, 'jpg', conn.parameters.model_njobs)
                for (data, rep, img) in T:
                    (height, width) = data.shape
                    if not passe:
                        passe = True
                        DATA = np.zeros((height * (len(T) + 100) *
                                         conn.parameters.model_ntimes, width))
                        REP = np.zeros(height * (len(T) + 100) *
                                       conn.parameters.model_ntimes)
                        b = 0
                        be = height
                    DATA[b:be, :] = data
                    REP[b:be] = rep
                    b = be
                    be = be + height

            REP = REP[0:b]
            DATA = DATA[0:b, :]

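            # Train an Extra-Trees classifier on the extracted features.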
            clf = ExtraTreesClassifier(
                n_jobs=conn.parameters.model_njobs,
                n_estimators=conn.parameters.model_ntrees)
            clf = clf.fit(DATA, REP)

            parameters_hash = {
                'cytomine_id_terms': conn.parameters.cytomine_id_terms,
                'model_R': conn.parameters.model_R,
                'model_RMAX': conn.parameters.model_RMAX,
                'model_P': conn.parameters.model_P,
                'model_npred': conn.parameters.model_npred,
                'model_ntrees': conn.parameters.model_ntrees,
                'model_ntimes': conn.parameters.model_ntimes,
                'model_angle': conn.parameters.model_angle,
                'model_depth': conn.parameters.model_depth,
                'model_step': conn.parameters.model_step,
                'window_size': conn.parameters.model_wsize,
                'feature_type': conn.parameters.model_feature_type,
                'feature_haar_n': conn.parameters.model_feature_haar_n,
                'feature_gaussian_n': conn.parameters.model_feature_gaussian_n,
                'feature_gaussian_std': conn.parameters.model_feature_gaussian_std,
            }

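            # Persist the classifier, the landmark statistics and the
            # hyper-parameters, and attach them to the job for later retrieval.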
            model_filename = joblib.dump(clf,
                                         os.path.join(
                                             out_path,
                                             '%d_model.joblib' % (id_term)),
                                         compress=3)[0]
            cov_filename = joblib.dump([mx, my, cm],
                                       os.path.join(
                                           out_path,
                                           '%d_cov.joblib' % (id_term)),
                                       compress=3)[0]
            parameter_filename = joblib.dump(
                parameters_hash,
                os.path.join(out_path, '%d_parameters.joblib' % id_term),
                compress=3)[0]
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=model_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=cov_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            AttachedFile(
                conn.job,
                domainIdent=conn.job.id,
                filename=parameter_filename,
                domainClassName="be.cytomine.processing.Job").upload()
            if conn.parameters.model_feature_type.lower() in ('haar', 'gaussian'):
                add_filename = joblib.dump(
                    feature_parameters,
                    os.path.join(out_path, '%d_fparameters.joblib' % id_term))[0]
                AttachedFile(
                    conn.job,
                    domainIdent=conn.job.id,
                    filename=add_filename,
                    domainClassName="be.cytomine.processing.Job").upload()

        Property(conn.job, key="id_terms", value=sfinal.rstrip(" ")).save()
        conn.job.update(progress=100,
                        status=Job.TERMINATED,
                        statusComment="Job terminated.")
Example #3
if __name__ == '__main__':
    parser = ArgumentParser(prog="Cytomine Python client example")
    parser.add_argument('--cytomine_host', dest='host',
                        help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_id_project', dest='id_project',
                        help="The project from which we want the crop")
    parser.add_argument('--download_path', required=False,
                        help="Where to store images")
    params, other = parser.parse_known_args(sys.argv[1:])

    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key,
                  verbose=logging.INFO) as cytomine:
        annotations = AnnotationCollection()
        annotations.project = params.id_project
        annotations.showWKT = True
        annotations.showMeta = True
        annotations.showGIS = True
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            print("ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}".format(
                annotation.id,
                annotation.image,
                annotation.project,
                annotation.term,
                annotation.user,
                annotation.area,
                annotation.perimeter,
                annotation.location
            ))
Example #4
def get_images_mask_per_annotation_per_user(proj_id, image_id, user_id,
                                            scale_factor, dest):
    """Build one binary mask per term from the annotations of `user_id` on
    image `image_id`, scaled by `scale_factor`, and save each mask as a PNG
    under `dest`."""
    # Fetch the image instance to get its dimensions.
    image = ImageInstance().fetch(image_id)
    image_width = int(image.width)
    image_height = int(image.height)
    print(image_height, image_width)

    annotations = AnnotationCollection()
    annotations.project = proj_id
    annotations.image = image_id

    annotations.user = user_id
    annotations.showWKT = True
    annotations.showMeta = True
    annotations.showTerm = True
    annotations.showGIS = True
    annotations.showImage = True
    annotations.showUser = True
    annotations.fetch()

    dct_annotations = {}
    for a in annotations:
        print(a.user)
        if len(a.term) == 1:
            term = a.term[0]
            if term not in dct_annotations:
                dct_annotations[term] = []
            dct_annotations[term].append(a.location)
        else:
            warnings.warn("Not suited for multiple or no annotation terms")
    for t, lanno in dct_annotations.items():
        result_image = Image.new(mode='1',
                                 size=(int(image_width * scale_factor),
                                       int(image_height * scale_factor)),
                                 color=0)
        for pwkt in lanno:
            if pwkt.startswith("MULTIPOLYGON"):
                label = "MULTIPOLYGON"
            elif pwkt.startswith("POLYGON"):
                label = "POLYGON"
            else:
                # Skip geometry types this function does not handle.
                continue

            coordinatesStringList = pwkt.replace(label, '')

            if label == "POLYGON":
                coordinates_string_lists = [coordinatesStringList]
            else:  # MULTIPOLYGON
                coordinates_string_lists = coordinatesStringList.split(
                    ')), ((')

                coordinates_string_lists = [
                    coordinatesStringList.replace('(', '').replace(')', '')
                    for coordinatesStringList in coordinates_string_lists
                ]

            for coordinatesStringList in coordinates_string_lists:
                #  create lists of x and y coordinates
                x_coords = []
                y_coords = []
                for point in coordinatesStringList.split(','):
                    point = point.strip(
                        string.whitespace)  # remove leading and ending spaces
                    point = point.strip(
                        string.punctuation
                    )  # Have seen some strings have a ')' at the end so remove it
                    x_coords.append(round(float(point.split(' ')[0])))
                    y_coords.append(round(float(point.split(' ')[1])))

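                # Scale to the requested level of detail; the y axis is
                # flipped because Cytomine/WKT uses a bottom-left origin while
                # PIL uses a top-left origin.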
                x_coords_correct_lod = [
                    int(x * scale_factor) for x in x_coords
                ]
                y_coords_correct_lod = [
                    image_height * scale_factor - int(x * scale_factor)
                    for x in y_coords
                ]
                coords = [
                    (i, j)
                    for i, j in zip(x_coords_correct_lod, y_coords_correct_lod)
                ]

                #  draw the polygon in an image and fill it
                ImageDraw.Draw(result_image).polygon(coords, outline=1, fill=1)

        result_image.save(os.path.join(dest, str(t) + '.png'))
Example #5
def main(argv):
    with CytomineJob.from_cli(argv) as cj:
        cj.job.update(progress=1, statusComment="Initialisation")
        cj.log(str(cj.parameters))

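        # Resolve the terms, images, users and jobs selected via the job
        # parameters.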
        term_ids = [int(term_id) for term_id in cj.parameters.cytomine_id_terms.split(",")]
        terms = TermCollection().fetch_with_filter("project", cj.parameters.cytomine_id_project)
        terms = [term for term in terms if term.id in term_ids]

        image_ids = [int(image_id) for image_id in cj.parameters.cytomine_id_images.split(",")]
        images = ImageInstanceCollection(light=True).fetch_with_filter("project", cj.parameters.cytomine_id_project)
        images = [image for image in images if image.id in image_ids]

        if hasattr(cj.parameters, "cytomine_id_users") and cj.parameters.cytomine_id_users is not None:
            user_ids = [int(user_id) for user_id in cj.parameters.cytomine_id_users.split(",")]
        else:
            user_ids = []

        if hasattr(cj.parameters, "cytomine_id_jobs") and cj.parameters.cytomine_id_jobs is not None:
            job_ids = [int(job_id) for job_id in cj.parameters.cytomine_id_jobs.split(",")]
            jobs = JobCollection(project=cj.parameters.cytomine_id_project).fetch()
            jobs = [job for job in jobs if job.id in job_ids]
        else:
            jobs = []

        # Annotations created by an algorithm belong to the job's "user job"
        # identity, so merge those with the human user identifiers.
        userjobs_ids = [job.userJob for job in jobs]
        all_user_ids = user_ids + userjobs_ids

        cj.job.update(progress=20, statusComment="Collect data")
        ac = AnnotationCollection()
        ac.terms = term_ids
        ac.images = image_ids
        ac.showMeta = True
        ac.showGIS = True
        ac.showTerm = True
        ac.reviewed = True if cj.parameters.cytomine_reviewed_only else None
        ac.users = all_user_ids if len(all_user_ids) > 0 else None
        ac.fetch()

        cj.job.update(progress=55, statusComment="Compute statistics")
        data = dict()
        for image in images:
            d = dict()
            areas = [a.area for a in ac if a.image == image.id]
            total_area = np.sum(areas)
            d['total'] = total_area
            d['count'] = len(areas)
            d['ratio'] = 1.0
            for term in terms:
                annotations = [a for a in ac if a.image == image.id and term.id in a.term]
                areas = [a.area for a in annotations]
                d[term.name] = dict()
                d[term.name]['total'] = np.sum(areas)
                d[term.name]['count'] = len(annotations)
                d[term.name]['ratio'] = d[term.name]['total'] / float(total_area) if total_area > 0 else 0
                d[term.name]['mean'] = np.mean(areas) if len(areas) > 0 else 0
                d[term.name]['annotations'] = [{"created": a.created, "area": a.area} for a in annotations]
            data[image.instanceFilename] = d

        cj.job.update(progress=90, statusComment="Write CSV report")
        with open("stat-area.csv", "w") as f:
            for l in write_csv(data, terms):
                f.write("{}\n".format(l))

        job_data = JobData(id_job=cj.job.id, key="Area CSV report", filename="stat-area.csv")
        job_data = job_data.save()
        job_data.upload("stat-area.csv")
        
        cj.job.update(statusComment="Finished.", progress=100)
Example #6
        if params.opencv:
            image_instances = ImageInstanceCollection().fetch_with_filter(
                "project", params.id_project)

        # We want all annotations in a given project.
        annotations = AnnotationCollection()
        annotations.project = params.id_project  # Add a filter: only annotations from this project
        # You could add other filters:
        # annotations.image = id_image => Add a filter: only annotations from this image
        # annotations.images = [id1, id2] => Add a filter: only annotations from these images
        # annotations.user = id_user => Add a filter: only annotations from this user
        # ...
        annotations.showWKT = True  # Ask to return WKT location (geometry) in the response
        annotations.showMeta = True  # Ask to return meta information (id, ...) in the response
        annotations.showGIS = True  # Ask to return GIS information (perimeter, area, ...) in the response
        # ...
        # => Fetch annotations from the server with the given filters.
        annotations.fetch()
        print(annotations)

        for annotation in annotations:
            print(
                "ID: {} | Image: {} | Project: {} | Term: {} | User: {} | Area: {} | Perimeter: {} | WKT: {}"
                .format(annotation.id, annotation.image, annotation.project,
                        annotation.term, annotation.user, annotation.area,
                        annotation.perimeter, annotation.location))

            # Annotation location is the annotation geometry in WKT format.
            # See https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry
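            # A minimal sketch of using that WKT string (assuming shapely is
            # installed; it is not imported in this fragment):
            #   import shapely.wkt
            #   geometry = shapely.wkt.loads(annotation.location)
            #   print(geometry.area, geometry.centroid)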