Example #1
from mpicbg.models import Point, PointMatch, RigidModel2D
from java.util import HashSet

def getModelFromPoints(sourcePoints, targetPoints):
	""" Fit a 2D rigid model to two lists of corresponding (x, y) points. """
	rigidModel = RigidModel2D()
	pointMatches = HashSet()
	for src, tgt in zip(sourcePoints, targetPoints):
		pointMatches.add(PointMatch(Point([src[0], src[1]]),
		                            Point([tgt[0], tgt[1]])))
	rigidModel.fit(pointMatches)
	return rigidModel
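A minimal usage sketch (the coordinates below are made up; assumes a Fiji/Jython session with the mpicbg imports above):

# Hypothetical usage: three corresponding points, then the fitted transform.
sourcePoints = [(0.0, 0.0), (100.0, 0.0), (0.0, 100.0)]
targetPoints = [(5.0, 10.0), (104.8, 11.0), (4.1, 110.2)]
model = getModelFromPoints(sourcePoints, targetPoints)
print model.createAffine()  # the fitted transform as a java.awt.geom.AffineTransform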
Example #2
 def fromFeatures(features1, features2, angle_epsilon, len_epsilon_sq):
     """ Compare all features of one image to all features of the other image,
     to identify matching features and then create PointMatch instances. """
     return PointMatches([
         PointMatch(c1.position, c2.position)
         for c1, c2 in product(features1, features2)
         if c1.matches(c2, angle_epsilon, len_epsilon_sq)
     ])
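A hedged usage sketch, assuming fromFeatures is a static method of a PointMatches container that keeps its list in a .pointmatches attribute (both assumptions), and that features expose .position and a matches() predicate as elsewhere on this page:

# Hypothetical call; both epsilon values are illustrative only.
pms = PointMatches.fromFeatures(features1, features2,
                                0.05,                      # angle_epsilon, in radians
                                pow(somaDiameter / 2, 2))  # len_epsilon_sq
print "found", len(pms.pointmatches), "candidate matches"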
Example #3
    features = {ti: list(f.get()) for ti, f in futures.iteritems()}

    for ti, fs in features.iteritems():
        print "Found", len(fs), "features in timepoint", ti

    # Compare constellation features from one time point to the next
    # to extract candidate PointMatch instances
    angle_epsilon = 0.05  # in radians. 0.05 is 2.8 degrees
    len_epsilon_sq = pow(somaDiameter / 2, 2)  # in calibrated units, squared
    ti_pointmatches = defaultdict(list)

    for t0, t1 in [[0, 1]]:
        # Compare all possible pairs of constellation features
        for c0, c1 in product(features[t0], features[t1]):
            if c0.compareTo(c1, angle_epsilon, len_epsilon_sq):
                ti_pointmatches[t1].append(PointMatch(c0.position,
                                                      c1.position))
        print t0, "vs", t1, "- found", len(ti_pointmatches[t1]), "point matches"

    # Reduce each list of pointmatches to a spatially coherent subset
    maxEpsilon = somaDiameter / 2  # max allowed alignment error in calibrated units (a distance)
    minInlierRatio = 0.01  # ratio inliers/candidates
    minNumInliers = 5  # minimum number of good matches to accept the result
    n_iterations = 2000  # for estimating the model

    def fit(model, pointmatches):
        inliers = ArrayList()
        exception = None
        modelFound = False  # remains False if filterRansac throws
        try:
            modelFound = model.filterRansac(pointmatches, inliers,
                                            n_iterations, maxEpsilon,
                                            minInlierRatio, minNumInliers)
        except NotEnoughDataPointsException, e:
            exception = e
        return model, modelFound, inliers, exception
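A hedged usage sketch of fit(); RigidModel2D is an assumption here (any mpicbg 2D model would do), and Jython passes the Python list of candidates transparently as a java.util Collection:

    # Hypothetical: filter the candidate matches for the timepoint pair (0, 1).
    model, modelFound, inliers, exception = fit(RigidModel2D(), ti_pointmatches[1])
    if modelFound:
        print "RANSAC kept", len(inliers), "of", len(ti_pointmatches[1]), "candidates"
    elif exception:
        print "Model fitting failed:", exception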
Example #4
def extractBlockMatches(filepath1, filepath2, params, paramsSIFT, properties,
                        csvDir, exeload, load):
    """
  filepath1: the file path to an image of a section.
  filepath2: the file path to an image of another section.
  params: dictionary of parameters necessary for BlockMatching.
  exeload: an ExecutorService for parallel loading of image files.
  load: a function that knows how to load the image from the filepath.

  return False if the CSV file already exists, True if it has to be computed.
  """

    # Skip if pointmatches CSV file exists already:
    csvpath = os.path.join(
        csvDir,
        basename(filepath1) + '.' + basename(filepath2) + ".pointmatches.csv")
    if os.path.exists(csvpath):
        return False

    try:

        # Load files in parallel
        futures = [
            exeload.submit(Task(load, filepath1)),
            exeload.submit(Task(load, filepath2))
        ]

        # FloatProcessors, already Gaussian-blurred, contrast-corrected and scaled!
        fp1 = futures[0].get()
        fp2 = futures[1].get()

        # Define points from the mesh
        sourcePoints = ArrayList()
        # List to fill with PointMatch instances from filepath1 to filepath2
        sourceMatches = ArrayList()

        # Don't use blockmatching if the dimensions are different
        use_blockmatching = (fp1.getWidth() == fp2.getWidth()
                             and fp1.getHeight() == fp2.getHeight())

        # Fill the sourcePoints
        mesh = TransformMesh(params["meshResolution"], fp1.width, fp1.height)
        PointMatch.sourcePoints(mesh.getVA().keySet(), sourcePoints)
        syncPrintQ("Extracting block matches for \n S: " + filepath1 +
                   "\n T: " + filepath2 + "\n  with " +
                   str(sourcePoints.size()) + " mesh sourcePoints.")
        # Run
        BlockMatching.matchByMaximalPMCCFromPreScaledImages(
            fp1,
            fp2,
            params["scale"],  # float
            params["blockRadius"],  # X
            params["blockRadius"],  # Y
            params["searchRadius"],  # X
            params["searchRadius"],  # Y
            params["minR"],  # float
            params["rod"],  # float
            params["maxCurvature"],  # float
            sourcePoints,
            sourceMatches)

        # At least some should match to accept the translation
        if len(sourceMatches) < max(20, len(sourcePoints) / 5) / 2:
            syncPrintQ(
                "Found only %i blockmatching pointmatches (from %i source points)"
                % (len(sourceMatches), len(sourcePoints)))
            syncPrintQ(
                "... therefore invoking SIFT pointmatching for:\n  S: " +
                basename(filepath1) + "\n  T: " + basename(filepath2))
            # Can fail if there is a shift larger than the searchRadius
            # Try SIFT features, which are location independent
            #
            # Images are now scaled: load originals
            futures = [
                exeload.submit(
                    Task(loadFloatProcessor,
                         filepath1,
                         params,
                         paramsSIFT,
                         scale=False)),
                exeload.submit(
                    Task(loadFloatProcessor,
                         filepath2,
                         params,
                         paramsSIFT,
                         scale=False))
            ]

            fp1 = futures[0].get()  # FloatProcessor, original
            fp2 = futures[1].get()  # FloatProcessor, original

            # Images can be of different size: scale them the same way
            area1 = fp1.width * fp1.height
            area2 = fp2.width * fp2.height

            if area1 == area2:
                paramsSIFT1 = paramsSIFT.clone()
                paramsSIFT1.maxOctaveSize = int(
                    max(properties.get("SIFT_max_size", 2048),
                        fp1.width * params["scale"]))
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = paramsSIFT1
            else:
                bigger, smaller = (fp1, fp2) if area1 > area2 else (fp2, fp1)
                target_width_bigger = int(
                    max(1024, bigger.width * params["scale"]))
                if 1024 == target_width_bigger:
                    target_width_smaller = int(1024 * float(smaller.width) /
                                               bigger.width)
                else:
                    # maxOctaveSize must be an integer
                    target_width_smaller = int(smaller.width * params["scale"])
                #
                paramsSIFT1 = paramsSIFT.clone()
                paramsSIFT1.maxOctaveSize = target_width_bigger
                paramsSIFT1.minOctaveSize = int(paramsSIFT1.maxOctaveSize /
                                                pow(2, paramsSIFT1.steps))
                paramsSIFT2 = paramsSIFT.clone()
                paramsSIFT2.maxOctaveSize = target_width_smaller
                paramsSIFT2.minOctaveSize = int(paramsSIFT2.maxOctaveSize /
                                                pow(2, paramsSIFT2.steps))

            ijSIFT1 = SIFT(FloatArray2DSIFT(paramsSIFT1))
            features1 = ArrayList()  # of Point instances
            ijSIFT1.extractFeatures(fp1, features1)

            ijSIFT2 = SIFT(FloatArray2DSIFT(paramsSIFT2))
            features2 = ArrayList()  # of Point instances
            ijSIFT2.extractFeatures(fp2, features2)
            # Vector of PointMatch instances
            sourceMatches = FloatArray2DSIFT.createMatches(
                features1,
                features2,
                params.get("max_sd", 1.5),  # max_sd: maximal difference in size (ratio max/min)
                TranslationModel2D(),
                params.get("max_id", Double.MAX_VALUE),  # max_id: maximal distance in image space
                params.get("rod", 0.9))  # rod: ratio of best vs second best

        # Store pointmatches
        savePointMatches(os.path.basename(filepath1),
                         os.path.basename(filepath2), sourceMatches, csvDir,
                         params)

        return True
    except:
        printException()
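The params dictionary this function reads is not shown on this page; here is a minimal illustrative sketch, where every value is an assumption to be tuned, not a recommendation:

# Hypothetical params for extractBlockMatches.
# Assumes: from java.lang import Double, as in the function above.
params = {
    "meshResolution": 64,        # rows/columns of the TransformMesh
    "scale": 0.25,               # working scale for block matching
    "blockRadius": 50,           # block half-width and half-height, in pixels
    "searchRadius": 25,          # maximal displacement searched, in pixels
    "minR": 0.1,                 # minimal PMCC correlation coefficient
    "rod": 0.9,                  # ratio of best vs second best match
    "maxCurvature": 1000.0,      # maximal curvature ratio of the correlation landscape
    "max_sd": 1.5,               # SIFT fallback: maximal difference in size (ratio max/min)
    "max_id": Double.MAX_VALUE,  # SIFT fallback: maximal distance in image space
}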
Example #5
    # Some matches are spatially incoherent: filter matches with RANSAC
    model = RigidModel2D()  # supports translation, rotation and scaling
    candidates = pointmatches  # possibly good matches as determined above
    inliers = []  # good point matches, to be filled in by model.filterRansac
    maxEpsilon = 25.0  # max allowed alignment error in pixels (a distance)
    minInlierRatio = 0.05  # ratio inliers/candidates
    minNumInliers = 5  # minimum number of good matches to accept the result

    modelFound = False  # remains False if filterRansac throws
    try:
        modelFound = model.filterRansac(candidates, inliers, 1000, maxEpsilon,
                                        minInlierRatio, minNumInliers)
        if modelFound:
            # Apply the transformation defined by the model to the first point
            # of each pair (PointMatch) of points. That is, to the point from
            # the first image.
            PointMatch.apply(inliers, model)
    except NotEnoughDataPointsException, e:
        print e

    if modelFound:
        print("SUCCESS! Saving results...")
        # Register images
        rigid = newArray(Double.TYPE, (6, ))
        model.toArray(rigid)  # affine coefficients, in the order m00, m10, m01, m11, m02, m12
        line = "%d,%s,%s,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%d\n" % (
            i, list1[i], list2[i], rigid[0], rigid[1], rigid[2], rigid[3],
            rigid[4], rigid[5], model.cost, 1)
        f.write(line)
        print("Done.")
        print
    else:
        print("Model not found; nothing saved.")  # assumed completion of a branch truncated in the source
Example #6
 def fromRows(rows):
   """ rows: from a CSV file, as lists of strings. """
   # imap is itertools.imap and array is jarray.array in this Jython snippet
   return PointMatches([PointMatch(Point(array(imap(float, row[0:3]), 'd')),
                                   Point(array(imap(float, row[3:6]), 'd')))
                        for row in rows])
Example #7
def computeRegistration():
    while atomicI.get() < nSections:
        l = atomicI.getAndIncrement()
        if l < nSections:
            IJ.log('Computing EM/LM registration for layer ' + str(l).zfill(4))

            layerFolder = fc.mkdir_p(
                os.path.join(registrationFolder, 'layer_' + str(l).zfill(4)))
            toRegisterFolder = fc.mkdir_p(
                os.path.join(layerFolder, 'toRegister'))
            registeredFolder = fc.mkdir_p(
                os.path.join(layerFolder, 'registered'))

            # Applying appropriate filters to make lowresEM and LM look similar for layer l
            imLM = IJ.openImage(imPaths['LM'][l])
            imLM = fc.localContrast(imLM)
            imLMPath = os.path.join(toRegisterFolder,
                                    'imLM_' + str(l).zfill(4) + '.tif')
            IJ.save(imLM, imLMPath)

            imEM = IJ.openImage(imPaths['EM'][l])
            imEM = fc.localContrast(imEM)
            imEM = fc.blur(imEM, 2)
            imEMPath = os.path.join(toRegisterFolder,
                                    'imEM_' + str(l).zfill(4) + '.tif')
            IJ.save(imEM, imEMPath)

            # Compute first a simple affine registration on the non-cropped images
            IJ.log(
                'Computing affine and moving least squares alignment for layer '
                + str(l).zfill(4))
            firstStepRegistered = False

            # registration at first step with 1step/octave (less features)
            pLowRes = getSIFTMatchingParameters(nOctaves[0], 1.6, 16, 4000, 8,
                                                4)

            featuresLM = getFeatures(imLMPath, pLowRes)
            featuresEM = getFeatures(imEMPath, pLowRes)

            matchingResults = getMatchingResults(featuresLM, featuresEM)
            if matchingResults is None:
                IJ.log(
                    'No registration matching at low resolution matching step 1 in layer '
                    + str(l).zfill(4))
            else:
                model, inliers = matchingResults
                # mean displacement of the remaining matching features
                distance = PointMatch.meanDistance(inliers)
                IJ.log('---Layer ' + str(l).zfill(4) + ' distance ' +
                       str(distance) + ' px with ' + str(len(inliers)) +
                       ' inliers')
                if distance > matchingThreshold[0]:
                    IJ.log(
                        'Matching distance exceeds the threshold at the low resolution step 1 - '
                        + str(l).zfill(4) + ' - distance - ' + str(distance))
                else:
                    affTransform = model.createAffine()
                    s1, s2 = getScalingFactors(affTransform)
                    IJ.log('Layer ' + str(l).zfill(4) +
                           ' scaling factors - step 1 - ' + str(s1) + ' - ' +
                           str(s2) + '--' + str(s1 * s2) + ' affDeterminant ' +
                           str(affTransform.getDeterminant()) + ' nInliers ' +
                           str(len(inliers)))
                    # scaling in both directions should be close to 1
                    if (abs(s1 - 1) < 0.2) and (abs(s2 - 1) < 0.2):
                        IJ.log('First step ok - layer ' + str(l).zfill(4))
                        firstStepRegistered = True
                        loaderZ.serialize(
                            affTransform,
                            os.path.join(registeredFolder, 'affineSerialized'))

            if not firstStepRegistered:
                IJ.log(
                    'First step registration in layer ' + str(l).zfill(4) +
                    ' with few features has failed. Trying with more features.'
                )
                # registration at first step with 3steps/octave (more features)
                pLowRes = getSIFTMatchingParameters(3, 1.6, 64, 4000, 8, 4)
                #pLowRes = getSIFTMatchingParameters(nOctaves[0], 1.6, 16, 4000, 8, 4) # for BIB

                featuresLM = getFeatures(imLMPath, pLowRes)
                featuresEM = getFeatures(imEMPath, pLowRes)

                matchingResults = getMatchingResults(featuresLM, featuresEM)
                if matchingResults is None:
                    IJ.log(
                        'No registration matching at low resolution matching step 1bis in layer '
                        + str(l).zfill(4))
                else:
                    model, inliers = matchingResults
                    # mean displacement of the remaining matching features
                    distance = PointMatch.meanDistance(inliers)
                    IJ.log('---Layer ' + str(l).zfill(4) + ' distance ' +
                           str(distance) + ' px with ' + str(len(inliers)) +
                           ' inliers')
                    if distance > matchingThreshold[0]:
                        IJ.log(
                            'Matching distance exceeds the threshold at the low resolution step 1bis - '
                            + str(l).zfill(4) + ' - distance - ' +
                            str(distance))
                    else:
                        affTransform = model.createAffine()
                        s1, s2 = getScalingFactors(affTransform)
                        IJ.log('Layer ' + str(l).zfill(4) +
                               ' scaling factors - step 1bis - ' + str(s1) +
                               ' - ' + str(s2) + '--' + str(s1 * s2) +
                               ' affDeterminant ' +
                               str(affTransform.getDeterminant()) +
                               ' nInliers ' + str(len(inliers)))
                        # scaling in both directions should be close to 1
                        if (abs(s1 - 1) < 0.2) and (abs(s2 - 1) < 0.2):
                            IJ.log('First step 1bis ok - layer ' +
                                   str(l).zfill(4))
                            firstStepRegistered = True
                            loaderZ.serialize(
                                affTransform,
                                os.path.join(registeredFolder,
                                             'affineSerialized'))

            if not firstStepRegistered:
                IJ.log('The two first step trials in layer ' +
                       str(l).zfill(4) + ' have failed')
            else:
                # Affine transform and crop the LM, and compute a high res MLS matching
                with lock:  # only one trakem working at a time
                    # apply affTransform
                    patch = Patch.createPatch(pZ, imLMPath)
                    layerZ.add(patch)
                    patch.setAffineTransform(affTransform)
                    patch.updateBucket()

                    # crop and export
                    bb = Rectangle(0, 0, widthEM, heightEM)
                    affineCroppedIm = loaderZ.getFlatImage(
                        layerZ, bb, 1, 0x7fffffff, ImagePlus.GRAY8, Patch,
                        layerZ.getAll(Patch), True, Color.black, None)
                    affineCroppedImPath = os.path.join(
                        toRegisterFolder,
                        'affineCroppedLM_' + str(l).zfill(4) + '.tif')
                    IJ.save(affineCroppedIm, affineCroppedImPath)
                    affineCroppedIm.close()

                    layerZ.remove(patch)
                    layerZ.recreateBuckets()

                pHighRes = getSIFTMatchingParameters(nOctaves[1], 1.6, 64,
                                                     4096, 8, 4)
                featuresLM = getFeatures(affineCroppedImPath, pHighRes)
                featuresEM = getFeatures(imEMPath, pHighRes)

                # get the MLS
                matchingResults = getMatchingResults(featuresLM, featuresEM)
                if matchingResults is None:
                    IJ.log(
                        'Unexpected: no match found even though an affine was already computed. Layer '
                        + str(l).zfill(4))
                else:
                    model, inliers = matchingResults
                    affTransform = model.createAffine()
                    s1, s2 = getScalingFactors(affTransform)
                    IJ.log('Second step determinant - layer ' +
                           str(l).zfill(4) + ' - determinant - ' +
                           str(affTransform.getDeterminant()) + ' nInliers ' +
                           str(len(inliers)) + ' Scaling factors - step 2 - ' +
                           str(s1) + ' - ' + str(s2))
                    # scaling in both directions should be close to 1
                    if (abs(s1 - 1) < 0.2) and (abs(s2 - 1) < 0.2) and len(inliers) > 50:
                        # mean displacement of the remaining matching features
                        distance = PointMatch.meanDistance(inliers)
                        if distance > matchingThreshold[1]:
                            IJ.log(
                                'Weird: matching distance exceeds the threshold at the high resolution step 2 - '
                                + str(l).zfill(4) + ' - distance - ' +
                                str(distance))
                        else:
                            mlst = MovingLeastSquaresTransform()
                            mlst.setModel(AffineModel2D)
                            mlst.setAlpha(1)
                            mlst.setMatches(inliers)

                            xmlMlst = mlst.toXML('\t')
                            with open(
                                    os.path.join(registeredFolder, 'MLST.xml'),
                                    'w') as f:
                                f.write(xmlMlst)

                            loaderZ.serialize(
                                mlst,
                                os.path.join(registeredFolder,
                                             'mlstSerialized'))

                            registrationStats.append(
                                [l, distance, len(inliers)])
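MovingLeastSquaresTransform implements mpicbg's CoordinateTransform, so the fitted transform can map individual coordinates with apply(), as Example #13 below also does. A brief hedged sketch (the coordinates are made up):

# Hypothetical: map an LM coordinate into EM space with the MLS transform fitted above.
from jarray import array
p = array([100.0, 200.0], 'd')
print list(mlst.apply(p))  # the transformed point, returned as a new double[]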
Example #8
	def call(self, save_images=False):
		self.thread_used = threading.currentThread().getName()
		self.started = time.time()
		try:
			filenames = os.listdir(self.params.input_folder)
			filenames.sort()

			imp1 = IJ.openImage(os.path.join(self.params.input_folder, filenames[self.imgA]))
			imp2 = IJ.openImage(os.path.join(self.params.input_folder, filenames[self.imgB]))
			IJ.log(time.asctime())
			IJ.log(str(self.imgA) + ": " + str(imp1))
			IJ.log(str(self.imgB) + ": " + str(imp2))			
			print str(self.imgA) + ": " + str(imp1)
			print str(self.imgB) + ": " + str(imp2)

			mesh_resolution = int(imp1.getWidth() / self.params.point_distance)
			effective_point_distance = imp1.getWidth() / mesh_resolution

			mesh = SpringMesh(mesh_resolution, imp1.getWidth(), imp1.getHeight(), 1, 1000, 0.9)
			vertices = mesh.getVertices()
			maskSamples = RealPointSampleList(2)
			for vertex in vertices:
				maskSamples.add(RealPoint(vertex.getL()), ARGBType(-1)) # equivalent of 0xffffffff
			pm12 = ArrayList()
			v1 = mesh.getVertices()

			ip1 = imp1.getProcessor().convertToFloat().duplicate()
			ip2 = imp2.getProcessor().convertToFloat().duplicate()

			ip1Mask = self.createMask(imp1.getProcessor().convertToRGB())
			ip2Mask = self.createMask(imp2.getProcessor().convertToRGB())

			ct = TranslationModel2D()

			BlockMatching.matchByMaximalPMCC(
							ip1,
							ip2,
							ip1Mask,
							ip2Mask,
							self.params.scale,
							ct,
							self.params.block_radius,
							self.params.block_radius,
							self.params.search_radius,
							self.params.search_radius,
							self.params.min_R,
							self.params.rod_R,
							self.params.max_curvature_R,
							v1,
							pm12,
							ErrorStatistic(1))

			pre_smooth_block_matches = len(pm12)
			if self.params.save_data:
				pre_smooth_filename = self.params.output_folder + str(imp2.getTitle())[:-4] + "_" + str(imp1.getTitle())[:-4] + "_pre_smooth_pts.txt"
				self.writePointsFile(pm12, pre_smooth_filename)

			if self.params.use_local_smoothness_filter:
				model = Util.createModel(self.params.local_model_index)
				try:
					model.localSmoothnessFilter(pm12, pm12, self.params.local_region_sigma, self.params.max_local_epsilon, self.params.max_local_trust)
					if self.params.save_data:
						post_smooth_filename = self.params.output_folder + str(imp2.getTitle())[:-4] + "_" + str(imp1.getTitle())[:-4] + "_post_smooth_pts.txt"
						self.writePointsFile(pm12, post_smooth_filename)
				except:
					pm12.clear()

			color_samples, max_displacement = self.matches2ColorSamples(pm12)
			post_smooth_block_matches = len(pm12)
				
			print time.asctime()
			print str(self.imgB) + "_" + str(self.imgA) + "\tblock_matches\t" + str(pre_smooth_block_matches) + "\tsmooth_filtered\t" + str(pre_smooth_block_matches - post_smooth_block_matches) + "\tmax_displacement\t" + str(max_displacement) + "\trelaxed_length\t" + str(effective_point_distance) + "\tsigma\t" + str(self.params.local_region_sigma)
			IJ.log(time.asctime())
			IJ.log(str(self.imgB) + "_" + str(self.imgA) + ": block_matches " + str(pre_smooth_block_matches) + ", smooth_filtered " + str(pre_smooth_block_matches - post_smooth_block_matches) + ", max_displacement " + str(max_displacement) + ", relaxed_length " + str(effective_point_distance) + ", sigma " + str(self.params.local_region_sigma))
			if self.params.save_data and self.wf:
				self.wf.write(str(self.imgB) + 
					"\t" + str(self.imgA) + 
					"\t" + str(imp2.getTitle())[:-4] + 
					"\t" + str(imp1.getTitle())[:-4] + 
					"\t" + str(pre_smooth_block_matches) + 
					"\t" + str(pre_smooth_block_matches - post_smooth_block_matches) + 
					"\t" + str(max_displacement) + 
					"\t" + str(effective_point_distance) + 
					"\t" + str(self.params.local_region_sigma) + 
					"\t" + str(mesh_resolution) + "\n")

			if self.params.export_point_roi:
				pm12Sources = ArrayList()
				pm12Targets = ArrayList()

				PointMatch.sourcePoints(pm12, pm12Sources)
				PointMatch.targetPoints(pm12, pm12Targets)

				roi1 = pointsToPointRoi(pm12Sources)
				roi2 = pointsToPointRoi(pm12Targets)

				imp1.setRoi(roi1)
				imp2.setRoi(roi2)

			if self.params.export_displacement_vectors:
				pm12Targets = ArrayList()
				PointMatch.targetPoints(pm12, pm12Targets)

				maskSamples2 = RealPointSampleList(2)
				for point in pm12Targets:
					maskSamples2.add(RealPoint(point.getW()), ARGBType(-1))
				factory = ImagePlusImgFactory()
				kdtreeMatches = KDTree(color_samples)
				kdtreeMask = KDTree(maskSamples)
				
				img = factory.create([imp1.getWidth(), imp1.getHeight()], ARGBType())
				self.drawNearestNeighbor(
							img, 
							NearestNeighborSearchOnKDTree(kdtreeMatches),
							NearestNeighborSearchOnKDTree(kdtreeMask))
				scaled_img = self.scaleIntImagePlus(img, 0.1)
				if self.params.save_data:
					fs = FileSaver(scaled_img)
					fs.saveAsTiff(self.params.output_folder + str(imp2.getTitle())[:-4] + "_" + str(imp1.getTitle()))
				else:
					scaled_img.show()
				print time.asctime()
				print str(self.imgB) + "_" + str(self.imgA) + "\tsaved\t" + filenames[self.imgB]
				IJ.log(time.asctime())
				IJ.log(str(self.imgB) + "_" + str(self.imgA) + ": saved " + filenames[self.imgB])
		except Exception, ex:
			self.exception = ex
			print str(ex)
			IJ.log(str(ex))
			if self.params.save_data and self.wf:
				self.wf.write(str(ex) + "\n")
Example #9
	def call(self, save_images=False):
		self.thread_used = threading.currentThread().getName()
		self.started = time.time()
		try:
			IJ.log(time.asctime())
			IJ.log(str(self.patchA))
			IJ.log(str(self.patchB))			
			print str(self.patchA)
			print str(self.patchB)

			# Adapted from ElasticMontage.java
			# https://github.com/trakem2/TrakEM2/blob/master/TrakEM2_/src/main/java/mpicbg/trakem2/align/ElasticMontage.java
			pi1 = self.patchA.createTransformedImage()
			pi2 = self.patchB.createTransformedImage()

			fp1 = pi1.target.convertToFloat()
			mask1 = pi1.getMask()
			if mask1 is None:
				fpMask1 = None
			else:
				fpMask1 = scaleByte(mask1)

			fp2 = pi2.target.convertToFloat()
			mask2 = pi2.getMask()
			if mask2 is None:
				fpMask2 = None
			else:
				fpMask2 = scaleByte(mask2)

			w = self.tileA.getWidth()
			h = self.tileA.getHeight()
			num_x = Math.max(2, int(Math.ceil(w / self.params.spring_length) + 1))
			num_y = Math.max(2, int(Math.ceil(h / self.params.spring_triangle_height_twice) + 1))
			w_mesh = (num_x - 1) * self.params.spring_length
			h_mesh = (num_y - 1) * self.params.spring_triangle_height_twice

			mesh = SpringMesh(num_x,
								num_y,
								w_mesh,
								h_mesh,
								self.params.stiffness,
								self.params.max_stretch * self.params.scale,
								self.params.damp)

			vertices = mesh.getVertices()
			maskSamples = RealPointSampleList(2)
			for vertex in vertices:
				maskSamples.add(RealPoint(vertex.getL()), ARGBType(-1)) # equivalent of 0xffffffff
			pm12 = ArrayList()
			v1 = mesh.getVertices()

			t = self.tileB.getModel().createInverse()
			t.concatenate(self.tileA.getModel())

			BlockMatching.matchByMaximalPMCC(
							fp1,
							fp2,
							fpMask1,
							fpMask2,
							self.params.scale,
							t,
							self.params.block_radius,
							self.params.block_radius,
							self.params.search_radius,
							self.params.search_radius,
							self.params.min_R,
							self.params.rod_R,
							self.params.max_curvature_R,
							v1,
							pm12,
							ErrorStatistic(1))

			pre_smooth_block_matches = len(pm12)
			if self.params.save_data:
				pre_smooth_filename = self.params.output_folder + str(self.patchB) + "_" + str(self.patchA) + "_pre_smooth_pts.txt"
				self.writePointsFile(pm12, pre_smooth_filename)

			if self.params.use_local_smoothness_filter:
				model = Util.createModel(self.params.local_model_index)
				try:
					model.localSmoothnessFilter(pm12, pm12, self.params.local_region_sigma, self.params.max_local_epsilon, self.params.max_local_trust)
					if self.params.save_data:
						post_smooth_filename = self.params.output_folder + str(self.patchB) + "_" + str(self.patchA) + "_post_smooth_pts.txt"
						self.writePointsFile(pm12, post_smooth_filename)
				except:
					pm12.clear()

			color_samples, max_displacement = self.matches2ColorSamples(pm12)
			post_smooth_block_matches = len(pm12)
				
			print time.asctime()
			print str(self.patchB) + "_" + str(self.patchA) + "\tblock_matches\t" + str(pre_smooth_block_matches) + "\tsmooth_filtered\t" + str(pre_smooth_block_matches - post_smooth_block_matches) + "\tmax_displacement\t" + str(max_displacement) + "\trelaxed_length\t" + str(self.params.spring_length) + "\tsigma\t" + str(self.params.local_region_sigma)
			IJ.log(time.asctime())
			IJ.log(str(self.patchB) + "_" + str(self.patchA) + ": block_matches " + str(pre_smooth_block_matches) + ", smooth_filtered " + str(pre_smooth_block_matches - post_smooth_block_matches) + ", max_displacement " + str(max_displacement) + ", relaxed_length " + str(self.params.spring_length) + ", sigma " + str(self.params.local_region_sigma))
			if self.params.save_data and self.wf:
				self.wf.write(str(self.patchB) + 
					"\t" + str(self.patchA) + 
					"\t" + str(pre_smooth_block_matches) + 
					"\t" + str(pre_smooth_block_matches - post_smooth_block_matches) + 
					"\t" + str(max_displacement) + 
					"\t" + str(self.params.spring_length) + 
					"\t" + str(self.params.local_region_sigma) + 
					"\t" + str(num_x) + "\n")

			if self.params.export_point_roi:
				pm12Sources = ArrayList()
				pm12Targets = ArrayList()

				PointMatch.sourcePoints(pm12, pm12Sources)
				PointMatch.targetPoints(pm12, pm12Targets)

				roi1 = pointsToPointRoi(pm12Sources)
				roi2 = pointsToPointRoi(pm12Targets)

				# # Adapted from BlockMatching.java
				# # https://github.com/axtimwalde/mpicbg/blob/master/mpicbg/src/main/java/mpicbg/ij/blockmatching/BlockMatching.java
				# tTarget = TranslationModel2D()
				# sTarget = SimilarityModel2D()
				# tTarget.set(-self.params.search_radius, -self.params.search_radius)
				# sTarget.set(1.0/self.params.scale, 0, 0, 0)
				# lTarget = CoordinateTransformList()
				# lTarget.add(sTarget)
				# lTarget.add(tTarget)
				# lTarget.add(t)
				# targetMapping = TransformMapping(lTarget)

				# mappedScaledTarget = FloatProcessor(fp1.getWidth() + 2*search_radius, fp1.getHeight() + 2*search_radius)

				# targetMapping.mapInverseInterpolated(fp2, mappedScaledTarget)
				# imp1 = tileImagePlus("imp1", fp1)
				# imp1.show()				
				# imp2 = ImagePlus("imp2", mappedScaledTarget)
				# imp2.show()

				imp1 = ImagePlus("imp1", fp1)
				imp1.show()				
				imp2 = ImagePlus("imp2", fp2)
				imp2.show()				

				imp1.setRoi(roi1)
				imp2.setRoi(roi2)

			if self.params.export_displacement_vectors:
				pm12Targets = ArrayList()
				PointMatch.targetPoints(pm12, pm12Targets)

				maskSamples2 = RealPointSampleList(2)
				for point in pm12Targets:
					maskSamples2.add(RealPoint(point.getW()), ARGBType(-1))
				factory = ImagePlusImgFactory()
				kdtreeMatches = KDTree(color_samples)
				kdtreeMask = KDTree(maskSamples)
				
				img = factory.create([fp1.getWidth(), fp1.getHeight()], ARGBType())
				self.drawNearestNeighbor(
							img, 
							NearestNeighborSearchOnKDTree(kdtreeMatches),
							NearestNeighborSearchOnKDTree(kdtreeMask))
				scaled_img = self.scaleIntImagePlus(img, 0.03)
				if self.params.save_data:
					fs = FileSaver(scaled_img)
					fs.saveAsTiff(self.params.output_folder + str(self.patchB) + "_" + str(self.patchA) + ".tif")
				else:
					scaled_img.show()
				print time.asctime()
				print str(self.patchB) + "_" + str(self.patchA) + "\tsaved"
				IJ.log(time.asctime())
				IJ.log(str(self.patchB) + "_" + str(self.patchA) + ": saved")
		except Exception, ex:
			self.exception = ex
			print str(ex)
			IJ.log(str(ex))
			if self.params.save_data and self.wf:
				self.wf.write(str(ex) + "\n")
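The self.params object that both call() methods read is not shown on this page; a minimal stand-in covering the block-matching fields, with every value an assumption:

# Hypothetical parameter holder for the block-matching tasks above.
class BlockMatchParams(object):
	def __init__(self):
		self.scale = 0.5                  # working scale for PMCC block matching
		self.block_radius = 50            # block half-size, in pixels
		self.search_radius = 25           # maximal displacement searched, in pixels
		self.min_R = 0.1                  # minimal PMCC correlation coefficient
		self.rod_R = 0.9                  # ratio of best vs second best match
		self.max_curvature_R = 10.0       # maximal curvature ratio
		self.use_local_smoothness_filter = True
		self.local_model_index = 3        # model for localSmoothnessFilter, per Util.createModel
		self.local_region_sigma = 200.0   # sigma of the local smoothness neighborhood
		self.max_local_epsilon = 12.0     # maximal local alignment error
		self.max_local_trust = 3.0        # maximal local trust
		self.save_data = False
		self.output_folder = "/tmp/"
		self.export_point_roi = False
		self.export_displacement_vectors = False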
Example #10
def findPointMatches(pairs, angle_epsilon, len_epsilon_sq):
  for c0, c1 in pairs:
    if c0.matches(c1, angle_epsilon, len_epsilon_sq):
      yield PointMatch(c0.position, c1.position)
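A hedged usage sketch, consistent with the constellation-feature examples above (the features dictionary and both epsilon values are assumptions):

# Hypothetical: lazily collect matches over all feature pairs of two timepoints.
from itertools import product
pointmatches = list(findPointMatches(product(features[0], features[1]),
                                     0.05,                       # angle_epsilon, in radians
                                     pow(somaDiameter / 2, 2)))  # len_epsilon_sq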
Example #11
      len2s.append(cons.len2)
      angles.append(cons.angle)
    print ti, "len1:", sqrt(min(len1s)), sqrt(max(len1s)), sqrt(sum(len1s)/float(len(len1s)))
    print ti, "len2:", sqrt(min(len2s)), sqrt(max(len2s)), sqrt(sum(len2s)/float(len(len2s)))
    print ti, "angle:", min(angles), max(angles), sum(angles)/float(len(angles))

  # Compare constellation features to find candidate PointMatch instances
  ti_pointmatches = {}

  for t0, t1 in [[0, 1]]:
    # Compare all possible pairs of constellation features
    #futures = [exe.submit(Task(findPointMatches, pairs, angle_epsilon, len_epsilon_sq))
    #           for pairs in partition(product(features[t0], features[t1]), len(features[t0]) * len(features[t1]) / n_threads)]
    #ti_pointmatches[t1].extend(chain.from_iterable(f.get() for f in futures))

    ti_pointmatches[t1] = [PointMatch(c0.position, c1.position)
                           for c0, c1 in product(features[t0], features[t1])
                           if c0.matches(c1, angle_epsilon, len_epsilon_sq)]
    
    print t0, "vs", t1, "- found", len(ti_pointmatches[t1]), "point matches"

  def fit(model, pointmatches):
    inliers = ArrayList()
    exception = None
    modelFound = False  # remains False if filterRansac throws
    try:
      modelFound = model.filterRansac(pointmatches, inliers, n_iterations,
                                      maxEpsilon, minInlierRatio, minNumInliers, maxTrust)
    except NotEnoughDataPointsException, e:
      exception = e
    return model, modelFound, inliers, exception
Example #12
def extractBlockMatches(filepath1,
                        filepath2,
                        params,
                        csvDir,
                        exeload,
                        load=loadFPMem):
    """
  filepath1: the file path to an image of a section.
  filepath2: the file path to an image of another section.
  params: dictionary of parameters necessary for BlockMatching.
  exeload: an ExecutorService for parallel loading of image files.
  load: a function that knows how to load the image from the filepath.

  return False if the CSV file already exists, True if it has to be computed.
  """

    # Skip if pointmatches CSV file exists already:
    csvpath = os.path.join(
        csvDir,
        basename(filepath1) + '.' + basename(filepath2) + ".pointmatches.csv")
    if os.path.exists(csvpath):
        return False

    try:

        # Load files in parallel
        futures = [
            exeload.submit(Task(load, filepath1)),
            exeload.submit(Task(load, filepath2))
        ]

        # Define points from the mesh
        sourcePoints = ArrayList()
        mesh = TransformMesh(params["meshResolution"], dimensions[0],
                             dimensions[1])
        PointMatch.sourcePoints(mesh.getVA().keySet(), sourcePoints)
        # List to fill with PointMatch instances from filepath1 to filepath2
        sourceMatches = ArrayList()

        syncPrint("Extracting block matches for \n S: " + filepath1 +
                  "\n T: " + filepath2 + "\n  with " +
                  str(sourcePoints.size()) + " mesh sourcePoints.")

        BlockMatching.matchByMaximalPMCCFromPreScaledImages(
            futures[0].get(),  # FloatProcessor
            futures[1].get(),  # FloatProcessor
            params["scale"],  # float
            params["blockRadius"],  # X
            params["blockRadius"],  # Y
            params["searchRadius"],  # X
            params["searchRadius"],  # Y
            params["minR"],  # float
            params["rod"],  # float
            params["maxCurvature"],  # float
            sourcePoints,
            sourceMatches)

        # At least some should match to accept the translation
        if len(sourceMatches) < max(20, len(sourcePoints) / 5) / 2:
            syncPrint(
                "Found only %i blockmatching pointmatches (from %i source points)"
                % (len(sourceMatches), len(sourcePoints)))
            syncPrint("... therefore invoking SIFT pointmatching for:\n  S: " +
                      basename(filepath1) + "\n  T: " + basename(filepath2))
            # Can fail if there is a shift larger than the searchRadius
            # Try SIFT features, which are location independent
            #
            # Images are now scaled: load originals
            futures = [
                exeload.submit(Task(loadFloatProcessor, filepath1,
                                    scale=False)),
                exeload.submit(Task(loadFloatProcessor, filepath2,
                                    scale=False))
            ]
            ijSIFT = SIFT(FloatArray2DSIFT(paramsSIFT))
            features1 = ArrayList()  # of Point instances
            ijSIFT.extractFeatures(futures[0].get(), features1)
            features2 = ArrayList()  # of Point instances
            ijSIFT.extractFeatures(futures[1].get(), features2)
            # Vector of PointMatch instances
            sourceMatches = FloatArray2DSIFT.createMatches(
                features1,
                features2,
                1.5,  # max_sd
                TranslationModel2D(),
                Double.MAX_VALUE,
                params["rod"])  # rod: ratio of best vs second best

        # Store pointmatches
        savePointMatches(os.path.basename(filepath1),
                         os.path.basename(filepath2), sourceMatches, csvDir,
                         params)

        return True
    except:
        syncPrint(str(sys.exc_info()))
        syncPrint("".join(traceback.format_exception(*sys.exc_info())), out="stderr")
Example #13
import sys, os
import cPickle as pickle
curDir = os.path.abspath(os.path.dirname(__file__))
sys.path.append("/plugin/mpicbg-master/mpicbg/target/mpicLatest.jar")
from mpicbg.models import Point, PointMatch, AffineModel3D, MovingLeastSquaresTransform

with open(os.path.join(curDir, "MLS",
                       "pointsToMatchDec2017EntryPointsB.pkl")) as fI:
    landmarkList = pickle.load(fI)
matches = []
for thisLand in landmarkList:
    for thisSide in ["right", "left", "centre"]:
        if "FROM_" + thisSide in thisLand and "TO_" + thisSide in thisLand:
            p1 = Point(map(float, thisLand["FROM_" + thisSide]))
            p2 = Point(map(float, thisLand["TO_" + thisSide]))
            matches.append(PointMatch(p1, p2, 1.0))

print len(matches), "landmark points used"

mls = MovingLeastSquaresTransform()
model = AffineModel3D()
mls.setModel(model)
mls.setMatches(matches)

with open(os.path.join(curDir, "MLS", "Points_input.pkl")) as fI:
    pointList = pickle.load(fI)
output = []
for i in range(len(pointList)):
    output.append(map(float, mls.apply(pointList[i])))

with open(os.path.join(curDir, "MLS", "ConvertedPoints_output.pkl"), "w") as fO:
    pickle.dump(output, fO)