Example #1
    def solveLabels_temp(self):
        points, pointLabels, pointNormals, E, x2d_labels = Recon.solve_x3ds_normals(
            self.x2ds, self.splits, self.labels_temp, self.Ps, self.rays)

        print "solveLabels:", len(points), np.min(pointLabels), np.max(
            pointLabels), "(#points | min label | max label)"
        return points, pointLabels, pointNormals
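
All of these examples share one packing convention: x2ds stacks every camera's 2D detections into a single (N, 2) float32 array, splits is an int32 array of camera boundaries so that x2ds[splits[ci]:splits[ci+1]] are camera ci's detections, and the labels array runs parallel to x2ds with -1 meaning unlabelled. A minimal pure-NumPy sketch of that packing (the per-camera detection counts here are made up):

import numpy as np

# Hypothetical per-camera detections: 3, 0 and 2 points respectively.
per_camera = [np.random.rand(3, 2), np.zeros((0, 2)), np.random.rand(2, 2)]

x2ds = np.concatenate(per_camera).astype(np.float32)
splits = np.zeros(len(per_camera) + 1, dtype=np.int32)
splits[1:] = np.cumsum([len(p) for p in per_camera])
labels = -np.ones(len(x2ds), dtype=np.int32)  # -1 = unlabelled

assert x2ds.shape == (5, 2) and list(splits) == [0, 3, 3, 5]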
Example #2
def detect_wand(x2ds_data, x2ds_splits, mats, thresh=20. / 2000., x3d_threshold=1000000.):
	Ps = np.array([m[2] / np.linalg.norm(m[2][0, :3]) for m in mats], dtype=np.float32)
	wand_x3ds = np.array([[160, 0, 0], [0, 0, 0], [-80, 0, 0], [0, 0, -120], [0, 0, -240]], dtype=np.float32)
	x2ds_labels = -np.ones(x2ds_data.shape[0], dtype=np.int32)
	ISCV.label_T_wand(x2ds_data, x2ds_splits, x2ds_labels, 2.0, 0.5, 0.01, 0.07)
	x2ds_labels2 = x2ds_labels.copy()
	count = np.sum(x2ds_labels2 != -1) / 5  # number of cameras with a fully labelled wand
	if count < 3: return None, None, None
	x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps)
	count = ISCV.project_and_clean(x3ds, Ps, x2ds_data, x2ds_splits, x2ds_labels, x2ds_labels2, thresh ** 2, thresh ** 2, x3d_threshold)
	if count < 3: return None, None, None
	x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps)
	assert np.all(x3ds_labels == [0, 1, 2, 3, 4]), 'ERROR: Labels do not match' # skip if somehow not all points seen
	assert np.max(x3ds ** 2) < 1e9, 'ERROR: values out of bounds: ' + repr(x3ds)
	mat = rigid_align_points(wand_x3ds, x3ds)
	x3ds = np.dot(wand_x3ds, mat[:3, :3].T) + mat[:, 3]
	return x3ds, x3ds_labels, x2ds_labels2
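
rigid_align_points is defined elsewhere in this codebase; judging by how mat is applied above, it returns a 3x4 [R|t] that maps the model points onto the reconstructed points. A pure-NumPy sketch of such a rigid (Kabsch) alignment, offered as an illustration rather than the actual implementation:

import numpy as np

def rigid_align_points_sketch(src, tgt):
	# Best-fit rotation R and translation t with tgt ~= np.dot(src, R.T) + t.
	src_mean, tgt_mean = src.mean(axis=0), tgt.mean(axis=0)
	U, S, Vt = np.linalg.svd(np.dot((src - src_mean).T, tgt - tgt_mean))
	R = np.dot(Vt.T, U.T)
	if np.linalg.det(R) < 0:  # guard against reflections
		Vt[-1] *= -1
		R = np.dot(Vt.T, U.T)
	t = tgt_mean - np.dot(R, src_mean)
	return np.hstack((R, t.reshape(3, 1))).astype(np.float32)  # 3x4 [R|t]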
Example #3
def setFrame(frame):
    global State, mats, movieFilenames, primitives
    global movies, primitives2D, deinterlacing, detectingWands, dot_detections, track3d, prev_frame, booting, trackGraph
    key = State.getKey('dotParams/attrs')

    # Detect a skip: the requested frame is neither the current frame nor the
    # next consecutive one; reboot the tracker for a few frames if so.
    skipping = (frame != prev_frame and frame - 1 != prev_frame)
    prev_frame = frame
    booting = 10 if skipping else booting - 1

    p0, p1 = [], []

    if True:  #dot_detections is None:

        for pair in enumerate(movies):
            pts = process_frame(deinterlacing, detectingWands, frame, key,
                                pair)
            p0.append(pts[0])
            p1.append(pts[1])

        def make_bounds(lens):
            return np.array([sum(lens[:x]) for x in xrange(len(lens) + 1)],
                            dtype=np.int32)

        data0 = (np.array(np.concatenate(p0), dtype=np.float32).reshape(-1, 2),
                 make_bounds(map(len, p0)))
        data1 = (np.array(np.concatenate(p1), dtype=np.float32).reshape(-1, 2),
                 make_bounds(map(len, p1)))
    else:
        #dot_detections = movies_to_detections(movies, [frame], deinterlacing, key)
        data0, data1 = (dot_detections[frame] if frame in dot_detections
                        else dot_detections.values()[0])
        for ci, md in enumerate(movies):
            try:
                MovieReader.readFrame(md, seekFrame=frame)
            except:
                print 'oops', frame
                return None, None
            #img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3)
            QApp.view().cameras[ci + 1].invalidateImageData()
            # copy so that undistort doesn't modify the raw detections
            data0 = data0[0].copy(), data0[1]
            data1 = data1[0].copy(), data1[1]
    # TODO, move this to the viewer...
    data0 = ViconReader.frameCentroidsToDets(data0, mats)
    data1 = ViconReader.frameCentroidsToDets(data1, mats)

    primitives2D[0].setData(data0[0], data0[1])
    primitives2D[1].setData(data1[0], data1[1])

    #print x2ds_labels
    if len(movieFilenames) != 1:
        if 1:
            #x2ds_data, x2ds_splits = data0 # dark points only
            x2ds_data, x2ds_splits = data1  # light points only
            if skipping:
                x3ds, x3ds_labels = track3d.boot(x2ds_data, x2ds_splits)
                #trackGraph = Label.TrackGraph()
            else:
                x3ds, x3ds_labels = track3d.push(x2ds_data, x2ds_splits)
                # coarse bounding box
                if False:
                    for xi, x in zip(x3ds_labels, x3ds):
                        if (x[0] < -200 or x[0] > 200 or x[1] < 800
                                or x[1] > 1200 or x[2] < -50 or x[2] > 300):
                            # unlabel this point's rays and zero the point
                            track3d.x2ds_labels[np.where(track3d.x2ds_labels == xi)[0]] = -1
                            x[:] = 0
            primitives[0].setData(x3ds)
            #trackGraph.push(x3ds,x3ds_labels)
            #primitives[0].graph = trackGraph.drawing_graph()
        elif False:
            Ps = np.array([m[2] / (m[0][0, 0]) for m in mats],
                          dtype=np.float32)
            data = data0  # dark points
            #data = data1 # light points
            x3ds, x2ds_labels = Recon.intersect_rays(data[0],
                                                     data[1],
                                                     Ps,
                                                     mats,
                                                     tilt_threshold=0.003,
                                                     x2d_threshold=0.02,
                                                     x3d_threshold=5.0,
                                                     min_rays=2)
            primitives[0].setData(x3ds)
        if detectingTiara:
            global c3d_frames
            frame = c3d_frames[(frame - 55) % len(c3d_frames)]
            which = np.where(frame[:, 3] == 0)[0]
            x3ds = frame[which, :3]
            #print frame,'len',len(x3ds)
            primitives[1].setData(x3ds)
    QApp.app.refreshImageData()
    QApp.app.updateGL()
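
The disabled coarse bounding-box cull above drops any 3D point outside a working volume and unlabels its contributing rays. A vectorised pure-NumPy sketch of the same test (the bounds are the ones hard-coded above):

import numpy as np

def cull_outside_box(x3ds, x3ds_labels, x2ds_labels,
                     lo=(-200., 800., -50.), hi=(200., 1200., 300.)):
    outside = np.any((x3ds < lo) | (x3ds > hi), axis=1)  # per-point test
    for xi in x3ds_labels[outside]:
        x2ds_labels[x2ds_labels == xi] = -1  # unlabel the contributing rays
    x3ds[outside] = 0
    return x3ds, x2ds_labels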
Example #4
                    img[int(r.sy - dy):int(r.sy + dy),
                        int(r.sx - dx):int(r.sx + dx), 0] = 128
    else:
        pts0 = pts1 = []
    return (pts0, pts1)


def tighten_calibration(
    (x3s, x3s_labels), (x2s, x2s_splits, x2s_labels), mats):
    x3s_original = x3s.copy()
    x2s_labels_original = x2s_labels.copy()
    for it in range(10):
        x2d_threshold = 0.08  # - it * 0.04/50.
        Ps = np.array([m[2] / (m[0][0, 0]) for m in mats], dtype=np.float32)
        u2s, _ = Calibrate.undistort_dets(x2s, x2s_splits, mats)
        x3s, x3s_labels, E, x2d_labels = Recon.solve_x3ds(
            u2s, x2s_splits, x2s_labels_original, Ps, True)
        clouds = ISCV.HashCloud2DList(u2s, x2s_splits, x2d_threshold)
        sc, x2s_labels, _ = Label.project_assign(clouds, x3s, x3s_labels, Ps,
                                                 x2d_threshold)
        print 'it', it, sc
        tiara_xis = np.where(x3s_labels < len(VICON_tiara_x3ds))[0]
        tiara_lis = x3s_labels[tiara_xis]
        tiara_true = VICON_tiara_x3ds[tiara_lis] + [0, 1000, 0]
        tiara_xs = x3s[tiara_xis]
        # now solve the tiara into place by finding a rigid transform
        RT, inliers = Calibrate.rigid_align_points_inliers(tiara_xs,
                                                           tiara_true,
                                                           scale=True)
        x3s = np.dot(x3s, RT[:3, :3].T) + RT[:, 3]
        x3s[tiara_xis] = tiara_true
        singles = np.where([x in list(x2d_labels) for x in x2s_labels])[0]
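
The RT returned by Calibrate.rigid_align_points_inliers is a 3x4 [R|t] applied with the row-vector pattern x3s = np.dot(x3s, RT[:3, :3].T) + RT[:, 3]. A tiny self-contained check of that convention (the rotation here is a made-up 90-degree turn about z):

import numpy as np

R = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], dtype=np.float32)
t = np.array([10, 0, 0], dtype=np.float32)
RT = np.hstack((R, t.reshape(3, 1)))  # 3x4 [R|t]

pts = np.array([[1, 0, 0], [0, 2, 0]], dtype=np.float32)
out = np.dot(pts, RT[:3, :3].T) + RT[:, 3]  # same pattern as above

assert np.allclose(out, [[10, 1, 0], [8, 0, 0]])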
Example #5
    def cook(self, location, interface, attrs):
        if not self.useFrame(interface.frame(), attrs['frameRange']): return

        calibrationLocation = attrs['calibration']
        if not calibrationLocation: calibrationLocation = interface.root()

        # Get the mats from the calibration location
        mats = interface.attr('mats', atLocation=calibrationLocation)
        if mats is None:
            self.logger.error('Attribute mats not found at: %s' %
                              calibrationLocation)
            return

        Ps = interface.attr('Ps', atLocation=calibrationLocation)
        if Ps is None:
            Ps = np.array([m[2] / (np.sum(m[2][0, :3]**2)**0.5) for m in mats],
                          dtype=np.float32)

        # Get the detections from the location we are cooking
        x2ds = interface.attr('x2ds')
        x2ds_splits = interface.attr('x2ds_splits')
        x2ds_bright = interface.attr('x2ds', atLocation='/root/cameras/bright')
        x2ds_bright_splits = interface.attr('x2ds_splits',
                                            atLocation='/root/cameras/bright')

        if x2ds is None or x2ds_splits is None:
            self.logger.error('Detections not found at: %s' % location)
            return

        # Get configuration parameters
        tilt_threshold = attrs['tilt_threshold']
        x2d_threshold = attrs['x2d_threshold']
        x3d_threshold = attrs['x3d_threshold']
        min_rays = attrs['min_rays']
        seed_x3ds_location = attrs['seed_x3ds']
        seed_x3ds = None

        if min_rays < 2:
            self.logger.error(
                'You need at least 2 rays but you specified the minimum to be: %d'
                % min_rays)
            return

        if seed_x3ds_location:
            seed_x3ds = interface.attr('x3ds', atLocation=seed_x3ds_location)

        if self.visibility is None:
            self.visibility = ISCV.ProjectVisibility.create()

        # Check if we have normals
        if attrs['mesh'] and interface.hasAttr('normals',
                                               atLocation=attrs['mesh']):
            normals = interface.attr('normals', atLocation=attrs['mesh'])
            self.visibility.setNormals(normals)

        # Check if we have visibility LODs
        if 'visibilityLod' in attrs and attrs['visibilityLod']:
            visibilityLod = interface.location(attrs['visibilityLod'])
            if visibilityLod is not None:
                lodTris = visibilityLod['tris']
                lodVerts = visibilityLod['verts']
                lodNormals = visibilityLod['faceNormals']
                tris = lodVerts[lodTris]
                cameraPositions = np.array([m[4] for m in mats],
                                           dtype=np.float32)
                self.visibility.setLods(tris, cameraPositions,
                                        np.concatenate((lodNormals)),
                                        attrs['intersection_threshold'],
                                        attrs['generateNormals'])

        # Calculate the 3D reconstructions from the detections
        x3ds, labels, _, _ = Recon.intersect_rays(
            x2ds,
            x2ds_splits,
            Ps,
            mats,
            seed_x3ds=seed_x3ds,
            tilt_threshold=tilt_threshold,
            x2d_threshold=x2d_threshold,
            x3d_threshold=x3d_threshold,
            min_rays=min_rays,
            numPolishIts=3,
            forceRayAgreement=True,
            visibility=self.visibility)

        if not x3ds.any() or not labels.any(): return
        x3ds_labels = np.arange(np.max(labels) + 1)

        if attrs['setLabels']:
            interface.setAttr('labels', labels)
        else:
            interface.setAttr('labels', [])

        # Find which cameras contribute to the 3D reconstructions (optional?)
        cameraPositions = np.array([m[4] for m in mats], dtype=np.float32)
        cameraContributions = {}
        for label3d in x3ds_labels:
            camIds = [
                interface.findCameraIdFromRayId(rayId, x2ds_splits)
                for rayId in np.where(labels == label3d)[0]
            ]
            cameraContributions[label3d] = camIds

        # Create 3D points attributes on the cooked location
        pAttrs = {
            'x3ds': x3ds,
            'x3ds_labels': x3ds_labels,
            'x3ds_colour': eval(attrs['colour']),
            'x3ds_pointSize': attrs['pointSize'],
            'cameraContributions': cameraContributions,
            'showCameraContributions': attrs['show_contributions'],
            'cameraPositions': cameraPositions
        }
        interface.createChild('reconstructed', 'points3d', attrs=pAttrs)
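
interface.findCameraIdFromRayId belongs to the host framework, but under the splits convention the lookup it performs can be sketched in pure NumPy (searchsorted finds which camera's range a ray index falls into):

import numpy as np

def find_camera_id_from_ray_id(rayId, splits):
    # Camera ci owns the rays in [splits[ci], splits[ci + 1]).
    return int(np.searchsorted(splits, rayId, side='right') - 1)

splits = np.array([0, 3, 3, 5], dtype=np.int32)
assert [find_camera_id_from_ray_id(r, splits) for r in range(5)] == [0, 0, 0, 2, 2]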
Example #6
def cleanAndLabelX3ds(labellingData,
                      x3ds,
                      N,
                      allowStealing=True,
                      pts=np.array([]),
                      visibility=None):
    global cameraContributions, rayInfo

    labels = labellingData.labels_temp
    labelPositions = labellingData.labelPositions
    x3d_threshold = labellingData.x3d_threshold
    x2ds = labellingData.x2ds
    splits = labellingData.splits
    reconstructionData = labellingData.reconstructionData
    rays = labellingData.rays
    cameraPositions = labellingData.cameraPositions

    # We want to get only the points that have N neighbours within 1cm
    # TODO: Cache this as we'll be using it multiple times
    #cloud = ISCV.HashCloud3D(x3ds, x3d_threshold)
    #scores, matches, matches_splits = cloud.score(x3ds)
    scores, matches, matches_splits = labellingData.getClusterData(x3ds)

    #clusterMeanPoints = []
    registry = []
    x3ds_means = []
    x3ds_normals = []
    cameraContributions = []
    # clusterCameraContributions = []
    rawData = None
    rayInfo = []
    labelsAdded = []

    #x2ds, splits = data1
    #Ps = np.array([m[2] / (np.sum(m[2][0, :3] ** 2) ** 0.5) for m in mats], dtype=np.float32)
    Ps = labellingData.Ps

    volatileLabels = []
    goldStandardLabels = []

    for n in N:
        #print ">> Min Rays:", n
        whichMatches = np.where(
            matches_splits[1:] - matches_splits[:-1] >= n)[0]
        clusterSplitPairs = np.array(
            zip(matches_splits[:-1], matches_splits[1:]))[whichMatches]

        if n == N[0]: rawData = x3ds[whichMatches]  # keep the raw candidates from the first pass

        clusterCounter = 0
        x3ds_clusters = []
        x3ds_clusterColours = []
        x3ds_clusterMeans = []
        x3ds_clusterMeansColours = []
        x3ds_clusterLabels = []

        for matchFrom, matchTo in clusterSplitPairs:
            # Find the points for this cluster and calculate the mean position
            pointIndices = matches[matchFrom:matchTo]
            numPoints = len(pointIndices)
            assert (numPoints >= n)
            clusterMean = np.mean(x3ds[pointIndices], axis=0)

            # Reject clusters that form suspiciously close to a camera position
            if np.any(np.linalg.norm(clusterMean - cameraPositions, axis=1) <
                      x3d_threshold * 6.0):
                continue

            if pts.any():
                if len(pts.shape) == 1:
                    dists = np.linalg.norm(clusterMean - pts)
                else:
                    dists = np.linalg.norm(clusterMean - pts, axis=1)

                if np.any(dists > x3d_threshold * 10.0):
                    continue

            cluster = x3ds[pointIndices]
            x3ds_clusters.extend(cluster)
            randomColour = np.concatenate(
                (np.random.rand(3), np.array([0.5], dtype=np.float32)))
            x3ds_clusterColours.extend(
                np.tile(randomColour, (cluster.shape[0], 1)))
            x3ds_clusterMeans.append(clusterMean)
            x3ds_clusterMeansColours.append(randomColour)
            x3ds_clusterLabels.append(clusterCounter)

            # Get all the rays used to make the points in this cluster. This will be a Nx3 matrix
            rayIndices = np.unique(
                [reconstructionData[pi]['pair'] for pi in pointIndices])
            pointRays = rays[rayIndices]

            # Calculate the dot product for each combination of rays. This will be a NxN matrix
            raysDps = np.dot(pointRays, pointRays.T)

            # Find the ray which has the highest agreement with the others (sum of dot products)
            bestRay = np.sum(raysDps > 0, axis=0).argmax()

            # Find which other rays are in agreement with the best ray (dp > 0)
            goodRays = np.where(raysDps[bestRay] > 0.05)[0]

            # As all the (good) rays in the cluster should be contributing to creating a single point, we will
            # give them a new label that identifies them with the detection/reconstruction for that point
            #currentLabel = len(clusterMeanPoints)
            currentLabel = len(labelPositions)
            labelForPointReconstruction = currentLabel

            # Only continue with rays from a unique set of cameras
            camerasForRays = [
                findCameraIdFromRayId(rayId, splits)
                for rayId in rayIndices[goodRays]
            ]
            uniqueRayCams, uniqueRayCamsIdx = np.unique(camerasForRays,
                                                        return_index=True)
            goodRays = goodRays[uniqueRayCamsIdx]
            rayInfo.append(raysDps[goodRays])  # TODO: Fix.. nonsense

            existingLabelsForRays = labels[rayIndices[goodRays]]
            knownLabelIndices = np.where(existingLabelsForRays != -1)[0]
            rayIdsForKnownLabels = rayIndices[knownLabelIndices]
            camerasForKnownLabels = [
                findCameraIdFromRayId(rayId, splits)
                for rayId in rayIdsForKnownLabels
            ]
            uniqueCams, uniqueCamsIdx = np.unique(camerasForKnownLabels,
                                                  return_index=True)
            knownLabelIndices = knownLabelIndices[uniqueCamsIdx]
            knownLabels = existingLabelsForRays[knownLabelIndices]

            clusterCounter += 1

            # We check if any of the rays have been assigned a label before (i.e. they will contribute to
            # reconstructing a 3D point). If that is the case then we have to decide whether we want the
            # rays in this cluster to contribute to the existing label (reconstruction), or whether we
            # want to steal the labelled rays so that they now contribute to creating a new label
            # for this cluster.
            threshold = x3d_threshold**2
            for label in np.unique(knownLabels):
                # The ray has been labelled to create a 3D point. If that point is within threshold distance
                # of the current cluster we give this cluster the same label. In essence we are merging the
                # rays in this cluster with the rays that are already contributing to the label.
                # However, if the reconstructed label and the cluster mean are further away from each other
                # we will relabel it with the new label for this cluster which equates to stealing it.
                #dist = np.linalg.norm(clusterMeanPoints[label] - clusterMean)
                dist = np.linalg.norm(labelPositions[label] - clusterMean)
                if dist < threshold:
                    labelForPointReconstruction = label
                    break
                    # threshold = dist

            _clusterId, _clusterX3dId = len(labelPositions) - 1, len(
                x3ds_clusterMeans) - 1

            # Label the rays with the new or existing (merged) label
            useNewLabel = False
            unknownLabels = np.where(existingLabelsForRays == -1)[0]
            if labelForPointReconstruction == currentLabel:
                # No merging is going on
                if len(unknownLabels) > 0:
                    labels[rayIndices[goodRays][unknownLabels]] = currentLabel
                    useNewLabel = True

                if allowStealing:
                    for knownLabel in knownLabelIndices:
                        rayIdsWithLabel = np.where(
                            labels == existingLabelsForRays[knownLabel])[0]
                        numRaysForLabel = len(rayIdsWithLabel)
                        # if existingLabelsForRays[knownLabel] not in volatileLabels and numRaysForLabel < 3:
                        # if existingLabelsForRays[knownLabel] not in goldStandardLabels and numRaysForLabel < 3:
                        # if existingLabelsForRays[knownLabel] not in goldStandardLabels:
                        agreement = np.where(
                            np.sum(np.dot(bestRay, rays[rayIdsWithLabel]) > 0,
                                   axis=1) > 1)[0]
                        if True:
                            labels[rayIndices[goodRays]
                                   [knownLabel]] = currentLabel
                            useNewLabel = True

            else:
                # Employ merging strategy
                if allowStealing:
                    rayIdsWithLabel = np.where(
                        labels == labelForPointReconstruction)[0]
                    agreement = np.where(
                        np.sum(np.dot(bestRay, rays[rayIdsWithLabel]) > 0,
                               axis=1) > 1)[0]

                    labels[rayIndices[goodRays]
                           [unknownLabels]] = labelForPointReconstruction

                    for knownLabel in knownLabelIndices:
                        numRaysForLabel = len(
                            np.where(labels ==
                                     existingLabelsForRays[knownLabel])[0])
                        # if existingLabelsForRays[knownLabel] not in goldStandardLabels and numRaysForLabel < 3:
                        if existingLabelsForRays[
                                knownLabel] not in goldStandardLabels:
                            labels[rayIndices[goodRays]
                                   [knownLabel]] = currentLabel
                            useNewLabel = True
                else:
                    labels[rayIndices[goodRays]] = labelForPointReconstruction

            if useNewLabel:
                labelPositions[currentLabel] = clusterMean
                labelsAdded.append(currentLabel)

        goldStandardLabels = np.where(labels != -1)[0]

    if len(np.where(labels != -1)[0]) == 0:
        return np.array([]), np.array([]), np.array([]), rawData, labelsAdded

    # x3ds_means, x3ds_labels, _, _ = Recon.solve_x3ds(x2ds, splits, labels, Ps)
    x3ds_means, x3ds_labels, x3ds_normals, _, _ = Recon.solve_x3ds_normals(
        x2ds, splits, labels, Ps, rays)

    # x2d_threshold = 30. / 2000.
    # clouds = ISCV.HashCloud2DList(x2ds, splits, x2d_threshold)
    # _, labels, _ = clouds.project_assign_visibility(x3ds_means, None, Ps, x2d_threshold, visibility)

    labellingData.labels = labels
    usedLabels = np.array(np.where(labels != -1)[0], dtype=np.int32)

    return x3ds_means, x3ds_labels, x3ds_normals, rawData, labelsAdded
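
The best-ray selection above boils down to an NxN dot-product agreement matrix over the cluster's unit rays. A small pure-NumPy sketch of that step, with made-up rays (two nearly parallel, one opposing):

import numpy as np

rays = np.array([[0., 0., 1.], [0.1, 0., 1.], [0., 0., -1.]], dtype=np.float32)
rays /= np.linalg.norm(rays, axis=1)[:, None]   # normalise to unit length

raysDps = np.dot(rays, rays.T)                  # NxN pairwise dot products
bestRay = np.sum(raysDps > 0, axis=0).argmax()  # ray agreeing with most others
goodRays = np.where(raysDps[bestRay] > 0.05)[0] # rays agreeing with the best

assert bestRay == 0 and list(goodRays) == [0, 1]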
Example #7
    def calculate3dPointsFromDetections(self,
                                        x2ds,
                                        splits,
                                        mats,
                                        Ps=None,
                                        tilt_threshold=0.0002):
        import itertools

        Ts = np.array(zip(*mats)[4], dtype=np.float32)

        if Ps is None:
            Ps = np.array([m[2] / (np.sum(m[2][0, :3]**2)**0.5) for m in mats],
                          dtype=np.float32)

        numCameras = len(splits) - 1
        E = ISCV.compute_E(x2ds, splits, Ps)
        rays = Recon.dets_to_rays(x2ds, splits, mats)
        cameraPositions = np.array([m[4] for m in mats], dtype=np.float32)
        data = []

        def norm(a):
            return a / (np.sum(a**2)**0.5)

        tilt_axes = np.array([
            norm(np.dot([-m[0][0, 2], -m[0][1, 2], m[0][0, 0]], m[1][:3, :3]))
            for m in mats
        ],
                             dtype=np.float32)

        # Create all combinations of ci < cj
        cameraPairCombinations = np.array(list(
            itertools.combinations(range(numCameras), 2)),
                                          dtype=np.int32)

        knownCamPairs = [(7, 12), (5, 9), (3, 9), (4, 12), (7, 10), (8, 12),
                         (0, 9), (3, 4), (1, 9), (2, 7), (1, 2), (0, 11),
                         (5, 11), (1, 3), (2, 12), (9, 10), (10, 12), (7, 8),
                         (9, 12), (4, 10), (11, 12), (6, 10), (6, 9), (8, 10),
                         (3, 6), (0, 7), (4, 9), (1, 7),
                         (0, 5), (2, 4), (1, 10), (5, 7), (3, 12), (4, 6),
                         (2, 11), (3, 7), (3, 10), (4, 8), (4, 11), (0, 1),
                         (5, 12), (1, 6), (7, 11), (2, 3), (2, 8), (1, 4),
                         (1, 8), (0, 8), (6, 7), (1, 11), (8, 9), (0, 10),
                         (10, 11), (9, 11), (5, 10), (0, 12), (3, 5), (8, 11),
                         (0, 3), (5, 8), (7, 9), (6, 11), (6, 12), (1, 5),
                         (6, 8), (3, 8), (0, 6), (2, 5), (0, 4), (5, 6),
                         (1, 12), (4, 7), (2, 6), (2, 10), (4, 5), (3, 11),
                         (0, 2), (2, 9)]

        # Find valid pairs of camera rays that could intersect and create a 3D reconstruction
        for ci, cj in cameraPairCombinations:
            # for (ci, cj) in knownCamPairs:
            ui = range(splits[ci], splits[ci + 1])
            uj = range(splits[cj], splits[cj + 1])
            if len(ui) == 0 or len(uj) == 0: continue
            axis = cameraPositions[cj] - cameraPositions[ci]
            camPairDist = np.linalg.norm(axis)
            if camPairDist > 7000.: continue
            tilt_i = np.dot(map(norm, np.cross(rays[ui], axis)), tilt_axes[ci])
            tilt_j = np.dot(map(norm, np.cross(rays[uj], axis)),
                            tilt_axes[ci])  # NB tilt_axes[ci] not a bug
            io = np.argsort(tilt_i)
            jo = np.argsort(tilt_j)
            for ii, d0 in enumerate(tilt_i[io]):
                for ji, d1 in enumerate(tilt_j[jo]):
                    diff = d0 - d1
                    if abs(diff) < tilt_threshold:
                        d = [int(ui[io[ii]]), int(uj[jo[ji]])]
                        cams = [int(ci), int(cj)]
                        entry = {'pair': d, 'cameraIds': cams}
                        data.append(entry)

        # Create 3D reconstructions from ray pairs
        x3ds = []
        for entry in data:
            d = entry['pair']
            E0, e0 = E[d, :, :3].reshape(-1, 3), E[d, :, 3].reshape(-1)
            x3d = np.linalg.solve(
                np.dot(E0.T, E0) + np.eye(3) * 1e-7, -np.dot(E0.T, e0))
            ci, cj = entry['cameraIds']  # use this pair's cameras, not the stale loop variables
            ai, aj = x3d - Ts[ci], x3d - Ts[cj]
            angle = np.degrees(
                np.arccos(
                    np.dot(ai, aj) /
                    (np.linalg.norm(ai) * np.linalg.norm(aj))))
            if angle > 120: continue
            x3ds.append(x3d)

        return x3ds, data, rays, cameraPositions
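
The x3d solve above is a damped least squares over stacked ray equations of the form np.dot(E0, x) + e0 = 0. As a sanity check, here is the same solve on hand-built equations for two lines that meet at (1, 2, 3) — toy rows, not the actual ISCV equation layout:

import numpy as np

# Each row a satisfies dot(a, x) + e = 0 on its line (a is normal to the
# line direction and e = -dot(a, p) for a point p on the line).
E0 = np.array([[1, 0, 0], [0, 1, 0],   # line through (1,2,3) along z
               [0, 1, 0], [0, 0, 1]],  # line through (1,2,3) along x
              dtype=np.float32)
e0 = np.array([-1, -2, -2, -3], dtype=np.float32)

x3d = np.linalg.solve(np.dot(E0.T, E0) + np.eye(3) * 1e-7, -np.dot(E0.T, e0))
assert np.allclose(x3d, [1, 2, 3], atol=1e-4)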
Example #8
def solve_skeleton_from_2d(x2ds,
                           splits,
                           labels,
                           effectorLabels,
                           Ps,
                           skelDict,
                           effectorData,
                           rootMat,
                           outerIts=5):
    """
	Given a posed skeleton and some labelled 2d points, solve the skeleton to better fit the points.
	
	Args:
		x2ds (float[][2]): 2d Detections from all cameras
		splits (int[]): camera boundary indices into x2ds (length numCameras + 1)
		labels (int[]): Assigned labels of the x2ds
		effectorLabels (?): For each effector, which label it depends on.
			Joints may be affected by a number of labellings.
		Ps (float[][3][4]): Projection matrices of the cameras.
		skelDict (GskelDict): The Skeleton to process
		effectorData (?): What's this?
		rootMat (float[3][4]): reference frame of the Skeleton.
		outerIts (int): IK Iterations to solve the skeleton. Default = 5.
		
	Returns:
		float[][3]: (x3ds) - the resulting 3D reconstructions.
		int[]: (x3d_labels) - the labels for the 3D points.
		??: (E[singles]) - ray equations for 2D detections not yet explained by a 3D point.
		int[] (x2d_labels) - labels for the 2D contributions.
		
	Requires:
		Recon.solve_x3ds
		
	"""
    x3ds, x3d_labels, E, x2d_labels = Recon.solve_x3ds(x2ds, splits, labels,
                                                       Ps)

    # effectorLabels tells, for each effector, which label it depends on
    # effectorLabels[ei] = li
    # given a list of labels, collect all the effectors that depend on those labels; and then find the reordering of the
    # original labels (which may include duplicates) that matches the effectors.

    numLabels = np.max(effectorLabels) + 1

    lbl3_inv = -np.ones(numLabels + 1, dtype=np.int32)
    lbl3_inv[x3d_labels] = range(len(x3d_labels))
    tmp3 = lbl3_inv[effectorLabels]
    ae3 = np.array(np.where(tmp3 != -1)[0], dtype=np.int32)
    tmp3 = tmp3[ae3]

    lbl2_inv = -np.ones(numLabels + 1, dtype=np.int32)
    lbl2_inv[x2d_labels] = range(len(x2d_labels))
    tmp2 = lbl2_inv[effectorLabels]
    ae2 = np.array(np.where(tmp2 != -1)[0], dtype=np.int32)
    tmp2 = tmp2[ae2]
    #
    solveIK1Ray(skelDict,
                effectorData,
                x3ds.take(tmp3, axis=0),
                ae3,
                E.take(tmp2, axis=0),
                ae2,
                outerIts=outerIts,
                rootMat=rootMat)
    return x3ds, x3d_labels, E, x2d_labels
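
The lbl3_inv/lbl2_inv scatter above is an inverse-lookup trick: fill a table with -1, scatter each solved label's row index into it, then gather through effectorLabels so every effector maps to its row (or -1 if its label was not solved). A pure-NumPy illustration with made-up labels:

import numpy as np

x3d_labels = np.array([2, 5, 7], dtype=np.int32)           # labels with a 3D point
effectorLabels = np.array([0, 2, 5, 5, 9], dtype=np.int32)

lbl3_inv = -np.ones(np.max(effectorLabels) + 2, dtype=np.int32)
lbl3_inv[x3d_labels] = np.arange(len(x3d_labels))

tmp3 = lbl3_inv[effectorLabels]    # row per effector, -1 where unsolved
ae3 = np.where(tmp3 != -1)[0]      # effectors that can be driven this frame
assert list(ae3) == [1, 2, 3] and list(tmp3[ae3]) == [0, 1, 1]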
Example #9
def generate_wand_correspondences(wand_frames, mats2, camera_solved, rigid_filter=True, error_thresholds=None, x3d_threshold=1000000.):
	"""
	Args:
		wand_frames
		mats2
		camera_solved
		rigid_filter = True
		error_thresholds = None
		
	Returns:
		x2s_cameras
		x3s_cameras
		frames_cameras
		num_kept_frames
		
	Requires:
		ISCV.undistort_points
		ISCV.label_T_wand
		Recon.solve_x3ds
		ISCV.project_and_clean
		
	"""

	def get_order(labels):
		"""
		Return the x2d index of the five points of the T Wand
		
		Args:
			labels (int[]): 
			
		Returns:
			int[5]: "order" label indexes
			
		"""
		try:
			l = list(labels)
			order = [l.index(x) for x in xrange(5)]
			return order
		except ValueError:  # a wand marker is missing from this camera
			return None
	
	numCameras = len(mats2)
	Ps2 = np.array([m[2]/np.linalg.norm(m[2][0,:3]) for m in mats2],dtype=np.float32)
	x2ds_frames = []
	x2ds_labels_frames = []
	x2ds_splits_frames = []
	x3ds_frames = []
	# TODO wand geo should be passed in? must be compatible with the label_T_wand
	wand_x3ds = np.array([[160,0,0],[0,0,0],[-80,0,0],[0,0,-120],[0,0,-240]],dtype=np.float32)
	thresh = (20./2000.)**2 if error_thresholds is None else error_thresholds**2 # projection must be close to be included for intersection
	num_kept_frames = 0
	for fi,(x2ds_raw_data,x2ds_splits) in enumerate(wand_frames): # intersect over all frames with current solved cameras
		x2ds_data,_ = undistort_dets(x2ds_raw_data, x2ds_splits, mats2)
		x2ds_labels = -np.ones(x2ds_data.shape[0],dtype=np.int32)
		ISCV.label_T_wand(x2ds_data, x2ds_splits, x2ds_labels, 2.0, 0.5, 0.01, 0.07)
		x2ds_labels2 = x2ds_labels.copy()
		for cs,c0,c1 in zip(camera_solved,x2ds_splits[:-1],x2ds_splits[1:]): # remove labels for unsolved cameras
			if not cs: x2ds_labels2[c0:c1] = -1
		count = np.sum(x2ds_labels2 != -1)/5
		if count >= 3: # only use points seen in three solved cameras
			x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps2)
			count = ISCV.project_and_clean(x3ds, Ps2, x2ds_data, x2ds_splits, x2ds_labels, x2ds_labels2, thresh, thresh, x3d_threshold)
			if count < 3: continue
			x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps2)
			#if not np.all(x3ds_labels == [0,1,2,3,4]): print 'ERROR'; continue # skip if somehow not all points seen
			#if np.max(x3ds**2) > 1e9: print 'ERROR oh oh',x3ds; continue
			if rigid_filter: # enforce x3ds must be a rigid transform of the wand
				mat = rigid_align_points(wand_x3ds, x3ds)
				x3ds = np.dot(wand_x3ds,mat[:3,:3].T) + mat[:,3]
			for cs,c0,c1 in zip(camera_solved,x2ds_splits[:-1],x2ds_splits[1:]): #copy 'cleaned' labels for solved cameras to avoid bad data
				if cs: x2ds_labels[c0:c1] = x2ds_labels2[c0:c1]
			x2ds_frames.append(x2ds_raw_data)
			x2ds_splits_frames.append(x2ds_splits)
			x2ds_labels_frames.append(x2ds_labels) # CBD not x2ds_labels2, otherwise we can't add cameras!
			x3ds_frames.append(x3ds)
			num_kept_frames+=1

	# TODO collapse this into the code above and clean up
	x2s_cameras,x3s_cameras,frames_cameras = [],[],[]
	for ci in xrange(numCameras):
		orders = [get_order(xlf[xsf[ci]:xsf[ci+1]]) for xlf,xsf in zip(x2ds_labels_frames,x2ds_splits_frames)]
		which_frames = np.where([o is not None for o in orders])[0]
		if len(which_frames) == 0:
			x2s,x3s = np.zeros((0,2),dtype=np.float32),np.zeros((0,3),dtype=np.float32)
		else:
			x2s = np.vstack([x2ds_frames[fi][x2ds_splits_frames[fi][ci]:x2ds_splits_frames[fi][ci+1]][orders[fi]] for fi in which_frames])
			x3s = np.vstack([x3ds_frames[fi] for fi in which_frames])
		x2s_cameras.append(x2s)
		x3s_cameras.append(x3s)
		frames_cameras.append(which_frames)

	return x2s_cameras,x3s_cameras,frames_cameras,num_kept_frames
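
get_order above inverts a camera's label slice into detection indices, returning None as soon as any of the five wand markers is missing. A tiny pure-Python illustration with made-up labels:

labels = [3, -1, 0, 2, 1, 4]                 # all five markers labelled
order = [labels.index(x) for x in range(5)]
assert order == [2, 4, 3, 0, 5]

try:
	order = [labels[:4].index(x) for x in range(5)]  # markers 1 and 4 missing
except ValueError:
	order = None
assert order is None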
Example #10
def boot_cameras_from_wand(wand_frames, cameras_info, lo_focal_threshold=0.5, hi_focal_threshold=4.0, cv_2d_threshold=0.02):
	"""
	Attempt to boot the positions of cameras from 2d data containing a wand. The wand is assumed to be a 5-marker T-wand.
	
	TODO: Generalise size of wand to allow 120mm, 240mm, 780mm etc variations. Also use actual measurements of wand.

	Args:
		wand_frames
		cameras_info
		lo_focal_threshold=0.5
		hi_focal_threshold=4.0
		cv_2d_threshold=0.02
		
	Returns:
		Mat[]: "mats2" - list of GRIP Camera Mats of solved or uninitalised cameras.
		bool[]: "camera_solved" flag to show which cameras have been solved in this process.
		
	Requires:
		ISCV.label_T_wand
		Recon.solve_x3ds
		np.linalg.norm
		
	"""
	
	numCameras = len(cameras_info)
	numFrames = len(wand_frames)
	camera_solved = [False]*numCameras
	# use the wand to boot the first camera
	x2ds_data,x2ds_splits = wand_frames[0]
	x2ds_labels = -np.ones(x2ds_data.shape[0], dtype=np.int32)
	ISCV.label_T_wand(x2ds_data, x2ds_splits, x2ds_labels, 2.0, 0.5, 0.01, 0.07)
	first_x3ds = np.array([[160, 0, 0],[0, 0, 0],[-80, 0, 0],[0, 0, -120],[0, 0, -240]], dtype=np.float32)
	mats2 = [None]*numCameras
	first_good_cameras = [None]*numCameras
	for ci,(c0,c1) in enumerate(zip(x2ds_splits[:-1], x2ds_splits[1:])):
		x2ds = x2ds_data[c0:c1]
		labels = x2ds_labels[c0:c1]
		try:
			order = [list(labels).index(x) for x in range(5)]
		except ValueError:  # wand not fully visible in this camera
			mats2[ci] = makeUninitialisedMat(ci, cameras_info[ci])
			camera_solved[ci] = False
			continue
		print ('found wand in camera',ci)
		first_good_cameras[ci] = x2ds[order]
		cv2_mat = cv2_solve_camera_from_3d(first_x3ds, x2ds[order])
		rms = cv2_mat[2]
		mats2[ci] = makeMat(cv2_mat[0], cv2_mat[1], cameras_info[ci])
		camera_solved[ci] = True
		if mats2[ci][0][0,0] < lo_focal_threshold or mats2[ci][0][0,0] > hi_focal_threshold or rms > cv_2d_threshold:
			print ('resetting bad camera',ci,'with focal',mats2[ci][0][0,0],'and error',rms)
			mats2[ci] = makeUninitialisedMat(ci,cameras_info[ci])
			camera_solved[ci] = False
	Ps2 = np.array([m[2]/m[0][0,0] for m in mats2],dtype=np.float32)
	x2ds_labels2 = x2ds_labels.copy()
	for ci in xrange(numCameras): # remove unsolved cameras
		if not camera_solved[ci]: x2ds_labels2[x2ds_splits[ci]:x2ds_splits[ci+1]] = -1
	x3ds_ret, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps2)

	print (x3ds_ret,first_x3ds) # all points should be within 2.5mm of 'true'
	assert(np.allclose(x3ds_ret, first_x3ds, 0.0, 2.5))

	# so, we booted some cameras and they reconstruct the wand in the correct place.
	# unfortunately, there is still an ambiguity: something like the Necker cube (two different ways we can perceive the wand).
	# as soon as the wand moves, we can resolve this
	for mfi in xrange(40,numFrames,20):
		print (mfi)
		x2ds_data,x2ds_splits = wand_frames[mfi]
		x2ds_labels = -np.ones(x2ds_data.shape[0],dtype=np.int32)
		ISCV.label_T_wand(x2ds_data, x2ds_splits, x2ds_labels, 2.0, 0.5, 0.01, 0.07)
		solved_cameras = np.where(camera_solved)[0]
		good_cameras = []
		second_good_cameras = [None]*numCameras
		print (solved_cameras)
		for ci in solved_cameras:
			c0,c1 = x2ds_splits[ci:ci+2]
			x2ds = x2ds_data[c0:c1]
			labels = x2ds_labels[c0:c1]
			try:
				order = [list(labels).index(x) for x in range(5)]
			except ValueError:  # wand not fully visible in this camera
				continue
			diff = x2ds[order] - first_good_cameras[ci]
			if np.linalg.norm(diff) < 0.02*len(diff): continue # must have moved 'enough'
			good_cameras.append(ci)
			second_good_cameras[ci] = x2ds[order]
		print (good_cameras)
		if len(good_cameras) >= 3: # this is the good frame...
			x2ds_labels2 = x2ds_labels.copy()
			for ci in xrange(numCameras): # remove unsolved cameras
				if not ci in good_cameras: x2ds_labels2[x2ds_splits[ci]:x2ds_splits[ci+1]] = -1
			second_x3ds, second_x3ds_labels, _,_ = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps2)
			for ci in solved_cameras:
				if ci not in good_cameras:
					print ('resetting bad camera',ci)
					mats2[ci] = makeUninitialisedMat(ci,mats2[ci][5])
					camera_solved[ci] = False
			for ci in good_cameras:
				cv2_mat = cv2_solve_camera_from_3d(np.concatenate((first_x3ds,second_x3ds)), np.concatenate((first_good_cameras[ci],second_good_cameras[ci])))
				rms = cv2_mat[2]
				print (ci,rms)
				mats2[ci] = makeMat(cv2_mat[0],cv2_mat[1],mats2[ci][5])
				camera_solved[ci] = True
			break # finished
	return mats2, camera_solved
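
Masking out the labels of unsolved (or non-good) cameras, as done twice above, is a per-camera slice write under the splits convention. A compact sketch with invented camera flags:

import numpy as np

x2ds_splits = np.array([0, 3, 5, 9], dtype=np.int32)
camera_solved = [True, False, True]
x2ds_labels2 = np.arange(9, dtype=np.int32)  # stand-in labels

for ci in range(len(camera_solved)):  # remove unsolved cameras
	if not camera_solved[ci]:
		x2ds_labels2[x2ds_splits[ci]:x2ds_splits[ci + 1]] = -1

assert list(x2ds_labels2) == [0, 1, 2, -1, -1, 5, 6, 7, 8]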