Example #1
import numpy as np

from bimvee.split import selectByBool
# getEventImage (a per-pixel event-count image helper) is assumed to be in
# scope, e.g. imported from bimvee's plotting utilities.

def removeHotPixels(inDict, **kwargs):
    # boilerplate to get down to dvs container
    if isinstance(inDict, list):
        return [removeHotPixels(inDictSingle, **kwargs)
                for inDictSingle in inDict]
    if not isinstance(inDict, dict):
        return inDict
    if 'ts' not in inDict:
        outDict = {}
        for key in inDict.keys():
            outDict[key] = removeHotPixels(inDict[key], **kwargs)
        return outDict
    # From this point onwards, it's a data-type container
    if 'pol' not in inDict:
        # Not a dvs container - pass it through unchanged
        return inDict
    # From this point onwards, it's a dvs container
    events = inDict
    # Build a per-pixel event-count image; pixels whose count exceeds
    # mean + threshold * std are treated as hot pixels
    eventImage = getEventImage(events, contrast=np.inf, polarised=False)
    contrast1d = eventImage.flatten()
    mean = np.mean(contrast1d)
    std = np.std(contrast1d)
    threshold = mean + kwargs.get('threshold', 3) * std
    (y, x) = np.where(eventImage > threshold)
    dimY = kwargs.get('dimY', events.get('dimY', events['y'].max() + 1))
    #dimX = kwargs.get('dimX', events.get('dimX', events['x'].max()))
    # Linearise (x, y) coordinates into single addresses so that hot-pixel
    # membership can be tested with np.isin
    addrsToRemove = x * dimY + y
    eventAddrs = events['x'] * dimY + events['y']
    toKeep = np.logical_not(np.isin(eventAddrs, addrsToRemove))
    return selectByBool(events, toKeep)
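A minimal usage sketch, assuming getEventImage and selectByBool are importable as noted above; the container below is synthetic and purely illustrative:

# Hypothetical dvs container following the field convention used above
events = {'ts': np.sort(np.random.rand(100000)),
          'x': np.random.randint(0, 304, 100000),
          'y': np.random.randint(0, 240, 100000),
          'pol': np.random.rand(100000) > 0.5}
eventsFiltered = removeHotPixels(events, threshold=3)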
Example #2
import numpy as np
from tqdm import trange

from bimvee.split import selectByBool

def refractoryPeriod(inDict, **kwargs):
    # The refractory period (in seconds) is read from kwargs so that it also
    # reaches the recursive calls below; a parameter named like the function
    # would shadow it and break the recursion. Default is 1 ms.
    period = kwargs.get('refractoryPeriod', 0.001)
    # boilerplate to get down to dvs container
    if isinstance(inDict, list):
        return [refractoryPeriod(inDictSingle, **kwargs)
                for inDictSingle in inDict]
    if not isinstance(inDict, dict):
        return inDict
    if 'ts' not in inDict:
        outDict = {}
        for key in inDict.keys():
            outDict[key] = refractoryPeriod(inDict[key], **kwargs)
        return outDict
    # From this point onwards, it's a data-type container
    if 'pol' not in inDict:
        # Not a dvs container - pass it through unchanged
        return inDict
    # From this point onwards, it's a dvs container
    events = inDict
    ts = events['ts']
    x = events['x']
    y = events['y']
    numEvents = len(ts)
    maxX = x.max()
    maxY = y.max()
    prevTs = np.zeros((maxY+1, maxX+1))
    toKeep = np.ones((numEvents), dtype=bool)
    for idx in trange(numEvents, leave=True, position=0):
        if ts[idx] >= prevTs[y[idx], x[idx]] + period:
            prevTs[y[idx], x[idx]] = ts[idx]
        else:
            toKeep[idx] = False
    outDict = selectByBool(inDict, toKeep)
    return outDict
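A hedged usage sketch, reusing a container shaped like the one in the sketch above; the per-event Python loop is slow on large recordings, and trange only adds a progress bar:

# Events closer than 0.5 ms to the previous event at the same pixel are dropped
eventsFiltered = refractoryPeriod(events, refractoryPeriod=0.0005)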
Example #3
import sys

import cv2
import numpy as np
from tqdm import tqdm

from bimvee.split import selectByBool

# 'meshgrid', 'frames' and 'checkerboardDims' are assumed to be defined
# earlier in the original script (the snippet starts part-way through it).
x = meshgrid[0].flatten()
y = meshgrid[1].flatten()
z = np.zeros_like(y)

# generate calibration pattern coords
coords = np.concatenate((x[:, np.newaxis], y[:, np.newaxis], z[:, np.newaxis]),
                        axis=1).astype(np.float32)
usableImagePoints = []
usableBool = np.zeros((len(frames['frames'])), dtype=bool)
for idx, frame in enumerate(tqdm(frames['frames'], file=sys.stdout)):
    found, corners = cv2.findChessboardCorners(
        frame, patternSize=checkerboardDims)
    if found:
        usableBool[idx] = True
        usableImagePoints.append(corners[:, 0, :])
frames = selectByBool(frames, usableBool)
frames['imagePoints'] = usableImagePoints

#%% Run the calibration

#frames = framesBeforeCalibration

objectPoints = [coords for idx in range(len(frames['frames']))]
# camera matrix is an initial guess
cameraMatrix = np.array([[200, 0, 152], [0, 200, 120], [0, 0, 1]],
                        dtype=np.float64)
distCoeffs = np.array([0, 0, 0, 0, 0], dtype=np.float64)
retval, cameraMatrix, distCoeffs, rVecs, tVecs = cv2.calibrateCamera(
    objectPoints=objectPoints,
    imagePoints=frames['imagePoints'],
    imageSize=(240, 304),
    cameraMatrix=cameraMatrix,
    distCoeffs=distCoeffs,
    # assumed completion of the truncated call: use the initial guess above
    flags=cv2.CALIB_USE_INTRINSIC_GUESS)
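The estimated intrinsics can then be used to undistort the frames; a minimal sketch, assuming the calibration above converged and frames['frames'] holds greyscale images:

# Undistort each usable frame with the estimated camera model (illustrative)
undistortedFrames = [cv2.undistort(frame, cameraMatrix, distCoeffs)
                     for frame in frames['frames']]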
Example #4
x = meshgrid[0].flatten()
y = meshgrid[1].flatten()
z = np.zeros_like(y)

# generate calibration pattern coords
coords = np.concatenate((x[:, np.newaxis], y[:, np.newaxis], z[:, np.newaxis]),
                        axis=1).astype(np.float32)
usableImagePoints = []
usableBool = np.zeros((len(frames['frames'])), dtype=bool)
for idx, frame in enumerate(tqdm(frames['frames'])):
    found, corners = cv2.findChessboardCorners(
        frame, patternSize=(7, 4))
    if found:
        usableBool[idx] = True
        usableImagePoints.append(corners[:, 0, :])
frames = selectByBool(frames, usableBool)
frames['imagePoints'] = usableImagePoints

#%% Select poses which match the frames

poses = getSamplesAtTimes(poses, frames['ts'])

#%% Filter poses too far from frames

import math

maxTimeDifference = 0.01

numSamples = poses['ts'].shape[0]
keepBool = np.ones((numSamples), dtype=bool)
for idx in range(numSamples):
    # assumed completion of the truncated loop: drop pose samples whose
    # timestamps are more than maxTimeDifference from the matching frame
    if math.fabs(poses['ts'][idx] - frames['ts'][idx]) > maxTimeDifference:
        keepBool[idx] = False
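A natural next step (hedged, since the original example ends here) is to apply the mask with selectByBool, mirroring the frame selection above:

# Keep only the pose samples that are close enough in time to a frame
poses = selectByBool(poses, keepBool)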
Example #5
import os

import numpy as np

from bimvee.importAe import importAe
from bimvee.split import selectByBool

# filePathOrName is assumed to be defined earlier in the original script,
# pointing at the recording directory.
containerVicon = importAe(filePathOrName=os.path.join(filePathOrName, 'Vicon'))
# Filter out just the desired pose samples
from bimvee.importIitVicon import separateMarkersFromSegments

containerVicon['data']['vicon'] = separateMarkersFromSegments(
    containerVicon['data']['vicon']['pose6q'])
del containerVicon['data']['vicon']['point3']

# Remove null pose samples (where tracking failed)
nullPoses = containerVicon['data']['vicon']['pose6q']['point'] == np.zeros(
    (1, 3))
nullPoses = np.all(nullPoses, axis=1)  # a failed-tracking sample is all-zero

containerVicon['data']['vicon']['pose6q'] = selectByBool(
    containerVicon['data']['vicon']['pose6q'], ~nullPoses)
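A small sanity check could follow; a sketch only, verifying that the selection left no all-zero point samples behind:

# Sanity check (illustrative): no all-zero point samples should remain
assert not np.any(np.all(
    containerVicon['data']['vicon']['pose6q']['point'] == 0, axis=1))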

#%%

from bimvee.info import info

info(containerVicon)

#%% Time alignment
'''
Now for some irreducible complexity:
events and vicon were simultaneous yarp imports. They need to be time-aligned
using tsOffsetFromInfo.
The reconstructed frames take the same timestamps as the dvs events, so they
need to be aligned accordingly. Here we just use the coincidence that dvs
recording started before vicon recording, so dvs events happen to be aligned