Example #1
 def testCorruptedVideoFails(self):
     # Unfortunately this invalid read triggers a write to stderr that can't
     # easily be suppressed from Python-level redirection alone
     # Workaround: https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
     # It just takes a lot of extra code, probably not worth it
     # TODO think about implementing it anyway
     with self.assertRaises(OSError):
         print('\nExpecting IO error... ', end='')
         readVideo('tests/data/corrupt.avi')
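The TODO above would amount to an OS-level redirect of file descriptor 2, as the linked post describes. A minimal sketch (a hypothetical helper, not part of the module; reassigning sys.stderr alone cannot catch writes made by the C decoder):

import os
from contextlib import contextmanager

@contextmanager
def suppressedStderr():
    # Duplicate fd 2 so it can be restored, then point it at /dev/null.
    # This silences stderr writes made below the Python layer as well.
    savedFd = os.dup(2)
    devnull = os.open(os.devnull, os.O_WRONLY)
    try:
        os.dup2(devnull, 2)
        yield
    finally:
        os.dup2(savedFd, 2)
        os.close(devnull)
        os.close(savedFd)

The test body would then wrap the read in a with suppressedStderr(): block to keep the decoder noise out of the test output.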
Example #2
    def testReadFramesAreCorrect(self):
        frames = readVideo(
            'tests/data/black.avi')  # 5 frames of pure black
        self.assertEqual(len(frames), 5)

        gold = numpy.zeros((5, 1024, 1024))  # expected: all-black frames
        self.assertTrue(numpy.array_equal(frames, gold))
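For context, the black.avi fixture could be generated with something like the sketch below (hypothetical, not from the repo). The codec has to be lossless so the decoded frames are exactly zero; FFV1 availability depends on the local OpenCV/FFmpeg build.

import cv2
import numpy

# Hypothetical generator for tests/data/black.avi
writer = cv2.VideoWriter('tests/data/black.avi',
                         cv2.VideoWriter_fourcc(*'FFV1'), 30.0,
                         (1024, 1024), isColor=False)
black = numpy.zeros((1024, 1024), dtype=numpy.uint8)  # one all-black frame
for _ in range(5):
    writer.write(black)
writer.release()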
Example #3
    @classmethod
    def setUpClass(cls):
        # Cache test data
        cls.frames = readVideo('tests/data/small.avi').astype(numpy.int16)
        cls.firstDiff = cls.frames[1] - cls.frames[0]

        cls.thread = reikna.cluda.ocl_api().Thread.create()

        # Get the OpenCL kernels from the 2DF module
        cls.fft = createComplexFFTKernel(cls.thread, cls.frames[0].shape)
        cls.normalise = createNormalisationKernel(cls.thread,
                                                  cls.frames[0].shape)
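A matching teardown would free the OpenCL context once the class's tests finish. A sketch, assuming reikna's Thread.release() (present in current reikna versions):

    @classmethod
    def tearDownClass(cls):
        # Free the OpenCL context created in setUpClass
        cls.thread.release()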
Example #4
 def testEmptyVideoFails(self):
     with self.assertRaises(OSError):
         print('\nExpecting IO error... ', end='')
         readVideo('tests/data/empty.avi')
Example #5
 def testValidVideoOpens(self):
     readVideo('tests/data/short.avi')  # single black frame
Example #6
def sequentialGPUChunker(filename,
                         spacings,
                         RAMGB=4,
                         progress=None,
                         abortFlag=None):
    if progress is not None:
        progress.setText('Reading Video from Disk')
        progress.cycle()

    videoInput = readVideo(filename)
    numFrames = videoInput.shape[0]
    correlations = [None] * (numFrames - 1)

    if abortFlag: return None

    showProgress = progress is not None
    if showProgress: progress.setText('Getting OpenCL Context')

    # Create access node for OpenCL
    api = reikna.cluda.ocl_api()
    try:
        thr = api.Thread.create()
    except LogicError:
        print('No OpenCL-compatible devices (e.g. GPUs) found',
              file=sys.stderr)
        sys.exit(1)

    # Display accessible devices
    for plat in api.get_platforms():
        for cld in plat.get_devices():
            print(
                f'Found {cld.name} with {cld.global_mem_size/1024**3:.1f}GB VRAM'
            )
            # print('Has extensions:', cld.extensions)

    if showProgress: progress.setText('Creating OpenCL Kernels')

    # Compile the OpenCL kernels used to calculate FFTs.
    # One sample is dropped in each dimension to match the MATLAB implementation
    size = [d - 1 for d in videoInput[0].shape]
    fftComplex = createComplexFFTKernel(thr, size)
    fftNorm = createNormalisationKernel(thr, size)

    RAMBytes = RAMGB * numpy.power(2.0, 30.0)
    # Number of pixels per frame, multiplied by 128 for the size of a complex
    # float, but halved because the real transform is used
    complexFrameByteSize = videoInput.shape[1] * videoInput.shape[2] * 128 / 2

    # One frame's RAM in reserve for the head
    framesPerSlice = int((RAMBytes // complexFrameByteSize) - 1)

    # The number of different slice intervals that must be taken
    numSpacingSets = int(numpy.ceil((numFrames - 1) / framesPerSlice))

    # Used to show progress in the UI
    framesProcessed = 0
    target = numFrames * (numFrames - 1) / 2  # algorithm complexity
    # Allow 10% extra time to calculate the q curves
    qProgress = target * 0.1 / numSpacingSets  # per-slice q-curve allowance
    target += qProgress * numSpacingSets

    if abortFlag: return None

    # For each diagonal section
    for sliceSpacing in range(0, numSpacingSets):
        if progress is not None:
            progress.setText(
                f'Working on Slice {sliceSpacing+1}/{numSpacingSets}')

        # A double-ended queue, more efficient than a list for queue operations
        currentSlice = deque()
        # The index by which new frames are grabbed for the slice
        baseIndex = 0

        # Find the expected shape of the transform results
        transformShape = (videoInput.shape[1] - 1, videoInput.shape[2] - 1)
        totalDifferencesShape = (framesPerSlice, transformShape[0],
                                 transformShape[1])
        # Prepare the destination arrays for the frame differences
        totalDifferences = numpy.zeros(totalDifferencesShape)
        numDifferences = numpy.zeros((framesPerSlice, ))

        # For each head frame
        for headIndex in range((sliceSpacing * framesPerSlice) + 1, numFrames):
            # If the queue is full, remove the oldest element
            if len(currentSlice) == framesPerSlice:
                currentSlice.popleft()
            # Push a new frame's transform onto the slice queue
            # (also drops a row and column)
            currentSlice.append(
                runKernelOperation(thr, fftComplex,
                                   videoInput[baseIndex, :-1, :-1]))
            baseIndex += 1
            # Drops a row and column
            head = videoInput[headIndex, :-1, :-1]
            head = runKernelOperation(thr, fftComplex, head)

            # Time difference between the head and each frame in the queue
            relativeDifference = 0
            # Iterate backwards through the queue: newest element first, so
            # the smallest time difference comes first
            for sliceFrameIndex in range(len(currentSlice) - 1, -1, -1):
                # Update progress tracker
                if progress is not None:
                    framesProcessed += 1
                    progress.setProgress(framesProcessed, target)

                difference = head - currentSlice[sliceFrameIndex]
                normalFrame = thr.array(size, dtype=numpy.float64)
                fftNorm(normalFrame, difference)

                totalDifferences[relativeDifference, :, :] += normalFrame.get()

                # TODO - Need to make this ^^^ run on the GPU
                # allocate list of empty buffers, add into?

                numDifferences[relativeDifference] += 1
                relativeDifference += 1

                if abortFlag: return None

        for relativeDifference in range(0, len(currentSlice)):
            if progress is not None:
                framesProcessed += qProgress / len(currentSlice)
                progress.setProgress(framesProcessed, target)

            meanDifference = (totalDifferences[relativeDifference, :, :] /
                              numDifferences[relativeDifference])
            timeDifference = relativeDifference + sliceSpacing * framesPerSlice
            correlations[timeDifference] = calculateWithCalls(meanDifference)

            if abortFlag: return None

    if progress is not None:
        progress.cycle()
        progress.setText('Calculating Correlation Curves')

    correlations = calculateCorrelation(correlations)

    frameRate = readFramerate(filename)
    timeSpacings = numpy.arange(1, len(correlations) + 1) / frameRate
    # numpy.c_ stacks the 1-D arrays as columns
    outputMatrix = numpy.c_[timeSpacings, correlations]

    if abortFlag: return None

    if progress is not None:
        progress.setPercentage(100)
        progress.setText('Done!')

    return outputMatrix
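To make the slice sizing concrete, here is a worked example of the arithmetic above (the 1024x1024 frame size and 100-frame count are hypothetical values, not from the source):

import numpy

RAMBytes = 4 * numpy.power(2.0, 30.0)             # default RAMGB=4, in bytes
complexFrameByteSize = 1024 * 1024 * 128 / 2      # 67,108,864 per frame
framesPerSlice = int((RAMBytes // complexFrameByteSize) - 1)  # 64 - 1 = 63
numSpacingSets = int(numpy.ceil((100 - 1) / framesPerSlice))  # ceil(99/63) = 2

So a 100-frame video at this resolution would be processed in two diagonal slices of at most 63 lags each.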
Example #7
def sequentialChunkerMain(videoPath,
                          spacings,
                          outputPath=None,
                          RAMGB=1,
                          progress=None,
                          abortFlag=None):
    if progress is not None:
        progress.setText('Reading Video from Disk')
        progress.cycle()

    videoInput = rV.readVideo(videoPath)
    numFrames = videoInput.shape[0]
    correlations = [None] * (numFrames - 1)

    RAMBytes = RAMGB * np.power(2.0, 30.0)
    # Number of pixels per frame, times 128 for the size of a complex float,
    # but halved because the real transform is used
    complexFrameByteSize = videoInput.shape[1] * videoInput.shape[2] * 128 / 2
    # TODO: adjust for the other RAM-using variables
    # One frame's RAM in reserve for the head
    framesPerSlice = int((RAMBytes // complexFrameByteSize) - 1)

    # The number of different slice intervals that must be taken
    numSpacingSets = int(np.ceil((numFrames - 1) / framesPerSlice))

    print('numSpacingSets:', numSpacingSets)
    print('framesPerSlice:', framesPerSlice)
    print('complexFrameByteSize:', complexFrameByteSize)

    # Used to show progress in the UI
    framesProcessed = 0
    target = numFrames * (numFrames - 1) / 2  # algorithm complexity
    # allow 10% extra time to calculate the q curves
    qProgress = target * 0.1 / numSpacingSets  # per-slice q-curve allowance
    target += qProgress * numSpacingSets

    # For each diagonal section
    for sliceSpacing in range(0, numSpacingSets):
        if progress is not None:
            progress.setText(
                f'Working on Slice {sliceSpacing+1}/{numSpacingSets}')

        # A double-ended queue, more efficient than a list for queue operations
        currentSlice = deque()
        # The index by which new frames are grabbed for the slice
        baseIndex = 0

        # Find the expected shape of the transform results.
        # Drop one sample in each dimension to match the MATLAB implementation
        if (videoInput.shape[2] - 1) % 2 == 0:
            transformShape = (videoInput.shape[1] - 1,
                              (videoInput.shape[2] - 1) // 2 + 1)
        else:
            # +1 for the real transform correction, -1 to drop a sample based on MATLAB
            transformShape = (videoInput.shape[1] - 1,
                              (videoInput.shape[2] + 1 - 1) // 2)
        totalDifferencesShape = (framesPerSlice, transformShape[0],
                                 transformShape[1])
        # Prepare the destination arrays for the frame differences
        totalDifferences = np.zeros(totalDifferencesShape)
        numDifferences = np.zeros((framesPerSlice, ))

        # For each head frame
        for headIndex in range((sliceSpacing * framesPerSlice) + 1, numFrames):
            # If the queue is full, remove the oldest element
            if len(currentSlice) == framesPerSlice:
                currentSlice.popleft()
            # Push a new frame's transform onto the slice queue
            # (also drops a row and column)
            currentSlice.append(
                tDF.realTwoDFourierUnnormalized(
                    videoInput[baseIndex, :-1, :-1]))
            baseIndex += 1
            # Drops a row and column
            head = videoInput[headIndex, :-1, :-1]
            head = tDF.realTwoDFourierUnnormalized(head)
            # Time difference between the head and each frame in the queue
            relativeDifference = 0
            # Iterate backwards through the queue: newest element first, so
            # the smallest time difference comes first
            for sliceFrameIndex in range(len(currentSlice) - 1, -1, -1):
                # Update progress tracker
                if progress is not None:
                    framesProcessed += 1
                    progress.setProgress(framesProcessed, target)

                difference = head - currentSlice[sliceFrameIndex]
                totalDifferences[relativeDifference, :, :] += tDF.castToReal(
                    difference)
                numDifferences[relativeDifference] += 1
                relativeDifference += 1

                if abortFlag: return None

        for relativeDifference in range(0, len(currentSlice)):
            if progress is not None:
                framesProcessed += qProgress / len(currentSlice)
                progress.setProgress(framesProcessed, target)

            meanDifference = (totalDifferences[relativeDifference, :, :] /
                              numDifferences[relativeDifference])
            timeDifference = relativeDifference + sliceSpacing * framesPerSlice
            correlations[timeDifference] = cQC.calculateRealQCurves(
                meanDifference)

            if abortFlag: return None

    if progress is not None:
        progress.cycle()
        progress.setText('Calculating Correlation Curves')

    correlations = cC.calculateCorrelation(correlations)

    frameRate = rV.readFramerate(videoPath)
    timeSpacings = np.arange(1, len(correlations) + 1) / frameRate
    # np.c_ stacks the 1-D arrays as columns
    outputMatrix = np.c_[timeSpacings, correlations]

    if abortFlag: return None

    if outputPath is not None:
        if progress is not None: progress.setText('Saving to Disk')

        np.savetxt(outputPath, outputMatrix)

    if progress is not None:
        progress.setPercentage(100)
        progress.setText('Done!')

    return outputMatrix
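For reference, a minimal invocation sketch (the paths are placeholders, and spacings is passed as None because the body above never reads it):

outputMatrix = sequentialChunkerMain('tests/data/small.avi',
                                     spacings=None,
                                     outputPath='correlations.txt',
                                     RAMGB=1)
# Column 0 holds the time spacings in seconds; the remaining
# columns come from the correlation curves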
Example #8
 @classmethod
 def setUpClass(cls):
     # A (10-frame) 128x128 crop of the large data file
     cls.frames = readVideo('tests/data/small.avi').astype(numpy.int16)
     cls.firstDiff = cls.frames[1] - cls.frames[0]