Example #1
# Imports assumed by this example: numpy and scipy are real, external
# dependencies; util, finders, Guess, normalise, hammbytes and
# paritybytes are project-local helpers from the surrounding codebase.
import numpy as np
from scipy.ndimage import gaussian_filter1d as gauss
from scipy.optimize import fminbound

import util
import finders
from guess import Guess
from util import normalise, hammbytes, paritybytes


class Vbi(object):
    '''This class represents a line of raw VBI data and all our attempts
    to decode it.'''

    # 42 payload bytes per packet: 2 hamming-coded address bytes
    # followed by 40 parity-coded data bytes
    possible_bytes = [hammbytes]*2 + [paritybytes]*40

    def __init__(self, vbi, bitwidth=5.112, gauss_sd=1.1, gauss_sd_offset=2.0,
                 offset_low=75.0, offset_high=119.0,
                 thresh_low=1.1, thresh_high=2.36,
                 allow_unmatched=True, find=finders.all_headers):

        # data arrays

        # vbi is the raw line as an array of 2048 floats
        self.vbi = vbi

        # blurring amounts
        self.gauss_sd = gauss_sd
        self.gauss_sd_offset = gauss_sd_offset

        # Offset range to check for signal drift, in samples.
        # The algorithm checks with sub-sample accuracy. The offset
        # finder is very fast (scipy's fminbound performs a bounded
        # scalar minimisation), so this range can be relatively large,
        # but not so large that you get false positives.
        self.offset_low = offset_low
        self.offset_high = offset_high
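        # Illustrative arithmetic with the defaults: the search window
        # is offset_high - offset_low = 119.0 - 75.0 = 44 samples, i.e.
        # about 44/5.112 ~ 8.6 bit widths of tolerated drift.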

        # black level of the signal
        self.black = np.mean(self.vbi[:80])

        # Threshold multipliers. The black level of the signal is
        # derived from the mean of the area before the VBI begins.
        # It is multiplied by the following factors to give the low
        # and high thresholds: anything below the low threshold is
        # taken as a definite 0 bit, anything above the high threshold
        # as a definite 1 bit. Tweaking these can improve results,
        # but often at a speed cost.
        self.thresh_low = self.black*thresh_low
        self.thresh_high = self.black*thresh_high
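        # Worked example with hypothetical numbers: if the mean of the
        # first 80 samples is 40.0, the defaults give
        # thresh_low = 40.0*1.1 = 44.0 and thresh_high = 40.0*2.36 = 94.4.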

        # Allow vbi.py to emit packet 0s that don't match any finder?
        # Set to False when you have finders for all headers in the data.
        self.allow_unmatched = allow_unmatched
        self.finders = find

        # per-byte bit masks for the 42-byte packet, filled in by
        # make_guess_mask()
        self._mask0 = np.zeros(42, dtype=np.uint8)
        self._mask1 = np.zeros(42, dtype=np.uint8)

        self.g = Guess(bitwidth=bitwidth)


    def find_offset_and_scale(self):
        '''Tries to find the offset of the vbi data in the raw samples.'''

        # Split the line into chunks and ensure there is something
        # "interesting" in each one; a flat (low variance) chunk means
        # the line carries no usable signal, so give up early.
        target = gauss(self.vbi, self.gauss_sd_offset)
        d = [np.std(target[x:x+128]) < 5.0 for x in range(64, 1440, 128)]
        if any(d):
            return False

        low = 64
        high = 256
        target = gauss(self.vbi[low:high], self.gauss_sd_offset)

        def _inner(offset):
            self.g.set_offset(offset)

            self.g.update_cri(low, high)
            guess_scaled = self.g.convolved[low:high]
            mask_scaled = self.g.mask[low:high]

            a = guess_scaled*mask_scaled
            b = np.clip(target*mask_scaled, self.black, 256)

            scale = a.std()/b.std()
            b -= self.black
            b *= scale
            a = np.clip(a, 0, 256*scale)

            return np.sum(np.square(b-a))

        offset = fminbound(_inner, self.offset_low, self.offset_high)

        # Call _inner once more at the best offset so its side effect
        # (setting the offset on the guess) is applied, then accept the
        # fit only if the residual is small enough.
        return (_inner(offset) < 10)
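
    # Example: with the defaults, fminbound searches offsets in the
    # bounded interval [75.0, 119.0] and the line is accepted only when
    # the summed squared difference drops below 10.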

    def make_guess_mask(self):
        a = []

        for i in range(42*8):
            (low, high) = self.g.get_bit_pos(i)
            a.append(self.vbi[low:high])

        mins = np.array([min(x) for x in a])
        maxs = np.array([max(x) for x in a])
        avgs = np.array([x.mean() for x in a])

        for i in range(42):
            mini = mins[i*8:(i+1)*8]
            maxi = maxs[i*8:(i+1)*8]
            avgi = avgs[i*8:(i+1)*8]  # currently unused
            self._mask0[i] = 0xff
            for j in range(8):
                if mini[j] < self.thresh_low:
                    # cell dips below the low threshold: it may be a 0
                    self._mask0[i] &= ~(1<<j)
                if maxi[j] > self.thresh_high:
                    # cell rises above the high threshold: it may be a 1
                    self._mask1[i] |= (1<<j)

        # Combine the masks: afterwards a clear bit in _mask0 means
        # "confidently 0" (the cell dipped low and never went high) and
        # a set bit in _mask1 means "confidently 1" (the cell went high
        # and never dipped low).
        tmp = self._mask1 & self._mask0
        self._mask0 |= self._mask1
        self._mask1 = tmp

    def make_possible_bytes(self, possible_bytes):
        # util.m0s/util.m1s map a mask byte to the set of candidate
        # byte values consistent with it. Prefer bytes consistent with
        # both masks; failing that, the smaller non-empty single-mask
        # set; failing that, fall back to the unconstrained candidates.
        def masked(b, n):
            m0 = util.m0s[self._mask0[n]]
            m1 = util.m1s[self._mask1[n]]
            m = m0 & m1 & b
            if m:
                return m
            else:
                mm0 = m0 & b
                mm1 = m1 & b
                if len(mm0) < len(mm1):
                    return mm0 or mm1 or b
                else:
                    return mm1 or mm0 or b

        self.possible_bytes = [masked(b,n) for n,b in enumerate(possible_bytes)]

    def _deconvolve_make_diff(self, bounds):
        # (tuple parameter unpacking removed for Python 3 compatibility)
        low, high = bounds
        # self.target is prepared by the caller (not shown in this excerpt)
        a = normalise(self.g.convolved)
        diff_sq = np.square(a - self.target)
        return np.sum(diff_sq)
        # An interesting trick I discovered: bias the result towards
        # the current area of interest. Disabled by default; swap the
        # return statements to enable it.
        #return np.sum(diff_sq[:low]) + 2.6*np.sum(diff_sq[low:high]) + np.sum(diff_sq[high:])
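
A minimal usage sketch, assuming the project-local modules above are
importable; the capture file name and sample format here are
hypothetical:

import numpy as np

# one captured VBI line: 2048 float samples (hypothetical capture file)
line = np.fromfile('line0.dat', dtype=np.float32, count=2048).astype(np.float64)

v = Vbi(line)
if v.find_offset_and_scale():
    v.make_guess_mask()
    v.make_possible_bytes(Vbi.possible_bytes)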