Example #1
def _fit_voigt_water(ppm, spectra):
    """Private function to fit water residual in one spectra.

    Parameters
    ----------
    ppm : ndarray, shape (n_samples, )
        The PPM array.

    spectra : ndarray, shape (n_samples, )
        The spectra on which the water has to be fitted.

    Returns
    -------
    popt : list of float,
        A list of the fitted parameters.

    """

    # Restrict the spectrum to the water region between 4 and 6 ppm
    water_limits = (4.0, 6.0)
    water_mask = np.flatnonzero(np.bitwise_and(ppm > water_limits[0], ppm < water_limits[1]))
    sub_ppm = ppm[water_mask]
    sub_spectra = spectra[water_mask]

    # Define the default parameters
    amp_dft = np.max(sub_spectra) / _voigt_profile(0.0, 1.0, 0.0, 1.0, 1.0)
    popt_default = [amp_dft, 1.0, 1.0, 1.0]
    # Define the bound
    param_bounds = ([0.0, 0.0, 0.0, 0.0], [np.inf, np.inf, np.inf, np.inf])

    try:
        popt, _ = curve_fit(_voigt_profile, sub_ppm, np.real(sub_spectra), p0=popt_default, bounds=param_bounds)
    except RuntimeError:
        popt = popt_default

    return popt
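The _voigt_profile helper is private to the source module and not shown here. A minimal sketch of what it might look like, assuming a standard Voigt line shape evaluated via the Faddeeva function (scipy.special.wofz); the parameter order (x, alpha, mu, sigma, gamma) is an assumption inferred from the call above:

import numpy as np
from scipy.special import wofz

def _voigt_profile(x, alpha, mu, sigma, gamma):
    # Voigt profile: a Gaussian of width sigma convolved with a Lorentzian of
    # width gamma, scaled by alpha (hypothetical parameter order).
    z = ((x - mu) + 1j * gamma) / (sigma * np.sqrt(2.0))
    return alpha * np.real(wofz(z)) / (sigma * np.sqrt(2.0 * np.pi))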
Example #2
def _load_supp(len_all_trasactions, support_map, item_supp_map, itemset):
    """Compute support ratio for item of itemset

    Parameters
    ----------
    len_all_trasactions: int
        total number of transactions

    support_map: dict

    item_supp_map: dict

    itemset: int / frozenset

    Returns
    -------
    support: float
    """
    if not isinstance(itemset, frozenset):
        return item_supp_map[itemset]

    if len(itemset) == 1:
        return item_supp_map[list(itemset)[0]]

    supp_bits = np.ones(len_all_trasactions, dtype=bool)
    for item in itemset:
        np.bitwise_and(supp_bits, support_map[item], supp_bits)

    return supp_bits.sum() / len_all_trasactions
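A hedged illustration of the bitmap idea behind support_map: each item maps to a boolean vector over all transactions, and the support of an itemset is the element-wise AND of its members' bitmaps (the transaction data here is made up):

import numpy as np

transactions = [{"a", "b"}, {"a"}, {"b", "c"}, {"a", "b", "c"}]
support_map = {item: np.array([item in t for t in transactions])
               for t in transactions for item in t}
supp_bits = np.bitwise_and(support_map["a"], support_map["b"])
print(supp_bits.sum() / len(transactions))  # 0.5 -> {"a", "b"} is in 2 of 4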
Example #3
def _optimal_substemma (ms_id, explain_matrix, combinations, mode):
    """Do an exhaustive search for the combination among a given set of ancestors
    that best explains a given manuscript.

    """

    ms_id = ms_id - 1  # numpy indices start at 0
    val = current_app.config.val

    b_defined = val.def_matrix[ms_id]
    # remove variants where the inspected ms is undefined
    b_common = np.logical_and (val.def_matrix, b_defined)

    explain_equal_matrix = val.mask_matrix[ms_id]

    # The mss x passages boolean matrix that is TRUE whenever the inspected ms.
    # agrees with the potential source ms.
    b_equal = np.bitwise_and (val.mask_matrix, explain_equal_matrix) > 0
    b_equal = np.logical_and (b_equal, b_common)

    # The mss x passages boolean matrix that is TRUE whenever the inspected ms.
    # agrees with the potential source ms. or is posterior to it.
    b_post = np.bitwise_and (val.mask_matrix, explain_matrix) > 0
    b_post = np.logical_and (b_post, b_common)

    for comb in combinations:
        # how many passages does this combination explain?
        # pylint: disable=no-member
        b_explained_equal = np.logical_or.reduce (b_equal[comb.vec])
        b_explained_post  = np.logical_or.reduce (b_post[comb.vec])
        b_explained_post  = np.logical_and (b_explained_post, np.logical_not (b_explained_equal))
        b_explained       = np.logical_or (b_explained_equal, b_explained_post)

        comb.n_explained_equal = np.count_nonzero (b_explained_equal)
        comb.n_explained_post  = np.count_nonzero (b_explained_post)

        unexplained_matrix = np.copy (explain_matrix)
        unexplained_matrix[np.logical_not (b_defined)] = 0
        unexplained_matrix[b_explained] = 0
        b_unknown = np.bitwise_and (unexplained_matrix, 0x1) > 0
        unexplained_matrix[b_unknown] = 0
        b_open = unexplained_matrix > 0

        comb.n_unknown = np.count_nonzero (b_unknown)
        comb.n_open = np.count_nonzero (b_open)

        if mode == 'detail':
            comb.open_indices    = tuple (int (n + 1) for n in np.nonzero (b_open)[0])
            comb.unknown_indices = tuple (int (n + 1) for n in np.nonzero (b_unknown)[0])

    if mode == 'search':
        # add the 'hint' column
        def key_len (c):
            return c.len

        def key_explained (c):
            return -c.explained ()

        for _k, g in itertools.groupby (sorted (combinations, key = key_len), key = key_len):
            sorted (g, key = key_explained)[0].hint = True
Example #4
def _get_init_guess(strsa, strsb, nroots, hdiag, orbsym, wfnsym=0):
    airreps = numpy.zeros(strsa.size, dtype=numpy.int32)
    birreps = numpy.zeros(strsb.size, dtype=numpy.int32)
    for i, ir in enumerate(orbsym):
        airreps[numpy.bitwise_and(strsa, 1<<i) > 0] ^= ir
        birreps[numpy.bitwise_and(strsb, 1<<i) > 0] ^= ir
    na = len(strsa)
    nb = len(strsb)

    ci0 = []
    iroot = 0
    for addr in numpy.argsort(hdiag):
        x = numpy.zeros((na*nb))
        addra = addr // nb
        addrb = addr % nb
        if airreps[addra] ^ birreps[addrb] == wfnsym:
            x[addr] = 1
            ci0.append(x)
            iroot += 1
            if iroot >= nroots:
                break
    try:
        # Add noise
        ci0[0][0 ] += 1e-5
        ci0[0][-1] -= 1e-5
    except IndexError:
        raise IndexError('Configuration of required symmetry (wfnsym=%d) not found' % wfnsym)
    return ci0
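A scalar illustration of the bit test in the loop above: bit i of a determinant string is set when orbital i is occupied, and the string's irrep is the XOR of the irreps of its occupied orbitals (the orbsym values below are made up):

orbsym = [0, 3, 1]   # irrep label per orbital (assumed values)
string = 0b101       # orbitals 0 and 2 occupied
irrep = 0
for i, ir in enumerate(orbsym):
    if string & (1 << i):
        irrep ^= ir
print(irrep)  # 0 ^ 1 = 1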
Example #5
    def _read_symbology_block(self, buf2):
        """ Read symbology block. """
        # Read and decode symbology header
        self.symbology_header = _unpack_from_buf(buf2, 0, SYMBOLOGY_HEADER)

        # Read radial packets
        packet_code = struct.unpack('>h', buf2[16:18])[0]
        assert packet_code in SUPPORTED_PACKET_CODES
        self.packet_header = _unpack_from_buf(buf2, 16, RADIAL_PACKET_HEADER)
        self.radial_headers = []
        nbins = self.packet_header['nbins']
        nradials = self.packet_header['nradials']
        nbytes = _unpack_from_buf(buf2, 30, RADIAL_HEADER)['nbytes']
        if packet_code == 16 and nbytes != nbins:
            nbins = nbytes  # sometimes these do not match, use nbytes
        self.raw_data = np.empty((nradials, nbins), dtype='uint8')
        pos = 30

        for radial in self.raw_data:
            radial_header = _unpack_from_buf(buf2, pos, RADIAL_HEADER)
            pos += 6
            if packet_code == 16:
                radial[:] = np.fromstring(buf2[pos:pos+nbins], '>u1')
                pos += radial_header['nbytes']
            else:
                assert packet_code == AF1F
                # decode run length encoding
                rle_size = radial_header['nbytes'] * 2
                rle = np.fromstring(buf2[pos:pos+rle_size], dtype='>u1')
                colors = np.bitwise_and(rle, 0b00001111)
                runs = np.bitwise_and(rle, 0b11110000) // 16
                radial[:] = np.repeat(colors, runs)
                pos += rle_size
            self.radial_headers.append(radial_header)
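A self-contained illustration of the AF1F run-length decoding above: each RLE byte packs a run length in its high nibble and a 4-bit color in its low nibble (the sample bytes are made up):

import numpy as np

rle = np.array([0x31, 0x22], dtype='>u1')  # 3 x color 1, then 2 x color 2
colors = np.bitwise_and(rle, 0b00001111)
runs = np.bitwise_and(rle, 0b11110000) // 16
print(np.repeat(colors, runs))  # [1 1 1 2 2]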
Example #6
def get_init_guess(norb, nelec, nroots, hdiag, orbsym, wfnsym=0):
    if isinstance(nelec, (int, numpy.integer)):
        nelecb = nelec//2
        neleca = nelec - nelecb
    else:
        neleca, nelecb = nelec
    strsa = numpy.asarray(cistring.gen_strings4orblist(range(norb), neleca))
    strsb = numpy.asarray(cistring.gen_strings4orblist(range(norb), nelecb))
    airreps = numpy.zeros(strsa.size, dtype=numpy.int32)
    birreps = numpy.zeros(strsb.size, dtype=numpy.int32)
    for i in range(norb):
        airreps[numpy.bitwise_and(strsa, 1<<i) > 0] ^= orbsym[i]
        birreps[numpy.bitwise_and(strsb, 1<<i) > 0] ^= orbsym[i]
    na = len(strsa)
    nb = len(strsb)

    ci0 = []
    iroot = 0
    for addr in numpy.argsort(hdiag):
        x = numpy.zeros((na*nb))
        addra = addr // nb
        addrb = addr % nb
        if airreps[addra] ^ birreps[addrb] == wfnsym:
            x[addr] = 1
            ci0.append(x)
            iroot += 1
            if iroot >= nroots:
                break
    return ci0
Example #7
    def test(self, X, y, verbose=True):
        # if we don't need 3d inputs...
        if type(self.model.input_shape) is tuple:
            X = np.array(X)
            if len(self.model.input_shape) == 2:
                X = X.reshape((X.shape[0], -1))
        else:
            raise LanguageClassifierException('Multi-input models are not supported yet')

        if verbose:
            print("Getting predictions on the test set")
        predictions = self.predict(X)

        if len(predictions) != len(y):
            raise LanguageClassifierException("Non comparable arrays")

        if self.binary:
            acc = (predictions == y).mean()
            prec = np.sum(np.bitwise_and(predictions, y)) * 1.0 / np.sum(predictions)
            recall = np.sum(np.bitwise_and(predictions, y)) * 1.0 / np.sum(y)
            if verbose:
                print("Test set accuracy of {0:.3f}%".format(acc * 100.0))
                print("Test set error of {0:.3f}%".format((1 - acc) * 100.0))
                print("Precision for class=1: {0:.3f}".format(prec))
                print("Recall for class=1: {0:.3f}".format(recall))

            return (acc, prec, recall)
        else:
            # TODO: Obtain more metrics for the multiclass problem
            acc = (predictions == y).mean()
            if verbose:
                print("Test set accuracy of {0:.3f}%".format(acc * 100.0))
                print("Test set error of {0:.3f}%".format((1 - acc) * 100.0))
            return acc
Example #8
def filter_by_pvalue_strand_lag(ratios, pcutoff, pvalues, output, no_correction, name, singlestrand):
    """Filter DPs by strang lag and pvalue"""
    if not singlestrand:
        zscore_ratios = zscore(ratios)
        ratios_pass = np.bitwise_and(zscore_ratios > -2, zscore_ratios < 2)
    if not no_correction:
        pv_pass = [True] * len(pvalues)
        pvalues = [10**-x for x in pvalues]
        
        _output_BED(name + '-uncor', output, pvalues, pv_pass)
        _output_narrowPeak(name + '-uncor', output, pvalues, pv_pass)
        
        pv_pass, pvalues = multiple_test_correction(pvalues, alpha=pcutoff)
    else:
        pv_pass = np.where(np.asarray(pvalues) >= -log10(pcutoff), True, False)
    
    if not singlestrand:
        filter_pass = np.bitwise_and(ratios_pass, pv_pass)
        assert len(pv_pass) == len(ratios_pass)
    else:
        filter_pass = pv_pass
    
    assert len(output) == len(pvalues)
    assert len(filter_pass) == len(pvalues)
    
    return output, pvalues, filter_pass
Example #9
def process_cloudmask(mod09a1_file_name, cloudmask_output_name):
    fn_mod09a1 = mod09a1_file_name

    stateflags, geoTransform, proj = return_band(fn_mod09a1, 11)  # band 11 -- 500m State Flags

    goodpix_mask = numpy.where(stateflags == 65535, 1, 0)

    cloud = numpy.bitwise_and(stateflags, 3) + 1
    numpy.putmask(cloud, goodpix_mask, 0)

    # print cloudmask

    not_set = numpy.where(cloud == 4, 1, 0)
    shadow1 = numpy.where(cloud == 1, 1, 0)
    shadow2 = numpy.where(numpy.bitwise_and(stateflags, 4) == 4, 1, 0)
    shadow = numpy.logical_and(shadow1, shadow2)
    numpy.putmask(cloud, shadow, 4)
    numpy.putmask(cloud, not_set, 1)

    blue = return_band(fn_mod09a1, 3)[0]
    too_blue1 = numpy.where(cloud == 1, 1, 0)
    too_blue2 = numpy.where(blue > 2000, 1, 0)
    too_blue = numpy.logical_and(too_blue1, too_blue2)
    numpy.putmask(cloud, too_blue, 5)

    # print cloud

    output_file(cloudmask_output_name, cloud, geoTransform, proj)

    stateflags = None
    cloud = None
Example #10
def removeShortEvs(tsin,md):
    """ >>> ts=np.array([1,1,1,0,1,1,1,0,0,1,1,1,0,0,0,1,1,1,
                0,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,1,1,0,0,1,0,1,0,1])
        >>> print ts
        >>> print removeShortEvs(ts==1,2,3) 
    """
    evs=[]
    if not np.any(tsin): return np.int32(tsin)
    if np.all(tsin): return np.int32(tsin)
    tser=np.copy(tsin)
    ton = np.bitwise_and(tser,
        np.bitwise_not(np.roll(tser,1))).nonzero()[0].tolist()
    toff=np.bitwise_and(np.roll(tser,1),
        np.bitwise_not(tser)).nonzero()[0].tolist()
    if ton[-1]>toff[-1]:toff.append(tser.shape[0])
    if ton[0]>toff[0]:ton.insert(0,0)
    assert len(ton)==len(toff)
    #print np.int32(np.bitwise_and(tser,np.bitwise_not(np.roll(tser,1))))
    #print np.int32(np.bitwise_and(np.roll(tser,1),np.bitwise_not(tser)))
    for f in range(len(ton)):
        ts=ton[f];te=toff[f];dur=te-ts
        #print ts, te,dur
        if  dur<md: tsin[ts:te]-=1
    #tsin -= temp[:,val]
    return np.int32(tsin)
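The onset/offset trick used above, in isolation: an event starts where the series is 1 and its left neighbor (np.roll by 1) is 0, and ends where the left neighbor is 1 and the series is 0. A small sketch with made-up data:

import numpy as np

ts = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0], dtype=bool)
ton = np.bitwise_and(ts, np.bitwise_not(np.roll(ts, 1))).nonzero()[0]
toff = np.bitwise_and(np.roll(ts, 1), np.bitwise_not(ts)).nonzero()[0]
print(ton, toff)  # [1 6] [4 8]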
Example #11
def moments(data):
    """Returns (height, x, y, width_x, width_y,offset)
    the gaussian parameters of a 2D distribution by calculating its
    moments """
    total = data.sum()
    X, Y = np.indices(data.shape)
    x = (X*data).sum()/total
    y = (Y*data).sum()/total
    height = data.max()
    firstq = np.median(data[data < np.median(data)])
    thirdq = np.median(data[data > np.median(data)])
    offset = np.median(data[np.where(np.bitwise_and(data > firstq,
                                                    data < thirdq))])
    places = np.where((data-offset) > 4*np.std(data[np.where(np.bitwise_and(
                                      data > firstq, data < thirdq))]))
    width_y = np.std(places[0])
    width_x = np.std(places[1])
    # These if statements account for the case where there is only one
    # significant point above the background; in that case the width of the
    # Gaussian is assumed to be smaller than one pixel
    if width_y == 0.0:
        width_y = 0.5
    if width_x == 0.0:
        width_x = 0.5

    height -= offset
    return height, x, y, width_x, width_y, offset
Example #12
    def checkDistribution(self,catalog):
        '''
        Separate the CCD into a grid and check that there are stars in all of its cells.

        :param catalog:
        :return:
        '''
        camera = self.getCam()
        width, height = camera.getPixelSize()
        ngrid = 8
        wgrid = np.linspace(0, width,ngrid)
        hgrid = np.linspace(0, height, ngrid)

        star_per_grid = np.zeros((ngrid-1,ngrid-1))

        for i in range(ngrid-1):
            mask_w = np.bitwise_and(catalog['X_IMAGE'] > wgrid[i],
                                    catalog['X_IMAGE'] < wgrid[i+1])
            for j in range(ngrid-1):
                mask_h = np.bitwise_and(catalog['Y_IMAGE'] > hgrid[j],
                                        catalog['Y_IMAGE'] < hgrid[j+1])
                mask = np.bitwise_and(mask_w,
                                      mask_h)

                star_per_grid[i][j] += np.sum(mask)

        nstar = len(catalog)/2/ngrid**2

        mask_starpg = star_per_grid < nstar

        if np.any(mask_starpg):
            raise StarDistributionException('Stellar distribution not suitable for optical alignment.')
Example #13
    def rearrange_bits(array):
        # Do bit rearrangement for the 10-bit lytro raw format
        # Normalize output to 1.0 as float64
        t0 = array[0::5]
        t1 = array[1::5]
        t2 = array[2::5]
        t3 = array[3::5]
        lsb = array[4::5]

        t0 = np.left_shift(t0, 2) + np.bitwise_and(lsb, 3)
        t1 = np.left_shift(t1, 2) + np.right_shift(np.bitwise_and(lsb, 12), 2)
        t2 = np.left_shift(t2, 2) + np.right_shift(np.bitwise_and(lsb, 48), 4)
        t3 = np.left_shift(t3, 2) + np.right_shift(np.bitwise_and(lsb, 192), 6)

        image = np.zeros(LYTRO_ILLUM_IMAGE_SIZE, dtype=np.uint16)
        image[:, 0::4] = t0.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 1::4] = t1.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 2::4] = t2.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 3::4] = t3.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )

        # Normalize data to 1.0 as 64-bit float.
        # Division is by 1023 as the Lytro Illum saves 10-bit raw data.
        return np.divide(image, 1023.0).astype(np.float64)
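A sketch of the 10-bit packing assumed above, on a single group of five bytes: bytes 0-3 carry the high 8 bits of four pixels, and byte 4 carries their two low bits (sample values are made up):

import numpy as np

group = np.array([0b10101010, 0b11001100, 0, 0, 0b11100100], dtype=np.uint16)
lsb = group[4]
p0 = (group[0] << 2) + (lsb & 0b00000011)         # -> 680
p1 = (group[1] << 2) + ((lsb & 0b00001100) >> 2)  # -> 817
print(p0, p1)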
Example #14
 def convert_to_complex_samples(self, byte_arr):
     '''
     Generic parsing of complex samples.
     Handles the 4-bit case.
     `byte_arr` is a byte array.
     TODO add others?
     Throws an error if `self.bit_depth` is unsupported.
     '''
     # typically real is the upper nibble and imaginary the lower, but TODO make this generic
     if self.bit_depth == 4:
         lsb = (bitwise_and(byte_arr, 0x0f) << 4).astype(int8) >> 4
         msb = bitwise_and(byte_arr, 0xf0).astype(int8) >> 4
     elif self.bit_depth == 8:
         msb = byte_arr[0::2]  # TODO this might not be working
         lsb = byte_arr[1::2]
     elif self.bit_depth == 16:
         sample_arr = byte_arr.view(int16)
         msb = sample_arr[0::2]
         lsb = sample_arr[1::2]
     else:
         raise Error('Bit depth not supported for complex samples')
     if self.i_msb:
         return msb + 1j * lsb
     else:
         return lsb + 1j * msb
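The 4-bit sign-extension trick above, shown on one byte: shifting the low nibble into the top of an int8 and arithmetic-shifting back down propagates the nibble's sign bit:

import numpy as np

b = np.array([0xF9], dtype=np.uint8)  # low nibble 0x9 = -7, high 0xF = -1
lsb = (np.bitwise_and(b, 0x0F) << 4).astype(np.int8) >> 4
msb = np.bitwise_and(b, 0xF0).astype(np.int8) >> 4
print(lsb, msb)  # [-7] [-1]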
Example #15
 def place_ship(self, position, length, orientation):
     """
     Return None if ship cannot be placed
     """
     ship = None
     if orientation == 'H':
         zeros = np.zeros(self.width * self.height, dtype='int8')
         if (position[0] + length) > self.width:
             return None
         for i in range(length):
             zeros[position[1] * self.width + position[0]+i] = 1
         if np.all(np.bitwise_and(self._layout, zeros) == 0):
             self._layout = np.bitwise_or(self._layout, zeros)
             ship = Ship(position, length, orientation)
     elif orientation == 'V':
         zeros = np.zeros(self.width * self.height, dtype='int8')
         if (position[1] + length) > self.height:
             return None
         for i in range(length):
             zeros[(position[1] + i) * self.width + position[0]] = 1
         if np.all(np.bitwise_and(self._layout, zeros) == 0):
             self._layout = np.bitwise_or(self._layout, zeros)
             ship = Ship(position, length, orientation)
     if ship:
         self._ships.append(ship)
         return ship
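The overlap test above, in miniature: a placement is legal only when the candidate cells share no set bits with the current layout (values below are made up):

import numpy as np

layout = np.array([1, 1, 0, 0], dtype='int8')     # cells already occupied
candidate = np.array([0, 0, 1, 1], dtype='int8')  # proposed ship cells
print(np.all(np.bitwise_and(layout, candidate) == 0))  # True -> no overlap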
Example #16
def load_spc(fname):
    """Load data from Becker & Hickl SPC files.

    Returns:
        3 numpy arrays (timestamps, detector, nanotime) and a float
        (timestamps_unit).
    """

    f = open(fname, 'rb')
    # We first decode the first 6 bytes, which form a header...
    header = np.fromfile(f, dtype='u2', count=3)
    timestamps_unit = header[1] * 0.1e-9
    num_routing_bits = np.bitwise_and(header[0], 0x000F)  # unused

    # ...and then the remaining records containing the photon data
    spc_dtype = np.dtype([('field0', '<u2'), ('b', '<u1'), ('c', '<u1'),
                          ('a', '<u2')])
    data = np.fromfile(f, dtype=spc_dtype)

    nanotime =  4095 - np.bitwise_and(data['field0'], 0x0FFF)
    detector = data['c']

    # Build the macrotime (timestamps) using in-place operation for efficiency
    timestamps = data['b'].astype('int64')
    np.left_shift(timestamps, 16, out=timestamps)
    timestamps += data['a']

    # extract the 13-th bit from data['field0']
    overflow = np.bitwise_and(np.right_shift(data['field0'], 13), 1)
    overflow = np.cumsum(overflow, dtype='int64')

    # Add the overflow bits
    timestamps += np.left_shift(overflow, 24)

    return timestamps, detector, nanotime, timestamps_unit
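How the 24-bit macrotime is assembled above, checked on one hand-made record: the byte 'b' fills bits 16-23, the 16-bit word 'a' fills the low bits, and every overflow seen so far adds 2**24:

b, a, n_overflows = 0x01, 0x0002, 3
timestamp = (b << 16) + a + (n_overflows << 24)
print(timestamp == 3 * 2**24 + 2**16 + 2)  # True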
Example #17
def compute_dice_with_transfo(img_fixed, img_moving, transfo):
    
    # first transform
    toolsPaths = ['CIP_PATH']
    path = dict()
    for path_name in toolsPaths:
        path[path_name] = os.environ.get(path_name, False)
        if path[path_name] == False:
            print(path_name + " environment variable is not set")
            exit()
    temp_out = "/Users/rolaharmouche/Documents/Data/temp_reg.nrrd"        
    resamplecall = os.path.join(path['CIP_PATH'], "ResampleCT")    
        
    sys_call = resamplecall+" -d "+img_fixed+" -r "+ temp_out+\
            " -t "+transfo+" -l "+img_moving
    os.system(sys_call) 
    
    print(" computing ssd between "+img_fixed+" and registered"+ img_moving)
                    
    img1_data, info = nrrd.read(temp_out)
    img2_data, info = nrrd.read(img_fixed)
    
    
    
    # careful: reference image has labels = 2 and 3
    added_images = np.bitwise_and(img1_data, img2_data)
    Dice_calculation = np.sum(added_images) * 2.0 / (np.sum(img1_data) + np.sum(img2_data))

    return Dice_calculation
Example #18
def cloudMask(tiffFolder):
    """
    The cloudMask includes pixels identified as cloud, shadow, or snow in the Quality Assessment band (BQA).
    Masked pixels have a value of 0 and clear pixels have a value of 1. If there is no BQA, invoke Fmask.
    """
    return_value = True;
    inputTiffName=os.path.join(tiffFolder,os.path.basename(tiffFolder)) + "_BQA.TIF"
    print "In cloudMask checking for: " +inputTiffName
    outputTiffName=os.path.join(tiffFolder,os.path.basename(tiffFolder)) + "_MTLFmask.TIF"
    if os.path.exists(inputTiffName):
        [maskArray, geoTiffAtts]= LSFGeoTIFF.ReadableLSFGeoTIFF.fromFile(inputTiffName).asGeoreferencedArray() 
        # USGS documentation
        # shown here: https://landsat.usgs.gov/collectionqualityband
        # for example, decimal 2800 = binary 0000101011110000, i.e. bits 15..0:
        #      0 0 0 0 1 0 1 0 1 1 1 1 0 0 0 0
        # high confidence cloud, bits 4, 5, and 6.
        cloud=np.equal(np.right_shift(np.bitwise_and(maskArray, 112), 4), 7)
        # high confidence cloud shadow, bits 7 and 8
        shadow=np.equal(np.right_shift(np.bitwise_and(maskArray, 496), 7), 3)
        # high confidence snow/ice, bits 9 and 10.
        snow=np.equal(np.right_shift(np.bitwise_and(maskArray, 1536), 9), 3)
        # if cloud, shadow, or snow mask is set for a pixel, mask it in newMask
        newMask = np.logical_not(np.logical_or(np.logical_or(cloud,shadow),snow))
        LSFGeoTIFF.Unsigned8BitLSFGeoTIFF.fromArray(newMask, geoTiffAtts).write(outputTiffName)

    else:
        print "Begin Fmask processing " + str(datetime.datetime.now())
        return_value = runFmask(tiffFolder,fmaskShellCall)
        print "End Fmask processing " + str(datetime.datetime.now())
    return return_value
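The BQA decode above can be sanity-checked against the documented value 2800: the cloud-confidence field (bits 4-6) reads 7, while the shadow and snow fields do not match their high-confidence patterns:

import numpy as np

bqa = np.array([2800])
print(np.right_shift(np.bitwise_and(bqa, 112), 4))   # [7] -> high-confidence cloud
print(np.right_shift(np.bitwise_and(bqa, 496), 7))   # [1] -> not flagged as shadow
print(np.right_shift(np.bitwise_and(bqa, 1536), 9))  # [1] -> not flagged as snow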
Example #19
    def highlightedImage(self, background, motion, number):
        redChannel = background[:, :, 2]
        # highlight motion
        background[:, :, 2] = np.bitwise_and(np.bitwise_not(motion), redChannel) + np.bitwise_and(motion, redChannel // 3 + 168)
        cv2.putText(background, 'motion!', (self.frame_size[1] - 50, self.frame_size[0] // 2), self.font, 1, (0, 0, 255), 2)
        cv2.putText(background, str(number), (self.frame_size[1] // 2 - 100, self.frame_size[0] // 2 - 100), self.font, 2, (0, 255, 0), 2)
        return background
Example #20
def load_spc(fname):
    """Load data from Becker&Hickl SPC files.

    Returns:
        3 numpy arrays: timestamps, detector, nanotime
    """
    spc_dtype = np.dtype([("field0", "<u2"), ("b", "<u1"), ("c", "<u1"), ("a", "<u2")])
    data = np.fromfile(fname, dtype=spc_dtype)

    nanotime = 4095 - np.bitwise_and(data["field0"], 0x0FFF)
    detector = data["c"]

    # Build the macrotime (timestamps) using in-place operation for efficiency
    timestamps = data["b"].astype("int64")
    np.left_shift(timestamps, 16, out=timestamps)
    timestamps += data["a"]

    # extract the 13-th bit from data['field0']
    overflow = np.bitwise_and(np.right_shift(data["field0"], 13), 1)
    overflow = np.cumsum(overflow, dtype="int64")

    # Add the overflow bits
    timestamps += np.left_shift(overflow, 24)

    return timestamps, detector, nanotime
Example #21
def doubleParityChksum(data):
    """Computes horizontal an vertical parities.

    One horizontal parity bit is computed per octet. 8 vertical parity bits
    are computed. The checksum is the 8 vertical parity bits plus the
    horizontal parity bits, padded by a variable number of 0 bits to align
    on an octet boundary. Even parity is used.

    The checksum is returned as a string where each character codes a byte
    of the checksum.
    """
    bitmask = numpy.array([128,64,32,16,8,4,2,1])
    bytes = numpy.fromstring(data,numpy.uint8)
    numBytes = len(bytes)
    bits = numpy.bitwise_and.outer(bytes,bitmask).flat
    numpy.putmask(bits,bits,1)
    bits = numpy.reshape(bits, (numBytes,8))

    verParities = numpy.bitwise_and(numpy.sum(bits, 0), 1)
    bits = numpy.concatenate((bits,[verParities]))

    horParities = numpy.bitwise_and(numpy.sum(bits,1),1)
    if len(horParities)%8:
        horParities = numpy.concatenate((horParities,
                                           [0]*(8-len(horParities)%8)))

    bitmask = numpy.array([128,64,32,16,8,4,2,1])
    chksumstring = chr(numpy.dot(verParities,bitmask))
    for i in range(len(horParities) // 8):
        chksumstring += chr(numpy.dot(horParities[i*8:(i+1)*8],bitmask))
    return chksumstring
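An even-parity check on a single octet, matching the scheme described above: the parity bit is the sum of the byte's bits taken modulo 2, here via a final bitwise AND with 1:

import numpy as np

bitmask = np.array([128, 64, 32, 16, 8, 4, 2, 1])
bits = (np.bitwise_and.outer(np.array([0b10110010], np.uint8), bitmask) > 0).astype(int)
print(np.bitwise_and(bits.sum(1), 1))  # [0] -> four ones, even parity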
Example #22
def process_t3records(t3records, time_bit=10, dtime_bit=15,
                      ch_bit=6, special_bit=True, ovcfunc=None):
    """Extract the different fields from the raw t3records array (.ht3).

    Returns:
        3 arrays representing detectors, timestamps and nanotimes.
    """
    if special_bit:
        ch_bit += 1
    assert ch_bit <= 8
    assert dtime_bit <= 16

    detectors = np.bitwise_and(
        np.right_shift(t3records, time_bit + dtime_bit), 2**ch_bit - 1).astype('uint8')
    nanotimes = np.bitwise_and(
        np.right_shift(t3records, time_bit), 2**dtime_bit - 1).astype('uint16')

    assert time_bit <= 16
    dt = np.dtype([('low16', 'uint16'), ('high16', 'uint16')])

    t3records_low16 = np.frombuffer(t3records, dt)['low16']     # View
    timestamps = t3records_low16.astype(np.int64)               # Copy
    np.bitwise_and(timestamps, 2**time_bit - 1, out=timestamps)

    overflow_ch = 2**ch_bit - 1
    overflow = 2**time_bit
    if ovcfunc is None:
        ovcfunc = _correct_overflow
    ovcfunc(timestamps, detectors, overflow_ch, overflow)
    return detectors, timestamps, nanotimes
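The record layout assumed above for a 32-bit .ht3 record with special_bit=True is [channel: 7 bits][dtime: 15 bits][time: 10 bits]; a scalar check of the shifts and masks:

record = (1 << 25) | (3 << 10) | 5   # channel 1, dtime 3, nanotime 5
ch = (record >> 25) & (2**7 - 1)
dtime = (record >> 10) & (2**15 - 1)
time = record & (2**10 - 1)
print(ch, dtime, time)  # 1 3 5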
Example #23
def simpleAdd(exp0, exp1, badPixelMask):
    """Add two exposures, avoiding bad pixels
    """
    imArr0, maskArr0, varArr0 = exp0.getMaskedImage().getArrays()
    imArr1, maskArr1, varArr1 = exp1.getMaskedImage().getArrays()
    expRes = exp0.Factory(exp0, True)
    miRes = expRes.getMaskedImage()
    imArrRes, maskArrRes, varArrRes = miRes.getArrays()

    weightMap = afwImage.ImageF(exp0.getDimensions())
    weightArr = weightMap.getArray()

    good0 = np.bitwise_and(maskArr0, badPixelMask) == 0
    good1 = np.bitwise_and(maskArr1, badPixelMask) == 0

    imArrRes[:, :] = np.where(good0,  imArr0, 0) + np.where(good1,  imArr1, 0)
    varArrRes[:, :] = np.where(good0, varArr0, 0) + np.where(good1, varArr1, 0)
    maskArrRes[:, :] = np.bitwise_or(np.where(good0, maskArr0, 0), np.where(good1, maskArr1, 0))
    weightArr[:, :] = np.where(good0, 1, 0) + np.where(good1, 1, 0)

    miRes /= weightMap
    miRes *= 2  # want addition, not mean, where both pixels are valid

    setCoaddEdgeBits(miRes.getMask(), weightMap)

    return expRes
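The good-pixel test above, in miniature: a pixel is usable when its mask plane shares no bits with badPixelMask (values below are made up):

import numpy as np

maskArr = np.array([0b000, 0b010, 0b001])  # per-pixel mask planes
badPixelMask = 0b110
print(np.bitwise_and(maskArr, badPixelMask) == 0)  # [ True False  True]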
Example #24
def computeState(isFix,md):
    ''' generic function that determines event start and end
        isFix - 1d array, time series with one element for each
            gaze data point, 1 indicates the event is on, 0 - off
        md - minimum event duration
        returns
            list with tuples with start and end for each
                event (values in frames)
            timeseries analogue to isFix but the values
                correspond to the list
    '''
    fixations=[]
    if isFix.sum()==0: return np.int32(isFix),[]
    fixon = np.bitwise_and(isFix,
        np.bitwise_not(np.roll(isFix,1))).nonzero()[0].tolist()
    fixoff=np.bitwise_and(np.roll(isFix,1),
        np.bitwise_not(isFix)).nonzero()[0].tolist()
    if len(fixon)==0 and len(fixoff)==0: fixon=[0]; fixoff=[isFix.size-1]
    if fixon[-1]>fixoff[-1]:fixoff.append(isFix.shape[0]-1)
    if fixon[0]>fixoff[0]:fixon.insert(0,0)
    if len(fixon)!=len(fixoff): print 'invalid fixonoff';raise TypeError
    for f in range(len(fixon)):
        fs=fixon[f];fe=(fixoff[f]+1);dur=fe-fs
        if  dur<md[0] or dur>md[1]:
            isFix[fs:fe]=False
        else: fixations.append([fs,fe-1])
    #fixations=np.array(fixations)
    return isFix,fixations
Example #25
def interpolateBlinks(t,d,hz):
    ''' Interpolate short missing intervals
        d - 1d array, time series with gaze data, np.nan indicates blink
        hz - gaze data recording rate
    '''
    isblink= np.isnan(d)
    if isblink.sum()<2 or isblink.sum()>(isblink.size-2): return d
    blinkon = np.bitwise_and(isblink,np.bitwise_not(
        np.roll(isblink,1))).nonzero()[0].tolist()
    blinkoff=np.bitwise_and(np.roll(isblink,1),
        np.bitwise_not(isblink)).nonzero()[0].tolist()
    if len(blinkon)==0 and len(blinkoff)==0: return d
    #print 'bla',len(blinkon), len(blinkoff)
    if blinkon[-1]>blinkoff[-1]: blinkoff.append(t.size-1)
    if blinkon[0]>blinkoff[0]: blinkon.insert(0,0)
    if len(blinkon)!=len(blinkoff):
        print 'Blink Interpolation Failed'
        raise TypeError
    f=interp1d(t[~isblink],d[~isblink],bounds_error=False)
    for b in range(len(blinkon)):
        bs=blinkon[b]-1
        be=(blinkoff[b])
        if (be-bs)<INTERPMD*hz:
            d[bs:be]=f(t[bs:be])
            #for c in [7,8]: tser[bs:be,c]=np.nan
    return d
Example #26
    def _record_data(self):
        '''
            Reads raw event data from SRAM and splits data stream into events
            ----------
            Returns:
                event_data : np.ndarray
                    Numpy array of single event numpy arrays 
        '''
        
        self.count_lost = self.dut['fadc0_rx'].get_count_lost()
#         print 'count_lost is %d' % self.count_lost
#         print 'event_count is %d' % self.event_count
        if self.count_lost > 0:
            logging.error('SRAM FIFO overflow number %d. Skip data.', self.count_lost)
            self.dut['fadc0_rx'].reset()
            self.set_adc_eventsize(self.sample_count, self.sample_delay)
            #return
        
        single_data = self.dut['DATA_FIFO'].get_data()                          # Read raw data from SRAM
        
        try:
            if single_data.shape[0] > 200:
                selection = np.where(single_data & 0x10000000 == 0x10000000)[0]         # Make mask from new-event-bit
                event_data = np.bitwise_and(single_data, 0x00003fff).astype(np.uint32)  # Remove new-event-bit from data       
                event_data = np.split(event_data, selection)                            # Split data into events by means of mask
                event_data = event_data[1:-1]                                           # Remove first and last event in case of chopping
                event_data = np.vstack(event_data)                                      # Stack events together
            else:
                event_data = np.asarray([np.bitwise_and(single_data, 0x00003fff).astype(np.uint32)])

            if event_data.shape[1] == self.sample_count:
                return event_data
        except ValueError as e:
            logging.error('_record_data() experienced a ValueError: ' + str(e))
            return
Example #27
def mean_average_precision(distances, labels):
    """
    Calculate mean average precision and precision-recall breakeven.

    Returns
    -------
    mean_average_precision, mean_prb, ap_dict : float, float, dict
        The dict gives the per-type average precisions.
    """

    label_matches = generate_matches_array(labels)  # across all tokens

    ap_dict = {}
    prbs = []
    for target_type in sorted(set(labels)):
        if len(np.where(np.asarray(labels) == target_type)[0]) == 1:
            continue
        type_matches = generate_type_matches_array(labels, target_type)
        swtt_matches = np.bitwise_and(
            label_matches == True, type_matches == True
            ) # same word, target type
        dwtt_matches = np.bitwise_and(
            label_matches == False, type_matches == True
            ) # different word, target type
        ap, prb = average_precision(
            distances[swtt_matches], distances[dwtt_matches]
            )
        prbs.append(prb)
        ap_dict[target_type] = ap
    return np.mean(list(ap_dict.values())), np.mean(prbs), ap_dict
Example #28
    def check_criterion(self, compiled_record, trial_record, **kwargs):
        trial_number = np.asarray(compiled_record['trial_number'])
        current_step = np.asarray(compiled_record['current_step'])
        correct = np.asarray(compiled_record['correct'])
        protocol_name = np.asarray(compiled_record['protocol_name'])
        protocol_ver = np.asarray(compiled_record['protocol_version_number'])

        # filter out trial_numbers for current protocol_name and protocol_ver
        current_step = current_step[np.bitwise_and(protocol_name==protocol_name[-1],protocol_ver==protocol_ver[-1])]
        trial_number = trial_number[np.bitwise_and(protocol_name==protocol_name[-1],protocol_ver==protocol_ver[-1])]
        correct = correct[np.bitwise_and(protocol_name==protocol_name[-1],protocol_ver==protocol_ver[-1])]

        if self.num_trials_mode == 'consecutive':
            jumps = np.where(np.diff(trial_number) != 1)[0]  # jumps in trial number
            if jumps.size == 0:
                which_trials = trial_number
            else:
                which_trials = trial_number[jumps[-1]:]  # from the last jump
        else:
            which_trials = trial_number

        if np.size(which_trials)<self.num_trials:
            graduate = False  # don't graduate if the number of trials is less than the number required
        else:
            which_trials = which_trials[-self.num_trials:]
            filter =  np.isin(trial_number,which_trials)
            correct = correct[filter]
            perf = np.sum(correct)/np.size(correct)
            if perf >self.pct_correct:
                graduate = True
            else:
                graduate = False

        return graduate
Example #29
def check_fills_complement(figure1, figure2):
    figure1_bw = get_bw_image(figure1)
    figure2_bw = get_bw_image(figure2)
    #figure1_gray = figure1.convert('L')
    #figure1_bw = numpy.asarray(figure1_gray).copy()
    #figure2_gray = figure2.convert('L')
    #figure2_bw = numpy.asarray(figure2_gray).copy()
    #
    ## Dark area = 1, Light area = 0
    #figure1_bw[figure1_bw < 128] = 1
    #figure1_bw[figure1_bw >= 128] = 0
    #figure2_bw[figure2_bw < 128] = 1
    #figure2_bw[figure2_bw >= 128] = 0

    difference21 = figure2_bw - figure1_bw
    difference12 = figure1_bw - figure2_bw
    # 0 - 1 = 255 because of rollover.
    difference21[difference21 > 10] = 0
    difference12[difference12 > 10] = 0

    percent_diff = get_percent_diff(difference21, figure2_bw)
    if percent_diff < PERCENT_DIFF_THRESHOLD and \
        get_percent_diff(numpy.bitwise_and(figure1_bw, figure2_bw),
                         figure1_bw) < PERCENT_DIFF_THRESHOLD:
        return percent_diff
    percent_diff = get_percent_diff(difference12, figure1_bw)
    if percent_diff < PERCENT_DIFF_THRESHOLD and \
        get_percent_diff(numpy.bitwise_and(figure1_bw, figure2_bw),
                         figure2_bw) < PERCENT_DIFF_THRESHOLD:
        return -percent_diff

    return 0
Example #30
def center_mask_at(mask, pos, indexes, toroidal=False):
    ndim = len(mask)
    #print 'ndim:', ndim
    new = range(ndim)
    #print 'new:', new
    toKeep = np.ones(len(mask[0]), dtype=bool)
    #print 'toKeep:', toKeep
    for ic in xrange(ndim):
        #print 'ic:', ic
        #print 'pos[ic]:', pos[ic]
        comp = mask[ic] + pos[ic]
        #print 'comp:', comp
        if not toroidal:
            insiders = np.bitwise_and(comp>=0, comp<indexes.shape[ic])
            np.bitwise_and(toKeep, insiders, toKeep)
        else:
            comp[np.where(comp<0)] = indexes.shape[ic]-1
            comp[np.where(comp>=indexes.shape[ic])] = 0
            #print 'indexes.shape[ic]:', indexes.shape[ic]
            #print 'comp after modif:', comp
        new[ic] = comp
        #print 'new[ic]:', new[ic]
    #m = tuple( [n[toKeep] for n in new] )
    #np.bitwise_and(toKeep, indexes[m]!=-1, toKeep)
    #print 'toKeep:', toKeep
    #print 'new:', new
    return tuple( [n[toKeep] for n in new] )
Example #31
def filter_catalog_data(data, catalogName):
    """Remove extended sources and bad measurements from reference catalogs
    before performing photometric calibration"""

    # Keep only point source objects and good measurements

    # Panstarrs flags
    if catalogName == "II/349/ps1":
        # First remove data using the general 'Qual' flag.
        # ------------------------------------------------
        # There are 8 bits
        # Bit 1: extended object in PS1
        # Bit 2: Extended in external data (e.g. 2MASS)
        # Bit 3: Good-quality measurement in PS1
        # Bit 4: Good-quality measurement in external data (eg, 2MASS)
        # Bit 5: Good-quality object in the stack (>1 good stack measurement)
        # Bit 6: The primary stack measurements are the best measurements
        # Bit 7: Suspect object in the stack (no more than 1 good measurement,
        #        2 or more suspect or good stack measurement)
        # Bit 8:  Poor-quality stack object (no more than 1 good or suspect
        # measurement)

        Quality_flags = np.array(data["Qual"])
        Qual_flag = unpackbits(Quality_flags, 8)

        qual_bits = [1, 2, 3, 7, 8]
        qual_values = [0, 0, 1, 0, 0]
        counter = 0
        for i, j in zip(qual_bits, qual_values):
            condition = Qual_flag[:, i - 1] == j
            if counter == 0:
                quality_mask = condition
            else:
                quality_mask = np.bitwise_and(quality_mask, condition)
            counter += 1
        # flag for individual bands
        # -------------------------
        # There are 25 bits. Use only the bit stating whether it is
        # an extended object in this band.
        # Bit 1: Used within relphot (SECF_STAR_FEW): skip star
        # Bit 2: Used within relphot (SECF_STAR_POOR): skip star
        # Bit 3: Synthetic photometry used in average measurement
        # Bit 4: Ubercal photometry used in average measurement
        # Bit 5: PS1 photometry used in average measurement
        # Bit 6: PS1 stack photometry exists
        # Bit 7: Tycho photometry used for synthetic magnitudes
        # Bit 8: Synthetic magnitudes repaired with zeropoint map
        # Bit 9: Average magnitude calculated in 0th pass
        # Bit 10: Average magnitude calculated in 1st pass
        # Bit 11: Average magnitude calculated in 2nd pass
        # Bit 12: Average magnitude calculated in 3rd pass
        # Bit 13: Average magnitude calculated in 4th pass
        # Bit 14: Extended in this band (PSPS only)
        # Bit 15: PS1 stack photometry comes from primary skycell
        # Bit 16: PS1 stack best measurement is a detection (not forced)
        # Bit 17: PS1 stack primary measurement is a detection (not forced)
        # Bit 18:
        # Bit 19:
        # Bit 20:
        # Bit 21: This photcode has SDSS photometry
        # Bit 22: This photcode has HSC photometry
        # Bit 23: This photcode has CFH photometry (mostly megacam)
        # Bit 24: This photcode has DES photometry
        # Bit 25: Extended in this band

        band_bits_and = [1, 2, 14, 25]
        band_values_and = [0, 0, 0, 0]
        band_bits_or = [9, 10, 11, 12]
        band_values_or = [1, 1, 1, 1]

        # bands = ['g', 'r', 'i', 'z', 'y']
        # No need to consider y band, and fainter sensitivity, so
        # might remove good reference stars
        bands = ["g", "r", "i", "z"]
        band_flags = []
        # Unpack bits from individual band flags
        for band in bands:
            _temp = np.array(data["%sFlags" % band])
            band_flags.append(unpackbits(_temp, 25))
        band_flags = np.array(band_flags)

        # Apply mask conditions
        for i in range(len(bands)):
            counter = 0
            for j1, k1 in zip(band_bits_and, band_values_and):
                condition = band_flags[i][:, j1 - 1] == k1
                quality_mask = np.bitwise_and(quality_mask, condition)
                counter += 1
            counter2 = 0
            # At least one Average magnitude calculated
            for j2, k2 in zip(band_bits_or, band_values_or):
                condition_or = band_flags[i][:, j2 - 1] == k2
                if counter2 == 0:
                    quality_mask_or = condition_or
                else:
                    quality_mask_or = np.bitwise_or(quality_mask_or, condition_or)
                counter2 += 1
            # Combine both masks
            quality_mask = np.bitwise_and(quality_mask, quality_mask_or)

    elif catalogName == "V/147/sdss12":
        # No mask yet
        quality_mask = np.ones(len(data), dtype=bool)

    elif catalogName == "I/345/gaia2":
        # No mask yet
        quality_mask = np.ones(len(data), dtype=bool)

    elif catalogName == "I/284/out":
        # No mask yet
        quality_mask = np.ones(len(data), dtype=bool)

    return data[quality_mask]
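The unpackbits helper used above is not numpy.unpackbits (the signature differs) and is not shown in this example. A plausible, hypothetical stand-in that is consistent with the 1-indexed [:, i - 1] bit lookups in this function: expand each integer into its lowest num_bits bits, least significant bit first:

import numpy as np

def unpackbits(values, num_bits):
    # column k holds bit k+1 (catalog convention, LSB first) of each value
    return (values[:, None] >> np.arange(num_bits)) & 1

print(unpackbits(np.array([5]), 4))  # [[1 0 1 0]] -> bits 1 and 3 set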
Example #32
        -1)  # flip the image from the Raspberry Pi camera back to the right way up

    img_resultat = copy.deepcopy(img_init)

    img = img_init[0:350, 0:500]  # only process the left-hand part of the image
    img_canny = cv2.Canny(img, 2 * seuil_canny, seuil_canny)

    # create a mask to ignore part of the image
    masque = np.zeros((img.shape[0], img.shape[1]), np.uint8)
    masque = cv2.fillConvexPoly(
        masque,
        np.array([[[100, 350], [0, 200], [0, 100], [300, 0], [400, 0],
                   [500, 100], [500, 200], [200, 350]]],
                 dtype=np.int32),
        color=255)
    img_canny = np.bitwise_and(
        masque, img_canny)  # removes the left-hand corner from the detection
    #cv2.imshow("Canny", img_canny)

    img_resultat = cv2.putText(
        img_resultat,
        "pretraitement " + str(int((time.clock() - debut_tot) * 1000)) + " ms",
        (8, img_resultat.shape[0] - 20),
        cv2.FONT_HERSHEY_PLAIN,
        2,
        255,
        thickness=2)
    #####################################################
    # detect lines with the Hough transform
    # the second argument is the distance resolution in pixels
    # the third is the angle resolution in radians
    # the fourth is the minimum number of votes for a line to be taken into account
Example #33
def get_patches(data_name, fg_name, mask_name, zaras, lvl, dataset, tumor):

    patch_size = 512

    if data_name.find("normal") >= 0:
        pouzit_masku = 0

    elif data_name.find("tumor") >= 0:
        pouzit_masku = 1

    fg = imread(fg_name)
    if pouzit_masku:
        lbl = imread_gdal_mask(mask_name, lvl + 3)  #lvl1 ->+1
    else:
        lbl = np.zeros([int(np.shape(fg)[0] * 2),
                        int(np.shape(fg)[1] * 2)],
                       dtype=bool)  #####plice

    fg = cv2.resize(fg, None, fx=2, fy=2,
                    interpolation=cv2.INTER_NEAREST) > 0  #####plice

    fg = fg[:np.min([np.shape(lbl)[0], np.shape(fg)[0]]), :np.
            min([np.shape(lbl)[1], np.shape(fg)[1]])]

    lbl = lbl[:np.min([np.shape(lbl)[0], np.shape(fg)[0]]), :np.
              min([np.shape(lbl)[1], np.shape(fg)[1]])]

    #

    if tumor:
        tmp = lbl > 0
    else:
        tmp = np.bitwise_and(fg > 0, lbl == 0)

    if dataset == 'plice':
        tmp[lbl == 1] = 0

    tmp = clear_border(tmp, 2 * (1 + int(np.ceil(patch_size / 2 / 2 + 1))))
    pozice = np.where(tmp > 0)

    rr = np.random.choice(len(pozice[0]), zaras)

    i = -1
    patch = []
    mask = []

    for r in rr:
        i += 1

        pozicex = pozice[1][r] * 2 * 2 * 2
        pozicey = pozice[0][r] * 2 * 2 * 2
        pozicex += np.random.randint(2 * 2 * 2)
        pozicey += np.random.randint(2 * 2 * 2)

        if pouzit_masku:
            mask.append(
                get_patches_mask_gdal(mask_name, [pozicex, pozicey],
                                      patch_size, lvl))
        else:
            mask.append(np.zeros((patch_size, patch_size)))
        patch.append(
            get_patches_data_gdal(data_name, [pozicex, pozicey], patch_size,
                                  lvl))

        if pouzit_masku:
            mask.append(
                get_patches_mask_gdal(mask_name, [pozicex, pozicey],
                                      patch_size, lvl - 1))
        else:
            mask.append(np.zeros((patch_size, patch_size)))
        patch.append(
            get_patches_data_gdal(data_name, [pozicex, pozicey], patch_size,
                                  lvl - 1))

    return patch, mask
Example #34
 def excludepoints(self):
     self.mask = np.bitwise_and(self.mask.astype(bool),
                                np.invert(self.mtmp)).astype(int)
     self.mask.shape = self.dims
Example #35
    def aug(self, img: np.ndarray, labels: np.ndarray) -> tuple:
        if self.rand_center:
            yc, xc = [
                int(random.uniform(-x, 2 * self.target_size + x))
                for x in self.mosaic_border
            ]
        else:
            yc, xc = [self.target_size, self.target_size]
        indices = [
            random.randint(0,
                           len(self.candidate_labels) - 1) for _ in range(3)
        ]

        img4 = np.ones(shape=(self.target_size * 2, self.target_size * 2, 3))
        img4 = (img4 * (np.array(self.pad_val)[None, None, :])).astype(
            np.uint8)
        labels4 = list()

        for i, index in enumerate([1] + indices):
            img_i = img if i == 0 else cv.imread(self.candidate_imgs[index])
            labels_i = labels if i == 0 else self.candidate_labels[index]
            img_i, ratio = self.scale_no_pad.scale_img(img_i)
            img_i, labels_i = self.color_gitter(img_i, labels_i)
            h, w = img_i.shape[:2]
            if i == 0:
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h
            elif i == 1:  # top right
                x1a, y1a, x2a, y2a = xc, max(yc - h,
                                             0), min(xc + w,
                                                     self.target_size * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom left
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(
                    self.target_size * 2, yc + h)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(
                    y2a - y1a, h)
            else:  # bottom right
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w,
                                                 self.target_size * 2), min(
                                                     self.target_size * 2,
                                                     yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
            img4[y1a:y2a, x1a:x2a] = img_i[y1b:y2b, x1b:x2b]
            padw = x1a - x1b
            padh = y1a - y1b
            if labels_i.shape[0] > 0:
                labels_i[:, [2, 4]] = ratio * labels_i[:, [2, 4]] + padw
                labels_i[:, [3, 5]] = ratio * labels_i[:, [3, 5]] + padh
                labels4.append(labels_i)
        if len(labels4):
            labels4 = np.concatenate(labels4, 0)
            # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop
            np.clip(labels4[:, 2:],
                    0,
                    2 * self.target_size,
                    out=labels4[:, 2:])  # use with random_affine
        else:
            img4, labels = self.affine(img4, np.zeros((0, 6),
                                                      dtype=np.float32))
            return img4, labels

        valid_index = np.bitwise_and((labels4[:, 4] - labels4[:, 2]) > 2,
                                     (labels4[:, 5] - labels4[:, 3]) > 2)
        labels4 = labels4[valid_index, :]
        img4, labels4 = self.affine(img4, labels4)
        return img4, labels4
Example #36
def FindLetter(img, show_result=False):
    ## image histogram equalization(first add bilateral blur) ##
    cnts_convex = findConvexHull(img,
                                 preprocess=True,
                                 edge_th_min=100,
                                 edge_th_max=200,
                                 show=show_result,
                                 name='1st')
    cnts_convex, _ = AreaFilter(cnts_convex,
                                area_th_min=0.001,
                                area_th_max=0.2)
    print "number of cnts_convex:", len(cnts_convex), '\n'
    img_convex = img.copy()
    cv2.drawContours(img_convex, cnts_convex, -1, (255, 255, 255), 1)

    cnts_convex_2nd = findConvexHull(img_convex,
                                     preprocess=False,
                                     edge_th_min=100,
                                     edge_th_max=200,
                                     show=show_result,
                                     name='2nd')
    cnts_convex_2nd, _ = AreaFilter(cnts_convex_2nd,
                                    area_th_min=0.001,
                                    area_th_max=0.2)
    print "number of cnts_convex_2nd:", len(cnts_convex_2nd)
    cnts_filter_2nd, cnts_exts_2nd = CalcExts(cnts_convex_2nd,
                                              filter=True,
                                              th=5.0)
    img_convex_2nd = img.copy()
    print "after aspect filtered:", len(cnts_filter_2nd), '\n'
    cv2.drawContours(img_convex_2nd, cnts_filter_2nd, -1, (0, 255, 0), 1)

    img_merge = img.copy()
    cnts_merge, cnts_exts_merge = MergeOverlap(cnts_filter_2nd, cnts_exts_2nd)
    cnts_merge_f, cnts_merge_f_exts = CalcExts(cnts_merge, filter=True, th=5.0)
    print "after aspect filtered:", len(cnts_merge_f)

    cnts_merge_f, cnts_merge_f_exts = AreaFilter(cnts_merge_f,
                                                 cnts_merge_f_exts,
                                                 area_th_min=0.4,
                                                 area_th_max=1.0)
    cv2.drawContours(img_merge, cnts_merge_f, -1, (0, 255, 0), 1)

    ## get individual letter crop image ##
    img_minbox = img.copy()
    img_crops = []
    img_letters = []
    cnts_minRect_orig = []
    cnts_minRect = []
    cnts_fit = []
    is_blocks = []
    num = 0
    for c in cnts_merge_f:
        ## find min Rect ##
        rect = cv2.minAreaRect(c)
        cnts_minRect_orig.append(rect)
        ## convert minRect to drawable box points ##
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        ## draw min Rect ##
        cv2.drawContours(img_minbox, [box], 0, (0, 0, 255), 2)
        ## crop letter ##
        img_crop = img.copy()
        l = np.min(box[:, 0])
        r = np.max(box[:, 0])
        t = np.min(box[:, 1])
        b = np.max(box[:, 1])
        buffer = 30
        length = max((r - l + buffer), (b - t + buffer),
                     int((max(rect[1]) + buffer)))
        center = np.asarray(rect[0], dtype=np.int)
        # crop x #
        if center[0] - length / 2 < 0:
            img_crop = img_crop[:, 0:2 * center[0]]
            tx = 0
        elif center[0] + length / 2 >= 640:
            img_crop = img_crop[:, center[0] - (640 - center[0]):640]
            tx = -(center[0] - (640 - center[0]))
        else:
            img_crop = img_crop[:,
                                center[0] - length / 2:center[0] + length / 2]
            tx = -(center[0] - length / 2)
        # crop y ##
        if center[1] - length / 2 < 0:
            img_crop = img_crop[0:2 * center[1], :]
            ty = 0
        elif center[1] + length / 2 >= 480:
            img_crop = img_crop[center[1] - (480 - center[1]):480, :]
            ty = -(center[1] - (480 - center[1]))
        else:
            img_crop = img_crop[center[1] - length / 2:center[1] +
                                length / 2, :]
            ty = -(center[1] - length / 2)
        img_crops.append(img_crop)
        cnts_minRect.append(ModifyMinRect(rect, tx, ty, 0, rect[2]))
        cnts_fit.append(TranslateContour(c, tx, ty))
        ## rotate letter image ##
        rows = img_crops[-1].shape[0]
        cols = img_crops[-1].shape[1]
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), cnts_minRect[-1][2],
                                    1)
        img_crops[-1] = cv2.warpAffine(img_crops[-1], M, (cols, rows))
        cnts_fit[-1] = RotateContour(cnts_fit[-1], (cols / 2, rows / 2),
                                     -cnts_minRect[-1][2])
        ## second crop ##
        center = np.asarray(cnts_minRect[-1][0], dtype=np.int)
        width = int(cnts_minRect[-1][1][0])
        height = int(cnts_minRect[-1][1][1])
        extra = 5
        x_start = center[0] - width / 2 - extra
        y_start = center[1] - height / 2 - extra
        if x_start < 0:
            x_start = 0
        if y_start < 0:
            y_start = 0
        img_crops[-1] = img_crops[-1][y_start:y_start + height + 2 * extra,
                                      x_start:x_start + width + 2 * extra]
        cnts_minRect[-1] = ModifyMinRect(cnts_minRect[-1], -x_start, -y_start,
                                         0, 0)
        cnts_fit[-1] = TranslateContour(cnts_fit[-1], -x_start, -y_start)

        ## calc hue histogram ##
        mask = np.zeros(img_crops[-1].shape[:2], np.uint8)
        cv2.drawContours(mask, [cnts_fit[-1]], -1, 255, -1)
        img_crop_blur = cv2.GaussianBlur(img_crops[-1], (3, 3), 0)
        img_crop_hsv = cv2.cvtColor(img_crop_blur, cv2.COLOR_BGR2HSV)
        first = np.array(img_crop_hsv[:, :, 0], dtype=np.int)
        second = np.array(img_crop_hsv[:, :, 1], dtype=np.int)
        third = np.array(img_crop_hsv[:, :, 2], dtype=np.int)
        img_mul_2 = np.asarray((1 * first + 0 * third) / 1, dtype=np.uint8)
        img_mul_1 = np.asarray((2 * first + 1 * third) / 3, dtype=np.uint8)
        hist_2 = cv2.calcHist([img_mul_2], [0], mask, [180], [0, 180])
        hist_1 = cv2.calcHist([img_mul_1], [0], mask, [205], [0, 205])
        hist_equal_2 = HistEqual(hist_2, 3)  #3
        hist_equal_1 = HistEqual(hist_1, 7)

        ## find histogram peaks and filter out small peak values ##
        shape = img_crop_hsv.shape
        peaks_2 = findPeak(hist_equal_2, shape[0] * shape[1], 0)
        peaks_1 = findPeak(hist_equal_1, shape[0] * shape[1], 0)

        if len(peaks_2) == 1:
            is_blocks.append(True)
        else:
            is_blocks.append(False)

        ltr_mask2 = np.bitwise_and((img_mul_2 >= peaks_2[0][0] - 4),
                                   (img_mul_2 <= peaks_2[0][0] + 4))  #4
        ltr_mask1 = np.bitwise_and((img_mul_1 >= peaks_1[0][0] - 4),
                                   (img_mul_1 <= peaks_1[0][0] + 4))
        ltr_mask_comb = np.bitwise_and(ltr_mask1, ltr_mask2)
        ltr_mask_comb = np.bitwise_and(ltr_mask_comb, mask)

        ## floodfill algorithm ##
        seedpt = FindSeedPt(ltr_mask_comb, num)
        flooded = img_crops[-1].copy()
        mask_flood = np.zeros((shape[0] + 2, shape[1] + 2), np.uint8)
        flags = 4 | cv2.FLOODFILL_FIXED_RANGE | (1 << 8)
        cv2.floodFill(flooded, mask_flood, seedpt, (255, 255, 255), (40, ) * 3,
                      (40, ) * 3, flags)
        ltr_mask_flooded = np.all(flooded == (255, 255, 255), axis=2)

        ltr_mask2 = np.array(ltr_mask2, dtype=np.int)
        ltr_mask1 = np.array(ltr_mask1, dtype=np.int)
        ltr_mask_flooded = np.asarray(ltr_mask_flooded, dtype='int')

        ltr_mask_comb = np.bitwise_and(ltr_mask_comb, ltr_mask_flooded)
        ltr_mask_comb = np.array(ltr_mask_comb, dtype=np.int)
        '''if False:#len(peaks) > 1:
            mask2 = np.bitwise_and((img_mul >= peaks[1][0] - 0), (img_mul <= peaks[1][0] + 0))
            mask2 = np.array(mask2, dtype=np.int)
            mask1 = mask1+mask2'''
        foreground = np.array(
            [0, 255],
            dtype=np.uint8)  ## letter:white(255), background:black(0)
        img_black_2 = foreground[ltr_mask2]
        img_black_1 = foreground[ltr_mask1]
        img_black_flooded = foreground[ltr_mask_flooded]
        img_black_comb = foreground[ltr_mask_comb]
        if num == -1:
            ## first letter mask
            plt.plot(hist_2, color='r')
            plt.plot(hist_equal_2, color='g')
            plt.xlim([0, 180])
            ## second letter mask
            plt.figure()
            plt.plot(hist_1, color='r')
            plt.plot(hist_equal_1, color='g')

            plt.xlim([0, 204])
            plt.ion()
            plt.show()

        img_black_2 = np.bitwise_and(img_black_2, mask)
        img_black_1 = np.bitwise_and(img_black_1, mask)
        img_black_flooded = np.bitwise_and(img_black_flooded, mask)
        cv2.circle(img_black_2, seedpt, 5, 0, -1)
        img_black_comb = np.bitwise_and(img_black_comb, mask)
        img_letters.append(img_black_comb)
        """
        if isblock:
            img_black_comb = img_black_comb/2
        """

        if show_result:
            cv2.imshow(img_name + 'letter_hue' + str(num), img_black_2)
            cv2.imshow(img_name + 'letter_mix' + str(num), img_black_1)
            cv2.imshow(img_name + 'letter_floodfill' + str(num),
                       img_black_flooded)
            cv2.imshow(img_name + 'letter_comb' + str(num), img_black_comb)
        #cv2.imwrite('./letters/'+img_name+'letter_hue'+str(num)+'.jpg', img_black_2)
        #cv2.imwrite('./letters/'+img_name+'letter_mix'+str(num)+'.jpg', img_black_1)
        cv2.imwrite(
            './letters/' + img_name + 'letter_comb' + str(num) + '.jpg',
            img_black_comb)
        num += 1
        ## cnts_merge_f loop end ##
    if show_result:
        cv2.imshow('image_convex', img_convex)
        cv2.imshow('image_convex_2nd', img_convex_2nd)
        cv2.imshow('image_merge', img_merge)
        cv2.imshow('image_minbox', img_minbox)
    cv2.imwrite('result.jpg', img_minbox)

    return img_letters, cnts_merge_f, cnts_fit, cnts_minRect_orig, cnts_minRect, is_blocks
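
A hedged aside on the masking pattern above: the letter mask is just two
threshold comparisons AND-ed together, then AND-ed with the contour mask.
A minimal, self-contained sketch (the array names and the peak value are
illustrative, not taken from the code above):

import numpy as np

img_mul = np.random.randint(0, 180, size=(32, 32), dtype=np.uint8)
contour_mask = np.zeros((32, 32), dtype=bool)
contour_mask[8:24, 8:24] = True
peak = 90  # a hypothetical histogram peak position

# keep pixels within +/-4 of the peak, restricted to the contour region
band = np.bitwise_and(img_mul >= peak - 4, img_mul <= peak + 4)
letter_mask = np.bitwise_and(band, contour_mask)
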
Example #37
0
    def get_view(self) -> "list[BlockType]":
        positions = view.View.get_visible_positions(self.direction)
        positions = np.array(positions, dtype=object)

        # Fill ra_agent on grid
        grid = deepcopy(self.world.grid)
        effects = deepcopy(self.world.effects)

        for iter_agent in self.world.agents:
            position = iter_agent.position
            grid[position.y][position.x] = iter_agent

        # Empty space to draw information
        sketch = np.zeros(positions.shape, dtype=np.uint64)

        for y, position_row in enumerate(positions):
            for x, position in enumerate(position_row):
                abs_position = position + self.position
                if self.world.map_contains(
                        abs_position):  # Position is inside the map
                    item = grid[abs_position.y][abs_position.x]

                    if item is None:  # Nothing occupies this position
                        sketch[y][x] = BlockType.Empty
                    else:

                        if isinstance(item, Agent):
                            if isinstance(
                                    item,
                                    BlueAgent):  # If the ra_agent is myself
                                sketch[y][x] = np.bitwise_or(
                                    int(sketch[y][x]), BlockType.BlueAgent)
                            elif isinstance(
                                    item, PurpleAgent
                            ):  # Or ra_agent is companion or opponent
                                sketch[y][x] = np.bitwise_or(
                                    int(sketch[y][x]), BlockType.PurpleAgent)
                            elif isinstance(
                                    item, GreenAgent
                            ):  # Or ra_agent is companion or opponent
                                sketch[y][x] = np.bitwise_or(
                                    int(sketch[y][x]), BlockType.GreenAgent)
                            elif isinstance(
                                    item, OrangeAgent
                            ):  # Or ra_agent is companion or opponent
                                sketch[y][x] = np.bitwise_or(
                                    int(sketch[y][x]), BlockType.OrangeAgent)
                        elif isinstance(item, items.Apple):
                            if isinstance(item,
                                          items.BlueApple):  # Blue apple
                                sketch[y][x] = np.bitwise_or(
                                    int(sketch[y][x]), BlockType.BlueApple)
                            elif isinstance(item,
                                            items.RedApple):  # Red apple
                                sketch[y][x] = np.bitwise_or(
                                    int(sketch[y][x]), BlockType.RedApple)

                    effect = effects[abs_position.y][abs_position.x]

                    if np.bitwise_and(int(effect), BlockType.Punish):
                        sketch[y][x] = np.bitwise_or(int(sketch[y][x]),
                                                     BlockType.Punish)

                else:
                    sketch[y][x] = np.bitwise_or(int(sketch[y][x]),
                                                 BlockType.OutBound)

        return sketch
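
A sketch of the flag-packing idea in get_view: several block types can share
one integer cell via bitwise OR, and membership is tested with bitwise AND.
The IntFlag values below are assumptions for illustration, not the project's
real BlockType enum:

import numpy as np
from enum import IntFlag

class Block(IntFlag):  # hypothetical stand-in for BlockType
    Empty = 0
    BlueAgent = 1
    Punish = 2
    OutBound = 4

cell = np.uint64(0)
cell = np.bitwise_or(int(cell), Block.BlueAgent)
cell = np.bitwise_or(int(cell), Block.Punish)
assert np.bitwise_and(int(cell), Block.Punish)        # flag is set
assert not np.bitwise_and(int(cell), Block.OutBound)  # flag is not set
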
Example #38
0
File: bit.py Project: nykh2010/-
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
a = np.arange(-5, 6)
print(a)
b = -a
print(b)
c = a ^ b
d = a.__xor__(b)
e = np.bitwise_xor(a, b)
print(c, d, e, sep='\n')
print(np.where(e < 0)[0])
f = np.arange(1, 21)
print(f)
g = f - 1
print(g)
h = f & g
i = f.__and__(g)
j = np.bitwise_and(f, g)
print(h, i, j, sep='\n')
print(np.where(j == 0)[0])
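
The two classic identities this file exercises, stated briefly: for nonzero
x in the printed range, x and -x have opposite sign bits, so x ^ -x is
negative; and n & (n - 1) clears the lowest set bit, so it is zero exactly
when n is a power of two. A one-liner built on the second identity:

import numpy as np

n = np.arange(1, 21)
print(n[np.bitwise_and(n, n - 1) == 0])  # [ 1  2  4  8 16]
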
Example #39
0
    ret, frame = cap.read()
    frame_og = frame
    l, a, b = cv2.split(frame)
    clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(1, 1))
    frame = clahe.apply(l)
    cv2.line(frame_og, (300, 513), (1900, 513), (0, 255, 0), 2)
    cv2.line(frame_og, (300, 482), (1900, 482), (0, 255, 0), 2)
    if ret:
        foregroundMask = bgSubtractor.apply(frame)
        foregroundMask = cv2.morphologyEx(foregroundMask, cv2.MORPH_OPEN, kernel)
        foregroundMask = cv2.erode(foregroundMask, kernel, iterations=3)
        foregroundMask = cv2.morphologyEx(foregroundMask, cv2.MORPH_CLOSE, kernel, iterations=6)
        foregroundMask = cv2.dilate(foregroundMask, kernel_di, iterations=7)
        foregroundMask = cv2.medianBlur(foregroundMask, 5)
        thresh = cv2.threshold(foregroundMask, 25, 255, cv2.THRESH_BINARY)[1]
        thresh1 = np.bitwise_and(thresh, thresh_MASK_1)
        thresh2 = np.bitwise_and(thresh, thresh_MASK_2)
        contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        try:
            hierarchy = hierarchy[0]
        except TypeError:  # findContours returned no hierarchy
            hierarchy = []
        for contour, hier in zip(contours, hierarchy):
            areas = [cv2.contourArea(c) for c in contours]
            max_index = np.argmax(areas)
            cnt = contours[max_index]
            (x, y, w, h) = cv2.boundingRect(cnt)
            cx = int((w / 2) + x)
            cy = int((h / 2) + y)
            if w > 10 and h > 10:
Example #40
0
    def __init__(self, roidb_file, dict_file, imdb_file, rpndb_file, data_dir,
                 split, num_im):
        imdb.__init__(self, roidb_file[:-3])

        # read in dataset from a h5 file and a dict (json) file
        self.im_h5 = h5py.File(os.path.join(data_dir, imdb_file), 'r')
        self.roi_h5 = h5py.File(os.path.join(data_dir, roidb_file),
                                'r')  # the GT

        # roidb metadata
        self.info = json.load(open(os.path.join(data_dir, dict_file), 'r'))
        self.im_refs = self.im_h5['images']  # image data reference
        im_scale = self.im_refs.shape[2]

        print('split==%i' % split)
        data_split = self.roi_h5['split'][:]
        self.split = split
        if split >= 0:
            split_mask = data_split == split  # current split
        else:  # -1
            split_mask = data_split >= 0  # all
        # get rid of images that do not have box
        valid_mask = self.roi_h5['img_to_first_box'][:] >= 0
        valid_mask = np.bitwise_and(split_mask, valid_mask)
        self._image_index = np.where(valid_mask)[
            0]  # contains the valid_mask index in the full dataset array
        if num_im > -1:
            self._image_index = self._image_index[:num_im]

        # override split mask
        split_mask = np.zeros_like(data_split).astype(bool)
        split_mask[self.image_index] = True  # build a split mask
        # if use all images
        # filter rpn roidb with split_mask
        if cfg.TRAIN.USE_RPN_DB:
            self.rpn_h5_fn = os.path.join(data_dir, rpndb_file)
            self.rpn_h5 = h5py.File(os.path.join(data_dir, rpndb_file), 'r')
            self.rpn_rois = self.rpn_h5['rpn_rois']
            self.rpn_scores = self.rpn_h5['rpn_scores']
            self.rpn_im_to_roi_idx = np.array(
                self.rpn_h5['im_to_roi_idx'][split_mask])
            self.rpn_num_rois = np.array(self.rpn_h5['num_rois'][split_mask])
        self.quadric_rois = self.rpn_h5['quadric_rois']
        self.roidb_idx_to_imdbidx = self.rpn_h5['im_to_imdb_idx'][
            split_mask]  # said before, no split_mask for that, maybe related to the sequence model
        self.roidb_to_scannet_oid = self.roi_h5['roi_idx_to_scannet_oid']
        self.impaths = self.im_h5['im_paths']
        self.make_im2seq()
        # h5 file is in 1-based index
        self.im_to_first_box = self.roi_h5['img_to_first_box'][split_mask]
        self.im_to_last_box = self.roi_h5['img_to_last_box'][split_mask]
        self.all_boxes = self.roi_h5['boxes_%i' %
                                     im_scale][:]  # will index later
        self.all_boxes[:, :2] = self.all_boxes[:, :2]
        widths = self.im_h5['image_widths'][()]
        widths = widths[self.roidb_idx_to_imdbidx]
        heights = self.im_h5['image_heights'][()]
        heights = heights[self.roidb_idx_to_imdbidx]
        self.im_sizes = np.vstack([widths, heights]).transpose(
        )  #self.im_h5['image_heights'][self.roidb_idx_to_imdbidx[split_mask]]]).transpose()

        assert (np.all(self.all_boxes[:, :2] >= 0))  # sanity check
        assert (np.all(self.all_boxes[:, 2:] > 0))  # no empty box

        # convert from xc, yc, w, h to x1, y1, x2, y2
        self.all_boxes[:, :2] = self.all_boxes[:, :2] - self.all_boxes[:,
                                                                       2:] / 2
        self.all_boxes[:, 2:] = self.all_boxes[:, :2] + self.all_boxes[:, 2:]
        self.labels = self.roi_h5['labels'][:, 0]
        #self.rel_geo_2d = self.roi_h5['rel_geo_2d']
        #self.rel_geo_3d = self.roi_h5['rel_geo_3d']

        # add background class
        self.info['label_to_idx']['__background__'] = 0
        self.class_to_ind = self.info['label_to_idx']
        self.ind_to_classes = sorted(self.class_to_ind,
                                     key=lambda k: self.class_to_ind[k])
        cfg.ind_to_class = self.ind_to_classes

        # load relation labels
        self.im_to_first_rel = self.roi_h5['img_to_first_rel'][split_mask]
        self.im_to_last_rel = self.roi_h5['img_to_last_rel'][split_mask]

        self._relations = self.roi_h5['relationships'][:]
        self._relation_predicates = self.roi_h5['predicates'][:, 0]
        assert (self.im_to_first_rel.shape[0] == self.im_to_last_rel.shape[0])
        assert (self._relations.shape[0] == self._relation_predicates.shape[0]
                )  # sanity check
        self.predicate_to_ind = self.info['predicate_to_idx']
        self.predicate_to_ind['__background__'] = 0
        self.ind_to_predicates = sorted(self.predicate_to_ind,
                                        key=lambda k: self.predicate_to_ind[k])

        cfg.ind_to_predicate = self.ind_to_predicates

        # Default to roidb handler
        self._roidb_handler = self.gt_roidb
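
The split/validity filtering in the constructor above reduces to AND-ing two
boolean masks and indexing with np.where. A self-contained sketch with toy
arrays (not the dataset's real fields):

import numpy as np

data_split = np.array([0, 0, 1, 2, 0, 1])
img_to_first_box = np.array([0, -1, 3, 7, 9, -1])

split_mask = data_split == 0          # keep the requested split
valid_mask = img_to_first_box >= 0    # keep images that have boxes
keep = np.bitwise_and(split_mask, valid_mask)
image_index = np.where(keep)[0]       # -> array([0, 4])
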
Пример #41
0
def run_extract1d(input_model, spectrace_ref_name, wavemap_ref_name,
                  specprofile_ref_name, speckernel_ref_name, subarray,
                  soss_filter, soss_kwargs):
    """Run the spectral extraction on NIRISS SOSS data.
    Parameters
    ----------
    input_model : DataModel
        The input DataModel.
    spectrace_ref_name : str
        Name of the spectrace reference file.
    wavemap_ref_name : str
        Name of the wavemap reference file.
    specprofile_ref_name : str
        Name of the specprofile reference file.
    speckernel_ref_name : str
        Name of the speckernel reference file.
    subarray : str
        Subarray on which the data were recorded; one of 'SUBSTRIP96',
        'SUBSTRIP256' or 'FULL'.
    soss_filter : str
        Filter in place during observations; one of 'CLEAR' or 'F277W'.
    soss_kwargs : dict
        Dictionary of keyword arguments passed from extract_1d_step.

    Returns
    -------
    output_model : DataModel
        DataModel containing the extracted spectra.
    """
    # Map the order integer names to the string names
    order_str_2_int = {f'Order {order}': order for order in [1, 2, 3]}

    # Read the reference files.
    spectrace_ref = datamodels.SpecTraceModel(spectrace_ref_name)
    wavemap_ref = datamodels.WaveMapModel(wavemap_ref_name)
    specprofile_ref = datamodels.SpecProfileModel(specprofile_ref_name)
    speckernel_ref = datamodels.SpecKernelModel(speckernel_ref_name)

    ref_files = dict()
    ref_files['spectrace'] = spectrace_ref
    ref_files['wavemap'] = wavemap_ref
    ref_files['specprofile'] = specprofile_ref
    ref_files['speckernel'] = speckernel_ref

    # Initialize the output model and output references (model of the detector and box aperture weights).
    output_model = datamodels.MultiSpecModel()
    output_model.update(input_model)  # Copy meta data from input to output.

    output_references = datamodels.SossExtractModel()
    output_references.update(input_model)

    all_tracemodels = dict()
    all_box_weights = dict()

    # Extract depending on the type of datamodels (Image or Cube)
    if isinstance(input_model, datamodels.ImageModel):

        log.info('Input is an ImageModel, processing a single integration.')

        # Initialize the theta, dx, dy transform parameters
        transform = soss_kwargs.pop('transform')

        # Received a single 2D image; set dtype to float64 and convert DQ to boolean mask.
        scidata = input_model.data.astype('float64')
        scierr = input_model.err.astype('float64')
        scimask = input_model.dq > 0  # Mask bad pixels with True.
        refmask = bitfield_to_boolean_mask(
            input_model.dq,
            ignore_flags=dqflags.pixel['REFERENCE_PIXEL'],
            flip_bits=True)

        # Perform background correction.
        bkg_mask = make_background_mask(scidata, width=40)
        scidata_bkg, col_bkg, npix_bkg = soss_background(scidata,
                                                         scimask,
                                                         bkg_mask=bkg_mask)

        # Determine the theta, dx, dy transform needed to match scidata trace position to ref file position.
        if transform is None:
            log.info('Solving for the transformation parameters.')

            # Unpack the expected order 1 & 2 positions.
            spectrace_ref = ref_files['spectrace']
            xref_o1 = spectrace_ref.trace[0].data['X']
            yref_o1 = spectrace_ref.trace[0].data['Y']
            xref_o2 = spectrace_ref.trace[1].data['X']
            yref_o2 = spectrace_ref.trace[1].data['Y']

            # Use the solver on the background subtracted image.
            if subarray == 'SUBSTRIP96' or soss_filter == 'F277W':
                # Use only order 1 to solve theta, dx, dy
                transform = solve_transform(scidata_bkg,
                                            scimask,
                                            xref_o1,
                                            yref_o1,
                                            soss_filter=soss_filter)
            else:
                transform = solve_transform(scidata_bkg,
                                            scimask,
                                            xref_o1,
                                            yref_o1,
                                            xref_o2,
                                            yref_o2,
                                            soss_filter=soss_filter)

        log.info(
            'Measured to Reference trace position transform: theta={:.4f}, dx={:.4f}, dy={:.4f}'
            .format(transform[0], transform[1], transform[2]))

        # Prepare the reference file arguments.
        ref_file_args = get_ref_file_args(ref_files, transform)

        # Make sure wavelength maps cover only parts where the centroid is inside the detector image
        if subarray != 'SUBSTRIP96':
            _mask_wv_map_centroid_outside(ref_file_args[0], ref_files,
                                          transform, scidata_bkg.shape[0])

        # Model the traces based on optics filter configuration (CLEAR or F277W)
        if soss_filter == 'CLEAR':

            # Model the image.
            kwargs = dict()
            kwargs['transform'] = transform
            kwargs['tikfac'] = soss_kwargs['tikfac']
            kwargs['n_os'] = soss_kwargs['n_os']
            kwargs['threshold'] = soss_kwargs['threshold']

            result = model_image(scidata_bkg, scierr, scimask, refmask,
                                 ref_file_args, **kwargs)
            tracemodels, soss_kwargs['tikfac'], logl = result

        else:
            # No model can be fit for F277W yet, missing throughput reference files.
            msg = f"No extraction possible for filter {soss_filter}."
            log.critical(msg)
            return None, None

        # Save trace models for output reference
        for order in tracemodels:
            # Save as a list (convert to array at the end)
            all_tracemodels[order] = [tracemodels[order]]

        # Use the trace models to perform a decontaminated extraction.
        kwargs = dict()
        kwargs['width'] = soss_kwargs['width']
        kwargs['bad_pix'] = soss_kwargs['bad_pix']

        result = extract_image(scidata_bkg, scierr, scimask, tracemodels,
                               ref_files, transform, subarray, **kwargs)
        wavelengths, fluxes, fluxerrs, npixels, box_weights = result

        # Save box weights for output reference
        for order in box_weights:
            # Save as a list (convert to array at the end)
            all_box_weights[order] = [box_weights[order]]

        # Copy spectral data for each order into the output model.
        for order in wavelengths.keys():

            table_size = len(wavelengths[order])

            out_table = np.zeros(table_size,
                                 dtype=datamodels.SpecModel().spec_table.dtype)
            out_table['WAVELENGTH'] = wavelengths[order]
            out_table['FLUX'] = fluxes[order]
            out_table['FLUX_ERROR'] = fluxerrs[order]
            out_table['DQ'] = np.zeros(table_size)
            out_table['BACKGROUND'] = col_bkg
            out_table['NPIXELS'] = npixels[order]

            spec = datamodels.SpecModel(spec_table=out_table)

            # Add integration number and spectral order
            spec.spectral_order = order_str_2_int[order]

            output_model.spec.append(spec)

        output_model.meta.soss_extract1d.width = kwargs['width']
        output_model.meta.soss_extract1d.tikhonov_factor = soss_kwargs[
            'tikfac']
        output_model.meta.soss_extract1d.delta_x = transform[1]
        output_model.meta.soss_extract1d.delta_y = transform[2]
        output_model.meta.soss_extract1d.theta = transform[0]
        output_model.meta.soss_extract1d.oversampling = soss_kwargs['n_os']
        output_model.meta.soss_extract1d.threshold = soss_kwargs['threshold']

    elif isinstance(input_model, datamodels.CubeModel):

        nimages = len(input_model.data)

        log.info(
            'Input is a CubeModel containing {} integrations.'.format(nimages))

        # Initialize the theta, dx, dy transform parameters
        transform = soss_kwargs.pop('transform')

        # Loop over images.
        for i in range(nimages):

            log.info('Processing integration {} of {}.'.format(i + 1, nimages))

            # Unpack the i-th image, set dtype to float64 and convert DQ to boolean mask.
            scidata = input_model.data[i].astype('float64')
            scierr = input_model.err[i].astype('float64')
            scimask = np.bitwise_and(input_model.dq[i],
                                     dqflags.pixel['DO_NOT_USE']).astype(bool)
            refmask = bitfield_to_boolean_mask(
                input_model.dq[i],
                ignore_flags=dqflags.pixel['REFERENCE_PIXEL'],
                flip_bits=True)

            # Perform background correction.
            bkg_mask = make_background_mask(scidata, width=40)
            scidata_bkg, col_bkg, npix_bkg = soss_background(scidata,
                                                             scimask,
                                                             bkg_mask=bkg_mask)

            # Determine the theta, dx, dy transform needed to match scidata trace position to ref file position.
            if transform is None:
                log.info('Solving for the transformation parameters.')

                # Unpack the expected order 1 & 2 positions.
                spectrace_ref = ref_files['spectrace']
                xref_o1 = spectrace_ref.trace[0].data['X']
                yref_o1 = spectrace_ref.trace[0].data['Y']
                xref_o2 = spectrace_ref.trace[1].data['X']
                yref_o2 = spectrace_ref.trace[1].data['Y']

                # Use the solver on the background subtracted image.
                if subarray == 'SUBSTRIP96' or soss_filter == 'F277W':
                    # Use only order 1 to solve theta, dx, dy
                    transform = solve_transform(scidata_bkg,
                                                scimask,
                                                xref_o1,
                                                yref_o1,
                                                soss_filter=soss_filter)
                else:
                    transform = solve_transform(scidata_bkg,
                                                scimask,
                                                xref_o1,
                                                yref_o1,
                                                xref_o2,
                                                yref_o2,
                                                soss_filter=soss_filter)

            log.info(
                'Measured to Reference trace position transform: theta={:.4f}, dx={:.4f}, dy={:.4f}'
                .format(transform[0], transform[1], transform[2]))

            # Prepare the reference file arguments.
            ref_file_args = get_ref_file_args(ref_files, transform)

            # Make sure wavelength maps cover only parts where the centroid is inside the detector image
            _mask_wv_map_centroid_outside(ref_file_args[0], ref_files,
                                          transform, scidata_bkg.shape[0])

            # Model the traces based on optics filter configuration (CLEAR or F277W)
            if soss_filter == 'CLEAR':

                # Model the image.
                kwargs = dict()
                kwargs['transform'] = transform
                kwargs['tikfac'] = soss_kwargs['tikfac']
                kwargs['n_os'] = soss_kwargs['n_os']
                kwargs['threshold'] = soss_kwargs['threshold']

                result = model_image(scidata_bkg, scierr, scimask, refmask,
                                     ref_file_args, **kwargs)
                tracemodels, soss_kwargs['tikfac'], logl = result

            else:
                # No model can be fit for F277W yet, missing throughput reference files.
                msg = f"No extraction possible for filter {soss_filter}."
                log.critical(msg)
                return None, None

            # Save trace models for output reference
            for order in tracemodels:
                # Initialize a list for first integration
                if i == 0:
                    all_tracemodels[order] = []
                all_tracemodels[order].append(tracemodels[order])

            # Use the trace models to perform a de-contaminated extraction.
            kwargs = dict()
            kwargs['width'] = soss_kwargs['width']
            kwargs['bad_pix'] = soss_kwargs['bad_pix']

            result = extract_image(scidata_bkg, scierr, scimask, tracemodels,
                                   ref_files, transform, subarray, **kwargs)
            wavelengths, fluxes, fluxerrs, npixels, box_weights = result

            # Save box weights for output reference
            for order in box_weights:
                # Initialize a list for first integration
                if i == 0:
                    all_box_weights[order] = []
                all_box_weights[order].append(box_weights[order])

            # Copy spectral data for each order into the output model.
            for order in wavelengths.keys():

                table_size = len(wavelengths[order])

                out_table = np.zeros(
                    table_size, dtype=datamodels.SpecModel().spec_table.dtype)
                out_table['WAVELENGTH'] = wavelengths[order]
                out_table['FLUX'] = fluxes[order]
                out_table['FLUX_ERROR'] = fluxerrs[order]
                out_table['DQ'] = np.zeros(table_size)
                out_table['BACKGROUND'] = col_bkg
                out_table['NPIXELS'] = npixels[order]

                spec = datamodels.SpecModel(spec_table=out_table)

                # Add integration number and spectral order
                spec.spectral_order = order_str_2_int[order]
                spec.int_num = i + 1  # integration number starts at 1, not 0 like python

                output_model.spec.append(spec)

            output_model.meta.soss_extract1d.width = kwargs['width']
            output_model.meta.soss_extract1d.tikhonov_factor = soss_kwargs[
                'tikfac']
            output_model.meta.soss_extract1d.delta_x = transform[1]
            output_model.meta.soss_extract1d.delta_y = transform[2]
            output_model.meta.soss_extract1d.theta = transform[0]
            output_model.meta.soss_extract1d.oversampling = soss_kwargs['n_os']
            output_model.meta.soss_extract1d.threshold = soss_kwargs[
                'threshold']

    else:
        msg = "Only ImageModel and CubeModel are implemented for the NIRISS SOSS extraction."
        log.critical(msg)
        return None, None

    # Save output references
    for order in all_tracemodels:
        # Convert from list to array
        tracemod_ord = np.array(all_tracemodels[order])
        # Save
        order_int = order_str_2_int[order]
        setattr(output_references, f'order{order_int}', tracemod_ord)

    for order in all_box_weights:
        # Convert from list to array
        box_w_ord = np.array(all_box_weights[order])
        # Save
        order_int = order_str_2_int[order]
        setattr(output_references, f'aperture{order_int}', box_w_ord)

    return output_model, output_references
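
A hedged sketch of the per-integration DQ masking used above: a pixel is
masked when its data-quality word has the DO_NOT_USE bit set. The flag value
1 follows the usual JWST convention, but treat it as an assumption here:

import numpy as np

DO_NOT_USE = 1  # assumed bit value of dqflags.pixel['DO_NOT_USE']
dq = np.array([[0, 1, 2], [3, 0, 5]], dtype=np.uint32)
scimask = np.bitwise_and(dq, DO_NOT_USE).astype(bool)
# scimask -> [[False,  True, False],
#             [ True, False,  True]]
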
Example #42
0
            # pointx_right, pointy_right,spilt_point = drop_failing(file, position='right')
            pointx_left, pointy_left, spilt_point = drop_failing(file, position='right')
            pointx_left_copy = copy.copy(pointx_left)
            pointy_left_copy = copy.copy(pointy_left)
            pointx_left.insert(0, 0)
            pointy_left.insert(0, 0)
            pointx_left.append(0)
            pointy_left.append(row - 1)
            pointx_left.append(0)
            pointy_left.append(0)
            img = np.zeros((row, col))
            rr, cc = polygon(pointy_left, pointx_left)
            img[rr, cc] = 1
            img = img.astype(int)
            image = image.astype(int)
            img = np.bitwise_and(np.array(img), np.array(image))
            left = np.sum(img)
            img2 = image - img

            right = np.sum(img2)
            ratio1 = left / right
            ratio2 = -1
            print('left', left, 'right', right, 'ratio', ratio1)
            if ratio1 < 0.4 or ratio1 > 2:
                print('Left water start....')
                pointx_left, pointy_left, spilt_point2 = drop_failing(file, position='left', rotate=False)
                pointx_left_copy = copy.copy(pointx_left)
                pointy_left_copy = copy.copy(pointy_left)
                pointx_left.insert(0, 0)
                pointy_left.insert(0, 0)
                pointx_left.append(0)
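
A sketch of the polygon-overlap measurement this fragment performs: rasterize
a polygon with skimage.draw.polygon, AND it with a binary image, and compare
the pixel counts on either side (toy shapes, not the original contours):

import numpy as np
from skimage.draw import polygon

image = np.ones((10, 10), dtype=int)
poly_mask = np.zeros((10, 10), dtype=int)
rr, cc = polygon([0, 0, 9], [0, 9, 0])  # a triangle
poly_mask[rr, cc] = 1

left = np.sum(np.bitwise_and(poly_mask, image))   # pixels inside the polygon
right = np.sum(image) - left                      # pixels outside it
ratio = left / right
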
Example #43
0

start = time.time()

warp_tar_img = cv2.imread('image/warped_target.png')

warp_ref_img = cv2.imread('image/warped_reference.png')
mask = Mask(warp_tar_img, warp_ref_img)
seam_mask = cv2.imread('image/seam_mask.png', cv2.IMREAD_GRAYSCALE)
ref_region_mask = cv2.imread('image/result_from_reference.png',
                             cv2.IMREAD_GRAYSCALE)
tar_region_mask = cv2.bitwise_and(cv2.bitwise_not(ref_region_mask), mask.tar)
mask.tar_result = tar_region_mask
mask.ref_result = ref_region_mask
slic = MaskedSLIC(warp_tar_img,
                  np.bitwise_and(mask.tar_result, mask.overlap),
                  region_size=20,
                  compactness=5)

median_tar_img = np.copy(warp_tar_img)
for i in range(len(slic.labels_position)):
    if i != 0:
        rows, cols = slic.labels_position[i]
        median_tar_img[rows, cols] = np.median(median_tar_img[rows, cols],
                                               axis=0)

median_ref_img = np.copy(warp_ref_img)
for i in range(len(slic.labels_position)):
    if i != 0:
        rows, cols = slic.labels_position[i]
        median_ref_img[rows, cols] = np.median(median_ref_img[rows, cols],
Example #44
0
def dense_to_sparse(depth, num_samples):
    """Randomly keep roughly `num_samples` pixels that have valid depth."""
    mask_keep = depth > 0
    n_keep = np.count_nonzero(mask_keep)
    # was `self.num_samples`, a bug in a free function that has no `self`
    prob = float(num_samples) / n_keep
    return np.bitwise_and(mask_keep,
                          np.random.uniform(0, 1, depth.shape) < prob)
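
A usage sketch for the sampler above (after the self.num_samples fix), with a
toy depth map; roughly num_samples valid pixels survive:

import numpy as np

depth = np.random.rand(4, 4) * (np.random.rand(4, 4) > 0.3)
sample_mask = dense_to_sparse(depth, num_samples=5)
sparse_depth = depth * sample_mask
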
Example #45
0
def bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=np.uint8):
    """
    bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=numpy.uint8)
    Interprets an array of bit flags and converts it to a "binary" mask array.
    This function is particularly useful to convert data quality arrays to
    binary masks.

    Parameters
    ----------
    bitmask : numpy.ndarray
        An array of bit flags. Values different from zero are interpreted as
        "bad" values and values equal to zero are considered as "good" values.
        However, see `ignore_bits` parameter on how to ignore some bits
        in the `bitmask` array.

    ignore_bits : int, str, None
        An integer bit mask, `None`, or a comma- or '+'-separated
        string list of integer bit values that indicate what bits in the
        input `bitmask` should be *ignored* (i.e., zeroed). If `ignore_bits`
        is a `str` and if it is prepended with '~', then the meaning
        of `ignore_bits` parameters will be reversed: now it will be
        interpreted as a list of bits to be *used* (or *not ignored*) when
        deciding what elements of the input `bitmask` array are "bad".

        The `ignore_bits` parameter is the integer sum of all of the bit
        values from the input `bitmask` array that should be considered
        "good" when creating the output binary mask. For example, if
        values in the `bitmask` array can be combinations
        of 1, 2, 4, and 8 flags and one wants to consider that
        values having *only* bit flags 2 and/or 4 as being "good",
        then `ignore_bits` should be set to 2+4=6. Then a `bitmask` element
        having values 2,4, or 6 will be considered "good", while an
        element with a value, e.g., 1+2=3, 4+8=12, etc. will be interpreted
        as "bad".

        Alternatively, one can enter a comma- or '+'-separated list
        of integer bit flags that should be added to obtain the
        final "good" bits. For example, both ``4,8`` and ``4+8``
        are equivalent to setting `ignore_bits` to 12.

        See :py:func:`interpret_bits_value` for examples.

        | Setting `ignore_bits` to `None` effectively will interpret
          all `bitmask` elements as "good" regardless of their value.

        | Setting `ignore_bits` to 0 effectively will assume that all
          non-zero elements in the input `bitmask` array are to be
          interpreted as "bad".

        | In order to reverse the meaning of the `ignore_bits`
          parameter from indicating bits in the values of `bitmask`
          elements that should be ignored when deciding which elements
          are "good" (these are the elements that are zero after ignoring
          `ignore_bits`), to indicating the bits should be used
          exclusively in deciding whether a `bitmask` element is "good",
          prepend '~' to the string value. For example, in order to use
          **only** (or **exclusively**) flags 4 and 8 (2nd and 3rd bits)
          in the values of the input `bitmask` array when deciding whether
          or not that element is "good", set `ignore_bits` to ``~4+8``,
          or ``~4,8 To obtain the same effect with an `int` input value
          (except for 0), enter -(4+8+1)=-9. Following this convention,
          a `ignore_bits` string value of ``'~0'`` would be equivalent to
          setting ``ignore_bits=None``.

    good_mask_value : int, bool (Default = 1)
        This parameter is used to derive the values that will be assigned to
        the elements in the output `mask` array that correspond to the "good"
        flags (that are 0 after zeroing bits specified by `ignore_bits`)
        in the input `bitmask` array. When `good_mask_value` is non-zero or
        `True` then values in the output mask array corresponding to "good"
        bit flags in `bitmask` will be 1 (or `True` if `dtype` is `bool`) and
        values of corresponding to "bad" flags will be 0. When
        `good_mask_value` is zero or `False` then values in the output mask
        array corresponding to "good" bit flags in `bitmask` will be 0
        (or `False` if `dtype` is `bool`) and values of corresponding
        to "bad" flags will be 1.

    dtype : data-type (Default = numpy.uint8)
        The desired data-type for the output binary mask array.

    Returns
    -------
    mask : numpy.ndarray
        Returns an array whose elements can have two possible values,
        e.g., 1 or 0 (or `True` or `False` if `dtype` is `bool`) according to
        values of to the input `bitmask` elements, `ignore_bits` parameter,
        and the `good_mask_value` parameter.

    Examples
    --------
        >>> import numpy as np
        >>> from stsci.tools import bitmask
        >>> dqbits = np.asarray([[0,0,1,2,0,8,12,0],[10,4,0,0,0,16,6,0]])
        >>> bitmask.bitmask2mask(dqbits, ignore_bits=0, dtype=int)
        array([[1, 1, 0, 0, 1, 0, 0, 1],
               [0, 0, 1, 1, 1, 0, 0, 1]])
        >>> bitmask.bitmask2mask(dqbits, ignore_bits=0, dtype=bool)
        array([[ True,  True, False, False,  True, False, False,  True],
               [False, False,  True,  True,  True, False, False,  True]], dtype=bool)
        >>> bitmask.bitmask2mask(dqbits, ignore_bits=6, good_mask_value=0, dtype=int)
        array([[0, 0, 1, 0, 0, 1, 1, 0],
               [1, 0, 0, 0, 0, 1, 0, 0]])
        >>> bitmask.bitmask2mask(dqbits, ignore_bits=~6, good_mask_value=0, dtype=int)
        array([[0, 0, 0, 1, 0, 0, 1, 0],
               [1, 1, 0, 0, 0, 0, 1, 0]])
        >>> bitmask.bitmask2mask(dqbits, ignore_bits='~(2+4)', good_mask_value=0, dtype=int)
        array([[0, 0, 0, 1, 0, 0, 1, 0],
               [1, 1, 0, 0, 0, 0, 1, 0]])

    """

    ignore_bits = interpret_bits_value(ignore_bits)

    if good_mask_value:
        mask = np.ones_like(bitmask, dtype=dtype)
        if ignore_bits is None:
            return mask
        bad_mask_value = 0

    else:
        mask = np.zeros_like(bitmask, dtype=dtype)
        if ignore_bits is None:
            return mask
        bad_mask_value = 1

    mask[np.bitwise_and(bitmask, ~ignore_bits) > 0] = bad_mask_value

    return mask
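
The core test in bitmask2mask is a single vectorized expression: an element
is "bad" when any non-ignored bit survives. Restated on the docstring's own
example data:

import numpy as np

dqbits = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0], [10, 4, 0, 0, 0, 16, 6, 0]])
ignore_bits = 6
bad = np.bitwise_and(dqbits, ~ignore_bits) > 0  # any non-ignored bit set?
mask = np.where(bad, 0, 1)  # the good_mask_value=1 convention
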
Example #46
0
def plot_movie_typeE(f, pp):
    #all_grp = f['Asymetric/vessels_after_adaption']
    all_grp = f['adaption']
    x_min = 20
    x_max = 50
    max_iterlength = len(all_grp.keys()) - 5
    i = 1
    k = 0
    while i < max_iterlength:
        #adaption_grp= f['/Asymetric/vessels_after_adaption/vessels_after_adaption_%i'%i]
        adaption_grp = f['/adaption/vessels_after_adaption_%i' % i]
        index_of_artery = np.bitwise_and(
            np.asarray(adaption_grp['edges/flags']), ku.ARTERY) > 0
        index_of_capillary = np.bitwise_and(
            np.asarray(adaption_grp['edges/flags']), ku.CAPILLARY) > 0
        index_of_vein = np.bitwise_and(np.asarray(adaption_grp['edges/flags']),
                                       ku.VEIN) > 0

        shearforce = adaption_grp['edges/shearforce']
        shearforce = np.multiply(shearforce, 10000)
        shearforce = np.log10(shearforce)
        diameter = np.multiply(adaption_grp['edges/radius'], 2)
        node_a_index = adaption_grp['edges/node_a_index']
        node_b_index = adaption_grp['edges/node_b_index']

        pressure_at_nodes = adaption_grp['nodes/pressure']

        pressure_at_vessel = []

        for NodeA, NodeB in itertools.izip(node_a_index, node_b_index):
            pressure_at_vessel.append(
                (pressure_at_nodes[NodeA] + pressure_at_nodes[NodeB]) / 2 *
                7.5)
        pressure_at_vessel = np.array(pressure_at_vessel)
        fig = plt.figure()
        plt.subplot(5, 1, 1)
        a = pressure_at_vessel[index_of_artery]
        sig1a = shearforce[index_of_artery]
        plt.plot(a, sig1a, 'or')
        a = pressure_at_vessel[index_of_capillary]
        sig1c = shearforce[index_of_capillary]
        plt.plot(a, sig1c, 'yD')
        a = pressure_at_vessel[index_of_vein]
        sig1v = shearforce[index_of_vein]
        plt.plot(a, sig1v, 'bv')
        #    plt.semilogy(pressure_at_vessel[index_of_artery],shearforce[index_of_artery],'ro')
        #    plt.semilogy(pressure_at_vessel[index_of_capillary],shearforce[index_of_capillary],'yD')
        #    plt.semilogy(pressure_at_vessel[index_of_vein],shearforce[index_of_vein],'bv')
        plt.grid()
        plt.xlabel('pressure/ mmHg')
        plt.xlim([x_min, x_max])
        plt.ylim([-2, 2.5])
        plt.ylabel('sig1')

        plt.subplot(5, 1, 2)
        signal2 = 100 - 86 * np.exp(
            -5000 * np.log10(np.log10(pressure_at_vessel))**5.4)
        signal2 = -np.log10(signal2)
        pa = pressure_at_vessel[index_of_artery]
        sig2a = signal2[index_of_artery]
        plt.plot(pa, sig2a, 'or')
        pc = pressure_at_vessel[index_of_capillary]
        sig2c = signal2[index_of_capillary]
        plt.plot(pc, sig2c, 'yD')
        pv = pressure_at_vessel[index_of_vein]
        sig2v = signal2[index_of_vein]
        plt.plot(pv, sig2v, 'bv')
        #plt.subplot(4,1,3)
        #    plt.plot(signal2[index_of_artery],diameter[index_of_artery],'ro')
        #    plt.plot(signal2[index_of_capillary],diameter[index_of_capillary],'yD')
        #    plt.plot(signal2[index_of_vein],diameter[index_of_vein],'bv')
        plt.grid()
        #plt.legend(['ART','CAP','VEN'])
        plt.xlabel('pressure/ mmHg')
        plt.xlim([x_min, x_max])
        plt.ylim([-1.9, -1.3])
        plt.ylabel('sig2')
        #### signal 3
        plt.subplot(5, 1, 3)
        metabolic = adaption_grp['edges/metabolicSignal']
        conductive = adaption_grp['edges/conductivitySignal']
        flow = adaption_grp['edges/flow']
        flow = np.multiply(flow, 60. / 1000000.)
        #    plt.semilogx(flow[index_of_artery],metabolic[index_of_artery],'or')
        #    plt.semilogx(flow[index_of_capillary],metabolic[index_of_capillary],'yD')
        #    plt.semilogx(flow[index_of_vein],metabolic[index_of_vein],'bv')
        plt.plot(pa, metabolic[index_of_artery], 'or')
        plt.plot(pc, metabolic[index_of_capillary], 'yD')
        plt.plot(pv, metabolic[index_of_vein], 'bv')

        plt.ylabel('sig3')
        plt.xlim([x_min, x_max])
        plt.ylim([0, 3.5])
        plt.grid()

        #### signal 4
        plt.subplot(5, 1, 4)
        #    plt.semilogx(flow[index_of_artery],conductive[index_of_artery],'or')
        #    plt.semilogx(flow[index_of_capillary],conductive[index_of_capillary],'yD')
        #    plt.semilogx(flow[index_of_vein],conductive[index_of_vein],'bv')
        plt.plot(pa, conductive[index_of_artery], 'or')
        plt.plot(pc, conductive[index_of_capillary], 'yD')
        plt.plot(pv, conductive[index_of_vein], 'bv')

        plt.ylabel('sig4')
        plt.xlabel('pressure/mmHg')
        plt.xlim([x_min, x_max])
        plt.ylim([0, 3])
        plt.grid()
        ### sum
        plt.subplot(5, 1, 5)
        plt.plot(
            pa, sig1a + sig2a + metabolic[index_of_artery] +
            conductive[index_of_artery], 'or')
        plt.plot(
            pc, sig1c + sig2c + metabolic[index_of_capillary] +
            conductive[index_of_capillary], 'yD')
        plt.plot(
            pv, sig1v + sig2v + metabolic[index_of_vein] +
            conductive[index_of_vein], 'bv')

        plt.ylabel("sum")
        plt.xlim([x_min, x_max])
        plt.ylim([-3, 5.])
        plt.grid()
        plt.savefig("mov_%03i.png" % k)
        plt.close()
        #os.system("python2 /localdisk/thierry/tumorcode/py/krebs/povrayRenderVessels.py ../test_configs.h5 /Asymetric/vessels_after_adaption/vessels_after_adaption_%03i"%i)
        os.system(
            "python2 /daten/tumorcode/py/krebs/povrayRenderVessels.py /daten/localdisk/adaption_project/vessels-q2d-8mm-P6-typeE-9x3L130-sample00_adption_p_typeE.h5 /adaption/vessels_after_adaption_%i"
            % i)
        i = i + 100
        k = k + 1
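
Selecting vessels by type above boils down to flag tests on an integer array;
a toy version (the flag values stand in for ku.ARTERY and friends):

import numpy as np

ARTERY, VEIN, CAPILLARY = 1, 2, 4  # placeholder flag bits
flags = np.array([1, 2, 4, 5, 2, 1])
radii = np.array([4.0, 6.0, 1.0, 3.5, 5.0, 4.2])
index_of_artery = np.bitwise_and(flags, ARTERY) > 0
print(radii[index_of_artery])  # [4.  3.5 4.2]
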
Example #47
0
def iter_fit_all(xy,uv,xyindx,uvindx,
                    xyorig=None,uvorig=None,
                    mode='rscale',nclip=3,sigma=3.0,minobj=3,
                    center=None,verbose=False):

    if not isinstance(xy,np.ndarray):
        # cast input list as numpy ndarray for fitting
        xy = np.array(xy)
    if not isinstance(uv,np.ndarray):
        # cast input list as numpy ndarray for fitting
        uv = np.array(uv)

    if xy.shape[0] < nclip:
        log.warning('The number of sources for the fit < number of clipping '
                    'iterations. Resetting number of clipping iterations to 0.')
        nclip = 0

    if center is None:
        xcen = uv[:,0].mean()
        ycen = uv[:,1].mean()
        center = [xcen,ycen]
    xy -= center
    uv -= center

    fit = fit_all(xy, uv, mode=mode, center=center, verbose=verbose)
    npts = xy.shape[0]
    npts0 = 0
    if nclip is None:
        nclip = 0
    # define index to initially include all points
    for n in range(nclip):
        if 'resids' in fit:
            resids = fit['resids']
        else:
            resids = compute_resids(xy, uv, fit)

        # redefine what pixels will be included in next iteration
        whtfrac = npts/(npts-npts0-1.0)
        cutx = sigma*(fit['rms'][0]*whtfrac)
        cuty = sigma*(fit['rms'][1]*whtfrac)

        goodx = (np.abs(resids[:,0]) < cutx)
        goody = (np.abs(resids[:,1]) < cuty)
        goodpix = np.bitwise_and(goodx,goody)

        if np.count_nonzero(goodpix) > 2:
            npts0 = npts - goodpix.shape[0]
            xy = xy[goodpix]
            uv = uv[goodpix]
            xyindx = xyindx[goodpix]
            uvindx = uvindx[goodpix]
            if xyorig is not None:
                xyorig = xyorig[goodpix]
            if uvorig is not None:
                uvorig = uvorig[goodpix]
            fit = fit_all(xy, uv, mode=mode, center=center, verbose=False)
            del goodpix,goodx,goody
        else:
            break

    fit['img_coords'] = xy
    fit['ref_coords'] = uv
    fit['img_indx'] = xyindx
    fit['ref_indx'] = uvindx
    fit['img_orig_xy'] = xyorig
    fit['ref_orig_xy'] = uvorig
    fit['fit_xy'] = np.dot(xy - fit['offset'],
                           np.linalg.inv(fit['fit_matrix'])) + center

    return fit
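
The clipping step above combines per-axis residual cuts with a boolean AND;
in isolation, with toy residuals and cut values:

import numpy as np

resids = np.array([[0.1, -0.2], [2.5, 0.0], [0.3, 3.1]])
cutx = cuty = 1.0
goodx = np.abs(resids[:, 0]) < cutx
goody = np.abs(resids[:, 1]) < cuty
goodpix = np.bitwise_and(goodx, goody)  # [ True, False, False]
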
Example #48
0
    def loadRHS(self, filepath, load_binary):
        t1 = time.time()
        data = dict()
        print('Loading intan data')

        f = open(filepath, 'rb')
        filesize = os.fstat(f.fileno()).st_size - f.tell()

        # Check 'magic number' at beginning of file to make sure this is an Intan
        # Technologies RHS2000 data file.
        magic_number = np.fromfile(f, np.dtype('u4'), 1)
        if magic_number != int('d69127ac', 16):
            raise IOError('Unrecognized file type.')

        # Read version number.
        data_file_main_version_number = np.fromfile(f, 'i2', 1)[0]
        data_file_secondary_version_number = np.fromfile(f, 'i2', 1)[0]

        print('Reading Intan Technologies RHS2000 Data File, Version ', data_file_main_version_number, \
            data_file_secondary_version_number)

        num_samples_per_data_block = 128

        # Read information of sampling rate and amplifier frequency settings.
        sample_rate = np.fromfile(f, 'f4', 1)[0]
        dsp_enabled = np.fromfile(f, 'i2', 1)[0]
        actual_dsp_cutoff_frequency = np.fromfile(f, 'f4', 1)[0]
        actual_lower_bandwidth = np.fromfile(f, 'f4', 1)[0]
        actual_lower_settle_bandwidth = np.fromfile(f, 'f4', 1)[0]
        actual_upper_bandwidth = np.fromfile(f, 'f4', 1)[0]

        desired_dsp_cutoff_frequency = np.fromfile(f, 'f4', 1)[0]
        desired_lower_bandwidth = np.fromfile(f, 'f4', 1)[0]
        desired_lower_settle_bandwidth = np.fromfile(f, 'f4', 1)[0]
        desired_upper_bandwidth = np.fromfile(f, 'f4', 1)[0]

        # This tells us if a software 50/60 Hz notch filter was enabled during the data acquisition
        notch_filter_mode = np.fromfile(f, 'i2', 1)[0]
        notch_filter_frequency = 0
        if notch_filter_mode == 1:
            notch_filter_frequency = 50
        elif notch_filter_mode == 2:
            notch_filter_frequency = 60

        desired_impedance_test_frequency = np.fromfile(f, 'f4', 1)[0]
        actual_impedance_test_frequency = np.fromfile(f, 'f4', 1)[0]

        amp_settle_mode = np.fromfile(f, 'i2', 1)[0]
        charge_recovery_mode = np.fromfile(f, 'i2', 1)[0]

        stim_step_size = np.fromfile(f, 'f4', 1)[0]
        charge_recovery_current_limit = np.fromfile(f, 'f4', 1)[0]
        charge_recovery_target_voltage = np.fromfile(f, 'f4', 1)[0]

        # Place notes in data structure
        notes = {
            'note1': _fread_QString(f),
            'note2': _fread_QString(f),
            'note3': _fread_QString(f)
        }

        # See if dc amplifier was saved
        dc_amp_data_saved = np.fromfile(f, 'i2', 1)[0]

        # Load eval board mode
        eval_board_mode = np.fromfile(f, 'i2', 1)[0]

        reference_channel = _fread_QString(f)

        # Place frequency-related information in data structure.
        frequency_parameters = {
            'amplifier_sample_rate': sample_rate * pq.Hz,
            'board_adc_sample_rate': sample_rate * pq.Hz,
            'board_dig_in_sample_rate': sample_rate * pq.Hz,
            'desired_dsp_cutoff_frequency': desired_dsp_cutoff_frequency,
            'actual_dsp_cutoff_frequency': actual_dsp_cutoff_frequency,
            'dsp_enabled': dsp_enabled,
            'desired_lower_bandwidth': desired_lower_bandwidth,
            'desired_lower_settle_bandwidth': desired_lower_settle_bandwidth,
            'actual_lower_bandwidth': actual_lower_bandwidth,
            'actual_lower_settle_bandwidth': actual_lower_settle_bandwidth,
            'desired_upper_bandwidth': desired_upper_bandwidth,
            'actual_upper_bandwidth': actual_upper_bandwidth,
            'notch_filter_frequency': notch_filter_frequency,
            'desired_impedance_test_frequency':
            desired_impedance_test_frequency,
            'actual_impedance_test_frequency': actual_impedance_test_frequency
        }

        stim_parameters = {
            'stim_step_size': stim_step_size,
            'charge_recovery_current_limit': charge_recovery_current_limit,
            'charge_recovery_target_voltage': charge_recovery_target_voltage,
            'amp_settle_mode': amp_settle_mode,
            'charge_recovery_mode': charge_recovery_mode
        }

        # Define data structure for spike trigger settings.
        spike_trigger_struct = {
            'voltage_trigger_mode': {},
            'voltage_threshold': {},
            'digital_trigger_channel': {},
            'digital_edge_polarity': {}
        }

        spike_triggers = []

        # Define data structure for data channels.
        channel_struct = {
            'native_channel_name': {},
            'custom_channel_name': {},
            'native_order': {},
            'custom_order': {},
            'board_stream': {},
            'chip_channel': {},
            'port_name': {},
            'port_prefix': {},
            'port_number': {},
            'electrode_impedance_magnitude': {},
            'electrode_impedance_phase': {}
        }

        # Create structure arrays for each type of data channel.
        amplifier_channels = []
        board_adc_channels = []
        board_dac_channels = []
        board_dig_in_channels = []
        board_dig_out_channels = []

        amplifier_index = 0
        board_adc_index = 0
        board_dac_index = 0
        board_dig_in_index = 0
        board_dig_out_index = 0

        # Read signal summary from data file header.

        number_of_signal_groups = np.fromfile(f, 'i2', 1)[0]
        print('Signal groups: ', number_of_signal_groups)

        for signal_group in range(number_of_signal_groups):
            signal_group_name = _fread_QString(f)
            signal_group_prefix = _fread_QString(f)
            signal_group_enabled = np.fromfile(f, 'i2', 1)[0]
            signal_group_num_channels = np.fromfile(f, 'i2', 1)[0]
            signal_group_num_amp_channels = np.fromfile(f, 'i2', 1)[0]

            if signal_group_num_channels > 0 and signal_group_enabled > 0:
                new_channel = {}
                new_trigger_channel = {}

                new_channel['port_name'] = signal_group_name
                new_channel['port_prefix'] = signal_group_prefix
                new_channel['port_number'] = signal_group
                for signal_channel in range(signal_group_num_channels):
                    new_channel['native_channel_name'] = _fread_QString(f)
                    new_channel['custom_channel_name'] = _fread_QString(f)
                    new_channel['native_order'] = np.fromfile(f, 'i2', 1)[0]
                    new_channel['custom_order'] = np.fromfile(f, 'i2', 1)[0]
                    signal_type = np.fromfile(f, 'i2', 1)[0]
                    channel_enabled = np.fromfile(f, 'i2', 1)[0]
                    new_channel['chip_channel'] = np.fromfile(f, 'i2', 1)[0]
                    skip = np.fromfile(f, 'i2', 1)[0]  # ignore command_stream
                    new_channel['board_stream'] = np.fromfile(f, 'i2', 1)[0]
                    new_trigger_channel['voltage_trigger_mode'] = np.fromfile(
                        f, 'i2', 1)[0]
                    new_trigger_channel['voltage_threshold'] = np.fromfile(
                        f, 'i2', 1)[0]
                    new_trigger_channel[
                        'digital_trigger_channel'] = np.fromfile(f, 'i2', 1)[0]
                    new_trigger_channel['digital_edge_polarity'] = np.fromfile(
                        f, 'i2', 1)[0]
                    new_channel['electrode_impedance_magnitude'] = np.fromfile(
                        f, 'f4', 1)[0]
                    new_channel['electrode_impedance_phase'] = np.fromfile(
                        f, 'f4', 1)[0]

                    if channel_enabled:
                        if signal_type == 0:
                            ch = new_channel.copy()
                            amplifier_channels.append(ch)
                            spike_triggers.append(new_trigger_channel)
                            amplifier_index = amplifier_index + 1
                        elif signal_type == 1:
                            # aux inputs not used in RHS2000 system
                            pass
                        elif signal_type == 2:
                            # supply voltage not used in RHS2000 system
                            pass
                        elif signal_type == 3:
                            ch = new_channel.copy()
                            board_adc_channels.append(ch)
                            board_adc_index = board_adc_index + 1
                        elif signal_type == 4:
                            ch = new_channel.copy()
                            board_dac_channels.append(ch)
                            board_dac_index = board_dac_index + 1
                        elif signal_type == 5:
                            ch = new_channel.copy()
                            board_dig_in_channels.append(ch)
                            board_dig_in_index = board_dig_in_index + 1
                        elif signal_type == 6:
                            ch = new_channel.copy()
                            board_dig_out_channels.append(ch)
                            board_dig_out_index = board_dig_out_index + 1
                        else:
                            raise Exception('Unknown channel type')

        # Summarize contents of data file.
        num_amplifier_channels = amplifier_index
        num_board_adc_channels = board_adc_index
        num_board_dac_channels = board_dac_index
        num_board_dig_in_channels = board_dig_in_index
        num_board_dig_out_channels = board_dig_out_index

        print('Found ', num_amplifier_channels, ' amplifier channel',
              _plural(num_amplifier_channels))
        if dc_amp_data_saved != 0:
            print('Found ', num_amplifier_channels, 'DC amplifier channel',
                  _plural(num_amplifier_channels))
        print('Found ', num_board_adc_channels, ' board ADC channel',
              _plural(num_board_adc_channels))
        print('Found ', num_board_dac_channels, ' board DAC channel',
              _plural(num_board_dac_channels))
        print('Found ',
              num_board_dig_in_channels, ' board digital input channel',
              _plural(num_board_dig_in_channels))
        print('Found ', num_board_dig_out_channels,
              ' board digital output channel',
              _plural(num_board_dig_out_channels))

        # Determine how many samples the data file contains.

        # Each data block contains num_samples_per_data_block amplifier samples
        bytes_per_block = num_samples_per_data_block * 4  # timestamp data
        if dc_amp_data_saved != 0:
            bytes_per_block += num_samples_per_data_block * (
                2 + 2 + 2) * num_amplifier_channels
        else:
            bytes_per_block += num_samples_per_data_block * (
                2 + 2) * num_amplifier_channels
        # Board analog inputs are sampled at same rate as amplifiers
        bytes_per_block += num_samples_per_data_block * 2 * num_board_adc_channels
        # Board analog outputs are sampled at same rate as amplifiers
        bytes_per_block += num_samples_per_data_block * 2 * num_board_dac_channels
        # Board digital inputs are sampled at same rate as amplifiers
        if num_board_dig_in_channels > 0:
            bytes_per_block += num_samples_per_data_block * 2
        # Board digital outputs are sampled at same rate as amplifiers
        if num_board_dig_out_channels > 0:
            bytes_per_block += num_samples_per_data_block * 2

        # How many data blocks remain in this file?
        data_present = 0
        bytes_remaining = filesize - f.tell()
        if bytes_remaining > 0:
            data_present = 1

        num_data_blocks = int(bytes_remaining / bytes_per_block)

        num_amplifier_samples = num_samples_per_data_block * num_data_blocks
        num_board_adc_samples = num_samples_per_data_block * num_data_blocks
        num_board_dac_samples = num_samples_per_data_block * num_data_blocks
        num_board_dig_in_samples = num_samples_per_data_block * num_data_blocks
        num_board_dig_out_samples = num_samples_per_data_block * num_data_blocks

        record_time = num_amplifier_samples / sample_rate

        if data_present:
            print('File contains ', record_time, ' seconds of data.  '
                  'Amplifiers were sampled at ', sample_rate / 1000, ' kS/s.')
        else:
            print('Header file contains no data.  Amplifiers were sampled at ',
                  sample_rate / 1000, 'kS/s.')

        if data_present:

            anas_gain = 0.195
            anas_offset = 32768
            dc_gain = -0.01923
            dc_offset = 512

            self._channel_info['gain'] = {}
            for ch in np.arange(num_amplifier_channels):
                self._channel_info['gain'][str(ch)] = anas_gain

            if not load_binary:
                # Pre-allocate memory for data.
                print('Allocating memory for data')

                t = np.zeros(num_amplifier_samples)

                amplifier_data = np.zeros(
                    (num_amplifier_channels, num_amplifier_samples))
                if dc_amp_data_saved != 0:
                    dc_amplifier_data = np.zeros(
                        (num_amplifier_channels, num_amplifier_samples))

                stim_data = np.zeros(
                    (num_amplifier_channels, num_amplifier_samples))
                board_adc_data = np.zeros(
                    (num_board_adc_channels, num_board_adc_samples))
                board_dac_data = np.zeros(
                    (num_board_dac_channels, num_board_dac_samples))
                board_dig_in_raw = np.zeros(num_board_dig_in_samples)
                board_dig_out_raw = np.zeros(num_board_dig_out_samples)

                # Read sampled data from file.
                print('Reading data from file')

                amplifier_index = 0
                board_adc_index = 0
                board_dac_index = 0
                board_dig_in_index = 0
                board_dig_out_index = 0

                print_increment = 10
                percent_done = print_increment

                print('num_data_blocks: ', num_data_blocks)

                for i in range(num_data_blocks):
                    t[amplifier_index:(amplifier_index + num_samples_per_data_block)] = \
                        np.fromfile(f, 'i4', num_samples_per_data_block)
                    if num_amplifier_channels > 0:
                        amplifier_data[:, amplifier_index:(amplifier_index + num_samples_per_data_block)] = \
                            np.reshape(np.fromfile(f, 'u2', num_samples_per_data_block*num_amplifier_channels),
                                        (num_amplifier_channels, num_samples_per_data_block))
                        if dc_amp_data_saved != 0:
                            dc_amplifier_data[:, amplifier_index:(amplifier_index + num_samples_per_data_block)] = \
                                np.reshape(np.fromfile(f, 'u2', num_samples_per_data_block * num_amplifier_channels),
                                           (num_amplifier_channels, num_samples_per_data_block))
                        stim_data[:, amplifier_index:(amplifier_index + num_samples_per_data_block)] = \
                            np.reshape(np.fromfile(f, 'u2', num_samples_per_data_block * num_amplifier_channels),
                                       (num_amplifier_channels, num_samples_per_data_block))

                    if num_board_adc_channels > 0:
                        board_adc_data[:, board_adc_index:(board_adc_index + num_samples_per_data_block)] = \
                            np.reshape(np.fromfile(f, 'u2', num_samples_per_data_block*num_board_adc_channels),
                                        (num_board_adc_channels, num_samples_per_data_block))
                    if num_board_dac_channels > 0:
                        board_dac_data[:, board_dac_index:(board_dac_index + num_samples_per_data_block)] = \
                            np.reshape(np.fromfile(f, 'u2', num_samples_per_data_block*num_board_dac_channels),
                                        (num_board_dac_channels, num_samples_per_data_block))
                    if num_board_dig_in_channels > 0:
                        board_dig_in_raw[board_dig_in_index:(board_dig_in_index + num_samples_per_data_block)] = \
                        np.fromfile(f, 'u2', num_samples_per_data_block)
                    if num_board_dig_out_channels > 0:
                        board_dig_out_raw[board_dig_out_index:(board_dig_out_index + num_samples_per_data_block)] = \
                        np.fromfile(f, 'u2', num_samples_per_data_block)

                    amplifier_index += num_samples_per_data_block
                    board_adc_index += num_samples_per_data_block
                    board_dac_index += num_samples_per_data_block
                    board_dig_in_index += num_samples_per_data_block
                    board_dig_out_index += num_samples_per_data_block

                    fraction_done = 100 * float(
                        (i + 1) / float(num_data_blocks))
                    if fraction_done >= percent_done:
                        print(percent_done, '% done')
                        percent_done += print_increment

                # Make sure we have read exactly the right amount of data.
                bytes_remaining = filesize - f.tell()
                if bytes_remaining != 0:
                    print('Warning: end of file not reached; the file may be '
                          'truncated.')

                # Close data file.
                f.close()

                t2 = time.time()
                print('Loading done. time: ', t2 - t1)

                if data_present:

                    print('Parsing data')

                    # Check for gaps in timestamps.
                    num_gaps = len(np.where(np.diff(t) != 1)[0])
                    if num_gaps == 0:
                        print('No missing timestamps in data.')
                    else:
                        print(
                            'Warning: ', num_gaps,
                            ' gaps in timestamp data found.  Time scale will not be uniform!'
                        )
                    # Scale time steps (units = seconds).
                    t = t / frequency_parameters['amplifier_sample_rate']

                    # Extract digital input channel times into separate variables.
                    if np.count_nonzero(board_dig_in_raw) != 0:
                        board_dig_in_data = []
                        for i in range(num_board_dig_in_channels):
                            # find idx of high level
                            mask = 2**board_dig_in_channels[
                                i]['native_order'] * np.ones(
                                    len(board_dig_in_raw))
                            idx_high = np.where(
                                np.bitwise_and(
                                    board_dig_in_raw.astype(
                                        dtype='int'), mask.astype(
                                            dtype='int')) > 0)
                            rising, falling = get_rising_falling_edges(
                                idx_high)
                            board_dig_in_data.append(t[rising])
                        board_dig_in_data = np.array(board_dig_in_data)
                    else:
                        print('No digital input data')
                        board_dig_in_data = np.array([])

                    if np.count_nonzero(board_dig_out_raw) != 0:
                        board_dig_out_data = []
                        for i in range(num_board_dig_out_channels):
                            # find idx of high level
                            mask = 2**board_dig_out_channels[i][
                                'native_order'] * np.ones(
                                    len(board_dig_out_raw))
                            idx_high = np.where(
                                np.bitwise_and(
                                    board_dig_out_raw.astype(
                                        dtype='int'), mask.astype(
                                            dtype='int')) > 0)
                            rising, falling = get_rising_falling_edges(
                                idx_high)
                            board_dig_out_data.append(t[rising])
                        board_dig_out_data = np.array(board_dig_out_data)
                    else:
                        print('No digital output data')
                        board_dig_out_data = np.array([])

                    # Clear variables
                    del board_dig_out_raw
                    del board_dig_in_raw

                    #TODO optimize memory-wise: e.g. only save time and chan of compliance, ampsett, charge recovery as list

                    # Scale voltage levels appropriately.
                    amplifier_data -= anas_offset  # remove ADC offset
                    amplifier_data *= anas_gain  # units = microvolts
                    if dc_amp_data_saved != 0:
                        dc_amplifier_data -= dc_offset  # remove ADC offset
                        dc_amplifier_data *= dc_gain  # units = volts

                    if np.count_nonzero(stim_data) != 0:
                        # TODO only save stim channel and respective signals waveform
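                        # Bit layout of the packed stim word, as decoded
                        # below: bit 15 = compliance limit, bit 14 = charge
                        # recovery, bit 13 = amplifier settle, bit 8 =
                        # polarity, bits 0-7 = current magnitude.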
                        stim_polarity = np.zeros(
                            (num_amplifier_channels, num_amplifier_samples))

                        compliance_limit_data_idx = np.where(
                            stim_data >= 2**15)
                        stim_data[compliance_limit_data_idx] -= 2**15
                        charge_recovery_data_idx = np.where(stim_data >= 2**14)
                        stim_data[charge_recovery_data_idx] -= 2**14
                        amp_settle_data_idx = np.where(stim_data >= 2**13)
                        stim_data[amp_settle_data_idx] -= 2**13

                        stim_polarity_idx = np.where(stim_data >= 2**8)
                        stim_polarity[stim_polarity_idx] = 1
                        stim_data[stim_polarity_idx] -= 2**8
                        stim_polarity = 1 - 2 * stim_polarity  # convert (0 = pos, 1 = neg) to +1 / -1
                        stim_data *= stim_polarity
                        stim_data = stim_parameters[
                            'stim_step_size'] * stim_data / float(
                                1e-6)  # units = microamps

                        stim_channels = []
                        stim_signal = []

                        for ch, stim in enumerate(stim_data):
                            if np.count_nonzero(stim) != 0:
                                stim_channels.append(ch)
                                stim_signal.append(stim)
                        stim_channels = np.array(stim_channels)
                        stim_signal = np.array(stim_signal)

                        # Clear variables
                        del stim_polarity, stim_data

                        amp_settle_data = []
                        charge_recovery_data = []
                        compliance_limit_data = []

                        for chan in np.arange(num_amplifier_channels):
                            if len(
                                    np.where(amp_settle_data_idx[0] == chan)
                                [0]) != 0:
                                amp_settle_data.append(
                                    t[amp_settle_data_idx[1][np.where(
                                        amp_settle_data_idx[0] == chan)[0]]])
                            else:
                                amp_settle_data.append([])
                            if len(
                                    np.where(charge_recovery_data_idx[0] ==
                                             chan)[0]) != 0:
                                charge_recovery_data.append(
                                    t[charge_recovery_data_idx[1][np.where(
                                        charge_recovery_data_idx[0] == chan)
                                                                  [0]]])
                            else:
                                charge_recovery_data.append([])
                            if len(
                                    np.where(compliance_limit_data_idx[0] ==
                                             chan)[0]) != 0:
                                compliance_limit_data.append(
                                    t[compliance_limit_data_idx[1][np.where(
                                        compliance_limit_data_idx[0] == chan)
                                                                   [0]]])
                            else:
                                compliance_limit_data.append([])

                        amp_settle_data = np.array(amp_settle_data)
                        charge_recovery_data = np.array(charge_recovery_data)
                        compliance_limit_data = np.array(compliance_limit_data)
                    else:
                        print('No stimulation data')
                        stim_channels = np.array([])
                        stim_signal = np.array([])
                        amp_settle_data = np.array([])
                        charge_recovery_data = np.array([])
                        compliance_limit_data = np.array([])

                    if np.count_nonzero(board_adc_data) != 0:
                        board_adc_data -= 32768  # remove ADC offset
                        board_adc_data *= 312.5e-6  # units = volts
                    else:
                        del board_adc_data
                        print('No ADC data')
                        board_adc_data = np.array([])

                    if np.count_nonzero(board_dac_data) != 0:
                        board_dac_data -= 32768  # remove ADC offset
                        board_dac_data *= 312.5e-6  # units = volts
                    else:
                        del board_dac_data
                        print('No DAC data')
                        board_dac_data = np.array([])

                    t3 = time.time()
                    print('Parsing done. time: ', t3 - t2)

                # Create data dictionary
                print('Creating data structure...')
                data['notes'] = notes
                data['frequency_parameters'] = frequency_parameters
                data['stim_parameters'] = stim_parameters
                if data_file_main_version_number > 1:
                    data['reference_channel'] = reference_channel

                if num_amplifier_channels > 0:
                    data['amplifier_channels'] = amplifier_channels
                    if data_present:
                        data['amplifier_data'] = amplifier_data
                        if dc_amp_data_saved != 0:
                            data['dc_amplifier_data'] = dc_amplifier_data

                        data['stim_channels'] = stim_channels
                        data['stim_signal'] = stim_signal
                        data['amp_settle_data'] = amp_settle_data
                        data['charge_recovery_data'] = charge_recovery_data
                        data['compliance_limit_data'] = compliance_limit_data
                        data['t'] = t

                    data['spike_triggers'] = spike_triggers

                if num_board_adc_channels > 0:
                    data['board_adc_channels'] = board_adc_channels
                    if data_present:
                        data['board_adc_data'] = board_adc_data
                else:
                    data['board_adc_data'] = np.array([])
                    data['board_adc_channels'] = np.array([])

                if num_board_dac_channels > 0:
                    data['board_dac_channels'] = board_dac_channels
                    if data_present:
                        data['board_dac_data'] = board_dac_data
                else:
                    data['board_dac_data'] = np.array([])
                    data['board_dac_channels'] = np.array([])

                if num_board_dig_in_channels > 0:
                    data['board_dig_in_channels'] = board_dig_in_channels
                    if data_present:
                        data['board_dig_in_data'] = board_dig_in_data
                else:
                    data['board_dig_in_data'] = np.array([])
                    data['board_dig_in_channels'] = np.array([])

                if num_board_dig_out_channels > 0:
                    data['board_dig_out_channels'] = board_dig_out_channels
                    if data_present:
                        data['board_dig_out_data'] = board_dig_out_data
                else:
                    data['board_dig_out_data'] = np.array([])
                    data['board_dig_out_channels'] = np.array([])

                if data_present:
                    print(
                        'Extracted data are now available in the python workspace.'
                    )
                else:
                    print(
                        'Extracted waveform information is now available in the python workspace.'
                    )
            else:
                # Create data dictionary
                print('Creating data structure...')
                data['notes'] = notes
                data['frequency_parameters'] = frequency_parameters
                data['stim_parameters'] = stim_parameters
                if data_file_main_version_number > 1:
                    data['reference_channel'] = reference_channel
                if num_amplifier_channels > 0:
                    data['amplifier_channels'] = amplifier_channels
                    data['spike_triggers'] = spike_triggers

                if num_board_adc_channels > 0:
                    data['board_adc_channels'] = board_adc_channels
                else:
                    data['board_adc_channels'] = np.array([])

                if num_board_dac_channels > 0:
                    data['board_dac_channels'] = board_dac_channels
                else:
                    data['board_dac_channels'] = np.array([])

                if num_board_dig_in_channels > 0:
                    data['board_dig_in_channels'] = board_dig_in_channels
                else:
                    data['board_dig_in_channels'] = np.array([])

                if num_board_dig_out_channels > 0:
                    data['board_dig_out_channels'] = board_dig_out_channels
                else:
                    data['board_dig_out_channels'] = np.array([])

                if data_present:
                    print(
                        'Extracted data are now available in the python workspace.'
                    )
                else:
                    print(
                        'Extracted waveform information is now available in the python workspace.'
                    )

        return data
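The digital lines in the reader above are packed one bit per channel into 16-bit words, so extracting a single line reduces to a bitwise_and against 2**native_order. A minimal standalone sketch of that step, with made-up data (names here are illustrative, not from the reader):

import numpy as np

dig_in_raw = np.array([0, 1, 3, 2, 2, 0], dtype=np.uint16)  # packed words
native_order = 1                               # bit index of this channel
mask = np.uint16(1 << native_order)
high = np.bitwise_and(dig_in_raw, mask) > 0    # True where the line is high
# rising edges are low -> high transitions
rising = np.where(np.diff(high.astype(np.int8)) == 1)[0] + 1
print(high)    # [False False  True  True  True False]
print(rising)  # [2]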
Example #49
def get_wheel_data(session_path, bp_data=None, save=False):
    """
    Gets wheel data from raw files and converts positions into centimeters and
    timestamps into seconds.
    **Optional:** saves _ibl_wheel.times.npy and _ibl_wheel.position.npy

    Times:
    Gets Rotary Encoder timestamps (ms) for each position and converts to times.

    Uses time_converter to extract and convert timestamps (ms) to times (s).

    Positions:
    Positions are in (cm) of RE perimeter relative to 0. The 0 resets every trial.

    cmtick = radius (cm) * 2 * pi / n_ticks
    cmtick = 3.1 * 2 * np.pi / 1024

    :param session_path: absolute path of session folder
    :type session_path: str
    :param bp_data: dictionary containing the contents of the pybpod jsonable
                    file read with raw.load_data
    :type bp_data: dict, optional
    :param save: whether to save the corresponding alf file
                 to the alf folder, defaults to False
    :type save: bool, optional
    :return: Numpy structured array.
    :rtype: numpy.ndarray
    """
    ##
    status = 0
    if not bp_data:
        bp_data = raw.load_data(session_path)
    df = raw.load_encoder_positions(session_path)
    if df is None:
        logger_.error('No wheel data for ' + str(session_path))
        return None
    data = structarr(['re_ts', 're_pos', 'bns_ts'],
                     shape=(df.shape[0], ),
                     formats=['f8', 'f8', object])
    data['re_ts'] = df.re_ts.values
    data['re_pos'] = df.re_pos.values
    data['bns_ts'] = df.bns_ts.values
    data['re_pos'] = data['re_pos'] / 1024 * 2 * np.pi  # convert positions to radians
    trial_starts = get_trial_start_times(session_path)
    # need a flag if the data resolution is 1ms due to the old version of rotary encoder firmware
    if np.all(np.mod(data['re_ts'], 1e3) == 0):
        status = 1
    data['re_ts'] = data['re_ts'] / 1e6  # convert ts to seconds
    # get the converter function to translate re_ts into behavior times
    convtime = time_converter_session(session_path, kind='re2b')
    data['re_ts'] = convtime(data['re_ts'])

    def get_reset_trace_compensation_with_state_machine_times():
        # this is the preferred way of getting resets using the state machine time information
        # it will not always work depending on firmware versions, new bugs
        iwarn = []
        ns = len(data['re_pos'])
        tr_dc = np.zeros_like(data['re_pos'])  # trial dc component
        for bp_dat in bp_data:
            restarts = np.sort(
                np.array(bp_dat['behavior_data']['States timestamps']
                         ['reset_rotary_encoder'] + bp_dat['behavior_data']
                         ['States timestamps']['reset2_rotary_encoder'])[:, 0])
            ind = np.unique(
                np.searchsorted(data['re_ts'], restarts, side='left') - 1)
            # the rotary encoder doesn't always reset right away, and the reset sample given the
            # timestamp can be ambiguous: look for zeros
            for i in np.where(data['re_pos'][ind] != 0)[0]:
                # handle boundary effects
                if ind[i] > ns - 2:
                    continue
                # it happens quite often that we have to look at the next sample to find the reset
                if data['re_pos'][ind[i] + 1] == 0:
                    ind[i] = ind[i] + 1
                    continue
                # also case where the rotary doesn't reset to 0, but erratically to -1/+1
                if data['re_pos'][ind[i]] <= (1 / 1024 * 2 * np.pi):
                    ind[i] = ind[i] + 1
                    continue
                # compounded with the fact that the reset may have happened at next sample.
                if np.abs(
                        data['re_pos'][ind[i] + 1]) <= (1 / 1024 * 2 * np.pi):
                    ind[i] = ind[i] + 1
                    continue
                # sometimes it is also the last trial that has this behaviour
                if (bp_data[-1] is bp_dat) or (bp_data[0] is bp_dat):
                    continue
                iwarn.append(ind[i])
                # at which point we are running out of possible bugs and calling it
            tr_dc[ind] = data['re_pos'][ind - 1]
        if iwarn:  # if a warning flag was caught in the loop throw a single warning
            logger_.warning(
                'Rotary encoder reset events discrepancy at following indices: '
                + str(iwarn) + ' times: ' + str(data['re_ts'][iwarn]))
        # exit status 0 is fine, 1 something went wrong
        return tr_dc, len(iwarn) != 0

    # attempt to get the resets properly unless the unit is ms, which means precision is
    # not good enough to match SM times to wheel sample times
    if not status:
        tr_dc, status = get_reset_trace_compensation_with_state_machine_times()

    # if something was or went wrong, fall back to an agnostic way of getting
    # resets: just use the zero-position samples
    if status:
        tr_dc = np.zeros_like(data['re_pos'])  # trial dc component
        i0 = np.where(data['re_pos'] == 0)[0]
        tr_dc[i0] = data['re_pos'][i0 - 1]
    # even if things went ok, rotary encoder may not log the whole session. Need to fix outside
    else:
        i0 = np.where(
            np.bitwise_and(
                np.bitwise_or(data['re_ts'] >= trial_starts[-1],
                              data['re_ts'] <= trial_starts[0]),
                data['re_pos'] == 0))[0]
    # make sure the bounds are not included in the current list
    i0 = np.delete(
        i0, np.where(np.bitwise_or(i0 >= len(data['re_pos']) - 1, i0 == 0)))
    # a 0 sample is not a reset if 2 conditions are met:
    # 1/2 no inflexion (continuous derivative)
    c1 = np.abs(
        np.sign(data['re_pos'][i0 + 1] - data['re_pos'][i0]) -
        np.sign(data['re_pos'][i0] - data['re_pos'][i0 - 1])) == 2
    # 2/2 needs to be below threshold
    c2 = np.abs(
        (data['re_pos'][i0] - data['re_pos'][i0 - 1]) /
        (EPS +
         (data['re_ts'][i0] - data['re_ts'][i0 - 1]))) < THRESHOLD_RAD_PER_SEC
    # apply reset to points identified as resets
    i0 = i0[np.where(np.bitwise_not(np.bitwise_and(c1, c2)))]
    tr_dc[i0] = data['re_pos'][i0 - 1]

    # unwrap the rotation (in radians) and then add the DC component from restarts
    data['re_pos'] = np.unwrap(data['re_pos']) + np.cumsum(tr_dc)

    # Timestamps may be repeated or very close to one another. Find them, as
    # they will induce large jitters on the velocity function or errors when
    # attempting interpolation.
    rep_idx = np.where(
        np.diff(data['re_ts']) <= THRESHOLD_CONSECUTIVE_SAMPLES)[0]
    # Change the value of the repeated position
    data['re_pos'][rep_idx] = (data['re_pos'][rep_idx] +
                               data['re_pos'][rep_idx + 1]) / 2
    data['re_ts'][rep_idx] = (data['re_ts'][rep_idx] +
                              data['re_ts'][rep_idx + 1]) / 2
    # Now remove the repeat times that are rep_idx + 1
    data = np.delete(data, rep_idx + 1)

    # convert to cm
    data['re_pos'] = data['re_pos'] * WHEEL_RADIUS_CM

    # #  DEBUG PLOTS START HERE ########################
    # # if you are experiencing a new bug here is some plot tools
    # # do not forget to increment the wasted dev hours counter below
    # WASTED_HOURS_ON_THIS_WHEEL_FORMAT = 16
    #
    # import matplotlib.pyplot as plt
    # fig = plt.figure()
    # ax = plt.axes()
    # tstart = get_trial_start_times(session_path)
    # tts = np.c_[tstart, tstart, tstart + np.nan].flatten()
    # vts = np.c_[tstart * 0 + 100, tstart * 0 - 100, tstart + np.nan].flatten()
    # ax.plot(tts, vts, label='Trial starts')
    # ax.plot(convtime(df.re_ts.values/1e6), df.re_pos.values / 1024 * 2 * np.pi,
    #         '.-', label='Raw data')
    # i0 = np.where(df.re_pos.values == 0)
    # ax.plot(convtime(df.re_ts.values[i0] / 1e6), df.re_pos.values[i0] / 1024 * 2 * np.pi,
    #         'r*', label='Raw data zero samples')
    # ax.plot(convtime(df.re_ts.values / 1e6) , tr_dc, label='reset compensation')
    # ax.set_xlabel('Bpod Time')
    # ax.set_ylabel('radians')
    # #
    # restarts = np.array(bp_data[10]['behavior_data']['States timestamps']\
    #                         ['reset_rotary_encoder']).flatten()
    # # x__ = np.c_[restarts, restarts, restarts + np.nan].flatten()
    # # y__ = np.c_[restarts * 0 + 1, restarts * 0 - 1, restarts+ np.nan].flatten()
    # #
    # # ax.plot(x__, y__, 'k', label='Restarts')
    #
    # ax.plot(data['re_ts'], data['re_pos'] / WHEEL_RADIUS_CM, '.-', label='Output Trace')
    # ax.legend()
    # # plt.hist(np.diff(data['re_ts']), 400, range=[0, 0.01])
    # #  DEBUG PLOTS STOP HERE ########################

    check_alf_folder(session_path)
    if raw.save_bool(save, '_ibl_wheel.timestamps.npy'):
        tpath = os.path.join(session_path, 'alf', '_ibl_wheel.timestamps.npy')
        np.save(tpath, data['re_ts'])
    if raw.save_bool(save, '_ibl_wheel.position.npy'):
        ppath = os.path.join(session_path, 'alf', '_ibl_wheel.position.npy')
        np.save(ppath, data['re_pos'])
    return data
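The reset-candidate selection above combines boolean conditions with np.bitwise_and / np.bitwise_or, which act elementwise on boolean arrays. A toy sketch with made-up arrays:

import numpy as np

re_ts = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
re_pos = np.array([0.0, 0.0, -0.2, 0.0, 0.0])
trial_starts = np.array([0.4, 1.8])

# zero-position samples outside [first, last] trial start
outside = np.bitwise_or(re_ts >= trial_starts[-1], re_ts <= trial_starts[0])
i0 = np.where(np.bitwise_and(outside, re_pos == 0))[0]
print(i0)  # [0 4]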
Example #50
def CPA(indices):
    if platform == "linux" or platform == "linux2":
        TRACES = '/home/philipp/workspace/hw-security-course-ws19/Task3-CPA/source_files/36/Threshholds/traces_7.csv'
        MSGS = '/home/philipp/workspace/hw-security-course-ws19/Task3-CPA/source_files/36/messages.csv'
    elif platform == "darwin":
        TRACES = '/Users/janlucavettel/Documents/FPGA/HW-Sicherheit/Task3-CPA/example_traces/test_traces.csv'
        MSGS = '/Users/janlucavettel/Documents/FPGA/HW-Sicherheit/Task3-CPA/example_traces/test_msgs.csv'
    #elif platform == "win32":
        # Windows...



    traces = genfromtxt(TRACES, delimiter=',')

    with open(MSGS, newline='') as csvfile:
        msgs = list(csv.reader(csvfile))


    key = msgs[0][0]
    key = binascii.unhexlify(key)
    cipher = AES.new(key, AES.MODE_ECB)
    decipher = AES.new(key, AES.MODE_ECB)
    ciphertexts = []

    for i in range(len(msgs)):
        text = cipher.encrypt(binascii.unhexlify(msgs[i][1]))
        a = binascii.hexlify(text).lower()
        a = a.decode("utf-8")
        msgs[i].append(a)



    # msgs = np.array(msgs, dtype=str)


    # Set these to the byte and bit you want to attack
    print("Running ", indices)
    numByte = indices[0]
    numBit = indices[1]

    numTraces = traces.shape[0]
    traceLength = traces.shape[1]


    k = np.arange(0, 256)
    H = np.zeros((256, len(msgs)))

    start = time.time()

    for i in range(len(k)):
        for j in range(len(msgs)):
            msg = msgs[j][2]
            msg = msg[2*numByte:2*numByte+2]
            msg = int(msg, 16)
            H[i,j] = getInvSboxValue(msg ^ k[i])
            H[i,j] = np.bitwise_and(np.array(H[i,j]).astype(int), 2**numBit)

    HModel = H
    for i in range(len(H)):
        HModel[i] = np.array(list(map(Hamming.HammingDistanceInt, H[i])))

    endPower = time.time()

    HModel = HModel.T

    correlationObject = Correlation(HModel,traces)
    corrMatrix = correlationObject.correlationTraces(traces, HModel)

    endCorr = time.time()

    maxValue = np.amax(np.abs(corrMatrix))
    result = np.where(np.abs(corrMatrix) == maxValue)


    filename = str(numByte) + "_" + str(numBit)
    figureNumber = numByte*8 + numBit
    plt.figure(figureNumber)
    plt.plot(corrMatrix.T, color='gray')
    correctByte = findLasRoundKeyByte(numByte)
    correctByte = int(correctByte, 16)
    print(correctByte)
    plt.plot(corrMatrix[correctByte].T, color='red')
    title = "BYTE_BIT: " + filename + " KEYHYP: " + str(result[0]) + " TRACE MOMENT: " + str(result[1]) + " Max CorrValue: " + str(maxValue)
    plt.title(title)
    filename = filename + ".png"
    exportpath = '/home/philipp/workspace/hw-security-course-ws19/Task3-CPA/source_files/48/Threshholds/CorrelationImages35/' + filename
    plt.savefig(exportpath, dpi=100)
    plt.close(figureNumber)

    print("------------------------------------------")
    print("Byte number is: ", numByte)
    print("Bit number is: ", numBit)
    print("Number of traces: ", numTraces)
    print("Trace length: ", traceLength)
    print("Power took", endPower - start)
    print("Correlation took", endCorr - endPower)
    print("Max value is: ", maxValue)
    print("The best Key Hyp is: ", result[0])
    print("Correlation peak is at point: ", result[1])
    print("------------------------------------------")
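The hypothesis matrix H above keeps a single bit of each predicted intermediate value; that selection is just np.bitwise_and with 2**numBit. A standalone sketch with made-up values:

import numpy as np

predicted = np.array([0x3A, 0x7F, 0x00, 0x90])  # predicted S-box outputs
num_bit = 4
bit = np.bitwise_and(predicted, 2**num_bit)     # 0 or 2**num_bit per entry
print((bit > 0).astype(int))                    # [1 1 0 1]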
Example #51
def calc_lfp_linesource_anisotropic(cell, x, y, z, sigma, r_limit):
    """Calculate electric field potential using the line-source method, all
    compartments treated as line sources, even soma.

    Parameters
    ----------
    cell: obj
        LFPy.Cell or LFPy.TemplateCell instance
    x : float
        extracellular position, x-axis
    y : float
        extracellular position, y-axis
    z : float
        extracellular position, z-axis
    sigma : array
        extracellular conductivity [sigma_x, sigma_y, sigma_z]
    r_limit : np.ndarray
        minimum distance to source current for each compartment
    """

    # some variables for h, r2, r_soma calculations
    xstart = cell.xstart
    xend = cell.xend
    ystart = cell.ystart
    yend = cell.yend
    zstart = cell.zstart
    zend = cell.zend
    l_vecs = np.array([xend - xstart, yend - ystart, zend - zstart])

    pos = np.array([x, y, z])

    rs, closest_points = return_dist_from_segments(xstart, ystart, zstart,
                                                   xend, yend, zend, pos)

    dx2 = (xend - xstart)**2
    dy2 = (yend - ystart)**2
    dz2 = (zend - zstart)**2
    a = (sigma[1] * sigma[2] * dx2 + sigma[0] * sigma[2] * dy2 +
         sigma[0] * sigma[1] * dz2)

    b = -2 * (sigma[1] * sigma[2] * (x - xstart) *
              (xend - xstart) + sigma[0] * sigma[2] * (y - ystart) *
              (yend - ystart) + sigma[0] * sigma[1] * (z - zstart) *
              (zend - zstart))
    c = (sigma[1] * sigma[2] * (x - xstart)**2 + sigma[0] * sigma[2] *
         (y - ystart)**2 + sigma[0] * sigma[1] * (z - zstart)**2)

    for idx in np.where(rs < r_limit)[0]:
        r, closest_point, l_vec = rs[idx], closest_points[:, idx], l_vecs[:, idx]

        p_ = pos.copy()
        if np.abs(r) < 1e-12:
            if np.abs(l_vec[0]) < 1e-12:
                p_[0] += r_limit[idx]
            elif np.abs(l_vec[1]) < 1e-12:
                p_[1] += r_limit[idx]
            elif np.abs(l_vec[2]) < 1e-12:
                p_[2] += r_limit[idx]
            else:
                displace_vec = np.array([-l_vec[1], l_vec[0], 0])
                displace_vec = displace_vec / np.sqrt(np.sum(displace_vec**
                                                             2)) * r_limit[idx]
                p_[:] += displace_vec
        else:
            p_[:] = pos + (pos - closest_point) * (r_limit[idx] - r) / r

        if np.sqrt(np.sum((p_ - closest_point)**2)) - r_limit[idx] > 1e-9:
            print(p_, closest_point)

            raise RuntimeError("Segment adjustment not working")

        b[idx] = -2 * (sigma[1] * sigma[2] * (p_[0] - xstart[idx]) *
                       (xend[idx] - xstart[idx]) + sigma[0] * sigma[2] *
                       (p_[1] - ystart[idx]) *
                       (yend[idx] - ystart[idx]) + sigma[0] * sigma[1] *
                       (p_[2] - zstart[idx]) * (zend[idx] - zstart[idx]))
        c[idx] = (sigma[1] * sigma[2] * (p_[0] - xstart[idx])**2 +
                  sigma[0] * sigma[2] * (p_[1] - ystart[idx])**2 +
                  sigma[0] * sigma[1] * (p_[2] - zstart[idx])**2)

    [i] = np.where(np.abs(b) <= 1e-6)
    [iia] = np.where(np.bitwise_and(np.abs(4 * a * c - b * b) < 1e-6,
                                    np.abs(a - c) < 1e-6))
    [iib] = np.where(np.bitwise_and(np.abs(4 * a * c - b * b) < 1e-6,
                                    np.abs(a - c) >= 1e-6))
    [iii] = np.where(np.bitwise_and(4 * a * c - b * b < -1e-6,
                                    np.abs(b) > 1e-6))
    [iiii] = np.where(np.bitwise_and(4 * a * c - b * b > 1e-6,
                                     np.abs(b) > 1e-6))

    if len(i) + len(iia) + len(iib) + len(iii) + len(iiii) != cell.totnsegs:
        print(a, b, c)
        print(i, iia, iib, iii, iiii)
        raise RuntimeError

    mapping = np.zeros(cell.totnsegs)
    mapping[i] = _anisotropic_line_source_case_i(a[i], c[i])
    mapping[iia] = _anisotropic_line_source_case_iia(a[iia], c[iia])
    mapping[iib] = _anisotropic_line_source_case_iib(a[iib], b[iib], c[iib])
    mapping[iii] = _anisotropic_line_source_case_iii(a[iii], b[iii], c[iii])
    mapping[iiii] = _anisotropic_line_source_case_iiii(a[iiii], b[iiii],
                                                       c[iiii])

    if np.isnan(mapping).any():
        raise RuntimeError("NaN")

    return 1 / (4 * np.pi) * mapping / np.sqrt(a)
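The case split above routes each segment to one closed-form expression by combining float comparisons with np.bitwise_and (an elementwise AND on boolean arrays). A toy sketch with made-up coefficients:

import numpy as np

a = np.array([1.0, 2.0, 1.0])
b = np.array([0.0, 1.0, 3.0])
c = np.array([1.0, 2.0, 1.0])
disc = 4 * a * c - b * b

case_i = np.abs(b) <= 1e-6
case_iii = np.bitwise_and(disc < -1e-6, np.abs(b) > 1e-6)
case_iiii = np.bitwise_and(disc > 1e-6, np.abs(b) > 1e-6)
print(np.where(case_i)[0], np.where(case_iii)[0], np.where(case_iiii)[0])
# [0] [2] [1]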
Example #52
    def get_swath_data(self, item, fill=None):
        """Retrieve the item asked for then set it to the specified data type, scale it, and mask it.
        """
        if fill is None:
            fill = self.get_fill_value(item)
        var_info = self.file_type_info.get(item)
        variable = self[var_info.var_name]
        data = variable.get()
        if var_info.index is not None:
            data = data[var_info.index]
        # before or after scaling/offset?
        if var_info.bit_mask is not None:
            bit_mask = var_info.bit_mask
            shift_amount = var_info.right_shift
            offset = var_info.additional_offset
            numpy.bitwise_and(data, bit_mask, data)
            numpy.right_shift(data, shift_amount, data)
            numpy.add(data, offset, data)

        # Convert to the correct data type
        data = data.astype(var_info.data_type)

        # Get the fill value
        if var_info.fill_attr_name and isinstance(var_info.fill_attr_name,
                                                  str):
            fill_value = self[var_info.var_name + "." +
                              var_info.fill_attr_name]
            mask = data == fill_value
        elif var_info.fill_attr_name:
            fill_value = var_info.fill_attr_name
            mask = data >= fill_value
        else:
            fill_value = -999.0
            mask = data == fill_value

        # Get the valid_min and valid_max
        valid_min, valid_max = None, None
        if var_info.range_attr_name:
            if isinstance(var_info.range_attr_name, str):
                valid_min, valid_max = self[var_info.var_name + "." +
                                            var_info.range_attr_name]
            else:
                valid_min, valid_max = var_info.range_attr_name

        # Certain data need to have special values clipped
        if var_info.clip_saturated and valid_max is not None:
            LOG.debug(
                "Setting any saturation or \"can't aggregate\" values to valid maximum"
            )
            data[(data == self.CANT_AGGR_VALUE) |
                 (data == self.SATURATION_VALUE)] = valid_max

        if mask is not None and valid_max is not None:
            mask[(data < valid_min) | (data > valid_max)] = True

        # Get the scaling factors
        scale_value = None
        if var_info.scale_attr_name:
            try:
                scale_value = self[var_info.var_name + "." +
                                   var_info.scale_attr_name]
                if var_info.index is not None:
                    scale_value = scale_value[var_info.index]
                scale_value = float(scale_value)
            except KeyError:
                LOG.debug("No scaling factors for %s", item)
        offset_value = None
        if var_info.offset_attr_name is not None:
            try:
                offset_value = self[var_info.var_name + "." +
                                    var_info.offset_attr_name]
                if var_info.index is not None:
                    offset_value = offset_value[var_info.index]
                offset_value = float(offset_value)
            except KeyError:
                LOG.debug("No offset for %s", item)

        LOG.debug("Variable " + str(var_info.var_name) +
                  " is using scale value " + str(scale_value) +
                  " and offset value " + str(offset_value))

        if offset_value is not None:
            data -= data.dtype.type(offset_value)
        if scale_value is not None:
            data *= data.dtype.type(scale_value)

        # Special case: 250m Resolution
        if var_info.interpolate:
            if mask is not None:
                data[mask] = numpy.nan

            if item in [K_LONGITUDE_250, K_LATITUDE_250]:
                cache_key = "250"
                lon_key = K_LONGITUDE_250
                lat_key = K_LATITUDE_250
                res_factor = 4
            elif item in [K_LONGITUDE_500, K_LATITUDE_500]:
                cache_key = "500"
                lon_key = K_LONGITUDE_500
                lat_key = K_LATITUDE_500
                res_factor = 2
            else:
                raise ValueError("Don't know how to interpolate item '%s'" %
                                 (item, ))

            if self.nav_interpolation[cache_key][
                    0] is not None and self.nav_interpolation[cache_key][
                        1] is not None:
                LOG.debug(
                    "Returning previously interpolated %sm resolution geolocation data",
                    cache_key)
                data = self.nav_interpolation[cache_key][not (item == lon_key)]
                self.nav_interpolation[cache_key] = [None, None]
                return data

            self.nav_interpolation[cache_key][not (item == lon_key)] = data

            if self.nav_interpolation[cache_key][
                    0] is None or self.nav_interpolation[cache_key][1] is None:
                # We don't have the other coordinate data yet
                self.get_swath_data(lon_key if item == lat_key else lat_key,
                                    fill=fill)
            else:
                # We already have the other coordinate variable, the user isn't asking for this item so just return
                LOG.debug(
                    "Returning 'None' because this instance of the function shouldn't have been called by the user"
                )
                return None

            LOG.info("Interpolating to higher resolution: %s" %
                     (var_info.var_name, ))
            lon_data, lat_data = self.nav_interpolation[cache_key]

            new_lon_data, new_lat_data = interpolate_geolocation_cartesian(
                lon_data, lat_data, res_factor=res_factor)

            new_lon_data[numpy.isnan(new_lon_data)] = fill
            new_lat_data[numpy.isnan(new_lat_data)] = fill
            # Cache the results when the user requests the other coordinate
            self.nav_interpolation[cache_key] = [new_lon_data, new_lat_data]
            data = new_lon_data if item == lon_key else new_lat_data
        elif mask is not None:
            data[mask] = fill

        return data
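The bit_mask branch above extracts a bit field in place: mask with np.bitwise_and, shift with np.right_shift, then add an offset, all writing back into the same array. A standalone sketch with made-up mask and shift values:

import numpy as np

data = np.array([0b10110100, 0b01101100], dtype=np.uint16)
bit_mask, shift_amount, offset = 0b01110000, 4, 1

np.bitwise_and(data, bit_mask, data)      # keep bits 4-6
np.right_shift(data, shift_amount, data)  # move them down
np.add(data, offset, data)                # apply additional offset
print(data)  # [4 7]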
Example #53
def np_float2np_bf16(arr):
    """Convert a numpy array of float to a numpy array
    of bf16 in uint16"""
    orig = arr.view("<u4")
    bias = np.bitwise_and(np.right_shift(orig, 16), 1) + 0x7FFF
    return np.right_shift(orig + bias, 16).astype("uint16")
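A quick sanity check of np_float2np_bf16: the bias term rounds the truncated mantissa to nearest even, so re-expanding the returned uint16 with 16 zero mantissa bits should give values close to the float32 inputs (float32 input is assumed; the '<u4' view requires it):

import numpy as np

x = np.array([1.0, 3.14159, -2.5], dtype=np.float32)
bf16 = np_float2np_bf16(x)
restored = (bf16.astype("<u4") << 16).view("<f4")
print(restored)  # approx. [ 1.        3.140625 -2.5     ]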
Example #54
def Optimizer(network,
              Alive_Node,
              Update=False,
              R=30,
              In_Median=30,
              First=False):
    BSMO_NET = nx.create_empty_copy(network)
    BSMO_CHID = []
    Swarm_Size = 40
    MIR = 100

    if Update == True:
        MAX_X = 0
        MAX_Y = 0
        for i in Alive_Node:
            x, y = BSMO_NET.node[i]['pos']
            if x > MAX_X:
                MAX_X = x
            if y > MAX_Y:
                MAX_Y = y

        R = math.sqrt(MAX_X**2 + MAX_Y**2) / 4

    ##Initializing
    SM_Arr = []
    FIT = []
    MG = 4
    Group0 = []
    Group1 = []
    Group2 = []
    Group3 = []
    NGroup = 1
    LLL = np.zeros(MG)
    GLL = 0
    MLLL = 10
    MGLL = 20
    NB_Cluster = max(round(cf.P_CH * len(Alive_Node)), 1)
    for i in range(0, Swarm_Size):
        SM = []
        for j in Alive_Node:
            if random() <= cf.P_CH:
                SM.append(1)
            else:
                SM.append(0)
        SM_Arr.append(SM)
        FIT.append(Get_Fitness(BSMO_NET, SM, Alive_Node))
        Group0.append(i)

    Pr = 0.1
    LLID = np.where(np.max(FIT) == FIT)[0][0]
    GLID = np.where(np.max(FIT) == FIT)[0][0]

    for Iter in range(0, MIR):
        ## Local Leader Phase
        Pr = Pr + (0.4 - 0.1) / MIR
        for i in range(0, MG):
            if i == 0:
                temp = Group0
            if i == 1:
                temp = Group1
            if i == 2:
                temp = Group2
            if i == 3:
                temp = Group3

            ## find LLID
            MAXFIT = 0
            count = 0
            for ID in temp:
                TMPFIT = FIT[ID]
                if TMPFIT > MAXFIT:
                    LLID = ID
                    MAXFIT = TMPFIT

            for j in temp:
                if FIT[j] == FIT[LLID]:
                    continue
                if FIT[j] == FIT[GLID]:
                    continue
                if Pr > random():
                    SM = SM_Arr[j]
                    LL = SM_Arr[LLID]
                    Rand = np.random.choice(temp, 1)[0]
                    SMR = SM_Arr[Rand]
                    b = randint(0, 1)
                    d = randint(-1, 1)
                    SM_Arr[j] = np.bitwise_xor(
                        SM,
                        np.bitwise_or(
                            np.bitwise_and(b, np.bitwise_xor(LL, SM)),
                            np.bitwise_and(d, np.bitwise_xor(SMR, SM))))
                    FIT[j] = Get_Fitness(BSMO_NET, SM_Arr[j], Alive_Node)
                if FIT[j] > FIT[LLID]:
                    count = 1
                    LLIDPOT = j
            if count == 0:
                LLL[i] += 1
            else:
                count = 0
                LLID = LLIDPOT

            ## Local Leader Decision
            if LLL[i] == MLLL:
                LLL[i] = 0
                for TT in temp:
                    if FIT[TT] == FIT[LLID]:
                        continue
                    if FIT[TT] == FIT[GLID]:
                        continue
                    if Pr > random():
                        SM = SM_Arr[TT]
                        LL = SM_Arr[LLID]
                        GL = SM_Arr[GLID]
                        b = randint(0, 1)
                        SM_Arr[TT] = np.bitwise_xor(
                            SM,
                            np.bitwise_or(
                                np.bitwise_and(b, np.bitwise_xor(LL, SM)),
                                np.bitwise_and(b, np.bitwise_xor(GL, SM))))
                        FIT[TT] = Get_Fitness(BSMO_NET, SM_Arr[TT], Alive_Node)

                    else:
                        SM = []
                        for KT in Alive_Node:
                            if random() < cf.P_CH:
                                SM.append(1)
                            else:
                                SM.append(0)
                        SM_Arr[TT] = SM
                        FIT[TT] = Get_Fitness(BSMO_NET, SM_Arr[TT], Alive_Node)

        ## Global Leader Phase
        count = 0
        GLID = np.where(np.max(FIT) == FIT)[0][0]
        for i in range(0, len(SM_Arr)):
            if FIT[i] == FIT[GLID]:
                continue
            Prob = 0.9 * (FIT[i] / FIT[GLID]) + 0.1
            if Prob > random():
                GL = SM_Arr[GLID]
                SM = SM_Arr[i]
                Rand = randint(0, Swarm_Size - 1)
                SMR = SM_Arr[Rand]
                b = randint(0, 1)
                d = randint(-1, 1)
                SM_Arr[i] = np.bitwise_xor(
                    SM,
                    np.bitwise_or(np.bitwise_and(b, np.bitwise_xor(GL, SM)),
                                  np.bitwise_and(d, np.bitwise_xor(SMR, SM))))
                FIT[i] = Get_Fitness(BSMO_NET, SM_Arr[i], Alive_Node)
                if FIT[i] > FIT[GLID]:
                    count = 1
        if count == 0:
            GLL += 1
        else:
            count = 0
            GLID = np.where(np.max(FIT) == FIT)[0][0]

        ## Global Leader Decision
        if GLL == MGLL:
            GLL = 0
            NGroup += 1
            Choice_Node = np.arange(0, Swarm_Size, 1)
            if NGroup == 2:
                Group0 = np.random.choice(Choice_Node,
                                          int(len(Choice_Node) / NGroup),
                                          replace=False)
                Choice_Node = list(set(Choice_Node) - set(Group0))
                Group1 = np.array(Choice_Node)
            if NGroup == 3:
                Group0 = np.random.choice(Choice_Node,
                                          int(len(Choice_Node) / NGroup),
                                          replace=False)
                Choice_Node = list(set(Choice_Node) - set(Group0))
                Group1 = np.random.choice(Choice_Node,
                                          int(len(Choice_Node) / NGroup),
                                          replace=False)
                Choice_Node = list(set(Choice_Node) - set(Group1))
                Group2 = np.array(Choice_Node)
            if NGroup == 4:
                Group0 = np.random.choice(Choice_Node,
                                          int(len(Choice_Node) / NGroup),
                                          replace=False)
                Choice_Node = list(set(Choice_Node) - set(Group0))
                Group1 = np.random.choice(Choice_Node,
                                          int(len(Choice_Node) / NGroup),
                                          replace=False)
                Choice_Node = list(set(Choice_Node) - set(Group1))
                Group2 = np.random.choice(Choice_Node,
                                          int(len(Choice_Node) / NGroup),
                                          replace=False)
                Choice_Node = list(set(Choice_Node) - set(Group2))
                Group3 = np.array(Choice_Node)
            if NGroup == 5:
                BSMO_CHID = SM_Arr[GLID]

    INNER = []
    OUTER = []
    BSMO_CHID = np.where(SM_Arr[GLID] == np.max(SM_Arr[GLID]))[0] + 1
    for i in BSMO_CHID:
        if BSMO_NET.node[i]['RTBS'] < R:
            INNER.append(i)
            BSMO_NET.node[i]['Next'] = 0
        else:
            OUTER.append(i)

    for i in Alive_Node:
        if i in BSMO_CHID:
            continue
        x, y = BSMO_NET.node[i]['pos']
        NNDist = 1000
        NNID = 0
        for j in BSMO_CHID:
            if i == j:
                continue
            x2, y2 = BSMO_NET.node[j]['pos']
            NewDist = math.sqrt((x - x2)**2 + (y - y2)**2)
            if NNDist > NewDist:
                NNID = j
                NNDist = NewDist
        BSMO_NET.node[i]['Next'] = NNID

    for i in OUTER:
        NNID = 0
        NNDist = 1000
        x, y = BSMO_NET.node[i]['pos']
        for j in INNER:
            x2, y2 = BSMO_NET.node[j]['pos']
            NewDist = math.sqrt((x - x2)**2 + (y - y2)**2)
            if NNDist > NewDist:
                NNID = j
                NNDist = NewDist
        BSMO_NET.node[i]['Next'] = NNID

    if First == True:
        ## add_Edge
        for i in Alive_Node:
            BSMO_NET.add_edge(i, BSMO_NET.node[i]['Next'])

    return BSMO_NET, BSMO_CHID, R
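The position update in the leader phases above is pure bit algebra: XOR the current solution with leader and random-member differences, gated by the random coefficients b and d. A standalone sketch with made-up vectors (note that np.bitwise_and(-1, x) equals x, so d = -1 acts like d = 1 here):

import numpy as np

sm = np.array([0, 1, 0, 1])   # current binary solution
ll = np.array([1, 1, 0, 0])   # local leader
smr = np.array([0, 0, 1, 1])  # random group member
b, d = 1, 1                   # b in {0, 1}, d in {-1, 0, 1}

new_sm = np.bitwise_xor(
    sm,
    np.bitwise_or(np.bitwise_and(b, np.bitwise_xor(ll, sm)),
                  np.bitwise_and(d, np.bitwise_xor(smr, sm))))
print(new_sm)  # [1 0 1 0]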
Example #55
def find_trial_ids(trials,
                   side='all',
                   choice='all',
                   order='trial num',
                   sort='idx',
                   contrast=(1, 0.5, 0.25, 0.125, 0.0625, 0),
                   event=None):
    """
    Finds trials that match criterion
    :param trials: trials object. Must contain attributes contrastLeft, contrastRight and
    feedbackType
    :param side: stimulus side, options are 'all', 'left' or 'right'
    :param choice: trial choice, options are 'all', 'correct' or 'incorrect'
    :param contrast: contrast of stimulus, pass in a list/tuple of all contrasts to be
    considered, e.g. [1, 0.5] would only look for trials with 100 % and 50 % contrast
    :param order: how to order the trials, options are 'trial num' or 'reaction time'
    :param sort: how to sort the trials, options are 'side' (split left right trials), 'choice'
    (split correct incorrect trials), 'choice and side' (split left right and correct incorrect)
    :param event: trial event to align to (in order to remove nan trials for this event)
    :return: np.array of trial ids, list of dividers to indicate how trials are sorted
    """
    if event:
        idx = ~np.isnan(trials[event])
        nan_idx = np.where(idx)[0]
    else:
        idx = np.ones_like(trials['feedbackType'], dtype=bool)

    # Find trials that have specified contrasts
    cont = np.bitwise_or(
        ismember(trials['contrastLeft'][idx], np.array(contrast))[0],
        ismember(trials['contrastRight'][idx], np.array(contrast))[0])

    # Find different permutations of trials
    # correct right
    cor_r = np.where(
        np.bitwise_and(
            cont,
            np.bitwise_and(trials['feedbackType'][idx] == 1,
                           np.isfinite(trials['contrastRight'][idx]))))[0]
    # correct left
    cor_l = np.where(
        np.bitwise_and(
            cont,
            np.bitwise_and(trials['feedbackType'][idx] == 1,
                           np.isfinite(trials['contrastLeft'][idx]))))[0]
    # incorrect right
    incor_r = np.where(
        np.bitwise_and(
            cont,
            np.bitwise_and(trials['feedbackType'][idx] == -1,
                           np.isfinite(trials['contrastRight'][idx]))))[0]
    # incorrect left
    incor_l = np.where(
        np.bitwise_and(
            cont,
            np.bitwise_and(trials['feedbackType'][idx] == -1,
                           np.isfinite(trials['contrastLeft'][idx]))))[0]

    reaction_time = trials['response_times'][idx] - trials['goCue_times'][idx]

    def _order_by(_trials, order):
        # Returns subset of trials either ordered by trial number or by reaction time
        sorted_trials = np.sort(_trials)
        if order == 'trial num':
            return sorted_trials
        elif order == 'reaction time':
            sorted_reaction = np.argsort(reaction_time[sorted_trials])
            return sorted_trials[sorted_reaction]

    dividers = []

    # Find the trial id for all possible combinations
    if side == 'all' and choice == 'all':
        if sort == 'idx':
            trial_id = _order_by(np.r_[cor_r, cor_l, incor_r, incor_l], order)
        elif sort == 'choice':
            trial_id = np.r_[_order_by(np.r_[cor_l, cor_r], order),
                             _order_by(np.r_[incor_l, incor_r], order)]
            dividers.append(np.r_[cor_l, cor_r].shape[0])
        elif sort == 'side':
            trial_id = np.r_[_order_by(np.r_[cor_l, incor_l], order),
                             _order_by(np.r_[cor_r, incor_r], order)]
            dividers.append(np.r_[cor_l, incor_l].shape[0])
        elif sort == 'choice and side':
            trial_id = np.r_[_order_by(cor_l, order),
                             _order_by(incor_l, order),
                             _order_by(cor_r, order),
                             _order_by(incor_r, order)]
            dividers.append(cor_l.shape[0])
            dividers.append(np.r_[cor_l, incor_l].shape[0])
            dividers.append(np.r_[cor_l, incor_l, cor_r].shape[0])

    if side == 'left' and choice == 'all':
        if sort in ['idx', 'side']:
            trial_id = _order_by(np.r_[cor_l, incor_l], order)
        elif sort in ['choice', 'choice and side']:
            trial_id = np.r_[_order_by(cor_l, order),
                             _order_by(incor_l, order)]
            dividers.append(cor_l.shape[0])

    if side == 'right' and choice == 'all':
        if sort in ['idx', 'side']:
            trial_id = _order_by(np.r_[cor_r, incor_r], order)
        elif sort in ['choice', 'choice and side']:
            trial_id = np.r_[_order_by(cor_r, order),
                             _order_by(incor_r, order)]
            dividers.append(cor_r.shape[0])

    if side == 'all' and choice == 'correct':
        if sort in ['idx', 'choice']:
            trial_id = _order_by(np.r_[cor_l, cor_r], order)
        elif sort in ['side', 'choice and side']:
            trial_id = np.r_[_order_by(cor_l, order), _order_by(cor_r, order)]
            dividers.append(cor_l.shape[0])

    if side == 'all' and choice == 'incorrect':
        if sort in ['idx', 'choice']:
            trial_id = _order_by(np.r_[incor_l, incor_r], order)
        elif sort in ['side', 'choice and side']:
            trial_id = np.r_[_order_by(incor_l, order),
                             _order_by(incor_r, order)]
            dividers.append(incor_l.shape[0])

    if side == 'left' and choice == 'correct':
        trial_id = _order_by(cor_l, order)

    if side == 'left' and choice == 'incorrect':
        trial_id = _order_by(incor_l, order)

    if side == 'right' and choice == 'correct':
        trial_id = _order_by(cor_r, order)

    if side == 'right' and choice == 'incorrect':
        trial_id = _order_by(incor_r, order)

    if event:
        trial_id = nan_idx[trial_id]

    return trial_id, dividers
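Each permutation above is an elementwise AND of boolean conditions via np.bitwise_and, e.g. "correct" and "stimulus on the right". A toy sketch with a made-up trials table:

import numpy as np

feedback = np.array([1, -1, 1, 1])
contrast_right = np.array([0.5, np.nan, np.nan, 1.0])

cor_r = np.where(np.bitwise_and(feedback == 1,
                                np.isfinite(contrast_right)))[0]
print(cor_r)  # [0 3]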
Example #56
def calc_lfp_soma_as_point_anisotropic(cell, x, y, z, sigma, r_limit):
    """Calculate electric field potential, soma is treated as point source, all
    compartments except soma are treated as line sources.

    Parameters
    ----------
    cell: obj
        LFPy.Cell or LFPy.TemplateCell instance
    x : float
        extracellular position, x-axis
    y : float
        extracellular position, y-axis
    z : float
        extracellular position, z-axis
    sigma : array
        extracellular conductivity [sigma_x, sigma_y, sigma_z]
    r_limit : np.ndarray
        minimum distance to source current for each compartment
    """

    xstart = cell.xstart
    xend = cell.xend
    ystart = cell.ystart
    yend = cell.yend
    zstart = cell.zstart
    zend = cell.zend
    l_vecs = np.array([xend - xstart, yend - ystart, zend - zstart])

    pos = np.array([x, y, z])

    rs, closest_points = return_dist_from_segments(xstart, ystart, zstart,
                                                   xend, yend, zend, pos)

    dx2 = (xend - xstart)**2
    dy2 = (yend - ystart)**2
    dz2 = (zend - zstart)**2
    a = (sigma[1] * sigma[2] * dx2 + sigma[0] * sigma[2] * dy2 +
         sigma[0] * sigma[1] * dz2)

    b = -2 * (sigma[1] * sigma[2] * (x - xstart) *
              (xend - xstart) + sigma[0] * sigma[2] * (y - ystart) *
              (yend - ystart) + sigma[0] * sigma[1] * (z - zstart) *
              (zend - zstart))
    c = (sigma[1] * sigma[2] * (x - xstart)**2 + sigma[0] * sigma[2] *
         (y - ystart)**2 + sigma[0] * sigma[1] * (z - zstart)**2)

    for idx in np.where(rs < r_limit)[0]:
        r = rs[idx]
        closest_point = closest_points[:, idx]
        l_vec = l_vecs[:, idx]

        p_ = pos.copy()
        if np.abs(r) < 1e-12:
            if np.abs(l_vec[0]) < 1e-12:
                p_[0] += r_limit[idx]
            elif np.abs(l_vec[1]) < 1e-12:
                p_[1] += r_limit[idx]
            elif np.abs(l_vec[2]) < 1e-12:
                p_[2] += r_limit[idx]
            else:
                displace_vec = np.array([-l_vec[1], l_vec[0], 0])
                displace_vec = (displace_vec / np.linalg.norm(displace_vec)
                                * r_limit[idx])
                p_[:] += displace_vec
        else:
            p_[:] = pos + (pos - closest_point) * (r_limit[idx] - r) / r

        if np.sqrt(np.sum((p_ - closest_point)**2)) - r_limit[idx] > 1e-9:
            print(p_, closest_point)

            raise RuntimeError("Segment adjustment not working")

        b[idx] = -2 * (sigma[1] * sigma[2] * (p_[0] - xstart[idx]) *
                       (xend[idx] - xstart[idx]) + sigma[0] * sigma[2] *
                       (p_[1] - ystart[idx]) *
                       (yend[idx] - ystart[idx]) + sigma[0] * sigma[1] *
                       (p_[2] - zstart[idx]) * (zend[idx] - zstart[idx]))
        c[idx] = (sigma[1] * sigma[2] * (p_[0] - xstart[idx])**2 +
                  sigma[0] * sigma[2] * (p_[1] - ystart[idx])**2 +
                  sigma[0] * sigma[1] * (p_[2] - zstart[idx])**2)

    [i] = np.where(np.abs(b) <= 1e-6)
    [iia] = np.where(np.bitwise_and(np.abs(4 * a * c - b * b) < 1e-6,
                                    np.abs(a - c) < 1e-6))
    [iib] = np.where(np.bitwise_and(np.abs(4 * a * c - b * b) < 1e-6,
                                    np.abs(a - c) >= 1e-6))
    [iii] = np.where(np.bitwise_and(4 * a * c - b * b < -1e-6,
                                    np.abs(b) > 1e-6))
    [iiii] = np.where(np.bitwise_and(4 * a * c - b * b > 1e-6,
                                     np.abs(b) > 1e-6))

    if len(i) + len(iia) + len(iib) + len(iii) + len(iiii) != cell.totnsegs:
        print(a, b, c)
        print(i, iia, iib, iii, iiii)
        raise RuntimeError("case classification did not cover all segments")

    mapping = np.zeros(cell.totnsegs)
    mapping[i] = _anisotropic_line_source_case_i(a[i], c[i])
    mapping[iia] = _anisotropic_line_source_case_iia(a[iia], c[iia])
    mapping[iib] = _anisotropic_line_source_case_iib(a[iib], b[iib], c[iib])
    mapping[iii] = _anisotropic_line_source_case_iii(a[iii], b[iii], c[iii])
    mapping[iiii] = _anisotropic_line_source_case_iiii(a[iiii], b[iiii],
                                                       c[iiii])

    if np.isnan(mapping).any():
        raise RuntimeError("NaN")

    mapping /= np.sqrt(a)

    # Treat soma as point source
    dx2_soma = (cell.xmid[0] - x)**2
    dy2_soma = (cell.ymid[0] - y)**2
    dz2_soma = (cell.zmid[0] - z)**2

    r2_soma = dx2_soma + dy2_soma + dz2_soma

    if np.abs(r2_soma) < 1e-6:
        dx2_soma += 0.001
        r2_soma += 0.001

    if r2_soma < r_limit[0]**2:
        # For anisotropic media, the direction in which points are moved matters.
        # Radial distance between point source and electrode is scaled to r_limit
        r2_scale_factor = r_limit[0] * r_limit[0] / r2_soma
        dx2_soma *= r2_scale_factor
        dy2_soma *= r2_scale_factor
        dz2_soma *= r2_scale_factor

    mapping[0] = 1 / np.sqrt(sigma[1] * sigma[2] * dx2_soma +
                             sigma[0] * sigma[2] * dy2_soma +
                             sigma[0] * sigma[1] * dz2_soma)

    return 1 / (4 * np.pi) * mapping
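
The case labels i/iia/iib/iii/iiii above partition the segments by the sign of the discriminant 4ac - b*b of the per-segment quadratic. A small self-contained check of that classification, using made-up coefficients rather than anything derived from a cell:

import numpy as np

# Toy per-segment quadratic coefficients (purely illustrative).
a = np.array([1.0, 1.0, 2.0, 1.0])
b = np.array([0.0, 2.0, 1.0, 4.0])
c = np.array([1.0, 1.0, 1.0, 1.0])

disc = 4 * a * c - b * b

# The same conditions as in the function above; it asserts that every
# segment falls into exactly one of these cases.
case_i = np.abs(b) <= 1e-6
case_iia = np.bitwise_and(np.abs(disc) < 1e-6, np.abs(a - c) < 1e-6)
case_iib = np.bitwise_and(np.abs(disc) < 1e-6, np.abs(a - c) >= 1e-6)
case_iii = np.bitwise_and(disc < -1e-6, np.abs(b) > 1e-6)
case_iiii = np.bitwise_and(disc > 1e-6, np.abs(b) > 1e-6)

print(case_i, case_iia, case_iib, case_iii, case_iiii)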
Example #57
                        stderr=open(os.devnull, 'wb'))
        kpageflags = np.fromfile("kpageflags", dtype=np.uint64)
    else:
        subprocess.call(["cp", "/proc/kpageflags", "."],
                        stderr=open(os.devnull, 'wb'))
        kpageflags = np.fromfile("kpageflags", dtype=np.uint64)

    if args.verbose:
        print " done."

    # filter kpageflags
    if args.flags:
        mask = 0
        for f in args.flags:
            mask |= 1 << f
        kpageflags = np.bitwise_and(kpageflags, mask)

    # reshape so it can be a rectangle
    kpagecount_data = np.append(
        kpagecount,
        np.zeros((WIDTH - kpagecount.size % WIDTH, ), dtype=np.int64))
    kpagecount = np.reshape(kpagecount_data, (-1, WIDTH))
    hilbert_curve(kpagecount_data, t=kpagecount, N=256)
    kpageflags_data = np.append(
        kpageflags,
        np.zeros((WIDTH - kpageflags.size % WIDTH, ), dtype=np.uint64))
    kpageflags = np.reshape(kpageflags_data, (-1, WIDTH))
    hilbert_curve(kpageflags_data, t=kpageflags, N=256)
    #hilbert_curve(kpageflags_data, t=kpageflags, N=256, offset=65536, t_offset=[0, 256])

    kpageflags_bounds = np.unique(kpageflags)
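
The flag filter above is just a bit mask built once and applied in one vectorised call; a tiny standalone sketch of the same pattern with invented flag bits (the real bit meanings are documented with the kernel's /proc/kpageflags interface):

import numpy as np

# Toy kpageflags entries; bits 0 and 4 here are arbitrary stand-ins.
kpageflags = np.array([0b10000, 0b00001, 0b10001, 0b00000], dtype=np.uint64)

# OR the requested flag bits into one mask, then AND it against every
# entry at once, exactly as in the snippet above.
flags = [0, 4]
mask = 0
for f in flags:
    mask |= 1 << f

print(np.bitwise_and(kpageflags, np.uint64(mask)))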
Example #58
        # cv2.imshow("frame3", frame)
        # plt.hist(img2.ravel(),256,[0,256]); plt.show()

        # _, labels = cv2.connectedComponents(union_difference)
        # print(labels.shape)
        # mask = np.array(labels, dtype=np.uint8)
        # mask[labels == 1] = 255
        # print(mask)
        # mask_list.append(mask)
        # _, img2 = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY )
        i = 0
        # cv2.imshow("frame2", difference_list[3])
        #print(max(difference_list[0]), max(img2))
        while i < len(frame_list):
            intersection_list.append(
                np.bitwise_and(img2.astype(int),
                               difference_list[i].astype(int)))
            # print(str(type(img2))+" and "+str(type(difference_list[i])))
            i += 1
            print(i)
        #print(len(intersection_list))
        try:
            contours, hierarchy = cv2.findContours(
                np.uint8(intersection_list[9]), 1, 2)
            cnt = contours[0]
            rect = cv2.minAreaRect(cnt)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            frame = cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
        except (IndexError, cv2.error):
            print("no contour found")
        cv2.imshow("intersection2", np.uint8(intersection_list[1]))
Example #59
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
fa = FaceAligner(predictor, desiredFaceWidth=256)

# load the input image, resize it, and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=800)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)


# create a mask image of the same shape as input image, filled with 0s (black color)
mask = np.zeros_like(image)
(imgH, imgW) = image.shape[:2]
# create a white filled ellipse

tempImg = image.copy()  # tempImg was never defined in the original snippet
tempImg[0:imgH, 0:imgW] = cv2.blur(image[0:imgH, 0:imgW], (50, 50))
mask = cv2.ellipse(mask, center=(imgW // 2, imgH // 2),
                   axes=(imgW - imgW // 2, imgH - imgH // 2),
                   angle=0, startAngle=0, endAngle=360,
                   color=(255, 255, 255), thickness=-1)
# Bitwise AND operation to black out regions outside the mask
result = np.bitwise_and(image,mask)
# Convert from BGR to RGB for displaying correctly in matplotlib
# Note that you needn't do this for displaying using OpenCV's imshow()
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask_rgb = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
result_rgb = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)

cv2.imshow("image_rgb", image_rgb)
cv2.imshow("mask_rgb", mask_rgb)
cv2.imshow("result_rgb", result_rgb)


cv2.waitKey(0)
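
The masking trick above works because ANDing a uint8 value with 255 leaves it unchanged while ANDing with 0 zeroes it, so a 0/255 mask acts as a stencil; a minimal sketch with a toy rectangular mask standing in for the ellipse:

import numpy as np

# A flat grey 4x4 three-channel "image" and a mask that is white in the centre.
image = np.full((4, 4, 3), 200, dtype=np.uint8)
mask = np.zeros_like(image)
mask[1:3, 1:3] = 255

# Inside the mask the pixel values pass through; outside they become black.
result = np.bitwise_and(image, mask)
print(result[:, :, 0])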
Example #60
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND). """
    x, axis = _keras_axis(x, axis)
    return np.bitwise_and(x, axis=axis, keepdims=keepdims)
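
Note that the ufunc reduction only matches a logical AND when the input is boolean (or 0/1); for general integers it ANDs bit patterns instead. A quick check of the boolean case, independent of the _keras_axis helper:

import numpy as np

x = np.array([[True, True, False],
              [True, True, True]])

# For boolean arrays the two reductions agree.
print(np.bitwise_and.reduce(x, axis=1))  # [False  True]
print(np.all(x, axis=1))                 # [False  True]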