Example #1
    def _sanitise_geometry(self, lons, lats):
        """
        Sanitise geometry by removing any masked points.
        :param lons:
        :param lats:
        :return: A tuple containing arrays of (lon, lat)
        """
        # Align the arrays
        lons, lats = self.__align_lons_lats(lons, lats)

        # Get masks
        lon_mask = ma.getmaskarray(lons)
        lat_mask = ma.getmaskarray(lats)

        # Filter the arrays
        # kltsa 19/07/2016 Change for issue 23330: Some filters 
        #                  were excluded in order to allow values 
        #                  up to 90 to be included.
        self.longitudes = lons[
            (lons >= -180) &
            (lons <= 180) &
            #(lats >= -90) &
            #(lats <= 90) &
            (lon_mask == False)# &
            #(lat_mask == False)
        ]

        self.latitudes = lats[
            #(lons >= -180) &
            #(lons <= 180) &
            (lats >= -90) &
            (lats <= 90) &
            #(lon_mask == False) &
            (lat_mask == False)
        ]
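For reference, a minimal sketch (not part of the original project) of the ma.getmaskarray behaviour this filtering idiom relies on: unlike ma.getmask, it always returns a full boolean array, even when no mask is set, so it combines safely with elementwise comparisons.

import numpy as np
import numpy.ma as ma

lons = ma.array([10.0, 200.0, -50.0], mask=[False, False, True])
plain = np.array([1.0, 2.0])

print(ma.getmask(plain))       # False (ma.nomask), a scalar
print(ma.getmaskarray(plain))  # [False False], always elementwise
print(ma.getmaskarray(lons))   # [False False  True]

# The filtering idiom from the example above:
keep = (lons.data >= -180) & (lons.data <= 180) & ~ma.getmaskarray(lons)
print(lons[keep])              # [10.0]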
Example #2
def _tabulate_time_series(a):
    """
    Private function called by tabulate for flexible-dtype TimeSeries.
    """
    basedtype = a.dtype
    basenames = basedtype.names
    if basenames is None:
        _varshape = a._varshape
        if _varshape != ():
            pseudodtype = [('_dates', int),
                           ('_data',(basedtype, _varshape)),
                           ('_mask',(bool,_varshape))]
        else:
            pseudodtype = [('_dates', int),
                           ('_data', basedtype),
                           ('_mask', bool)]
        pseudo = zip(a._dates, a.filled(), ma.getmaskarray(a))
    else:
        pseudodtype = [('_dates', int)]
        pseudodtype.extend([(fname,[('_data',ftype), ('_mask',bool)])
                            for (fname,ftype) in basedtype.descr])
        fields = [a[f] for f in basenames]
        pseudo = zip(a._dates,
                     *[zip(f.filled().flat, ma.getmaskarray(f).flat)
                       for f in fields])
    return np.fromiter(pseudo, dtype=pseudodtype)
Example #3
    def assertArraysEqual(self, arr1, arr2):
        """
        Ensure that two numpy / numpy.ma arrays are equivalent in both their
        mask and their data.
        """
        if (arr1.shape != arr2.shape):
            msg = "Shapes differ:\n" + \
              str(arr1.shape) + " != " + str(arr2.shape)
            raise AssertionError(msg)

        mask1 = ma.getmaskarray(arr1)
        mask2 = ma.getmaskarray(arr2)
        masks_equal = np.array_equal(mask1, mask2)
        if (not masks_equal):
            msg = "Masks differ:\n" + \
              str(mask1) + " != " + str(mask2) + "\n" + \
              "Arrays are: \n" + \
              str(arr1) + "\n" + \
              str(arr2)

            raise AssertionError(msg)

        vals_equal = ma.allequal(arr1, arr2)
        if (not vals_equal):
            msg = "Values differ:\n" + \
              str(arr1) + " != " + str(arr2)
            raise AssertionError(msg)
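A quick illustration with assumed data of why the helper checks masks and values separately: ma.allequal treats masked positions as equal, so data differences hidden under a shared mask compare equal.

import numpy as np
import numpy.ma as ma

a = ma.array([1, 2, 3], mask=[False, True, False])
b = ma.array([1, 9, 3], mask=[False, True, False])

# Same shape and same mask, and ma.allequal ignores the masked slot,
# so these compare equal even though the raw data differ at index 1.
assert np.array_equal(ma.getmaskarray(a), ma.getmaskarray(b))
assert ma.allequal(a, b)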
Example #4
def self_training(X, y, X_unLabeled, clf, th):
    clf.fit(X=X, y=y)
    index_unlabeled = ma.arange(0, len(X_unLabeled), 1)
    y_unlabeled = np.zeros(len(X_unLabeled))
    train_is_failed = False

    while True:
        probs = clf.predict_proba(X=X_unLabeled[~ma.getmaskarray(index_unlabeled)])
        index_greater_equal = np.greater_equal([max(d) for d in probs], [th]*len(probs))
        index_labelable = index_unlabeled.data[~ma.getmaskarray(index_unlabeled)][index_greater_equal]

        if not len(index_labelable) > 0:
            if not len(index_unlabeled.data[ma.getmaskarray(index_unlabeled)]) > 0:
                train_is_failed = True
            break

        index_unlabeled[index_labelable] = ma.masked

        if index_unlabeled.all() is ma.masked:
            break

        y_unlabeled[index_labelable] = [np.argmax(p) for p in probs[index_greater_equal]]

        X_labelable = X_unLabeled[index_unlabeled.mask]
        y_labelable = y_unlabeled[index_unlabeled.mask]

        clf.fit(X=np.append(X, X_labelable, axis=0),
                y=np.append(y, y_labelable))

    if train_is_failed:
        y_unlabeled = []
    else:
        y_unlabeled = ma.array(data=y_unlabeled, mask=index_unlabeled.mask)

    return clf, y_unlabeled
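A hypothetical usage sketch, assuming scikit-learn is available and that X, y and X_unLabeled follow the usual (n_samples, n_features) layout; the data below is made up.

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 2))
y = (X[:, 0] > 0).astype(int)
X_unlabeled = rng.normal(size=(50, 2))

clf, y_pseudo = self_training(X, y, X_unlabeled,
                              clf=LogisticRegression(), th=0.9)
# y_pseudo is [] if no prediction ever crossed the threshold; otherwise
# it is a masked array whose mask records which samples were labelled.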
Example #5
File: __init__.py Project: marqh/iris
    def _assertMaskedArray(self, assertion, a, b, strict, **kwargs):
        # Define helper function to extract unmasked values as a 1d
        # array.
        def unmasked_data_as_1d_array(array):
            array = ma.asarray(array)
            if array.ndim == 0:
                if array.mask:
                    data = np.array([])
                else:
                    data = np.array([array.data])
            else:
                data = array.data[~ma.getmaskarray(array)]
            return data

        # Compare masks. This will also check that the array shapes
        # match, which is not tested when comparing unmasked values if
        # strict is False.
        a_mask, b_mask = ma.getmaskarray(a), ma.getmaskarray(b)
        np.testing.assert_array_equal(a_mask, b_mask)

        if strict:
            assertion(a.data, b.data, **kwargs)
        else:
            assertion(unmasked_data_as_1d_array(a),
                      unmasked_data_as_1d_array(b),
                      **kwargs)
Example #6
def errorbar(ax, x, y, xerr=True, yerr=True, fmt='-', ecolor=None, elinewidth=None, capsize=3, 
             barsabove=False, lolims=False, uplims=False, xlolims=False, xuplims=False, **kwargs):
    """Plots two Dvects against each other. Requires matplotlib.
    
    See matplotlib.axes.Axes.errorbar for an explanation of most fields. The only differences are:

    ax   -- the Axes you are plotting in, needed to allow a call of the form ax.errorbar(...) to the
            standard matplotlib errorbar plotter
    x    -- x Dvect
    y    -- y Dvect

    Quite often you may want to set fmt='.' to get points. The lines are for compatibility with
    the matplotlib errorbar routine.
    """
    import warnings
    
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise DvectError("dvect.errorbar: matplotlib needed for plotting") 

    # Identify joint OK part
    ok = ~(getmaskarray(x) | getmaskarray(y))

    # Catch xerr and yerr, which are re-defined cf. matplotlib errorbar since
    # Dvects carry their own errors
    xerr = x.err.data[ok] if xerr and x.err is not None and isinstance(x,Dvect) else None
    yerr = y.err.data[ok] if yerr and y.err is not None and isinstance(y,Dvect) else None

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        ax.errorbar(x.data[ok], y.data[ok], xerr=xerr, yerr=yerr, fmt=fmt, ecolor=ecolor, elinewidth=elinewidth, capsize=capsize, 
                    barsabove=barsabove, lolims=lolims, uplims=uplims, xlolims=xlolims, xuplims=xuplims, **kwargs)
Example #7
 def __setslice__(self, i, j, value):
     "Sets the slice described by [i,j] to `value`."
     _localdict = self.__dict__
     d = self._data
     m = _localdict['_fieldmask']
     names = self.dtype.names
     if value is masked:
         for n in names:
             m[i:j][n] = True
     elif not self._hardmask:
         fval = filled(value)
         mval = getmaskarray(value)
         for n in names:
             d[n][i:j] = fval
             m[n][i:j] = mval
     else:
         mindx = getmaskarray(self)[i:j]
         dval = np.asarray(value)
         valmask = getmask(value)
         if valmask is nomask:
             for n in names:
                 mval = mask_or(m[n][i:j], valmask)
                 d[n][i:j][~mval] = value
         elif valmask.size > 1:
             for n in names:
                 mval = mask_or(m[n][i:j], valmask)
                 d[n][i:j][~mval] = dval[~mval]
                 m[n][i:j] = mask_or(m[n][i:j], mval)
     self._fieldmask = m
Example #8
    def _sanitise_geometry(self, lons, lats):
        """
        Sanitise geometry by removing any masked points.
        :param lons:
        :param lats:
        :return: A tuple containing arrays of (lon, lat)
        """
        # Align the arrays
        lons, lats = self.__align_lons_lats(lons, lats)

        # Get masks
        lon_mask = ma.getmaskarray(lons)
        lat_mask = ma.getmaskarray(lats)

        # Filter the arrays
        self.longitudes = lons[
            (lons >= -180) &
            (lons <= 180) &
            (lats >= -90) &
            (lats <= 90) &
            (lon_mask == False) &
            (lat_mask == False)
        ]

        self.latitudes = lats[
            (lons >= -180) &
            (lons <= 180) &
            (lats >= -90) &
            (lats <= 90) &
            (lon_mask == False) &
            (lat_mask == False)
        ]
Example #9
 def _assertMaskedArray(self, assertion, a, b, strict):
     a_mask, b_mask = ma.getmaskarray(a), ma.getmaskarray(b)
     np.testing.assert_array_equal(a_mask, b_mask)
     if strict:
         assertion(a.data, b.data)
     else:
         assertion(a[~a_mask].data, b[~b_mask].data)
Example #10
 def _compute_masks_differ(self, var1, var2):
     return not np.array_equal(
         ma.getmaskarray(var1),
         ma.getmaskarray(var2))
Example #11
File: woa.py Project: castelao/oceansdb
def woa_profile_from_dap(var, d, lat, lon, depth, cfg):
    """
    Monthly Climatologic Mean and Standard Deviation from WOA,
    used either for temperature or salinity.

    INPUTS
        time: [day of the year]
        lat: [-90<lat<90]
        lon: [-180<lon<180]
        depth: [meters]

    Reads the WOA Monthly Climatology NetCDF file and
    returns the corresponding WOA values of salinity or temperature mean and
    standard deviation for the given time, lat, lon, depth.
    """
    if lon < 0:
        lon = lon+360

    url = cfg['url']

    doy = int(d.strftime('%j'))
    dataset = open_url(url)

    dn = (np.abs(doy-dataset['time'][:])).argmin()
    xn = (np.abs(lon-dataset['lon'][:])).argmin()
    yn = (np.abs(lat-dataset['lat'][:])).argmin()

    if re.match("temperature\d?$", var):
        mn = ma.masked_values(dataset.t_mn.t_mn[dn, :, yn, xn].reshape(
            dataset['depth'].shape[0]), dataset.t_mn.attributes['_FillValue'])
        sd = ma.masked_values(dataset.t_sd.t_sd[dn, :, yn, xn].reshape(
            dataset['depth'].shape[0]), dataset.t_sd.attributes['_FillValue'])
        # se = ma.masked_values(dataset.t_se.t_se[dn, :, yn, xn].reshape(
        #    dataset['depth'].shape[0]), dataset.t_se.attributes['_FillValue'])
        # Use this in the future. A minimum # of samples
        # dd = ma.masked_values(dataset.t_dd.t_dd[dn, :, yn, xn].reshape(
        #    dataset['depth'].shape[0]), dataset.t_dd.attributes['_FillValue'])
    elif re.match("salinity\d?$", var):
        mn = ma.masked_values(dataset.s_mn.s_mn[dn, :, yn, xn].reshape(
            dataset['depth'].shape[0]), dataset.s_mn.attributes['_FillValue'])
        sd = ma.masked_values(dataset.s_sd.s_sd[dn, :, yn, xn].reshape(
            dataset['depth'].shape[0]), dataset.s_sd.attributes['_FillValue'])
        # dd = ma.masked_values(dataset.s_dd.s_dd[dn, :, yn, xn].reshape(
        #    dataset['depth'].shape[0]), dataset.s_dd.attributes['_FillValue'])
    zwoa = ma.array(dataset.depth[:])

    ind = (depth <= zwoa.max()) & (depth >= zwoa.min())
    # Mean value profile
    f = interp1d(zwoa[~ma.getmaskarray(mn)].compressed(), mn.compressed())
    mn_interp = ma.masked_all(depth.shape)
    mn_interp[ind] = f(depth[ind])
    # The stdev profile
    f = interp1d(zwoa[~ma.getmaskarray(sd)].compressed(), sd.compressed())
    sd_interp = ma.masked_all(depth.shape)
    sd_interp[ind] = f(depth[ind])

    output = {'woa_an': mn_interp, 'woa_sd': sd_interp}

    return output
Example #12
def test_no_data_available():
    """ This is a position without valid data """

    db = WOA()
    out = db['TEMP'].extract(doy=155, lat=48.1953, lon=-69.5855,
            depth=[2.0, 5.0, 6.0, 21.0, 44.0, 79.0, 5000])
    assert sorted(out.keys()) == [u't_dd', u't_mn', u't_sd', u't_se']
    for v in out:
        assert ma.getmaskarray(out[v]).all()
Example #13
    def test_set_fields(self):
        # Tests setting fields.
        base = self.base.copy()
        mbase = base.view(mrecarray)
        mbase = mbase.copy()
        mbase.fill_value = (999999, 1e20, 'N/A')
        # Change the data, the mask should be conserved
        mbase.a._data[:] = 5
        assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
        assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
        # Change the elements, and the mask will follow
        mbase.a = 1
        assert_equal(mbase['a']._data, [1]*5)
        assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
        # Used to be _mask; now it's recordmask
        assert_equal(mbase.recordmask, [False]*5)
        assert_equal(mbase._mask.tolist(),
                     np.array([(0, 0, 0),
                               (0, 1, 1),
                               (0, 0, 0),
                               (0, 0, 0),
                               (0, 1, 1)],
                              dtype=bool))
        # Set a field to mask ........................
        mbase.c = masked
        # Used to be mask, and now it's still mask!
        assert_equal(mbase.c.mask, [1]*5)
        assert_equal(mbase.c.recordmask, [1]*5)
        assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
        assert_equal(ma.getdata(mbase['c']), [asbytes('N/A')]*5)
        assert_equal(mbase._mask.tolist(),
                     np.array([(0, 0, 1),
                               (0, 1, 1),
                               (0, 0, 1),
                               (0, 0, 1),
                               (0, 1, 1)],
                              dtype=bool))
        # Set fields by slices .......................
        mbase = base.view(mrecarray).copy()
        mbase.a[3:] = 5
        assert_equal(mbase.a, [1, 2, 3, 5, 5])
        assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
        mbase.b[3:] = masked
        assert_equal(mbase.b, base['b'])
        assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
        # Set fields globally..........................
        ndtype = [('alpha', '|S1'), ('num', int)]
        data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
        rdata = data.view(MaskedRecords)
        val = ma.array([10, 20, 30], mask=[1, 0, 0])

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            rdata['num'] = val
            assert_equal(rdata.num, val)
            assert_equal(rdata.num.mask, [1, 0, 0])
Example #14
 def test_set_mask(self):
     base = self.base.copy()
     mbase = base.view(mrecarray)
     # Set the mask to True .......................
     mbase.mask = masked
     assert_equal(ma.getmaskarray(mbase["b"]), [1] * 5)
     assert_equal(mbase["a"]._mask, mbase["b"]._mask)
     assert_equal(mbase["a"]._mask, mbase["c"]._mask)
     assert_equal(mbase._mask.tolist(), np.array([(1, 1, 1)] * 5, dtype=bool))
     # Delete the mask ............................
     mbase.mask = nomask
     assert_equal(ma.getmaskarray(mbase["c"]), [0] * 5)
     assert_equal(mbase._mask.tolist(), np.array([(0, 0, 0)] * 5, dtype=bool))
Example #15
File: MV2.py Project: NESII/uvcdat
 def outer (self, a, b):
     "Return the function applied to the outer product of a and b."
     a = _makeMaskedArg(a)
     b = _makeMaskedArg(b)
     ma = getmask(a)
     mb = getmask(b)
     if ma is nomask and mb is nomask:
         m = None
     else:
         ma = getmaskarray(a)
         mb = getmaskarray(b)
         m = logical_or.outer(ma, mb)
     d = numpy.maximum.outer(filled(a), filled(b))
     return TransientVariable(d, mask=m)
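The mask combination used by outer, seen in isolation with toy inputs: the result is masked wherever either operand is masked.

import numpy as np
import numpy.ma as ma

a = ma.array([1, 2], mask=[False, True])
b = ma.array([3, 4, 5], mask=[True, False, False])

m = np.logical_or.outer(ma.getmaskarray(a), ma.getmaskarray(b))
d = np.maximum.outer(a.filled(0), b.filled(0))
print(ma.array(d, mask=m))
# [[-- 4 5]
#  [-- -- --]]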
Example #16
def computeEdgeDistances(uvframe):
    """
    Create a 2D matrix @edgedists as a companion to @uvframe,
    containing for each pixel a distance to the nearest edge (more precisely,
    the nearest 0-valued pixel).

    We compute @edgedists in a floodfill fashion spreading from zero-areas
    to the middle of one-areas iteratively, with distances approximated
    on the pixel grid.

    We return a tuple (edgedists, edgedirs), where edgedirs contains information
    about the relative offset of the nearest edge piece.
    """
    # edgedists is a masked array, with only already computed values unmasked;
    # at first, uvframe == 0 already are computed (as zeros)
    edgedists = ma.array(numpy.zeros(uvframe.shape, dtype=float), mask=(uvframe > 0))
    edgedirs = ma.array(numpy.zeros(uvframe.shape, dtype=(float, 2)), mask=[[[j, j] for j in i] for i in uvframe > 0])
    #numpy.set_printoptions(threshold=numpy.nan)
    #print edgedists
    #print edgedirs

    flood_spread = scipy.ndimage.morphology.generate_binary_structure(2, 2)
    neighbor_ofs = [[-1,-1],[-1,0],[-1,1], [0,-1],[0,0],[0,1],  [1,-1],[1,0],[1,1]]
    s2 = math.sqrt(2)
    neighbor_dist = [s2,1,s2, 1,0,1, s2,1,s2]

    while ma.getmaskarray(edgedists).any():
        # scan masked area for any elements that have unmasked neighbors
        done_mask = numpy.invert(ma.getmaskarray(edgedists))
        todo_mask = done_mask ^ scipy.ndimage.binary_dilation(done_mask, flood_spread)
        #print_mask(todo_mask)
        for i in numpy.transpose(numpy.nonzero(todo_mask)):
            neighbor_val = ma.array([
                    edge_dist_if_within(edgedists, i + ofs) + dist
                        for ofs, dist in zip(neighbor_ofs, neighbor_dist)
                ])
            nearestnei = ma.argmin(neighbor_val)

            # We assert that this update never affects values of other fields
            # visited later in this iteration of the floodfill
            edgedists[tuple(i)] = neighbor_val[nearestnei]

            nearestneicoord = i + neighbor_ofs[nearestnei]
            #print "-", nearestneicoord, edgedirs[tuple(nearestneicoord)]
            edgedirs[tuple(i)] = edgedirs[tuple(nearestneicoord)] + tuple(neighbor_ofs[nearestnei])
            #print "+", i, edgedirs[tuple(i)]

    return (edgedists.data, edgedirs.data)
Example #17
 def get_data(self, file_middle):
     params = self.params
     cal_weights = params['cal_weights']
     pol_weights = params['pol_weights']
     n_time = self.n_time
     window = params['window']
     subtract_slope = params['subtract_slope']
     input_fname = (params['input_root'] + file_middle +
                        params['input_end'])
     # Read in the data.
     Reader = core.fitsGBT.Reader(input_fname)
     Blocks = Reader.read(params['scans'], params['IFs'],
                          force_tuple=True)
     # On the first pass, set the channel width.
     if not hasattr(self, "chan_width"):
         self.chan_width = Blocks[0].field['CDELT1']
     # Loop over the Blocks to select the channel polarizations and cal
     # state that we want to process.
     for Data in Blocks:
         data = Data.data
         data_selected = ma.zeros((Data.dims[0], 1, 1, Data.dims[3]),
                                   dtype=float)
         data_selected.mask = ma.getmaskarray(data_selected)
         for ii in range(len(pol_weights)) :
             for jj in range(len(cal_weights)) :
                 data_selected[:,0,0,:] += (data[:,ii,jj,:]
                                            * pol_weights[ii]
                                            * cal_weights[jj])
         Data.set_data(data_selected)
     # Convert the data to the proper format and return it.
     return make_masked_time_stream(Blocks, n_time, window=window, 
                                    return_means=True, 
                                    subtract_slope=subtract_slope)
Example #18
def local_background(image, pos, radx, rady):

    sum = 0
    pixel_count = 0
    mask = ma.getmaskarray(image)


    for x in range (pos[1]-4*radx,pos[1]+4*radx):
        for y in range(pos[0]-4*rady, pos[0]+4*rady):
            a = (x-pos[1])/radx
            b = (y-pos[0])/rady
            minor = pow(a, 2.0)
            major = pow(b, 2.0)
            oval = minor + major
            if oval >= 1 and oval <=8 and 0<x<2570 and 0<y<4611:
                if mask[y,x] == False:
                    sum += image[y,x]
                    pixel_count += 1

    if pixel_count == 0:
        pixel_count = 1
    bg = sum / pixel_count
    return bg, pixel_count
Example #19
def addfield(mrecord, newfield, newfieldname=None):
    """Adds a new field to the masked record array, using `newfield` as data
and `newfieldname` as name. If `newfieldname` is None, the new field name is
set to 'fi', where `i` is the number of existing fields.
    """
    _data = mrecord._data
    _mask = mrecord._mask
    if newfieldname is None or newfieldname in reserved_fields:
        newfieldname = 'f%i' % len(_data.dtype)
    newfield = ma.array(newfield)
    # Get the new data ............
    # Create a new empty recarray
    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
    newdata = recarray(_data.shape, newdtype)
    # Add the existing fields
    [newdata.setfield(_data.getfield(*f), *f)
         for f in _data.dtype.fields.values()]
    # Add the new field
    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
    newdata = newdata.view(MaskedRecords)
    # Get the new mask .............
    # Create a new empty recarray
    newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
    newmask = recarray(_data.shape, newmdtype)
    # Add the old masks
    [newmask.setfield(_mask.getfield(*f), *f)
         for f in _mask.dtype.fields.values()]
    # Add the mask of the new field
    newmask.setfield(getmaskarray(newfield),
                     *newmask.dtype.fields[newfieldname])
    newdata._mask = newmask
    return newdata
Example #20
def test_mask(N=4):
    l = 5

    x = np.arange(N)
    y = np.arange(N)
    X, Y = np.meshgrid(x, y)

    # input ndarray -> output ndarray
    Z = np.ones(X.shape)
    h = wmean_2D(X, Y, Z, l=l)
    assert type(h) is np.ndarray

    # input MA array -> output MA array
    Z = ma.array(Z)
    h = wmean_2D(X, Y, Z, l=l)
    assert type(h) == ma.MaskedArray
    # Input MA and mask==False -> Output MA and mask==False
    assert ~h.mask.any()

    # Only the masked inputs should return as masked.
    Z.mask = ma.getmaskarray(Z)
    Z.mask[0, 0] = True
    h = wmean_2D(X, Y, Z, l=l)
    assert h[0, 0].mask == True
    assert ~h[1:, 1:].mask.any()
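The Z.mask = ma.getmaskarray(Z) idiom used above, in isolation: it expands a nomask scalar into a full boolean array so that individual cells can then be masked in place.

import numpy as np
import numpy.ma as ma

Z = ma.array(np.ones((3, 3)))   # fresh MA, mask is ma.nomask (a scalar)
Z.mask = ma.getmaskarray(Z)     # expand to a full boolean array
Z.mask[0, 0] = True             # now single cells can be masked in place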
Example #21
def pixel_counts(data, ra_inds, dec_inds, pixel_hits, map_shape=(-1, -1)):
    """Counts the hits on each unique pixel.

    Returns pix_list, a list of tuples; each tuple is a (ra, dec) index of a
    map pixel hit on this scan.  The list only contains unique entries.  The
    array pixel_hits (preallocated for performance) is
    filled with the number of hits on each of these pixels as a function of
    frequency index. Only the entries pixel_hits[:len(pix_list), :]
    are meaningful.
    """

    if ra_inds.shape != dec_inds.shape or ra_inds.ndim != 1:
        raise ValueError("Ra and Dec arrays not properly shaped.")
    if pixel_hits.shape[-1] != data.shape[-1] or pixel_hits.shape[0] < len(ra_inds):
        raise ValueError("counts not allowcated to right shape.")

    pix_list = []
    for ii in range(len(ra_inds)):
        pix = (ra_inds[ii], dec_inds[ii])
        if (
            (map_shape[0] > -1 and pix[0] >= map_shape[0])
            or (map_shape[1] > -1 and pix[1] >= map_shape[1])
            or pix[0] < 0
            or pix[1] < 0
        ):
            continue
        elif pix not in pix_list:
            pix_list.append(pix)
        unmasked_freqs = sp.logical_not(ma.getmaskarray(data)[ii, :])
        pixel_hits[pix_list.index(pix), unmasked_freqs] += 1

    return pix_list
Example #22
def get_storage_change(len_var, var_names, signs, dirs, storage):
    for idx in np.arange(0, len_var):
        data = get_data(dirs[idx])
        storage += data * signs[idx]
    mask = ma.getmaskarray(data)
    storage_change = ma.masked_array(storage, mask)
    return storage_change[0]
Example #23
    def start(self, observation):
        '''
        Handle the first iteration.
        
        Keyword Arguments:
        
        observation (int) The index of the most recent observation [0:num_states]
        
        Returns: 
        next_action (int) The index of the next action to take [0:num_actions]
        '''
        next_state = observation
        
        # the valid actions are at unmasked array positions, so compute the inverse
        # of the mask of the relevant row
        valid_action_flags = ~ma.getmaskarray(self._q_table[next_state,:])
        
        # get the indices of the valid actions and pick one at random
        next_action = random.choice(np.flatnonzero(valid_action_flags))

        # store off state for the next iteration
        self._last_state = next_state
        self._last_action = next_action
        
        # increment state visitation table
        self.update_visitation_table(next_state, next_action)
        self._epoch_num +=1
          
        return next_action
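A toy sketch of the valid-action selection used above, with an assumed single Q-table row where invalid actions are masked.

import random

import numpy as np
import numpy.ma as ma

# A toy Q-table row: actions 1 and 3 are invalid (masked).
q_row = ma.array([0.1, 0.0, 0.4, 0.0], mask=[False, True, False, True])

valid_action_flags = ~ma.getmaskarray(q_row)
print(np.flatnonzero(valid_action_flags))           # [0 2]
next_action = random.choice(np.flatnonzero(valid_action_flags))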
Example #24
def test_mask(N=10):
    l = 5
    t = np.arange(N)
    y = np.ones(N)

    # Input ndarray -> output ndarray
    h = maud.wmean_1D(y, t=t, l=l)
    assert type(h) is np.ndarray
    h = cmaud.wmean_1D(y, t=t, l=l)
    assert type(h) is np.ndarray

    y = ma.array(y)
    h = maud.wmean_1D(y, t=t, l=l)
    # Input MA -> output MA
    assert type(h) == ma.MaskedArray
    # Input MA and mask==False -> Output MA and mask==False
    assert ~h.mask.any()
    h = cmaud.wmean_1D(y, t=t, l=l)
    assert type(h) == ma.MaskedArray
    assert ~h.mask.any()

    y.mask = ma.getmaskarray(y)
    y.mask[0] = True
    h = maud.wmean_1D(y, t=t, l=l)
    # The masked values @ input will be masked @ output
    assert h[0].mask == True
    assert ~h[1:].mask.any()
    h = cmaud.wmean_1D(y, t=t, l=l)
    assert h[0].mask == True
    assert ~h[1:].mask.any()
Example #25
 def test_off_map(self):
     Data = self.blocks[0]
     Data.calc_freq()
     map = self.map
     map[:,:,:] = 0.0
     Data.data[:,:,:,:] = 0.0
     # Rig the pointing but put one off the map.
     def rigged_pointing() :
         Data.ra = map.get_axis('ra')[range(10)]
         Data.dec = map.get_axis('dec')[range(10)]
         Data.ra[3] = Data.ra[3] - 8.0
     Data.calc_pointing = rigged_pointing
     smd.sub_map(Data, map)
     self.assertTrue(sp.alltrue(ma.getmaskarray(Data.data[3,:,:,:])))
     self.assertTrue(sp.alltrue(sp.logical_not(
                 ma.getmaskarray((Data.data[[0,1,2,4,5,6,7,8,9],:,:,:])))))
Example #26
File: core1D.py Project: castelao/maud
def wmean_bandpass_1D_serial(data, lshorterpass, llongerpass, t=None,
        method='hann', axis=0):
    """ Equivalent to wmean_1D_serial, but it is a bandpass

        Input:
            - data: np.array or ma.masked_array, nD
            - lshorterpass: The size of the highpass filter, i.e. shorter
                wavelengths are preserved. It is in the same unit as t.
            - llongerpass: The size of the lowpass filter, i.e. longer
                wavelengths are preserved. It is in the same unit as t.
            - t: The scale of the chosen axis, 1D. If not
                defined, it will be considered a sequence.
            - method: ['hann', 'hamming', 'blackman']
                Defines the weight function type
            - axis: Dimension in which the filter will be applied
    """
    assert False, "There is a BUG here"

    assert axis <= data.ndim, "Invalid axis!"

    # If necessary, move the axis to be filtered for the first axis
    if axis != 0:
        data_smooth = wmean_bandpass_1D_serial(data.swapaxes(0, axis),
                lshorterpass = lshorterpass,
                llongerpass = llongerpass,
                t = t,
                method = method,
                axis = 0)

        return data_smooth.swapaxes(0, axis)
    # Below here, the filter will be always applied on axis=0

    # If t is not given, creates a regularly spaced t
    if t is None:
        print "The scale along the choosed axis weren't defined. I'll consider a constant sequence."
	t = np.arange(data.shape[axis])

    assert t.shape == (data.shape[axis],), "Invalid size of t."

    # ----
    winfunc = window_func(method)

    data_smooth = ma.masked_all(data.shape)

    if data.ndim==1:
        (I,) = np.nonzero(~ma.getmaskarray(data))
        for i in I:
            # First remove the high frequency
            tmp = _convolve_1D(t[i], t, llongerpass, winfunc, data)
            # Then remove the low frequency
            data_smooth[i] = tmp - \
                    _convolve_1D(t[i], t, lshorterpass, winfunc, tmp)

    else:
        I = data.shape[1]
        for i in range(I):
            data_smooth[:,i] = wmean_bandpass_1D_serial(data[:,i],
                    lshorterpass, llongerpass, t, method, axis)

    return data_smooth
Example #27
    def test(self):
        self.flags = {}
        try:
            threshold = self.cfg['threshold']
        except KeyError:
            print("Deprecated cfg format. It should contain a threshold item.")
            threshold = self.cfg

        try:
            flag_good = self.cfg['flag_good']
            flag_bad = self.cfg['flag_bad']
        except KeyError:
            print("Deprecated cfg format. It should contain flag_good & flag_bad.")
            flag_good = 1
            flag_bad = 4

        assert (np.size(threshold) == 1) and \
                (threshold is not None) and \
                (np.isfinite(threshold))   

        flag = np.zeros(self.data[self.varname].shape, dtype='i1')
        flag[np.nonzero(self.features['bin_spike'] > threshold)] = flag_bad
        flag[np.nonzero(self.features['bin_spike'] <= threshold)] = flag_good
        flag[ma.getmaskarray(self.data[self.varname])] = 9
        self.flags['bin_spike'] = flag
Example #28
    def _build_time(self):
        time = numpy.array([])
        mask = numpy.array([], dtype='bool')
        for d in self.dataset:
            time = numpy.append(time, d['time'].data)
            mask = numpy.append(mask, ma.getmaskarray(d['time']))
        self.data['time'] = ma.masked_array(time, mask)
Example #29
def add_data_2_map(data, ra_inds, dec_inds, map, noise_i=None, weight=1):
    """Add a data masked array to a map.
    
    This function also adds the weight to the noise matrix for diagonal noise.
    """

    ntime = len(ra_inds)
    shape = sp.shape(map)
    if len(dec_inds) != ntime or len(data[:, 0]) != ntime:
        raise ValueError("Time axis of data, ra_inds and dec_inds must be" " same length.")
    if noise_i is not None and map.shape != noise_i.shape:
        raise ValueError("Inverse noise array must be the same size as the map" " or None.")

    for time_ind in range(ntime):
        if (
            ra_inds[time_ind] >= 0
            and ra_inds[time_ind] < shape[0]
            and dec_inds[time_ind] >= 0
            and dec_inds[time_ind] < shape[1]
        ):
            # Get unmasked
            unmasked_inds = sp.logical_not(ma.getmaskarray(data[time_ind, :]))
            ind_map = (ra_inds[time_ind], dec_inds[time_ind], unmasked_inds)
            map[ind_map] += (weight * data)[time_ind, unmasked_inds]
            if noise_i is not None:
                if not hasattr(weight, "__iter__"):
                    noise_i[ind_map] += weight
                else:
                    noise_i[ind_map] += weight[unmasked_inds]
Example #30
 def calculate_theta(self, Xm, p_y_given_x):
     """Estimate marginal parameters from data and expected latent labels."""
     theta = []
     for i in range(self.n_visible):
         not_missing = np.logical_not(ma.getmaskarray(Xm)[:, i])
         theta.append(self.estimate_parameters(Xm.data[not_missing, i], p_y_given_x[:, not_missing]))
     return np.array(theta)
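A small sketch with made-up data of the per-column "observed rows" selection used in calculate_theta: the column's mask picks out the rows that were actually observed.

import numpy as np
import numpy.ma as ma

Xm = ma.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
              mask=[[False, True], [False, False], [True, False]])

i = 0
not_missing = np.logical_not(ma.getmaskarray(Xm)[:, i])
print(Xm.data[not_missing, i])   # [1. 3.] -- only the observed rows of column 0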
Example #31
def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
    """
    Extract from a masked rec array and create the manager.
    """

    # essentially process a record array then fill it
    fill_value = data.fill_value
    fdata = ma.getdata(data)
    if index is None:
        index = get_names_from_index(fdata)
        if index is None:
            index = ibase.default_index(len(data))
    index = ensure_index(index)

    if columns is not None:
        columns = ensure_index(columns)
    arrays, arr_columns = to_arrays(fdata, columns)

    # fill if needed
    new_arrays = []
    for fv, arr, col in zip(fill_value, arrays, arr_columns):
        mask = ma.getmaskarray(data[col])
        if mask.any():
            arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
            arr[mask] = fv
        new_arrays.append(arr)

    # create the manager
    arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
    if columns is None:
        columns = arr_columns

    mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)

    if copy:
        mgr = mgr.copy()
    return mgr
Example #32
def get_xy_values(self, order="C", asmasked=False):
    """Get X Y coordinate values as numpy 2D arrays."""
    nno = self.ncol * self.nrow

    ier, xvals, yvals = _cxtgeo.surf_xy_as_values(
        self.xori,
        self.xinc,
        self.yori,
        self.yinc * self.yflip,
        self.ncol,
        self.nrow,
        self.rotation,
        nno,
        nno,
        0,
    )
    if ier != 0:
        raise XTGeoCLibError(f"Error in surf_xy_as_values, error code: {ier}")

    # reshape
    xvals = xvals.reshape((self.ncol, self.nrow))
    yvals = yvals.reshape((self.ncol, self.nrow))

    if order == "F":
        xvals = np.array(xvals, order="F")
        yvals = np.array(yvals, order="F")

    if asmasked:
        tmpv = ma.filled(self.values, fill_value=np.nan)
        tmpv = np.array(tmpv, order=order)
        tmpv = ma.masked_invalid(tmpv)
        mymask = ma.getmaskarray(tmpv)
        xvals = ma.array(xvals, mask=mymask, order=order)
        yvals = ma.array(yvals, mask=mymask, order=order)

    return xvals, yvals
Example #33
    def calculate_mis(self, p_y_given_x, theta, Xm):
        mis = np.zeros((self.n_hidden, self.n_visible))
        sample = np.random.choice(np.arange(Xm.shape[0]),
                                  min(self.max_samples, Xm.shape[0]),
                                  replace=False)
        n_observed = np.sum(np.logical_not(ma.getmaskarray(Xm[sample])),
                            axis=0)

        n_samples, n_visible = Xm.shape
        memory_size = float(n_samples * n_visible * self.n_hidden *
                            self.dim_hidden * 64) / 1000**3  # GB
        batch_size = np.clip(int(self.ram * n_visible / memory_size), 1,
                             n_visible)
        for i in range(0, n_visible, batch_size):
            log_marg_x = self.calculate_marginals_on_samples(
                theta[i:i + batch_size, ...],
                Xm[sample, i:i +
                   batch_size])  # n_hidden, n_samples, n_visible, dim_hidden
            mis[:, i:i + batch_size] = np.einsum(
                'ijl,ijkl->ik',
                p_y_given_x[:, sample, :],
                log_marg_x,
                optimize=False) / n_observed[i:i + batch_size][np.newaxis, :]
        return mis  # MI in nats
Example #34
def addfield(mrecord, newfield, newfieldname=None):
    """Adds a new field to the masked record array, using `newfield` as data
and `newfieldname` as name. If `newfieldname` is None, the new field name is
set to 'fi', where `i` is the number of existing fields.
    """
    _data = mrecord._data
    _mask = mrecord._mask
    if newfieldname is None or newfieldname in reserved_fields:
        newfieldname = 'f%i' % len(_data.dtype)
    newfield = ma.array(newfield)
    # Get the new data ............
    # Create a new empty recarray
    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
    newdata = recarray(_data.shape, newdtype)
    # Add the existing fields
    [
        newdata.setfield(_data.getfield(*f), *f)
        for f in _data.dtype.fields.values()
    ]
    # Add the new field
    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
    newdata = newdata.view(MaskedRecords)
    # Get the new mask .............
    # Create a new empty recarray
    newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
    newmask = recarray(_data.shape, newmdtype)
    # Add the old masks
    [
        newmask.setfield(_mask.getfield(*f), *f)
        for f in _mask.dtype.fields.values()
    ]
    # Add the mask of the new field
    newmask.setfield(getmaskarray(newfield),
                     *newmask.dtype.fields[newfieldname])
    newdata._mask = newmask
    return newdata
Example #35
def obtain_overlap(in_files_lis, out_file, variable):
    fh_out = Dataset(out_file, "w")

    ma_lis = []
    for f_index, in_file in enumerate(in_files_lis):
        fh_in = Dataset(in_file, "r")
        ma_lis.append(ma.getmaskarray(fh_in.variables[variable][:]))

        if f_index == len(in_files_lis) - 1:
            for name, dim in fh_in.dimensions.items():
                fh_out.createDimension(name, len(dim))
            overlap_mask = np.logical_or.reduce(ma_lis)
            for v_name, varin in fh_in.variables.items():
                if v_name == 'lat' or v_name == 'lon':
                    outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
                    outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
                    outVar[:] = varin[:]
                else:
                    outVar = fh_out.createVariable(v_name, varin.datatype, varin.dimensions)
                    outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
                    outVar[:] = ma.array(varin[:], mask=overlap_mask)

        fh_in.close()
    fh_out.close()
Example #36
 def getImage(self, index):
     sub_path = self.getPath(index)
     path = '{0}/data/{1}-color.png'.format(self.dataset_root, sub_path)
     image = np.array(Image.open(path))
     if (self.IMAGE_CONTAINS_MASK):
         mask = image[:, :, 3:]
     image = image[:, :, :3]
     if (index >= self.len_real and index < self.len_grid):
         if (self.add_syn_background):
             label = np.expand_dims(
                 np.array(
                     Image.open('{0}/data/{1}-label.png'.format(
                         self.dataset_root, sub_path))), 2)
             mask_back = ma.getmaskarray(ma.masked_equal(label, 0))
             back_filename = random.choice(self.background)
             back = np.array(Image.open(back_filename).convert("RGB"))
             image = back * mask_back + image
         if (self.add_syn_noise):
             image = image + np.random.normal(
                 loc=0.0, scale=7.0, size=image.shape)
             image = image.astype(np.uint8)
     if (self.IMAGE_CONTAINS_MASK):
         image = np.concatenate([image, mask], 2)
     return image
Example #37
def broadcast(*args):
    def _mask_or(a, b):
        return ma.mask_or(a, b, shrink=True)

    args = [_safe_masked_invalid(arg) for arg in args]
    if any([ma.isMA(arg) for arg in args]):
        vars = [ma.getdata(var) for var in args]
        mvars = [ma.getmaskarray(var) for var in args]
        outargs = list(map(np.array, np.broadcast_arrays(*vars)))
        masks = list(map(np.array, np.broadcast_arrays(*mvars)))
        mask = reduce(_mask_or, masks)
    else:
        mask = ma.nomask
        # Using map(np.array, ...) to get contiguous copies.
        outargs = list(map(np.array, np.broadcast_arrays(*args)))
    if outargs[0].ndim == 0:
        scalar = True
        for arg in outargs:
            arg.shape = (1, )
        if mask is not ma.nomask:
            mask.shape = (1, )
    else:
        scalar = False
    return scalar, mask, outargs
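The mask-broadcasting step of broadcast in isolation, with toy inputs: each operand's full mask is broadcast to the common shape, then folded together with mask_or.

from functools import reduce

import numpy as np
import numpy.ma as ma

a = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
b = ma.array([[1.0], [2.0]])     # shape (2, 1), nothing masked

masks = [ma.getmaskarray(v) for v in (a, b)]
masks = list(map(np.array, np.broadcast_arrays(*masks)))
combined = reduce(lambda m1, m2: ma.mask_or(m1, m2, shrink=True), masks)
print(combined)
# [[False  True False]
#  [False  True False]]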
Example #38
def _acf(x, mode):
    """Computes the auto-correlation function of the time series x.
Note that the computations are performed on anomalies (deviations from average).
Gaps in the series are filled first, the anomalies are then computed and the missing
values filled with 0.

:Parameters:
    `x` : TimeSeries
        Time series.
    """
    x = ma.array(x, copy=False, subok=True, dtype=float)
    if x.ndim > 1:
        raise ValueError("The input array should be 1D only.")
    # make sure there's no gap in the data
    if isinstance(x, TimeSeries) and x.has_missing_dates():
        x = ts.fill_missing_dates(x)
    #
    m = np.logical_not(ma.getmaskarray(x)).astype(int)
    x = x.anom().filled(0).view(ndarray)
    xx = (x*x)
    n = len(x)
    #
    _avf = np.correlate(x,x,'full')[n-1:]
    if mode:
        dnm_ = np.fromiter((np.sum(x[k:]*x[:-k])/np.sum(m[k:]*xx[:-k])
                            for k in range(1,n)),
                            dtype=float)
    else:
        dnm_ = np.fromiter((np.sum(x[k:]*x[:-k])/\
                            np.sqrt((m[k:]*xx[:-k]).sum() * (m[:-k]*xx[k:]).sum())
                            for k in range(1,n)),
                           dtype=float)
    poslags = _avf[1:]/dnm_
    return ma.fix_invalid(np.concatenate([np.array([1.]),
                                          poslags,
                                          poslags[::-1]]))
Example #39
def global_range(data, v, cfg):
    """
    """
    assert cfg['minval'] < cfg['maxval'], \
            "Global Range(%s), minval (%s) must be smaller than maxval(%s)" \
            % (v, cfg['minval'], cfg['maxval'])

    # Default flag 0, no QC.
    flag = np.zeros(data[v].shape, dtype='i1')

    # Flag good inside acceptable range
    ind = (data[v] >= cfg['minval']) & \
            (data[v] <= cfg['maxval'])
    flag[np.nonzero(ind)] = 1

    # Flag bad outside acceptable range
    ind = (data[v] < cfg['minval']) | \
        (data[v] > cfg['maxval'])
    flag[np.nonzero(ind)] = 4

    # Flag as 9 any masked input value
    flag[ma.getmaskarray(data[v])] = 9

    return flag
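A hypothetical call, assuming data is a dict-like of masked arrays keyed by variable name; the values and range below are made up.

import numpy as np
import numpy.ma as ma

data = {'TEMP': ma.array([5.0, 38.0, -3.0, 12.0],
                         mask=[False, False, False, True])}
cfg = {'minval': -2.5, 'maxval': 40.0}

print(global_range(data, 'TEMP', cfg))   # [1 1 4 9]: good, good, below minval, masked input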
Example #40
    def test(self):
        """

                I slightly modified the Goring & Nikora 2002. It is
                  expected that CTD profiles has a typical depth
                  structure, with a range between surface and bottom.
        """
        self.flags = {}
        try:
            threshold = self.cfg["threshold"]
        except KeyError:
            print("Deprecated cfg format. It should contain a threshold item.")
            threshold = self.cfg["k"]

        assert ((np.size(threshold) == 1) and (threshold is not None)
                and (np.isfinite(threshold)))

        flag = np.zeros(self.data[self.varname].shape, dtype="i1")
        flag[np.nonzero(
            self.features["tukey53H_norm"] > threshold)] = self.flag_bad
        flag[np.nonzero(
            self.features["tukey53H_norm"] <= threshold)] = self.flag_good
        flag[ma.getmaskarray(self.data[self.varname])] = 9
        self.flags["tukey53H_norm"] = flag
Example #41
def human_calibrate_mistakes(datadir, varname, cfg=None, niter=5):
    """
    """
    import pandas as pd

    db = ProfilesQCPandasCollection(datadir, cfg=cfg, saveauxiliary=True)

    assert varname in db.keys()

    data = db.data
    features = db.auxiliary[varname]
    flags = combined_flag(db.flags[varname])
    binflags = flags2bin(np.array(flags))

    result = calibrate4flags(db.flags[varname],
                             features,
                             q=0.90,
                             verbose=False)

    #profileslist = aux['profileid'].iloc[mistake].iloc[
    #        np.absolute(prob[mistake] - p_optimal).argsort()
    #        ].unique()

    error_log = [{
        'err': result['n_err'],
        'err_ratio': result['err_ratio'],
        'p_optimal': result['p_optimal']
    }]

    human_flag = ma.masked_all(len(flags), dtype='object')

    for i in range(niter):
        # Failures from AD to reproduce flags
        mistake = (result['false_positive'] | result['false_negative'])
        # Only the ones that weren't already flagged by a human
        mistake = mistake & ma.getmaskarray(human_flag)
        profileids = np.unique(data['profileid'].iloc[mistake])
        # In the future, order by how badly AD mistook
        #profileids = data['profileid'].iloc[mistake].iloc[
        #    np.absolute(prob[mistake] - p_optimal).argsort()
        #    ].unique()
        #derr = np.absolute(prob[np.nonzero(mistake)] - p_optimal)
        #ind_toeval = np.nonzero(mistake & ~doubt)
        #profileids = data['profileid'].iloc[ind_toeval].iloc[derr.argsort()
        #    ].unique()
        if len(profileids) == 0:
            break
        # 5 random profiles with mistakes
        for pid in np.random.permutation(profileids)[:5]:
            print("Profile: %s" % pid)
            ind_p = data.profileid == pid
            h = HumanQC().eval(data[varname][ind_p],
                               data['PRES'][ind_p],
                               baseflag=binflags[np.array(ind_p)],
                               fails=mistake[np.array(ind_p)],
                               humanflag=human_flag[np.array(ind_p)])

            #ind_humanqc[np.nonzero(ind_p)[0][h == 'good']] = True
            #flags.loc[np.nonzero(ind_p)[0][h == 'good'], 'human'] = 1
            #ind_humanqc[np.nonzero(ind_p)[0][h == 'bad']] = False
            #flags.loc[np.nonzero(ind_p)[0][h == 'bad'], 'human'] = 4
            #flags.loc[np.nonzero(ind_p)[0][h == 'doubt'], 'human'] = 6
            #doubt[np.nonzero(ind_p)[0][h == 'doubt']] = True
            #ind_humanqc.mask[np.nonzero(ind_p)[0][h == 'doubt']] = True

            # Update human_flag only at the new values
            human_flag[np.nonzero(ind_p)[0][~h.mask]] = h[~h.mask]

        flags[human_flag == 'good'] = 1
        flags[human_flag == 'bad'] = 4
        #flags[human_flag == 'doubt'] = 1
        #doubt[human_flag == 'doubt'] = True

        # Update binflags
        binflags = flags2bin(flags)

        result = calibrate4flags(flags, features, q=0.90, verbose=False)

        error_log.append({
            'err': result['n_err'],
            'err_ratio': result['err_ratio'],
            'p_optimal': result['p_optimal'],
            'tot_misfit': result['tot_misfit']
        })

        print(error_log[-2])
        print(error_log[-1])

    result['human_flag'] = human_flag
    result['ind_humanqc'] = binflags
    result['error_log'] = error_log
    #return {'ind_humanqc': binflags, 'error_log': error_log,
    #        'result': result}
    return result
Example #42
    def plot_surface(
        self,
        surf,
        minvalue=None,
        maxvalue=None,
        contourlevels=None,
        xlabelrotation=None,
        colormap=None,
        logarithmic=False,
    ):  # pylint: disable=too-many-statements
        """Input a surface and plot it."""
        # need a deep copy to avoid changes in the original surf

        logger.info("The key contourlevels %s is not in use", contourlevels)

        usesurf = surf.copy()
        if usesurf.yflip < 0:
            usesurf.swapaxes()

        if abs(surf.rotation) > 0.001:
            usesurf.unrotate()

        xi, yi, zi = usesurf.get_xyz_values()

        zimask = ma.getmaskarray(zi).copy()  # yes need a copy!

        legendticks = None
        if minvalue is not None and maxvalue is not None:
            minv = float(minvalue)
            maxv = float(maxvalue)

            step = (maxv - minv) / 10.0
            legendticks = []
            for i in range(10 + 1):
                llabel = float("{0:9.4f}".format(minv + step * i))
                legendticks.append(llabel)

            zi.unshare_mask()
            zi[zi < minv] = minv
            zi[zi > maxv] = maxv

            # need to restore the mask:
            zi.mask = zimask

            # note use surf.min, not usesurf.min here ...
            notetxt = ("Note: map values are truncated from [" +
                       str(surf.values.min()) + ", " + str(surf.values.max()) +
                       "] " + "to interval [" + str(minvalue) + ", " +
                       str(maxvalue) + "]")

            self._fig.text(0.99,
                           0.02,
                           notetxt,
                           ha="right",
                           va="center",
                           fontsize=8)

        logger.info("Legendticks: %s", legendticks)

        if minvalue is None:
            minvalue = usesurf.values.min()

        if maxvalue is None:
            maxvalue = usesurf.values.max()

        # this will override current instance colormap locally, and is
        # therefore reset afterwards
        keepcolor = self.colormap
        if colormap is not None:
            self.colormap = colormap

        levels = np.linspace(minvalue, maxvalue, self.contourlevels)
        logger.debug("Number of contour levels: %s", levels)

        plt.setp(self._ax.xaxis.get_majorticklabels(), rotation=xlabelrotation)

        # zi = ma.masked_where(zimask, zi)
        # zi = ma.masked_greater(zi, xtgeo.UNDEF_LIMIT)
        logger.info("Current colormap is %s, requested is %s", self.colormap,
                    colormap)
        logger.info("Current colormap name is %s", self.colormap.name)

        if ma.std(zi) > 1e-07:
            uselevels = levels
        else:
            uselevels = 1

        try:
            if logarithmic is False:
                locator = None
                ticks = legendticks
                im = self._ax.contourf(xi,
                                       yi,
                                       zi,
                                       uselevels,
                                       locator=locator,
                                       cmap=self.colormap)

            else:
                logger.info("use LogLocator")
                locator = ticker.LogLocator()
                ticks = None
                uselevels = None
                im = self._ax.contourf(xi,
                                       yi,
                                       zi,
                                       locator=locator,
                                       cmap=self.colormap)

            self._fig.colorbar(im, ticks=ticks)
        except ValueError as err:
            logger.warning("Could not make plot: %s", err)

        plt.gca().set_aspect("equal", adjustable="box")
        self.colormap = keepcolor
Example #43
def calibrate4flags(flags, features, q=0.90, verbose=False):
    """ Adjust coeficients for Anomaly Detection to best reproduce given flags

        Inputs:
            flag_ref: Reference index. What the Anomaly Detection will try
                   to reproduce. Uses the True and Falses from flag_ref
                   to partition the data to be used to fit, to adjust
                   and to estimate the error.
            qctests: The tests used by the Anomaly Detection. One curve will
                   be fit for each test.
            aux: The auxiliary tests results from the ProfileQCCollection. It
                   is expected that the qctests are present in aux.
            q: The top q extreme tests results to be used on Anom. Detect.
                 For example q=0 will use all the data, while q=0.9 (default)
                 will use the percentile of 0.9, i.e. the top 10% values.

            Output: Returns a dictionary with
                err:
                err_ratio:
                false_negative:
                false_positive:
                p_optimal:
                params:

            Use the functions:
                split_data_groups()
                fit_tests()
                estimate_anomaly()
                estimate_p_optimal()

    """
    if hasattr(flags, 'keys'):
        flags = combined_flag(flags)

    assert not hasattr(flags, 'keys')
    assert hasattr(features, 'keys')
    assert len(features[features.keys()[0]]) == len(flags)

    indices = split_data_groups(flags)
    params = fit_tests(features[indices['fit']], q=q)
    prob = estimate_anomaly(features, params)

    if verbose is True:
        pylab.hist(prob)
        pylab.show()

    binflags = flags2bin(flags)
    p_optimal, test_err = estimate_p_optimal(prob[indices['test']],
                                             binflags[indices['test']])

    # Guarantee that the false_* indices will be np.array
    false_negative = (prob < p_optimal) & binflags
    false_negative[ma.getmaskarray(false_negative)] = False
    false_negative = np.array(false_negative)
    false_positive = (prob > p_optimal) & ~binflags
    false_positive[ma.getmaskarray(false_positive)] = False
    false_positive = np.array(false_positive)

    mistake = false_positive | false_negative

    # I can extract only .data, since split_data_groups already eliminated
    #   all non valid positions.
    #err = np.nonzero(false_negative)[0].size + \
    #        np.nonzero(false_positive)[0].size
    tot_misfit = np.nonzero(mistake)[0].size
    n_err = float(np.nonzero(mistake[indices['err']])[0].shape[0])
    #err_ratio = float(err)/prob[indices['ind_err']].size
    err_ratio = n_err / indices['err'].astype('i').sum()
    #false_negative = (prob < p_optimal) & \
    #    (flag_ref.data is True) & (ma.getmaskarray(flag_ref) is False)
    #false_positive = (prob > p_optimal) & \
    #    (flag_ref.data is False) & (ma.getmaskarray(flag_ref) is False)

    output = {
        'false_negative': false_negative,
        'false_positive': false_positive,
        'prob': prob,
        'p_optimal': p_optimal,
        'tot_misfit': tot_misfit,
        'n_err': n_err,
        'err_ratio': err_ratio,
        'params': params
    }

    return output
Example #44
    def DenseFusion(self, img, depth, posecnn_res):
        my_result_wo_refine = []

        itemid = 1  # simplified for single-label detection; if multi-label is used, check DFYW3.py for more

        depth = np.array(depth)
        # img = img

        seg_res = posecnn_res

        x1, y1, x2, y2 = seg_res["box"]
        banana_bbox_draw = self.posecnn.get_box_rcwh(seg_res["box"])
        rmin, rmax, cmin, cmax = int(y1), int(y2), int(x1), int(x2)
        try:
            # depth may carry 3 identical RGB-style channels; keep one
            depth = depth[:, :, 1]
        except IndexError:
            pass
        depth = np.nan_to_num(depth)  #DIY
        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))  # ok
        mask_depth_nonzeros = mask_depth[:].nonzero()
        label_banana = np.squeeze(seg_res["mask"])
        label_banana = ma.getmaskarray(ma.masked_greater(label_banana, 0.5))
        label_banana_nonzeros = label_banana.flatten().nonzero()

        mask_label = ma.getmaskarray(ma.masked_equal(
            label_banana, itemid))  # label from banana label
        mask_label_nonzeros = mask_label[:].nonzero()

        mask = mask_label * mask_depth

        mask_nonzeros = mask[:].flatten().nonzero()
        mask_target = mask[rmin:rmax, cmin:cmax]
        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        res_len_choose = len(choose)
        if len(choose) > self.num_points:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num_points] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            # print("(?)len of choose is 0, check error")
            print("Info, DenseFusion: len(choose)=", len(choose))
            # return "ERROR, img broken (?)"
            # choose = np.pad(choose, (0, self.num_points - len(choose)), 'wrap')
            return None

        depth_masked = depth[rmin:rmax,
                             cmin:cmax].flatten()[choose][:,
                                                          np.newaxis].astype(
                                                              np.float32)
        xmap_masked = self.xmap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])
        pt2 = depth_masked / self.cam_scale
        pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
        pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)

        img_masked = np.array(img)[:, :, :3]
        img_masked = np.transpose(img_masked, (2, 0, 1))
        img_masked = img_masked[:, rmin:rmax, cmin:cmax]

        cloud = torch.from_numpy(cloud.astype(np.float32))
        choose = torch.LongTensor(choose.astype(np.int32))
        img_masked = self.norm(torch.from_numpy(img_masked.astype(np.float32)))
        index = torch.LongTensor([itemid - 1])

        cloud = Variable(cloud).cuda()
        choose = Variable(choose).cuda()
        img_masked = Variable(img_masked).cuda()
        index = Variable(index).cuda()

        cloud = cloud.view(1, self.num_points, 3)
        img_masked = img_masked.view(1, 3,
                                     img_masked.size()[1],
                                     img_masked.size()[2])

        pred_r, pred_t, pred_c, emb = self.estimator(img_masked, cloud, choose,
                                                     index)
        pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, self.num_points, 1)

        pred_c = pred_c.view(self.bs, self.num_points)
        how_max, which_max = torch.max(pred_c, 1)
        pred_t = pred_t.view(self.bs * self.num_points, 1, 3)
        points = cloud.view(self.bs * self.num_points, 1, 3)

        my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
        my_t = (points + pred_t)[which_max[0]].view(-1).cpu().data.numpy()
        my_pred = np.append(my_r, my_t)
        my_result_wo_refine.append(my_pred.tolist())

        my_result = []
        for ite in range(0, self.iteration):
            T = Variable(torch.from_numpy(
                my_t.astype(np.float32))).cuda().view(1, 3).repeat(
                    self.num_points,
                    1).contiguous().view(1, self.num_points, 3)
            my_mat = quaternion_matrix(my_r)
            R = Variable(torch.from_numpy(my_mat[:3, :3].astype(
                np.float32))).cuda().view(1, 3, 3)
            my_mat[0:3, 3] = my_t

            new_cloud = torch.bmm((cloud - T), R).contiguous()
            pred_r, pred_t = self.refiner(new_cloud, emb, index)
            pred_r = pred_r.view(1, 1, -1)
            pred_r = pred_r / (torch.norm(pred_r, dim=2).view(1, 1, 1))
            my_r_2 = pred_r.view(-1).cpu().data.numpy()
            my_t_2 = pred_t.view(-1).cpu().data.numpy()
            my_mat_2 = quaternion_matrix(my_r_2)

            my_mat_2[0:3, 3] = my_t_2
            my_mat_final = np.dot(my_mat, my_mat_2)
            my_r_final = copy.deepcopy(my_mat_final)
            my_r_final[0:3, 3] = 0
            my_r_final = quaternion_from_matrix(my_r_final, True)
            my_t_final = np.array(
                [my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]])

            my_pred = np.append(my_r_final, my_t_final)
            my_result.append(my_pred.tolist())
        my_result_np = np.array(my_result)
        my_result_mean = np.mean(my_result_np, axis=0)
        my_r = my_result_mean[:4]
        my_t = my_result_mean[4:]
        my_r_quaternion = my_r
        return my_r_quaternion, my_t
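An idiom worth isolating before the next examples reuse it: ma.masked_not_equal(depth, 0) masks every nonzero pixel, so its mask array is True exactly where depth is valid, and multiplying boolean masks acts as a logical AND. A minimal standalone sketch with synthetic arrays:

import numpy as np
import numpy.ma as ma

depth = np.array([[0, 812], [640, 0]])
label = np.array([[1, 1], [0, 1]])

mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))  # True where depth != 0
mask_label = ma.getmaskarray(ma.masked_equal(label, 1))      # True where label == 1
mask = mask_label * mask_depth                               # logical AND of the two
print(mask)   # only the pixel with nonzero depth AND label == 1 is True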
Example #45
def filter_cal_scale(Data,
                     size,
                     filter_type='rectangular',
                     filter_size_units='bins'):
    """Estimates Achromatic gain fluctuations and scales by them.
    """

    on_ind = 0
    off_ind = 1
    n_time = Data.data.shape[0]
    if (Data.field['CAL'][on_ind] != 'T' or Data.field['CAL'][off_ind] != 'F'):
        raise ce.DataError('Cal states not in expected order.')

    if tuple(Data.field['CRVAL4']) == (-5, -7, -8, -6):
        # Here we check the polarizations and cal indices
        xx_ind = 0
        yy_ind = 3
        xy_inds = [1, 2]

        diff_xx = Data.data[:, xx_ind, on_ind, :] - Data.data[:, xx_ind,
                                                              off_ind, :]
        diff_yy = Data.data[:, yy_ind, on_ind, :] - Data.data[:, yy_ind,
                                                              off_ind, :]
        cal_xx = ma.mean(diff_xx, -1)
        cal_yy = ma.mean(diff_yy, -1)
        mask = ma.getmaskarray(Data.data)
        #mask = sp.any(sp.any(mask, 1), 1)
        # XXX Wrong: the mask just needs to be factorizable.  Need to develop
        # an algorithm for ensuring this, but for now, just use this.  This
        # shouldn't be too bad for the current RFI flagging algorithm (Apr
        # 2012).
        time_mask = sp.any(sp.any(sp.any(mask, 1), 1), 1)
        # Sanity check the masks.
        if not (sp.all(time_mask[ma.getmaskarray(cal_xx)])
                and sp.all(time_mask[ma.getmaskarray(cal_yy)])):
            msg = "Doesn't make since, this should always be true."
            raise RuntimeError(msg)
        # Convert to normal arrays.
        cal_xx = cal_xx.filled(0)
        cal_yy = cal_yy.filled(0)
        # XXX
        #Data.calc_time()
        #time_unmask = sp.logical_not(time_mask)
        #plt.plot(Data.time[time_unmask], cal_xx[time_unmask])
        # Now set up the filter.
        if filter_type == 'rectangular':
            if filter_size_units == 'bins':
                n_bins_filter = int(size)
                if n_bins_filter % 2 == 0:
                    raise ValueError("Rectangular filter should have an odd"
                                     "number of bins.")
            elif filter_size_units == 'seconds':
                Data.calc_time()
                dt = abs(sp.mean(sp.diff(Data.time)))
                n_bins_filter = size / dt
                # Round to the nearest odd number.
                n_bins_filter = 2 * int(round(n_bins_filter / 2. - 0.5)) + 1
            else:
                msg = 'Filter unit type unsupported.'
                raise ValueError(msg)
            kernel = sp.ones(n_bins_filter) / n_bins_filter
        else:
            msg = 'Filter type unsupported.'
            raise ValueError(msg)
        # Now that we know the kernel size, figure out what elements will
        # be newly masked by the smoothing.
        half_width = n_bins_filter // 2
        old_mask = time_mask.copy()
        for ii in range(n_time):
            if old_mask[ii]:
                time_mask[ii - half_width:ii + half_width + 1] = True
        # Also mask the edges.
        time_mask[:half_width] = True
        time_mask[-half_width:] = True
        # Now actually do the convolution.
        cal_xx = signal.convolve(cal_xx, kernel, mode='same')
        cal_yy = signal.convolve(cal_yy, kernel, mode='same')
        # XXX
        #Data.calc_time()
        #time_unmask = sp.logical_not(time_mask)
        #plt.plot(Data.time[time_unmask], cal_xx[time_unmask])
        #plt.plot(Data.time, time_mask)
        #plt.show()  # debugging leftover; keep commented out like the plots above
        # Replace invalid entries with unity (They get masked later anyway).
        cal_xx[time_mask] = 1.
        cal_yy[time_mask] = 1.
        # Calibrate and apply mask.
        Data.data[:, xx_ind, :, :] /= cal_xx[:, None, None]
        Data.data[:, yy_ind, :, :] /= cal_yy[:, None, None]
        cross_cal = sp.sqrt(cal_xx * cal_yy)
        Data.data[:, xy_inds, :, :] /= cross_cal[:, None, None, None]
        # Apply the mask.
        Data.data[time_mask, ...] = ma.masked
    #elif tuple(Data.field['CRVAL4']) == (1, 2, 3, 4) :
    else:
        raise ce.DataError("Unsupported polarization states.")
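The smoothing step above combines a unit-sum boxcar kernel with mask dilation; a minimal sketch of just that step, using a synthetic gain series:

import numpy as np
from scipy import signal

cal = 10.0 + 0.1 * np.random.randn(100)          # stand-in gain time series
time_mask = np.zeros(100, dtype=bool)
time_mask[40] = True                              # one pre-existing bad sample

n_bins_filter = 7                                 # odd, so the filter is centred
kernel = np.ones(n_bins_filter) / n_bins_filter   # unit sum preserves the mean
half_width = n_bins_filter // 2

# Any sample within half a kernel width of a bad sample (or of the edges)
# is contaminated by the smoothing, so dilate the mask accordingly.
for ii in np.flatnonzero(time_mask):
    time_mask[max(ii - half_width, 0):ii + half_width + 1] = True
time_mask[:half_width] = True
time_mask[-half_width:] = True

smooth = signal.convolve(cal, kernel, mode='same')
smooth[time_mask] = 1.0                           # placeholder; masked later anyway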
Example #46
    def set_features(self):
        try:
            doy = int(self.data.attrs['date'].strftime('%j'))
        except KeyError:
            doy = int(self.data.attrs['datetime'].strftime('%j'))

        if ('LATITUDE' in self.data.attrs.keys()) and \
                ('LONGITUDE' in self.data.attrs.keys()):
                    mode = 'profile'
                    kwargs = {
                            'lat': self.data.attrs['LATITUDE'],
                            'lon': self.data.attrs['LONGITUDE']}

        if ('LATITUDE' in self.data.keys()) and \
                ('LONGITUDE' in self.data.keys()):
                    mode = 'track'
                    dLmax = max(
                            self.data['LATITUDE'].max() - self.data['LATITUDE'].min(),
                            self.data['LONGITUDE'].max() - self.data['LONGITUDE'].min())
                    # Only use each measurement coordinate if it is spread.
                    if dLmax >= 0.01:
                        kwargs = {
                            'lat': self.data['LATITUDE'],
                            'lon': self.data['LONGITUDE']}

        if ('DEPTH' in self.data.keys()):
            depth = self.data['DEPTH']
        elif ('PRES' in self.data.keys()):
            depth = self.data['PRES']

        db = WOA()
        if self.varname[-1] == '2':
            vtype = self.varname[:-1]
        else:
            vtype = self.varname

        idx = ~ma.getmaskarray(depth) & np.array(depth >= 0)
        if mode == 'track':
            woa = db[vtype].track(
                var=['mean', 'standard_deviation', 'standard_error',
                    'number_of_observations'],
                doy=doy,
                depth=depth[idx],
                **kwargs)
        else:
            woa = db[vtype].extract(
                var=['mean', 'standard_deviation', 'standard_error',
                    'number_of_observations'],
                doy=doy,
                depth=depth[idx],
                **kwargs)

        if not idx.all():
            for v in woa.keys():
                tmp = ma.masked_all(depth.shape, dtype=woa[v].dtype)
                tmp[idx] = woa[v]
                woa[v] = tmp

        self.features = {
                'woa_mean': woa['mean'],
                'woa_std': woa['standard_deviation'],
                'woa_nsamples': woa['number_of_observations'],
                'woa_se': woa['standard_error']}

        self.features['woa_bias'] = self.data[self.varname] - \
                self.features['woa_mean']

        # if use_standard_error is True, the comparison with the climatology
        #   considers the standard error, i.e. the bias will be only the
        #   amount above the standard error range.
        if self.cfg['use_standard_error'] is True:
            standard_error = self.features['woa_std'] / \
                    self.features['woa_nsamples'] ** 0.5
            idx = np.absolute(self.features['woa_bias']) <= \
                    standard_error
            self.features['woa_bias'][idx] = 0
            idx = np.absolute(self.features['woa_bias']) > standard_error
            self.features['woa_bias'][idx] -= \
                    np.sign(self.features['woa_bias'][idx]) * \
                    standard_error[idx]

        self.features['woa_normbias'] = self.features['woa_bias'] / \
                self.features['woa_std']
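The normalized bias computed above is simply the misfit expressed in climatological standard deviations; a tiny sketch with made-up numbers:

import numpy as np
import numpy.ma as ma

obs = ma.array([10.2, 11.5, 9.8], mask=[False, False, True])
woa_mean = np.array([10.0, 10.0, 10.0])
woa_std = np.array([0.5, 0.5, 0.5])

woa_bias = obs - woa_mean
woa_normbias = woa_bias / woa_std   # how many climatological std devs away
print(woa_normbias)                 # [0.4 3.0 --]; the masked value stays masked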
Example #47
    def callback(self, rgb, depth):
        if DEBUG:
            print('received depth image of type: ' + depth.encoding)
            print('received rgb image of type: ' + rgb.encoding)
        #https://answers.ros.org/question/64318/how-do-i-convert-an-ros-image-into-a-numpy-array/
        depth = np.frombuffer(depth.data,
                              dtype=np.uint16).reshape(depth.height,
                                                       depth.width, -1)
        rgb = np.frombuffer(rgb.data,
                            dtype=np.uint8).reshape(rgb.height, rgb.width, -1)
        rgb_original = rgb
        #cv2.imshow('depth', depth)

        #time1 = time.time()
        rgb = np.transpose(rgb, (2, 0, 1))
        rgb = norm(torch.from_numpy(rgb.astype(np.float32)))
        rgb = Variable(rgb).cuda()
        semantic = self.model(rgb.unsqueeze(0))
        _, pred = torch.max(semantic, dim=1)
        pred = pred * 255
        pred = np.transpose(pred.cpu().numpy(), (1, 2, 0))  # (CxHxW)->(HxWxC)
        #print(pred.shape)

        #ret, threshold = cv2.threshold(pred.cpu().numpy(), 1, 255, cv2.THRESH_BINARY)    #pred is already binary, therefore, this line is unnecessary
        contours, hierarchy = cv2.findContours(np.uint8(pred),
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
        cnt = max(contours, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(cnt)
        rmin, rmax, cmin, cmax = get_bbox([x, y, w, h])
        #cv2.rectangle(rgb_original,(cmin,rmin), (cmax,rmax) , (0,255,0),2)
        #cv2.imwrite('depth.png', depth)          #save depth image

        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
        mask_label = ma.getmaskarray(ma.masked_equal(pred, np.array(255)))
        mask = mask_depth * mask_label

        #print(rgb.shape)             #torch.Size([3, 480, 640])
        #print(rgb_original.shape)    #(480, 640, 3)
        img = np.transpose(rgb_original, (2, 0, 1))
        img_masked = img[:, rmin:rmax, cmin:cmax]

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]

        #print("length of choose is :{0}".format(len(choose)))
        if len(choose) == 0:
            cc = torch.LongTensor([0])
            return (cc, cc, cc, cc, cc, cc)

        if len(choose) > num_points:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:num_points] = 1  # if there are more object pixels than num_points, keep exactly num_points
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]  # now len(choose) = 500
        else:
            choose = np.pad(choose, (0, num_points - len(choose)), 'wrap')

        depth_masked = depth[rmin:rmax,
                             cmin:cmax].flatten()[choose][:,
                                                          np.newaxis].astype(
                                                              np.float32)
        xmap_masked = self.xmap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)

        choose = np.array([choose])

        pt2 = depth_masked
        #print(pt2)
        pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
        pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        cloud = cloud / 1000

        points = torch.from_numpy(cloud.astype(np.float32))
        choose = torch.LongTensor(choose.astype(np.int32))
        img = norm(torch.from_numpy(img_masked.astype(np.float32)))
        idx = torch.LongTensor([self.object_index])

        img = Variable(img).cuda().unsqueeze(0)
        points = Variable(points).cuda().unsqueeze(0)
        choose = Variable(choose).cuda().unsqueeze(0)
        idx = Variable(idx).cuda().unsqueeze(0)

        pred_r, pred_t, pred_c, emb = self.estimator(img, points, choose, idx)
        pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, num_points, 1)
        pred_c = pred_c.view(bs, num_points)
        how_max, which_max = torch.max(pred_c, 1)
        pred_t = pred_t.view(bs * num_points, 1, 3)

        my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
        my_t = (points.view(bs * num_points, 1, 3) +
                pred_t)[which_max[0]].view(-1).cpu().data.numpy()
        my_pred = np.append(my_r, my_t)

        for ite in range(0, iteration):
            T = Variable(torch.from_numpy(
                my_t.astype(np.float32))).cuda().view(1, 3).repeat(
                    num_points, 1).contiguous().view(1, num_points, 3)
            my_mat = quaternion_matrix(my_r)
            R = Variable(torch.from_numpy(my_mat[:3, :3].astype(
                np.float32))).cuda().view(1, 3, 3)
            my_mat[0:3, 3] = my_t

            new_points = torch.bmm((points - T), R).contiguous()
            pred_r, pred_t = self.refiner(new_points, emb, idx)
            pred_r = pred_r.view(1, 1, -1)
            pred_r = pred_r / (torch.norm(pred_r, dim=2).view(1, 1, 1))
            my_r_2 = pred_r.view(-1).cpu().data.numpy()
            my_t_2 = pred_t.view(-1).cpu().data.numpy()
            my_mat_2 = quaternion_matrix(my_r_2)
            my_mat_2[0:3, 3] = my_t_2

            my_mat_final = np.dot(
                my_mat,
                my_mat_2)  # refine pose means two matrix multiplication
            my_r_final = copy.deepcopy(my_mat_final)
            my_r_final[0:3, 3] = 0
            my_r_final = quaternion_from_matrix(my_r_final, True)
            my_t_final = np.array(
                [my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]])

            my_pred = np.append(my_r_final, my_t_final)
            my_r = my_r_final
            my_t = my_t_final

        my_r = quaternion_matrix(my_r)[:3, :3]
        #print(my_t.shape)
        my_t = np.array(my_t)
        #print(my_t.shape)
        #print(my_r.shape)

        target = np.dot(self.scaled, my_r.T)
        target = np.add(target, my_t)

        p0 = (int((target[0][0] / target[0][2]) * self.cam_fx + self.cam_cx),
              int((target[0][1] / target[0][2]) * self.cam_fy + self.cam_cy))
        p1 = (int((target[1][0] / target[1][2]) * self.cam_fx + self.cam_cx),
              int((target[1][1] / target[1][2]) * self.cam_fy + self.cam_cy))
        p2 = (int((target[2][0] / target[2][2]) * self.cam_fx + self.cam_cx),
              int((target[2][1] / target[2][2]) * self.cam_fy + self.cam_cy))
        p3 = (int((target[3][0] / target[3][2]) * self.cam_fx + self.cam_cx),
              int((target[3][1] / target[3][2]) * self.cam_fy + self.cam_cy))
        p4 = (int((target[4][0] / target[4][2]) * self.cam_fx + self.cam_cx),
              int((target[4][1] / target[4][2]) * self.cam_fy + self.cam_cy))
        p5 = (int((target[5][0] / target[5][2]) * self.cam_fx + self.cam_cx),
              int((target[5][1] / target[5][2]) * self.cam_fy + self.cam_cy))
        p6 = (int((target[6][0] / target[6][2]) * self.cam_fx + self.cam_cx),
              int((target[6][1] / target[6][2]) * self.cam_fy + self.cam_cy))
        p7 = (int((target[7][0] / target[7][2]) * self.cam_fx + self.cam_cx),
              int((target[7][1] / target[7][2]) * self.cam_fy + self.cam_cy))

        cv2.line(rgb_original, p0, p1, (255, 255, 255), 2)
        cv2.line(rgb_original, p0, p3, (255, 255, 255), 2)
        cv2.line(rgb_original, p0, p4, (255, 255, 255), 2)
        cv2.line(rgb_original, p1, p2, (255, 255, 255), 2)
        cv2.line(rgb_original, p1, p5, (255, 255, 255), 2)
        cv2.line(rgb_original, p2, p3, (255, 255, 255), 2)
        cv2.line(rgb_original, p2, p6, (255, 255, 255), 2)
        cv2.line(rgb_original, p3, p7, (255, 255, 255), 2)
        cv2.line(rgb_original, p4, p5, (255, 255, 255), 2)
        cv2.line(rgb_original, p4, p7, (255, 255, 255), 2)
        cv2.line(rgb_original, p5, p6, (255, 255, 255), 2)
        cv2.line(rgb_original, p6, p7, (255, 255, 255), 2)

        #print('estimated rotation is :{0}'.format(my_r))
        #print('estimated translation is :{0}'.format(my_t))

        #time2 = time.time()
        #print('inference time is :{0}'.format(time2-time1))
        cv2.imshow('rgb',
                   cv2.cvtColor(rgb_original,
                                cv2.COLOR_BGR2RGB))  # OpenCV uses BGR model
        cv2.waitKey(1)  # pass any integer except 0, as 0 would block the display window
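The pt0/pt1/pt2 lines implement the standard pinhole back-projection (here ymap holds column indices and xmap row indices, so ymap pairs with cam_fx). A minimal version, reusing the intrinsics hard-coded later in these examples:

import numpy as np

cam_fx, cam_fy = 1066.778, 1067.487    # focal lengths in pixels
cam_cx, cam_cy = 312.9869, 241.3109    # principal point

def backproject(depth_mm, row, col):
    """Map a pixel (row, col) with depth in millimetres to a 3-D point in metres."""
    z = depth_mm / 1000.0
    x = (col - cam_cx) * z / cam_fx
    y = (row - cam_cy) * z / cam_fy
    return np.array([x, y, z])

print(backproject(812.0, 240, 320))    # a point roughly 0.8 m in front of the camera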
Example #48
def sanitize_array(data,
                   index,
                   dtype=None,
                   copy=False,
                   raise_cast_failure=False):
    """
    Sanitize input data to an ndarray, copy if specified, coerce to the
    dtype if specified.
    """
    if dtype is not None:
        dtype = pandas_dtype(dtype)

    if isinstance(data, ma.MaskedArray):
        mask = ma.getmaskarray(data)
        if mask.any():
            data, fill_value = maybe_upcast(data, copy=True)
            data.soften_mask()  # set hardmask False if it was True
            data[mask] = fill_value
        else:
            data = data.copy()

    data = extract_array(data, extract_numpy=True)

    # GH#846
    if isinstance(data, np.ndarray):

        if dtype is not None:
            subarr = np.array(data, copy=False)

            # possibility of nan -> garbage
            if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
                try:
                    subarr = _try_cast(data, True, dtype, copy, True)
                except ValueError:
                    if copy:
                        subarr = data.copy()
            else:
                subarr = _try_cast(data, True, dtype, copy, raise_cast_failure)
        elif isinstance(data, Index):
            # don't coerce Index types
            # e.g. indexes can have different conversions (so don't fast path
            # them)
            # GH#6140
            subarr = sanitize_index(data, index, copy=copy)
        else:

            # we will try to copy by-definition here
            subarr = _try_cast(data, True, dtype, copy, raise_cast_failure)

    elif isinstance(data, ExtensionArray):
        if isinstance(data, ABCPandasArray):
            # We don't want to let people put our PandasArray wrapper
            # (the output of Series/Index.array), into a Series. So
            # we explicitly unwrap it here.
            subarr = data.to_numpy()
        else:
            subarr = data

        # everything else in this block must also handle ndarray's,
        # because we've unwrapped PandasArray into an ndarray.

        if dtype is not None:
            subarr = data.astype(dtype)

        if copy:
            subarr = data.copy()
        return subarr

    elif isinstance(data, (list, tuple)) and len(data) > 0:
        if dtype is not None:
            try:
                subarr = _try_cast(data, False, dtype, copy,
                                   raise_cast_failure)
            except Exception:
                if raise_cast_failure:  # pragma: no cover
                    raise
                subarr = np.array(data, dtype=object, copy=copy)
                subarr = lib.maybe_convert_objects(subarr)

        else:
            subarr = maybe_convert_platform(data)

        subarr = maybe_cast_to_datetime(subarr, dtype)

    elif isinstance(data, range):
        # GH#16804
        start, stop, step = get_range_parameters(data)
        arr = np.arange(start, stop, step, dtype='int64')
        subarr = _try_cast(arr, False, dtype, copy, raise_cast_failure)
    else:
        subarr = _try_cast(data, False, dtype, copy, raise_cast_failure)

    # scalar like, GH
    if getattr(subarr, 'ndim', 0) == 0:
        if isinstance(data, list):  # pragma: no cover
            subarr = np.array(data, dtype=object)
        elif index is not None:
            value = data

            # figure out the dtype from the value (upcast if necessary)
            if dtype is None:
                dtype, value = infer_dtype_from_scalar(value)
            else:
                # need to possibly convert the value here
                value = maybe_cast_to_datetime(value, dtype)

            subarr = construct_1d_arraylike_from_scalar(
                value, len(index), dtype)

        else:
            return subarr.item()

    # the result that we want
    elif subarr.ndim == 1:
        if index is not None:

            # a 1-element ndarray
            if len(subarr) != len(index) and len(subarr) == 1:
                subarr = construct_1d_arraylike_from_scalar(
                    subarr[0], len(index), subarr.dtype)

    elif subarr.ndim > 1:
        if isinstance(data, np.ndarray):
            raise Exception('Data must be 1-dimensional')
        else:
            subarr = com.asarray_tuplesafe(data, dtype=dtype)

    # This is to prevent mixed-type Series getting all casted to
    # NumPy string type, e.g. NaN --> '-1#IND'.
    if issubclass(subarr.dtype.type, compat.string_types):
        # GH#16605
        # If not empty convert the data to dtype
        # GH#19853: If data is a scalar, subarr has already the result
        if not lib.is_scalar(data):
            if not np.all(isna(data)):
                data = np.array(data, dtype=dtype, copy=False)
            subarr = np.array(data, dtype=object, copy=copy)

    if is_object_dtype(subarr.dtype) and dtype != 'object':
        inferred = lib.infer_dtype(subarr, skipna=False)
        if inferred == 'period':
            try:
                subarr = period_array(subarr)
            except IncompatibleFrequency:
                pass

    return subarr
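The MaskedArray branch near the top of sanitize_array can be mimicked with public numpy calls alone; a sketch (not the pandas internals) of why the upcast precedes the fill:

import numpy as np
import numpy.ma as ma

data = ma.array([1, 2, 3], mask=[False, True, False])
mask = ma.getmaskarray(data)
if mask.any():
    # int64 cannot hold NaN, so upcast to float before filling the
    # masked slots (this is what maybe_upcast arranges internally).
    data = data.astype('float64').filled(np.nan)
print(data)   # [ 1. nan  3.]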
Example #49
def merge_arrays(seqarrays,
                 fill_value=-1,
                 flatten=False,
                 usemask=False,
                 asrecarray=False):
    """
    Merge arrays field by field.

    Parameters
    ----------
    seqarrays : sequence of ndarrays
        Sequence of arrays
    fill_value : {float}, optional
        Filling value used to pad missing data on the shorter arrays.
    flatten : {False, True}, optional
        Whether to collapse nested fields.
    usemask : {False, True}, optional
        Whether to return a masked array or not.
    asrecarray : {False, True}, optional
        Whether to return a recarray (MaskedRecords) or not.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
    masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
                 mask = [(False, False) (False, False) (True, False)],
           fill_value = (999999, 1e+20),
                dtype = [('f0', '<i4'), ('f1', '<f8')])

    >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
    ...              usemask=False)
    array([(1, 10.0), (2, 20.0), (-1, 30.0)],
          dtype=[('f0', '<i4'), ('f1', '<f8')])
    >>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
    ...               np.array([10., 20., 30.])),
    ...              usemask=False, asrecarray=True)
    rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
              dtype=[('a', '<i4'), ('f1', '<f8')])

    Notes
    -----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
            -1      for integers
            -1.0    for floating point numbers
            '-'     for characters
            '-1'    for strings
            True    for boolean values
    * XXX: I just obtained these values empirically
    """
    # Only one item in the input sequence ?
    if (len(seqarrays) == 1):
        seqarrays = np.asanyarray(seqarrays[0])
    # Do we have a single ndarray as input ?
    if isinstance(seqarrays, (ndarray, np.void)):
        seqdtype = seqarrays.dtype
        if (not flatten) or \
           (zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
            seqarrays = seqarrays.ravel()
            # Make sure we have named fields
            if not seqdtype.names:
                seqdtype = [('', seqdtype)]
            # Find what type of array we must return
            if usemask:
                if asrecarray:
                    seqtype = MaskedRecords
                else:
                    seqtype = MaskedArray
            elif asrecarray:
                seqtype = recarray
            else:
                seqtype = ndarray
            return seqarrays.view(dtype=seqdtype, type=seqtype)
        else:
            seqarrays = (seqarrays, )
    else:
        # Make sure we have arrays in the input sequence
        seqarrays = map(np.asanyarray, seqarrays)
    # Find the sizes of the inputs and their maximum
    sizes = tuple(a.size for a in seqarrays)
    maxlength = max(sizes)
    # Get the dtype of the output (flattening if needed)
    newdtype = zip_descr(seqarrays, flatten=flatten)
    # Initialize the sequences for data and mask
    seqdata = []
    seqmask = []
    # If we expect some kind of MaskedArray, make a special loop.
    if usemask:
        for (a, n) in itertools.izip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            # Get the data and mask
            data = a.ravel().__array__()
            mask = ma.getmaskarray(a).ravel()
            # Get the filling value (if needed)
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        fval = fval.item()[0]
                        fmsk = True
                    else:
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
                        fmsk = np.ones((1, ), dtype=mask.dtype)
            else:
                fval = None
                fmsk = True
            # Store an iterator padding the input to the expected length
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
            seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
        # Create an iterator for the data
        data = tuple(izip_records(seqdata, flatten=flatten))
        output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
                          mask=list(izip_records(seqmask, flatten=flatten)))
        if asrecarray:
            output = output.view(MaskedRecords)
    else:
        # Same as before, without the mask we don't need...
        for (a, n) in itertools.izip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            data = a.ravel().__array__()
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        fval = fval.item()[0]
                    else:
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
            else:
                fval = None
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
        output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
                             dtype=newdtype,
                             count=maxlength)
        if asrecarray:
            output = output.view(recarray)
    # And we're done...
    return output
示例#50
0
    def __getitem__(self, index):
        img = Image.open('{0}/{1}-color.png'.format(self.root,
                                                    self.list[index]))
        depth = np.array(
            Image.open('{0}/{1}-depth.png'.format(self.root,
                                                  self.list[index])))
        label = np.array(
            Image.open('{0}/{1}-label.png'.format(self.root,
                                                  self.list[index])))
        meta = scio.loadmat('{0}/{1}-meta.mat'.format(self.root,
                                                      self.list[index]))

        if self.list[index][:8] != 'data_syn' and int(
                self.list[index][5:9]) >= 60:
            cam_cx = self.cam_cx_2
            cam_cy = self.cam_cy_2
            cam_fx = self.cam_fx_2
            cam_fy = self.cam_fy_2
        else:
            cam_cx = self.cam_cx_1
            cam_cy = self.cam_cy_1
            cam_fx = self.cam_fx_1
            cam_fy = self.cam_fy_1

        mask_back = ma.getmaskarray(ma.masked_equal(label, 0))

        add_front = False
        if self.add_noise:
            for k in range(5):
                seed = random.choice(self.syn)
                front = np.array(
                    self.trancolor(
                        Image.open('{0}/{1}-color.png'.format(
                            self.root, seed)).convert("RGB")))
                front = np.transpose(front, (2, 0, 1))
                f_label = np.array(
                    Image.open('{0}/{1}-label.png'.format(self.root, seed)))
                front_label = np.unique(f_label).tolist()[1:]
                if len(front_label) < self.front_num:
                    continue
                front_label = random.sample(front_label, self.front_num)
                for f_i in front_label:
                    mk = ma.getmaskarray(ma.masked_not_equal(f_label, f_i))
                    if f_i == front_label[0]:
                        mask_front = mk
                    else:
                        mask_front = mask_front * mk
                t_label = label * mask_front
                if len(t_label.nonzero()[0]) > 1000:
                    label = t_label
                    add_front = True
                    break

        obj = meta['cls_indexes'].flatten().astype(np.int32)

        while 1:
            idx = np.random.randint(0, len(obj))
            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(label, obj[idx]))
            mask = mask_label * mask_depth
            if len(mask.nonzero()[0]) > self.minimum_num_pt:
                break

        if self.add_noise:
            img = self.trancolor(img)

        rmin, rmax, cmin, cmax = get_bbox(mask_label)
        img = np.transpose(np.array(img)[:, :, :3], (2, 0, 1))[:, rmin:rmax,
                                                               cmin:cmax]

        if self.list[index][:8] == 'data_syn':
            seed = random.choice(self.real)
            back = np.array(
                self.trancolor(
                    Image.open('{0}/{1}-color.png'.format(
                        self.root, seed)).convert("RGB")))
            back = np.transpose(back, (2, 0, 1))[:, rmin:rmax, cmin:cmax]
            img_masked = back * mask_back[rmin:rmax, cmin:cmax] + img
        else:
            img_masked = img

        if self.add_noise and add_front:
            img_masked = img_masked * mask_front[
                rmin:rmax, cmin:cmax] + front[:, rmin:rmax, cmin:cmax] * ~(
                    mask_front[rmin:rmax, cmin:cmax])

        if self.list[index][:8] == 'data_syn':
            img_masked = img_masked + np.random.normal(
                loc=0.0, scale=7.0, size=img_masked.shape)

        # p_img = np.transpose(img_masked, (1, 2, 0))
        # scipy.misc.imsave('temp/{0}_input.png'.format(index), p_img)
        # scipy.misc.imsave('temp/{0}_label.png'.format(index), mask[rmin:rmax, cmin:cmax].astype(np.int32))

        target_r = meta['poses'][:, :, idx][:, 0:3]
        target_t = np.array([meta['poses'][:, :, idx][:, 3:4].flatten()])
        add_t = np.array([
            random.uniform(-self.noise_trans, self.noise_trans)
            for i in range(3)
        ])

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) > self.num_pt:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num_pt] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num_pt - len(choose)), 'wrap')

        depth_masked = depth[rmin:rmax,
                             cmin:cmax].flatten()[choose][:,
                                                          np.newaxis].astype(
                                                              np.float32)
        xmap_masked = self.xmap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])

        cam_scale = meta['factor_depth'][0][0]
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
        pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        if self.add_noise:
            cloud = np.add(cloud, add_t)

        # fw = open('temp/{0}_cld.xyz'.format(index), 'w')
        # for it in cloud:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        # fw.close()

        dellist = [j for j in range(0, len(self.cld[obj[idx]]))]
        if self.refine:
            dellist = random.sample(
                dellist,
                len(self.cld[obj[idx]]) - self.num_pt_mesh_large)
        else:
            dellist = random.sample(
                dellist,
                len(self.cld[obj[idx]]) - self.num_pt_mesh_small)
        model_points = np.delete(self.cld[obj[idx]], dellist, axis=0)

        # fw = open('temp/{0}_model_points.xyz'.format(index), 'w')
        # for it in model_points:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        # fw.close()

        target = np.dot(model_points, target_r.T)
        if self.add_noise:
            target = np.add(target, target_t + add_t)
        else:
            target = np.add(target, target_t)

        # fw = open('temp/{0}_tar.xyz'.format(index), 'w')
        # for it in target:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        # fw.close()

        return torch.from_numpy(cloud.astype(np.float32)), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy(img_masked.astype(np.float32))), \
               torch.from_numpy(target.astype(np.float32)), \
               torch.from_numpy(model_points.astype(np.float32)), \
               torch.LongTensor([int(obj[idx]) - 1])
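The fixed-size resampling of choose recurs in nearly every loader in this collection; a standalone sketch of both branches:

import numpy as np

choose = np.flatnonzero(np.random.rand(1200) > 0.5)   # stand-in pixel indices
num_pt = 500

if len(choose) > num_pt:
    c_mask = np.zeros(len(choose), dtype=int)
    c_mask[:num_pt] = 1
    np.random.shuffle(c_mask)              # keep num_pt indices chosen uniformly
    choose = choose[c_mask.nonzero()]
else:
    # Too few pixels: repeat the indices cyclically up to the fixed length.
    choose = np.pad(choose, (0, num_pt - len(choose)), 'wrap')

assert len(choose) == num_pt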
Example #51
def printCurve(take_idx, criterion):
    conf_tp_or_fn = [[] for i in range(5)]
    conf_fp = [[] for i in range(5)]
    prec = [[] for i in range(5)]
    recall = [[] for i in range(5)]

    if take_idx == 0:
        file_1_name = 'occ/under60.txt'
        file_2_name = 'occ/under60_frames.txt'

    elif take_idx == 1:
        file_1_name = 'occ/from60to80.txt'
        file_2_name = 'occ/f60t80_frames.txt'

    else:
        file_1_name = 'occ/up80.txt'
        file_2_name = 'occ/up80_frames.txt'

    xmap = np.array([[j for i in range(640)] for j in range(480)])
    ymap = np.array([[i for i in range(640)] for j in range(480)])

    with open(file_1_name, 'r') as f1:
        with open(file_2_name, 'r') as f2:
            while 1:
                input_line_test = f2.readline()
                # print(input_line_test)
                if not input_line_test:
                    break
                if input_line_test[-1:] == '\n':
                    input_line_test = input_line_test[:-1]
                _, test_scene_id, test_frame_id = input_line_test.split('/')

                input_line_test = '/'.join(
                    ['data_v1', test_scene_id, test_frame_id])
                # import pdb;pdb.set_trace()

                input_line_test_2 = f1.readline()
                test_obj_id = int(float(input_line_test_2.split()[2])) + 1
                test_idx = int(float(input_line_test_2.split()[1]))
                # import pdb;pdb.set_trace()

                img = Image.open('{0}/{1}-color.png'.format(
                    opt.dataset_root, input_line_test))
                depth = np.array(
                    Image.open('{0}/{1}-depth.png'.format(
                        opt.dataset_root, input_line_test)))
                label = np.array(
                    Image.open('{0}/{1}-label.png'.format(
                        opt.dataset_root, input_line_test)))
                meta = scio.loadmat('{0}/{1}-meta.mat'.format(
                    opt.dataset_root, input_line_test))

                cam_cx = 312.9869
                cam_cy = 241.3109
                cam_fx = 1066.778
                cam_fy = 1067.487
                mask_back = ma.getmaskarray(ma.masked_equal(label, 0))

                print('scene index: ', test_scene_id)
                print('object index: ', test_obj_id)
                mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
                mask_label = ma.getmaskarray(
                    ma.masked_equal(label, test_obj_id))
                mask = mask_label * mask_depth
                if not (len(mask.nonzero()[0]) > 50
                        and len(opt.symmetry[test_obj_id]['mirror']) > 0):
                    continue

                rmin, rmax, cmin, cmax = get_bbox(mask_label)
                img_temp = np.transpose(np.array(img)[:, :, :3],
                                        (2, 0, 1))[:, rmin:rmax, cmin:cmax]

                img_masked = img_temp
                target_r = meta['poses'][:, :, test_idx][:, 0:3]
                target_t = np.array(meta['poses'][:, :,
                                                  test_idx][:, 3:4].flatten())

                choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
                if len(choose) > opt.num_points:
                    c_mask = np.zeros(len(choose), dtype=int)
                    c_mask[:opt.num_points] = 1
                    np.random.shuffle(c_mask)
                    choose = choose[c_mask.nonzero()]
                else:
                    choose = np.pad(choose, (0, opt.num_points - len(choose)),
                                    'wrap')

                depth_masked = depth[
                    rmin:rmax,
                    cmin:cmax].flatten()[choose][:,
                                                 np.newaxis].astype(np.float32)
                xmap_masked = xmap[
                    rmin:rmax,
                    cmin:cmax].flatten()[choose][:,
                                                 np.newaxis].astype(np.float32)
                ymap_masked = ymap[
                    rmin:rmax,
                    cmin:cmax].flatten()[choose][:,
                                                 np.newaxis].astype(np.float32)
                choose = np.array([choose])

                cam_scale = meta['factor_depth'][0][0]
                pt2 = depth_masked / cam_scale
                pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
                pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
                cloud = np.concatenate((pt0, pt1, pt2), axis=1)

                target_sym = []
                for sym in opt.symmetry[test_obj_id]['mirror']:
                    target_sym.append(np.dot(sym, target_r.T))
                target_sym = np.array(target_sym)

                target_cen = np.add(opt.symmetry[test_obj_id]['center'],
                                    target_t)

                # print('ground truth norm: ', target_sym)
                # print('ground truth center: ', target_cen)
                points_ten, choose_ten, img_ten, target_sym_ten, target_cen_ten, idx_ten = \
                torch.from_numpy(cloud.astype(np.float32)).unsqueeze(0), \
                torch.LongTensor(choose.astype(np.int32)).unsqueeze(0), \
                opt.norm(torch.from_numpy(img_masked.astype(np.float32))).unsqueeze(0), \
                torch.from_numpy(target_sym.astype(np.float32)).unsqueeze(0), \
                torch.from_numpy(target_cen.astype(np.float32)).unsqueeze(0), \
                torch.LongTensor([test_obj_id-1]).unsqueeze(0)

                points_ten, choose_ten, img_ten, target_sym_ten, target_cen_ten, idx_ten = Variable(points_ten), \
                                                                    Variable(choose_ten), \
                                                                    Variable(img_ten), \
                                                                    Variable(target_sym_ten), \
                                                                    Variable(target_cen_ten), \
                                                                    Variable(idx_ten)

                pred_norm, pred_on_plane, emb = opt.estimator(
                    img_ten, points_ten, choose_ten, idx_ten)

                bs, num_p, _ = pred_on_plane.size()

                pred_norm = pred_norm / (torch.norm(pred_norm, dim=2).view(
                    bs, num_p, 1))
                pred_norm = pred_norm.detach().numpy()
                pred_on_plane = pred_on_plane.detach().numpy()
                points = points_ten.detach().numpy()

                clustering_points_idx = np.where(pred_on_plane > max(
                    0.5,
                    pred_on_plane.max() * PRED_ON_PLANE_FACTOR +
                    pred_on_plane.mean() * (1 - PRED_ON_PLANE_FACTOR)))[1]
                clustering_norm = pred_norm[0, clustering_points_idx, :]
                clustering_points = points[0, clustering_points_idx, :]
                num_points = len(clustering_points_idx)

                # print(pred_on_plane.max())
                # import pdb;pdb.set_trace()

                close_thresh = 2e-3
                broad_thresh = 3e-3

                sym_conf = np.zeros((5, target_sym.shape[0]))

                count_pred = 0

                # import pdb; pdb.set_trace()

                while True:
                    count_pred += 1
                    if num_points <= 20 or count_pred > 3:
                        break

                    best_fit_num = 0

                    count_try = 0

                    for j in range(10):

                        pick_idx = np.random.randint(0, num_points - 1)
                        pick_point = clustering_points[pick_idx]
                        # proposal_norm = np.array(Plane(Point3D(pick_points[0]),Point3D(pick_points[1]),Point3D(pick_points[2])).normal_vector).astype(np.float32)
                        proposal_norm = clustering_norm[pick_idx]
                        proposal_norm = proposal_norm[:, np.newaxis]

                        # import pdb;pdb.set_trace()
                        proposal_point = pick_point

                        clustering_diff = clustering_points - proposal_point
                        clustering_dist = np.abs(
                            np.matmul(clustering_diff, proposal_norm))

                        broad_inliers = np.where(
                            clustering_dist < broad_thresh)[0]
                        broad_inlier_num = len(broad_inliers)

                        close_inliers = np.where(
                            clustering_dist < close_thresh)[0]
                        close_inlier_num = len(close_inliers)

                        norm_dist = np.abs(clustering_norm -
                                           np.transpose(proposal_norm)).sum(1)
                        close_norm_idx = np.where(norm_dist < 0.6)[0]
                        close_norm_num = len(close_norm_idx)

                        if close_inlier_num >= best_fit_num and broad_inlier_num >= num_points / (
                                4 - count_pred
                        ) * 0.9 and close_norm_num >= num_points / (
                                4 - count_pred) * 0.9:
                            best_fit_num = close_inlier_num
                            best_fit_norm = proposal_norm
                            best_fit_cen = clustering_points[
                                close_inliers].mean(0)
                            best_fit_idx = clustering_points_idx[close_inliers]
                            best_norm_dist = norm_dist
                            best_close_norm_idx = np.where(
                                best_norm_dist < 0.6)[0]

                    if best_fit_num == 0 or num_points <= 20:
                        break

                    clustering_points_same_sym = clustering_points[
                        best_close_norm_idx]

                    clustering_diff_same_sym = clustering_points_same_sym - best_fit_cen
                    clustering_dist_same_sym = np.abs(
                        np.matmul(clustering_diff_same_sym, best_fit_norm))

                    close_inliers = np.where(
                        clustering_dist_same_sym < close_thresh)[0]
                    close_inlier_num = len(close_inliers)

                    best_fit_num = close_inlier_num

                    broad_inliers = np.where(
                        clustering_dist_same_sym < broad_thresh)[0]
                    broad_inlier_num = len(broad_inliers)

                    def f(x):
                        dist = 0
                        # import pdb;pdb.set_trace()
                        for point in clustering_points_same_sym[broad_inliers]:
                            dist += np.abs(
                                (point * x[0:3]).sum() + x[3]) / np.sqrt(
                                    np.sum(np.square(x[0:3]), axis=0))

                        return dist

                    start_point = np.zeros(4)
                    start_point[0:3] = np.copy(best_fit_norm[:, 0])
                    start_point[3] = (-best_fit_cen *
                                      best_fit_norm[:, 0]).sum()

                    min_point = fmin(f, start_point, maxiter=50)

                    # import pdb;pdb.set_trace()
                    min_point = min_point / np.sqrt(
                        np.sum(np.square(min_point[0:3]), axis=0))

                    x_val = -(min_point[3] + best_fit_cen[1] * min_point[1] +
                              best_fit_cen[2] * min_point[2]) / min_point[0]

                    y_val = -(min_point[3] + best_fit_cen[0] * min_point[0] +
                              best_fit_cen[2] * min_point[2]) / min_point[1]

                    z_val = -(min_point[3] + best_fit_cen[0] * min_point[0] +
                              best_fit_cen[1] * min_point[1]) / min_point[2]

                    if np.abs(x_val) < 1:
                        new_pred_loc = np.array(
                            [x_val, best_fit_cen[1], best_fit_cen[2]])
                    elif np.abs(z_val) < 1:
                        new_pred_loc = np.array(
                            [best_fit_cen[0], best_fit_cen[1], z_val])
                    else:
                        new_pred_loc = np.array(
                            [best_fit_cen[0], y_val, best_fit_cen[2]])

                    new_proposal_norm = min_point[0:3]
                    clustering_diff = clustering_points_same_sym - new_pred_loc
                    clustering_dist = np.abs(
                        np.matmul(clustering_diff, new_proposal_norm))

                    close_inliers = np.where(clustering_dist < close_thresh)[0]
                    new_close_inlier_num = len(close_inliers)

                    broad_inliers = np.where(clustering_dist < broad_thresh)[0]
                    new_broad_inlier_num = len(broad_inliers)
                    # import pdb;pdb.set_trace()
                    if new_close_inlier_num >= close_inlier_num:
                        best_fit_num = new_close_inlier_num
                        best_fit_norm = new_proposal_norm[:, np.newaxis]
                        best_fit_cen = new_pred_loc

                    if best_fit_num == 0:
                        break
                    else:

                        print('predicted norm:{}, predicted point:{}'.format(
                            best_fit_norm, best_fit_cen))

                        max_idx = np.argmax(
                            np.abs(np.matmul(target_sym, best_fit_norm)))
                        sym_product = np.abs(
                            np.matmul(target_sym, best_fit_norm)[max_idx][0])
                        sym_dist = np.abs((target_sym[max_idx] *
                                           (best_fit_cen - target_cen)).sum())

                        norm_dist = np.abs(clustering_norm -
                                           np.transpose(best_fit_norm)).sum(1)
                        scrub_close_norm_idx = np.where(norm_dist < 1.3)[0]

                        # import pdb;pdb.set_trace()
                        predicted_confidence = best_fit_num / len(
                            best_close_norm_idx) - np.abs(
                                clustering_norm[best_close_norm_idx] -
                                np.transpose(best_fit_norm)).mean() * 3 * 1.5
                        predicted_confidence = max(0, predicted_confidence)

                        for dist_idx in range(5):
                            if sym_product > PRODUCT_THRESHOLD and sym_dist < (
                                    dist_idx + 1) * 0.01:
                                # import pdb;pdb.set_trace()
                                sym_conf[dist_idx, max_idx] = max(
                                    sym_conf[dist_idx, max_idx],
                                    predicted_confidence)

                            else:
                                conf_fp[dist_idx].append(predicted_confidence)

                        clustering_points_idx = np.setdiff1d(
                            clustering_points_idx,
                            clustering_points_idx[scrub_close_norm_idx])

                        clustering_norm = pred_norm[0,
                                                    clustering_points_idx, :]
                        clustering_points = points[0, clustering_points_idx, :]

                        num_points = len(clustering_points_idx)
                        # import pdb;pdb.set_trace()

                # import pdb;pdb.set_trace()

                for dist_idx in range(5):
                    for i in range(target_sym.shape[0]):
                        conf_tp_or_fn[dist_idx].append(sym_conf[dist_idx, i])
                # import pdb;pdb.set_trace()

    # import pdb;pdb.set_trace()

    print(conf_tp_or_fn)
    print(conf_fp)

    # import pdb;pdb.set_trace()

    for dist_idx in range(5):
        for t in range(1, 1001):
            conf_thresh = t / 1000

            true_positives = len(
                np.where(np.array(conf_tp_or_fn[dist_idx]) >= conf_thresh)[0])
            false_negatives = len(
                np.where(np.array(conf_tp_or_fn[dist_idx]) < conf_thresh)[0])
            false_positives = len(
                np.where(np.array(conf_fp[dist_idx]) >= conf_thresh)[0])
            if false_positives + true_positives > 0 and true_positives + false_negatives > 0:
                prec[dist_idx].append(true_positives /
                                      (false_positives + true_positives))
                recall[dist_idx].append(true_positives /
                                        (true_positives + false_negatives))

    return prec, recall
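The threshold sweep at the end of printCurve reduces to an ordinary precision/recall curve over confidence scores; distilled with synthetic confidences (a confidence of 0 marks a symmetry that was never recovered):

import numpy as np

conf_tp_or_fn = np.array([0.9, 0.7, 0.0, 0.4])   # one entry per ground-truth symmetry
conf_fp = np.array([0.3, 0.8])                   # confidences of spurious detections

prec, recall = [], []
for t in range(1, 1001):
    thresh = t / 1000.0
    tp = np.sum(conf_tp_or_fn >= thresh)
    fn = np.sum(conf_tp_or_fn < thresh)
    fp = np.sum(conf_fp >= thresh)
    if tp + fp > 0 and tp + fn > 0:
        prec.append(tp / float(tp + fp))
        recall.append(tp / float(tp + fn))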
Example #52
    def __getitem__(self, index):
        img = Image.open(self.list_rgb[index])
        ori_img = np.array(img)
        depth = np.array(Image.open(self.list_depth[index]))
        label = np.array(Image.open(self.list_label[index]))
        obj = self.list_obj[index]
        rank = self.list_rank[index]        

        if obj == 2:
            for i in range(0, len(self.meta[obj][rank])):
                if self.meta[obj][rank][i]['obj_id'] == 2:
                    meta = self.meta[obj][rank][i]
                    break
        else:
            meta = self.meta[obj][rank][0]

        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
        if self.mode == 'eval':
            mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
        else:
            mask_label = ma.getmaskarray(ma.masked_equal(label, np.array([255, 255, 255])))[:, :, 0]
        
        mask = mask_label * mask_depth

        if self.add_noise:
            img = self.trancolor(img)

        img = np.array(img)[:, :, :3]
        img = np.transpose(img, (2, 0, 1))
        img_masked = img

        rmin, rmax, cmin, cmax = get_bbox(meta['obj_bb'])

        img_masked = img_masked[:, rmin:rmax, cmin:cmax]
        #p_img = np.transpose(img_masked, (1, 2, 0))
        #scipy.misc.imsave('evaluation_result/{0}_input.png'.format(index), p_img)

        target_r = np.resize(np.array(meta['cam_R_m2c']), (3, 3))
        target_t = np.array(meta['cam_t_m2c'])
        add_t = np.array([random.uniform(-self.noise_trans, self.noise_trans) for i in range(3)])

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) == 0:
            cc = torch.LongTensor([0])
            return(cc, cc, cc, cc, cc, cc)

        if len(choose) > self.num:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num - len(choose)), 'wrap')
        
        depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])

        cam_scale = 1.0
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
        pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        cloud = np.add(cloud, -1.0 * target_t) / 1000.0
        cloud = np.add(cloud, target_t / 1000.0)  # net effect: cloud / 1000.0 (mm -> m)

        if self.add_noise:
            cloud = np.add(cloud, add_t)

        #fw = open('evaluation_result/{0}_cld.xyz'.format(index), 'w')
        #for it in cloud:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))	
        #fw.close()

        model_points = self.pt[obj] / 1000.0
        dellist = [j for j in range(0, len(model_points))]
        dellist = random.sample(dellist, len(model_points) - self.num_pt_mesh_small)
        model_points = np.delete(model_points, dellist, axis=0)

        #fw = open('evaluation_result/{0}_model_points.xyz'.format(index), 'w')
        #for it in model_points:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        #fw.close()

        target = np.dot(model_points, target_r.T)
        if self.add_noise:
            target = np.add(target, target_t / 1000.0 + add_t)
            out_t = target_t / 1000.0 + add_t
        else:
            target = np.add(target, target_t / 1000.0)
            out_t = target_t / 1000.0

        #fw = open('evaluation_result/{0}_tar.xyz'.format(index), 'w')
        #for it in target:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        #fw.close()

        return torch.from_numpy(cloud.astype(np.float32)), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy(img_masked.astype(np.float32))), \
               torch.from_numpy(target.astype(np.float32)), \
               torch.from_numpy(model_points.astype(np.float32)), \
               torch.LongTensor([self.objlist.index(obj)])
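A note on the masking idiom above: ma.masked_not_equal(depth, 0) masks every nonzero pixel, so its mask array is True exactly where the depth sensor returned a valid (nonzero) reading, while ma.masked_equal(label, 255) is True where the segmentation label marks the object. Multiplying the two boolean arrays ANDs them. A minimal, self-contained sketch of the same idiom with made-up 2x2 arrays:

import numpy as np
import numpy.ma as ma

depth = np.array([[0, 1200],
                  [800, 0]])
label = np.array([[255, 255],
                  [0, 255]])

# True where depth != 0, i.e. where the sensor returned a reading
mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
# True where the label marks the object (value 255)
mask_label = ma.getmaskarray(ma.masked_equal(label, 255))

# Elementwise AND: pixels on the object that also have valid depth
mask = mask_label * mask_depth
print(mask)
# [[False  True]
#  [False False]]
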
Example #53
    depth_addr = data_path + "depth/" + str_num + "-depth.png"
    mask_addr = data_path + "mask/" + str_num + ".png"

    img = Image.open(rgb_addr)
    depth = np.array(Image.open(depth_addr))
    masks = np.array(Image.open(mask_addr))

    my_result_wo_refine = []
    my_result = []

    for idx in range(len(detected_classIDs)):
        itemid = detected_classIDs[idx]
        
        maskid = idx + 1
        try:
            mask = ma.getmaskarray(ma.masked_equal(masks, maskid))
            rmin, rmax, cmin, cmax = get_bbox(mask)

            print('itemid: {0}\n'.format(itemid))
            print('rmin {0}, rmax {1}, cmin {2}, cmax {3}'.format(rmin, rmax, cmin, cmax))

            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(masks, maskid))
            mask = mask_label * mask_depth
            choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]

            if len(choose) > num_points:
                c_mask = np.zeros(len(choose), dtype=int)
                c_mask[:num_points] = 1
                np.random.shuffle(c_mask)
                choose = choose[c_mask.nonzero()]
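The c_mask block used in both of the last two examples draws a fixed-size random subset of the valid pixel indices: scatter num_points ones through a zero vector, shuffle, and keep the indices where the ones landed. A standalone sketch of the pattern (the arrays here are made up):

import numpy as np

choose = np.arange(10)  # stand-in for the valid-pixel indices
num_points = 4

c_mask = np.zeros(len(choose), dtype=int)
c_mask[:num_points] = 1
np.random.shuffle(c_mask)            # scatter the ones at random positions
choose = choose[c_mask.nonzero()]    # keep exactly num_points random indices
print(len(choose))                   # 4

np.random.choice(choose, num_points, replace=False) would be the more direct spelling of the same sampling.
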
Example #54
def extract_static_vars_local(lat1, lat2, lon1, lon2, area_name, var_list,
                              doy_start, doy_end):
    lat_indices, lon_indices = select_area(lat1, lat2, lon1, lon2, "M03")
    lats, lons = get_lat_lon("M03")
    assert (len(lats) != 0 and len(lons) != 0)
    lats = lats[lat_indices[0]:lat_indices[1]]
    lons = lons[lon_indices[0]:lon_indices[1]]

    out_path = get_out_path(os.path.join("Data", "Sentinel"))
    fh_out = Dataset(
        os.path.join(out_path, "static_vars_" + area_name + ".nc"), "w")
    fh_out.createDimension("lat", len(lats))
    fh_out.createDimension("lon", len(lons))

    general_in_path = os.path.join("n5eil01u.ecs.nsidc.org", "SMAP",
                                   "SPL2SMAP_S.002")
    first_flag = True

    for doy_folder in generate_doy(doy_start, doy_end, "."):
        if doy_folder in os.listdir(general_in_path):
            for f in os.listdir(os.path.join(general_in_path, doy_folder)):
                if f.endswith(".h5"):
                    fh_in = Dataset(
                        os.path.join(general_in_path, doy_folder, f), "r")
                    group_3km = fh_in.groups[
                        "Soil_Moisture_Retrieval_Data_3km"]

                    lat_start = group_3km.variables["EASE_row_index_3km"][0, 0]
                    lat_end = group_3km.variables["EASE_row_index_3km"][-1, 0]
                    lon_start = group_3km.variables["EASE_column_index_3km"][0, 0]
                    lon_end = group_3km.variables["EASE_column_index_3km"][0, -1]

                    if lat_end <= lat_indices[0] or lat_start >= lat_indices[1] \
                            or lon_end <= lon_indices[0] or lon_start >= lon_indices[1]:
                        fh_in.close()
                        continue

                    print(f)
                    if first_flag:
                        for v_name, varin in group_3km.variables.items():
                            if v_name in ["latitude_3km", "longitude_3km"]:
                                outVar = fh_out.createVariable(
                                    v_name[:3], varin.datatype, (v_name[:3]))
                            elif v_name in var_list:
                                outVar = fh_out.createVariable(
                                    v_name[:-4], varin.datatype,
                                    ("lat", "lon"))
                            else:
                                continue
                            outVar.setncatts({
                                k: varin.getncattr(k)
                                for k in varin.ncattrs()
                            })
                        fh_out.variables["lat"][:] = lats[:]
                        fh_out.variables["lon"][:] = lons[:]

                        first_flag = False

                    out_lat_start = max(lat_start - lat_indices[0], 0)
                    out_lat_end = min(lat_end + 1 - lat_indices[0], len(lats))
                    out_lon_start = max(lon_start - lon_indices[0], 0)
                    out_lon_end = min(lon_end + 1 - lon_indices[0], len(lons))
                    in_lat_start = max(lat_indices[0] - lat_start, 0)
                    in_lat_end = min(lat_indices[1] - lat_start,
                                     lat_end - lat_start + 1)
                    in_lon_start = max(lon_indices[0] - lon_start, 0)
                    in_lon_end = min(lon_indices[1] - lon_start,
                                     lon_end - lon_start + 1)
                    assert (out_lat_end - out_lat_start == in_lat_end -
                            in_lat_start)
                    assert (out_lon_end - out_lon_start == in_lon_end -
                            in_lon_start)
                    for v_name, varin in group_3km.variables.items():
                        if v_name in var_list:
                            a = fh_out.variables[
                                v_name[:-4]][out_lat_start:out_lat_end,
                                             out_lon_start:out_lon_end]
                            b = varin[in_lat_start:in_lat_end,
                                      in_lon_start:in_lon_end]
                            if not isinstance(a, ma.MaskedArray):
                                a = ma.array(a, mask=np.zeros(a.shape))
                            if not isinstance(b, ma.MaskedArray):
                                b = ma.array(b, mask=np.zeros(b.shape))

                            for i in range(0, out_lat_end - out_lat_start):
                                for j in range(0, out_lon_end - out_lon_start):
                                    a_masked = ma.getmaskarray(a)[i, j]
                                    b_masked = ma.getmaskarray(b)[i, j]
                                    if not a_masked and not b_masked:
                                        assert a[i, j] == b[i, j], \
                                            v_name + " " + str(a[i, j]) + " " + str(b[i, j])
                                    elif a_masked and not b_masked:
                                        a[i, j] = b[i, j]
                            fh_out.variables[v_name[:-4]][
                                out_lat_start:out_lat_end,
                                out_lon_start:out_lon_end] = a

                    fh_in.close()
    fh_out.close()
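The out_*/in_* arithmetic above clips each swath to the requested window: out_* index the output grid, in_* index the swath, and the asserts check that both slices end up the same shape. A worked example with made-up indices:

# Output window covers global rows 150..249; a swath covers rows 100..199.
lat_indices = (150, 250)
lat_start, lat_end = 100, 199
n_out_rows = lat_indices[1] - lat_indices[0]   # 100

out_lat_start = max(lat_start - lat_indices[0], 0)               # 0
out_lat_end = min(lat_end + 1 - lat_indices[0], n_out_rows)      # 50
in_lat_start = max(lat_indices[0] - lat_start, 0)                # 50
in_lat_end = min(lat_indices[1] - lat_start, lat_end - lat_start + 1)  # 100

# Swath rows 50..99 land in output rows 0..49; both slices are 50 rows long.
assert out_lat_end - out_lat_start == in_lat_end - in_lat_start
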
Example #55
def flag(Data,
         NoiseData,
         thres=3.0,
         max_noise_factor=-1,
         modes_subtract=1,
         filter_type='edge'):
    """Flags data for outliers using a signal subtracted data set.
    
    Flags outliers of in a time stream data by looking at a version of the data
    that has had the signal subtracted out of it.  Each frequency channel,
    polarization and cal state are treated separately.

    Parameters
    ----------
    Data : DataBlock Object
        Data to be flaged.  Upon exit, this object will have new flags.
    NoiseData : DataBlock Object
        Version of `Data` with the signal subtracted.
    thres : float
        Threshold for flagging in units of sigma (default is 3.0).
    modes_subtract : int
        How many modes to remove for high pass filtering.
    filter_type : {'edge', 'gaussian', 'gaussian/edge'}
        Type of high pass filtering to use.
    """

    # Get the mask and the data as normal arrays.
    # Copy seems to be necessary if the mask is None.
    data = NoiseData.data.filled(0).copy()
    mask = ma.getmaskarray(NoiseData.data)
    ## High pass filter the data to make outliers stand out.
    un_mask = sp.logical_not(mask)
    NoiseData.calc_time()
    time = NoiseData.time
    n_time = len(time)
    # How many basis polynomials we need and what fraction of each mode
    # gets subtracted out.
    if filter_type == 'edge':
        n_polys = modes_subtract
        subtract_weights = sp.ones(n_polys)
    elif filter_type == 'gaussian' or filter_type == 'gaussian/edge':
        n_polys = 4 * modes_subtract
        subtract_weights = sp.exp(
            -(sp.arange(n_polys, dtype=float) / modes_subtract)**2 / 2.)
        if filter_type == 'gaussian/edge':
            subtract_weights[0:2] = 1.
    # Test if the mask is the same for all slices.  If it is, that greatly
    # reduces the work as we only have to generate one set of polynomials.
    all_masks_same = True
    for jj in range(n_time):
        if sp.all(un_mask[jj, ...] == un_mask[jj, 0, 0, 0]):
            continue
        else:
            all_masks_same = False
            break
    if all_masks_same:
        polys = misc.ortho_poly(time, n_polys, un_mask[:, 0, 0, 0], 0)
        polys.shape = (n_polys, len(time), 1, 1, 1)
    else:
        polys = misc.ortho_poly(time[:, None, None, None], n_polys, un_mask, 0)
    # Subtract the low-order modes (including the slope) out of the NoiseData.
    amps = sp.sum(data * un_mask * polys, 1)
    amps *= subtract_weights[:, None, None, None]
    data -= sp.sum(amps[:, None, :, :, :] * un_mask[None, :, :, :, :] * polys,
                   0)
    ## Do the main outlier flagging.
    # Iteratively flag on sliding scale to get closer and closer to desired
    # threshold.
    max_thres = sp.sqrt(n_time) / 2.
    n_iter = 3
    thresholds = (max_thres**(n_iter - 1 - sp.arange(n_iter)) *
                  thres**sp.arange(n_iter))**(1. / (n_iter - 1))
    for threshold in thresholds:
        # Subtract the mean from every channel.
        this_data = masked_subtract_mean(data, mask, 0)
        # Calculate the variance.
        un_mask = sp.logical_not(mask)
        counts = sp.sum(un_mask, 0)
        counts[counts == 0] = 1
        std = sp.sqrt(sp.sum(this_data**2 * un_mask, 0) / counts)
        bad_inds = abs(this_data) > threshold * std
        # If any polarization or cal state is masked, they all should be.
        bad_inds = sp.any(sp.any(bad_inds, 1), 1)
        mask[bad_inds[:, None, None, :]] = True
    ## Now look for times with excursions in the frequency average
    # (achromatic outliers).
    # Compute the frequency mean.
    un_mask = sp.logical_not(mask)
    counts = sp.sum(un_mask, -1)
    fmean_un_mask = counts >= 1
    counts[counts == 0] = 1
    fmean = sp.sum(data * un_mask, -1) / counts
    # Subtract the time mean.
    fmean = masked_subtract_mean(fmean, sp.logical_not(fmean_un_mask), 0)
    # Get the variance.
    counts = sp.sum(fmean_un_mask, 0)
    counts[counts == 0] = 1
    fmean_std = sp.sqrt(sp.sum(fmean**2 * fmean_un_mask, 0) / counts)
    # Flag any time that is an outlier (for any polarization or cal state).
    bad_times = sp.any(sp.any(abs(fmean) > thres * fmean_std, 1), 1)
    mask[bad_times, :, :, :] = True
    ## Flag for very noisy channels.
    if max_noise_factor > 0:
        # Do this a few times to make sure we get everything.
        for ii in range(3):
            this_data = masked_subtract_mean(data, mask, 0)
            # Compute variance accounting for the mask.
            un_mask = sp.logical_not(mask)
            counts = sp.sum(un_mask, 0)
            vars_un_mask = counts >= 1
            counts[counts == 0] = 1
            vars = sp.sum(this_data**2 * un_mask, 0) / counts
            # Find the mean of the variances.
            counts = sp.sum(vars_un_mask, -1)
            counts[counts == 0] = 1
            mean_vars = sp.sum(vars * vars_un_mask, -1) / counts
            # Find channels that stand out (for any polarization or cal state).
            bad_chans = sp.any(
                sp.any(vars > max_noise_factor * mean_vars[:, :, None], 0), 0)
            mask[:, :, :, bad_chans] = True
    ## Transfer the mask to the DataBlock objects.
    Data.data[mask] = ma.masked
    NoiseData.data[mask] = ma.masked
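The thresholds schedule above interpolates geometrically from a very permissive cut (max_thres) down to the target thres over n_iter passes, so the first pass only removes the most extreme outliers before the variance estimate tightens. A quick numeric sketch of the formula:

import numpy as np

n_time = 100
thres = 3.0
n_iter = 3
max_thres = np.sqrt(n_time) / 2.   # 5.0
thresholds = (max_thres**(n_iter - 1 - np.arange(n_iter)) *
              thres**np.arange(n_iter))**(1. / (n_iter - 1))
print(thresholds)                  # approximately [5.    3.873 3.   ]
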
Example #56
def sanitize_array(
    data,
    index: Optional["Index"],
    dtype: Optional[DtypeObj] = None,
    copy: bool = False,
    raise_cast_failure: bool = False,
) -> ArrayLike:
    """
    Sanitize input data to an ndarray or ExtensionArray, copy if specified,
    coerce to the dtype if specified.
    """

    if isinstance(data, ma.MaskedArray):
        mask = ma.getmaskarray(data)
        if mask.any():
            data, fill_value = maybe_upcast(data, copy=True)
            data.soften_mask()  # set hardmask False if it was True
            data[mask] = fill_value
        else:
            data = data.copy()

    # extract ndarray or ExtensionArray, ensure we have no PandasArray
    data = extract_array(data, extract_numpy=True)

    # GH#846
    if isinstance(data, np.ndarray):

        if dtype is not None and is_float_dtype(
                data.dtype) and is_integer_dtype(dtype):
            # possibility of nan -> garbage
            try:
                subarr = _try_cast(data, dtype, copy, True)
            except ValueError:
                if copy:
                    subarr = data.copy()
                else:
                    subarr = np.array(data, copy=False)
        else:
            # we will try to copy by-definition here
            subarr = _try_cast(data, dtype, copy, raise_cast_failure)

    elif isinstance(data, ABCExtensionArray):
        # it is already ensured above this is not a PandasArray
        subarr = data

        if dtype is not None:
            subarr = subarr.astype(dtype, copy=copy)
        elif copy:
            subarr = subarr.copy()
        return subarr

    elif isinstance(data, (list, tuple)) and len(data) > 0:
        if dtype is not None:
            subarr = _try_cast(data, dtype, copy, raise_cast_failure)
        else:
            subarr = maybe_convert_platform(data)

        subarr = maybe_cast_to_datetime(subarr, dtype)

    elif isinstance(data, range):
        # GH#16804
        arr = np.arange(data.start, data.stop, data.step, dtype="int64")
        subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
    elif isinstance(data, abc.Set):
        raise TypeError("Set type is unordered")
    elif lib.is_scalar(data) and index is not None and dtype is not None:
        data = maybe_cast_to_datetime(data, dtype)
        if not lib.is_scalar(data):
            data = data[0]
        subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype)
    else:
        subarr = _try_cast(data, dtype, copy, raise_cast_failure)

    # scalar like, GH
    if getattr(subarr, "ndim", 0) == 0:
        if isinstance(data, list):  # pragma: no cover
            subarr = np.array(data, dtype=object)
        elif index is not None:
            value = data

            # figure out the dtype from the value (upcast if necessary)
            if dtype is None:
                dtype, value = infer_dtype_from_scalar(value)
            else:
                # need to possibly convert the value here
                value = maybe_cast_to_datetime(value, dtype)

            subarr = construct_1d_arraylike_from_scalar(
                value, len(index), dtype)

        else:
            return subarr.item()

    # the result that we want
    elif subarr.ndim == 1:
        if index is not None:

            # a 1-element ndarray
            if len(subarr) != len(index) and len(subarr) == 1:
                subarr = construct_1d_arraylike_from_scalar(
                    subarr[0], len(index), subarr.dtype)

    elif subarr.ndim > 1:
        if isinstance(data, np.ndarray):
            raise Exception("Data must be 1-dimensional")
        else:
            subarr = com.asarray_tuplesafe(data, dtype=dtype)

    if not (is_extension_array_dtype(subarr.dtype)
            or is_extension_array_dtype(dtype)):
        # This is to prevent mixed-type Series getting all casted to
        # NumPy string type, e.g. NaN --> '-1#IND'.
        if issubclass(subarr.dtype.type, str):
            # GH#16605
            # If not empty convert the data to dtype
            # GH#19853: If data is a scalar, subarr has already the result
            if not lib.is_scalar(data):
                if not np.all(isna(data)):
                    data = np.array(data, dtype=dtype, copy=False)
                subarr = np.array(data, dtype=object, copy=copy)

        if is_object_dtype(subarr.dtype) and not is_object_dtype(dtype):
            inferred = lib.infer_dtype(subarr, skipna=False)
            if inferred in {"interval", "period"}:
                subarr = array(subarr)

    return subarr
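For the MaskedArray branch at the top of sanitize_array, the observable behaviour is that masked slots become a fill value appropriate to the (possibly upcast) dtype, NaN for numeric input. A rough sketch of that effect as seen through the public API, not of the internals:

import numpy.ma as ma
import pandas as pd

data = ma.array([1, 2, 3], mask=[False, True, False])
s = pd.Series(data)
print(s.dtype)             # float64 -- ints are upcast so NaN can carry the mask
print(s.isna().tolist())   # [False, True, False]
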
Example #57
    def process_file(self, file_ind):
        params = self.params
        file_middle = params['file_middles'][file_ind]
        input_fname = (params['input_root'] + file_middle +
                       params['input_end'])
        sub_input_fname = (params['subtracted_input_root'] + file_middle +
                           params['input_end'])
        output_fname = (params['output_root'] + file_middle +
                        params['output_end'])
        sub_output_fname = (params['subtracted_output_root'] + file_middle +
                            params['output_end'])
        Writer = fitsGBT.Writer(feedback=self.feedback)
        SubWriter = fitsGBT.Writer(feedback=self.feedback)

        # Read in the data, and loop over data blocks.
        Reader = fitsGBT.Reader(input_fname, feedback=self.feedback)
        SubReader = fitsGBT.Reader(sub_input_fname, feedback=self.feedback)
        if (sp.any(Reader.scan_set != SubReader.scan_set)
                or sp.any(Reader.IF_set != SubReader.IF_set)):
            raise ce.DataError("IFs and scans don't match signal subtracted"
                               " data.")
        # Expand to all scans / IFs if none were specified.
        scan_inds = params['scans']
        if scan_inds is None or len(scan_inds) == 0:
            scan_inds = range(len(Reader.scan_set))
        if_inds = params['IFs']
        if if_inds is None or len(if_inds) == 0:
            if_inds = range(len(Reader.IF_set))
        if self.feedback > 1:
            print "New flags each block:",
        # Loop over scans and IFs
        for thisscan in scan_inds:
            for thisIF in if_inds:
                Data = Reader.read(thisscan, thisIF)
                SubData = SubReader.read(thisscan, thisIF)
                # Make sure they have agreeing masks to start.
                SubData.data[ma.getmaskarray(Data.data)] = ma.masked
                Data.data[ma.getmaskarray(SubData.data)] = ma.masked
                # Get initial number of flags.
                n_flags = ma.count_masked(Data.data)
                # Now do the flagging.
                flag(Data, SubData, params['thres'],
                     params['max_noise_factor'],
                     params['smooth_modes_subtract'], params['filter_type'])
                Data.add_history(
                    "Reflaged for outliers.",
                    ("Used file: " +
                     utils.abbreviate_file_path(sub_input_fname), ))
                SubData.add_history("Reflaged for outliers.")
                Writer.add_data(Data)
                SubWriter.add_data(SubData)
                # Report the number of new flags.
                n_flags = ma.count_masked(Data.data) - n_flags
                if self.feedback > 1:
                    print(n_flags, end=' ')
        if self.feedback > 1:
            print('')
        # Finally write the data back to file.
        utils.mkparents(output_fname)
        utils.mkparents(sub_output_fname)
        Writer.write(output_fname)
        SubWriter.write(sub_output_fname)
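The two assignments near the top of the scan loop synchronize the masks by masking, in each array, everything that is masked in the other; the result is the elementwise union of the two masks. A standalone sketch:

import numpy.ma as ma

a = ma.array([1., 2., 3.], mask=[True, False, False])
b = ma.array([4., 5., 6.], mask=[False, False, True])

b[ma.getmaskarray(a)] = ma.masked   # propagate a's mask into b
a[ma.getmaskarray(b)] = ma.masked   # propagate b's (now combined) mask into a

print(ma.getmaskarray(a).tolist())  # [True, False, True]
print(ma.getmaskarray(b).tolist())  # [True, False, True]
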
Example #58
def main():
    cfg = setup_config()
    pipeline = rs.pipeline()
    realsense_cfg = setup_realsense()
    pipeline.start(realsense_cfg)  # Start streaming
    visualizer = predictor.VisualizationDemo(cfg)

    ref_frame_axies = []
    ref_frame_label = []
    min_distance = 0.9
    label_cnt = 0
    frameth = 0

    my_t_pool = {}
    my_r_pool = {}

    while True:
        frameth += 1
        cur_frame_axies = []
        cur_frame_label = []
        my_t_per_frame = []
        my_r_per_frame = []

        align = rs.align(rs.stream.color)
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        rgb = aligned_frames.get_color_frame()
        rgb = np.asanyarray(rgb.get_data())
        frame = rgb.copy()

        # Do instance segmentation
        start = time.time()
        segmentation, vis = visualizer.run_on_image(frame)
        #print("Time = " + str(time.time()-start))

        cv2.imshow('Mask', vis)
        cv2.waitKey(1)

        # Get segmentation mask
        ori_label = segmentation['instances'].pred_masks.cpu().numpy()
        label = np.sum(ori_label, axis=0).astype(np.uint8)
        label = np.where(label != 0, 255, label)
        label = Image.fromarray(label).convert("L")
        label = np.asarray(label.convert('RGB')).astype(np.uint8)

        bboxes = segmentation['instances'].pred_boxes.tensor.cpu().numpy()
        xyxy_bboxes = bboxes
        bboxes = bbox_convert(bboxes)

        if len(bboxes) > 0:
            #depth_frames = frames.get_depth_frame()
            depth_frames = aligned_frames.get_depth_frame()

            video_profile = depth_frames.profile.as_video_stream_profile()
            intr = video_profile.get_intrinsics()
            depth = np.asanyarray(depth_frames.get_data())
            #centers = segmentation['instances'].pred_boxes.get_centers()
            if len(my_t_pool) > 0:
                last_key = list(my_t_pool.keys())[-1]

            for i in range(0, len(bboxes)):
                bbox_xyxy = np.array(list(xyxy_bboxes[i]))
                bbox = list(bboxes[i])
                print("Bounding Box:" + str(bbox))
                #center = bboxes[i].get_centers()
                #center = centers[i].cpu().numpy()
                num_idx = float('nan')
                max_value = 0

                label_of_object = ori_label[i].astype(np.uint8)
                label_of_object = np.where(label_of_object != 0, 255,
                                           label_of_object)
                label_of_object = Image.fromarray(label_of_object).convert("L")
                label_of_object = np.asarray(
                    label_of_object.convert('RGB')).astype(np.uint8)

                if len(ref_frame_label) > 0:
                    iou_list = []
                    b = bbox_xyxy
                    a = np.array(ref_frame_axies)
                    for k in range(len(ref_frame_axies)):
                        iou = iou_score(a[k], b)
                        iou_list.append(iou)
                    iou_list = np.array(iou_list)
                    max_value = iou_list.max()
                    if (max_value > min_distance):
                        min_idx = np.where(iou_list == max_value)[0][0]
                        num_idx = ref_frame_label[min_idx]

                if (math.isnan(num_idx)):
                    num_idx = label_cnt
                    label_cnt += 1
                cur_frame_label.append(num_idx)
                cur_frame_axies.append(bbox_xyxy)

                print(max_value)
                if (frameth == 1) or (max_value < 0.9) or \
                        (i > len(my_t_pool[last_key]) - 1) or (frameth % 20 == 0):
                    pos_text = (bbox[0], bbox[1])

                    class_id = segmentation['instances'].pred_classes[i].cpu().data.numpy()
                    print("Class: " + str(class_id))
                    #idx = class_id
                    if class_id == 0:
                        idx = 0
                    if class_id == 2:
                        idx = 1

                    model_points = model_points_list[idx]

                    mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
                    #mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
                    mask_label = ma.getmaskarray(
                        ma.masked_equal(label_of_object,
                                        np.array([255, 255, 255])))[:, :, 0]
                    mask = mask_label * mask_depth

                    rmin, rmax, cmin, cmax = posenet_deploy.get_bbox(bbox)

                    # choose
                    choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
                    if len(choose) == 0:
                        choose = torch.LongTensor([0])
                    if len(choose) > num_points:
                        c_mask = np.zeros(len(choose), dtype=int)
                        c_mask[:num_points] = 1
                        np.random.shuffle(c_mask)
                        choose = choose[c_mask.nonzero()]
                    else:
                        choose = np.pad(choose, (0, num_points - len(choose)),
                                        'wrap')

                    depth_masked = depth[
                        rmin:rmax,
                        cmin:cmax].flatten()[choose][:, np.newaxis].astype(
                            np.float32)
                    xmap_masked = xmap[
                        rmin:rmax,
                        cmin:cmax].flatten()[choose][:, np.newaxis].astype(
                            np.float32)
                    ymap_masked = ymap[
                        rmin:rmax,
                        cmin:cmax].flatten()[choose][:, np.newaxis].astype(
                            np.float32)
                    choose = np.array([choose])

                    # point cloud
                    pt2 = depth_masked / cam_scale
                    pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
                    pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
                    cloud = np.concatenate((pt0, pt1, pt2), axis=1)
                    cloud = cloud / 1000.0
                    # print(cloud.shape)

                    # cropped img
                    #img_masked = rgb[:, :, :3]
                    img_masked = rgb[:, :, ::-1]  # bgr to rgb
                    img_masked = np.transpose(img_masked, (2, 0, 1))
                    img_masked = img_masked[:, rmin:rmax, cmin:cmax]

                    my_mask = np.transpose(label_of_object, (2, 0, 1))
                    my_mask = my_mask[:, rmin:rmax, cmin:cmax]  # added by me to crop the mask
                    mask_img = np.transpose(my_mask, (1, 2, 0))
                    img_rgb = np.transpose(img_masked, (1, 2, 0))
                    croped_img_mask = cv2.bitwise_and(img_rgb, mask_img)
                    crop_image_to_check = croped_img_mask.copy()
                    cv2.imshow("mask_crop", croped_img_mask)
                    croped_img_mask = np.transpose(croped_img_mask, (2, 0, 1))

                    # Variables
                    cloud = torch.from_numpy(cloud.astype(
                        np.float32)).unsqueeze(0)
                    choose = torch.LongTensor(choose.astype(
                        np.int32)).unsqueeze(0)
                    #img_masked = torch.from_numpy(img_masked.astype(np.float32)).unsqueeze(0)
                    img_masked = torch.from_numpy(
                        croped_img_mask.astype(np.float32)).unsqueeze(0)
                    index = torch.LongTensor([idx]).unsqueeze(
                        0)  # Specify which object

                    cloud = Variable(cloud).cuda()
                    choose = Variable(choose).cuda()
                    img_masked = Variable(img_masked).cuda()
                    index = Variable(index).cuda()

                    # Deploy
                    with torch.no_grad():
                        pred_r, pred_t, pred_c, emb = estimator(
                            img_masked, cloud, choose, index)

                    pred_r = pred_r / torch.norm(pred_r, dim=2).view(
                        1, num_points, 1)
                    pred_c = pred_c.view(bs, num_points)
                    how_max, which_max = torch.max(pred_c, 1)
                    pred_t = pred_t.view(bs * num_points, 1, 3)
                    points = cloud.view(bs * num_points, 1, 3)

                    my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
                    my_t = (points.view(bs * num_points, 1, 3) +
                            pred_t)[which_max[0]].view(-1).cpu().data.numpy()
                    my_pred = np.append(my_r, my_t)

                    # Refinement
                    for ite in range(0, iteration):
                        T = Variable(torch.from_numpy(my_t.astype(
                            np.float32))).cuda().view(1, 3).repeat(
                                num_points,
                                1).contiguous().view(1, num_points, 3)
                        my_mat = quaternion_matrix(my_r)
                        R = Variable(
                            torch.from_numpy(my_mat[:3, :3].astype(
                                np.float32))).cuda().view(1, 3, 3)
                        my_mat[0:3, 3] = my_t

                        new_cloud = torch.bmm((cloud - T), R).contiguous()
                        pred_r, pred_t = refiner(new_cloud, emb, index)
                        pred_r = pred_r.view(1, 1, -1)
                        pred_r = pred_r / (torch.norm(pred_r, dim=2).view(
                            1, 1, 1))
                        my_r_2 = pred_r.view(-1).cpu().data.numpy()
                        my_t_2 = pred_t.view(-1).cpu().data.numpy()
                        my_mat_2 = quaternion_matrix(my_r_2)

                        my_mat_2[0:3, 3] = my_t_2
                        my_mat_final = np.dot(my_mat, my_mat_2)
                        my_r_final = copy.deepcopy(my_mat_final)
                        my_r_final[0:3, 3] = 0
                        my_r_final = quaternion_from_matrix(my_r_final, True)
                        my_t_final = np.array([
                            my_mat_final[0][3], my_mat_final[1][3],
                            my_mat_final[2][3]
                        ])

                        my_pred = np.append(my_r_final, my_t_final)
                        my_r = my_r_final
                        my_t = my_t_final

                        my_r_matrix = quaternion_matrix(my_r)[:3, :3]
                    #print("Time = " + str(time.time()-start))
                    my_t_per_frame.append(my_t)
                    my_r_per_frame.append(my_r_matrix)

                    #rotation = Rot.from_matrix(my_r_matrix)
                    #angle = rotation.as_euler('xyz', degrees=True)

                    my_t = np.around(my_t, 5)
                    #print("translation vector = " + str(my_t))
                    #print("rotation angles = " + str(my_r))

                    frame = posenet_deploy.get_3d_bbox(frame, model_points,
                                                       my_r_matrix, my_t)
                    frame = posenet_deploy.draw_axes(frame, my_r_matrix, my_t)

                    if check_inverted(crop_image_to_check):
                        cv2.putText(frame,
                                    str(num_idx) + "_inverted", pos_text,
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2, cv2.LINE_AA)
                    else:
                        cv2.putText(frame, str(num_idx), pos_text,
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2, cv2.LINE_AA)

                    #cv2.putText(frame, str(num_idx), pos_text, cv2.FONT_HERSHEY_SIMPLEX,
                    #            0.5, (0,255,0), 2, cv2.LINE_AA)

                    posenet_deploy.putText(frame, i, num_idx, class_id, my_t)
                    #cv2.imshow('Result', rgb)
                    #cv2.waitKey(1)

                else:
                    rmin, rmax, cmin, cmax = posenet_deploy.get_bbox(bbox)
                    img_masked = rgb[:, :, ::-1]  # bgr to rgb
                    img_masked = np.transpose(img_masked, (2, 0, 1))
                    img_masked = img_masked[:, rmin:rmax, cmin:cmax]

                    my_mask = np.transpose(label_of_object, (2, 0, 1))
                    my_mask = my_mask[:, rmin:rmax, cmin:cmax]  # added by me to crop the mask
                    mask_img = np.transpose(my_mask, (1, 2, 0))
                    img_rgb = np.transpose(img_masked, (1, 2, 0))
                    croped_img_mask = cv2.bitwise_and(img_rgb, mask_img)
                    crop_image_to_check = croped_img_mask.copy()

                    pos_text = (bbox[0], bbox[1])
                    last_key = list(my_t_pool.keys())[-1]

                    print("POOL: " + str(my_t_pool[last_key]))
                    class_id = segmentation['instances'].pred_classes[i].cpu().data.numpy()

                    my_t = my_t_pool[last_key][min_idx]
                    my_r_matrix = my_r_pool[last_key][min_idx]

                    my_t_per_frame.append(my_t)
                    my_r_per_frame.append(my_r_matrix)

                    frame = posenet_deploy.get_3d_bbox(frame, model_points,
                                                       my_r_matrix, my_t)
                    frame = posenet_deploy.draw_axes(frame, my_r_matrix, my_t)

                    if check_inverted(crop_image_to_check):
                        cv2.putText(frame,
                                    str(num_idx) + "_inverted", pos_text,
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2, cv2.LINE_AA)
                    else:
                        cv2.putText(frame, str(num_idx), pos_text,
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2, cv2.LINE_AA)

                    #cv2.putText(frame, str(num_idx), pos_text, cv2.FONT_HERSHEY_SIMPLEX,
                    #            0.5, (0,255,0), 2, cv2.LINE_AA)

                    posenet_deploy.putText(frame, i, num_idx, class_id, my_t)

            if len(my_t_per_frame) > 0:
                my_t_pool[frameth] = my_t_per_frame
                my_r_pool[frameth] = my_r_per_frame

            ref_frame_label = cur_frame_label
            ref_frame_axies = cur_frame_axies

            end = time.time() - start
            cv2.putText(frame,
                        "Time processing: " + str(round(end, 3)) + " seconds",
                        (100, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),
                        2, cv2.LINE_AA)
            cv2.imshow('Result', frame)
            cv2.waitKey(1)

        else:
            # Show images
            #video_writer.write(rgb)
            cv2.imshow('Result', rgb)
            cv2.waitKey(1)

    pipeline.stop()
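iou_score (and bbox_convert) are not shown in this snippet; given that xyxy_bboxes holds corner-format boxes, a conventional box IoU, which is what the matching loop appears to assume, might look like the following hypothetical stand-in:

def iou_score(box_a, box_b):
    # Hypothetical helper: IoU of two [x1, y1, x2, y2] boxes.
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)

print(iou_score([0, 0, 10, 10], [5, 5, 15, 15]))  # 25 / 175 = 0.142857...
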
Example #59
def woa_normbias(data, v, cfg):
    """

        FIXME: Move this procedure into a class to conform with the new system
          and include a limit on the minimum amount of samples to trust it. For
          example, consider as masked all climatological values estimated from
          fewer than 5 samples.
    """

    # 3 is the minimum possible to estimate the std, but I should use a higher value.
    min_samples = 3
    woa = None

    db = WOA()
    if v not in db.keys():
        vtype = v[:-1]
    else:
        vtype = v

    # Temporary solution while I'm not ready to handle tracks.
    if ('LATITUDE' in data) and ('LONGITUDE' in data) \
            and ('LATITUDE' not in data.attributes) \
            and ('LONGITUDE' not in data.attributes):
        if 'datetime' in data.keys():
            d = data['datetime']
        elif ('datetime' in data.attributes):
            d0 = data.attributes['datetime']
            if ('timeS' in data.keys()):
                d = [d0 + timedelta(seconds=s) for s in data['timeS']]
            else:
                d = [data.attributes['datetime']] * len(data['LATITUDE'])

        #woa = woa_track_from_file(
        #        d,
        #        data['LATITUDE'],
        #        data['LONGITUDE'],
        #        cfg['file'],
        #        varnames=cfg['vars'])

        module_logger.error("Sorry, I'm temporary not ready to handle tracks.")
        #woa = db[vtype].get_track(var=['mean', 'standard_deviation'],
        #        doy=d,
        #        depth=[0],
        #        lat=data['LATITUDE'],
        #        lon=data['LONGITUDE'])

    elif ('LATITUDE' in data.attributes.keys()) and \
            ('LONGITUDE' in data.attributes.keys()) and \
            ('PRES' in data.keys()):

                woa = db[vtype].track(
                        var=['mean', 'standard_deviation',
                            'number_of_observations'],
                        doy=int(data.attributes['datetime'].strftime('%j')),
                        depth=data['PRES'],
                        lat=data.attributes['LATITUDE'],
                        lon=data.attributes['LONGITUDE'])

    flag = np.zeros(data[v].shape, dtype='i1')
    features = {}

    try:
        woa_bias = data[v] - woa['mean']
        woa_normbias = woa_bias / woa['standard_deviation']

        ind = np.nonzero((woa['number_of_observations'] >= min_samples) &
                (np.absolute(woa_normbias) <= cfg['sigma_threshold']))
        flag[ind] = 1   # cfg['flag_good']
        ind = np.nonzero((woa['number_of_observations'] >= min_samples) &
                (np.absolute(woa_normbias) > cfg['sigma_threshold']))
        flag[ind] = 3   # cfg['flag_bad']

        # Flag as 9 any masked input value
        flag[ma.getmaskarray(data[v])] = 9

        features = {'woa_bias': woa_bias, 'woa_normbias': woa_normbias,
                'woa_std': woa['standard_deviation'],
                'woa_nsamples': woa['number_of_observations'],
                'woa_mean': woa['mean']}

    finally:
        # self.logger.warning("%s - WOA is not available at this site" %
        # self.name)
        return flag, features
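The core of the check is a climatological z-score: normbias = (measured - WOA mean) / WOA std, flagged good (1) within sigma_threshold and bad (3) outside it, with masked inputs flagged 9. A compact numeric sketch that omits the number_of_observations guard:

import numpy as np
import numpy.ma as ma

measured = ma.array([10.0, 25.0, 14.0], mask=[False, False, True])
woa_mean = np.full(3, 12.0)
woa_std = np.full(3, 2.0)
sigma_threshold = 3.0

normbias = (measured - woa_mean) / woa_std        # [-1.0, 6.5, masked]
flag = np.zeros(measured.shape, dtype='i1')
flag[(np.absolute(normbias) <= sigma_threshold).filled(False)] = 1  # good
flag[(np.absolute(normbias) > sigma_threshold).filled(False)] = 3   # bad
flag[ma.getmaskarray(measured)] = 9               # masked input
print(flag.tolist())                              # [1, 3, 9]
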
Example #60
def _one_to_one_extract_surface_flag_only_local(doy, lat1, lat2, lon1, lon2,
                                                area_name):
    """
        left-up: lat1 lon1  left-down: lat2 lon1  right-up: lat1 lon2  right-down: lat2 lon2
        e.g.
        For United States http://en.wikipedia.org/wiki/Extreme_points_of_the_United_States#Westernmost
        :param lat1: 50
        :param lat2: 24
        :param lon1: -125
        :param lon2: -66
        """
    flag_dic = {
        0: "static_water_body_flag",
        2: "coastal_mask_flag",
        3: "urban_area_flag",
        4: "precipitation_flag",
        5: "snow_or_ice_flag",
        6: "permanent_snow_or_ice_flag",
        7: "frozen_ground_flag",
        8: "frozen_ground_st_based",
        9: "mountainous_terrain_flag",
        10: "dense_vegetation_flag",
        11: "edge_cell_flag",
        12: "anomalous_sigma0_flag"
    }

    lat_indices, lon_indices = select_area(lat1, lat2, lon1, lon2, "M03")
    global_lats, global_lons = get_lat_lon("M03")
    assert (len(global_lats) != 0 and len(global_lons) != 0)

    in_path = os.path.join("n5eil01u.ecs.nsidc.org", "SMAP", "SPL2SMAP_S.002",
                           doy)
    out_path = get_out_path(
        os.path.join("Data", "Sentinel", "surface_flags", area_name, doy))

    for f in os.listdir(in_path):
        if f.endswith(".h5"):
            fh_in = Dataset(os.path.join(in_path, f), "r")
            group_3km = fh_in.groups["Soil_Moisture_Retrieval_Data_3km"]

            lat_start = group_3km.variables["EASE_row_index_3km"][0, 0]
            lat_end = group_3km.variables["EASE_row_index_3km"][-1, 0]
            lon_start = group_3km.variables["EASE_column_index_3km"][0, 0]
            lon_end = group_3km.variables["EASE_column_index_3km"][0, -1]

            if lat_end <= lat_indices[0] or lat_start >= lat_indices[1] \
                    or lon_end <= lon_indices[0] or lon_start >= lon_indices[1]:
                fh_in.close()
                continue

            print(f)

            lats = global_lats[lat_start:lat_end + 1]
            lons = global_lons[lon_start:lon_end + 1]

            fh_out = Dataset(os.path.join(out_path, f[:-3] + ".nc"), "w")
            fh_out.createDimension("lat", len(lats))
            fh_out.createDimension("lon", len(lons))

            for var_name in flag_dic.values():
                outVar = fh_out.createVariable(var_name, 'u1', (
                    'lat',
                    'lon',
                ))
                outVar.setncatts({'units': 'NA'})
                outVar.setncatts({'_FillValue': np.array([255]).astype('u1')})
                outVar[:] = ma.array(
                    np.zeros((len(lats), len(lons))),
                    mask=ma.getmaskarray(
                        group_3km.variables["surface_flag_3km"][:]))

            for v_name, varin in group_3km.variables.items():
                if v_name in ["latitude_3km", "longitude_3km"]:
                    outVar = fh_out.createVariable(v_name[:3], varin.datatype,
                                                   (v_name[:3]))
                    outVar.setncatts(
                        {k: varin.getncattr(k)
                         for k in varin.ncattrs()})
                elif v_name == "soil_moisture_3km":
                    outVar = fh_out.createVariable(v_name[:-4], varin.datatype,
                                                   ("lat", "lon"))
                    outVar.setncatts(
                        {k: varin.getncattr(k)
                         for k in varin.ncattrs()})
                    outVar[:] = varin[:]
            fh_out.variables["lat"].setncatts({"lat_start": lat_start})
            fh_out.variables["lat"].setncatts({"lat_end": lat_end})
            fh_out.variables["lon"].setncatts({"lon_start": lon_start})
            fh_out.variables["lon"].setncatts({"lon_end": lon_end})
            fh_out.variables["lat"][:] = lats[:]
            fh_out.variables["lon"][:] = lons[:]

            surface_flag = group_3km.variables["surface_flag_3km"][:]
            surface_flag_mask = ma.getmaskarray(
                group_3km.variables["surface_flag_3km"][:])
            for i in range(len(lats)):
                for j in range(len(lons)):
                    if not surface_flag_mask[i, j]:
                        bit_sf = '{0:016b}'.format(surface_flag[i, j])[::-1]
                        for bit_index in flag_dic:
                            if bit_sf[bit_index] == "1":
                                fh_out.variables[flag_dic[bit_index]][i, j] = 1

            fh_out.variables["anomalous_sigma0_flag"][:] = ma.array(
                fh_out.variables["anomalous_sigma0_flag"][:],
                mask=ma.getmaskarray(
                    group_3km.variables["soil_moisture_3km"][:]))
            fh_out.variables["edge_cell_flag"][:] = ma.array(
                fh_out.variables["edge_cell_flag"][:],
                mask=ma.getmaskarray(
                    group_3km.variables["soil_moisture_3km"][:]))

            fh_in.close()
            fh_out.close()
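The flag decoding above formats each 16-bit surface_flag as a binary string and reverses it so that string index 0 is the least significant bit. An equivalent, slightly more direct arithmetic form uses shift-and-mask; a small sketch with a made-up value:

surface_flag = 0b0000100000000101   # made-up value: bits 0, 2 and 11 set

bit_sf = '{0:016b}'.format(surface_flag)[::-1]            # string decoding
set_bits_str = [i for i in range(16) if bit_sf[i] == "1"]

set_bits_arith = [i for i in range(16) if (surface_flag >> i) & 1]

print(set_bits_str)    # [0, 2, 11]
print(set_bits_arith)  # [0, 2, 11]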