Example #1
0
def calcTemporalCorrelation(evaluationData, referenceData):
    '''
    Purpose ::
        Calculate the temporal correlation.
    
    Assumption(s) ::
        The first dimension of two datasets is the time axis.
    
    Input ::
        evaluationData - model data array of any shape
        referenceData- observation data array of any shape
            
    Output::
        temporalCorelation - A 2-D array of temporal correlation coefficients at each subregion
        sigLev - A 2-D array of confidence levels related to temporalCorelation 
    
    REF: 277-281 in Stat methods in atmos sci by Wilks, 1995, Academic Press, 467pp.
    sigLev: the correlation between model and observation is significant at sigLev * 100 %
    '''
    evaluationDataMask = process.create_mask_using_threshold(evaluationData, threshold = 0.75)
    referenceDataMask = process.create_mask_using_threshold(referenceData, threshold = 0.75)
    
    nregion = evaluationData.shape[0]
    temporalCorrelation = ma.zeros([nregion])-100.
    sigLev = ma.zeros([nregion])-100.
    for iregion in np.arange(nregion):
        temporalCorrelation[iregion], sigLev[iregion] = stats.pearsonr(evaluationData[iregion,:], referenceData[iregion,:])
        sigLev[iregion] = 1 - sigLev[iregion]
                    
    temporalCorrelation=ma.masked_equal(temporalCorrelation.data, -100.)        
    sigLev=ma.masked_equal(sigLev.data, -100.)    
    
    return temporalCorrelation, sigLev
Example #2
0
def maskedEqual(array, missingValue):
    """ Mask an array where equal to a given (missing)value.

        Unfortunately ma.masked_equal does not work with structured arrays. See:
        https://mail.scipy.org/pipermail/numpy-discussion/2011-July/057669.html

        If the data is a structured array the mask is applied for every field (i.e. forming a
        logical-and). Otherwise ma.masked_equal is called.
    """
    if array_is_structured(array):
        # Enforce the array to be masked
        if not isinstance(array, ma.MaskedArray):
            array = ma.MaskedArray(array)

        # Set the mask separately per field
        for nr, field in enumerate(array.dtype.names):
            if hasattr(missingValue, '__len__'):
                fieldMissingValue = missingValue[nr]
            else:
                fieldMissingValue = missingValue

            array[field] = ma.masked_equal(array[field], fieldMissingValue)

        check_class(array, ma.MaskedArray) # post-condition check
        return array
    else:
        # masked_equal works with missing is None
        result = ma.masked_equal(array, missingValue, copy=False)
        check_class(result, ma.MaskedArray) # post-condition check
        return result
    def calculate(self, tracks):

        dataMean = ma.zeros((len(self.lon_range) - 1,
                             len(self.lat_range) - 1))
        dataMin = ma.zeros((len(self.lon_range) - 1,
                            len(self.lat_range) - 1))
        dataMax = ma.zeros((len(self.lon_range) - 1,
                            len(self.lat_range) - 1))
        dataMed = ma.zeros((len(self.lon_range) - 1,
                            len(self.lat_range) - 1))
        log.debug("Processing %d tracks" % (len(tracks)))
        for cell in self.gridCells:
            vcell = np.array([])
            for t in tracks:
                ii = np.where(((t.Latitude >= cell.ymin) &
                               (t.Latitude < cell.ymax)) &
                              ((t.Longitude >= cell.xmin) &
                               (t.Longitude < cell.xmax)))[0]
                if len(ii) > 0:
                    vv = t.CentralPressure[ii].compress(t.CentralPressure[ii] < sys.maxint)
                    vcell = np.append(vcell, vv.compress(vv > 0.0))

            if len(vcell > 0):
                dataMean[cell.index[0], cell.index[1]] = np.mean(vcell)
                dataMin[cell.index[0], cell.index[1]] = np.min(vcell)
                dataMax[cell.index[0], cell.index[1]] = np.max(vcell)
                dataMed[cell.index[0], cell.index[1]] = np.median(vcell)

        dataMean = ma.masked_equal(dataMean, 0)
        dataMin = ma.masked_equal(dataMin, 0)
        dataMax = ma.masked_equal(dataMax, 0)
        dataMed = ma.masked_equal(dataMed, 0)
        return dataMean, dataMin, dataMax, dataMed
Example #4
0
def topex_track_table(ndata,tracks,cycles):
    """
    """
    track_list=[]
    cycle_list=[]
    for track, n, cycle in itertools.izip(tracks.compressed(),ndata.compressed(),cycles.compressed()):
        for i in range(n):
            track_list.append(track)
            cycle_list.append(cycle)
    track_list=ma.masked_equal(track_list,-1)
    cycle_list=ma.masked_equal(cycle_list,-1)
    return cycle_list,track_list
Example #5
0
    def load_6hr(self, var, DTime, maskflag=True, verbose=False):
        if (DTime < datetime(DTime.year, 1, 1, 6)):
            dirYear = DTime.year -1
            dY      = +1
        else:
            dirYear = DTime.year
            dY      = 0

        srcDir  = os.path.join(self.baseDir, self.model, self.runName
                    ,"y%04d"%(dirYear), "6hr")

        srcPath = os.path.join(srcDir
                    ,"%s.sa.1460x%dx%d"%(var, self.ny, self.nx))

        # timeslice for no leap year (e.g.:2001)
        timeslice = (datetime(2001+dY, DTime.month, DTime.day, DTime.hour)
                   - datetime(2001, 1, 1, 6)).total_seconds() / (6*3600)
        if verbose ==True: print srcPath

        out = self.readslice_float32(srcPath, timeslice, self.ny, self.nx)

        if maskflag==True:
            return ma.masked_equal(out, self.miss)
        else:
            return out
    def createResultArray(self, result, fillValue=0):
        t = time.time()

        # Create list of records
        data = [i[:] for i in result]
        print '\tLooping through results took - %.4f' %(time.time()-t), len(data)

        # Converting the none values returned into a zero value
        # using the ma library in numpy
        # - retrieve mask for None
        # - then assign the fillValue to those columns
        data = array(data)
        mask = ma.masked_equal(data, None).mask

        if mask.any():
            data[mask] = fillValue

        #Sorting the array by primary cols identifying the agent as
        # postgres seems to return queries without any order


        # Convert it back to a regular array to enable all the other processing
        print '\tSize of the data set that was retrieved - ', data.shape
        print '\tRecords were processed after query in %.4f' %(time.time()-t)

        return data
Example #7
0
def _int_connection_matrix(sources, targets, values):
    '''
    Return a 2D connection matrix filled with integer values (typically the
    number of synapses) in the form of a masked matrix (values equal to 0 are
    masked)

    Parameters
    ----------
    sources : ndarray of int
        The indices of the source neurons for each value.
    targets : ndarray of int
        The indices of the target neurons for each value.
    values : ndarray of int or int
        The value for each (source, target) pair.

    Returns
    -------
    matrix : ma.MaskedArray
        The connection matrix, masked for 0 values
    '''
    assert np.min(values) > 0 and np.max(values) < 256
    full_matrix = np.zeros((np.max(targets) - np.min(targets) + 1,
                            np.max(sources) - np.min(sources) + 1),
                           dtype=np.uint8)
    full_matrix[targets - np.min(targets),
                sources - np.min(sources)] = values
    return ma.masked_equal(full_matrix, 0, copy=False)
Example #8
0
 def setUp(self):
     lazy_data = as_lazy_data(ma.masked_equal([1, 2, 3, 4, 5], 3))
     self.lazy_cube = Cube(lazy_data)
     self.lazy_cube.add_dim_coord(DimCoord([6, 7, 8, 9, 10],
                                           long_name='foo'),
                                  0)
     self.func = lambda x: x >= 3
Example #9
0
    def copyVariable(self, varname):
        source = self._openExemplar()

        dims = list(source.variables[varname].dimensions[:])

        # substitute lat for y and lon for x
        for i in range(len(dims)):
            if dims[i] == "x":
                dims[i] = "lon"
            elif dims[i] == "y":
                dims[i] = "lat"

        # Ensure that all the required dimensions are present
        for dim in dims:
            self.copyDimension(dim)

        # get the missing value
        missing = round(source.variables[varname].missing_value, -13)

        # Create the variable in the fixed file
        the_copy = self._ncfile.createVariable(varname, source.variables[varname].dtype, dims, fill_value=missing)

        # copy the data to the fixed variable
        the_copy[:] = ma.masked_equal(source.variables[varname][:], missing)

        source.close()

        return the_copy
Example #10
0
    def getData(self, variable=None):
        '''
        Aggreate all data from the file list in the order given.
        '''
        filename = self.flist[0].strip()
        f = cdms2.open( filename, 'r' )

        if( variable != None ):
            self.vartoread = variable
        else:
            self.vartoread = self.variable.id
            
        data = f(self.vartoread)[:]

        # ---------------------------
        # Concatenate following files
        # ---------------------------
        for filename in self.flist[ 1: ]:
            print "reading %s" % filename.strip()
            f = cdms2.open( filename.strip(), 'r' )
            data2 = f(self.vartoread)[:]
            data = numpy.concatenate((data,data2), axis=0)
            f.close()

        data=ma.array(data=data, \
                      fill_value=self.missing_value, \
                      copy=0,         \
                      dtype='float32' )
        data=ma.masked_equal(data, self.missing_value, copy=0)
        return data
Example #11
0
 def transform(self, X, details=False):
     """
     Label hidden factors for (possibly previously unseen) samples of data.
     Parameters: samples of data, X, shape = [n_samples, n_visible]
     Returns: , shape = [n_samples, n_hidden]
     """
     Xm = ma.masked_equal(X, self.missing_values)
     log_marg_x = self.calculate_marginals_on_samples(self.theta, Xm)
     p_y_given_x, log_z = self.calculate_latent(log_marg_x)
     labels = self.label(p_y_given_x)
     if details == 'surprise':
         # Totally experimental
         log_marg_x = self.calculate_marginals_on_samples(self.theta, Xm, return_ratio=False)
         n_samples = Xm.shape[0]
         surprise = []
         for l in range(n_samples):
             q = - sum([max([log_marg_x[j,l,i,labels[l, j]]
                             for j in range(self.n_hidden)])
                        for i in range(self.n_visible)])
             surprise.append(q)
         return p_y_given_x, log_z, np.array(surprise)
     elif details:
         return p_y_given_x, log_z
     else:
         return labels
    def _check_sst_quality(self, dataset, product_type):
        mask_specs = product_type.get_mask_consistency_check_specs()
        if len(mask_specs) == 0:
            return

        sst_variable_names = product_type.get_sst_variable_names()
        if len(sst_variable_names) == 0:
            return

        quality_variable_name = mask_specs[0][2]
        quality_data = dataset.variables[quality_variable_name][:]

        valid_retrieval_quality = ma.masked_less(quality_data, 2)
        self.report["sst_valid_retrieval"] = float(valid_retrieval_quality.count())

        failed_retrieval_quality = ma.masked_not_equal(quality_data, 1)
        sst_variable = dataset.variables[sst_variable_names[0]]
        fill_value = sst_variable.getncattr('_FillValue')
        sst_quality_one_data = ma.array(sst_variable[:], mask=failed_retrieval_quality.mask)

        invalid_retrieval = ma.masked_equal(sst_quality_one_data, fill_value)
        self.report["sst_invalid_retrieval"] = float(invalid_retrieval.count())

        failed_retrieval = ma.masked_not_equal(sst_quality_one_data, fill_value)
        self.report["sst_failed_retrieval"] = float(failed_retrieval.count())

        not_ocean = ma.masked_not_equal(quality_data, 0)
        self.report["not_ocean"] = float(not_ocean.count())
Example #13
0
    def make_contour_bounds_shapefile(self):
        import databundles.geo as dg
        from numpy import ma
        import yaml


        shape_file_dir = self.filesystem.path('extracts','contours')
        shape_file = os.path.join(shape_file_dir, 'contours.shp') # One of two. 
        
        if os.path.exists(shape_file):
            return shape_file_dir

        partition = self.partitions.all[0]# There is only one
        hdf = partition.hdf5file
        hdf.open()
        
        a1,_ = hdf.get_geo('property')
        a2,aa = hdf.get_geo('violent')
     
        a = dg.std_norm(ma.masked_equal(a1[...] + a2[...],0))   # ... Converts to a Numpy array. 

        # Creates the shapefile in the extracts/contour directory
        envelopes = dg.bound_clusters_in_raster( a, aa, shape_file_dir, 0.1,0.7, use_bb=True, use_distance=50)
  
        # Cache the envelopes for later. 
        env_file = self.filesystem.path('build','envelopes.yaml')
        with open(env_file,'w') as f:
            f.write(yaml.dump(envelopes, indent=4, default_flow_style=False))
  
        return  shape_file_dir
Example #14
0
def main():
    #Get the raster from the disk
    rast_data, x_cellsize, y_cellsize = get_array("./data/elevation.tif")

    slope = generic_filter(rast_data, calc_slope, size=3, extra_arguments=(x_cellsize, y_cellsize))

    plt.imshow(ma.masked_equal(slope, -9999), cmap=plt.winter(), origin="lower")
    plt.show()
Example #15
0
    def __call__(self,x,clip=False):
        x   = array(x)
        ret = zeros(x.shape,'int')
        for i, b in enumerate(self.boundaries):
            ret[greater_equal(x,b)]   = i+1

        ret[less(x,self.vmin)]  = 0
        return ma.masked_equal(ret,-1)/float(self.N)
Example #16
0
 def toNumpyArray(self):
     """
     Returns a narray
     """
     bands = map(lambda b : b.data(),self.rasterdata.bands)
     nodataval = self.rasterdata.allBandStatistics()['nodata']
     bands = map(lambda b : masked_equal(b,nodataval),bands)
     return bands
 def __check_false_positives(self, reference_mask, objective_mask, check_name):
     # noinspection PyNoneFunctionAssignment,PyUnresolvedReferences
     false_positives = ma.masked_equal(np.logical_or(np.logical_not(objective_mask), reference_mask), True)
     false_positives_count = false_positives.count()
     self.report[check_name] = false_positives_count
     if false_positives_count > 0:
         filename = os.path.basename(self.source_pathname)
         self.report[check_name + '_failed_for'] = filename
Example #18
0
def write_colormap(file_name, a, map, break_scheme='even', min_val=None, max_val=None, ave_val=None):
    """Write a QGIS colormap file"""
    import numpy as np
    import numpy.ma as ma
    import math

    header = "# QGIS Generated Color Map Export File\nINTERPOLATION:DISCRETE\n"

    masked = ma.masked_equal(a, 0)

    min_ = np.min(masked) if not min_val else min_val
    max_ = np.max(a) if not max_val else max_val
    ave_ = masked.mean() if not ave_val else ave_val

    if break_scheme == 'even':
        max_ = max_ * 1.001  # Be sure to get all values
        range = min_ - max_
        delta = range * .001
        r = np.linspace(min_ - delta, max_ + delta, num=map['n_colors'] + 1)
    elif break_scheme == 'jenks':
        from ambry.geo import jenks_breaks

        r = jenks_breaks(a, map['n_colors'])
    elif break_scheme == 'geometric':
        r = geometric_breaks(map['n_colors'], min_, max_)
    elif break_scheme == 'logistic':
        r = logistic_breaks(map['n_colors'], min_, max_)
    elif break_scheme == 'exponential':
        r = exponential_breaks(map['n_colors'], ave_)
    elif break_scheme == 'stddev':
        sd = np.std(a)
    else:
        raise Exception("Unknown break scheme: {}".format(break_scheme))

    colors = map['map']

    colors.append(None)  # Causes the last item to be skipped

    alphas, alpha_step = np.linspace(64, 255, len(colors), retstep=True)
    alpha = alpha_step + 64

    with open(file_name, 'w') as f:
        f.write(header)
        last_me = None
        for v, me in zip(r, colors):
            if me:
                f.write(','.join([str(v), str(me['R']), str(me['G']), str(me['B']), str(int(alpha)), me['letter']]))
                alpha += alpha_step
                alpha = min(alpha, 255)
                f.write('\n')
                last_me = me

        # Prevents 'holes' where the value is higher than the max_val
        if max_val:
            v = np.max(a)
            f.write(','.join(
                [str(v), str(last_me['R']), str(last_me['G']), str(last_me['B']), str(int(alpha)), last_me['letter']]))
            f.write('\n')
Example #19
0
    def applyOperation( self, input_variable, operation ):
        result = None
        try:
            self.setTimeBounds( input_variable )
            operator = None
#            pydevd.settrace('localhost', port=8030, stdoutToServer=False, stderrToServer=True)
            wpsLog.debug( " $$$ ApplyOperation: %s " % str( operation ) )
            if operation is not None:
                type = operation.get('type','').lower()
                bounds = operation.get('bounds','').lower()
                op_start_time = time.clock() # time.time()
                if not bounds:
                    if type == 'departures':
                        ave = cdutil.averager( input_variable, axis='t', weights='equal' )
                        result = input_variable - ave
                    elif type == 'climatology':
                        result = cdutil.averager( input_variable, axis='t', weights='equal' )
                    else:
                        result = input_variable
                    time_axis = input_variable.getTime()
                elif bounds == 'np':
                    if   type == 'departures':
                        result = ma.anomalies( input_variable ).squeeze()
                    elif type == 'climatology':
                        result = ma.average( input_variable ).squeeze()
                    else:
                        result = input_variable
                    time_axis = input_variable.getTime()
                else:
                    if bounds == 'djf': operator = cdutil.DJF
                    elif bounds == 'mam': operator = cdutil.MAM
                    elif bounds == 'jja': operator = cdutil.JJA
                    elif bounds == 'son': operator = cdutil.SON
                    elif bounds == 'year':          operator = cdutil.YEAR
                    elif bounds == 'annualcycle':   operator = cdutil.ANNUALCYCLE
                    elif bounds == 'seasonalcycle': operator = cdutil.SEASONALCYCLE
                    if operator <> None:
                        if   type == 'departures':    result = operator.departures( input_variable ).squeeze()
                        elif type == 'climatology':   result = operator.climatology( input_variable ).squeeze()
                        else:                         result = operator( input_variable ).squeeze()
                    time_axis = result.getTime()
                op_end_time = time.clock() # time.time()
                wpsLog.debug( " ---> Base Operation Time: %.5f" % (op_end_time-op_start_time) )
            else:
                result = input_variable
                time_axis = input_variable.getTime()

            if isinstance( result, float ):
                result_data = [ result ]
            elif result is not None:
                if result.__class__.__name__ == 'TransientVariable':
                    result = ma.masked_equal( result.squeeze().getValue(), input_variable.getMissing() )
                result_data = result.tolist( numpy.nan )
            else: result_data = None
        except Exception, err:
            wpsLog.debug( "Exception applying Operation '%s':\n %s" % ( str(operation), traceback.format_exc() ) )
            return ( None, None )
 def _prep_data(self):
     self.rain_rate = np.array(self.rain_rate)
     self.Z = ma.masked_equal(self.Z, -9.999)
     self.nd = np.array(self.nd)
     self.nd[self.nd == -9.999] = 0
     self.Nd = np.array(self.nd)
     self.num_particles = np.array(self.num_particles)
     self.time = np.array(self.time)
     self.velocity = self.vd  # np.ndarray(self.vd)
Example #21
0
    def hist(self, dataarr, path):
        g = self.graph

        '''
            input array
                g1iage,g1ierr,g2iage,g2ierr...
                .
                .
                .
                g1nage,...
        '''
#        dataarr = loadtxt(data, delimiter=',', skiprows=1)
        g.new_plot(padding=[30, 10, 30, 30],
                   show_legend='ur')
        r, c = dataarr.shape
        for ci in range(0, c, 2):
            ages = dataarr[:, ci]
            errs = dataarr[:, ci + 1]
            ages = ma.masked_equal(ages, 0)
            errs = ma.masked_equal(errs, 0)

            relative_errs = errs / ages * 100

            y, x = histogram(relative_errs)

            # convert edges to mids
            mids = zeros(len(x) - 1)
            for i in range(len(x) - 1):
                mids[i] = x[i] + (x[i + 1] - x[i]) / 2.0

            width = x[1] - x[0]

            g.new_series(mids, y, type='bar', bar_width=width)

        # add the labels
        with open(path, 'r') as f:
            header = f.readline()
            for i, l in enumerate(header.split(',')):
                g.set_series_label(l, series=i)
#===============================================================================
#     hardcoded additions
#===============================================================================
        g.set_x_title('Relative Error (%)')
        g.set_y_title('Frequency')
Example #22
0
    def get_data_range(self):
        """
        Gets the datarange of the variable in this dataset
        NOTE: This method might be time-consuming if called on a large dataset
        as it may have to iterate over every single data point.
        :return: min and max tuple for the range
        """
        try:
            actual_range = self._variable.attributes['actual_range']
            return [float(actual_range[0]), float(actual_range[1])]
        except (KeyError, ValueError):
            pass

        try:
            return [float(self._variable.attributes['valid_min']), float(self._variable.attributes['valid_max'])]
        except (KeyError, ValueError):
            pass

        try:
            fill_value = self._variable.attributes.get('_FillValue', None)
            missing_value = self._variable.attributes.get('missing_value', None)
            if fill_value is not None and missing_value is not None:
                valid_array = masked_equal(masked_equal(self._variable.array, missing_value), fill_value)
            elif fill_value is None and missing_value is None:
                valid_array = self._variable.array
            elif fill_value is not None:
                valid_array = masked_equal(self._variable.array, fill_value)
            else:
                valid_array = masked_equal(self._variable.array, missing_value)

            min = float(amin(valid_array))
            max = float(amax(valid_array))
            if math.isnan(min):
                min = 0
            if math.isnan(max):
                max = 100
        except:
            # Use the default result if something goes wrong
            log.exception("Can not calculate data range using default")
            min = 0
            max = 100

        return [min, max]
Example #23
0
 def setMask(self, maskVal=None, band="All"):
     if band != "All":
         self.unsetMask(band)
         self.setNoData(maskVal, band)
         if self.nodatad[band] is not None:
             self.datad[band] = ma.masked_equal(self.datad[band], self.nodatad[band])
     else:
         self.unsetMask()
         for band in range(1, self.bands + 1):
             self.setMask(maskVal, band)
Example #24
0
def safeDataStats(data, nodata=None):
    if nodata is not None:
        data = ma.masked_equal(data, nodata)

    stats = (data.min(), data.max(), data.mean(), data.std())
    stddev = stats[-1]
    if ma.isMaskedArray(stddev) or stddev == 0:
        stats = (None, None, None, None)

    return stats
Example #25
0
def _proportion(array, function, axis, **kwargs):
    # if the incoming array is masked use that to count the total number of values
    if isinstance(array, ma.MaskedArray):
        # calculate the total number of non-masked values across the given axis
        total_non_masked = _count(array.mask, lambda v: v == False, axis=axis, **kwargs)
        total_non_masked = ma.masked_equal(total_non_masked, 0)
    else:
        total_non_masked = array.shape[axis] 
        
    return _count(array, function, axis=axis, **kwargs) / total_non_masked
Example #26
0
def _grid_array_to_gdal_files(dap_grid_array, srs, geo_transform, filename_fmt='{i}.asc', missval=None):
    '''Generator which creates an Arc/Info ASCII Grid file for each "layer" (i.e. one step of X by Y) in a given grid

       :param dap_grid_array: Multidimensional arrary of rank 2 or 3
       :type dap_grid_array: numpy.ndarray
       :param srs: Spatial reference system
       :type srs: osr.SpatialReference
       :param geo_transform: GDAL affine transform which applies to this grid
       :type geo_transform: list
       :param filename_fmt: Proposed filename template for output files. "{i}" can be included and will be filled in with the layer number.
       :type filename_fmt: str
       :param missval: Value for which data should be identified as missing
       :type missval: numpy.array
       :returns: A generator which yields pairs of (filename, file_content_generator) of the created files. Note that there will likely be more than one file for layer (e.g. an .asc file and a .prj file).
    '''

    logger.debug("_grid_array_to_gdal_files: translating this grid {} of this srs {} transform {} to this file {}".format(dap_grid_array, srs, geo_transform, filename_fmt))

    logger.debug("Investigating the shape of this grid: {}".format(dap_grid_array))
    shp = dap_grid_array.shape
    if len(shp) == 2:
        ylen, xlen =  shp
        data = [ dap_grid_array ]
    elif len(shp) == 3:
        _, ylen, xlen = shp
        data = iter(dap_grid_array.data)
    else:
        raise ValueError("_grid_array_to_gdal_files received a grid of rank {} rather than the required 2 or 3".format(len(shp)))

    target_type = numpy_to_gdal[dap_grid_array.dtype.name]

    meta_ds = create_gdal_mem_dataset(xlen, ylen, geo_transform, srs, target_type, missval)

    for i, layer in enumerate(data):

        if missval:
            layer = ma.masked_equal(layer, missval)

        logger.debug("Data: {}".format(layer))
        meta_ds.GetRasterBand(1).WriteArray( numpy.flipud(layer) )
        
        driver = gdal.GetDriverByName('AAIGrid')

        outfile = gettempdir() + sep + filename_fmt.format(i=i)
        dst_ds = driver.CreateCopy(outfile, meta_ds, 0)
        
        file_list = dst_ds.GetFileList()

        # Once we're done, close properly the dataset
        dst_ds = None

        for filename in file_list:
            yield named_file_generator(filename)

    meta_ds = None
Example #27
0
    def scatter(self, dataarr, datapath):
        g = self.graph
        g.new_plot(padding=[30, 10, 30, 30],
                   )

        _r, c = dataarr.shape
        for ci in range(0, c, 3):
            ages = ma.masked_equal(dataarr[:, ci], 0)
            errors = ma.masked_equal(dataarr[:, ci + 1], 0)
            k2o = ma.masked_equal(dataarr[:, ci + 2], 0)

            relative_errs = errors / ages * 100

            g.new_series(relative_errs, k2o, type='scatter', marker='circle')

        g.set_x_limits(0, max(relative_errs) * 1.1)
        g.set_y_limits(0, max(k2o) * 1.1)

        g.set_x_title('Relative Error (%)')
        g.set_y_title('K2O (%)')
Example #28
0
def topex_time_table(dt_days,dt_seconds,dt_microseconds,base_date=None):
    """
    """
    if base_date is None:
        base_date=datetime(year=1950,month=01,day=01,hour=0,minute=0,second=0)
    t=[]
    for d, s, ms in itertools.izip(dt_days.compressed(),dt_seconds.compressed(),dt_microseconds.compressed()):
	    dt=timedelta(days=int(d),seconds=int(s),microseconds=int(ms))
            t.append(base_date+dt)
    t=ma.masked_equal(t,-1)
    return t
Example #29
0
def FIS_Test2(instances = 1000):
    test = getTestRun("1kTestRun3.db3")
    Weights = map(RecommenderItemCF.eval, zip(test[:instances,5],test[:instances,8],test[:instances,14]))
    Recs = map(Weighted_avg,Weights,test[:instances,3],test[:instances,12],test[:instances,10])
    k = test[:instances,:]
    k = numpy.column_stack([k, Recs])
    import numpy.ma as ma
    recoms =  ma.masked_equal(k[:instances,20],None)
    rats = ma.array(k[:,3], mask = recoms.mask)
    r = sum( numpy.abs( recoms.compressed()-rats.compressed() ) ) / len(rats.compressed())
    return r ,k
    def __get_masked_data(variable):
        """

        :type variable: Variable
        :rtype : ma.MaskedArray
        """
        data = variable[:]
        try:
            fill_value = variable.getncattr('_FillValue')
            return ma.masked_equal(data, fill_value)
        except AttributeError:
            return ma.array(data)
Example #31
0
# open the shapefile	
vDriver = ogr.GetDriverByName("ESRI Shapefile")
vSrc = vDriver.Open(shpF, 0)
vLayer = vSrc.GetLayer()
nFeat = vLayer.GetFeatureCount()
for f, feature in enumerate(vLayer):
  f=0
  feature = vLayer[f]
  name = feature.GetField(trgField)
  print(name)
  descr = feature.GetField(descrField)
  predP = get_zone_pixels(feature, shpF, predF, 1)#.compressed()
  trainP = get_zone_pixels(feature, shpF, trainF, 1)#.compressed()
  
  predP = ma.masked_equal(predP, predND)
  trainP = ma.masked_equal(trainP, trainND)
  trainP = ma.masked_equal(trainP, 0)
  """
  print(np.unique(predP.compressed()))
  print(np.unique(trainP.compressed()))
  
  hist, bin_edges = np.histogram(predP.compressed(), density=True)
  plt.hist(predP.compressed(), bins=bin_edges)
  plt.show()
  
  hist, bin_edges = np.histogram(trainP.compressed(), density=True)
  plt.hist(trainP.compressed(), bins=bin_edges)
  plt.show()
  
  print(predP.shape)
def MapThatShit(NDAI_BASE,date,spl_arr,drought_avg_tci_cmap):
    # get array informaiton
    extent, img_extent = setMap(NDAI_BASE)
    ds = gdal.Open(NDAI_BASE)
    array = ds.ReadAsArray()
    #array[ma.getmaskarray(NDAI)] = 0
    #nan = ds.GetRasterBand(1).GetNoDataValue()
    array = ma.masked_equal(array, 0)
    array = np.flipud(array)
    logging.info(extent)

    # set shape of figure
    width = array.shape[0]
    height = array.shape[1]
    base_width = 10.
    base_height = (base_width * height) / width
    print base_width, base_height
    logging.info(base_width, base_height)

    # figure
    fig = plt.figure(figsize=(base_height,base_width)) 
    ax = plt.subplot(1,1,1, projection=ccrs.PlateCarree())
    ax.set_extent(extent, ccrs.Geodetic())
    gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                      linewidth=1, color='gray', alpha=0.5, linestyle='--')
    gl.ylabels_right = False
    gl.xlabels_top = False
    # prulletaria on the map
    
    ogr2ogr = r'C:\Program Files\GDAL//ogr2ogr.exe'
    base_geom = r'D:\Data\ChinaShapefile\natural_earth'
    # ocean
    in_file_ocean = base_geom + '\physical//10m_ocean.shp'    
    outfile_ocean = 'ocean.shp'
    # country boudaries
    in_file_bound = base_geom + '\cultural//10m_admin_0_boundary_lines_land.shp' 
    outfile_bound = 'boundaries.shp'
    # costlie
    in_file_coast = base_geom + '\physical//10m_coastline.shp'
    outfile_coast = 'coastline.shp'
    #lakes
    in_file_lakes = base_geom + '\physical//10m_lakes.shp'
    outfile_lakes = 'lakes.shp'
    # run the clip functions
    
    command = [ogr2ogr, '-f', "ESRI Shapefile", outfile_ocean, in_file_ocean,'-clipsrc',
               str(spl_arr[0]),str(spl_arr[1]),str(spl_arr[2]),str(spl_arr[3]),'-overwrite']    
    print (sp.list2cmdline(command))
    norm = sp.Popen(sp.list2cmdline(command),stdout=sp.PIPE, shell=True)
    norm.communicate()
    
    command = [ogr2ogr, '-f', "ESRI Shapefile", outfile_bound, in_file_bound,'-clipsrc',
               str(spl_arr[0]),str(spl_arr[1]),str(spl_arr[2]),str(spl_arr[3]),'-overwrite']    
    print (sp.list2cmdline(command))
    norm = sp.Popen(sp.list2cmdline(command),stdout=sp.PIPE, shell=True)
    norm.communicate()    
    
    command = [ogr2ogr, '-f', "ESRI Shapefile", outfile_coast, in_file_coast,'-clipsrc',
               str(spl_arr[0]),str(spl_arr[1]),str(spl_arr[2]),str(spl_arr[3]),'-overwrite']    
    print (sp.list2cmdline(command))
    norm = sp.Popen(sp.list2cmdline(command),stdout=sp.PIPE, shell=True)
    norm.communicate()    
    
    command = [ogr2ogr, '-f', "ESRI Shapefile", outfile_lakes, in_file_lakes,'-clipsrc',
               str(spl_arr[0]),str(spl_arr[1]),str(spl_arr[2]),str(spl_arr[3]),'-overwrite']    
    print (sp.list2cmdline(command))
    norm = sp.Popen(sp.list2cmdline(command),stdout=sp.PIPE, shell=True)
    norm.communicate()        

    ax.add_geometries(Reader(outfile_ocean).geometries(), ccrs.PlateCarree(), facecolor='lightblue')
    ax.add_geometries(Reader(outfile_bound).geometries(), ccrs.PlateCarree(), 
                      facecolor='',linestyle=':', linewidth=2)
    ax.add_geometries(Reader(outfile_coast).geometries(), ccrs.PlateCarree(), facecolor='')
    ax.add_geometries(Reader(outfile_lakes).geometries(), ccrs.PlateCarree(), facecolor='lightskyblue')
    
    # ticks of classes
    bounds = [   0.   ,   82.875,   95.625,  108.375,  127.5  ,  146.625,
            159.375,  172.125,  255.   ]
    # ticklabels plus colorbar
    ticks = ['-1','-0.35','-0.25','-0.15','+0','+.15','+.25','+.35','+1']
    cmap = cmap_discretize(drought_avg_tci_cmap,8)
    norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
    im = ax.imshow(array, origin='upper', extent=img_extent,norm=norm, cmap=cmap, vmin=0, vmax=255, interpolation='nearest')#, transform=ccrs.Mercator())
    title = 'NDAI '+date
    plt.title(title, fontsize=22)
    cb = plt.colorbar(im, fraction=0.0476, pad=0.04, ticks=bounds,norm=norm, orientation='horizontal')
    cb.set_label('Normalized Drought Anomaly Index')
    cb.set_ticklabels(ticks)
    
    spl_arr_str = str(spl_arr)
    spl_arr_str = spl_arr_str.replace('[','')
    spl_arr_str = spl_arr_str.replace(']','')
    spl_arr_str = spl_arr_str.replace(', ','#')
    spl_arr_str = spl_arr_str.replace('.','.')
    # and save the shit
    outpath = 'NDAI_'+date+'_'+spl_arr_str+'.png'
    plt.savefig(outpath, dpi=200, bbox_inches='tight')
    print outpath
    logging.info(outpath)
    #plt.tight_layout()
    plt.show()
    plt.close(fig)
    fig.clf() 
    return outpath
pnum = len(pname)
#print np.shape(sfcelv_hydroweb)
colors = ['xkcd:pastel blue', 'xkcd:aqua green', 'xkcd:soft pink']
labels = ["$<1m$", "$<5m$", "$>5m$"]
#==========
dataxx0 = []
datayy0 = []
dataxx1 = []
datayy1 = []
dataxx2 = []
datayy2 = []
dataxx3 = []
datayy3 = []
for point in np.arange(pnum):
    cmf_mean = np.mean(sfcelv_cmf[:, point])
    obs_mean = np.mean(ma.masked_equal(sfcelv_hydroweb[:, point], -9999.0))
    BIAS = abs(cmf_mean - obs_mean)
    # print (cmf_mean,obs_mean,BIAS)
    datayy0.append(BIAS)
    dataxx0.append(abs(elediff[point]))
    if disttomouth[point] <= 1.0:
        datayy1.append(BIAS)
        dataxx1.append(abs(elediff[point]))
        print("<=1m", pname[point], BIAS, disttomouth[point])
    elif disttomouth[point] <= 5.0:
        datayy2.append(BIAS)
        dataxx2.append(abs(elediff[point]))
        print("<=5m", pname[point], BIAS, disttomouth[point])
    else:
        datayy3.append(BIAS)
        dataxx3.append(abs(elediff[point]))
def getTimeseries(productcode, subproductcode, version, mapsetcode, geom,
                  start_date, end_date, aggregate):

    #    Extract timeseries from a list of files and return as JSON object
    #    It applies to a single dataset (prod/sprod/version/mapset) and between 2 dates
    #    Several types of aggregation foreseen:
    #
    #       mean :      Sum(Xi)/N(Xi)        -> min/max not considered          e.g. Rain
    #       cumulate:   Sum(Xi)              -> min/max not considered          e.g. Fire
    #
    #       count:      N(Xi where min < Xi < max)                              e.g. Vegetation anomalies
    #       surface:    count * PixelArea                                       e.g. Water Bodies
    #       percent:    count/Ntot                                              e.g. Vegetation anomalies
    #       precip:     compute the precipitation volume in m3*1E6              Rain (only)
    #
    #   History: 1.0 :  Initial release - since 2.0.1 -> now renamed '_green' from greenwich package
    #            1.1 :  Since Feb. 2017, it is based on a different approach (gdal.RasterizeLayer instead of greenwich)
    #                   in order to solve the issue with MULTIPOLYGON
    #

    ogr.UseExceptions()

    # Get Mapset Info
    mapset_info = querydb.get_mapset(mapsetcode=mapsetcode)

    # Prepare for computing conversion to area: the pixel size at Lat=0 is computed
    # The correction to the actual latitude (on AVERAGE value - will be computed below)
    const_d2km = 12364.35
    area_km_equator = abs(float(mapset_info.pixel_shift_lat)) * abs(
        float(mapset_info.pixel_shift_long)) * const_d2km

    # Get Product Info
    product_info = querydb.get_product_out_info(productcode=productcode,
                                                subproductcode=subproductcode,
                                                version=version)
    if product_info.__len__() > 0:
        # Get info from product_info
        scale_factor = 0
        scale_offset = 0
        nodata = 0
        date_format = ''
        for row in product_info:
            scale_factor = row.scale_factor
            scale_offset = row.scale_offset
            nodata = row.nodata
            date_format = row.date_format
            date_type = row.data_type_id

        # Create an output/temp shapefile, for managing the output layer (really mandatory ?? Can be simplified ???)
        try:
            tmpdir = tempfile.mkdtemp(prefix=__name__,
                                      suffix='_getTimeseries',
                                      dir=es_constants.base_tmp_dir)
        except:
            logger.error('Cannot create temporary dir ' +
                         es_constants.base_tmp_dir + '. Exit')
            raise NameError('Error in creating tmpdir')

        out_shape = tmpdir + os.path.sep + "output_shape.shp"
        outDriver = ogr.GetDriverByName('ESRI Shapefile')

        # Create the output shapefile
        outDataSource = outDriver.CreateDataSource(out_shape)
        dest_srs = ogr.osr.SpatialReference()
        dest_srs.ImportFromEPSG(4326)

        outLayer = outDataSource.CreateLayer("Layer", dest_srs)
        # outLayer = outDataSource.CreateLayer("Layer")
        idField = ogr.FieldDefn("id", ogr.OFTInteger)
        outLayer.CreateField(idField)

        featureDefn = outLayer.GetLayerDefn()
        feature = ogr.Feature(featureDefn)
        feature.SetGeometry(geom)
        # area = geom.GetArea()
        feature.SetField("id", 1)
        outLayer.CreateFeature(feature)
        feature = None

        [list_files,
         dates_list] = getFilesList(productcode, subproductcode, version,
                                    mapsetcode, date_format, start_date,
                                    end_date)

        # Built a dictionary with filenames/dates
        dates_to_files_dict = dict(list(zip(dates_list, list_files)))

        # Generate unique list of files
        unique_list = set(list_files)
        uniqueFilesValues = []

        geo_mask_created = False
        for infile in unique_list:
            single_result = {
                'filename': '',
                'meanvalue_noscaling': nodata,
                'meanvalue': None
            }

            if infile.strip() != '' and os.path.isfile(infile):
                # try:

                # Open input file
                orig_ds = gdal.Open(infile, gdal.GA_ReadOnly)
                orig_cs = osr.SpatialReference()
                orig_cs.ImportFromWkt(orig_ds.GetProjectionRef())
                orig_geoT = orig_ds.GetGeoTransform()
                x_origin = orig_geoT[0]
                y_origin = orig_geoT[3]
                pixel_size_x = orig_geoT[1]
                pixel_size_y = -orig_geoT[5]

                in_data_type_gdal = conv_data_type_to_gdal(date_type)

                # Create a mask from the geometry, with the same georef as the input file[s]
                if not geo_mask_created:

                    # Read polygon extent and round to raster resolution
                    x_min, x_max, y_min, y_max = outLayer.GetExtent()
                    x_min_round = int(old_div(
                        (x_min - x_origin),
                        pixel_size_x)) * pixel_size_x + x_origin
                    x_max_round = (
                        int(old_div(
                            (x_max - x_origin),
                            (pixel_size_x))) + 1) * pixel_size_x + x_origin
                    y_min_round = (
                        int(old_div(
                            (y_min - y_origin),
                            (pixel_size_y))) - 1) * pixel_size_y + y_origin
                    y_max_round = int(
                        old_div((y_max - y_origin),
                                (pixel_size_y))) * pixel_size_y + y_origin
                    #
                    #     # Create the destination data source
                    x_res = int(
                        round(
                            old_div((x_max_round - x_min_round),
                                    pixel_size_x)))
                    y_res = int(
                        round(
                            old_div((y_max_round - y_min_round),
                                    pixel_size_y)))
                    #
                    #     # Create mask in memory
                    mem_driver = gdal.GetDriverByName('MEM')
                    mem_ds = mem_driver.Create('', x_res, y_res, 1,
                                               in_data_type_gdal)
                    mask_geoT = [
                        x_min_round, pixel_size_x, 0, y_max_round, 0,
                        -pixel_size_y
                    ]
                    mem_ds.SetGeoTransform(mask_geoT)
                    mem_ds.SetProjection(orig_cs.ExportToWkt())
                    #
                    #     # Create a Layer with '1' for the pixels to be selected
                    gdal.RasterizeLayer(mem_ds, [1], outLayer, burn_values=[1])
                    # gdal.RasterizeLayer(mem_ds, [1], outLayer, None, None, [1])

                    # Read the polygon-mask
                    band = mem_ds.GetRasterBand(1)
                    geo_values = mem_ds.ReadAsArray()

                    # Create a mask from geo_values (mask-out the '0's)
                    geo_mask = ma.make_mask(geo_values == 0)
                    geo_mask_created = True
                    #
                    #     # Clean/Close objects
                    mem_ds = None
                    mem_driver = None
                    outDriver = None
                    outLayer = None

                # Read data from input file
                x_offset = int(old_div((x_min - x_origin), pixel_size_x))
                y_offset = int(old_div((y_origin - y_max), pixel_size_y))

                band_in = orig_ds.GetRasterBand(1)
                data = band_in.ReadAsArray(x_offset, y_offset, x_res, y_res)
                #   Catch the Error ES2-105 (polygon not included in Mapset)
                if data is None:
                    logger.error(
                        'ERROR: polygon extends out of file mapset for file: %s'
                        % infile)
                    return []

                # Create a masked array from the data (considering Nodata)
                masked_data = ma.masked_equal(data, nodata)

                # Apply on top of it the geo mask
                mxnodata = ma.masked_where(geo_mask, masked_data)

                # Test ONLY
                # write_ds_to_geotiff(mem_ds, '/data/processing/exchange/Tests/mem_ds.tif')

                if aggregate['aggregation_type'] == 'count' or aggregate[
                        'aggregation_type'] == 'percent' or aggregate[
                            'aggregation_type'] == 'surface' or aggregate[
                                'aggregation_type'] == 'precip':

                    if mxnodata.count() == 0:
                        meanResult = None
                    else:
                        mxrange = mxnodata
                        min_val = aggregate['aggregation_min']
                        max_val = aggregate['aggregation_max']

                        if min_val is not None:
                            min_val_scaled = old_div((min_val - scale_offset),
                                                     scale_factor)
                            mxrange = ma.masked_less(mxnodata, min_val_scaled)

                            # See ES2-271
                            if max_val is not None:
                                # Scale threshold from physical to digital value
                                max_val_scaled = old_div(
                                    (max_val - scale_offset), scale_factor)
                                mxrange = ma.masked_greater(
                                    mxrange, max_val_scaled)

                        elif max_val is not None:
                            # Scale threshold from physical to digital value
                            max_val_scaled = old_div((max_val - scale_offset),
                                                     scale_factor)
                            mxrange = ma.masked_greater(
                                mxnodata, max_val_scaled)

                        if aggregate['aggregation_type'] == 'percent':
                            # 'percent'
                            meanResult = float(mxrange.count()) / float(
                                mxnodata.count()) * 100

                        elif aggregate['aggregation_type'] == 'surface':
                            # 'surface'
                            # Estimate 'average' Latitude
                            y_avg = (y_min + y_max) / 2.0
                            pixelAvgArea = area_km_equator * math.cos(
                                old_div(y_avg, 180) * math.pi)
                            meanResult = float(mxrange.count()) * pixelAvgArea
                        elif aggregate['aggregation_type'] == 'precip':
                            # 'surface'
                            # Estimate 'average' Latitude
                            y_avg = (y_min + y_max) / 2.0
                            pixelAvgArea = area_km_equator * math.cos(
                                old_div(y_avg, 180) * math.pi)
                            n_pixels = mxnodata.count()
                            avg_precip = mxnodata.mean()
                            # Result is in km * km * mmm i.e. 1E3 m*m*m -> we divide by 1E3 to get 1E6 m*m*m
                            meanResult = float(
                                n_pixels) * pixelAvgArea * avg_precip * 0.001
                        else:
                            # 'count'
                            meanResult = float(mxrange.count())

                    # Both results are equal
                    finalvalue = meanResult

                else:  # if aggregate['type'] == 'mean' or if aggregate['type'] == 'cumulate':
                    if mxnodata.count() == 0:
                        finalvalue = None
                        meanResult = None
                    else:
                        if aggregate['aggregation_type'] == 'mean':
                            # 'mean'
                            meanResult = mxnodata.mean()
                        else:
                            # 'cumulate'
                            meanResult = mxnodata.sum()

                        finalvalue = (meanResult * scale_factor + scale_offset)

                # Assign results
                single_result['filename'] = infile
                single_result['meanvalue_noscaling'] = meanResult
                single_result['meanvalue'] = finalvalue

            else:
                logger.debug('ERROR: raster file does not exist - %s' % infile)

            uniqueFilesValues.append(single_result)

        # Define a dictionary to associate filenames/values
        files_to_values_dict = dict(
            (x['filename'], x['meanvalue']) for x in uniqueFilesValues)

        # Prepare array for result
        resultDatesValues = []

        # Returns a list of 'filenames', 'dates', 'values'
        for mydate in dates_list:

            my_result = {'date': datetime.date.today(), 'meanvalue': nodata}

            # Assign the date
            my_result['date'] = mydate
            # Assign the filename
            my_filename = dates_to_files_dict[mydate]

            # Map from array of Values
            my_result['meanvalue'] = files_to_values_dict[my_filename]

            # Map from array of dates
            resultDatesValues.append(my_result)

        try:
            shutil.rmtree(tmpdir)
        except:
            logger.debug('ERROR: Error in deleting tmpdir. Exit')

        # Return result
        return resultDatesValues
    else:
        logger.debug(
            'ERROR: product not registered in the products table! - %s %s %s' %
            (productcode, subproductcode, version))
        return []
Example #35
0
    def DenseFusion(self, img, depth, posecnn_res):
        my_result_wo_refine = []

        itemid = 1  # this is simplified for single label decttion, if multi-label used, check DFYW3.py for more

        depth = np.array(depth)
        # img = img

        seg_res = posecnn_res

        x1, y1, x2, y2 = seg_res["box"]
        banana_bbox_draw = self.posecnn.get_box_rcwh(seg_res["box"])
        rmin, rmax, cmin, cmax = int(x1), int(x2), int(y1), int(y2)
        depth = depth[:, :,
                      1]  # because depth has 3 dimensions RGB but they are the all the same with each other
        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))  # ok

        label_banana = np.squeeze(seg_res["mask"])
        label_banana = ma.getmaskarray(ma.masked_greater(label_banana, 0.5))
        label_banana_nonzeros = label_banana.flatten().nonzero()

        mask_label = ma.getmaskarray(ma.masked_equal(
            label_banana, itemid))  # label from banana label
        mask = mask_label * mask_depth

        mask_nonzeros = mask[:].flatten().nonzero()
        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) > self.num_points:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num_points] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            print("len of choose is 0, check error")
            choose = np.pad(choose, (0, self.num_points - len(choose)), 'wrap')

        depth_masked = depth[rmin:rmax,
                             cmin:cmax].flatten()[choose][:,
                                                          np.newaxis].astype(
                                                              np.float32)
        xmap_masked = self.xmap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])
        pt2 = depth_masked / self.cam_scale
        pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
        pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        img_np = np.array(img)
        img_masked = np.array(img)[:, :, :3]
        img_masked = np.transpose(img_masked, (2, 0, 1))
        img_masked = img_masked[:, rmin:rmax, cmin:cmax]

        cloud = torch.from_numpy(cloud.astype(np.float32))
        choose = torch.LongTensor(choose.astype(np.int32))
        img_masked = self.norm(torch.from_numpy(img_masked.astype(np.float32)))
        index = torch.LongTensor([itemid - 1])

        cloud = Variable(cloud).cuda()
        choose = Variable(choose).cuda()
        img_masked = Variable(img_masked).cuda()
        index = Variable(index).cuda()

        cloud = cloud.view(1, self.num_points, 3)
        img_masked = img_masked.view(1, 3,
                                     img_masked.size()[1],
                                     img_masked.size()[2])

        pred_r, pred_t, pred_c, emb = self.estimator(img_masked, cloud, choose,
                                                     index)
        pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, self.num_points, 1)

        pred_c = pred_c.view(self.bs, self.num_points)
        how_max, which_max = torch.max(pred_c, 1)
        pred_t = pred_t.view(self.bs * self.num_points, 1, 3)
        points = cloud.view(self.bs * self.num_points, 1, 3)

        my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
        my_t = (points + pred_t)[which_max[0]].view(-1).cpu().data.numpy()
        my_pred = np.append(my_r, my_t)
        my_result_wo_refine.append(my_pred.tolist())

        my_result = []
        for ite in range(0, self.iteration):
            T = Variable(torch.from_numpy(
                my_t.astype(np.float32))).cuda().view(1, 3).repeat(
                    self.num_points,
                    1).contiguous().view(1, self.num_points, 3)
            my_mat = quaternion_matrix(my_r)
            R = Variable(torch.from_numpy(my_mat[:3, :3].astype(
                np.float32))).cuda().view(1, 3, 3)
            my_mat[0:3, 3] = my_t

            new_cloud = torch.bmm((cloud - T), R).contiguous()
            pred_r, pred_t = self.refiner(new_cloud, emb, index)
            pred_r = pred_r.view(1, 1, -1)
            pred_r = pred_r / (torch.norm(pred_r, dim=2).view(1, 1, 1))
            my_r_2 = pred_r.view(-1).cpu().data.numpy()
            my_t_2 = pred_t.view(-1).cpu().data.numpy()
            my_mat_2 = quaternion_matrix(my_r_2)

            my_mat_2[0:3, 3] = my_t_2
            my_mat_final = np.dot(my_mat, my_mat_2)
            my_r_final = copy.deepcopy(my_mat_final)
            my_r_final[0:3, 3] = 0
            my_r_final = quaternion_from_matrix(my_r_final, True)
            my_t_final = np.array(
                [my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]])

            my_pred = np.append(my_r_final, my_t_final)
            my_result.append(my_pred.tolist())
        my_result_np = np.array(my_result)
        my_result_mean = np.mean(my_result, axis=0)
        my_r = my_result_mean[:4]
        my_t = my_result_mean[4:]
        my_r_quaternion = my_r
        return my_r_quaternion, my_t
Example #36
0
    def __getitem__(self, index):
        img = Image.open('{0}/{1}-color.png'.format(self.root,
                                                    self.list[index]))
        depth = np.array(
            Image.open('{0}/{1}-depth.png'.format(self.root,
                                                  self.list[index])))
        label = np.array(
            Image.open('{0}/{1}-label.png'.format(self.root,
                                                  self.list[index])))
        meta = scio.loadmat('{0}/{1}-meta.mat'.format(self.root,
                                                      self.list[index]))
        symmetries = np.loadtxt(self.symdir + 'symmetries.txt')
        symmetries = symmetries.reshape(21, 5, 3)

        if self.list[index][:8] != 'data_syn' and int(
                self.list[index][5:9]) >= 60:
            cam_cx = self.cam_cx_2
            cam_cy = self.cam_cy_2
            cam_fx = self.cam_fx_2
            cam_fy = self.cam_fy_2
        else:
            cam_cx = self.cam_cx_1
            cam_cy = self.cam_cy_1
            cam_fx = self.cam_fx_1
            cam_fy = self.cam_fy_1
        cam_intri = [cam_cx, cam_cy, cam_fx, cam_fy]
        cam_intri = np.array(cam_intri)
        mask_back = ma.getmaskarray(ma.masked_equal(label, 0))

        add_front = False
        if self.add_noise:
            for k in range(5):
                seed = random.choice(self.list)
                front = np.array(
                    self.trancolor(
                        Image.open('{0}/{1}-color.png'.format(
                            self.root, seed)).convert("RGB")))
                front = np.transpose(front, (2, 0, 1))
                f_label = np.array(
                    Image.open('{0}/{1}-label.png'.format(self.root, seed)))
                front_label = np.unique(f_label).tolist()[1:]
                if len(front_label) < self.front_num:
                    continue
                front_label = random.sample(front_label, self.front_num)
                for f_i in front_label:
                    mk = ma.getmaskarray(ma.masked_not_equal(f_label, f_i))
                    if f_i == front_label[0]:
                        mask_front = mk
                    else:
                        mask_front = mask_front * mk
                t_label = label * mask_front
                if len(t_label.nonzero()[0]) > 1000:
                    label = t_label
                    add_front = True
                    break

        obj = meta['cls_indexes'].flatten().astype(np.int32)  # get class index

        while True:
            idx = np.random.randint(0, len(obj))
            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(label, obj[idx]))
            mask = mask_label * mask_depth
            mask_real = len(mask.nonzero()[0])
            if mask_real > self.minimum_num_pt:
                break

        if self.add_noise:
            img = self.trancolor(img)

        rmin, rmax, cmin, cmax = get_bbox(mask_label)
        img = np.transpose(np.array(img)[:, :, :3], (2, 0, 1))[:, rmin:rmax,
                                                               cmin:cmax]

        img_masked = img

        if self.add_noise and add_front:
            img_masked = img_masked * mask_front[
                rmin:rmax, cmin:cmax] + front[:, rmin:rmax, cmin:cmax] * ~(
                    mask_front[rmin:rmax, cmin:cmax])

        if self.list[index][:8] == 'data_syn':
            img_masked = img_masked + np.random.normal(
                loc=0.0, scale=7.0, size=img_masked.shape)

        order = idx
        target_r = meta['poses'][:, :, idx][:, 0:3]
        target_t = np.array([meta['poses'][:, :, idx][:, 3:4].flatten()])
        add_t = np.array([
            random.uniform(-self.noise_trans, self.noise_trans)
            for i in range(3)
        ])

        # transform sym vectors into points
        cls_idx = int(obj[idx]) - 1
        model_s = symmetries[cls_idx, :, :]
        target_mode = 0
        if cls_idx in self.one_sym_list:
            multi_s = np.zeros((2, 3))
            multi_s[0, :] = model_s[0, :]
            multi_s[1, :] = model_s[1, :] + model_s[0, :]
            if cls_idx in self.axis_and_ref_list:
                target_mode = 2
            else:
                target_mode = 0
        elif cls_idx in self.only_axis_list:
            multi_s = np.zeros((2, 3))
            multi_s[0, :] = model_s[0, :]
            multi_s[1, :] = model_s[4, :] + model_s[0, :]
            target_mode = 1
        elif cls_idx in self.two_sym_list:
            multi_s = np.zeros((3, 3))
            multi_s[0, :] = model_s[0, :]
            multi_s[1, :] = model_s[1, :] + model_s[0, :]
            multi_s[2, :] = model_s[2, :] + model_s[0, :]
            target_mode = 0
        elif cls_idx in self.three_sym_list:
            multi_s = np.zeros((4, 3))
            multi_s[0, :] = model_s[0, :]
            multi_s[1, :] = model_s[1, :] + model_s[0, :]
            multi_s[2, :] = model_s[2, :] + model_s[0, :]
            multi_s[3, :] = model_s[3, :] + model_s[0, :]
            target_mode = 0
        else:
            multi_s = np.zeros((5, 3))
            # object class is not in any symmetry list

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) > self.num_pt:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num_pt] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num_pt - len(choose)), 'wrap')

        depth_masked = depth[
            rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(
                np.float32)  # (1000, 1) masked depth values
        xmap_masked = self.xmap[
            rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(
                np.float32)  # (1000,1)
        ymap_masked = self.ymap[
            rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(
                np.float32)  # (1000,1)
        choose = np.array([choose])  # (1,1000)

        cam_scale = meta['factor_depth'][0][0]  # cam_scale = 10000
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
        pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)  # (1000,3)
        # print('cloud_shape = ', cloud.shape)
        if self.add_noise:
            cloud = np.add(cloud, add_t)  # (1000,3)

        dellist = [j for j in range(0, len(self.cld[obj[idx]]))]
        if self.refine:
            dellist = random.sample(
                dellist,
                len(self.cld[obj[idx]]) - self.num_pt_mesh_large)
        else:
            dellist = random.sample(
                dellist,
                len(self.cld[obj[idx]]) - self.num_pt_mesh_small)
        model_points = np.delete(self.cld[obj[idx]], dellist, axis=0)

        target = np.dot(model_points, target_r.T)
        target_s = np.add(np.dot(multi_s, target_r.T), target_t)
        target_num = target_s.shape[0] - 1
        if self.add_noise:
            target = np.add(target, target_t + add_t)
        else:
            target = np.add(target, target_t)

        return torch.from_numpy(cloud.astype(np.float32)), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy(img_masked.astype(np.float32))), \
               torch.LongTensor([int(obj[idx]) - 1]), \
               torch.from_numpy(target_s.astype(np.float32)),\
               torch.LongTensor([target_num]),\
               torch.LongTensor([target_mode]), \
               mask_real / (640 * 480)
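The label/depth masking and fixed-size point sampling above recur throughout these examples; here is a small self-contained sketch of the idiom (synthetic label and depth arrays; obj_id and num_pt are hypothetical values):

import numpy as np
import numpy.ma as ma

label = np.array([[0, 2, 2], [0, 2, 0], [1, 1, 0]])
depth = np.array([[0, 5, 7], [3, 0, 2], [4, 4, 0]])
obj_id, num_pt = 2, 4

mask_label = ma.getmaskarray(ma.masked_equal(label, obj_id))  # True where label == obj_id
mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))   # True where depth is valid
mask = mask_label * mask_depth                                # elementwise logical AND

choose = mask.flatten().nonzero()[0]
if len(choose) > num_pt:      # too many points: random subsample
    c_mask = np.zeros(len(choose), dtype=int)
    c_mask[:num_pt] = 1
    np.random.shuffle(c_mask)
    choose = choose[c_mask.nonzero()]
else:                         # too few points: pad by wrapping
    choose = np.pad(choose, (0, num_pt - len(choose)), 'wrap')
print(choose)                 # [1 2 1 2]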
Example #37
0
    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
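A short standalone demonstration of the where/masked_where semantics the test above asserts:

import numpy as np
import numpy.ma as ma

x = ma.array([1., 2., 3., 4., 5.])
x[2] = ma.masked
c = np.array([1, 1, 1, 0, 0], dtype=bool)
print(ma.where(c, x, -x))          # [1.0 2.0 -- -4.0 -5.0]; masked entries stay masked

y = np.array([1., 2., 3., 4., 5.])
print(ma.masked_where(y > 2, y))   # [1.0 2.0 -- -- --]; mask where the condition holds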
Example #38
0
def extract_veldisp(gal_ID, z, ba, phi, r50, maps_filename,
                    plot_diagnostics=True, IMAGE_DIR=None, IMAGE_FORMAT='eps'):
    '''
    Extract the velocity dispersions as a function of distance from the center 
    of the galaxy.


    PARAMETERS
    ==========

    gal_ID : string
        <plate> - <IFU> of galaxy

    z : float
        galaxy redshift

    ba : float
        galaxy semiminor-to-semimajor axis ratio (b/a)

    phi : float
        Galaxy rotation angle E of N [degrees]

    r50 : float
        50% light radius of galaxy (from elliptical petrosian fit) [arcsec]

    maps_filename : string
        Location of data map fits file

    plot_diagnostics : boolean
        Flag to determine whether to plot the diagnostic figures.  
        Default is True (plot all figures).

    IMAGE_DIR : string
        File path to which diagnostic images are saved.  
        Default value is None (do not save images).

    IMAGE_FORMAT : string
        Saved image file format.  Default format is eps.
    '''

    ############################################################################
    # Read in stellar velocity dispersion map
    #---------------------------------------------------------------------------
    maps = fits.open(maps_filename)

    # Extract average r-band image
    r_band = maps['SPX_MFLUX'].data

    # Extract (and correct for instrumental resolution effects) stellar velocity 
    # dispersion map
    star_sigma = np.sqrt(maps['STELLAR_SIGMA'].data**2 - maps['STELLAR_SIGMACORR'].data**2)
    # See https://www.sdss.org/dr16/manga/manga-data/working-with-manga-data/ for 
    # correction details.

    # Stellar velocity dispersion mask extension
    star_sigma_mask_extension = maps['STELLAR_SIGMA'].header['QUALDATA']
    #---------------------------------------------------------------------------
    # Mask arrays
    #---------------------------------------------------------------------------
    mStar_sigma = ma.array(star_sigma, mask=maps[star_sigma_mask_extension].data > 0)
    mr_band = ma.masked_equal(r_band, 0)
    ############################################################################




    ############################################################################
    # Check if all of the array is masked.
    #---------------------------------------------------------------------------
    num_masked_spaxels = np.sum(mStar_sigma.mask) - np.sum(mr_band.mask)
    frac_masked_spaxels = num_masked_spaxels/np.sum(~mr_band.mask)

    unmasked_data = False

    if frac_masked_spaxels < 1:
        unmasked_data = True
    ############################################################################




    ############################################################################
    # Find coordinates of center spaxel
    #
    # The center of the galaxy is defined to be the same as the brightest spaxel 
    # in the galaxy.
    #---------------------------------------------------------------------------
    if unmasked_data:
        optical_center = np.asarray(mr_band == mr_band.max()).nonzero()

        x_center = optical_center[1][0]
        y_center = optical_center[0][0]
    else:
        rows, cols = mr_band.shape

        x_center = int(0.5*rows)
        y_center = int(0.5*cols)

        optical_center = ([y_center], [x_center])
    ############################################################################




    ############################################################################
    # Normalize velocity dispersion values by central value
    #---------------------------------------------------------------------------
    mStar_sigma_norm = mStar_sigma/mStar_sigma[optical_center]
    ############################################################################




    ############################################################################
    # Calculate distance from each spaxel to center spaxel, in spaxel units
    #---------------------------------------------------------------------------
    y, x = np.indices(mStar_sigma.shape)

    distance_spaxels = np.hypot(x - x_center, y - y_center)

    deproj_distance_spaxels = deproject_r(x - x_center, y - y_center, distance_spaxels, ba, phi)
    ############################################################################




    ############################################################################
    # Convert distances to arcseconds, kpc
    #---------------------------------------------------------------------------
    distance_arcsec = distance_spaxels*MANGA_SPAXEL_SIZE
    deproj_distance_arcsec = deproj_distance_spaxels*MANGA_SPAXEL_SIZE
    deproj_distance_arcsec_norm = deproj_distance_arcsec/r50


    # Hubble-law distance: cz/H0 gives Mpc (for c in km/s, H0 in km/s/Mpc);
    # dividing H0 by 1000 yields the distance in kpc.
    dist_to_galaxy_kpc = z * c / (H0 / 1000)
    spaxel_scale_factor = dist_to_galaxy_kpc * np.tan(MANGA_SPAXEL_SIZE * (1 / 60) * (1 / 60) * (np.pi / 180))
    distance_kpc = spaxel_scale_factor*distance_spaxels
    deproj_distance_kpc = spaxel_scale_factor*deproj_distance_spaxels
    ############################################################################




    ############################################################################
    # Create table of distances (spaxels, arcseconds, and kpc) and stellar 
    # velocity dispersion value (raw, normalized to center spaxel value)
    #---------------------------------------------------------------------------
    veldisp_table = Table()
    veldisp_table['vel_disp'] = mStar_sigma.flatten()
    veldisp_table['vel_disp_norm'] = mStar_sigma_norm.flatten()
    veldisp_table['r_spaxels'] = distance_spaxels.flatten()
    veldisp_table['r_deproj_spaxels'] = deproj_distance_spaxels.flatten()
    veldisp_table['r_arcsec'] = distance_arcsec.flatten()
    veldisp_table['r_deproj_arcsec'] = deproj_distance_arcsec.flatten()
    veldisp_table['r_deproj_arcsec_norm'] = deproj_distance_arcsec_norm.flatten()
    veldisp_table['r_kpc'] = distance_kpc.flatten()
    veldisp_table['r_deproj_kpc'] = deproj_distance_kpc.flatten()
    ############################################################################


    
    if plot_diagnostics:
        
        plot_diagnostic_panel(gal_ID, mr_band, mStar_sigma, mStar_sigma_norm, 
                              veldisp_table, deproj_distance_arcsec_norm, IMAGE_DIR=IMAGE_DIR)
        
        
        plot_veldisp_r( veldisp_table['r_deproj_arcsec_norm'], 
                        veldisp_table['vel_disp_norm'], 
                        gal_ID, 
                        IMAGE_DIR=IMAGE_DIR)
        
        if IMAGE_DIR is None:
            plt.show()


    return veldisp_table
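A minimal sketch of the two masking steps at the top of extract_veldisp, with synthetic arrays standing in for the MAPS extensions:

import numpy as np
import numpy.ma as ma

star_sigma = np.array([[80., 75.], [0., 90.]])
qual_mask = np.array([[0, 1], [0, 0]])      # nonzero = flagged (bad) spaxel
r_band = np.array([[1.2, 0.0], [0.9, 2.1]])

mStar_sigma = ma.array(star_sigma, mask=qual_mask > 0)  # quality-flag mask
mr_band = ma.masked_equal(r_band, 0)                    # mask spaxels with no flux

num_masked_spaxels = np.sum(mStar_sigma.mask) - np.sum(mr_band.mask)
frac_masked_spaxels = num_masked_spaxels / np.sum(~mr_band.mask)
print(frac_masked_spaxels)   # 0.0 here, so the map counts as having unmasked data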
Example #39
0
 def setUp(self):
     masked_data = ma.masked_equal([1, 2, 3, 4, 5], 3)
     self.cube = Cube(as_lazy_data(masked_data))
     self.cube.add_dim_coord(DimCoord([6, 7, 8, 9, 10], long_name="foo"), 0)
Example #40
0
def msd_calc(track, length=10):
    """Calculates mean squared displacement of input track.

    Returns a pandas DataFrame containing MSD data calculated from an
    individual track.

    Parameters
    ----------
    track : pandas.core.frame.DataFrame
        Contains, at a minimum, 'Frame', 'X', and 'Y' columns
    length : int
        Number of frames over which MSDs are calculated.  Default is 10.

    Returns
    -------
    new_track : pandas.core.frame.DataFrame
        Similar to input track.  All missing frames of individual trajectories
        are filled in with NaNs, and two new columns, MSDs and Gauss are added:
        MSDs, calculated mean squared displacements using the formula
        MSD = <(xpos-x0)**2>
        Gauss, calculated Gaussianity

    Examples
    --------
    >>> data1 = {'Frame': [1, 2, 3, 4, 5],
    ...          'X': [5, 6, 7, 8, 9],
    ...          'Y': [6, 7, 8, 9, 10]}
    >>> df = pd.DataFrame(data=data1)
    >>> new_track = msd.msd_calc(df, 5)

    >>> data1 = {'Frame': [1, 2, 3, 4, 5],
    ...          'X': [5, 6, 7, 8, 9],
    ...          'Y': [6, 7, 8, 9, 10]}
    >>> df = pd.DataFrame(data=data1)
    >>> new_track = msd.msd_calc(df)

    """

    meansd = np.zeros(length)
    gauss = np.zeros(length)
    new_frame = np.linspace(1, length, length)
    old_frame = track['Frame']
    oldxy = [track['X'], track['Y']]
    fxy = [interpolate.interp1d(old_frame, oldxy[0], bounds_error=False,
                                fill_value=np.nan),
           interpolate.interp1d(old_frame, oldxy[1], bounds_error=False,
                                fill_value=np.nan)]

    # ma.masked_equal(x, np.nan) never masks anything (NaN != NaN), so mask
    # the NaNs from the out-of-range interpolation with ma.masked_invalid.
    intxy = [ma.masked_invalid(fxy[0](new_frame)),
             ma.masked_invalid(fxy[1](new_frame))]
    data1 = {'Frame': new_frame,
             'X': intxy[0],
             'Y': intxy[1]
             }
    new_track = pd.DataFrame(data=data1)

    for frame in range(0, length-1):
        xy = [np.square(nth_diff(new_track['X'], n=frame+1)),
              np.square(nth_diff(new_track['Y'], n=frame+1))]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            meansd[frame+1] = np.nanmean(xy[0] + xy[1])
            gauss[frame+1] = np.nanmean(xy[0]**2 + xy[1]**2
                                        )/(2*(meansd[frame+1]**2))

    new_track['MSDs'] = pd.Series(meansd, index=new_track.index)
    new_track['Gauss'] = pd.Series(gauss, index=new_track.index)

    return new_track
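One pitfall worth noting in msd_calc as originally published: ma.masked_equal(arr, np.nan) never masks anything, because NaN compares unequal to everything, including itself. ma.masked_invalid is the call that actually masks NaNs (as in the corrected version above). A two-line demonstration:

import numpy as np
import numpy.ma as ma

vals = np.array([1.0, np.nan, 3.0])
print(ma.masked_equal(vals, np.nan))   # nothing gets masked
print(ma.masked_invalid(vals))         # [1.0 -- 3.0]; NaN correctly masked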
Example #41
0
    def predict(self, rgb_img, depth, label_img, label, bboxes,
                intrinsic_matrix):
        num_points = self.num_points
        iteration = self.iteration
        batch_size = 1
        lst = np.array(label.flatten(), dtype=np.int32)
        img_height, img_width, _ = rgb_img.shape

        cameramodel = cameramodels.PinholeCameraModel.from_intrinsic_matrix(
            intrinsic_matrix, img_height, img_width)

        translations = []
        rotations = []
        for idx in range(len(lst)):
            itemid = lst[idx]

            rmin, cmin, rmax, cmax = bboxes[idx]
            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(label_img, itemid))
            mask = mask_label * mask_depth
            choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]

            if len(choose) == 0:
                translations.append([])
                rotations.append([])
                continue

            if len(choose) > num_points:
                c_mask = np.zeros(len(choose), dtype=int)
                c_mask[:num_points] = 1
                np.random.shuffle(c_mask)
                choose = choose[c_mask.nonzero()]
            else:
                choose = np.pad(choose, (0, num_points - len(choose)), 'wrap')

            xmap = np.array([[j for i in range(img_width)]
                             for j in range(img_height)])
            ymap = np.array([[i for i in range(img_width)]
                             for j in range(img_height)])

            depth_masked = depth[
                rmin:rmax,
                cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
            xmap_masked = xmap[rmin:rmax,
                               cmin:cmax].flatten()[choose][:,
                                                            np.newaxis].astype(
                                                                np.float32)
            ymap_masked = ymap[rmin:rmax,
                               cmin:cmax].flatten()[choose][:,
                                                            np.newaxis].astype(
                                                                np.float32)
            choose = np.array([choose])

            cloud = cameramodel.batch_project_pixel_to_3d_ray(
                np.concatenate([ymap_masked, xmap_masked], axis=1),
                depth_masked)

            img_masked = np.array(rgb_img)[:, :, :3]
            img_masked = np.transpose(img_masked, (2, 0, 1))
            img_masked = img_masked[:, rmin:rmax, cmin:cmax]

            with torch.no_grad():
                cloud = torch.from_numpy(cloud.astype(np.float32))
                choose = torch.LongTensor(choose.astype(np.int32))
                img_masked = normalize(
                    torch.from_numpy(img_masked.astype(np.float32)))
                index = torch.LongTensor([itemid - 1])

                cloud = cloud.cuda()
                choose = choose.cuda()
                img_masked = img_masked.cuda()
                index = index.cuda()

                cloud = cloud.view(1, num_points, 3)
                img_masked = img_masked.view(1, 3,
                                             img_masked.size()[1],
                                             img_masked.size()[2])

                pred_rot, pred_trans, pred_score, emb = self.estimator(
                    img_masked, cloud, choose, index)
                pred_rot = pred_rot / torch.norm(pred_rot, dim=2).view(
                    1, num_points, 1)

                pred_score = pred_score.view(batch_size, num_points)
                _, which_max = torch.max(pred_score, 1)
                pred_trans = pred_trans.view(batch_size * num_points, 1, 3)
                points = cloud.view(batch_size * num_points, 1, 3)
                rotation = pred_rot[0][which_max[0]].view(-1).cpu().\
                    data.numpy()
                translation = (points + pred_trans)[which_max[0]].view(-1).\
                    cpu().data.numpy()

                for _ in range(iteration):
                    T = torch.from_numpy(translation.astype(np.float32)).\
                             cuda().view(1, 3).\
                             repeat(num_points, 1).contiguous().\
                             view(1, num_points, 3)
                    trans_matrix = np.eye(4)
                    trans_matrix[:3, :3] = quaternion2matrix(
                        quaternion_normalize(rotation))
                    R = torch.from_numpy(trans_matrix[:3, :3].astype(
                        np.float32)).cuda().view(1, 3, 3)
                    trans_matrix[0:3, 3] = translation
                    new_cloud = torch.bmm((cloud - T), R).contiguous()
                    refined_rot, refined_trans = self.refiner(
                        new_cloud, emb, index)
                    refined_rot = refined_rot.view(1, 1, -1)
                    refined_rot = refined_rot / (torch.norm(
                        refined_rot, dim=2).view(1, 1, 1))
                    rotation_2 = refined_rot.view(-1).cpu().data.numpy()
                    translation_2 = refined_trans.view(-1).cpu().data.numpy()
                    trans_matrix_2 = np.eye(4)
                    trans_matrix_2[:3, :3] = quaternion2matrix(
                        quaternion_normalize(rotation_2))

                    trans_matrix_2[0:3, 3] = translation_2

                    trans_matrix_final = np.dot(trans_matrix, trans_matrix_2)
                    rotation_final = matrix2quaternion(
                        trans_matrix_final[:3, :3])
                    translation_final = np.array([
                        trans_matrix_final[0][3], trans_matrix_final[1][3],
                        trans_matrix_final[2][3]
                    ])

                    rotation = rotation_final
                    translation = translation_final
            translations.append(translation)
            rotations.append(quaternion_normalize(rotation))
        return rotations, translations
Example #42
0
def plot_all_experiments(experiments, bucket='ccurtis.data', folder='test',
                         yrange=(10**-1, 10**1), fps=100.02,
                         xrange=(10**-2, 10**0), upload=True,
                         outfile='test.png', exponential=True):
    """Plots precision-weighted averages of MSD datasets.

    Plots pre-calculated precision-weighted averages of MSD datasets calculated
    from precision_averaging and stored in an AWS S3 bucket.

    Parameters
    ----------
    experiments : list of str
        List of experiment names to plot. Each experiment must have an MSD and
        SEM file associated with it in s3.
    bucket : str
        S3 bucket from which to download data.
    folder : str
        Folder in s3 bucket from which to download data.
    yrange : list of float
        Y range of plot
    xrange: list of float
        X range of plot
    upload : bool
        True to upload to S3
    outfile : str
        Filename of output image

    """

    n = len(experiments)

    color = iter(cm.viridis(np.linspace(0, 0.9, n)))

    fig = plt.figure(figsize=(8.5, 8.5))
    plt.xlim(xrange[0], xrange[1])
    plt.ylim(yrange[0], yrange[1])
    plt.xlabel('Tau (s)', fontsize=25)
    plt.ylabel(r'Mean Squared Displacement ($\mu$m$^2$)', fontsize=25)

    geo = {}
    gstder = {}
    counter = 0
    for experiment in experiments:
        aws.download_s3('{}/geomean_{}.csv'.format(folder, experiment),
                        'geomean_{}.csv'.format(experiment), bucket_name=bucket)
        aws.download_s3('{}/geoSEM_{}.csv'.format(folder, experiment),
                        'geoSEM_{}.csv'.format(experiment), bucket_name=bucket)

        geo[counter] = np.genfromtxt('geomean_{}.csv'.format(experiment))
        gstder[counter] = np.genfromtxt('geoSEM_{}.csv'.format(experiment))
        geo[counter] = ma.masked_equal(geo[counter], 0.0)
        gstder[counter] = ma.masked_equal(gstder[counter], 0.0)

        frames = np.shape(gstder[counter])[0]
        xpos = np.linspace(0, frames-1, frames)/fps
        c = next(color)

        if exponential:
            plt.loglog(xpos, np.exp(geo[counter]), c=c, linewidth=6,
                       label=experiment)
            plt.loglog(xpos, np.exp(geo[counter] - 1.96*gstder[counter]),
                       c=c, dashes=[6, 2], linewidth=4)
            plt.loglog(xpos, np.exp(geo[counter] + 1.96*gstder[counter]),
                       c=c, dashes=[6, 2], linewidth=4)
        else:
            plt.loglog(xpos, geo[counter], c=c, linewidth=6,
                       label=experiment)
            plt.loglog(xpos, geo[counter] - 1.96*gstder[counter], c=c,
                       dashes=[6, 2], linewidth=4)
            plt.loglog(xpos, geo[counter] + 1.96*gstder[counter], c=c,
                       dashes=[6, 2], linewidth=4)

        counter = counter + 1

    plt.legend(frameon=False, prop={'size': 16})

    if upload:
        fig.savefig(outfile, bbox_inches='tight')
        aws.upload_s3(outfile, folder+'/'+outfile, bucket_name=bucket)
Example #43
0
def precision_averaging(group, geomean, geo_stder, weights, save=True,
                        bucket='ccurtis.data', folder='test',
                        experiment='test'):
    """Calculates precision-weighted averages of MSD datasets.

    Parameters
    ----------
    group : list of str
        List of experiment names to average. Each element corresponds to a key
        in geo_stder and geomean.
    geomean : dict of numpy.ndarray
        Each entry in the dictionary corresponds to an MSD profile, the key
        corresponding to an experiment name.
    geo_stder : dict of numpy.ndarray
        Each entry in dictionary corresponds to the standard errors of an MSD
        profile, the key corresponding to an experiment name.
    weights : numpy.ndarray
        Precision weights to be used in precision averaging.

    Returns
    -------
    geo : numpy.ndarray
        Precision-weighted averaged MSDs from experiments specified in group
    geo_stder : numpy.ndarray
        Precision-weighted averaged SEMs from experiments specified in group

    """

    frames = np.shape(geo_stder[group[0]])[0]
    slices = len(group)

    video_counter = 0
    geo_holder = np.zeros((slices, frames))
    gstder_holder = np.zeros((slices, frames))
    w_holder = np.zeros((slices, frames))
    for sample in group:
        w_holder[video_counter, :] = (1 / geo_stder[sample]**2) / weights
        geo_holder[video_counter, :] = w_holder[video_counter, :] * geomean[sample]
        gstder_holder[video_counter, :] = 1 / geo_stder[sample]**2
        video_counter = video_counter + 1

    w_holder = ma.masked_equal(w_holder, 0.0)
    w_holder = ma.masked_equal(w_holder, 1.0)
    geo_holder = ma.masked_equal(geo_holder, 0.0)
    geo_holder = ma.masked_equal(geo_holder, 1.0)
    gstder_holder = ma.masked_equal(gstder_holder, 0.0)
    gstder_holder = ma.masked_equal(gstder_holder, 1.0)

    geo = ma.sum(geo_holder, axis=0)
    geo_stder = ma.sqrt((1/ma.sum(gstder_holder, axis=0)))

    if save:
        geo_f = 'geomean_{}.csv'.format(experiment)
        gstder_f = 'geoSEM_{}.csv'.format(experiment)
        np.savetxt(geo_f, geo, delimiter=',')
        np.savetxt(gstder_f, geo_stder, delimiter=',')
        aws.upload_s3(geo_f, '{}/{}'.format(folder, geo_f), bucket_name=bucket)
        aws.upload_s3(gstder_f, '{}/{}'.format(folder, gstder_f),
                      bucket_name=bucket)

    geodata = Bunch(geomean=geo, geostd=geo_stder, weighthold=w_holder,
                    geostdhold=gstder_holder)

    return geodata
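A hedged usage sketch for precision_averaging, assuming the function and its Bunch helper are importable. The two-experiment inputs are hypothetical, weights is taken as the summed precisions (1/sigma^2) per frame, and save=False so nothing is written or uploaded:

import numpy as np

group = ['exp1', 'exp2']
geomean = {'exp1': np.array([0.1, 0.2, 0.3]),
           'exp2': np.array([0.2, 0.3, 0.4])}
geo_stder = {'exp1': np.array([0.02, 0.02, 0.03]),
             'exp2': np.array([0.03, 0.04, 0.04])}
weights = sum(1 / geo_stder[s]**2 for s in group)   # summed precision per frame

geodata = precision_averaging(group, geomean, geo_stder, weights, save=False)
print(geodata.geomean)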
Example #44
0
def geomean_msdisp(prefix, umppx=0.16, fps=100.02, upload=True,
                   remote_folder="01_18_Experiment", bucket='ccurtis.data',
                   backup_frames=651):
    """Comptes geometric averages of mean squared displacement datasets

    Calculates geometric averages and stadard errors for MSD datasets. Might
    error out if not formatted as output from all_msds2.

    Parameters
    ----------
    prefix : string
        Prefix of file name to be plotted e.g. features_P1.csv prefix is P1.
    umppx : float
        Microns per pixel of original images.
    fps : float
        Frames per second of video.
    upload : bool
        True if you want to upload to s3.
    remote_folder : string
        Folder in S3 bucket to upload to.
    bucket : string
        Name of S3 bucket to upload to.

    Returns
    -------
    geo_mean : numpy.ndarray
        Geometric mean of trajectory MSDs at all time points.
    geo_stder : numpy.ndarray
        Geometric standard error of trajectory MSDs at all time points.

    """

    merged = pd.read_csv('msd_{}.csv'.format(prefix))
    try:
        particles = int(max(merged['Track_ID']))
        frames = int(max(merged['Frame']))
        ypos = np.zeros((particles+1, frames+1))

        for i in range(0, particles+1):
            ypos[i, :] = merged.loc[merged.Track_ID == i, 'MSDs']*umppx*umppx
            xpos = merged.loc[merged.Track_ID == i, 'Frame']/fps

        geo_mean = np.nanmean(ma.log(ypos), axis=0)
        geo_stder = ma.masked_equal(stats.sem(ma.log(ypos), axis=0,
                                              nan_policy='omit'), 0.0)

    except ValueError:
        geo_mean = np.nan*np.ones(backup_frames)
        geo_stder = np.nan*np.ones(backup_frames)

    np.savetxt('geomean_{}.csv'.format(prefix), geo_mean, delimiter=",")
    np.savetxt('geoSEM_{}.csv'.format(prefix), geo_stder, delimiter=",")

    if upload:
        aws.upload_s3('geomean_{}.csv'.format(prefix),
                      remote_folder+'/'+'geomean_{}.csv'.format(prefix),
                      bucket_name=bucket)
        aws.upload_s3('geoSEM_{}.csv'.format(prefix),
                      remote_folder+'/'+'geoSEM_{}.csv'.format(prefix),
                      bucket_name=bucket)

    return geo_mean, geo_stder
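The geometric mean above is computed in log space; a compact sketch of that step with two synthetic tracks:

import numpy as np
import numpy.ma as ma

ypos = np.array([[1.0, 4.0], [2.0, 8.0]])    # MSDs for two tracks at two lags
geo_mean = np.nanmean(ma.log(ypos), axis=0)  # mean of logs per lag
print(np.exp(geo_mean))                      # geometric means: [sqrt(2), sqrt(32)]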
Example #45
0
    depth = np.array(Image.open('{0}/{1}-depth.png'.format(opt.dataset_root, testlist[now])))
    posecnn_meta = scio.loadmat('{0}/results_PoseCNN_RSS2018/{1}.mat'.format(ycb_toolbox_dir, '%06d' % now))
    label = np.array(posecnn_meta['labels'])
    posecnn_rois = np.array(posecnn_meta['rois'])

    lst = posecnn_rois[:, 1:2].flatten()
    my_result_wo_refine = []
    my_result = []
    
    for idx in range(len(lst)):
        itemid = lst[idx]
        try:
            rmin, rmax, cmin, cmax = get_bbox(posecnn_rois)

            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(label, itemid))
            mask = mask_label * mask_depth

            choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
            # randomly sample num_points points from the PoseCNN segmentation
            # (the PointNet-style estimator expects a fixed-size input)
            if len(choose) > num_points:
                c_mask = np.zeros(len(choose), dtype=int)
                c_mask[:num_points] = 1
                np.random.shuffle(c_mask)
                choose = choose[c_mask.nonzero()]
            else:
                choose = np.pad(choose, (0, num_points - len(choose)), 'wrap')

            depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
            xmap_masked = xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
Example #46
0
def full_prediction(image, depth, meta, segmentor, estimator, refiner, to_tensor, normalize, device, cuda, color_dict,
                    class_names=None, point_clouds=None, plot=False, color_prediction=False, bbox=False, put_text=False):

    start_time = time.time()
    output_dict = {'predictions': {},
                   'elapsed_times': {}}

    if color_prediction:
        output_dict['segmented_prediction'] = np.array(copy.deepcopy(image), dtype=float)
        output_dict['pose_prediction'] = np.array(copy.deepcopy(image), dtype=float)

    # preprocesses input
    x = copy.deepcopy(image)
    x = to_tensor(x)
    x = normalize(x)

    x = x.to(device)
    x = x.unsqueeze(0)
    # get segmentation label
    pred = segmentor.predict(x)
    pred = F.softmax(pred, dim=1)
    if cuda:
        pred = pred.cpu()
    pred = pred[0]
    # crop and preprocess the label to pass into the pose estimator
    pred_arg = torch.argmax(pred, dim=0).numpy()

    found_cls, counts = np.unique(pred_arg, return_counts=True)

    if 0 in found_cls:
        start = 1
    else:
        start = 0

    for i, cls in enumerate(found_cls[start:]):
        if counts[i+start] > 100:
            cls_pred_arg = copy.deepcopy(pred_arg)
            cls_pred_arg[cls_pred_arg != cls] = 0
            cls_pred = cls_pred_arg * pred[cls].numpy()

            ret, labels = cv2.connectedComponents(np.array(cls_pred_arg, dtype=np.uint8), connectivity=8)

            biggest = 1
            biggest_score = 0
            unique = np.unique(labels)
            if 0 in unique:
                start2 = 1
            else:
                start2 = 0
            for u in unique[start2:]:
                score = np.mean(cls_pred[labels == u])
                if score > biggest_score:
                    biggest_score = score
                    biggest = u

            cls_pred[labels != biggest] = 0
            cls_pred[cls_pred != 0] = 255
            cls_pred = np.array(cls_pred, dtype=np.uint8)

            output_dict['predictions'][class_names[cls-1]] = {'mask': cls_pred}

            if color_prediction:
                for c, color_value in enumerate(color_dict[class_names[cls-1]]['value']):
                    c_mask = np.zeros(cls_pred.shape)
                    c_mask[cls_pred != 0] = color_value
                    output_dict['segmented_prediction'][:, :, c][cls_pred != 0] = \
                        output_dict['segmented_prediction'][:, :, c][cls_pred != 0] * 0.7 + c_mask[cls_pred != 0] * 0.3

                if bbox:
                    bbox = get_bbox(cls_pred)
                    cv2.rectangle(output_dict['segmented_prediction'],
                                  (bbox[2], bbox[0]),
                                  (bbox[3], bbox[1]),
                                  color_dict[class_names[cls-1]]['value'],
                                  2)
                if put_text:
                    bbox = get_bbox(cls_pred)
                    try:
                        cv2.putText(output_dict['segmented_prediction'],
                                    'Segmentation',
                                    (10, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX,
                                    1,
                                    (0, 0, 0),
                                    2,
                                    cv2.LINE_AA)

                        cv2.putText(output_dict['segmented_prediction'],
                                    class_names[cls - 1],
                                    (bbox[2] + 10, bbox[0] - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX,
                                    1,
                                    color_dict[class_names[cls-1]]['value'],
                                    2,
                                    cv2.LINE_AA)
                    except Exception:
                        pass



    if color_prediction:
        output_dict['segmented_prediction'][output_dict['segmented_prediction'] < 0] = 0
        output_dict['segmented_prediction'][output_dict['segmented_prediction'] > 255] = 255
        output_dict['segmented_prediction'] = np.array(output_dict['segmented_prediction'], dtype=np.uint8)

    output_dict['elapsed_times']['segmentation'] = time.time()-start_time

    start_time_pose = time.time()
    xmap = np.array([[j for i in range(640)] for j in range(480)])
    ymap = np.array([[i for i in range(640)] for j in range(480)])
    num_points = 1000
    # estimate the pose
    for cls in output_dict['predictions']:
        #print('cls name', cls, class_names.index(cls))
        mask_label = ma.getmaskarray(ma.masked_equal(output_dict['predictions'][cls]['mask'], 255))
        rmin, rmax, cmin, cmax = get_bbox(mask_label)
        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
        mask = mask_label * mask_depth
        # select some points on the object
        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) == 0:
            continue

        if len(choose) > num_points:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:num_points] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, num_points - len(choose)), 'wrap')

        # select the chosen points
        depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        xmap_masked = xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])

        # compute the cartesian space position of each pixel and sample to point cloud

        pt2 = depth_masked * meta['depth_scale']
        pt0 = (ymap_masked - meta['intr']['ppx']) * pt2 / meta['intr']['fx']
        pt1 = (xmap_masked - meta['intr']['ppy']) * pt2 / meta['intr']['fy']

        points = np.concatenate((pt0, pt1, pt2), axis=1)
        #np_points = copy.deepcopy(points)

        points = torch.from_numpy(points.astype(np.float32)).unsqueeze(0).to(device)
        choose = torch.LongTensor(choose.astype(np.int32)).unsqueeze(0).to(device)

        img = np.transpose(np.array(image)[:, :, :3], (2, 0, 1))[:, rmin:rmax, cmin:cmax]
        img = normalize(torch.from_numpy(img.astype(np.float32))).unsqueeze(0).to(device)
        idx = torch.LongTensor([int(class_names.index(cls))]).unsqueeze(0).to(device)


        pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
        new_points = get_new_points(pred_r, pred_t, pred_c, points)
        _, my_r, my_t = my_estimator_prediction(pred_r, pred_t, pred_c, num_points, 1, points)

        # refine the pose
        for ite in range(0, 2):
            pred_r, pred_t = refiner(new_points, emb, idx)
        _, my_r, my_t = my_refined_prediction(pred_r, pred_t, my_r, my_t)

        output_dict['predictions'][cls]['position'] = my_t
        output_dict['predictions'][cls]['rotation'] = my_r

        if color_prediction:
            my_r = quaternion_matrix(my_r)[:3, :3]
            np_pred = np.dot(point_clouds[class_names.index(cls)], my_r.T)
            np_pred = np.add(np_pred, my_t)

            output_dict['pose_prediction'] = pc_utils.pointcloud2image(output_dict['pose_prediction'],
                                                                       np_pred,
                                                                       3,
                                                                       meta['intr'],
                                                                       color=color_dict[cls]['value'])

            if put_text:
                try:
                    cv2.putText(output_dict['pose_prediction'],
                                'Pose Estimation',
                                (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                1,
                                (0, 0, 0),
                                2,
                                cv2.LINE_AA)
                except Exception:
                    pass

    if color_prediction:
        output_dict['pose_prediction'][output_dict['pose_prediction'] < 0] = 0
        output_dict['pose_prediction'][output_dict['pose_prediction'] > 255] = 255
        output_dict['pose_prediction'] = np.array(output_dict['pose_prediction'], dtype=np.uint8)

    output_dict['elapsed_times']['pose_estimation'] = time.time() - start_time_pose

    if plot and color_prediction:
        title = ''
        for cls in output_dict['predictions']:
            title += '{}: {}, '.format(color_dict[cls]['tag'], cls)
        fig, axs = plt.subplots(1, 2, constrained_layout=True)
        fig.suptitle(title)
        plt.subplot(1, 2, 1)
        plt.imshow(output_dict['segmented_prediction'])
        plt.axis('off')
        plt.title('segmented_prediction')
        plt.subplot(1, 2, 2)
        plt.imshow(output_dict['pose_prediction'])
        plt.axis('off')
        plt.title('pose_prediction')
        plt.show()

    del_keys = []
    for cls in output_dict['predictions'].keys():
        #print(output_dict['predictions'][cls].keys())
        if 'position' not in output_dict['predictions'][cls]:
            del_keys.append(cls)
        elif 'rotation' not in output_dict['predictions'][cls]:
            del_keys.append(cls)
        elif 'mask' not in output_dict['predictions'][cls]:
            del_keys.append(cls)

    for cls in del_keys:
        print('Deleting cls "{}"'.format(cls))
        del output_dict['predictions'][cls]

    output_dict['elapsed_times']['total'] = time.time() - start_time

    # return dict with found objects and their pose. also return the painted image

    return output_dict
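The depth-to-cloud conversion in this function (and in the dataset loaders above) is the standard pinhole back-projection; a minimal worked sketch with hypothetical intrinsics:

import numpy as np

fx, fy, cx, cy = 600.0, 600.0, 320.0, 240.0   # hypothetical intrinsics
u, v, depth_m = 400.0, 300.0, 0.5             # column, row, depth in metres

x = (u - cx) * depth_m / fx   # matches pt0 = (ymap_masked - ppx) * pt2 / fx
y = (v - cy) * depth_m / fy   # matches pt1 = (xmap_masked - ppy) * pt2 / fy
z = depth_m                   # matches pt2 = depth_masked * depth_scale
print(x, y, z)                # approx (0.0667, 0.05, 0.5)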
Example #47
0
    def __getitem__(self, idx):
        index = random.randint(0, self.data_len - 10)

        label = np.array(
            Image.open('{0}/{1}-label.png'.format(self.root,
                                                  self.path[index])))
        meta = scio.loadmat('{0}/{1}-meta.mat'.format(self.root,
                                                      self.path[index]))
        if not self.use_noise:
            rgb = np.array(
                Image.open('{0}/{1}-color.png'.format(
                    self.root, self.path[index])).convert("RGB"))
        else:
            rgb = np.array(
                self.trancolor(
                    Image.open('{0}/{1}-color.png'.format(
                        self.root, self.path[index])).convert("RGB")))

        if self.path[index][:8] == 'data_syn':
            rgb = Image.open('{0}/{1}-color.png'.format(
                self.root, self.path[index])).convert("RGB")
            rgb = ImageEnhance.Brightness(rgb).enhance(1.5).filter(
                ImageFilter.GaussianBlur(radius=0.8))
            rgb = np.array(self.trancolor(rgb))
            seed = random.randint(0, self.back_len - 10)
            back = np.array(
                self.trancolor(
                    Image.open('{0}/{1}-color.png'.format(
                        self.root, self.path[seed])).convert("RGB")))
            back_label = np.array(
                Image.open('{0}/{1}-label.png'.format(self.root,
                                                      self.path[seed])))
            mask = ma.getmaskarray(ma.masked_equal(label, 0))
            back = np.transpose(back, (2, 0, 1))
            rgb = np.transpose(rgb, (2, 0, 1))
            rgb = rgb + np.random.normal(loc=0.0, scale=5.0, size=rgb.shape)
            rgb = back * mask + rgb
            label = back_label * mask + label
            rgb = np.transpose(rgb, (1, 2, 0))
            # scipy.misc.imsave('embedding_final/rgb_{0}.png'.format(index), rgb)
            # scipy.misc.imsave('embedding_final/label_{0}.png'.format(index), label)

        if self.use_noise:
            choice = random.randint(0, 3)
            if choice == 0:
                rgb = np.fliplr(rgb)
                label = np.fliplr(label)
            elif choice == 1:
                rgb = np.flipud(rgb)
                label = np.flipud(label)
            elif choice == 2:
                rgb = np.fliplr(rgb)
                rgb = np.flipud(rgb)
                label = np.fliplr(label)
                label = np.flipud(label)

        obj = meta['cls_indexes'].flatten().astype(np.int32)
        obj = np.append(obj, [0], axis=0)
        target = copy.deepcopy(label)

        rgb = np.transpose(rgb, (2, 0, 1))
        orig = copy.deepcopy(rgb)
        rgb = self.norm(torch.from_numpy(rgb.astype(np.float32)))
        #rgb = torch.from_numpy(rgb.astype(np.float32))
        target = torch.from_numpy(target.astype(np.int64))

        #return rgb, target  # Normal
        return '{0}{1}'.format(
            self.root,
            self.path[index]), orig, rgb, target  # To test and see orig image
Example #48
0
 def setUp(self):
     self.cube = iris.cube.Cube(ma.masked_equal([1, 2, 3, 4, 5], 3))
     self.cube.add_dim_coord(DimCoord([6, 7, 8, 9, 10], long_name='foo'),
                             0)
     self.func = lambda x: x >= 3
Example #49
0
    def __getitem__(self, index):

        if self.list_rgb[index].find('renders') != -1:
            img = Image.open(self.list_rgb[index])
            ori_img = np.array(img)
            depth = np.array(Image.open(self.list_depth[index]))
            label = np.array(Image.open(self.list_label[index]))
            pose = pickle.load(open(self.list_pose[index], 'rb'), encoding='utf-8')['RT']
            cx = self.render_cx
            cy = self.render_cy
            fx = self.render_fx
            fy = self.render_fy
            mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(1)))
            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask = mask_label * mask_depth
        elif self.list_rgb[index].find('fuse') != -1:
            img = Image.open(self.list_rgb[index])
            ori_img = np.array(img)
            depth = np.array(Image.open(self.list_depth[index]))
            label = np.array(Image.open(self.list_label[index]))
            cx = self.cam_cx
            cy = self.cam_cy
            fx = self.cam_fx
            fy = self.cam_fy
            idx = self.objlist.index(self.obj_id)
            pose = pickle.load(open(self.list_pose[index], 'rb'), encoding='utf-8')[1][idx]
            begins = pickle.load(open(self.list_pose[index], 'rb'), encoding='utf-8')[0][idx]
            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(self.obj_id)))
            mask = mask_label * mask_depth
            cx += begins[1]
            cy += begins[0]
        else:
            img = Image.open(self.list_rgb[index])
            ori_img = np.array(img)
            depth = np.array(Image.open(self.list_depth[index]))
            label = np.array(Image.open(self.list_label[index]))
            pose = np.load(self.list_pose[index])
            cx = self.cam_cx
            cy = self.cam_cy
            fx = self.cam_fx
            fy = self.cam_fy
            if len(label.shape) == 2:
                mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
            else:
                mask_label = ma.getmaskarray(ma.masked_equal(label, np.array([255, 255, 255])))[:, :, 0]
            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask = mask_label * mask_depth

        if self.mode == 'train':
            img = self.trancolor(img)

        img = np.array(img)[:, :, :3]
        img = np.transpose(img, (2, 0, 1))
        img_masked = img

        # rmin, rmax, cmin, cmax = get_bbox_mask(mask)
        rmin, rmax, cmin, cmax = get_bbox(mask_to_bbox(mask_label))
        img_masked = img_masked[:, rmin:rmax, cmin:cmax]

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) == 0:
            cc = torch.LongTensor([0])
            return (cc, cc, cc, cc, cc, cc, cc, cc, cc, cc)
        if len(choose) > self.num:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num - len(choose)), 'wrap')
        
        depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])
        cam_scale = 1.0
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - cx) * pt2 / fx
        pt1 = (xmap_masked - cy) * pt2 / fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1) / 1000

        model_points = self.pt / 1000
        dellist = [j for j in range(0, len(model_points))]
        dellist = random.sample(dellist, len(model_points) - self.num)
        model_points = np.delete(model_points, dellist, axis=0)

        target_r = pose[:3, :3]
        target_t = pose[:3, 3]
        target = np.dot(model_points, target_r.T)
        target = np.add(target, target_t)

        model_kp = self.kp
        scene_kp = np.add(np.dot(model_kp, target_r.T), target_t)
        vertex_gt = compute_vertex_hcoords(cloud, scene_kp)

        return torch.from_numpy(cloud.astype(np.float32)), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy((img_masked/255).astype(np.float32))), \
               torch.from_numpy(target.astype(np.float32)), \
               torch.from_numpy(model_points.astype(np.float32)), \
               torch.from_numpy(model_kp.astype(np.float32)), \
               torch.from_numpy(vertex_gt.astype(np.float32)), \
               torch.LongTensor([0]), \
               torch.from_numpy(target_r.astype(np.float32)), \
               torch.from_numpy(target_t.astype(np.float32))
Example #50
0
nc2 = xr.open_dataset(fname2)
sfcelv_hydroweb2 = nc2.sfcelv_hydroweb.values
sfcelv_cmf2 = nc2.sfcelv_cmf.values
lons2 = nc2.lon.values
lats2 = nc2.lat.values
pnames2 = nc2.name.values
pnum2 = len(pnames2)
print(np.shape(sfcelv_cmf2), pnum2)
# mask out -9999 values
sfcelv_cmf2 = ma.masked_where(sfcelv_hydroweb2 == -9999.0,
                              sfcelv_cmf2).filled(-9999.0)
#sfcelv_hydroweb=ma.masked_where(sfcelv_hydroweb==-9999.0,sfcelv_hydroweb)
sfcelv_diff2 = ma.masked_where(sfcelv_hydroweb2 == -9999.0,
                               (sfcelv_cmf2 -
                                sfcelv_hydroweb2)**2).filled(-9999.0)
# RMSE is the square root of the mean squared difference
sfcelv_rmse2 = np.sqrt(np.mean(ma.masked_equal(sfcelv_diff2, -9999.0),
                               axis=0))  #.compressed()#
sfcelv_rmse2 = sfcelv_rmse2.filled()
print(np.shape(sfcelv_rmse2))
print(sfcelv_rmse2[0:10])
# sfcelv_bias_com=ma.masked_equal(sfcelv_bias1,-9999.0).compressed()

os.system("mkdir -p ./fig")

# river width
sup = 2
w = 0.02
alpha = 1
width = 0.5

land = "#C0C0C0"
Example #51
0
def draw_subregions(subregions,
                    lats,
                    lons,
                    fname,
                    fmt='png',
                    ptitle='',
                    parallels=None,
                    meridians=None,
                    subregion_masks=None):
    ''' Draw subregion domain(s) on a map.

    :param subregions: The subregion objects to plot on the map.
    :type subregions: :class:`list` of subregion objects (Bounds objects)

    :param lats: Array of latitudes values.
    :type lats: :class:`numpy.ndarray`

    :param lons: Array of longitudes values.
    :type lons: :class:`numpy.ndarray`

    :param fname: The filename of the plot.
    :type fname: :mod:`string`

    :param fmt: (Optional) filetype for the output.
    :type fmt: :mod:`string`

    :param ptitle: (Optional) plot title.
    :type ptitle: :mod:`string`

    :param parallels: (Optional) :class:`list` of :class:`int` or :class:`float` for the parallels to
        be drawn. See the `Basemap documentation <http://matplotlib.org/basemap/users/graticule.html>`_
        for additional information.
    :type parallels: :class:`list` of :class:`int` or :class:`float`

    :param meridians: (Optional) :class:`list` of :class:`int` or :class:`float` for the meridians to
        be drawn. See the `Basemap documentation <http://matplotlib.org/basemap/users/graticule.html>`_
        for additional information.
    :type meridians: :class:`list` of :class:`int` or :class:`float`

    :param subregion_masks: (Optional) :class:`dict` of :class:`bool` arrays for each
        subregion for giving finer control of the domain to be drawn, by default
        the entire domain is drawn.
    :type subregion_masks: :class:`dict` of :class:`bool` arrays
    '''
    # Set up the figure
    fig = plt.figure()
    fig.set_size_inches((8.5, 11.))
    fig.dpi = 300
    ax = fig.add_subplot(111)

    # Determine the map boundaries and construct a Basemap object
    lonmin = lons.min()
    lonmax = lons.max()
    latmin = lats.min()
    latmax = lats.max()
    m = Basemap(projection='cyl',
                llcrnrlat=latmin,
                urcrnrlat=latmax,
                llcrnrlon=lonmin,
                urcrnrlon=lonmax,
                resolution='l',
                ax=ax)

    # Draw the borders for coastlines and countries
    m.drawcoastlines(linewidth=1)
    m.drawcountries(linewidth=.75)
    m.drawstates()

    # Create default meridians and parallels. The interval between
    # them should be 1, 5, 10, 20, 30, or 40 depending on the size
    # of the domain
    length = max((latmax - latmin), (lonmax - lonmin)) / 5
    if length <= 1:
        dlatlon = 1
    elif length <= 5:
        dlatlon = 5
    else:
        dlatlon = np.round(length, decimals=-1)

    if meridians is None:
        meridians = np.r_[np.arange(0, -180, -dlatlon)[::-1],
                          np.arange(0, 180, dlatlon)]
    if parallels is None:
        parallels = np.r_[np.arange(0, -90, -dlatlon)[::-1],
                          np.arange(0, 90, dlatlon)]

    # Draw parallels / meridians
    m.drawmeridians(meridians, labels=[0, 0, 0, 1], linewidth=.75, fontsize=10)
    m.drawparallels(parallels, labels=[1, 0, 0, 1], linewidth=.75, fontsize=10)

    # Set up the color scaling
    cmap = plt.cm.rainbow
    norm = mpl.colors.BoundaryNorm(np.arange(1, len(subregions) + 3), cmap.N)

    # Process the subregions
    for i, reg in enumerate(subregions):
        if subregion_masks is not None and reg.name in subregion_masks.keys():
            domain = (i + 1) * subregion_masks[reg.name]
        else:
            domain = (i + 1) * np.ones((2, 2))

        nlats, nlons = domain.shape
        domain = ma.masked_equal(domain, 0)
        reglats = np.linspace(reg.lat_min, reg.lat_max, nlats)
        reglons = np.linspace(reg.lon_min, reg.lon_max, nlons)
        reglons, reglats = np.meshgrid(reglons, reglats)

        # Convert to projection coordinates. Not really necessary
        # for cylindrical projections, but kept here in case we need
        # to support other projections.
        x, y = m(reglons, reglats)

        # Draw the subregion domain
        m.pcolormesh(x, y, domain, cmap=cmap, norm=norm, alpha=.5)

        # Label the subregion
        xm, ym = x.mean(), y.mean()
        m.plot(xm,
               ym,
               marker='$%s$' % ("R" + str(i + 1)),
               markersize=12,
               color='k')

    # Add the title
    ax.set_title(ptitle)

    # Save the figure
    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
    fig.clf()
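# A minimal usage sketch for draw_subregions, assuming a simple Bounds-like
# object exposing the name/lat_min/lat_max/lon_min/lon_max attributes the
# function reads (the objects and values below are hypothetical).
import numpy as np
from collections import namedtuple

Bounds = namedtuple('Bounds', 'name lat_min lat_max lon_min lon_max')
subregions = [Bounds('R1', 30., 40., -110., -100.),
              Bounds('R2', 35., 45., -95., -85.)]
lats = np.arange(25., 51., 1.)    # the map extent is taken from these arrays
lons = np.arange(-120., -79., 1.)

draw_subregions(subregions, lats, lons, 'subregions_map', fmt='png',
                ptitle='Example subregions')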
Example #52
0
def draw_subregions(subregions,
                    lats,
                    lons,
                    fname,
                    fmt='png',
                    ptitle='',
                    parallels=None,
                    meridians=None,
                    subregion_masks=None):
    '''
    Purpose::
        Function to draw subregion domain(s) on a map

    Input::
        subregions - a list of subRegion objects
        lats - array of latitudes
        lons - array of longitudes
        fname  - a string specifying the filename of the plot
        fmt  - an optional string specifying the filetype, default is .png
        ptitle - an optional string specifying plot title
        parallels - an optional list of ints or floats for the parallels to be drawn
        meridians - an optional list of ints or floats for the meridians to be drawn
        subregion_masks - optional dictionary of boolean arrays, one per subRegion,
                         giving finer control of the domain to be drawn; by default
                         the entire domain is drawn.
    '''
    # Set up the figure
    fig = plt.figure()
    fig.set_size_inches((8.5, 11.))
    fig.dpi = 300
    ax = fig.add_subplot(111)

    # Determine the map boundaries and construct a Basemap object
    lonmin = lons.min()
    lonmax = lons.max()
    latmin = lats.min()
    latmax = lats.max()
    m = Basemap(projection='cyl',
                llcrnrlat=latmin,
                urcrnrlat=latmax,
                llcrnrlon=lonmin,
                urcrnrlon=lonmax,
                resolution='l',
                ax=ax)

    # Draw the borders for coastlines and countries
    m.drawcoastlines(linewidth=1)
    m.drawcountries(linewidth=.75)
    m.drawstates()

    # Create default meridians and parallels. The interval between
    # them should be 1, 5, 10, 20, 30, or 40 depending on the size
    # of the domain
    length = max((latmax - latmin), (lonmax - lonmin)) / 5
    if length <= 1:
        dlatlon = 1
    elif length <= 5:
        dlatlon = 5
    else:
        dlatlon = np.round(length, decimals=-1)

    if meridians is None:
        meridians = np.r_[np.arange(0, -180, -dlatlon)[::-1],
                          np.arange(0, 180, dlatlon)]
    if parallels is None:
        parallels = np.r_[np.arange(0, -90, -dlatlon)[::-1],
                          np.arange(0, 90, dlatlon)]

    # Draw parallels / meridians
    m.drawmeridians(meridians, labels=[0, 0, 0, 1], linewidth=.75, fontsize=10)
    m.drawparallels(parallels, labels=[1, 0, 0, 1], linewidth=.75, fontsize=10)

    # Set up the color scaling
    cmap = plt.cm.rainbow
    norm = mpl.colors.BoundaryNorm(np.arange(1, len(subregions) + 3), cmap.N)

    # Process the subregions
    for i, reg in enumerate(subregions):
        if subregion_masks is not None and reg.name in subregion_masks.keys():
            domain = (i + 1) * subregion_masks[reg.name]
        else:
            domain = (i + 1) * np.ones((2, 2))

        nlats, nlons = domain.shape
        domain = ma.masked_equal(domain, 0)
        reglats = np.linspace(reg.latmin, reg.latmax, nlats)
        reglons = np.linspace(reg.lonmin, reg.lonmax, nlons)
        reglons, reglats = np.meshgrid(reglons, reglats)

        # Convert to projection coordinates. Not really necessary
        # for cylindrical projections, but kept here in case we need
        # to support other projections.
        x, y = m(reglons, reglats)

        # Draw the subregion domain
        m.pcolormesh(x, y, domain, cmap=cmap, norm=norm, alpha=.5)

        # Label the subregion
        xm, ym = x.mean(), y.mean()
        m.plot(xm, ym, marker='$%s$' % (reg.name), markersize=12, color='k')

    # Add the title
    ax.set_title(ptitle)

    # Save the figure
    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
    fig.clf()
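# The default graticule spacing above depends only on the domain size; a
# standalone check of that rule with arbitrarily chosen domains:
import numpy as np

def default_interval(latmin, latmax, lonmin, lonmax):
    # Same logic as in draw_subregions: about five intervals across the
    # longer side of the domain, snapped to 1, 5, or a multiple of 10.
    length = max(latmax - latmin, lonmax - lonmin) / 5
    if length <= 1:
        return 1
    elif length <= 5:
        return 5
    return np.round(length, decimals=-1)

print(default_interval(30, 40, -110, -100))   # 10-degree domain -> 5
print(default_interval(-30, 60, -120, 60))    # 180-degree domain -> 40.0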
Example #53
0
    def __getitem__(self, index):
        img = Image.open(self.list_rgb[index])
        ori_img = np.array(img)
        depth = np.array(Image.open(self.list_depth[index]))
        label = np.array(Image.open(self.list_label[index]))
        obj = self.list_obj[index]
        rank = self.list_rank[index]

        if obj == 2:
            for i in range(0, len(self.meta[obj][rank])):
                if self.meta[obj][rank][i]['obj_id'] == 2:
                    meta = self.meta[obj][rank][i]
                    break
        else:
            meta = self.meta[obj][rank][0]

        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
        if self.mode == 'eval':
            mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
        else:
            mask_label = ma.getmaskarray(
                ma.masked_equal(label, np.array([255, 255, 255])))[:, :, 0]

        mask = mask_label * mask_depth

        if self.add_noise:
            img = self.trancolor(img)

        img = np.array(img)[:, :, :3]
        img = np.transpose(img, (2, 0, 1))
        img_masked = img

        if self.mode == 'eval':
            rmin, rmax, cmin, cmax = get_bbox(mask_to_bbox(mask_label))
        else:
            rmin, rmax, cmin, cmax = get_bbox(meta['obj_bb'])

        img_masked = img_masked[:, rmin:rmax, cmin:cmax]
        #p_img = np.transpose(img_masked, (1, 2, 0))
        #scipy.misc.imsave('evaluation_result/{0}_input.png'.format(index), p_img)

        target_r = np.resize(np.array(meta['cam_R_m2c']), (3, 3))
        target_t = np.array(meta['cam_t_m2c'])
        add_t = np.array([
            random.uniform(-self.noise_trans, self.noise_trans)
            for i in range(3)
        ])

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) == 0:
            cc = torch.LongTensor([0])
            return (cc, cc, cc, cc, cc, cc)

        if len(choose) > self.num:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num - len(choose)), 'wrap')

        depth_masked = depth[rmin:rmax,
                             cmin:cmax].flatten()[choose][:,
                                                          np.newaxis].astype(
                                                              np.float32)
        xmap_masked = self.xmap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])

        cam_scale = 1.0
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
        pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        cloud = cloud / 1000.0

        if self.add_noise:
            cloud = np.add(cloud, add_t)

        #fw = open('evaluation_result/{0}_cld.xyz'.format(index), 'w')
        #for it in cloud:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        #fw.close()

        model_points = self.pt[obj] / 1000.0
        dellist = [j for j in range(0, len(model_points))]
        dellist = random.sample(dellist,
                                len(model_points) - self.num_pt_mesh_small)
        model_points = np.delete(model_points, dellist, axis=0)

        #fw = open('evaluation_result/{0}_model_points.xyz'.format(index), 'w')
        #for it in model_points:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        #fw.close()

        target = np.dot(model_points, target_r.T)
        if self.add_noise:
            target = np.add(target, target_t / 1000.0 + add_t)
            out_t = target_t / 1000.0 + add_t
        else:
            target = np.add(target, target_t / 1000.0)
            out_t = target_t / 1000.0

        #fw = open('evaluation_result/{0}_tar.xyz'.format(index), 'w')
        #for it in target:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        #fw.close()

        return torch.from_numpy(cloud.astype(np.float32)), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy(img_masked.astype(np.float32))), \
               torch.from_numpy(target.astype(np.float32)), \
               torch.from_numpy(model_points.astype(np.float32)), \
               torch.LongTensor([self.objlist.index(obj)])
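# The pt0/pt1/pt2 lines above are the standard pinhole back-projection
# (X = (u - cx) * Z / fx, and likewise for Y). A self-contained sketch of
# the same math, assuming the usual (u, v) pixel convention (the loader
# swaps names via its xmap/ymap arrays); the intrinsics below are made up.
import numpy as np

def backproject(depth_mm, fx, fy, cx, cy):
    """Back-project a depth image in millimetres into an Nx3 cloud in metres."""
    h, w = depth_mm.shape
    v, u = np.mgrid[0:h, 0:w].astype(np.float32)   # pixel rows / columns
    z = depth_mm.astype(np.float32)
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    pts = np.stack([x, y, z], axis=-1).reshape(-1, 3)
    return pts[pts[:, 2] > 0] / 1000.0             # drop zero-depth pixels

cloud = backproject(np.random.randint(0, 2000, (480, 640)),
                    fx=572.4, fy=573.6, cx=325.3, cy=242.0)
print(cloud.shape)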
Example #54
0
def mainFunction(f):

    #############################################################################
    """
  # biomass
  predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif'
  trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif'
  shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp'
  trainND = -32768
  predND = -9999
  trgField = 'US_L3CODE'
  descrField = 'US_L3NAME'
  outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/ecoregion_correlation'
  xyLim = (500, 500)
  xLab = 'Reference (tons/ha)'
  yLab = 'Prediction (tons/ha)'
  annoXY = (15,420)
  """

    # biomass hexagon
    predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif'
    trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif'
    shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'
    trainND = -32768
    predND = -9999
    trgField = 'id'
    descrField = 'id'
    outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/hexagon_correlation'
    xyLim = (500, 500)
    xLab = 'Reference (tons/ha)'
    yLab = 'Prediction (tons/ha)'
    annoXY = (15, 420)
    """
  # cc
  predF = '/vol/v3/lt_stem_v3.1/models/canopy_20180915_1631/2001/canopy_20180915_1631_2001_mean.tif'
  trainF = '/vol/v2/stem/conus/reference_rasters/nlcd_2001_canopy_clipped_to_conus_train.tif'
  #shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp'
  shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'
  trainND = 255
  predND = 255
  trgField = 'id'
  descrField = 'id'
  #trgField = 'US_L3CODE'
  #descrField = 'US_L3NAME'
  #outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/ecoregion_correlation'
  outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/hexagon_correlation'
  xyLim = (100, 100)
  xLab = 'Reference (%)'
  yLab = 'Prediction (%)'
  annoXY = (5,82)
  """
    #############################################################################

    # get color setup
    norm = colors.Normalize(vmin=0, vmax=1)
    f2rgb = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('YlGnBu_r'))

    # open the shapefile
    vDriver = ogr.GetDriverByName("ESRI Shapefile")
    vSrc = vDriver.Open(shpF, 0)
    vLayer = vSrc.GetLayer()

    commonBox = get_intersec([predF, trainF])

    #for f in range(vLayer.GetFeatureCount()):
    feature = vLayer[f]
    name = feature.GetField(trgField)
    print('f: ' + str(f))
    outFig = os.path.join(
        outDir,
        (trgField.replace(' ', '_').lower() + '_' + str(name) + '.png'))
    if os.path.exists(outFig):
        #break
        return

    descr = feature.GetField(descrField)

    predP, coords = get_zone_pixels(feature, shpF, predF, 1, [
        commonBox[0], commonBox[2], commonBox[3], commonBox[1]
    ])  #.compressed() [commonBox[0], commonBox[2], commonBox[3], commonBox[1]]
    trainP, coords = get_zone_pixels(
        feature, shpF, trainF, 1,
        [coords[0], coords[1], coords[2], coords[3]])  #.compressed()

    predP = ma.masked_equal(predP, predND)
    trainP = ma.masked_equal(trainP, trainND)
    trainP = ma.masked_equal(trainP, 0)

    combMask = np.logical_not(
        np.logical_not(predP.mask) * np.logical_not(trainP.mask))
    predP[combMask] = ma.masked
    trainP[combMask] = ma.masked
    predP = predP.compressed()
    trainP = trainP.compressed()
    if (predP.shape[0] == 0) | (trainP.shape[0] == 0) | (predP == 0).all() | (
            trainP == 0).all():
        predP = np.array([0, 0, 1, 1], dtype='float64')
        trainP = np.array([0, 0, 1, 1], dtype='float64')
    mae = round(np.mean(np.absolute(np.subtract(predP, trainP))), 1)
    rmse = round(np.sqrt(np.mean((predP - trainP)**2)), 1)

    totPixs = trainP.shape[0]
    sampSize = round(totPixs * 1)
    pickFrom = range(sampSize)
    #sampIndex = np.random.choice(pickFrom, size=sampSize)
    sampIndex = pickFrom

    r = round(np.corrcoef(trainP[sampIndex], predP[sampIndex])[0][1], 2)
    if (mae == 0) & (r == 1):
        r = 0.0
    rColor = f2hex(f2rgb, r)
    p = sns.jointplot(trainP[sampIndex],
                      predP[sampIndex],
                      kind="hex",
                      color='blue',
                      xlim=(0, xyLim[0]),
                      ylim=(0, xyLim[1]),
                      size=5)
    p.ax_joint.set_xlabel(xLab)
    p.ax_joint.set_ylabel(yLab)
    p.ax_joint.annotate(
        'r: ' + str(r) + '\nrmse: ' + str(rmse) + '\nmae: ' + str(mae), annoXY)
    plt.tight_layout()
    outFig = os.path.join(
        outDir,
        (trgField.replace(' ', '_').lower() + '_' + str(name) + '.png'))
    p.savefig(outFig)

    df = pd.DataFrame(
        {
            'id': name,
            'descr': descr,
            'r': r,
            'rmse': rmse,
            'mae': mae,
            'color': rColor,
            'img': os.path.basename(outFig)
        },
        index=[0])
    outCSV = outFig.replace('.png', '.csv')
    df.to_csv(outCSV, ',', index=False)
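# The combMask step above keeps only pixels that are valid in both rasters;
# the same idea in compact standalone form (toy arrays, invented nodata values).
import numpy as np
import numpy.ma as ma

pred = ma.masked_equal(np.array([1., -9999., 3., 4.]), -9999.)
train = ma.masked_equal(np.array([-32768., 2., 3., 5.]), -32768.)

joint = ma.getmaskarray(pred) | ma.getmaskarray(train)
pred_v = ma.array(pred, mask=joint).compressed()
train_v = ma.array(train, mask=joint).compressed()

mae = np.mean(np.abs(pred_v - train_v))
rmse = np.sqrt(np.mean((pred_v - train_v) ** 2))
print(mae, rmse)   # statistics over the two jointly valid pixels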
Example #55
0
    def __getitem__(self, index):
        # print("------------------- Get data -------------------")
        img = Image.open('{0}/{1}-color.png'.format(self.root,
                                                    self.list[index]))
        depth = np.array(
            Image.open('{0}/{1}-depth.png'.format(self.root,
                                                  self.list[index])))
        label = np.array(
            Image.open('{0}/{1}-label.png'.format(self.root,
                                                  self.list[index])))
        meta = scio.loadmat('{0}/{1}-meta.mat'.format(self.root,
                                                      self.list[index]))

        if self.list[index][:8] != 'data_syn' and int(
                self.list[index][5:9]) >= 60:
            cam_cx = self.cam_cx_2
            cam_cy = self.cam_cy_2
            cam_fx = self.cam_fx_2
            cam_fy = self.cam_fy_2
        else:
            cam_cx = self.cam_cx_1
            cam_cy = self.cam_cy_1
            cam_fx = self.cam_fx_1
            cam_fy = self.cam_fy_1

        mask_back = ma.getmaskarray(ma.masked_equal(label, 0))

        add_front = False
        if self.add_noise:
            for k in range(5):
                seed = random.choice(self.syn)
                front = np.array(
                    self.trancolor(
                        Image.open('{0}/{1}-color.png'.format(
                            self.root, seed)).convert("RGB")))
                front = np.transpose(front, (2, 0, 1))
                f_label = np.array(
                    Image.open('{0}/{1}-label.png'.format(self.root, seed)))
                front_label = np.unique(f_label).tolist()[1:]
                if len(front_label) < self.front_num:
                    continue
                front_label = random.sample(front_label, self.front_num)
                for f_i in front_label:
                    mk = ma.getmaskarray(ma.masked_not_equal(f_label, f_i))
                    if f_i == front_label[0]:
                        mask_front = mk
                    else:
                        mask_front = mask_front * mk
                t_label = label * mask_front
                if len(t_label.nonzero()[0]) > 1000:
                    label = t_label
                    add_front = True
                    break

        obj = meta['cls_indexes'].flatten().astype(np.int32)

        while 1:
            idx = np.random.randint(0, len(obj))
            mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
            mask_label = ma.getmaskarray(ma.masked_equal(label, obj[idx]))
            mask = mask_label * mask_depth
            # print("idx : " + str(idx))
            # print("mask : " + str(mask))
            if len(mask.nonzero()[0]) > self.minimum_num_pt:
                break

        if self.add_noise:
            img = self.trancolor(img)

        rmin, rmax, cmin, cmax = get_bbox(mask_label)
        img = np.transpose(np.array(img)[:, :, :3], (2, 0, 1))[:, rmin:rmax,
                                                               cmin:cmax]

        if self.list[index][:8] == 'data_syn':
            seed = random.choice(self.real)
            back = np.array(
                self.trancolor(
                    Image.open('{0}/{1}-color.png'.format(
                        self.root, seed)).convert("RGB")))
            back = np.transpose(back, (2, 0, 1))[:, rmin:rmax, cmin:cmax]
            img_masked = back * mask_back[rmin:rmax, cmin:cmax] + img
        else:
            img_masked = img

        if self.add_noise and add_front:
            img_masked = img_masked * mask_front[
                rmin:rmax, cmin:cmax] + front[:, rmin:rmax, cmin:cmax] * ~(
                    mask_front[rmin:rmax, cmin:cmax])

        if self.list[index][:8] == 'data_syn':
            img_masked = img_masked + np.random.normal(
                loc=0.0, scale=7.0, size=img_masked.shape)

        # p_img = np.transpose(img_masked, (1, 2, 0))
        # scipy.misc.imsave('temp/{0}_input.png'.format(index), p_img)
        # scipy.misc.imsave('temp/{0}_label.png'.format(index), mask[rmin:rmax, cmin:cmax].astype(np.int32))

        target_r = meta['poses'][:, :, idx][:, 0:3]
        target_t = np.array([meta['poses'][:, :, idx][:, 3:4].flatten()])
        add_t = np.array([
            random.uniform(-self.noise_trans, self.noise_trans)
            for i in range(3)
        ])

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) > self.num_pt:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num_pt] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num_pt - len(choose)), 'wrap')

        depth_masked = depth[rmin:rmax,
                             cmin:cmax].flatten()[choose][:,
                                                          np.newaxis].astype(
                                                              np.float32)
        xmap_masked = self.xmap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])

        cam_scale = meta['factor_depth'][0][0]
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
        pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        if self.add_noise:
            cloud = np.add(cloud, add_t)

        # fw = open('temp/{0}_cld.xyz'.format(index), 'w')
        # for it in cloud:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        # fw.close()

        dellist = [j for j in range(0, len(self.cld[obj[idx]]))]
        if self.refine:
            dellist = random.sample(
                dellist,
                len(self.cld[obj[idx]]) - self.num_pt_mesh_large)
        else:
            dellist = random.sample(
                dellist,
                len(self.cld[obj[idx]]) - self.num_pt_mesh_small)
        model_points = np.delete(self.cld[obj[idx]], dellist, axis=0)

        # fw = open('temp/{0}_model_points.xyz'.format(index), 'w')
        # for it in model_points:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        # fw.close()

        target = np.dot(model_points, target_r.T)
        if self.add_noise:
            target = np.add(target, target_t + add_t)
        else:
            target = np.add(target, target_t)

        # fw = open('temp/{0}_tar.xyz'.format(index), 'w')
        # for it in target:
        #    fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        # fw.close()

        return torch.from_numpy(cloud.astype(np.float32)), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy(img_masked.astype(np.float32))), \
               torch.from_numpy(target.astype(np.float32)), \
               torch.from_numpy(model_points.astype(np.float32)), \
               torch.LongTensor([int(obj[idx]) - 1])
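# Both loaders cap the number of foreground pixels at a fixed budget:
# randomly subsample when there are too many, wrap-pad when there are too
# few. A minimal sketch of that pattern (the budget here is arbitrary).
import numpy as np

def sample_choose(mask_flat, num):
    choose = mask_flat.nonzero()[0]
    # (the loaders above bail out separately when choose is empty)
    if len(choose) > num:
        keep = np.zeros(len(choose), dtype=int)
        keep[:num] = 1
        np.random.shuffle(keep)
        choose = choose[keep.nonzero()]
    else:
        choose = np.pad(choose, (0, num - len(choose)), 'wrap')
    return choose

idx = sample_choose((np.random.rand(64 * 64) > 0.9).astype(int), num=200)
print(idx.shape)   # always (200,)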
Example #56
0
    def __getitem__(self, index):
        img = Image.open(self.imgs[index])
        ori_img = np.array(img)
        depth = np.array(Image.open(self.depths[index]))
        label = np.array(Image.open(self.labels[index]))
        obj = self.objects[index]
        rank = self.ranks[index]

        if obj == 2:
            for i in range(0, len(self.meta[obj][rank])):
                if self.meta[obj][rank][i]['obj_id'] == 2:
                    meta = self.meta[obj][rank][i]
                    break
        else:
            meta = self.meta[obj][rank][0]

        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
        mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
        mask = mask_label * mask_depth

        img = np.array(img)[:, :, :3]

        # add noise to image
        rows, cols, chs = img.shape
        gauss = np.random.normal(0, self.sigma, (rows, cols, chs)) * 255.0
        gauss = np.reshape(gauss.astype(img.dtype), (rows, cols, chs))
        img = img + gauss

        # # // show noisy image
        # cv2.imshow(f'Noisy img std {self.sigma}', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
        # cv2.waitKey(0)
        # # //

        img = np.transpose(img, (2, 0, 1))
        masked_img = img

        masked_bbox = mask_2_bbox(mask_label)
        rmin, rmax, cmin, cmax = get_bbox(masked_bbox)
        masked_img = masked_img[:, rmin:rmax, cmin:cmax]

        # # // Uncomment to show cropped image
        # crop_img = np.transpose(masked_img, (1, 2, 0))
        # cv2.imshow('Cropped image', cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR))
        # # //

        target_r = np.resize(np.array(meta['cam_R_m2c']), (3, 3))
        target_t = np.array(meta['cam_t_m2c'])

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) == 0:
            cc = torch.LongTensor([0])
            return (cc, cc, cc, cc, cc, cc)

        if len(choose) > self.num:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num - len(choose)), 'wrap')

        depth_masked = depth[rmin:rmax,
                             cmin:cmax].flatten()[choose][:,
                                                          np.newaxis].astype(
                                                              np.float32)
        xmap_masked = self.xmap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[
            rmin:rmax,
            cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        choose = np.array([choose])

        cam_scale = 1.0
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - self.cam_cx) * pt2 / self.cam_fx
        pt1 = (xmap_masked - self.cam_cy) * pt2 / self.cam_fy
        cloud = np.concatenate((pt0, pt1, pt2), axis=1)
        cloud = cloud / 1000.0

        # add noise to cloud
        rows, cols = cloud.shape
        gauss = np.random.normal(0, self.sigma,
                                 (rows, cols)).astype(cloud.dtype)
        gauss = np.reshape(gauss, (rows, cols))
        cloud = cloud + gauss

        # # // show input cloud
        # pcd_input = o3d.geometry.PointCloud()
        # pcd_input.points = o3d.utility.Vector3dVector(cloud)
        # pcd_input.paint_uniform_color([0, 0, 1])
        # visualize(pcd_input, self.imgs[index], self.depths[index], 'Input point mask')
        # o3d.visualization.draw_geometries([pcd_input], window_name=f'Input cloud with sigma {self.sigma}')
        # # //

        model_pts = self.pt[obj] / 1000.0
        dellist = [j for j in range(0, len(model_pts))]
        dellist = random.sample(dellist,
                                len(model_pts) - self.num_pt_mesh_small)
        model_pts = np.delete(model_pts, dellist, axis=0)

        # # // show init pose
        # pcd_init = o3d.geometry.PointCloud()
        # pcd_init.points = o3d.utility.Vector3dVector(model_pts)
        # pcd_init.paint_uniform_color([1, 0, 0])
        # visualize(pcd_init, self.imgs[index], self.depths[index], 'Init')
        # o3d.visualization.draw_geometries([pcd_init], window_name='Init pose')
        # # //

        target = np.dot(model_pts, target_r.T)
        target = np.add(target, target_t / 1000.0)

        # # // show ground truth
        # pcd_target = o3d.geometry.PointCloud()
        # pcd_target.points = o3d.utility.Vector3dVector(target)
        # pcd_target.paint_uniform_color([1, 0, 0])
        # visualize(pcd_target, self.imgs[index], self.depths[index], 'Ground truth pose')
        # o3d.visualization.draw_geometries([pcd_target], window_name='Ground truth cloud')
        # # //

        out_t = target_t / 1000.0

        return torch.from_numpy(cloud.astype(np.float32)), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy(masked_img.astype(np.float32))), \
               torch.from_numpy(target.astype(np.float32)), \
               torch.from_numpy(model_pts.astype(np.float32)), \
               torch.LongTensor([self.objs.index(obj)])
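# This variant perturbs both the RGB image and the cloud with zero-mean
# Gaussian noise of the same sigma. For the image it is safer to work in
# float and clip, since adding noise directly in uint8 (as above) can wrap
# around at 0 and 255; a hedged sketch, not the loader's exact behaviour.
import numpy as np

def add_gaussian_noise(img_uint8, sigma):
    noisy = img_uint8.astype(np.float32) + np.random.normal(
        0.0, sigma * 255.0, img_uint8.shape)
    return np.clip(noisy, 0, 255).astype(np.uint8)

img = np.random.randint(0, 256, (48, 64, 3), dtype=np.uint8)
noisy = add_gaussian_noise(img, sigma=0.05)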
nclouds = np.mean(nclouds_bin_all, axis=0)
hn_normalized = np.mean(hn_normalized_all, axis=0)

filledbin_all = np.argmin(hn_normalized[:])
fit = CSD_fit(hn_normalized[0:10], size[0:10])
logfit = fit[3]
a = fit[0]
b = fit[1]
c = fit[2]
print('a, b, c:')
print(a, b, c)

print(logfit)

sizelog = np.log10(size)
hnlog = ma.filled(np.log10(ma.masked_equal(hn_normalized, 0)), np.nan)
ncloudslog = ma.filled(np.log10(ma.masked_equal(nclouds, 0)), np.nan)

#res = ma.filled(log2(ma.masked_equal(m, 0)), 0)

mindistance_plus = mindistance_mean + mindistance_std
mindistance_minus = mindistance_mean - mindistance_std

filledbin = np.argmin(mindistance_mean)
slope, intercept, r_value, p_value, std_err = stats.linregress(
    size[0:filledbin], mindistance_mean[0:filledbin])

print('r-squared:', r_value**2)
line = intercept + slope * size

print('slope:', slope)
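# The fragment above fits a power law by regressing in log-log space after
# masking empty bins; a compact sketch of that pipeline on synthetic data.
import numpy as np
import numpy.ma as ma
from scipy import stats

size = np.logspace(0, 2, 20)          # synthetic size bins
counts = 50.0 * size ** -1.5          # true log-log slope: -1.5
counts[::7] = 0                       # a few empty bins

valid = ma.masked_equal(counts, 0)
slope, intercept, r_value, p_value, std_err = stats.linregress(
    np.log10(size)[~valid.mask], np.log10(valid.compressed()))
print('slope:', slope)                # recovers ~ -1.5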
def _NTAI_CAL(date,spl_arr, CoverageID_LST):

    #request an image cube for the specified date and area via WCS.
    #first get the temporal extent of the available LST data from the
    #DescribeCoverage response of the WCS endpoint
    #(Python 3: needs "import urllib.parse, urllib.request")
    endpoint='http://192.168.1.104:8080/rasdaman/ows'
    field={}
    field['SERVICE']='WCS'
    field['VERSION']='2.0.1'
    field['REQUEST']='DescribeCoverage'
    field['COVERAGEID']=CoverageID_LST#'LST_MOD11C2005_uptodate'#'LST_MOD11C2005'#'trmm_3b42_coverage_1'
    url_values = urllib.parse.urlencode(field,doseq=True)
    full_url = endpoint + '?' + url_values
    data = urllib.request.urlopen(full_url).read()
    root = etree.fromstring(data)
    lc = root.find(".//{http://www.opengis.net/gml/3.2}lowerCorner").text
    uc = root.find(".//{http://www.opengis.net/gml/3.2}upperCorner").text
    start_date=int((lc.split(' '))[2])
    end_date=int((uc.split(' '))[2])
    #print [start_date, end_date]

    #generate the dates list 
    cur_date=datetime.strptime(date,"%Y-%m-%d")
    #startt=145775
    start=datetime.fromtimestamp((start_date-(datetime(1970,1,1)-datetime(1601,1,1)).days)*24*60*60)
    #print start

    #tmp_date=datetime(start.year,cur_date.month,cur_date.day)
    #if tmp_date > start :
    #    start=(tmp_date-datetime(1601,01,01)).days
    #else: start=(datetime(start.year+1,cur_date.month,cur_date.day)-datetime(1601,01,01)).days
    #datelist=range(start+1,end_date-1,365)
    #print datelist

    #find the position of the requested date in the datelist
    #cur_epoch=(cur_date-datetime(1601,01,01)).days
    #cur_pos=min(range(len(datelist)),key=lambda x:abs(datelist[x]-cur_epoch))
    #print ('Current position:',cur_pos)

    try:    
        datelist, cur_pos = datelist_irregular_coverage(root, start_date, start, cur_date)
        print('irregular')
        logging.info('irregular')
    except IndexError:
        datelist, cur_pos = datelist_regular_coverage(root, start_date, start, cur_date)
        print('regular')
        logging.info('regular')
        
    #retrieve the data cube
    cube_arr=[]
    for d in datelist:
        print('LST', d)
        field={}
        field['SERVICE']='WCS'
        field['VERSION']='2.0.1'
        field['REQUEST']='GetCoverage'
        field['COVERAGEID']=CoverageID_LST#'LST_MOD11C2005_uptodate'#'LST_MOD11C2005'#'trmm_3b42_coverage_1'
        field['SUBSET']=['ansi('+str(d)+')',
                         'Lat('+str(spl_arr[1])+','+str(spl_arr[3])+')',
                        'Long('+str(spl_arr[0])+','+str(spl_arr[2])+')']
        field['FORMAT']='image/tiff'
        url_values = urllib.parse.urlencode(field,doseq=True)
        full_url = endpoint + '?' + url_values
        #print full_url
        tmpfilename='test'+str(d)+'.tif'
        f,h = urllib.request.urlretrieve(full_url,tmpfilename)
        #print h
        ds=gdal.Open(tmpfilename)

        cube_arr.append(ds.ReadAsArray())
        #print d

    #calculate the regional VCI
    cube_arr_ma=ma.masked_equal(numpy.asarray(cube_arr),0) #nan val is 0 for lst
    #VCI=(cube_arr_ma[cur_pos,:,:]-numpy.amin(cube_arr_ma,0))*1.0/(numpy.amax(cube_arr_ma,0)-numpy.amin(cube_arr_ma,0))
    NTAI=(cube_arr_ma[cur_pos,:,:]-numpy.mean(cube_arr_ma,0))*1.0/(numpy.amax(cube_arr_ma,0)-numpy.amin(cube_arr_ma,0))
    
    return NTAI, cur_date
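# NTAI here is a normalized temperature anomaly index: the deviation of the
# requested date from the temporal mean, scaled by the temporal range, so
# values fall in [-1, 1]. The same computation on a tiny synthetic cube
# (shapes, values and the date index below are made up).
import numpy as np
import numpy.ma as ma

cube = np.random.randint(1, 300, (12, 4, 5)).astype(float)  # 12 dates, 4x5 pixels
cube[0, 0, 0] = 0                                           # 0 marks missing LST

cube_ma = ma.masked_equal(cube, 0)
cur_pos = 5
ntai = (cube_ma[cur_pos] - cube_ma.mean(axis=0)) / \
       (cube_ma.max(axis=0) - cube_ma.min(axis=0))
print(ntai.shape)                                           # (4, 5)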
Example #59
0
def gran_AOT(aotList, shrink=1):
    '''
    Returns the granulated AOT
    '''

    try:
        reload(viirsAero)
        reload(viirs_edr_data)
        del (viirsAeroObj)
        del (latArr)
        del (lonArr)
        del (aotArr)
        del (retArr)
        del (qualArr)
        del (lsmArr)
    except:
        pass

    LOG.debug("Creating viirsAeroObj...")
    importlib.reload(viirsAero)  # assumes "import importlib"; bare reload() is Python 2 only
    viirsAeroObj = viirsAero.viirsAero()
    LOG.debug("done")

    # Determine the correct fillValue
    trimObj = ViirsTrimTable()
    eps = 1.e-6

    # Build up the swath...
    for grans in np.arange(len(aotList)):

        LOG.debug("\nIngesting granule {} ...".format(grans))
        retList = viirsAeroObj.ingest(aotList[grans], 'aot', shrink, 'linear')

        try:
            latArr = np.vstack((latArr, viirsAeroObj.Lat[:, :]))
            lonArr = np.vstack((lonArr, viirsAeroObj.Lon[:, :]))
            ModeGran = viirsAeroObj.ModeGran
            LOG.debug("subsequent geo arrays...")
        except NameError:
            latArr = viirsAeroObj.Lat[:, :]
            lonArr = viirsAeroObj.Lon[:, :]
            ModeGran = viirsAeroObj.ModeGran
            LOG.debug("first geo arrays...")

        try:
            aotArr = np.vstack((aotArr, viirsAeroObj.ViirsAProdSDS[:, :]))
            retArr = np.vstack((retArr, viirsAeroObj.ViirsAProdRet[:, :]))
            qualArr = np.vstack((qualArr, viirsAeroObj.ViirsCMquality[:, :]))
            #lsmArr  = np.vstack((lsmArr ,viirsAeroObj.LandSeaMask[:,:]))
            LOG.debug("subsequent aot arrays...")
        except NameError:
            aotArr = viirsAeroObj.ViirsAProdSDS[:, :]
            retArr = viirsAeroObj.ViirsAProdRet[:, :]
            qualArr = viirsAeroObj.ViirsCMquality[:, :]
            #lsmArr  = viirsAeroObj.LandSeaMask[:,:]
            LOG.debug("first aot arrays...")

        LOG.debug("Intermediate aotArr.shape = {}".format(str(aotArr.shape)))
        LOG.debug("Intermediate retArr.shape = {}".format(str(retArr.shape)))
        LOG.debug("Intermediate qualArr.shape = {}".format(str(qualArr.shape)))
        #LOG.debug("Intermediate lsmArr.shape = {}".format(str(lsmArr.shape)))

    lat_0 = latArr[np.shape(latArr)[0] // 2, np.shape(latArr)[1] // 2]  # integer indices
    lon_0 = lonArr[np.shape(lonArr)[0] // 2, np.shape(lonArr)[1] // 2]

    LOG.debug("lat_0,lon_0 = ", lat_0, lon_0)

    try:
        # Determine masks for each fill type, for the VCM IP
        aotFillMasks = {}
        for fillType in trimObj.sdrTypeFill.keys():
            fillValue = trimObj.sdrTypeFill[fillType][aotArr.dtype.name]
            if 'float' in fillValue.__class__.__name__:
                aotFillMasks[fillType] = ma.masked_inside(
                    aotArr, fillValue - eps, fillValue + eps).mask
                if (aotFillMasks[fillType].__class__.__name__ != 'ndarray'):
                    aotFillMasks[fillType] = None
            elif 'int' in fillValue.__class__.__name__:
                aotFillMasks[fillType] = ma.masked_equal(aotArr,
                                                         fillValue).mask
                if (aotFillMasks[fillType].__class__.__name__ != 'ndarray'):
                    aotFillMasks[fillType] = None
            else:
                LOG.debug("Dataset was neither int not float... a worry")
                pass

        # Construct the total mask from all of the various fill values
        fillMask = ma.array(np.zeros(aotArr.shape, dtype=bool))  # np.bool is removed in NumPy >= 1.24
        for fillType in trimObj.sdrTypeFill.keys():
            if aotFillMasks[fillType] is not None:
                fillMask = fillMask * ma.array(np.zeros(aotArr.shape, dtype=bool),\
                    mask=aotFillMasks[fillType])

        # Define any masks based on the quality flags...
        ViirsCMqualityMask = ma.masked_equal(qualArr, 0)  # VCM quality == poor
        ViirsAProdRetMask = ma.masked_not_equal(retArr,
                                                0)  # Interp/NAAPS/Climo

        # Define the land and water masks
        #ViirsLandMask      = ma.masked_greater(lsmArr,1)
        #ViirsWaterMask     = ma.masked_less(lsmArr,2)

        # Define the total mask
        totalMask = fillMask * ViirsCMqualityMask * ViirsAProdRetMask

        try:
            data = ma.array(aotArr, mask=totalMask.mask)
            lats = ma.array(latArr, mask=totalMask.mask)
            lons = ma.array(lonArr, mask=totalMask.mask)
        except ma.core.MaskError:
            LOG.debug(
                ">> error: Mask Error, probably mismatched geolocation and product array sizes, aborting..."
            )
            sys.exit(1)

    except Exception as err:
        LOG.debug(">> error: {}...".format(str(err)))
        sys.exit(1)
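# The fill-value handling above masks float fills with a tolerance band and
# integer fills exactly; a distilled sketch of that branch (the fill value
# below is invented for illustration).
import numpy as np
import numpy.ma as ma

def fill_mask(arr, fill_value, eps=1.e-6):
    # Floats need a tolerance band; exact equality is safe for ints.
    if np.issubdtype(arr.dtype, np.floating):
        return ma.masked_inside(arr, fill_value - eps, fill_value + eps).mask
    return ma.masked_equal(arr, fill_value).mask

aot = np.array([0.12, -999.9, 0.34, -999.9])
print(fill_mask(aot, -999.9))   # [False  True False  True]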
Example #60
0
    def __getitem__(self, index):
        #outputs:
        #pointcloud: xyz points of projected cropped depth image (just the object region)
        #choose: select num_points points from the cropped region of the image, this is a mask
        #img_masked: cropped region of image (RGB) with object
        #target: Rt matrix
        #model_points: just the model points, num_pt_mesh_small of them
        #obj: class (in our case, just a single number since we only have 1 object)
        img = Image.open(self.list_rgb[index])
        ori_img = np.array(img)

        depth = np.array(Image.open(self.list_depth[index]))
        label = np.array(Image.open(self.list_label[index]))
        obj = self.list_obj[index]

        #transform indices are 1 off from image indices
        gt_trans = self.meta[obj][self.list_meta[index] + 1]

        #remove infinities
        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, np.max(depth)))

        #get mask crop
        mask_label = ma.getmaskarray(ma.masked_equal(label, 65535))
        mask = mask_label * mask_depth

        if self.add_noise:
            img = self.trancolor(img)

        img = np.array(img)[:, :, :3]

        #remove the horizon from Unity by setting infinite-distance points to gray
        img[depth == np.max(depth)] = self.gray

        img = np.transpose(img, (2, 0, 1))
        img_masked = img

        rmin, rmax, cmin, cmax = get_bbox(mask_label)
        img_masked = img_masked[:, rmin:rmax, cmin:cmax]

        target_r_quat = convert_quat(gt_trans[1])
        target_r = quaternion_rotation_matrix(target_r_quat)
        target_t = gt_trans[0] * 1000
        target_t[2] = -target_t[2]
        add_t = np.array([
            random.uniform(-self.noise_trans, self.noise_trans)
            for i in range(3)
        ])

        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
        if len(choose) == 0:
            cc = torch.LongTensor([0])
            return (cc, cc, cc, cc, cc, cc)

        if len(choose) > self.num:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num - len(choose)), 'wrap')

        depth_projected = self.udp.project_depth(depth)[rmin:rmax,
                                                        cmin:cmax].reshape(
                                                            (-1, 3))

        depth_masked = depth_projected[choose].astype(np.float32)
        choose = np.array([choose])
        cloud = depth_masked

        if self.add_noise:
            cloud = np.add(cloud, add_t)

        model_points = self.pt[obj] * 10
        dellist = [j for j in range(0, len(model_points))]
        dellist = random.sample(dellist,
                                len(model_points) - self.num_pt_mesh_small)
        model_points = np.delete(model_points, dellist, axis=0)

        #want to swap the axes of the target
        x_90 = np.zeros((3, 3))
        x_90[0, 0] = 1
        x_90[1, 2] = 1
        x_90[2, 1] = 1

        x_180 = np.zeros((3, 3))
        x_180[0, 0] = 1
        x_180[1, 1] = -1
        x_180[2, 2] = -1

        y_180 = np.zeros((3, 3))
        y_180[0, 0] = -1
        y_180[1, 1] = 1
        y_180[2, 2] = -1

        target = np.copy(model_points)
        # target_mean = np.mean(target, axis=0)

        # target -= target_mean
        # target = np.dot(target, swap_left.T)
        # target += target_mean

        target = np.dot(target, (target_r @ y_180).T)

        if self.add_noise:
            target = np.add(target, target_t + add_t * 10000)
        else:
            target = np.add(target, target_t)

        #AT THE VERY END, CONVERT EVERYTHING TO METERS
        return torch.from_numpy(cloud.astype(np.float32) / 10000.), \
               torch.LongTensor(choose.astype(np.int32)), \
               self.norm(torch.from_numpy(img_masked.astype(np.float32))), \
               torch.from_numpy(target.astype(np.float32) / 10000.), \
               torch.from_numpy(model_points.astype(np.float32) / 10000.), \
               torch.LongTensor([self.objlist.index(obj)])
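# The loader above relies on a quaternion_rotation_matrix helper that is not
# shown. One conventional implementation, assuming a unit quaternion in
# (w, x, y, z) order; the dataset's actual ordering may differ, which is
# presumably what convert_quat handles.
import numpy as np

def quaternion_rotation_matrix(q):
    """Rotation matrix from a quaternion q = (w, x, y, z)."""
    w, x, y, z = q / np.linalg.norm(q)   # normalize defensively
    return np.array([
        [1 - 2*(y*y + z*z),     2*(x*y - w*z),     2*(x*z + w*y)],
        [    2*(x*y + w*z), 1 - 2*(x*x + z*z),     2*(y*z - w*x)],
        [    2*(x*z - w*y),     2*(y*z + w*x), 1 - 2*(x*x + y*y)]])

R = quaternion_rotation_matrix(np.array([1., 0., 0., 0.]))
print(np.allclose(R, np.eye(3)))   # identity quaternion -> identity matrix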