Code Example #1
File: agent_plotter.py Project: RabbitNick/extrasy
def plot_visit_table(fignum, data):
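    # NB: relies on pylab-style imports (figure, pcolormesh, colorbar, clim)
    # and module-level plot settings (visitThresh, myCmap, font sizes)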
    
    # set up any custom plot formatting functions
    def format_visit_table_coord(x, y):
        # map the cursor position to integer cell indices up front, so the
        # fallback below can report them even when no table exists yet
        col = int(x)
        row = int(y)
        if data["visit_tables"]:
            visit_table = data["visit_tables"][-1].T
            (num_actions, num_states) = visit_table.shape
            if 0 <= col < num_states and 0 <= row < num_actions:
                z = visit_table[row, col]
                return 'row=%d, col=%d, z=%1.4f' % (row, col, z)
            else:
                return 'row=%d, col=%d' % (row, col)
        else:
            return 'row=%d, col=%d' % (row, col)
    
    fig=figure(fignum)
    # make sure there is data to plot
    if data["visit_tables"]:
        
        epoch_nums = data["epoch_nums"]
        
        # check if mesh exists
        if fig.axes and not fig.axes[0].collections:
            # if plotting for the first time, make a new mesh
            plot_data = ma.masked_less_equal(data["visit_tables"][-1].T, visitThresh)
            
            pcolormesh(plot_data,edgecolors="black", cmap=myCmap)
            
            ax = fig.axes[0]
            #cbaxes = fig.add_axes([0.8, 0.1, 0.03, 0.8])
            cb = colorbar(ax=ax,use_gridspec=True, cmap=myCmap)
            clim(0, np.max( [np.max(plot_data),10]))
            cb.ax.set_ylabel("Visit Count", fontsize=yTitleFontSize, weight='bold')
            ax.format_coord = format_visit_table_coord
            ax.set_title('Visitation Table for Epoch %i\n'%epoch_nums[-1], fontsize=figTitleFontSize, weight='bold')
            ax.set_xlabel("State", fontsize=yTitleFontSize, weight='bold')
            ax.set_ylabel("Action", fontsize=yTitleFontSize, weight='bold')
            
            ax.set_yticks(np.array(range(data["num_actions"]))+.5 )
            ax.set_yticklabels(range(data["num_actions"]))
            ax.set_xticks(np.array(range(data["num_states"]))+.5 )
            ax.set_xticklabels(range(data["num_states"]))
            
            fig.tight_layout()
        elif fig.axes:
            # otherwise update existing mesh data values
            ax = fig.axes[0]
            plot_data = ma.masked_less_equal(data["visit_tables"][-1].T, visitThresh)
            clim(0, np.max( [np.max(plot_data),10]))
            ax.collections[0].set_array(plot_data.ravel())
            ax.set_title('Visitation Table for Epoch %i\n'%epoch_nums[-1], fontsize=figTitleFontSize, weight='bold')
    
    
        fig.canvas.draw()
Code Example #2
 def __init__(self, space_group, lattice_parameters, elements, coordinates,miller_indexes,reconstruction):
     self._define_parameters_(space_group, lattice_parameters)
     self._define_elements_(elements)
     self._define_coordinates_(coordinates)
     self.lattice = mg.Lattice.from_parameters(self.a,self.b,self.c,self.alpha,self.beta,self.gamma)
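     # zero out numerical noise: entries <= 1e-15 are masked and filled with 0.
     # (note this also zeroes genuinely negative matrix components)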
     self.Amat = ma.masked_less_equal(self.lattice.matrix,10 ** (-15)).filled(0.)
     self.Bmat = ma.masked_less_equal(ma.masked_equal(self.lattice.reciprocal_lattice.matrix,-0.).filled(0.),10 ** (-15)).filled(0.)
     self.crystal = mg.Structure.from_spacegroup(self.space_group,self.lattice,self.elements,self.coordinates)
     self._define_base_(self.crystal.sites)
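     # reciprocal-space vector corresponding to the given Miller indexes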
     self.kvec = np.dot(np.array(miller_indexes),self.Bmat)
     self._get_hkl_oriented_lattice_(self.kvec)
     self.reconstruction_string = reconstruction
     self._get_surface_(self.Amat, self.reconstruction_string)
Code Example #3
def error_independent(consumption, stressor, pointer_array, spatial_unit,
                      stressor_name, stressor_unit, latitude, longitude,
                      idlist):
    integral1 = np.sum(consumption, axis=1)
    threshold = stats.scoreatpercentile(integral1[integral1 > 0], per=25)
    integral1temp = ma.masked_less_equal(
        integral1,
        threshold)  # mask cells at or below the 25th percentile of positive consumption integrals
    mask = integral1temp.mask

    s = exclude(stressor, mask)
    c = exclude(consumption, mask)

    sd = np.std(s, axis=1)
    mean = np.mean(s, axis=1)

    sd1 = np.std(c, axis=1)
    mean1 = np.mean(c, axis=1)

    err = np.sqrt(np.square(sd / mean) + np.square(sd1 / mean1))

    map_err = make_map(err, pointer_array, idlist, spatial_unit)
    plt.matshow(map_err,
                vmin=stats.scoreatpercentile(err, 5),
                vmax=stats.scoreatpercentile(err, 95))
    #tifffile.imsave(Input.outputDir +'/'+ 'ff_integral_'+ stressor_name + '_' + Input.name_timeperiod + '_' + Input.name_scale + '.tiff', map_ff)
    new_map_netcdf(
        Input.outputDir + '/' + 'error' + stressor_name + '_' +
        Input.name_timeperiod + '_' + Input.name_scale, map_err, stressor_name,
        stressor_unit, latitude, longitude)

    return err, map_err
Code Example #4
def FF_mean(consumption, stressor, pointer_array, spatial_unit, stressor_name,
            stressor_unit, latitude, longitude, idlist):  #for gw head

    integral = np.mean(
        stressor, axis=1)  #careful with the time conversion: stressor time
    integral1 = np.sum(
        consumption,
        axis=1)  #careful with the time conversion: consumption volume
    #integral1 = ma.masked_inside(integral1, -1, 1825)#100 L/day per human -> at least 1 human in the catchment
    #integral1 = ma.masked_values(integral1, 0)
    #integral1 = ma.masked_inside(integral1, -1, 1)#100 L/day per human -> at least 5 human in the catchment
    threshold = stats.scoreatpercentile(integral1[integral1 > 0], per=25)
    integral1temp = ma.masked_less_equal(
        integral1,
        threshold)  # mask cells at or below the 25th percentile of positive consumption integrals
    integraltemp = ma.masked_where(integral1temp.mask, integral)

    ff = integraltemp / integral1temp
    ff = ma.masked_where(np.isnan(ff), ff)

    map_ff = make_map(ff, pointer_array, idlist, spatial_unit)
    plt.matshow(map_ff,
                vmin=stats.scoreatpercentile(ff, 5),
                vmax=stats.scoreatpercentile(ff, 95))
    #tifffile.imsave(Input.outputDir +'/'+ 'ff_integral_'+ stressor_name + '_' + Input.name_timeperiod + '_' + Input.name_scale + '.tiff', map_ff)
    new_map_netcdf(
        Input.outputDir + '/' + 'ff_mean_' + stressor_name + '_' +
        Input.name_timeperiod + '_' + Input.name_scale, map_ff, stressor_name,
        stressor_unit, latitude, longitude)

    plt.imshow(map_ff)

    return ff, map_ff
Code Example #5
    def _check_geophysical(self, dataset, product_type):
        """

        :type dataset: Dataset
        :type product_type: ProductType
        """
        spec = product_type.get_geophysical_check_spec()
        if len(spec) != 0:
            a = ProductVerifier.__get_data(dataset, spec[0], scale=True)
            b = ProductVerifier.__get_data(dataset, spec[1], scale=True)
            d = a - b
            # count pixels with differences less than the minimum
            suspicious_data = ma.masked_greater_equal(d, spec[2])
            suspicious_data_count = suspicious_data.count()
            self.report['geophysical_minimum_check'] = suspicious_data_count
            if suspicious_data_count > 0:
                filename = os.path.basename(self.source_pathname)
                self.report['geophysical_minimum_check_failed_for'] = filename
            # count pixels with differences greater than the maximum
            suspicious_data = ma.masked_less_equal(d, spec[3])
            suspicious_data_count = suspicious_data.count()
            self.report['geophysical_maximum_check'] = suspicious_data_count
            if suspicious_data_count > 0:
                filename = os.path.basename(self.source_pathname)
                self.report['geophysical_maximum_check_failed_for'] = filename
Code Example #6
File: colors.py Project: blitzmann/Pyfa-skel
 def autoscale(self, A):
     '''
     Set *vmin*, *vmax* to min, max of *A*.
     '''
     A = ma.masked_less_equal(A, 0, copy=False)
     self.vmin = ma.min(A)
     self.vmax = ma.max(A)
Code Example #7
File: micp.py Project: sunjiangming/GFA_MICP
def computeICPValues(X_properTrain, y_properTrain, X_Test, y_Test,
                     X_calibration, y_calibration):
    # Compute nonconformity scores
    # The reason for doing this is that libsvm always uses the label of the first training
    # example to define the negative side of the decision boundary, unless class labels are -1 and 1.
    #zeroClassFirst(X_properTrain,y_properTrain)
    # Build the model and calculate nonconformity scores for the calibration set.
    y_calibrationAlphas = X_calibration
    conditionZero = ma.masked_less_equal(y_calibration, 0.5)
    conditionOne = ma.masked_greater(y_calibration, 0.5)
    if (y_properTrain.max() > y_properTrain.min()
        ):  # The higher value response will have the higher decision value.
        alpha_zeros = np.extract(conditionZero.mask, y_calibrationAlphas)
        alpha_ones = np.extract(
            conditionOne.mask, -1.0 *
            y_calibrationAlphas)  # Negate to create a nonconformity score.
    else:  # The lower value response will have the higher decision value.
        print("At least two labels should be prepared!!!")
        sys.exit()

    alpha_zeros.sort()
    alpha_ones.sort()

    # Compute p-values for the test examples.
    y_testAlphas = X_Test
    # Searching is done from the left, thus a larger value of searchsorted is more nonconforming.
    # Indexing starts at 0, which is why we add 1 to the searchsorted rank.
    p_zeros = 1.0 - 1.0 * (np.searchsorted(alpha_zeros, y_testAlphas) +
                           1) / (len(alpha_zeros) + 1)
    p_ones = 1.0 - 1.0 * (np.searchsorted(alpha_ones, -1.0 * y_testAlphas) +
                          1) / (len(alpha_ones) + 1)

    return p_zeros, p_ones
Code Example #8
File: colors.py Project: rinigan/matplotlib
    def __call__(self, value, clip=None):
        if clip is None:
            clip = self.clip

        result, is_scalar = self.process_value(value)

        result = ma.masked_less_equal(result, 0, copy=False)

        self.autoscale_None(result)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin <= 0:
            raise ValueError("values must all be positive")
        elif vmin == vmax:
            result.fill(0)
        else:
            if clip:
                mask = ma.getmask(result)
                result = ma.array(np.clip(result.filled(vmax), vmin, vmax), mask=mask)
            # in-place equivalent of above can be much faster
            resdat = result.data
            mask = result.mask
            if mask is np.ma.nomask:
                mask = resdat <= 0
            else:
                mask |= resdat <= 0
            cbook._putmask(resdat, mask, 1)
            np.log(resdat, resdat)
            resdat -= np.log(vmin)
            resdat /= np.log(vmax) - np.log(vmin)
            result = np.ma.array(resdat, mask=mask, copy=False)
        if is_scalar:
            result = result[0]
        return result
Code Example #9
File: tools.py Project: Xinjun-Wu/kinugawa-flood
def area_extract(target_area, extract_area, buffer_height, buffer_width,
                 equal_value, less_equal_value):
    """ #输入为二阶array或者tensor 
        #通过目标区域的buffered mask 提取下一时刻的研究区域
        #目标区域的掩码识别值为equal_value,当值为等于mask_value时,掩码值为True.否则为False
        #目标区域的掩码识别值为less_equal_value,当值为小于等于mask_value时,掩码值为True.否则为False
        #less_equal_value和equal_value二选一,另一个值输入为None
        #buffer 方式为矩形buffer,根据每一个边缘点的矩形范围创建buffer
        # 
    """
    if isinstance(target_area, torch.Tensor):
        target_area = target_area.cpu()
        target_area = target_area.numpy()

    if isinstance(extract_area, torch.Tensor):
        extract_area = extract_area.cpu()
        extract_area = extract_area.numpy()

    if equal_value is not None and less_equal_value is None:
        target_masked = ma.masked_equal(target_area, equal_value)
    elif equal_value is None and less_equal_value is not None:
        target_masked = ma.masked_less_equal(target_area, less_equal_value)
    else:
        raise ValueError('Exactly one of equal_value and less_equal_value must be None')

    input_mask = target_masked.mask
    if buffer_height == 0 and buffer_width == 0:
        buffered_mask = input_mask
    else:
        buffered_mask = _mask_buffer(input_mask, buffer_height, buffer_width)

    extracted_area = ma.masked_array(extract_area, buffered_mask, fill_value=0)
    extracted_area = extracted_area.filled()
    return extracted_area
Code Example #10
File: strFact.py Project: desicos/desicos
    def _getPatternTStrip(self, tsStart, tsStop):
        ntpat = 800
        t0 = np.mod(tsStart, 2.0 * np.pi)
        t1 = np.mod(tsStop, 2.0 * np.pi)

        tpat = np.zeros((2, ntpat))
        tpat[0] = np.linspace(0, 2.0 * np.pi, tpat.shape[1])

        m1 = ma.masked_greater_equal(tpat[0], t0).mask
        m2 = ma.masked_less_equal(tpat[0], t1).mask

        mm = m1 * m2
        if tsStop > 2.0 * np.pi or tsStart < 0.0:
            mm = m1 + m2

#        tpat[1]=mm*(self.AT*np.random.random(ntpat))   #((self.AT)/2.0 + 0.5*self.AT*np.random.random(ntpat))

        st = self.AT * np.ones(ntpat)
        zeroKeys = np.random.randint(0, ntpat, 70)
        st[zeroKeys] = 0.3 * self.AT
        tpat[1] = st * mm

        tpi = np.hstack((tpat, tpat, tpat))
        tpi[0, 0:tpat.shape[1]] = tpat[0] - 2 * np.pi
        tpi[0, 2 * tpat.shape[1]:] = tpat[0] + 2 * np.pi
        return tpi
Code Example #11
File: colors.py Project: blitzmann/Pyfa-skel
    def __call__(self, value, clip=None):
        if clip is None:
            clip = self.clip

        if cbook.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)

        val = ma.masked_less_equal(val, 0, copy=False)

        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin<=0:
            raise ValueError("values must all be positive")
        elif vmin==vmax:
            result = 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                                mask=mask)
            result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
        if vtype == 'scalar':
            result = result[0]
        return result
Code Example #12
File: gz2_tasks.py Project: vrooje/galaxyzoo2
def determine_ratio_baseline_sigma(plot=False):
    data = pyfits.getdata(fits_path+'local_counts_baseline.fits')
    el = data[:,:,0].astype(float)
    sp = data[:,:,1].astype(float)
    mask_el = el < 1
    mask_sp = sp < 1
    mask_all = np.logical_and(mask_el, mask_sp)
    mask = np.logical_or(mask_el, mask_sp)
    ratio = pyfits.getdata(fits_path+'local_ratio_baseline.fits')
    count_sum = (el + sp).astype(float)
    count_product = (el * sp).astype(float)
    np.putmask(count_product, mask, 1.0)
    np.putmask(count_sum, mask, 1.0)
    sigma = (np.log10(np.e))**2 * count_sum / count_product
    sigma = np.sqrt(sigma)
    np.putmask(sigma, mask, unknown_ratio)

    sigma_masked = ma.masked_less_equal(sigma, unknown_ratio)

    if plot:
        fig = plt.figure(3)
        fig.clf()
        ax = fig.add_subplot(111)
        im = ax.imshow(sigma_masked, interpolation='nearest', origin='lower')
        cb = plt.colorbar(im)
        ax.set_aspect('auto')
        ax.set_xlabel(r'$M_R [mag]$',fontsize=22)
        ax.set_ylabel(r'$R_{50} [kpc]$',fontsize=22)

    pyfits.writeto(fits_path+'local_ratio_baseline_sigma.fits',
           sigma, clobber=True)    
Code Example #13
File: __init__.py Project: SciTools/iris
def simple_3d_mask():
    """
    Returns an abstract three dimensional cube that has data masked.

    >>> print(simple_3d_mask())
    thingness / (1)                     (wibble: 2; latitude: 3; longitude: 4)
     Dimension coordinates:
          wibble                           x            -             -
          latitude                         -            x             -
          longitude                        -            -             x

    >>> print(simple_3d_mask().data)
    [[[-- -- -- --]
      [-- -- -- --]
      [-- 9 10 11]]

    [[12 13 14 15]
     [16 17 18 19]
     [20 21 22 23]]]

    """
    cube = simple_3d()
    cube.data = ma.asanyarray(cube.data)
    cube.data = ma.masked_less_equal(cube.data, 8.)
    return cube
Code Example #14
File: fitting.py Project: ZachGlassman/Instrumental
def _linear_fit_decay(x, y):
    # Takes ndarrays for now, DON'T USE PINT QUANTITIES!
    # From wolfram Mathworld; Need to fix to re-use some calculations!!
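    # (assumes numpy's sum, log and exp are in scope, e.g. via a star import)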

    # Linearize by removing offset
    c = y[-20:].mean()
    y = y - c

    # Mask out negative values that log() can't handle
    y_masked = ma.masked_less_equal(y, 0)
    x_masked = ma.array(x, mask=y_masked.mask)

    # Rename for compactness
    x = x_masked.compressed()
    y = y_masked.compressed()

    # Simply ignore every point after y goes <= 0
#    try:
#        last = where(y<=0)[0][0]
#        x = x[:last]
#        y = y[:last]
#        plt.plot(x, y)
#    except IndexError:
#        # Do nothing if y never dips <= 0
#        pass

    a_num = sum(x*x*y)*sum(y*log(y)) - sum(x*y)*sum(x*y*log(y))
    b_num = sum(y)*sum(x*y*log(y)) - sum(x*y)*sum(y*log(y))
    den = sum(y)*sum(x*x*y) - sum(x*y)**2

    a = a_num / den
    b = b_num / den
    return exp(a), b, c
Code Example #15
File: cnt_helpers.py Project: neishm/pygeode
 def autoscale(self, A):
   '''
   Set *vmin*, *vmax* to a symmetric range: +/- the max of abs(*A*).
   '''
   A = ma.masked_less_equal(np.abs(A), 1e-16, copy=False)
   self.vmin = -ma.max(A)
   self.vmax = ma.max(A)
   self.vin = ma.min(A)
Code Example #16
def stats(data, bandOut):
    # stats
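    # mask out nodata pixels (float32 nodata fill is about -3.4e+38)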
    masked_data = ma.masked_less_equal(data, -3.4e+38)
    data_min = float(masked_data.min())
    data_max = float(masked_data.max())
    data_std = numpy.std(masked_data)
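    # note: SetStatistics receives the midrange of min and max, not the true mean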
    bandOut.SetStatistics(data_min, data_max, numpy.mean([data_max, data_min]),
                          data_std)
Code Example #17
File: colors.py Project: blitzmann/Pyfa-skel
 def autoscale_None(self, A):
     ' autoscale only None-valued vmin or vmax'
     if self.vmin is not None and self.vmax is not None:
         return
     A = ma.masked_less_equal(A, 0, copy=False)
     if self.vmin is None:
         self.vmin = ma.min(A)
     if self.vmax is None:
         self.vmax = ma.max(A)
Code Example #18
File: envisat_sm.py Project: pkaza/SAHGutils
    def get_data(self, ll_lat=None, ll_lon=None, ur_lat=None, ur_lon=None):
        """Read the data into a masked 2D Numpy array.

        Both 'no data' and areas with poor surface characteristics are masked
        in the returned array. If ll_lat, ll_lon, ur_lat, ur_lon are specified
        then a masked data array is constructed to fill this region and the
        ENVISAT data is embedded in the correct location, otherwise only the
        ENVISAT data is returned.

        """
        bin_name = self.name + '.img'
        data = np.fromfile(bin_name, np.float32)
        if sys.byteorder == 'big':
            data = data.byteswap()

        nrows = int(self.info['lines'])
        ncols = int(self.info['samples'])

        data = data.reshape(nrows, ncols)

        if (ll_lat is not None) and (ll_lon is not None) \
           and (ur_lat is not None) and (ur_lon is not None):
            # embed data in large array
            dlon = float(self.info['map info'].split(',')[5].strip())
            dlat = float(self.info['map info'].split(',')[5].strip())
            lon0 = float(self.info['ul_lon'])
            lat0 = float(self.info['ul_lat'])

            big_nrows = int(ceil((ur_lat - ll_lat)/dlat))
            big_ncols = int(ceil((ur_lon - ll_lon)/dlon))

            big_data = np.ones((big_nrows, big_ncols))
            big_data *= -10000
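            # -10000 marks cells outside the ENVISAT scene; masked as <= -9999 below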

            start_row = int(ceil((ur_lat - lat0)/dlat))
            start_col = int(ceil((lon0 - ll_lon)/dlon))

            big_data[start_row:start_row+nrows, start_col:start_col+ncols] = data
            data = ma.masked_less_equal(big_data, -9999)
        else:
            data = ma.masked_less_equal(data, -9999)

        return data
Code Example #19
 def _define_base_(self, sites):
     base = {}
     for i, site in enumerate(sites):
         base[i] = {
             'Element':
             site.species.elements[0],
             'Coordinates':
             ma.masked_less_equal(site.coords, 10**(-15)).filled(0.)
         }
     self.base = base
Code Example #20
File: process_data.py Project: tmatsuzawa/turbulence
def get_mask_for_unphysical_using_cutoff(U, cutoff=None, mode='less'):
    if mode == 'less' or mode == 'l':
        U_masked = ma.masked_less(U, cutoff)
    elif mode == 'lessequal' or mode == 'leq':
        U_masked = ma.masked_less_equal(U, cutoff)
    elif mode == 'greater' or mode == 'g':
        U_masked = ma.masked_greater(U, cutoff)
    elif mode == 'greaterequal' or mode == 'geq':
        U_masked = ma.masked_greater_equal(U, cutoff)
    return U_masked.mask
Code Example #21
File: process_data.py Project: tmatsuzawa/turbulence
def get_mask_for_unphysical_using_median(U, cutoffratio=0.4, mode='less'):
    median = np.median(U)
    if mode == 'less' or mode == 'l':
        U_masked = ma.masked_less(U, median * cutoffratio)
    elif mode == 'lessequal' or mode == 'leq':
        U_masked = ma.masked_less_equal(U, median * cutoffratio)
    elif mode == 'greater' or mode == 'g':
        U_masked = ma.masked_greater(U, median * cutoffratio)
    elif mode == 'greaterequal' or mode == 'geq':
        U_masked = ma.masked_greater_equal(U, median * cutoffratio)
    return U_masked.mask
Code Example #22
    def interpolate_pos(self, pos):
        """
        Call the interpolation function at an arbitrary position (in Cartesian
        coordinates).

        Parameters
        ----------
        pos : (3x1) array or list/array of (3x1) arrays
            Input position(s) at which to evaluate the volumetric data.
            Note that vectorized calls to map_coordinates are highly efficient,
            i.e. make use of it whenever you can!
        
        Returns
        -------
        rho : float or (Nx1) array of floats 
            Volumetric data at the given positions. Output format depends on
            class variable convert_to_rs as given upon initialization.
        """

        vox_index = self._which_voxel(pos)

        rho = map_coordinates(self._cube_data,
                              vox_index,
                              order=self.order,
                              mode=self.mode,
                              prefilter=self._prefilter)

        try:
            rho = float(rho)
            if self.convert_to_rs:
                # rho can be negative because of numerical reasons but this is unphysical...
                if rho <= 0.:
                    return nan

                # [rho] e/A**3 --> [rho] e/a.u.**3
                rho *= self.A2au**(-3)

                return (3. / (4 * np.pi * rho))**(1. / 3.)
            else:
                return rho

        except TypeError:
            # vectorized call...
            if self.convert_to_rs:
                # True where rho <= 0, i.e. unphysical values
                mask = masked_less_equal(rho, 0).mask
                valid = np.logical_not(mask)

                # convert only the physical (rho > 0) entries, as in the
                # scalar branch above, and flag the rest as nan
                rho[valid] *= self.A2au**(-3)
                rho[valid] = (3. / (4 * np.pi * rho[valid]))**(1. / 3.)
                rho[mask] = nan

                return rho
            else:
                return rho
Code Example #23
File: AdjacencyMatrix.py Project: jedlin21/Study
def makeAM(vertex, howManyOnes
           ):  # number of vertices in the graph, fraction of connections
    adjacencyM = np.random.rand(vertex, vertex)  # make random matrix
    zeros = ma.masked_greater(
        adjacencyM, howManyOnes).mask  #find where elements are greater than X
    ones = ma.masked_less_equal(
        adjacencyM, howManyOnes).mask  #find where elements are less than X

    adjacencyM[zeros] = 0  # mask = 0
    adjacencyM[ones] = 1  # mask = 1

    return adjacencyM
Code Example #24
def azimuthalAverage(image, center=None, maskval=0):
    """
    calculate the azimuthally averaged radial profile.

    image - 2D image
    center - [x,y] pixel coordinates used as the center. the default is
             None which then uses the center of the image (including
             fractional pixels).
    maskval - threshold value for including data in the profile
    """

    # calculate the indices from the image
    y, x = np.indices(image.shape)

    # default to image center if no center given
    if center is None:
        center = np.array([(x.max() - x.min()) / 2.0,
                           (x.max() - x.min()) / 2.0])

    r = np.hypot(x - center[0], y - center[1])

    # get sorted radii and sort image accordingly
    ind = np.argsort(r.flat)
    i_sorted = image.flat[ind]

    # for FP data we need to at least mask out data at
    # 0 or less so the gaps get ignored.
    # also want to mask out area outside of aperture
    # so use given maskval to do that.
    i_ma = ma.masked_less_equal(i_sorted, maskval)
    mask = ma.getmask(i_ma)

    # remove masked data points from further analysis
    r_sorted = ma.compressed(ma.array(r.flat[ind], mask=mask))
    i_mask = ma.compressed(i_ma)

    # get the integer part of the radii (bin size = 1)
    r_int = r_sorted.astype(int)

    # find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # assumes all radii represented
    rind = np.where(deltar)[0]       # location of changed radius
    nr_tot = rind[1:] - rind[:-1]    # total number of points in radius bin

    # cumulative sum to figure out sums for each radius bin
    csim = ma.cumsum(i_mask, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]

    # calculate and return profile of mean within each bin
    radial_prof = tbin / nr_tot

    return radial_prof
Code Example #25
def contourValuesBelowEqual(lons, lats, rawdata, contourBelowValue):
    """
    Contours a grid for lats and lons passed in having values below the given value or equal

    Args: 
            lons : a grid of the longitudes
            lats : a grid of the latitudes
            rawdata : a grid of values
            contourBelowValue : the value of which to contour anything below or equal
            Note : the grids must be the same dimension and size
    Returns:
            A list of polygons  
    """
    masked_grid = mask.masked_less_equal(rawdata, contourBelowValue)
    return contour(lons, lats, masked_grid)
Code Example #26
File: mainwindow.py Project: richli/dame
    def update_image(self, tabname):
        """Refresh the image in the tab.

        tabname can be: "left", "right", "split", "fade"

        """
        if tabname in ("left", "right"):
            logging.info("Refreshing image {}".format(tabname))
            header = self.sir_files[tabname]['header']
            sirdata = self.sir_files[tabname]['data']
            nsx = header.nsx
            nsy = header.nsy
            vmin = self.sir_files[tabname].setdefault('vmin', header.v_min)
            vmax = self.sir_files[tabname].setdefault('vmax', header.v_max)
            anodata = header.anodata
            v_offset = -vmin
            v_scale = 255 / (vmax - vmin)

            # Scale the SIR image to the range of 0,255
            sir_scale = ma.masked_less_equal(sirdata, anodata, copy=True)
            sir_scale += v_offset
            sir_scale *= v_scale
            sir_scale = sir_scale.filled(0)  # all nodata values are set to 0
            # Clip to 0,255
            sir_scale[sir_scale < 0] = 0
            sir_scale[sir_scale > 255] = 255
            # Ensure uint8 data and C contiguous data
            sir_scale = np.require(sir_scale, np.uint8, 'C')

            # Construct image from sir_scale
            # http://www.swharden.com/blog/2013-06-03-realtime-image-pixelmap-from-numpy-array-data-in-qt/
            # Note that I use the bytesPerLine option here. Without it, the
            # data must be 32-bit aligned (4 bytes). This means with uint8 data
            # that the image width must be a multiple of 4. This does not apply
            # to all SIR images, so I set bytesPerLine to be
            # nsx (items) * 1 (bytes/item)
            image = QImage(sir_scale.data, nsx, nsy, nsx,
                           QImage.Format_Indexed8)
            # ctab = []
            # for i in xrange(256):
            #     ctab.append(QtGui.qRgb(i,i,i))
            # image.setColorTable(ctab)

            # Save pixmap
            pixmap = QPixmap.fromImage(image)
            self.sir_files[tabname]['pixmap'] = pixmap
        else:
            logging.warning("Can't use update_image for {}".format(tabname))
Code Example #27
def norm_img_mask(img, mask=None):
    """ normalize image to have mean=0 and stddev=1
        take only positive definite pixel values
    """
    if mask is None:
        mimg = ma.masked_less_equal(img, 0)
    else:
        mimg = ma.masked_array(img, mask=mask)

    npixels = mimg.size-np.sum(mimg.mask)    
    mean_mimg = np.mean(mimg)
    std_mimg = np.std(mimg)
    #print(mimg.size, npixels, mean_mimg, std_mimg )
    mimg = (mimg - mean_mimg)/(std_mimg*np.sqrt(npixels))
    #print('normalized pattern: ', mimg.size, npixels, np.mean(mimg), np.std(mimg)*np.sqrt(npixels) )
    return mimg, npixels
Code Example #28
File: MyGrids.py Project: hugke729/MyScripts
def remove_unconnected_basins(depth):
    """Convert any 'lakes' to a sea level of 0

    Inputs
    ------
    depth : 2D array
        Depth below sea level (i.e. positive values are underwater)
    """
    is_masked = ma.isMaskedArray(depth)
    label_vals = label(ma.filled(depth, 0))[0]
    label_mode = mode(label_vals, axis=None)[0]
    depth[label_vals != label_mode] = 0

    if is_masked:
        depth = ma.masked_less_equal(depth, 0)
    return depth
Code Example #29
def make_fig(point):
    plt.close()

    #print org[:,point]
    fig=plt.figure()#figsize=(8.27,11.69))
    G = gridspec.GridSpec(1,1)#4,2)
    ax= fig.add_subplot(G[0,0]) 
    #fig, ax = plt.subplots()
    #org_Q=grdc.grdc_Q(pname[point],start_dt,last_dt)
    #org_Q=np.array(org_Q)

    org_Q=grdc_Q(staid[point],start_dt,last_dt)
    #print org, org_Q
    ed=np.shape(org_Q)[0]
    #print ed , np.shape(org[:,point])
    org=nc.outflw[:,ylist[point],xlist[point]]
    print(ed, np.shape(org))
    ax.plot(np.arange(start,last),org,label="CaMa-Flood",color="blue",linewidth=0.7,zorder=102)
    if ed == 0:
        print "no GRDC data"
        #return 0
    else:
        ax.plot(np.arange(start,last),ma.masked_less_equal(org_Q,0.0),label="GRDC",color="black",linewidth=0.7,zorder=101)
        NS1=NS(org,org_Q)
        #NS2=1-((np.sum((org[:ed,point]-org_Q)**2))/(np.sum((org_Q-np.mean(org_Q))**2)))
        #print point,NS1,NS2
        Nash="NS:%4.2f"%(NS1)
        ax.text(0.02,0.95,Nash,ha="left",va="center",transform=ax.transAxes,fontsize=10) 
        plt.legend(loc=1,ncol=1,prop={"size":8})
    # Make the y-axis label, ticks and tick labels match the line color.
    ax.set_ylabel('$discharge$ (m$^3$/s)', color='k')
    ax.set_xlim(xmin=0,xmax=ed+1)
    ax.set_ylim(ymin=0)#,xmax=ed+1)
    ax.tick_params('y', colors='k')
    days=np.arange(start_dt.year,last_dt.year+1,5)
    xxlist=np.linspace(1,N,len(days))
    ax.set_xticks(xxlist)
    ax.set_xticklabels(days,fontsize=8)
    #ax1.set_yticklabels(fontsize=11)
    ax.set_xlabel('$year$', color='k')
    # scientific notation
    ax.ticklabel_format(style="sci",axis="y",scilimits=(0,0))

    plt.tight_layout(pad=0.2,w_pad=0.05,h_pad=0.05)
    plt.savefig(pm.out_dir()+"/figures/disgraph/"+river[point]+"/"+pname[point]+"_disgraph_GRDC.png",dpi=500)
    #plt.show()
    return 0
Code Example #30
def find_center(field,
                comp=0,
                trunc_level=0.0,
                max_radius=None,
                min_radius=None):
    """Find the center of illumination by finding the "center of mass" of the field.

    Parameters:
        field ``GraspField``: The field to work on.
        comp int: The field component to look at.
        trunc_level float: Ignore the contributions from portions of the grid below this field level.
        max_radius float: Ignore portions of the grid outside this radius from the center of the field.
        min_radius float: Ignore portions of the grid inside this radius from the center of the field.

    Returns:
        x_cent float, y_cent float: The x and y values of the center of the field."""
    xv, yv = field.positions

    f = abs(field.field[:, :, comp])
    if trunc_level != 0.0:
        f = ma.masked_less_equal(f, trunc_level)
        xv = ma.array(xv, mask=f.mask)
        yv = ma.array(yv, mask=f.mask)

    if max_radius is not None:
        rad = field.radius_grid()
        rad_max_mask = ma.masked_greater(rad, max_radius)
        f = ma.array(f, mask=rad_max_mask.mask)
        xv = ma.array(xv, mask=rad_max_mask.mask)
        yv = ma.array(yv, mask=rad_max_mask.mask)

    if min_radius is not None:
        rad = field.radius_grid()
        rad_min_mask = ma.masked_less(rad, min_radius)
        f = ma.array(f, mask=rad_min_mask.mask)
        xv = ma.array(xv, mask=rad_min_mask.mask)
        yv = ma.array(yv, mask=rad_min_mask.mask)

    x_illum = xv * f
    y_illum = yv * f

    norm = np.sum(f)

    x_cent = np.sum(x_illum) / norm
    y_cent = np.sum(y_illum) / norm

    return x_cent, y_cent
Code Example #31
File: viz.py Project: ouinformatics/cybercom_tools
def crefstats(location='-96.60,33.00', start_date=None, stop_date=None, task_id=None):
    output = []
    for ts in fiveminute(start_date,stop_date):
        outdir = tempfile.mkdtemp(dir='/tmp')
        try:
            fname = nmq.getScene(ts, '-96.60,33.00', outdir)
            raster = gdal.Open(fname)
            band = raster.GetRasterBand(1).ReadAsArray()
            mband = ma.masked_less_equal(band,-99)
            shutil.rmtree(outdir)
            logging.info('Timestep %s' % ts)
            tomongo('fire.rccc.ou.edu','bioscatter','maxrefl', {"task_id": task_id, "loc":location,"ts":datetime.strptime(ts, '%Y%m%d.%H%M%S'),"max":float(ma.max(mband)),"min":float(ma.min(mband)),"mean":float(ma.mean(mband)), "std":float(ma.std(mband))})
        except:
            logging.error('Had problem at timestep %s' % ts)
            logging.error(sys.exc_info())
            tomongo('fire.rccc.ou.edu','bioscatter','maxrefl', {"task_id": task_id, "loc":location,"ts":ts,"max":None,"min":None,"std":None})
    return DataFrame(output)
Code Example #32
    def _check_variable_limits(self, dataset):
        """

        :type dataset: Dataset
        """
        for variable_name in dataset.variables:
            variable = dataset.variables[variable_name]
            self.report[variable_name + '.count.total'] = variable.size

            data = ProductVerifier.__get_masked_data(variable)
            self.report[variable_name + '.count.valid'] = data.count()

            try:
                valid_max = variable.getncattr('valid_max')
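                # values above valid_max remain unmasked, so count() is the invalid count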
                invalid_data = ma.masked_less_equal(data, valid_max)
                invalid_data_count = invalid_data.count()
                if invalid_data_count == 0:
                    self.report[variable_name +
                                '.valid_max_check'] = invalid_data_count
                else:
                    variable.getncattr('_FillValue')
                    self.report[variable_name +
                                '.valid_max_check'] = invalid_data_count
                    filename = os.path.basename(self.source_pathname)
                    self.report[variable_name +
                                '.valid_max_check_failed_for'] = filename
            except AttributeError:
                pass
            try:
                valid_min = variable.getncattr('valid_min')
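                # values below valid_min remain unmasked, so count() is the invalid count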
                invalid_data = ma.masked_greater_equal(data, valid_min)
                invalid_data_count = invalid_data.count()
                self.report[variable_name +
                            '.valid_min_check'] = invalid_data_count
                if invalid_data_count == 0:
                    self.report[variable_name +
                                '.valid_min_check'] = invalid_data_count
                else:
                    variable.getncattr('_FillValue')
                    self.report[variable_name +
                                '.valid_min_check'] = invalid_data_count
                    filename = os.path.basename(self.source_pathname)
                    self.report[variable_name +
                                '.valid_min_check_failed_for'] = filename
            except AttributeError:
                pass
Code Example #33
File: cwipp.py Project: nmoisseeva/cwipp
    def get_I(self, flux, length, *Us):
        """
        Finds cross-wind fireline intensity parameter I

        Parameters
        -----------
        flux : ndarray
            3D (time,y,x) array containing heat flux values [kW m-2].
        length : float
            maximum cross-wind length of the fire over the entire timespan [m].
        Us : float, optional
            surface wind direction [deg, relative to y axis] NOT CURRENTLY IMPLEMENTED!

        Returns
        ---------
        I : float
            fireline intensity parameter [K m2 s-1]

        """

        #confirm there are sufficient dimensions
        dims = np.shape(flux)
        if len(dims) > 3:
            raise ValueError('Too many dimensions for heat flux data')
        elif len(dims)<3:
            raise ValueError('Too few dimensions: need 3D array (time,y,x)')

        #mask and pad the heat source ------------------------
        upwind_padding = int(length/config.dx)
        downwind_padding = int(2000/config.dx)              #assumes ground is not heated beyond 2 km downwind
        masked_flux = ma.masked_less_equal(np.pad(flux,((0,0),(0,0),(upwind_padding,0)), 'constant',constant_values=0),1)

        cs_flux = np.nanmean(masked_flux,1)                         #get mean cross section for each timestep
        fire = []                                                   #create storage array
        fxmax = np.argmax(cs_flux,axis=1)                           #get location of max heat for each timestep
        for nP, pt in enumerate(fxmax[config.ign_over:]):            #excludes steps containing ignition
            subset = cs_flux[config.ign_over+nP,pt-upwind_padding:pt+downwind_padding]     #set averaging window around a maximum
            fire.append(subset)

        meanFire = np.nanmean(fire,0)                               #calculate mean fire cross section
        ignited = np.array([i for i in meanFire if i > 0.5])        #consider only cells with heat flux above 0.5 kW m-2
        I = np.trapz(ignited, dx = config.dx) * 1000 / ( 1.2 * 1005)    #calculate I by integrating kinematic heat flux along x (K m2 s-1)

        self.I = I
Code Example #34
File: MonthlyMean.py Project: faunalia/permaclim
    def compute(self, band_numbers, band_out=1):
        #
        bandsIn = [self.imageIn.GetRasterBand(n) for n in band_numbers]
        minimum = min([band.GetMinimum() for band in bandsIn])
        datas = [band.ReadAsArray(0, 0, self.cols, self.rows) for band in bandsIn]

        t_mean = sum(datas)/len(datas)

        # fix no data values
        mask = numpy.greater_equal(t_mean, minimum)
        self.mean = numpy.choose(mask, (-3.4e+38, t_mean))

        # stats
        masked_mean = ma.masked_less_equal(self.mean, -3.4e+38)
        self.min = float(masked_mean.min())
        self.max = float(masked_mean.max())
        self.std = numpy.std(masked_mean)

        self._saveMean(band_out, self.mean)
Code Example #35
File: clean_data.py Project: calanoue/GFIN_Data_Work
    def format_and_clean_data_main(self):
        """
        Main function to format and clean data based on choices by the user.
        """
        # Check if over missing_bound percent or missing_bound number of values are missing
        too_many_missing = self.has_too_many_missing(self.init_perc_remove)
        if ma.any(too_many_missing):
            idx, = ma.where(too_many_missing)
            self.xs[idx] = ma.mask_rows(self.xs[idx])

        # Check array to see if it is filled with values or empty
        if ma.all(self.check_for_all()):
            return self.xs

        # Clean outliers
        self.clean_outliers()

        # Take average of neighbor values to fill up to a given missing value gap length
        self.clean_gaps_w_linspace(fill_gap_length=self.max_gap_length)
        if ma.all(ma.count_masked(self.xs[:, :-self.keep_n_values], axis=1)[np.newaxis,:] == 0):
            return self.xs # if no masked values remain in values before recent ones

        # Remove values if they start the array and are then followed by too many masked values
        start_idx = self.find_new_starting_value()

        # If there are over x% blank values left in the original data after above changes,
        # check to see if x% of the blanks fall after the new start year
        too_many_missing = self.has_too_many_missing(self.second_perc_remove) # boolean array
        if ma.any(too_many_missing):
            n_masked = np.array([ma.count_masked(self.xs[i,s_idx:])
                                 for i, s_idx in enumerate(start_idx)]) / self.N > self.perc_remove_after_start_idx
            if ma.any(n_masked):
                idx, = ma.where(n_masked)
                self.xs[idx] = ma.mask_rows(self.xs[idx])

        # To fill in remaining values, run linear regression on non-zero values
        self.clean_gaps_w_lin_regress(start_idx)

        # If linear regression left negative or zero values, then use linear space to fill in middle gaps
        if ma.any(ma.masked_less_equal(self.xs, 0.)):
            self.clean_gaps_w_linspace()
Code Example #36
File: strFact.py Project: desicos/desicos
    def _getPatternZStrip(self, zsStart, zsStop):
        H = self.H
        nzpat = 600

        zpat = np.zeros((2, nzpat))
        zpat[0] = np.linspace(0, H, zpat.shape[1])

        m1 = ma.masked_greater_equal(zpat[0], zsStart).mask
        m2 = ma.masked_less_equal(zpat[0], zsStop).mask
        mm = m1 * m2
        zpat[1] = mm * (self.AZ / 2.0 + 0.5 * self.AZ * np.random.random(nzpat))

        zpi = np.hstack((zpat, zpat, zpat))

        zpi[0][0:zpat.shape[1]] = (zpat[0] - H)
        zpi[0][(2 * zpat.shape[1]):] = (zpat[0] + H)
        return zpi
Code Example #37
File: plot_gofr.py Project: dmerz75/myconfigs
def load_gofr(fn):
    data = np.loadtxt(fn)
    print(data.shape)
    x = data[::, 0] * 0.1
    gr = data[::, 1]
    gr = gr / gr[-1]

    c = ma.masked_less_equal(gr, 0)
    count_zeros = ma.count(c)
    # print(count_zeros)
    c = ma.compressed(c)

    y = -0.6 * np.log(c)
    # plt.plot(x,gr,'r-')
    print(plot_type)
    if plot_type == 'gofr':
        return x, gr
    elif plot_type == 'rdf':
        # print(y)
        # print(x.shape[0] - count_zeros)
        return x[x.shape[0] - count_zeros:], y
Code Example #38
    def _check_corruptness(self, dataset, product_type):
        """

        :type dataset: Dataset
        :type product_type: ProductType
        """
        ok = True
        for variable_name in product_type.get_sst_variable_names():
            if variable_name in dataset.variables:
                variable = dataset.variables[variable_name]

                data = ProductVerifier.__get_masked_data(variable)
                valid_data_count = data.count()
                if valid_data_count == 0:
                    ok = False
                try:
                    valid_max = variable.getncattr('valid_max')
                    invalid_data = ma.masked_less_equal(data, valid_max)
                    valid_data_count = valid_data_count - invalid_data.count()
                except AttributeError:
                    pass
                try:
                    valid_min = variable.getncattr('valid_min')
                    invalid_data = ma.masked_greater_equal(data, valid_min)
                    valid_data_count = valid_data_count - invalid_data.count()
                except AttributeError:
                    pass
                if valid_data_count == 0:
                    ok = False
            else:
                ok = False
        if ok:
            self.report['corruptness_check'] = 0
        else:
            self.report['corruptness_check'] = 1
            filename = os.path.basename(self.source_pathname)
            self.report['corruptness_check_failed_for'] = filename
            raise VerificationError
Code Example #39
File: clean_data.py Project: calanoue/GFIN_Data_Work
    def clean_gaps_w_linspace(self, fill_gap_length=0):
        """
        Function to fill gaps with linearly spaced values.

        Parameters
        ----------

        fill_gap_length : integer
            Maximum length of gaps to be filled by averaging.
        """
        self.xs = ma.masked_less_equal(self.xs, 0.)
        condition = ma.getmask(self.xs)

        # Fill masked-value gaps that are not at the beginning or end of the
        # array, provided the gap is no longer than the maximum gap length
        if np.any(condition):
            cont_regions = self.contiguous_regions(condition)
            for step in np.arange(2, np.size(cont_regions, 0) + 1, 2): # 1st row start, 2nd stop
                (axis, blank), (start, stop) = cont_regions[step - 2:step].T
                gap_length = stop - start # Length of gap
                if start > 0 and stop < self.N: # Don't fill in gaps at beginning or end
                    if not fill_gap_length or gap_length <= fill_gap_length:
                        self.xs[axis, start:stop] = np.linspace(self.xs[axis, start -1],
                            self.xs[axis, stop], stop - start + 1, endpoint=False)[1:]
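The fill itself is a small trick: np.linspace runs from the last good sample before the gap to the first good sample after it, and dropping the leading anchor leaves exactly the interior points. A one-dimensional sketch of the same formula (the indices here are illustrative):

import numpy as np
import numpy.ma as ma

xs = ma.masked_less_equal(np.array([1.0, 2.0, 0.0, 0.0, 5.0]), 0.0)
start, stop = 2, 4  # the masked run occupies indices 2..3

# Interpolate across the gap and drop the anchor point, as in the method above.
xs[start:stop] = np.linspace(xs[start - 1], xs[stop],
                             stop - start + 1, endpoint=False)[1:]
print(xs)  # [1.0 2.0 3.0 4.0 5.0]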
Code example #53
File: imgview.py Project: pauldmccarthy/imgview
        self.ydata = self.img[:, val, :].transpose()
        self._draw_ax(self.yax, self.ydata, self.hdr["xn"], self.hdr["zn"])

    def on_z_slider(self, event):
        val = self.zax_slider.GetValue()
        self.zax_text.SetValue("%u" % val)
        self.zdata = self.img[:, :, val].transpose()
        self._draw_ax(self.zax, self.zdata, self.hdr["xn"], self.hdr["yn"])


if __name__ == "__main__":

    if len(sys.argv) not in [2, 3]:
        print "usage: imgview.py filename [threshold]"
        exit()

    imgfile = sys.argv[1]
    threshold = float(sys.argv[2]) if len(sys.argv) == 3 else None

    (img, hdr) = loadimg.loadimg(imgfile)

    img = ma.masked_invalid(img)

    if threshold is not None:
        img = ma.masked_less_equal(img, threshold)

    app = wx.PySimpleApp()
    app.frame = ImageFrame(imgfile, hdr, img)
    app.frame.Show()
    app.MainLoop()
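Note the order of the two masks: masked_invalid removes NaN and Inf pixels first, and masked_less_equal then adds the threshold cut on top of the existing mask rather than replacing it. A quick sketch of the composition:

import numpy as np
import numpy.ma as ma

img = np.array([0.2, np.nan, 0.8, -1.0, np.inf])
img = ma.masked_invalid(img)          # masks nan and inf
img = ma.masked_less_equal(img, 0.5)  # additionally masks 0.2 and -1.0
print(img.compressed())               # [0.8]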
Code example #54
File: test_old_ma.py Project: numpy/numpy
    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
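Several of these assertions pin down one identity: each masked_<comparison> helper is shorthand for masked_where applied to the corresponding ufunc. Restated minimally outside the test harness:

import numpy as np
import numpy.ma as ma

x = np.array([1., 2., 3., 4., 5.])
a = ma.masked_less_equal(x, 2)
b = ma.masked_where(np.less_equal(x, 2), x)
assert (a.mask == b.mask).all()
assert (a.compressed() == b.compressed()).all()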
Code example #55
    def NewWND5(
        cls, training_set, test_set, feature_weights=None, name=None, split_number=None, quiet=False, error_bars=False
    ):
        """The equivalent of the "wndcharm classify" command in the command line implementation
        of WND-CHARM. Input a training set, a test set, and feature weights, and returns a
        new instance of a FeatureSpaceClassification, with self.individual_results
        filled with a new instances of SingleSampleClassification.

        If feature_weights is None, 1's are used as weights."""

        # type checking
        if not isinstance(training_set, FeatureSpace):
            raise ValueError(
                'First argument to New must be of type "FeatureSpace", you gave a {0}'.format(type(training_set).__name__)
            )
        if not isinstance(test_set, FeatureSpace):
            raise ValueError(
                'Second argument to New must be of type "FeatureSpace", you gave a {0}'.format(type(test_set).__name__)
            )
        if feature_weights is not None and not isinstance(feature_weights, FeatureWeights):
            raise ValueError(
                'Third argument to New must be of type "FeatureWeights" or derived class, you gave a {0}'.format(
                    type(feature_weights).__name__
                )
            )

        # feature comparison
        if test_set.feature_names != training_set.feature_names:
            raise ValueError(
                "Can't classify, features in test set don't match features in training set. Try translating feature names from old style to new, or performing a FeatureReduce()"
            )
        if feature_weights is not None and test_set.feature_names != feature_weights.feature_names:
            raise ValueError(
                "Can't classify, features in test set don't match features in weights. Try translating feature names from old style to new, or performing a FeatureReduce()"
            )

        # ignore divides => dist matrices with 0's down the diagonal will have nan's
        # which should be handled by the mask
        np.seterr(under="ignore", divide="ignore")

        n_feats = len(training_set.feature_names)
        if feature_weights is None:
            feature_weights = FisherFeatureWeights(size=n_feats)
            feature_weights.values = np.ones((n_feats,))
            feature_weights.feature_names = training_set.feature_names

        # instantiate myself
        split_result = cls(training_set, test_set, feature_weights, name, split_number)
        split_result.use_error_bars = error_bars

        # Say what we're going to do
        if not quiet:
            print "Classifying test set '{0}' ({1} samples) against training set '{2}' ({3} samples)".format(
                test_set.name, test_set.num_samples, training_set.name, training_set.num_samples
            )
            if test_set.num_samples_per_group > 1:
                print "Performing tiled classification."

        # Any collisions? (i.e., where the distance from test sample to training sample
        # is below machine epsilon, i.e., 2.2204e-16)
        epsilon = np.finfo(np.float).eps
        import numpy.ma as ma

        # Create slicer for training set class boundaries
        slice_list = []
        start_index = 0
        for n_class_train_samps in training_set.class_sizes:
            end_index = start_index + n_class_train_samps
            slice_list.append(slice(start_index, end_index))
            start_index = end_index

        # Create distance matrix:
        # result dist_mat where rows => train samps and cols => test_samps
        wts = np.array(feature_weights.values)
        w_train_featspace = training_set.data_matrix * wts
        if training_set is test_set:
            from scipy.spatial.distance import pdist
            from scipy.spatial.distance import squareform

            raw_dist_mat = squareform(pdist(w_train_featspace, "sqeuclidean"))
            dist_mat = ma.masked_less_equal(raw_dist_mat, epsilon, False)
        else:
            from scipy.spatial.distance import cdist

            w_test_featspace = test_set.data_matrix * wts
            raw_dist_mat = cdist(w_train_featspace, w_test_featspace, "sqeuclidean")
            dist_mat = ma.masked_less_equal(raw_dist_mat, epsilon, False)

        # Create marginal probabilities from distance matrix:
        similarity_mat = np.power(dist_mat, -5).T

        for test_samp_index, test_samp_sims in enumerate(similarity_mat):
            result = SingleSampleClassification()
            per_class_sims_list = [test_samp_sims[class_slice] for class_slice in slice_list]
            class_siml_means = []
            for class_sims in per_class_sims_list:
                try:
                    val = class_sims.compressed().mean()
                except FloatingPointError:
                    # mean of empty slice raises floating point error
                    val = np.nan
                class_siml_means.append(val)

            class_siml_means = np.array(class_siml_means)
            if not np.any(np.isnan(class_siml_means)):
                result.normalization_factor = class_siml_means.sum()
                result.marginal_probabilities = class_siml_means / result.normalization_factor
                result.predicted_label = training_set.class_names[result.marginal_probabilities.argmax()]

            result.sample_group_id = test_set._contiguous_sample_group_ids[test_samp_index]
            result.sample_sequence_id = test_set._contiguous_sample_sequence_ids[test_samp_index]
            result.num_samples_per_group = test_set.num_samples_per_group
            result.name = test_set._contiguous_sample_names[test_samp_index]
            result.ground_truth_label = test_set._contiguous_ground_truth_labels[test_samp_index]
            result.ground_truth_value = test_set._contiguous_ground_truth_values[test_samp_index]
            result.split_number = split_number
            split_result.individual_results.append(result)

        # Predicted value via class coefficients, if applicable
        if training_set.interpolation_coefficients is not None:
            predicted_values = []
            ground_truth_values = []
            for result in split_result.individual_results:
                if result.marginal_probabilities is not None:
                    result.predicted_value = np.sum(
                        result.marginal_probabilities * training_set.interpolation_coefficients
                    )
                    predicted_values.append(result.predicted_value)
                    ground_truth_values.append(result.ground_truth_value)
            if predicted_values:
                split_result.predicted_values = predicted_values
                split_result.ground_truth_values = ground_truth_values

        # TILING SECTION:
        # Create a whole image classification result that
        # is the average of all the calls from all the tiles
        if test_set.num_samples_per_group > 1:
            split_result.averaged_results = []
            averaged_predicted_values = []
            averaged_ground_truth_values = []
            for start_index in xrange(0, test_set.num_samples, test_set.num_samples_per_group):
                end_index = start_index + test_set.num_samples_per_group
                tiles = split_result.individual_results[start_index:end_index]
                avg_result = AveragedSingleSamplePrediction(tiles, training_set.class_names)
                avg_result.split_number = split_number
                if avg_result.predicted_value is not None:
                    averaged_predicted_values.append(avg_result.predicted_value)
                    averaged_ground_truth_values.append(avg_result.ground_truth_value)

                split_result.averaged_results.append(avg_result)
            if averaged_predicted_values:
                split_result.averaged_predicted_values = averaged_predicted_values
                split_result.averaged_ground_truth_values = averaged_ground_truth_values

        if not quiet:
            first_time_through = True
            if test_set.num_samples_per_group > 1:
                for avg_res in split_result.averaged_results:
                    if first_time_through:
                        avg_res.Print(line_item=True, col_header_only=first_time_through)
                    first_time_through = False
                    for indiv_res in avg_res.individual_results:
                        indiv_res.Print(line_item=True, training_set_class_names=training_set.class_names)
                    avg_res.Print(line_item=True)
            else:
                for indiv_res in split_result.individual_results:
                    indiv_res.Print(
                        line_item=True,
                        include_col_header=first_time_through,
                        training_set_class_names=training_set.class_names,
                    )
                    first_time_through = False

        np.seterr(all="raise")
        return split_result
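The epsilon mask is doing real work here: distances at or below machine epsilon (self-matches and duplicate samples) are hidden before the power-law similarity is computed, so no infinite weight ever reaches the class means. A reduced sketch of that step with toy data, outside the WND-CHARM API:

import numpy as np
import numpy.ma as ma
from scipy.spatial.distance import pdist, squareform

feats = np.array([[0.1, 0.2], [0.1, 0.2], [0.9, 0.4]])  # first two rows collide
dists = squareform(pdist(feats, 'sqeuclidean'))

eps = np.finfo(float).eps
dists = ma.masked_less_equal(dists, eps)  # hides the zero diagonal and the collision
with np.errstate(divide='ignore'):
    sims = np.power(dists, -5)            # masked cells stay masked in the result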
Code example #56
File: gz2_tasks.py Project: vrooje/galaxyzoo2
def determine_ratio_baseline(min_count=50):

    # Load the GZ2 sample data

    p_el = pyfits.open(fits_path+'el_binned_counts.fits')
    p_sp = pyfits.open(fits_path+'sp_binned_counts.fits')

    d_el = p_el[0].data.astype(int)                         # Data is pre-binned
    d_sp = p_sp[0].data.astype(int)

    # Bin sizes are set when FITS file is created

    zbins = p_el['REDSHIFT_BIN_CENTERS'].data['centers']
    magbins = p_el['MR_BIN_CENTERS'].data['centers']
    sizebins = p_el['R50_KPC_BIN_CENTERS'].data['centers']

    n_magbin = len(magbins)
    n_sizebin = len(sizebins)

    # Empty 2-D arrays to store the ratio and number of galaxies for elliptical/spiral ratio

    ratio_baseline = np.zeros((n_magbin, n_sizebin), np.float) + unknown_ratio
    counts_baseline = np.zeros((n_magbin, n_sizebin, 2), np.int) + unknown_ratio

    # Trim the data so it only includes counts in the relevant magnitude and size bins

    elz = d_el
    spz = d_sp

    # Loop over each slice in redshift space. Cells without entries are filled by the first redshift slice that provides data.

    for z_index, zsel in enumerate(zbins[1:]):
        el = elz[z_index, :, :]
        sp = spz[z_index, :, :]
     
        # Create mask for cells with low counts
        mask = (sp + el) < min_count
     
        # Compute the elliptical/spiral ratio (as a logarithm) for the entire array

        ratio = el.astype(np.float)/sp
        ratio = np.log10(ratio)

        # Mask galaxies outside of the selection limits

        np.putmask(ratio, mask, unknown_ratio)        # If ratio is masked, replace it with the value ``unknown ratio''
        np.putmask(ratio, np.isinf(ratio), unknown_ratio)      
        select = np.logical_not(mask)                # Invert the mask so True = good values in select

        empty = ratio_baseline <= unknown_ratio+eps        # Second mask; "True" is where master array has no values yet

        # Combine empty and select masks

        select &= empty                                # Union is where master array has no values AND ratio is determined. 

        # Populate the 2-D empty arrays with the el/sp ratio for all non-masked cells

        ratio_baseline[select] = ratio[select]
        counts_baseline[select] = np.transpose([el[select], sp[select]])

    ratio_baseline_masked = ma.masked_less_equal(ratio_baseline, unknown_ratio)
    counts_baseline_masked = ma.masked_less_equal(counts_baseline, unknown_ratio)

    # Write the results to FITS files

    pyfits.writeto(fits_path+'local_ratio_baseline.fits',
                   ratio_baseline, clobber=True)    
    pyfits.writeto(fits_path+'local_counts_baseline.fits',
                   counts_baseline, clobber=True)    

    pickle.dump(ratio_baseline_masked, open(gz_path+'local_ratio_baseline_masked.pkl','wb')) 
    pickle.dump(counts_baseline_masked, open(gz_path+'local_counts_baseline_masked.pkl','wb')) 

    # Close PyFITS objects

    p_el.close()
    p_sp.close()

    return None
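The unknown_ratio sentinel pattern at the end is a common pairing with masked_less_equal: cells that never receive a measurement keep a large negative placeholder, and the final call turns the placeholder back into a proper mask before pickling. A compact sketch (the sentinel value here is illustrative):

import numpy as np
import numpy.ma as ma

unknown_ratio = -999.0
grid = np.full((3, 3), unknown_ratio)
grid[1, 1] = 0.25  # only one cell ever receives a measured ratio

masked_grid = ma.masked_less_equal(grid, unknown_ratio)
print(masked_grid.count())  # 1 -> sentinel cells are masked out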
Code example #57
File: WCS_test-USGS.py Project: ivn888/notebook
if dxdy == 0.0:
    x1 = x0 + dx * ncols
    y1 = y0 + dy * nrows

# <codecell>

import cartopy.crs as ccrs
from cartopy.io.img_tiles import MapQuestOpenAerial, MapQuestOSM, OSM

# <codecell>

print x0,x1,y1,y0

# <codecell>

elevation = ma.masked_less_equal(elevation, -1.e5)

# <codecell>

print elevation.min(), elevation.max()

# <codecell>

plt.figure(figsize=(8,10))
ax = plt.axes(projection=ccrs.PlateCarree())
tiler = MapQuestOpenAerial()
ax.add_image(tiler, 14)
plt.imshow(elevation, cmap='jet', extent=[x0, x1, y1, y0],
           transform=ccrs.PlateCarree(),alpha=0.6,zorder=2);
ax.gridlines(draw_labels=True,zorder=3);
Code example #58
File: mask_disk.py Project: Cadair/sunpy
x, y = np.meshgrid(*[np.arange(v.value) for v in aia.dimensions]) * u.pixel

###############################################################################
# Now we can convert this to helioprojective coordinates and create a new
# array which contains the normalized radial position for each pixel

hpc_coords = aia.pixel_to_world(x, y)
r = np.sqrt(hpc_coords.Tx ** 2 + hpc_coords.Ty ** 2) / aia.rsun_obs

###############################################################################
# Finally, we create a mask where all pixels at a radius less than or equal to
# Rsun are masked. We also make a slight change to the colormap so that masked
# values are shown as black instead of the default white.

mask = ma.masked_less_equal(r, 1)
palette = aia.plot_settings['cmap']
palette.set_bad('black')

###############################################################################
# Now we create a new map from the AIA data with our mask applied, and
# plot the result using our modified colormap

scaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask.mask)

fig = plt.figure()
plt.subplot(projection=scaled_map)
scaled_map.plot(cmap=palette)
scaled_map.draw_limb()
plt.show()
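Here masked_less_equal acts on geometry rather than data values: r is a dimensionless radius in units of Rsun, so masking r <= 1 flags exactly the on-disk pixels. A plain-numpy sketch of the same geometry, independent of sunpy's coordinate machinery:

import numpy as np
import numpy.ma as ma

yy, xx = np.mgrid[-2:2:256j, -2:2:256j]  # pixel coordinates in units of Rsun
r = np.sqrt(xx ** 2 + yy ** 2)

disk = ma.masked_less_equal(r, 1)  # .mask is True wherever a pixel is on-disk
print(int(disk.mask.sum()))        # number of on-disk pixels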
Code example #59
def fit_rings(file, trim_rad=470, disp=None):
    """
    main routine to take a FITS file, read it in, azimuthally average it,
    find the rings, and then fit voigt profiles to them.

    Parameters
    ----------
    file : string filename of FITS file to analyze
    trim_rad : int maximum radial extent of profile
    disp : boolean to control DS9 display of image

    Returns
    -------
    list containing:
        boolean - success of finding peaks or not
        numpy array - wavelengths of profile
        numpy array - radial flux profile data
        numpy array - best-fit radial flux profile
        dict - parameters of best-fit
    """



    hdu = pyfits.open(file)
    (data, header) = (hdu[0].data, hdu[0].header)
    etalon = int(header['ET-STATE'].split()[3])
    etwave_key = "ET%dWAVE0" % etalon
    name_key = "ET%dMODE" % etalon
    etname = header[name_key]
    cenwave = float(header[etwave_key])
    binning = int(header['CCDSUM'].split()[0])
    if header['OBSMODE'] != 'FABRY-PEROT':
        return False, np.empty(1), np.empty(1), np.empty(1), np.empty(1)

    ysize, xsize = data.shape

    # cut FP image down to square
    fp_im = data[:,
                 (xsize - ysize) / 2:
                 (xsize + ysize) / 2]
    if disp:
        disp.set_np2arr(fp_im, dtype=np.int32)
    # mask those gaps
    fp_im = ma.masked_less_equal(data[:,
                                      (xsize - ysize) / 2:
                                      (xsize + ysize) / 2], 0.0)

    # define center based on FP ghost imaging with special mask
    xc = 2054 / binning
    yc = 2008 / binning

#    print "FP ring centre should be at x= %.1f y= %.1f" % (xc+280, yc)

    #Find the list of rings:

    ring_list = findrings(data, thresh=5, niter=5, minsize=10)

    print "%1d ring(s) found" % (len(ring_list))    

    sxc = syc = 0
    for i in range(len(ring_list)):
        ring_list[i]=findcenter(data, ring_list[i], method='center')
        print "Ring number %1d: centre at X= %.1f, Y=%.1f, Radius= %.1f" % (i+1, ring_list[i].xc, ring_list[i].yc,ring_list[i].prad)
        if ring_list[i].prad > 250 and ring_list[i].prad < 390:
            sxc=ring_list[i].xc
            syc=ring_list[i].yc

#    xcn,ycn=find_center(fp_im,xc,yc)
       
    if sxc >0 and syc > 0:

        print "Found ring centre at x= %.1f y= %.1f" % (sxc, syc)
        xc=sxc-280
        yc=syc
        
        #Xc0 and Yc0 is the ring centre for a good ring
        # Ted's values:

        xc0=802
        yc0=503

        #From Tim's:
#        xc0=800
#        yc0=500

        deltaX=int(3.6*(syc-yc0))
        deltaY=int(4.7*((sxc)-xc0))

#        print "With this new centre, recommend dX= %1d, dY=%1d" % (deltaX, deltaY)

    else:
        print "Found ring centre at x= %.1f y= %.1f" % (sxc, syc)        
        print "Using FP ghost ring centre at x= %.1f y= %.1f" % (xc+280, yc)
    
    # we use the 4x4 version of trim_rad since the vast majority of FP
    # data is taken with 4x4 binning
    trim_rad *= 4 / binning

    mask_val = 0.0

    # Per-mode constants used in the radius-to-wavelength conversion below,
    # selected by central wavelength; the final branch provides defaults.
    f = {}
    if cenwave < 5200:
        f['MR'] = 22149.6
        f['LR'] = 22795.92
        f['TF'] = 24360.32
    elif cenwave > 6500 and cenwave < 6600:
        f['MR'] = 22713.0
        f['LR'] = 24191.40
        f['TF'] = 24360.32
    elif cenwave >= 6600 and cenwave < 6700:
        f['MR'] = 22848.0
        f['LR'] = 24169.32
        f['TF'] = 23830.20
    elif cenwave >= 6700 and cenwave < 6900:
        f['MR'] = 22828.92
        f['LR'] = 24087.68
        f['TF'] = 24553.32
    elif cenwave >= 7300 and cenwave < 7700:
        f['MR'] = 22864.06
        f['LR'] = 24087.68
        f['TF'] = 24553.32
    else:
        f['MR'] = 22828.92
        f['LR'] = 24400.32
        f['TF'] = 24553.32

    # get the radial profile and flatten it with a default QTH flat profile
    prof, r = FP_profile(fp_im, xc, yc, trim_rad=trim_rad, mask=mask_val)

#    Not flatfielding the profile as ring already flat-fielded using saltflat.
#    prof = flatprof(prof, binning)

    wave = cenwave / np.sqrt(1.0 + (r * binning / f[etname]) ** 2)

    # find the peaks and bail out if none found
    npeaks, peak_list = find_peaks(prof, width=40)
    if npeaks < 1:
        print "No peaks found."
        return False, np.empty(1), np.empty(1), np.empty(1), np.empty(1)

    print "Found %d rings at:" % npeaks
    cenwidth = 20
    for peak in peak_list:
        cen_peak = centroid(prof, peak, cenwidth)
        if np.isnan(cen_peak):
            cen_peak = peak

        print "\t R %f" % cen_peak
        if disp:
            disp.set("regions command {circle %f %f %f # color=red}" %
                     (xc, yc, cen_peak))

    # max_r = peak_list[0]
    # pmax = prof[max_r]
    back = 250.0
    fwhm = 5.0
    gam = 1.0
    init = [back]
    bounds = [(0.0, 2.0e8)]

    # keep brightest
    peak = peak_list[0]

    # position
    init.append(cenwave / np.sqrt(1.0 + (peak * binning / f[etname]) ** 2))
    bounds.append((cenwave - 30, cenwave + 30))
    # amplitude
    init.append(prof[peak])
    bounds.append((0.0, 1.0e8))
    # FWHM
    init.append(fwhm)
    bounds.append((0.1, 20.0))
    # gamma
    init.append(gam)
    bounds.append((0.0, 5.0))

    ### keep these around in case we want to try again someday
    #
    #fit = opt.fmin_slsqp(fit_func, init, args=(prof, wave),
    #                     bounds=bounds)
    #fit, nfeval, rc = opt.fmin_tnc(fit_func, init, args=(prof, wave),
    #                               epsilon=0.0001,
    #                               bounds=bounds,
    #                               approx_grad=True,
    #                               disp=0,
    #                               maxfun=5000)

    # good ol' powell method FTW
    fit = opt.fmin_powell(fit_func, init, args=(prof, wave),
                          ftol=0.00001, full_output=False, disp=False)

    #print "Return code: %s" % opt.tnc.RCSTRINGS[rc]

    pars = {}
    fit_v = fit[0]
    print "\nBackground = %f" % fit[0]
    pars['Background'] = fit[0]
    pars['R'] = []
    pars['Amplitude'] = []
    pars['Gauss FWHM'] = []
    pars['Gamma'] = []
    pars['FWHM'] = []
    for i in range(1, len(fit), 4):
        fwhm = voigt_fwhm(fit[i + 2], fit[i + 3])
        pars['R'].append(fit[i])
        pars['Amplitude'].append(fit[i + 1])
        pars['Gauss FWHM'].append(fit[i + 2])
        pars['Gamma'].append(fit[i + 3])
        pars['FWHM'].append(fwhm)
        fit_v = fit_v + qvoigt(wave, fit[i + 1], fit[i],
                               fit[i + 2], fit[i + 3])

    return True, wave, prof, fit_v, pars