Example #1
    def algorithm3(self, e0, M):

        # Compute global maximum expansion rate
        Vtest = linspace(0, 0.5, 1000)
        muplot = []
        for Vi in Vtest:
            J = self.Jac(0, [Vi, 0])
            muplot.append(0.5*max(real(eig(J+J.T)[0])))
        index = argmax(muplot)
        mustar = muplot[index]
        Vstar = Vtest[index]

        self.d1 = [e0]
        self.d2 = [e0]
        self.theta = [0]
        c = []
        for i in range(len(self.T)-1):
            # compute maximal expansion rate c_i in a neighbourhood of V[i] using the global vector field bound M
            if abs(self.V[i] - Vstar) <= self.d1[i] + M*(self.T[i+1]-self.T[i]):
                c.append(mustar)
            elif self.V[i] + self.d1[i] + M*(self.T[i+1]-self.T[i]) < Vstar:
                J = self.Jac(0, [self.V[i] + self.d1[i] + M*(self.T[i+1]-self.T[i]), 0])
                c.append(0.5*max(real(eig(J+J.T)[0])))
            else:
                J = self.Jac(0, [self.V[i] - self.d1[i] - M*(self.T[i+1]-self.T[i]), 0])
                c.append(0.5*max(real(eig(J+J.T)[0])))

            # compute the diameter of the ball from the bound on the expansion rate in a neighbourhood of the current state
            self.d1.append(exp(c[i]*(self.T[i+1]-self.T[i]))*self.d1[i] + self.tolerance)
            self.d2.append(exp(c[i]*(self.T[i+1]-self.T[i]))*self.d2[i] + self.tolerance)
            self.theta.append(0)
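The quantity computed above is the matrix measure (logarithmic norm) induced by the 2-norm, mu(J) = 0.5 * lambda_max(J + J^T), which bounds the local expansion rate of trajectories. A minimal standalone sketch, assuming only numpy (the Jac, T and V attributes above belong to the surrounding class):

import numpy as np
from numpy.linalg import eigvalsh

def expansion_rate(J):
    # 2-norm matrix measure: largest eigenvalue of the symmetric part.
    # eigvalsh returns real eigenvalues, so no real() call is needed.
    return 0.5 * eigvalsh(J + J.T).max()

J = np.array([[1.0, 0.0], [0.0, -2.0]])  # toy Jacobian of a saddle
print(expansion_rate(J))                 # -> 1.0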
Example #2
 def autoscale(self, A):
   '''
   Set *vmin*, *vmax* to min, max of *A*.
   '''
   A = ma.masked_less_equal(np.abs(A), 1e-16, copy=False)
   self.vmin = -ma.max(A)
   self.vmax = ma.max(A)
   self.vin = ma.min(A)
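This autoscale variant appears to build a symmetric range around zero from the largest magnitude in A, with vin holding the smallest unmasked magnitude (a candidate linear threshold for a log-like scale). A quick standalone check of the same recipe, assuming plain numpy inputs:

import numpy as np
import numpy.ma as ma

A = np.array([-3.0, 0.5, 2.0, 1e-20])
Am = ma.masked_less_equal(np.abs(A), 1e-16, copy=False)  # drop near-zero values
print(-ma.max(Am), ma.max(Am), ma.min(Am))  # -> -3.0 3.0 0.5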
Example #4
 def update_ranges(self, x_data, y_data):
     s_x = ma.mean(x_data)
     s_y = ma.mean(y_data)
     bounds = [
         ma.min(x_data) - s_x,
         ma.max(x_data) + s_x,
         ma.min(y_data) - s_y,
         ma.max(y_data) + s_y
     ]
     plt.axis(bounds)
Example #5
 def autoscale_None(self, A):
   ' autoscale only None-valued vmin or vmax'
   if self.vmin is not None and self.vmax is not None and self.vin is not None:
     return
   A = ma.masked_less_equal(np.abs(A), 1e-16, copy=False)
   if self.vmin is None:
     self.vmin = -ma.max(A)
   if self.vmax is None:
     self.vmax = ma.max(A)
   if self.vin is None:
     self.vin = ma.min(A)
Example #7
def find_lag_time(stream, reference_stream):

    """
    Find the lag time between a given stream and a reference stream using cross-correlation of the
    horizontal total energy, which is independent of component alignment.
    :param stream: obspy stream object of the seismogram to calculate lag time for
    :param reference_stream: obspy stream object of the seismogram to use as reference for lag time calculation
    :return: lag time between the two sensors in seconds relative to the reference stream
    """

    # Create normalised amplitude envelopes of the data using horizontal total energy
    stream_envelope = calculate_horizontal_total_energy(stream)
    max_se = ma.max(stream_envelope)
    stream_envelope /= max_se
    ref_envelope = calculate_horizontal_total_energy(reference_stream)
    max_re = ma.max(ref_envelope)
    ref_envelope /= max_re

    # Ensure all data are masked arrays
    if not ma.is_masked(stream_envelope):
        stream_envelope = ma.masked_array(stream_envelope)
    if not ma.is_masked(ref_envelope):
        ref_envelope = ma.masked_array(ref_envelope)

    # Find the lag time from the maximum cross-correlation value between the two waveforms
    xcorr_values = []
    ref_envelope = ref_envelope.filled(0).tolist() + len(stream_envelope) * [0]
    ref_envelope = np.asarray(smooth_data(ref_envelope, int(values[parameters.index('corner_frequency')])))
    for m in range(2 * len(stream_envelope)):

        # Shift the stream
        if m <= len(stream_envelope):
            shifted_stream_envelope = (len(stream_envelope) - m) * [0] + stream_envelope.filled(0).tolist() + m * [0]
        else:
            shifted_stream_envelope = max(0, len(stream_envelope) - m) * [0] + stream_envelope[:len(stream_envelope) - m].filled(0).tolist() + m * [0]
        shifted_stream_envelope = np.asarray(smooth_data(shifted_stream_envelope,
                                                         int(values[parameters.index('corner_frequency')])))

        # Perform cross-correlation
        try:
            xcorr_value = np.corrcoef(shifted_stream_envelope, ref_envelope)[0][1]
        except ValueError:
            print('Cross-correlation failed! Perhaps one stream is one data point longer than the other? '
                  'This can occur for certain corner frequency and data length combinations... It is a bug.')
            xcorr_value = 0.0  # treat a failed correlation as no match so the list stays aligned
        xcorr_values.append(xcorr_value)

    # Find lag time from highest cross-correlation value
    max_xcorr_value = max(xcorr_values)
    lag_time = (1 / stream[0].stats.sampling_rate) * (len(stream_envelope) - xcorr_values.index(max_xcorr_value))

    return lag_time
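The shift-and-correlate loop above is equivalent to locating the peak of a full cross-correlation. A compact sketch of the same idea with plain numpy (the project helpers calculate_horizontal_total_energy and smooth_data are assumed to exist elsewhere; here the envelopes are already-computed arrays):

import numpy as np

def simple_lag(sig, ref, fs):
    # The peak of the full cross-correlation gives the shift of sig
    # relative to ref, in samples; divide by fs to get seconds.
    xcorr = np.correlate(sig - sig.mean(), ref - ref.mean(), mode='full')
    return (np.argmax(xcorr) - (len(ref) - 1)) / fs

fs = 100.0
t = np.arange(0, 10, 1 / fs)
sig = np.exp(-(t - 4.0)**2)      # envelope peaking at t = 4 s
ref = np.exp(-(t - 3.0)**2)      # reference peaking at t = 3 s
print(simple_lag(sig, ref, fs))  # -> approximately 1.0 s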
Example #8
def plot_all_correlations(data_col, plot_flags=True,amax_factor=1.0):
    flags = bad_data(data_col, threshold=4.0, max_iter=20)
    flagged_data = ma.array(data_col.data, mask=flags)
    xx,xy,yx,yy,num_pol = split_data_col(ma.array(flagged_data))
    
    scale=ma.max(abs(flagged_data))
    stddev = max(ma.std(flagged_data.real), ma.std(flagged_data.imag))
    if flags.sum() == product(flags.shape):
        amax=1.0
    else:
        amax=(scale-stddev)*amax_factor
    

    print('scale: %f\nsigma: %f' % (scale, stddev))
    good=logical_not(xx.mask)
    if not plot_flags:
        good = None
    clf()
    if num_pol == 2:
        subplot(121)
        plot_complex_image('XX',xx, good, amin=0.0, amax=amax)
        subplot(122)
        plot_complex_image('YY',yy, good, amin=0.0, amax=amax)
    elif num_pol == 4:
        subplot(141)
        plot_complex_image('XX',xx, good, amin=0.0, amax=amax)
        subplot(142)
        plot_complex_image('XY',xy, good, amin=0.0, amax=amax)
        subplot(143)
        plot_complex_image('YX',yx, good, amin=0.0, amax=amax)
        subplot(144)
        plot_complex_image('YY',yy, good, amin=0.0, amax=amax)
        pass
    pass
Example #9
def statistics(numpy_array):
    return {'mean'   : ma.mean(numpy_array),
            'median' : ma.median(numpy_array.real)+1j*ma.median(numpy_array.imag),
            'max'    : ma.max(abs(numpy_array)),
            'min'    : ma.min(abs(numpy_array)),
            'std'    : ma.std(numpy_array),
            'stdmean': ma.std(numpy_array)/sqrt(sum(logical_not(numpy_array.mask))-1)}
Example #10
def simulacao(tamanho_amostras):
    print(f"\n*Simulation with {tamanho_amostras} samples")
    numero_amostras = 100
    valor_minimo = 0
    valor_maximo = 10
    estimadores_momentos_valor_maximo = []
    erros_amostrais_estimadores_momentos = []
    estimadores_maxima_verossimilhanca_valor_maximo = []
    erros_amostrais_estimadores_maxima_verossimilhanca = []

    for i in range(numero_amostras):
        distribuicao_uniforme = np.random.uniform(low=valor_minimo,
                                                  high=valor_maximo,
                                                  size=tamanho_amostras)

        estimador_momentos = 2 * average(distribuicao_uniforme)
        estimadores_momentos_valor_maximo.append(estimador_momentos)
        erro_amostral_estimador_momentos = estimador_momentos - valor_maximo
        erros_amostrais_estimadores_momentos.append(
            erro_amostral_estimador_momentos)

        estimador_maxima_verossimilhanca = max(distribuicao_uniforme)
        estimadores_maxima_verossimilhanca_valor_maximo.append(
            estimador_maxima_verossimilhanca)
        erro_amostral_estimador_maxima_verossimilhanca = estimador_maxima_verossimilhanca - valor_maximo
        erros_amostrais_estimadores_maxima_verossimilhanca.append(
            erro_amostral_estimador_maxima_verossimilhanca)

    imprimir_resultados("Momentos", erros_amostrais_estimadores_momentos,
                        estimadores_momentos_valor_maximo, valor_maximo)
    imprimir_resultados("Máx. Veros.",
                        erros_amostrais_estimadores_maxima_verossimilhanca,
                        estimadores_maxima_verossimilhanca_valor_maximo,
                        valor_maximo)
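The simulation compares two classical estimators of the upper bound theta of a Uniform(0, theta) distribution: the method of moments (2 * sample mean, since E[X] = theta/2) and the maximum likelihood estimator (the sample maximum, which is always biased low). A condensed check of the same comparison:

import numpy as np

rng = np.random.default_rng(0)
theta = 10.0
sample = rng.uniform(0.0, theta, size=1000)
mom = 2 * sample.mean()   # method of moments
mle = sample.max()        # maximum likelihood (largest order statistic)
print(f"MoM: {mom:.3f}  MLE: {mle:.3f}")  # both close to 10; MLE always <= theta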
Example #11
def computeAveragesUsingNumpy():
    global sizeX, sizeY, sizeZ
    flattenedArrays = []

    for fileName in fileNames:
        fpath = os.path.join(basepath, fileName)
        print('processing %s' % fpath)

        year = fileName.split('_')[-1][:-4]

        dataset = gdal.Open(fpath)

        sumArray = ma.zeros((dataset.RasterYSize, dataset.RasterXSize))
        total = 0
        count = 0
        numBands = dataset.RasterCount

        for bandId in range(numBands):
            band = ma.masked_outside(dataset.GetRasterBand(bandId + 1).ReadAsArray(), VALUE_RANGE[0], VALUE_RANGE[1])
            sumArray += band

        sumArray /= numBands
        total = ma.sum(ma.sum(sumArray))
        count = sumArray.count()
        minCell = ma.min(sumArray)
        maxCell = ma.max(sumArray)
        sizeX = dataset.RasterXSize
        sizeY = dataset.RasterYSize

        flattenedArrays.append(np.ndarray.flatten(sumArray[::-1,:], 0).astype(np.dtype(np.int32)))

    sizeZ = len(flattenedArrays)

    return np.ma.concatenate(flattenedArrays)
Example #12
    def max_(self):
        """
        Calculates the maximum of the image over the segmentation

        :return: maximum of the masked image along axis 0
        """
        return ma.max(self.masked_img, 0)
Example #13
    def bbox(self, header: PoseHeader):
        data = ma.transpose(self.data, axes=POINTS_DIMS)

        # Split data by components, `ma` doesn't support ".split"
        components = []
        idx = 0
        for component in header.components:
            components.append(data[list(range(idx,
                                              idx + len(component.points)))])
            idx += len(component.points)

        boxes = [
            ma.stack([ma.min(c, axis=0), ma.max(c, axis=0)])
            for c in components
        ]
        boxes_cat = ma.concatenate(boxes)
        if type(boxes_cat.mask) == np.bool_:  # Sometimes, it doesn't concatenate the mask...
            boxes_mask = ma.concatenate([b.mask for b in boxes])
            boxes_cat = ma.array(boxes_cat, mask=boxes_mask)

        new_data = ma.transpose(boxes_cat, axes=POINTS_DIMS)

        confidence_mask = np.split(new_data.mask, [-1], axis=3)[0]
        confidence_mask = np.squeeze(confidence_mask, axis=-1)
        confidence = np.where(confidence_mask == True, 0, 1)

        return NumPyPoseBody(self.fps, new_data, confidence)
Example #14
    def plotHeatMap(self):
        import numpy.ma as ma
        import numpy
        dx, dy = self.root.height, 0
        fx, fy = self.root.height/len(self.data.domain.attributes), 1.0
        data, c, w = self.data.toNumpyMA()
        data = (data - ma.min(data))/(ma.max(data) - ma.min(data))
        x = numpy.arange(data.shape[1] + 1)/float(numpy.max(data.shape))
        y = numpy.arange(data.shape[0] + 1)/float(numpy.max(data.shape))*len(self.root)
        self.heatmap_width = numpy.max(x)

        X, Y = numpy.meshgrid(x, y - 0.5)

        self.meshXOffset = numpy.max(X)

        self.plt.jet()
        mesh = self.plt.pcolormesh(X, Y, data[self.root.mapping], edgecolor="b", linewidth=2)

        if self.plot_attr_names:
            names = [attr.name for attr in self.data.domain.attributes]
            self.plt.xticks(numpy.arange(data.shape[1] + 1)/float(numpy.max(data.shape)), names)
        self.plt.gca().xaxis.tick_top()
        for label in self.plt.gca().xaxis.get_ticklabels():
            label.set_rotation(45)

        for tick in self.plt.gca().xaxis.get_major_ticks():
            tick.tick1On = False
            tick.tick2On = False
Example #15
 def autoscale(self, A):
     '''
     Set *vmin*, *vmax* to min, max of *A*.
     '''
     A = ma.masked_less_equal(A, 0, copy=False)
     self.vmin = ma.min(A)
     self.vmax = ma.max(A)
Example #16
    def calc_rxndays(self, values, time_grid, threshold):
        """ Calculates Rxnday
        Arguments:
            values -- array of total precipitation
            time_grid -- time grid
            threshold -- number of days (n)
        Returns: Rxnday values
        """

        # First of all, we suppose values are for a month with some time step.
        # Initially, let's sum first 'threshold' days.
        # Then we slide a window along the time grid
        #  subtracting one day on the left and adding one day on the right.
        queue = deque()
        nday_sum = None
        max_sum = None
        it_all_data = groupby(zip(values, time_grid),
                              key=lambda x: (x[1].day, x[1].month))
        for _, one_day_group in it_all_data:  # Iterate over daily groups.
            daily_sum = self._calc_daily_sum(one_day_group)
            queue.append(daily_sum)  # Store daily sums in a queue.
            if nday_sum is None:
                nday_sum = ma.zeros(daily_sum.shape)
            nday_sum += daily_sum  # Additionally sum daily sums.
            if len(queue) > threshold:  # When 'threshold' days are summed...
                nday_sum -= queue.popleft()  # ...subtract one 'left-most' daily sum from the n-day sum.
            if len(queue) == threshold:
                if max_sum is None:
                    max_sum = deepcopy(nday_sum)
                else:
                    max_sum = ma.max(ma.stack((max_sum, nday_sum)),
                                     axis=0)  # Search for maximum value.

        return max_sum
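The deque acts as a fixed-width sliding window over daily sums: each step adds the newest day and, once the window holds 'threshold' days, drops the oldest. A scalar sketch of the same bookkeeping (the real code carries masked 2-D grids through nday_sum):

from collections import deque

daily = [3.0, 0.0, 7.0, 2.0, 5.0]   # daily precipitation sums
n = 2
queue, nday_sum, max_sum = deque(), 0.0, None
for d in daily:
    queue.append(d)
    nday_sum += d
    if len(queue) > n:
        nday_sum -= queue.popleft()
    if len(queue) == n:
        max_sum = nday_sum if max_sum is None else max(max_sum, nday_sum)
print(max_sum)  # -> 9.0, from the window [7.0, 2.0]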
Example #17
 def autoscale_None(self, A):
     if self.vmin is not None and self.vmax is not None:
         return
     if self.vmin is None:
         self.vmin = ma.min(A)
     if self.vmax is None:
         self.vmax = ma.max(A)
Example #18
def add_noise(infile, snr):
    '''
    :param infile: file to process
    :param snr: SNR, in decibels, which the output image contains vs input
    :return: File written to disc
    '''
    x = cv2.imread(infile).astype(float)
    x = ma.masked_less(x, 1)  # zeros give error
    uplim = ma.max(x)
    x = x / uplim
    v = ma.var(x) / (10**(snr / 10))
    if snr == 0:
        v = 0.00000001
    x_noise = (noi.random_noise(x, mode='gaussian', mean=0, var=v) *
               uplim).astype(np.uint8)
    if snr == 0:
        cv2.imwrite('%s' % (infile.replace('.png', '_snr%s.png' % (snr))),
                    x_noise)
    elif '.jpg' in infile:
        cv2.imwrite('%s' % (infile.replace('.jpg', '_snr%s.jpg' % (snr))),
                    x_noise)
    elif '.png' in infile:
        cv2.imwrite('%s' % (infile.replace('.png', '_snr%s.tif' % (snr))),
                    x_noise)
    elif '.tif' in infile:
        cv2.imwrite('%s' % (infile.replace('.tif', '_snr%s.tif' % (snr))),
                    x_noise)
Example #19
def prep_etccdi_variable(input_path, index_name, aggregation, data_source):
    ds = xr.open_dataset(input_path)

    # Omit final year (2010) of HADEX2 - suspiciously large CDD for Malaysia
    if data_source == 'HADEX2':
        ds = ds.sel(time=slice(datetime.datetime(1951, 1, 1, 0),
                               datetime.datetime(2009, 12, 31, 23)))

    # Calculate maximum rainfall value over whole period
    vals = ds[index_name].values
    if index_name in ['CWD', 'CDD']:
        vals = ds[index_name].values.astype('timedelta64[s]')
        vals = vals.astype('float32') / 86400.0
        vals[vals < 0.0] = np.nan
    vals = ma.masked_invalid(vals)
    if aggregation == 'max':
        data = ma.max(vals, axis=0)
    if aggregation == 'mean':
        data = ma.mean(vals, axis=0)

    # Convert back to an xarray DataArray for easy plotting
    # - the masked array seems to be interpreted as an np array (i.e. nans are
    # present in the xarray DataArray)
    data2 = xr.DataArray(data,
                         coords={
                             'Latitude': ds['lat'].values,
                             'Longitude': ds['lon'].values
                         },
                         dims=('Latitude', 'Longitude'),
                         name=index_name)

    ds.close()

    return data2
Example #20
    def _calc_cddcwd(self, values, threshold, calc_type):
        """ Calculates maximum number of consecutive days with daily values (precipitation) < 1mm or >= 1mm (CDD or CWD).
        """

        if calc_type == 'cdd':
            cmp_func = operator.lt
        elif calc_type == 'cwd':
            cmp_func = operator.ge
        else:
            self.logger.error('Unknown calculation type: %s. Aborting!',
                              calc_type)
            raise ValueError

        data_shape = values.shape[1:]
        cnt = ma.zeros(data_shape)
        max_cnt = ma.zeros(data_shape)
        for arr in values:
            mask = cmp_func(arr, threshold)
            cnt += mask  # Count consecutive days.
            cnt *= mask  # Reset counter where condition does not meet.
            max_cnt = ma.max(ma.stack((max_cnt, cnt)), axis=0)

        max_cnt.mask = values[
            0].mask  # Restore mask from the original data array.

        return max_cnt
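The two-line trick cnt += mask; cnt *= mask counts consecutive True days per cell: the addition extends every active run, and the multiplication zeroes the counters wherever the condition just failed. A small demonstration on a days-by-cells grid:

import numpy as np

wet = np.array([[1, 0],
                [1, 1],
                [1, 1],
                [0, 1]], dtype=float)   # 1 = condition met on that day
cnt = np.zeros(wet.shape[1])
max_cnt = np.zeros(wet.shape[1])
for day in wet:                          # iterate over the time axis
    cnt += day                           # extend each cell's current run
    cnt *= day                           # reset cells where the run breaks
    max_cnt = np.maximum(max_cnt, cnt)
print(max_cnt)                           # -> [3. 3.]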
Example #22
 def autoscale(self, A):
     """
     Set *vmin*, *vmax* to min, max of *A*.
     """
     self.vmin = ma.min(A)
     self.vmax = ma.max(A)
     self._transform_vmin_vmax()
Example #24
def _attvalues(attribute, stacked):
    """Attribute values computed in numpy.ma stack."""
    if attribute == "max":
        attvalues = ma.max(stacked, axis=2)
    elif attribute == "min":
        attvalues = ma.min(stacked, axis=2)
    elif attribute == "rms":
        attvalues = np.sqrt(ma.mean(np.square(stacked), axis=2))
    elif attribute == "var":
        attvalues = ma.var(stacked, axis=2)
    elif attribute == "mean":
        attvalues = ma.mean(stacked, axis=2)
    elif attribute == "maxpos":
        stacked = ma.masked_less(stacked, 0.0, copy=True)
        attvalues = ma.max(stacked, axis=2)
    elif attribute == "maxneg":  # ~ minimum of negative values?
        stacked = ma.masked_greater_equal(stacked, 0.0, copy=True)
        attvalues = ma.min(stacked, axis=2)
    elif attribute == "maxabs":
        attvalues = ma.max(abs(stacked), axis=2)
    elif attribute == "sumpos":
        stacked = ma.masked_less(stacked, 0.0, copy=True)
        attvalues = ma.sum(stacked, axis=2)
    elif attribute == "sumneg":
        stacked = ma.masked_greater_equal(stacked, 0.0, copy=True)
        attvalues = ma.sum(stacked, axis=2)
    elif attribute == "sumabs":
        attvalues = ma.sum(abs(stacked), axis=2)
    elif attribute == "meanabs":
        attvalues = ma.mean(abs(stacked), axis=2)
    elif attribute == "meanpos":
        stacked = ma.masked_less(stacked, 0.0, copy=True)
        attvalues = ma.mean(stacked, axis=2)
    elif attribute == "meanneg":
        stacked = ma.masked_greater_equal(stacked, 0.0, copy=True)
        attvalues = ma.mean(stacked, axis=2)
    else:
        etxt = "Invalid attribute applied: {}".format(attribute)
        raise ValueError(etxt)

    if not attvalues.flags["C_CONTIGUOUS"]:
        mask = ma.getmaskarray(attvalues)
        mask = np.asanyarray(mask, order="C")
        attvalues = np.asanyarray(attvalues, order="C")
        attvalues = ma.array(attvalues, mask=mask, order="C")

    return attvalues
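Attributes like "maxpos" work by masking out the unwanted sign before reducing, so the reduction only sees positive samples. A two-line illustration of that pattern:

import numpy as np
import numpy.ma as ma

stacked = np.array([[[-2.0, 1.0, 3.0, -4.0]]])   # traces along axis 2
pos = ma.masked_less(stacked, 0.0, copy=True)    # hide negative samples
print(ma.max(pos, axis=2))                       # [[3.0]]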
Example #25
    def getchanindex(self, chan):
        """

        """
        if ma.max(self._chans) >= chan >= ma.min(self._chans):
            return ma.where(self._chans == int(chan))[0][0]
        else:
            return -1
Example #27
 def autoscale_None(self, A):
     ' autoscale only None-valued vmin or vmax'
     if self.vmin is None and np.size(A) > 0:
         self.vmin = ma.min(A)
     if self.vmax is None and np.size(A) > 0:
         self.vmax = ma.max(A)
     if self.vcenter is None:
         self.vcenter = (self.vmax + self.vmin) * 0.5
Example #28
 def calc_chrom_fast(self, index, coords_vals):
     self.population[index]['fitness'] = \
     np.abs(self.array_mean - ma.mean(coords_vals[0])) + \
     np.abs(self.array_stdev - ma.std(coords_vals[0])) + \
     np.abs(self.array_range - (ma.max(coords_vals[0])-ma.min(coords_vals[0])))/10  + \
     np.abs((self.chromosome_size-1) - coords_vals[2]) #locations
      #~ print "Chromosome size: ", self.chromosome_size
      print("Number of locations is: ", coords_vals[2])
Example #29
 def autoscale_None(self, A):
     ' autoscale only None-valued vmin or vmax'
     if self.vmin is None:
         self.vmin = ma.min(A)
     if self.vmax is None:
         self.vmax = ma.max(A)
     if self.vmid is None:
         self.vmid = (self.vmax+self.vmin)/2.0
Example #31
    def autoscale_None(self, A):
        ' autoscale only None-valued vmin or vmax'
        if self.vmin is None and np.size(A) > 0:
            self.vmin = ma.min(A)

        if self.vmax is None and np.size(A) > 0:
            self.vmax = ma.max(A)
        self._transform_vmin_vmax()
Example #32
def normalizado(c):
    # Rescale to [0.1, 1]: shift by the minimum, then map the old range
    # onto the new one.
    c_min = np.min(c)
    c_max = np.max(c)
    old_range = c_max - c_min
    new_range = 1 - 0.1
    return ((c - c_min) * new_range) / old_range + 0.1
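A quick check of the rescaling (the input endpoints land on the endpoints of the [0.1, 1] target range):

c = np.array([2.0, 6.0, 10.0])
print(normalizado(c))  # -> [0.1  0.55 1.  ]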
Example #33
def _compute_variable_stats(variable, axis, weights, calc_avg, calc_min,
                            calc_max, calc_stddev, calc_count):
    '''
    Calculate statistics for a single variable.
    '''

    # Get the data out. Note: scale_factor and add_offset are automatically
    # applied.
    data = variable[:]
    # Make sure data is a masked array
    data = ma.masked_array(data)

    # Broadcast the weights before we try to combine the masks for data and
    # weights
    weights = ma.masked_array(data=np.broadcast_to(weights.data, data.shape),
                              mask=np.broadcast_to(ma.getmaskarray(weights),
                                                   data.shape))

    # We want all our calculations to happen over areas that are unmasked in
    # both the weights and data
    combined_mask = np.logical_or(ma.getmaskarray(data),
                                  ma.getmaskarray(weights))
    data = ma.masked_array(data.data, mask=combined_mask)
    weights = ma.masked_array(weights.data, mask=combined_mask)

    out = {}
    if calc_count:
        # Irritatingly, the ma.count function can only take one value at a time
        # for the axis. So, instead, construct an array of ones
        ones = np.ones(data.shape)
        # Set the masked areas to 0
        ones[combined_mask] = 0
        out["count"] = ma.sum(ones, axis=axis)
    if calc_min:
        out["min"] = ma.min(data, axis=axis)
    if calc_max:
        out["max"] = ma.max(data, axis=axis)

    # Note: standard deviation needs the weighted average and the weights sum
    if calc_avg or calc_stddev:
        sum_weights = _add_axes_back(ma.sum(weights, axis=axis), axis)

        weighted_avg_numerator = _add_axes_back(
            ma.sum(weights * data, axis=axis), axis)
        weighted_avg = weighted_avg_numerator / sum_weights

        if calc_avg:
            out["avg"] = ma.squeeze(weighted_avg, axis=axis)

    if calc_stddev:
        # calculate the anomaly
        anomaly = data - weighted_avg

        # calculate the standard deviation
        variance = ma.sum(weights * (anomaly**2) / sum_weights, axis=axis)
        out["stddev"] = np.sqrt(variance)

    return out
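The function builds its weighted average by hand so the data and weight masks can be combined explicitly. For the simple case of unmasked weights, ma.average collapses the same computation into one call; a quick comparison point:

import numpy as np
import numpy.ma as ma

data = ma.masked_invalid(np.array([1.0, 2.0, np.nan, 4.0]))
weights = np.array([1.0, 1.0, 1.0, 2.0])
avg, wsum = ma.average(data, weights=weights, returned=True)
print(avg, wsum)  # -> 2.75 4.0 (the masked entry contributes nothing)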
Example #34
    def create_graph(self, iv=None, dv=None, my_data=None, result=None):
        try:
            self.fig.delaxes(self.ax)
        except AttributeError:
            pass

        if len(iv) == 1:  # plot iff we have a single independent variable
            self.ax = self.fig.add_subplot(111)
            self.ax.set_xlabel(iv[0]['name'])
            self.ax.set_ylabel(dv[0]['name'])
            axis_type = self.axis_type.GetString(self.axis_type.GetSelection())

            # Type of axis
            if axis_type == 'Fit All':
                self.ax.set_xlim(iv[0]['min'], iv[0]['max'])
                self.ax.set_ylim(dv[0]['min'], dv[0]['max'])
            elif axis_type == 'Zoom':
                self.plot_zoom_graph(iv[0]['min'], iv[0]['max'], dv[0]['min'],
                                     dv[0]['max'])

            self.zoom.set_bounds_absolute(iv[0]['min'], iv[0]['max'],
                                          dv[0]['min'], dv[0]['max'])

            self.ax.grid()

            dv_plot_data = my_data[0]
            iv_plot_data = my_data[1]

            plot_type = self.plot_type.GetString(self.plot_type.GetSelection())
            if plot_type == 'Scatterplot':
                # adjust marker size and alpha based on how many points we're plotting
                marker_size = mpl.rcParams['lines.markersize']**2
                marker_size *= min(1, max(.12, 200 / len(iv_plot_data)))
                alpha = min(1, max(.002, 500 / len(iv_plot_data)))
                self.ax.scatter(iv_plot_data,
                                dv_plot_data,
                                s=marker_size,
                                alpha=alpha)
            else:  # heatmap
                bins = 200
                heatmap, iv_edges, dv_edges = np.histogram2d(iv_plot_data,
                                                             dv_plot_data,
                                                             bins=bins)
                x_min, x_max = iv_edges[0], iv_edges[-1]
                y_min, y_max = dv_edges[0], dv_edges[-1]
                self.ax.imshow(np.log(heatmap.transpose() + 1),
                               extent=[x_min, x_max, y_min, y_max],
                               cmap='Blues',
                               origin='lower',
                               aspect='auto')

            # plot regression line
            extent = [ma.min(iv_plot_data), ma.max(iv_plot_data)]
            intercept, slope = result.params[0:2]
            self.ax.plot(extent, [intercept + slope * x for x in extent],
                         'r--')
            self.fig.tight_layout()
            self.canvas.draw()
Example #35
 def autoscale_None(self, A):
     " autoscale only None-valued vmin or vmax"
     if self.vmin is not None and self.vmax is not None:
         return
     A = ma.masked_less_equal(A, 0, copy=False)
     if self.vmin is None:
         self.vmin = ma.min(A)
     if self.vmax is None:
         self.vmax = ma.max(A)
Example #36
 def autoscale_None(self, A):
     """ autoscale only None-valued vmin or vmax """
     if self.vmin is not None and self.vmax is not None:
         pass
     if self.vmin is None:
         self.vmin = ma.min(A)
     if self.vmax is None:
         self.vmax = ma.max(A)
     self._transform_vmin_vmax()
Example #38
    def to_plot(vname, axis, cb_axis, cmap_water):
        var_water, data_units = read_var(vname)
        v = var_water[:, start:stop]
        from matplotlib import cm
        mm = np.max(v)
        mm2 = np.min(v)
        mm_tot = round(max(abs(mm), abs(mm2)), 4)
        levels_wat = MaxNLocator(nbins=25).tick_values(mm2, mm)

        if vname == 'B_CH4_CH4' and mm_tot > 1000:

            #norm = cm.colors.Normalize(vmax=2400, vmin=0)
            #levels_wat = MaxNLocator(nbins=25).tick_values(0,2400) #2400)
            CS = axis.contourf(X_water,
                               Y_water,
                               v,
                               levels=levels_wat,
                               cmap=cmap_water)
            '''elif vname == 'B_BIO_O2_rel_sat':
                #norm = cm.colors.Normalize(vmax=2400, vmin=0)
                levels_wat = MaxNLocator(nbins=50).tick_values(0,-100)             
                CS = axis.contourf(X_water,Y_water,v,levels = levels_wat,
                    cmap = cmap_water)

                levels_wat2 = MaxNLocator(nbins=100).tick_values(0,-100)             
                CS2 = axis.contour(X_water,Y_water,v,levels = levels_wat2,colors = 'k',linewidths=0.2)      '''
        else:
            CS = axis.contourf(X_water,
                               Y_water,
                               v,
                               levels=levels_wat,
                               cmap=cmap_water)

        if (mm * mm2) < 0:
            # If changes over 0
            CS = axis.contourf(X_water,
                               Y_water,
                               v,
                               10,
                               vmin=-mm_tot,
                               vmax=mm_tot,
                               cmap=plt.get_cmap('coolwarm'))
            CS_1 = axis.contour(X_water,
                                Y_water,
                                v, [0],
                                linewidths=0.2,
                                colors='k')

        cb1 = add_colorbar(CS, cb_axis)

        ma1 = ma.max(v)

        axis.set_ylim(max_water, min_water)
        axis.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
        axis.yaxis.set_label_coords(-0.06, 0.5)
        axis.set_ylabel('Depth,m', fontsize=fontsize)
Example #40
    def weighted_average(self, axis=0, expaxis=None):
        """ Calculate weighted average of data along axis
            after optionally inserting a new dimension into the
            shape array at position expaxis
        """

        if expaxis is not None:
            vals = ma.expand_dims(self.vals, expaxis)
            dmin = ma.expand_dims(self.dmin, expaxis)
            dmax = ma.expand_dims(self.dmax, expaxis)
            wt = ma.expand_dims(self.wt, expaxis)
        else:
            vals = self.vals
            wt = self.wt
            dmin = self.dmin
            dmax = self.dmax

        # Get average value
        avg, norm = ma.average(vals, axis=axis, weights=wt, returned=True)
        avg_ex = ma.expand_dims(avg, 0)

        # Calculate weighted uncertainty
        wtmax = ma.max(wt, axis=axis)
        neff = norm / wtmax  # Effective number of samples based on uncertainties

        # Seeking max deviation from the average; if above avg use max, if below use min
        term = np.empty_like(vals)

        indices = np.where(vals > avg_ex)
        i0 = indices[0]
        irest = indices[1:]
        ii = tuple(x for x in itertools.chain([i0], irest))
        jj = tuple(x for x in itertools.chain([np.zeros_like(i0)], irest))
        term[ii] = (dmax[ii] - avg_ex[jj])**2

        indices = np.where(vals <= avg_ex)
        i0 = indices[0]
        irest = indices[1:]
        ii = tuple(x for x in itertools.chain([i0], irest))
        jj = tuple(x for x in itertools.chain([np.zeros_like(i0)], irest))
        term[ii] = (avg_ex[jj] - dmin[ii])**2

        dsum = ma.sum(term * wt,
                      axis=0)  # Sum for weighted average of deviations

        dev = 0.5 * np.sqrt(dsum / (norm * neff))

        if isinstance(avg, (float, np.floating)):
            avg = avg_ex

        tmp_min = avg - dev
        ii = np.where(tmp_min < 0)
        tmp_min[ii] = TOL * avg[ii]

        return UncertContainer(avg, tmp_min, avg + dev)
Example #41
def custom_range_for_CNN(r4_array, min_max, mean_centre=False):
    """ Rescale a rank 4 array so that each channel's image lies in custom range
    e.g. input with range of (-5, 15) is rescaled to (-125 125) or (-1 1) for use with VGG16.  
    Designed for use with masked arrays.  
    Inputs:
        r4_array | r4 masked array | works with masked arrays?  
        min_max | dict | 'min' and 'max' of range desired as a dictionary.  
        mean_centre | boolean | if True, each image's channels are mean centered.  
    Returns:
        r4_array | rank 4 numpy array | masked items are set to zero, rescaled so that each channel for each image lies between min_max limits.  
    History:
        2019/03/20 | now includes mean centering so doesn't stretch data to custom range.  
                    Instead only stretches until either min or max touches, whilst mean is kept at 0
        2020/11/02 | MEG | Update so range can have a min and max, and not just a range
        2021/01/06 | MEG | Update to work with masked arrays.  Not tested with normal arrays.
    """
    import numpy as np
    import numpy.ma as ma

    if mean_centre:
        im_channel_means = ma.mean(
            r4_array,
            axis=(1,
                  2))  # get the average for each image (in all three channels)
        im_channel_means = expand_to_r4(im_channel_means, r4_array[
            0, :, :,
            0].shape)  # expand to r4 so we can do elementwise manipulation
        r4_array -= im_channel_means  # do mean centering

    im_channel_min = ma.min(
        r4_array,
        axis=(1, 2))  # get the minimum of each image and each of its channels
    im_channel_min = expand_to_r4(
        im_channel_min,
        r4_array[0, :, :,
                 0].shape)  # expand to rank 4 for elementwise applications
    r4_array -= im_channel_min  # set so lowest channel for each image is 0

    im_channel_max = ma.max(
        r4_array,
        axis=(1, 2))  # get the maximum of each image and each of its channels
    im_channel_max = expand_to_r4(
        im_channel_max,
        r4_array[0, :, :,
                 0].shape)  # make suitable for elementwise applications
    r4_array /= im_channel_max  # should now be in range [0, 1]

    r4_array *= (min_max['max'] - min_max['min']
                 )  # should now be in range [0, new max-min]
    r4_array += min_max['min']  # and now in range [new min, new max]
    r4_nparray = r4_array.filled(
        fill_value=0
    )  # convert to numpy array, masked incoherent areas are set to zero.

    return r4_nparray
Example #42
    def autoscale(self, A):
        """
        Set *vmin*, *vmax* to min, max of *A*.
        """
        self.vmin = ma.min(A)
        if self.vmin < 0:
            self.vmin = 0
            warnings.warn("Power-law scaling on negative values is "
                          "ill-defined, clamping to 0.")

        self.vmax = ma.max(A)
Example #44
 def get_mask(self):
     self.array_mean = ma.mean(self.array)
     self.array_stdev = ma.std(self.array)
     self.array_range = ma.max(self.array) - ma.min(self.array)
     print "The mean is %f, the stdev is %f, the range is %f." %(self.array_mean, self.array_stdev, self.array_range)
     from scipy.io.netcdf import netcdf_file as NetCDFFile
     ### get landmask
     nc = NetCDFFile(os.getcwd()+ '/../data/netcdf_files/ORCA2_landmask.nc','r')
     self.mask = ma.masked_values(nc.variables['MASK'][:, :self.time_len, :self.lat_len, :180], -9.99999979e+33)
     nc.close()
     self.xxx, self.yyy, self.zzz = np.lib.index_tricks.mgrid[0:self.time_len, 0:self.lat_len, 0:180]
Example #45
    def autoscale_None(self, A):
        ' autoscale only None-valued vmin or vmax'
        if self.vmin is None and np.size(A) > 0:
            self.vmin = ma.min(A)
            if self.vmin < 0:
                self.vmin = 0
                warnings.warn("Power-law scaling on negative values is "
                              "ill-defined, clamping to 0.")

        if self.vmax is None and np.size(A) > 0:
            self.vmax = ma.max(A)
Example #46
 def __call__(self, value):
     if self.vmin is None:
         self.vmin = ma.min(value)
         self.vmax = ma.max(value)
      result = ma.array(value).astype(float)
     # ma division is slow, take a shortcut
     resdat = result.data
     resdat -= self.vmin
     resdat /= (self.vmax - self.vmin)
     # remask
     result = ma.array(resdat, mask=result.mask, copy=False)
     return result
Example #47
File: som.py Project: Zekom/orange
    def initialize_map_random(self, data=None, dimension=5):
        """Initialize the map nodes vectors randomly, by supplying
        either training data or dimension of the data.
        
        """
        if data is not None:
            min, max = ma.min(data, 0), ma.max(data, 0)
            dimension = data.shape[1]
        else:
            min, max = numpy.zeros(dimension), numpy.ones(dimension)
        for node in self:
#            node.vector = min + numpy.random.rand(dimension) * (max - min)
            node.vector = min + random.randint(0, dimension) * (max - min)
Example #48
def normalize(x):
    """ Rescale all numeric values in range [0, 1].

    Input must be a numpy ndarray, no coercion is tried.

    :param x: numpy ndarray to be rescaled.
    :return: numpy ndarray with rescaled values.
    """
    if type(x) is not np.ndarray and type(x) is not ma.core.MaskedArray:
        raise TypeError("x must be a numpy.ndarray or numpy.ma.MaskedArray")

    # NOTE: the approach commented out below would be more memory efficient,
    # but doesn't work as such with masked arrays
    # np.true_divide(x, np.max(np.abs(x)), out=x, casting='unsafe')
    # Data may have negative values, thus first add the abs(min(x)) to
    # everything.
    x_min = ma.min(x)
    x_max = ma.max(x)
    return (x - x_min) / (x_max - x_min)
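Usage of normalize on a masked array: masked entries are ignored by ma.min/ma.max and stay masked in the result:

import numpy as np
import numpy.ma as ma

x = ma.masked_invalid(np.array([2.0, np.nan, 6.0, 10.0]))
print(normalize(x))  # -> [0.0 -- 0.5 1.0]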
Example #49
def search_spurious(data, pol, delta):
    info = list()
    _data = data.getAll(pol=pol)
    max_data  = ma.max(_data, axis=1)
    mean_data = ma.mean(_data, axis=1)
    median_spec = ma.mean(max_data, axis=0)
    peaks = cSearchPeak(median_spec)
    if not peaks.valid_data:
        return (info)

    # first mask peaks available in all data
    peaks.search(delta=(delta/2.0))  # delta=20 for HBA
    for peak, min_sb, max_sb in peaks.max_peaks:
        peakwidth = max_sb - min_sb
        if peakwidth > 8:
            continue
        min_sb = max(min_sb-1, 0)
        max_sb = min(max_sb+1, peaks.n_data-1)
        logger.debug("mask sb %d..%d" %(min_sb, max_sb))
        for i in range(min_sb, max_sb, 1):
            mean_data[:,i] = ma.masked

    # search in all data for spurious
    for rcu in sorted(data.getActiveRcus(pol)):
        rcu_bin = rcu
        if pol not in ('XY', 'xy'):
            rcu_bin //= 2  # integer division: two RCUs per antenna bin
        logger.debug("rcu=%d  rcu_bin=%d" %(rcu, rcu_bin))
        peaks = cSearchPeak(mean_data[rcu_bin,:])
        if peaks.valid_data:
            peaks.search(delta=delta)
            for peak, min_sb, max_sb in peaks.max_peaks:
                peakwidth = max_sb - min_sb
                if peakwidth > 10:
                    continue
                peak_val = peaks.getPeakValue(peak)
                if peakwidth < 100 and peak_val != NaN:
                    logger.debug("rcu_bin=%d: spurious, subband=%d..%d, peak=%3.1fdB" %(rcu_bin, min_sb, max_sb, peak_val))
            if peaks.nMaxPeaks() > 10:
                #print rcu_bin, peaks.nMaxPeaks()
                info.append(rcu_bin)
    return(info)
Example #50
def search_spurious(data, delta):
    global logger
    info = list()
    _data = data.copy()
    max_data  = ma.max(_data, axis=1)
    mean_data = ma.mean(_data, axis=1)
    median_spec = ma.mean(max_data, axis=0)
    peaks = cSearchPeak(median_spec)
    if not peaks.valid_data:
        return (info)
    
    # first mask peaks available in all data
    peaks.search(delta=(delta/2.0))  # delta=20 for HBA
    for peak in peaks.max_peaks:
        min_sb, max_sb = peaks.getPeakWidth(peak, delta/2.0)
        if (max_sb - min_sb) > 8:
            continue
        min_sb = max(min_sb-1, 0)
        max_sb = min(max_sb+1, peaks.n_data-1)
        logger.debug("mask sb %d..%d" %(min_sb, max_sb))
        for i in range(min_sb, max_sb, 1):        
            mean_data[:,i] = ma.masked
    
    # search in all data for spurious 
    for rcu in range(_data.shape[0]):
        peaks = cSearchPeak(mean_data[rcu,:])
        if peaks.valid_data:
            peaks.search(delta=delta)
            for peak in peaks.max_peaks:
                min_sb, max_sb = peaks.getPeakWidth(peak, delta)
                if (max_sb - min_sb) > 10:
                    continue
                peak_val = peaks.getPeak(peak)
                if (max_sb - min_sb) < 100 and peak_val != NaN:
                    logger.debug("RCU=%d: spurious, subband=%d..%d, peak=%3.1fdB" %(rcu, min_sb, max_sb, peak_val))
            if peaks.nMaxPeaks() > 10:
                #print rcu, peaks.nMaxPeaks()                    
                info.append(rcu)
    return(info)
Example #51
def regridToCoarse(fine, fac, mode, missValue):
    nr, nc = np.shape(fine)
    # integer division so the shape stays integral under Python 3
    coarse = np.zeros(nr//fac * nc//fac).reshape(nr//fac, nc//fac) + MV
    nr, nc = np.shape(coarse)
    for r in range(0, nr):
        for c in range(0, nc):
            ar = fine[r * fac: fac * (r+1), c * fac: fac * (c+1)]
            m = np.ma.masked_values(ar, missValue)
            if ma.count(m) == 0:
                coarse[r, c] = MV
            else:
                if mode == 'average':
                    coarse[r, c] = ma.average(m)
                elif mode == 'median':
                    coarse[r, c] = ma.median(m)
                elif mode == 'sum':
                    coarse[r, c] = ma.sum(m)
                elif mode == 'min':
                    coarse[r, c] = ma.min(m)
                elif mode == 'max':
                    coarse[r, c] = ma.max(m)
    return coarse
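When the factor divides the grid evenly, the same aggregation can be vectorized by folding each fac-by-fac block onto its own axes and reducing over them; a sketch of that alternative (not part of the original function):

import numpy as np
import numpy.ma as ma

fine = ma.masked_values(np.arange(16.0).reshape(4, 4), 5.0)
fac = 2
nr, nc = fine.shape
blocks = fine.reshape(nr // fac, fac, nc // fac, fac)
coarse_mean = ma.mean(ma.mean(blocks, axis=3), axis=1)  # per-block average
coarse_max = ma.max(ma.max(blocks, axis=3), axis=1)     # per-block maximum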
Example #52
    def peak(self,chanrange=None):
        """Return the peak intensity in the given channel range
           If a mask exists, this function operates on the masked spectrum.

           Parameters
           ----------
           chanrange: range of channels over which to compute the peak
                      [startchan, endchan]

           Returns
           ----------
           Maximum of the absolute value of the spectrum in the channel range
                     max(abs(spectrum[startchan:endchan]))
        """
        s = self.spec()
        chupper = len(s)-1
        chanrange = self._sanitizechanrange(chanrange,chupper)

        # Handle one-channel ranges.
        if (chanrange[0] == chanrange[1]):
           return s[chanrange[0]]

        return ma.max(ma.abs(s[chanrange[0]:chanrange[1]]))
Example #53
    def algorithm4(self, Gamma0, M):
        Gamma = Gamma0
        U, s, V = svd(inv(array(Gamma)))
        self.d1 = [sqrt(s[0])]
        self.d2 = [sqrt(s[1])]
        self.theta = [arccos(U[1,1])]
        for i in range(len(self.T)-1):
            
            def sdp(c):
                # For fixed c solve the semidefinite program for Algorithm 4
                
                # Variable Gamma_plus (for \Gamma_{i+1})
                Gamma_plus = variable(2,2,name='Gamma_plus')

                # Constraints
                c0 = belongs(Gamma_plus, semidefinite_cone)
                c1 = belongs(Gamma - Gamma_plus, semidefinite_cone)
                c2 = belongs(2*c*Gamma_plus - Gamma_plus*J - J.T*Gamma_plus, semidefinite_cone)

                # Objective function
                obj = -exp(-2*c*dT)*det_rootn(Gamma_plus)
                
                # Find solution
                p = program(minimize(obj), [c0, c1, c2])
                return p.solve(quiet = True)

            def f_Gamma(c):
                # Once the optimal c is found, find the ellipsoid shape matrix
                
                # Variable Gamma_plus (for \Gamma_{i+1})
                Gamma_plus = variable(2,2,name='Gamma_plus')
                
                # Constraints
                c0 = belongs(Gamma_plus, semidefinite_cone)
                c1 = belongs(Gamma - Gamma_plus, semidefinite_cone)
                c2 = belongs(2*c*Gamma_plus - Gamma_plus*J - J.T*Gamma_plus, semidefinite_cone)
                
                # Objective function
                obj = -exp(-2*c*dT)*det_rootn(Gamma_plus)
                
                # Find solution
                p = program(minimize(obj), [c0, c1, c2])
                p.solve(quiet = True)
                return Gamma_plus.value
                    
            # Search for c solving optimization problem using minimize_scalar to minimize function sdp
            J = matrix(Jac(self.T[i],self.X[i]))
            dT = self.T[i+1] - self.T[i]
            cmin = max(diag(J))
            cmax = cmin+1
            res = minimize_scalar(sdp, bounds=(cmin, cmax), method='bounded')
            cstar = res.x
            
            # Update Gamma
            Gamma = exp(-2*cstar*dT)*f_Gamma(cstar)
                    
            # Use Gamma to find width, height and angle of ellipsoid
            U, s, V = svd(inv(array(Gamma)))
            self.d1.append(sqrt(s[0]))
            self.d2.append(sqrt(s[1]))
            self.theta.append(arccos(U[1,1]))
                    
            # Update on our progress
            if i%50 == 0:
                print('step', i, 'of', num_points)
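The variable/belongs/program calls above come from the older CVXMOD API. Below is a rough re-statement of the inner SDP for a fixed c in modern CVXPY, shown only as a sketch with made-up values for J, Gamma and c; for fixed c the factor exp(-2*c*dT) is a constant and is dropped, and log_det replaces det_rootn (both are monotone in the determinant, so the maximizer is unchanged):

import cvxpy as cp
import numpy as np

J = np.array([[0.0, 1.0], [-1.0, -0.5]])   # example Jacobian
Gamma = np.eye(2)                           # current ellipsoid shape matrix
c = 0.5

Gamma_plus = cp.Variable((2, 2), PSD=True)
M = 2*c*Gamma_plus - Gamma_plus @ J - J.T @ Gamma_plus
constraints = [Gamma - Gamma_plus >> 0,     # Gamma_plus contained in Gamma
               (M + M.T) / 2 >> 0]          # contraction condition at rate c
problem = cp.Problem(cp.Maximize(cp.log_det(Gamma_plus)), constraints)
problem.solve()
print(Gamma_plus.value)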
Example #54
 def autoscale(self, A):
     '''
     Set *vmin*, *vmax* to min, max of *A*.
     '''
     self.vmin = ma.min(A)
     self.vmax = ma.max(A)
Example #55
def measure(mode, x, y, x0, x1, thresh = 0):
    """ return the a measure of y in the window x0 to x1
    """
    xt = x.view(numpy.ndarray) # strip Metaarray stuff -much faster!
    v = y.view(numpy.ndarray)
    
    xm = ma.masked_outside(xt, x0, x1).T
    ym = ma.array(v, mask = ma.getmask(xm))
    if mode == 'mean':
        r1 = ma.mean(ym)
        r2 = ma.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p': # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std': # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var': # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum': # cumulative sum
        r1 = ma.cumsum(ym) # Note: returns an array
        r2 = 0
    if mode == 'anom': # anomalies = difference from average
        r1 = ma.anom(ym) # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym)/(ma.max(xm)-ma.min(xm))
        r2 = 0
    if mode == 'latency': # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        return(0, 0)  # disabled: the slope-measurement code below is unreachable
        slope = numpy.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win)/20) # look over small ranges
        for k in win: # move through the slope measurementwindow
            tb = range(k-st, k+st) # get tb array
            newa = numpy.array(self.dat[i][j, thisaxis, tb])
            ppars = numpy.polyfit(x[tb], ym[tb], 1) # do a linear fit - smooths the slope measures
            slope = numpy.append(slope, ppars[0]) # keep track of max slope
        r1 = numpy.amax(slope)
        r2 = numpy.argmax(slope)
    return(r1, r2)
Example #56
nc.close()
data_cflux_5day = ma.masked_values(data_cflux_5day, masked_value)
data_cflux_5day = ma.array(data_cflux_5day, dtype=np.float32)



data_name = data_cflux_5day_name
data = data_cflux_5day*unit_changer
year_stack = np.split(data, 10)
year_stack = ma.array(year_stack)
print "Year stack has shape: ", np.shape(year_stack)

decadal_mean = ma.mean(data, 0)
dec_mean = ma.mean(decadal_mean)
dec_stdev = ma.std(decadal_mean)
dec_range = np.abs(ma.max(decadal_mean) - ma.min(decadal_mean))
samples = []
for item in coords_and_values:
    samples.append(decadal_mean[ item[1], item[2]])

samples = ma.array(samples)
samples_mean = ma.mean(samples)
samples_stdev = ma.std(samples)
samples_range = np.abs(ma.max(samples) - ma.min(samples))
original_fitness = np.abs(dec_mean - samples_mean) + np.abs(dec_stdev - samples_stdev) \
 + np.abs(dec_range - samples_range) 
   

#~ x = 0
year_sample_dict_data = {}
year_sample_list_data = {}
Example #57
 def autoscale_None(self, A):
     " autoscale only None-valued vmin or vmax"
     if self.vmin is None:
         self.vmin = ma.min(A)
     if self.vmax is None:
         self.vmax = ma.max(A)