Code Example #1
File: google_earth_tools.py Project: DomoCat/PyAMPR
def gearth_fig(llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, pixels=1024):
    """
    Return a Matplotlib `fig` and `ax` handles for a Google-Earth Image.
    TJL - Obtained from 
    http://ocefpaf.github.io/python4oceanographers/blog/2014/03/10/gearth/
    
    """
    aspect = np.cos(np.mean([llcrnrlat, urcrnrlat]) * np.pi/180.0)
    xsize = np.ptp([urcrnrlon, llcrnrlon]) * aspect
    ysize = np.ptp([urcrnrlat, llcrnrlat])
    aspect = ysize / xsize
    
    if aspect > 1.0:
        figsize = (10.0 / aspect, 10.0)
    else:
        figsize = (10.0, 10.0 * aspect)
    
    if False:
        plt.ioff()  # Make `True` to prevent the KML components from popping-up.
    fig = plt.figure(figsize=figsize, frameon=False, dpi=pixels//10)
    # KML friendly image.  If using basemap try: `fix_aspect=False`.
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_xlim(llcrnrlon, urcrnrlon)
    ax.set_ylim(llcrnrlat, urcrnrlat)
    return fig, ax
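A minimal usage sketch for the helper above (assuming the usual `import numpy as np` and `import matplotlib.pyplot as plt`, and an invented bounding box; the saved PNG would then be referenced from a KML GroundOverlay):

import numpy as np
import matplotlib.pyplot as plt

# hypothetical corner coordinates (lon/lat) for the overlay
fig, ax = gearth_fig(-95.0, 29.0, -94.0, 30.0, pixels=1024)
ax.plot([-94.8, -94.2], [29.2, 29.8], 'r-')   # draw in lon/lat data coordinates
fig.savefig('overlay.png', transparent=True)  # image for the KML ground overlay
plt.close(fig)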
Code Example #2
File: rfimitigator.py Project: mtlam/PyPulse
    def zap_minmax(self,windowsize=20,threshold=4):
        '''
        Run NANOGrav algorithm, median zapping. Run per subintegration
        windowsize = 20 frequency bins long
        threshold = 4 sigma
        '''
        if not self.can_mitigate():
            return


        nsubint = self.archive.getNsubint()
        nchan = self.archive.getNchan()

        # Prepare data
        data = self.archive.getData(squeeze=False)
        spavg = self.archive.spavg #SinglePulse average profile, no need to invoke creating more SinglePulse instances
        opw = spavg.opw
        
        if nchan <= windowsize:
            for i in xrange(nsubint):
                for j in xrange(nchan):
                    subdata = data[i,0,:,opw] 
                    compptp = np.ptp(data[i,0,j,opw])
                    ptps = np.zeros(windowsize)
                    for k in xrange(windowsize):
                        ptps[k] = np.ptp(subdata[k,:])
                

                    med = np.median(ptps)
                    if compptp > threshold*med:
                        self.zap(f=j)
            return

        
        for i in xrange(nsubint):
            for j in xrange(nchan):
                low = j - windowsize//2
                high = j + windowsize//2

                if low < 0:
                    high = abs(low)
                    low = 0
                elif high > nchan:
                    diff = high - nchan
                    high -= diff
                    low -= diff

                subdata = data[i,0,low:high,opw] 
                compptp = np.ptp(data[i,0,j,opw])
                ptps = np.zeros(windowsize)
                for k in xrange(windowsize):
                    ptps[k] = np.ptp(subdata[k,:])
                    
                #ptps = np.array(map(lambda subdata: np.ptp(subdata),data[i,0,low:high,opw]))

                med = np.median(ptps)
                if compptp > threshold*med:
                    self.zap(f=j)
                
        return
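The core of the min/max zapping above can be sketched without the PyPulse classes; the toy version below (illustrative names only, not the PyPulse API) flags channels whose peak-to-peak value exceeds `threshold` times the median peak-to-peak of a surrounding window of channels:

import numpy as np

def flag_bad_channels(data, windowsize=20, threshold=4):
    # data: (nchan, nbin) array; returns indices of channels to zap
    nchan = data.shape[0]
    ptps = np.ptp(data, axis=1)              # peak-to-peak of every channel
    bad = []
    for j in range(nchan):
        low = max(0, j - windowsize // 2)
        high = min(nchan, low + windowsize)
        low = max(0, high - windowsize)      # keep the window the same length at the upper edge
        med = np.median(ptps[low:high])
        if ptps[j] > threshold * med:
            bad.append(j)
    return bad

rng = np.random.default_rng(0)
data = rng.normal(size=(64, 256))
data[10] += 50 * np.sin(np.linspace(0, 6, 256))  # inject strong interference into one channel
print(flag_bad_channels(data))                   # expected to flag channel 10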
Code Example #3
File: GenericTransport.py Project: PMEAL/OpenPNM
 def _get_domain_area(self, inlets=None, outlets=None):
     logger.warning('Attempting to estimate inlet area...will be low')
     network = self.project.network
     # Abort if network is not 3D
     if np.sum(np.ptp(network['pore.coords'], axis=0) == 0) > 0:
         raise Exception('The network is not 3D, specify area manually')
     if inlets is None:
         inlets = self._get_inlets()
     if outlets is None:
         outlets = self._get_outlets()
     inlets = network['pore.coords'][inlets]
     outlets = network['pore.coords'][outlets]
     if not iscoplanar(inlets):
         logger.error('Detected inlet pores are not coplanar')
     if not iscoplanar(outlets):
         logger.error('Detected outlet pores are not coplanar')
     Nin = np.ptp(inlets, axis=0) > 0
     if Nin.all():
         logger.warning('Detected inlets are not oriented along a '
                        + 'principle axis')
     Nout = np.ptp(outlets, axis=0) > 0
     if Nout.all():
         logger.warning('Detected outlets are not oriented along a '
                        + 'principle axis')
     hull_in = ConvexHull(points=inlets[:, Nin])
     hull_out = ConvexHull(points=outlets[:, Nout])
     if hull_in.volume != hull_out.volume:
         logger.error('Inlet and outlet faces are different area')
     area = hull_in.volume  # In 2D volume=area, area=perimeter
     return area
Code Example #4
def preprocess():
    numberTrain = 50000
    numberAttribute = 784
    with open('AI_quick_draw.pickle', 'rb') as open_ai_quick:
        train_data1 = pickle.load(open_ai_quick)
        train_label1 = pickle.load(open_ai_quick)
        test_data = pickle.load(open_ai_quick)
        test_label = pickle.load(open_ai_quick)
    train_data1 = train_data1.astype(np.float64) / 255.0
    test_data = test_data.astype(np.float64) / 255.0
    permutation = np.random.permutation(range(train_data1.shape[0]))
    validation_data = train_data1[permutation[numberTrain:], :]
    validation_label = train_label1[permutation[numberTrain:]]
    train_data = train_data1[permutation[0:numberTrain], :]
    train_label = train_label1[permutation[0:numberTrain]]
    toRemove = []
    for i in range(numberAttribute):
        if np.ptp(train_data[:, i]) == 0.0 and \
                        np.ptp(validation_data[:, i]) == 0.0:
            toRemove.append(i)
    train_data = np.delete(train_data, toRemove, axis=1)
    test_data = np.delete(test_data, toRemove, axis=1)
    validation_data = np.delete(validation_data, toRemove, axis=1)
    print("Preprocessing Done!")
    return train_data, train_label, validation_data, validation_label, test_data, test_label
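The pruning loop above works because `np.ptp` of a constant column is zero; a vectorized sketch of the same idea (array names invented for illustration):

import numpy as np

X_train = np.array([[0.0, 1.0, 5.0], [0.0, 2.0, 5.0], [0.0, 3.0, 5.0]])
X_valid = np.array([[0.0, 4.0, 5.0], [0.0, 5.0, 5.0]])

# a feature that never varies in either split carries no information
constant = (np.ptp(X_train, axis=0) == 0.0) & (np.ptp(X_valid, axis=0) == 0.0)
X_train = X_train[:, ~constant]
X_valid = X_valid[:, ~constant]
print(constant)   # [ True False  True]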
Code Example #5
def plot_histogram( ax ):
    """Here we take the data from ROI (between aN and bN). A 100 ms window (size
    = 10) slides over it. At each step, we get min and max of window, store
    these values in a list. 

    We plot histogram of the list
    """
    global newtime, time
    roiData = sensor[aN:bN]
    baselineData = np.concatenate( (sensor[:aN], sensor[bN:]) )
    windowSize = 10
    histdataRoi = []
    for i in range( len(roiData) ):
        window = roiData[i:i+windowSize]
        histdataRoi.append( np.ptp( window ) ) # peak to peak

    histdataBaseline = []
    for i in range( len(baselineData) ):
        window = baselineData[i:i+windowSize]
        histdataBaseline.append( np.ptp( window ) )

    plt.hist( histdataBaseline
            , bins = np.arange( min(histdataBaseline), max(histdataBaseline), 5)
            , normed = True, label = 'baseline (peak to peak)'
            , alpha = 0.7
            )
    plt.hist( histdataRoi
            , bins = np.arange( min(histdataRoi), max(histdataRoi), 5)
            , normed = True , label = 'ROI (peak to peak)'
            , alpha = 0.7
            )
    # plt.title('Histogram of sensor readout')
    plt.legend(loc='best', framealpha=0.4)
Code Example #6
    def plot_checkpoint(self,e):
        filename = "/data/sample_"+str(e)+".png"

        noise = self.sample_latent_space(16)
        images = self.generator.Generator.predict(noise)
        
        plt.figure(figsize=(10,10))
        for i in range(images.shape[0]):
            plt.subplot(4, 4, i+1)
            if self.C==1:
                image = images[i, :, :]
                image = np.reshape(image, [self.H,self.W])
                image = (255*(image - np.min(image))/np.ptp(image)).astype(int)
                plt.imshow(image,cmap='gray')
            elif self.C==3:
                image = images[i, :, :, :]
                image = np.reshape(image, [self.H,self.W,self.C])
                image = (255*(image - np.min(image))/np.ptp(image)).astype(int)
                plt.imshow(image)
            
            plt.axis('off')
        plt.tight_layout()
        plt.savefig(filename)
        plt.close('all')
        return
Code Example #7
File: transportability.py Project: cephdon/meta-core
def vehicle_fit(surface, up_direction, fore_direction, transport_dimensions):
    """
    Determines if the vehicle can fit into specified dimensions for transport.
    """
    ## Get the vehicle extent in the 3 cartesian directions
    ## determine orientation of the vehicle given the direction up and forward
    coord_array = np.vstack((surface['x'], surface['y'], surface['z'])).T
    side_direction = np.cross(up_direction, fore_direction)

    width = np.ptp(np.dot(coord_array, side_direction))
    height = np.ptp(np.dot(coord_array, up_direction))
    length = np.ptp(np.dot(coord_array, fore_direction))
    #print width, height, length

    ## Store the calculated vehicle dimensions for info only (not an output metric)
    results = {"_vehicle_calculated_dimension": {"vehicle_length[m]": length,
                                                 "vehicle_width[m]": width,
                                                 "vehicle_height[m]": height}}

    ## Check each transport option in turn and write True for any that can fit the vehicle
    trans_compat = results["Transportation_Compatibility"] = {}
    for transport, size in transport_dimensions.items():
        if size["max_length"] < length or size["max_width"] < width or size["max_height"] < height:
            trans_compat[transport] = False
        else:
            trans_compat[transport] = True

    return results
Code Example #8
File: readfile.py Project: mattja/nsim
def _load_edflib(filename):
    """load a multi-channel Timeseries from an EDF (European Data Format) file
    or EDF+ file, using edflib.

    Args:
      filename: EDF+ file

    Returns:
      Timeseries
    """
    import edflib
    e = edflib.EdfReader(filename, annotations_mode='all')
    if np.ptp(e.get_samples_per_signal()) != 0:
        raise Error('channels have differing numbers of samples')
    if np.ptp(e.get_signal_freqs()) != 0:
        raise Error('channels have differing sample rates')
    n = e.samples_in_file(0)
    m = e.signals_in_file
    channelnames = e.get_signal_text_labels()
    dt = 1.0/e.samplefrequency(0)
    # EDF files hold <=16 bits of information for each sample. Representing as
    # double precision (64bit) is unnecessary use of memory. use 32 bit float:
    ar = np.zeros((n, m), dtype=np.float32)
    # edflib requires input buffer of float64s
    buf = np.zeros((n,), dtype=np.float64)
    for i in range(m):
        e.read_phys_signal(i, 0, n, buf)
        ar[:,i] = buf
    tspan = np.arange(0, (n - 1 + 0.5) * dt, dt, dtype=np.float32)
    return Timeseries(ar, tspan, labels=[None, channelnames])
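`np.ptp(...) != 0` is used above purely as a quick test that all values are identical, applied to the per-channel sample counts and sample rates; the idiom in isolation (invented numbers):

import numpy as np

samples_per_signal = np.array([30000, 30000, 30000])
signal_freqs = np.array([250.0, 250.0, 500.0])

print(np.ptp(samples_per_signal) != 0)   # False -> every channel has the same length
print(np.ptp(signal_freqs) != 0)         # True  -> rates differ, so the loader raises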
Code Example #9
File: prepare_fixed_flt.py Project: gbrammer/unicorn
def process_raw_all(field = 'AEGIS'):
    #### Reprocess *all* of the FLTs with variable backgrounds that 
    #### weren't already refit above
    import glob
    import os
    
    import numpy as np
    
    import unicorn
    import threedhst
    from threedhst import catIO
    
       
    files = glob.glob('/3DHST/Spectra/Work/BACKGROUND/%s/*G141_orbit.dat'%(field))
    redo_list = []
    for file in files:
        bg = catIO.Readfile(file, save_fits=False, force_lowercase=True)
        var_bg = np.ptp(bg.bg[1:]) > 0.15
        no_skip = True
        if os.path.exists('%sq_flt.fits' %(os.path.split(file)[-1].split('j_')[0])): 
            im2flt_key = threedhst.utils.gethead('%sq_flt.fits' %(os.path.split(file)[-1].split('j_')[0]), keys=['IMA2FLT'])
            if im2flt_key[0] == '': 
                no_skip = True
            else: 
                no_skip = False
        rawfile='%sq_raw.fits'%(os.path.split(file)[-1].split('j_')[0])
        print rawfile, np.ptp(bg.bg[1:]), var_bg, no_skip, var_bg & no_skip
        #   
        if var_bg & no_skip:
            redo_list.append(rawfile)
            if not os.path.exists(rawfile):
                print '%s does not exist!'%(rawfile)
                continue
            #
            unicorn.prepare.make_IMA_FLT(raw=rawfile, pop_reads=[])
Code Example #10
File: specutils.py Project: andycasey/sick
def resample(old_dispersion, new_dispersion):
    """
    Resample a spectrum to a new dispersion map while conserving total flux.

    :param old_dispersion:
        The original dispersion array.

    :type old_dispersion:
        :class:`numpy.array`

    :param new_dispersion:
        The new dispersion array to resample onto.

    :type new_dispersion:
        :class:`numpy.array`
    """

    data = []
    old_px_indices = []
    new_px_indices = []
    for i, new_wl_i in enumerate(new_dispersion):

        # These indices should span just over the new wavelength pixel.
        indices = np.unique(np.clip(
            old_dispersion.searchsorted(new_dispersion[i:i + 2], side="left") \
                + [-1, +1], 0, old_dispersion.size - 1))
        N = np.ptp(indices)

        if N == 0:
            # 'Fake' pixel.
            data.append(np.nan)
            new_px_indices.append(i)
            old_px_indices.extend(indices)
            continue

        # Sanity checks.
        assert (old_dispersion[indices[0]] <= new_wl_i \
            or indices[0] == 0)
        assert (new_wl_i <= old_dispersion[indices[1]] \
            or indices[1] == old_dispersion.size - 1)

        fractions = np.ones(N)

        # Edges are handled as fractions between rebinned pixels.
        _ = np.clip(i + 1, 0, new_dispersion.size - 1)
        lhs = old_dispersion[indices[0]:indices[0] + 2]
        rhs = old_dispersion[indices[-1] - 1:indices[-1] + 1]
        fractions[0]  = (lhs[1] - new_dispersion[i])/np.ptp(lhs)
        fractions[-1] = (new_dispersion[_] - rhs[0])/np.ptp(rhs)

        # Being binned to a single pixel. Prevent overflow from fringe cases.
        fractions = np.clip(fractions, 0, 1)
        fractions /= fractions.sum()

        data.extend(fractions) 
        new_px_indices.extend([i] * N) # Mark the new pixel indices affected.
        old_px_indices.extend(np.arange(*indices)) # And the old pixel indices.

    return scipy.sparse.csc_matrix((data, (old_px_indices, new_px_indices)),
        shape=(old_dispersion.size, new_dispersion.size))
Code Example #11
File: common.py Project: QuantumElephant/horton
    def check(dms_a, dms_b):
        """Check quadratic energy model between two dms."""
        ham.reset(*dms_a)
        energy_a_0 = ham.compute_energy()
        focks_a = [np.zeros(dm_a.shape) for dm_a in dms_a]
        ham.compute_fock(*focks_a)

        delta_dms = []
        for idm in xrange(ham.ndm):
            delta_dms.append(dms_b[idm] - dms_a[idm])
        ham.reset_delta(*delta_dms)
        dots_a = [np.zeros(dm_a.shape) for dm_a in dms_a]
        ham.compute_dot_hessian(*dots_a)

        energy_a_1 = 0.0
        energy_a_2 = 0.0
        for idm in xrange(ham.ndm):
            energy_a_1 += np.einsum('ab,ba', focks_a[idm], delta_dms[idm])*ham.deriv_scale
            energy_a_2 += np.einsum('ab,ba', dots_a[idm], delta_dms[idm])*ham.deriv_scale**2

        # print 'energy_a_0', energy_a_0
        # print 'energy_a_1', energy_a_1
        # print 'energy_a_2', energy_a_2

        # Compute interpolation and compare
        energies_x = np.zeros(npoint)
        energies_2nd_order = np.zeros(npoint)
        derivs_x = np.zeros(npoint)
        derivs_2nd_order = np.zeros(npoint)
        for ipoint in xrange(npoint):
            x = xs[ipoint]
            dms_x = []
            for idm in xrange(ham.ndm):
                dm_x = dms_a[idm]*(1-x) + dms_b[idm]*x
                dms_x.append(dm_x)
            ham.reset(*dms_x)
            energies_x[ipoint] = ham.compute_energy()
            ham.compute_fock(*focks_a)
            for idm in xrange(ham.ndm):
                derivs_x[ipoint] += np.einsum('ab,ba', focks_a[idm], delta_dms[idm]) * \
                                    ham.deriv_scale

            energies_2nd_order[ipoint] = energy_a_0 + x*energy_a_1 + 0.5*x*x*energy_a_2
            derivs_2nd_order[ipoint] = energy_a_1 + x*energy_a_2
            # print '%5.2f %15.8f %15.8f' % (x, energies_x[ipoint], energies_2nd_order[ipoint])

        if do_plot:  # pragma: no cover
            import matplotlib.pyplot as pt
            pt.clf()
            pt.plot(xs, energies_x, 'ro')
            pt.plot(xs, energies_2nd_order, 'k-')
            pt.savefig('test_energies.png')
            pt.clf()
            pt.plot(xs, derivs_x, 'ro')
            pt.plot(xs, derivs_2nd_order, 'k-')
            pt.savefig('test_derivs.png')

        assert abs(energies_x - energies_2nd_order).max()/np.ptp(energies_x) < threshold
        assert abs(derivs_x - derivs_2nd_order).max()/np.ptp(derivs_x) < threshold
        return energy_a_0, energy_a_1, energy_a_2
Code Example #12
File: minimize.py Project: dornja/goa2
    def _test( self, deltas ):
        # "Passing" behavior is more like the original (slower, more energy).
        # "Failing" behavior is more optimized (faster, less energy).

        fitness = np.array( self.get_fitness( deltas ) )
        if len( fitness ) == 0:
            return self.UNRESOLVED
        if np.any( fitness == 0 ):
            return self.UNRESOLVED
        m = np.mean( fitness, axis = 0 )
        s = np.std( fitness, axis = 0 )
        sqrtn = np.sqrt( fitness.shape[ 0 ] )
        for i in range( fitness.shape[ 1 ] ):
            infomsg( "   ", m[ i ], "+/-", 1.96 * s[ i ] / sqrtn )
        for i in range( fitness.shape[ 1 ] ):
            if np.ptp( self.optimized[ ::, i ] ) == 0 and \
                    np.ptp( fitness[ ::, i ] ) == 0 and \
                    self.optimized[ 0, i ] == fitness[ 0, i ]:
                # Optimized and fitness are all the same value, likely because
                # we are comparing the optimized variant to itself. This counts
                # as a fail, since they are clearly drawn from the same distro.
                continue
            pval = mannwhitneyu( self.optimized[ ::, i ], fitness[ ::, i ] )[ 1 ]
            if pval < options.alpha and m[ i ] < self.mean[ i ]:
                return self.PASS
        return self.FAIL
Code Example #13
File: utils.py Project: stomachacheGE/esvm-python
def get_matching_mask(f_real, Ibox):
    """
    Find the best matching region per level in the feature pyramid    
    """
    maskers = []
    sizers = []
    
    import numpy.ma as mask
    from scipy.misc import imresize
    
    for i in range(len(f_real)):
        feature_goods = mask.array(np.sum(np.square(f_real[i]), 2), dtype=np.bool_)
        Ibox_resize = imresize(Ibox, (f_real[i].shape[0], f_real[i].shape[1]))
        Ibox_resize = Ibox_resize.astype(np.float64) / 255.0
        Ibox_goods = Ibox_resize > 0.1
        
        masker = np.logical_and(feature_goods, Ibox_goods)
        
        max_indice = np.unravel_index(Ibox_resize.argmax(), Ibox_resize.shape)
        
        if np.where(masker == True)[0].size == 0:
            masker[max_indice[0], max_indice[1]] = True
            
        indices = np.where(masker == True)
        masker[np.amin(indices[0]):np.amax(indices[0]),
               np.amin(indices[1]):np.amax(indices[1])] = True
        sizer=[np.ptp(indices[0])+1, np.ptp(indices[1])+1]   
        maskers.append(masker)
        sizers.append(sizer)
        
    return(maskers, sizers)
Code Example #14
File: xy_density.py Project: asteca/ASteCA
def main(clp, center_stddev, **kwargs):
    """
    Obtain Gaussian filtered 2D x,y histograms and the maximum values in them
    as centers.
    """

    # Standard deviation values for the Gaussian filter.
    st_dev_lst = (center_stddev * .5, center_stddev, center_stddev * 2.)

    # Obtain center coordinates using Gaussian filters with different
    # standard deviation values, applied on the 2D (x,y) histogram.
    cents_xy, hist_2d_g, cents_bin_2d = center_xy(
        clp['hist_2d'], clp['xedges'], clp['yedges'], st_dev_lst)

    # Raise a flag if the standard deviation for either coordinate is larger
    # than 10% of that axis range. Use the full x,y positions list to
    # calculate the STDDEV.
    flag_center_std = False
    stddev = np.std(zip(*cents_xy[:3]), 1)
    if stddev[0] > 0.1 * np.ptp(clp['xedges']) or \
            stddev[1] > 0.1 * np.ptp(clp['yedges']):
        flag_center_std = True

    clp['flag_center_std'], clp['cents_xy'], clp['hist_2d_g'],\
        clp['cents_bin_2d'], clp['st_dev_lst'] = flag_center_std, cents_xy,\
        hist_2d_g, cents_bin_2d, st_dev_lst

    return clp
Code Example #15
File: geom_dotplot.py Project: jwhendy/plotnine
    def draw_group(data, panel_params, coord, ax, **params):
        data = coord.transform(data, panel_params)
        fill = to_rgba(data['fill'], data['alpha'])
        color = to_rgba(data['color'], data['alpha'])
        ranges = coord.range(panel_params)

        # For perfect circles the width/height of the circle(ellipse)
        # should factor in the dimensions of axes
        bbox = ax.get_window_extent().transformed(
            ax.figure.dpi_scale_trans.inverted())
        ax_width, ax_height = bbox.width, bbox.height

        factor = ((ax_width/ax_height) *
                  np.ptp(ranges.y)/np.ptp(ranges.x))
        size = data.loc[0, 'binwidth'] * params['dotsize']
        offsets = data['stackpos'] * params['stackratio']

        if params['binaxis'] == 'x':
            width, height = size, size*factor
            xpos, ypos = data['x'], data['y'] + height*offsets
        elif params['binaxis'] == 'y':
            width, height = size/factor, size
            xpos, ypos = data['x'] + width*offsets, data['y']

        circles = []
        for xy in zip(xpos, ypos):
            patch = mpatches.Ellipse(xy, width=width, height=height)
            circles.append(patch)

        coll = mcoll.PatchCollection(circles,
                                     edgecolors=color,
                                     facecolors=fill)
        ax.add_collection(coll)
Code Example #16
File: test_function_base.py Project: ericsuh/numpy
 def test_basic(self):
     a = [3, 4, 5, 10, -3, -5, 6.0]
     assert_equal(np.ptp(a, axis=0), 15.0)
     b = [[3, 6.0, 9.0],
          [4, 10.0, 5.0],
          [8, 3.0, 2.0]]
     assert_equal(np.ptp(b, axis=0), [5.0, 7.0, 7.0])
     assert_equal(np.ptp(b, axis= -1), [6.0, 6.0, 6.0])
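For reference, the asserted values follow directly from ptp = max - min along the given axis; without an axis argument the array is flattened first. A quick check (plain NumPy, nothing project-specific):

import numpy as np

b = np.array([[3, 6.0, 9.0],
              [4, 10.0, 5.0],
              [8, 3.0, 2.0]])
print(np.ptp(b))           # 8.0 -> 10.0 - 2.0 over the flattened array
print(np.ptp(b, axis=0))   # [5. 7. 7.] -> per-column ranges
print(np.ptp(b, axis=-1))  # [6. 6. 6.] -> per-row ranges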
Code Example #17
File: loadlog.py Project: neuromind81/aibs
def loadsweeptimes(path):
    '''loads sweep timing corrected for computer - monitor delay'''
    datapath = path + ".dat"
    metapath = path + ".meta"
    
    channels,_ = loadmeta(metapath)
#    m = open(metapath)
#    meta = m.readlines()
#    
#    channels = int(meta[7].split(' = ')[1])
    #samplerate = int(meta[10].split(' = ')[1])
    #duration = len(data)/channels/samplerate
    
    data = loadbinary(datapath, channels=channels)
    sweeptrace = np.array(data[:, (channels-5)])
    vsynctrace = np.array(data[:, (channels-4)])
    diodetrace = np.array(data[:, (channels-3)])    
    d = open(datapath)
#    data = np.fromfile(d,np.int16)    
#    datareshaped = np.transpose(np.reshape(data,(len(data)/channels,channels)))
#    del data
#    
#    sweeptrace = datareshaped[(channels-3),:]
#    vsynctrace = datareshaped[(channels-2),:]
#    diodetrace = datareshaped[(channels-1),:]
#    del datareshaped    
    
    #sweep start and end times
    sthr = np.ptp(sweeptrace)/4
    sweepup = findlevels(sweeptrace, sthr, 40000, 'up')
    sweepdown = findlevels(sweeptrace, (-1*sthr), 40000, 'down')
    if sweepdown[0] > sweepup[0]:
        if len(sweepup) > len(sweepdown):
            sweep = np.column_stack([sweepup[:-1], sweepdown])
        else:
            sweep = np.column_stack([sweepup, sweepdown])
    elif sweepdown[0] <= sweepup[0]:
        sweep = np.column_stack([sweepup, sweepdown[1:]])
    
    vthr = -1*(np.ptp(vsynctrace)/5)
    vsync = findlevels(vsynctrace, vthr, 300, 'up')
    
    dthr = np.ptp(diodetrace)/4    
    diode = findlevels(diodetrace, dthr, 200, 'both')
    #diode = np.reshape(diode, (len(diode)/2, 2))
    diode = np.delete(diode,[0,1],0)
    
    #corrects for delay between computer and monitor
    delay = vsync[0] - diode[0] + 0.0
    print "***Monitor lag:", (delay/20000)
    if delay > 0:
        print "ERROR: diode before vsync"
        sys.exit('diode error')
    sweep -= delay
    #converts to time in seconds
    sweeptiming = sweep + 0.0
    sweeptiming /= 20000    
    return sweeptiming
Code Example #18
File: euclidean.py Project: andycasey/ges
def main():
    """Do the things"""

    # Check if we have already loaded the data
    global benchmarks, node_data, stellar_parameters, node_results_filenames

    try: benchmarks
    except NameError:
    	logger.info("Loading data..")
        node_results_filenames = glob("data/iDR2.1/GES_iDR2_WG11_*.fits")
        remove_nodes = ("Recommended", )
        node_results_filenames = [filename for filename in node_results_filenames \
            if "_".join(os.path.basename(filename).split("_")[3:]).rstrip(".fits") not in remove_nodes]

        # Load the data
        stellar_parameters = ("TEFF", "LOGG", "MH")
        benchmarks, node_data = prepare_data("data/benchmarks.txt", node_results_filenames,
            stellar_parameters)
    else:
    	logger.info("Using pre-loaded data")

    # Calculate weights based on minimal Euclidean distance
    stellar_parameters = ("TEFF", "LOGG", "MH")

    num_nodes = node_data.shape[2]
    recommended_measurements = np.zeros(map(len, [stellar_parameters, benchmarks]))
    weights = get_weights(benchmarks, node_data, stellar_parameters,
        scales={
            "TEFF": 1./np.ptp(benchmarks["TEFF"]),
            "LOGG": 1./np.ptp(benchmarks["LOGG"]),
            "MH": 1./np.ptp(benchmarks["MH"])
        })

    for j, stellar_parameter in enumerate(stellar_parameters):
        for i, benchmark in enumerate(benchmarks):

            node_measurements = node_data[2*j, i, :]
            isfinite = np.isfinite(node_measurements)

            # Normalise the weights
            normalised_weights = weights[isfinite]/sum(weights[isfinite])
            
            m_euclidean = np.sum((normalised_weights * node_measurements[isfinite]))
            recommended_measurements[j, i] = m_euclidean
            
    # Visualise the differences
    labels = ("$\Delta{}T_{\\rm eff}$ (K)", "$\Delta{}\log{g}$ (dex)", "$\Delta{}$[Fe/H] (dex)")
    figs = boxplots(benchmarks, node_data[::2, :, :], stellar_parameters,
        labels=labels, recommended_values=recommended_measurements)
    [fig.savefig("euclidean-benchmarks-{0}.png".format(stellar_parameter.lower())) \
    	for fig, stellar_parameter in zip(figs, stellar_parameters)]

    # Compare individual node dispersions to the recommended values
    repr_node = lambda filename: "_".join(os.path.basename(filename).split("_")[3:]).rstrip(".fits")
    fig = histograms(benchmarks, node_data[::2, :, :], stellar_parameters,
        parameter_labels=labels, recommended_values=recommended_measurements,
        node_labels=map(repr_node, node_results_filenames))
    fig.savefig("euclidean-distributions.png")
Code Example #19
def svdClean():
    #-- read file
    t0=time.time(); a,tx,ty=red(opt.FILE)
    #print "File read in",round(time.time()-t0,1),'s'    
    ntx,nbx=npy.shape(tx);nty,nby=npy.shape(ty)
    print '[H, V] bpms: [',nbx, nby,']'
    print '[H, V] turns: [',ntx, nty,']'

    #--- peak-2-peak cut, for LHC convert to microns
    print "Peak to peak cut:",opt.PK2PK, "mm"        
    pkx=npy.nonzero(npy.ptp(tx,axis=0)>float(opt.PK2PK))[0]
    pky=npy.nonzero(npy.ptp(ty,axis=0)>float(opt.PK2PK))[0]
    tx=npy.take(tx,pkx,1);ty=npy.take(ty,pky,1)
    print '[H,V] BPMs after P2P cut:',len(pkx),len(pky)
    
    #--- svd cut
    #t0=time.time()
    #gdx,gdy=foreach(rBPM,[tx,ty],threads=2,return_=True)
    gdx=rBPM(tx);gdy=rBPM(ty); #-- gdx->rdx for corr index 
    rdx=[pkx[j] for j in gdx]; rdy=[pky[j] for j in gdy]
    tx=npy.take(tx,(gdx),1);ty=npy.take(ty,(gdy),1)
    #print "Applied SVD cut in",round(time.time()-t0,1),'s'

    #--- svd clean
    if int(opt.SVALS)<nbx and int(opt.SVALS)<nby:
        t0=time.time();
        tx,ty=foreach(clean,[tx,ty],threads=2,return_=True)
        print "Cleaned using SVD in",round(time.time()-t0,1),'s'
    else: print "All singulars values retained, no svd clean applied"

    #--- bad bpms to file
    f=open(opt.FILE+'.bad','w')
    print >> f, "@  FILE %s ",opt.FILE
    print >> f, "*  NAME    S    PLANE"
    print >> f, "$   %s     %le   %s "
    for j in range(len(a.H)):
        if j not in rdx:
            print >> f, a.H[j].name, a.H[j].location, "H"
    for j in range(len(a.V)):
        if j not in rdy:
            print >> f, a.V[j].name, a.V[j].location, "V"
    f.close()
    
    #--- good data to file #t0=time.time()
    f = open(opt.FILE+'.new','w')
    f.write('# '+opt.FILE+'\n')
    for j in range(len(gdx)):
        f.write('0 '+a.H[rdx[j]].name+' '+str(a.H[rdx[j]].location)+' ')
        #f.write('%s\n' % ' '.join(['%5.5f' % val for val in \
        #                           a.H[rdx[j]].data]))
        f.write('%s\n' % ' '.join(['%5.5f' % val for val in tx[:,j]]))
    for j in range(len(gdy)):
        f.write('1 '+a.V[rdy[j]].name+' '+str(a.V[rdy[j]].location)+' ')
        #f.write('%s\n' % ' '.join(['%5.5f' % val for val in \
        #                           a.V[rdy[j]].data]))
        f.write('%s\n' % ' '.join(['%5.5f' % val for val in ty[:,j]]))
    f.close();#print "File written in",round(time.time()-t0,1),'s'
    print "Total",round(time.time()-t0,1),'s'
Code Example #20
File: plot_hrd.py Project: andycasey/ngc2808
def plot_hrd(data, members):

    x, y, c = data["TEFF"], data["LOGG"], data["FEH"]
    x_err, y_err = data["E_TEFF"], data["E_LOGG"]

    member_scatter_kwds = {
        "edgecolor": "#000000",
        "linewidths": 2,
        "s": 50,
        "zorder": 2
    }

    uves = np.array(["U580" in _ for _ in data["SETUP"]])
    giraffe = ~uves

    fig, ax = plt.subplots()
    scat = ax.scatter(x[members * uves], y[members * uves], c=c[members * uves],
        marker="s", label="UVES", **member_scatter_kwds)
    
    scat = ax.scatter(x[members * giraffe], y[members * giraffe],
        c=c[members * giraffe], marker="o", label="GIRAFFE",
        **member_scatter_kwds)
    
    ax.errorbar(x[members], y[members],
        xerr=x_err[members], yerr=y_err[members],
        fmt=None, ecolor="#000000", zorder=-1, elinewidth=1.5)

    #ax.legend(loc="upper left", frameon=False)

    cbar = plt.colorbar(scat)
    cbar.set_label(r"$[{\rm Fe/H}]$")

    ax.set_xlim(ax.get_xlim()[::-1])
    ax.set_ylim(ax.get_ylim()[::-1])

    ax.set_xlabel(r"$T_{\rm eff}$ $(K)$")
    ax.set_ylabel(r"$\log{g}$")

    ax.xaxis.set_major_locator(MaxNLocator(4))
    ax.yaxis.set_major_locator(MaxNLocator(4))

    ax.set(adjustable='box-forced',
        aspect=np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()

    # Load isochrone?
    """
    isochrone = Table.read("basti.isochrone", format="ascii",
        names=(
            "(M/Mo)in", "(M/Mo)", "log(L/Lo)", "logTe", "Mv", 
            "(U-B)", "(B-V)", "(V-I)", "(V-R)", "(V-J)", 
            "(V-K)", "(V-L)", "(H-K)"))
    """

    return fig
Code Example #21
 def test_null(self):
     # call llnull, so null model is attached, side effect of cached attribute
     self.res1.llnull
     # check model instead of value
     exog_null = self.res1.res_null.model.exog
     exog_infl_null = self.res1.res_null.model.exog_infl
     assert_array_equal(exog_infl_null.shape,
                  (len(self.res1.model.exog), 1))
     assert_equal(np.ptp(exog_null), 0)
     assert_equal(np.ptp(exog_infl_null), 0)
Code Example #22
File: utils.py Project: emitc2h/fontfinder
def normalize(img):
    """
    Normalize an image to range 0.0-1.0 float32
    """

    ptp = np.ptp(img)
    if ptp > 0:
        return np.multiply(np.add(img, -np.min(img)), 1.0/np.ptp(img))
    else:
        return np.zeros(img.shape)
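The `ptp > 0` guard above matters because a constant image has zero peak-to-peak range and the division would otherwise blow up; a quick check of both branches (a sketch that assumes the `normalize` above is in scope):

import numpy as np

img = np.array([[10.0, 20.0], [30.0, 50.0]])
print(normalize(img))                    # rescaled into [0, 1]: 0 at the minimum, 1 at the maximum
print(normalize(np.full((2, 2), 7.0)))   # constant input -> all zeros instead of a divide-by-zero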
Code Example #23
File: dap2arc.py Project: hetland/dap2arc
def ww2raster(url ='http://motherlode.ucar.edu/thredds/dodsC/fmrc/NCEP/WW3/Regional_US_West_Coast/NCEP-WW3-Regional_US_West_Coast_best.ncd',
    box = [-132.95925,35.442,-117.279,51.12225],
    var = 'Significant_height_of_combined_wind_waves_and_swell'):
    
    '''
    NetCDF4-Python test to read DEM data via OPeNDAP and create Arc Raster
    also tests out writing a small plot using Matplotlib
    Global: http://motherlode.ucar.edu/thredds/dodsC/fmrc/NCEP/WW3/Global/NCEP-WW3-Global_best.ncd
    West Coast: http://motherlode.ucar.edu/thredds/dodsC/fmrc/NCEP/WW3/Regional_US_West_Coast/NCEP-WW3-Regional_US_West_Coast_best.ncd
    '''
    nc = netCDF4.Dataset(url)
    print "Source name: %s" % nc.title
    lon = nc.variables['lon'][:]-360.0
    lat = nc.variables['lat'][:]
    bi = (lon>=box[0]) & (lon<=box[2])
    bj = (lat>=box[1]) & (lat<=box[3])
    
    # find time index to read
    hours_from_now = 0   # Examples: 0=>nowcast, 3 => forecast 3 hours from now, etc. 
    date = datetime.datetime.utcnow()+datetime.timedelta(0,3600*hours_from_now)  
    #date=datetime.datetime(2011,9,9,17,00)  # specific time (UTC)
    
    tindex = netCDF4.date2index(date,nc.variables['time'],select='nearest')
    z = nc.variables[var][tindex,bj,bi]
    lonmin = np.min(lon[bi])
    latmin = np.min(lat[bj])
    dx = np.diff(lon)
    dy = np.diff(lat)
    # check if dx or dy vary by more than one percent
    assert np.abs(np.ptp(dx)/np.mean(dx))<=0.01,'longitude spacing is not uniform'
    assert np.abs(np.ptp(dy)/np.mean(dy))<=0.01,'latitude spacing is not uniform'
    if dy[0]>0:  # lat increasing
        z=np.array(z[::-1,:])
    if dx[0]<0:  # lon decreasing
        z=np.array(z[:,::-1])    
    dx=np.abs(np.mean(dx))
    dy=np.abs(np.mean(dy))
    xyOrig = arcpy.Point(float(lonmin),float(latmin))

    # create Arc Raster
    arcpy.workspace  = "C:\\workspace"
    arcpy.env.overwriteOutput = True
    rasterName = "sig_ht"
    outRaster = os.path.normpath(os.path.join(arcpy.workspace,rasterName))
    print outRaster
    grid1=arcpy.NumPyArrayToRaster(z,xyOrig,dx,dy)
    grid1.save(os.path.join(arcpy.workspace,outRaster))
    strPrj = "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID"\
             "['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],"\
             "UNIT['Degree',0.0174532925199433]]"
    arcpy.DefineProjection_management(outRaster,strPrj)
    print "Written: %s" % grid1
    arcpy.AddMessage("Written: %s" % grid1)
    nc.close()
Code Example #24
def print_to_pdf(shapley_list, filename, random_colours=True, **kwargs):

    scale = kwargs['scale']
    pylab.rcParams['savefig.dpi'] = 254

    x_min = []
    x_max = []
    y_min = []
    y_max = []

    for shape in shapley_list:
        bound = shape.bounds
        x_min.append(bound[0])
        y_min.append(bound[1])
        x_max.append(bound[2])
        y_max.append(bound[3])

    x_limits = [np.min(x_min), np.max(x_max)]
    y_limits = [np.min(y_min), np.max(y_max)]

    fig_width = np.ptp(x_limits)
    fig_height = np.ptp(y_limits)

    fig = plt.figure(figsize=(fig_width/2.54, fig_height/2.54))
    fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
    ax = fig.add_subplot(111)

    for shape in shapley_list:

        if random_colours:
            colours = np.append(
                np.random.uniform(size=3), 0.3)
        else:
            colours = [0, 0, 0, 0.3]

        scaled_shape = aff.scale(
            shape, xfact=scale, yfact=scale)
        patch = des.PolygonPatch(
            scaled_shape, fc=colours
        )
        ax.add_patch(patch)

    ax.set_xlim(x_limits)
    ax.set_ylim(y_limits)

    plt.grid(True)

    plt.savefig("temp.png")
    subprocess.call([
        "convert", "-units", "PixelsPerInch",
        "temp.png", "-density", "254", "temp.pdf"
    ])
    os.rename("temp.pdf", filename)
    os.remove("temp.png")
Code Example #25
File: plwf.py Project: mdcb/python-gist
def xyz_wf (z, y, x, scale = 1.0) :

   '''
   xyz_wf (z, [y, x] [,scale = 1.0])
      returns a 3-by-ni-by-nj array whose 0th entry is x, 1th entry
      is y, and 2th entry is z. z is ni-by-nj. x and y, if present,
      must be the same shape. If not present, integer ranges will
      be used to create an equally spaced coordinate grid in x and y.
      The function which scales the 'topography' of z(x,y) is
      potentially useful apart from plwf.
      For example, the xyz array used by plwf can be converted from
      a quadrilateral mesh plotted using plf to a polygon list plotted
      using plfp like this:
        xyz= xyz_wf(z,y,x,scale=scale);
        ni= shape(z)[1];
        nj= shape(z)[2];
        list = ravel (add.outer (
           ravel(add.outer (adders,zeros(nj-1, int32))) +
           arange((ni-1)*(nj-1), dtype= int32),
           array ( [[0, 1], [nj + 1, nj]])))
        xyz=array([take(ravel(xyz[0]),list),
           take(ravel(xyz[1]),list),
           take(ravel(xyz[2]),list)])
        nxyz= ones((ni-1)*(nj-1)) * 4;
      The resulting array xyz is 3-by-(4*(nj-1)*(ni-1)).
      xyz[0:3,4*i:4*(i+1)] are the clockwise coordinates of the
      vertices of cell number i.
   '''

   if len (numpy.shape (z)) < 2 :
      raise _Xyz_wfError('impossible dimensions for z array')
   nx = numpy.shape (z) [0]
   ny = numpy.shape (z) [1]
   if y is None or x is None :
      if x is not None or y is not None :
         raise _Xyz_wfError('either give y,x both or neither')
      x = numpy.tile(numpy.linspace (0, ny - 1, ny), nx).reshape(nx,ny)
      y = numpy.transpose (numpy.tile(numpy.linspace (0, nx - 1, nx), ny).reshape(ny,nx))
   elif numpy.shape (x) != numpy.shape (z) or numpy.shape (y) != numpy.shape (z) :
      raise _Xyz_wfError('x, y, and z must all have same dimensions')
   xyscl = max (numpy.ptp(x),
                numpy.ptp(y))
   if scale is not None:
      xyscl = xyscl * scale
   dz = numpy.ptp(z)
   zscl= dz + (dz == 0.0)
   if zscl :
      z = z * 0.5 * xyscl /zscl
   xbar = numpy.mean (x)
   ybar = numpy.mean (y)
   zbar = numpy.mean (z)
   xyz = numpy.array ( [x - xbar, y - ybar, z - zbar], numpy.float32)
   return (xyz)
Code Example #26
File: dipolearray.py Project: marksbrown/DipoleArray
    def structure_factor(self, adir, light, dist='analytical', verbose=0):
        """
        Calculates the Structure Factor : F(q)

        --args--
        n0 : incident direction
        n1 : outgoing directions
        k : Wavenumber
        x_scatterers : Number of points in first axis
        y_scatterers : Number of points in second axis
        lc : Lattice definition
        verbose : verbosity control
        """

        d1, t1, d2, t2 = self.lattice

        n0 = light.incoming_vector
        n1 = light.outgoing_vectors[adir]

        q = light.k*subtract(n0, n1)

        exponent_factor = lambda length, angle: length*(q[...,0]*cos(angle)+q[...,1]*sin(angle))

        N1 = ptp(self.x_scatterers)
        N2 = ptp(self.y_scatterers)

        if verbose > 0:
            print("{0}x{1}".format(N1,N2))

        if dist == "analytical":
            #returns the analytical expression


            fx = exponent_factor(d1, t1)
            fy = exponent_factor(d2, t2)

            F = sin(N1*fx/2)**2/sin(fx/2)**2*sin(N2*fy/2)**2/sin(fy/2)**2
            F[(fx==0) |(fy==0)] = N1**2+N2**2

            return F

        elif dist == "sum":

            structure_factor_1d = lambda i, f: where(f!=0, exp(1j*i*f), 1)

            Fx = sum([structure_factor_1d(n, exponent_factor(d1, t1)) for n in range(*self.x_scatterers)], axis=0)
            Fx *= conj(Fx)

            Fy = sum([structure_factor_1d(n, exponent_factor(d2, t2)) for n in range(*self.y_scatterers)], axis=0)
            Fy *= conj(Fy)

            return real(Fx) * real(Fy)
Code Example #27
File: ExportData.py Project: sapphire008/Python
def AddTraceScaleBar(xunit, yunit, color='k',linewidth=None,\
                         fontsize=None, ax=None, xscale=None, yscale=None,
                         loc=5, bbox_to_anchor=None):
        """Add scale bar on trace. Specifically designed for voltage /
        current / stimulus vs. time traces.
        xscale, yscale: add the trace bar to the specified window of x and y.
        """
        if ax is None: ax=plt.gca()
        def scalebarlabel(x, unitstr):
            x = int(x)
            if unitstr.lower()[0] == 'm':
                return(str(x)+" " + unitstr if x<1000 else str(int(x/1000))+ " " +
                        unitstr.replace('m',''))
            elif unitstr.lower()[0] == 'p':
                return(str(x)+" "+ unitstr if x<1000 else str(int(x/1000))+ " " +
                        unitstr.replace('p','n'))
            else: # no prefix
                return(str(x)+" " + unitstr)

        ax.set_axis_off() # turn off axis
        X = np.ptp(ax.get_xlim()) if xscale is None else xscale
        Y = np.ptp(ax.get_ylim()) if yscale is None else yscale
        # calculate scale bar unit length
        X, Y = roundto125(X/5), roundto125(Y/(5 if Y<1200 else 10))
        # Parse scale bar labels
        xlab, ylab = scalebarlabel(X, xunit), scalebarlabel(Y, yunit)
        # Get color of the scalebar
        if color is None:
            color = ax.get_lines()[0]
        if 'matplotlib.lines.Line2D' in str(type(color)):
            color = color.get_color()
        if linewidth is None:
            try:
                linewidth = ax.get_lines()[0]
            except:
                linewidth=0.70
                #raise(AttributeError('Did not find any line in this axis. Please explicitly specify the linewidth'))
        if 'matplotlib.lines.Line2D' in str(type(linewidth)):
            linewidth = linewidth.get_linewidth()
        # print(linewidth)
        if fontsize is None:
            fontsize = ax.yaxis.get_major_ticks()[2].label.get_fontsize()
        scalebarBox = AuxTransformBox(ax.transData)
        scalebarBox.add_artist(matplotlib.patches.Rectangle((0, 0), X, 0, fc="none", edgecolor='k', linewidth=linewidth, joinstyle='miter', capstyle='projecting')) #TODO capstyle
        scalebarBox.add_artist(matplotlib.patches.Rectangle((X, 0), 0, Y, fc="none", edgecolor='k', linewidth=linewidth, joinstyle='miter', capstyle='projecting'))
        scalebarBox.add_artist(matplotlib.text.Text(X/2, -Y/20, xlab, va='top', ha='center', color='k'))
        scalebarBox.add_artist(matplotlib.text.Text(X+X/20, Y/2, ylab, va='center', ha='left', color='k'))
        anchored_box = AnchoredOffsetbox(loc=loc, pad=-9, child=scalebarBox, frameon=False, bbox_to_anchor=bbox_to_anchor)
        ax.add_artist(anchored_box)
        return(anchored_box)
Code Example #28
File: flow.py Project: joefutrelle/oii
def core(X, Y):
    # estimate density at each point
    # but subsample the points because kde is slow
    step = max(1,X.size//1000)
    x, y = X[::step], Y[::step]
    xy = np.vstack([x,y])
    z = stats.gaussian_kde(xy)(xy)
    # select non-outliers
    zix = np.where(z > np.percentile(z,10))
    # compute minimum aspect ratio
    # robust to x/y swap, assumes instrument will not generate
    # horizontal core
    aspect_ratio = 1. * np.ptp(x[zix]) / np.ptp(y[zix])
    return min(aspect_ratio, 1/aspect_ratio)
Code Example #29
File: flow.py Project: joefutrelle/oii
def clipping(X, Y):
    # compute a bounding box slightly inside the bounding
    # box of all the points
    xpad = np.ptp(X) * 0.01
    ypad = np.ptp(Y) * 0.01
    ll = np.array([np.min(X) + xpad, np.min(Y) + ypad])
    ur = np.array([np.max(X) - xpad, np.max(Y) - ypad])

    # count the points inside the bounding box
    P = np.vstack((X,Y)).T
    inidx = np.all(np.logical_and(ll <= P, P <= ur), axis=1)
    # there should not be a high percentage of points outside the bbox;
    # if there are, images may be clipped at the edge of the camera field
    return 100. - (100. * np.sum(inidx) / X.size)
Code Example #30
 def cog(star):
     '''
     Internal intensity weighted center of gravity method. This is used to find the 
     approximate center of a star.
     '''
 
     xi = np.array([float(p[0][0]) for p in star])
     yi = np.array([float(p[0][1]) for p in star])
     wi = np.array([float(p[1]) for p in star])
     n = sum(wi)
     xc = sum(xi*wi)/n
     yc = sum(yi*wi)/n
     wx = np.ptp(xi)
     wy = np.ptp(yi)
     return (xc,yc),(wx,wy)
Code Example #31
def make_q_q_plots(snv1,values1,mn_values1,snv2,values2,mn_values2):

   threshold = 0.005
   flag = 0
   min_length = 200
   range1 = np.ptp(values1)
   #print "Relief range: ", range1
   for i in range(0,len(snv1)):
        if (snv1[i] <= 0):
            frac_diff = abs((values1[i] - mn_values1[i]))/range1
            if (frac_diff < threshold):
                if (flag == 0):
                    flag = 1
                    count = 0
                    for j in range(1,min_length+1):
                        next_frac = abs((values1[i+j] - mn_values1[i+j]))/range1
                        if (next_frac < threshold):
                            count = count+1
                    if (count == min_length):
                        relief_thresh = snv1[i]
                        print "Relief threshold: ", values1[i]
                    else:
                        flag = 0
                
   flag = 0  
   range2 = np.ptp(values2) 
   print "Slope range: ", range2              
   for i in range(0,len(snv2)):       
       if (snv2[i] <= 0):
           frac_diff = abs((values2[i] - mn_values2[i]))/range2
           if (frac_diff < threshold):
                if (flag == 0):
                    flag = 1
                    count = 0
                    for j in range(1,min_length):
                        next_frac = abs((values2[i+j] - mn_values2[i+j]))/range2
                        if (next_frac < threshold):
                            count = count+1
                    if (count == min_length-1):
                        slope_thresh = snv2[i]
                        print "Slope threshold: ", values2[i]
                    else:
                        flag = 0
   print slope_thresh, relief_thresh
    
   plt.figure(1, facecolor='White',figsize=[10,5])
   ax1 = plt.subplot(1,2,1)            
   ax1.plot(snv1,values1,linewidth=2,color="blue",label="Real data")
   ax1.plot(snv1,mn_values1,"--",linewidth=2,color="red",label="Normal distribution")
   ax1.axvline(x=relief_thresh,linestyle='--',linewidth=1.5,color='black')
   xmin,xmax = ax1.get_xlim()       
   ax1.axvspan(xmin, relief_thresh, alpha = 0.2, color='blue') 
   ax1.legend(loc = 2)
   ax1.set_xlabel("Standard Normal Variate", fontsize=rcParams['font.size']+2)
   ax1.set_ylabel("Channel relief ($m$)", fontsize=rcParams['font.size']+2)
   ax1.set_xlim(xmin,xmax)
   ax1.grid(True)

    
   ax2 = plt.subplot(1,2,2)
   ax2.plot(snv2,values2,linewidth=2,color="blue",label="Real data")
   ax2.plot(snv2,mn_values2,"--",linewidth=2,color="red",label="Normal distribution")
   ax2.axvline(x=slope_thresh,linestyle='--',linewidth=1.5,color='black')
   xmin2,xmax2 = ax2.get_xlim()       
   ax2.axvspan(xmin2, slope_thresh, alpha = 0.2, color='blue') 
   #ax2.legend(loc = 2)
   ax2.set_xlabel("Standard Normal Variate", fontsize=rcParams['font.size']+2)
   ax2.set_ylabel("Gradient", fontsize=rcParams['font.size']+2)
   ax2.set_xlim(xmin2,xmax2)
   ax2.grid(True)
   plt.tight_layout()
Code Example #32
 def normalizeVolume(self, volume):
     # d is a (n x dimension) np array
     volume -= np.min(volume)
     volume /= np.ptp(volume)
     return volume
Code Example #33
    def get_track_analysis(self):
        try:
            if self.track is not None:
                #self.show(self.sp.audio_analysis(self.track)['segments'][0:2])
               
                features = self.sp.audio_features(self.track)[0]
                
                self.acoustic = features['acousticness']
                self.energy = features['energy']
                self.valence = features['valence']
                #print(self.track_info['item']['name'] + "\t acoust\t" + str(features['acousticness']) + " energy\t" + str(features['energy']) + " liveness\t" + str(features['liveness']) + " valence\t" + str(features['valence']))
                
                analysis = self.sp.audio_analysis(self.track)
                segments = analysis['segments']
                
                try:
                    self.key = analysis['sections'][0]['key']
                except:
                    self.key = 7

                self.bpm = analysis['track']['tempo']
                self.bpm = min(self.bpm, 120)
                print(self.bpm)
                self.refresh_rate = (60.0/self.bpm)/SAMPLE_FACTOR


                #self.refresh_rate = 0.05 
               
                time_vals = []
                loudness_vals = []
                pitch_vals = []
                for segment in segments:
                    if 'start' not in segment:
                        time_vals.append(0.0)
                    else:
                        time_vals.append(segment['start'])

                    if 'loudness_start' not in segment:
                        segment['loudness_start'] = -30.0
                    if 'loudness_max' not in segment:
                        segment['loudness_max'] = segment['loudness_start']
                   
                    #look to average loudness based off timbre 
                    loudness_vals.append(segment['timbre'][0])
                    
                    pitch_norm = np.array(segment['pitches'])
                    #put more of a bias on higher values for pitch
                    pitch_norm = np.power(pitch_norm, 3)
                    pitch_norm = pitch_norm/np.sum(pitch_norm)

                    pitch_vals.append(pitch_norm)

                self.time_vals = time_vals

                #normalization for loudness vals from 0 to 1
                loudness_vals = np.array(loudness_vals)

                
                #when normalizing don't look at beginning and end to give more dynamic range and avoid looking at outliers
                adj_loud = loudness_vals[10:-10]
                loudness_vals = (loudness_vals - np.min(adj_loud))/np.ptp(adj_loud)
                
                #make quiet sounds even quieter
                loudness_vals[loudness_vals < 0] = 0
                loudness_vals = np.power(loudness_vals, 2)
                

                loudness_vals = loudness_vals
                self.loudness_vals = loudness_vals

                #self.time_vals, self.loudness_vals = self.interp(self.time_vals, self.loudness_vals)

                
                
                self.pitch_vals = []
                self.pitch_vals = np.array(pitch_vals)
        except:
            print("Could not get track analysis")
            self.stop()
Code Example #34
 def test_scaling_mean_span(self):
     table = Scale(center=Scale.Mean, scale=Scale.Span)(self.table)
     np.testing.assert_almost_equal(np.mean(table, 0), 0)
     np.testing.assert_almost_equal(np.ptp(table, 0), 1)
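The test above pins down what mean/span scaling means: subtract each column's mean and divide by its peak-to-peak range, leaving every column with mean 0 and range 1. A hand-rolled version of just that arithmetic (not the Orange `Scale` API):

import numpy as np

X = np.array([[1.0, 10.0], [2.0, 30.0], [3.0, 50.0]])
X_scaled = (X - X.mean(axis=0)) / np.ptp(X, axis=0)
print(X_scaled.mean(axis=0))     # [0. 0.]
print(np.ptp(X_scaled, axis=0))  # [1. 1.]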
Code Example #35
parser.add_argument("-p",
                    "--plot",
                    help="plotting flag",
                    action="store_true",
                    default="trackersim.conf")
args = parser.parse_args()

pdf_file = PdfPages('python.pdf')
popt_list = []
for i in range(0, 24):
    temp_fig, temp_popt = make_plot("longsweep" + str(i))
    popt_list.append(list(temp_popt))
    pdf_file.savefig(temp_fig)
    plt.close(temp_fig)
pdf_file.close()

popt_arr = np.array(popt_list)
# make sure offsets are within 12 mV of each other
if np.ptp(popt_arr, axis=0)[1] > 12:
    bad_channel = -1
    off_min = np.min(popt_arr, axis=0)[1]
    off_max = np.max(popt_arr, axis=0)[1]
    off_mean = np.mean(popt_arr, axis=0)[1]
    if np.abs(off_min - off_mean) > np.abs(off_max - off_mean):
        bad_channel = np.where(popt_arr == off_min)[0][0]
    elif np.abs(off_min - off_mean) < np.abs(off_max - off_mean):
        bad_channel = np.where(popt_arr == off_max)[0][0]
    print "Channel %d is out of the 12 mV offset range" % bad_channel

print "done"
Code Example #36
 def _check_constant(self) -> bool:
     col_delta = ptp(self.exog.ndarray, 0)
     has_constant = npany(col_delta == 0)
     self._const_col = where(col_delta == 0)[0][0] if has_constant else None
     return bool(has_constant)
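Same idiom again: a regressor column with zero peak-to-peak spread is constant, which is how the model locates an intercept column. A standalone sketch in plain NumPy (not the linearmodels internals):

import numpy as np

exog = np.array([[1.0, 0.3], [1.0, 0.7], [1.0, 0.1]])
col_delta = np.ptp(exog, axis=0)
has_constant = bool(np.any(col_delta == 0))
const_col = np.where(col_delta == 0)[0][0] if has_constant else None
print(has_constant, const_col)   # True 0 -> the first column is the constant term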
Code Example #37
# Compute the average of the Chinese-language scores
result = np.mean(chineses)
print("Chinese average:", result)

print('----------- max / min of a matrix --------')

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(a)
# minimum of the whole matrix
print(np.amin(a))

# minimum along axis 0
print(np.amin(a, 0))

# difference between the maximum and the minimum (peak to peak)
print(np.ptp(a))
print(np.ptp(a, 0))

print('-------------- sorting ---------------')

b = np.array([[4, 1, 3], [1, 5, 2]])
print(b)

print(np.sort(b))

# sort along axis 0 (down each column)
print(np.sort(b, axis=0))

# sort along axis 1 (within each row)
print(np.sort(b, axis=1))
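For the 3x3 matrix in the example above, the two peak-to-peak calls work out as follows (easy to verify by hand):

import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(np.ptp(a))      # 8       -> 9 - 1 over the whole matrix
print(np.ptp(a, 0))   # [6 6 6] -> per-column ranges, e.g. 7 - 1 = 6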
Code Example #38
def clean_up_templates(templates, weights, spike_train, tmp_loc, geometry,
                       neighbors, snr_threshold, spread_threshold):
    """Clean up bad templates

    Parameters
    ----------
    templates: numpy.ndarray(n_channels, temporal_size, n_templates)
        templates

    weights: np.array(n_templates)
        weights coming out of template computation

    spike_train: np.array(n_data, 3)
        The 3 columns represent spike time, unit id,
        weight (from soft assignment)

    tmp_loc: np.array(n_templates)
        At which channel the clustering is done.

    geometry: np.array(n_channels, 2)
        geometry info

    neighbors: np.array(n_channels, n_channels) boolean
        neighboring channel info

    snr_threshold: float
        a threshold for removing small template

    spread_threshold: float
        a threshold for removing widely spread templates


    Returns
    -------
    templates: npy.ndarray
        Templates after clean up

    weights: np.array(n_templates)
        weights after clean up

    spike_train2: np.array
        spike_train after clean up

    idx_good_templates: np.array
        index of which templates are kept
    """
    # get size
    n_channels, temporal_size, n_templates = templates.shape

    # get energy
    energy = np.ptp(templates, axis=1)
    mainc = np.argmax(energy, axis=0)

    # check for overly spread template first
    too_spread = np.zeros(n_templates, 'bool')
    uncentered = np.zeros(n_templates, 'bool')
    too_small = np.zeros(n_templates, 'bool')
    for k in range(n_templates):

        # checking for spread
        idx_c = energy[:, k] > np.max(energy[:, k]) * 0.5
        if np.sum(idx_c) > 1:
            lam, V = np.linalg.eig(
                np.cov(geometry[idx_c].T, aweights=energy[idx_c, k]))
            lam[lam < 0] = 0
            if np.sqrt(np.max(lam)) > spread_threshold:
                too_spread[k] = 1

        # checking for uncentered
        ch_idx = np.where(neighbors[mainc[k]])[0]
        if not np.any(tmp_loc[k] == ch_idx):
            uncentered[k] = 1

        # checking for small templates
        if energy[mainc[k], k] < snr_threshold:
            too_small[k] = 1

    idx_good_templates = np.where(
        ~np.logical_or(np.logical_or(too_spread, uncentered), too_small))[0]

    spike_train2 = np.zeros((0, 3), 'int32')
    for j, k in enumerate(idx_good_templates):
        idx_k = np.where(spike_train[:, 1] == k)[0]
        temp = np.copy(spike_train[idx_k])
        temp[:, 1] = j
        spike_train2 = np.vstack((spike_train2, temp))

    templates = templates[:, :, idx_good_templates]
    weights = weights[idx_good_templates]

    return templates, weights, spike_train2, idx_good_templates
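In the clean-up routine above, `np.ptp(templates, axis=1)` collapses each template to a per-channel amplitude ("energy"), and the channel with the largest range becomes the main channel. That reduction in isolation, on toy shapes only (not the real spike-sorting pipeline):

import numpy as np

rng = np.random.default_rng(1)
templates = rng.normal(scale=0.1, size=(8, 60, 3))  # (n_channels, temporal_size, n_templates)
templates[2, :, 0] += 5.0 * np.hanning(60)          # give template 0 a large waveform on channel 2

energy = np.ptp(templates, axis=1)   # (n_channels, n_templates) peak-to-peak per channel
mainc = np.argmax(energy, axis=0)    # main channel of each template
print(mainc[0])                      # expected: 2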
Code Example #39
#        if len(X[0])>len(Xref[0]):
#           Xref = X

# use the moving dataset as reference
Xref = np.copy(moving['Neurons']['Positions'].T)
Xref -= np.mean(Xref, axis=0)
# load atlas data
neuron2D = '../../utility/celegans277positionsKaiser.csv'
labels = np.loadtxt(neuron2D, delimiter=',', usecols=(0), dtype=str)
neuronAtlas2D = np.loadtxt(neuron2D, delimiter=',', usecols=(1, 2))
relevantIds = (neuronAtlas2D[:, 0] > -0.1)  #*(neuronAtlas2D[:,0]<0.15)
A = neuronAtlas2D[relevantIds]
A[:, 0] = -A[:, 0]
labels = labels[relevantIds]
A -= np.mean(A, axis=0)
A /= np.ptp(A, axis=0)
A *= 1.2 * np.ptp(Xref[:, :2], axis=0)

# plot all data
AN = []
keeping_track = []
special = []
index = 1
bestGuess = []
ventral = [1, 1, 1, -1, 1, 1, 1]
for key, marker in zip(['AML32_moving', 'AML70_chip'], ['o', "^"]):
    dset = data[key]['input']
    res = data[key]['analysis']
    for idn in np.sort(dset.keys())[:]:
        if idn == movingAML32:
            movIndex = index
Code Example #40
    def simulate(self, writeToLog=False):
        # Sum of the rows
        self.CO = np.array(np.sum(self.zavisnost, axis=1))
        # Sum of the columns
        self.CI = np.array(np.sum(self.zavisnost, axis=0))

        # SNAP 7 and 8, 11 and 12
        C = self.zavisnost / (np.max(self.CI) + 1)
        D = np.identity(self.n) - C
        E = np.linalg.inv(D)
        F = np.matmul(C, E)
        sumRedaka = np.array(np.sum(F, axis=1))
        sumStupaca = np.array(np.sum(F, axis=0))
        rs = sumRedaka - sumStupaca
        np.around(rs, 8, out=rs)

        # SNAP 9 and 10
        S = self.normalizirajStupceSumom(self.zavisnost, self.CI)
        E = np.ones((self.n, self.n)) / self.n
        G = (0.85 * S) + (0.15 * E)
        G = self.izracunajGranicnuMatricu(G)
        self.norm_S9 = G[0:, 0]

        # SNAP 11 and 12
        H = (0.85 * C) + (0.15 * E)
        I = np.identity(self.n) - H
        J = np.linalg.inv(I)
        K = np.matmul(H, J)
        sumRedaka = np.array(np.sum(K, axis=1))
        sumStupaca = np.array(np.sum(K, axis=0))
        rs2 = sumRedaka - sumStupaca
        np.around(rs2, 8, out=rs2)

        self.razlike = self.CO - self.CI
        # SNAP 1
        self.norm_S1 = self.razlike + np.ptp(self.razlike, axis=0)
        self.norm_S7 = rs + np.ptp(rs, axis=0)
        self.norm_S11 = rs2 + np.ptp(rs2, axis=0)
        # SNAP 3
        self.norm_S3 = self.razlike + 4 * (self.n - 1)
        # SNAP 5
        self.norm_S5 = self.razlike + abs(min(self.razlike))
        sum1 = np.sum(self.norm_S1)
        sum3 = np.sum(self.norm_S3)
        sum5 = np.sum(self.norm_S5)
        sum7 = np.sum(self.norm_S7)
        sum11 = np.sum(self.norm_S11)

        # the first, third and fifth SNAP are normalized by their sum
        if sum1 != 0:
            self.norm2_S1 = self.norm_S1 / sum1
        else:
            self.norm2_S1 = np.ones(self.n) / self.n

        if sum3 != 0:
            self.norm2_S3 = self.norm_S3 / sum3
        else:
            self.norm2_S3 = self.norm_S3

        if sum5 != 0:
            self.norm2_S5 = self.norm_S5 / sum5
        else:
            self.norm2_S5 = np.ones(self.n) / self.n

        if sum7 != 0:
            self.norm2_S7 = self.norm_S7 / sum7
        else:
            self.norm2_S7 = np.ones(self.n) / self.n

        if sum11 != 0:
            self.norm2_S11 = self.norm_S11 / sum11
        else:
            self.norm2_S11 = np.ones(self.n) / self.n

        # AHP part (SNAP 1); SNAP 2, 4 and 6 are without it
        self.tezine_S1 = (self.tezineUsporedbi + self.norm2_S1) / 2
        self.tezine_S2 = self.norm2_S1
        self.tezine_S3 = (self.tezineUsporedbi + self.norm2_S3) / 2
        self.tezine_S4 = self.norm2_S3
        self.tezine_S5 = (self.tezineUsporedbi + self.norm2_S5) / 2
        self.tezine_S6 = self.norm2_S5
        self.tezine_S7 = (self.tezineUsporedbi + self.norm2_S7) / 2
        self.tezine_S8 = self.norm2_S7
        self.tezine_S9 = (self.tezineUsporedbi + self.norm_S9.flatten()) / 2
        self.tezine_S10 = self.norm_S9
        self.tezine_S11 = (self.tezineUsporedbi + self.norm2_S11) / 2
        self.tezine_S12 = self.norm2_S11
Code example #41
File: stat_bindot.py  Project: tr8dr/plotnine
def densitybin(x, weight=None, binwidth=None, bins=None, rangee=None):
    """
    Do density binning

    It does not collapse each bin with a count.

    Parameters
    ----------
    x : array-like
        Numbers to bin
    weight : array-like
        Weights
    binwidth : numeric
        Size of the bins
    bins : int
        Number of bins used to derive the binwidth when `binwidth` is not given
    rangee : tuple
        Range of x

    Returns
    -------
    data : DataFrame
    """
    if all(pd.isnull(x)):
        return pd.DataFrame()

    if weight is None:
        weight = np.ones(len(x))
    weight = np.asarray(weight)
    weight[np.isnan(weight)] = 0

    if rangee is None:
        rangee = np.min(x), np.max(x)
    if bins is None:
        bins = 30
    if binwidth is None:
        binwidth = np.ptp(rangee) / bins

    # Sort weight and x, by x
    order = np.argsort(x)
    weight = weight[order]
    x = x[order]

    cbin = 0                # Current bin ID
    binn = [None] * len(x)  # The bin ID for each observation
    # End position of current bin (scan left to right)
    binend = -np.inf

    # Scan list and put dots in bins
    for i, value in enumerate(x):
        # If past end of bin, start a new bin at this point
        if value >= binend:
            binend = value + binwidth
            cbin = cbin + 1
        binn[i] = cbin

    def func(series):
        return (series.min()+series.max())/2

    results = pd.DataFrame({'x': x,
                            'bin': binn,
                            'binwidth': binwidth,
                            'weight': weight})
    # This is a plyr::ddply
    results['bincenter'] = results.groupby('bin')['x'].transform(func)
    return results
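A minimal usage sketch for densitybin on made-up values (the data and binwidth are hypothetical; numpy and pandas are imported explicitly so the call runs on its own):

import numpy as np
import pandas as pd

x = np.array([0.10, 0.15, 0.80, 0.82, 0.85, 2.00])
binned = densitybin(x, binwidth=0.2)   # dots within 0.2 of a bin's first dot share that bin
print(binned[['x', 'bin', 'bincenter']])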
Code example #42
import numpy as np

h, l = np.loadtxt('data.csv', delimiter=',', usecols=(2, 3), unpack=True)
print "highest = ", np.max(h)
print "lowest = ", np.min(l)

print "Spread high price", np.ptp(h)
print "Spread low price", np.ptp(l)
Code example #43
File: sim_tod.py  Project: zonca/toast
    def simulate_scan(self, samples):
        # simulate the scanning with turnarounds. Regardless of firsttime,
        # we must simulate from the beginning of the CES.
        # Generate matching common flags.
        # Sets self._boresight.
        autotimer = timing.auto_timer(type(self).__name__)

        self._az = np.zeros(samples)
        self._commonflags = np.zeros(samples, dtype=np.uint8)
        # Scan starts from the left edge of the patch at the fixed scan rate
        lim_left = self._azmin
        lim_right = self._azmax
        if lim_right < lim_left:
            # We are scanning across the zero meridian
            lim_right += 2*np.pi
        az_last = lim_left
        scanrate = self._scanrate / self._rate # per sample, not per second
        # Modulate scan rate so that the rate on sky is constant
        scanrate /= np.cos(self._el)
        scan_accel = self._scan_accel / self._rate # per sample, not per second
        scan_accel /= np.cos(self._el)
        tol = self._rate / 10
        # the index, i, is relative to the start of the tod object.
        # If CES begun before the TOD, first values of i are negative.
        i = int((self._CES_start - self._firsttime - tol) * self._rate)
        starts = [0] # Subscan start indices
        self._stable_starts = []
        self._stable_stops = []
        while True:
            #
            # Left to right, fixed rate
            #
            self._stable_starts.append(i)
            dazdt = scanrate
            nstep = min(int((lim_right-az_last) // dazdt) + 1, samples-i)
            offset_in = max(0, -i)
            offset_out = max(0, i)
            ngood = nstep - offset_in
            if ngood > 0:
                self._commonflags[offset_out:offset_out+ngood] \
                    |= self.LEFTRIGHT_SCAN
                self._az[offset_out:offset_out+ngood] \
                    = az_last + np.arange(offset_in, offset_in+ngood)*dazdt
            i += nstep
            self._stable_stops.append(i)
            if i == samples:
                break
            az_last += dazdt*nstep
            #
            # Left to right, turnaround
            #
            nstep_full = int((2*scanrate) // scan_accel) + 1
            nstep = min(int(nstep_full), samples-i)
            offset_in = max(0, -i)
            offset_out = max(0, i)
            ngood = nstep - offset_in
            if ngood > 0:
                self._commonflags[offset_out:offset_out+ngood] \
                    |= self.LEFTRIGHT_TURNAROUND
                ii = np.arange(offset_in, offset_in+ngood)
                self._az[offset_out:offset_out+ngood] \
                    = az_last + ii*dazdt - 0.5*scan_accel*ii**2
                halfway = i + nstep_full//2
                if halfway > 0 and halfway < samples:
                    starts.append(halfway)
            i += nstep
            if i == samples:
                break
            az_last += dazdt*nstep - .5*scan_accel*nstep**2
            #
            # Right to left, fixed rate
            #
            self._stable_starts.append(i)
            dazdt = -scanrate
            nstep = min(int((lim_left-az_last) // dazdt) + 1, samples-i)
            offset_in = max(0, -i)
            offset_out = max(0, i)
            ngood = nstep - offset_in
            if ngood > 0:
                self._commonflags[offset_out:offset_out+ngood] \
                    |= self.RIGHTLEFT_SCAN
                self._az[offset_out:offset_out+ngood] \
                    = az_last + np.arange(offset_in, offset_in+ngood)*dazdt
            i += nstep
            self._stable_stops.append(i)
            if i == samples: break
            az_last += dazdt*nstep
            #
            # Right to left, turnaround
            #
            nstep_full = int((2*scanrate) // scan_accel) + 1
            nstep = min(int(nstep_full), samples-i)
            offset_in = max(0, -i)
            offset_out = max(0, i)
            ngood = nstep - offset_in
            if ngood > 0:
                self._commonflags[offset_out:offset_out+ngood] \
                    |= self.RIGHTLEFT_TURNAROUND
                ii = np.arange(offset_in, offset_in+ngood)
                self._az[offset_out:offset_out+ngood] \
                    = az_last + ii*dazdt + 0.5*scan_accel*ii**2
                halfway = i + nstep_full//2
                if halfway > 0 and halfway < samples:
                    starts.append(halfway)
            i += nstep
            if i == samples:
                break
            az_last += dazdt*nstep + .5*scan_accel*nstep**2

        starts.append(samples)
        sizes = np.diff(starts)
        if np.sum(sizes) != samples:
            raise RuntimeError("Subscans do not match samples")

        # Store the scan range before discarding samples not assigned
        # to this process

        self._az %= 2*np.pi
        if np.ptp(self._az) < np.pi:
            self._min_az = np.amin(self._az)
            self._max_az = np.amax(self._az)
        else:
            # Scanning across the zero azimuth.
            self._min_az = np.amin(self._az[self._az > np.pi]) - 2*np.pi
            self._max_az = np.amax(self._az[self._az < np.pi])
        self._min_el = self._el
        self._max_el = self._el

        return sizes, starts[:-1]
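A standalone sketch (hypothetical azimuth samples, not the toast API) of the wrap-around handling at the end of simulate_scan: after taking the azimuth modulo 2*pi, a scan that crosses zero azimuth shows a peak-to-peak span larger than pi, and the range is then reported around zero.

import numpy as np

az = np.radians([350.0, 355.0, 0.0, 5.0, 10.0]) % (2 * np.pi)
if np.ptp(az) < np.pi:
    min_az, max_az = np.amin(az), np.amax(az)
else:
    # scanning across zero azimuth: shift the upper part down by 2*pi
    min_az = np.amin(az[az > np.pi]) - 2 * np.pi
    max_az = np.amax(az[az < np.pi])
print(np.degrees([min_az, max_az]))   # roughly [-10, 10]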
Code example #44
#print('pd.read_csv(fname)')
#print(pd.read_csv(fname))
#print('pd.read_csv(fname)["x"]')
#print(pd.read_csv(fname)["x"])
#print('pd.read_csv(fname).iloc[:,1]')
#print(pd.read_csv(fname).iloc[:,1])
##cut_time_series = np.array(pd.read_csv(fname)["x"][:164])

temp_tot_df = pd.read_csv(fname)
temp_tot_df.columns = ["dttm", "sale_count"]
temp_df = temp_tot_df["sale_count"]

#print(temp_df)
temp_df[temp_df < 1] = 0.1
time_series = np.array(temp_df)
norm_time_series = (time_series - np.min(time_series)) / np.ptp(time_series)
norm_time_series[norm_time_series <= 0.0] = 0.000001
print(norm_time_series)
neural_net = TimeSeriesNnet(hidden_layers=hidlay,
                            activation_functions=actifunc)
neural_net.fit(norm_time_series, lag=nolag, epochs=noepoch)
neural_net.predict_ahead(n_ahead=noweek)

end_date = temp_tot_df.iloc[temp_tot_df.shape[0] - 1]["dttm"]
start_date = temp_tot_df.iloc[0]["dttm"]
week_list = pd.date_range(start=end_date, freq='W', periods=noweek + 1)
week_list = week_list[1:]
#print(week_list)

all_datapoints = list(neural_net.timeseries)[temp_df.shape[0]:]
#print(list(neural_net.timeseries))
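The network above is fit on min-max scaled values, so its predictions live on [0, 1]; a small sketch (with a stand-in time_series, since the real sales data is not shown) of mapping predictions back to sale counts by inverting the scaling:

import numpy as np

time_series = np.array([3.0, 7.0, 0.1, 12.0, 5.0])   # stand-in for the real sale counts
preds = np.array([0.12, 0.30, 0.05])                 # hypothetical normalized predictions
preds_in_counts = preds * np.ptp(time_series) + np.min(time_series)   # invert (x - min) / ptp
print(preds_in_counts)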
Code example #45
def Two_category_classification(rel_file_path, a=8, n=2000):
    # list to hold values of cost per iteration
    J_values = []

    # all data as a 2d array
    data = np.loadtxt(rel_file_path, delimiter=',')

    # m is the number of datapoints
    m, num_features = data.shape
    # num_features is one less than the number of columns because the output column is excluded
    num_features = num_features - 1

    # arrays to hold the averages and ranges of each feature
    feature_avg = np.zeros(num_features)
    feature_range = np.zeros(num_features)
    for feature in range(num_features):
        feature_avg[feature] = np.average(data[:, feature])
        feature_range[feature] = np.ptp(data[:, feature])
    # scale feature data with info from above
    for feature in range(num_features):
        for row in range(m):
            data[row,
                 feature] = (data[row, feature] -
                             feature_avg[feature]) / feature_range[feature]

    # theta gradients and cost initialization. We will use 1 to start.
    # there is one more theta than features, hence the +1
    theta_gradients = np.full(num_features + 1, 1)
    J = np.Inf
    # iterate until the optimum is found, n iterations have passed, or J is
    # increasing. If n iterations pass, increase alpha (multiply it by a
    # random factor between 1 and 2) to speed up the process and start over.
    # If J is increasing, alpha is too large, so decrease it (divide by a
    # random factor between 1 and 2) and start over.
    # The optimum is found when all theta gradients are sufficiently small.
    #initialize number of iterations and theta values to 0
    iterations = 0
    theta_vals = np.zeros(num_features + 1)
    while True:
        #bool to hold if we will break or not
        break_out = True
        for gradient in theta_gradients:
            if np.abs(gradient) > 10e-6:
                break_out = False
        if break_out:
            break
        iterations = iterations + 1
        # keep track of previous J to ensure it is not increasing
        J_prev = J
        J, theta_gradients = Cost_and_gradients(data, theta_vals, m,
                                                num_features)
        # check that the iteration count is not too high, otherwise the
        # convergence is too slow! If so, increase alpha and reset
        if (iterations > n):
            # too many iterations: increase alpha, reset, and start over
            a = a * (1 + random.uniform(0, 1))
            print("Trying a bigger alpha value, alpha = " + str(a), end='\n')
            # set gradients to 1 as a temporary value
            theta_gradients = np.full(num_features, 1)
            #reset thetas and iterations
            theta_vals = np.zeros(num_features + 1)
            #reset J_values
            J_values = []
            iterations = 0
            J = np.inf
            continue
        # ensure cost is not increasing
        if (J_prev < J):
            # J increased: decrease alpha and start over
            a = a / (1 + random.uniform(0, 1))
            print("Trying a smaller alpha value, alpha = " + str(a), end='\n')
            # set gradients to 1 as a temporary value
            theta_gradients = np.full(num_features, 1)
            #reset thetas and iterations
            theta_vals = np.zeros(num_features + 1)
            #reset J_values
            J_values = []
            iterations = 0
            J = np.inf
            continue
        # update thetas and J_values and scale gradients by alpha
        theta_vals = theta_vals - a * theta_gradients
        J_values.append(J)
    # print the results
    print("The optimal theta paremeters are: " + str(repr(theta_vals)))
    # show plots if feasible
    if (num_features == 2):
        Plot_data_2_cats(data, theta_vals, J_values, m, feature_avg,
                         feature_range)
    return theta_vals, feature_avg, feature_range
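The per-feature scaling loop above can be written as a single vectorized step; a minimal sketch on hypothetical data (not the author's exact routine):

import numpy as np

data = np.array([[1.0, 10.0, 0.0],
                 [2.0, 30.0, 1.0],
                 [4.0, 20.0, 0.0]])          # last column is the label
X = data[:, :-1]
feature_avg = X.mean(axis=0)
feature_range = np.ptp(X, axis=0)
data[:, :-1] = (X - feature_avg) / feature_range   # same centering and range scaling, no loops
print(data)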
Code example #46
File: shapes.py  Project: eryao0/tyssue
def sheet_from_cell_centers(points, noise=0, interp_s=1e-4):
    """Returns a Sheet object from the Voronoï tessalation
    of the cell centers.

    The strategy is to project the points on a sphere, get the Voronoï
    tessalation on this sphere and reproject the vertices on the
    original (implicit) surface through linear interpolation of the cell centers.

    Works for relatively smooth surfaces (at the very minimum star convex).

    Parameters
    ----------

    points : np.ndarray of shape (Nf, 3)
        the x, y, z coordinates of the cell centers
    noise : float, default 0.0
        additive normal noise stdev
    interp_s : float, default 1e-4
        interpolation smoothing factor (might need to set higher)

    Returns
    -------
    sheet : a :class:`Sheet` object with Nf faces


    """
    points = points.copy()
    if noise:
        points += np.random.normal(0, scale=noise, size=points.shape)
    points -= points.mean(axis=0)
    bbox = np.ptp(points, axis=0)
    points /= bbox

    rhos = np.linalg.norm(points, axis=1)
    thetas = np.arcsin(points[:, 2] / rhos)
    phis = np.arctan2(points[:, 0], points[:, 1])

    sphere_rad = rhos.max() * 1.1

    points_sphere = np.vstack((
        sphere_rad * np.cos(thetas) * np.cos(phis),
        sphere_rad * np.cos(thetas) * np.sin(phis),
        sphere_rad * np.sin(thetas),
    )).T
    points_sphere = np.concatenate(([[0, 0, 0]], points_sphere))

    vor3D = Voronoi(points_sphere)

    dsets = from_3d_voronoi(vor3D)
    eptm_ = Epithelium("v", dsets)

    eptm_ = single_cell(eptm_, 0)

    eptm = get_outer_sheet(eptm_)
    eptm.reset_index()
    eptm.reset_topo()
    eptm.vert_df["rho"] = np.linalg.norm(eptm.vert_df[eptm.coords], axis=1)
    mean_rho = eptm.vert_df["rho"].mean()

    SheetGeometry.scale(eptm, sphere_rad / mean_rho, ["x", "y", "z"])
    SheetGeometry.update_all(eptm)

    eptm.face_df["phi"] = np.arctan2(eptm.face_df.y, eptm.face_df.x)
    eptm.face_df["rho"] = np.linalg.norm(eptm.face_df[["x", "y", "z"]], axis=1)
    eptm.face_df["theta"] = np.arcsin(eptm.face_df.z / eptm.face_df["rho"])
    _itrp = interpolate.SmoothSphereBivariateSpline(thetas + np.pi / 2,
                                                    phis + np.pi,
                                                    rhos,
                                                    s=interp_s)
    eptm.face_df["rho"] = _itrp(eptm.face_df["theta"] + np.pi / 2,
                                eptm.face_df["phi"] + np.pi,
                                grid=False)
    eptm.face_df["x"] = eptm.face_df.eval("rho * cos(theta) * cos(phi)")
    eptm.face_df["y"] = eptm.face_df.eval("rho * cos(theta) * sin(phi)")
    eptm.face_df["z"] = eptm.face_df.eval("rho * sin(theta)")

    eptm.edge_df[["fx", "fy",
                  "fz"]] = eptm.upcast_face(eptm.face_df[["x", "y", "z"]])
    eptm.vert_df[["x", "y",
                  "z"]] = eptm.edge_df.groupby("srce")[["fx", "fy",
                                                        "fz"]].mean()
    for i, c in enumerate("xyz"):
        eptm.vert_df[c] *= bbox[i]

    SheetGeometry.update_all(eptm)

    eptm.sanitize(trim_borders=True)

    eptm.reset_index()
    eptm.reset_topo()
    SheetGeometry.update_all(eptm)
    null_length = eptm.edge_df.query("length == 0")

    while null_length.shape[0]:
        type1_transition(eptm, null_length.index[0])
        SheetGeometry.update_all(eptm)
        null_length = eptm.edge_df.query("length == 0")

    return eptm
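The bounding-box normalization used above (divide the centered points by np.ptp per axis, then multiply the vertex coordinates by bbox again at the end) can be isolated as a small sketch on hypothetical points:

import numpy as np

rng = np.random.default_rng(1)
points = rng.normal(size=(50, 3)) * np.array([10.0, 2.0, 0.5])
points -= points.mean(axis=0)
bbox = np.ptp(points, axis=0)         # per-axis extent
unit_points = points / bbox           # roughly unit extent along every axis
assert np.allclose(np.ptp(unit_points, axis=0), 1.0)
assert np.allclose(unit_points * bbox, points)   # rescaling restores the original extent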
Code example #47
def run_simulation(prob=None):
    """
    Routine to run the simulation of a second order problem

    Args:
        prob (str): name of the problem

    """

    if prob == 'outer_solar_system':
        description, controller_params = setup_outer_solar_system()
        # set time parameters
        t0 = 0.0
        Tend = 10000.0
        num_procs = 100
        maxmeaniter = 4.0
    elif prob == 'full_solar_system':
        description, controller_params = setup_full_solar_system()
        # set time parameters
        t0 = 0.0
        Tend = 1000.0
        num_procs = 100
        maxmeaniter = 16.0
    else:
        raise NotImplementedError('Problem type not implemented, got %s' % prob)

    f = open(prob + '_out.txt', 'w')
    out = 'Running ' + prob + ' problem with %s processors...' % num_procs
    f.write(out + '\n')
    print(out)

    # instantiate the controller
    controller = allinclusive_classic_nonMPI(num_procs=num_procs, controller_params=controller_params,
                                             description=description)

    # get initial values on finest level
    P = controller.MS[0].levels[0].prob
    uinit = P.u_exact(t=t0)

    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)

    # filter statistics by type (number of iterations)
    filtered_stats = filter_stats(stats, type='niter')

    # convert filtered statistics to list of iterations count, sorted by process
    iter_counts = sort_stats(filtered_stats, sortby='time')

    # compute and print statistics
    # for item in iter_counts:
    #     out = 'Number of iterations for time %4.2f: %2i' % item
    #     f.write(out)
    #     print(out)

    niters = np.array([item[1] for item in iter_counts])
    out = '   Mean number of iterations: %4.2f' % np.mean(niters)
    f.write(out + '\n')
    print(out)
    out = '   Range of values for number of iterations: %2i ' % np.ptp(niters)
    f.write(out + '\n')
    print(out)
    out = '   Position of max/min number of iterations: %2i -- %2i' % \
          (int(np.argmax(niters)), int(np.argmin(niters)))
    f.write(out + '\n')
    print(out)
    out = '   Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
    f.write(out + '\n')
    print(out)
    f.close()

    assert np.mean(niters) <= maxmeaniter, 'Mean number of iterations is too high, got %s' % np.mean(niters)

    fname = 'data/' + prob + '.dat'
    f = open(fname, 'wb')
    dill.dump(stats, f)
    f.close()

    assert os.path.isfile(fname), 'Run for %s did not create stats file' % prob
Code example #48
    def __init__(self, endog=None, exog=None, order=None,
                 seasonal_order=None, ar_order=None, diff=None, ma_order=None,
                 seasonal_ar_order=None, seasonal_diff=None,
                 seasonal_ma_order=None, seasonal_periods=None, trend=None,
                 enforce_stationarity=None, enforce_invertibility=None,
                 concentrate_scale=None, trend_offset=1, dates=None, freq=None,
                 missing='none'):

        # Basic parameters
        self.enforce_stationarity = enforce_stationarity
        self.enforce_invertibility = enforce_invertibility
        self.concentrate_scale = concentrate_scale
        self.trend_offset = trend_offset

        # Validate that we were not given conflicting specifications
        has_order = order is not None
        has_specific_order = (ar_order is not None or diff is not None or
                              ma_order is not None)
        has_seasonal_order = seasonal_order is not None
        has_specific_seasonal_order = (seasonal_ar_order is not None or
                                       seasonal_diff is not None or
                                       seasonal_ma_order is not None or
                                       seasonal_periods is not None)
        if has_order and has_specific_order:
            raise ValueError('Cannot specify both `order` and either of'
                             ' `ar_order` or `ma_order`.')
        if has_seasonal_order and has_specific_seasonal_order:
            raise ValueError('Cannot specify both `seasonal_order` and any of'
                             ' `seasonal_ar_order`, `seasonal_ma_order`,'
                             ' or `seasonal_periods`.')

        # Compute `order`
        if has_specific_order:
            ar_order = 0 if ar_order is None else ar_order
            diff = 0 if diff is None else diff
            ma_order = 0 if ma_order is None else ma_order
            order = (ar_order, diff, ma_order)
        elif not has_order:
            order = (0, 0, 0)

        # Compute `seasonal_order`
        if has_specific_seasonal_order:
            seasonal_ar_order = (
                0 if seasonal_ar_order is None else seasonal_ar_order)
            seasonal_diff = 0 if seasonal_diff is None else seasonal_diff
            seasonal_ma_order = (
                0 if seasonal_ma_order is None else seasonal_ma_order)
            seasonal_periods = (
                0 if seasonal_periods is None else seasonal_periods)
            seasonal_order = (seasonal_ar_order, seasonal_diff,
                              seasonal_ma_order, seasonal_periods)
        elif not has_seasonal_order:
            seasonal_order = (0, 0, 0, 0)

        # Validate shapes of `order`, `seasonal_order`
        if len(order) != 3:
            raise ValueError('`order` argument must be an iterable with three'
                             ' elements.')
        if len(seasonal_order) != 4:
            raise ValueError('`seasonal_order` argument must be an iterable'
                             ' with four elements.')

        # Validate differencing parameters
        if order[1] < 0:
            raise ValueError('Cannot specify negative differencing.')
        if order[1] != int(order[1]):
            raise ValueError('Cannot specify fractional differencing.')
        if seasonal_order[1] < 0:
            raise ValueError('Cannot specify negative seasonal differencing.')
        if seasonal_order[1] != int(seasonal_order[1]):
            raise ValueError('Cannot specify fractional seasonal'
                             ' differencing.')
        if seasonal_order[3] < 0:
            raise ValueError('Cannot specify negative seasonal periodicity.')

        # Standardize to integers or lists of integers
        order = (
            standardize_lag_order(order[0], 'AR'),
            int(order[1]),
            standardize_lag_order(order[2], 'MA'))
        seasonal_order = (
            standardize_lag_order(seasonal_order[0], 'seasonal AR'),
            int(seasonal_order[1]),
            standardize_lag_order(seasonal_order[2], 'seasonal MA'),
            int(seasonal_order[3]))

        # Validate seasonals
        if seasonal_order[3] == 1:
            raise ValueError('Seasonal periodicity must be greater than 1.')
        if ((seasonal_order[0] != 0 or seasonal_order[1] != 0 or
                seasonal_order[2] != 0) and seasonal_order[3] == 0):
            raise ValueError('Must include nonzero seasonal periodicity if'
                             ' including seasonal AR, MA, or differencing.')

        # Basic order
        self.order = order
        self.ar_order, self.diff, self.ma_order = order

        self.seasonal_order = seasonal_order
        (self.seasonal_ar_order, self.seasonal_diff, self.seasonal_ma_order,
         self.seasonal_periods) = seasonal_order

        # Lists of included lags
        if isinstance(self.ar_order, list):
            self.ar_lags = self.ar_order
        else:
            self.ar_lags = np.arange(1, self.ar_order + 1).tolist()
        if isinstance(self.ma_order, list):
            self.ma_lags = self.ma_order
        else:
            self.ma_lags = np.arange(1, self.ma_order + 1).tolist()

        if isinstance(self.seasonal_ar_order, list):
            self.seasonal_ar_lags = self.seasonal_ar_order
        else:
            self.seasonal_ar_lags = (
                np.arange(1, self.seasonal_ar_order + 1).tolist())
        if isinstance(self.seasonal_ma_order, list):
            self.seasonal_ma_lags = self.seasonal_ma_order
        else:
            self.seasonal_ma_lags = (
                np.arange(1, self.seasonal_ma_order + 1).tolist())

        # Maximum lag orders
        self.max_ar_order = self.ar_lags[-1] if self.ar_lags else 0
        self.max_ma_order = self.ma_lags[-1] if self.ma_lags else 0

        self.max_seasonal_ar_order = (
            self.seasonal_ar_lags[-1] if self.seasonal_ar_lags else 0)
        self.max_seasonal_ma_order = (
            self.seasonal_ma_lags[-1] if self.seasonal_ma_lags else 0)

        self.max_reduced_ar_order = (
            self.max_ar_order +
            self.max_seasonal_ar_order * self.seasonal_periods)
        self.max_reduced_ma_order = (
            self.max_ma_order +
            self.max_seasonal_ma_order * self.seasonal_periods)

        # Check that we don't have duplicate AR or MA lags from the seasonal
        # component
        ar_lags = set(self.ar_lags)
        seasonal_ar_lags = set(np.array(self.seasonal_ar_lags)
                               * self.seasonal_periods)
        duplicate_ar_lags = ar_lags.intersection(seasonal_ar_lags)
        if len(duplicate_ar_lags) > 0:
            raise ValueError('Invalid model: autoregressive lag(s) %s are'
                             ' in both the seasonal and non-seasonal'
                             ' autoregressive components.'
                             % duplicate_ar_lags)

        ma_lags = set(self.ma_lags)
        seasonal_ma_lags = set(np.array(self.seasonal_ma_lags)
                               * self.seasonal_periods)
        duplicate_ma_lags = ma_lags.intersection(seasonal_ma_lags)
        if len(duplicate_ma_lags) > 0:
            raise ValueError('Invalid model: moving average lag(s) %s are'
                             ' in both the seasonal and non-seasonal'
                             ' moving average components.'
                             % duplicate_ma_lags)

        # Handle trend
        self.trend = trend
        self.trend_poly, _ = prepare_trend_spec(trend)

        # Check for a constant column in the provided exog
        exog_is_pandas = _is_using_pandas(exog, None)
        if (exog is not None and len(self.trend_poly) > 0 and
                self.trend_poly[0] == 1):
            # Figure out if we have any constant columns
            x = np.asanyarray(exog)
            ptp0 = np.ptp(x, axis=0)
            col_is_const = ptp0 == 0
            nz_const = col_is_const & (x[0] != 0)
            col_const = nz_const

            # If we already have a constant column, raise an error
            if np.any(col_const):
                raise ValueError('A constant trend was included in the model'
                                 ' specification, but the `exog` data already'
                                 ' contains a column of constants.')

        # This contains the included exponents of the trend polynomial,
        # where e.g. the constant term has exponent 0, a linear trend has
        # exponent 1, etc.
        self.trend_terms = np.where(self.trend_poly == 1)[0]
        # Trend order is either the degree of the trend polynomial, if all
        # exponents are included, or a list of included exponents. Here we need
        # to make a distinction between a degree zero polynomial (i.e. a
        # constant) and the zero polynomial (i.e. not even a constant). The
        # former has `trend_order = 0`, while the latter has
        # `trend_order = None`.
        self.k_trend = len(self.trend_terms)
        if len(self.trend_terms) == 0:
            self.trend_order = None
            self.trend_degree = None
        elif np.all(self.trend_terms == np.arange(len(self.trend_terms))):
            self.trend_order = self.trend_terms[-1]
            self.trend_degree = self.trend_terms[-1]
        else:
            self.trend_order = self.trend_terms
            self.trend_degree = self.trend_terms[-1]

        # Handle endog / exog
        # Standardize exog
        self.k_exog, exog = prepare_exog(exog)

        # Standardize endog (including creating a faux endog if necessary)
        faux_endog = endog is None
        if endog is None:
            endog = [] if exog is None else np.zeros(len(exog)) * np.nan

        # Add trend data into exog
        nobs = len(endog) if exog is None else len(exog)
        if self.trend_order is not None:
            # Add in the data
            trend_data = self.construct_trend_data(nobs, trend_offset)
            if exog is None:
                exog = trend_data
            elif exog_is_pandas:
                trend_data = pd.DataFrame(trend_data, index=exog.index,
                                          columns=self.construct_trend_names())
                exog = pd.concat([trend_data, exog], axis=1)
            else:
                exog = np.c_[trend_data, exog]

        # Create an underlying time series model, to handle endog / exog,
        # especially validating shapes, retrieving names, and potentially
        # providing us with a time series index
        self._model = TimeSeriesModel(endog, exog=exog, dates=dates, freq=freq,
                                      missing=missing)
        self.endog = None if faux_endog else self._model.endog
        self.exog = self._model.exog

        # Validate endog shape
        if not faux_endog and self.endog.ndim > 1 and self.endog.shape[1] > 1:
            raise ValueError('SARIMAX models require univariate `endog`. Got'
                             ' shape %s.' % str(self.endog.shape))

        self._has_missing = (
            None if faux_endog else np.any(np.isnan(self.endog)))
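The constant-column check above reduces to two observations: a column is constant when its peak-to-peak range is zero, and it conflicts with a constant trend only if that constant is nonzero. A standalone sketch with a hypothetical exog array (not the statsmodels API):

import numpy as np

exog = np.column_stack([np.ones(5), np.arange(5.0), np.zeros(5)])
ptp0 = np.ptp(exog, axis=0)
col_is_const = ptp0 == 0                  # columns 0 and 2 are constant
nz_const = col_is_const & (exog[0] != 0)  # only column 0 is a nonzero constant
print(col_is_const, nz_const)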
Code example #49
File: test_ica.py  Project: sbp894/mne-python
def test_ica_core(method):
    """Test ICA on raw and epochs."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()

    # XXX. The None cases helped reveal bugs but are time consuming.
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True)
    noise_cov = [None, test_cov]
    # removed None cases to speed up...
    n_components = [2, 1.0]  # for future dbg add cases
    max_pca_components = [3]
    picks_ = [picks]
    methods = [method]
    iter_ica_params = product(noise_cov, n_components, max_pca_components,
                              picks_, methods)

    # # test init catchers
    pytest.raises(ValueError, ICA, n_components=3, max_pca_components=2)
    pytest.raises(ValueError, ICA, n_components=2.3, max_pca_components=2)

    # test essential core functionality
    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
        # Test ICA raw
        ica = ICA(noise_cov=n_cov,
                  n_components=n_comp,
                  max_pca_components=max_n,
                  n_pca_components=max_n,
                  random_state=0,
                  method=method,
                  max_iter=1)
        pytest.raises(ValueError, ica.__contains__, 'mag')

        print(ica)  # to test repr

        # test fit checker
        pytest.raises(RuntimeError, ica.get_sources, raw)
        pytest.raises(RuntimeError, ica.get_sources, epochs)

        # test decomposition
        with pytest.warns(UserWarning, match='did not converge'):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        repr(ica)  # to test repr
        assert ('mag' in ica)  # should now work without error

        # test re-fit
        unmixing1 = ica.unmixing_matrix_
        with pytest.warns(UserWarning, match='did not converge'):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)

        raw_sources = ica.get_sources(raw)
        # test for #3804
        assert_equal(raw_sources._filenames, [None])
        print(raw_sources)

        # test for gh-6271 (scaling of ICA traces)
        fig = raw_sources.plot()
        assert len(fig.axes[0].lines) in (4, 5, 6)
        for line in fig.axes[0].lines:
            y = line.get_ydata()
            if len(y) > 2:  # actual data, not markers
                assert np.ptp(y) < 15
        plt.close('all')

        sources = raw_sources[:, :][0]
        assert (sources.shape[0] == ica.n_components_)

        # test preload filter
        raw3 = raw.copy()
        raw3.preload = False
        pytest.raises(RuntimeError, ica.apply, raw3, include=[1, 2])

        #######################################################################
        # test epochs decomposition
        ica = ICA(noise_cov=n_cov,
                  n_components=n_comp,
                  max_pca_components=max_n,
                  n_pca_components=max_n,
                  random_state=0,
                  method=method)
        with pytest.warns(None):  # sometimes warns
            ica.fit(epochs, picks=picks)
        data = epochs.get_data()[:, 0, :]
        n_samples = np.prod(data.shape)
        assert_equal(ica.n_samples_, n_samples)
        print(ica)  # to test repr

        sources = ica.get_sources(epochs).get_data()
        assert (sources.shape[1] == ica.n_components_)

        pytest.raises(ValueError,
                      ica.score_sources,
                      epochs,
                      target=np.arange(1))

        # test preload filter
        epochs3 = epochs.copy()
        epochs3.preload = False
        pytest.raises(RuntimeError, ica.apply, epochs3, include=[1, 2])

    # test for bug with whitener updating
    _pre_whitener = ica.pre_whitener_.copy()
    epochs._data[:, 0, 10:15] *= 1e12
    ica.apply(epochs.copy())
    assert_array_equal(_pre_whitener, ica.pre_whitener_)

    # test expl. var threshold leading to empty sel
    ica.n_components = 0.1
    pytest.raises(RuntimeError, ica.fit, epochs)

    offender = 1, 2, 3,
    pytest.raises(ValueError, ica.get_sources, offender)
    pytest.raises(TypeError, ica.fit, offender)
    pytest.raises(TypeError, ica.apply, offender)
Code example #50
def influence_plot(results,
                   external=True,
                   alpha=.05,
                   criterion="cooks",
                   size=48,
                   plot_alpha=.75,
                   ax=None,
                   **kwargs):
    """
    Plot of influence in regression. Plots studentized resids vs. leverage.

    Parameters
    ----------
    results : results instance
        A fitted model.
    external : bool
        Whether to use externally or internally studentized residuals. It is
        recommended to leave external as True.
    alpha : float
        The alpha value to identify large studentized residuals. Large means
        abs(resid_studentized) > t.ppf(1-alpha/2, dof=results.df_resid)
    criterion : str {'DFFITS', 'Cooks'}
        Which criterion to base the size of the points on. Options are
        DFFITS or Cook's D.
    size : float
        The range of `criterion` is mapped onto marker areas between 8**2
        and size**2 points.
    plot_alpha : float
        The `alpha` of the plotted points.
    ax : matplotlib Axes instance
        An instance of a matplotlib Axes.

    Returns
    -------
    fig : matplotlib figure
        The matplotlib figure that contains the Axes.

    Notes
    -----
    Row labels are annotated for observations in which the leverage, measured
    by the diagonal of the hat matrix, is high or the residuals are large,
    since the combination of large residuals and high leverage indicates an
    influential point. The threshold for large residuals can be controlled
    using the `alpha` parameter. Large leverage points are identified as
    hat_i > 2 * (df_model + 1)/nobs.
    """
    fig, ax = utils.create_mpl_ax(ax)

    infl = results.get_influence()

    if criterion.lower().startswith('coo'):
        psize = infl.cooks_distance[0]
    elif criterion.lower().startswith('dff'):
        psize = np.abs(infl.dffits[0])
    else:
        raise ValueError("Criterion %s not understood" % criterion)

    # scale the variables
    #TODO: what is the correct scaling and the assumption here?
    #we want plots to be comparable across different plots
    #so we would need to use the expected distribution of criterion probably
    old_range = np.ptp(psize)
    new_range = size**2 - 8**2

    psize = (psize - psize.min()) * new_range / old_range + 8**2

    leverage = infl.hat_matrix_diag
    if external:
        resids = infl.resid_studentized_external
    else:
        resids = infl.resid_studentized_internal

    from scipy import stats

    cutoff = stats.t.ppf(1. - alpha / 2, results.df_resid)
    large_resid = np.abs(resids) > cutoff
    large_leverage = leverage > _high_leverage(results)
    large_points = np.logical_or(large_resid, large_leverage)

    ax.scatter(leverage, resids, s=psize, alpha=plot_alpha)

    # add point labels
    labels = results.model.data.row_labels
    if labels is None:
        labels = lrange(len(resids))
    ax = utils.annotate_axes(
        np.where(large_points)[0], labels, lzip(leverage, resids),
        lzip(-(psize / 2)**.5, (psize / 2)**.5), "x-large", ax)

    #TODO: make configurable or let people do it ex-post?
    font = {"fontsize": 16, "color": "black"}
    ax.set_ylabel("Studentized Residuals", **font)
    ax.set_xlabel("H Leverage", **font)
    ax.set_title("Influence Plot", **font)
    return fig
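The marker-size mapping above is a linear rescale of the criterion values onto the interval [8**2, size**2]; a small sketch with hypothetical Cook's distances:

import numpy as np

psize = np.array([0.01, 0.05, 0.30])   # hypothetical Cook's distances
size = 48
old_range = np.ptp(psize)
new_range = size**2 - 8**2
scaled = (psize - psize.min()) * new_range / old_range + 8**2
print(scaled)   # smallest value maps to 64, largest to 2304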
Code example #51
    def recover_or_die(self, pop, soc, config):
        '''see whether to recover or die
    
    
        Keyword arguments
        -----------------
        population : ndarray
            array containing all data on the population
    
        frame : int
            the current timestep of the simulation
    
        recovery_duration : tuple
            lower and upper bounds of duration of recovery, in simulation steps
    
        mortality_chance : float
            the odds that someone dies instead of recovering (between 0 and 1)
    
        risk_age : int or float
            the age from which mortality risk starts increasing
    
        critical_age: int or float
            the age where mortality risk equals critical_mortality_chance
    
        critical_mortality_chance : float
            the heightened odds that an infected person has a fatal ending
    
        risk_increase : string
            can be 'quadratic' or 'linear', determines whether the mortality risk
            between the at-risk age and the critical age increases linearly or
            quadratically
    
        no_treatment_factor : int or float
            defines a change in mortality odds if someone cannot get treatment. Can
            be larger than one to increase risk, or lower to decrease it.
    
        treatment_dependent_risk : bool
            whether availability of treatment influences patient risk
    
        treatment_factor : int or float
            defines a change in mortality odds if someone is in treatment. Can
            be larger than one to increase risk, or lower to decrease it.
    
        verbose : bool
            whether to report to terminal the recoveries and deaths for each simulation step
        '''
        population = pop.population
        #find infected people
        infected_people = population[population[:, 6] == 1]

        #define vector of how long everyone has been sick
        illness_duration_vector = config.frame - infected_people[:, 8]

        recovery_odds_vector = (illness_duration_vector -
                                self.recovery_duration[0]) / np.ptp(
                                    self.recovery_duration)
        recovery_odds_vector = np.clip(recovery_odds_vector,
                                       a_min=0,
                                       a_max=None)

        #update states of sick people
        indices = infected_people[:, 0][
            recovery_odds_vector >= infected_people[:, 9]]

        recovered = []
        fatalities = []

        #decide whether to die or recover
        for idx in indices:
            #Non-lethal virus
            updated_mortality_chance = 0

            if infected_people[infected_people[:, 0] == int(
                    idx)][:, 10] == 0 and soc.treatment_dependent_risk:
                #if person is not in treatment, increase risk by no_treatment_factor
                updated_mortality_chance = updated_mortality_chance * soc.no_treatment_factor
            elif infected_people[infected_people[:, 0] == int(
                    idx)][:, 10] == 1 and soc.treatment_dependent_risk:
                #if person is in treatment, decrease risk by treatment_factor
                updated_mortality_chance = updated_mortality_chance * soc.treatment_factor

            if np.random.random() <= updated_mortality_chance:
                #die
                infected_people[:, 6][infected_people[:, 0] == idx] = 3
                infected_people[:, 10][infected_people[:, 0] == idx] = 0
                fatalities.append(
                    np.int32(infected_people[infected_people[:,
                                                             0] == idx][:,
                                                                        0][0]))
            else:
                #recover (become immune)
                infected_people[:, 6][infected_people[:, 0] == idx] = 2
                infected_people[:, 10][infected_people[:, 0] == idx] = 0
                recovered.append(
                    np.int32(infected_people[infected_people[:,
                                                             0] == idx][:,
                                                                        0][0]))

        if len(fatalities) > 0 and config.verbose:
            print('\nat timestep %i these people died: %s' %
                  (config.frame, fatalities))
        if len(recovered) > 0 and config.verbose:
            print('\nat timestep %i these people recovered: %s' %
                  (config.frame, recovered))

        #put array back into population
        population[population[:, 6] == 1] = infected_people

        return population
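The recovery odds above form a clipped linear ramp over the recovery window: zero before the lower duration bound, reaching one at the upper bound. A small sketch with hypothetical duration bounds:

import numpy as np

recovery_duration = (50, 200)                          # hypothetical (lower, upper) bounds, in steps
illness_duration = np.array([10, 50, 125, 200, 300])
odds = (illness_duration - recovery_duration[0]) / np.ptp(recovery_duration)
odds = np.clip(odds, a_min=0, a_max=None)
print(odds)   # approximately [0. 0. 0.5 1. 1.667], compared against each agent's random draw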
Code example #52
def parallel_coords(df):
    df['train_batch_size'] = df.train_batch_size.apply(str).astype('category')
    # df['train_batch_size_encoded'] = df.train_batch_size.cat.codes
    cols = ['optimizer', 'activation', 'train_batch_size', 'mean_acc']
    x = [
        i for i in range(len(cols) - 1)
    ]  # -1 for colorbar var. 'mean_acc' which is excluded, len(cols) not len(cols)-1 because shared y-axis.
    mean_acc_colors = ['red', 'orange', 'yellow', 'green', 'blue']
    mean_acc_cut = pd.cut(df.mean_acc, [0.0, 0.25, 0.5, 0.75, 1.0])
    mean_acc_color_mappings = {
        mean_acc_cut.cat.categories[i]: mean_acc_colors[i]
        for i, _ in enumerate(mean_acc_cut.cat.categories)
    }

    fig = plt.figure()
    # First axis is for optimizer:
    optimizer_axis = plt.subplot(1, len(x), 1)
    fig.add_subplot(optimizer_axis, sharex=None, sharey=None)
    # plt.setp(optimizer_axis.get_xticklabels(), fontsize=6)

    # Second axis is for activation:
    activation_axis = plt.subplot(1, len(x), 2)
    fig.add_subplot(activation_axis, sharex=None, sharey=None)

    # Third axis is for train_batch_size and does sharex:
    # train_batch_axis = plt.subplot(1, len(x), 3)
    # fig.add_subplot(train_batch_axis, sharex=activation_axis, sharey=None)
    # fig.add_subplot()

    # Third axis is for colorbar:
    cax = plt.subplot(1, len(x), 3)
    fig.add_subplot(cax, sharex=None, sharey=None)

    # axes = [optimizer_axis, activation_axis, train_batch_axis, cax]
    axes = [optimizer_axis, activation_axis, cax]

    # min, max, and range for each column:
    min_max_range = {}
    for col in cols:
        if col == 'optimizer' or col == 'activation' or col == 'train_batch_size':
            # Range for categorical's is dependent upon number of unique categories:
            min_max_range[col] = [
                df[col].cat.codes.min(), df[col].cat.codes.max(),
                np.ptp(df[col].cat.codes)
            ]
        else:
            min_max_range[col] = [
                df[col].min(), df[col].max(),
                np.ptp(df[col])
            ]
            # Normalize the column:
            df[col] = np.true_divide(df[col] - df[col].min(), np.ptp(df[col]))

    # Plot each row
    for i, ax in enumerate(axes):
        if i == len(axes) - 1:
            continue
        else:
            for idx in df.index:
                mean_acc_interval = mean_acc_cut.loc[idx]
                ax.plot(
                    x, df.loc[idx,
                              ['optimizer', 'activation', 'train_batch_size']],
                    mean_acc_color_mappings[mean_acc_interval])
            ax.set_xlim([x[i], x[i + 1]])

    # Save the original tick labels for the last axis:
    df_y_tick_labels = [
        tick.get_text() for tick in axes[0].get_yticklabels(minor=False)
    ]

    # set tick positions and labels on y axis for each plot
    def set_ticks_for_axis(dim, ax, categorical, ticks, ytick_labels=None):
        min_val, max_val, val_range = min_max_range[cols[dim]]
        step = val_range / float(ticks - 1)

        # For final column:
        if categorical:
            norm_min = df[cols[dim]].cat.codes.min()
            norm_range = np.ptp(df[cols[dim]].cat.codes)
        else:
            norm_min = df[cols[dim]].min()
            norm_range = np.ptp(df[cols[dim]])
        norm_step = norm_range / float(ticks - 1)

        if not ytick_labels:
            df_tick_labels = ax.get_yticklabels(minor=False)
            tick_labels = [
                tick_label.get_text().split('_')[-1]
                for tick_label in df_tick_labels
            ]
        else:
            tick_labels = ytick_labels
        ticks = [round(norm_min + norm_step * i, 2) for i in range(ticks)]
        if dim == 0:
            # Optimizer
            relevant_tick_labels = [0, len(tick_labels) - 1]
        elif dim == 1:
            # Activation
            relevant_tick_labels = [1, len(tick_labels) - 2]
        elif dim == 2:
            # Train batch size
            relevant_tick_labels = [2, 3]
        else:
            relevant_tick_labels = None
        tick_labels = [
            tick_labels[i] if i in relevant_tick_labels else ''
            for i in range(len(tick_labels))
        ]
        ax.set_yticklabels(tick_labels)
        # ax.set_you
        # ax.set_ylim([0, 1], auto=True)
        # ax.autoscale(enable=True, axis=ax.yaxis)

    for dim, ax in enumerate(axes):
        if dim == len(axes) - 1:
            ax.xaxis.set_major_locator(ticker.FixedLocator([0]))
            set_ticks_for_axis(dim,
                               ax,
                               ytick_labels=df_y_tick_labels,
                               categorical=True,
                               ticks=2)
            ax.set_xticklabels([cols[dim]])
        else:
            ax.xaxis.set_major_locator(ticker.FixedLocator([dim]))
            set_ticks_for_axis(dim,
                               ax,
                               ytick_labels=None,
                               categorical=True,
                               ticks=2)
            ax.set_xticklabels([cols[dim]])

    # Move final axis' ticks to right-hand side
    # ax = plt.twinx(axes[1])
    # dim = 1
    # ax.xaxis.set_major_locator(ticker.FixedLocator([x[0], x[1]]))
    # set_ticks_for_axis(dim=dim, ax=ax, ticks=2)

    # ax.set_xticklabels
    # dim = len(axes)
    # ax.xaxis.set_major_locator(ticker.FixedLocator([x[-2], x[-1]]))
    # set_ticks_for_axis(dim, ax, ticks=2)
    # ax.set_xticklabels([cols[-2], cols[-1]])

    # Remove space between subplots:
    plt.subplots_adjust(wspace=0)

    # Remove unused parts of x-axis
    axes[-1].spines['right'].set_visible(False)
    axes[-1].spines['top'].set_visible(False)
    axes[-1].spines['bottom'].set_visible(False)

    # Add colorbar:
    # cax = plt.twinx(axes[-1])
    # fig.axes[-1].imshow(df['mean_acc'].values, interpolation='nearest', cmap=cm.coolwarm)

    # custom colormap:
    # cdic

    # cbar = fig.colorbar(fig.axes[-1], ticks=[0, 1, 2, 3], orientation='vertical')

    # add legend:
    plt.legend([
        plt.Line2D((0, 1), (0, 0), color=mean_acc_color_mappings[cat])
        for cat in mean_acc_cut.cat.categories
    ],
               mean_acc_cut.cat.categories,
               bbox_to_anchor=(1.2, 1),
               loc=0,
               borderaxespad=0.0)

    # cbar.ax.set_yticklabels(['< -1', '0', '> 1'])

    plt.title('Accuracy with varying hyperparameters')

    plt.show()
Code example #53
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stat
# import scipy.optimize as opt

# Uses numpy statistics routines, scipy.stats statistical functions, and 
# possibly scipy.optimize optimization.
# Note you can also use pandas if needed.

first_data = np.array([8.8, 3.1, 4.2, 6.2, 7.6, 3.6, 5.2, 8.6, 6.3, 1.8, 6.8, 3.9])
# number_of_elements = first_data.size # 1D
number_of_elements = first_data.shape  # multi-D
# row, col = first_data.shape # 2D
minimum = np.amin(first_data)
maximum = np.amax(first_data)
peak_to_peak = np.ptp(first_data)

mean = np.mean(first_data)
std = np.std(first_data)
std_of_mean = stat.sem(first_data)
# covariance = np.cov(first_data, second_data) # for 2D datasets

# Describe data statistics
print(' ')
print('overview', stat.describe(first_data))
print('number_of_elements', number_of_elements)
print('minimum', minimum)
print('maximum', maximum)
print('peak_to_peak', peak_to_peak)
print('mean', mean)
print('std', std)
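As the comment above suggests, pandas can produce most of these summary statistics in one call; a small sketch on the same first_data values:

import numpy as np
import pandas as pd

first_data = np.array([8.8, 3.1, 4.2, 6.2, 7.6, 3.6, 5.2, 8.6, 6.3, 1.8, 6.8, 3.9])
summary = pd.Series(first_data).describe()   # count, mean, std, min, quartiles, max
print(summary)
print('peak_to_peak', summary['max'] - summary['min'])   # same value as np.ptp(first_data)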
Code example #54
    def get_peak(self):
        ''' Computes metrics related to each cell's peak response condition.
        
        Returns
        -------
        Pandas data frame with the following fields (_sg suffix is
        for static grating):
            * ori_sg (orientation)
            * sf_sg (spatial frequency)
            * phase_sg
            * reliability_sg (response reliability)
            * osi_sg (orientation selectivity index)
            * peak_dff_sg (peak dF/F)
            * ptest_sg
            * time_to_peak_sg
        '''
        StaticGratings._log.info('Calculating peak response properties')

        peak = pd.DataFrame(
            index=range(self.numbercells),
            columns=('ori_sg', 'sf_sg', 'phase_sg', 'reliability_sg', 'osi_sg',
                     'peak_dff_sg', 'ptest_sg', 'time_to_peak_sg',
                     'cell_specimen_id', 'p_run_sg', 'cv_os_sg',
                     'run_modulation_sg', 'sf_index_sg'))
        cids = self.data_set.get_cell_specimen_ids()

        orivals_rad = np.deg2rad(self.orivals)
        for nc in range(self.numbercells):
            cell_peak = np.where(
                self.response[:, 1:, :, nc,
                              0] == np.nanmax(self.response[:, 1:, :, nc, 0]))
            pref_ori = cell_peak[0][0]
            pref_sf = cell_peak[1][0] + 1
            pref_phase = cell_peak[2][0]
            peak.cell_specimen_id.iloc[nc] = cids[nc]
            peak.ori_sg[nc] = pref_ori
            peak.sf_sg[nc] = pref_sf
            peak.phase_sg[nc] = pref_phase

            #            peak.response_reliability_sg[nc] = self.response[
            #                pref_ori, pref_sf, pref_phase, nc, 2] / 0.48  # TODO: check number of trials

            pref = self.response[pref_ori, pref_sf, pref_phase, nc, 0]
            orth = self.response[np.mod(pref_ori + 3, 6), pref_sf, pref_phase,
                                 nc, 0]
            tuning = self.response[:, pref_sf, pref_phase, nc, 0]
            tuning = np.where(tuning > 0, tuning, 0)
            CV_top_os = np.empty((6), dtype=np.complex128)
            for i in range(6):
                CV_top_os[i] = (tuning[i] * np.exp(1j * 2 * orivals_rad[i]))
            peak.cv_os_sg.iloc[nc] = np.abs(CV_top_os.sum()) / tuning.sum()

            peak.osi_sg[nc] = (pref - orth) / (pref + orth)
            peak.peak_dff_sg[nc] = pref
            groups = []

            for ori in self.orivals:
                for sf in self.sfvals[1:]:
                    for phase in self.phasevals:
                        groups.append(self.mean_sweep_response[
                            (self.stim_table.spatial_frequency == sf)
                            & (self.stim_table.orientation == ori) &
                            (self.stim_table.phase == phase)][str(nc)])
            groups.append(self.mean_sweep_response[
                self.stim_table.spatial_frequency == 0][str(nc)])

            _, p = st.f_oneway(*groups)
            peak.ptest_sg[nc] = p

            test_rows = (self.stim_table.orientation == self.orivals[pref_ori]) & \
                (self.stim_table.spatial_frequency == self.sfvals[pref_sf]) & \
                (self.stim_table.phase == self.phasevals[pref_phase])

            if len(test_rows) < 2:
                msg = "Static grating p value requires at least 2 trials at the preferred "
                "orientation/spatial frequency/phase. Cell %d (%f, %f, %f) has %d." % \
                    (int(nc), self.orivals[pref_ori], self.sfvals[pref_sf],
                     self.phasevals[pref_phase], len(test_rows))

                raise BrainObservatoryAnalysisException(msg)

            test = self.sweep_response[test_rows][str(nc)].mean()
            peak.time_to_peak_sg[nc] = (
                np.argmax(test) - self.interlength) / self.acquisition_rate

            #running modulation
            subset = self.mean_sweep_response[
                (self.stim_table.spatial_frequency == self.sfvals[pref_sf])
                & (self.stim_table.orientation == self.orivals[pref_ori]) &
                (self.stim_table.phase == self.phasevals[pref_phase])]
            subset_run = subset[subset.dx >= 1]
            subset_stat = subset[subset.dx < 1]
            if (len(subset_run) > 4) & (len(subset_stat) > 4):
                (_,
                 peak.p_run_sg.iloc[nc]) = st.ttest_ind(subset_run[str(nc)],
                                                        subset_stat[str(nc)],
                                                        equal_var=False)

                if subset_run[str(nc)].mean() > subset_stat[str(nc)].mean():
                    peak.run_modulation_sg.iloc[nc] = (
                        subset_run[str(nc)].mean() -
                        subset_stat[str(nc)].mean()) / np.abs(
                            subset_run[str(nc)].mean())
                elif subset_run[str(nc)].mean() < subset_stat[str(nc)].mean():
                    peak.run_modulation_sg.iloc[nc] = -1 * (
                        (subset_stat[str(nc)].mean() -
                         subset_run[str(nc)].mean()) /
                        np.abs(subset_stat[str(nc)].mean()))
            else:
                peak.p_run_sg.iloc[nc] = np.NaN
                peak.run_modulation_sg.iloc[nc] = np.NaN

            #reliability
            subset = self.sweep_response[
                (self.stim_table.spatial_frequency == self.sfvals[pref_sf])
                & (self.stim_table.orientation == self.orivals[pref_ori]) &
                (self.stim_table.phase == self.phasevals[pref_phase])]
            corr_matrix = np.empty((len(subset), len(subset)))
            for i in range(len(subset)):
                for j in range(len(subset)):
                    r, p = st.pearsonr(subset[str(nc)].iloc[i][28:42],
                                       subset[str(nc)].iloc[j][28:42])
                    corr_matrix[i, j] = r
            mask = np.ones((len(subset), len(subset)))
            for i in range(len(subset)):
                for j in range(len(subset)):
                    if i >= j:
                        mask[i, j] = np.NaN
            corr_matrix *= mask
            peak.reliability_sg.iloc[nc] = np.nanmean(corr_matrix)

            #SF index
            sf_tuning = self.response[pref_ori, 1:, pref_phase, nc, 0]
            trials = self.mean_sweep_response[
                (self.stim_table.spatial_frequency != 0)
                & (self.stim_table.orientation == self.orivals[pref_ori]) &
                (self.stim_table.phase
                 == self.phasevals[pref_phase])][str(nc)].values
            SSE_part = np.sqrt(
                np.sum((trials - trials.mean())**2) / (len(trials) - 5))
            peak.sf_index_sg.iloc[nc] = (np.ptp(sf_tuning)) / (
                np.ptp(sf_tuning) + 2 * SSE_part)

        return peak
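
The two selectivity measures computed above reduce to short NumPy expressions. Here is a minimal standalone sketch on a made-up six-point tuning curve, assuming the six orientations are 0-150 deg in 30-deg steps as implied by the mod-6 indexing; the values are hypothetical and not taken from any dataset:

import numpy as np

orivals = np.arange(0, 180, 30)                      # six grating orientations (deg)
tuning = np.array([0.1, 0.3, 1.2, 0.4, 0.2, 0.1])    # hypothetical mean dF/F per orientation

# classic OSI: (preferred - orthogonal) / (preferred + orthogonal)
pref_idx = int(np.argmax(tuning))
orth_idx = (pref_idx + 3) % 6                        # orthogonal = 90 deg away
osi = (tuning[pref_idx] - tuning[orth_idx]) / (tuning[pref_idx] + tuning[orth_idx])

# circular-variance version: |sum R(theta) * exp(2i*theta)| / sum R(theta)
orivals_rad = np.deg2rad(orivals)
cv_os = np.abs(np.sum(tuning * np.exp(2j * orivals_rad))) / tuning.sum()

print(osi, cv_os)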
コード例 #55
0
ファイル: main.py プロジェクト: AiratMR/WANN_demo
import numpy as np
import pandas as pd
import logging

from Model import WANNModel
from keras.datasets import boston_housing
from ModelGenerator import generate_wann_model

logging.basicConfig(filename="wann.log", level=logging.INFO)

if __name__ == "__main__":
    # Cu - dataset
    data = pd.read_excel('data.xlsx')
    data = np.array(data)

    input_data = data[:, :4]
    in_scaled = (input_data - np.min(input_data)) / np.ptp(input_data)
    output_data = data[:, 4:]
    out_scaled = (output_data - np.min(output_data)) / np.ptp(output_data)

    logging.info("Cu - generating model start")

    logging.info("Cu - min connections optimization:")
    result = generate_wann_model(in_scaled, out_scaled, tol=0.001, gen_num=40, niter=512,
                                 sort_type="conn")
    model = result
    model.save('cu_model_conn')

    logging.info("Cu - min nodes optimization:")
    result = generate_wann_model(in_scaled, out_scaled, tol=0.001, gen_num=40, niter=512,
                                 sort_type="nodes")
    model = result
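
One thing worth noting about the scaling above: np.min and np.ptp are applied to the whole array, so all input columns share a single minimum and range. If independent per-column scaling is wanted instead, np.ptp accepts an axis argument; a minimal sketch on toy data (not the Cu dataset):

import numpy as np

data = np.random.rand(100, 4) * np.array([1.0, 10.0, 100.0, 1000.0])  # toy feature matrix

# global scaling, as in the script above: one min/range shared by every column
global_scaled = (data - np.min(data)) / np.ptp(data)

# per-column scaling: each feature mapped to [0, 1] on its own
col_scaled = (data - data.min(axis=0)) / np.ptp(data, axis=0)

print(global_scaled.max(axis=0))  # only the widest column reaches ~1
print(col_scaled.max(axis=0))     # every column reaches exactly 1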
コード例 #56
0
 def _normalize_img(img):
     return np.nan_to_num((img - np.min(img)) / np.ptp(img))
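
A quick usage check of the helper above on toy arrays: the nan_to_num call is what keeps a perfectly flat image, where np.ptp is zero and the division yields NaN, from propagating NaNs.

import numpy as np

def _normalize_img(img):
    return np.nan_to_num((img - np.min(img)) / np.ptp(img))

print(_normalize_img(np.array([2.0, 4.0, 6.0])))  # -> [0.  0.5 1. ]
print(_normalize_img(np.full((2, 2), 7.0)))       # flat image -> all zeros instead of NaN
                                                  # (NumPy still warns about the 0/0, but the result is finite)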
コード例 #57
0
    def find_slope_lines(self, tolerance=1.):
        """
        This method attempts to find slope-consistent line profiles up facets,
        perpendicular to the fault.
        Assumes you used define_aspect_node_subset_local().
        """
        grid = self.grid
        self.possible_core_nodes = np.where(
            np.logical_and(self.steep_nodes, self.aspect_close_nodes))[0]
        pcn = self.possible_core_nodes
        unique_starting_pts = np.unique(
            self.closest_ft_node[pcn])  # in real node nos
        # set up places to store the profile data:
        profile_ft_node_id = []
        profile_ft_node_x = []
        profile_ft_node_y = []
        profile_ft_node_z = []
        profile_ft_node_dist = []
        profile_x_facet_pts = []
        profile_z_facet_pts = []
        profile_S_facet_pts = []
        count = 0
        for i in unique_starting_pts:
            count += 1
            print("Running ", count, " of ", unique_starting_pts.size)
            # set the local angle of the ft trace:
            ft_pt_distances_to_node = self.grid.calc_distances_of_nodes_to_point(
                (grid.node_x[i], grid.node_y[i]),
                node_subset=self.ft_trace_node_ids)
            close_ft_nodes = np.less(ft_pt_distances_to_node, 5. * grid.dx)
            x = grid.node_x[self.ft_trace_node_ids[close_ft_nodes]]
            y = grid.node_y[self.ft_trace_node_ids[close_ft_nodes]]
            (grad, offset) = np.polyfit(x, y, 1)
            condition = np.equal(self.closest_ft_node[pcn], i)
            nodes_possible = pcn[condition]
            print(nodes_possible.size, " nodes")
            if nodes_possible.size > 10.:
                #their_az = self.angle_to_ft[nodes_possible]
                #their_diff_angles = self.diff_angles[nodes_possible]
                their_elevs = self.elevs[grid.core_nodes][nodes_possible]
                #their_distances = self.distance_to_ft[nodes_possible]
                # need to make new distances so we remove the ambiguity of angle around the ft point (i.e., dists from a far-field pt on the ft normal)
                # now make a multiplier to make sure the reference point for
                # new distances is far from the actual pts:
                multiplier = 10. * \
                    np.ptp(grid.node_y[grid.core_nodes[nodes_possible]])
                # derive the position:
                # Python 2's cmp() no longer exists in Python 3; np.sign of the
                # difference gives the same -1/0/+1 direction factor.
                x_ref = grid.node_x[i] + np.sign(
                    grid.node_x[i]
                    - np.mean(grid.node_x[grid.core_nodes[nodes_possible]])
                ) * multiplier * abs(grad)
                y_ref = grid.node_y[i] + np.sign(
                    grid.node_y[i]
                    - np.mean(grid.node_y[grid.core_nodes[nodes_possible]])
                ) * multiplier
                # get new absolute distances
                dist_to_ft = self.grid.calc_distances_of_nodes_to_point(
                    (x_ref, y_ref), node_subset=np.array([i]))
                dists_along_profile = self.grid.calc_distances_of_nodes_to_point(
                    (x_ref, y_ref),
                    node_subset=grid.core_nodes[nodes_possible]) - dist_to_ft
                # note the ft is now the origin, but pts might be back-to-front (consistently, though)
                # sort the distances. Remove any pts that aren't in a "cluster".
                # We assume there will be one big "bunched" plane, then a load
                # of outliers
                dist_order = np.argsort(dists_along_profile)
                dist_diffs = np.diff(dists_along_profile[dist_order])
                print("dists along profile sorted: ",
                      dists_along_profile[dist_order])
                print("dist diffs: ", dist_diffs)
                # max_diff = 3.*np.median(dist_diffs) #######this might need
                # attention if there's a heavy tail on the distances
                if grad < 1:
                    mod = np.sqrt(1. + grad**2.)
                else:
                    mod = np.sqrt(1. + (1. / grad)**2.)
                max_diff = 1.9 * mod * grid.dx
                locs_of_large_diffs = np.where(dist_diffs > max_diff)[0]
                # there should only be 1 place on the line where there's a cluster, i.e., a large value in pts_betw_large_diffs.
                # This is what we're seeking.
                # ...this can be empty quite easily
                pts_betw_large_diffs = np.diff(locs_of_large_diffs)
                # need to be careful here in case the where call gives an empty
                # array
                if locs_of_large_diffs.size > 1:
                    biggest_interval_loc = np.argmax(pts_betw_large_diffs)
                elif locs_of_large_diffs.size == 1:
                    # one side or the other must be bigger:
                    if 2. * locs_of_large_diffs[
                            0] < dists_along_profile.size - 1:
                        locs_of_large_diffs = np.array([
                            locs_of_large_diffs[0],
                            (dists_along_profile.size - 1)
                        ])
                    else:
                        locs_of_large_diffs = np.array(
                            [0, locs_of_large_diffs[0]])
                    biggest_interval_loc = np.array([0])
                    # here we assume that the single large diff must be further
                    # from the ft than the plane
                else:
                    locs_of_large_diffs = np.array(
                        [0, (dists_along_profile.size - 1)])
                    biggest_interval_loc = np.array([0])
                    #...all the pts in the line are one cluster
                # apply a test to ensure we only save "big" patches; a
                # threshold of 10 pts on the line
                try:
                    patch_size = pts_betw_large_diffs[biggest_interval_loc]
                except IndexError:  # pts_betw_large_diffs is empty
                    patch_size = locs_of_large_diffs[1] - locs_of_large_diffs[0]
                if patch_size > 10.:
                    start_pt_of_cluster = locs_of_large_diffs[
                        biggest_interval_loc] + 1
                    end_pt_of_cluster = locs_of_large_diffs[
                        biggest_interval_loc +
                        1] + 1  # both referring to the sorted list
                    # both +1s account for the required frame-of-reference change - the indices refer to where the big gaps start, not where they end
                    # so:
                    dists_to_sorted_pts = dists_along_profile[dist_order][
                        start_pt_of_cluster:end_pt_of_cluster]
                    elevs_of_sorted_pts = their_elevs[dist_order][
                        start_pt_of_cluster:end_pt_of_cluster]
                    slopes_of_sorted_pts = self.slopes[nodes_possible][
                        dist_order][start_pt_of_cluster:end_pt_of_cluster]
                    profile_ft_node_id.append(i.copy())
                    profile_ft_node_x.append(grid.node_x[i].copy())
                    profile_ft_node_y.append(grid.node_y[i].copy())
                    profile_ft_node_z.append(self.elevs[i].copy())
                    profile_ft_node_dist.append(dist_to_ft.copy())
                    profile_x_facet_pts.append(dists_to_sorted_pts.copy())
                    profile_z_facet_pts.append(elevs_of_sorted_pts.copy())
                    profile_S_facet_pts.append(slopes_of_sorted_pts.copy())
                    # assumes pyplot's figure/plot are already in scope, e.g.
                    # via `from matplotlib.pyplot import figure, plot`
                    figure(5)
                    plot(dists_to_sorted_pts, elevs_of_sorted_pts)
                    # dirty, but effective code!

        self.profile_ft_node_id = profile_ft_node_id
        self.profile_ft_node_x = profile_ft_node_x
        self.profile_ft_node_y = profile_ft_node_y
        self.profile_ft_node_z = profile_ft_node_z
        self.profile_ft_node_dist = profile_ft_node_dist
        self.profile_x_facet_pts = profile_x_facet_pts
        self.profile_z_facet_pts = profile_z_facet_pts
        self.profile_S_facet_pts = profile_S_facet_pts
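
The gap-based clustering buried in the middle of this method (sort the distances, find jumps larger than max_diff, keep the longest run between jumps) is easier to see in isolation. Below is a minimal sketch of the same idea on a bare 1-D array; the names and values are illustrative, not taken from the class, and the zero- and one-gap special cases handled by the method are skipped here.

import numpy as np

dists = np.array([0.5, 1.0, 1.4, 9.0, 9.3, 9.5, 9.9, 10.2, 25.0])  # toy profile distances
max_diff = 2.0                                   # gap threshold, analogous to 1.9 * mod * grid.dx

order = np.argsort(dists)
diffs = np.diff(dists[order])
gap_locs = np.where(diffs > max_diff)[0]         # indices where a big jump begins
run_lengths = np.diff(gap_locs)                  # number of points between consecutive big gaps
biggest = int(np.argmax(run_lengths))
start = gap_locs[biggest] + 1                    # +1: the gap index marks where the jump starts
end = gap_locs[biggest + 1] + 1
cluster = dists[order][start:end]                # the densest "bunched" stretch
print(cluster)                                   # -> [ 9.   9.3  9.5  9.9 10.2]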
コード例 #58
0
def ptp(amplitudes, axis=-1):
    return _np.ptp(amplitudes, axis=axis)
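
This thin wrapper presumably sits next to an `import numpy as _np`; the axis=-1 default makes it return one peak-to-peak value per trace. A small usage sketch:

import numpy as _np

def ptp(amplitudes, axis=-1):
    return _np.ptp(amplitudes, axis=axis)

waves = _np.array([[0.0, 1.5, -0.5],
                   [2.0, 2.0,  2.0]])
print(ptp(waves))          # range per row (last axis) -> [2. 0.]
print(ptp(waves, axis=0))  # range per column          -> [2.  0.5 2.5]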
コード例 #59
0
 def individual_meter_forecast_value_calculate(
         self, previous_3):  # Average, Maximum, Minimum, range of values
     return np.average(previous_3), np.amax(previous_3), np.amin(
         previous_3), np.ptp(previous_3)
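
A toy call with three made-up meter readings, just to show what the four returned statistics look like:

import numpy as np

previous_3 = np.array([12.0, 15.5, 9.5])
avg, mx, mn, rng = (np.average(previous_3), np.amax(previous_3),
                    np.amin(previous_3), np.ptp(previous_3))
print(avg, mx, mn, rng)  # -> 12.333... 15.5 9.5 6.0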
コード例 #60
0
# Fragment of a gRPC prediction client; the earlier imports (numpy as np, grpc,
# argparse, prediction_service_pb2_grpc) and the preceding add_argument calls
# are omitted from this excerpt.
parser.add_argument(
    '--model_name',
    default='resnet',
    help='Define model name, must be same as is in service. default: resnet',
    dest='model_name')
args = vars(parser.parse_args())

channel = grpc.insecure_channel("{}:{}".format(args['grpc_address'],
                                               args['grpc_port']))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

processing_times = np.zeros((0), int)

# optional preprocessing depending on the model
imgs = np.load(args['images_numpy_path'], mmap_mode='r', allow_pickle=False)
imgs = imgs - np.min(imgs)  # Normalization 0-255
imgs = imgs / np.ptp(imgs) * 255  # Normalization 0-255
#imgs = imgs[:,:,:,::-1] # RGB to BGR
print('Image data range:', np.amin(imgs), ':', np.amax(imgs))
# optional preprocessing depending on the model

if args.get('labels_numpy_path') is not None:
    lbs = np.load(args['labels_numpy_path'], mmap_mode='r', allow_pickle=False)
    matched_count = 0
    total_executed = 0
batch_size = int(args.get('batchsize'))

while batch_size >= imgs.shape[0]:
    imgs = np.append(imgs, imgs, axis=0)
    if args.get('labels_numpy_path') is not None:
        lbs = np.append(lbs, lbs, axis=0)