Example #1
    def _fetch_data(dat_path):
        try:
            finder = bossdata.path.Finder()
            mirror = bossdata.remote.Manager()
        except ValueError as e:
            print(e)
            raise  # cannot proceed without the bossdata Finder/Manager

        raw_data = []
        with open(dat_path, 'r') as f:
            f.readline()
            for line in f:
                line_split = line.split()

                plate = int(line_split[0])
                mjd = int(line_split[1])
                fiber = int(line_split[2])
                z = float(line_split[3])

                remote_path = finder.get_spec_path(plate=plate, mjd=mjd, fiber=fiber, lite=True)
                local_path = mirror.get(remote_path)
                try:
                    spec = bossdata.spec.SpecFile(local_path)
                    data = spec.get_valid_data()
                    wlen, flux, dflux = data['wavelength'][:], data['flux'][:], data['dflux'][:]
                except RuntimeError as e:
                    print(e)
                    print(plate, mjd, fiber)
                    continue  # skip spectra that fail to load; wlen/flux would otherwise be stale

                qso = {'plate':plate, 'mjd':mjd, 'fiber':fiber, \
                        'wlen':wlen, 'flux':flux, 'dflux':dflux, 'z':z}
                raw_data.append(qso)
            return raw_data
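A minimal usage sketch for the loader above (it assumes the bossdata package is installed and that targets.dat is a whitespace-delimited file with one header row and plate/mjd/fiber/z columns; both the package setup and the file name are assumptions here):

import numpy as np
import bossdata

qsos = _fetch_data('targets.dat')
print(len(qsos), 'spectra loaded')
print(qsos[0]['plate'], qsos[0]['mjd'], qsos[0]['z'])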
Example #2
def savgol(x, window_size=3, order=2, deriv=0, rate=1):
    ''' Savitzky-Golay filter '''
        
    # Check the input
    try:
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size > len(x):
        raise TypeError("Not enough data points!")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 1:
        raise TypeError("window_size is too small for the polynomial's order")
    if order <= deriv:
        raise TypeError("deriv must be less than order")


    # Calculate some required parameters
    order_range = range(order+1)
    half_window = (window_size -1) // 2
    num_data = len(x)
    
    # Construct Vandermonde matrix, its inverse, and the Savitzky-Golay coefficients   
    a = [[ii**jj for jj in order_range] for ii in range(-half_window, half_window+1)]
    pa = np.linalg.pinv(a)
    sg_coeff = pa[deriv] * rate**deriv * scipy.special.factorial(deriv)
      
    # Get the coefficients for the fits at the beginning and at the end of the data
    coefs = np.array(order_range)**np.sign(deriv)
    coef_mat = np.zeros((order+1, order+1))
    row = 0
    for ii in range(deriv,order+1):
        coef = coefs[ii]
        for jj in range(1,deriv):
            coef *= (coefs[ii]-jj)
        coef_mat[row,row+deriv]=coef
        row += 1
    coef_mat *= rate**deriv
    
    # Add the first and last point half_window times
    firstvals = np.ones(half_window) * x[0] 
    lastvals  = np.ones(half_window) * x[-1]
    x_calc = np.concatenate((firstvals, x, lastvals))

    y = np.convolve( sg_coeff[::-1], x_calc, mode='full')
    
    # chop away intermediate data
    y = y[window_size-1:window_size+num_data-1]

    # filtering for the first and last few datapoints
    y[0:half_window] = np.dot(np.dot(np.dot(a[0:half_window], coef_mat),
                                     pa), x[0:window_size])
    y[len(y)-half_window:len(y)] = np.dot(np.dot(np.dot(a[half_window+1:window_size],
                        coef_mat), pa), x[len(x)-window_size:len(x)])
    
    return y
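A quick smoke test for the filter (a sketch; it assumes numpy and scipy.special are importable, as the function body requires):

import numpy as np
import scipy.special

t = np.linspace(0, 2 * np.pi, 101)
noisy = np.sin(t) + 0.1 * np.random.randn(t.size)
smoothed = savgol(noisy, window_size=11, order=3)
print(np.max(np.abs(smoothed - np.sin(t))))  # should sit well below the noise amplitude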

    
Example #3
def ll2utm(lon,lat,zone=None,north=None):
    '''Convert a given longitude and latitude into a UTM coordinate

    (UTMx,UTMy,UTMz)=ll2utm(lon,lat,[zone=],[north=])
    If not specified the UTM zone is calculated from the longitude.
    If not specified north/south is calculated from the latitude.

    Either zone or hemisphere can be forced via zone=n or
    north=1/0 (1=northern hemisphere, 0=southern hemisphere)

    Assumes a WGS84 datum/spheroid for both projections
    '''
    if not gdalloaded:
        raise ImportError("OSR not available")

    if zone is None:
        zone=int(np.ceil((lon+180)/6))
        if zone==0: zone=1
    if north is None:
        north=int(lat>=0)
    # create a UTM zone X projection reference
    utm_proj=osr.SpatialReference()
    # utm_proj.ImportFromEPSG(32613)
    utm_proj.SetUTM(zone,north)
    utm_proj.SetWellKnownGeogCS( 'WGS84' )

    # create a geographic reference in WGS84
    geog_ref=osr.SpatialReference()
    geog_ref.ImportFromEPSG(4326)

    # create a transformation object between the two reference systems
    transform=osr.CoordinateTransformation(geog_ref,utm_proj)
    # transform the input lon/lat coordinates to UTM
    xy=transform.TransformPoint(lon,lat)
    return xy
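A hedged usage sketch (it assumes the osgeo GDAL bindings are available and that the module sets a gdalloaded flag from the import; note that GDAL 3 switched EPSG:4326 to latitude-first axis order by default, so lon/lat call orders like the one above may need OAMS_TRADITIONAL_GIS_ORDER on newer GDAL):

import numpy as np
try:
    from osgeo import osr
    gdalloaded = True
except ImportError:
    gdalloaded = False

x, y, z = ll2utm(-105.27, 40.02)  # Boulder, CO; lands in UTM zone 13, northern hemisphere
print(x, y)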
Example #4
def empirical_ci(y,alpha=0.05):
	"""Computes an empirical (alpha/2,1-alpha/2) confidence interval for the distributional data in x.

	Parameters:
	------------
	y : numpy array, required
		set of data to produce empirical upper/lower bounds for

	alpha : float, optional
		sets desired CI range

	Returns:
	------------
	lb, ub : floats, lower and upper bounds for y

	"""
	ytilde = np.sort(y)
	xl = (alpha/2)*len(y)
	xu = (1.0 - alpha/2)*len(y)
	l1 = int(np.floor(xl))
	l2 = int(np.ceil(xl))
	u1 = int(np.floor(xu))
	u2 = min(int(np.ceil(xu)), len(y) - 1)  # clip so small samples cannot index past the end
	lb = np.interp(xl,[l1,l2],[ytilde[l1],ytilde[l2]])
	ub = np.interp(xu,[u1,u2],[ytilde[u1],ytilde[u2]])
	return lb,ub
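For example (a sketch; assumes numpy is imported as np, matching the calls above):

import numpy as np
y = np.random.normal(size=10000)
lb, ub = empirical_ci(y, alpha=0.05)
print(lb, ub)  # roughly -1.96 and 1.96 for a standard normal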
Example #5
    def _TO_DELETE_initialize_drifters(self, driftersPerOceanModel):
        """
        Initialize drifters and attach them for each particle.
        """
        self.driftersPerOceanModel = np.int32(driftersPerOceanModel)
        
        # Define mid-points for the different drifters 
        # Decompose the domain, so that we spread the drifters as much as possible
        sub_domains_y = int(np.round(np.sqrt(self.driftersPerOceanModel)))
        sub_domains_x = int(np.ceil(1.0*self.driftersPerOceanModel/sub_domains_y))
        self.midPoints = np.empty((driftersPerOceanModel, 2))
        for sub_y in range(sub_domains_y):
            for sub_x in range(sub_domains_x):
                drifter_id = sub_y*sub_domains_x + sub_x
                if drifter_id >= self.driftersPerOceanModel:
                    break
                self.midPoints[drifter_id, 0]  = (sub_x + 0.5)*self.nx*self.dx/sub_domains_x
                self.midPoints[drifter_id, 1]  = (sub_y + 0.5)*self.ny*self.dy/sub_domains_y
              
        # Loop over particles, sample drifters, and attach them
        for i in range(self.numParticles+1):
            drifters = GPUDrifterCollection.GPUDrifterCollection(self.gpu_ctx, self.driftersPerOceanModel,
                                                 observation_variance=self.observation_variance,
                                                 boundaryConditions=self.boundaryConditions,
                                                 domain_size_x=self.nx*self.dx, domain_size_y=self.ny*self.dy)

            initPos = np.empty((self.driftersPerOceanModel, 2))
            for d in range(self.driftersPerOceanModel):
                initPos[d,:] = np.random.multivariate_normal(self.midPoints[d,:], self.initialization_cov_drifters)
            drifters.setDrifterPositions(initPos)
            self.particles[i].attachDrifters(drifters)
Example #6
def movav(y, Dx, dx):
    """
    Moving average rectangular window filter:
    calculate average of signal y by using sliding rectangular
    window of size Dx using binsize dx
    
    
    Parameters
    ----------
    y : numpy.ndarray
        Signal
    Dx : float
        Window length of filter.
    dx : float
        Bin size of signal sampling.
                
    
    Returns
    -------
    numpy.ndarray
        Filtered signal.
    
    """
    if Dx <= dx:
        return y
    else:
        ly = len(y)
        r = np.zeros(ly)
        n = int(np.round(Dx / dx))
        r[0:int(n / 2.)] = 1.0 / n
        r[-int(n / 2.)::] = 1.0 / n
        R = np.fft.fft(r)
        Y = np.fft.fft(y)
        yf = np.fft.ifft(Y * R)
        return yf.real  # the input is real, so drop the numerical imaginary residue
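For instance (a sketch; assumes numpy as np):

import numpy as np
dx = 0.1
y = np.sin(np.linspace(0, 10, 1000)) + 0.2 * np.random.randn(1000)
smoothed = movav(y, Dx=1.0, dx=dx)  # ~10-bin circular moving average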
Example #7
def fit(dataset):

    # f = gzip.open('../../../datasets/Mnist/mnist.pkl.gz', 'rb')
    # train_set, _, _ = pickle.load(f)
    # f.close()
    #
    # _, labels = train_set

    # features = pickle.load(open('../../../datasets/Mnist/convnet_train_features.p', 'rb'))
    with open('../../../datasets/Mnist/bag_train_features.p', 'rb') as fh:
        samples = pickle.load(fh)

    features = []
    labels = []
    for sample in samples:
        features.append(sample['features'])
        labels.append(sample['label'])
    features = np.array(features)
    labels = np.array(labels)

    model = Oasis(n_iter=100000, do_psd=True, psd_every=3,
                  save_path="/tmp/gwtaylor/oasis_test").fit(features, labels,
                                                            verbose=True)
    W = model._weights.view()

    W.shape = (int(np.sqrt(W.shape[0])), int(np.sqrt(W.shape[0])))

    # pickle.dump(W, open('../convnet/oasis_weights.p', 'wb'))
    with open('../bag/oasis_weights.p', 'wb') as fh:
        pickle.dump(W, fh)
Example #8
def qd_read_hyp_file(filename):
    f = open(filename, "r")
    lines = f.readlines()
    f.close()

    for line in lines:
        words = line.split()
        try:
            if words[0] == "HYPOCENTER":
                hypo_x = float(words[2])
                hypo_y = float(words[4])
                hypo_z = float(words[6])
            if words[0] == "GEOGRAPHIC":
                year = int(words[2])
                month = int(words[3])
                day = int(words[4])
                hour = int(words[5])
                minute = int(words[6])
                seconds = float(words[7])
                otime = utcdatetime.UTCDateTime(year, month, day, hour, minute, seconds)
            if words[0] == "STATISTICS":
                sigma_x = np.sqrt(float(words[8]))
                sigma_y = np.sqrt(float(words[14]))
                sigma_z = np.sqrt(float(words[18]))
        except IndexError:
            pass

    return (otime, hypo_x, sigma_x, hypo_y, sigma_y, hypo_z, sigma_z)
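A usage sketch (the HYPOCENTER/GEOGRAPHIC/STATISTICS keywords suggest a NonLinLoc .hyp file; the file name below and the obspy import are assumptions):

import numpy as np
from obspy.core import utcdatetime

otime, x, sx, y, sy, z, sz = qd_read_hyp_file('event.hyp')
print(otime, x, y, z)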
Example #9
def qd_read_picks_from_hyp_file(filename):
    f = open(filename, "r")
    lines = f.readlines()
    f.close()

    for iline in range(len(lines)):
        line = lines[iline]
        words = line.split()
        if words[0] == "PHASE":
            iline_phase = iline
            break

    phases = {}
    for line in lines[iline_phase + 1 :]:
        words = line.split()
        try:
            if words[4] == "P":
                station = words[0]
                year = int(words[6][0:4])
                month = int(words[6][4:6])
                day = int(words[6][6:8])
                hour = int(words[7][0:2])
                minute = int(words[7][2:4])
                seconds = float(words[8])
                ptime = utcdatetime.UTCDateTime(year, month, day, hour, minute, seconds)
                phases[station] = ptime
        except IndexError:
            pass

    return phases
Example #10
def interpgrid(a, xi, yi):
    """Fast 2D, linear interpolation on an integer grid"""

    Ny, Nx = np.shape(a)
    if isinstance(xi, np.ndarray):
        x = xi.astype(int)
        y = yi.astype(int)
        # Check that xn, yn don't exceed max index
        xn = np.clip(x + 1, 0, Nx - 1)
        yn = np.clip(y + 1, 0, Ny - 1)
    else:
        x = int(xi)
        y = int(yi)
        # conditional is faster than clipping for integers
        if x == (Nx - 2): xn = x
        else: xn = x + 1
        if y == (Ny - 2): yn = y
        else: yn = y + 1

    a00 = a[y, x]
    a01 = a[y, xn]
    a10 = a[yn, x]
    a11 = a[yn, xn]
    xt = xi - x
    yt = yi - y
    a0 = a00 * (1 - xt) + a01 * xt
    a1 = a10 * (1 - xt) + a11 * xt
    ai = a0 * (1 - yt) + a1 * yt

    if not isinstance(xi, np.ndarray):
        if np.ma.is_masked(ai):
            raise TerminateTrajectory

    return ai
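For example (a sketch; TerminateTrajectory in the masked branch is defined by the surrounding module, so this stays away from masked input):

import numpy as np
a = np.arange(16.0).reshape(4, 4)  # f(x, y) = 4*y + x on an integer grid
print(interpgrid(a, 0.5, 0.5))     # 2.5, the exact bilinear value for this ramp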
Example #11
File: PPI.py Project: cbxx/Phantom
    def _calcNeighbours(self):
        """ Calculates the neighbours and creates the bitstrings """
        self._logger.info("Creating neighbourhood bitstrings")  

        bitstrings = []

        # apply BS func on all c alphas in peptide chains
        cas = self._df.loc[(self._df['an'] == 'CA') & (self._df['peptideChain'] == True) & (self._df['surface'] == True), ['chain', 'resi', 'resn', 'inscode']]
        for tpl in cas.itertuples():
            idx, chain, resi, resn, inscode = tpl

            # other c alphas in same chain, including insertion mutants (i.e. same resi but different inscode)
            others = self._df.loc[(self._df['an']=='CA') & (~(self._df['resi']==resi) | ~(self._df['inscode']==inscode)) & (self._df['chain']==chain)].index
            first = second = None            
            # copy distance values from matrix and sort
            distances = self._distMatrix.loc[idx, others].copy()
            distances = distances.sort_values()  # Series.sort() was removed from pandas
            # first two entries
            minEntries = self._df.loc[distances.iloc[:2].index, 'resn']
            first, second = minEntries

            bs = np.nan
            # set the bits
            if resn in PPIBitstrings.AADICT:
                bs = np.zeros(60, dtype=int)
                bs[PPIBitstrings.AADICT[resn][0]] = 1
                for k,v in enumerate([first, second]):
                    if v in PPIBitstrings.AADICT:
                        bs[(k+1)*20 + PPIBitstrings.AADICT[v][0]] = 1
            bitstrings.append(bs)
        
        # create series
        s = pd.Series(bitstrings, index=cas.index, name='bitstring', dtype=object)
        # join to our df
        self._df = self._df.join(pd.DataFrame(s))       
Example #12
    def testIntMax(self):
        num = int(np.iinfo(np.int_).max)
        self.assertEqual(int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        if platform.architecture()[0] != '32bit':
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example #13
def dispims_color(M, border=0, bordercolor=[0.0, 0.0, 0.0], savePath=None, *imshow_args, **imshow_keyargs):
    """ Display an array of rgb images. 

    The input array is assumed to have the shape numimages x numpixelsY x numpixelsX x 3
    """
    bordercolor = numpy.array(bordercolor)[None, None, :]
    numimages = len(M)
    M = M.copy()
    for i in range(M.shape[0]):
        M[i] -= M[i].flatten().min()
        M[i] /= M[i].flatten().max()
    height, width, three = M[0].shape
    assert three == 3
    
    n0 = int(numpy.ceil(numpy.sqrt(numimages)))
    n1 = int(numpy.ceil(numpy.sqrt(numimages)))
    im = numpy.array(bordercolor)*numpy.ones(
                             ((height+border)*n1+border,(width+border)*n0+border, 1),dtype='<f8')
    for i in range(n0):
        for j in range(n1):
            if i*n1+j < numimages:
                im[j*(height+border)+border:(j+1)*(height+border)+border,
                   i*(width+border)+border:(i+1)*(width+border)+border,:] = numpy.concatenate((
                  numpy.concatenate((M[i*n1+j,:,:,:],
                         bordercolor*numpy.ones((height,border,3),dtype=float)), 1),
                  bordercolor*numpy.ones((border,width+border,3),dtype=float)
                  ), 0)
    imshow_keyargs["interpolation"]="nearest"
    pylab.imshow(im, *imshow_args, **imshow_keyargs)
    
    if savePath is None:
        pylab.show()
    else:
        pylab.savefig(savePath)
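For instance (a sketch; needs matplotlib's pylab interface, and the images must be floats so the in-place normalization works):

import numpy
import pylab
M = numpy.random.rand(9, 16, 16, 3)  # nine random 16x16 RGB tiles
dispims_color(M, border=1)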
Example #14
def findValidFFTWDim( inputDims ):
    """
    Finds a valid dimension for which FFTW can optimize its calculations. The 
    return is a shape which is forced to be square, as this gives uniform pixel
    size in x-y in Fourier space.
    
    If you want a minimum padding size, call as findValidFFTWDim( image.shape + 128 ) 
    or similar.
    """
    dim = np.max( np.round( inputDims ) )
    maxPow2 = int( np.ceil( math.log( dim, 2 ) ) )
    maxPow3 = int( np.ceil( math.log( dim, 3 ) ) )
    maxPow5 = int( np.ceil( math.log( dim, 5 ) ) )
    maxPow7 = int( np.ceil( math.log( dim, 7 ) ) )
    
    dimList = np.zeros( [(maxPow2+1)*(maxPow3+1)*(maxPow5+1)*(maxPow7+1)] )
    count = 0
    for I in np.arange(0,maxPow7+1):
        for J in np.arange(0,maxPow5+1):
            for K in np.arange(0,maxPow3+1):
                for L in np.arange(0,maxPow2+1):
                    dimList[count] = 2**L * 3**K * 5**J * 7**I
                    count += 1
    dimList = np.sort( np.unique( dimList ) )
    dimList = dimList[ np.argwhere(dimList < 2*dim)].squeeze()
    dimList = dimList.astype('int64')
    # Throw out odd image shapes, this just causes more problems with many 
    # functions
    dimList = dimList[ np.mod(dimList,2)==0 ]
    
    # Find first dim that equals or exceeds dim
    nextValidDim =  dimList[np.argwhere( dimList >= dim)[0,0]]
    return np.array( [nextValidDim, nextValidDim] )    
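For example (a sketch; assumes numpy as np and the math module, as the function body requires):

import math
import numpy as np
print(findValidFFTWDim(np.array([1030, 1030])))  # [1050 1050]: the next even 2-3-5-7-smooth size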
Example #15
    def testInt(self):
        num = int(2562010)
        self.assertEqual(int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example #16
    def siggen_model(s, rad, phi, z, e, temp):
      out = np.zeros_like(data)
      
      detector.SetTemperature(temp)
      siggen_wf= detector.GetSiggenWaveform(rad, phi, z, energy=2600)

      if siggen_wf is None:
        return np.ones_like(data)*-1.
      if np.amax(siggen_wf) == 0:
        print "wtf is even happening here?"
        return np.ones_like(data)*-1.
      siggen_wf = np.pad(siggen_wf, (detector.zeroPadding,0), 'constant', constant_values=(0, 0))


      tout, siggen_wf, x = signal.lsim(system, siggen_wf, t)
      siggen_wf /= np.amax(siggen_wf)
      
      siggen_data = siggen_wf[detector.zeroPadding::]
      
      siggen_data = siggen_data*e
      
      
      #ok, so the siggen step size is 1 ns and the
      #WARNING: only works for 1 ns step size for now
      #TODO: might be worth downsampling BEFORE applying transfer function
    
      siggen_start_idx = int(np.around(s, decimals=1) * data_to_siggen_size_ratio % data_to_siggen_size_ratio)
      switchpoint_ceil = int( np.ceil(s) )
      
      samples_to_fill = (len(data) - switchpoint_ceil)
      sampled_idxs = np.arange(samples_to_fill, dtype=int)*data_to_siggen_size_ratio+siggen_start_idx
      
      out[switchpoint_ceil:] = siggen_data[sampled_idxs]

      return out
Example #17
def _interpolate_2d(x, y, a1, b1, a2, b2, c):
  
  """
  parameters:
  (x,y) = where we want to estimate the function 
  a1 , b1 = lower bounds of the grid
  a2 , b2 = upper bounds of the grid
  c = spline coefficients
  """
  n1 = c.shape[0] - 3
  n2 = c.shape[1] - 3
  h1 = (b1 - a1)/n1
  h2 = (b2 - a2)/n2
  
  l1 = int((x - a1)/h1) + 1
  l2 = int((y - a2)/h2) + 1
  m1 = min(l1 + 3, n1 + 3)
  m2 = min(l2 + 3, n2 + 3)
  
  s = 0

  for i1 in range(l1, m1 + 1):
    u_x = u(x, i1, a1, h1)
    for i2 in range(l2, m2 + 1):
      u_y = u(y, i2, a2, h2)
      s += c[i1 - 1, i2 - 1] * u_x * u_y

  return s
Example #18
def rec_full(h5fname, rot_center, algorithm, binning):
    
    data_shape = get_dx_dims(h5fname, 'data')

    # Select sinogram range to reconstruct.
    sino_start = 0
    sino_end = data_shape[1]

    chunks = 6          # number of sinogram chunks to reconstruct
                        # only one chunk at the time is reconstructed
                        # allowing for limited RAM machines to complete a full reconstruction

    nSino_per_chunk = (sino_end - sino_start) // chunks
    print("Reconstructing [%d] slices from slice [%d] to [%d] in [%d] chunks of [%d] slices each" % ((sino_end - sino_start), sino_start, sino_end, chunks, nSino_per_chunk))            

    strt = 0
    for iChunk in range(0,chunks):
        print('\n  -- chunk # %i' % (iChunk+1))
        sino_chunk_start = int(sino_start + nSino_per_chunk*iChunk)
        sino_chunk_end = int(sino_start + nSino_per_chunk*(iChunk+1))
        print('\n  --------> [%i, %i]' % (sino_chunk_start, sino_chunk_end))
                
        if sino_chunk_end > sino_end: 
            break

        sino = (int(sino_chunk_start), int(sino_chunk_end))
        # Reconstruct.
        rec = reconstruct(h5fname, sino, rot_center, binning, algorithm)
                
        # Write data as stack of TIFs.
        fname = os.path.dirname(h5fname) + '/' + os.path.splitext(os.path.basename(h5fname))[0]+ '_full_rec/' + 'recon'
        print("Reconstructions: ", fname)
        dxchange.write_tiff_stack(rec, fname=fname, start=strt)
        strt += sino[1] - sino[0]
Example #19
def gauss_filter(dat, bin_freq, window=300, sigma=100):
    """
        turn psth into firing rate estimate. window size is in ms
    """
    if dat is None:
        return None, None
    window = int(1. / bin_freq * window)
    sigma = int(1. / bin_freq * sigma)
    r = range(-int(window / 2), int(window / 2) + 1)
    gaus = [1 / (sigma * np.sqrt(2 * np.pi)) *
            np.exp(-float(x) ** 2 / (2 * sigma ** 2)) for x in r]
    if len(dat.shape) > 1:
        fr = np.zeros_like(dat, dtype=float)
        for d in range(len(dat)):
            fr[d] = np.convolve(dat[d], gaus, 'same')
    else:
        fr = np.convolve(dat, gaus, 'same')
#    import pylab as plt
#    print bin_freq
#    plt.subplot(311)
#    plt.plot(gaus)
#    plt.subplot(312)
#    plt.plot(dat[:5].T)
#    plt.subplot(313)
#    plt.plot(fr[:5].T)
#    plt.show()

    return fr, len(gaus) // 2
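For example (a sketch; assumes numpy as np, with binned spike counts as input):

import numpy as np
psth = np.random.poisson(2.0, size=(5, 1000))  # five trials of binned counts
fr, offset = gauss_filter(psth, bin_freq=1.0, window=300, sigma=100)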
Example #20
def loss(x, method, a_vec=None):
    # a mutable default (np.zeros(3)) would persist the fitted thresholds
    # across calls; build the array inside the function instead
    if a_vec is None:
        a_vec = np.zeros(3)
    rtn = 0
    if(sum(a_vec)==0):
        tmp = sorted(x)
        a_vec[0] = tmp[int(len(tmp)*0.5)]
        a_vec[1] = tmp[int(len(tmp)*0.75)]
        a_vec[2] = tmp[int(len(tmp)*0.85)]
    a = a_vec[0]
    b = a_vec[1]
    c = a_vec[2]
    if(sum(x<0)==0):
        rtn = np.zeros(len(x))
        
        rtn[x<=a] = x[x<=a]**2/2
        rtn[(x>a)*(x<=b)] = a*x[(x>a)*(x<=b)]-a*a/2
        rtn[(x>b)*(x<=c)] = a*(x[(x>b)*(x<=c)]-c)**2/(2*(b-c))+a*(b+c-a)/2
        rtn[x>c] = a*(b+c-a)/2
        
        '''
        tmp = x[x<=c]/c
        rtn[x<=c] = 1-(1-tmp**2)**3
        rtn[x>c] = 1
        '''
        #rtn = a**2*np.log(1+(x/a)**2)
        #rtn = a**2*(np.sqrt(1+(x/a)**2)-1)
    return(rtn)
Example #21
def loss_dev(x, method, a_vec=None):
    # same mutable-default fix as in loss() above
    if a_vec is None:
        a_vec = np.zeros(3)
    rtn = 0
    if(sum(a_vec)==0):
        tmp = sorted(x)
        a_vec[0] = tmp[int(len(tmp)*0.5)]
        a_vec[1] = tmp[int(len(tmp)*0.75)]
        a_vec[2] = tmp[int(len(tmp)*0.85)]
    a = a_vec[0]
    b = a_vec[1]
    c = a_vec[2]
    if(sum(x<0)==0):
        rtn = np.zeros(len(x))
        
        rtn[x<=a] = x[x<=a]
        rtn[(x>a)*(x<=b)] = a
        rtn[(x>b)*(x<=c)] = a*(x[(x>b)*(x<=c)]-c)/(b-c)
        rtn[x>c] = 0
        
        '''
        tmp = x[x<=c]/c
        rtn[x<=c] = 6*tmp/c*(1-tmp**2)**2
        '''
        #rtn = 2*x/(1+(x/a)**2)
        #rtn = 2*x/np.sqrt(1+(x/a)**2)
    return(rtn)
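A usage sketch tying the two together (passing one a_vec keeps the thresholds consistent between the loss and its derivative; the unused method argument is simply set to None):

import numpy as np
x = np.abs(np.random.randn(1000))
a_vec = np.zeros(3)
l = loss(x, None, a_vec=a_vec)      # fills a_vec with the 50/75/85% quantiles
g = loss_dev(x, None, a_vec=a_vec)  # reuses the same thresholds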
Example #22
def rand_jacobi_rotation(A):
    """Random Jacobi rotation of a sparse matrix.
    
    Parameters
    ----------
    A : spmatrix
        Input sparse matrix.
    
    Returns
    -------
    spmatrix
        Rotated sparse matrix.
    """
    if A.shape[0] != A.shape[1]:
        raise Exception("Input matrix must be square.")
    n = A.shape[0]
    angle = (2 * np.random.random() - 1) * np.pi
    a = 1.0 / np.sqrt(2) * np.exp(-1j * angle)
    b = 1.0 / np.sqrt(2) * np.exp(1j * angle)
    i = int(np.floor(np.random.random() * n))
    j = i
    while i == j:
        j = int(np.floor(np.random.random() * n))
    data = np.hstack(([a, -b, a, b], np.ones(n - 2, dtype=int)))
    diag = np.delete(np.arange(n), [i, j])
    rows = np.hstack(([i, i, j, j], diag))
    cols = np.hstack(([i, j, i, j], diag))
    R = sp.coo_matrix((data, (rows, cols)), shape=[n, n]).tocsr()
    A = R * A * R.conj().transpose()
    return A
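For instance (a sketch; a Hermitian input stays Hermitian under the rotation):

import numpy as np
import scipy.sparse as sp

A = sp.random(6, 6, density=0.5, format='csr')
A = A + A.T  # symmetrize
B = rand_jacobi_rotation(A)
print(abs(B - B.conj().transpose()).max())  # ~0 up to rounding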
Example #23
    def _subplot_dims(self):
        """Determine size of subplot grid, given possible
        constraints on nrows, ncols"""
        nrows = self.subplot_opts.get('nrows', None)
        ncols = self.subplot_opts.get('ncols', None)

        #if 2 keys provided, rows and cols are fixed
        if len(self._keys) == 2:
            nr = len(self._key_index[0])
            nc = len(self._key_index[1])

            if ((nrows is not None and nrows != nr) or
                (ncols is not None and ncols != nc)):
                raise ValueError("Two keys specified: (nrows, ncols) must be "
                                 "(%i, %i) " % (nr, nc))
            return nr, nc

        sz = len(self._key_index[0])

        #if 1 key provided, just need nrows * ncols >= nfacets
        if nrows is None:
            if ncols is None:
                nrows = max(1, int(np.sqrt(sz)))
            else:
                nrows = int(np.ceil(1. * sz / ncols))
        if ncols is None:
            ncols = int(np.ceil(1. * sz / nrows))
        if nrows * ncols < sz:
            raise ValueError("nrows (%i) and ncols (%i) not big enough "
                             "to plot %i facets" % (nrows, ncols, sz))
        return nrows, ncols
Example #24
def avhrr(scans_nb, scan_points,
          scan_angle=55.37, frequency=1 / 6.0, apply_offset=True):
    """Definition of the avhrr instrument.

    Source: NOAA KLM User's Guide, Appendix J
    http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm
    """
    # build the avhrr instrument (scan angles)
    avhrr_inst = np.vstack(((scan_points / 1023.5 - 1)
                            * np.deg2rad(-scan_angle),
                            np.zeros((len(scan_points),))))

    avhrr_inst = np.tile(
        avhrr_inst[:, np.newaxis, :], [1, int(scans_nb), 1])

    # building the corresponding times array
    # times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1])
    #         + np.expand_dims(offset, 1))

    times = np.tile(scan_points * 0.000025, [int(scans_nb), 1])
    if apply_offset:
        offset = np.arange(int(scans_nb)) * frequency
        times += np.expand_dims(offset, 1)

    return ScanGeometry(avhrr_inst, times)
Example #25
	def get_2state_gaussian_seq(lens,dims=2,means1=[2,2,2,2],means2=[5,5,5,5],vars1=[1,1,1,1],vars2=[1,1,1,1],anom_prob=1.0):
		
		seqs = co.matrix(0.0, (dims, lens))
		lbls = co.matrix(0, (1,lens))
		marker = 0

		# generate first state sequence
		for d in range(dims):
			seqs[d,:] = co.normal(1,lens)*vars1[d] + means1[d]

		prob = np.random.uniform()
		if prob<anom_prob:		
			# add second state blocks
			while (True):
				max_block_len = 0.6*lens
				min_block_len = 0.1*lens
				block_len = int(max_block_len*np.single(co.uniform(1))+3)
				block_start = int(lens*np.single(co.uniform(1)))

				if (block_len - (block_start+block_len-lens)-3>min_block_len):
					break

			block_len = min(block_len,block_len - (block_start+block_len-lens)-3)
			lbls[block_start:block_start+block_len-1] = 1
			marker = 1
			for d in range(dims):
				#print block_len
				seqs[d,block_start:block_start+block_len-1] = co.normal(1,block_len-1)*vars2[d] + means2[d]

		return (seqs, lbls, marker)
Example #26
def run():
    def predict_return(N,d,Pfade):
        anz_zurueck = 0
        for _ in range(Pfade):
            walks = rw(N,d)
            weite = np.zeros(d)
            for i in range(N):
                weite = weite + walks[i]
                if np.sum(np.abs(weite)) == 0:
                    anz_zurueck += 1
                    break
        print(float(anz_zurueck)/float(Pfade))
        return anz_zurueck/float(Pfade)
    print('1D')
    d1 = [predict_return(int(np.exp(i)),1,200) for i in range(15)]
    #print(d1)
    print('2D')
    d2 = [predict_return(int(np.exp(i)),2,200) for i in range(15)]
    print('3D')
    d3 = [predict_return(int(np.exp(i)),3,200) for i in range(15)]
    plt.plot(range(15),d1,label = 'Prediction in 1D')
    plt.plot(range(15),d2,label = 'Prediction in 2D')
    plt.plot(range(15),d3,label = 'Prediction in 3D')
    plt.legend()
    plt.show()
Example #27
def olci(scans_nb, scan_points=None):
    """Definition of the OLCI instrument.

    Source: Sentinel-3 OLCI Coverage
    https://sentinel.esa.int/web/sentinel/user-guides/sentinel-3-olci/coverage
    """

    if scan_points is None:
        scan_len = 4000  # samples per scan
        scan_points = np.arange(4000)
    else:
        scan_len = len(scan_points)
    # scan_rate = 0.044  # single scan, seconds
    scan_angle_west = 46.5  # swath, degrees
    scan_angle_east = -22.1  # swath, degrees
    # sampling_interval = 18e-3  # single view, seconds
    # build the olci instrument scan line angles
    scanline_angles = np.linspace(np.deg2rad(scan_angle_west),
                                  np.deg2rad(scan_angle_east), scan_len)
    inst = np.vstack((scanline_angles, np.zeros(scan_len,)))

    inst = np.tile(inst[:, np.newaxis, :], [1, int(scans_nb), 1])

    # building the corresponding times array
    # times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1])
    #         + np.expand_dims(offset, 1))

    times = np.tile(np.zeros_like(scanline_angles), [int(scans_nb), 1])
    # if apply_offset:
    #     offset = np.arange(int(scans_nb)) * frequency
    #     times += np.expand_dims(offset, 1)

    return ScanGeometry(inst, times)
Example #28
    def _grid(self, corners=False):
        """Create an xy grid of coordinates for heliographic array.

        Uses meshgrid. If corners is selected, this function will shift the array by half a pixel in both directions
        so that the corners of the normal array can be accessed easily.

        Args:
            corners (bool, optional): defaults to False, chooses whether to apply the corner calculation or not

        Returns:
            xg: 2D array containing the x-coordinates of each pixel
            yg: 2D array containing the y-coordinates of each pixel

        """
        # Retrieve integer dimensions and create arrays holding
        # x and y coordinates of each pixel
        x_dim = int(np.floor(self.im_raw.dimensions[0].value))
        y_dim = int(np.floor(self.im_raw.dimensions[1].value))

        if corners:
            x_row = (np.arange(0, x_dim + 1) - self.par['X0'] - 0.5) * self.par['xscale']
            y_row = (np.arange(0, y_dim + 1) - self.par['Y0'] - 0.5) * self.par['yscale']
            xg, yg = mnp.meshgrid(x_row, y_row)
            rg = mnp.sqrt(xg ** 2 + yg ** 2)
            self.Rg = rg
        else:
            x_row = (np.arange(0, x_dim) - self.par['X0']) * self.par['xscale']
            y_row = (np.arange(0, y_dim) - self.par['Y0']) * self.par['yscale']
            xg, yg = mnp.meshgrid(x_row, y_row)
            rg = mnp.sqrt(xg ** 2 + yg ** 2)
            self.xg = xg
            self.yg = yg
            self.rg = rg

        return xg, yg
Example #29
def test_superpixel(aia171_test_map, aia171_test_map_with_mask):
    dimensions = (2, 2)*u.pix
    superpixel_map_sum = aia171_test_map.superpixel(dimensions)
    assert_quantity_allclose(superpixel_map_sum.dimensions[1], aia171_test_map.dimensions[1]/dimensions[1]*u.pix)
    assert_quantity_allclose(superpixel_map_sum.dimensions[0], aia171_test_map.dimensions[0]/dimensions[0]*u.pix)
    assert_quantity_allclose(superpixel_map_sum.data[0][0], (aia171_test_map.data[0][0] +
                                                             aia171_test_map.data[0][1] +
                                                             aia171_test_map.data[1][0] +
                                                             aia171_test_map.data[1][1]))

    superpixel_map_avg = aia171_test_map.superpixel(dimensions, func=np.mean)
    assert_quantity_allclose(superpixel_map_avg.dimensions[1], aia171_test_map.dimensions[1]/dimensions[1]*u.pix)
    assert_quantity_allclose(superpixel_map_avg.dimensions[0], aia171_test_map.dimensions[0]/dimensions[0]*u.pix)
    assert_quantity_allclose(superpixel_map_avg.data[0][0], (aia171_test_map.data[0][0] +
                                                             aia171_test_map.data[0][1] +
                                                             aia171_test_map.data[1][0] +
                                                             aia171_test_map.data[1][1])/4.0)

    # Test that the mask is respected
    superpixel_map_sum = aia171_test_map_with_mask.superpixel(dimensions)
    assert superpixel_map_sum.mask is not None
    assert_quantity_allclose(superpixel_map_sum.mask.shape[0],
                             aia171_test_map.dimensions[1]/dimensions[1])
    assert_quantity_allclose(superpixel_map_sum.mask.shape[1],
                             aia171_test_map.dimensions[0]/dimensions[0])

    # Test that the offset is respected
    superpixel_map_sum = aia171_test_map_with_mask.superpixel(dimensions, offset=(1, 1)*u.pix)
    assert_quantity_allclose(superpixel_map_sum.dimensions[1], aia171_test_map.dimensions[1]/dimensions[1]*u.pix - 1*u.pix)
    assert_quantity_allclose(superpixel_map_sum.dimensions[0], aia171_test_map.dimensions[0]/dimensions[0]*u.pix - 1*u.pix)

    dimensions = (7, 9)*u.pix
    superpixel_map_sum = aia171_test_map_with_mask.superpixel(dimensions, offset=(4, 4)*u.pix)
    assert_quantity_allclose(superpixel_map_sum.dimensions[0], int((aia171_test_map.dimensions[0]/dimensions[0]).value)*u.pix - 1*u.pix)
    assert_quantity_allclose(superpixel_map_sum.dimensions[1], int((aia171_test_map.dimensions[1]/dimensions[1]).value)*u.pix - 1*u.pix)
Example #30
def chain2image(chaincode,start_pix):

    """
    Method to compute the pixel contour providing the chain code string
    and the starting pixel location [X,Y].
    Author: Xavier Bonnin (LESIA)
    """

    if not isinstance(chaincode, str):
        print("First input argument must be a string!")
        return None

    if (len(start_pix) != 2):
        print("Second input argument must be a 2-elements vector!")
        return None

    ardir = np.array([[-1,0],[-1,1],[0,1],[1,1],[1,0],[1,-1],[0,-1],[-1,-1]])
    ccdir = np.array([0,7,6,5,4,3,2,1])

    X=[start_pix[0]]
    Y=[start_pix[1]]
    for c in chaincode:
        if (abs(np.int8(c)) > 7):
            print("Wrong chain code format!")
            return None
        wc = np.where(np.int8(c) == np.int8(ccdir))[0]
        X.append(X[-1] + int(ardir[wc[0], 0]))
        Y.append(Y[-1] + int(ardir[wc[0], 1]))
    return X,Y
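A small usage sketch (each code selects a direction in ardir via ccdir; the string below traces a 2x2 loop back to the start):

contour = chain2image("00224466", [10, 10])
print(contour)  # ([10, 9, 8, 8, 8, 9, 10, 10, 10], [10, 10, 10, 9, 8, 8, 8, 9, 10])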
Example #31
def cov_type(data):
    return int(data)
Example #32
def plotfunc():

    scale = main.lineEdit.text()
    scale = int(scale)

    SecNo_CMM = 11
    SecNo_FEM = main.FEM_sec.text()
    SecNo_FEM = int(SecNo_FEM)
    if SecNo_FEM<10:
        SecNo_FEM = '0' + str(SecNo_FEM)
    
    ax=np.zeros(4)
    CylNo = 4
    add='FEM_OD_WT'
    temp_FEM_OD_WT=np.zeros((124,2))
    # this part counts the number of step files in the add directory
    StepFileNo=len(fnmatch.filter(os.listdir(add), 'STEP*.txt'))

    fig={}
    ax1f1={}
    
    for LinerNo in range (CylNo):

        displacement_names_FEM='STEP1LINER0'+str(LinerNo+1)+'SEC'+str(SecNo_FEM)+'.txt'
        displacement_add=main.adr+"//"+displacement_names_FEM
        NodeAdd=main.adr+"//nodes.txt"
        
        # Data for the node array
        nodes_FEM_OD_WT=pandas.read_csv(NodeAdd, header=None)
        nodes_FEM_OD_WT=nodes_FEM_OD_WT.to_numpy()
        
        # Data for the displacement array
        displacement_FEM_OD_WT=pandas.read_csv(displacement_add, header=None)
        displacement_FEM_OD_WT=displacement_FEM_OD_WT.to_numpy()    

        # Find the array elements of disp in node
        indices = np.where(np.in1d(nodes_FEM_OD_WT[:,0], displacement_FEM_OD_WT[:,0]))[0]
        temp_OD=nodes_FEM_OD_WT[indices,:]
        
        Cx_OD=np.mean(temp_OD[:,1])
        temp_OD[:,1]=temp_OD[:,1]-Cx_OD
        deformation_OD=temp_OD[:,1:]+displacement_FEM_OD_WT[:,1:]
        
        Number_FEM_OD_WT=len(displacement_FEM_OD_WT)
        
        Cmean_FEM_OD_WT=np.mean(temp_OD[:,1:],axis=0)
        Rnorm_FEM_OD_WT=LA.norm(deformation_OD[0,:]-Cmean_FEM_OD_WT)
        
        #polar plotting requirements
        rpolar=np.sqrt(deformation_OD[:,0]**2+deformation_OD[:,1]**2)
        phi_polar=np.arctan2(deformation_OD[:,1],deformation_OD[:,0])
        dr_FEM_OD_WT=rpolar-39.3
        
        temp_FEM_OD_WT[:,0]=phi_polar
        temp_FEM_OD_WT[:,1]=dr_FEM_OD_WT*scale+39.3
        temp_FEM_OD_WT=temp_FEM_OD_WT[np.argsort(temp_FEM_OD_WT[:,0])]
        
        
        fig[LinerNo+1]=Figure()
        ax1f1[LinerNo+1]=fig[LinerNo+1].add_subplot(111, projection='polar')
        ax1f1[LinerNo+1].plot(temp_FEM_OD_WT[:,0],temp_FEM_OD_WT[:,1], label='CylNO'+str(LinerNo+1))
        ax1f1[LinerNo+1].legend()
        
        main.addmpl(fig[LinerNo+1])
        
    
    main.L1.clear()    
    main.addfig('Cylinder No 1',fig[1])
    main.addfig('Cylinder No 2',fig[2])
    main.addfig('Cylinder No 3',fig[3])
    main.addfig('Cylinder No 4',fig[4])
Example #33
    def run(self, walker_data):
        """Use ensemble sampling to determine posteriors."""
        from mosfit.fitter import draw_walker, frack, ln_likelihood, ln_prior

        prt = self._printer

        self._emcee_est_t = 0.0
        self._bh_est_t = 0.0
        if self._burn is not None:
            self._burn_in = min(self._burn, self._iterations)
        elif self._post_burn is not None:
            self._burn_in = max(self._iterations - self._post_burn, 0)
        else:
            self._burn_in = int(np.round(self._iterations / 2))

        self._ntemps, ndim = (self._num_temps,
                              self._model._num_free_parameters)

        if self._num_walkers:
            self._nwalkers = self._num_walkers
        else:
            self._nwalkers = 2 * ndim

        test_walker = self._iterations > 0
        self._lnprob = None
        self._lnlike = None
        pool_size = max(self._pool.size, 1)
        # Derived so only half a walker redrawn with Gaussian distribution.
        redraw_mult = 0.5 * np.sqrt(2) * scipy.special.erfinv(
            float(self._nwalkers - 1) / self._nwalkers)

        prt.message('nmeas_nfree', [self._model._num_measurements, ndim])
        if test_walker:
            if self._model._num_measurements <= ndim:
                prt.message('too_few_walkers', warning=True)
            if self._nwalkers < 10 * ndim:
                prt.message('want_more_walkers', [10 * ndim, self._nwalkers],
                            warning=True)
        p0 = [[] for x in range(self._ntemps)]

        # Generate walker positions based upon loaded walker data, if
        # available.
        walkers_pool = []
        walker_weights = []
        nmodels = len(set([x[0] for x in walker_data]))
        wp_extra = 0
        while len(walkers_pool) < len(walker_data):
            appended_walker = False
            for walk in walker_data:
                if (len(walkers_pool) + wp_extra) % nmodels != walk[0]:
                    continue
                new_walk = np.full(self._model._num_free_parameters, None)
                for k, key in enumerate(self._model._free_parameters):
                    param = self._model._modules[key]
                    walk_param = walk[1].get(key)
                    if walk_param is None or 'value' not in walk_param:
                        continue
                    if param:
                        val = param.fraction(walk_param['value'])
                        if not np.isnan(val):
                            new_walk[k] = val
                walkers_pool.append(new_walk)
                walker_weights.append(walk[2])
                appended_walker = True
            if not appended_walker:
                wp_extra += 1

        # Make sure weights are normalized.
        if None not in walker_weights:
            totw = np.sum(walker_weights)
            walker_weights = [x / totw for x in walker_weights]

        # Draw walker positions. This is either done from the priors or from
        # loaded walker data. If some parameters are not available from the
        # loaded walker data they will be drawn from their priors instead.
        pool_len = len(walkers_pool)
        for i, pt in enumerate(p0):
            dwscores = []
            while len(p0[i]) < self._nwalkers:
                prt.status(self,
                           desc='drawing_walkers',
                           iterations=[
                               i * self._nwalkers + len(p0[i]) + 1,
                               self._nwalkers * self._ntemps
                           ])

                if self._pool.size == 0 or pool_len:
                    self._p, score = draw_walker(
                        test_walker,
                        walkers_pool,
                        replace=pool_len < self._ntemps * self._nwalkers,
                        weights=walker_weights)
                    p0[i].append(self._p)
                    dwscores.append(score)
                else:
                    nmap = min(self._nwalkers - len(p0[i]),
                               max(self._pool.size, 10))
                    dws = self._pool.map(draw_walker, [test_walker] * nmap)
                    p0[i].extend([x[0] for x in dws])
                    dwscores.extend([x[1] for x in dws])

                if self._fitter._draw_above_likelihood is not False:
                    self._fitter._draw_above_likelihood = np.mean(dwscores)

        prt.message('initial_draws', inline=True)
        self._p = list(p0)

        self._emi = 0
        self._acor = None
        self._aacort = -1
        self._aa = 0
        self._psrf = np.inf
        self._all_chain = np.array([])
        self._scores = np.ones((self._ntemps, self._nwalkers)) * -np.inf

        tft = 0.0  # Total self._fracking time
        sli = 1.0  # Keep track of how many times chain halved
        s_exception = None
        kmat = None
        ages = np.zeros((self._ntemps, self._nwalkers), dtype=int)
        oldp = self._p

        max_chunk = 1000
        kmat_chunk = 5
        iter_chunks = int(np.ceil(float(self._iterations) / max_chunk))
        iter_arr = [
            max_chunk if xi < iter_chunks - 1 else self._iterations -
            max_chunk * (iter_chunks - 1)
            for xi, x in enumerate(range(iter_chunks))
        ]
        # Make sure a chunk separation is located at self._burn_in
        chunk_is = sorted(
            set(np.concatenate(([0, self._burn_in], np.cumsum(iter_arr)))))
        iter_arr = np.diff(chunk_is)

        # The argument of the for loop runs emcee, after each iteration of
        # emcee the contents of the for loop are executed.
        converged = False
        exceeded_walltime = False
        ici = 0

        try:
            if self._iterations > 0:
                sampler = MOSSampler(self._ntemps,
                                     self._nwalkers,
                                     ndim,
                                     ln_likelihood,
                                     ln_prior,
                                     pool=self._pool)
                st = time.time()
            while (self._iterations > 0
                   and (self._cc is not None or ici < len(iter_arr))):
                slr = int(np.round(sli))
                ic = (max_chunk if self._cc is not None else iter_arr[ici])
                if exceeded_walltime:
                    break
                if (self._cc is not None and converged
                        and self._emi > self._iterations):
                    break
                for li, (self._p, self._lnprob, self._lnlike) in enumerate(
                        sampler.sample(self._p,
                                       iterations=ic,
                                       gibbs=self._gibbs if
                                       self._emi >= self._burn_in else True)):
                    if (self._fitter._maximum_walltime is not False
                            and time.time() - self._fitter._start_time >
                            self._fitter._maximum_walltime):
                        prt.message('exceeded_walltime', warning=True)
                        exceeded_walltime = True
                        break
                    self._emi = self._emi + 1
                    emim1 = self._emi - 1
                    messages = []

                    # Increment the age of each walker if their positions are
                    # unchanged.
                    for ti in range(self._ntemps):
                        for wi in range(self._nwalkers):
                            if np.array_equal(self._p[ti][wi], oldp[ti][wi]):
                                ages[ti][wi] += 1
                            else:
                                ages[ti][wi] = 0

                    # Record then reset sampler proposal/acceptance counts.
                    accepts = list(
                        np.mean(sampler.nprop_accepted / sampler.nprop,
                                axis=1))
                    sampler.nprop = np.zeros(
                        (sampler.ntemps, sampler.nwalkers), dtype=float)
                    sampler.nprop_accepted = np.zeros(
                        (sampler.ntemps, sampler.nwalkers), dtype=float)

                    # During self._burn-in only, redraw any walkers with scores
                    # significantly worse than their peers, or those that are
                    # stale (i.e. remained in the same position for a long
                    # time).
                    if emim1 <= self._burn_in:
                        pmedian = [np.median(x) for x in self._lnprob]
                        pmead = [
                            np.mean([abs(y - pmedian) for y in x])
                            for x in self._lnprob
                        ]
                        redraw_count = 0
                        bad_redraws = 0
                        for ti, tprob in enumerate(self._lnprob):
                            for wi, wprob in enumerate(tprob):
                                if (wprob <= pmedian[ti] -
                                        max(redraw_mult * pmead[ti],
                                            float(self._nwalkers))
                                        or np.isnan(wprob)
                                        or ages[ti][wi] >= self._REPLACE_AGE):
                                    redraw_count = redraw_count + 1
                                    dxx = np.random.normal(scale=0.01,
                                                           size=ndim)
                                    tar_x = np.array(self._p[np.random.randint(
                                        self._ntemps)][np.random.randint(
                                            self._nwalkers)])
                                    # Reflect if out of bounds.
                                    new_x = np.clip(
                                        np.where(
                                            np.where(tar_x + dxx < 1.0, tar_x +
                                                     dxx, tar_x - dxx) > 0.0,
                                            tar_x + dxx, tar_x - dxx), 0.0,
                                        1.0)
                                    new_like = ln_likelihood(new_x)
                                    new_prob = new_like + ln_prior(new_x)
                                    if new_prob > wprob or np.isnan(wprob):
                                        self._p[ti][wi] = new_x
                                        self._lnlike[ti][wi] = new_like
                                        self._lnprob[ti][wi] = new_prob
                                    else:
                                        bad_redraws = bad_redraws + 1
                        if redraw_count > 0:
                            messages.append(
                                '{:.0%} redraw, {}/{} success'.format(
                                    redraw_count /
                                    (self._nwalkers * self._ntemps),
                                    redraw_count - bad_redraws, redraw_count))

                    oldp = self._p.copy()

                    # Calculate the autocorrelation time.
                    low = 10
                    asize = 0.5 * (emim1 - self._burn_in) / low
                    if asize >= 0 and self._ct == 'acor':
                        acorc = max(
                            1,
                            min(self._MAX_ACORC,
                                int(np.floor(0.5 * self._emi / low))))
                        self._aacort = -1.0
                        self._aa = 0
                        self._ams = self._burn_in
                        cur_chain = (np.concatenate(
                            (self._all_chain,
                             sampler.chain[:, :, :li + 1:slr, :]),
                            axis=2) if len(self._all_chain) else
                                     sampler.chain[:, :, :li + 1:slr, :])
                        for a in range(acorc, 1, -1):
                            ms = self._burn_in
                            if ms >= self._emi - low:
                                break
                            try:
                                acorts = sampler.get_autocorr_time(
                                    chain=cur_chain,
                                    low=low,
                                    c=a,
                                    min_step=int(np.round(float(ms) / sli)),
                                    max_walkers=5,
                                    fast=True)
                                acort = max([max(x) for x in acorts])
                            except AutocorrError:
                                continue
                            else:
                                self._aa = a
                                self._aacort = acort * sli
                                self._ams = ms
                                break
                        self._acor = [self._aacort, self._aa, self._ams]

                        self._actc = int(np.ceil(self._aacort / sli))
                        actn = int(
                            float(self._emi - self._ams) / self._actc)

                        if (self._cc is not None and actn >= self._cc
                                and self._emi > self._iterations):
                            prt.message('converged')
                            converged = True
                            break

                    # Calculate the PSRF (Gelman-Rubin statistic).
                    if li > 1 and self._emi > self._burn_in + 2:
                        cur_chain = (np.concatenate(
                            (self._all_chain,
                             sampler.chain[:, :, :li + 1:slr, :]),
                            axis=2) if len(self._all_chain) else
                                     sampler.chain[:, :, :li + 1:slr, :])
                        vws = np.zeros((self._ntemps, ndim))
                        for ti in range(self._ntemps):
                            for xi in range(ndim):
                                vchain = cur_chain[
                                    ti, :,
                                    int(np.floor(self._burn_in / sli)):, xi]
                                vws[ti][xi] = self.psrf(vchain)
                        self._psrf = np.max(vws)
                        if np.isnan(self._psrf):
                            self._psrf = np.inf

                        if (self._ct == 'psrf' and self._cc is not None
                                and self._psrf < self._cc
                                and self._emi > self._iterations):
                            prt.message('converged')
                            converged = True
                            break

                    if self._cc is not None:
                        self._emcee_est_t = -1.0
                    else:
                        self._emcee_est_t = float(
                            time.time() - st - tft) / self._emi * (
                                self._iterations -
                                self._emi) + tft / self._emi * max(
                                    0, self._burn_in - self._emi)

                    # Perform self._fracking if we are still in the self._burn
                    # in phase and iteration count is a multiple of the frack
                    # step.
                    frack_now = (self._fracking and self._frack_step != 0
                                 and self._emi <= self._burn_in
                                 and self._emi % self._frack_step == 0)

                    self._scores = [np.array(x) for x in self._lnprob]
                    if emim1 % kmat_chunk == 0:
                        sout = self._model.run_stack(self._p[np.unravel_index(
                            np.argmax(self._lnprob), self._lnprob.shape)],
                                                     root='objective')
                        kmat = sout.get('kmat')
                        kdiag = sout.get('kdiagonal')
                        variance = sout.get('obandvs', sout.get('variance'))
                        if kdiag is not None and kmat is not None:
                            kmat[np.diag_indices_from(kmat)] += kdiag
                        elif kdiag is not None and kmat is None:
                            kmat = np.diag(kdiag + variance)
                    prt.status(
                        self,
                        desc='fracking' if frack_now else
                        ('burning'
                         if self._emi < self._burn_in else 'walking'),
                        scores=self._scores,
                        kmat=kmat,
                        accepts=accepts,
                        iterations=[
                            self._emi,
                            None if self._cc is not None else self._iterations
                        ],
                        acor=self._acor,
                        psrf=[self._psrf, self._burn_in],
                        messages=messages,
                        make_space=emim1 == 0,
                        convergence_type=self._ct,
                        convergence_criteria=self._cc)

                    if s_exception:
                        break

                    if not frack_now:
                        continue

                    # Fracking starts here
                    sft = time.time()
                    ijperms = [[x, y] for x in range(self._ntemps)
                               for y in range(self._nwalkers)]
                    ijprobs = np.array([
                        1.0
                        # self._lnprob[x][y]
                        for x in range(self._ntemps)
                        for y in range(self._nwalkers)
                    ])
                    ijprobs -= max(ijprobs)
                    ijprobs = [np.exp(0.1 * x) for x in ijprobs]
                    ijprobs /= sum([x for x in ijprobs if not np.isnan(x)])
                    nonzeros = len([x for x in ijprobs if x > 0.0])
                    selijs = [
                        ijperms[x]
                        for x in np.random.choice(range(len(ijperms)),
                                                  pool_size,
                                                  p=ijprobs,
                                                  replace=(
                                                      pool_size > nonzeros))
                    ]

                    bhwalkers = [self._p[i][j] for i, j in selijs]

                    seeds = [
                        int(round(time.time() * 1000.0)) % 4294900000 + x
                        for x in range(len(bhwalkers))
                    ]
                    frack_args = list(zip(bhwalkers, seeds))
                    bhs = list(self._pool.map(frack, frack_args))
                    for bhi, bh in enumerate(bhs):
                        (wi, ti) = tuple(selijs[bhi])
                        if -bh.fun > self._lnprob[wi][ti]:
                            self._p[wi][ti] = bh.x
                            like = ln_likelihood(bh.x)
                            self._lnprob[wi][ti] = like + ln_prior(bh.x)
                            self._lnlike[wi][ti] = like
                    self._scores = [[-x.fun for x in bhs]]
                    prt.status(
                        self,
                        desc='fracking_results',
                        scores=self._scores,
                        kmat=kmat,
                        fracking=True,
                        iterations=[
                            self._emi,
                            None if self._cc is not None else self._iterations
                        ],
                        convergence_type=self._ct,
                        convergence_criteria=self._cc)
                    tft = tft + time.time() - sft
                    if s_exception:
                        break

                if ici == 0:
                    self._all_chain = sampler.chain[:, :, :li + 1:slr, :]
                    self._all_lnprob = sampler.lnprobability[:, :, :li + 1:slr]
                    self._all_lnlike = sampler.lnlikelihood[:, :, :li + 1:slr]
                else:
                    self._all_chain = np.concatenate(
                        (self._all_chain, sampler.chain[:, :, :li + 1:slr, :]),
                        axis=2)
                    self._all_lnprob = np.concatenate(
                        (self._all_lnprob,
                         sampler.lnprobability[:, :, :li + 1:slr]),
                        axis=2)
                    self._all_lnlike = np.concatenate(
                        (self._all_lnlike,
                         sampler.lnlikelihood[:, :, :li + 1:slr]),
                        axis=2)

                mem_mb = (self._all_chain.nbytes + self._all_lnprob.nbytes +
                          self._all_lnlike.nbytes) / (1024. * 1024.)

                if self._fitter._debug:
                    prt.prt('Memory `{}`'.format(mem_mb), wrapped=True)

                if mem_mb > self._fitter._maximum_memory:
                    sfrac = float(self._all_lnprob.shape[-1]
                                  ) / self._all_lnprob[:, :, ::2].shape[-1]
                    self._all_chain = self._all_chain[:, :, ::2, :]
                    self._all_lnprob = self._all_lnprob[:, :, ::2]
                    self._all_lnlike = self._all_lnlike[:, :, ::2]
                    sli *= sfrac
                    if self._fitter._debug:
                        prt.prt('Memory halved, sli: {}'.format(sli),
                                wrapped=True)

                sampler.reset()
                gc.collect()
                ici = ici + 1

        except (KeyboardInterrupt, SystemExit):
            prt.message('ctrl_c', error=True, prefix=False, color='!r')
            s_exception = sys.exc_info()
        except Exception:
            raise

        if s_exception is not None:
            self._pool.close()
            if (not prt.prompt('mc_interrupted')):
                sys.exit()

        msg_criteria = (1.1 if self._cc is None else self._cc)
        if (test_walker and self._ct == 'psrf' and msg_criteria is not None
                and self._psrf > msg_criteria):
            prt.message(
                'not_converged',
                ['default' if self._cc is None else 'specified', msg_criteria],
                warning=True)
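The loop above stops once self._psrf drops below the convergence criteria; the PSRF itself is computed elsewhere in the class. As a rough illustration of what such a Gelman-Rubin statistic looks like (a sketch, not this fitter's actual implementation):

import numpy as np

def gelman_rubin_psrf(chains):
    """Potential scale reduction factor for chains of shape (nwalkers, nsteps)."""
    m, n = chains.shape
    chain_means = chains.mean(axis=1)
    W = chains.var(axis=1, ddof=1).mean()    # mean within-chain variance
    B = n * chain_means.var(ddof=1)          # between-chain variance
    var_est = (n - 1.0) / n * W + B / n      # pooled variance estimate
    return np.sqrt(var_est / W)              # values near 1.0 indicate convergence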
Example #34
0
import os
import numpy as np
import psutil
import scipy
from ipyparallel import Client
#mpl.use('Qt5Agg')
import pylab as pl
pl.ion()
#%%
import caiman as cm
from caiman.components_evaluation import evaluate_components
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.base.rois import extract_binary_masks_blob
from caiman.source_extraction import cnmf
#%%
#backend='SLURM'
backend = 'local'
if backend == 'SLURM':
    n_processes = np.int(os.environ.get('SLURM_NPROCS'))
else:
    n_processes = np.maximum(
        np.int(psutil.cpu_count()),
        1)  # number of cores on your machine, at least 1
print(('using ' + str(n_processes) + ' processes'))
#%% start cluster for efficient computation
single_thread = False

if single_thread:
    dview = None
else:
    try:
        c.close()
    except NameError:
        print('Client c did not exist yet, creating one')
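The example is cut off before the client is actually created; in a typical ipyparallel setup the continuation would look roughly like this (a sketch assuming a running ipcluster, not part of the scraped file):

    c = Client()                 # connect to the running ipcluster
    dview = c[:]                 # direct view over all engines
    print('Using {} engines'.format(len(c.ids)))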
Example #35
0
File: cv.py Project: cjjdzh/Carla
def find_lane_pixels_using_histogram(binary_warped):
    DEBUG = False
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)

    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0] // 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # Choose the number of sliding windows
    nwindows = 10
    # Set the width of the windows +/- margin
    #margin = image.shape[1] // 10
    margin = 720 // 10
    # Set minimum number of pixels found to recenter window
    minpix = 30

    # Set height of windows - based on nwindows above and image shape
    window_height = np.int(binary_warped.shape[0] // nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated later for each window in nwindows
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    if DEBUG:
        slate = np.dstack((binary_warped, binary_warped, binary_warped)) * 255

    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        # Identify the nonzero pixels in x and y within the window #
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) &
                          (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) &
                           (nonzerox < win_xright_high)).nonzero()[0]

        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
        if DEBUG:
            slate = cv2.rectangle(slate, (win_xleft_low, win_y_low),
                                  (win_xleft_high, win_y_high), (0, 255, 255),
                                  3)
            slate = cv2.rectangle(slate, (win_xright_low, win_y_low),
                                  (win_xright_high, win_y_high), (0, 255, 255),
                                  3)
            slate = cv2.circle(slate,
                               (leftx_current, (win_y_low + win_y_high) // 2),
                               1, (0, 255, 0), 3)
            slate = cv2.circle(slate,
                               (rightx_current, (win_y_low + win_y_high) // 2),
                               1, (0, 255, 0), 3)

    # Concatenate the arrays of indices (previously was a list of lists of pixels)
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    if DEBUG:
        plt.figure()
        plt.bar(range(binary_warped.shape[1]), histogram)
        plt.imshow(slate)
    return leftx, lefty, rightx, righty
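A typical next step, not shown in this excerpt, is to fit a second-order polynomial to each set of returned lane pixels; a minimal sketch using a hypothetical helper name:

def fit_lane_polynomials(binary_warped):
    leftx, lefty, rightx, righty = find_lane_pixels_using_histogram(binary_warped)
    # Fit x = f(y) because lane lines are near-vertical in the warped image
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    return left_fit, right_fit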
OutputImge = cv2.resize(img,(0,0),fx=0.5,fy=0.5)
OutputImge = cv2.resize(OutputImge,(0,0),fx=0.5,fy=0.5)
cv2.imshow('Scaled',OutputImge)
cv2.imshow('gradX',grad_X)

blurred = cv2.blur(gradient,(9,9))
(_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21,7))
closed = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE, kernel)

# perform a series of erosions and dilations to remove small blobs
closed = cv2.erode(closed, None, iterations = 4)
closed = cv2.dilate(closed, None, iterations = 4)

#cnts, _ = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
image,contours,_ = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
c = sorted(contours, key=cv2.contourArea, reverse=True)[0]

# compute the rotated bounding box of the largest contour
#rect = cv2.minAreaRect(c)
#box = np.int(cv2.cv.BoxPoints(rect))
rect = cv2.minAreaRect(c)
box = np.int0(cv2.boxPoints(rect))  # np.int cannot convert an array; np.int0 truncates elementwise

cv2.drawContours(img, [box], -1, (0, 255, 0), 3)
cv2.imshow("Image", img)

cv2.waitKey(0)
cv2.destroyAllWindows()
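Note that np.int is just an alias of Python's built-in int and only converts scalars, which is why np.int0 is needed on the boxPoints array above. A quick illustration:

import numpy as np

pts = np.array([[10.7, 3.2], [4.1, 8.9]])
# np.int(pts) raises TypeError: only size-1 arrays can be converted to Python scalars
box = np.int0(pts)   # elementwise truncation to a platform-integer array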
Example #37
0
    def show(self, bin_size=0.025, min_bin=0, max_frame=25, cartoon=False):

        # Store some information
        self.bin_size = bin_size
        self.min_bin = min_bin
        self.max_frame = max_frame
        self.cartoon = cartoon
        self.H_frame = None
        self.id_to_H_frame = []

        title = ""
        xx, yy = [], []
        count, color, e = [], [], []

        # Get edges
        edges_x, edges_y = self.assignbins2D(self.coord, bin_size)

        # Get 2D histogram, just to have the number of conformation per bin
        H, edges_x, edges_y = np.histogram2d(self.coord[:, 0],
                                             self.coord[:, 1],
                                             bins=(edges_x, edges_y))
        # ... and replace all zeros by nan
        H[H == 0.] = np.nan

        # Initialize histogram array and frame array
        tmp = np.zeros(shape=(edges_x.shape[0], edges_y.shape[0], 1),
                       dtype=np.int32)
        try:
            self.H_frame = np.zeros(shape=(edges_x.shape[0], edges_y.shape[0],
                                           np.int(np.nanmax(H))),
                                    dtype=np.int32)
        except MemoryError:
            print(
                'Error: Histogram too big (memory). Try with a bigger bin size.'
            )
            sys.exit(1)

        if self.energy is not None:
            H_energy = np.empty(shape=(edges_x.shape[0], edges_y.shape[0],
                                       np.int(np.nanmax(H))))
            H_energy.fill(np.nan)

        # Return the indices of the bins to which each value in the input array belongs.
        # np.digitize returns 1-based bin indices, so subtract 1 for 0-based array indexing.
        ix = np.digitize(self.coord[:, 0], edges_x) - 1
        iy = np.digitize(self.coord[:, 1], edges_y) - 1

        # For each coordinate, we put them in the right bin and add the frame number
        for i in xrange(0, self.frames.shape[0]):
            # Put frame numbers in a histogram too
            self.H_frame[ix[i], iy[i], tmp[ix[i], iy[i]]] = self.frames[i]

            # The same for the energy, if we provide them
            if self.energy is not None:
                H_energy[ix[i], iy[i], tmp[ix[i], iy[i]]] = self.energy[i]

            # Add 1 to the corresponding bin
            tmp[ix[i], iy[i]] += 1

        if self.energy is not None:
            # get mean energy per bin
            H_energy = np.nanmean(H_energy, axis=2)

        # Get STD and MEAN conformations/energy
        if self.energy is not None:
            std = np.nanstd(H_energy)
            mean = np.nanmean(H_energy)
        else:
            std = np.int(np.nanstd(H))
            mean = np.int(np.nanmean(H))

        # Get min_hist and max_hist
        min_hist = mean - std
        max_hist = mean + std
        # Clamp min_hist to min_bin if it is not positive
        min_hist = min_hist if min_hist > 0 else min_bin

        unit = '#conf.' if self.energy is None else 'Kcal/mol'
        print("Min: %8.2f Max: %8.2f (%s)" % (min_hist, max_hist, unit))

        # And keep only the bins that contain structures
        for i in xrange(0, H.shape[0]):
            for j in xrange(0, H.shape[1]):

                if H[i, j] > min_bin:
                    xx.append(edges_x[i])
                    yy.append(edges_y[j])
                    self.id_to_H_frame.append((i, j))
                    count.append(H[i, j])

                    if self.energy is None:
                        value = 1. - (np.float(H[i, j]) -
                                      min_hist) / (max_hist - min_hist)
                    else:
                        value = (np.float(H_energy[i, j]) -
                                 min_hist) / (max_hist - min_hist)
                        e.append(H_energy[i, j])

                    color.append(self.generate_color(value, "jet"))

        TOOLS = "wheel_zoom,box_zoom,undo,redo,box_select,save,reset,hover,crosshair,tap,pan"

        # Create the title with all the parameters contain in the file
        if self.comments:
            for key, value in self.comments.iteritems():
                title += "%s: %s " % (key, value)
        else:
            title = "#conformations: %s" % self.frames.shape[0]

        p = figure(plot_width=1500, plot_height=1500, tools=TOOLS, title=title)
        p.title.text_font_size = '20pt'

        # Create source
        source = ColumnDataSource(
            data=dict(xx=xx, yy=yy, count=count, color=color))

        if self.energy is not None:
            source.add(e, name="energy")

        # Create histogram
        p.rect(x="xx",
               y="yy",
               source=source,
               width=bin_size,
               height=bin_size,
               color="color",
               line_alpha="color",
               line_color="black")

        # Create Hovertools
        tooltips = [("(X, Y)", "(@xx @yy)"), ("#Frames", "@count")]
        if self.energy is not None:
            tooltips += [("Energy (Kcal/mol)", "@energy")]

        hover = p.select({"type": HoverTool})
        hover.tooltips = tooltips

        # open a session to keep our local document in sync with server
        session = push_session(curdoc())
        # Update data when we select conformations
        source.on_change("selected", self.get_selected_frames)
        # Open the document in a browser
        session.show(p)
        # Run forever !!
        session.loop_until_closed()
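assignbins2D is a helper defined elsewhere in this class; judging from how its output feeds np.histogram2d, it presumably builds uniform bin edges spanning the data, along the lines of this assumed sketch:

    def assignbins2D(self, coord, bin_size):
        # Assumed implementation: uniform edges covering each coordinate axis
        x_min, x_max = coord[:, 0].min(), coord[:, 0].max()
        y_min, y_max = coord[:, 1].min(), coord[:, 1].max()
        edges_x = np.arange(x_min, x_max + bin_size, bin_size)
        edges_y = np.arange(y_min, y_max + bin_size, bin_size)
        return edges_x, edges_y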
Example #38
0
def main():
    if args.fp16:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."

    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    csv_file = os.path.join(args.data, 'train.csv')
    df = pd.read_csv(csv_file, index_col=0)
    df = df.drop(['url'], axis=1)

    df_count = df.groupby('landmark_id').size()
    df_count = df_count.sort_values()
    df_count = df_count.to_frame('count')
    df_count['label'] = np.arange(len(df_count))
    label_dict = df_count.loc[:, 'label'].to_dict()

    df['label'] = df['landmark_id'].map(label_dict)
    label_start = df_count[df_count['count'] > 2].iloc[0, 1]
    df2 = df.loc[df['label'] >= label_start]

    r = df2.shape[0]
    rs = np.int(r / 50)
    print('Number of images:', df.shape[0])
    print('Number of labels:', df_count.shape[0])
    print('We sampled ', rs, 'starting from label', label_start,
          'as validation data')

    labels = dict()
    labels['val'] = df2['label'].sample(n=rs)
    labels['train'] = df['label'].drop(labels['val'].index)

    txt_path = dict()
    for phase in ['train', 'val']:
        txt_path[phase] = os.path.join(args.data, phase + '.txt')
        file1 = open(txt_path[phase], "w")
        lc1 = labels[phase].index.tolist()
        lc2 = labels[phase].tolist()
        for id, ll in zip(lc1, lc2):
            file1.write(id[0] + '/' + id[1] + '/' + id[2] + '/' + id + '.jpg' +
                        ' ' + str(ll) + '\n')
        file1.close()
    del df, df_count, df2, labels, label_dict

    crop_size = 224
    val_size = 256
    dataloader = dict()

    print('use ' + ['GPU', 'CPU'][args.dali_cpu] + ' to load data')
    print('Half precision:' + str(args.fp16))

    pipe = HybridTrainPipe(batch_size=args.batch_size,
                           num_threads=args.workers,
                           device_id=args.local_rank,
                           data_dir=args.data,
                           crop=crop_size,
                           dali_cpu=args.dali_cpu,
                           file_list=txt_path['train'])
    pipe.build()
    dataloader['train'] = DALIClassificationIterator(
        pipe, size=int(pipe.epoch_size("Reader") / args.world_size))

    pipe = HybridValPipe(batch_size=args.batch_size,
                         num_threads=args.workers,
                         device_id=args.local_rank,
                         data_dir=args.data,
                         crop=crop_size,
                         size=val_size,
                         file_list=txt_path['val'])
    pipe.build()
    dataloader['val'] = DALIClassificationIterator(
        pipe, size=int(pipe.epoch_size("Reader") / args.world_size))

    model = []

    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")

    criterion = nn.CrossEntropyLoss().to(device)  # works on CPU as well as GPU
    model = [None] * len(PRIMES)
    optimizer = [None] * len(PRIMES)
    scheduler = [None] * len(PRIMES)

    if args.arch in model_names:
        for i, p in enumerate(PRIMES):
            model[i] = models.__dict__[args.arch](num_classes=p)
            if not args.checkpoint:
                model_type = ''.join([i for i in args.arch if not i.isdigit()])
                model_url = models.__dict__[model_type].model_urls[args.arch]
                pre_trained = model_zoo.load_url(model_url)
                pre_trained['fc.weight'] = pre_trained['fc.weight'][:p, :]
                pre_trained['fc.bias'] = pre_trained['fc.bias'][:p]
                model[i].load_state_dict(pre_trained)
            elif args.checkpoint:
                print('Resuming training from epoch {}, loading {}...'.format(
                    args.resume_epoch, args.checkpoint))
                check_file = os.path.join(args.data, args.checkpoint)
                model[i].load_state_dict(
                    torch.load(check_file,
                               map_location=lambda storage, loc: storage)
                    ['state_' + str(p)])
            if torch.cuda.is_available():
                model[i] = model[i].cuda(device)
                if args.fp16:
                    model[i] = network_to_half(model[i])
        for i, p in enumerate(PRIMES):
            optimizer[i] = optim.SGD(model[i].parameters(),
                                     lr=args.lr,
                                     momentum=0.9,
                                     weight_decay=args.weight_decay)
            if args.checkpoint:
                model[i].load_state_dict(
                    torch.load(args.checkpoint,
                               map_location=lambda storage, loc: storage.cuda(
                                   args.gpu))['state_' + str(p)])
            scheduler[i] = optim.lr_scheduler.StepLR(optimizer[i],
                                                     step_size=args.step_size,
                                                     gamma=0.1)
            for _ in range(args.resume_epoch):
                scheduler[i].step()
    else:
        if args.checkpoint:
            model = mynet.__dict__[args.arch](pretrained=None,
                                              num_classes=PRIMES)
            model.load_state_dict(
                torch.load(args.checkpoint,
                           map_location=lambda storage, loc: storage)['state'])
        else:
            model = mynet.__dict__[args.arch](pretrained='imagenet',
                                              num_classes=PRIMES)

        if torch.cuda.is_available():
            model = model.cuda(device)
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=0.9,
                              weight_decay=args.weight_decay)
        if args.checkpoint and args.resume_epoch < 0:
            optimizer.load_state_dict(
                torch.load(args.checkpoint,
                           map_location=lambda storage, loc: storage)['optim'])
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=args.step_size,
                                              gamma=0.1)
        for i in range(args.resume_epoch):
            scheduler.step()
        if args.fp16:
            model = network_to_half(model)
            optimizer = FP16_Optimizer(
                optimizer,
                static_loss_scale=args.static_loss_scale,
                dynamic_loss_scale=args.dynamic_loss_scale)

    best_acc = 0
    for epoch in range(args.resume_epoch, args.epochs):
        print('Epoch {}/{}'.format(epoch + 1, args.epochs))
        print('-' * 5)
        for phase in ['train', 'val']:
            if args.arch in model_names:
                if phase == 'train':
                    for i, p in enumerate(PRIMES):
                        scheduler[i].step()
                        model[i].train()
                else:
                    for i, p in enumerate(PRIMES):
                        model[i].eval()
            else:
                if phase == 'train':
                    scheduler.step()
                    model.train()
                else:
                    model.eval()

            num = 0
            csum = 0
            running_loss = 0.0
            cur = 0
            cur_loss = 0.0

            print(phase, ':')
            end = time.time()
            for ib, data in enumerate(dataloader[phase]):
                data_time = time.time() - end
                inputs = data[0]["data"].to(device, non_blocking=True)
                targets = data[0]["label"].squeeze().to(device,
                                                        non_blocking=True)
                if args.arch in model_names:
                    for i, p in enumerate(PRIMES):
                        optimizer[i].zero_grad()
                else:
                    optimizer.zero_grad()

                batch_size = targets.size(0)
                correct = torch.ones((batch_size),
                                     dtype=torch.uint8).to(device)
                with torch.set_grad_enabled(phase == 'train'):
                    if args.arch in model_names:
                        for i, p in enumerate(PRIMES):
                            outputs = model[i](inputs)
                            targetp = (targets % p).long()
                            loss = criterion(outputs, targetp)
                            if phase == 'train':
                                #loader_len = int(dataloader[phase]._size / args.batch_size)
                                #adjust_learning_rate(optimizer[i], epoch,ib+1, loader_len)
                                if args.fp16:
                                    optimizer[i].backward(loss)
                                else:
                                    loss.backward()
                                optimizer[i].step()
                            _, pred = outputs.topk(1, 1, True, True)
                            correct = correct.mul(pred.view(-1).eq(targetp))
                    elif args.arch in mynet.__dict__:
                        outputs = model(inputs)
                        loss = 0.0
                        for i, p in enumerate(PRIMES):
                            targetp = (targets % p).long()
                            loss += criterion(outputs[i], targetp)
                            _, pred = outputs[i].topk(1, 1, True, True)
                            correct = correct.mul(pred.view(-1).eq(targetp))
                        if phase == 'train':
                            if args.fp16:
                                optimizer.backward(loss)
                            else:
                                loss.backward()
                            optimizer.step()

                num += batch_size
                csum += correct.float().sum(0)
                acc1 = csum / num * 100
                running_loss += loss.item() * batch_size
                average_loss = running_loss / num
                cur += batch_size
                cur_loss += loss.item() * batch_size
                cur_avg_loss = cur_loss / cur
                batch_time = time.time() - end
                end = time.time()
                if (ib + 1) % args.print_freq == 0:
                    print(
                        '{} L:{:.4f} correct:{:.0f} acc1:{:.4f} data:{:.2f}s batch:{:.2f}s'
                        .format(num, cur_avg_loss, csum, acc1, data_time,
                                batch_time))
                    cur = 0
                    cur_loss = 0.0

            print('------SUMMARY:', phase, '---------')
            print('E:{} L:{:.4f} correct:{:.0f} acc1: {:.4f} Time: {:.4f}s'.
                  format(epoch, average_loss, csum, acc1, batch_time))
            dataloader[phase].reset()
        '''save the state'''
        save_file = os.path.join(args.save_folder,
                                 'epoch_' + str(epoch + 1) + '.pth')
        save_dict = {
            'epoch': epoch + 1,
            'acc': acc1,
            'arch': args.arch,
        }
        if args.arch in model_names:
            for i, p in enumerate(PRIMES):
                save_dict['state_' + str(p)] = model[i].state_dict()
                save_dict['optim_' + str(p)] = optimizer[i].state_dict()
        elif args.arch in mynet.__dict__:
            save_dict['state'] = model.state_dict()
            save_dict['optim'] = optimizer.state_dict()
            save_dict['primes'] = PRIMES
        torch.save(save_dict, save_file)
        if acc1 > best_acc:
            shutil.copyfile(save_file, 'model_best.pth.tar')
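Each head above only predicts the label modulo one prime from PRIMES, and a sample counts as correct when every residue matches. At inference time the full label can in principle be recovered from the residues with the Chinese Remainder Theorem; a sketch, assuming the product of PRIMES exceeds the number of classes:

def crt_recover(residues, primes):
    """Recover x (0 <= x < prod(primes)) given x % p for each prime p."""
    M = 1
    for p in primes:
        M *= p
    x = 0
    for r, p in zip(residues, primes):
        Mp = M // p
        x += r * Mp * pow(Mp, p - 2, p)   # Fermat inverse of Mp mod p (p prime)
    return x % M

# e.g. crt_recover([3, 1], [5, 7]) -> 8, since 8 % 5 == 3 and 8 % 7 == 1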
# Sagittal
glob_string = ('sagittal*.png')

# create list of pngs
all_sag_pngs = glob.glob(os.path.join(arguments.png_dir, glob_string))

# SYNESTHESIA FIGURE 2
pngs_0 = [png for png in all_sag_pngs if '0.png' in png]
syn_sag_pngs = pngs_0[3:6] + pngs_0[(-6):(-3)]

pngs_0_5 = [png for png in all_sag_pngs if '0.png' in png or '5.png' in png]

n = float(len(pngs_0_5))
half = np.floor(n / 2)
# Right hemisphere sagittal
right_pngs = pngs_0_5[4:np.int(half)]
# Left hemisphere sagittal
left_pngs = pngs_0_5[np.int(half):(-4)]

whole_pngs = pngs_0_5[2:(-2):2]

# Axial
glob_string = ('axial*png')
all_ax_pngs = glob.glob(os.path.join(arguments.png_dir, glob_string))

pngs_0 = [png for png in all_ax_pngs if '0.png' in png]

syn_ax_pngs = pngs_0[4:11]

# Coronal
#glob_string = ('coronal*png')
Example #40
0
def get_id(data):
    # Extract the numeric file ID from a Windows-style path like 'dir\\123.jpg'
    return np.int(data.split("\\")[1].split(".")[0])
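For instance, assuming dataset paths of the form 'train\\0123.jpg' (a hypothetical layout):

get_id("train\\0123.jpg")   # -> 123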
Example #41
0
def main():

    # First check to make sure python 2.7 is being used
    version = platform.python_version()
    verlist = version.split('.')

    if not ((verlist[0] == '2') and (verlist[1] == '7') and (int(verlist[2]) >= 15)):
        print("The PARTEH driver must be run with Python 2.7")
        print(" with patch version >= 15.")
        print(" Your version is {}".format(version))
        print(" exiting...")
        sys.exit(2)

    parser = argparse.ArgumentParser(description='Parse command line arguments to this script.')
    parser.add_argument('--xml-file', dest='xml_file', type=str, \
                        help="The path to the XML file controlling this simulation.", required=True)
    args = parser.parse_args()

    xml_file = args.xml_file

    # This loads the dictionaries and lists of objects that define the
    # variables, parameters and forms that govern the system of
    # equations and its solution
    [time_control, fates_cdl_file, driver_params, boundary_method,use_pfts] = load_xml(xml_file)

    num_plants = len(use_pfts)
    

    # -------------------------------------------------------------------------------------
    # Check through the fortran Code we are coupling with, determine the list of parameters
    # that we need.
    # -------------------------------------------------------------------------------------

    var_list = GetParamsInFile('../../parteh/PRTParametersMod.F90')


    # Now look through EDPftvarcon.F90 to determine the variable name in file
    # that is associated with the variable pointer

    var_list = GetPFTParmFileSymbols(var_list,'../../parteh/PRTParamsFATESMod.F90')
    

    # This variable is not added to the list we send to Fortran; it is
    # only used for the initial condition on the Python side
    var_list.append(f90_param_type('hgt_min','fates_recruit_hgt_min',False))
    


    # -------------------------------------------------------------
    # We can now cross reference our list of parameters against
    # the parameter file. This will create a new list of parameters
    # however in the form of a dictionary. This dictionary of
    # entries is accessible by its symbol name, and will also
    # read in and store the actual parameter values from the file.
    # -------------------------------------------------------------

    dims = CDLParseDims(fates_cdl_file)

    fates_params = {}
    for elem in var_list:
        fates_params[elem.var_sym] = CDLParseParam(fates_cdl_file,cdl_param_type(elem.var_name,elem.in_f90),dims)
    print('Finished loading PFT parameters')

    
    
    num_pfts   = dims['fates_pft']
    num_organs = dims['fates_prt_organs']

    # Initialize the PARTEH instance
    iret=f90_fates_partehwrap_obj.__fatespartehwrapmod_MOD_spmappyset()

    # Allocate the PFT and ORGAN arrays  (leaf+root+sap+store+structure+repro = 6)

    WrapPFTAllocArbitrary([val for key,val in dims.iteritems()])

    
    # Set the phenology type
    phen_type = []
    for iplnt in range(num_plants):

        ipft = use_pfts[iplnt]
        evergreen        = np.int(fates_params['evergreen'].data[ipft])
        cold_deciduous   = np.int(fates_params['season_decid'].data[ipft])
        stress_deciduous = np.int(fates_params['stress_decid'].data[ipft])
        if(evergreen==1):
            if(cold_deciduous==1):
                print("Poorly defined phenology mode 0")
                exit(2)
            if(stress_deciduous==1):
                print("Poorly defined phenology mode 1")
                exit(2)
            phen_type.append(1)
        elif(cold_deciduous==1):
            if(evergreen==1):
                print("Poorly defined phenology mode 2")
                exit(2)
            if(stress_deciduous==1):
                print("Poorly defined phenology mode 3")
                exit(2)
            phen_type.append(2)
        elif(stress_deciduous==1):
            if(evergreen==1):
                print("Poorly defined phenology mode 4")
                exit(2)
            if(cold_deciduous==1):
                print("Poorly defined phenology mode 5")
                exit(2)
            phen_type.append(3)
        else:
            print("Unknown phenology mode ? {} {} {}".format(evergreen,cold_deciduous,stress_deciduous))
            exit(2)
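The chain above tests each pair of flags separately; since the three flags are meant to be mutually exclusive, an equivalent and more compact validation (a sketch, not the driver's own code) would be:

        flags = (evergreen, cold_deciduous, stress_deciduous)
        if sum(flags) != 1:
            print("Poorly defined phenology mode: {} {} {}".format(*flags))
            exit(2)
        phen_type.append(flags.index(1) + 1)   # 1=evergreen, 2=cold, 3=stress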


    # -------------------------------------------------------------------------
    # Loop through all parameters in the "fates_params"
    # dictionary, send their data to the FORTRAN code
    # ------------------------------------------------------------------------

    # Loop through parameters
    for parm_key, parm_obj in fates_params.iteritems():

        # Loop through their dimensions
        # 2D case
        if(parm_obj.in_f90):
            if(parm_obj.ndims>1):
                for idx0 in range(parm_obj.dim_sizelist[0]):
                    for idx1 in range(parm_obj.dim_sizelist[1]):
                        iret = f90_fates_unitwrap_obj.__prtparamsgeneric_MOD_prtparamspyset(byref(c_double(parm_obj.data[idx0,idx1])), \
                                                                                            byref(c_int(0)), \
                                                                                            byref(c_int(idx1+1)), \
                                                                                            byref(c_int(idx0+1)), \
                                                                                            c_char_p(parm_obj.symbol), \
                                                                                            c_long(len(parm_obj.symbol )))

            else:
                idx1=0
                for idx0 in range(parm_obj.dim_sizelist[0]):
                    iret = f90_fates_unitwrap_obj.__prtparamsgeneric_MOD_prtparamspyset(byref(c_double(parm_obj.data[idx0])), \
                                                                                        byref(c_int(0)), \
                                                                                        byref(c_int(idx0+1)), \
                                                                                        byref(c_int(idx1+1)), \
                                                                                        c_char_p(parm_obj.symbol), \
                                                                                        c_long(len(parm_obj.symbol )))



    # Allocate the cohort array (we create one cohort per PFT)
    iret=f90_fates_cohortwrap_obj.__fatescohortwrapmod_MOD_cohortinitalloc(byref(c_int(num_plants)))

   
    
    for iplnt in range(num_plants):
        ipft = use_pfts[iplnt]
        hgt_min = np.float(fates_params['hgt_min'].data[ipft])
        init_canopy_trim = 1.0
        iret=f90_fates_cohortwrap_obj.__fatescohortwrapmod_MOD_cohortmodeset(byref(c_int(ipft)), \
                                                                             byref(c_int(int(driver_params['parteh_model'].param_vals[ipft]))))
        iret=f90_fates_cohortwrap_obj.__fatescohortwrapmod_MOD_cohortpyset(byref(c_int(ipft)), \
                                                                           byref(c_double(hgt_min)), \
                                                                           byref(c_double(init_canopy_trim)))


    # Initialize diagnostics
    diagnostics = []
    for iplnt in range(num_plants):
        ipft = use_pfts[iplnt]
        diagnostics.append(PartehTypes.diagnostics_type())


    # --------------------------------------------------------------------------------
    # Time Initialization
    # --------------------------------------------------------------------------------
    time_control.ResetTime()

    # --------------------------------------------------------------------------------
    # Time integration (outer) loop
    # --------------------------------------------------------------------------------
    while (time_control.sim_complete != True):

        print('Simulating Date: {}'.format(time_control.datetime.item()))

        # Start the integration substep loop
        endtime = time_control.datetime+np.timedelta64(int(time_control.dt_fullstep),'s')

        for iplnt in range(num_plants):

            ipft = use_pfts[iplnt]
            # Generate the boundary condition for the current time-step
            # ---------------------------------------------------------------------------

            # First lets query this pft-cohort and return a smattering of indices

            leaf_area  = c_double(0.0)
            agb        = c_double(0.0)
            crown_area = c_double(0.0)
            dbh        = c_double(0.0)
            target_leaf_c = c_double(-9.9)
            leaf_c     = c_double(0.0)
            fnrt_c     = c_double(0.0)
            sapw_c     = c_double(0.0)
            store_c    = c_double(0.0)
            struct_c   = c_double(0.0)
            repro_c    = c_double(0.0)
            root_c_exudate = c_double(0.0)
            growth_resp    = c_double(0.0)
            leaf_cturn     = c_double(0.0)
            fnrt_cturn     = c_double(0.0)
            sapw_cturn     = c_double(0.0)
            store_cturn    = c_double(0.0)
            struct_cturn   = c_double(0.0)

            leaf_n     = c_double(0.0)
            fnrt_n     = c_double(0.0)
            sapw_n     = c_double(0.0)
            store_n    = c_double(0.0)
            struct_n   = c_double(0.0)
            repro_n    = c_double(0.0)
            root_n_exudate = c_double(0.0)
            leaf_nturn     = c_double(0.0)
            fnrt_nturn     = c_double(0.0)
            sapw_nturn     = c_double(0.0)
            store_nturn    = c_double(0.0)
            struct_nturn   = c_double(0.0)

            leaf_p     = c_double(0.0)
            fnrt_p     = c_double(0.0)
            sapw_p     = c_double(0.0)
            store_p    = c_double(0.0)
            struct_p   = c_double(0.0)
            repro_p    = c_double(0.0)
            root_p_exudate = c_double(0.0)
            leaf_pturn     = c_double(0.0)
            fnrt_pturn     = c_double(0.0)
            sapw_pturn     = c_double(0.0)
            store_pturn    = c_double(0.0)
            struct_pturn   = c_double(0.0)

            iret=f90_fates_cohortwrap_obj.__fatescohortwrapmod_MOD_wrapqueryvars(byref(c_int(ipft)), \
                                                                                 byref(leaf_area), \
                                                                                 byref(crown_area), \
                                                                                 byref(agb), \
                                                                                 byref(store_c),\
                                                                                 byref(target_leaf_c))



            doy = time_control.datetime.astype(object).timetuple().tm_yday



            # Call the phenology module; if there are no leaves, NPP should be zero
            flush_c,drop_frac_c,leaf_status = SyntheticBoundaries.DeciduousPhenology(doy, \
                                                                                     target_leaf_c.value, \
                                                                                     store_c.value, phen_type[iplnt])



            if(boundary_method=="DailyCFromCArea"):

                presc_npp_p1     = driver_params['fates_prescribed_npp_p1'].param_vals[iplnt]

                net_daily_c = SyntheticBoundaries.DailyCFromCArea(presc_npp_p1, \
                                                                  crown_area.value, \
                                                                  phen_type[iplnt], \
                                                                  leaf_status)
                net_daily_n = 0.0
                net_daily_p = 0.0
                r_maint_demand = 0.0


            elif(boundary_method=="DailyCNPFromCArea"):

                presc_npp_p1   = driver_params['fates_prescribed_npp_p1'].param_vals[iplnt]
                presc_nflux_p1 = driver_params['fates_prescribed_nflux_p1'].param_vals[iplnt]
                presc_pflux_p1 = driver_params['fates_prescribed_pflux_p1'].param_vals[iplnt]

                net_daily_c, net_daily_n, net_daily_p = SyntheticBoundaries.DailyCNPFromCArea(presc_npp_p1, \
                                                                                              presc_nflux_p1, \
                                                                                              presc_pflux_p1, \
                                                                                              crown_area.value, \
                                                                                              phen_type[iplnt], \
                                                                                              leaf_status)
                r_maint_demand = 0.0


            elif(boundary_method=="DailyCNPFromStorageSinWaveNoMaint"):

                presc_npp_amp  = driver_params['fates_prescribed_npp_amp'].param_vals[iplnt]
                presc_npp_p1   = driver_params['fates_prescribed_npp_p1'].param_vals[iplnt]
                presc_nflux_p1 = driver_params['fates_prescribed_nflux_p1'].param_vals[iplnt]
                presc_pflux_p1 = driver_params['fates_prescribed_pflux_p1'].param_vals[iplnt]

                net_daily_c, net_daily_n, net_daily_p = SyntheticBoundaries.DailyCNPFromStorageSinWave(doy,\
                                                                                 store_c.value,\
                                                                                 presc_npp_p1, \
                                                                                 presc_nflux_p1, \
                                                                                 presc_pflux_p1, \
                                                                                 crown_area.value, \
                                                                                 presc_npp_amp, \
                                                                                 phen_type[iplnt], \
                                                                                 leaf_status )
                r_maint_demand = 0.0

            else:
                print("An unknown boundary method was specified\n")
                print("type: {} ? ... quitting.".format(boundary_method))
                exit()






            # This function will pass in all boundary conditions, some will be dummy arguments
            init_canopy_trim = 1.0
            iret=f90_fates_cohortwrap_obj.__fatescohortwrapmod_MOD_wrapdailyprt(byref(c_int(ipft)), \
                                                                                byref(c_double(net_daily_c)), \
                                                                                byref(c_double(init_canopy_trim)), \
                                                                                byref(c_double(flush_c)), \
                                                                                byref(c_double(drop_frac_c)), \
                                                                                byref(c_int(leaf_status)), \
                                                                                byref(c_double(net_daily_n)), \
                                                                                byref(c_double(net_daily_p)), \
                                                                                byref(c_double(r_maint_demand)))



            # This function will retrieve diagnostics
            iret=f90_fates_cohortwrap_obj.__fatescohortwrapmod_MOD_wrapquerydiagnostics(byref(c_int(ipft)),  \
                                                                                        byref(dbh),     \
                                                                                        byref(leaf_c),  \
                                                                                        byref(fnrt_c),  \
                                                                                        byref(sapw_c),  \
                                                                                        byref(store_c), \
                                                                                        byref(struct_c), \
                                                                                        byref(repro_c), \
                                                                                        byref(leaf_cturn),  \
                                                                                        byref(fnrt_cturn),  \
                                                                                        byref(sapw_cturn),  \
                                                                                        byref(store_cturn), \
                                                                                        byref(struct_cturn), \
                                                                                        byref(leaf_n),  \
                                                                                        byref(fnrt_n),  \
                                                                                        byref(sapw_n),  \
                                                                                        byref(store_n), \
                                                                                        byref(struct_n), \
                                                                                        byref(repro_n), \
                                                                                        byref(leaf_nturn),  \
                                                                                        byref(fnrt_nturn),  \
                                                                                        byref(sapw_nturn),  \
                                                                                        byref(store_nturn), \
                                                                                        byref(struct_nturn), \
                                                                                        byref(leaf_p),  \
                                                                                        byref(fnrt_p),  \
                                                                                        byref(sapw_p),  \
                                                                                        byref(store_p), \
                                                                                        byref(struct_p), \
                                                                                        byref(repro_p), \
                                                                                        byref(leaf_pturn),  \
                                                                                        byref(fnrt_pturn),  \
                                                                                        byref(sapw_pturn),  \
                                                                                        byref(store_pturn), \
                                                                                        byref(struct_pturn), \
                                                                                        byref(crown_area), \
                                                                                        byref(root_c_exudate), \
                                                                                        byref(root_n_exudate), \
                                                                                        byref(root_p_exudate), \
                                                                                        byref(growth_resp))


            diagnostics[iplnt].dates.append(time_control.datetime.astype(datetime))
            diagnostics[iplnt].dbh.append(dbh.value)
            diagnostics[iplnt].leaf_c.append(leaf_c.value)
            diagnostics[iplnt].fnrt_c.append(fnrt_c.value)
            diagnostics[iplnt].sapw_c.append(sapw_c.value)
            diagnostics[iplnt].store_c.append(store_c.value)
            diagnostics[iplnt].struct_c.append(struct_c.value)
            diagnostics[iplnt].repro_c.append(repro_c.value)
            diagnostics[iplnt].leaf_cturn.append(leaf_cturn.value)
            diagnostics[iplnt].fnrt_cturn.append(fnrt_cturn.value)
            diagnostics[iplnt].sapw_cturn.append(sapw_cturn.value)
            diagnostics[iplnt].store_cturn.append(store_cturn.value)
            diagnostics[iplnt].struct_cturn.append(struct_cturn.value)
            diagnostics[iplnt].dailyc.append(net_daily_c)
            diagnostics[iplnt].crown_area.append(crown_area.value)

            diagnostics[iplnt].growth_resp.append(growth_resp.value)

            diagnostics[iplnt].leaf_n.append(leaf_n.value)
            diagnostics[iplnt].fnrt_n.append(fnrt_n.value)
            diagnostics[iplnt].sapw_n.append(sapw_n.value)
            diagnostics[iplnt].store_n.append(store_n.value)
            diagnostics[iplnt].struct_n.append(struct_n.value)
            diagnostics[iplnt].repro_n.append(repro_n.value)
            diagnostics[iplnt].leaf_nturn.append(leaf_nturn.value)
            diagnostics[iplnt].fnrt_nturn.append(fnrt_nturn.value)
            diagnostics[iplnt].sapw_nturn.append(sapw_nturn.value)
            diagnostics[iplnt].store_nturn.append(store_nturn.value)
            diagnostics[iplnt].struct_nturn.append(struct_nturn.value)

            diagnostics[iplnt].leaf_p.append(leaf_p.value)
            diagnostics[iplnt].fnrt_p.append(fnrt_p.value)
            diagnostics[iplnt].sapw_p.append(sapw_p.value)
            diagnostics[iplnt].store_p.append(store_p.value)
            diagnostics[iplnt].struct_p.append(struct_p.value)
            diagnostics[iplnt].repro_p.append(repro_p.value)
            diagnostics[iplnt].leaf_pturn.append(leaf_pturn.value)
            diagnostics[iplnt].fnrt_pturn.append(fnrt_pturn.value)
            diagnostics[iplnt].sapw_pturn.append(sapw_pturn.value)
            diagnostics[iplnt].store_pturn.append(store_pturn.value)
            diagnostics[iplnt].struct_pturn.append(struct_pturn.value)

            diagnostics[iplnt].root_c_exudate.append(root_c_exudate.value)
            diagnostics[iplnt].root_n_exudate.append(root_n_exudate.value)
            diagnostics[iplnt].root_p_exudate.append(root_p_exudate.value)


        # We don't have a fancy time integrator so we simply update with
        # a full step

        time_control.UpdateTime()

        # ---------------------------------------------------------------------------
        # Timestep complete, check the time
        # ---------------------------------------------------------------------------
        #        time_control.CheckFullStepTime(endtime)


#    fig0, ax = plt.subplots()
#    for ipft in range(parameters.num_pfts):
#        ax.plot_date(diagnostics[0].dates,diagnostics[0].dbh)
#        ax.set_xlim(diagnostics[0].dates[0],diagnostics[0].dates[-1])

#    plt.show()
#    code.interact(local=locals())


    linestyles  = ['-','-.','--','-',':','-.','--',':','-','-.','--',':' ]




    fig1, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8)) = plt.subplots(2, 4 , sharex='col') #, sharey='row')
    fig1.set_size_inches(12, 6)
    for iplnt in range(num_plants):
        ipft = use_pfts[iplnt]
        ax1.plot_date(diagnostics[iplnt].dates,diagnostics[iplnt].struct_c,linestyles[iplnt],label='{}'.format(iplnt))
    ax1.set_title('Structural\n Carbon')
    ax1.legend(loc='upper left')
    ax1.set_ylabel('[kg C]')
    ax1.grid(True)

    for iplnt in range(num_plants):
        ax2.plot_date(diagnostics[iplnt].dates,diagnostics[iplnt].leaf_c,linestyles[iplnt])
    ax2.set_title('Leaf\n Carbon')
    ax2.grid(True)

    for iplnt in range(num_plants):
        ax3.plot_date(diagnostics[iplnt].dates,diagnostics[iplnt].fnrt_c,linestyles[iplnt])
    ax3.set_title('Fineroot\n Carbon')
    ax3.grid(True)

    for iplnt in range(num_plants):
        ax4.plot_date(diagnostics[iplnt].dates,diagnostics[iplnt].sapw_c,linestyles[iplnt])
    ax4.set_title('Sapwood\n Carbon')
    ax4.set_ylabel('[kg C]')
    ax4.grid(True)

    for iplnt in range(num_plants):
        ax5.plot_date(diagnostics[iplnt].dates,diagnostics[iplnt].store_c,linestyles[iplnt])
    ax5.set_title('Storage\n Carbon')
    ax5.set_xlabel('Year')
    ax5.grid(True)

    for iplnt in range(num_plants):
        ax6.plot_date(diagnostics[iplnt].dates,diagnostics[iplnt].repro_c,linestyles[iplnt])
    ax6.set_title('Integrated\n Reproductive\n Carbon')
    ax6.set_xlabel('Year')
    ax6.grid(True)

    for iplnt in range(num_plants):
        ax7.plot_date(diagnostics[iplnt].dates,np.cumsum(diagnostics[iplnt].root_c_exudate),linestyles[iplnt])
    ax7.set_title('Integrated\n Exudated\n Carbon')
    ax7.set_xlabel('Year')
    ax7.grid(True)

    for iplnt in range(num_plants):
        ax8.plot_date(diagnostics[iplnt].dates,np.cumsum(diagnostics[iplnt].growth_resp),linestyles[iplnt])
    ax8.set_title('Integrated\n Growth\n Respiration')
    ax8.set_xlabel('Year')
    ax8.grid(True)





    plt.tight_layout()

    #  Plant proportions
    #  ---------------------------------------------------------------------------------
    fig2, ( (ax1,ax2),(ax3,ax4) ) = plt.subplots(2,2)
    fig2.set_size_inches(7, 6)
    for iplnt in range(num_plants):
        ipft = use_pfts[iplnt]
        ax1.plot_date(diagnostics[iplnt].dates,diagnostics[iplnt].dbh,linestyles[iplnt],label='{}'.format(iplnt))
    ax1.set_xlabel('Date')
    ax1.set_title('DBH [cm]')
    ax1.legend(loc='upper left')
    ax1.grid(True)

    for iplnt in range(num_plants):
        ax2.plot_date(diagnostics[iplnt].dates,diagnostics[iplnt].crown_area,linestyles[iplnt])
    ax2.set_xlabel('Date')
    ax2.set_title('Crown Area [m2]')
    ax2.grid(True)

    for iplnt in range(num_plants):
        ax3.plot(diagnostics[iplnt].dbh,1000.0*np.array(diagnostics[iplnt].dailyc))

    ax3.set_xlabel('DBH [cm]')
    ax3.set_title('Daily Carbon Gain [g]')
    ax3.grid(True)

    for iplnt in range(num_plants):
        ax4.plot(diagnostics[iplnt].dbh,diagnostics[iplnt].crown_area)
    ax4.set_xlabel('DBH [cm]')
    ax4.set_title('Crown Area [m2]')
    ax4.grid(True)







    plt.tight_layout()


    # Error (bias)
    # ---------------------------------------------------------------------------------

    fig4 = plt.figure()
    for iplnt in range(num_plants):
        ipft = use_pfts[iplnt]
        total_plant_carbon0 = np.array(diagnostics[iplnt].struct_c[0]) + \
                              np.array(diagnostics[iplnt].leaf_c[0])   + \
                              np.array(diagnostics[iplnt].fnrt_c[0])   + \
                              np.array(diagnostics[iplnt].sapw_c[0])   + \
                              np.array(diagnostics[iplnt].store_c[0])  + \
                              np.array(diagnostics[iplnt].repro_c[0])

        total_plant_carbon = np.array(diagnostics[iplnt].struct_c) + \
                             np.array(diagnostics[iplnt].leaf_c)   + \
                             np.array(diagnostics[iplnt].fnrt_c)   + \
                             np.array(diagnostics[iplnt].sapw_c)   + \
                             np.array(diagnostics[iplnt].store_c)  + \
                             np.array(diagnostics[iplnt].repro_c)

        integrated_plant_turnover = np.cumsum(diagnostics[iplnt].struct_cturn) + \
                                    np.cumsum(diagnostics[iplnt].leaf_cturn) +  \
                                    np.cumsum(diagnostics[iplnt].fnrt_cturn) +  \
                                    np.cumsum(diagnostics[iplnt].sapw_cturn) +  \
                                    np.cumsum(diagnostics[iplnt].store_cturn)


        plt.plot(np.cumsum(diagnostics[iplnt].dailyc), \
                 (np.cumsum(diagnostics[iplnt].dailyc) - \
                            (total_plant_carbon + \
                             integrated_plant_turnover - \
                             total_plant_carbon0 ) ) / total_plant_carbon )

    plt.xlabel('Integrated Daily Carbon Gain [kg]')
    plt.ylabel('Integrated Bias [fraction of plant carbon]')
    plt.grid(True)

    # Plot out the input fluxes

    fig5 = plt.figure()
    for iplnt in range(num_plants):
        ipft = use_pfts[iplnt]
        plt.plot_date(diagnostics[iplnt].dates,diagnostics[iplnt].dailyc,linestyles[iplnt],label='{}'.format(iplnt))

    plt.xlabel('Date')
    plt.ylabel('Daily Carbon Flux')
    plt.grid(True)
    plt.legend(loc='upper left')


    # Special Focus plots for a PFT of interest

    figs = {}
    for iplnt in range(num_plants):
        ipft = use_pfts[iplnt]
        figs[iplnt], (ax1, ax2, ax3) = plt.subplots(1, 3)

        figs[iplnt].set_size_inches(8, 4)
        ax1.stackplot(np.cumsum(diagnostics[iplnt].dailyc), \
                      np.array(diagnostics[iplnt].struct_c)+np.cumsum(diagnostics[iplnt].struct_cturn), \
                      np.array(diagnostics[iplnt].leaf_c)+np.cumsum(diagnostics[iplnt].leaf_cturn), \
                      np.array(diagnostics[iplnt].fnrt_c)+np.cumsum(diagnostics[iplnt].fnrt_cturn), \
                      np.array(diagnostics[iplnt].sapw_c)+np.cumsum(diagnostics[iplnt].sapw_cturn), \
                      np.array(diagnostics[iplnt].store_c)+np.cumsum(diagnostics[iplnt].store_cturn), \
                      np.array(diagnostics[iplnt].repro_c), \
                      labels = ["Struct","Leaf","FRoot","Sapw","Storage","Repro"])
        ax1.set_title('Allocated Mass \nby Pool [kg]')
        ax1.grid(True)

        ax2.stackplot(np.cumsum(diagnostics[iplnt].dailyc), \
                      np.cumsum(diagnostics[iplnt].struct_cturn), \
                      np.cumsum(diagnostics[iplnt].leaf_cturn), \
                      np.cumsum(diagnostics[iplnt].fnrt_cturn),  \
                      np.cumsum(diagnostics[iplnt].sapw_cturn), \
                      np.cumsum(diagnostics[iplnt].store_cturn), \
                      np.array(diagnostics[iplnt].repro_c), \
                      labels = ["Struct","Leaf","FRoot","Sapw","Storage","Repro"] )
        ax2.legend(loc=2)
        ax2.grid(True)
        ax2.set_xlabel('Integrated Daily\n Carbon Gain [kg]')
        ax2.set_title('Integrated Turnover\n by Pool [kg]')


        #code.interact(local=locals())
        npp_leaf = np.array(diagnostics[iplnt].leaf_c[1:]) - \
                   np.array(diagnostics[iplnt].leaf_c[0:-1]) + \
                   np.array(diagnostics[iplnt].leaf_cturn[1:])
        npp_fnrt = np.array(diagnostics[iplnt].fnrt_c[1:]) - \
                   np.array(diagnostics[iplnt].fnrt_c[0:-1]) + \
                   np.array(diagnostics[iplnt].fnrt_cturn[1:])
        npp_sapw = np.array(diagnostics[iplnt].sapw_c[1:]) - \
                   np.array(diagnostics[iplnt].sapw_c[0:-1]) + \
                   np.array(diagnostics[iplnt].sapw_cturn[1:])
        npp_store = np.array(diagnostics[iplnt].store_c[1:]) - \
                    np.array(diagnostics[iplnt].store_c[0:-1]) + \
                    np.array(diagnostics[iplnt].store_cturn[1:])
        npp_struct = np.array(diagnostics[iplnt].struct_c[1:]) - \
                     np.array(diagnostics[iplnt].struct_c[0:-1]) + \
                     np.array(diagnostics[iplnt].struct_cturn[1:])
        npp_repro = np.array(diagnostics[iplnt].repro_c[1:]) - \
                    np.array(diagnostics[iplnt].repro_c[0:-1])
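        # (added) Daily NPP routed to each pool = day-over-day change in that
        # pool's carbon plus the carbon the pool lost to turnover that day;
        # reproduction has no turnover term here.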

        ax3.stackplot(np.cumsum(diagnostics[iplnt].dailyc[1:]), \
                      npp_struct, npp_leaf, npp_fnrt, npp_sapw, npp_store,  npp_repro)

        ax3.grid(True)
        ax3.set_title('Daily NPP \nby Pool [kg]')

        plt.figtext(0.1,0.05,"Plant: {}".format(iplnt),bbox={'facecolor':'red', 'alpha':0.5, 'pad':10}, fontsize=15)


        plt.tight_layout()

    plt.show()

    print('\nSimulation Complete \nThank You Come Again')
Example #42
0
def scheduler(epoch):
    if epoch % 20 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.1)
        print("lr changed to {}".format(lr * 0.1))
    return K.get_value(model.optimizer.lr)
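
# Added note: the scheduler above divides the learning rate by 10 every 20
# epochs. It is written for Keras's LearningRateScheduler callback, so a
# plausible (hedged) way to use it once the model below is compiled is:
#
#   lr_cb = keras.callbacks.LearningRateScheduler(scheduler)
#   model.fit(x_train, y_train, epochs=100, callbacks=[lr_cb])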


x, y = process_data()
arr = np.arange(x.shape[0])
np.random.shuffle(arr)

x = x[arr]
y = y[arr]

ratio = np.int(0.8 * len(x))
x_train = x[:ratio]
y_train = y[:ratio]
x_test = x[ratio:]
y_test = y[ratio:]

y_train = keras.utils.to_categorical(y_train, num_classes=2)
y_test = keras.utils.to_categorical(y_test, num_classes=2)

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(w, h, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
Example #43
0
 def StartNewValidationFromTrainingSet(self):
     threadPortion = np.int(self.validationFromTrainingSetSize /
                            self.threadsNumber)
     self.InValidationFromTrainingThreadsCounter = [
         -1 + i * threadPortion for i in range(self.threadsNumber)
     ]  # start at -1 so the first index consumed is 0
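     # Worked example (added): with validationFromTrainingSetSize=100 and
     # threadsNumber=4, threadPortion = 25 and the counters start at
     # [-1, 24, 49, 74], i.e. one index before each thread's quarter of the set.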
Example #44
0
def detect_lane(warped, lane_hist, num_windows, margin_detect, margin_track,
                recenter_threshold, line_distance_threshold, output_dir,
                img_fname):
    """
    Detect lane and fit curve.
    :param warped: binary warped image
    :param lane_hist: lane history information, set None for separate images
    :param num_windows: number of sliding windows on Y
    :param margin_detect: margin on X for detecting
    :param margin_track: margin on X for tracking
    :param recenter_threshold: a tuple (t1, t2); if # pixels in the window < t1, recenter the window back to base;
                               if # pixels in the window > t2, recenter the window on the mean of the current window
    :param line_distance_threshold: threshold for line distance (upper bound of stdDev, lower bound of mean),
                                    for sanity check
    :param output_dir: output directory
    :param img_fname: output filename for this image, None for disabling output
    :return: tuple of (canvas, region, pixels, radius_of_curvature, distance_to_center)
    """
    debug = False

    def poly_value(yval, coeffs):
        return coeffs[0] * yval**2 + coeffs[1] * yval + coeffs[2]

    def radius_of_curvature(yval, coeffs):
        return ((1 + (2 * coeffs[0] * yval + coeffs[1])**2)**
                1.5) / np.absolute(2 * coeffs[0])
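    # Added note: this is the standard curvature formula for a curve x = f(y),
    #   R = (1 + (dx/dy)**2)**1.5 / |d2x/dy2|,
    # evaluated for the quadratic f(y) = A*y**2 + B*y + C, where
    # dx/dy = 2*A*y + B and d2x/dy2 = 2*A.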

    # Constant conversion rate from pixel to world distance
    ym_per_pixel = 30.0 / 720
    xm_per_pixel = 3.7 / 831

    # Create a base color image to draw on and visualize the result
    canvas = cv2.cvtColor(warped.astype(np.uint8), cv2.COLOR_GRAY2RGB)
    # Create a color image to draw the lane region
    region = cv2.cvtColor(
        np.zeros_like(warped).astype(np.uint8), cv2.COLOR_GRAY2RGB)
    # Create a color image to draw the lane pixels
    pixels = cv2.cvtColor(
        np.zeros_like(warped).astype(np.uint8), cv2.COLOR_GRAY2RGB)

    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    midpoint = np.int(warped.shape[1] / 2)
    ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])

    # 1. find pixels that correspond to a lane line
    nonzero_idx = {}
    if lane_hist is not None and lane_hist.use_tracking:
        # Tracking mode: search within a margin of previous fitted curve
        for which in ["left", "right"]:
            nonzeroy_fitx = poly_value(nonzeroy, lane_hist.fit_coeffs[which])
            nonzero_idx[which] = ((nonzerox > (nonzeroy_fitx - margin_track)) &
                                  (nonzerox < (nonzeroy_fitx + margin_track)))

            # Draw search window for the tracking mode
            last_fitx = poly_value(ploty, lane_hist.fit_coeffs[which])
            line_window1 = np.array(
                [np.transpose(np.vstack([last_fitx - margin_track, ploty]))])
            line_window2 = np.array([
                np.flipud(
                    np.transpose(np.vstack([last_fitx + margin_track, ploty])))
            ])
            line_pts = np.hstack((line_window1, line_window2))
            window_img = np.zeros_like(canvas)
            cv2.fillPoly(window_img, np.int_([line_pts]), (0, 255, 0))
            canvas = cv2.addWeighted(canvas, 1, window_img, 0.3, 0)
    else:
        # Detection mode: sliding window
        # X base position will be the starting point for the left and right lines
        xbase = {}
        if lane_hist is not None and lane_hist.use_xbase:
            # Use previously found x base positions
            xbase['left'] = lane_hist.xbase_position['left']
            xbase['right'] = lane_hist.xbase_position['right']
        else:
            # Find the x base position by taking a histogram of the bottom half of the image
            histogram = np.sum(warped[warped.shape[0] // 2:, :], axis=0)
            # Find the peak of the left and right halves of the histogram
            xbase['left'] = np.argmax(histogram[:midpoint])
            xbase['right'] = np.argmax(histogram[midpoint:]) + midpoint

        xcurrent = xbase
        for which in ["left", "right"]:
            all_good_idxs = []
            # Slide through the windows one by one
            window_height = np.int(warped.shape[0] / num_windows)
            for w in range(num_windows):
                # Identify window boundaries in x and y (and right and left)
                win_y_low = warped.shape[0] - (w + 1) * window_height
                win_y_high = warped.shape[0] - w * window_height
                win_x_low = xcurrent[which] - margin_detect
                win_x_high = xcurrent[which] + margin_detect

                # Draw the window for visualization
                cv2.rectangle(canvas, (win_x_low, win_y_low),
                              (win_x_high, win_y_high), (0, 255, 0),
                              thickness=2)

                # Identify the nonzero pixels in x and y within the window
                good_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high)
                             & (nonzerox >= win_x_low) &
                             (nonzerox < win_x_high)).nonzero()[0]
                # Append these indices to the lists
                all_good_idxs.append(good_inds)

                # If number of good pixels > the threshold, recenter next window on their mean position.
                if len(good_inds) > recenter_threshold[1]:
                    xcurrent[which] = np.int(np.mean(nonzerox[good_inds]))
                    debug and print(w, which, 'updated', xcurrent[which])
                # If number of good pixels < the threshold, recenter next window to base.
                elif len(good_inds) < recenter_threshold[0]:
                    xcurrent[which] = xbase[which]
                    debug and print(w, which, "reverted", xcurrent[which])
                else:
                    debug and print(w, which, 'remained', xcurrent[which])

            # Concatenate the arrays of indices
            nonzero_idx[which] = np.concatenate(all_good_idxs)

    # 2. Fit a polynomial
    coeff_pixel = {}
    coeff_world = {}
    fitx = {}
    for which, color in [("left", [255, 0, 0]), ("right", [0, 0, 255])]:
        # Extract line pixel positions
        lane_x = nonzerox[nonzero_idx[which]]
        lane_y = nonzeroy[nonzero_idx[which]]
        # Color the pixels for visualization
        canvas[lane_y, lane_x] = color
        pixels[lane_y, lane_x] = color

        # Fit a second order polynomial on pixel distance
        coeff_pixel[which] = np.polyfit(lane_y, lane_x, deg=2)
        fitx[which] = poly_value(ploty, coeff_pixel[which])

        # Fit a second order polynomial on world distance
        coeff_world[which] = np.polyfit(lane_y * ym_per_pixel,
                                        lane_x * xm_per_pixel,
                                        deg=2)

    # 3. Sanity check after both lane lines are fitted.
    xdistances = np.array(
        [lx - rx for lx, rx in zip(fitx['left'], fitx['right'])])
    stddev = np.std(xdistances)
    meandist = abs(np.mean(xdistances))
    debug and print("std_dev:", stddev, "mean:", meandist)

    def coeff_to_str(coeff):
        return "{:.5f} {:.3f} {:.1f}".format(coeff[0], coeff[1], coeff[2])

    cv2.putText(canvas,
                "StdDev of line distance: {:.1f}".format(stddev),
                org=(350, 50),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                thickness=3,
                color=(255, 255, 255))
    cv2.putText(canvas,
                "Mean   of line distance: {:.1f}".format(meandist),
                org=(350, 100),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                thickness=3,
                color=(255, 255, 255))
    cv2.putText(canvas,
                "Left  fit coeff: {}".format(coeff_to_str(
                    coeff_pixel['left'])),
                org=(350, 150),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                thickness=3,
                color=(255, 255, 255))
    cv2.putText(canvas,
                "Right fit coeff: {}".format(coeff_to_str(
                    coeff_pixel['right'])),
                org=(350, 200),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                thickness=3,
                color=(255, 255, 255))
    success = stddev < line_distance_threshold[
        0] and meandist > line_distance_threshold[1]
    if not success:
        cv2.putText(canvas,
                    "Sanity check failed",
                    org=(350, 250),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1,
                    thickness=3,
                    color=(200, 100, 100))

    # 4. Draw the region between left and right lane lines.
    region_pts = {}
    for which in ['left', 'right']:
        # Visualize the fitted curve on the canvas
        # The equivalent of matplotlib.pyplot.plot(X, Y)
        for x, y in zip(fitx[which], ploty):
            cv2.circle(canvas,
                       center=(int(x), int(y)),
                       radius=3,
                       color=[255, 255, 0],
                       thickness=2)

        # Generate the polygon points to draw the fitted lane region.
        pts = np.transpose(np.vstack([fitx[which], ploty]))
        if which == 'right':
            # So that when h-stacked later, the bottom left lane is adjacent to the bottom right lane (U-shape).
            pts = np.flipud(pts)
        region_pts[which] = np.array([pts])  # Don't miss the [] around pts
    cv2.fillPoly(region, np.int_([np.hstack(region_pts.values())]),
                 (0, 255, 0))

    # 5. Compute radius of curvature and lane X position where the vehicle is (bottom of the view).
    lane_radius = {}
    lane_xpos = {}
    for which in ['left', 'right']:
        curv = radius_of_curvature(
            np.max(ploty) * ym_per_pixel, coeff_world[which])
        debug and print(which, "curvature", curv)
        lane_radius[which] = curv
        lane_xpos[which] = poly_value(np.max(ploty), coeff_pixel[which])
    # Geometric mean is more stable for radius
    def get_geomean(iterable):
        a = np.log(iterable)
        return np.exp(a.sum() / len(a))
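    # Added rationale: on nearly straight lanes the fitted radius can explode to
    # huge values; averaging the left/right radii geometrically (rather than
    # arithmetically) keeps one such outlier from dominating the estimate.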

    avg_radius = get_geomean(list(lane_radius.values()))
    dist_center = (midpoint - np.mean(list(lane_xpos.values()))) * xm_per_pixel

    # 6. Update lane history
    if lane_hist is not None:
        # First time update
        if len(lane_hist.fit_coeffs) == 0:
            lane_hist.fit_coeffs = copy.deepcopy(coeff_pixel)
        if len(lane_hist.xbase_position) == 0:
            lane_hist.xbase_position = copy.deepcopy(lane_xpos)
        if lane_hist.radius_of_curvature is None:
            lane_hist.radius_of_curvature = avg_radius
        if lane_hist.distance_to_center is None:
            lane_hist.distance_to_center = dist_center

        if not success:
            lane_hist.n_continuous_failure += 1
            if lane_hist.n_continuous_failure > lane_hist.continuous_failure_threshold:
                lane_hist.use_tracking = False
                lane_hist.use_xbase = False
        else:
            lane_hist.use_tracking = True
            lane_hist.use_xbase = True
            # Exponential decay and update coefficients and xbase positions
            rate = (1 -
                    lane_hist.decay_rate)**(lane_hist.n_continuous_failure + 1)
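            # (added) This is an exponential moving average: the weight kept on
            # the old state shrinks with each preceding failure, so fresh
            # measurements dominate right after a failure streak.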
            for which in ['left', 'right']:
                lane_hist.xbase_position[which] *= rate
                lane_hist.xbase_position[which] += lane_xpos[which] * (1 -
                                                                       rate)
                lane_hist.fit_coeffs[which] *= rate
                lane_hist.fit_coeffs[which] += coeff_pixel[which] * (1 - rate)
                lane_hist.radius_of_curvature *= rate
                lane_hist.radius_of_curvature += avg_radius * (1 - rate)
                lane_hist.distance_to_center *= rate
                lane_hist.distance_to_center += dist_center * (1 - rate)
            lane_hist.n_continuous_failure = 0

    if img_fname is not None:
        output_img(canvas, os.path.join(output_dir, 'detect-canvas',
                                        img_fname))
    return (canvas, region, pixels,
            avg_radius if lane_hist is None else lane_hist.radius_of_curvature,
            dist_center if lane_hist is None else lane_hist.distance_to_center)
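
# Added usage sketch -- the parameter values below are illustrative assumptions
# (typical for a 1280x720 warped road image), not values from the original
# project:
#
#   canvas, region, pixels, radius, offset = detect_lane(
#       warped, lane_hist=None, num_windows=9,
#       margin_detect=100, margin_track=100,
#       recenter_threshold=(50, 500),
#       line_distance_threshold=(60, 700),
#       output_dir='output', img_fname=None)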
Example #45
0
    def project(self, proj_mat, threads=8, max_blockind=1024):
        if not self.initialized:
            print("Projector is not initialized")
            return

        inv_ar_mat, source_point = proj_mat.get_conanical_proj_matrix(
            voxel_size=self.voxelsize,
            volume_size=self.volumesize,
            origin_shift=self.origin)

        can_proj_matrix = inv_ar_mat.astype(np.float32)
        pixel_array = np.zeros(
            (self.proj_width, self.proj_height)).astype(np.float32)
        sourcex = source_point[0]
        sourcey = source_point[1]
        sourcez = source_point[2]
        g_volume_edge_min_point_x = np.float32(-0.5)
        g_volume_edge_min_point_y = np.float32(-0.5)
        g_volume_edge_min_point_z = np.float32(-0.5)
        g_volume_edge_max_point_x = np.float32(self.volumesize[0] - 0.5)
        g_volume_edge_max_point_y = np.float32(self.volumesize[1] - 0.5)
        g_volume_edge_max_point_z = np.float32(self.volumesize[2] - 0.5)
        g_voxel_element_size_x = self.voxelsize[0]
        g_voxel_element_size_y = self.voxelsize[1]
        g_voxel_element_size_z = self.voxelsize[2]

        #copy to gpu
        proj_matrix_gpu = cuda.mem_alloc(can_proj_matrix.nbytes)
        cuda.memcpy_htod(proj_matrix_gpu, can_proj_matrix)
        pixel_array_gpu = cuda.mem_alloc(pixel_array.nbytes)
        cuda.memcpy_htod(pixel_array_gpu, pixel_array)

        #calculate required blocks
        #threads = 8
        blocks_w = np.int(np.ceil(self.proj_width / threads))
        blocks_h = np.int(np.ceil(self.proj_height / threads))
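        # Example (added): proj_width=1240 with threads=8 gives
        # blocks_w = ceil(1240 / 8) = 155; whenever blocks_w or blocks_h exceeds
        # max_blockind, the patchwise branch below launches the kernel repeatedly
        # with per-patch pixel offsets instead of one oversized grid.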
        print("running:", blocks_w, "x", blocks_h, "blocks with ", threads,
              "x", threads, "threads")

        if blocks_w <= max_blockind and blocks_h <= max_blockind:
            #run kernel
            offset_w = np.int32(0)
            offset_h = np.int32(0)
            self.projKernel(self.proj_width,
                            self.proj_height,
                            self.stepsize,
                            g_volume_edge_min_point_x,
                            g_volume_edge_min_point_y,
                            g_volume_edge_min_point_z,
                            g_volume_edge_max_point_x,
                            g_volume_edge_max_point_y,
                            g_volume_edge_max_point_z,
                            g_voxel_element_size_x,
                            g_voxel_element_size_y,
                            g_voxel_element_size_z,
                            sourcex,
                            sourcey,
                            sourcez,
                            proj_matrix_gpu,
                            pixel_array_gpu,
                            offset_w,
                            offset_h,
                            block=(threads, threads, 1),
                            grid=(blocks_w, blocks_h))
        else:
            print("running kernel patchwise")
            for w in range(0, (blocks_w - 1) // max_blockind + 1):
                for h in range(0, (blocks_h - 1) // max_blockind + 1):
                    offset_w = np.int32(w * max_blockind)
                    offset_h = np.int32(h * max_blockind)
                    # print(offset_w, offset_h)
                    self.projKernel(self.proj_width,
                                    self.proj_height,
                                    self.stepsize,
                                    g_volume_edge_min_point_x,
                                    g_volume_edge_min_point_y,
                                    g_volume_edge_min_point_z,
                                    g_volume_edge_max_point_x,
                                    g_volume_edge_max_point_y,
                                    g_volume_edge_max_point_z,
                                    g_voxel_element_size_x,
                                    g_voxel_element_size_y,
                                    g_voxel_element_size_z,
                                    sourcex,
                                    sourcey,
                                    sourcez,
                                    proj_matrix_gpu,
                                    pixel_array_gpu,
                                    offset_w,
                                    offset_h,
                                    block=(threads, threads, 1),
                                    grid=(max_blockind, max_blockind))
                    context.synchronize()

        #context.synchronize()
        cuda.memcpy_dtoh(pixel_array, pixel_array_gpu)

        pixel_array = np.swapaxes(pixel_array, 0, 1)
        #normalize to cm
        return pixel_array / 10
Example #46
0
def run_training():
  config = Config()
  test_lines = open(config.test_list, 'r')
  train_lines = open(config.train_list, 'r')
  if not os.path.exists(config.model_save_dir):
      os.makedirs(config.model_save_dir)
  global_step = tf.Variable(0, name='global_step', trainable=False,dtype=tf.float32)
  images_placeholder, labels_placeholder = placeholder_inputs(config.batch_size)

  logit = c3d_model.inference_c3d(
                  images_placeholder,
                  0.5,
                  config.weight_initial,
                  )
  loss_name_scope = ('loss')
  loss = tower_loss(
                  loss_name_scope,
                  logit,
                  labels_placeholder
                  )
  train_op = tf.train.AdamOptimizer(1e-3).minimize(loss,global_step=global_step)
  accuracy = tower_acc(logit, labels_placeholder)

  # Create a saver for writing training checkpoints.
  saver = tf.train.Saver()
  init = tf.global_variables_initializer()

  # Create a session for running Ops on the Graph.
  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    sess.run(init)
    if config.model_filename is not None and config.use_pretrained_model:
      saver.restore(sess, config.model_filename)

    minstep = np.int(sess.run(global_step))
    print(minstep)
    for step in range(minstep, config.max_steps):
      # global_step is already incremented by the minimize() call above
      start_time = time.time()
      train_images, train_labels, _, _= input_data.read_clip_and_label(
                      input_lines=train_lines,
                      batch_size=config.batch_size ,
                      num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                      crop_size=c3d_model.CROP_SIZE,
                      shuffle=True
                      )
      sess.run(train_op, feed_dict={
                      images_placeholder: train_images,
                      labels_placeholder: train_labels
                      })
      duration = time.time() - start_time
      print('Step %d: %.3f sec' % (step, duration))

      # Save a checkpoint and evaluate the model periodically.
      if (step) % 10 == 0 or (step + 1) == config.max_steps:
        saver.save(sess, os.path.join(config.model_save_dir, 'c3d_ucf_model'), global_step=step)
        print('Training Data Eval:')
        acc = sess.run(accuracy,
                        feed_dict={images_placeholder: train_images,
                            labels_placeholder: train_labels
                            })
        print("accuracy: " + "{:.5f}".format(acc))
        print('Validation Data Eval:')
        val_images, val_labels, _, _ = input_data.read_clip_and_label(
                        input_lines=test_lines,
                        batch_size=config.batch_size,
                        num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                        crop_size=c3d_model.CROP_SIZE,
                        shuffle=True
                        )
        acc = sess.run(accuracy,
                        feed_dict={images_placeholder: val_images,
                                   labels_placeholder: val_labels})
        print("accuracy: " + "{:.5f}".format(acc))
  print("done")
def compute_STFT_data_from_file_list(wavfile_list,
                                     fs=16000,
                                     wlen_sec=0.032,
                                     hop_percent=0.5,
                                     zp_percent=0,
                                     trim=False,
                                     top_db=60,
                                     out_file=None):
    """
    Compute short-term Fourier transform (STFT) power and phase spectrograms from a list of wav files, 
    and save them to a pickle file.
    
    Parameters
    ----------
    
    wavfile_list                List of wav files
    fs                          Sampling rate
    wlen_sec                    STFT window length in seconds
    hop_percent                 Hop size as a percentage of the window length
    zp_percent                  Zero-padding size as a percentage of the window length
    trim                        Boolean indicating if leading and trailing silences should be trimmed
    top_db                      The threshold (in decibels) below reference to consider as silence (see librosa doc)
    out_file                    Path to the pickle file for saving the data
    
    Returns
    -------
    
    data                        A list of dictionaries, the length of the list is the same as 'wavfile_list' 
                                Each dictionary has the following fields:
                                        'file': The wav file name
                                        'power_spectrogram': The power spectrogram
                                        'phase_spectrogram': The phase spectrogram
    
    Examples
    --------
    
    fs = 16e3 # Sampling rate
    wlen_sec = 64e-3 # STFT window length in seconds
    hop_percent = 0.25  # hop size as a percentage of the window length
    trim=False
    data_folder = '/local_scratch/sileglai/datasets/clean_speech/TIMIT/TEST'
    test_file_list = librosa.util.find_files(data_folder, ext='wav') 
    data = compute_STFT_data_from_file_list(test_file_list, fs=fs, wlen_sec=wlen_sec, hop_percent=hop_percent,
                                            trim=trim, zp_percent=0, out_file='test_compute_data.pckl')
    
    """

    # STFT parameters
    wlen = wlen_sec * fs  # window length in samples
    wlen = np.int(np.power(2, np.ceil(np.log2(wlen))))  # next power of 2
    hop = np.int(hop_percent * wlen)  # hop size
    nfft = wlen + zp_percent * wlen  # number of points of the discrete Fourier transform
    win = np.sin(np.arange(.5, wlen - .5 + 1) / wlen * np.pi)
    # sine analysis window
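    # Added note: with the default 50% overlap (hop_percent=0.5) this sine
    # window satisfies the constant-overlap-add condition -- shifted copies of
    # win**2 sum to 1 via sin^2 + cos^2 = 1 -- which is what makes perfect
    # reconstruction possible after the padding applied below.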

    fs_orig = librosa.load(wavfile_list[0], sr=None)[1]  # Get sampling rate

    data = [None] * len(
        wavfile_list)  # Create an empty list that will contain dictionaries

    for n, wavfile in enumerate(wavfile_list):

        path, file_name = os.path.split(wavfile)

        if fs == fs_orig:
            x = librosa.load(wavfile,
                             sr=None)[0]  # Load wav file without resampling
        else:
            print('resampling while loading with librosa')
            x = librosa.load(wavfile,
                             sr=fs)[0]  # Load wav file with resampling

        if trim:
            x = librosa.effects.trim(
                x, top_db=top_db)[0]  # Trim leading and trailing silences

        T_orig = len(x)
        x_pad = librosa.util.fix_length(
            x, T_orig +
            wlen // 2)  # Padding for perfect reconstruction (see librosa doc)

        X = librosa.stft(x_pad,
                         n_fft=nfft,
                         hop_length=hop,
                         win_length=wlen,
                         window=win)  # STFT
        X_abs_2 = np.abs(X)**2  # Power spectrogram
        X_angle = np.angle(X)

        data[n] = {
            'file': file_name,
            'power_spectrogram': X_abs_2,
            'phase_spectrogram': X_angle
        }

    if out_file is not None:
        f = open(out_file, 'wb')
        pickle.dump([data, fs, wlen_sec, hop_percent, trim], f)
        f.close()

    return data
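
# Added sketch (not part of the original code): how the saved power/phase pair
# can be inverted back to a waveform. Assumes the same sine analysis window and
# hop size as used above; librosa.istft handles the overlap-add.
def istft_from_spectrograms(power_spectrogram, phase_spectrogram, wlen, hop,
                            length=None):
    win = np.sin(np.arange(.5, wlen - .5 + 1) / wlen * np.pi)  # sine window
    X = np.sqrt(power_spectrogram) * np.exp(1j * phase_spectrogram)  # complex STFT
    return librosa.istft(X, hop_length=hop, win_length=wlen, window=win,
                         length=length)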
Example #48
0
 #print count,months
 if months in [1, 3, 5, 7, 8, 10, 12]:
     monthendday = 31
     hrs = 12
 elif months in [4, 6, 9, 11]:
     monthendday = 30
     hrs = 0
 elif isleap(1984):  # February in a leap year
     monthendday = 29
     hrs = 12
 else:
     monthendday = 28
     hrs = 0
 times.append(
     cdt.componenttime(
         1984, months, np.int(monthendday / 2.),
         hrs).torel('days since 1955-1-1').value)
 # WOA13v2 extends from 1955 to 2012
 #                    times_bnds.append([cdt.componenttime(1955,months,1,0,0,0).torel('days since 1955-1-1'),
 #                                       cdt.componenttime(2012,months,monthendday,12,59,59).torel('days since 1955-1-1')])
 if months < 12:
     times_bnds.append([
         cdt.componenttime(
             1955, months, 1, 0, 0,
             0).torel('days since 1955-1-1').value,
         cdt.componenttime(
             2012, months + 1, 1, 0, 0,
             0).torel('days since 1955-1-1').value
     ])
 else:
     times_bnds.append([
Example #49
0
def fft_rotate(in_frame, alpha, pad=4, return_full=False):
    """
    3 FFT shear based rotation, following Larkin et al 1997
    
    Copied from the GRAPHIC exoplanet direct imaging pipeline

    in_frame: the numpy array which has to be rotated
    alpha: the rotation alpha in degrees
    pad: the padding factor
    return_full: If True, return the padded array

    One side effect of this program is that the image gains two columns and two rows.
    This is necessary to avoid problems with the different choice of centre between
    GRAPHIC and numpy. Numpy rotates around the boundary between 4 pixels, whereas this
    program rotates around the centre of a pixel.

    Return the rotated array
    """

    #################################################
    # Check alpha validity and correcting if needed
    #################################################
    alpha = 1. * alpha - 360 * np.floor(alpha / 360)

    # We need to add some extra rows since np.rot90 has a different definition of the centre
    temp = np.zeros((in_frame.shape[0] + 3, in_frame.shape[1] + 3)) + np.nan
    temp[1:in_frame.shape[0] + 1, 1:in_frame.shape[1] + 1] = in_frame
    in_frame = temp

    # FFT rotation only work in the -45:+45 range
    if alpha > 45 and alpha <= 135:
        in_frame = np.rot90(in_frame, k=1)
        alpha_rad = -np.deg2rad(alpha - 90)
    elif alpha > 135 and alpha <= 225:
        in_frame = np.rot90(in_frame, k=2)
        alpha_rad = -np.deg2rad(alpha - 180)
    elif alpha > 225 and alpha <= 315:
        in_frame = np.rot90(in_frame, k=3)
        alpha_rad = -np.deg2rad(alpha - 270)
    else:
        alpha_rad = -np.deg2rad(alpha)

    # Remove one extra row and column
    in_frame = in_frame[:-1, :-1]

    ###################################
    # Preparing the frame for rotation
    ###################################

    # Calculate the position that the input array will be in the padded array to simplify
    #  some lines of code later
    px1 = np.int(((pad - 1) / 2.) * in_frame.shape[0])
    px2 = np.int(((pad + 1) / 2.) * in_frame.shape[0])
    py1 = np.int(((pad - 1) / 2.) * in_frame.shape[1])
    py2 = np.int(((pad + 1) / 2.) * in_frame.shape[1])

    # Make the padded array
    pad_frame = np.ones(
        (in_frame.shape[0] * pad, in_frame.shape[1] * pad)) * np.NaN
    pad_mask = np.ones((pad_frame.shape), dtype=bool)
    pad_frame[px1:px2, py1:py2] = in_frame
    pad_mask[px1:px2, py1:py2] = np.where(np.isnan(in_frame), True, False)

    # Rotate the mask, to know what part is actually the image
    pad_mask = ndimage.interpolation.rotate(pad_mask,
                                            np.rad2deg(-alpha_rad),
                                            reshape=False,
                                            order=0,
                                            mode='constant',
                                            cval=True,
                                            prefilter=False)

    # Replace the parts outside the image, which are NaN, by 0 before going into Fourier space.
    pad_frame = np.where(np.isnan(pad_frame), 0., pad_frame)

    ###############################
    # Rotation in Fourier space
    ###############################
    a = np.tan(alpha_rad / 2.)
    b = -np.sin(alpha_rad)

    M = -2j * np.pi * np.ones(pad_frame.shape)
    N = fftpack.fftfreq(pad_frame.shape[0])

    X = np.arange(-pad_frame.shape[0] / 2.,
                  pad_frame.shape[0] / 2.)  #/pad_frame.shape[0]

    pad_x = fftpack.ifft((fftpack.fft(pad_frame, axis=0, overwrite_x=True).T *
                          np.exp(a * ((M * N).T * X).T)).T,
                         axis=0,
                         overwrite_x=True)
    pad_xy = fftpack.ifft(fftpack.fft(pad_x, axis=1, overwrite_x=True) *
                          np.exp(b * (M * X).T * N),
                          axis=1,
                          overwrite_x=True)
    pad_xyx = fftpack.ifft((fftpack.fft(pad_xy, axis=0, overwrite_x=True).T *
                            np.exp(a * ((M * N).T * X).T)).T,
                           axis=0,
                           overwrite_x=True)

    # Go back to real space and set pixels outside the image back to NaN.

    pad_xyx[pad_mask] = np.NaN

    if return_full:
        return np.abs(pad_xyx).copy()
    else:
        return np.abs(pad_xyx[px1:px2, py1:py2]).copy()
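
# Added illustration: the three shears above implement the classic rotation
# factorisation (Paeth 1986; Larkin et al. 1997). A quick numpy check of the
# identity, with a = -tan(alpha/2) on the x-shears and b = sin(alpha) on the
# y-shear:
#
#   alpha = np.deg2rad(20.0)
#   Sx = np.array([[1., -np.tan(alpha / 2.)], [0., 1.]])
#   Sy = np.array([[1., 0.], [np.sin(alpha), 1.]])
#   R = np.array([[np.cos(alpha), -np.sin(alpha)],
#                 [np.sin(alpha),  np.cos(alpha)]])
#   assert np.allclose(Sx @ Sy @ Sx, R)
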
def compute_STFT_data_from_file_list_TIMIT(wavfile_list,
                                           fs=16000,
                                           wlen_sec=0.032,
                                           hop_percent=0.5,
                                           zp_percent=0,
                                           trim=False,
                                           verbose=False,
                                           out_file=None):
    """
    Same as 'compute_STFT_data_from_file_list' function except that specific fields related to TIMIT are added to the returned and saved dictionaries.
    """

    # STFT parameters
    wlen = wlen_sec * fs  # window length in samples
    wlen = np.int(np.power(2, np.ceil(np.log2(wlen))))  # next power of 2
    hop = np.int(hop_percent * wlen)  # hop size
    nfft = wlen + zp_percent * wlen  # number of points of the discrete Fourier transform
    win = np.sin(np.arange(.5, wlen - .5 + 1) / wlen * np.pi)
    # sine analysis window

    fs_orig = librosa.load(wavfile_list[0], sr=None)[1]  # Get sampling rate

    data = [None] * len(
        wavfile_list)  # Create an empty list that will contain dictionaries

    for n, wavfile in enumerate(wavfile_list):

        path, file_name = os.path.split(wavfile)
        path, speaker = os.path.split(path)
        path, dialect = os.path.split(path)
        path, set_type = os.path.split(path)

        if verbose:
            print('processing %s/%s/%s/%s\n' %
                  (set_type, dialect, speaker, file_name))

        if fs == fs_orig:
            x = librosa.load(wavfile,
                             sr=None)[0]  # Load wav file without resampling
        else:
            print('resampling while loading with librosa')
            x = librosa.load(wavfile,
                             sr=fs)[0]  # Load wav file with resampling

        if trim:
            with open(
                    os.path.join(path, set_type, dialect, speaker,
                                 file_name[:-4] + '.PHN'), 'r') as f:
                first_line = f.readline()  # Read the first line
                for last_line in f:  # Loop through the whole file reading it all
                    pass

            if not ('#' in first_line) or not ('#' in last_line):
                raise NameError(
                    'The first or last lines of the .phn file should contain #'
                )

            ind_beg = int(first_line.split(' ')[1])
            ind_end = int(last_line.split(' ')[0])
            x = x[ind_beg:ind_end]

        T_orig = len(x)
        x_pad = librosa.util.fix_length(
            x, T_orig +
            wlen // 2)  # Padding for perfect reconstruction (see librosa doc)

        X = librosa.stft(x_pad,
                         n_fft=nfft,
                         hop_length=hop,
                         win_length=wlen,
                         window=win)  # STFT
        X_abs_2 = np.abs(X)**2  # Power spectrogram
        X_angle = np.angle(X)

        data[n] = {
            'set': set_type,
            'dialect': dialect,
            'speaker': speaker,
            'file': file_name,
            'power_spectrogram': X_abs_2,
            'phase_spectrogram': X_angle
        }

    if out_file is not None:
        f = open(out_file, 'wb')
        pickle.dump([data, fs, wlen_sec, hop_percent, trim], f)
        f.close()

    return data
Example #51
0
    def plot_score_over_parameter_pairs(self, ens_scr, par_space, ens_par):

        num_of_params = np.int(
            len(par_space))  #number of parameter dimensions: 4
        alen = np.int(
            len(par_space[list(par_space)[0]])
        )  #assuming the same number of vals for each parameter dimension
        num_of_mems = np.int(len(ens_scr))  #number of ensemble members

        num_of_pairs = np.int(num_of_params * (num_of_params - 1) / 2.0)
        pairs = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]  # hard-coded for num_of_params == 4

        #loop over ensemble members/scores
        pos_per_param = {}
        for enum, escr in sorted(ens_scr.items()):

            #match parameter combination of ensemble member with location in parameter space
            pos_par_array = []
            for pnum, pname in enumerate(par_space):

                pos_par_array.append(par_space[pname].index(
                    np.float(ens_par[enum][pname])))
            pos_per_param[enum] = [pos_par_array, escr]

        #mean score for each pair of parameter combinations
        par_field = np.zeros([num_of_pairs, alen, alen])
        for i, pair in enumerate(pairs):
            par1 = pair[0]
            par2 = pair[1]

            for enum, point in sorted(pos_per_param.items()):
                k = point[0][par1]
                l = point[0][par2]
                par_field[i, k, l] += ens_scr[enum][0]  #total score
        par_field /= (alen)**2  # in a full factorial ensemble each (k,l) cell accumulates alen**2 members -> mean

        #print np.max(par_field)
        #par_field/=np.max(par_field) #normalize to best pair

        #for i,pf in enumerate(par_field):
        #  par_field[i]/=np.sum(pf)
        par_field /= (np.sum(par_field) / num_of_pairs
                      )  #normalize to sum 1 for each pair

        fig22 = plt.figure(22, figsize=(8, 8))
        for sub, pair in enumerate(pairs):

            #locations in plot matrix
            subpos = (
                -pair[1] +
                (num_of_params - 1)) + (num_of_params - 1) * (pair[0]) + 1
            ax22 = plt.subplot(num_of_params - 1, num_of_params - 1, subpos)
            if self.normscore:
                cs22 = ax22.pcolor(
                    par_field[sub],
                    cmap=cm.YlOrRd,
                    norm=colors.LogNorm(vmin=0.01,
                                        vmax=self.msp))  #cm.gist_heat_r
                tcks = np.array([0, 0.02, 0.05, 0.1, 0.2, 0.4, 0.8])
            else:
                cs22 = ax22.pcolor(par_field[sub],
                                   cmap=cm.hot_r,
                                   vmin=0,
                                   vmax=self.msp)
                tcks = np.arange(0, self.msp, 0.2)

            pname0 = list(par_space)[pair[0]]
            pname1 = list(par_space)[pair[1]]

            #ticks
            plt.xticks(range(alen), par_space[pname1], fontsize=8)
            plt.yticks(range(alen), par_space[pname0], fontsize=8)
            ax22.xaxis.set(ticks=np.arange(0.5, alen),
                           ticklabels=par_space[pname1])
            ax22.yaxis.set(ticks=np.arange(0.5, alen),
                           ticklabels=par_space[pname0])
            if subpos in [2, 3, 5]:
                ax22.yaxis.set(ticks=[])

            #labels
            if subpos in [3, 5, 7]:
                ax22.set_xlabel(pname1)
            if subpos in [1, 4, 7]:
                ax22.set_ylabel(pname0)

        #colorbar
        #cbaxes = fig22.add_axes([0.77, 0.1, -0.04, 0.4])
        cbaxes = fig22.add_axes([0.77, 0.1, -0.05, 0.4])
        cb = plt.colorbar(cs22, cax=cbaxes, ticks=tcks,
                          format='%.2f')  #,extend='max')
        cb.set_label('mean score', multialignment="left")
        cb.outline.set_linewidth(0)

        ### print plot to pdf file
        if self.printtopdf:
            printname = self.printname.replace("placeholder", "parampairs")
            plt.savefig(printname, format='pdf')
            plt.savefig(printname.replace(".pdf", ".png"),
                        format='png',
                        dpi=300)
def create_nc_variable_files_on_regular_grid_from_mds(mds_var_dir,
                                                     mds_files_to_load,
                                                     mds_grid_dir,
                                                     output_dir,
                                                     output_freq_code,
                                                     vars_to_load = 'all',
                                                     tiles_to_load = [0,1,2,3,4,5,6,7,8,9,10,11,12],
                                                     time_steps_to_load = [],
                                                     meta_variable_specific = dict(),
                                                     meta_common = dict(),
                                                     mds_datatype = '>f4',
                                                     dlon=0.5, dlat=0.5,
                                                     radius_of_influence = 120000,
                                                     express=1,
                                                     kvarnmidx = 2, # coordinate idx for vertical axis
                                                     # method now is only a place holder.
                                                     # This can be expanded. For example,
                                                     # the global interpolated fields can
                                                     # split to tiles, similarly to
                                                     # the tiled native fields, to
                                                     # reduce the size of each file.
                                                     verbose=True,
                                                     method = ''):
    #%%
    # force mds_files_to_load to be a list (if str is passed)
    if isinstance(mds_files_to_load, str):
        mds_files_to_load = [mds_files_to_load]

    # force time_steps_to_load to be a list (if int is passed)
    if isinstance(time_steps_to_load, int):
        time_steps_to_load = [time_steps_to_load]

    # force tiles_to_load to be a list (if int is passed)
    if isinstance(tiles_to_load, int):
        tiles_to_load = [tiles_to_load]

    # if no specific file data passed, read default metadata from json file
    # -- variable specific meta data
    script_dir = os.path.dirname(__file__)  # <-- absolute dir the script is in
    if not meta_variable_specific:
        meta_variable_rel_path = '../meta_json/ecco_meta_variable.json'
        abs_meta_variable_path = os.path.join(script_dir, meta_variable_rel_path)
        with open(abs_meta_variable_path, 'r') as fp:
            meta_variable_specific = json.load(fp)

    # --- common meta data
    if not meta_common:
        meta_common_rel_path = '../meta_json/ecco_meta_common.json'
        abs_meta_common_path = os.path.join(script_dir, meta_common_rel_path)
        with open(abs_meta_common_path, 'r') as fp:
            meta_common = json.load(fp)

    # info for the regular grid
    new_grid_min_lat = -90+dlat/2.
    new_grid_max_lat = 90-dlat/2.
    new_grid_min_lon = -180+dlon/2.
    new_grid_max_lon = 180-dlon/2.
    new_grid_ny = np.int((new_grid_max_lat-new_grid_min_lat)/dlat + 1 + 1e-4*dlat)
    new_grid_nx = np.int((new_grid_max_lon-new_grid_min_lon)/dlon + 1 + 1e-4*dlon)
    j_reg = new_grid_min_lat + np.asarray(range(new_grid_ny))*dlat
    i_reg = new_grid_min_lon + np.asarray(range(new_grid_nx))*dlon
    j_reg_idx = np.asarray(range(new_grid_ny))
    i_reg_idx = np.asarray(range(new_grid_nx))
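    # Added note: the 1e-4*dlat / 1e-4*dlon terms above act as a small epsilon
    # so that floating-point round-off in (max - min)/d does not make np.int()
    # truncate the grid one row or column short.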
    if (new_grid_ny < 1) or (new_grid_nx < 1):
        raise ValueError('You need to have at least one grid point for the new grid.')

    # loop through each mds file in mds_files_to_load
    for mds_file in mds_files_to_load:

        # if time steps to load is empty, load all time steps
        if len(time_steps_to_load ) == 0:
            # go through each file, pull out the time step, add the time step to a list,
            # and determine the start and end time of each record.

           time_steps_to_load = \
               get_time_steps_from_mds_files(mds_var_dir, mds_file)


        first_meta_fname  = mds_file + '.' + \
            str(time_steps_to_load[0]).zfill(10) + '.meta'


        # get metadata for the first file and determine which variables
        # are present
        meta = xm.utils.parse_meta_file(mds_var_dir + '/' + first_meta_fname)
        vars_here =  meta['fldList']

        if not isinstance(vars_to_load, list):
            vars_to_load = [vars_to_load]

        if 'all' not in vars_to_load:
            num_vars_matching = len(np.intersect1d(vars_to_load, vars_here))

            print ('num vars matching ', num_vars_matching)

            # only proceed if we are sure that the variable we want is in this
            # mds file
            if num_vars_matching == 0:
                print ('none of the variables you want are in ', mds_file)
                print (vars_to_load)
                print (vars_here)

                break
        #%%
        # load the MDS fields
        ecco_dataset_all =  \
                load_ecco_vars_from_mds(mds_var_dir, \
                                         mds_grid_dir,
                                         mds_file,
                                         vars_to_load = vars_to_load,
                                         tiles_to_load=tiles_to_load,
                                         model_time_steps_to_load=time_steps_to_load,
                                         output_freq_code = \
                                              output_freq_code,
                                         meta_variable_specific = \
                                              meta_variable_specific,
                                         meta_common=meta_common,
                                         mds_datatype=mds_datatype,
                                         llc_method = 'bigchunks')

        # do the actual loading. Otherwise, the code may be slow.
        ecco_dataset_all.load()

        # print(ecco_dataset_all.keys())
        # loop through each variable in this dataset,
        for var in ecco_dataset_all.keys():
            print ('    ' + var)
            # obtain the grid information (use fields from time=0)
            # Note that nrtmp would always equal to one,
            # since each outfile will include only one time-record (e.g. daily, monthly avgs.).

            ecco_dataset = ecco_dataset_all.isel(time=[0])

            var_ds = ecco_dataset[var]

            shapetmp = var_ds.shape

            lenshapetmp = len(shapetmp)
            nttmp = 0
            nrtmp = 0
            if(lenshapetmp==4):
                nttmp = shapetmp[0]
                nrtmp = 0
            elif(lenshapetmp==5):
                nttmp = shapetmp[0]
                nrtmp = shapetmp[1]
            else:
                print('Error! ', var_ds.shape)
                sys.exit()

            # Get X,Y of the original grid. They could be XC/YC, XG/YC, XC/YG, etc.
            # Similar for mask.
            # default is XC, YC
            if 'i' in var_ds.coords.keys():
                XX = ecco_dataset['XC']
                XXname = 'XC'
            if 'j' in var_ds.coords.keys():
                YY = ecco_dataset['YC']
                YYname = 'YC'
            varmask = 'maskC'
            iname = 'i'
            jname = 'j'

            if 'i_g' in var_ds.coords.keys():
                XX = ecco_dataset['XG']
                XXname = 'XG'
                varmask = 'maskW'
                iname = 'i_g'
            if 'j_g' in var_ds.coords.keys():
                YY = ecco_dataset['YG']
                YYname = 'YG'
                varmask = 'maskS'
                jname = 'j_g'

            # interpolation
            # To do it fast, set express==1 (default)
            if(express==1):
                orig_lons_1d = XX.values.ravel()
                orig_lats_1d = YY.values.ravel()
                orig_grid = pr.geometry.SwathDefinition(lons=orig_lons_1d,
                                                        lats=orig_lats_1d)

                if (new_grid_ny > 0) and (new_grid_nx > 0):
                    # 1D grid values
                    new_grid_lon, new_grid_lat = np.meshgrid(i_reg, j_reg)

                    # define the lat lon points of the two parts.
                    new_grid  = pr.geometry.GridDefinition(lons=new_grid_lon,
                                                           lats=new_grid_lat)

                    # Get the neighbor info once.
                    # It will be used repeatedly late to resample data
                    # fast for each of the datasets that is based on
                    # the same swath, e.g. for a model variable at different times.
                    valid_input_index, valid_output_index, index_array, distance_array = \
                    pr.kd_tree.get_neighbour_info(orig_grid,
                                               new_grid, radius_of_influence,
                                               neighbours=1)

            # loop through time steps, one at a time.
            for time_step in time_steps_to_load:

                i, = np.where(ecco_dataset_all.timestep == time_step)
                if(verbose):
                    print (ecco_dataset_all.timestep.values)
                    print ('time step ', time_step, i)

                # load the dataset
                ecco_dataset = ecco_dataset_all.isel(time=i)

                # pull out the year, month day, hour, min, sec associated with
                # this time step
                if type(ecco_dataset.time.values) == np.ndarray:
                    cur_time = ecco_dataset.time.values[0]
                else:
                    cur_time = ecco_dataset.time.values
                #print (type(cur_time))
                year, mon, day, hh, mm, ss  = \
                     extract_yyyy_mm_dd_hh_mm_ss_from_datetime64(cur_time)

                print(year, mon, day)

                # if the field comes from an average,
                # extract the time bounds -- we'll use it before we save
                # the variable
                if 'AVG' in output_freq_code:
                    tb = ecco_dataset.time_bnds
                    tb.name = 'tb'

                var_ds = ecco_dataset[var]

                # 3d fields (with Z-axis) for each time record
                if(nttmp != 0 and nrtmp != 0):
                    tmpall = np.zeros((nttmp, nrtmp,new_grid_ny,new_grid_nx))
                    for ir in range(nrtmp): # Z-loop
                        # mask
                        maskloc = ecco_dataset[varmask].values[ir,:]

                        for it in range(nttmp): # time loop
                            # one 2d field at a time
                            var_ds_onechunk = var_ds[it,ir,:]
                            # apply mask
                            var_ds_onechunk.values[maskloc==0]=np.nan
                            orig_field = var_ds_onechunk.values
                            if(express==1):
                                tmp = pr.kd_tree.get_sample_from_neighbour_info(
                                        'nn', new_grid.shape, orig_field,
                                        valid_input_index, valid_output_index,
                                        index_array)

                            else:
                                new_grid_lon, new_grid_lat, tmp = resample_to_latlon(XX, YY, orig_field,
                                                                  new_grid_min_lat,
                                                                  new_grid_max_lat, dlat,
                                                                  new_grid_min_lon,
                                                                  new_grid_max_lon, dlon,
                                                                  nprocs_user=1,
                                                                  mapping_method = 'nearest_neighbor',
                                                                  radius_of_influence=radius_of_influence)
                            tmpall[it,ir,:] = tmp
                # 2d fields (without Z-axis) for each time record
                elif(nttmp != 0):
                    tmpall = np.zeros((nttmp, new_grid_ny,new_grid_nx))
                    # mask
                    maskloc = ecco_dataset[varmask].values[0,:]
                    for it in range(nttmp): # time loop
                        var_ds_onechunk = var_ds[it,:]
                        var_ds_onechunk.values[maskloc==0]=np.nan
                        orig_field = var_ds_onechunk.values
                        if(express==1):
                            tmp = pr.kd_tree.get_sample_from_neighbour_info(
                                    'nn', new_grid.shape, orig_field,
                                    valid_input_index, valid_output_index,
                                    index_array)
                        else:
                            new_grid_lon, new_grid_lat, tmp = resample_to_latlon(XX, YY, orig_field,
                                                              new_grid_min_lat,
                                                              new_grid_max_lat, dlat,
                                                              new_grid_min_lon,
                                                              new_grid_max_lon, dlon,
                                                              nprocs_user=1,
                                                              mapping_method = 'nearest_neighbor',
                                                              radius_of_influence=radius_of_influence)
                        tmpall[it,:] = tmp

                else:
                    print('Error! both nttmp and nrtmp are zeros.')
                    sys.exit()
                # set the coordinates for the new (regular) grid
                # 2d fields
                if(lenshapetmp==4):
                    var_ds_reg = xr.DataArray(tmpall,
                                              coords = {'time': var_ds.coords['time'].values,
                                                        'j': j_reg_idx,
                                                        'i': i_reg_idx},\
                                              dims = ('time', 'j', 'i'))
                # 3d fields
                elif(lenshapetmp==5):
                    # Get the variable name (kvarnm) for Z-axis: k, k_l
                    kvarnm = list(var_ds.coords.keys())[kvarnmidx]

                    if(kvarnm[0]!='k'):
                        kvarnmidxnew = kvarnmidx
                        for iktmp, ktmp in enumerate(var_ds.coords.keys()):
                            if(ktmp[0]=='k'):
                                kvarnmidxnew = iktmp
                        if(kvarnmidxnew==kvarnmidx):
                            print('Error! Seems ', kvarnm, ' is not the vertical axis.')
                            print(var_ds)
                            sys.exit()
                        else:
                            kvarnmidx = kvarnmidxnew
                            kvarnm = list(var_ds.coords.keys())[kvarnmidx]

                    var_ds_reg = xr.DataArray(tmpall,
                                              coords = {'time': var_ds.coords['time'].values,
                                                        kvarnm: var_ds.coords[kvarnm].values,
                                                        'j': j_reg_idx,
                                                        'i': i_reg_idx},\
                                              dims = ('time', kvarnm,'j', 'i'))
                # set the attrs for the new (regular) grid
                var_ds_reg['j'].attrs = var_ds[jname].attrs
                var_ds_reg['i'].attrs = var_ds[iname].attrs
                var_ds_reg['j'].attrs['long_name'] = 'y-dimension'
                var_ds_reg['i'].attrs['long_name'] = 'x-dimension'
                var_ds_reg['j'].attrs['swap_dim'] = 'latitude'
                var_ds_reg['i'].attrs['swap_dim'] = 'longitude'

                var_ds_reg['latitude'] = (('j'), j_reg)
                var_ds_reg['longitude'] = (('i'), i_reg)
                var_ds_reg['latitude'].attrs = ecco_dataset[YYname].attrs
                var_ds_reg['longitude'].attrs = ecco_dataset[XXname].attrs
                var_ds_reg['latitude'].attrs['long_name'] = "latitude at center of grid cell"
                var_ds_reg['longitude'].attrs['long_name'] = "longitude at center of grid cell"

                var_ds_reg.name = var_ds.name

                #keys_to_drop = ['tile','j','i','XC','YC','XG','YG']
                # drop these ancillary fields -- they are in grid anyway
                keys_to_drop = ['CS','SN','Depth','rA','PHrefC','hFacC',\
                                'maskC','drF', 'dxC', 'dyG', 'rAw', 'hFacW',\
                                'rAs','hFacS','maskS','dxG','dyC', 'maskW', \
                                'tile','XC','YC','XG','YG']

                for key_to_drop in keys_to_drop:
                    #print (key_to_drop)
                    if key_to_drop in var_ds.coords.keys():
                        var_ds = var_ds.drop(key_to_drop)

                # any remaining fields, e.g. time, would be included in the interpolated fields.
                for key_to_add in var_ds.coords.keys():
                    if(key_to_add not in var_ds_reg.coords.keys()):
                        if(key_to_add != 'i_g' and key_to_add != 'j_g'):
                            var_ds_reg[key_to_add] = var_ds[key_to_add]

                # use the same global attributes
                var_ds_reg.attrs = var_ds.attrs


                #print(var_ds.coords.keys())
                #%%

                # create the new file path name
                if 'MON' in output_freq_code:

                    fname = var + '_' +  str(year) + '_' + str(mon).zfill(2) + '.nc'

                    newpath = output_dir + '/' + var + '/' + \
                        str(year) + '/'

                elif ('WEEK' in output_freq_code) or \
                     ('DAY' in output_freq_code):

                    fname = var + '_' + \
                            str(year) + '_' + \
                            str(mon).zfill(2) + '_' + \
                            str(day).zfill(2) +  '.nc'
                    d0 = datetime.datetime(year, 1,1)
                    d1 = datetime.datetime(year, mon, day)
                    doy = (d1-d0).days + 1

                    newpath = output_dir + '/' + var + '/' + \
                        str(year) + '/' + str(doy).zfill(3)

                elif 'YEAR' in output_freq_code:

                    fname = var + '_' + str(year) + '.nc'

                    newpath = output_dir + '/' + var + '/' + \
                        str(year)

                else:
                    print ('no valid output frequency code specified')
                    print ('saving to year/day-of-year directories')
                    fname = var + '_' + \
                        str(year) + '_' + \
                        str(mon).zfill(2) + '_' + \
                        str(day).zfill(2) + '.nc'
                    d0 = datetime.datetime(year, 1,1)
                    d1 = datetime.datetime(year, mon, day)
                    doy = (d1-d0).days + 1

                    newpath = output_dir + '/' + var + '/' + \
                        str(year) + '/' + str(doy).zfill(3)


                # create the path if it does not exist
                if not os.path.exists(newpath):
                    os.makedirs(newpath)

                # convert the data array to a dataset.
                tmp = var_ds_reg.to_dataset()

                # add the time bounds field back in if we have an
                # average field
                if 'AVG' in output_freq_code:
                    tmp = xr.merge((tmp, tb))
                    tmp = tmp.drop('tb')

                # put the metadata back in
                tmp.attrs = ecco_dataset.attrs

                # update the temporal and geospatial metadata
                tmp = update_ecco_dataset_geospatial_metadata(tmp)
                tmp = update_ecco_dataset_temporal_coverage_metadata(tmp)

                # save to netcdf.  it's that simple.
                if(verbose):
                    print ('saving to %s/%s' % (newpath, fname))
                # do not include _FillValue
                encoding = {i: {'_FillValue': False} for i in tmp.variables.keys()}

                tmp.to_netcdf(newpath + '/' + fname, engine='netcdf4',encoding=encoding)



#%%
    ecco_dataset_all.close()
    return ecco_dataset, tmp
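The express==1 branch above is an optimization: pyresample's nearest-neighbour indices are computed once, outside the time loop, and reused for every record. A minimal self-contained sketch of that pattern follows; the point counts, grid spacing, and radius_of_influence are illustrative values, not taken from the script above.

import numpy as np
import pyresample as pr

# illustrative source points (a flattened model grid) and a regular target grid
src_lon = np.random.uniform(-180., 180., 5000)
src_lat = np.random.uniform(-80., 80., 5000)
tgt_lon, tgt_lat = np.meshgrid(np.arange(-179.5, 180., 1.),
                               np.arange(-79.5, 80., 1.))

src_def = pr.geometry.SwathDefinition(lons=src_lon, lats=src_lat)
tgt_def = pr.geometry.GridDefinition(lons=tgt_lon, lats=tgt_lat)

# the expensive kd-tree query happens once ...
valid_in, valid_out, index_arr, _ = pr.kd_tree.get_neighbour_info(
    src_def, tgt_def, radius_of_influence=120000, neighbours=1)

# ... then each time record is remapped cheaply with the cached indices
for it in range(12):
    field = np.random.rand(src_lon.size)        # stand-in for one record
    remapped = pr.kd_tree.get_sample_from_neighbour_info(
        'nn', tgt_def.shape, field, valid_in, valid_out, index_arr)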
Example #53
if __name__ == '__main__':

    if len(sys.argv) != 6:
        print(
            "\nUsage: ", sys.argv[0],
            "<input src corpus> <input trg corpus> <trg ngram order> <src output prefix> <trg output prefix>\n"
        )
        exit()

    (in_src_corpus, in_trg_corpus, trg_n_order,
     out_src_prefix, out_trg_prefix) = sys.argv[1:]

    out_src_sentences = "{0}.sentences".format(out_src_prefix)
    out_src_vocab = "{0}.vocab".format(out_src_prefix)
    src_vocab, src_sentences = extract_vocabulary_and_sentences(in_src_corpus)

    trg_n_order = np.int(trg_n_order)
    out_trg_context = "{0}.context".format(out_trg_prefix)
    out_trg_target = "{0}.target".format(out_trg_prefix)
    out_trg_vocab = "{0}.vocab".format(out_trg_prefix)
    trg_vocab, trg_contexts, trg_targets, trg_sentence_idx = extract_vocabulary_and_ngrams(
        in_trg_corpus, trg_n_order)

    src_sentences = multiply_sentences(src_sentences, trg_sentence_idx)

    write_file(src_sentences, out_src_sentences)
    write_file(src_vocab, out_src_vocab)
    write_file(trg_contexts, out_trg_context)
    write_file(trg_targets, out_trg_target)
    write_file(trg_vocab, out_trg_vocab)
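The helper functions used above are defined elsewhere in the original script. Purely as an illustration, here is a hypothetical sketch of what extract_vocabulary_and_sentences could look like, assuming one sentence per line and whitespace tokenization:

def extract_vocabulary_and_sentences(path):
    """Hypothetical helper: collect the vocabulary and tokenized sentences
    of a whitespace-tokenized corpus with one sentence per line."""
    vocab, sentences = set(), []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            tokens = line.split()
            vocab.update(tokens)
            sentences.append(tokens)
    return sorted(vocab), sentences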
Example #54
    def plot_score_scatter_metric(self, ens_scr, par_space, ens_par):

        #print par_space
        #print ens_scr
        #print ens_par

        num_of_params = np.int(
            len(par_space))  #number of parameter dimensions: 4
        num_of_metrics = np.int(len(ens_scr[list(
            ens_scr)[0]]))  #total and individual scores: 5(PD)+5(Paleo)=11
        #print num_of_params,num_of_metrics

        best_score_mem = self.best_scores[0]
        best_score_mem2 = self.best_scores[1]
        best_score_mem3 = self.best_scores[2]
        print(best_score_mem, best_score_mem2, best_score_mem3,
              self.best_scores[3], self.best_scores[4])

        score_names = self.score_names

        fig23 = plt.figure(23,
                           figsize=(int(1.6 * num_of_metrics),
                                    int(1.6 * num_of_params)))
        plt.clf()
        #plt.title("Aggregated Score        Present Day Scores        Paleo Scores")

        mean_scr_param = np.zeros([num_of_metrics, num_of_params, 4])

        for i, (num, scr) in enumerate(ens_scr.items()):  #ensemble members
            for k, metric in enumerate(scr):
                for l, (pn, par) in enumerate(par_space.items()):

                    subpos = k + (l) * num_of_metrics + 1
                    #print k,l,subpos,metric
                    ax23 = plt.subplot(num_of_params, num_of_metrics, subpos)
                    if i == 0:  #need to be set only once
                        if k == 0:
                            ax23.set_ylabel(pn)
                            ax23.set_yticks([0, 0.5, 1])
                            ax23.tick_params(axis='y', labelsize=7)
                        else:
                            ax23.set_yticks([])
                        if not self.normscore:
                            ax23.axhline(np.exp(-1.0),
                                         color='g',
                                         linestyle='dashed',
                                         alpha=0.2,
                                         linewidth=0.5,
                                         zorder=0)  #median
                        if l == num_of_params - 1:
                            ax23.set_xlabel(score_names[k])

                    for j, pval in enumerate(par):
                        if pval == ens_par[num][pn]:
                            #print i,num,k,metric,l,j,pval,subpos
                            ax23.plot(pval,
                                      metric,
                                      "k.",
                                      markersize=2,
                                      zorder=1,
                                      alpha=0.5)  #score points
                            if i == 0:  # set axes only once, for the first ensemble member
                                diffax = (max(par) - min(par)) * 0.2
                                minax = min(par) - diffax
                                maxax = max(par) + diffax
                                #ax23.axis([minax,maxax,0,1.0]) #1.25
                                ax23.axis([minax, maxax, 0, 1.05])  #1.25
                                ax23.set_xticks(par_space[pn])
                                ax23.tick_params(axis='x', labelsize=6)

                            if best_score_mem == num:  #show best run scores
                                ax23.plot(pval,
                                          metric,
                                          "r.",
                                          markersize=9,
                                          zorder=4)
                            elif best_score_mem2 == num:  #show second best run scores
                                ax23.plot(pval,
                                          metric,
                                          "g.",
                                          markersize=9,
                                          zorder=3,
                                          alpha=0.5)
                            elif best_score_mem3 == num:  #show third best run scores
                                ax23.plot(pval,
                                          metric,
                                          "b.",
                                          markersize=9,
                                          zorder=2,
                                          alpha=0.3)
                            #elif ens_par[num]['sia_e']==7.0:
                            #  ax23.plot(pval,metric,"y.",markersize=9,zorder=2,alpha=0.3)

                            #calculate score means per parameter values
                            #m = np.where( par==pval )
                            m = par.index(pval)
                            mean_scr_param[k, l, m] += metric

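        # normalize sums to means; note this assumes 4 values per parameter
        # dimension, matching the last axis of mean_scr_param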
        mean_scr_param /= (np.float(len(ens_scr)) / 4.0)
        for k, metric in enumerate(scr):
            for l, (pn, par) in enumerate(par_space.items()):
                subpos = k + (l) * num_of_metrics + 1
                ax23 = plt.subplot(num_of_params, num_of_metrics, subpos)
                ax23.plot(par,
                          mean_scr_param[k, l, :],
                          linestyle="dashed",
                          color="grey",
                          alpha=0.5,
                          zorder=0)  #,marker="x")

        if self.printtopdf:
            printname = self.printname.replace("placeholder", "scorescatter")
            plt.savefig(printname, format='pdf')
            plt.savefig(printname.replace(".pdf", ".png"),
                        format='png',
                        dpi=300)
Example #55
                         by,
                         '.',
                         markersize=1,
                         color=[0.0, 0.0, 1.0, 0.02])
                plt.plot(ex,
                         ey,
                         '.',
                         markersize=1,
                         color=[0.0, 1.0, 0.0, 0.02])
                plt.axis([x_min, x_max, y_min, y_max])
                plt.gca().invert_yaxis()

            #-----------------------------------
            # Save figure and data for each fish
            if plot:
                filename = analysisFolder + '\\' + str(np.int(
                    groups[idx])) + '_SPI_' + str(i) + '.png'
                plt.savefig(filename, dpi=600)
                plt.show()
                plt.close('all')

            #----------------------------
            # Save Analyzed Summary Data
            filename = analysisFolder + '\\' + str(np.int(
                groups[idx])) + '_SUMMARY_' + str(i) + '.npz'
            np.savez(filename,
                     VPI_NS=VPI_ns,
                     VPI_S=VPI_s,
                     SPI_NS=SPI_ns,
                     SPI_S=SPI_s,
                     BPS_NS=BPS_ns,
                     BPS_S=BPS_s,
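The excerpt above is cut off mid-call, but the np.savez pattern it uses is complete enough to illustrate: each keyword argument becomes a named array in the resulting .npz archive. A minimal round-trip with placeholder data:

import numpy as np

vpi_ns = np.random.rand(100)                 # placeholder arrays
vpi_s = np.random.rand(100)
np.savez('summary.npz', VPI_NS=vpi_ns, VPI_S=vpi_s)

archive = np.load('summary.npz')
print(archive.files)                         # ['VPI_NS', 'VPI_S']
print(archive['VPI_NS'].mean())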
Example #56
    def plot_score_over_parameter_space(self, ens_scr, par_space, ens_par):

        #print ens_scr # dict of ensemble members and individual score(s)
        #print ens_par # dict of ensemble members and individual parameter values
        #print par_space # dict of parameter dimensions and possible values

        #loop over ensemble members/scores
        pos_per_param = {}
        for enum, escr in sorted(ens_scr.items()):

            #match parameter combination of ensemble member with location in parameter space
            pos_par_array = []
            for pnum, pname in enumerate(par_space):
                try:
                    pos_par_array.append(par_space[pname].index(
                        np.float(ens_par[enum][pname])))
                except Exception:
                    print("Position not found for " + str(enum))
            pos_per_param[enum] = [pos_par_array, escr]
        #print pos_per_param

        #find parameter matrix position of ensemble member
        alen = len(par_space[list(par_space)[0]])
        blen = len(par_space[list(par_space)[1]])
        clen = len(par_space[list(par_space)[2]])
        dlen = len(par_space[list(par_space)[3]])
        #print alen,blen,clen,dlen

        par_field = np.zeros([clen * dlen, alen, blen])
        par_names = list(np.zeros_like(par_field))

        for enum, point in sorted(pos_per_param.items()):

            apos = point[0][0]
            bpos = point[0][1]
            cpos = point[0][2]
            dpos = point[0][3]
            epos = cpos * clen + dpos

            par_field[epos, apos, bpos] = point[1][0]  #total score
            par_names[epos][apos][bpos] = np.int(enum)

            #print the top-5 ensemble members
            if enum in self.best_scores[0:5]:
                print("best score members: \n")
                print(enum, point[1], ens_par[enum], "\n")

        #plot matrix
        fig20 = plt.figure(20, figsize=(8, 9))

        for sub in range(clen * dlen):
            ax20 = plt.subplot(clen, dlen, sub + 1)
            if self.normscore:
                cs20 = ax20.pcolor(par_field[sub],
                                   cmap=cm.YlOrRd,
                                   norm=colors.LogNorm(vmin=0.01,
                                                       vmax=self.msp))
                tcks = np.array([0, 0.02, 0.05, 0.1, 0.2, 0.4, 0.8])
            else:
                cs20 = ax20.pcolor(par_field[sub],
                                   cmap=cm.hot_r,
                                   vmin=0,
                                   vmax=self.msp)
                tcks = np.arange(0, self.msp, 0.2)

            #write ensemble number at its matrix location
            for i in range(alen):
                for j in range(blen):
                    enumstr = str(np.int(par_names[sub][j][i]))
                    if enumstr in self.best_scores[0:3]:
                        ax20.text(i + 0.15,
                                  j + 0.4,
                                  enumstr,
                                  color="w",
                                  fontsize=8)
                    else:  #bright background
                        ax20.text(i + 0.15,
                                  j + 0.4,
                                  enumstr,
                                  color="k",
                                  fontsize=8)

            #inner ticks, shifted by 0.5
            plt.xticks(range(blen), par_space[list(par_space)[1]], fontsize=8)
            plt.yticks(range(alen), par_space[list(par_space)[0]], fontsize=8)

            ax20.xaxis.set(ticks=np.arange(0.5, blen),
                           ticklabels=par_space[list(par_space)[1]])
            ax20.yaxis.set(ticks=np.arange(0.5, alen),
                           ticklabels=par_space[list(par_space)[0]])

            #outer labels
            if sub // clen >= dlen - 1:
                ax20.set_xlabel(par_space[list(par_space)[3]][sub % dlen])
            if sub % dlen == 0:
                #if sub%3==0:
                ax20.set_ylabel(par_space[list(par_space)[2]][(sub + 1) //
                                                              dlen])
            else:
                ax20.set_yticks([])

        #define colorbar
        #cbaxes = fig20.add_axes([0.97, 0.1, -0.04, 0.4])
        cbaxes = fig20.add_axes([0.97, 0.1, -0.05, 0.4])
        cb = plt.colorbar(cs20, cax=cbaxes, ticks=tcks,
                          format='%.2f')  #,extend='max')
        cb.set_label('total score', multialignment="left")
        cb.outline.set_linewidth(0)

        ### print plot to pdf file
        if self.printtopdf:
            printname = self.printname.replace("placeholder", "paramscore")
            plt.savefig(printname, format='pdf')
            plt.savefig(printname.replace(".pdf", ".png"),
                        format='png',
                        dpi=300)
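The normscore branch above draws the score matrix on a logarithmic color scale. A self-contained sketch of that pcolor + LogNorm combination with toy data (the tick values mirror the ones used above):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm, colors

scores = np.random.uniform(0.01, 0.8, size=(4, 5))   # toy score matrix
fig, ax = plt.subplots()
mesh = ax.pcolor(scores, cmap=cm.YlOrRd,
                 norm=colors.LogNorm(vmin=0.01, vmax=0.8))
cb = fig.colorbar(mesh, ticks=[0.02, 0.05, 0.1, 0.2, 0.4, 0.8], format='%.2f')
cb.set_label('total score')
plt.show()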
Example #57
def glacier_masks(gdir):
    """Makes a gridded mask of the glacier outlines.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    """

    # open srtm tif-file:
    dem_dr = rasterio.open(gdir.get_filepath('dem'), 'r', driver='GTiff')
    dem = dem_dr.read(1).astype(rasterio.float32)

    # Grid
    nx = dem_dr.width
    ny = dem_dr.height
    assert nx == gdir.grid.nx
    assert ny == gdir.grid.ny

    # Correct the DEM (ASTER...)
    # Currently we just do a linear interp -- ASTER quality is poor anyway
    min_z = -999.
    isfinite = np.isfinite(dem)
    if (np.min(dem) <= min_z) or np.any(~isfinite):
        xx, yy = gdir.grid.ij_coordinates
        pnan = np.nonzero((dem <= min_z) | (~isfinite))
        pok = np.nonzero((dem > min_z) & isfinite)
        points = np.array((np.ravel(yy[pok]), np.ravel(xx[pok]))).T
        inter = np.array((np.ravel(yy[pnan]), np.ravel(xx[pnan]))).T
        dem[pnan] = griddata(points, np.ravel(dem[pok]), inter)
        log.warning(gdir.rgi_id + ': DEM needed interpolation.')

    isfinite = np.isfinite(dem)
    if not np.all(isfinite):
        # check what fraction of the DEM is affected
        if np.sum(~isfinite) > (0.2 * nx * ny):
            raise RuntimeError('({}) too many NaNs in DEM'.format(gdir.rgi_id))
        log.warning('({}) DEM needed zeros somewhere.'.format(gdir.rgi_id))
        dem[~isfinite] = 0

    if np.min(dem) == np.max(dem):
        raise RuntimeError('({}) min equal max in the DEM.'
                           .format(gdir.rgi_id))

    # Proj
    if LooseVersion(rasterio.__version__) >= LooseVersion('1.0'):
        transf = dem_dr.transform
    else:
        transf = dem_dr.affine
    x0 = transf[2]  # UL corner
    y0 = transf[5]  # UL corner
    dx = transf[0]
    dy = transf[4]  # Negative
    assert dx == -dy
    assert dx == gdir.grid.dx
    assert y0 == gdir.grid.corner_grid.y0
    assert x0 == gdir.grid.corner_grid.x0
    dem_dr.close()

    # Clip topography to 0 m a.s.l.
    dem = dem.clip(0)

    # Smooth DEM?
    if cfg.PARAMS['smooth_window'] > 0.:
        gsize = np.rint(cfg.PARAMS['smooth_window'] / dx)
        smoothed_dem = gaussian_blur(dem, np.int(gsize))
    else:
        smoothed_dem = dem.copy()

    if not np.all(np.isfinite(smoothed_dem)):
        raise RuntimeError('({}) NaN in smoothed DEM'.format(gdir.rgi_id))

    # Geometries
    outlines_file = gdir.get_filepath('outlines')
    geometry = gpd.GeoDataFrame.from_file(outlines_file).geometry[0]

    # Interpolate shape to a regular path
    glacier_poly_hr = _interp_polygon(geometry, gdir.grid.dx)

    # Transform geometry into grid coordinates
    # It has to be in pix center coordinates because of how skimage works
    def proj(x, y):
        grid = gdir.grid.center_grid
        return grid.transform(x, y, crs=grid.proj)
    glacier_poly_hr = shapely.ops.transform(proj, glacier_poly_hr)

    # simple trick to correct invalid polys:
    # http://stackoverflow.com/questions/20833344/
    # fix-invalid-polygon-python-shapely
    glacier_poly_hr = glacier_poly_hr.buffer(0)
    if not glacier_poly_hr.is_valid:
        raise RuntimeError('This glacier geometry is crazy.')

    # Rounded nearest pix
    glacier_poly_pix = _polygon_to_pix(glacier_poly_hr)

    # Compute the glacier mask (currently: center pixels + touched)
    nx, ny = gdir.grid.nx, gdir.grid.ny
    glacier_mask = np.zeros((ny, nx), dtype=np.uint8)
    glacier_ext = np.zeros((ny, nx), dtype=np.uint8)
    (x, y) = glacier_poly_pix.exterior.xy
    glacier_mask[skdraw.polygon(np.array(y), np.array(x))] = 1
    for gint in glacier_poly_pix.interiors:
        x, y = tuple2int(gint.xy)
        glacier_mask[skdraw.polygon(y, x)] = 0
        glacier_mask[y, x] = 0  # no glacier on the nunataks
    x, y = tuple2int(glacier_poly_pix.exterior.xy)
    glacier_mask[y, x] = 1
    glacier_ext[y, x] = 1

    # Because of the 0 values at nunataks boundaries, some "Ice Islands"
    # can happen within nunataks (e.g.: RGI40-11.00062)
    # See if we can filter them out easily
    regions, nregions = label(glacier_mask, structure=label_struct)
    if nregions > 1:
        log.debug('(%s) we had to cut an island in the mask', gdir.rgi_id)
        # Check the size of those
        region_sizes = [np.sum(regions == r) for r in np.arange(1, nregions+1)]
        am = np.argmax(region_sizes)
        # Check not a strange glacier
        sr = region_sizes.pop(am)
        for ss in region_sizes:
            assert (ss / sr) < 0.1
        glacier_mask[:] = 0
        glacier_mask[np.where(regions == (am+1))] = 1

    # Last sanity check based on the masked dem
    tmp_max = np.max(dem[np.where(glacier_mask == 1)])
    tmp_min = np.min(dem[np.where(glacier_mask == 1)])
    if tmp_max < (tmp_min + 1):
        raise RuntimeError('({}) min equal max in the masked DEM.'
                           .format(gdir.rgi_id))

    # write out the grids in the netcdf file
    nc = gdir.create_gridded_ncdf_file('gridded_data')

    v = nc.createVariable('topo', 'f4', ('y', 'x', ), zlib=True)
    v.units = 'm'
    v.long_name = 'DEM topography'
    v[:] = dem

    v = nc.createVariable('topo_smoothed', 'f4', ('y', 'x', ), zlib=True)
    v.units = 'm'
    v.long_name = ('DEM topography smoothed'
                   ' with radius: {:.1f} m'.format(cfg.PARAMS['smooth_window']))
    v[:] = smoothed_dem

    v = nc.createVariable('glacier_mask', 'i1', ('y', 'x', ), zlib=True)
    v.units = '-'
    v.long_name = 'Glacier mask'
    v[:] = glacier_mask

    v = nc.createVariable('glacier_ext', 'i1', ('y', 'x', ), zlib=True)
    v.units = '-'
    v.long_name = 'Glacier external boundaries'
    v[:] = glacier_ext

    # add some meta stats and close
    nc.max_h_dem = np.max(dem)
    nc.min_h_dem = np.min(dem)
    dem_on_g = dem[np.where(glacier_mask)]
    nc.max_h_glacier = np.max(dem_on_g)
    nc.min_h_glacier = np.min(dem_on_g)
    nc.close()

    geometries = dict()
    geometries['polygon_hr'] = glacier_poly_hr
    geometries['polygon_pix'] = glacier_poly_pix
    geometries['polygon_area'] = geometry.area
    gdir.write_pickle(geometries, 'geometries')
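The DEM correction near the top of glacier_masks fills bad pixels by interpolating from the valid ones. The same griddata pattern in isolation, on a toy array with a single hole:

import numpy as np
from scipy.interpolate import griddata

dem = np.arange(25, dtype=float).reshape(5, 5)
dem[2, 2] = np.nan                                   # one bad pixel

yy, xx = np.indices(dem.shape)
bad = ~np.isfinite(dem)
points = np.column_stack((yy[~bad], xx[~bad]))       # coords of valid pixels
dem[bad] = griddata(points, dem[~bad], (yy[bad], xx[bad]), method='linear')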
Example #58
    def generate_tiles(self):
        openslide_obj = self.openslide_obj
        tile_objective_value = self.tile_objective_value
        tile_read_size = self.tile_read_size

        if self.use_tiss_mask:
            ds_factor = self.level_downsamples[self.tiss_level]

        if self.objective_power == 0:
            self.objective_power = np.int(openslide_obj.properties[
                openslide.PROPERTY_NAME_OBJECTIVE_POWER])

        rescale = np.int(self.objective_power / tile_objective_value)
        openslide_read_size = np.multiply(tile_read_size, rescale)
        slide_dimension = openslide_obj.level_dimensions[0]
        slide_h = slide_dimension[1]
        slide_w = slide_dimension[0]
        tile_h = openslide_read_size[0]
        tile_w = openslide_read_size[1]

        iter_tot = 0
        output_dir = self.output_dir
        data = []

        if self.nr_tiles is None:
            for h in range(int(math.ceil((slide_h - tile_h) / tile_h + 1))):
                for w in range(int(math.ceil((slide_w - tile_w) / tile_w +
                                             1))):
                    start_h = h * tile_h
                    end_h = (h * tile_h) + tile_h
                    start_w = w * tile_w
                    end_w = (w * tile_w) + tile_w
                    if end_h > slide_h:
                        end_h = slide_h

                    if end_w > slide_w:
                        end_w = slide_w
                    #
                    if self.use_tiss_mask:
                        tiss = self.mask[
                            int(start_h / ds_factor):int(start_h / ds_factor) +
                            int(openslide_read_size[1] / ds_factor),
                            int(start_w / ds_factor):int(start_w / ds_factor) +
                            int(openslide_read_size[0] / ds_factor)]
                        tiss_frac = np.sum(tiss) / np.size(tiss)
                    else:
                        tiss_frac = 1

                    if tiss_frac > self.tiss_cutoff:

                        im = self.read_region(start_w, start_h, end_w, end_h)
                        format_str = 'Tile%d:  start_w:%d, end_w:%d, start_h:%d, end_h:%d, width:%d, height:%d'

                        print(format_str %
                              (iter_tot, start_w, end_w, start_h, end_h,
                               end_w - start_w, end_h - start_h),
                              flush=True)
                        temp = np.array(im)
                        temp = temp[:, :, 0:3]
                        im = Image.fromarray(temp)
                        if rescale != 1:
                            im = im.resize(size=[
                                np.int((end_w - start_w) / rescale),
                                np.int((end_h - start_h) / rescale)
                            ],
                                           resample=Image.BICUBIC)

                        img_save_name = 'Tile' + '_' \
                                        + str(tile_objective_value) + '_' \
                                        + str(int(start_w/rescale)) + '_' \
                                        + str(int(start_h/rescale))\
                                        + '.jpg'

                        im.save(os.path.join(output_dir, img_save_name),
                                format='JPEG')
                        data.append([
                            iter_tot, img_save_name, start_w, end_w, start_h,
                            end_h, im.size[0], im.size[1]
                        ])
                        iter_tot += 1

        else:
            for i in range(self.nr_tiles):
                condition = 0
                while condition == 0:
                    h = np.random.randint(
                        0, int(math.ceil((slide_h - tile_h) / tile_h + 1)))
                    w = np.random.randint(
                        0, int(math.ceil((slide_w - tile_w) / tile_w + 1)))

                    start_h = h * tile_h
                    end_h = (h * tile_h) + tile_h
                    start_w = w * tile_w
                    end_w = (w * tile_w) + tile_w
                    if end_h > slide_h:
                        end_h = slide_h

                    if end_w > slide_w:
                        end_w = slide_w
                    #
                    if self.use_tiss_mask:
                        tiss = self.mask[
                            int(start_h / ds_factor):int(start_h / ds_factor) +
                            int(openslide_read_size[1] / ds_factor),
                            int(start_w / ds_factor):int(start_w / ds_factor) +
                            int(openslide_read_size[0] / ds_factor)]
                        tiss_frac = np.sum(tiss) / np.size(tiss)
                    else:
                        tiss_frac = 1

                    if tiss_frac > self.tiss_cutoff:

                        im = self.read_region(start_w, start_h, end_w, end_h)
                        format_str = 'Tile%d:  start_w:%d, end_w:%d, start_h:%d, end_h:%d, width:%d, height:%d'

                        print(format_str %
                              (iter_tot, start_w, end_w, start_h, end_h,
                               end_w - start_w, end_h - start_h),
                              flush=True)
                        temp = np.array(im)
                        temp = temp[:, :, 0:3]
                        im = Image.fromarray(temp)
                        if rescale != 1:
                            im = im.resize(size=[
                                np.int((end_w - start_w) / rescale),
                                np.int((end_h - start_h) / rescale)
                            ],
                                           resample=Image.BICUBIC)

                        img_save_name = 'Tile' + '_' \
                                        + str(tile_objective_value) + '_' \
                                        + str(int(start_w/rescale)) + '_' \
                                        + str(int(start_h/rescale))\
                                        + '.jpg'

                        im.save(os.path.join(output_dir, img_save_name),
                                format='JPEG')
                        data.append([
                            iter_tot, img_save_name, start_w, end_w, start_h,
                            end_h, im.size[0], im.size[1]
                        ])
                        iter_tot += 1
                        condition = 1

        df = pd.DataFrame(data,
                          columns=[
                              'iter', 'Tile_Name', 'start_w', 'end_w',
                              'start_h', 'end_h', 'size_w', 'size_h'
                          ])
        df.to_csv(os.path.join(output_dir, 'Output.csv'), index=False)
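The tiling loops above walk a ceil-divided grid over the slide and clamp the last row and column to the slide border. Just that indexing arithmetic, with made-up slide and tile sizes:

import math

slide_h, slide_w = 50000, 42000        # illustrative slide size in pixels
tile_h, tile_w = 2048, 2048

n_rows = int(math.ceil((slide_h - tile_h) / tile_h + 1))
n_cols = int(math.ceil((slide_w - tile_w) / tile_w + 1))

for h in range(n_rows):
    for w in range(n_cols):
        start_h, start_w = h * tile_h, w * tile_w
        end_h = min(start_h + tile_h, slide_h)   # clamp edge tiles
        end_w = min(start_w + tile_w, slide_w)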
Example #59
def define_glacier_region(gdir, entity=None):
    """
    Very first task: define the glacier's local grid.

    Defines the local projection (Transverse Mercator), centered on the
    glacier. There are several options to set the resolution of the local
    grid. It can be adapted depending on the size of the glacier with::

        dx (m) = d1 * AREA (km2) + d2 ; clipped to dmax

    or be set to a fixed value. See ``params.cfg`` for setting these options.
    Default values of the adapted mode lead to a resolution of 50 m for
    Hintereisferner, which is approx. 8 km2 large.
    After defining the grid, the topography and the outlines of the glacier
    are transformed into the local projection. The default interpolation for
    the topography is `cubic`.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    entity : geopandas GeoSeries
        the glacier geometry to process
    """

    # choose a spatial resolution with respect to the glacier area
    dxmethod = cfg.PARAMS['grid_dx_method']
    area = gdir.rgi_area_km2
    if dxmethod == 'linear':
        dx = np.rint(cfg.PARAMS['d1'] * area + cfg.PARAMS['d2'])
    elif dxmethod == 'square':
        dx = np.rint(cfg.PARAMS['d1'] * np.sqrt(area) + cfg.PARAMS['d2'])
    elif dxmethod == 'fixed':
        dx = np.rint(cfg.PARAMS['fixed_dx'])
    else:
        raise ValueError('grid_dx_method not supported: {}'.format(dxmethod))
    # Additional trick for varying dx
    if dxmethod in ['linear', 'square']:
        dx = np.clip(dx, cfg.PARAMS['d2'], cfg.PARAMS['dmax'])

    log.debug('(%s) area %.2f km2, dx=%.1f', gdir.rgi_id, area, dx)

    # Make a local glacier map
    proj_params = dict(name='tmerc', lat_0=0., lon_0=gdir.cenlon,
                       k=0.9996, x_0=0, y_0=0, datum='WGS84')
    proj4_str = "+proj={name} +lat_0={lat_0} +lon_0={lon_0} +k={k} " \
                "+x_0={x_0} +y_0={y_0} +datum={datum}".format(**proj_params)
    proj_in = pyproj.Proj("+init=EPSG:4326", preserve_units=True)
    proj_out = pyproj.Proj(proj4_str, preserve_units=True)
    project = partial(pyproj.transform, proj_in, proj_out)
    # transform geometry to map
    geometry = shapely.ops.transform(project, entity['geometry'])
    geometry = _check_geometry(geometry, gdir=gdir)
    xx, yy = geometry.exterior.xy

    # Corners, incl. a buffer of N pix
    ulx = np.min(xx) - cfg.PARAMS['border'] * dx
    lrx = np.max(xx) + cfg.PARAMS['border'] * dx
    uly = np.max(yy) + cfg.PARAMS['border'] * dx
    lry = np.min(yy) - cfg.PARAMS['border'] * dx
    # n pixels
    nx = np.int((lrx - ulx) / dx)
    ny = np.int((uly - lry) / dx)

    # Back to lon, lat for DEM download/preparation
    tmp_grid = salem.Grid(proj=proj_out, nxny=(nx, ny), x0y0=(ulx, uly),
                          dxdy=(dx, -dx), pixel_ref='corner')
    minlon, maxlon, minlat, maxlat = tmp_grid.extent_in_crs(crs=salem.wgs84)

    # save transformed geometry to disk
    entity = entity.copy()
    entity['geometry'] = geometry
    # Avoid fiona bug: https://github.com/Toblerity/Fiona/issues/365
    for k, s in entity.items():
        if type(s) in [np.int32, np.int64]:
            entity[k] = int(s)
    towrite = gpd.GeoDataFrame(entity).T
    towrite.crs = proj4_str
    # Delete the source before writing
    if 'DEM_SOURCE' in towrite:
        del towrite['DEM_SOURCE']
    towrite.to_file(gdir.get_filepath('outlines'))

    # Also transform the intersects if necessary
    gdf = cfg.PARAMS['intersects_gdf']
    if len(gdf) > 0:
        gdf = gdf.loc[((gdf.RGIId_1 == gdir.rgi_id) |
                       (gdf.RGIId_2 == gdir.rgi_id))]
        if len(gdf) > 0:
            gdf = salem.transform_geopandas(gdf, to_crs=proj_out)
            if hasattr(gdf.crs, 'srs'):
                # salem uses pyproj
                gdf.crs = gdf.crs.srs
            gdf.to_file(gdir.get_filepath('intersects'))

    # Open DEM
    source = entity.DEM_SOURCE if hasattr(entity, 'DEM_SOURCE') else None
    dem_list, dem_source = get_topo_file((minlon, maxlon), (minlat, maxlat),
                                         rgi_region=gdir.rgi_region,
                                         source=source)
    log.debug('(%s) DEM source: %s', gdir.rgi_id, dem_source)

    # A glacier area can cover more than one tile:
    if len(dem_list) == 1:
        dem_dss = [rasterio.open(dem_list[0])]  # if one tile, just open it
        dem_data = rasterio.band(dem_dss[0], 1)
        if LooseVersion(rasterio.__version__) >= LooseVersion('1.0'):
            src_transform = dem_dss[0].transform
        else:
            src_transform = dem_dss[0].affine
    else:
        dem_dss = [rasterio.open(s) for s in dem_list]  # list of rasters
        dem_data, src_transform = merge_tool(dem_dss)  # merged rasters

    # Use Grid properties to create a transform (see rasterio cookbook)
    dst_transform = rasterio.transform.from_origin(
        ulx, uly, dx, dx  # sign change (2nd dx) is done by rasterio.transform
    )

    # Set up profile for writing output
    profile = dem_dss[0].profile
    profile.update({
        'crs': proj4_str,
        'transform': dst_transform,
        'width': nx,
        'height': ny
    })

    # Could be extended so that the cfg file takes all Resampling.* methods
    if cfg.PARAMS['topo_interp'] == 'bilinear':
        resampling = Resampling.bilinear
    elif cfg.PARAMS['topo_interp'] == 'cubic':
        resampling = Resampling.cubic
    else:
        raise ValueError('{} interpolation not understood'
                         .format(cfg.PARAMS['topo_interp']))

    dem_reproj = gdir.get_filepath('dem')
    with rasterio.open(dem_reproj, 'w', **profile) as dest:
        dst_array = np.empty((ny, nx), dtype=dem_dss[0].dtypes[0])
        reproject(
            # Source parameters
            source=dem_data,
            src_crs=dem_dss[0].crs,
            src_transform=src_transform,
            # Destination parameters
            destination=dst_array,
            dst_transform=dst_transform,
            dst_crs=proj4_str,
            # Configuration
            resampling=resampling)

        dest.write(dst_array, 1)

    for dem_ds in dem_dss:
        dem_ds.close()

    # Glacier grid
    x0y0 = (ulx+dx/2, uly-dx/2)  # To pixel center coordinates
    glacier_grid = salem.Grid(proj=proj_out, nxny=(nx, ny),  dxdy=(dx, -dx),
                              x0y0=x0y0)
    glacier_grid.to_json(gdir.get_filepath('glacier_grid'))
    gdir.write_pickle(dem_source, 'dem_source')
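The docstring's resolution rule, dx = d1 * AREA + d2 clipped to dmax, is easy to check in isolation. A sketch with hypothetical coefficients, chosen so that an 8 km2 glacier comes out at the ~50 m the docstring mentions (the real values live in params.cfg):

import numpy as np

def adaptive_dx(area_km2, d1=5.0, d2=10.0, dmax=100.0):
    # d1, d2, dmax are assumed values here, not read from params.cfg
    dx = np.rint(d1 * area_km2 + d2)
    return float(np.clip(dx, d2, dmax))

print(adaptive_dx(8.0))    # -> 50.0 m for a Hintereisferner-sized glacier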
Example #60
import tomopy
import dxchange
import numpy as np
import h5py
import sys
import skimage.feature

##################################### Inputs #########################################################################
ndsets = np.int(sys.argv[1])
theta_start = 0
theta_end = np.int(sys.argv[2])
name = sys.argv[4]
file_name = name + '.h5'
sino_start = 0
sino_end = 2048
flat_field_norm = True
flat_field_drift_corr = False  # Correct the intensity drift
remove_rings = True
binning = np.int(sys.argv[3])
######################################################################################################################


def preprocess_data(prj,
                    flat,
                    dark,
                    FF_norm=flat_field_norm,
                    remove_rings=remove_rings,
                    FF_drift_corr=flat_field_drift_corr,
                    downsapling=binning):

    if FF_norm:  # dark-flat field correction