Example #1
   def __call__(self, x):
      '''Interpolate at point [x].  Returns a 2-tuple (y, mask), where [y]
      is the interpolated value and [mask] is a boolean array with the same
      shape as [x]: True where interpolated, False where extrapolated.'''
      if not self.setup:
         self._setup()

      if len(num.shape(x)) < 1:
         scalar = True
      else:
         scalar = False

      x = num.atleast_1d(x)
      if self.realization:
         evm = num.atleast_1d(splev(x, self.realization))
         mask = num.greater_equal(x, self.realization[0][0])*\
                num.less_equal(x,self.realization[0][-1])
      else:
         evm = num.atleast_1d(splev(x, self.tck))
         mask = num.greater_equal(x, self.tck[0][0])*num.less_equal(x,self.tck[0][-1])

      if scalar:
         return evm[0],mask[0]
      else:
         return evm,mask
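The mask construction above checks each evaluation point against the spline's knot range. A minimal standalone sketch of the same idea, assuming SciPy's splrep/splev and made-up sample data:

import numpy as np
from scipy.interpolate import splrep, splev

xs = np.linspace(0.0, 10.0, 20)
tck = splrep(xs, np.sin(xs))          # (knots, coefficients, degree)

x = np.array([-1.0, 2.5, 11.0])
y = splev(x, tck)
# True where x lies inside the knot range (interpolated), False where extrapolated
mask = np.greater_equal(x, tck[0][0]) * np.less_equal(x, tck[0][-1])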
Example #2
def curveScore(l, curve):
    lb = l["TextBB"]
    elbl = [lb[0] - 20, lb[1], lb[0], lb[3]]
    elbr = [lb[0], lb[1], lb[2] + 20, lb[3]]
    cindex = int(curve[0])
    cdata = curve[1]
    # img=cdata[elb[1]:elb[3],elb[0]:elb[2]]
    imgl = cdata[elbl[1] : elbl[3], elbl[0] : elbl[2]]
    imgr = cdata[elbr[1] : elbr[3], elbr[0] : elbr[2]]

    # print cindex,l['Text']
    # show_img(imgl)
    # show_img(imgr)
    # points from the rectangle to the left and right of the legend word that are not white or black pixels.
    lnzps = np.where(np.logical_and(np.greater_equal(imgl[:, :, 1], 0.06), np.greater_equal(imgl[:, :, 2], 0.1)))
    rnzps = np.where(np.logical_and(np.greater_equal(imgr[:, :, 1], 0.06), np.greater_equal(imgr[:, :, 2], 0.1)))

    if len(lnzps[0]) == 0 and len(rnzps[0]) == 0:  # this means for this legend, we did not find a single pixel from the
        # curve that is to the left or right of it.
        # print l['Text'],"has no points to the left or right for",cindex
        return (None, None, None)
    elif len(lnzps[0]) != 0 and len(rnzps[0]) == 0:
        # print l['Text'],"has curve",cindex,"to the left of it, distance:",100-np.sort(lnzps[0])[-1]
        return (cindex, "l", 20 - np.sort(lnzps[0])[-1])
    elif len(lnzps[0]) == 0 and len(rnzps[0]) != 0:
        # print l['Text'],"has curve",cindex,"to the right of it, distance:",np.sort(rnzps[0])[0]
        return (cindex, "r", np.sort(rnzps[0])[0])
    else:  # this means some points from this curve belong to both the left and the right of the legend. That is improbable.
        print "Something wrong, a single curve has pixels on both sides of the image"
        return (None, None, None)
Example #3
def parallel_point_test(center,dim,x,y,z):
    '''
    Overview:
        Determines whether a given point is in a parallelepiped given the point
    being tested and the relevant parameters.


    Parameters:

    center:(float,[3]|angstroms) = The coordinates of the center of the
    parallelepiped. This parameter is in the form (x center, y center, z center)

    dim:(float,[3]|angstroms) = The x, y and z dimensions of the parallelepiped
    object.

    x,y,z:(float|angstroms) = coordinates for the point being tested.


    Note:
    -The API is left intentionally independent of the class structures used in
    sample_prep.py to allow for code reusability.

    '''

    low_lim = (array(center) - (array(dim)/2.0))
    high_lim = (array(center) +(array(dim)/2.0))

    height_lim = greater_equal (z,low_lim[2])*less_equal (z,high_lim[2])
    length_lim = greater_equal (y,low_lim[1])*less_equal (y,high_lim[1])
    width_lim = greater_equal (x,low_lim[0])*less_equal (x,high_lim[0])

    test_results = height_lim * length_lim * width_lim

    return test_results
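For reference, a hedged usage sketch of parallel_point_test above; it assumes the snippet's module imports array, greater_equal and less_equal directly from numpy, and the grid values here are arbitrary:

import numpy as np

# broadcastable coordinate axes, so the three comparisons combine into a 3-D mask
x = np.linspace(-5, 5, 11)[:, None, None]
y = np.linspace(-5, 5, 11)[None, :, None]
z = np.linspace(-5, 5, 11)[None, None, :]
inside = parallel_point_test(center=[0.0, 0.0, 0.0], dim=[4.0, 4.0, 4.0], x=x, y=y, z=z)
print(inside.shape, inside.sum())     # boolean mask of the points inside the box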
Example #4
def plotCurves(c1, c2):
    name1, t, avg1, top1, bottom1 = c1
    name2, t, avg2, top2, bottom2 = c2
    pl.plot(t, np.zeros(len(t)), 'k-')
    s1 = ma.array(avg1)
    s2 = ma.array(avg2)
    zx1 = np.logical_and(np.greater_equal(top1, 0), np.less_equal(bottom1, 0))
    zx2 = np.logical_and(np.greater_equal(top2, 0), np.less_equal(bottom2, 0))
    ix = np.logical_or(
            np.logical_and(
                np.greater_equal(top1, top2),
                np.less_equal(bottom1, top2)),
            np.logical_and(
                np.greater_equal(top1, bottom2),
                np.less_equal(bottom1, bottom2)))
    mask1 = np.logical_or(zx1, ix)
    mask2 = np.logical_or(zx2, ix)

    print mask1
    print mask2
    print zx1
    print zx2
    print ix

    pl.plot(t, s1, "k--", linewidth=1)
    pl.plot(t, s2, "k-", linewidth=1)
    s1.mask = ix
    s2.mask = ix
    pl.plot(t, s1, "k--", linewidth=3, label=name1)
    pl.plot(t, s2, "k-", linewidth=3, label=name2)
    pl.xlabel('Time (secs)')
    pl.ylabel("Pearson correlation")
Example #5
File: dm15temp.py  Project: obscode/snpy
   def eval(self, band, times, z=0, k=1):
      '''Evaluate, using a spline, the value of the template at specific
      times, optionally with a redshift (in the sense that the times should
      be blueshifted before interpolating).  Also returns a mask indicating
      the interpolated points (1) and the extrapolated points (0).'''
      if len(num.shape(times)) == 0:
         evt = num.array([times/(1+z)])
         scalar = 1
      else:
         evt = times/(1+z)
         scalar = 0
      if band not in self.__dict__ and band not in ['J','H','K']:
         raise AttributeError, "Sorry, band %s is not supported by dm15temp" % \
               band
      s = dm152s(self.dm15)
      if band == 'J':
         return(0.080 + evt/s*0.05104699 + 0.007064257*(evt/s)**2 - 0.000257906*(evt/s)**3,
               0.0*evt/s + 0.06, num.greater_equal(evt/s, -12)*num.less_equal(evt/s, 10)) 
      elif band == 'H':
         return(0.050 + evt/s*0.0250923 + 0.001852107*(evt/s)**2 - 0.0003557824*(evt/s)**3,
               0.0*evt/s + 0.08, num.greater_equal(evt/s, -12)*num.less_equal(evt/s, 10)) 
      elif band == 'K':
         return(0.042 + evt/s*0.02728437+ 0.003194500*(evt/s)**2 - 0.0004139377*(evt/s)**3,
               0.0*evt/s + 0.08, num.greater_equal(evt/s, -12)*num.less_equal(evt/s, 10)) 
      evd = self.tck[band].ev(evt/self.s, evt*0+self.dm15)
      eevd = self.tck['e_'+band].ev(evt/self.s, evt*0+self.dm15)
      mask = num.greater_equal(evt/self.s, -10)*num.less_equal(evt/self.s,70)

      if scalar:
         return(evd[0], eevd[0], mask[0])
      else:
         return(evd, eevd, mask)
Example #6
def radial_contrast_flr(image, xc, yc, seps, zw, coron_thrupt, klip_thrupt=None):
    rad_flr_ctc = np.empty((len(seps)))
    assert(len(seps) == len(coron_thrupt))
    if klip_thrupt is not None:
        assert(len(seps) == len(klip_thrupt))
        rad_flr_ctc_ktc = np.empty((len(seps)))
    else:
        rad_flr_ctc_ktc = None

    imh = image.shape[0]
    imw = image.shape[1]

    xs = np.arange(imw) - xc
    ys = np.arange(imh) - yc
    XXs, YYs = np.meshgrid(xs, ys)
    RRs = np.sqrt(XXs**2 + YYs**2)

    for si, sep in enumerate(seps):
        r_in = np.max([seps[0], sep-zw/2.])
        r_out = np.min([seps[-1], sep+zw/2.])
        meas_ann_mask = np.logical_and(np.greater_equal(RRs, r_in),
                                          np.less_equal(RRs, r_out))
        meas_ann_ind = np.nonzero(np.logical_and(np.greater_equal(RRs, r_in).ravel(),
                                                    np.less_equal(RRs, r_out).ravel()))[0]
        meas_ann = np.ravel(image)[meas_ann_ind]
        rad_flr_ctc[si] = np.nanstd(meas_ann)/coron_thrupt[si]
        if rad_flr_ctc_ktc is not None:
            rad_flr_ctc_ktc[si] = np.nanstd(meas_ann)/coron_thrupt[si]/klip_thrupt[si]

    #pdb.set_trace()
    return rad_flr_ctc, rad_flr_ctc_ktc
Example #7
def getMaxPoints(arr):
    # [TODO] Work out for RGB rather than array, and maybe we don't need the filter, but hopefully speeds it up.
    # Reference http://scipy-cookbook.readthedocs.io/items/FiltFilt.html
    arra = filtfilt(b,a,arr)
    maxp = maxpoints(arra, order=(len(arra)/20), mode='wrap')
    minp = minpoints(arra, order=(len(arra)/20), mode='wrap')

    points = []

    for i in range(3):
        mas = np.equal(np.greater_equal(maxp,(i*(len(arra)/3))), np.less_equal(maxp,((i+1)*len(arra)/3)))
        k = np.compress(mas[0], maxp)
        if len(k)==0:
            continue
        points.append(sum(k)/len(k))

    if len(points) == 1:
        return points, []

    points = np.compress(np.greater_equal(arra[points],(max(arra)-min(arra))*0.40 + min(arra)),points)
    rifts = []
    for i in range(len(points)-1):
        mas = np.equal(np.greater_equal(minp, points[i]),np.less_equal(minp,points[i+1]))
        k = np.compress(mas[0], minp)
        rifts.append(k[arra[k].argmin()])

    return points, rifts
Example #8
def rel_coron_thrupt(Pmod, ref_pos):
    # Given 2-d off-axis PSF model over a set of positions,
    # compute the FWHM throughput relative to the off-axis PSF
    # at a reference (presumably peak throughput) location.
    coron_thrupt = np.empty(Pmod.shape[:-2])

    Pref = Pmod[ref_pos]
    ref_peak = np.max(Pref)
    ref_fwhm_ind = np.greater_equal(Pref, ref_peak/2)
    ref_fwhm_sum = np.sum(Pref[ref_fwhm_ind])

    if len(coron_thrupt.shape) == 2:
        for si in range(coron_thrupt.shape[0]):
            for ti in range(coron_thrupt.shape[1]):
                P = Pmod[si,ti]
                fwhm_ind = np.greater_equal(P, np.max(P)/2)
                fwhm_sum = np.sum(P[fwhm_ind])
                coron_thrupt[si,ti] = fwhm_sum/ref_fwhm_sum

    elif len(coron_thrupt.shape) == 1:
        for ti in range(coron_thrupt.shape[0]):
            P = Pmod[ti]
            fwhm_ind = np.greater_equal(P, np.max(P)/2)
            fwhm_sum = np.sum(P[fwhm_ind])
            coron_thrupt[ti] = fwhm_sum/ref_fwhm_sum

    return coron_thrupt
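The half-maximum selection used above is a plain boolean threshold. A small self-contained illustration with a synthetic Gaussian PSF (the grid size and width are arbitrary):

import numpy as np

yy, xx = np.mgrid[-32:33, -32:33]
psf = np.exp(-(xx**2 + yy**2) / (2 * 4.0**2))       # synthetic off-axis PSF
fwhm_ind = np.greater_equal(psf, np.max(psf) / 2)   # pixels at or above half maximum
fwhm_sum = np.sum(psf[fwhm_ind])
print(fwhm_ind.sum(), fwhm_sum)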
Example #9
def filter(mask, cube, header, clipMethod, threshold, rmsMode, verbose):
    if clipMethod == 'relative':
        # determine the clip level: measure noise in the original cube
        # rms = GetRMS(cube,rmsmode=rmsMode,zoomx=1,zoomy=1,zoomz=100000,verb=verbose,nrbins=100000)
        rms = GetRMS(cube, rmsMode=rmsMode, zoomx=1, zoomy=1, zoomz=1, verbose=verbose)
        print 'Estimated rms = ', rms
        clip = threshold * rms
    if clipMethod == 'absolute':
        clip = threshold
    print 'using clip threshold: ', clip
    # return ((cube >= clip)+(cube <= -1*clip))

   
    # check whether there are NaNs
    nan_mask = np.isnan(cube)
    found_nan=nan_mask.sum()
    if found_nan:
        cube=np.nan_to_num(cube)
        np.logical_or(mask, (np.greater_equal(cube, clip) + np.less_equal(cube, -clip)), mask)
        cube[nan_mask]=np.nan
    else:
        np.logical_or(mask, (np.greater_equal(cube, clip) + np.less_equal(cube, -clip)), mask)

	
    return 
Example #10
def _calc_uncorr_gene_score(gene, input_gene, input_snp, pruned_snps, hotspots):
    # find local snps given a gene
    # logical_and is binary (a third argument would be treated as 'out'),
    # so the three conditions are combined with reduce
    cond_snps_near_gene = logical_and.reduce((np.equal(input_snp[:, 0], input_gene[gene, 0]),
                                              np.greater_equal(input_snp[:, 1], input_gene[gene, 1]),
                                              np.less_equal(input_snp[:, 1], input_gene[gene, 2])))
    # if no snps found
    if not np.any(cond_snps_near_gene):
        return (np.nan, 0, 1, 0, 0)

    n_snps_zscore_finite = np.sum(np.isfinite(input_snp[cond_snps_near_gene][:, 3]))
    # if no snps with finite zcore
    if n_snps_zscore_finite == 0:
        return (np.nan, 0, 1, 0, 0)

    n_snps_per_gene = n_snps_zscore_finite

    # use p-value to find most significant SNP
    idx_min_pval = np.nanargmin(input_snp[cond_snps_near_gene][:, 3])

    uncorr_score = input_snp[cond_snps_near_gene][idx_min_pval, 2]

    # count number of independent SNPs per gene
    n_indep_snps_per_gene = np.sum(logical_and.reduce((np.equal(pruned_snps[:, 0], input_gene[gene, 0]),
                                                       np.greater_equal(pruned_snps[:, 1], input_gene[gene, 1]),
                                                       np.less_equal(pruned_snps[:, 1], input_gene[gene, 2]))))

    # count number of hotspots per gene
    n_hotspots_per_gene = np.sum(np.logical_and(np.equal(hotspots[:, 0], input_gene[gene, 0]),
                                                np.greater(np.fmin(hotspots[:, 2], input_gene[gene, 2])
                                                           - np.fmax(hotspots[:, 1], input_gene[gene, 1]), 0)))
    return (uncorr_score, n_snps_per_gene, 0, n_indep_snps_per_gene, n_hotspots_per_gene)
Example #11
def minima_in_range(r, g_r, r_min, r_max):
    """Find the minima in a range of r, g_r values"""
    idx = np.where(np.logical_and(np.greater_equal(r, r_min), np.greater_equal(r_max, r)))
    g_r_slice = g_r[idx]
    g_r_min = g_r_slice[g_r_slice.argmin()]
    idx_min, _ = find_nearest(g_r, g_r_min)
    return r[idx_min], g_r[idx_min]
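minima_in_range relies on the two-sided greater_equal mask to pick out the r window; the same idiom in isolation, with synthetic data:

import numpy as np

r = np.linspace(0.1, 10.0, 500)
g_r = 1.0 + np.exp(-r) * np.cos(4 * r)
# indices where r_min <= r <= r_max, written with two greater_equal calls as above
idx = np.where(np.logical_and(np.greater_equal(r, 2.0), np.greater_equal(6.0, r)))
print(r[idx].min(), r[idx].max())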
Example #12
File: patternfn.py  Project: ioam/imagen
def ring(x, y, height, thickness, gaussian_width):
    """
    Circular ring (annulus) with Gaussian fall-off after the solid ring-shaped region.
    """
    radius = height/2.0
    half_thickness = thickness/2.0

    distance_from_origin = np.sqrt(x**2+y**2)
    distance_outside_outer_disk = distance_from_origin - radius - half_thickness
    distance_inside_inner_disk = radius - half_thickness - distance_from_origin

    ring = 1.0-np.bitwise_xor(np.greater_equal(distance_inside_inner_disk,0.0),
                              np.greater_equal(distance_outside_outer_disk,0.0))

    sigmasq = gaussian_width*gaussian_width

    if sigmasq==0.0:
        inner_falloff = x*0.0
        outer_falloff = x*0.0
    else:
        with float_error_ignore():
            inner_falloff = np.exp(np.divide(-distance_inside_inner_disk*distance_inside_inner_disk, 2.0*sigmasq))
            outer_falloff = np.exp(np.divide(-distance_outside_outer_disk*distance_outside_outer_disk, 2.0*sigmasq))

    return np.maximum(inner_falloff,np.maximum(outer_falloff,ring))
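The solid annulus above comes from XOR-ing two greater_equal masks; a standalone illustration of just that step (grid and radii are arbitrary, Gaussian fall-off omitted):

import numpy as np

x, y = np.meshgrid(np.linspace(-1, 1, 201), np.linspace(-1, 1, 201))
radius, half_thickness = 0.5, 0.1
d = np.sqrt(x**2 + y**2)
distance_outside_outer_disk = d - radius - half_thickness
distance_inside_inner_disk = radius - half_thickness - d
# 1.0 inside the ring band, 0.0 inside the inner disk and outside the outer disk
ring = 1.0 - np.bitwise_xor(np.greater_equal(distance_inside_inner_disk, 0.0),
                            np.greater_equal(distance_outside_outer_disk, 0.0))
print(ring.sum())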
Example #13
File: patternfn.py  Project: ioam/imagen
def arc_by_radian(x, y, height, radian_range, thickness, gaussian_width):
    """
    Radial arc with Gaussian fall-off after the solid ring-shaped
    region with the given thickness, with shape specified by the
    (start,end) radian_range.
    """

    # Create a circular ring (copied from the ring function)
    radius = height/2.0
    half_thickness = thickness/2.0

    distance_from_origin = np.sqrt(x**2+y**2)
    distance_outside_outer_disk = distance_from_origin - radius - half_thickness
    distance_inside_inner_disk = radius - half_thickness - distance_from_origin

    ring = 1.0-np.bitwise_xor(np.greater_equal(distance_inside_inner_disk,0.0),
                              np.greater_equal(distance_outside_outer_disk,0.0))

    sigmasq = gaussian_width*gaussian_width

    if sigmasq==0.0:
        inner_falloff = x*0.0
        outer_falloff = x*0.0
    else:
        with float_error_ignore():
            inner_falloff = np.exp(np.divide(-distance_inside_inner_disk*distance_inside_inner_disk, 2.0*sigmasq))
            outer_falloff = np.exp(np.divide(-distance_outside_outer_disk*distance_outside_outer_disk, 2.0*sigmasq))

    output_ring = np.maximum(inner_falloff,np.maximum(outer_falloff,ring))

    # Calculate radians (in 4 phases) and cut according to the set range

    # RZHACKALERT:
    # Function float_error_ignore() cannot catch the exception when
    # both the dividend and divisor are 0.0, and when only the divisor is 0.0
    # it returns 'Inf' rather than 0.0. In x, y and
    # distance_from_origin, only one point in distance_from_origin can
    # be 0.0 (circle center) and in this point x and y must be 0.0 as
    # well. So here is a hack to avoid the 'invalid value encountered
    # in divide' error by turning 0.0 to 1e-5 in distance_from_origin.
    distance_from_origin += np.where(distance_from_origin == 0.0, 1e-5, 0)

    with float_error_ignore():
        sines = np.divide(y, distance_from_origin)
        cosines = np.divide(x, distance_from_origin)
        arcsines = np.arcsin(sines)

    phase_1 = np.where(np.logical_and(sines >= 0, cosines >= 0), 2*pi-arcsines, 0)
    phase_2 = np.where(np.logical_and(sines >= 0, cosines <  0), pi+arcsines,   0)
    phase_3 = np.where(np.logical_and(sines <  0, cosines <  0), pi+arcsines,   0)
    phase_4 = np.where(np.logical_and(sines <  0, cosines >= 0), -arcsines,     0)
    arcsines = phase_1 + phase_2 + phase_3 + phase_4

    if radian_range[0] <= radian_range[1]:
        return np.where(np.logical_and(arcsines >= radian_range[0], arcsines <= radian_range[1]),
                        output_ring, 0.0)
    else:
        return np.where(np.logical_or(arcsines >= radian_range[0], arcsines <= radian_range[1]),
                        output_ring, 0.0)
Example #14
def cone_point_test(center,dim,stub,x,y,z):
    '''
    Overview:
        Determines whether a given point is in a cone given the point being
    tested and the relevant parameters.


    Parameters:

    center:(float,[3]|angstroms) = The x, y, and z component of the central
    point of the cone. In the case that the center is set to
    [None,None,None] the shape will be put in the bottom corner of the unit cell
    (the bounding box will start at (0,0,0)).

    dim:(float,[3]|angstroms) = The x component, y component and thickness
    of the cone respectively. x is the radius of the cone base in the x
    direction and y is the radius of the cone base in the y direction.

    stub:(float|angstroms) = provides a hard cut-off for the thickness of the
    cone. This allows for the creation of a truncated cone object whose side
    slope can be altered by using different z component values while keeping
    the stub parameter fixed.

    x,y,z:(float|angstroms) = coordinates for the point being tested.


    Notes:
    -To solve this equation more efficiently, the program takes in arrays of
    x, y and z so that x[size(x),1,1], y[1,size(y),1], z[1,1,size(z)]. This
    module then solves each part of the test individually and takes the product.
    Only the points where all of the inquiries are True will be left as True in
    the test_results array.

    -The API is left intentionally independent of the class structures used in
    sample_prep.py to allow for code reusability.

    '''

    a_angle = arctan(dim[2]/dim[0])
    b_angle = arctan(dim[2]/dim[1])

    low_height_lim = greater_equal (z,(center[2] - dim[2]/2))

    if stub == None:
        up_height_lim =  less_equal (z,(center[2] + dim[2]/2))
    else:
        up_height_lim =  less_equal (z,(center[2] + stub/2))

    xy_test = ((((x-center[0])**2)/((((center[2] +
           dim[2]/2)-z)/tan(a_angle))**2))+(((y-center[1])**2)/((((center[2] +
           dim[2]/2)-z)/tan(b_angle))**2)))

    in_plane_low_lim = less_equal (0.0,xy_test)
    in_plane_high_lim = greater_equal (1.0,xy_test)

    test_results = (low_height_lim * up_height_lim * in_plane_low_lim *
                    in_plane_high_lim)

    return test_results
Example #15
 def __ge__(a, b):
     try:
         return np.greater_equal(a.v, b.v)
     except AttributeError:
         if isinstance(a, Measurement):
             return np.greater_equal(a.v, b)
         else:
             return np.greater_equal(a, b.v)
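A minimal sketch of how such a __ge__ behaves, assuming a toy Measurement wrapper with a .v attribute (the real class is not shown in the snippet):

import numpy as np

class Measurement:
    def __init__(self, v):
        self.v = np.asarray(v)

    def __ge__(a, b):
        try:
            return np.greater_equal(a.v, b.v)
        except AttributeError:
            if isinstance(a, Measurement):
                return np.greater_equal(a.v, b)
            else:
                return np.greater_equal(a, b.v)

m = Measurement([1.0, 2.0, 3.0])
print(m >= Measurement([2.0, 2.0, 2.0]))  # element-wise comparison of the wrapped arrays
print(m >= 1.5)                           # falls back to comparing the wrapped array with a scalar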
Example #16
def gradient_to_spherical(gx,gy):
	"""
	This function converts gradient coordinates of the
	reflector into spherical coordinates of reflected rays
	on the unit sphere S2.
	
	Parameters
	----------
	gx : 1D array
		Gradient coordinates along the x axis
	gy : 1D array
		Gradient coordinates along the y axis
	
	Returns
	-------
	theta : 1D array
		Inclination angles (with respect to the
		positive z axis). 0 <= theta <= pi
	phi : 1D array
		Azimuthal angles (projection of a direction
		in z=0 plane with respect to the x axis).
		0 <= phi <= 2pi
		
	See Also
	--------
	Inverse Methods for Illumination Optics, Corien Prins
	"""
	try:
		if len(gx.shape) > 1 or len(gy.shape) > 1:
			raise NotProperShapeError("gx and gy must be 1D arrays.")
		
		if gx.shape != gy.shape:
			raise NotProperShapeError("gx and gy must have the same length.")
			
		# theta computation
		num = gx*gx + gy*gy - 1
		denom = gx*gx + gy*gy + 1
		theta = np.arccos(num/denom)

		# phi computation
		zero = np.zeros(gx.shape)
		phi = np.zeros(gx.shape)
		J = np.logical_and(np.greater_equal(gx,zero),np.greater_equal(gy,zero))
		phi[J] = np.arctan(gy[J]/gx[J])
			
		J = np.less(gx, zero)
		phi[J] = np.arctan(gy[J]/gx[J]) + np.pi
			
		J = np.logical_and(np.greater_equal(gx, zero), np.less(gy, zero))
		phi[J] = np.arctan(gy[J]/gx[J]) + 2*np.pi
			
		return theta, phi
		
	except FloatingPointError:
		print("****gradient_to_spherical error: division by zero.")
		
	except NotProperShapeError, arg:
		print("****gradient_to_spherical error: ", arg.msg)
Example #17
def threshScore(reg_op, ref):
    if (reg_op.shape != ref.shape):
        raise ValueError("Scan shapes must be identical to compare.")
    threshold = 10
    binary_reg_op = np.greater_equal(reg_op,threshold)
    binary_ref = np.greater_equal(ref,threshold)
    boundary_match = np.equal(binary_reg_op,binary_ref)
    score = float(np.sum(boundary_match)) / boundary_match.size
    return score
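A quick hedged usage of threshScore on synthetic volumes (the threshold of 10 is the one hard-coded above; the data here are made up):

import numpy as np

rng = np.random.default_rng(0)
ref = rng.uniform(0, 20, size=(64, 64))
reg_op = ref + rng.normal(0, 1, size=(64, 64))
print(threshScore(reg_op, ref))  # fraction of voxels whose >=10 classification agrees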
Example #18
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = self.high
        weights = weights.copy()
        weights[selection] = 0.0

        numpy.greater_equal(q, self.low, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.underflow._numpy(data, subweights, shape)

        numpy.less(q, self.high, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.overflow._numpy(data, subweights, shape)

        if all(isinstance(value, Count) and value.transform is identity for value in self.values) and numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):
            # Numpy defines histograms as including the upper edge of the last bin only, so drop that
            weights[q == self.high] = 0.0

            h, _ = numpy.histogram(q, self.num, (self.low, self.high), weights=weights)

            for hi, value in zip(h, self.values):
                value.fill(None, float(hi))

        else:
            q = numpy.array(q, dtype=numpy.float64)
            numpy.subtract(q, self.low, q)
            numpy.multiply(q, self.num, q)
            numpy.divide(q, self.high - self.low, q)
            numpy.floor(q, q)
            q = numpy.array(q, dtype=int)

            for index, value in enumerate(self.values):
                numpy.not_equal(q, index, selection)
                subweights[:] = weights
                subweights[selection] = 0.0
                value._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
Example #19
def ts_increments(ts, monotony = 'increasing', max_value = None, reset_value = 0.):

    '''Return a time series with the increments registered in the
        input time series

    .. arguments:
    - (list) ts: pandas DataFrame containing a time series
    - (string) monotony: increasing / decreasing / non_monotonous
    - (float) max_value: value from which the meter is reset
    - (float) reset_value: value to which the meter is reset

    .. returns:
    - on success: time series of increments. The output time series contains
        one value less than the original one. The difference between
        two values is assigned to the epoch of the second one.'''

    new_ts = ts_to_float(ts)

    if 'error' in new_ts:
        return new_ts

    if len(new_ts) <= 1:
        return {'error': 'timeserie must have length greater than 1 to compute increments'}

    if max_value != None:
        try:
            max_value = float(max_value)
        except:
            return {'error': 'max_value is not a number'}

    try:
        reset_value = float(reset_value)
    except:
        return {'error': 'reset_value is not a number'}

    if monotony == 'increasing':
        if not np.greater_equal(new_ts['value'], reset_value).all():
            return {'error': 'value lower than reset_value'}
        elif max_value and not np.less_equal(new_ts['value'], max_value).all():
            return {'error': 'value greater than max_value'}
    elif monotony == 'decreasing':
        if not np.less_equal(new_ts['value'].values, reset_value).all():
            return {'error': 'value greater than reset value'}
        elif max_value and not np.greater_equal(new_ts['value'], max_value).all():
            return {'error': 'value lower than max_value'}

    new_ts['old_value'] = new_ts['value'].shift()

    new_ts = new_ts.drop(new_ts.index[0])

    new_ts['increments'] = new_ts.apply(single_inc, axis = 1, monotony = monotony, \
        max_value = max_value, reset_value = reset_value)

    output_ts = pd.DataFrame()
    output_ts['value'] = new_ts['increments']

    return output_ts
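The monotony checks above only verify that every sample stays inside [reset_value, max_value]; the same validation in isolation, with a toy meter series:

import numpy as np
import pandas as pd

new_ts = pd.DataFrame({'value': [0.0, 3.5, 7.2, 9.9]})
reset_value, max_value = 0.0, 10.0
ok = (np.greater_equal(new_ts['value'], reset_value).all()
      and np.less_equal(new_ts['value'], max_value).all())
print(ok)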
Example #20
    def function(self,E) :
        """ Calculates the number of counts in barns"""
        
        if self.delta.value != self._previous_delta:
            self._previous_delta = copy.copy(self.delta.value)
            self.integrategos(self.delta.value)
            self.calculate_knots()

        if self._previous_effective_angle != self.effective_angle.value:
            self.integrategos()
            
        factor = 4.0 * np.pi * a0 ** 2.0 * R**2 / E / self.T #to convert to m**2/bin
        Emax = self.energyaxis[-1] + self.edgeenergy + \
        self.delta.value #maximum tabulated energy
        cts = np.zeros((len(E)))
        
        if self.fs_state is True:
            if self.__knots[-1] > Emax : Emax = self.__knots[-1]
            fine_structure_indices=np.logical_and(np.greater_equal(E, 
            self.edgeenergy+self.delta.value), 
            np.less(E, self.edgeenergy + self.delta.value + self.fs_emax))
            tabulated_indices = np.logical_and(np.greater_equal(E, 
            self.edgeenergy + self.delta.value + self.fs_emax), 
            np.less(E, Emax))
            if self.fs_mode == "new_spline" :
                cts = np.where(fine_structure_indices, 
                1E-25*splev(E,(self.__knots,self.fslist.value,3),0), cts)
            elif self.fs_mode == "spline" :
                cts = np.where(fine_structure_indices, 
                cspline1d_eval(self.fslist.value, 
                E, 
                dx = self.energy_scale / self.knots_factor, 
                x0 = self.edgeenergy+self.delta.value), 
                cts)
            elif self.fs_mode == "spline_times_edge" :
                cts = np.where(fine_structure_indices, 
                factor*splev((E-self.edgeenergy-self.delta.value), 
                self.__goscoeff)*cspline1d_eval(self.fslist.value, 
                E,dx = self.energy_scale / self.knots_factor, 
                x0 = self.edgeenergy+self.delta.value), 
                cts )
        else:
            tabulated_indices = np.logical_and(np.greater_equal(E, 
            self.edgeenergy + self.delta.value), np.less(E, Emax))            
        powerlaw_indices = np.greater_equal(E,Emax)  
        cts = np.where(tabulated_indices, 
        factor * splev((E-self.edgeenergy-self.delta.value), 
        self.__goscoeff),
         cts)
        
        # Convert to barns/dispersion.
        #Note: The R factor is introduced in order to give the same value
        # as DM, although it is not in the equations.
        cts = np.where(powerlaw_indices, self.A * E**-self.r, cts) 
        return (self.__subshell_factor * self.intensity.value * self.energy_scale 
        * 1.0e28 / R) * cts       
Example #21
def size_stats(l,x, msg=""):
    '''l and x are lists'''
    print msg
    # sort sizes
    l.sort()

    ## get X values for NX stats and sort
    x.sort()

    ## Get N reads
    N = len(l)
    print "N = %d" % (N)

    ## Get sum of data
    A = sum(l)
    print "Total length = %d" % (A)

    ## Get max length:
    MAX = max(l)
    print "Max length = %d" % (MAX)

    ## Get min length:
    MIN = min(l)
    print "Min length = %d" % (MIN)

    ## Get mean length
    MEAN = np.mean(l)
    print "Mean length = %d" % (MEAN)

    ## Get median contig size
    MEDIAN = np.median(l)
    print "Median length = %d" % (MEDIAN)

    ## Get NX values
    nxvalues = NX(l,x,G=A)
    for e in x:
        print "N%s length = %d" % (str(e), nxvalues[e])

    ## expected read length
    E = e_size(l,G=A)
    print "Expected size = %d" % (E)

    ##number reads >= X 
    print "N reads >= 10kb:"
    print sum(np.greater_equal(l,50e3))
    print "N reads >= 25kb:"
    print sum(np.greater_equal(l,50e3))
    print "N reads >= 50kb:"
    print sum(np.greater_equal(l,50e3))
    print "N reads >= 75kb:"
    print sum(np.greater_equal(l,75e3))
    print "N reads >= 100kb:"
    print sum(np.greater_equal(l,100e3))
    ##TODO: also want size data from reads >=X
    ## ALSO in diff fxn if both length and Q available - do longest with Q>x, etc
    print
Example #22
def wp(data, wt, percentiles,cum=False): 
	"""Compute weighted percentiles. 
	If the weights are equal, this is the same as normal percentiles. 
	Elements of the C{data} and C{wt} arrays correspond to 
	each other and must have equal length (unless C{wt} is C{None}). 

	@param data: The data. 
	@type data: A L{numpy.ndarray} array or a C{list} of numbers. 
	@param wt: How important is a given piece of data. 
	@type wt: C{None} or a L{numpy.ndarray} array or a C{list} of numbers. 
		 All the weights must be non-negative and the sum must be 
		 greater than zero. 
	@param percentiles: what percentiles to use.  (Not really percentiles, 
		 as the range is 0-1 rather than 0-100.) 
	@type percentiles: a C{list} of numbers between 0 and 1. 
	@rtype: [ C{float}, ... ] 
	@return: the weighted percentiles of the data. 
	"""
	assert numpy.greater_equal(percentiles, 0.0).all(), "Percentiles less than zero" 
	assert numpy.less_equal(percentiles, 1.0).all(), "Percentiles greater than one" 
	data = numpy.asarray(data) 
	# data = numpy.reshape(data,(len(data)))
	assert len(data.shape) == 1 
	if wt is None: 
		 wt = numpy.ones(data.shape, numpy.float) 
	else: 
		 wt = numpy.asarray(wt, numpy.float) 
		 # wt = numpy.reshape(wt,(len(wt)))
		 assert wt.shape == data.shape 
		 assert numpy.greater_equal(wt, 0.0).all(), "Not all weights are non-negative." 
	assert len(wt.shape) == 1 
	n = data.shape[0] 
	assert n > 0 
	i = numpy.argsort(data) 
	sd = numpy.take(data, i, axis=0)
	sw = numpy.take(wt, i, axis=0) 
	aw = numpy.add.accumulate(sw) 
	if not aw[-1] > 0: 
		 raise ValueError("Nonpositive weight sum" )
	w = (aw-0.5*sw)/aw[-1] 
	spots = numpy.searchsorted(w, percentiles) 
	if cum:
		sd = numpy.add.accumulate(numpy.multiply(sd,sw))
	o = [] 
	for (s, p) in zip(spots, percentiles): 
		 if s == 0: 
				 o.append(sd[0]) 
		 elif s == n: 
				 o.append(sd[n-1]) 
		 else: 
				 f1 = (w[s] - p)/(w[s] - w[s-1]) 
				 f2 = (p - w[s-1])/(w[s] - w[s-1]) 
				 assert f1>=0 and f2>=0 and f1<=1 and f2<=1 
				 assert abs(f1+f2-1.0) < 1e-6 
				 o.append(sd[s-1]*f1 + sd[s]*f2) 
	return o 
Example #23
 def filterStates():
     for col in range(2, tableData.shape[1]):
         yMatch = numpy.greater_equal(tableData[:, col] == "Y", self.filterVals == "Y")
         nMatch = numpy.greater_equal(tableData[:, col] == "N", self.filterVals == "N")
         if numpy.all(numpy.logical_and(yMatch, nMatch)):
             for cell, tag in columns[col]:
                 cell.grid()
         else:
             for cell, tag in columns[col]:
                 cell.grid_remove()
Example #24
def next_step(state):
    neighbors = game_tmp
    ndimage.convolve(state, kernel, output=neighbors)
    np.greater_equal(neighbors, 2, out=game_bool1)
    np.less_equal(neighbors, 3, out=game_bool2)
    np.multiply(game_bool1, game_bool2, out=game_bool1)
    np.multiply(state, game_bool1, out=state)
    np.equal(neighbors, 3, out=game_bool1)
    np.add(state, game_bool1, out=state)
    np.clip(state + game_bool1, 0, 1, out=state)
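next_step above updates a Game-of-Life board entirely in place. A hedged driver sketch; the module-level buffers game_tmp, game_bool1, game_bool2 and the 3x3 kernel are assumptions, since the snippet only implies them:

import numpy as np
from scipy import ndimage

state = (np.random.default_rng(0).random((64, 64)) > 0.7).astype(np.int64)
kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
game_tmp = np.empty_like(state)                  # neighbour counts
game_bool1 = np.empty(state.shape, dtype=bool)   # survival / birth masks
game_bool2 = np.empty(state.shape, dtype=bool)

for _ in range(10):
    next_step(state)  # survive with 2-3 neighbours, birth with exactly 3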
Example #25
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = 0.0
        weights = weights.copy()
        weights[selection] = 0.0

        if all(isinstance(v, Count) and v.transform is identity for c, v in self.bins) and numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):

            h, _ = numpy.histogram(q, [float("-inf")] + [(c1 + c2)/2.0 for (c1, v1), (c2, v2) in zip(self.bins[:-1], self.bins[1:])] + [float("inf")], weights=weights)

            for hi, (c, v) in zip(h, self.bins):
                v.fill(None, float(hi))

        else:
            selection = numpy.empty(q.shape, dtype=numpy.bool)
            selection2 = numpy.empty(q.shape, dtype=numpy.bool)

            for index in xrange(len(self.bins)):
                if index == 0:
                    high = (self.bins[index][0] + self.bins[index + 1][0])/2.0
                    numpy.greater_equal(q, high, selection)

                elif index == len(self.bins) - 1:
                    low = (self.bins[index - 1][0] + self.bins[index][0])/2.0
                    numpy.less(q, low, selection)

                else:
                    low = (self.bins[index - 1][0] + self.bins[index][0])/2.0
                    high = (self.bins[index][0] + self.bins[index + 1][0])/2.0
                    numpy.less(q, low, selection)
                    numpy.greater_equal(q, high, selection2)
                    numpy.bitwise_or(selection, selection2, selection)

                subweights[:] = weights
                subweights[selection] = 0.0
                self.bins[index][1]._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
Example #26
def main():
    """
    universal function (ufunc)
    A function that applies an operation element-wise.
    The argument is an ndarray and the return value is an ndarray.
    """
    arr = np.arange(10)

    print_headline('arg is ndarray')
    print 'integer'
    print np.sqrt(arr)
    print np.exp(arr)
    print np.square(arr)
    print np.log10(arr)

    print ''
    print 'float'
    arr = np.array([-2, -1.6, -1.4, 0, 1.4, 1.6, 2])
    print np.sign(arr)
    print np.ceil(arr)
    print np.floor(arr)
    print np.rint(arr) # round
    print np.sin(arr) # cos, tan, arcxxx
    print np.logical_not([arr >= 1]) # and, or, xor

    print ''
    print 'NaN, inf'
    nan_inf = np.array([0, 1, np.NaN, np.inf, -np.inf])
    print nan_inf
    print np.isnan(nan_inf)
    print np.isinf(nan_inf)

    print_headline('args are ndarray')
    x = np.arange(8)
    y = np.arange(8)[::-1]
    print x, y
    print np.add(x, y)
    print np.subtract(x, y)
    print np.multiply(x, y)
    print np.divide(x, y)
    print np.floor_divide(x, y)
    print np.power(x, y)
    print np.mod(x, y)
    print np.maximum(x, y)
    print np.minimum(x, y)
    print np.greater_equal(x, y)

    print_headline('returns are ndarray')
    arr = np.random.randn(10)
    # divide integer ndarray and float ndarray
    modf = np.modf(arr)
    print arr
    print modf[0]
    print modf[1]
Example #27
def mat2gray(m):
  m = (np.asmatrix(m))
  Mmin = np.min(m)
  Mmax = np.max(m)
  I = np.zeros(np.shape(m))
  divisorMat = float(Mmax - Mmin) * (m - Mmin)
  if (np.max(divisorMat) > 0):
    I = np.add(I, np.multiply( np.logical_and( np.greater_equal(m, Mmin), np.less(m, Mmax)), (1 / float(Mmax - Mmin) * (m - Mmin)) ) )
  I = np.add(I, (np.greater_equal(m, Mmax)))

  return I
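mat2gray rescales a matrix linearly onto [0, 1]; a short hedged check with a toy matrix:

import numpy as np

m = np.array([[10.0, 20.0], [30.0, 40.0]])
print(mat2gray(m))  # 0 at the minimum, 1 at the maximum, linear in between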
Example #28
def thermal_argmax(prob_arr, temperature):
    prob_arr = np.log(prob_arr) / temperature
    prob_arr = np.exp(prob_arr) / np.sum(np.exp(prob_arr))
    print(prob_arr)
    if np.greater_equal(prob_arr.sum(), 1.0000000001):
        logging.warn('Probabilities to sample add to more than 1, {}'.
                     format(prob_arr.sum()))
        prob_arr = prob_arr / (prob_arr.sum() + .0000000001)
    if np.greater_equal(prob_arr.sum(), 1.0000000001):
        logging.warn('Probabilities to sample still add to more than 1')
    return np.argmax(np.random.multinomial(1, prob_arr, 1))
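thermal_argmax sharpens or flattens a probability vector before sampling from it; a hedged usage sketch, assuming numpy and logging are imported in the snippet's module:

import numpy as np

probs = np.array([0.1, 0.2, 0.3, 0.4])
print(thermal_argmax(probs, temperature=0.5))  # low temperature favours the largest entry
print(thermal_argmax(probs, temperature=2.0))  # high temperature samples more evenly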
Example #29
 def cellular_next_step(self, min_n, max_n, birth_n):
     self.old_state = self.state.copy()
     neighbors = self.tmp
     ndimage.convolve(self.state, self.kernel, output=neighbors)
     np.greater_equal(neighbors, min_n, out=self.cell_bounds1)
     np.less_equal(neighbors, max_n, out=self.cell_bounds2)
     np.multiply(self.cell_bounds1, self.cell_bounds2, out=self.cell_bounds1)
     np.multiply(self.state, self.cell_bounds1, out=self.state)
     np.equal(neighbors, birth_n, out=self.cell_bounds1)
     np.add(self.state, self.cell_bounds1, out=self.state)
     np.clip(self.state + self.cell_bounds1, 0, 1, out=self.state)
     return np.sum(self.old_state - self.state)**2
Example #30
def annealfxn(params, useparams, time, model, envlist, xpdata, xspairlist, lb, ub, norm=False, vardata=False, fileobj=None):
    ''' Feeder function for scipy.optimize.anneal
    '''
    #annlout = scipy.optimize.anneal(pysb.anneal_sundials.annealfxn, paramarr, 
    #                                args=(None, 20000, model, envlist, xpnormdata, 
    #                                [(2,1),(4,2),(7,3)], lb, ub, True, True), 
    #                                lower=lower, upper=upper, full_output=1)
    # sample anneal call full model:
    # params: parameters to be optimized, at their values for the given annealing step
    # lower,upper: arrays from get array function or something similar from getgenparambounds
    # lb, ub: lower bound and upper bound for function from getgenparambounds
    #
    # sample anneal call, optimization of some parameters
    #   annlout = scipy.optimize.anneal(pysb.anneal_sundials.annealfxn, smacprm, args=(smacnum, 25000, model, envlist, xpdata,
    #            [(2,2), (3,3)], lower=lower, upper=upper, full_output=1)
    #
    # sample anneal call, optimization for ALL parameters
    # 
    #

    
    if numpy.greater_equal(params, lb).all() and numpy.less_equal(params, ub).all():
        print("Integrating...")
        outlist = annlodesolve(model, time, envlist, params, useparams)
        # specify that this is normalized data
        if norm is True:
            print("Normalizing data")
            datamax = numpy.max(outlist[0], axis = 1)
            datamin = numpy.min(outlist[0], axis = 1)
            outlistnorm = ((outlist[0].T - datamin)/(datamax-datamin)).T
            # xpdata[0] should be time, get from original array
            outlistnorm[0] = outlist[0][0].copy()
            # xpdata here is normalized, and so is outlistnorm
            objout = compare_data(xpdata, outlistnorm, xspairlist, vardata)
        else:
            objout = compare_data(xpdata, outlist[0], xspairlist, vardata)
    else:
        print("======>VALUE OUT OF BOUNDS NOTED")
        temp = numpy.where((numpy.logical_and(numpy.greater_equal(params, lb), numpy.less_equal(params, ub)) * 1) == 0)
        for i in temp:
            print("======>",i, params[i])
        objout = 1.0e300 # the largest FP in python is 1.0e308, otherwise it is just Inf

    # save the params and temps for analysis
    # FIXME If a parameter is out of bounds, outlist and outlistnorm will be undefined and this will cause an error
    if fileobj:
        if norm:
            writetofile(fileobj, params, outlistnorm, objout)
        else:
            writetofile(fileobj, params, outlist, objout)
    
    return objout
Example #31
np.median(x)  #median values in full dataset
np.max(x)  #max
max(x)  #will not work, as x is multi-dimensional
max([1,2,3])  #this will work
np.min(x) #min

#%%  More Functions
x=np.random.randint(30,50, size=200000)
x=np.array([30,49,50,60, 49])
np.equal(x, 49) #all values equal to 49
np.sum(np.equal(x,49))
np.greater(x, 40) #values greater than 40
np.sum(np.greater(x,40))  #how many values > 40
sum(np.greater(x,40))
np.less(x, 50)  #values < 50
np.greater_equal(x, 40)  #values >= 40
x < 40 #another way, returns True/False
np.sum(x < 49)  #how many values < 49
x
np.sum(x < 40, axis=0)  #in each col, values < 40

x=np.random.randint(10, size=(3,4))
x
np.all(x > 4)
np.any(x > 4)
np.sum(x > 1)
np.sum(x > 3, axis=1)
np.sum(x > 3, axis=0)
np.sum( (x> 3) & (x < 7), axis=0)
np.sum( ~((x> 3) & (x < 7)), axis=0)
Example #32
def getLBP3DImage(inputImage, inputMask, **kwargs):
  """
  Compute and return the Local Binary Pattern (LBP) in 3D using spherical harmonics.
  If ``force2D`` is set to true (= feature extraction in 2D) a warning is logged.

  LBP is only calculated for voxels segmented in the mask

  Following settings are possible:

    - ``lbp3DLevels`` [2]: integer, specifies the number of levels in spherical harmonics to use.
    - ``lbp3DIcosphereRadius`` [1]: Float, specifies the radius in which the neighbours should be sampled
    - ``lbp3DIcosphereSubdivision`` [1]: Integer, specifies the number of subdivisions to apply in the icosphere

  :return: Yields LBP filtered image for each level, 'lbp-3D-m<level>' and ``kwargs`` (customized settings).
           Additionally yields the kurtosis image, 'lbp-3D-k' and ``kwargs``.

  .. note::
    LBP can often return only a very small number of different gray levels. A customized bin width is often needed.
  .. warning::
    Requires package ``scipy`` and ``trimesh`` to function. If not available, this filter logs a warning and does not
    yield an image.

  References:

  - Banerjee, J, Moelker, A, Niessen, W.J, & van Walsum, T.W. (2013), "3D LBP-based rotationally invariant region
    description." In: Park JI., Kim J. (eds) Computer Vision - ACCV 2012 Workshops. ACCV 2012. Lecture Notes in Computer
    Science, vol 7728. Springer, Berlin, Heidelberg. doi:10.1007/978-3-642-37410-4_3
  """
  global logger
  try:
    from scipy.stats import kurtosis
    from scipy.ndimage.interpolation import map_coordinates
    from scipy.special import sph_harm
    from trimesh.creation import icosphere
  except ImportError:
    logger.warning('Could not load required package "scipy" or "trimesh", cannot implement filter LBP 3D')
    return

  # Warn the user if features are extracted in 2D, as this function calculates LBP in 3D
  if kwargs.get('force2D', False):
    logger.warning('Calculating Local Binary Pattern in 3D, but extracting features in 2D. Use with caution!')

  label = kwargs.get('label', 1)

  lbp_levels = kwargs.get('lbp3DLevels', 2)
  lbp_icosphereRadius = kwargs.get('lbp3DIcosphereRadius', 1)
  lbp_icosphereSubdivision = kwargs.get('lbp3DIcosphereSubdivision', 1)

  im_arr = sitk.GetArrayFromImage(inputImage)
  ma_arr = sitk.GetArrayFromImage(inputMask)

  # Variables used in the shape comments:
  # Np Number of voxels
  # Nv Number of vertices

  # Vertices icosahedron for spherical sampling
  coords_icosahedron = numpy.array(icosphere(lbp_icosphereSubdivision, lbp_icosphereRadius).vertices)  # shape(Nv, 3)

  # Corresponding polar coordinates
  theta = numpy.arccos(numpy.true_divide(coords_icosahedron[:, 2], lbp_icosphereRadius))
  phi = numpy.arctan2(coords_icosahedron[:, 1], coords_icosahedron[:, 0])

  # Corresponding spherical harmonics coefficients Y_{m, n, theta, phi}
  Y = sph_harm(0, 0, theta, phi)  # shape(Nv,)
  n_ix = numpy.array(0)

  for n in range(1, lbp_levels):
    for m in range(-n, n + 1):
      n_ix = numpy.append(n_ix, n)
      Y = numpy.column_stack((Y, sph_harm(m, n, theta, phi)))
  # shape (Nv, x) where x is the number of iterations in the above loops + 1

  # Get labelled coordinates
  ROI_coords = numpy.where(ma_arr == label)  # shape(3, Np)

  # Interpolate f (samples on the spheres across the entire volume)
  coords = numpy.array(ROI_coords).T[None, :, :] + coords_icosahedron[:, None, :]  # shape(Nv, Np, 3)
  f = map_coordinates(im_arr, coords.T, order=3)  # Shape(Np, Nv)  Note that 'Np' and 'Nv' are swapped due to .T

  # Compute spherical Kurtosis
  k = kurtosis(f, axis=1)  # shape(Np,)

  # Apply sign function
  f_centroids = im_arr[ROI_coords]  # Shape(Np,)
  f = numpy.greater_equal(f, f_centroids[:, None]).astype(int)  # Shape(Np, Nv)

  # Compute c_{m,n} coefficients
  c = numpy.multiply(f[:, :, None], Y[None, :, :])  # Shape(Np, Nv, x)
  c = c.sum(axis=1)  # Shape(Np, x)

  # Integrate over m
  f = numpy.multiply(c[:, None, n_ix == 0], Y[None, :, n_ix == 0])  # Shape (Np, Nv, 1)
  for n in range(1, lbp_levels):
    f = numpy.concatenate((f,
                           numpy.sum(numpy.multiply(c[:, None, n_ix == n], Y[None, :, n_ix == n]),
                                     axis=2, keepdims=True)
                           ),
                          axis=2)
  # Shape f (Np, Nv, levels)

  # Compute L2-Norm
  f = numpy.sqrt(numpy.sum(f ** 2, axis=1))  # shape(Np, levels)

  # Keep only Real Part
  f = numpy.real(f)  # shape(Np, levels)
  k = numpy.real(k)  # shape(Np,)

  # Yield the derived images for each level
  result = numpy.ndarray(im_arr.shape)
  for l_idx in range(lbp_levels):
    result[ROI_coords] = f[:, l_idx]

    # Create a SimpleITK image
    im = sitk.GetImageFromArray(result)
    im.CopyInformation(inputImage)

    yield im, 'lbp-3D-m%d' % (l_idx + 1), kwargs

  # Yield Kurtosis
  result[ROI_coords] = k

  # Create a SimpleITK image
  im = sitk.GetImageFromArray(result)
  im.CopyInformation(inputImage)

  yield im, 'lbp-3D-k', kwargs
Example #33
 def grad(self, z):
     return np.diagflat(np.greater_equal(z, 0) + self.a * np.less(z, 0))
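The one-liner above is the diagonal Jacobian of a leaky-ReLU-style activation (slope 1 where z >= 0, slope a where z < 0). A self-contained sketch of the same idea with a hypothetical wrapper class:

import numpy as np

class LeakyReLU:
    def __init__(self, a=0.01):
        self.a = a

    def forward(self, z):
        return np.where(z >= 0, z, self.a * z)

    def grad(self, z):
        # diagonal Jacobian: 1 where z >= 0, a where z < 0
        return np.diagflat(np.greater_equal(z, 0) + self.a * np.less(z, 0))

act = LeakyReLU(a=0.1)
print(act.grad(np.array([-2.0, 0.0, 3.0])))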
Example #34
'''
   greater()        greater than
   less()           less than
   equal()          equal to
   These perform element-wise comparisons and return a boolean array.
'''
print()
print('Comparison functions:')
num1 = np.random.randint(1,7,size = (2,3))
num2 = np.random.randint(1,7,size = (2,3))
print(num1)
print(num2)
print('greater, greater_equal')
print(np.greater(num1,num2))
print(np.greater_equal(num1,num2))
print('less, less_equal')
print(np.less(num1,num2))
print(np.less_equal(num1,num2))
print('equal, not_equal')
print(np.equal(num1,num2))
print(np.not_equal(num1,num2))
print()


'''
   logical_and()
   logical_or()
   logical_xor()
   These perform element-wise boolean logic operations, equivalent to the infix operators &, |, ^.
'''
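The comment above only names the logical ufuncs; a short hedged continuation of the same script demonstrating them on the num1/num2 arrays:

print('logical_and, logical_or, logical_xor')
mask1 = np.greater(num1, 3)
mask2 = np.less(num2, 4)
print(np.logical_and(mask1, mask2))   # equivalent to mask1 & mask2
print(np.logical_or(mask1, mask2))    # equivalent to mask1 | mask2
print(np.logical_xor(mask1, mask2))   # equivalent to mask1 ^ mask2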
Example #35
    def renumber(self):
        """
        removes duplicate elems and nodes that are not referenced
        by any element, as well as elems that have been deleted (==-1)
        This function is lifted and modified slightly from Rusty's code.

        Returns
        -------
        dict
            {'valid_elems':new_elems, 'pointmap':old_indexes,
             'valid_nodes':active_nodes}
        """
        element_hash = {}  # sorted tuples of vertices
        new_elems = []  # list of indexes into the old ones
        for i in range(self.n_elems()):
            my_key = tuple(self._elems[i])
            if my_key not in element_hash and self.elem(i) is not None:
                # we're original and not deleted
                element_hash[my_key] = i  # value is ignored...
                new_elems.append(i)

        self._elems = self._elems[new_elems]  # Survived elems

        # remove lonesome nodes
        active_nodes = np.unique(np.ravel(self._elems[:, :MAX_NODES]))[1:]
        if np.any(active_nodes <= 0):
            raise Exception(
                "renumber: Active nodes includes some negative indexes.")

        old_indexes = -np.ones(self.n_nodes(), np.int32)

        self._nodes = self._nodes[active_nodes]
        if np.any(np.isnan(self._nodes)):
            raise Exception("renumber: some points have NaNs!")

        # need a mapping from active node to its index -
        # explicitly ask for int32 for consistency
        new_indexes = np.arange(active_nodes.shape[0], dtype=np.int32)
        old_indexes[active_nodes] = new_indexes
        # map onto the new indexes
        flag_active_nodes = np.greater_equal(self._elems[:, :MAX_NODES], 0)
        self._elems[flag_active_nodes] = old_indexes[
            self._elems[flag_active_nodes]]

        if np.any(self._elems < 0):
            raise Exception(
                "renumber: after remapping indexes, have negative node index in elems"
            )

        # clear out stale data
        self._clear_stale_data()

        # rebuild the edges
        self.build_edges_from_elems()

        # return the mappings so that subclasses can catch up
        return {
            'valid_elems': new_elems,
            'pointmap': old_indexes,
            'valid_nodes': active_nodes
        }
Example #36
    def eval_sequence(self, data):
        """Calculates ID metrics for one sequence"""
        # Initialise results
        res = {}
        for field in self.fields:
            res[field] = 0

        # Return result quickly if tracker or gt sequence is empty
        if data['num_tracker_dets'] == 0:
            res['IDFN'] = data['num_gt_dets']
            return res
        if data['num_gt_dets'] == 0:
            res['IDFP'] = data['num_tracker_dets']
            return res

        # Variables counting global association
        potential_matches_count = np.zeros(
            (data['num_gt_ids'], data['num_tracker_ids']))
        gt_id_count = np.zeros(data['num_gt_ids'])
        tracker_id_count = np.zeros(data['num_tracker_ids'])

        # First loop through each timestep and accumulate global track information.
        for t, (gt_ids_t, tracker_ids_t) in enumerate(
                zip(data['gt_ids'], data['tracker_ids'])):
            # Count the potential matches between ids in each timestep
            matches_mask = np.greater_equal(data['similarity_scores'][t],
                                            self.threshold)
            match_idx_gt, match_idx_tracker = np.nonzero(matches_mask)
            potential_matches_count[gt_ids_t[match_idx_gt],
                                    tracker_ids_t[match_idx_tracker]] += 1

            # Calculate the total number of dets for each gt_id and tracker_id.
            gt_id_count[gt_ids_t] += 1
            tracker_id_count[tracker_ids_t] += 1

        # Calculate optimal assignment cost matrix for ID metrics
        num_gt_ids = data['num_gt_ids']
        num_tracker_ids = data['num_tracker_ids']
        fp_mat = np.zeros(
            (num_gt_ids + num_tracker_ids, num_gt_ids + num_tracker_ids))
        fn_mat = np.zeros(
            (num_gt_ids + num_tracker_ids, num_gt_ids + num_tracker_ids))
        fp_mat[num_gt_ids:, :num_tracker_ids] = 1e10
        fn_mat[:num_gt_ids, num_tracker_ids:] = 1e10
        for gt_id in range(num_gt_ids):
            fn_mat[gt_id, :num_tracker_ids] = gt_id_count[gt_id]
            fn_mat[gt_id, num_tracker_ids + gt_id] = gt_id_count[gt_id]
        for tracker_id in range(num_tracker_ids):
            fp_mat[:num_gt_ids, tracker_id] = tracker_id_count[tracker_id]
            fp_mat[tracker_id + num_gt_ids,
                   tracker_id] = tracker_id_count[tracker_id]
        fn_mat[:num_gt_ids, :num_tracker_ids] -= potential_matches_count
        fp_mat[:num_gt_ids, :num_tracker_ids] -= potential_matches_count

        # Hungarian algorithm
        match_rows, match_cols = linear_sum_assignment(fn_mat + fp_mat)

        # Accumulate basic statistics
        res['IDFN'] = fn_mat[match_rows, match_cols].sum().astype(np.int)
        res['IDFP'] = fp_mat[match_rows, match_cols].sum().astype(np.int)
        res['IDTP'] = (gt_id_count.sum() - res['IDFN']).astype(np.int)

        # Calculate final ID scores
        res = self._compute_final_fields(res)
        return res
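The per-timestep matching step above is just a threshold on the similarity matrix; a tiny hedged illustration with made-up scores:

import numpy as np

similarity = np.array([[0.9, 0.2],
                       [0.1, 0.6],
                       [0.3, 0.4]])            # rows: gt ids, columns: tracker ids
matches_mask = np.greater_equal(similarity, 0.5)
match_idx_gt, match_idx_tracker = np.nonzero(matches_mask)
print(match_idx_gt, match_idx_tracker)         # index pairs that clear the threshold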
Example #37
def test_broadcast():
    a = sym.Variable("a")
    b = sym.Variable("b")
    shape = {'a': (3, 4, 5), 'b': (1, 5)}

    def _collapse(g):
        return g.reshape(-1, shape['b'][-1]).sum(0, keepdims=True)

    y = sym.broadcast_add(a, b)

    def _backward_add(head_grads, a, b):
        da = head_grads
        db = _collapse(head_grads)
        return da, db

    check_function(y, lambda a, b: a + b, _backward_add, shape=shape)

    y = sym.broadcast_sub(a, b)

    def _backward_sub(head_grads, a, b):
        da = head_grads
        db = -_collapse(head_grads)
        return da, db

    check_function(y, lambda a, b: a - b, _backward_sub, shape=shape)

    y = sym.broadcast_mul(a, b)

    def _backward_mul(head_grads, a, b):
        da = head_grads * b
        db = _collapse(head_grads * a)
        return da, db

    check_function(y, lambda a, b: a * b, _backward_mul, shape=shape)

    y = sym.broadcast_div(a, b)

    def _backward_div(head_grads, a, b):
        da = head_grads / b
        db = _collapse(-head_grads * a / b**2)
        return da, db

    # We avoid computing numerical derivatives too close to zero here
    check_function(y,
                   lambda a, b: a / b,
                   _backward_div,
                   shape=shape,
                   numerical_grads=False)
    check_function(y,
                   lambda a, b: a / b,
                   _backward_div,
                   shape=shape,
                   in_range={'b': (0.1, 20)})

    y = sym.broadcast_mod(a, b)
    check_function(y,
                   lambda a, b: np.mod(a, b),
                   in_range={
                       'a': (0.001, 100),
                       'b': (1, 100)
                   },
                   dtype='int32',
                   shape=shape)

    y = sym.broadcast_max(a, b)
    check_function(y, lambda a, b: np.maximum(a, b), shape=shape)

    y = sym.broadcast_min(a, b)
    check_function(y, lambda a, b: np.minimum(a, b), shape=shape)

    y = sym.broadcast_pow(a, b)
    check_function(y,
                   lambda a, b: np.power(a, b),
                   in_range={
                       'a': (0.001, 100),
                       'b': (0.001, 2)
                   },
                   shape=shape)

    y = sym.broadcast_left_shift(a, b)
    check_function(y, lambda a, b: a << b, dtype='int32', shape=shape)

    y = sym.broadcast_right_shift(a, b)
    check_function(y, lambda a, b: a >> b, dtype='int32', shape=shape)

    y = sym.broadcast_greater(a, b)
    check_function(y, lambda a, b: np.greater(a, b), shape=shape)

    y = sym.broadcast_less(a, b)
    check_function(y, lambda a, b: np.less(a, b), shape=shape)

    y = sym.broadcast_equal(a, b)
    check_function(y,
                   lambda a, b: np.equal(a, b),
                   in_range={
                       'a': (-2, 2),
                       'b': (-2, 2)
                   },
                   dtype='int32',
                   shape=shape)

    y = sym.broadcast_not_equal(a, b)
    check_function(y,
                   lambda a, b: np.not_equal(a, b),
                   in_range={
                       'a': (-2, 2),
                       'b': (-2, 2)
                   },
                   dtype='int32',
                   shape=shape)

    y = sym.broadcast_greater_equal(a, b)
    check_function(y,
                   lambda a, b: np.greater_equal(a, b),
                   in_range={
                       'a': (-3, 3),
                       'b': (-3, 3)
                   },
                   dtype='int32',
                   shape=shape)

    y = sym.broadcast_less_equal(a, b)
    check_function(y,
                   lambda a, b: np.less_equal(a, b),
                   in_range={
                       'a': (-3, 3),
                       'b': (-3, 3)
                   },
                   dtype='int32',
                   shape=shape)
Example #38
def plot_isotherms(func_p_sat_T: Callable[[float], float],
                   func_wxX_pT: Callable[[float, float], float],
                   p_calc: list, T_calc: list,
                   p_axis: list, wxX_axis: list,
                   flag_p_rel: bool = False) -> dict:
    r"""Plot isotherms.

    This functions calculates vapor pressures and loadings / molar fractions / concentrations for
    the pressures and temperatures given as inputs. The input 'func_wxX_pT' defines what will be
    calculated: loadings / molar fractions / concentrations.

    Parameters
    ----------
    func_p_sat_T : Callable[[float], float]
        Method to calculate vapor pressure depending on temperature.
    func_wxX_pT : Callable[[float, float], float]
        Method to calculate loading / molar fraction / concentration depending on pressure and
        temperature.
    p_calc : list
        List containing pressures (i.e., floats) required to calculate loadings / molar fractions
        / concentrations.
    T_calc : list
        List containing temperatures (i.e., floats) required to calculate loadings / molar
        fractions / concentrations.
    p_axis : list
        List containing pressures (i.e., floats) defining the ticks of the x axis.
    wxX_axis : list
        List containing loadings / molar fractions / concentrations (i.e., floats) defining the
        ticks of the y axis.
    flag_p_rel : bool, optional
        Flag defining if x axis shall be pressure or relative pressure (i.e., p/p_sat). The
        default is False.

    Returns
    -------
    dict
        Dict containing a) handles for figure, axis, and plots; and b) calculated vapor pressures
        and equilibrium loadings / molar fractions / concentrations.

    """
    # Calculate saturation properties and get finite results
    #
    p_sat = [func_p_sat_T(val) for val in T_calc]
    id_limit = np.logical_and(np.greater_equal(p_sat, 0), np.isfinite(p_sat))

    saturationPropeties = {'p_sat': np.extract(id_limit, p_sat),
                           'T_sat': np.extract(id_limit, T_calc)}

    # Get critical pressure and temperature or maximum pressure and temperature if both are
    # lower than critical pressure and temperature
    #
    p_crit = max(saturationPropeties['p_sat'])
    T_crit = max(saturationPropeties['T_sat'])

    # Calculate saturation capacity
    #
    T_sat_boundary = [T_calc[0]] + [val for val in range(int(np.ceil(T_calc[0])),
                                                         int(np.floor(T_crit)))] + [T_crit]
    p_sat_boundary = [func_p_sat_T(val) for val in T_sat_boundary]

    # Calculate equilibrium properties and get reasonable results (i.e., p_calc <= p_sat(T_calc))
    #
    sorptionEquilibrium = [None] * len(T_calc)

    for id_T, val_T in enumerate(T_calc):
        wxXT_pT = [func_wxX_pT(val, val_T) for val in p_calc]

        if val_T <= T_crit:
            # Cut at p_sat
            #
            if not np.less_equal(p_calc, p_sat[id_T]).all():
                # Cut necessary
                #
                id_limit = np.less_equal(p_calc, p_sat[id_T])

                sorptionEquilibrium[id_T] = {'wxXT_pT': np.extract(id_limit, np.array(wxXT_pT)),
                                             'p_equ':   np.extract(id_limit, np.array(p_calc))}

            else:
                # No cut necessary
                #
                sorptionEquilibrium[id_T] = {'wxXT_pT': np.array(wxXT_pT),
                                             'p_equ':   np.array(p_calc)}

        else:
            # Cut at p_crit
            #
            id_limit = np.less_equal(p_calc, p_crit)

            sorptionEquilibrium[id_T] = {'wxXT_pT': np.extract(id_limit, np.array(wxXT_pT)),
                                         'p_equ':   np.extract(id_limit, np.array(p_calc))}

    # Plot data
    #
    h_fig, h_ax = plt.subplots(1, 1, figsize=(20/2.54, 10/2.54))
    h_plots = [None] * (len(T_calc)  + 1)

    h_plots[0] = h_ax.plot(p_sat_boundary if not flag_p_rel else \
                           np.array(p_sat_boundary) / np.array(p_sat_boundary),
                           [func_wxX_pT(p_sat_boundary[ind], val) for ind, val in \
                            enumerate(T_sat_boundary)],
                           linestyle = '-',
                           color = '#00459F',
                           label = 'Saturation capacity')

    for id_T, val_T in enumerate(T_calc):
        # Convert x axis if necessary
        #
        if val_T <= T_crit:
            # Normalize by p_sat(T)
            #
            p_plot = sorptionEquilibrium[id_T]['p_equ'] if not flag_p_rel else \
                sorptionEquilibrium[id_T]['p_equ'] / p_sat[id_T]

        else:
            # Normalize by p_crit
            #
            p_plot = sorptionEquilibrium[id_T]['p_equ'] if not flag_p_rel else \
                sorptionEquilibrium[id_T]['p_equ'] / p_crit


        h_plots[id_T+1] = h_ax.plot(p_plot,
                                    sorptionEquilibrium[id_T]['wxXT_pT'],
                                    linestyle = '--',
                                    # color = '#646567',
                                    label = str(val_T - 273.15) + ' °C')

    # Set up axis
    #
    h_ax.spines['top'].set_visible(False)
    h_ax.spines['right'].set_visible(False)
    h_ax.spines['left'].set_linewidth(0.5)
    h_ax.spines['bottom'].set_linewidth(0.5)

    if not flag_p_rel:
        h_ax.set_xlim(min(p_axis), max(p_axis))
    else:
        h_ax.set_xlim(0, 1)


    h_ax.set_ylim(min(wxX_axis), max(wxX_axis))

    h_ax.set_xticks([val for val in p_axis] if not flag_p_rel else \
                    [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
    h_ax.set_yticks([val for val in wxX_axis])

    h_ax.grid(linestyle = ':', color = '#9C9E9F')

    h_ax.set_xlabel(r'Pressure $p$ / Pa $\longrightarrow$' if not flag_p_rel else \
                    r'Relative pressure $p$ $p_\mathrm{sat}^{-1}$ / Pa Pa$^{-1}$ $\longrightarrow$')
    h_ax.set_ylabel(r'Sorption capacity $w$ or $x$ or $X$ / various $\longrightarrow$')

    h_ax.legend(bbox_to_anchor=(1.05, 1),
                loc='upper left',
                labelspacing=0.05,
                frameon=False)

    # Tight layout
    #
    h_fig.align_ylabels()
    h_fig.tight_layout()

    # Return results allowing for custom-made modifications
    #
    return {'handles':  (h_fig, h_ax, h_plots),
            'data':     (saturationPropeties, sorptionEquilibrium)}
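
A hedged usage sketch for plot_isotherms: the two property callables below are invented toy correlations (an Antoine-style vapor-pressure curve and a Langmuir-style loading function) used purely for illustration; only the call signature comes from the function above.

import numpy as np
import matplotlib.pyplot as plt

def p_sat_toy(T):
    # Invented Antoine-style vapor pressure in Pa (illustrative constants only)
    return 10.0 ** (10.2 - 1810.9 / (T - 28.7))

def w_toy(p, T):
    # Invented Langmuir-style loading in kg/kg (illustrative constants only)
    b = 2e-5 * np.exp(1500.0 / T)
    return 0.4 * b * p / (1.0 + b * p)

results = plot_isotherms(p_sat_toy, w_toy,
                         p_calc=list(np.linspace(100.0, 5e4, 200)),
                         T_calc=[293.15, 313.15, 333.15],
                         p_axis=[0, 1e4, 2e4, 3e4, 4e4, 5e4],
                         wxX_axis=[0, 0.1, 0.2, 0.3, 0.4])
h_fig, h_ax, h_plots = results['handles']
plt.show()
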
Example #39
def plot_isobars(func_p_sat_T: Callable[[float], float],
                 func_T_sat_p: Callable[[float], float],
                 func_wxX_pT: Callable[[float, float], float],
                 p_calc: list, T_calc: list,
                 T_axis: list, wxX_axis: list) -> dict:
    r"""Plot isobars.

    This function calculates vapor pressures and loadings / molar fractions / concentrations for
    the pressures and temperatures given as inputs. The input 'func_wxX_pT' defines what will be
    calculated: loadings / molar fractions / concentrations.

    Parameters
    ----------
    func_p_sat_T : Callable[[float], float]
        Method to calculate vapor pressure depending on temperature.
    func_T_sat_p : Callable[[float], float]
        Method to calculate vapor temperature depending on pressure.
    func_wxX_pT : Callable[[float, float], float]
        Method to calculate loading / molar fraction / concentration depending on pressure and
        temperature.
    p_calc : list
        List containing pressures (i.e., floats) required to calculate loadings / molar fractions
        / concentrations.
    T_calc : list
        List containing temperatures (i.e., floats) required to calculate loadings / molar
        fractions / concentrations.
    T_axis : list
        List containing temperatures (i.e., floats) defining the ticks of the x axis.
    wxX_axis : list
        List containing loadings / molar fractions / concentrations (i.e., floats) defining the
        ticks of the y axis.

    Returns
    -------
    dict
        Dict containing a) handles for figure, axis, and plots; and b) calculated vapor pressures
        and equilibrium loadings / molar fractions / concentrations.

    """
    # Calculate saturation properties and get finite results
    #
    p_sat = [func_p_sat_T(val) for val in T_calc]
    id_limit = np.logical_and(np.greater_equal(p_sat, 0), np.isfinite(p_sat))

    saturationPropeties = {'p_sat': np.extract(id_limit, p_sat),
                           'T_sat': np.extract(id_limit, T_calc)}

    # Get critical pressure and temperature or maximum pressure and temperature if both are
    # lower than critical pressure and temperature: Cut p_calc if necessary
    #
    p_crit = max(saturationPropeties['p_sat'])
    T_crit = max(saturationPropeties['T_sat'])

    p_calc = np.extract(np.less_equal(p_calc, p_crit), p_calc)

    # Calculate saturation capacity
    #
    T_sat_boundary = [T_calc[0]] + [val for val in range(int(np.ceil(T_calc[0])),
                                                         int(np.floor(T_crit)))] + [T_crit]
    p_sat_boundary = [func_p_sat_T(val) for val in T_sat_boundary]

    # Calculate equilibrium properties and get reasonable results (i.e., p_calc <= p_sat(T_calc))
    #
    sorptionEquilibrium = [None] * len(p_calc)

    for id_p, val_p in enumerate(p_calc):
        wxXT_pT = [func_wxX_pT(val_p, val) for val in T_calc]
        id_limit = np.less_equal(wxXT_pT, func_wxX_pT(val_p, func_T_sat_p(val_p)))

        sorptionEquilibrium[id_p] = {'wxXT_pT': np.extract(id_limit, np.array(wxXT_pT)),
                                     'T_equ':   np.extract(id_limit, np.array(T_calc))}

    # Plot data
    #
    h_fig, h_ax = plt.subplots(1, 1, figsize=(20/2.54, 10/2.54))
    h_plots = [None] * (len(p_calc) + 1)

    h_plots[0] = h_ax.plot(T_sat_boundary,
                           [func_wxX_pT(p_sat_boundary[ind], val) for ind, val in \
                            enumerate(T_sat_boundary)],
                           linestyle = '-',
                           color = '#00459F',
                           label = 'Saturation capacity')

    for id_p, val_p in enumerate(p_calc):
        h_plots[id_p+1] = h_ax.plot(sorptionEquilibrium[id_p]['T_equ'],
                                    sorptionEquilibrium[id_p]['wxXT_pT'],
                                    linestyle = '--',
                                    # color = '#646567',
                                    label = str(val_p/1000) + ' kPa')

    # Set up axis
    #
    h_ax.spines['top'].set_visible(False)
    h_ax.spines['right'].set_visible(False)
    h_ax.spines['left'].set_linewidth(0.5)
    h_ax.spines['bottom'].set_linewidth(0.5)

    h_ax.set_xlim(min(T_axis), max(T_axis))
    h_ax.set_ylim(min(wxX_axis), max(wxX_axis))

    h_ax.set_xticks([val for val in T_axis])
    h_ax.set_yticks([val for val in wxX_axis])

    h_ax.set_xticklabels([str(int(val - 273.15)) for val in T_axis])

    h_ax.grid(linestyle = ':', color = '#9C9E9F')

    h_ax.set_xlabel(r'Temperature $t$ / °C $\longrightarrow$')
    h_ax.set_ylabel(r'Sorption capacity $w$ or $x$ or $X$ / various $\longrightarrow$')

    h_ax.legend(bbox_to_anchor=(1.05, 1),
                loc='upper left',
                labelspacing=0.05,
                frameon=False)

    # Tight layout
    #
    h_fig.align_ylabels()
    h_fig.tight_layout()

    # Return results allowing for custom-made modifications
    #
    return {'handles':  (h_fig, h_ax, h_plots),
            'data':     (saturationPropeties, sorptionEquilibrium)}
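
plot_isotherms and plot_isobars above (and plot_duehring further below) all start by discarding non-physical saturation pressures with the same np.greater_equal / np.isfinite / np.extract idiom. A minimal standalone sketch of that filter with made-up numbers:

import numpy as np

T_calc = [280.0, 300.0, 320.0, 340.0, 360.0]
# Pretend the vapor-pressure correlation misbehaved for the last two temperatures.
p_sat = [990.0, 3500.0, 10500.0, -1.0, float('nan')]

# Keep only entries that are non-negative and finite.
id_limit = np.logical_and(np.greater_equal(p_sat, 0), np.isfinite(p_sat))

p_sat_ok = np.extract(id_limit, p_sat)   # array([  990.,  3500., 10500.])
T_sat_ok = np.extract(id_limit, T_calc)  # array([280., 300., 320.])
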
Example #40
    def eval(self,
             band,
             times,
             z=0,
             mag=1,
             sextrap=1,
             gen=1,
             toff=True,
             extrap=False):
        '''Evaluate the template in band [band] at epochs [times].  Optionally
      redshift by (1+[z]).  If [mag]=1, return in magnitudes, otherwise return
      in flux units.  If [sextrap]=1, extrapolate beyond the training sample
      by using a stretch.  Use [gen] to specify the generation of the template.
      If you want the Tmax - Tmax(B) offset applied, set [toff] to True,
      otherwise, Tmax will be at 0 for every filter.'''

        if toff:
            evt = (times - self.deltaTmax(band)) / (1 + z)
        else:
            evt = times / (1 + z)

        #if band not in ['u','B','V','g','r','i','Y','J','K','H','K','J_K','H_K']:
        #   raise AttributeError, "Sorry, band %s is not supported by dm15temp2" % \
        #         band

        # This provides a template for JHK photometry based on Kevin Krisciunas' polynomial
        s = dm152s(self.dm15)
        if band == 'J_K':
            return (0.080 + evt / s * 0.05104699 + 0.007064257 * (evt / s)**2 -
                    0.000257906 * (evt / s)**3,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]) * 0.08,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]))
        elif band == 'H_K':
            return (0.050 + evt / s * 0.0250923 + 0.001852107 * (evt / s)**2 -
                    0.0003557824 * (evt / s)**3,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]) * 0.08,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]))
        elif band == 'K':
            return (0.042 + evt / s * 0.02728437 + 0.003194500 * (evt / s)**2 -
                    0.0004139377 * (evt / s)**3,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]) * 0.08,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]))
        dmmin, dmmax = get_p_lim(band, 'dm15', gen)
        if sextrap and not (dmmin < self.dm15 < dmmax):
            tmin, tmax = get_t_lim('B', 'dm15', gen)
            test_t = num.linspace(0, tmax, 10)
            if self.dm15 <= dmmin:
                start = self.teval('B', 15.0, dmmin)
                target = start + (self.dm15 - dmmin)
                dmlim = dmmin
            else:
                start = self.teval('B', 15.0, dmmax)
                target = start + (self.dm15 - dmmax)
                dmlim = dmmax
            test_vals = self.teval('B', test_t, dmlim) - target
            id = num.nonzero(num.greater(test_vals, 0))[0][0]
            if debug:
                print "start=", start, "dm15=", self.dm15, "target = ", target
            t0 = test_t[id - 1]
            t1 = test_t[id]
            if debug: print "t0 = ", t0, "t1 = ", t1
            root = scipy.optimize.brentq(\
                  lambda x:  self.teval('B', x, dmlim) - target, t0, t1)
            if debug: print "root = ", root
            s = 15. / root
        else:
            s = 1.0

        dm15 = self.dm15
        if sextrap and dm15 <= dmmin:
            dm15 = dmmin + 0.001
        if sextrap and dm15 >= dmmax:
            dm15 = dmmax - 0.001

        tmin, tmax = get_t_lim(band, 'dm15', gen)
        tmask = num.greater_equal(evt, tmin) * num.less_equal(evt, tmax)

        # Apply any stretch
        evt = evt * s
        evd, eevd, mask = finterp(band, evt, dm15, 'dm15', gen, extrap=extrap)

        if not extrap:
            mask = mask * tmask

        if not mag:
            return (evd, eevd, mask)
        else:
            return (-2.5 * num.log10(evd), eevd / evd * 1.0857, mask)
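
In the J_K/H_K/K branches above, the validity mask is built by multiplying a greater_equal array with a less_equal array, i.e. an element-wise AND over the NIR epoch window. A small standalone sketch of that pattern; NIR_range is assumed here to be a (t_min, t_max) pair in days, which is how the code above uses it:

import numpy as np

NIR_range = (-12.0, 10.0)       # assumed (t_min, t_max) window in days
evt = np.array([-20.0, -5.0, 0.0, 8.0, 15.0])
s = 1.0                         # stretch factor

# Multiplying boolean arrays acts as a logical AND: True only inside the window.
mask = np.greater_equal(evt / s, NIR_range[0]) * np.less_equal(evt / s, NIR_range[1])
# mask -> array([False,  True,  True,  True, False])
err = mask * 0.08               # flat 0.08 mag uncertainty inside the window, 0 outside
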
Example #41
    def eval(self,
             band,
             times,
             z=0,
             mag=1,
             sextrap=1,
             gen=1,
             toff=True,
             extrap=False):
        '''Evaluate the template in band [band] at epochs [times].  Optionally
      redshift by (1+[z]).  If [mag]=1, return in magnitudes, otherwise return
      in flux units.  If [sextrap]=1, extrapolate beyond the training sample
      by using a stretch.  Use [gen] to specify the generation of the template.
      If you want the Tmax - Tmax(B) offset applied, set [toff] to True,
      otherwise, Tmax will be at 0 for every filter.'''

        if toff:
            evt = (times - self.deltaTmax(band)) / (1 + z)
        else:
            evt = times / (1 + z)

        if self.st <= 0:
            return (evt * 0, evt * 0, num.zeros(evt.shape, dtype=num.bool))

        if band == 'J_K':
            s = self.st
            return (0.080 + evt / s * 0.05104699 + 0.007064257 * (evt / s)**2 -
                    0.000257906 * (evt / s)**3,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]) * 0.08,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]))
        elif band == 'H_K':
            s = self.st
            return (0.050 + evt / s * 0.0250923 + 0.001852107 * (evt / s)**2 -
                    0.0003557824 * (evt / s)**3,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]) * 0.08,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]))
        elif band == 'K':
            s = self.st
            return (0.042 + evt / s * 0.02728437 + 0.003194500 * (evt / s)**2 -
                    0.0004139377 * (evt / s)**3,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]) * 0.08,
                    num.greater_equal(evt / s, NIR_range[0]) *
                    num.less_equal(evt / s, NIR_range[1]))

        stmin, stmax = get_p_lim(band, 'st', gen)
        if sextrap and not (stmin < self.st < stmax):
            if self.st < stmin:
                s = stmin / self.st
            else:
                s = stmax / self.st
        else:
            s = 1.0

        st = self.st
        if sextrap and st <= stmin:
            st = stmin + 0.001
        if sextrap and st >= stmax:
            st = stmax - 0.001

        tmin, tmax = get_t_lim(band, 'st', gen)
        tmask = num.greater_equal(evt, tmin) * num.less_equal(evt, tmax)

        # Apply any stretch
        evt = evt * s
        evd, eevd, mask = finterp(band, evt, st, 'st', gen, extrap=extrap)
        if not extrap:
            mask = mask * tmask

        if not mag:
            return (evd, eevd, mask)
        else:
            return (-2.5 * num.log10(evd), eevd / evd * 1.0857, mask)
Example #42
def cooperative_mec(mec_list):
    global _off_cloud
    global _off_mec
    global task_id, task_record

    for i in mec_list:
        _host = mec_comparison()
        if _host == 0:
            # send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]])  # [task_id,exec_time]
            _send_task = f"{i.split('_')[0]}.{task_id}"
            _client.publish(
                cloud_ip,
                str([_send_task, t_time[i.split('_')[0]][0]]),
            )
            task_record[_send_task] = 'cloud'
            task_id += 1
            _off_cloud += 1
            # cloud_register[i.split('_')[0].split('.')[2]] = send_back_host

            print('\n=========SENDING {} TO CLOUD==========='.format(i))

        else:
            j = i.split('_')[0]
            _max = np.array([6, 5, 5])
            send = 'false'
            if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
                send = 'true'
            # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
            if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
                _send_task = f"{j}.{task_id}"
                send_offloaded_task_mec('{} {} {}'.format(
                    'ex', mec_id(_host), [_send_task, t_time[j][0]]))
                task_record[_send_task] = 'mec'
                task_id += 1
                _off_mec += 1
                # SENDS TASK TO MEC FOR EXECUTION

                w_send = mec_waiting_time[_host][-1] + 0.001
                mec_waiting_time[_host].append(
                    w_send)  # adds a new average waiting time
                print('\n======SENDING {} TO MEC {}========='.format(i, _host))
            elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
                _send_task = f"{j}.{task_id}"
                send_offloaded_task_mec('{} {} {}'.format(
                    'ex', mec_id(_host), [_send_task, t_time[j][0]]))
                task_record[_send_task] = 'mec'
                task_id += 1
                _off_mec += 1
                # SENDS TASK TO MEC FOR EXECUTION
                w_send = mec_waiting_time[_host][-1] + 0.001
                mec_waiting_time[_host].append(
                    w_send)  # adds a new average waiting time
                print('\n======SENDING {} TO MEC {}========='.format(i, _host))
            else:
                _send_task = f"{j}.{task_id}"
                _client.publish(
                    cloud_ip,
                    str([_send_task, t_time[j][0]]),
                )
                task_record[_send_task] = 'cloud'
                task_id += 1
                _off_cloud += 1
                # send_cloud([j, t_time[j][0]])    # # [task_id,exec_time]

                # cloud_register[j.split('.')[2]] = send_back_host

                print('\n=========SENDING {} TO CLOUD==========='.format(i))
Example #43
    def compute(self, inputSDR, supressLearningFlag):
        # flag to supress learning
        self.supressLearningFlag = supressLearningFlag

        # prepare new iteration
        self.prepareNewIteration()

        # How similar is the input SDR to the pattern of the minicolumns?
        if self.nCols != 0:
            start = 1
            columnActivity = self.computeColumnActivations(inputSDR)
            columnActivity_sorted = np.sort(columnActivity)[::-1]
            idx_sorted = np.argsort(columnActivity)[::-1]
        else:
            start = 0
            columnActivity = 0
            columnActivity_sorted = 0
            idx_sorted = 0

        if not self.supressLearningFlag:
            # Are there activities above threshold? If yes, activate the k most
            # active columns, otherwise create new ones and make these the active ones.
            if start != 0:
                cond1 = np.greater(columnActivity_sorted,
                                   self.params.minColumnActivity)
                cond3 = min(self.params.kActiveColumn,
                            len(columnActivity_sorted))
                cond2 = np.greater_equal(columnActivity_sorted,
                                         columnActivity_sorted[cond3 - 1])
                activeCols = idx_sorted[cond1 & cond2]
            else:
                activeCols = []

            sdrNonZeroIdx = np.argwhere(inputSDR == 1)
            cond1 = max(0, self.params.nColsPerPattern - len(activeCols))
            activeCols = np.concatenate(
                [activeCols,
                 self.createNewColumn(sdrNonZeroIdx, cond1)])
        else:
            # In non-learning mode, take the k most active columns
            # plus columns with same activity like kth-best column
            cond3 = min(self.params.kActiveColumn, len(columnActivity_sorted))
            cond2 = np.greater_equal(columnActivity_sorted,
                                     columnActivity_sorted[cond3 - 1])
            activeCols = idx_sorted[cond2]

        activeCols = activeCols.astype(int)

        # for each active column:
        # - mark all predicted cells as winnerCells
        # - if there was no predicted cell, chose one and activate all predictions
        # - activate predictions of winnerCells
        self.activeCells = []
        self.winnerCells = []

        for activeCol in activeCols:
            predictedIdx = np.argwhere(self.prevP[:, activeCol] > 0)

            if predictedIdx.size == 0:
                # if there are no predicted: burst (predict from all cells
                # and choose one winner cell)
                winnerCell = self.burst(activeCol)
                index = np.ravel_multi_index([winnerCell, activeCol],
                                             self.P.shape,
                                             order='F')
                self.winnerCells.append(index)
            elif predictedIdx.size == 1:
                # if there is only one predicted cell, make this the winner cell
                winnerCell = np.asscalar(predictedIdx)
                self.activatePredictions(winnerCell, activeCol)
                index = np.ravel_multi_index([winnerCell, activeCol],
                                             self.P.shape,
                                             order='F')
                self.winnerCells.append(index)
            else:
                # if there are multiple predicted cells, make all winner cells
                for j in predictedIdx:
                    self.activatePredictions(np.asscalar(j), activeCol)
                    index = np.ravel_multi_index([np.asscalar(j), activeCol],
                                                 self.P.shape,
                                                 order='F')
                    self.winnerCells.append(index)

        # learn predictions
        if not self.supressLearningFlag:
            self.learnPredictions()

            # also predict newly learned predictions
            for columnIdx in range(self.nCols):
                if self.burstedCol[0, columnIdx] == 1:
                    for i in range(self.P.shape[0]):
                        self.activatePredictions(i, columnIdx)
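
A standalone sketch of the column-selection rule used in compute above: during learning, keep the k most active columns (plus any column tied with the k-th best) that also exceed the activity threshold. The numbers are invented.

import numpy as np

columnActivity = np.array([0.2, 0.9, 0.5, 0.9, 0.1, 0.7])
kActiveColumn = 3
minColumnActivity = 0.3

order = np.argsort(columnActivity)[::-1]       # column indices, most active first
act_sorted = columnActivity[order]             # [0.9, 0.9, 0.7, 0.5, 0.2, 0.1]

k = min(kActiveColumn, len(act_sorted))
cond1 = np.greater(act_sorted, minColumnActivity)          # above threshold
cond2 = np.greater_equal(act_sorted, act_sorted[k - 1])    # at least as active as the k-th best

activeCols = order[cond1 & cond2]              # array([3, 1, 5])
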
def wait_die(processes, avail, n_need, allocat):
    global deadlock

    offload = []

    # To store execution sequence
    exec_seq = []

    # Status for each process: 0 = not yet handled, 'w' = waiting, 1 = executed or offloaded
    work = [0] * len(processes)

    # While all processes are not finished
    # or system is not in safe state.
    while 'w' in work or 0 in work:
        if 0 in work:
            ind = work.index(0)
            i = processes[ind]
        elif 'w' in work:
            # print('wk: ', work)
            ind = work.index('w')
            i = processes[ind]
        else:
            break

        # print('comparing| process: ', i, n_need[i], 'work: ', avail)
        if not (False in list(np.greater_equal(avail, n_need[i]))):
            exec_seq.append(i)
            avail = np.add(avail, allocat[i])
            work[ind] = 1
            # print('added: ', exec_seq)

        else:
            a = list(set(processes) - set(exec_seq) - set(offload))
            n = {}
            for j in a:
                n[j] = sum(allocat[j])
            _max = max(n, key=n.get)
            # print('work: ', work, 'need: ', n_need[_max])
            if processes.index(_max) > processes.index(
                    i):  # if true, i is older
                # if process is already waiting then offload process
                if work[ind] == 'w':
                    offload.append(i)
                    avail = np.array(avail) + np.array(allocat[i])
                    work[processes.index(i)] = 1
                    # print('offload reentry: ', i, offload)
                else:
                    # wait put process to waiting
                    work[processes.index(i)] = 'w'
                    # print('waiting: ', i)

            else:
                # abort i
                offload.append(i)
                avail = np.array(avail) + np.array(allocat[i])
                work[processes.index(i)] = 1
                # print('offload: ', i)

    if len(offload) > 0:
        print('offloading tasks: ', offload)
        cooperative_mec(offload)
        deadlock[0] += 1

    print('Execution seq: ', exec_seq)

    return exec_seq
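
Both cooperative_mec and wait_die decide whether a task's resource demand fits into what is currently available by requiring np.greater_equal to hold for every resource component. A minimal sketch of that check with invented vectors:

import numpy as np

avail = np.array([6, 5, 5])    # free units per resource type
need_a = np.array([3, 5, 2])   # fits: avail >= need in every component
need_b = np.array([7, 1, 1])   # does not fit: first component exceeds avail

def fits(avail, need):
    # Same test as 'not (False in list(np.greater_equal(avail, need)))' above,
    # written with .all() for readability.
    return bool(np.greater_equal(avail, need).all())

print(fits(avail, need_a))     # True
print(fits(avail, need_b))     # False
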
    def predict_deterministic(self, bias_to_data, depth=-1):
        return np.greater_equal(self.get_probabilities(bias_to_data, depth), 0.5)
Example #46
def plot_duehring(func_p_sat_T: Callable[[float], float],
                  func_T_sat_p: Callable[[float], float],
                  func_p_wxXT: Callable[[float, float], float],
                  func_wxX_pT: Callable[[float, float], float],
                  T_calc: list, wxX_calc: list,
                  T_axis_x: list, T_axis_y: list) -> dict:
    r"""Plot Dühring diagram.

    This function calculates vapor temperatures and equilibrium pressures for the temperatures
    and loadings / molar fractions / concentrations given as inputs. The input 'func_p_wxXT'
    defines what will be needed for calculations: loadings / molar fractions / concentrations.

    Parameters
    ----------
    func_p_sat_T : Callable[[float], float]
        Method to calculate vapor pressure depending on temperature.
    func_T_sat_p : Callable[[float], float]
        Method to calculate vapor temperature depending on pressure.
    func_p_wxXT : Callable[[float, float], float]
        Method to calculate pressure depending on loading / molar fraction / concentration and
        temperature.
    func_wxX_pT : Callable[[float, float], float]
        Method to calculate loading / molar fraction / concentration depending on pressure and
        temperature.
    T_calc : list
        List containing temperatures (i.e., floats) required to calculate equilibrium pressure.
    wxX_calc : list
        List containing loadings / molar fractions / concentrations (i.e., floats) required to
        calculate the equilibrium pressure.
    T_axis_x : list
        List containing temperatures (i.e., floats) defining the ticks of the x axis.
    T_axis_y : list
        List containing vapor temperatures (i.e., floats) defining the ticks of the y axis.

    Returns
    -------
    dict
        Dict containing a) handles for figure, axis, and plots; and b) calculated vapor pressures
        and equilibrium pressures.

    """
    # Calculate saturation properties and get reasonable results (i.e., p_sat <= p_crit)
    #
    p_sat = [func_p_sat_T(val) for val in T_calc]
    id_limit = np.logical_and(np.greater_equal(p_sat, 0), np.isfinite(p_sat))

    saturationPropeties = {'p_sat':   np.extract(id_limit, p_sat),
                           'T_sat':   np.extract(id_limit, T_calc)}

    # Calculate maximum loading / molar fraction / concentration to adapt input wxX_calc
    #
    wxX_max = func_wxX_pT(min(saturationPropeties['p_sat']),
                          min(saturationPropeties['T_sat']))

    wxX_calc = np.extract(np.less_equal(wxX_calc, wxX_max), wxX_calc)

    # Get critical pressure and temperature or maximum pressure and temperature if both are
    # lower than critical pressure and temperature
    #
    p_crit = max(saturationPropeties['p_sat'])
    T_crit = max(saturationPropeties['T_sat'])

    # Calculate equilibrium properties and get reasonable results (i.e., p_wxXT <= p_sat)
    #
    sorptionEquilibrium = [None] * len(wxX_calc)

    for id_wxX, val_wxX in enumerate(wxX_calc):
        p_wxXT = [func_p_wxXT(val_wxX, val) for val in T_calc]

        if not np.less_equal(p_wxXT[:len(saturationPropeties['p_sat'])],
                             saturationPropeties['p_sat']).all():
            # Cut at p_sat
            #
            id_finite = np.where(np.less_equal(p_wxXT, p_sat) == False)[0][0]

            sorptionEquilibrium[id_wxX] = {'p_wxXT':    np.array(p_wxXT[:id_finite]),
                                           'T_equ':     np.array(T_calc[:id_finite]),
                                           'T_sat':     np.array([func_T_sat_p(val) for val in \
                                                                  np.array(p_wxXT[:id_finite])])}

        else:
            # Cut at p_crit
            #
            id_limit = np.less_equal(np.array(p_wxXT), p_crit)

            sorptionEquilibrium[id_wxX] = {'p_wxXT':    np.extract(id_limit, np.array(p_wxXT)),
                                           'T_equ':     np.extract(id_limit, np.array(T_calc)),
                                           'T_sat':     np.array([func_T_sat_p(val) for val in \
                                                                  np.extract(id_limit,
                                                                             np.array(p_wxXT))])}

    # Plot data
    #
    h_fig, h_ax = plt.subplots(1, 1, figsize=(20/2.54, 10/2.54))
    h_plots = [None] * (len(wxX_calc) + 1)

    h_plots[0] = h_ax.plot(saturationPropeties['T_sat'],
                           saturationPropeties['T_sat'],
                           linestyle = '-',
                           color = '#00459F',
                           label = 'Vapor temperature')

    for id_wxX, val_wxX in enumerate(wxX_calc):
        id_valid = np.greater_equal(sorptionEquilibrium[id_wxX]['T_sat'], 0)

        h_plots[id_wxX+1] = h_ax.plot(np.extract(id_valid, sorptionEquilibrium[id_wxX]['T_equ']),
                                      np.extract(id_valid, sorptionEquilibrium[id_wxX]['T_sat']),
                                      linestyle = '--',
                                      # color = '#646567',
                                      label = str(val_wxX*100) + ' %')

    # Set up axis
    #
    h_ax.spines['top'].set_visible(False)
    h_ax.spines['right'].set_visible(False)
    h_ax.spines['left'].set_linewidth(0.5)
    h_ax.spines['bottom'].set_linewidth(0.5)

    h_ax.set_xlim(min(T_axis_x), max(T_axis_x))
    h_ax.set_ylim(min(T_axis_y), max(T_axis_y))

    h_ax.set_xticks([val for val in T_axis_x])
    h_ax.set_yticks([val for val in T_axis_y])

    h_ax.set_xticklabels([str(int(val - 273.15)) for val in T_axis_x])
    h_ax.set_yticklabels([str(int(val - 273.15)) for val in T_axis_y])

    h_ax.grid(linestyle = ':', color = '#9C9E9F')

    h_ax.set_xlabel(r'Temperature $t$ / °C $\longrightarrow$')
    h_ax.set_ylabel(r'Temperature $t_\mathrm{sat}$ / °C $\longrightarrow$')

    h_ax.legend(bbox_to_anchor=(1.05, 1),
                loc='upper left',
                labelspacing=0.05,
                frameon=False)

    # Tight layout
    #
    h_fig.align_ylabels()
    h_fig.tight_layout()

    # Return results allowing for custom-made modifications
    #
    return {'handles':  (h_fig, h_ax, h_plots),
            'data':     (saturationPropeties, sorptionEquilibrium)}
Example #47
def bpz_run(argv=None):
    """Run BPZ.

    bpz: Bayesian Photo-Z estimation
    Reference: Benitez 2000, ApJ, 536, p.571
    Usage:
    python bpz.py catalog.cat
    Needs a catalog.columns file which describes the contents of catalog.cat"""

    #description = """Run BPZ."""
    #prog = "bpz.py"

    #parser = ArgumentParser(prog=prog, description=description)
    #args = parser.parse_args(argv)

    #print("This will soon run BPZ")

    def seglist(vals, mask=None):
        """Split vals into lists based on mask > 0"""
        if mask is None:
            mask = np.greater(vals, 0)
        lists = []
        i = 0
        lastgood = False
        list1 = []
        for i in range(len(vals)):
            if not mask[i]:
                if lastgood:
                    lists.append(list1)
                    list1 = []
                lastgood = False
            if mask[i]:
                list1.append(vals[i])
                lastgood = True

        if lastgood:
            lists.append(list1)
        return lists

    # Initialization and definitions#

    # Current directory
    homedir = os.getcwd()

    # Parameter definition
    pars = useful.params()

    pars.d = {
        'SPECTRA': 'CWWSB4.list',  # template list
        #'PRIOR':   'hdfn_SB',      # prior name
        'PRIOR': 'hdfn_gen',  # prior name
        'NTYPES':
        None,  # Number of Elliptical, Spiral, and Starburst/Irregular templates  Default: 1,2,n-3
        'DZ': 0.01,  # redshift resolution
        'ZMIN': 0.01,  # minimum redshift
        'ZMAX': 10.,  # maximum redshift
        'MAG': 'yes',  # Data in magnitudes?
        'MIN_MAGERR': 0.001,  # minimum magnitude uncertainty --DC
        'ODDS': 0.95,  # Odds threshold: affects confidence limits definition
        'INTERP':
        0,  # Number of interpolated templates between each of the original ones
        'EXCLUDE': 'none',  # Filters to be excluded from the estimation
        'NEW_AB':
        'no',  # If yes, generate new AB files even if they already exist
        # Perform some checks, compare observed colors with templates, etc.
        'CHECK': 'yes',
        'VERBOSE': 'yes',  # Print estimated redshifts to the standard output
        # Save all the galaxy probability distributions (it will create a very
        # large file)
        'PROBS': 'no',
        # Save all the galaxy probability distributions P(z,t) (but not priors)
        # -- Compact
        'PROBS2': 'no',
        'PROBS_LITE': 'yes',  # Save only the final probability distribution
        'GET_Z': 'yes',  # Actually obtain photo-z
        'ONLY_TYPE': 'no',  # Use spectroscopic redshifts instead of photo-z
        'MADAU': 'yes',  # Apply Madau correction to spectra
        'Z_THR': 0,  # Integrate probability for z>z_thr
        'COLOR': 'no',  # Use colors instead of fluxes
        'PLOTS': 'no',  # Don't produce plots
        'INTERACTIVE': 'yes',  # Don't query the user
        'PHOTO_ERRORS':
        'no',  # Define the confidence interval using only the photometric errors
        # "Intrinsic"  photo-z rms in dz /(1+z) (Change to 0.05 for templates from Benitez et al. 2004
        'MIN_RMS': 0.05,
        'N_PEAKS': 1,
        'MERGE_PEAKS': 'no',
        'CONVOLVE_P': 'yes',
        'P_MIN': 1e-2,
        'SED_DIR': bpz_tools.sed_dir,
        'AB_DIR': bpz_tools.ab_dir,
        'FILTER_DIR': bpz_tools.fil_dir,
        'DELTA_M_0': 0.,
        'ZP_OFFSETS': 0.,
        'ZC': None,
        'FC': None,
        "ADD_SPEC_PROB": None,
        "ADD_CONTINUOUS_PROB": None,
        "NMAX": None  # Useful for testing
    }

    plots = 0 if pars.d['PLOTS'] == 'no' else 1

    if plots:
        plots = 'pylab'

    # Define the default values of the parameters
    pars.d['INPUT'] = sys.argv[1]  # catalog with the photometry
    obs_file = pars.d['INPUT']
    root = os.path.splitext(pars.d['INPUT'])[0]
    # column information for the input catalog
    pars.d['COLUMNS'] = root + '.columns'
    pars.d['OUTPUT'] = root + '.bpz'  # output

    #ipar = 2

    if len(sys.argv) > 2:  # Check for parameter file and update parameters
        if sys.argv[2] == '-P':
            pars.fromfile(sys.argv[3])
    #        ipar = 4
    pars.d.update(coeio.params_cl())

    def updateblank(var, ext):
        #        global pars
        if pars.d[var] in [None, 'yes']:
            pars.d[var] = root + '.' + ext

    updateblank('CHECK', 'flux_comparison')
    updateblank('PROBS_LITE', 'probs')
    updateblank('PROBS', 'full_probs')
    updateblank('PROBS2', 'chisq')

    # This allows to change the auxiliary directories used by BPZ
    if pars.d['SED_DIR'] != bpz_tools.sed_dir:
        print("Changing sed_dir to ", pars.d['SED_DIR'])
        sed_dir = pars.d['SED_DIR']
        if sed_dir[-1] != '/':
            sed_dir += '/'
    else:
        sed_dir = pars.d['SED_DIR']
    if pars.d['AB_DIR'] != bpz_tools.ab_dir:
        print("Changing ab_dir to ", pars.d['AB_DIR'])
        ab_dir = pars.d['AB_DIR']
        if ab_dir[-1] != '/':
            ab_dir += '/'
    else:
        ab_dir = pars.d['AB_DIR']
    if pars.d['FILTER_DIR'] != bpz_tools.fil_dir:
        print("Changing fil_dir to ", pars.d['FILTER_DIR'])
        fil_dir = pars.d['FILTER_DIR']
        if fil_dir[-1] != '/':
            fil_dir += '/'
    else:
        fil_dir = pars.d['FILTER_DIR']

    # Better safe than sorry
    if pars.d['OUTPUT'] == obs_file or pars.d['PROBS'] == obs_file or pars.d[
            'PROBS2'] == obs_file or pars.d['PROBS_LITE'] == obs_file:
        print("This would delete the input file!")
        sys.exit()
    if pars.d['OUTPUT'] == pars.d['COLUMNS'] or pars.d['PROBS_LITE'] == pars.d[
            'COLUMNS'] or pars.d['PROBS'] == pars.d['COLUMNS']:
        print("This would delete the .columns file!")
        sys.exit()

    # Assign the intrinsic rms
    if pars.d['SPECTRA'] == 'CWWSB.list':
        print('Setting the intrinsic rms to 0.067(1+z)')
        pars.d['MIN_RMS'] = 0.067

    pars.d['MIN_RMS'] = float(pars.d['MIN_RMS'])
    pars.d['MIN_MAGERR'] = float(pars.d['MIN_MAGERR'])
    if pars.d['INTERACTIVE'] == 'no':
        interactive = 0
    else:
        interactive = 1
    if pars.d['VERBOSE'] == 'yes':
        print("Current parameters")
        useful.view_keys(pars.d)
    pars.d['N_PEAKS'] = int(pars.d['N_PEAKS'])
    if pars.d["ADD_SPEC_PROB"] is not None:
        specprob = 1
        specfile = pars.d["ADD_SPEC_PROB"]
        spec = useful.get_2Darray(specfile)
        ns = spec.shape[1]
        if old_div(ns, 2) != (old_div(ns, 2.)):
            print("Number of columns in SPEC_PROB is odd")
            sys.exit()
        z_spec = spec[:, :old_div(ns, 2)]
        p_spec = spec[:, old_div(ns, 2):]
        # Write output file header
        header = "#ID "
        header += ns // 2 * " z_spec%i"
        header += ns // 2 * " p_spec%i"
        header += "\n"
        header = header % tuple(
            list(range(old_div(ns, 2))) + list(range(old_div(ns, 2))))
        specout = open(specfile.split()[0] + ".p_spec", "w")
        specout.write(header)
    else:
        specprob = 0
    pars.d['DELTA_M_0'] = float(pars.d['DELTA_M_0'])

    # Some misc. initialization info useful for the .columns file
    # nofilters=['M_0','OTHER','ID','Z_S','X','Y']
    nofilters = ['M_0', 'OTHER', 'ID', 'Z_S']

    # Numerical codes for nondetection, etc. in the photometric catalog
    unobs = -99.  # Objects not observed
    undet = 99.  # Objects not detected

    # Define the z-grid
    zmin = float(pars.d['ZMIN'])
    zmax = float(pars.d['ZMAX'])
    if zmin > zmax:
        raise ValueError('zmin must be smaller than zmax')
    dz = float(pars.d['DZ'])

    linear = 1
    if linear:
        z = np.arange(zmin, zmax + dz, dz)
    else:
        if zmax != 0.:
            zi = zmin
            z = []
            while zi <= zmax:
                z.append(zi)
                zi = zi + dz * (1. + zi)
            z = np.array(z)
        else:
            z = np.array([0.])

    # Now check the contents of the FILTERS, SED and AB directories

    # Get the filters in stock
    filters_db = []
    filters_db = glob.glob(fil_dir + '*.res')
    for i in range(len(filters_db)):
        filters_db[i] = os.path.basename(filters_db[i])
        filters_db[i] = filters_db[i][:-4]

    # Get the SEDs in stock
    sed_db = []
    sed_db = glob.glob(sed_dir + '*.sed')
    for i in range(len(sed_db)):
        sed_db[i] = os.path.basename(sed_db[i])
        sed_db[i] = sed_db[i][:-4]

    # Get the ABflux files in stock
    ab_db = []
    ab_db = glob.glob(ab_dir + '*.AB')
    for i in range(len(ab_db)):
        ab_db[i] = os.path.basename(ab_db[i])
        ab_db[i] = ab_db[i][:-3]

    # Get a list with the filter names and check whether they are in stock
    col_file = pars.d['COLUMNS']
    filters = useful.get_str(col_file, 0)

    for cosa in nofilters:
        if filters.count(cosa):
            filters.remove(cosa)

    if pars.d['EXCLUDE'] != 'none':
        if isinstance(pars.d['EXCLUDE'], str):
            pars.d['EXCLUDE'] = [pars.d['EXCLUDE']]
        for cosa in pars.d['EXCLUDE']:
            if filters.count(cosa):
                filters.remove(cosa)

    for filter in filters:
        if filter[-4:] == '.res':
            filter = filter[:-4]
        if filter not in filters_db:
            print('filter ', filter, 'not in database at', fil_dir, ':')
            if useful.ask('Print filters in database?'):
                for line in filters_db:
                    print(line)
            sys.exit()

    # Get a list with the spectrum names and check whether they're in stock
    # Look for the list in the home directory first,
    # if it's not there, look in the SED directory
    spectra_file = os.path.join(homedir, pars.d['SPECTRA'])
    if not os.path.exists(spectra_file):
        spectra_file = os.path.join(sed_dir, pars.d['SPECTRA'])

    spectra = useful.get_str(spectra_file, 0)
    for i in range(len(spectra)):
        if spectra[i][-4:] == '.sed':
            spectra[i] = spectra[i][:-4]

    nf = len(filters)
    nt = len(spectra)
    nz = len(z)

    # Get the model fluxes
    f_mod = np.zeros((nz, nt, nf)) * 0.
    abfiles = []

    for it in range(nt):
        for jf in range(nf):
            if filters[jf][-4:] == '.res':
                filtro = filters[jf][:-4]
            else:
                filtro = filters[jf]
            model = '.'.join([spectra[it], filtro, 'AB'])
            model_path = os.path.join(ab_dir, model)
            abfiles.append(model)
            # Generate new ABflux files if not present
            # or if new_ab flag on
            if pars.d['NEW_AB'] == 'yes' or model[:-3] not in ab_db:
                if spectra[it] not in sed_db:
                    print('SED ', spectra[it], 'not in database at', sed_dir)
                    #		for line in sed_db:
                    #                    print line
                    sys.exit()
                # print spectra[it],filters[jf]
                print('     Generating ', model, '....')
                bpz_tools.ABflux(spectra[it], filtro, madau=pars.d['MADAU'])

            zo, f_mod_0 = useful.get_data(model_path, (0, 1))
            # Rebin the data to the required redshift resolution
            f_mod[:, it, jf] = useful.match_resol(zo, f_mod_0, z)
            if np.less(f_mod[:, it, jf], 0.).any():
                print('Warning: some values of the model AB fluxes are <0')
                print('due to the interpolation ')
                print('Clipping them to f>=0 values')
                # To avoid rounding errors in the calculation of the likelihood
                f_mod[:, it, jf] = np.clip(f_mod[:, it, jf], 0., 1e300)

    # Here goes the interpolation between the colors
    ninterp = int(pars.d['INTERP'])

    ntypes = pars.d['NTYPES']
    if ntypes is None:
        nt0 = nt
    else:
        nt0 = list(ntypes)
        for i, nt1 in enumerate(nt0):
            print(i, nt1)
            nt0[i] = int(nt1)
        if (len(nt0) != 3) or (np.sum(nt0) != nt):
            print()
            print('%d ellipticals + %d spirals + %d starbursts' % tuple(nt0))
            print('does not add up to %d templates' % nt)
            print('USAGE: -NTYPES nell,nsp,nsb')
            print('nell = # of elliptical templates')
            print('nsp  = # of spiral templates')
            print('nsb  = # of starburst templates')
            print(
                'These must add up to the number of templates in the SPECTRA list'
            )
            print('Quitting BPZ.')
            sys.exit()

    if ninterp:
        nti = nt + (nt - 1) * ninterp
        buff = np.zeros((nz, nti, nf)) * 1.
        tipos = np.arange(0., float(nti), float(ninterp) + 1.)
        xtipos = np.arange(float(nti))
        for iz in np.arange(nz):
            for jf in range(nf):
                buff[iz, :, jf] = useful.match_resol(tipos, f_mod[iz, :, jf],
                                                     xtipos)
        nt = nti
        f_mod = buff

    # Load all the parameters in the columns file to a dictionary
    col_pars = useful.params()
    col_pars.fromfile(col_file)

    # Read which filters are in which columns
    flux_cols = []
    eflux_cols = []
    cals = []
    zp_errors = []
    zp_offsets = []
    for filter in filters:
        datos = col_pars.d[filter]
        flux_cols.append(int(datos[0]) - 1)
        eflux_cols.append(int(datos[1]) - 1)
        cals.append(datos[2])
        zp_errors.append(datos[3])
        zp_offsets.append(datos[4])
    zp_offsets = np.array(list(map(float, zp_offsets)))
    if pars.d['ZP_OFFSETS']:
        zp_offsets += np.array(list(map(float, pars.d['ZP_OFFSETS'])))

    flux_cols = tuple(flux_cols)
    eflux_cols = tuple(eflux_cols)

    # READ the flux and errors from obs_file
    f_obs = useful.get_2Darray(obs_file, flux_cols)
    ef_obs = useful.get_2Darray(obs_file, eflux_cols)

    # Convert them to arbitrary fluxes if they are in magnitudes
    if pars.d['MAG'] == 'yes':
        seen = np.greater(f_obs, 0.) * np.less(f_obs, undet)
        no_seen = np.equal(f_obs, undet)
        no_observed = np.equal(f_obs, unobs)
        todo = seen + no_seen + no_observed
        # The minimum photometric error is 0.01
        # ef_obs=ef_obs+seen*np.equal(ef_obs,0.)*0.001
        ef_obs = np.where(np.greater_equal(ef_obs, 0.),
                          np.clip(ef_obs, pars.d['MIN_MAGERR'], 1e10), ef_obs)
        if np.add.reduce(np.add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
            print('Objects with unexpected magnitudes!')
            print("""Allowed values for magnitudes are 
    	0<m<""" + repr(undet) + " m=" + repr(undet) + "(non detection), m=" +
                  repr(unobs) + "(not observed)")
            for i in range(len(todo)):
                if not np.alltrue(todo[i, :]):
                    print(i + 1, f_obs[i, :], ef_obs[i, :])
            sys.exit()

        # Detected objects
        try:
            f_obs = np.where(seen, 10.**(-.4 * f_obs), f_obs)
        except OverflowError:
            print(
                'Some of the input magnitudes have values which are >700 or <-700'
            )
            print('Purge the input photometric catalog')
            print('Minimum value', min(f_obs))
            print('Maximum value', max(f_obs))
            print('Indexes for minimum values', np.argmin(f_obs, 0.))
            print('Indexes for maximum values', np.argmax(f_obs, 0.))
            print('Bye.')
            sys.exit()

        try:
            ef_obs = np.where(seen, (10.**(.4 * ef_obs) - 1.) * f_obs, ef_obs)
        except OverflowError:
            print(
                'Some of the input magnitude errors have values which are >700 or <-700'
            )
            print('Purge the input photometric catalog')
            print('Minimum value', min(ef_obs))
            print('Maximum value', max(ef_obs))
            print('Indexes for minimum values', np.argmin(ef_obs, 0.))
            print('Indexes for maximum values', np.argmax(ef_obs, 0.))
            print('Bye.')
            sys.exit()

        # Looked at, but not detected objects (mag=99.)
        # We take the flux equal to zero, and the error in the flux equal to the 1-sigma detection error.
        # If m=99, the corresponding error magnitude column is supposed to be dm=m_1sigma; to avoid errors
        # with the sign we take the absolute value of dm
        f_obs = np.where(no_seen, 0., f_obs)
        ef_obs = np.where(no_seen, 10.**(-.4 * abs(ef_obs)), ef_obs)

        # Objects not looked at (mag=-99.)
        f_obs = np.where(no_observed, 0., f_obs)
        ef_obs = np.where(no_observed, 0., ef_obs)

    # Flux codes:
    # If f>0 and ef>0 : normal objects
    # If f==0 and ef>0 :object not detected
    # If f==0 and ef==0: object not observed
    # Everything else will crash the program

    # Check that the observed error fluxes are reasonable
    #if sometrue(np.less(ef_obs,0.)): raise 'Negative input flux errors'
    if np.less(ef_obs, 0.).any():
        raise ValueError('Negative input flux errors')

    f_obs = np.where(np.less(f_obs, 0.), 0., f_obs)  # Put non-detections to 0
    ef_obs = np.where(np.less(f_obs, 0.), np.maximum(1e-100, f_obs + ef_obs),
                      ef_obs)  # Error equivalent to 1 sigma upper limit

    #if sometrue(np.less(f_obs,0.)) : raise 'Negative input fluxes'
    seen = np.greater(f_obs, 0.) * np.greater(ef_obs, 0.)
    no_seen = np.equal(f_obs, 0.) * np.greater(ef_obs, 0.)
    no_observed = np.equal(f_obs, 0.) * np.equal(ef_obs, 0.)

    todo = seen + no_seen + no_observed
    if np.add.reduce(np.add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
        print('Objects with unexpected fluxes/errors')

    # Convert (internally) objects with zero flux and zero error(non observed)
    # to objects with almost infinite (~1e108) error and still zero flux
    # This will yield reasonable likelihoods (flat ones) for these objects
    ef_obs = np.where(no_observed, 1e108, ef_obs)

    # Include the zero point errors
    zp_errors = np.array(list(map(float, zp_errors)))
    zp_frac = bpz_tools.e_mag2frac(zp_errors)
    # zp_frac=10.**(.4*zp_errors)-1.
    ef_obs = np.where(seen, np.sqrt(ef_obs * ef_obs + (zp_frac * f_obs)**2),
                      ef_obs)
    ef_obs = np.where(
        no_seen,
        np.sqrt(ef_obs * ef_obs + (zp_frac * (old_div(ef_obs, 2.)))**2),
        ef_obs)

    # Add the zero-points offset
    # The offsets are defined as m_new-m_old
    zp_offsets = np.array(list(map(float, zp_offsets)))
    zp_offsets = np.where(np.not_equal(zp_offsets, 0.),
                          10.**(-.4 * zp_offsets), 1.)
    f_obs = f_obs * zp_offsets
    ef_obs = ef_obs * zp_offsets

    # Convert fluxes to AB if needed
    for i in range(f_obs.shape[1]):
        if cals[i] == 'Vega':
            const = bpz_tools.mag2flux(bpz_tools.VegatoAB(0., filters[i]))
            f_obs[:, i] = f_obs[:, i] * const
            ef_obs[:, i] = ef_obs[:, i] * const
        elif cals[i] == 'AB':
            continue
        else:
            print('AB or Vega?. Check ' + col_file + ' file')
            sys.exit()

    # Get m_0 (if present)
    if 'M_0' in col_pars.d:
        m_0_col = int(col_pars.d['M_0']) - 1
        m_0 = useful.get_data(obs_file, m_0_col)
        m_0 += pars.d['DELTA_M_0']

    # Get the objects ID (as a string)
    if 'ID' in col_pars.d:
        #    print col_pars.d['ID']
        id_col = int(col_pars.d['ID']) - 1
        lid = useful.get_str(obs_file, id_col)
    else:
        lid = list(map(str, list(range(1, len(f_obs[:, 0]) + 1))))

    # Get spectroscopic redshifts (if present)
    if 'Z_S' in col_pars.d:
        z_s_col = int(col_pars.d['Z_S']) - 1
        z_s = useful.get_data(obs_file, z_s_col)

    # Get the X,Y coordinates
    if 'X' in col_pars.d:
        datos = col_pars.d['X']
        if len(datos) == 1:  # OTHERWISE IT'S A FILTER!
            x_col = int(col_pars.d['X']) - 1
            x = useful.get_data(obs_file, x_col)
    if 'Y' in col_pars.d:
        datos = col_pars.d['Y']
        if len(datos) == 1:  # OTHERWISE IT'S A FILTER!
            y_col = int(datos) - 1
            y = useful.get_data(obs_file, y_col)

    # If 'check' on, initialize some variables
    check = pars.d['CHECK']
    checkSED = check != 'no'

    ng = f_obs.shape[0]
    if checkSED:
        # PHOTOMETRIC CALIBRATION CHECK
        # Defaults: r=1, dm=1, w=0
        frat = np.ones((ng, nf), float)
        fw = np.zeros((ng, nf), float)

    # Visualize the colors of the galaxies and the templates

    # When there are spectroscopic redshifts available
    if interactive and 'Z_S' in col_pars.d and plots and checkSED and useful.ask(
            'Plot colors vs spectroscopic redshifts?'):
        pylab.figure(1)
        nrows = 2
        ncols = old_div((nf - 1), nrows)
        if (nf - 1) % nrows:
            ncols += 1
        for i in range(nf - 1):
            # plot=FramedPlot()
            # Check for overflows
            fmu = f_obs[:, i + 1]
            fml = f_obs[:, i]
            good = np.greater(fml, 1e-100) * np.greater(fmu, 1e-100)
            zz, fmu, fml = useful.multicompress(good, (z_s, fmu, fml))
            colour = old_div(fmu, fml)
            colour = np.clip(colour, 1e-5, 1e5)
            colour = 2.5 * np.log10(colour)
            pylab.subplot(nrows, ncols, i + 1)
            pylab.plot(zz, colour, "bo")
            for it in range(nt):
                # Prevent overflows
                fmu = f_mod[:, it, i + 1]
                fml = f_mod[:, it, i]
                good = np.greater(fml, 1e-100)
                zz, fmu, fml = useful.multicompress(good, (z, fmu, fml))
                colour = old_div(fmu, fml)
                colour = np.clip(colour, 1e-5, 1e5)
                colour = 2.5 * np.log10(colour)
                pylab.plot(zz, colour, "r")
            pylab.xlabel(r'$z$')
            pylab.ylabel('%s - %s' % (filters[i], filters[i + 1]))
        pylab.show()
        inp = input('Hit Enter to continue.')

    # Get other information which will go in the output file (as strings)
    if 'OTHER' in col_pars.d:
        if col_pars.d['OTHER'] != 'all':
            other_cols = col_pars.d['OTHER']
            if isinstance(other_cols, list):
                other_cols = tuple(map(int, other_cols))
            else:
                other_cols = (int(other_cols), )
            other_cols = [x - 1 for x in other_cols]
            n_other = len(other_cols)
        else:
            n_other = useful.get_2Darray(obs_file, cols='all',
                                         nrows=1).shape[1]
            other_cols = list(range(n_other))

        others = useful.get_str(obs_file, other_cols)

        if len(other_cols) > 1:
            other = []
            for j in range(len(others[0])):
                lista = []
                for i in range(len(others)):
                    lista.append(others[i][j])
                other.append(''.join(lista))
        else:
            other = others

    if pars.d['GET_Z'] == 'no':
        get_z = 0
    else:
        get_z = 1

    # Prepare the output file
    out_name = pars.d['OUTPUT']
    if get_z:
        if os.path.exists(out_name):
            os.system('cp %s %s.bak' % (out_name, out_name))
            print("File %s exists. Copying it to %s.bak" %
                  (out_name, out_name))
        output = open(out_name, 'w')

    if pars.d['PROBS_LITE'] == 'no':
        save_probs = 0
    else:
        save_probs = 1

    if pars.d['PROBS'] == 'no':
        save_full_probs = 0
    else:
        save_full_probs = 1

    if pars.d['PROBS2'] == 'no':
        save_probs2 = 0
    else:
        save_probs2 = 1

    # Include some header information

    #   File name and the date...
    time_stamp = time.ctime(time.time())
    if get_z:
        output.write('## File ' + out_name + '  ' + time_stamp + '\n')

    # and also the parameters used to run bpz...
    if get_z:
        output.write("""##
##Parameters used to run BPZ:
##
""")
    claves = list(pars.d.keys())
    claves.sort()
    for key in claves:
        if isinstance(pars.d[key], list):
            cosa = ','.join(list(pars.d[key]))
        else:
            cosa = str(pars.d[key])
        if get_z:
            output.write('##' + key.upper() + '=' + cosa + '\n')

    if save_full_probs:
        # Shelve some info on the run
        full_probs = shelve.open(pars.d['PROBS'])
        full_probs['TIME'] = time_stamp
        full_probs['PARS'] = pars.d

    if save_probs:
        probs = open(pars.d['PROBS_LITE'], 'w')
        probs.write('# ID  p_bayes(z)  where z=arange(%.4f,%.4f,%.4f) \n' %
                    (zmin, zmax + dz, dz))

    if save_probs2:
        probs2 = open(pars.d['PROBS2'], 'w')
        probs2.write(
            '# id t  z1    P(z1) P(z1+dz) P(z1+2*dz) ...  where dz = %.4f\n' %
            dz)

    # Use an empirical prior?
    tipo_prior = pars.d['PRIOR']
    useprior = 0
    if 'M_0' in col_pars.d:
        has_mags = 1
    else:
        has_mags = 0
    if has_mags and tipo_prior != 'none' and tipo_prior != 'flat':
        useprior = 1

    # Add cluster 'spikes' to the prior?
    cluster_prior = 0.
    if pars.d['ZC']:
        cluster_prior = 1
        if isinstance(pars.d['ZC'], str):
            zc = np.array([float(pars.d['ZC'])])
        else:
            zc = np.array(list(map(float, pars.d['ZC'])))
        if isinstance(pars.d['FC'], str):
            fc = np.array([float(pars.d['FC'])])
        else:
            fc = np.array(list(map(float, pars.d['FC'])))

        fcc = np.add.reduce(fc)
        if fcc > 1.:
            print(fcc)
            raise ValueError('Too many galaxies in clusters!')
        pi_c = np.zeros((nz, nt)) * 1.
        # Go over the different cluster spikes
        for i in range(len(zc)):
            # We define the cluster within dz=0.01 limits
            cluster_range = np.less_equal(abs(z - zc[i]), .01) * 1.
            # Clip values to avoid overflow
            exponente = np.clip(-(z - zc[i])**2 / 2. / (0.00333)**2, -700., 0.)
            # Outside the cluster range g is 0
            g = np.exp(exponente) * cluster_range
            norm = np.add.reduce(g)
            pi_c[:, 0] = pi_c[:, 0] + g / norm * fc[i]

        # Go over the different types
        print('We only apply the cluster prior to the early type galaxies')
        for i in range(1, 3 + 2 * ninterp):
            pi_c[:, i] = pi_c[:, i] + pi_c[:, 0]
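        # pi_c[:, 0] now holds the sum of the spikes (each normalized and weighted by fc);
        # the loop above copies that column into the first early-type template columns,
        # including their interpolated neighbours.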

    # Output format
    format = '%' + str(max(5, len(lid[0]))) + 's'  # ID format
    format = format + pars.d['N_PEAKS'] * \
        ' %.3f %.3f  %.3f %.3f %.5f' + ' %.3f %.3f %10.3f'

    # Add header with variable names to the output file
    sxhdr = """##
##Column information
##
# 1 LID"""
    k = 1

    if pars.d['N_PEAKS'] > 1:
        for j in range(pars.d['N_PEAKS']):
            sxhdr += """
# %i Z_B_%i
# %i Z_B_MIN_%i
# %i Z_B_MAX_%i
# %i T_B_%i
# %i ODDS_%i""" % (k + 1, j + 1, k + 2, j + 1, k + 3, j + 1, k + 4, j + 1,
                   k + 5, j + 1)
            k += 5
    else:
        sxhdr += """
# %i Z_B
# %i Z_B_MIN
# %i Z_B_MAX
# %i T_B
# %i ODDS""" % (k + 1, k + 2, k + 3, k + 4, k + 5)
        k += 5

    sxhdr += """    
# %i Z_ML
# %i T_ML
# %i CHI-SQUARED\n""" % (k + 1, k + 2, k + 3)

    nh = k + 4
    if 'Z_S' in col_pars.d:
        sxhdr = sxhdr + '# %i Z_S\n' % nh
        format = format + '  %.3f'
        nh += 1
    if has_mags:
        format = format + '  %.3f'
        sxhdr = sxhdr + '# %i M_0\n' % nh
        nh += 1
    if 'OTHER' in col_pars.d:
        sxhdr = sxhdr + '# %i OTHER\n' % nh
        format = format + ' %s'
        nh += n_other

    # print sxhdr

    if get_z:
        output.write(sxhdr + '##\n')

    odds_i = float(pars.d['ODDS'])
    oi = useful.inv_gauss_int(odds_i)
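    # oi is the number of sigmas of a Gaussian that enclose a fraction odds_i of the
    # probability; it sets the half-width of the odds integration interval below.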

    print(odds_i, oi)

    # Proceed to redshift estimation

    if checkSED:
        buffer_flux_comparison = ""

    if pars.d['CONVOLVE_P'] == 'yes':
        # Convolve with a dz=0.03 gaussian to make the probabilities smoother.
        # This is necessary; otherwise there are too many close peaks
        sigma_g = 0.03
        # Use a dedicated variable for the kernel grid so the X coordinate read
        # from the catalogue (used by the lensing prior) is not overwritten.
        x_gauss = np.arange(-3. * sigma_g, 3. * sigma_g + old_div(dz, 10.),
                            dz)  # made symmetric --DC
        gaus = np.exp(-(old_div(x_gauss, sigma_g))**2)
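        # The kernel need not be normalized: p_bayes is renormalized after the
        # convolution and clipping steps below.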

    if pars.d["NMAX"] is not None:
        ng = int(pars.d["NMAX"])
    for ig in range(ng):
        # Don't run BPZ on galaxies that have z_s > z_max
        if not get_z:
            continue
        if pars.d['COLOR'] == 'yes':
            likelihood = bpz_tools.p_c_z_t_color(f_obs[ig, :nf],
                                                 ef_obs[ig, :nf],
                                                 f_mod[:nz, :nt, :nf])
        else:
            likelihood = bpz_tools.p_c_z_t(f_obs[ig, :nf], ef_obs[ig, :nf],
                                           f_mod[:nz, :nt, :nf])

        iz_ml = likelihood.i_z_ml
        t_ml = likelihood.i_t_ml
        red_chi2 = old_div(likelihood.min_chi2, float(nf - 1.))
        p = likelihood.likelihood
        if not ig:
            print('ML * prior -- NOT QUITE BAYESIAN')

        # Use only the redshift information, no priors
        if pars.d['ONLY_TYPE'] == 'yes':
            p_i = np.zeros((nz, nt)) * 1.
            j = np.searchsorted(z, z_s[ig])
            # print j,nt,z_s[ig]
            p_i[j, :] = old_div(1., float(nt))
        else:
            if useprior:
                if pars.d['PRIOR'] == 'lensing':
                    p_i = bpz_tools.prior(z, m_0[ig], tipo_prior, nt0, ninterp,
                                          x[ig], y[ig])
                else:
                    p_i = bpz_tools.prior(z, m_0[ig], tipo_prior, nt0, ninterp)
            else:
                p_i = old_div(np.ones((nz, nt), float), float(nz * nt))
            if cluster_prior:
                p_i = (1. - fcc) * p_i + pi_c
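                # p_i now mixes the smooth prior (weight 1 - fcc) with the cluster spikes.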

        if save_full_probs:
            full_probs[lid[ig]] = [z, p_i[:nz, :nt], p[:nz, :nt], red_chi2]

        # Multiply the prior by the likelihood to find the final probability
        pb = p_i[:nz, :nt] * p[:nz, :nt]

        # Convolve with a gaussian of width sigma*(1+z) to take into
        # account the intrinsic scatter in the redshift estimation, 0.06*(1+z)
        # (to be done)

        # Estimate the bayesian quantities
        p_bayes = np.add.reduce(pb[:nz, :nt], -1)

        # Convolve with a gaussian
        if pars.d['CONVOLVE_P'] == 'yes' and pars.d['ONLY_TYPE'] == 'no':
            p_bayes = np.convolve(p_bayes, gaus, 'same')

        # Eliminate all low level features in the prob. distribution
        pmax = max(p_bayes)
        p_bayes = np.where(np.greater(p_bayes, pmax * float(pars.d['P_MIN'])),
                           p_bayes, 0.)

        norm = np.add.reduce(p_bayes)
        p_bayes = old_div(p_bayes, norm)

        if specprob:
            p_spec[ig, :] = useful.match_resol(z, p_bayes,
                                               z_spec[ig, :]) * p_spec[ig, :]
            norma = np.add.reduce(p_spec[ig, :])
            if norma == 0.:
                norma = 1.
            p_spec[ig, :] /= norma
            # vyjod=tuple([lid[ig]]+list(z_spec[ig,:])+list(p_spec[ig,:])+[z_s[ig],
            #                int(float(other[ig]))])
            vyjod = tuple([lid[ig]] + list(z_spec[ig, :]) +
                          list(p_spec[ig, :]))
            formato = "%s " + 5 * " %.4f"
            formato += 5 * " %.3f"
            #formato+="  %4f %i"
            formato += "\n"
            print(formato % vyjod)
            specout.write(formato % vyjod)

        if pars.d['N_PEAKS'] > 1:
            # Identify  maxima and minima in the final probability
            g_max = np.less(p_bayes[2:], p_bayes[1:-1]) * \
                np.less(p_bayes[:-2], p_bayes[1:-1])
            g_min = np.greater(p_bayes[2:], p_bayes[1:-1]) * \
                np.greater(p_bayes[:-2], p_bayes[1:-1])

            g_min += np.equal(p_bayes[1:-1], 0.) * np.greater(p_bayes[2:], 0.)
            g_min += np.equal(p_bayes[1:-1], 0.) * np.greater(p_bayes[:-2], 0.)

            i_max = np.compress(g_max, np.arange(nz - 2)) + 1
            i_min = np.compress(g_min, np.arange(nz - 2)) + 1

            # Check whether the first and last points are minima or maxima;
            # if they are, add them to the index arrays

            if p_bayes[0] > p_bayes[1]:
                i_max = np.concatenate([[0], i_max])
                i_min = np.concatenate([[0], i_min])
            if p_bayes[-1] > p_bayes[-2]:
                i_max = np.concatenate([i_max, [nz - 1]])
                i_min = np.concatenate([i_min, [nz - 1]])
            if p_bayes[0] < p_bayes[1]:
                i_min = np.concatenate([[0], i_min])
            if p_bayes[-1] < p_bayes[-2]:
                i_min = np.concatenate([i_min, [nz - 1]])

            p_max = np.take(p_bayes, i_max)
            p_tot = []
            z_peaks = []
            t_peaks = []
            # Sort them by probability values
            p_max, i_max = multisort(old_div(1., p_max), (p_max, i_max))
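            # Sorting on 1/p_max orders the peaks from most to least probable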
            # For each maximum, define the minima which sandwich it
            # Assign minima to each maximum
            jm = np.searchsorted(i_min, i_max)
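            # For each maximum i_max[i], i_min[jm[i] - 1] and i_min[jm[i]] are the
            # minima immediately to its left and right; they bracket the peak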
            p_max = list(p_max)

            for i in range(len(i_max)):
                z_peaks.append(
                    [z[i_max[i]], z[i_min[jm[i] - 1]], z[i_min[jm[i]]]])
                t_peaks.append(np.argmax(pb[i_max[i], :nt]))
                p_tot.append(np.sum(p_bayes[i_min[jm[i] - 1]:i_min[jm[i]]]))
                # print z_peaks[-1][0],f_mod[i_max[i],t_peaks[-1]-1,:nf]

            if ninterp:
                t_peaks = list(old_div(np.array(t_peaks), (1. + ninterp)))

            if pars.d['MERGE_PEAKS'] == 'yes':
                # Merge peaks which are very close 0.03(1+z)
                merged = []
                for k in range(len(z_peaks)):
                    for j in range(len(z_peaks)):
                        if j > k and k not in merged and j not in merged:
                            if abs(z_peaks[k][0] - z_peaks[j][0]) < 0.06 * (
                                    1. + z_peaks[j][0]):
                                # Modify the element which receives the
                                # accretion
                                z_peaks[k][1] = np.minimum(
                                    z_peaks[k][1], z_peaks[j][1])
                                z_peaks[k][2] = np.maximum(
                                    z_peaks[k][2], z_peaks[j][2])
                                p_tot[k] += p_tot[j]
                                # Put the merged element in the list
                                merged.append(j)

                # Clean up
                copia = p_tot[:]
                for j in merged:
                    p_tot.remove(copia[j])
                copia = z_peaks[:]
                for j in merged:
                    z_peaks.remove(copia[j])
                copia = t_peaks[:]
                for j in merged:
                    t_peaks.remove(copia[j])
                copia = p_max[:]
                for j in merged:
                    p_max.remove(copia[j])

            if np.sum(np.array(p_tot)) != 1.:
                p_tot = old_div(np.array(p_tot), np.sum(np.array(p_tot)))

        # Define the peak
        iz_b = np.argmax(p_bayes)
        zb = z[iz_b]
        # OKAY, NOW THAT GAUSSIAN CONVOLUTION BUG IS FIXED
        # if pars.d['ONLY_TYPE']=='yes': zb=zb-dz/2. #This corrects a small bias
        # else: zb=zb-dz #This corrects another small bias --DC

        # Integrate within a ~ oi*sigma interval to estimate
        # the odds. (based on a sigma=pars.d['MIN_RMS']*(1+z))
        # Look for the number of sigma corresponding
        # to the odds_i confidence limit

        zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
        zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
        if pars.d['Z_THR'] > 0:
            zo1 = float(pars.d['Z_THR'])
            zo2 = float(pars.d['ZMAX'])
        o = bpz_tools.odds(p_bayes[:nz], z, zo1, zo2)

        # Integrate within the same odds interval to find the type
        # izo1=np.maximum(0,np.searchsorted(z,zo1)-1)
        # izo2=np.minimum(nz,np.searchsorted(z,zo2))
        # t_b=np.argmax(np.add.reduce(p[izo1:izo2,:nt],0))

        it_b = np.argmax(pb[iz_b, :nt])
        t_b = it_b + 1

        if ninterp:
            tt_b = old_div(float(it_b), (1. + ninterp))
            tt_ml = old_div(float(t_ml), (1. + ninterp))
        else:
            tt_b = it_b
            tt_ml = t_ml

        if max(pb[iz_b, :]) < 1e-300:
            print('NO CLEAR BEST t_b; ALL PROBABILITIES ZERO')
            t_b = -1.
            tt_b = -1.

        # Redshift confidence limits
        z1, z2 = bpz_tools.interval(p_bayes[:nz], z, odds_i)
        if pars.d['PHOTO_ERRORS'] == 'no':
            zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
            zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
            if zo1 < z1:
                z1 = np.maximum(0., zo1)
            if zo2 > z2:
                z2 = zo2

        # Print output

        if pars.d['N_PEAKS'] == 1:
            salida = [
                lid[ig], zb, z1, z2, tt_b + 1, o, z[iz_ml], tt_ml + 1, red_chi2
            ]
        else:
            salida = [lid[ig]]
            for k in range(pars.d['N_PEAKS']):
                if k <= len(p_tot) - 1:
                    salida = salida + \
                        list(z_peaks[k]) + [t_peaks[k] + 1, p_tot[k]]
                else:
                    salida += [-1., -1., -1., -1., -1.]
            salida += [z[iz_ml], tt_ml + 1, red_chi2]

        if 'Z_S' in col_pars.d:
            salida.append(z_s[ig])
        if has_mags:
            salida.append(m_0[ig] - pars.d['DELTA_M_0'])
        if 'OTHER' in col_pars.d:
            salida.append(other[ig])

        if get_z:
            output.write(format % tuple(salida) + '\n')
        if pars.d['VERBOSE'] == 'yes':
            print(format % tuple(salida))

        odd_check = odds_i

        if checkSED:
            ft = f_mod[iz_b, it_b, :]
            fo = f_obs[ig, :]
            efo = ef_obs[ig, :]
            factor = ft / efo / efo
            ftt = np.add.reduce(ft * factor)
            fot = np.add.reduce(fo * factor)
            am = old_div(fot, ftt)
            ft = ft * am

            flux_comparison = [lid[ig], m_0[ig], z[iz_b], t_b, am] + list(
                np.concatenate([ft, fo, efo]))
            nfc = len(flux_comparison)

            format_fc = '%s  %.2f  %.2f   %i' + (nfc - 4) * '   %.3e' + '\n'
            buffer_flux_comparison = buffer_flux_comparison + \
                format_fc % tuple(flux_comparison)
            if o >= odd_check:
                # PHOTOMETRIC CALIBRATION CHECK
                # Calculate flux ratios, but only for objects with ODDS >= odd_check
                #  (odd_check = 0.95 by default)
                # otherwise, leave weight w = 0 by default
                eps = 1e-10
                frat[ig, :] = MLab_coe.divsafe(fo, ft, inf=eps, nan=eps)
                #fw[ig,:] = np.greater(fo, 0)
                fw[ig, :] = MLab_coe.divsafe(fo, efo, inf=1e8, nan=0)
                fw[ig, :] = np.clip(fw[ig, :], 0, 100)

        if save_probs:
            texto = '%s ' % str(lid[ig])
            texto += len(p_bayes) * '%.3e ' + '\n'
            probs.write(texto % tuple(p_bayes))

        # pb[z,t] -> p_bayes[z]
        # 1. tb are summed over
        # 2. convolved with Gaussian if CONVOLVE_P
        # 3. Clipped above P_MIN * max(P), where P_MIN = 0.01 by default
        # 4. normalized such that np.sum(P(z)) = 1
        if save_probs2:  # P = np.exp(-chisq / 2)
            pmin = pmax * float(pars.d['P_MIN'])
            chisq = -2 * np.log(pb)
            for itb in range(nt):
                chisqtb = chisq[:, itb]
                pqual = np.greater(pb[:, itb], pmin)
                chisqlists = seglist(chisqtb, pqual)
                if len(chisqlists) == 0:
                    continue
                zz = np.arange(zmin, zmax + dz, dz)
                zlists = seglist(zz, pqual)
                for i in range(len(zlists)):
                    probs2.write('%s  %2d  %.3f  ' %
                                 (lid[ig], itb + 1, zlists[i][0]))
                    fmt = len(chisqlists[i]) * '%4.2f ' + '\n'
                    probs2.write(fmt % tuple(chisqlists[i]))

    if checkSED:
        open(pars.d['CHECK'], 'w').write(buffer_flux_comparison)

    if get_z:
        output.close()

    if checkSED:
        if interactive:
            print("")
            print("")
            print("PHOTOMETRIC CALIBRATION TESTS")
            fratavg = old_div(np.sum(fw * frat, axis=0), np.sum(fw, axis=0))
            dmavg = -bpz_tools.flux2mag(fratavg)
            fnobj = np.sum(np.greater(fw, 0), axis=0)
            print(
                "If the dmag are large, add them to the .columns file (zp_offset), then re-run BPZ."
            )
            print(
                "(For better results, first re-run with -ONLY_TYPE yes to fit SEDs to known spec-z.)"
            )
            print()
            print('  fo/ft    dmag   nobj   filter')
            for i in range(nf):
                print('% 7.3f  % 7.3f %5d   %s' %
                      (fratavg[i], dmavg[i], fnobj[i], filters[i]))
            print(
                "fo/ft = Average f_obs/f_model weighted by f_obs/ef_obs for objects with ODDS >= %g"
                % odd_check)
            print(
                "dmag = magnitude offset which should be applied (added) to the photometry (zp_offset)"
            )
            print(
                "nobj = # of galaxies considered in that filter (detected and high ODDS >= %g)"
                % odd_check)

    if save_full_probs:
        full_probs.close()
    if save_probs:
        probs.close()
    if save_probs2:
        probs2.close()

    if plots and checkSED:
        zb, zm, zb1, zb2, o, tb = useful.get_data(out_name, (1, 6, 2, 3, 5, 4))
        # Plot the comparison between z_spec and z_B

        if 'Z_S' in col_pars.d:
            if not interactive or useful.ask('Compare z_B vs z_spec?'):
                good = np.less(z_s, 9.99)
                print(
                    'Total initial number of objects with spectroscopic redshifts= ',
                    np.sum(good))
                od_th = 0.
                if useful.ask('Select for galaxy characteristics?\n'):
                    od_th = eval(input('Odds threshold?\n'))
                    good *= np.greater_equal(o, od_th)
                    t_min = eval(input('Minimum spectral type\n'))
                    t_max = eval(input('Maximum spectral type\n'))
                    good *= np.less_equal(tb, t_max) * \
                        np.greater_equal(tb, t_min)
                    if has_mags:
                        mg_min = eval(input('Bright magnitude limit?\n'))
                        mg_max = eval(input('Faint magnitude limit?\n'))
                        good = good * np.less_equal(m_0, mg_max) * \
                            np.greater_equal(m_0, mg_min)

                zmo, zso, zbo, zb1o, zb2o, tb = useful.multicompress(
                    good, (zm, z_s, zb, zb1, zb2, tb))
                print('Number of objects with odds > %.2f= %i ' %
                      (od_th, len(zbo)))
                deltaz = old_div((zso - zbo), (1. + zso))
                sz = useful.stat_robust(deltaz, 3., 3)
                sz.run()
                outliers = np.greater_equal(abs(deltaz), 3. * sz.rms)
                print('Number of outliers [dz >%.2f*(1+z)]=%i' %
                      (3. * sz.rms, np.add.reduce(outliers)))
                catastrophic = np.greater_equal(deltaz * (1. + zso), 1.)
                n_catast = np.sum(catastrophic)
                print('Number of catastrophic outliers [dz >1]=', n_catast)
                print('Delta z/(1+z) = %.4f +- %.4f' % (sz.median, sz.rms))
                if interactive and plots:
                    pylab.figure(2)
                    pylab.subplot(211)
                    pylab.plot(np.arange(min(zso),
                                         max(zso) + 0.01, 0.01),
                               np.arange(min(zso),
                                         max(zso) + 0.01, 0.01), "r")
                    pylab.errorbar(
                        zso, zbo,
                        [abs(zbo - zb1o), abs(zb2o - zbo)], fmt="bo")
                    pylab.xlabel(r'$z_{spec}$')
                    pylab.ylabel(r'$z_{bpz}$')
                    pylab.subplot(212)
                    pylab.plot(zso, zmo, "go", zso, zso, "r")
                    pylab.xlabel(r'$z_{spec}$')
                    pylab.ylabel(r'$z_{ML}$')
                    pylab.show()

    rolex.check()
Example #48
    def __init__(
        self,
        data,
        *,
        rgb=None,
        colormap='gray',
        contrast_limits=None,
        gamma=1,
        interpolation='nearest',
        rendering='mip',
        iso_threshold=0.5,
        attenuation=0.05,
        name=None,
        metadata=None,
        scale=None,
        translate=None,
        rotate=None,
        shear=None,
        affine=None,
        opacity=1,
        blending='translucent',
        visible=True,
        multiscale=None,
    ):
        if isinstance(data, types.GeneratorType):
            data = list(data)

        if getattr(data, 'ndim', 2) < 2:
            raise ValueError(
                trans._('Image data must have at least 2 dimensions.'))

        # Determine if data is a multiscale
        if multiscale is None:
            multiscale, data = guess_multiscale(data)

        # Determine initial shape
        if multiscale:
            init_shape = data[0].shape
        else:
            init_shape = data.shape

        # Determine if rgb
        if rgb is None:
            rgb = guess_rgb(init_shape)

        # Determine dimensionality of the data
        if rgb:
            ndim = len(init_shape) - 1
        else:
            ndim = len(init_shape)

        super().__init__(
            data,
            ndim,
            name=name,
            metadata=metadata,
            scale=scale,
            translate=translate,
            rotate=rotate,
            shear=shear,
            affine=affine,
            opacity=opacity,
            blending=blending,
            visible=visible,
            multiscale=multiscale,
        )

        self.events.add(
            interpolation=Event,
            rendering=Event,
            iso_threshold=Event,
            attenuation=Event,
        )

        # Set data
        self.rgb = rgb
        self._data = data
        if self.multiscale:
            self._data_level = len(self.data) - 1
            # Determine which level of the multiscale to use for the thumbnail.
            # Pick the smallest level with at least one axis >= 64. This is
            # done to prevent the thumbnail from being from one of the very
            # low resolution layers and therefore being very blurred.
            big_enough_levels = [
                np.any(np.greater_equal(p.shape, 64)) for p in data
            ]
            if np.any(big_enough_levels):
                self._thumbnail_level = np.where(big_enough_levels)[0][-1]
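                # np.where(...)[0][-1] picks the last, i.e. smallest, level whose
                # shape still has at least one axis of 64 pixels or more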
            else:
                self._thumbnail_level = 0
        else:
            self._data_level = 0
            self._thumbnail_level = 0
        self.corner_pixels[1] = self.level_shapes[self._data_level]

        self._new_empty_slice()

        # Set contrast_limits and colormaps
        self._gamma = gamma
        self._iso_threshold = iso_threshold
        self._attenuation = attenuation
        if contrast_limits is None:
            self.contrast_limits_range = self._calc_data_range()
        else:
            self.contrast_limits_range = contrast_limits
        self._contrast_limits = tuple(self.contrast_limits_range)
        self.colormap = colormap
        self.contrast_limits = self._contrast_limits
        self._interpolation = {
            2: Interpolation.NEAREST,
            3: (Interpolation3D.NEAREST if self.__class__.__name__ == 'Labels'
                else Interpolation3D.LINEAR),
        }
        self.interpolation = interpolation
        self.rendering = rendering

        # Trigger generation of view slice and thumbnail
        self._update_dims()
Example #49
    temp_0_comp = S3_comp[:, :, 0]
    temp_1_comp = S3_comp[:, :, 1]

    temp_2_comp = 1 * temp_0_comp + (-1) * temp_1_comp
    S1_comp.append(temp_2_comp)

    X_IND_reshaped_comp = np.reshape(x_ind, [batch_size, 2 * K])
    LOSS_comp.append(
        np.log(i) *
        np.mean(np.mean(np.square(X_IND_reshaped_comp - S2_comp[-1]), 1)))
    BER_comp.append(
        np.mean(
            np.not_equal(batch_X, np.sign(S1_comp[-1])).astype(np.float32)))

Max_Val_comp = np.amax(S3_comp, axis=2, keepdims=True)
Greater_comp = np.greater_equal(S3_comp, Max_Val_comp)
BER2_comp = np.round(Greater_comp.astype(np.float32))
x_ind_reshaped = np.reshape(x_ind, [batch_size, K, 2])
BER3_comp = np.not_equal(BER2_comp, x_ind_reshaped)
BER4_comp = np.sum(BER3_comp.astype(np.float32), 2)
BER5_comp = np.greater(BER4_comp.astype(np.float32), 0)
SER_comp = np.mean(BER5_comp)

toc = tm.time()
time_np = (toc - tic) / test_batch_size
print('time np')
print(time_np)

print("tf ser at layer is:")
print(
    np.array(
Example #50
        coralDS, baseDS, rbDS = None, None, None
        sys.exit(0)

    if (coralDS.RasterXSize != rbDS.RasterXSize) or (coralDS.RasterYSize !=
                                                     rbDS.RasterYSize):
        print('This image %s does not have the same dimensions... exiting' %
              (os.path.basename(rbname)))
        coralDS, baseDS, rbDS = None, None, None
        sys.exit(0)

    rbdata = rbDS.GetRasterBand(1).ReadAsArray(xoffset, yoffset, xsize, ysize)
    rbon = np.logical_and(np.not_equal(rbdata, -9999), np.not_equal(rbdata, 0))

    pDS = gdal.Open(persdevfiles[j], gdal.GA_ReadOnly)
    pdata = pDS.GetRasterBand(1).ReadAsArray(xoffset, yoffset, xsize, ysize)
    above = np.greater_equal(pdata, 2)
    below = np.less(pdata, 2)
    allgood = np.all(np.dstack((coralmaskon, basedataon, rbon)), axis=-1)

    print('Stacked Coralmask, Baseline, and Rb')
    gabove = np.logical_and(above, allgood)
    gbelow = np.logical_and(below, allgood)
    numabove = np.sum(gabove)
    numbelow = np.sum(gbelow)
    print('%s: Above: %d    Below: %d' % (rbname, numabove, numbelow))
    outfile = 'rb_baseline_bleach_data_' + thedate + '_' + tid + '.tif'
    outDS = drv.Create(outfile, xsize, ysize, 2, gdal.GDT_Byte)
    outDS.SetGeoTransform(sampgt)
    outDS.SetProjection(proj)
    outDS.GetRasterBand(1).WriteArray(gabove.astype(np.uint8))
    outDS.GetRasterBand(2).WriteArray(gbelow.astype(np.uint8))
Example #51
def backward(head_grads, x):
    mask1 = np.greater_equal(x, a_min).astype("float")
    mask2 = np.less_equal(x, a_max).astype("float")
    return [head_grads * mask1 * mask2]
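
# A minimal, hedged sketch (not part of the original snippet): backward() above is
# the gradient of np.clip(x, a_min, a_max) with respect to x -- head_grads passes
# through where a_min <= x <= a_max and is zeroed elsewhere. Assumed limits 0 and 1:
import numpy as np

x_demo = np.array([-0.5, 0.3, 1.7])
grad_demo = np.ones_like(x_demo) * np.greater_equal(x_demo, 0.0) * np.less_equal(x_demo, 1.0)
print(grad_demo)  # [0. 1. 0.]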

np.fmax(a,b)
Out[111]: array([4, 4, 1])


np.fmin(a,b)
Out[112]: array([3, 3, 1])

np.mod(a,b)
Out[113]: array([1, 3, 0], dtype=int32)

np.greater(a,b)
Out[114]: array([ True, False, False], dtype=bool)

np.greater_equal(a,b)
Out[115]: array([ True, False,  True], dtype=bool)

np.less_equal(a,b)
Out[116]: array([False,  True,  True], dtype=bool)


np.less(a,b)
Out[117]: array([False,  True, False], dtype=bool)

np.equal(a,b)
Out[118]: array([False, False,  True], dtype=bool)


arrsts=np.array([[0,1,2],[3,4,5],[6,7,8]])
Example #53
		new_device[ :, :, layer_idx * layer_step + internal_layer ] = 1.0 * np.greater( cur_fabrication_target[ :, :, layer_idx * layer_step + 1 ], 0.5 )
		new_device[ :, :, layer_idx * layer_step + internal_layer ] += 1.0 * np.greater( pull_data, 1 )
new_device[ :, :, new_device.shape[ 2 ] - 1 ] = new_device[ :, :, new_device.shape[ 2 ] - 2 ]


assert ( num_eval_points % num_nodes_available ) == 0, "Expected the number of available nodes to evenly divide the number of eval points!"


num_outer_loops = int( num_eval_points / num_nodes_available )

job_names = {}

fdtd_hook.switchtolayout()
cur_density = new_device.copy()
# cur_density = bayer_filter.get_permittivity()
cur_density = 1.0 * np.greater_equal( cur_density, 0.5 )
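# Binarize the density: cells with value >= 0.5 become 1.0, everything else 0.0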

reinterpolate_density = np.zeros( [ 2 * cur_density.shape[ idx ] for idx in range( 0, len( cur_density.shape ) ) ] )
for z_idx in range( 0, reinterpolate_density.shape[ 2 ] ):
	for x_idx in range( 0, reinterpolate_density.shape[ 0 ] ):
		for y_idx in range( 0, reinterpolate_density.shape[ 1 ] ):
			down_x = int( 0.5 * x_idx )
			down_y = int( 0.5 * y_idx )
			down_z = int( 0.5 * z_idx )

			reinterpolate_density[ x_idx, y_idx, z_idx ] = cur_density[ down_x, down_y, down_z ]

	if dilation_erosion_test:
		if dilation_amount > 0:
			reinterpolate_density[ :, :, z_idx ] = scipy.ndimage.binary_dilation( reinterpolate_density[ :, :, z_idx ], iterations=np.abs( dilation_amount ) )
		else:
Example #54
x = np.array([11,7,69,28,85])
y = np.array([25,68,28,11,85])
print(x)
print(y)
print("Comparison element :-")
print(np.equal(x, y))
#06 Write a Python program to create an element-wise comparison (greater, greater_equal, less and less_equal) of two given arrays.

import numpy as np
x = np.array([11,7,69,28,85])
y = np.array([25,68,28,11,85])
print(x)
print(y)
print("Comparison element :-")
print(np.greater(x, y))
print(np.greater_equal(x, y))
print(np.less(x, y))
print(np.less_equal(x, y))

# 06 Write a Python program to create an element-wise comparison (equal, +/-1) of two given arrays.
import numpy as np
x = np.array([11,7,69,-28,85])
y = np.array([25,-68,28,11,85])
print(x)
print(y)
print("Comparison element :-")
print(np.equal(x,y))
num = 0
while(num < len(x)): 
	
	if x[num] >= 0: 
Example #55
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])

        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])

        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])

        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])

        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b),
                     [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b),
                     ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Example #56
def detect(img_path, model, device, pixel_threshold, quiet=True):
    img = Image.open(img_path)
    d_wight, d_height = resize_image(img, cfg.max_predict_img_size)
    img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
    with torch.no_grad():
        east_detect = model(load_pil(img).to(device))
    y = np.squeeze(east_detect.cpu().numpy(), axis=0)  # c, h, w
    y[:3, :, :] = sigmoid(y[:3, :, :])
    cond = np.greater_equal(y[0, :, :], pixel_threshold)
    activation_pixels = np.where(cond)
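    # activation_pixels holds the (row, col) indices where the score map (channel 0)
    # reaches pixel_threshold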
    quad_scores, quad_after_nms = nms(y, activation_pixels)
    with Image.open(img_path) as im:
        d_wight, d_height = resize_image(im, cfg.max_predict_img_size)
        scale_ratio_w = d_wight / im.width
        scale_ratio_h = d_height / im.height
        im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
        quad_im = im.copy()
        draw = ImageDraw.Draw(im)
        for i, j in zip(activation_pixels[0], activation_pixels[1]):
            px = (j + 0.5) * cfg.pixel_size
            py = (i + 0.5) * cfg.pixel_size
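            # (px, py) is the centre of feature-map cell (i, j) mapped to resized-image
            # coordinates (cfg.pixel_size image pixels per cell)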
            line_width, line_color = 1, 'red'
            if y[1, i, j] >= cfg.side_vertex_pixel_threshold:
                if y[2, i, j] < cfg.trunc_threshold:
                    line_width, line_color = 2, 'yellow'
                elif y[2, i, j] >= 1 - cfg.trunc_threshold:
                    line_width, line_color = 2, 'green'
            draw.line([(px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),
                       (px + 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),
                       (px + 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),
                       (px - 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),
                       (px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size)],
                      width=line_width,
                      fill=line_color)
        im.save(img_path + '_act.jpg')
        quad_draw = ImageDraw.Draw(quad_im)
        txt_items = []
        for score, geo, s in zip(quad_scores, quad_after_nms,
                                 range(len(quad_scores))):

            if np.amin(score) > 0:
                quad_draw.line([
                    tuple(geo[0]),
                    tuple(geo[1]),
                    tuple(geo[2]),
                    tuple(geo[3]),
                    tuple(geo[0])
                ],
                               width=2,
                               fill='red')

                rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]
                rescaled_geo_list = np.reshape(rescaled_geo, (8, )).tolist()
                txt_item = ','.join(map(str, rescaled_geo_list))
                txt_items.append(txt_item + '\n')
            elif not quiet:
                print('quad invalid with vertex num less than 4.')
        quad_im.save(img_path + '_predict.jpg')
        if cfg.predict_write2txt and len(txt_items) > 0:
            with open(img_path[:-4] + '.txt', 'w') as f_txt:
                f_txt.writelines(txt_items)
Example #57
    def __init__(self,
                 data,
                 a_field_name,
                 b_field_name,
                 t_field_name,
                 exposure_method,
                 outlier_fraction=0):
        # Compute all signals and store all relevant to BaseSignals object
        a, b, t = data[a_field_name].values, data[b_field_name].values, data[
            t_field_name].values

        logging.info(
            f"{a_field_name} with shape {a.shape} with {np.sum(~np.isnan(a))} valid and "
            f"{np.isnan(a).sum()} NaN values.")

        logging.info(
            f"{b_field_name} with shape {b.shape} with {np.sum(~np.isnan(b))} valid and "
            f"{np.isnan(b).sum()} NaN values.")

        # Filter outliers
        if outlier_fraction > 0:
            data = self._filter_outliers(data, a_field_name, b_field_name,
                                         outlier_fraction)

        # Calculate exposure
        b_mean = float(np.mean(b[notnan_indices(b)]))
        a_length = a.shape[0]
        exposure_a = self._compute_exposure(a, exposure_method, b_mean,
                                            a_length)
        exposure_b = self._compute_exposure(b, exposure_method, b_mean,
                                            a_length)
        data["e_a"], data["e_b"] = exposure_a, exposure_b

        # Not nan rows (non-mutual)
        index_a_nn, index_b_nn = notnan_indices(a), notnan_indices(b)
        index_a_outlier = np.logical_and(np.greater_equal(a, 0.55),
                                         np.less_equal(a, 1.00))
        index_a_nn = np.logical_and(index_a_nn, index_a_outlier)
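        # Only a-readings inside [0.55, 1.00] are kept; values outside that window are
        # treated as outliers and dropped from index_a_nn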

        a_nn, b_nn = a[index_a_nn], b[index_b_nn]
        t_a_nn, t_b_nn = t[index_a_nn], t[index_b_nn]
        exposure_a_nn, exposure_b_nn = exposure_a[index_a_nn], exposure_b[
            index_b_nn]

        logging.info(f"t_a {t_a_nn.min()} {t_a_nn.max()}")
        logging.info(f"t_b {t_b_nn.min()} {t_b_nn.max()}")

        # Extract mutual not nan rows
        data[a_field_name][~index_a_nn] = np.nan
        data_mutual_nn = data[[
            t_field_name, a_field_name, b_field_name, "e_a", "e_b"
        ]].dropna()
        a_mutual_nn, b_mutual_nn = data_mutual_nn[
            a_field_name].values, data_mutual_nn[b_field_name].values
        t_mutual_nn = data_mutual_nn[t_field_name].values
        exposure_a_mutual_nn, exposure_b_mutual_nn = data_mutual_nn[
            "e_a"].values, data_mutual_nn["e_b"].values
        logging.info("Mutual signals extracted.")

        logging.info(
            f"{a_field_name} mutual with shape {a_mutual_nn.shape} with {np.sum(~np.isnan(a_mutual_nn))} "
            f"valid and {np.isnan(a_mutual_nn).sum()} NaN values.")

        logging.info(
            f"{b_field_name} mutual with shape {b_mutual_nn.shape} with {np.sum(~np.isnan(b_mutual_nn))} "
            f"valid and {np.isnan(b_mutual_nn).sum()} NaN values.")

        # Create BaseSignals instance
        self.base_signals = BaseSignals(a_nn, b_nn, t_a_nn, t_b_nn,
                                        exposure_a_nn, exposure_b_nn,
                                        a_mutual_nn, b_mutual_nn, t_mutual_nn,
                                        exposure_a_mutual_nn,
                                        exposure_b_mutual_nn)
    def predict_stochastic(self, bias_to_data, depth=-1):
        current_probabilities = self.get_probabilities(bias_to_data, depth)
        random_coin = np.random.uniform(0.0, 1.0, current_probabilities.shape)
        return np.greater_equal(current_probabilities, random_coin)
Example #59
def rotate(coor, m1, q0, th, an, cut, lowrg, highrg, re, taccepted, zflag,
           zval, cflag, dcdoutfile, indices, this_mask, basis_mask, sub_m2,
           align_mask, coor_sub_m1, com_sub_m1, mask_a_array, mask_b_array,
           distance_array, type_array, first_last_resid, molecule_type,
           cutoff_array, vdw_factor):

    over = 0
    badrg = 0
    accepted = 0
    arg = 0.0
    lowestrg = re[5]
    hrg = re[6]
    frame = 0
    badz = 0
    badc = 0

    check = 0
    minmax = []

    result = rotate_dihedral(coor, m1, frame, q0, th, an, indices, this_mask,
                             first_last_resid, molecule_type)

    if result == 1:
        error, basis_coor = m1.get_coor_using_mask(frame, basis_mask)

        if (len(cutoff_array) > 0):
            check = vdw_overlap.overlap(basis_coor[0], cutoff_array,
                                        vdw_factor)
        else:
            check = overlap.overlap(basis_coor[0], float(cut))
        filename = ''
    else:
        filename = ''
        check = 1

    thisrg = m1.calcrg(frame)
    if (thisrg > hrg):
        hrg = thisrg
    if (thisrg < lowestrg):
        lowestrg = thisrg

    if (check == 0):
        if (thisrg > lowrg and thisrg < highrg):
            filename = 'winner'

            m1.center(frame)

            error, sub_m2.coor = m1.get_coor_using_mask(frame, align_mask)

            sub_m2.setCoor(sub_m2.coor)

            com_sub_m2 = sub_m2.calccom(0)
            sub_m2.center(0)
            coor_sub_m2 = sub_m2.coor[0]

            m1.align(frame, coor_sub_m2, com_sub_m2, coor_sub_m1, com_sub_m1)

            if (zflag == 1):
                error, sub_m2.coor = m1.get_coor_using_mask(frame, basis_mask)
                sub_m2.setCoor(sub_m2.coor)
                zee = sub_m2.coor[0, :, 2]
                zcheck = numpy.all(numpy.greater_equal(zee, zval))
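                # zcheck is True only when every z-coordinate of the basis atoms lies at or above zval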
                if (zcheck == 0):
                    check = 1
                    badz = 1

            if (check == 0 and cflag == 1):
                check = constraints.check_constraints(m1, mask_a_array,
                                                      mask_b_array,
                                                      distance_array,
                                                      type_array)
                if (check == 1):
                    badc = 1

            if (check == 0):
                m1.write_dcd_step(dcdoutfile, 0, taccepted + 1)
                minmax = m1.calcminmax_frame(0)
                accepted = 1
                arg = thisrg
        else:
            badrg = 1

    else:
        over = 1

    re[0] = accepted
    re[1] = over
    re[2] = badrg
    re[3] = thisrg
    re[4] = arg
    re[5] = lowestrg
    re[6] = hrg
    re[7] = badz
    re[8] = badc
    re[9] = minmax

    return filename
Example #60
def onp_greater_equal(a, b):
    return onp.greater_equal(a, b)