Example 1
   def eval(self, band, times, z=0, k=1):
      '''Evaluate, using a spline, the value of the template at specific
      times, optionally with a redshift (in the sense that the times should
      be blueshifted before interpolating).  Also returns a mask indicating
      the interpolated points (1) and the extrapolated points (0)'''
      if len(num.shape(times)) == 0:
         evt = num.array([times/(1+z)])
         scalar = 1
      else:
         evt = times/(1+z)
         scalar = 0
      if band not in self.__dict__ and band not in ['J','H','K']:
         raise AttributeError("Sorry, band %s is not supported by dm15temp" % band)
      s = dm152s(self.dm15)
      if band == 'J':
         return(0.080 + evt/s*0.05104699 + 0.007064257*(evt/s)**2 - 0.000257906*(evt/s)**3,
               0.0*evt/s + 0.06, num.greater_equal(evt/s, -12)*num.less_equal(evt/s, 10)) 
      elif band == 'H':
         return(0.050 + evt/s*0.0250923 + 0.001852107*(evt/s)**2 - 0.0003557824*(evt/s)**3,
               0.0*evt/s + 0.08, num.greater_equal(evt/s, -12)*num.less_equal(evt/s, 10)) 
      elif band == 'K':
         return(0.042 + evt/s*0.02728437+ 0.003194500*(evt/s)**2 - 0.0004139377*(evt/s)**3,
               0.0*evt/s + 0.08, num.greater_equal(evt/s, -12)*num.less_equal(evt/s, 10)) 
      evd = self.tck[band].ev(evt/self.s, evt*0+self.dm15)
      eevd = self.tck['e_'+band].ev(evt/self.s, evt*0+self.dm15)
      mask = num.greater_equal(evt/self.s, -10)*num.less_equal(evt/self.s,70)

      if scalar:
         return(evd[0], eevd[0], mask[0])
      else:
         return(evd, eevd, mask)
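The mask in eval comes from an elementwise product of comparisons; a minimal standalone sketch of that idiom (my illustration, not part of the original source):

import numpy as num

evt = num.array([-15.0, -5.0, 0.0, 30.0, 80.0])
mask = num.greater_equal(evt, -10) * num.less_equal(evt, 70)
print(mask)   # [False  True  True  True False] -- True where interpolation is valid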
Example 2
def filter(mask, cube, header, clipMethod, threshold, rmsMode, verbose):
    if clipMethod == 'relative':
        # Determine the clip level from the noise measured in the original cube
        rms = GetRMS(cube, rmsMode=rmsMode, zoomx=1, zoomy=1, zoomz=1, verbose=verbose)
        print('Estimated rms = ', rms)
        clip = threshold * rms
    if clipMethod == 'absolute':
        clip = threshold
    print('using clip threshold: ', clip)

    # check whether there are NaNs
    nan_mask = np.isnan(cube)
    found_nan = nan_mask.sum()
    if found_nan:
        cube = np.nan_to_num(cube)
        np.logical_or(mask, (np.greater_equal(cube, clip) + np.less_equal(cube, -clip)), mask)
        cube[nan_mask] = np.nan
    else:
        np.logical_or(mask, (np.greater_equal(cube, clip) + np.less_equal(cube, -clip)), mask)

    # mask is updated in place
    return
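A standalone sketch (my illustration, not from the original source) of the same NaN-safe symmetric clip:

import numpy as np

cube = np.array([0.1, -5.0, np.nan, 7.0])
mask = np.zeros(cube.shape, dtype=bool)
clip = 3.0
nan_mask = np.isnan(cube)
cube = np.nan_to_num(cube)                 # zero out NaNs so comparisons are defined
np.logical_or(mask, np.greater_equal(cube, clip) + np.less_equal(cube, -clip), mask)
cube[nan_mask] = np.nan                    # restore NaNs afterwards
print(mask)   # [False  True False  True]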
Example 3
def plotCurves(c1, c2):
    name1, t, avg1, top1, bottom1 = c1
    name2, t, avg2, top2, bottom2 = c2
    pl.plot(t, np.zeros(len(t)), 'k-')
    s1 = ma.array(avg1)
    s2 = ma.array(avg2)
    zx1 = np.logical_and(np.greater_equal(top1, 0), np.less_equal(bottom1, 0))
    zx2 = np.logical_and(np.greater_equal(top2, 0), np.less_equal(bottom2, 0))
    ix = np.logical_or(
            np.logical_and(
                np.greater_equal(top1, top2),
                np.less_equal(bottom1, top2)),
            np.logical_and(
                np.greater_equal(top1, bottom2),
                np.less_equal(bottom1, bottom2)))
    mask1 = np.logical_or(zx1, ix)
    mask2 = np.logical_or(zx2, ix)

    print(mask1)
    print(mask2)
    print(zx1)
    print(zx2)
    print(ix)

    pl.plot(t, s1, "k--", linewidth=1)
    pl.plot(t, s2, "k-", linewidth=1)
    s1.mask = ix
    s2.mask = ix
    pl.plot(t, s1, "k--", linewidth=3, label=name1)
    pl.plot(t, s2, "k-", linewidth=3, label=name2)
    pl.xlabel('Time (secs)')
    pl.ylabel("Pearson correlation")
Example 4
def parallel_point_test(center,dim,x,y,z):
    '''
    Overview:
        Determines whether a given point is in a parallelepiped, given the
    point being tested and the relevant parameters.


    Parameters:

    center:(float,[3]|angstroms) = The coordinates of the center of the
    parallelepiped. This parameter is in the form (x center, y center, z center)

    dim:(float,[3]|angstroms) = The x, y and z dimensions of the parallelepiped
    object.

    x,y,z:(float|angstroms) = coordinates for the point being tested.


    Note:
    -The API is left intentionally independent of the class structures used in
    sample_prep.py to allow for code reusability.

    '''

    low_lim = (array(center) - (array(dim)/2.0))
    high_lim = (array(center) +(array(dim)/2.0))

    height_lim = greater_equal(z, low_lim[2]) * less_equal(z, high_lim[2])
    length_lim = greater_equal(y, low_lim[1]) * less_equal(y, high_lim[1])
    width_lim = greater_equal(x, low_lim[0]) * less_equal(x, high_lim[0])

    test_results = height_lim * length_lim * width_lim

    return test_results
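A usage sketch (my illustration, not from the original source), assuming parallel_point_test above is in scope along with numpy's linspace:

from numpy import linspace

x = linspace(-5, 5, 11)[:, None, None]   # shape (11, 1, 1)
y = linspace(-5, 5, 11)[None, :, None]   # shape (1, 11, 1)
z = linspace(-5, 5, 11)[None, None, :]   # shape (1, 1, 11)
inside = parallel_point_test([0.0, 0.0, 0.0], [4.0, 4.0, 4.0], x, y, z)
print(inside.shape)   # (11, 11, 11) boolean occupancy grid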
Example 5
   def __call__(self, x):
      '''Interpolate at point [x].  Returns a 2-tuple (y, mask), where [y]
      is the interpolated point, and [mask] is a boolean array with the same
      shape as [x] that is True where interpolated and False where extrapolated'''
      if not self.setup:
         self._setup()

      if len(num.shape(x)) < 1:
         scalar = True
      else:
         scalar = False

      x = num.atleast_1d(x)
      if self.realization:
         evm = num.atleast_1d(splev(x, self.realization))
         mask = num.greater_equal(x, self.realization[0][0])*\
                num.less_equal(x,self.realization[0][-1])
      else:
         evm = num.atleast_1d(splev(x, self.tck))
         mask = num.greater_equal(x, self.tck[0][0])*num.less_equal(x,self.tck[0][-1])

      if scalar:
         return evm[0],mask[0]
      else:
         return evm,mask
Example 6
def _calc_uncorr_gene_score(gene, input_gene, input_snp, pruned_snps, hotspots):
    # find local snps given a gene
    # np.logical_and takes only two array arguments (a third is treated as
    # ``out``), so reduce over the three conditions instead
    cond_snps_near_gene = np.logical_and.reduce([np.equal(input_snp[:, 0], input_gene[gene, 0]),
                                                 np.greater_equal(input_snp[:, 1], input_gene[gene, 1]),
                                                 np.less_equal(input_snp[:, 1], input_gene[gene, 2])])
    # if no snps found
    if not np.any(cond_snps_near_gene):
        return (np.nan, 0, 1, 0, 0)

    n_snps_zscore_finite = np.sum(np.isfinite(input_snp[cond_snps_near_gene][:, 3]))
    # if no snps with finite zcore
    if n_snps_zscore_finite == 0:
        return (np.nan, 0, 1, 0, 0)

    n_snps_per_gene = n_snps_zscore_finite

    # use p-value to find most significant SNP
    idx_min_pval = np.nanargmin(input_snp[cond_snps_near_gene][:, 3])

    uncorr_score = input_snp[cond_snps_near_gene][idx_min_pval, 2]

    # count number of independent SNPs per gene
    n_indep_snps_per_gene = np.sum(np.logical_and.reduce([np.equal(pruned_snps[:, 0], input_gene[gene, 0]),
                                                          np.greater_equal(pruned_snps[:, 1], input_gene[gene, 1]),
                                                          np.less_equal(pruned_snps[:, 1], input_gene[gene, 2])]))

    # count number of hotspots per gene
    n_hotspots_per_gene = np.sum(np.logical_and(np.equal(hotspots[:, 0], input_gene[gene, 0]),
                                                np.greater(np.fmin(hotspots[:, 2], input_gene[gene, 2])
                                                           - np.fmax(hotspots[:, 1], input_gene[gene, 1]), 0)))
    return (uncorr_score, n_snps_per_gene, 0, n_indep_snps_per_gene, n_hotspots_per_gene)
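A quick standalone check (my illustration, not from the original source) of the three-way conjunction used above:

import numpy as np

chrom = np.array([1, 1, 2])
pos = np.array([5, 15, 7])
hit = np.logical_and.reduce([np.equal(chrom, 1),
                             np.greater_equal(pos, 4),
                             np.less_equal(pos, 10)])
print(hit)   # [ True False False]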
Example 7
    def phantom(x):
        result = True

        for xi, xmin, xmax in zip(x, min_pt, max_pt):
            result = (result &
                      np.less_equal(xmin, xi) & np.less_equal(xi, xmax))
        return result
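A self-contained sketch (my illustration, not from the original source) of the same box-indicator pattern with concrete bounds:

import numpy as np

min_pt, max_pt = [0.0, 0.0], [1.0, 2.0]

def box_indicator(x):
    result = True
    for xi, xmin, xmax in zip(x, min_pt, max_pt):
        result = result & np.less_equal(xmin, xi) & np.less_equal(xi, xmax)
    return result

pts = (np.array([0.5, -0.5]), np.array([1.0, 1.0]))  # one coordinate array per axis
print(box_indicator(pts))   # [ True False]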
Example 8
def getMaxPoints(arr):
    # [TODO] Work out for RGB rather than array, and maybe we don't need the filter, but hopefully speeds it up.
    # Reference http://scipy-cookbook.readthedocs.io/items/FiltFilt.html
    arra = filtfilt(b, a, arr)
    # use integer division so `order` stays an int under Python 3
    maxp = maxpoints(arra, order=(len(arra)//20), mode='wrap')
    minp = minpoints(arra, order=(len(arra)//20), mode='wrap')

    points = []

    for i in range(3):
        # np.equal of the two bound tests acts as a logical AND here: for a
        # valid window the two tests can never both be False
        mas = np.equal(np.greater_equal(maxp, (i*(len(arra)//3))), np.less_equal(maxp, ((i+1)*len(arra)//3)))
        k = np.compress(mas[0], maxp)
        if len(k)==0:
            continue
        points.append(sum(k)//len(k))   # integer mean index

    if len(points) == 1:
        return points, []

    points = np.compress(np.greater_equal(arra[points],(max(arra)-min(arra))*0.40 + min(arra)),points)
    rifts = []
    for i in range(len(points)-1):
        mas = np.equal(np.greater_equal(minp, points[i]),np.less_equal(minp,points[i+1]))
        k = np.compress(mas[0], minp)
        rifts.append(k[arra[k].argmin()])

    return points, rifts
Example 9
 def test_rand(self):
     # Simple distributional checks for sparse.rand.
     for random_state in None, 4321, np.random.RandomState():
         x = sprand(10, 20, density=0.5, dtype=np.float64,
                    random_state=random_state)
         assert_(np.all(np.less_equal(0, x.data)))
         assert_(np.all(np.less_equal(x.data, 1)))
Example 10
    def phan(x):
        result = True

        for xp, minv, maxv in zip(x, begin, end):
            result = (result &
                      np.less_equal(minv, xp) & np.less_equal(xp, maxv))
        return result
Example 11
def radial_contrast_flr(image, xc, yc, seps, zw, coron_thrupt, klip_thrupt=None):
    rad_flr_ctc = np.empty((len(seps)))
    assert(len(seps) == len(coron_thrupt))
    if klip_thrupt is not None:
        assert(len(seps) == len(klip_thrupt))
        rad_flr_ctc_ktc = np.empty((len(seps)))
    else:
        rad_flr_ctc_ktc = None

    imh = image.shape[0]
    imw = image.shape[1]

    xs = np.arange(imw) - xc
    ys = np.arange(imh) - yc
    XXs, YYs = np.meshgrid(xs, ys)
    RRs = np.sqrt(XXs**2 + YYs**2)

    for si, sep in enumerate(seps):
        r_in = np.max([seps[0], sep-zw/2.])
        r_out = np.min([seps[-1], sep+zw/2.])
        meas_ann_mask = np.logical_and(np.greater_equal(RRs, r_in),
                                          np.less_equal(RRs, r_out))
        meas_ann_ind = np.nonzero(np.logical_and(np.greater_equal(RRs, r_in).ravel(),
                                                    np.less_equal(RRs, r_out).ravel()))[0]
        meas_ann = np.ravel(image)[meas_ann_ind]
        rad_flr_ctc[si] = np.nanstd(meas_ann)/coron_thrupt[si]
        if rad_flr_ctc_ktc is not None:
            rad_flr_ctc_ktc[si] = np.nanstd(meas_ann)/coron_thrupt[si]/klip_thrupt[si]

    return rad_flr_ctc, rad_flr_ctc_ktc
Example 12
def FDR(p,q):
	'''
	input:
	p   - vector of p-values (numpy array)
	q   - False Discovery Rate level
	output:
	pID - p-value threshold based on independence or positive dependence
	pN  - Nonparametric p-value threshold
	*if none of the p-values pass FDR, an empty list is returned
	'''
	isort = np.argsort(p)
	p = p[isort]
	V = len(p)
	I = np.array(range(1,V+1))
	cVID = 1
	cVN = sum(np.divide(float(1),I))
	#threshold based on independence or positive dependence
	ID_thresh_vec = (I*q)/V/cVID
	ID_pass_vec = np.less_equal(p,ID_thresh_vec)
	if any(ID_pass_vec):
		pID = max(p[ID_pass_vec])
	else:
		pID = []
	# Nonparametric threshold
	N_thresh_vec = (I*q)/V/cVN
	N_pass_vec = np.less_equal(p,N_thresh_vec)
	if any(N_pass_vec):
		pN = max(p[N_pass_vec])
	else:
		pN = []
	return pID, pN
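A usage sketch (my illustration, not from the original source), assuming the FDR function above is in scope:

import numpy as np

np.random.seed(0)
p = np.concatenate([np.random.uniform(0, 0.001, 20),   # 20 strong signals
                    np.random.uniform(0, 1, 980)])     # 980 null p-values
pID, pN = FDR(p, 0.05)
print(pID, pN)   # thresholds under independence and the nonparametric bound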
Example 13
def cone_point_test(center,dim,stub,x,y,z):
    '''
    Overview:
        Determines whether a given point is in a cone given the point being
    tested and the relevant parameters.


    Parameters:

    center:(float,[3]|angstroms) = The x, y, and z component of the central
    point of the cone. In the case that the center is set to
    [None,None,None] the shape will be put in the bottom corner of the unit cell
    (the bounding box will start at (0,0,0)).

    dim:(float,[3]|angstroms) = The x component, y component and thickness
    of the cone respectively. dim[0] is the radius of the cone base in the x
    direction and dim[1] is the radius of the cone base in the y direction.

    stub:(float|angstroms) = provides a hard cut-off for the thickness of the
    cone. This allows for the creation of a truncated cone object whose side
    slope can be altered by using different z component values while keeping
    the stub parameter fixed.

    x,y,z:(float|angstroms) = coordinates for the point being tested.


    Notes:
    -To solve this equation more efficiently, the program takes in arrays of
    x, y and z shaped so that x[size(x),1,1], y[1,size(y),1], z[1,1,size(z)].
    This module then solves each part of the test individually and takes the
    product.  Only the points where all of the inquiries are True will be left
    as True in the test_results array.

    -The API is left intentionally independent of the class structures used in
    sample_prep.py to allow for code reusability.

    '''

    a_angle = arctan(dim[2]/dim[0])
    b_angle = arctan(dim[2]/dim[1])

    low_height_lim = greater_equal(z, (center[2] - dim[2]/2))

    if stub is None:
        up_height_lim = less_equal(z, (center[2] + dim[2]/2))
    else:
        up_height_lim = less_equal(z, (center[2] + stub/2))

    xy_test = ((((x-center[0])**2)/((((center[2] +
           dim[2]/2)-z)/tan(a_angle))**2))+(((y-center[1])**2)/((((center[2] +
           dim[2]/2)-z)/tan(b_angle))**2)))

    in_plane_low_lim = less_equal(0.0, xy_test)
    in_plane_high_lim = greater_equal(1.0, xy_test)

    test_results = (low_height_lim * up_height_lim * in_plane_low_lim *
                    in_plane_high_lim)

    return test_results
Example 14
 def _subset(self, z):
     """
     Hampel's function is defined piecewise over the range of z
     """
     z = np.fabs(np.asarray(z))
     t1 = np.less_equal(z, self.a)
     t2 = np.less_equal(z, self.b) * np.greater(z, self.a)
     t3 = np.less_equal(z, self.c) * np.greater(z, self.b)
     return t1, t2, t3
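A standalone sketch (my illustration; a=2, b=4, c=8 are assumed tuning constants, not values from the original class) showing how the three masks partition |z|:

import numpy as np

z = np.array([-10.0, -5.0, -3.0, -1.0, 0.5, 3.0, 6.0, 9.0])
a, b, c = 2.0, 4.0, 8.0
az = np.fabs(z)
t1 = np.less_equal(az, a)                        # |z| <= a
t2 = np.less_equal(az, b) * np.greater(az, a)    # a < |z| <= b
t3 = np.less_equal(az, c) * np.greater(az, b)    # b < |z| <= c
assert not np.any(t1 * t2) and not np.any(t2 * t3)   # the pieces are disjoint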
Example 15
def ts_increments(ts, monotony = 'increasing', max_value = None, reset_value = 0.):

    '''Return a time series with the increments registered in the
        input time series

    .. arguments:
    - (list) ts: pandas DataFrame containing a time series
    - (string) monotony: increasing / decreasing / non_monotonous
    - (float) max_value: value from which the meter is reset
    - (float) reset_value: value to which the meter is reset

    .. returns:
    - on success: time series of increments. The output time series contains
        one value less than the original one. The difference between
        two values is assigned to the epoch of the second one.'''

    new_ts = ts_to_float(ts)

    if 'error' in new_ts:
        return new_ts

    if len(new_ts) <= 1:
        return {'error': 'timeserie must have length greater than 1 to compute increments'}

    if max_value is not None:
        try:
            max_value = float(max_value)
        except (TypeError, ValueError):
            return {'error': 'max_value is not a number'}

    try:
        reset_value = float(reset_value)
    except (TypeError, ValueError):
        return {'error': 'reset_value is not a number'}

    if monotony == 'increasing':
        if not np.greater_equal(new_ts['value'], reset_value).all():
            return {'error': 'value lower than reset_value'}
        elif max_value and not np.less_equal(new_ts['value'], max_value).all():
            return {'error': 'value greater than max_value'}
    elif monotony == 'decreasing':
        if not np.less_equal(new_ts['value'].values, reset_value).all():
            return {'error': 'value greater than reset value'}
        elif max_value and not np.greater_equal(new_ts['value'], max_value).all():
            return {'error': 'value lower than max_value'}

    new_ts['old_value'] = new_ts['value'].shift()

    new_ts = new_ts.drop(new_ts.index[0])

    new_ts['increments'] = new_ts.apply(single_inc, axis = 1, monotony = monotony, \
        max_value = max_value, reset_value = reset_value)

    output_ts = pd.DataFrame()
    output_ts['value'] = new_ts['increments']

    return output_ts
Example 16
def find_pi(m):
	np.random.seed()
	x1 = np.random.random(N)
	y1 = np.random.random(N)
	np.square(x1,x1)
	np.square(y1,y1)
	np.add(x1,y1,x1)
	np.less_equal(x1, 1.0, x1)
	return np.add.reduce(x1)*4.0/N
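A usage sketch (my illustration, not from the original source); N is a module-level sample count that find_pi assumes:

import numpy as np

N = 1000000
print(find_pi(0))   # roughly 3.14, by counting samples with x^2 + y^2 <= 1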
Example 17
def next_step(state):
    neighbors = game_tmp
    ndimage.convolve(state, kernel, output=neighbors)
    np.greater_equal(neighbors, 2, out=game_bool1)
    np.less_equal(neighbors, 3, out=game_bool2)
    np.multiply(game_bool1, game_bool2, out=game_bool1)
    np.multiply(state, game_bool1, out=state)
    np.equal(neighbors, 3, out=game_bool1)
    np.add(state, game_bool1, out=state)
    np.clip(state + game_bool1, 0, 1, out=state)
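Since next_step relies on module-level buffers (game_tmp, kernel, game_bool1, game_bool2), here is a self-contained sketch of the same rule (my illustration, assuming an 8-neighbour kernel and a binary int grid):

import numpy as np
from scipy import ndimage

kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
state = (np.random.default_rng(0).random((8, 8)) < 0.3).astype(int)
neighbors = ndimage.convolve(state, kernel, mode='constant')
survive = np.greater_equal(neighbors, 2) & np.less_equal(neighbors, 3) & (state == 1)
born = np.equal(neighbors, 3) & (state == 0)
state = (survive | born).astype(int)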
Example 18
def test_universal():
    np.random.seed(98052)
    builtin_sgd = lambda params: sgd(params, lr=learning_rate_schedule(0.125, UnitType.minibatch))
    builtin_last_avg_error, builtin_avg_error, _ = ffnet(builtin_sgd)
    np.random.seed(98052)
    my_sgd = lambda ps, gs: C.combine([C.assign(p, p - 0.125/25 * g) for p, g in zip(ps, gs)])
    universal_sgd = lambda params: universal(my_sgd, params)
    my_last_avg_error, my_avg_error, _ = ffnet(universal_sgd)
    assert np.all(np.less_equal(my_last_avg_error, builtin_last_avg_error))
    assert np.all(np.less_equal(my_avg_error, builtin_avg_error))
Example 19
  def test_predict_gives_correct_shapes_in_inference_mode_first_stage_only(
      self):
    test_graph = tf.Graph()
    with test_graph.as_default():
      model = self._build_model(
          is_training=False, first_stage_only=True, second_stage_batch_size=2)
      batch_size = 2
      height = 10
      width = 12
      input_image_shape = (batch_size, height, width, 3)

      preprocessed_inputs = tf.placeholder(dtype=tf.float32,
                                           shape=(batch_size, None, None, 3))
      prediction_dict = model.predict(preprocessed_inputs)

      # In inference mode, anchors are clipped to the image window, but not
      # pruned.  Since MockFasterRCNN.extract_proposal_features returns a
      # tensor with the same shape as its input, the expected number of anchors
      # is height * width * the number of anchors per location (i.e. 3x3).
      expected_num_anchors = height * width * 3 * 3
      expected_output_keys = set([
          'rpn_box_predictor_features', 'rpn_features_to_crop', 'image_shape',
          'rpn_box_encodings', 'rpn_objectness_predictions_with_background',
          'anchors'])
      expected_output_shapes = {
          'rpn_box_predictor_features': (batch_size, height, width, 512),
          'rpn_features_to_crop': (batch_size, height, width, 3),
          'rpn_box_encodings': (batch_size, expected_num_anchors, 4),
          'rpn_objectness_predictions_with_background':
          (batch_size, expected_num_anchors, 2),
          'anchors': (expected_num_anchors, 4)
      }

      init_op = tf.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init_op)
        prediction_out = sess.run(prediction_dict,
                                  feed_dict={
                                      preprocessed_inputs:
                                      np.zeros(input_image_shape)
                                  })

        self.assertEqual(set(prediction_out.keys()), expected_output_keys)

        self.assertAllEqual(prediction_out['image_shape'], input_image_shape)
        for output_key, expected_shape in expected_output_shapes.items():
          self.assertAllEqual(prediction_out[output_key].shape, expected_shape)

        # Check that anchors are clipped to window.
        anchors = prediction_out['anchors']
        self.assertTrue(np.all(np.greater_equal(anchors, 0)))
        self.assertTrue(np.all(np.less_equal(anchors[:, 0], height)))
        self.assertTrue(np.all(np.less_equal(anchors[:, 1], width)))
        self.assertTrue(np.all(np.less_equal(anchors[:, 2], height)))
        self.assertTrue(np.all(np.less_equal(anchors[:, 3], width)))
Example 20
File: ngl10p.py Project: yyr/pyngl
def wigley(time):
  y = numpy.zeros(time.shape, dtype=time.dtype)
  numpy.putmask(y,numpy.less(time,1953.),         \
                ((time-1860.)/(1953.-1860.)) * 35.0)
  numpy.putmask(y,numpy.logical_and(numpy.greater_equal(time,1953.),  \
                                    numpy.less_equal(time,1973.)),    \
                ((time-1953.)/(1973.-1953.)) * (68. - 35.) + 35.)
  numpy.putmask(y,numpy.logical_and(numpy.greater(time,1973.),        \
                                    numpy.less_equal(time,1990.)),    \
                ((time-1973.)/(1990.-1973.)) * (75. - 68.) + 68.)
  return y
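A usage sketch (my illustration, not from the original source):

import numpy

time = numpy.arange(1860., 1991., 10.)
print(wigley(time))   # piecewise-linear curve: 35.0 at 1953, 68.0 at 1973, 75.0 at 1990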
Example 21
def get_residual_stats(config_dict, Phi_0, coadd_img, med_img, xycent=None):
    if xycent is None:
        xycent = ((fr_width - 1)/2., (fr_width - 1)/2.)
    fr_shape = config_dict['fr_shape']
    parang_seq = config_dict['parang_seq']
    op_rad = config_dict['op_rad']
    op_az = config_dict['op_az']
    rad_vec = np.sqrt(get_radius_sqrd(fr_shape, xycent)).ravel()
    
    Phi_0_derot = (Phi_0 + parang_seq[0]) % 360.
    coadd_annular_rms = list()
    zonal_rms = [[None]*N_az[r] for r in range(N_rad)]
    print "RMS counts in KLIP results:"
    for rad_ind in op_rad:
        R2 = R_out[rad_ind]
        if rad_ind == 0:
            R1 = R_inner
        else:
            R1 = R_out[rad_ind-1]
        annular_mask_logic = np.vstack([np.less_equal(rad_vec, R2),\
                                        np.greater(rad_vec, R1),\
                                        np.isfinite(coadd_img.ravel())])
        annular_mask = np.nonzero( np.all(annular_mask_logic, axis=0) )[0]
        coadd_annular_rms.append( np.sqrt( np.mean( np.ravel(coadd_img)[annular_mask]**2 ) ) )
        print "\tannulus %d/%d: %.3f in KLIP sub'd, derotated, coadded annlus" % (rad_ind+1, len(op_rad), coadd_annular_rms[-1])
        if len(op_az[rad_ind]) > 1:
            Phi_beg = (Phi_0_derot - DPhi[rad_ind]/2.) % 360.
            Phi_end = [ (Phi_beg + i * DPhi[rad_ind]) % 360. for i in range(1, len(op_az[rad_ind])) ]
            Phi_end.append(Phi_beg)
            for az_ind in op_az[rad_ind]:
                Phi2 = Phi_end[az_ind]
                if az_ind == 0:
                    Phi1 = Phi_beg
                else:
                    Phi1 = Phi_end[az_ind-1]
                if Phi1 < Phi2:
                    mask_logic = np.vstack((np.less_equal(rad_vec, R2),\
                                            np.greater(rad_vec, R1),\
                                            np.less_equal(angle_vec, Phi2),\
                                            np.greater(angle_vec, Phi1)))
                else: # azimuthal region spans phi = 0
                    rad_mask_logic = np.vstack((np.less_equal(rad_vec, R2),\
                                                np.greater(rad_vec, R1)))
                    az_mask_logic = np.vstack((np.less_equal(angle_vec, Phi2),\
                                               np.greater(angle_vec, Phi1)))
                    mask_logic = np.vstack((np.any(az_mask_logic, axis=0),\
                                            np.all(rad_mask_logic, axis=0)))
                derot_zonemask = np.nonzero( np.all(mask_logic, axis = 0) )[0]
                zonal_rms[rad_ind][az_ind] = np.sqrt( np.mean( np.ravel(coadd_img)[derot_zonemask]**2 ) )
            delimiter = ', '
            print("\tby zone: %s" % delimiter.join(["%.3f" % zonal_rms[rad_ind][a] for a in op_az[rad_ind]]))
    print "Peak, min values in final co-added image: %0.3f, %0.3f" % (np.nanmax(coadd_img), np.nanmin(coadd_img))
    print "Peak, min values in median of de-rotated images: %0.3f, %0.3f" % (np.nanmax(med_img), np.nanmin(med_img))
    return coadd_annular_rms, zonal_rms
Example 22
 def cellular_next_step(self, min_n, max_n, birth_n):
     self.old_state = self.state.copy()
     neighbors = self.tmp
     ndimage.convolve(self.state, self.kernel, output=neighbors)
     np.greater_equal(neighbors, min_n, out=self.cell_bounds1)
     np.less_equal(neighbors, max_n, out=self.cell_bounds2)
     np.multiply(self.cell_bounds1, self.cell_bounds2, out=self.cell_bounds1)
     np.multiply(self.state, self.cell_bounds1, out=self.state)
     np.equal(neighbors, birth_n, out=self.cell_bounds1)
     np.add(self.state, self.cell_bounds1, out=self.state)
     np.clip(self.state + self.cell_bounds1, 0, 1, out=self.state)
     return np.sum(self.old_state - self.state)**2
Example 23
def annealfxn(params, useparams, time, model, envlist, xpdata, xspairlist, lb, ub, norm=False, vardata=False, fileobj=None):
    ''' Feeder function for scipy.optimize.anneal
    '''
    #annlout = scipy.optimize.anneal(pysb.anneal_sundials.annealfxn, paramarr, 
    #                                args=(None, 20000, model, envlist, xpnormdata, 
    #                                [(2,1),(4,2),(7,3)], lb, ub, True, True), 
    #                                lower=lower, upper=upper, full_output=1)
    # sample anneal call full model:
    # params: parameters to be optimized, at their values for the given annealing step
    # lower,upper: arrays from get array function or something similar from getgenparambounds
    # lb, ub: lower bound and upper bound for function from getgenparambounds
    #
    # sample anneal call, optimization of some parameters
    #   annlout = scipy.optimize.anneal(pysb.anneal_sundials.annealfxn, smacprm, args=(smacnum, 25000, model, envlist, xpdata,
    #            [(2,2), (3,3)], lower=lower, upper=upper, full_output=1)
    #
    # sample anneal call, optimization for ALL parameters
    # 
    #

    
    if numpy.greater_equal(params, lb).all() and numpy.less_equal(params, ub).all():
        print("Integrating...")
        outlist = annlodesolve(model, time, envlist, params, useparams)
        # specify that this is normalized data
        if norm is True:
            print("Normalizing data")
            datamax = numpy.max(outlist[0], axis = 1)
            datamin = numpy.min(outlist[0], axis = 1)
            outlistnorm = ((outlist[0].T - datamin)/(datamax-datamin)).T
            # xpdata[0] should be time, get from original array
            outlistnorm[0] = outlist[0][0].copy()
            # xpdata here is normalized, and so is outlistnorm
            objout = compare_data(xpdata, outlistnorm, xspairlist, vardata)
        else:
            objout = compare_data(xpdata, outlist[0], xspairlist, vardata)
    else:
        print("======>VALUE OUT OF BOUNDS NOTED")
        temp = numpy.where((numpy.logical_and(numpy.greater_equal(params, lb), numpy.less_equal(params, ub)) * 1) == 0)
        for i in temp:
            print("======>",i, params[i])
        objout = 1.0e300 # the largest FP in python is 1.0e308, otherwise it is just Inf

    # save the params and temps for analysis
    # FIXME If a parameter is out of bounds, outlist and outlistnorm will be undefined and this will cause an error
    if fileobj:
        if norm:
            writetofile(fileobj, params, outlistnorm, objout)
        else:
            writetofile(fileobj, params, outlist, objout)
    
    return objout
Example 24
  def test_predict_gives_valid_anchors_in_training_mode_first_stage_only(self):
    test_graph = tf.Graph()
    with test_graph.as_default():
      model = self._build_model(
          is_training=True, first_stage_only=True, second_stage_batch_size=2)
      batch_size = 2
      height = 10
      width = 12
      input_image_shape = (batch_size, height, width, 3)
      preprocessed_inputs = tf.placeholder(dtype=tf.float32,
                                           shape=(batch_size, None, None, 3))
      prediction_dict = model.predict(preprocessed_inputs)

      expected_output_keys = set([
          'rpn_box_predictor_features', 'rpn_features_to_crop', 'image_shape',
          'rpn_box_encodings', 'rpn_objectness_predictions_with_background',
          'anchors'])
      # At training time, anchors that exceed image bounds are pruned.  Thus
      # the `expected_num_anchors` in the above inference mode test is now
      # a strict upper bound on the number of anchors.
      num_anchors_strict_upper_bound = height * width * 3 * 3

      init_op = tf.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init_op)
        prediction_out = sess.run(prediction_dict,
                                  feed_dict={
                                      preprocessed_inputs:
                                      np.zeros(input_image_shape)
                                  })

        self.assertEqual(set(prediction_out.keys()), expected_output_keys)
        self.assertAllEqual(prediction_out['image_shape'], input_image_shape)

        # Check that anchors have less than the upper bound and
        # are clipped to window.
        anchors = prediction_out['anchors']
        self.assertTrue(len(anchors.shape) == 2 and anchors.shape[1] == 4)
        num_anchors_out = anchors.shape[0]
        self.assertTrue(num_anchors_out < num_anchors_strict_upper_bound)

        self.assertTrue(np.all(np.greater_equal(anchors, 0)))
        self.assertTrue(np.all(np.less_equal(anchors[:, 0], height)))
        self.assertTrue(np.all(np.less_equal(anchors[:, 1], width)))
        self.assertTrue(np.all(np.less_equal(anchors[:, 2], height)))
        self.assertTrue(np.all(np.less_equal(anchors[:, 3], width)))

        self.assertAllEqual(prediction_out['rpn_box_encodings'].shape,
                            (batch_size, num_anchors_out, 4))
        self.assertAllEqual(
            prediction_out['rpn_objectness_predictions_with_background'].shape,
            (batch_size, num_anchors_out, 2))
Example 25
def PairCorr(xpos,ypos,maxDist,imageWidth,imageHeight,Npar):
 
   # Find particles in the cropping area
   xcropL = greater_equal(xpos,maxDist)              # left boundary
   xcropR = less_equal(xpos,(imageWidth - maxDist))  # right boundary
   ycropT = greater_equal(ypos,maxDist)              # top boundary
   ycropB = less_equal(ypos,(imageHeight - maxDist)) # bottom boundary
   xyIndices = xcropL * xcropR * ycropT * ycropB        # cropped boundary

   # Find cropped indices
   xycropIndices = where(xyIndices)[0]
   # Npar  = NPar#len(p_indices)
   NcropPar = len(xycropIndices)
   print('Total # of Particles:', Npar)
   print('# of Particles within PairCorrCalc boundary:', NcropPar)

   # Compute the distances between all particles.
   distancesM = empty([NcropPar,Npar])
   print('Calculating inter-particle spacings...')

   for citr in range(0,NcropPar,1):
      i = xycropIndices[citr]
      xdist  = subtract(xpos,xpos[i])**2
      ydist  = subtract(ypos,ypos[i])**2
      rdist = sqrt(add(xdist,ydist))
      distancesM[citr,:] = rdist
   
   # Transform the distance matrix into a linear array
   distancesL = distancesM.flatten('C')

   print('Filtering particles beyond maxDist...')
   distancesLF = distancesL[distancesL <= maxDist]

   imageCropArea = float(imageWidth - maxDist) * float(imageHeight - maxDist)
   numDens  = float(NcropPar) / imageCropArea
   print('Cropped # Density:', "{:2.1e}".format(numDens), '[# / Pix^2]')

   # Compute the histogram and set its parameters
   numBins = 2 * maxDist
   binWidth = float(maxDist)/(numBins)
   print('Radial bin width:', binWidth, '[Pixels]')
   mBins = arange(binWidth,maxDist,binWidth)
   print('# of bins:', len(mBins))

   # Compute the histogram
   hist,mbinEdges = histogram(distancesLF,bins=mBins)

   # Normalize the histogram to make it g(r)
   hist = divide(hist,mBins[1:])
   hist = hist / (2.0 * math.pi * Npar * binWidth * numDens)

   return mBins,hist
Example 26
    def wall_collisions(self):
        """Wall Boundary conditions"""

        length = (N/self.den)**(1./self.dim)
        maxpos = length/2. - RADIUS

        np.putmask(self.velocity, np.greater_equal(self.position, maxpos),
                   -self.velocity)
        np.putmask(self.position, np.greater_equal(self.position, maxpos),
                   2*maxpos - self.position)
        np.putmask(self.velocity, np.less_equal(self.position, -maxpos),
                   -self.velocity)
        np.putmask(self.position, np.less_equal(self.position, -maxpos),
                   -2*maxpos - self.position)
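A standalone sketch (my illustration, not from the original source) of the putmask reflection idiom used above:

import numpy as np

maxpos = 1.0
position = np.array([0.5, 1.2, -1.4])
velocity = np.array([0.1, 0.2, -0.3])
np.putmask(velocity, np.greater_equal(position, maxpos), -velocity)
np.putmask(position, np.greater_equal(position, maxpos), 2*maxpos - position)
np.putmask(velocity, np.less_equal(position, -maxpos), -velocity)
np.putmask(position, np.less_equal(position, -maxpos), -2*maxpos - position)
print(position, velocity)   # [ 0.5  0.8 -0.6] [ 0.1 -0.2  0.3]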
Example 27
    def atoms_inside(self, atoms, periodicity=None):
        """Decides which atoms are inside the body (see Body class)."""

        dirvec0 = self._dir_vector / self._norm
        relpos = atoms - self._point1 - self.shift_vector
        dists = np.sqrt(np.sum(np.cross(relpos, dirvec0)**2, axis=1))
        heights = np.dot(relpos, dirvec0) / self._norm
        # maximal allowed distance at given height
        maxdists = self._radius1 + (self._radius2 - self._radius1) * heights
        atoms_inside = np.logical_and(
            np.logical_and(np.less_equal(dists, maxdists),
                           np.less_equal(heights, 1.0)),
            np.greater_equal(heights, 0.0))
        return atoms_inside
Example 28
def Calculate_NumIndepSNPsPerGene(AllHumanGeneChrPos, PrunedSNPsChrPos, boundr_upstr, boundr_downstr):
    n_r, n_c = AllHumanGeneChrPos.shape
    Num_Indep_SNPs_per_gene = np.zeros(n_r, dtype="i")
    strand = np.all(np.equal(AllHumanGeneChrPos[:, 5], 1))

    for i in range(n_r):
        if strand:
            # np.logical_and takes only two array arguments (a third is treated
            # as ``out``), so reduce over the three conditions instead
            Num_Indep_SNPs_per_gene[i] = np.sum(np.logical_and.reduce([np.equal(PrunedSNPsChrPos[:, 0], AllHumanGeneChrPos[i, 0]),
                                                                       np.greater_equal(PrunedSNPsChrPos[:, 1], (AllHumanGeneChrPos[i, 1] - boundr_upstr)),
                                                                       np.less_equal(PrunedSNPsChrPos[:, 1], (AllHumanGeneChrPos[i, 2] + boundr_downstr))]))
        else:
            Num_Indep_SNPs_per_gene[i] = np.sum(np.logical_and.reduce([np.equal(PrunedSNPsChrPos[:, 0], AllHumanGeneChrPos[i, 0]),
                                                                       np.greater_equal(PrunedSNPsChrPos[:, 1], (AllHumanGeneChrPos[i, 1] - boundr_downstr)),
                                                                       np.less_equal(PrunedSNPsChrPos[:, 1], (AllHumanGeneChrPos[i, 2] + boundr_upstr))]))
    return Num_Indep_SNPs_per_gene
Example 29
 def test_testUfuncs1 (self):
     "Test various functions such as sin, cos."
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     self.assertTrue (eq(numpy.cos(x), cos(xm)))
     self.assertTrue (eq(numpy.cosh(x), cosh(xm)))
     self.assertTrue (eq(numpy.sin(x), sin(xm)))
     self.assertTrue (eq(numpy.sinh(x), sinh(xm)))
     self.assertTrue (eq(numpy.tan(x), tan(xm)))
     self.assertTrue (eq(numpy.tanh(x), tanh(xm)))
     olderr = numpy.seterr(divide='ignore', invalid='ignore')
     try:
         self.assertTrue (eq(numpy.sqrt(abs(x)), sqrt(xm)))
         self.assertTrue (eq(numpy.log(abs(x)), log(xm)))
         self.assertTrue (eq(numpy.log10(abs(x)), log10(xm)))
     finally:
         numpy.seterr(**olderr)
     self.assertTrue (eq(numpy.exp(x), exp(xm)))
     self.assertTrue (eq(numpy.arcsin(z), arcsin(zm)))
     self.assertTrue (eq(numpy.arccos(z), arccos(zm)))
     self.assertTrue (eq(numpy.arctan(z), arctan(zm)))
     self.assertTrue (eq(numpy.arctan2(x, y), arctan2(xm, ym)))
     self.assertTrue (eq(numpy.absolute(x), absolute(xm)))
     self.assertTrue (eq(numpy.equal(x, y), equal(xm, ym)))
     self.assertTrue (eq(numpy.not_equal(x, y), not_equal(xm, ym)))
     self.assertTrue (eq(numpy.less(x, y), less(xm, ym)))
     self.assertTrue (eq(numpy.greater(x, y), greater(xm, ym)))
     self.assertTrue (eq(numpy.less_equal(x, y), less_equal(xm, ym)))
     self.assertTrue (eq(numpy.greater_equal(x, y), greater_equal(xm, ym)))
     self.assertTrue (eq(numpy.conjugate(x), conjugate(xm)))
     self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, ym))))
     self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((x, y))))
     self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, y))))
     self.assertTrue (eq(numpy.concatenate((x, y, x)), concatenate((x, ym, x))))
Example 30
 def constraint(self, arg, bound=None):
     bound = seminorm.constraint(self, arg, bound=bound)
     inbox = np.all(np.less_equal(np.fabs(arg), bound * (1+self.tol)))
     if inbox:
         return 0
     else:
         return np.inf
Example 31
    def test_predict_gives_correct_shapes_in_inference_mode_first_stage_only(
            self):
        test_graph = tf.Graph()
        with test_graph.as_default():
            model = self._build_model(is_training=False,
                                      first_stage_only=True,
                                      second_stage_batch_size=2)
            batch_size = 2
            height = 10
            width = 12
            input_image_shape = (batch_size, height, width, 3)

            preprocessed_inputs = tf.placeholder(dtype=tf.float32,
                                                 shape=(batch_size, None, None,
                                                        3))
            prediction_dict = model.predict(preprocessed_inputs)

            # In inference mode, anchors are clipped to the image window, but not
            # pruned.  Since MockFasterRCNN.extract_proposal_features returns a
            # tensor with the same shape as its input, the expected number of anchors
            # is height * width * the number of anchors per location (i.e. 3x3).
            expected_num_anchors = height * width * 3 * 3
            expected_output_keys = set([
                'rpn_box_predictor_features', 'rpn_features_to_crop',
                'image_shape', 'rpn_box_encodings',
                'rpn_objectness_predictions_with_background', 'anchors'
            ])
            expected_output_shapes = {
                'rpn_box_predictor_features': (batch_size, height, width, 512),
                'rpn_features_to_crop': (batch_size, height, width, 3),
                'rpn_box_encodings': (batch_size, expected_num_anchors, 4),
                'rpn_objectness_predictions_with_background':
                (batch_size, expected_num_anchors, 2),
                'anchors': (expected_num_anchors, 4)
            }

            init_op = tf.global_variables_initializer()
            with self.test_session() as sess:
                sess.run(init_op)
                prediction_out = sess.run(prediction_dict,
                                          feed_dict={
                                              preprocessed_inputs:
                                              np.zeros(input_image_shape)
                                          })

                self.assertEqual(set(prediction_out.keys()),
                                 expected_output_keys)

                self.assertAllEqual(prediction_out['image_shape'],
                                    input_image_shape)
                for output_key, expected_shape in expected_output_shapes.items():
                    self.assertAllEqual(prediction_out[output_key].shape,
                                        expected_shape)

                # Check that anchors are clipped to window.
                anchors = prediction_out['anchors']
                self.assertTrue(np.all(np.greater_equal(anchors, 0)))
                self.assertTrue(np.all(np.less_equal(anchors[:, 0], height)))
                self.assertTrue(np.all(np.less_equal(anchors[:, 1], width)))
                self.assertTrue(np.all(np.less_equal(anchors[:, 2], height)))
                self.assertTrue(np.all(np.less_equal(anchors[:, 3], width)))
Example 32
def leq_notclose(x1, x2):
    """Return the truth value of (x1 <= x2) AND (x1 is NOT close to x2) element-wise."""
    return np.less_equal(x1, x2) & ~np.isclose(x1, x2)
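A usage sketch (my illustration, not from the original source):

import numpy as np

x = np.array([1.0, 2.0, 2.0 + 1e-12, 3.0])
y = np.array([2.0, 2.0, 2.0, 2.5])
print(leq_notclose(x, y))   # [ True False False False]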
Example 33
    ])
    phase_avg.append([
        phase[this_djf].min(), phase[this_djf].max(),
        np.mean(phase[this_djf]),
        np.median(phase[this_djf])
    ])
    # sunspot_avg.append([np.mean(g_sun.data[this_djf_sspot], axis = 0), np.std(g_sun.data[this_djf_sspot], axis = 0, ddof = 1)])
    amplitude_avg.append([
        np.mean(amplitude[this_djf], axis=0),
        np.std(amplitude[this_djf], axis=0, ddof=1)
    ])

    sigma_season = np.std(tg[this_year_extr], axis=0, ddof=1)
    if SEASON == 'DJF':
        sigma_2ex = np.less_equal(
            tg[this_djf],
            np.mean(tg[this_year_extr]) - 2 * sigma_season)
        sigma_3ex = np.less_equal(
            tg[this_djf],
            np.mean(tg[this_year_extr]) - 3 * sigma_season)
    elif SEASON == 'JJA':
        sigma_2ex = np.greater_equal(
            tg[this_djf],
            np.mean(tg[this_year_extr]) + 2 * sigma_season)
        sigma_3ex = np.greater_equal(
            tg[this_djf],
            np.mean(tg[this_year_extr]) + 3 * sigma_season)
    extremes.append([np.sum(sigma_2ex), np.sum(sigma_3ex)])

    if SEASON == 'DJF':
        sigma_2ex = np.less_equal(tg[this_djf], mean_djf - 2 * sigma_djf)
Example 34
    pz = pavg * np.cos(theta)
    poslist.append((x, y, z))
    plist.append((px, py, pz))
    mlist.append(mass)
    rlist.append(Ratom)

pos = np.array(poslist)
poscircle = pos
p = np.array(plist)
m = np.array(mlist)
m.shape = (Natoms, 1)
radius = np.array(rlist)
r = pos - pos[:, np.newaxis]  # all pairs of atom-to-atom vectors

ds = (p / m) * (dt / 2.0)
if "False" not in np.less_equal(mag(ds), radius).tolist():
    pos = pos + (p / mass) * (dt / 2.0)  # initial half-step

pb = ProgressBar(0, Nsteps, c=1)
for i in pb.range():

    # Update all positions
    ds = mag((p / m) * (dt / 2.0))
    if "False" not in np.less_equal(ds, radius).tolist():
        pos = pos + (p / m) * dt

    r = pos - pos[:, np.newaxis]  # all pairs of atom-to-atom vectors
    rmag = np.sqrt(np.sum(np.square(r), -1))  # atom-to-atom scalar distances
    hit = np.less_equal(rmag, radius + radius[:, None]) - np.identity(Natoms)
    hitlist = np.sort(np.nonzero(
        hit.flat)[0]).tolist()  # i,j encoded as i*Natoms+j
Example 35
    'tf.math.is_strictly_increasing',
    lambda x, name=None: np.all(x[1:] > x[:-1]))

l2_normalize = utils.copy_docstring('tf.math.l2_normalize', _l2_normalize)

lbeta = utils.copy_docstring(
    'tf.math.lbeta',
    _lbeta)

less = utils.copy_docstring(
    'tf.math.less',
    lambda x, y, name=None: np.less(x, y))

less_equal = utils.copy_docstring(
    'tf.math.less_equal',
    lambda x, y, name=None: np.less_equal(x, y))

lgamma = utils.copy_docstring(
    'tf.math.lgamma',
    lambda x, name=None: scipy_special.gammaln(x))

log = utils.copy_docstring(
    'tf.math.log',
    lambda x, name=None: np.log(_convert_to_tensor(x)))

log1p = utils.copy_docstring(
    'tf.math.log1p',
    lambda x, name=None: np.log1p(_convert_to_tensor(x)))

log_sigmoid = utils.copy_docstring(
    'tf.math.log_sigmoid',
Example 36
def make_spline(t,
                m,
                e_m,
                knots=None,
                k=1,
                s=None,
                fitflux=0,
                zpt=0,
                tmin=None,
                tmax=None,
                task=-1,
                anchor_dist=[5.0, 5.0],
                slopes=[None, None]):
    '''A wrapper around splrep that makes sure the independent variable is
   monotonic and non-repeating.  Required arguments:  time (t), magnitudes
   (m) and errors (e_m).  If knots are specified, use them (if task==-1), otherwise,
   they are computed from -10 days to 100 days in 10-day increments.  k is the 
   spline order (default 1) and s is the smoothing factor, as per splrep.  If fitflux
   is nonzero, convert magnitudes to flux (using provided zpt).  tmin and tmax should
   be set to the limits of the spline.'''
    # first, we make sure that t is monotonically increasing with no repeated
    # elements
    sids = num.argsort(t)
    tt = t[sids]  #num.take(t, sids)
    mm = m[sids]  # num.take(m, sids)
    ee_m = e_m[sids]  #num.take(e_m, sids)

    if tmin is None:
        tmin = t.min()
    if tmax is None:
        tmax = t.max()

    # here's some Numeric magic.  first, find where we have repeating x-values
    Nmatrix = num.equal(tt[:, num.newaxis], tt[num.newaxis, :])
    #val_matrix = mm[:,num.newaxis]*num.ones((len(mm),len(mm)))
    #e_matrix = ee_m[:,num.newaxis]*num.ones((len(mm),len(mm)))
    val_matrix = mm[:, num.newaxis] * Nmatrix
    e_matrix = ee_m[:, num.newaxis] * Nmatrix

    average = sum(val_matrix) / sum(Nmatrix)
    e_average = sum(e_matrix) / sum(Nmatrix)

    # at this point, average is the original data, but with repeating data points
    # replaced with their average.  Now, we just pick out the unique x's and
    # the first of any repeating points:
    gids = num.concatenate([[1], num.greater(tt[1:] - tt[:-1], 0.)])
    tt = num.compress(gids, tt)
    mm = num.compress(gids, average)
    ee_m = num.compress(gids, e_average)

    # Now get rid of any data that's outside [tmin,tmax]
    gids = num.less_equal(tt, tmax) * num.greater_equal(tt, tmin)
    #tt = num.compress(gids,tt)
    tt = tt[gids]
    #mm = num.compress(gids,mm)
    mm = mm[gids]
    #ee_m = num.compress(gids,ee_m)
    ee_m = ee_m[gids]
    ee_m = num.where(num.less(ee_m, 0.001), 0.001, ee_m)

    # Next, add some anchors to the data to control the slopes
    if anchor_dist[0] > 0 and task != -1:
        if slopes[0] is not None:
            mm0 = mm[0] - slopes[0] * anchor_dist[0]
        else:
            mm0 = mm[0] - (mm[1] - mm[0]) / (tt[1] - tt[0]) * anchor_dist[0]
        tt = num.concatenate([[tt[0] - anchor_dist[0]], tt])
        mm = num.concatenate([[mm0], mm])
        ee_m = num.concatenate([[ee_m[0]], ee_m])
    if anchor_dist[1] > 0:
        if slopes[1] is not None:
            mm1 = mm[-1] + slopes[1] * anchor_dist[1]
        else:
            mm1 = mm[-1] + (mm[-1] - mm[-2]) / (tt[-1] -
                                                tt[-2]) * anchor_dist[1]
        tt = num.concatenate([tt, [tt[-1] + anchor_dist[1]]])
        mm = num.concatenate([mm, [mm1]])
        ee_m = num.concatenate([ee_m, [ee_m[-1]]])

    # Now convert to flux if requested:
    if fitflux:
        mm = num.power(10, -0.4 * (mm - zpt))
        ee_m = mm * ee_m / 1.087

    if knots is None and task == -1:
        # Use the minimal number
        knots = tmin + num.arange(2 * k + 3) * (tmax - tmin) / (2 * k + 2)

    # Okay, now make the spline representation
    tck, fp, ier, msg = splrep(tt,
                               mm,
                               1.0 / ee_m,
                               k=k,
                               s=s,
                               t=knots,
                               task=task,
                               full_output=1)
    return (tck, fp, ier, msg)
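A usage sketch (my illustration, not from the original source), assuming make_spline above is in scope with num as numpy and scipy's splrep/splev imported:

import numpy as num
from scipy.interpolate import splev

t = num.linspace(-10., 100., 40)
m = 0.02 * (t - 20.)**2 / 100. + 18.0   # a fake light curve in magnitudes
e_m = num.ones_like(t) * 0.02
tck, fp, ier, msg = make_spline(t, m, e_m, k=3, task=0, s=len(t))
print(splev([0., 20., 40.], tck))       # spline evaluated at three epochs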
Example 37
def __VoxelizeRayTracingZDirectionVTK__(polydata,gridCOx,gridCOy,gridCOz):
    # Remark : most of the time is spent in this function. This is because of 
    # the python loop on VTK_Rays. Maybe a loop through a VTK collection of rays 
    # using an iterator object would be faster, because python could recognize
    # the loop should be spent in C++. 
    #See https://blog.kitware.com/pythonic-callbacks-and-iteration-in-vtk/
    
    #Identify the min and max x,y coordinates (cm) of the mesh:
#    meshXmin = meshXYZ[:,0,:].min()
#    meshXmax = meshXYZ[:,0,:].max()
#    meshYmin = meshXYZ[:,1,:].min()
#    meshYmax = meshXYZ[:,1,:].max()
#    meshZmin = meshXYZ[:,2,:].min()
#    meshZmax = meshXYZ[:,2,:].max()
    
    meshXmin,meshXmax,meshYmin,meshYmax,meshZmin,meshZmax = polydata.GetBounds()
    assert (meshXmin<meshXmax and meshYmin<meshYmax and meshZmin<meshZmax)
    
    #Identify the min and max x,y coordinates (pixels) of the mesh:
    meshXminp = np.nonzero(abs(gridCOx-meshXmin)==min(abs(gridCOx-meshXmin)))[0][0]
    meshXmaxp = np.nonzero(abs(gridCOx-meshXmax)==min(abs(gridCOx-meshXmax)))[0][0]
    meshYminp = np.nonzero(abs(gridCOy-meshYmin)==min(abs(gridCOy-meshYmin)))[0][0]
    meshYmaxp = np.nonzero(abs(gridCOy-meshYmax)==min(abs(gridCOy-meshYmax)))[0][0]
        
    #meshXYZmin = np.min(meshXYZ,axis=2)
    #meshXYZmax = np.max(meshXYZ,axis=2)    
        
    sampleDimensions=(len(gridCOx),len(gridCOy),len(gridCOz))
    image=np.zeros(sampleDimensions,dtype=bool)   # np.bool is removed in modern NumPy
    #Identify the min and max x,y,z coordinates of each facet:
    
    
    #Construct octree of the mesh for fast access to its geometry
    obbTree = vtk.vtkOBBTree()
    obbTree.SetDataSet(polydata)
    obbTree.BuildLocator()
    
    #correctionLIST = []  
    
    Yrange=range(meshYminp,meshYmaxp+1)
    Xrange=range(meshXminp,meshXmaxp+1)
    
    correctionLIST = np.zeros( (len(Xrange),len(Yrange)) )    
    
    for loopY in Yrange:
        for loopX in Xrange:
            
            epsilon=(meshZmax-meshZmin)/10
            pSource=[gridCOx[loopX],gridCOy[loopY],meshZmax+epsilon]
            pTarget=[gridCOx[loopX],gridCOy[loopY],meshZmin-epsilon]
            pointsIntersection=__VTKRayCasting__(obbTree,pSource,pTarget)
            
            
            if len(pointsIntersection)>0: 

              #gridCOzCROSS=np.asarray([pointsIntersection[i][2] for i in range(len(pointsIntersection))])
              gridCOzCROSS=pointsIntersection[:,2]
              
              #Remove values of gridCOzCROSS which are outside of the mesh limits (including a 1e-12 margin for error).
              gridCOzCROSS = gridCOzCROSS[ np.logical_and(
                  np.greater_equal(gridCOzCROSS,(meshZmin-1e-12)*np.ones(gridCOzCROSS.shape)),
                  np.less_equal(gridCOzCROSS,(meshZmax+1e-12)*np.ones(gridCOzCROSS.shape))) ]
          
              #Round gridCOzCROSS to remove any rounding errors, and take only the unique values:
              gridCOzCROSS = np.round(gridCOzCROSS*1e12)/1e12
              gridCOzCROSS = np.unique(gridCOzCROSS)
    
              if gridCOzCROSS.size%2 == 0:
                  for loopASSIGN in range(gridCOzCROSS.size//2):
                      voxelsINSIDE = np.logical_and(
                                    np.greater(gridCOz,gridCOzCROSS[2*loopASSIGN]), 
                                    np.less(gridCOz,gridCOzCROSS[2*loopASSIGN+1]))
                                    
                      image[loopX,loopY,voxelsINSIDE] = 1
              elif len(gridCOzCROSS)>0:
                  #correctionLIST.append([loopX,loopY])
                  correctionLIST[loopX-meshXminp,loopY-meshYminp]=1
    
    # USE INTERPOLATION TO FILL IN THE RAYS WHICH COULD NOT BE VOXELISED
    #For rays where the voxelisation did not give a clear result, the ray is
    #computed by interpolating from the surrounding rays.    

    nzX,nzY=np.nonzero(correctionLIST)
    nzX,nzY = nzX+meshXminp,nzY+meshYminp
    #correctionLIST=[[nzX[i],nzY[i]] for i in range(nzX.size)]
    
    correctionLIST=np.transpose(np.vstack((nzX,nzY)))
    image=__InterpolateRemainingVoxels__(correctionLIST,sampleDimensions,image)
    
    return image
Example 38
def three_cases(changes, penalty, denominator):
    return (np.less_equal(changes, -penalty) *
            (-changes - penalty) + np.greater_equal(changes, penalty) *
            (-changes + penalty)) / denominator
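A quick check (my illustration, not from the original source) showing that three_cases behaves like a negated soft-threshold: zero inside [-penalty, penalty], linearly shrunk outside:

import numpy as np

changes = np.array([-3.0, -1.0, 0.0, 1.0, 3.0])
print(three_cases(changes, 2.0, 1.0))   # [ 1.  0.  0.  0. -1.]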
Example 39
def _check_precision_positivity(precision, covariance_type):
    """Check a precision vector is positive-definite."""
    if np.any(np.less_equal(precision, 0.0)):
        raise ValueError("'%s precision' should be "
                         "positive" % covariance_type)
Example 40
    def apply(self, experiment):
        """
        Assigns new metadata to events using the mixture model estimated
        in :meth:`estimate`.
        
        Returns
        -------
        Experiment
            A new :class:`.Experiment` with the new condition variables as
            described in the class documentation.  Also adds the following
            new statistics:
            
            - **mean** : Float
                the mean of the fitted gaussian in each channel for each component.
                
            - **sigma** : (Float, Float)
                the locations of the mean +/- one standard deviation in each
                channel for each component.
                
            - **correlation** : Float
                the correlation coefficient between each pair of channels for each
                component.
                
            - **proportion** : Float
                the proportion of events in each component of the mixture model.
                Only added if :attr:`num_components` ``> 1``.
        """
             
        if experiment is None:
            raise util.CytoflowOpError('experiment',
                                       "No experiment specified")
         
        if len(self.channels) == 0:
            raise util.CytoflowOpError('channels',
                                       "Must set at least one channel")
         
        # make sure name got set!
        if not self.name:
            raise util.CytoflowOpError('name',
                                       "You have to set the gate's name "
                                       "before applying it!")
        
        if self.num_components > 1 and self.name in experiment.data.columns:
            raise util.CytoflowOpError('name',
                                       "Experiment already has a column named {0}"
                                       .format(self.name))
            
        if self.sigma > 0:
            for i in range(1, self.num_components + 1):
                cname = "{}_{}".format(self.name, i)
                if cname in experiment.data.columns:
                    raise util.CytoflowOpError('name',
                                               "Experiment already has a column named {}"
                                               .format(cname))
 
        if self.posteriors:
            for i in range(1, self.num_components + 1):
                cname = "{}_{}_posterior".format(self.name, i)
                if cname in experiment.data.columns:
                    raise util.CytoflowOpError('name',
                                               "Experiment already has a column named {}"
                                               .format(cname))               
         
        if not self._gmms:
            raise util.CytoflowOpError(None, 
                                       "No components found.  Did you forget to "
                                       "call estimate()?")
            
        for c in self.channels:
            if c not in self._scale:
                raise util.CytoflowOpError(None,
                                           "Model scale not set.  Did you forget "
                                           "to call estimate()?")
 
        for c in self.channels:
            if c not in experiment.channels:
                raise util.CytoflowOpError('channels',
                                           "Channel {0} not found in the experiment"
                                           .format(c))
        
        for b in self.by:
            if b not in experiment.conditions:
                raise util.CytoflowOpError('by',
                                           "Aggregation metadata {} not found, "
                                           "must be one of {}"
                                           .format(b, experiment.conditions))
#                             
#         if self.num_components == 1 and self.sigma == 0.0:
#             raise util.CytoflowOpError('sigma',
#                                        "if num_components is 1, sigma must be > 0.0")
        
                
        if self.num_components == 1 and self.posteriors:
            warn("If num_components == 1, all posteriors will be 1",
                 util.CytoflowOpWarning)
#             raise util.CytoflowOpError('posteriors',
#                                        "If num_components == 1, all posteriors will be 1.")
         
        if self.num_components > 1:
            event_assignments = pd.Series(["{}_None".format(self.name)] * len(experiment), dtype = "object")
 
        if self.sigma > 0:
            event_gate = {i : pd.Series([False] * len(experiment), dtype = "double")
                           for i in range(self.num_components)}
 
        if self.posteriors:
            event_posteriors = {i : pd.Series([0.0] * len(experiment), dtype = "double")
                                for i in range(self.num_components)}

        if self.by:
            groupby = experiment.data.groupby(self.by)
        else:
            # use a lambda expression to return a group that
            # contains all the events
            groupby = experiment.data.groupby(lambda _: True)   

        # make the statistics       
        components = [x + 1 for x in range(self.num_components)]
         
        prop_idx = pd.MultiIndex.from_product([experiment[x].unique() for x in self.by] + [components], 
                                         names = list(self.by) + ["Component"])
        prop_stat = pd.Series(name = "{} : {}".format(self.name, "proportion"),
                              index = prop_idx, 
                              dtype = np.dtype(object)).sort_index()
                  
        mean_idx = pd.MultiIndex.from_product([experiment[x].unique() for x in self.by] + [components] + [self.channels], 
                                              names = list(self.by) + ["Component"] + ["Channel"])
        mean_stat = pd.Series(name = "{} : {}".format(self.name, "mean"),
                              index = mean_idx, 
                              dtype = np.dtype(object)).sort_index()
        sigma_stat = pd.Series(name = "{} : {}".format(self.name, "sigma"),
                               index = mean_idx,
                               dtype = np.dtype(object)).sort_index()
        interval_stat = pd.Series(name = "{} : {}".format(self.name, "interval"),
                                  index = mean_idx, 
                                  dtype = np.dtype(object)).sort_index()

        corr_idx = pd.MultiIndex.from_product([experiment[x].unique() for x in self.by] + [components] + [self.channels] + [self.channels], 
                                              names = list(self.by) + ["Component"] + ["Channel_1"] + ["Channel_2"])
        corr_stat = pd.Series(name = "{} : {}".format(self.name, "correlation"),
                              index = corr_idx, 
                              dtype = np.dtype(object)).sort_index()  
                 
        for group, data_subset in groupby:
            if group not in self._gmms:
                # there weren't any events in this group, so we didn't get
                # a gmm.
                continue
             
            gmm = self._gmms[group]
            x = data_subset.loc[:, self.channels[:]]
            for c in self.channels:
                x[c] = self._scale[c](x[c])
                
            # which values are missing?

            x_na = pd.Series([False] * len(x))
            for c in self.channels:
                x_na[np.isnan(x[c]).values] = True
                        
            x = x.values
            x_na = x_na.values
            group_idx = groupby.groups[group]
 
            if self.num_components > 1:
                predicted = np.full(len(x), -1, "int")
                predicted[~x_na] = gmm.predict(x[~x_na])
                
                predicted_str = pd.Series(["(none)"] * len(predicted))
                for c in range(0, self.num_components):
                    predicted_str[predicted == c] = "{0}_{1}".format(self.name, c + 1)
                predicted_str[predicted == -1] = "{0}_None".format(self.name)
                predicted_str.index = group_idx
     
                event_assignments.iloc[group_idx] = predicted_str
                
            # if we're doing sigma-based gating, for each component check
            # to see if the event is in the sigma gate.
            if self.sigma > 0.0:
                for c in range(self.num_components):
                    s = np.linalg.pinv(gmm.covariances_[c])
                    mu = gmm.means_[c]
                    
                    # compute the Mahalanobis distance

                    f = lambda x, mu, s: np.dot(np.dot((x - mu).T, s), (x - mu))
                    dist = np.apply_along_axis(f, 1, x, mu, s)

                    # come up with a threshold based on sigma.  you'll note we
                    # didn't sqrt dist: that's because for a multivariate 
                    # Gaussian, the square of the Mahalanobis distance is
                    # chi-square distributed
                    
                    p = (scipy.stats.norm.cdf(self.sigma) - 0.5) * 2
                    thresh = scipy.stats.chi2.ppf(p, 1)
                    
                    event_gate[c].iloc[group_idx] = np.less_equal(dist, thresh)
                    
            if self.posteriors:
                p = gmm.predict_proba(x)
                for c in range(self.num_components):
                    event_posteriors[c].iloc[group_idx] = p[:, c]
                    
            for c in range(self.num_components):
                if len(self.by) == 0:
                    g = [c + 1]
                elif hasattr(group, '__iter__') and not isinstance(group, (str, bytes)):
                    g = tuple(list(group) + [c + 1])
                else:
                    g = tuple([group] + [c + 1])

                prop_stat.loc[g] = gmm.weights_[c]
                
                for cidx1, channel1 in enumerate(self.channels):
                    g2 = tuple(list(g) + [channel1])
                    mean_stat.loc[g2] = self._scale[channel1].inverse(gmm.means_[c, cidx1])
                    
                    s, corr = util.cov2corr(gmm.covariances_[c])
                    sigma_stat[g2] = (self._scale[channel1].inverse(s[cidx1]))
                    interval_stat.loc[g2] = (self._scale[channel1].inverse(gmm.means_[c, cidx1] - s[cidx1]),
                                             self._scale[channel1].inverse(gmm.means_[c, cidx1] + s[cidx1]))
            
                    for cidx2, channel2 in enumerate(self.channels):
                        g3 = tuple(list(g2) + [channel2])
                        corr_stat[g3] = corr[cidx1, cidx2]
                        
                    corr_stat.drop(tuple(list(g2) + [channel1]), inplace = True)

        new_experiment = experiment.clone()
          
        if self.num_components > 1:
            new_experiment.add_condition(self.name, "category", event_assignments)
            
        if self.sigma > 0:
            for c in range(self.num_components):
                gate_name = "{}_{}".format(self.name, c + 1)
                new_experiment.add_condition(gate_name, "bool", event_gate[c])              
                
        if self.posteriors:
            for c in range(self.num_components):
                post_name = "{}_{}_posterior".format(self.name, c + 1)
                new_experiment.add_condition(post_name, "double", event_posteriors[c])
                
        new_experiment.statistics[(self.name, "mean")] = pd.to_numeric(mean_stat)
        new_experiment.statistics[(self.name, "sigma")] = sigma_stat
        new_experiment.statistics[(self.name, "interval")] = interval_stat
        if len(corr_stat) > 0:
            new_experiment.statistics[(self.name, "correlation")] = pd.to_numeric(corr_stat)
        if self.num_components > 1:
            new_experiment.statistics[(self.name, "proportion")] = pd.to_numeric(prop_stat)

        new_experiment.history.append(self.clone_traits(transient = lambda _: True))
        return new_experiment
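For reference, the sigma-gating branch above turns a one-dimensional sigma level into a two-sided coverage probability and then into a chi-square quantile for the squared Mahalanobis distance. A minimal, self-contained sketch of that mapping (the sigma value and the distances below are made up for illustration):

import numpy as np
import scipy.stats

sigma = 2.0                                  # hypothetical gate width
p = (scipy.stats.norm.cdf(sigma) - 0.5) * 2  # two-sided coverage, ~0.954
thresh = scipy.stats.chi2.ppf(p, 1)          # squared-distance cutoff, 4.0
dist = np.array([0.5, 3.9, 4.1, 9.0])        # example squared distances
in_gate = np.less_equal(dist, thresh)        # [True, True, False, False]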
Example no. 41
 def _subset(self, z):
     """
     Huber's T is defined piecewise over the range for z
     """
     z = np.asarray(z)
     return np.less_equal(np.fabs(z), self.t)
Example no. 42
 def _subset(self, z):
     """
     Andrew's wave is defined piecewise over the range of z.
     """
     z = np.asarray(z)
     return np.less_equal(np.fabs(z), self.a * np.pi)
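Examples no. 41 and 42 (and Tukey's biweight in Example no. 60 below) share one pattern: _subset returns a boolean mask selecting the residuals whose magnitude falls inside the norm's tuning constant. A standalone sketch, with self.t replaced by Huber's common default of 1.345:

import numpy as np

t = 1.345  # hypothetical tuning constant (Huber's usual default)
z = np.array([-3.0, -1.0, 0.0, 1.2, 2.5])
inside = np.less_equal(np.fabs(z), t)  # [False, True, True, True, False]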
Example no. 43
def less_equal(x, y):
    if not SWITCH_ON or has_tensor(x) or has_tensor(y):
        return tf.less_equal(x, y)
    else:
        return np.less_equal(x, y)
Example no. 44
def make_spline2(t,
                 m,
                 e_m,
                 k=3,
                 fitflux=0,
                 zpt=0,
                 tmin=-10,
                 tmax=100,
                 adaptive=0,
                 max_curv_fac=10,
                 **args):
    '''A wrapper around spline2 that makes sure the independent variable is
   monotonic and non-repeating.  Required arguments:  time (t), magnitudes
   (m) and errors (e_m).  k is the spline order (default 3).  If fitflux
   is nonzero, convert magnitudes to flux (using the provided zpt).  tmin
   and tmax should be set to the limits of the spline.'''
    # first, we make sure that t is monotonically increasing with no repeated
    # elements
    sids = num.argsort(t)
    tt = num.take(t, sids)
    mm = num.take(m, sids)
    ee_m = num.take(e_m, sids)

    # here's some Numeric magic.  first, find where we have repeating x-values
    Nmatrix = num.equal(tt[:, num.newaxis], tt[num.newaxis, :])
    #val_matrix = mm[:,num.newaxis]*num.ones((len(mm),len(mm)))
    #e_matrix = ee_m[:,num.newaxis]*num.ones((len(mm),len(mm)))
    val_matrix = mm[:, num.newaxis] * Nmatrix
    e_matrix = ee_m[:, num.newaxis] * Nmatrix

    average = sum(val_matrix) / sum(Nmatrix)
    e_average = sum(e_matrix) / sum(Nmatrix)

    # at this point, average is the original data, but with repeating data points
    # replaced with their average.  Now, we just pick out the unique x's and
    # the first of any repeating points:
    gids = num.concatenate([[True], num.greater(tt[1:] - tt[:-1], 0.)])
    tt = tt[gids]  #num.compress(gids, tt)
    mm = average[gids]  # num.compress(gids, average)
    ee_m = e_average[gids]  #num.compress(gids, e_average)

    # Now get rid of any data that's outside [tmin,tmax]
    gids = num.less_equal(tt, tmax) * num.greater_equal(tt, tmin)
    tt = tt[gids]  #num.compress(gids,tt)
    mm = mm[gids]  # num.compress(gids,mm)
    ee_m = ee_m[gids]  #num.compress(gids,ee_m)
    ee_m = num.where(num.less(ee_m, 0.001), 0.001, ee_m)

    # Now convert to flux if requested:
    if fitflux:
        mm = num.power(10, -0.4 * (mm - zpt))
        ee_m = mm * ee_m / 1.087

    # Okay, now make the spline representation
    if not adaptive:
        tck = spline2.spline2(tt, mm, w=1.0 / ee_m, degree=k, **args)
        fp = num.sum(num.power((mm - spline2.evalsp(tt, tck)) / ee_m, 2))
        ier = 0
        msg = 'not much'
        return (tck, fp, ier, msg)

    # Do an adaptive (much slower) search for the best fit, subject
    #  to curvature constraints (right now, just a cap).
    if 'lopt' in args: del args['lopt']
    Ks = []
    chisqs = []
    lopts = list(range(2, int(0.8 * len(tt) - 1)))
    for l in lopts:
        tck = spline2.spline2(tt, mm, w=1.0 / ee_m, degree=k, lopt=l, **args)
        fp = num.sum(num.power((mm - spline2.evalsp(tt, tck)) / ee_m, 2))
        K = quad(K2, tck[0][0], tck[0][-1], args=(tck, ), epsrel=0.01)[0]
        chisqs.append(fp)
        Ks.append(K)

    chisqs = num.array(chisqs)
    Ks = num.array(Ks)
    chisqs = num.where(num.less(Ks, Ks.min() * max_curv_fac), chisqs, num.inf)
    idx = num.argmin(chisqs)

    ier = lopts[idx]
    tck = spline2.spline2(tt, mm, w=1.0 / ee_m, degree=k, lopt=ier, **args)
    fp = num.sum(num.power((mm - spline2.evalsp(tt, tck)) / ee_m, 2))
    return (tck, fp, ier, "Optimized lopt = %d" % ier)
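The "Numeric magic" above deserves a standalone illustration: an equality matrix lets repeated abscissae be replaced by the average of their ordinates in one vectorized pass. A small sketch with made-up data:

import numpy as np

tt = np.array([1., 2., 2., 3.])
mm = np.array([10., 20., 22., 30.])
N = np.equal(tt[:, np.newaxis], tt[np.newaxis, :])
average = (mm[:, np.newaxis] * N).sum(axis=0) / N.sum(axis=0)
# average -> [10., 21., 21., 30.]: repeats now carry their mean
keep = np.concatenate([[True], np.greater(tt[1:] - tt[:-1], 0.)])
tt, mm = tt[keep], average[keep]  # -> [1., 2., 3.] and [10., 21., 30.]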
Example no. 45
    def VisParticles(self,
                     timestep=-1,
                     branchID=1,
                     PlotFlow=True,
                     PlotTemp=True,
                     PlotGrid=True):
        """
        visualize the particle trajectories at a given time step
        set PlotTemp = False for now; still need to figure out how to contour-plot a 1D array
        """
        self.ReadParticles(branchID)

        X = self.X[timestep]
        Z = self.Z[timestep]

        if PlotFlow or PlotTemp:
            self.ReadBackground(branchID)
            X_flow = self.X_flow[timestep]
            Z_flow = self.Z_flow[timestep]

        plt.rcParams.update({'font.size': 18})
        fig = plt.figure(figsize=(11.5, 8))
        ax = fig.add_subplot(111)
        ax.plot(X, Z, '.', color='r')
        if PlotFlow:
            U = self.U[timestep]
            W = self.W[timestep]

            ## mask data
            X_flow = np.asarray(X_flow)
            Z_flow = np.asarray(Z_flow)
            U = np.asarray(U)
            W = np.asarray(W)
            maskuv = np.logical_or(U != 0, W != 0)

            scale = 1.
            scale = 100. / scale
            Q = ax.quiver(X_flow[maskuv],
                          Z_flow[maskuv],
                          U[maskuv] * 100.,
                          W[maskuv] * 100.,
                          zorder=5,
                          width=0.001,
                          headwidth=4,
                          headlength=4.5,
                          scale=scale,
                          color='b')
            qk = ax.quiverkey(Q,
                              0.15,
                              0.15,
                              1,
                              r'$1 \frac{cm}{s}$',
                              labelpos='W',
                              fontproperties={
                                  'weight': 'bold',
                                  'size': 20
                              })

        if PlotTemp:
            import matplotlib.tri as tri
            #from scipy.interpolate import griddata
            T = self.T[timestep]
            U = np.asarray(self.U[timestep])
            W = np.asarray(self.W[timestep])

            ## mask data
            X_flow = np.asarray(X_flow)
            Z_flow = np.asarray(Z_flow)
            T = np.asarray(T)

            triang = tri.Triangulation(X_flow, Z_flow)
            isbad = np.less_equal(np.asarray(self.T[0]), 0)
            #isbad = np.equal(U, 0) & np.equal(W, 0)
            mask = np.any(np.where(isbad[triang.triangles], True, False),
                          axis=1)
            triang.set_mask(mask)

            #T[T==0] = self.mask_value
            #T = np.ma.masked_array(T,mask=T==self.mask_value)

            #T_limits = [0,35]
            T_limits = [T.min(), T.max()]
            levels = np.linspace(T_limits[0], T_limits[1], 100)
            cs = ax.tricontourf(triang, T, cmap=plt.cm.bone, levels=levels)
            #cs = ax.tricontourf(X_flow, Z_flow, T, cmap=plt.cm.bone, levels=levels)

            from mpl_toolkits.axes_grid1 import make_axes_locatable
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="3%", pad=0.05)
            cb = fig.colorbar(cs, cax=cax, orientation='vertical')
            cb.ax.tick_params(labelsize=12)
            cb.ax.yaxis.offsetText.set_fontsize(12)
            cb.set_label('Temperature', fontsize=14)

        if PlotGrid:
            from bathymetry import W2_Bathymetry
            filename = '%s\\%s' % (self.workdir, 'Bth_WB1.npt')
            WB = W2_Bathymetry(filename)
            pat = WB.VisBranch2(branchID)
            for sq in pat:
                ax.add_patch(sq)
            ax.autoscale_view()

        timestr = datetime.strftime(self.runtimes[timestep], '%Y-%m-%d')
        ax.title.set_text('Time: %s' % timestr)
        #ax.set_xlim([4000, 12000])
        #ax.set_ylim([135, 150])
        ax.set_ylim([135, 160])
        ax.set_xlabel('Distance from upstream (m)')
        ax.set_ylabel('Water Depth (m)')
        #ax.yaxis.grid(True)
        #ax.xaxis.grid(True)
        #plt.show()
        #plt.savefig('particle_tracks_%s.png'%str(timestep))
        #plt.savefig('%s\\particle_tracks_%s.png'%(self.workdir, str(timestep)))
        #plt.close()
        plt.show()
Example no. 46
print(f"ANIS={ANIS} with CIANIS={CIANIS}")

# plot
fig4, axs4 = plt.subplots(2, 2, num=4, clear=True)
axs4[0, 0].plot(*x_est.T[:2], label="est", color="C0")
axs4[0, 0].scatter(*Z.T, label="z", color="C1")
axs4[0, 0].legend()
axs4[0, 1].plot(np.arange(K) * Ts, x_est[:, 4], label=r"$\omega$")
axs4[0, 1].legend()
axs4[1, 0].plot(np.arange(K) * Ts, prob_est, label=r"$Pr(s)$")
axs4[1, 0].legend()
axs4[1, 1].plot(np.arange(K) * Ts, NIS, label="NIS")
axs4[1, 1].plot(np.arange(K) * Ts, NISes)

ratio_in_CI = np.sum(
    np.less_equal(CINIS[0], NIS) * np.less_equal(NIS, CINIS[1])) / K
CI_LABELS = ["CI0", "CI1"]
for ci, cilbl in zip(CINIS, CI_LABELS):
    axs4[1, 1].plot([1, K * Ts], np.ones(2) * ci, "--r", label=cilbl)
axs4[1, 1].text(K * Ts * 1.1, 1, f"{ratio_in_CI} inside CI", rotation=90)
axs4[1, 1].legend()
plt.show()
# %% tune IMM by looking at ground truth
sigma_z = 3
sigma_a_CV_low = 0.2
sigma_a_CT = 0.1
sigma_omega = 0.002 * np.pi
sigma_a_CVhigh = 8
PI = np.array([[0.95, 0.05], [0.05, 0.95]])

# Optional sanity check
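One plausible form of that sanity check, sketched under the assumption that CINIS holds chi-square interval bounds for measurements of dimension 2 at a 95% level (the upstream definition may differ):

import numpy as np
import scipy.stats

K = 100
NIS = scipy.stats.chi2.rvs(2, size=K, random_state=0)  # stand-in NIS values
CINIS = scipy.stats.chi2.interval(0.95, 2)             # (lower, upper)
ratio_in_CI = np.sum(np.less_equal(CINIS[0], NIS)
                     * np.less_equal(NIS, CINIS[1])) / K  # ~0.95 on average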
Example no. 47
def percentile(data, percentiles, weights=None):
    """Compute weighted percentiles.

    If the weights are equal, this is the same as normal percentiles.
    Elements of the data and weights arrays correspond to each other and must
    have equal length.
    If weights is None, this function calls numpy's percentile instead
    (which is faster).

    TODO: re-implementing the normal percentile could be faster
          because it would avoid more variable checks and overheads

    Note: uses Cython code if available.

    INPUTS:
    -------
    data: ndarray[float, ndim=1]
        data points
    percentiles: ndarray[float, ndim=1]
        percentiles to use. (between 0 and 100)

    KEYWORDS:
    --------
    weights: ndarray[float, ndim=1] or None
        Weights of each point in data
        All the weights must be non-negative and the sum must be
        greater than zero.

    OUTPUTS:
    -------
    the weighted percentiles of the data.
    """
    # check if actually weighted percentiles is needed
    if weights is None:
        return np.percentile(data, list(percentiles))
    if np.equal(weights, 1.).all():
        return np.percentile(data, list(percentiles))

    # make sure percentiles are fractions between 0 and 1
    if not np.greater_equal(percentiles, 0.0).all():
        raise ValueError("Percentiles less than 0")
    if not np.less_equal(percentiles, 100.0).all():
        raise ValueError("Percentiles greater than 100")

    #Make sure data is in correct shape
    shape = np.shape(data)
    n = len(data)
    if (len(shape) != 1):
        raise ValueError("wrong data shape, expecting 1d")

    if len(weights) != n:
        print(n, len(weights))
        raise ValueError("weights must be the same shape as data")
    if not np.greater_equal(weights, 0.0).all():
        raise ValueError("Not all weights are non-negative.")

    _data = np.asarray(data, dtype=float)

    if hasattr(percentiles, '__iter__'):
        _p = np.asarray(percentiles, dtype=float) * 0.01
    else:
        _p = np.asarray([percentiles * 0.01], dtype=float)

    _wt = np.asarray(weights, dtype=float)

    if _C_code:
        return c_wp(_data, _wt, _p)
    else:
        len_p = len(_p)
        sd = np.empty(n, dtype=float)
        sw = np.empty(n, dtype=float)
        aw = np.empty(n, dtype=float)
        o = np.empty(len_p, dtype=float)

        i = np.argsort(_data)
        np.take(_data, i, axis=0, out=sd)
        np.take(_wt, i, axis=0, out=sw)
        np.add.accumulate(sw, out=aw)

        if not aw[-1] > 0:
            raise ValueError("Nonpositive weight sum")

        w = (aw - 0.5 * sw) / aw[-1]

        spots = np.searchsorted(w, _p)
        for (pk, s, p) in zip(list(range(len_p)), spots, _p):
            if s == 0:
                o[pk] = sd[0]
            elif s == n:
                o[pk] = sd[n - 1]
            else:
                f1 = (w[s] - p) / (w[s] - w[s - 1])
                f2 = (p - w[s - 1]) / (w[s] - w[s - 1])
                assert (f1 >= 0) and (f2 >= 0) and (f1 <= 1) and (f2 <= 1)
                assert abs(f1 + f2 - 1.0) < 1e-6
                o[pk] = sd[s - 1] * f1 + sd[s] * f2
        return o
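A quick usage check of the function above: with weights omitted it defers to numpy, and a heavy weight drags the weighted median toward that point.

import numpy as np

data = np.array([1., 2., 3., 4., 5.])
print(percentile(data, [50]))                            # [3.]
print(percentile(data, [50], weights=[1, 1, 1, 1, 10]))  # ~[4.64]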
Example no. 48
 def inverse(self, array):
     return np.where(np.less_equal(array, 3), array,
                     np.exp(array + np.log(3) - 3))
Example no. 49
# In[145]:

np.greater_equal(a,b)
# greater than or equal


# In[146]:

np.less(a,b)
# less than


# In[147]:

np.less_equal(a,b)
# less than or equal


# In[148]:

np.equal(a,b)


# In[149]:

np.not_equal(a,b)


# In[150]:
Example no. 50
 def forward(self, array):
     return np.where(np.less_equal(array, 3), array,
                     3 + np.log(array) - np.log(3))
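This forward and the inverse in Example no. 48 form a scale that is linear up to 3 and logarithmic beyond it; stripped of self, the pair round-trips exactly. A standalone sketch:

import numpy as np

def forward(a):
    a = np.asarray(a, dtype=float)
    return np.where(np.less_equal(a, 3), a, 3 + np.log(a) - np.log(3))

def inverse(a):
    a = np.asarray(a, dtype=float)
    return np.where(np.less_equal(a, 3), a, np.exp(a + np.log(3) - 3))

x = np.array([0.5, 3.0, 10.0, 100.0])
assert np.allclose(inverse(forward(x)), x)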
Example no. 51
def leq_close(x1, x2):
    """Return the truth value of (x1 <= x2) OR (x1 is close to x2) element-wise."""
    return np.less_equal(x1, x2) | np.isclose(x1, x2)
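Why the np.isclose term matters: plain less_equal can fail on floating-point round-off even when two values are equal for all practical purposes.

import numpy as np

a = 0.1 + 0.2                  # 0.30000000000000004
print(np.less_equal(a, 0.3))   # False
print(leq_close(a, 0.3))       # True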
Example no. 52
def monkhorst_pack(size):
    """Construct a uniform sampling of k-space of given size."""
    if np.less_equal(size, 0).any():
        raise ValueError('Illegal size: %s' % list(size))
    kpts = np.indices(size).transpose((1, 2, 3, 0)).reshape((-1, 3))
    return (kpts + 0.5) / size - 0.5
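Usage note: size must be three positive integers, and the returned grid is symmetric about the origin. For example:

kpts = monkhorst_pack((2, 2, 2))
print(kpts.shape)              # (8, 3)
print(kpts.min(), kpts.max())  # -0.25 0.25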
Example no. 53
 def binary_op(self, op, rhs1, rhs2, where, args, stacklevel):
     if self.shadow:
         rhs1 = self.runtime.to_eager_array(rhs1,
                                            stacklevel=(stacklevel + 1))
         rhs2 = self.runtime.to_eager_array(rhs2,
                                            stacklevel=(stacklevel + 1))
         if where is not None and isinstance(where, NumPyThunk):
             where = self.runtime.to_eager_array(where,
                                                 stacklevel=(stacklevel +
                                                             1))
     elif self.deferred is None:
         if where is not None and isinstance(where, NumPyThunk):
             self.check_eager_args((stacklevel + 1), rhs1, rhs2, where)
         else:
             self.check_eager_args((stacklevel + 1), rhs1, rhs2)
     if self.deferred is not None:
         self.deferred.binary_op(op,
                                 rhs1,
                                 rhs2,
                                 where,
                                 args,
                                 stacklevel=(stacklevel + 1))
     else:
         if op == NumPyOpCode.ADD:
             np.add(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.LOGICAL_AND:
             np.logical_and(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.DIVIDE:
             np.divide(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.EQUAL:
             np.equal(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.FLOOR_DIVIDE:
             np.floor_divide(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.GREATER_EQUAL:
             np.greater_equal(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.GREATER:
             np.greater(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         # elif op == NumPyOpCode.SHIFT_LEFT:
         #    np.left_shift(rhs1.array, rhs2.array, out=self.array,
         #            where=where if not isinstance(where, EagerArray)
         #                        else where.array)
         # elif op == NumPyOpCode.SHIFT_RIGHT:
         #    np.right_shift(rhs1.array, rhs2.array, out=self.array,
         #            where=where if not isinstance(where, EagerArray)
         #                        else where.array)
         elif op == NumPyOpCode.MOD:
             np.mod(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.MULTIPLY:
             np.multiply(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.LOGICAL_OR:
             np.logical_or(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.POWER:
             np.power(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.SUBTRACT:
             np.subtract(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.LOGICAL_XOR:
             np.logical_xor(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.LESS_EQUAL:
             np.less_equal(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.LESS:
             np.less(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.MAXIMUM:
             np.maximum(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.MINIMUM:
             np.minimum(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         elif op == NumPyOpCode.NOT_EQUAL:
             np.not_equal(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray) else where.array,
             )
         else:
             raise RuntimeError("unsupported binary op " + str(op))
         self.runtime.profile_callsite(stacklevel + 1, False)
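Since every branch above calls a NumPy ufunc with the identical (rhs1, rhs2, out, where) shape, the chain can be collapsed into a lookup table. A sketch of that refactoring (not the actual library code; the string opcodes are stand-ins for the NumPyOpCode members):

import numpy as np

_BINARY_UFUNCS = {
    "ADD": np.add, "SUBTRACT": np.subtract, "MULTIPLY": np.multiply,
    "DIVIDE": np.divide, "FLOOR_DIVIDE": np.floor_divide, "MOD": np.mod,
    "POWER": np.power, "EQUAL": np.equal, "NOT_EQUAL": np.not_equal,
    "LESS": np.less, "LESS_EQUAL": np.less_equal,
    "GREATER": np.greater, "GREATER_EQUAL": np.greater_equal,
    "LOGICAL_AND": np.logical_and, "LOGICAL_OR": np.logical_or,
    "LOGICAL_XOR": np.logical_xor,
    "MAXIMUM": np.maximum, "MINIMUM": np.minimum,
}

def apply_binary(op, lhs, rhs, out, where=True):
    try:
        func = _BINARY_UFUNCS[op]
    except KeyError:
        raise RuntimeError("unsupported binary op " + str(op))
    func(lhs, rhs, out=out, where=where)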
Example no. 54
    def execute( self, args, **kwargs ): 
        op = args[0] 
        if op == 'indices': 
            threshold_mask = None
#            print "Processing computeThresholdRange: %s " % str( args )
            for var_op in args[1:]:  
                var_data, vmin, vmax = self.computeThresholdRange( var_op )               
                if not isNone(var_data):
                    var_mask = numpy.logical_and( numpy.greater_equal( var_data, vmin ), numpy.less_equal( var_data, vmax ) )  
#                    print "MultiVarPointCollection.execute: %s, mask range = %s  " % ( str( args ), str( (vmin, vmax) ) ); sys.stdout.flush()
                    if isNone(threshold_mask):                       
                        self.thresholdTargetType = 'coords' if var_op[0] in [ 'lat', 'lon', 'lev', 'x', 'y', 'z' ] else 'vardata' 
                        threshold_mask = var_mask
                    else:
                        threshold_mask = numpy.logical_and( threshold_mask, var_mask )
            if isNone( threshold_mask ):
                print>>sys.stderr, "Thresholding failed for spec: ", str( args )
                return None, None
            else:
                index_array = numpy.arange( 0, len(threshold_mask) )
                self.selected_index_array = index_array[ threshold_mask ]  
                return vmin, vmax   
        elif op == 'points': 
#            print " subproc: Process points request, args = %s " % str( args ); sys.stdout.flush()
            if args[2] is not None:
                self.setPointHeights( height_var=args[1], z_scale=args[2] )  
        elif op == 'ROI': 
            ROI = args[1]
            self.setROI(ROI)            
        elif op == 'timestep': 
            self.stepTime( **kwargs )  
Example no. 55
    def test_predict_gives_valid_anchors_in_training_mode_first_stage_only(
            self):
        test_graph = tf.Graph()
        with test_graph.as_default():
            model = self._build_model(is_training=True,
                                      first_stage_only=True,
                                      second_stage_batch_size=2)
            batch_size = 2
            height = 10
            width = 12
            input_image_shape = (batch_size, height, width, 3)
            preprocessed_inputs = tf.placeholder(dtype=tf.float32,
                                                 shape=(batch_size, None, None,
                                                        3))
            prediction_dict = model.predict(preprocessed_inputs)

            expected_output_keys = set([
                'rpn_box_predictor_features', 'rpn_features_to_crop',
                'image_shape', 'rpn_box_encodings',
                'rpn_objectness_predictions_with_background', 'anchors'
            ])
            # At training time, anchors that exceed image bounds are pruned.  Thus
            # the `expected_num_anchors` in the above inference mode test is now
            # a strict upper bound on the number of anchors.
            num_anchors_strict_upper_bound = height * width * 3 * 3

            init_op = tf.global_variables_initializer()
            with self.test_session() as sess:
                sess.run(init_op)
                prediction_out = sess.run(prediction_dict,
                                          feed_dict={
                                              preprocessed_inputs:
                                              np.zeros(input_image_shape)
                                          })

                self.assertEqual(set(prediction_out.keys()),
                                 expected_output_keys)
                self.assertAllEqual(prediction_out['image_shape'],
                                    input_image_shape)

                # Check that anchors have less than the upper bound and
                # are clipped to window.
                anchors = prediction_out['anchors']
                self.assertTrue(
                    len(anchors.shape) == 2 and anchors.shape[1] == 4)
                num_anchors_out = anchors.shape[0]
                self.assertTrue(
                    num_anchors_out < num_anchors_strict_upper_bound)

                self.assertTrue(np.all(np.greater_equal(anchors, 0)))
                self.assertTrue(np.all(np.less_equal(anchors[:, 0], height)))
                self.assertTrue(np.all(np.less_equal(anchors[:, 1], width)))
                self.assertTrue(np.all(np.less_equal(anchors[:, 2], height)))
                self.assertTrue(np.all(np.less_equal(anchors[:, 3], width)))

                self.assertAllEqual(prediction_out['rpn_box_encodings'].shape,
                                    (batch_size, num_anchors_out, 4))
                self.assertAllEqual(
                    prediction_out[
                        'rpn_objectness_predictions_with_background'].shape,
                    (batch_size, num_anchors_out, 2))
Example no. 56
def less_equal(x, y):
    """Elementwise x <= y """
    return np.less_equal(x, y)
Example no. 57
 def _read_data(self, data, exact_calcs, no_benefits):
     """
     Read Records data from file or use specified DataFrame as data.
     Specifies exact array depending on boolean value of exact_calcs.
     Set benefits to zero if no_benefits is True; otherwise do nothing.
     """
     # pylint: disable=too-many-statements,too-many-branches
     if Records.INTEGER_VARS is None:
         Records.read_var_info()
     # read specified data
     if isinstance(data, pd.DataFrame):
         taxdf = data
     elif isinstance(data, six.string_types):
         if os.path.isfile(data):
             taxdf = pd.read_csv(data)
         else:
             # cannot call read_egg_ function in unit tests
             taxdf = read_egg_csv(data)  # pragma: no cover
     else:
         msg = 'data is neither a string nor a Pandas DataFrame'
         raise ValueError(msg)
     self.__dim = len(taxdf.index)
     self.__index = taxdf.index
     # create class variables using taxdf column names
     READ_VARS = set()
     self.IGNORED_VARS = set()
     for varname in list(taxdf.columns.values):
         if varname in Records.USABLE_READ_VARS:
             READ_VARS.add(varname)
             if varname in Records.INTEGER_READ_VARS:
                 setattr(self, varname,
                         taxdf[varname].astype(np.int32).values)
             else:
                 setattr(self, varname,
                         taxdf[varname].astype(np.float64).values)
         else:
             self.IGNORED_VARS.add(varname)
     # check that MUST_READ_VARS are all present in taxdf
     if not Records.MUST_READ_VARS.issubset(READ_VARS):
         msg = 'Records data missing one or more MUST_READ_VARS'
         raise ValueError(msg)
     # delete intermediate taxdf object
     del taxdf
     # create other class variables that are set to all zeros
     UNREAD_VARS = Records.USABLE_READ_VARS - READ_VARS
     ZEROED_VARS = Records.CALCULATED_VARS | UNREAD_VARS
     for varname in ZEROED_VARS:
         if varname in Records.INTEGER_VARS:
             setattr(self, varname,
                     np.zeros(self.array_length, dtype=np.int32))
         else:
             setattr(self, varname,
                     np.zeros(self.array_length, dtype=np.float64))
     # check for valid MARS values
     if not np.all(
             np.logical_and(np.greater_equal(self.MARS, 1),
                            np.less_equal(self.MARS, 5))):
         raise ValueError('not all MARS values in [1,5] range')
     # create variables derived from MARS, which is in MUST_READ_VARS
     self.num[:] = np.where(self.MARS == 2, 2, 1)
     self.sep[:] = np.where(self.MARS == 3, 2, 1)
     # check for valid EIC values
     if not np.all(
             np.logical_and(np.greater_equal(self.EIC, 0),
                            np.less_equal(self.EIC, 3))):
         raise ValueError('not all EIC values in [0,3] range')
     # specify value of exact array
     self.exact[:] = np.where(exact_calcs is True, 1, 0)
     # optionally set benefits to zero
     if no_benefits:
         self.housing_ben[:] = np.zeros(self.array_length, dtype=np.float64)
         self.ssi_ben[:] = np.zeros(self.array_length, dtype=np.float64)
         self.snap_ben[:] = np.zeros(self.array_length, dtype=np.float64)
         self.tanf_ben[:] = np.zeros(self.array_length, dtype=np.float64)
         self.vet_ben[:] = np.zeros(self.array_length, dtype=np.float64)
         self.wic_ben[:] = np.zeros(self.array_length, dtype=np.float64)
         self.mcare_ben[:] = np.zeros(self.array_length, dtype=np.float64)
         self.mcaid_ben[:] = np.zeros(self.array_length, dtype=np.float64)
         self.other_ben[:] = np.zeros(self.array_length, dtype=np.float64)
     # delete intermediate variables
     del READ_VARS
     del UNREAD_VARS
     del ZEROED_VARS
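The MARS and EIC checks above use the same closed-interval idiom; factored into a tiny helper (hypothetical, not part of Records), it reads:

import numpy as np

def all_in_range(values, lo, hi):
    values = np.asarray(values)
    return np.all(np.logical_and(np.greater_equal(values, lo),
                                 np.less_equal(values, hi)))

assert all_in_range([1, 2, 5], 1, 5)       # like the MARS check
assert not all_in_range([0, 2, 5], 1, 5)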
Example no. 58
            Pos[s, 0] = -Ls0
            Vel[s, 0] = -Vel[s, 0]
        elif Pos[s, 0] >= Ls0:
            Pos[s, 0] = Ls0
            Vel[s, 0] = -Vel[s, 0]
        elif Pos[s, 1] <= -Ls1:
            Pos[s, 1] = -Ls1
            Vel[s, 1] = -Vel[s, 1]
        elif Pos[s, 1] >= Ls1:
            Pos[s, 1] = Ls1
            Vel[s, 1] = -Vel[s, 1]

    # Create the set of all pairs and the list the colliding spheres
    Rij = Pos - Pos[:, np.newaxis]
    Mag2ij = np.add.reduce(Rij * Rij, -1)  # sphere-to-sphere distances**2
    colliding = np.less_equal(Mag2ij, Dij) - Id  # subtract Id to drop self-pairs on the diagonal
    hitlist = np.sort(np.nonzero(colliding.flat)[0]).tolist()

    # Check to see if the spheres are colliding
    for ij in hitlist:
        s1, s2 = divmod(ij, Nsp)  # decode the spheres pair (s1,s2) colliding
        hitlist.remove(s2 * Nsp +
                       s1)  # remove symmetric (s2,s1) pair from list
        R12 = Pos[s2] - Pos[s1]
        nR12 = np.linalg.norm(R12)
        d12 = Radius[s1] + Radius[s2] - nR12
        tau = R12 / nR12
        DR0 = d12 * tau
        x1 = Mass[s1] / (Mass[s1] + Mass[s2])
        x2 = 1 - x1  # x2 = Mass[s2]/(Mass[s1]+Mass[s2])
        Pos[s1] -= x2 * DR0
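The pair-detection idiom above, isolated: squared center-to-center distances are compared against squared contact distances, the diagonal (each sphere against itself) is removed, and divmod decodes flat indices back into sphere pairs. A self-contained sketch with made-up positions (Dij and Id are assumed to be the squared contact-distance matrix and the identity):

import numpy as np

Pos = np.array([[0., 0.], [0.5, 0.], [5., 5.]])
Radius = np.array([0.4, 0.4, 0.4])
Nsp = len(Pos)
Rij = Pos - Pos[:, np.newaxis]
Mag2ij = np.add.reduce(Rij * Rij, -1)
Dij = (Radius + Radius[:, np.newaxis]) ** 2  # squared contact distances
Id = np.identity(Nsp)
colliding = np.less_equal(Mag2ij, Dij) - Id
hitlist = np.sort(np.nonzero(colliding.flat)[0]).tolist()
for ij in hitlist:
    s1, s2 = divmod(ij, Nsp)  # -> (0, 1) and its mirror (1, 0)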
Example no. 59
def circ_kern(diameter):
    assert diameter % 2
    r = diameter // 2  #int(np.floor(diameter/2))
    mnvals = np.indices((diameter, diameter)) - r
    rads = np.hypot(mnvals[0], mnvals[1])
    return np.less_equal(rads, r).astype(int)
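For example, a diameter-5 kernel:

print(circ_kern(5))
# [[0 0 1 0 0]
#  [0 1 1 1 0]
#  [1 1 1 1 1]
#  [0 1 1 1 0]
#  [0 0 1 0 0]]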
Example no. 60
 def _subset(self, z):
     """
     Tukey's biweight is defined piecewise over the range of z
     """
     z = np.fabs(np.asarray(z))
     return np.less_equal(z, self.c)