Example #1
def test_hyp2f1_real_some_points():
    pts = [
        (1,2,3,0),
        (1./3, 2./3, 5./6, 27./32),
        (1./4, 1./2, 3./4, 80./81),
        (2,-2,-3,3),
        (2,-3,-2,3),
        (2,-1.5,-1.5,3),
        (1,2,3,0),
        (0.7235, -1, -5, 0.3),
        (0.25, 1./3, 2, 0.999),
        (0.25, 1./3, 2, -1),
        (2,3,5,0.99),
        (3./2,-0.5,3,0.99),
        (2,2.5,-3.25,0.999),
        (-8, 18.016500331508873, 10.805295997850628, 0.90875647507000001),
        (-10,900,-10.5,0.99),
        (-10,900,10.5,0.99),
        (-1,2,1,1.0),
        (-1,2,1,-1.0),
        (-3,13,5,1.0),
        (-3,13,5,-1.0),
    ]
    dataset = [p + (float(mpmath.hyp2f1(*p)),) for p in pts]
    dataset = np.array(dataset, dtype=np.float_)

    olderr = np.seterr(invalid='ignore')
    try:
        FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
    finally:
        np.seterr(**olderr)
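Note: the save/ignore/restore pattern above predates np.errstate; the context-manager form below is equivalent and restores the previous error settings automatically on exit. A minimal, self-contained sketch:

import numpy as np

x = np.array([-1.0, 0.0, 4.0])
with np.errstate(invalid='ignore'):   # previous error settings restored on exit
    y = np.sqrt(x)                    # no RuntimeWarning for the negative entry
print(y)                              # [nan  0.  2.]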
Example #2
 def test_testUfuncs1 (self):
     "Test various functions such as sin, cos."
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     self.assertTrue (eq(numpy.cos(x), cos(xm)))
     self.assertTrue (eq(numpy.cosh(x), cosh(xm)))
     self.assertTrue (eq(numpy.sin(x), sin(xm)))
     self.assertTrue (eq(numpy.sinh(x), sinh(xm)))
     self.assertTrue (eq(numpy.tan(x), tan(xm)))
     self.assertTrue (eq(numpy.tanh(x), tanh(xm)))
     olderr = numpy.seterr(divide='ignore', invalid='ignore')
     try:
         self.assertTrue (eq(numpy.sqrt(abs(x)), sqrt(xm)))
         self.assertTrue (eq(numpy.log(abs(x)), log(xm)))
         self.assertTrue (eq(numpy.log10(abs(x)), log10(xm)))
     finally:
         numpy.seterr(**olderr)
     self.assertTrue (eq(numpy.exp(x), exp(xm)))
     self.assertTrue (eq(numpy.arcsin(z), arcsin(zm)))
     self.assertTrue (eq(numpy.arccos(z), arccos(zm)))
     self.assertTrue (eq(numpy.arctan(z), arctan(zm)))
     self.assertTrue (eq(numpy.arctan2(x, y), arctan2(xm, ym)))
     self.assertTrue (eq(numpy.absolute(x), absolute(xm)))
     self.assertTrue (eq(numpy.equal(x, y), equal(xm, ym)))
     self.assertTrue (eq(numpy.not_equal(x, y), not_equal(xm, ym)))
     self.assertTrue (eq(numpy.less(x, y), less(xm, ym)))
     self.assertTrue (eq(numpy.greater(x, y), greater(xm, ym)))
     self.assertTrue (eq(numpy.less_equal(x, y), less_equal(xm, ym)))
     self.assertTrue (eq(numpy.greater_equal(x, y), greater_equal(xm, ym)))
     self.assertTrue (eq(numpy.conjugate(x), conjugate(xm)))
     self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, ym))))
     self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((x, y))))
     self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, y))))
     self.assertTrue (eq(numpy.concatenate((x, y, x)), concatenate((x, ym, x))))
Example #3
def ll(actual, predicted):
    """
    Computes the log likelihood.

    This function computes the log likelihood between two numbers,
    or element-wise between a pair of lists or numpy arrays.

    Parameters
    ----------
    actual : int, float, list of numbers, numpy array
             The ground truth value
    predicted : same type as actual
                The predicted value

    Returns
    -------
    score : double or list of doubles
            The log likelihood error between actual and predicted

    """
    actual = np.array(actual)
    predicted = np.array(predicted)
    err = np.seterr(all='ignore')
    score = -(actual * np.log(predicted) + (1 - actual) * np.log(1 - predicted))
    np.seterr(divide=err['divide'], over=err['over'],
              under=err['under'], invalid=err['invalid'])
    if type(score) == np.ndarray:
        score[np.isnan(score)] = 0
    else:
        if np.isnan(score):
            score = 0
    return score
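A quick usage check of ll as defined above; the values follow directly from the formula:

import numpy as np

print(ll(1, 0.9))              # 0.10536..., i.e. -log(0.9)
print(ll([1, 0], [1.0, 0.0]))  # [0. 0.]; the 0*log(0) NaNs are zeroed out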
Example #4
def get_rule(rule_name):
    global evaluation_method
    global tree_type
    global network_type
    global data_path
    global nb_generations

    multiprocessing = False

    results_path = '../../results/temp/working_results.txt'

    # do not display numpy warnings
    np.seterr('ignore')

    ne.get_datas_from_real_network(data_path,
                                   results_path,
                                   evaluation_method=evaluation_method,
                                   network_type=network_type)

    genome = ga.new_genome(results_path,
                           evaluation_method=evaluation_method,
                           tree_type=tree_type,
                           network_type=network_type)

    return evolve(genome, rule_name, nb_generations=nb_generations,
                  multiprocessing=multiprocessing)
Example #5
def check_that_nr_fit_runs():
    from jds_image_proc.clouds import voxel_downsample
    #from brett2.ros_utils import RvizWrapper    
    #import lfd.registration as lr
    ##import lfd.warping as lw    
    #if rospy.get_name() == "/unnamed":
        #rospy.init_node("test_rigidity", disable_signals=True)
        #from time import sleep
        #sleep(1)
    #rviz = RvizWrapper.create()
    
    pts0 = np.loadtxt("../test/rope_control_points.txt")
    pts1 = np.loadtxt("../test/bad_rope.txt")    
    pts_rigid = voxel_downsample(pts0[:10], .02)
    #lr.Globals.setup()
    np.seterr(all='ignore')
    np.set_printoptions(suppress=True)

    lin_ag, trans_g, w_eg, x_ea = tps.tps_nr_fit_enhanced(pts0, pts1, 0.01, pts_rigid, 0.001, method="newton",plotting=1)
    #lin_ag2, trans_g2, w_ng2 = tps_fit(pts0, pts1, .01, .01)
    #assert np.allclose(w_ng, w_ng2)
    def eval_partial(x_ma):
        return tps_eval(x_ma, lin_ag, trans_g, w_eg, x_ea) 
    #lr.plot_orig_and_warped_clouds(eval_partial, pts0, pts1, res=.008)
    #handles = lw.draw_grid(rviz, eval_partial, pts0.min(axis=0), pts0.max(axis=0), 'base_footprint')

    grads = tps.tps_grad(pts_rigid, lin_ag, trans_g, w_eg, x_ea)
Example #6
def top_eigenvector(A,niter=1000,force_iteration=False):
    '''
    assuming the LEFT invariant subspace of A corresponding to the LEFT
    eigenvalue of largest modulus has geometric multiplicity of 1 (trivial
    Jordan block), returns the vector at the intersection of that eigenspace and
    the simplex

    A should probably be a ROW-stochastic matrix

    probably uses power iteration
    '''
    n = A.shape[0]
    np.seterr(invalid='raise',divide='raise')
    if n <= 25 and not force_iteration:
        x = np.repeat(1./n,n)
        x = np.linalg.matrix_power(A.T,niter).dot(x)
        x /= x.sum()
        return x
    else:
        x1 = np.repeat(1./n,n)
        x2 = x1.copy()
        for itr in range(niter):
            np.dot(A.T,x1,out=x2)
            x2 /= x2.sum()
            x1,x2 = x2,x1
            if np.linalg.norm(x1-x2) < 1e-8:
                break
        return x1
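A hypothetical usage sketch for top_eigenvector as defined above; the matrix here is row-stochastic, so the result is the stationary distribution of the corresponding Markov chain:

import numpy as np

A = np.array([[0.9, 0.1],
              [0.2, 0.8]])
pi = top_eigenvector(A)
print(pi)   # approximately [0.6667, 0.3333], since pi = pi @ A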
Example #7
    def test_power_complex(self):
        x = np.array([1 + 2j, 2 + 3j, 3 + 4j])
        assert_equal(x ** 0, [1.0, 1.0, 1.0])
        assert_equal(x ** 1, x)
        assert_almost_equal(x ** 2, [-3 + 4j, -5 + 12j, -7 + 24j])
        assert_almost_equal(x ** 3, [(1 + 2j) ** 3, (2 + 3j) ** 3, (3 + 4j) ** 3])
        assert_almost_equal(x ** 4, [(1 + 2j) ** 4, (2 + 3j) ** 4, (3 + 4j) ** 4])
        assert_almost_equal(x ** (-1), [1 / (1 + 2j), 1 / (2 + 3j), 1 / (3 + 4j)])
        assert_almost_equal(x ** (-2), [1 / (1 + 2j) ** 2, 1 / (2 + 3j) ** 2, 1 / (3 + 4j) ** 2])
        assert_almost_equal(x ** (-3), [(-11 + 2j) / 125, (-46 - 9j) / 2197, (-117 - 44j) / 15625])
        assert_almost_equal(x ** (0.5), [ncu.sqrt(1 + 2j), ncu.sqrt(2 + 3j), ncu.sqrt(3 + 4j)])
        norm = 1.0 / ((x ** 14)[0])
        assert_almost_equal(
            x ** 14 * norm, [i * norm for i in [-76443 + 16124j, 23161315 + 58317492j, 5583548873 + 2465133864j]]
        )

        # Ticket #836
        def assert_complex_equal(x, y):
            assert_array_equal(x.real, y.real)
            assert_array_equal(x.imag, y.imag)

        for z in [complex(0, np.inf), complex(1, np.inf)]:
            err = np.seterr(invalid="ignore")
            z = np.array([z], dtype=np.complex_)
            try:
                assert_complex_equal(z ** 1, z)
                assert_complex_equal(z ** 2, z * z)
                assert_complex_equal(z ** 3, z * z * z)
            finally:
                np.seterr(**err)
Example #8
 def test_spectrogram(self):
     """
     Create spectrogram plotting examples in tests/output directory.
     """
     # Create dynamic test_files to avoid dependencies of other modules.
     # set specific seed value such that random numbers are reproducible
     np.random.seed(815)
     head = {
         'network': 'BW', 'station': 'BGLD',
         'starttime': UTCDateTime(2007, 12, 31, 23, 59, 59, 915000),
         'sampling_rate': 200.0, 'channel': 'EHE'}
     tr = Trace(data=np.random.randint(0, 1000, 824), header=head)
     st = Stream([tr])
     # 1 - using log=True
     reltol = 1
     if MATPLOTLIB_VERSION < [1, 2, 0]:
         reltol = 2000
     with ImageComparison(self.path, 'spectrogram_log.png',
                          reltol=reltol) as ic:
         with warnings.catch_warnings(record=True):
             warnings.resetwarnings()
             np_err = np.seterr(all="warn")
             spectrogram.spectrogram(st[0].data, log=True, outfile=ic.name,
                                     samp_rate=st[0].stats.sampling_rate,
                                     show=False)
             np.seterr(**np_err)
     # 2 - using log=False
     reltol = 1
     if MATPLOTLIB_VERSION < [1, 3, 0]:
         reltol = 3
     with ImageComparison(self.path, 'spectrogram.png',
                          reltol=reltol) as ic:
         spectrogram.spectrogram(st[0].data, log=False, outfile=ic.name,
                                 samp_rate=st[0].stats.sampling_rate,
                                 show=False)
Example #9
def d(im1, im2, im1Norm=0, im2Norm=0):
	i1_s = im1.shape
	i2_s = im2.shape
	assert (i1_s == i2_s), "frames do not have same dimensions"
	if (len(i1_s) == 2):
		total = 0
		for x in range(i1_s[0]):
			for y in range(i1_s[1]):
				product = im1[x][y] * im2[x][y]
				total += product
		existing_np_err = np.seterr(all='raise')
		try:
			result = np.sqrt(im1Norm + im2Norm - 2*total)
		except FloatingPointError as fpe:
			print("Error: %s" % str(fpe))
			print("im1Norm: %s, im2Norm: %s, total: %s" % (str(im1Norm),
				str(im2Norm),
				str(total)))
			raise fpe
		np.seterr(**existing_np_err)
		return result
	elif (len(i1_s) == 3):
		distance = 0
		for x in range(i1_s[0]):
			for y in range(i1_s[1]):
				for z in range(i1_s[2]):
					if im1[x][y][z] == im2[x][y][z]:
						continue
					distance += abs(im1[x][y][z] - im2[x][y][z])**2
		return (distance / (i1_s[0] * i1_s[1] * i1_s[2]))**(1/2)
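The 2-D branch above spells out a dot product with Python loops; below is a sketch of a vectorized equivalent (d_fast is a hypothetical helper, same inputs and behavior) that lets numpy do the summation:

import numpy as np

def d_fast(im1, im2, im1Norm=0, im2Norm=0):
    total = np.sum(im1 * im2)        # replaces the nested x/y loops
    with np.errstate(all='raise'):   # same raise-on-floating-point-error behavior
        return np.sqrt(im1Norm + im2Norm - 2 * total)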
Example #10
def gaussian_highpass(img, high_cutoff, pad=1):
    ''' Apply a Gaussian highpass filter to an image
    
    .. seealso:: :py:func:`gaussian_highpass_kernel`
    
    :Parameters:
    
    img : array
          Image
    high_cutoff : float
                 High-frequency cutoff
    pad : int
          Padding
    
    :Returns:
    
    out : array
          Filtered image
    '''
    
    ctype = numpy.complex128 if img.dtype == numpy.float64 else numpy.complex64  # == (not 'is'): a dtype instance is never identical to the scalar type
    if pad > 1:
        shape = img.shape
        img = pad_image(img.astype(ctype), (int(img.shape[0]*pad), int(img.shape[1]*pad)), 'e')
    else: img = img.astype(ctype)
    state = numpy.geterr()
    numpy.seterr(all='ignore')
    img = filter_image(img, gaussian_highpass_kernel(img.shape, high_cutoff, img.dtype), pad)
    numpy.seterr(**state)
    if pad > 1: img = depad(img, shape)
    return img
Example #11
def main():
	import sys
	numpy.seterr(invalid='ignore')

	# Nina's C++ code
	f = open('connect_lsd.cpp')
	code = f.read()
	f.close()

	# Nina's support C code
	f = open('estimate_multiband_variability.cpp')
	support_code = f.read()
	f.close()

	#####################

	# get filename from argument
	# make a log logfile (it's for all; later, write filename to logfile when file is processed)
	# if cell logfile doesn't exist: create it

	lightcurves = numpy.genfromtxt('files_in/lightcurves.txt',
	                               names='filterid,mjd_obs,ucalmag,magErr,obj_id',
	                               dtype='|S1,f8,f8,f8,|S19', skip_header=1)

	test = structure_function(lightcurves, code, support_code)
Example #12
def _test_factory(test, dtype=np.double):
    """Boost test"""
    olderr = np.seterr(all='ignore')
    try:
        test.check(dtype=dtype)
    finally:
        np.seterr(**olderr)
Example #13
    def __init__(self, layers, l2_decay=0.001, debug=False, learning_rate=0.001):
        mapping = {"input": lambda x: InputLayer(x),
                   "fc": lambda x: FullyConnectedLayer(x),
                   "convolution": lambda x: ConvolutionLayer(x),
                   "pool": lambda x: PoolingLayer(x),
                   "squaredloss": lambda x: SquaredLossLayer(x),
                   "softmax": lambda x: SoftmaxLossLayer(x),
                   "relu": lambda x: ReLuLayer(x),
                   "dropout": lambda x: DropoutLayer(x)}
        self.layers = []
        self.l2_decay = l2_decay
        self.debug = debug
        self.learning_rate = learning_rate
        prev = None

        np.seterr(all="warn")
        
        #print str(layers)

        for layer in layers:
            layer["input_shape"] = layer.get("input_shape", None) or prev.output_shape
            layer["l2_decay"] = layer.get("l2_decay", None) or self.l2_decay
            layer["debug"] = self.debug
            layer = mapping[layer["type"]](layer)
            self.layers.append(layer)
            prev = layer
Example #14
    def xy2lonlat(self, x, y):
        """Calculate lon,lat (scalars/arrays) from given x,y in own projection.
        """
        if self.projected is True:
            if self.proj.is_latlong():
                return x, y
            else:
                if 'ob_tran' in self.proj4:
                    logging.info('NB: Converting degrees to radians ' +
                                 'due to ob_tran srs')
                    x = np.radians(np.array(x))
                    y = np.radians(np.array(y))
                return self.proj(x, y, inverse=True)
        else:
            np.seterr(invalid='ignore')  # Disable warnings for nan-values
            y = np.atleast_1d(np.array(y))
            x = np.atleast_1d(np.array(x))

            # NB: mask coordinates outside domain
            x[x < self.xmin] = np.nan
            x[x > self.xmax] = np.nan
            y[y < self.ymin] = np.nan
            y[y > self.ymax] = np.nan

            lon = map_coordinates(self.lon, [y, x], order=1,
                                  cval=np.nan, mode='nearest')
            lat = map_coordinates(self.lat, [y, x], order=1,
                                  cval=np.nan, mode='nearest')
            return (lon, lat)
Example #15
    def updatePixels(self, tlc, shape, props, **pixelBlocks):
        r1 = np.array(pixelBlocks['r1_pixels'], dtype='f4', copy=False)
        r2 = np.array(pixelBlocks['r2_pixels'], dtype='f4', copy=False)

        np.seterr(divide='ignore')
        pixelBlocks['output_pixels'] = self.op(r1, r2).astype(props['pixelType'], copy=False)
        return pixelBlocks
Example #16
 def test_sh_legendre(self):
     olderr = np.seterr(all='ignore')
     try:
         self.check_poly(orth.eval_sh_legendre, orth.sh_legendre,
                         param_ranges=[], x_range=[0, 1])
     finally:
         np.seterr(**olderr)
Example #17
    def check_poly(self, func, cls, param_ranges=[], x_range=[], nn=10,
                   nparam=10, nx=10, rtol=1e-8):
        np.random.seed(1234)

        dataset = []
        for n in np.arange(nn):
            params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
            params = np.asarray(params).T
            if not param_ranges:
                params = [0]
            for p in params:
                if param_ranges:
                    p = (n,) + tuple(p)
                else:
                    p = (n,)
                x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
                x[0] = x_range[0] # always include domain start point
                x[1] = x_range[1] # always include domain end point
                poly = np.poly1d(cls(*p))
                z = np.c_[np.tile(p, (nx,1)), x, poly(x)]
                dataset.append(z)

        dataset = np.concatenate(dataset, axis=0)

        def polyfunc(*p):
            p = (p[0].astype(int),) + p[1:]
            return func(*p)

        olderr = np.seterr(all='raise')
        try:
            ds = FuncData(polyfunc, dataset, range(len(param_ranges)+2), -1,
                          rtol=rtol)
            ds.check()
        finally:
            np.seterr(**olderr)
Example #18
def segregationIndex(synapseGroups, skeleton_id, weightOutputs=True):
    nout = np.zeros(len(synapseGroups))
    ngrp = np.zeros(len(synapseGroups))

    PRE = Relation.objects.get(project=pid, relation_name="presynaptic_to").value_list("id")[0]

    if weightOutputs:
        nTargets = countTargets(skeleton_id)
        for group in synapseGroups.values():
            for i, synDirection in enumerate(group.relations):
                if synDirection == PRE:
                    nout[group] += nTargets[group.connector_ids[i]]
                    ngrp[group] += nTargets[group.connector_ids[i]]
                else:
                    ngrp[group] += 1
    else:
        for group in synapseGroups.values():
            for synDirection in group.relations:
                if synDirection == PRE:
                    nout[group] += 1
            ngrp[group] = len(group.relations)

    frac = np.divide(nout, ngrp)

    np.seterr(all="ignore")
    h_partial = ngrp * (frac * np.log(frac) + (1 - frac) * np.log(1 - frac))
    h_partial[np.isnan(h_partial)] = 0
    frac_unseg = sum(nout) / sum(ngrp)
    h_unseg = sum(ngrp) * (frac_unseg * np.log(frac_unseg) + (1 - frac_unseg) * np.log(1 - frac_unseg))
    return 1 - sum(h_partial) / h_unseg
Example #19
 def setUp(self):
     # Most generic way to get the actual data directory.
     self.data_dir = os.path.join(os.path.dirname(os.path.abspath(
         inspect.getfile(inspect.currentframe()))), "data")
     self.image_dir = os.path.join(os.path.dirname(__file__), 'images')
     self.nperr = np.geterr()
     np.seterr(all='ignore')
Example #20
 def test_call(self):
     poly = []
     for n in range(5):
         poly.extend([x.strip() for x in
             ("""
             orth.jacobi(%(n)d,0.3,0.9)
             orth.sh_jacobi(%(n)d,0.3,0.9)
             orth.genlaguerre(%(n)d,0.3)
             orth.laguerre(%(n)d)
             orth.hermite(%(n)d)
             orth.hermitenorm(%(n)d)
             orth.gegenbauer(%(n)d,0.3)
             orth.chebyt(%(n)d)
             orth.chebyu(%(n)d)
             orth.chebyc(%(n)d)
             orth.chebys(%(n)d)
             orth.sh_chebyt(%(n)d)
             orth.sh_chebyu(%(n)d)
             orth.legendre(%(n)d)
             orth.sh_legendre(%(n)d)
             """ % dict(n=n)).split()
         ])
     olderr = np.seterr(all='ignore')
     try:
         for pstr in poly:
             p = eval(pstr)
             assert_almost_equal(p(0.315), np.poly1d(p)(0.315), err_msg=pstr)
     finally:
         np.seterr(**olderr)
Example #21
def run():
    d = 'default: %(default)s'
    b = dict(action='store_true')  # boolean
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('cls_file')
    parser.add_argument('-o', '--output-plot', default='plane.pdf')
    parser.add_argument(
        '-r', '--regions', help=_regions_help, nargs='+', default=[])
    parser.add_argument('-i', '--interpolation', default='gauss',
                        choices=planeplt.interpolators)
    parser.add_argument('-e', '--external', help=_ext_help, **b)
    dowhat = parser.add_mutually_exclusive_group()
    dowhat.add_argument('-b', '--band-region')
    dowhat.add_argument('--best', help=_best_help, **b)
    dowhat.add_argument('--best-regions', help=_regs_help, **b)
    dowhat.add_argument('--heatmap', **b)
    dowhat.add_argument('--noband', **b)
    dowhat.add_argument('--clean', help=_clean_help, **b)
    dowhat.add_argument('--ul', help=_ul_help, **b)
    dowhat.add_argument('--mono', help=_mono_help)
    parser.add_argument(
        '-a', '--add-limits', help=_limits_help, nargs=2,
        metavar=('FILE', 'ENTRY'))
    parser.add_argument('-d','--do-hepdata', **b)
    args = parser.parse_args(sys.argv[1:])
    helvetify()
    np.seterr(all='raise')
    if any([args.best, args.best_regions, args.clean, args.ul, args.mono]):
        _max_exclusion_plane(args, show_regions=args.best_regions,
                             clean=args.clean, ul=args.ul)
    elif args.band_region:
        _make_exclusion_plane(args)
    else:
        _multi_exclusion_plane(args)
Example #22
def emmEstimate( obs, phi_matrix, elem_size= np.longdouble ):
   """
   obs        : observed seq (multidimensions)
   phi_matrix : matrix from forward_backward algorithm

   Estimate trans and emm
   trans: transition prob
   emm  : emission prob
   """

   #import pdb; pdb.set_trace()
   #obs_matrix = np.matrix(zip(*obs))
   obs_matrix = np.matrix(obs)

   # estimating the emission (weighted emission) for obs of 1
   emm_estimated1 = np.matrix((np.matrix(obs_matrix) * np.matrix(phi_matrix.T)).T, dtype=elem_size)

   # normalize the matrix
   # sum_phi is the sum over all the states given the whole obs.sequence
   sum_phi = np.matrix(np.matrix(phi_matrix).sum(axis=1), dtype=elem_size)
   np.seterr(invalid='ignore')
   emm_estimated1 = np.matrix(emm_estimated1/sum_phi, dtype=elem_size)
   #emm_estimated1 = np.nan_to_num(emm_estimated1)

   # estimated emission for 0
   emm_estimated0 = np.matrix(1 - np.matrix(emm_estimated1), dtype=elem_size)
   # combine emission for 0 and 1 together
   emm_estimated = np.matrix(np.concatenate((emm_estimated0, emm_estimated1), axis = 1), dtype=elem_size)
   #emm_estimated = np.nan_to_num(emm_estimated)

   return emm_estimated
Example #23
def hyp0f1(v, z):
    r"""Confluent hypergeometric limit function 0F1.

    Parameters
    ----------
    v, z : array_like
        Input values.

    Returns
    -------
    hyp0f1 : ndarray
        The confluent hypergeometric limit function.

    Notes
    -----
    This function is defined as:

    .. math:: _0F_1(v,z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.

    It's also the limit as q -> infinity of ``1F1(q;v;z/q)``, and satisfies
    the differential equation :math:`zf''(z) + vf'(z) = f(z)`.
    """
    v = atleast_1d(v)
    z = atleast_1d(z)
    v, z = np.broadcast_arrays(v, z)
    arg = 2 * sqrt(abs(z))
    old_err = np.seterr(all='ignore')  # for z=0, a<1 and num=inf, next lines
    num = where(z.real >= 0, iv(v - 1, arg), jv(v - 1, arg))
    den = abs(z)**((v - 1.0) / 2)
    num *= gamma(v)
    np.seterr(**old_err)
    num[z == 0] = 1
    den[z == 0] = 1
    return num / den
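A quick sanity check of hyp0f1 as defined above: the series gives _0F_1(v, 0) = 1 for any v, which is exactly what the num[z == 0] = 1 and den[z == 0] = 1 lines enforce:

import numpy as np

print(hyp0f1(np.array([1.5, 3.0]), np.array([0.0, 0.0])))   # [1. 1.]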
Example #24
def VisualizeFit(Xval, pval, epsilon, mu, sigma):
    """
    Visualize the fitted data
    :param Xval: the validation data set (only the first two columns are used)
    :param pval: A vector containing probabilities for example data in Xval
    :param epsilon: probability threshold below which points are flagged as anomalies
    :param mu: Estimate for the mean, using the training data
    :param sigma: Estimate for the variance, using the training data
    :return:
    """
    np.seterr(over='ignore')
    x1, x2 = np.meshgrid(np.arange(0, 35, 0.5), np.arange(0, 35, 0.5))
    z1 = np.asarray(x1).reshape(-1)

    mat = np.zeros([len(z1), 2])
    mat[:, 0] = np.asarray(x1).reshape(-1)
    mat[:, 1] = np.asarray(x2).reshape(-1)

    Z = MultivariateGaussian(mat, mu, sigma)
    Z = np.reshape(Z, np.shape(x1))

    x = [10 ** x for x in np.arange(-20, 0, 3)]

    plt.figure(1)

    plt.scatter(Xval[:, 0], Xval[:, 1], c=None, s=25, alpha=None, marker="+")

    points = np.where(pval < epsilon)
    plt.scatter(Xval[:, 0][points], Xval[:, 1][points], s=50, marker='+', color='red')
    plt.contour(x1, x2, Z, x)

    plt.show()
Example #25
def VisualizeData(X, mu, sigma):
    """
    Plot the data in X along with the fit.
    Note that X is a two-column matrix
    :param X:
    :param mu:
    :param sigma:
    :return:
    """
    np.seterr(over='ignore')
    x1, x2 = np.meshgrid(np.arange(0, 35, 0.5), np.arange(0, 35, 0.5))
    z1 = np.asarray(x1).reshape(-1)

    mat = np.zeros([len(z1), 2])
    mat[:, 0] = np.asarray(x1).reshape(-1)
    mat[:, 1] = np.asarray(x2).reshape(-1)

    Z = MultivariateGaussian(mat, mu, sigma)
    Z = np.reshape(Z, np.shape(x1))

    x = [10 ** x for x in np.arange(-20, 0, 3)]

    plt.figure(1)

    plt.scatter(X[:, 0], X[:, 1], c=None, s=25, alpha=None, marker="+")

    plt.contour(x1, x2, Z, x)
    plt.show()
Example #26
def regression(X, Y):
    num_iter = 1
    ss = ShuffleSplit(X.shape[0], n_iter=num_iter, test_size=0.2, indices=True, random_state=0)

    for train_index, test_index in ss:
        X_test, Y_test = X[test_index], Y[test_index]
        X_train, Y_train = X[train_index], Y[train_index]

        ordinary_least_squares = linear_model.LinearRegression()
        ridge_regression = linear_model.Ridge(alpha=1)
        bayesian_regression = linear_model.BayesianRidge()
        svr = SVR(C=1.0, epsilon=0.2, kernel="linear")
        knn_reg = neighbors.KNeighborsRegressor(5, weights="distance")
        regrs = [ordinary_least_squares, ridge_regression, bayesian_regression, svr, knn_reg]

        for regr in regrs:
            regr.fit(X_train, Y_train)
            predict = regr.predict(X_test)
            np.seterr(invalid="ignore")

            true, pred = Y_test, predict
            MAE = mean_absolute_error(np.array(true), np.array(pred))
            MSE = mean_squared_error(np.array(true), np.array(pred))
            Pearson_r = pearsonr(np.array(true), np.array(pred))

            decimal = 4
            print("|%s|%s|%s|" % (round(MAE, decimal), round(MSE, decimal), round(Pearson_r[0], decimal)))
Example #27
    def perform(self, node, inp, out):
        rstate, size = inp
        o_rstate, o_sample = out
        numpy_version = numpy.__version__.split('.')
        if not self.warned_numpy_version and int(numpy_version[0]) <= 1 and int(numpy_version[1]) < 3:
            print("Warning: you must use numpy version 1.3.0 or higher with the python version of this op. Otherwise numpy leaks memory.")
            self.warned_numpy_version = True

        n_elements = 1

        rstate = numpy.asarray(rstate)  # bring state from GPU if necessary
        if not self.inplace:
            rstate = rstate.copy()

        for s in size:
            n_elements *= s

        n_streams, _ = rstate.shape

        rval = numpy.zeros(n_elements, dtype=self.output_type.dtype)

        err_orig = numpy.seterr(over='ignore')
        try:
            for i in range(n_elements):
                sample = mrg_next_value(rstate[i % n_streams],
                                        rstate[i % n_streams])
                rval[i] = sample
        finally:
            numpy.seterr(**err_orig)

        o_rstate[0] = node.outputs[0].type.filter(rstate)  # send to GPU if necessary
        o_sample[0] = node.outputs[1].type.filter(rval.reshape(size))  # send to GPU if necessary
Example #28
def rp_gumbel_original(p_zero, loc, scale, flvol, max_return_period=1e9):
    """
    Transforms a unique, or array of flood volumes into the belonging return
    periods, according to gumbel parameters (belonging to non-zero part of the
    distribution) and a zero probability
    Inputs:
        p_zero:        probability that flood volume is zero
        loc:           Gumbel location parameter (of non-zero part of distribution)
        scale:         Gumbel scale parameter (of non-zero part of distribution)
        flvol:         Flood volume that will be transformed to return period
        max_return_period: maximum return period considered. This maximum is needed to prevent that floating point
                        precision becomes a problem (default: 1e9)
    This function is copied from: https://repos.deltares.nl/repos/Hydrology/trunk/GLOFRIS/src/rp_bias_corr.py
    """
    
    np.seterr(divide='ignore')
    np.seterr(invalid='ignore')
    max_p = 1-1./max_return_period
    max_p_residual = np.minimum(np.maximum((max_p-np.float64(p_zero))/(1-np.float64(p_zero)), 0), 1)
    max_reduced_variate = -np.log(-np.log(np.float64(max_p_residual)))
    # compute the gumbel reduced variate belonging to the Gumbel distribution (excluding any zero-values)
    # make sure that the reduced variate does not exceed the one, resembling the 1,000,000 year return period
    reduced_variate = np.minimum((flvol-loc)/scale, max_reduced_variate)
    # reduced_variate = (flvol-loc)/scale
    # transform the reduced variate into a probability (residual after removing the zero volume probability)
    p_residual = np.minimum(np.maximum(np.exp(-np.exp(-np.float64(reduced_variate))), 0), 1)
    # transform from non-zero only distribution to zero-included distribution
    p = np.minimum(np.maximum(p_residual*(1-p_zero) + p_zero, p_zero), max_p)  # Never larger than max_p
    # transform into a return period    
    return_period = 1./(1-p)
    test_p = p == 1    
    return return_period, test_p
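A hypothetical usage sketch for rp_gumbel_original as defined above; with p_zero=0, loc=0, scale=1, a flood volume equal to the Gumbel quantile -log(-log(0.99)) maps back to a 100-year return period:

import numpy as np

flvol = -np.log(-np.log(0.99))          # about 4.600
rp, capped = rp_gumbel_original(0.0, 0.0, 1.0, np.array([flvol]))
print(rp)       # about [100.]
print(capped)   # [False]; True would mean p hit 1 within floating point precision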
Example #29
 def log_likelihood(self,x):
     out = np.zeros_like(x, dtype=np.double)
     nanidx = np.isnan(x)
     err = np.seterr(divide='ignore')
     out[~nanidx] = np.log(self.weights)[list(x[~nanidx])]  # log(0) can happen, no warning
     np.seterr(**err)
     return out
Example #30
 def setUpClass(cls):
     # Suppress logging messages
     tp.quiet()
     # Catch attempts to set values on an inadvertent copy of a Pandas object.
     make_pandas_strict()
     # Make numpy strict
     np.seterr('raise')
Example #31
                        default=[0.0, 0.0],
                        help="Min,Max values on yscale (AUTO if undefined).")
    parser.add_argument(
        "--ylimerr",
        nargs=2,
        type=float,
        default=[0.0, 0.0],
        help="Min,Max values on yscale for error plots (AUTO if undefined).")
    parser.add_argument("--eps",
                        action='store_true',
                        help="Create eps plots instead of png.")
    args = parser.parse_args()
    ext = '.png' if not args.eps else '.eps'
    # 'suppress' disables scientific notation for small numbers.
    np.set_printoptions(precision=4, linewidth=130, suppress=True)
    np.seterr(all='raise')
    pyplot.rc('savefig', dpi=300)
    pyplot.rc('font', size=8)
    pyplot.rc('mathtext', default='regular')  # Don't use italics for mathmode.

    # Error checking on the given method.
    min_opts = ('nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'l-bfgs-b',
                'tnc', 'cobyla', 'slsqp', 'dogleg', 'trust-ncg')
    if args.method.lower() in min_opts: method = args.method
    else:
        U.print_error('Invalid minimization method: ' + args.method, False)
        print('Options are:', min_opts)
        exit(1)

    # Find the model module.
    model = MU.get_model(args.model)
Example #32
# -*- coding: utf-8 -*-

import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
import matplotlib.pyplot as plt

from deeptools.correlation import Correlation
from deeptools.parserCommon import writableFile
from deeptools._version import __version__

old_settings = np.seterr(all='ignore')


def parse_arguments(args=None):
    basic_args = plot_correlation_args()
    heatmap_parser = heatmap_options()
    scatter_parser = scatterplot_options()
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
Tool for the analysis and visualization of sample correlations based on the output of multiBamSummary or
multiBigwigSummary. Pearson or Spearman methods are available to compute correlation
coefficients. Results can be saved as multiple
scatter plots depicting the pairwise correlations or as a clustered heatmap,
where the colors represent the correlation coefficients and the clusters are joined using the Nearest Point Algorithm (also known as "single").
Optionally, the values can be saved as tables, too.
Example #33
import sys
sys.path.append('../')
import numpy as np
from pyBKT.generate import synthetic_data, random_model_uni
from pyBKT.fit import EM_fit
from utils import crossvalidate, accuracy, rmse, auc, check_data, data_helper, ktidem_skills_ct
import copy
np.seterr(divide='ignore', invalid='ignore')
num_fit_initializations = 20
seed, folds = 2020, 5  #can customize to anything, keep same seed and # folds over all trials
results = {}  #create dictionary to store accuracy and rmse results

df, skill_list, student_count, data_count, template_count = ktidem_skills_ct.find_skills(
)

ct_default = {
    'order_id': 'Row',
    'skill_name': 'KC(SubSkills)',
    'correct': 'Correct First Attempt',
    'user_id': 'Anon Student Id',
    'multiguess': 'Problem Name',
}

for i in range(12):
    skill_name = skill_list[i]
    results[skill_name] = [student_count[i], data_count[i], template_count[i]]

    data = data_helper.convert_data(df, skill_name, defaults=ct_default)
    check_data.check_data(data)
    results[skill_name].append(
        (np.sum(data["data"][0]) - len(data["data"][0])) /
Example #34
    def contains(self, mouseevent):
        """
        Test whether the mouse event occurred on the line.  The pick
        radius determines the precision of the location test (usually
        within five points of the value).  Use
        :meth:`~matplotlib.lines.Line2D.get_pickradius` or
        :meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
        modify it.

        Returns *True* if any values are within the radius along with
        ``{'ind': pointlist}``, where *pointlist* is the set of points
        within the radius.

        TODO: sort returned indices by distance
        """
        if six.callable(self._contains):
            return self._contains(self, mouseevent)

        if not is_numlike(self.pickradius):
            raise ValueError("pick radius should be a distance")

        # Make sure we have data to plot
        if self._invalidy or self._invalidx:
            self.recache()
        if len(self._xy) == 0:
            return False, {}

        # Convert points to pixels
        transformed_path = self._get_transformed_path()
        path, affine = transformed_path.get_transformed_path_and_affine()
        path = affine.transform_path(path)
        xy = path.vertices
        xt = xy[:, 0]
        yt = xy[:, 1]

        # Convert pick radius from points to pixels
        if self.figure is None:
            warnings.warn('no figure set when check if mouse is on line')
            pixels = self.pickradius
        else:
            pixels = self.figure.dpi / 72. * self.pickradius

        # the math involved in checking for containment (here and inside of
        # segment_hits) assumes that it is OK to overflow.  In case the
        # application has set the error flags such that an exception is raised
        # on overflow, we temporarily set the appropriate error flags here and
        # set them back when we are finished.
        olderrflags = np.seterr(all='ignore')
        try:
            # Check for collision
            if self._linestyle in ['None', None]:
                # If no line, return the nearby point(s)
                d = (xt - mouseevent.x)**2 + (yt - mouseevent.y)**2
                ind, = np.nonzero(np.less_equal(d, pixels**2))
            else:
                # If line, return the nearby segment(s)
                ind = segment_hits(mouseevent.x, mouseevent.y, xt, yt, pixels)
        finally:
            np.seterr(**olderrflags)

        ind += self.ind_offset

        # Debugging message
        if False and self._label != '':
            print("Checking line", self._label, "at", mouseevent.x,
                  mouseevent.y)
            print('xt', xt)
            print('yt', yt)
            #print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.
            print('ind', ind)

        # Return the point(s) within radius
        return len(ind) > 0, dict(ind=ind)
Example #35
#    Birmingham, AL 35235 USA
#
#    email: [email protected]
#
#    License: BSD-style (see LICENSE.txt in main source directory)

import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
    sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)],
                                 '..'))

import pyeq3

import numpy

numpy.seterr(all='ignore')

import pyeq3.Model_3D_BaseClass


class PowerA(pyeq3.Model_3D_BaseClass.Model_3D_BaseClass):

    _baseName = "Power A"
    _HTML = 'z = a * (x<sup>b</sup> + y<sup>c</sup>)'
    _leftSideHTML = 'z'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
Example #36
from cProjgamma import sample_beta_fc, logdgamma
from scipy.stats import invwishart
from numpy.random import gamma, uniform, normal, beta
from scipy.linalg import cho_factor, cho_solve, cholesky
from scipy.special import loggamma
from numpy.random import choice
from collections import namedtuple
from itertools import repeat
from math import ceil, log, lgamma, exp
import numpy as np
np.seterr(under='ignore', over='raise')
# import multiprocessing as mp
import sqlite3 as sql
import pandas as pd
import data as dm
import cUtility as cu
import os
import mpi4py as mpi

import pt
from pointcloud import localcov
from data import Data

GammaPrior = namedtuple('GammaPrior', 'a b')
DirichletPrior = namedtuple('DirichletPrior', 'a')
NormalPrior = namedtuple('NormalPrior', 'mu SCho SInv')
InvWishartPrior = namedtuple('InvWishartPrior', 'nu psi')

POOL_SIZE = 8

Example #37
import zipfile
import copy
import shutil
import gdal

# additional modules
from datetime import datetime, timedelta
import pytz
import pickle
from skimage import morphology, transform
from scipy import ndimage

# CoastSat modules
from coastsat import SDS_preprocess, SDS_tools, gdal_merge

np.seterr(all='ignore') # raise/ignore divisions by 0 and nans

# Main function to download images from the EarthEngine server
def retrieve_images(inputs):
    """
    Downloads all images from Landsat 5, Landsat 7, Landsat 8 and Sentinel-2
    covering the area of interest and acquired between the specified dates.
    The downloaded images are in .TIF format and organised in subfolders, divided
    by satellite mission. The bands are also subdivided by pixel resolution.

    KV WRL 2018

    Arguments:
    -----------
    inputs: dict with the following keys
        'sitename': str
Example #38
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.  You may obtain a copy
# of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations under
# the License.

import numpy as np
from scipy import special  # Only used for halfspace solution

np.seterr(all='ignore')

__all__ = [
    'wavenumber', 'angle_factor', 'fullspace', 'greenfct', 'reflections',
    'fields', 'halfspace'
]

# Wavenumber-frequency domain kernel


def wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH, zetaV, lambd,
               ab, xdirect, msrc, mrec, use_ne_eval):
    r"""Calculate wavenumber domain solution.

    Return the wavenumber domain solutions ``PJ0``, ``PJ1``, and ``PJ0b``,
    which have to be transformed with a Hankel transform to the frequency
Example #39
    _divide = config.numpy.seterr_divide
if config.numpy.seterr_over == 'None':
    _over = None
else:
    _over = config.numpy.seterr_over
if config.numpy.seterr_under == 'None':
    _under = None
else:
    _under = config.numpy.seterr_under
if config.numpy.seterr_invalid == 'None':
    _invalid = None
else:
    _invalid = config.numpy.seterr_invalid
numpy.seterr(all=_all,
             divide=_divide,
             over=_over,
             under=_under,
             invalid=_invalid)
del _all, _divide, _over, _under, _invalid

# This is defined here because it is designed to work across symbolic
#   datatypes (Sparse and Tensor)


def dot(l, r):
    """Return a symbolic matrix/dot product between l and r """
    rval = NotImplemented
    e0, e1 = None, None

    if rval == NotImplemented and hasattr(l, '__dot__'):
        try:
Example #40
import os
import numpy
import matplotlib
import matplotlib.cm as cm
import matplotlib.font_manager
import re
import scipy
import matplotlib.pyplot as plt
import argparse

from collections import defaultdict
from scipy import stats
from re import sub

numpy.seterr(all='raise')

parser=argparse.ArgumentParser()
parser.add_argument('--training_mode', required=True, type = str, help = 'Specify the name of the training type, which should also correspond to the first part of the folder name')
parser.add_argument('--make_plots', required=False, action='store_true')
parser.add_argument('--bert_layer', required=False, type=str, default=12)
args = parser.parse_args()

#from matplotlib import rcParams
#rcParams['font.family'] = 'sans-serif'
#rcParams['font.sans-serif'] = ['Helvetica']
cwd=os.getcwd()

big='{}/{}_test_novels'.format(cwd, args.training_mode)

if args.make_plots:
    if 'bert' in args.training_mode:
Example #41
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

import ensembles  # pylint: disable=g-bad-import-order

np.seterr(invalid="ignore")


def get_place_cell_ensembles(env_size, neurons_seed, targets_type,
                             lstm_init_type, n_pc, pc_scale):
    """Create the ensembles for the Place cells."""
    place_cell_ensembles = [
        ensembles.PlaceCellEnsemble(n,
                                    stdev=s,
                                    pos_min=-env_size / 2.0,
                                    pos_max=env_size / 2.0,
                                    seed=neurons_seed,
                                    soft_targets=targets_type,
                                    soft_init=lstm_init_type)
        for n, s in zip(n_pc, pc_scale)
    ]
Example #42
def mix_eam(files, kind, method, f=[], rep_ab=[], alphas=[], betas=[]):
    """
    mix eam alloy files data set and compute the interspecies pair potential part using the 
    mean geometric value from each pure species 
    
    Parameters
    ----------
    files : array of strings
            Contains all the files to merge and mix
    kind : string
            kind of eam. Supported eam/alloy, eam/fs
    method : string, {geometric, arithmetic, weighted, fitted}
        Method used to mix the pair interaction terms. The geometric,
        arithmetic, and weighted arithmetic averages are available. The
        weighted arithmetic method uses the electron density function values
        of atoms :code:`a` and :code:`b` to weight the pair potential between
        species :code:`a` and :code:`b`, :code:`rep_ab = 0.5(fb/fa * rep_a +
        fa/fb * rep_b)`, see [1]. The fitted method is to be used if
        :code:`rep_ab` has been previously fitted and is passed as the
        :code:`rep_ab` kwarg.
    f : np.array 
        fitted density term (for FS eam style)
    rep_ab : np.array 
        fitted rep_ab term
    alphas : array
        fitted alpha values for the fine tuned mixing. 
        :code:`rep_ab = alpha_a*rep_a+alpha_b*rep_b`
    betas : array 
        fitted values for the fine tuned mixing. 
        :code:`f_ab = beta_00*rep_a+beta_01*rep_b`
        :code:`f_ba = beta_10*rep_a+beta_11*rep_b`

    Returns
    -------
    sources : string
        Source information or comment line for the file header
    parameters_mix: EAMParameters
        EAM potential parameters
    F_ : array_like
        contain the tabulated values of the embedded functions
        shape = (nb elements, nb elements, nb of data points)
    f_ : array_like
        contain the tabulated values of the density functions
        shape = (nb elements, nb elements, nb of data points)
    rep_ : array_like
        contain the tabulated values of pair potential
        shape = (nb elements, nb elements, nb of data points)

    References
    ----------

    1. X. W. Zhou, R. A. Johnson, and H. N. G. Wadley, Phys. Rev. B, 69, 144113 (2004)
    """

    nb_at = 0
    # Counting elements and repartition and select smallest tabulated set Nrho*drho // Nr*dr
    Nrho, drho, Nr, dr, cutoff = np.empty((len(files))), np.empty(
        (len(files))), np.empty((len(files))), np.empty(
            (len(files))), np.empty((len(files)))
    sources = ""
    if kind == "eam/alloy":
        for i, f_eam in enumerate(files):
            source, parameters, F, f, rep = read_eam(f_eam, kind="eam/alloy")
            sources += source
            source += " "
            nb_at += len(parameters[0])
            Nrho[i] = parameters[5]
            drho[i] = parameters[7]
            cutoff[i] = parameters[9]
            Nr[i] = parameters[6]
            dr[i] = parameters[8]
        # --- #
        max_cutoff = cutoff.argmax()
        max_prod = (Nrho * drho).argmax()
        max_prod_r = (Nr * dr).argmax()
        atomic_numbers, atomic_masses, lattice_parameters, crystal_structures, elements = np.empty(
            0), np.empty(0), np.empty(0), np.empty(0).astype(
                np.str), np.empty(0).astype(np.str)
        Nr_ = Nr[max_prod_r]
        dr_ = ((Nr * dr).max()) / Nr_
        Nrho_ = Nrho[max_prod]
        drho_ = ((Nrho * drho).max()) / Nrho_

        if Nr_ > 2000:
            Nr_ = 2000  # reduce
            dr_ = ((Nr * dr).max()) / Nr_
        if Nrho_ > 2000:
            Nrho_ = 2000  # reduce
            drho_ = ((Nrho * drho).max()) / Nrho_
        F_, f_, rep_ = np.empty((nb_at, Nrho_)), np.empty(
            (nb_at, Nr_)), np.empty((nb_at, nb_at, Nr_))
        at = 0
        for i, f_eam in enumerate(files):
            source, parameters, F, f, rep = read_eam(f_eam, kind="eam/alloy")
            elements = np.append(elements, parameters[0])
            atomic_numbers = np.append(atomic_numbers, parameters[1])
            atomic_masses = np.append(atomic_masses, parameters[2])
            lattice_parameters = np.append(lattice_parameters, parameters[3])
            crystal_structures = np.append(crystal_structures, parameters[4])
            for j in range(len(parameters[0])):
                F_[at, :] = interpolate.InterpolatedUnivariateSpline(
                    np.linspace(0, Nrho[i] * drho[i], Nrho[i]),
                    F[j, :])(np.linspace(0, Nrho_ * drho_, Nrho_))
                f_[at, :] = interpolate.InterpolatedUnivariateSpline(
                    np.linspace(0, Nr[i] * dr[i], Nr[i]),
                    f[j, :])(np.linspace(0, Nr_ * dr_, Nr_))
                rep_[at, at, :] = interpolate.InterpolatedUnivariateSpline(
                    np.linspace(0, Nr[i] * dr[i], Nr[i]),
                    rep[j, j, :])(np.linspace(0, Nr_ * dr_, Nr_))
                at += 1
        # mixing repulsive part
        old_err_state = np.seterr(divide='raise')
        ignored_states = np.seterr(**old_err_state)
        for i in range(nb_at):
            for j in range(nb_at):
                if j < i:
                    if method == "geometric":
                        rep_[i, j, :] = (rep_[i, i, :] * rep_[j, j, :])**0.5
                    if method == "arithmetic":
                        if alphas:
                            rep_[i, j, :] = alphas[i] * rep_[
                                i, i, :] + alphas[j] * rep_[j, j, :]
                        else:
                            rep_[i,
                                 j, :] = 0.5 * (rep_[i, i, :] + rep_[j, j, :])
                    if method == "weighted":
                        rep_[i, j, :] = 0.5 * (
                            np.divide(f_[j, :], f_[i, :]) * rep_[i, i, :] +
                            np.divide(f_[i, :], f_[j, :]) * rep_[j, j, :])
                    if method == "fitted":
                        rep_ab[np.isnan(rep_ab)] = 0
                        rep_ab[np.isinf(rep_ab)] = 0
                        rep_[i,
                             j, :] = interpolate.InterpolatedUnivariateSpline(
                                 np.linspace(0, max(Nr * dr), rep_ab.shape[0]),
                                 rep_ab)(np.linspace(0, Nr_ * dr_, Nr_))
                    rep_[i, j, :][np.isnan(rep_[i, j, :])] = 0
                    rep_[i, j, :][np.isinf(rep_[i, j, :])] = 0
    elif kind == "eam/fs":
        for i, f_eam in enumerate(files):
            source, parameters, F, f, rep = read_eam(f_eam, kind="eam/alloy")
            sources += source
            source += " "
            nb_at += len(parameters[0])
            Nrho[i] = parameters[5]
            drho[i] = parameters[7]
            cutoff[i] = parameters[9]
            Nr[i] = parameters[6]
            dr[i] = parameters[8]
        # --- #
        max_cutoff = cutoff.argmax()
        max_prod = (Nrho * drho).argmax()
        max_prod_r = (Nr * dr).argmax()
        atomic_numbers, atomic_masses, lattice_parameters, crystal_structures, elements = np.empty(
            0), np.empty(0), np.empty(0), np.empty(0).astype(
                np.str), np.empty(0).astype(np.str)
        Nr_ = Nr[max_prod_r]
        dr_ = ((Nr * dr).max()) / Nr_
        Nrho_ = Nrho[max_prod]
        drho_ = ((Nrho * drho).max()) / Nrho_

        if Nr_ > 2000:
            Nr_ = 2000  # reduce
            dr_ = ((Nr * dr).max()) / Nr_
        if Nrho_ > 2000:
            Nrho_ = 2000  # reduce
            drho_ = ((Nrho * drho).max()) / Nrho_
        F_, f_, rep_ = np.empty((nb_at, Nrho_)), np.empty(
            (nb_at, nb_at, Nr_)), np.empty((nb_at, nb_at, Nr_))
        at = 0
        for i, f_eam in enumerate(files):
            source, parameters, F, f, rep = read_eam(f_eam, kind="eam/alloy")
            elements = np.append(elements, parameters[0])
            atomic_numbers = np.append(atomic_numbers, parameters[1])
            atomic_masses = np.append(atomic_masses, parameters[2])
            lattice_parameters = np.append(lattice_parameters, parameters[3])
            crystal_structures = np.append(crystal_structures, parameters[4])
            for j in range(len(parameters[0])):
                F_[at, :] = interpolate.InterpolatedUnivariateSpline(
                    np.linspace(0, Nrho[i] * drho[i], Nrho[i]),
                    F[j, :])(np.linspace(0, Nrho_ * drho_, Nrho_))
                f_[at, at, :] = interpolate.InterpolatedUnivariateSpline(
                    np.linspace(0, Nr[i] * dr[i], Nr[i]),
                    f[j, :])(np.linspace(0, Nr_ * dr_, Nr_))
                rep_[at, at, :] = interpolate.InterpolatedUnivariateSpline(
                    np.linspace(0, Nr[i] * dr[i], Nr[i]),
                    rep[j, j, :])(np.linspace(0, Nr_ * dr_, Nr_))
                at += 1
        # mixing density part
        old_err_state = np.seterr(divide='raise')
        ignored_states = np.seterr(**old_err_state)
        for i in range(nb_at):
            for j in range(nb_at):
                if i != j:
                    if method == "geometric":
                        f_[i, j, :] = (f_[i, i, :] * f_[j, j, :])**0.5
                    if method == "arithmetic":
                        if betas.any():
                            f_[i, j, :] = betas[i, i] * f_[i, i, :] + betas[
                                i, j] * f_[j, j, :]
                        else:
                            f_[i, j, :] = 0.5 * (f_[i, i, :] + f_[j, j, :])
                    if method == "fitted":
                        f_ab[np.isnan(f_ab)] = 0
                        f_ab[np.isinf(f_ab)] = 0
                        f_[i, j, :] = interpolate.InterpolatedUnivariateSpline(
                            np.linspace(0, max(Nr * dr), rep_ab.shape[0]),
                            rep_ab)(np.linspace(0, Nr_ * dr_, Nr_))
                    f_[i, j, :][np.isnan(f_[i, j, :])] = 0
                    f_[i, j, :][np.isinf(f_[i, j, :])] = 0
        # mixing repulsive part
        for i in range(nb_at):
            for j in range(nb_at):
                if j < i:
                    if method == "geometric":
                        rep_[i, j, :] = (rep_[i, i, :] * rep_[j, j, :])**0.5
                    if method == "arithmetic":
                        if alphas:
                            rep_[i, j, :] = alphas[i] * rep_[
                                i, i, :] + alphas[j] * rep_[j, j, :]
                        else:
                            rep_[i,
                                 j, :] = 0.5 * (rep_[i, i, :] + rep_[j, j, :])
                    if method == "fitted":
                        rep_ab[np.isnan(rep_ab)] = 0
                        rep_ab[np.isinf(rep_ab)] = 0
                        rep_[i,
                             j, :] = interpolate.InterpolatedUnivariateSpline(
                                 np.linspace(0, max(Nr * dr), rep_ab.shape[0]),
                                 rep_ab)(np.linspace(0, Nr_ * dr_, Nr_))
                    rep_[i, j, :][np.isnan(rep_[i, j, :])] = 0
                    rep_[i, j, :][np.isinf(rep_[i, j, :])] = 0
    else:
        raise ValueError(f"EAM kind {kind} is not supported")

    parameters_mix = EAMParameters(elements, atomic_numbers, atomic_masses,
                                   lattice_parameters, crystal_structures,
                                   Nrho_, Nr_, drho_, dr_, cutoff[max_cutoff])
    return sources, parameters_mix, F_, f_, rep_
Example #43
import json
from variable_classifiers.base_runner import Runner
from datahandler import data_import as di
import numpy as np
from web.report_generator import generate_error_report
import os
import sys
from datahandler.helpers import import_regexes3, get_rule_properties
from stats.stat_gen import get_failures
from datahandler.preprocessors import convert_repeated_data_to_sublist
import random
import string
from util.SpecialException import SpecialException

np_orig_err_settings = np.seterr(divide='raise',
                                 over='raise',
                                 under='raise',
                                 invalid='raise')
orig_stdout = sys.stdout
f = open(os.devnull, 'w')
sys.stdout = f
template_directory = os.path.join('web', 'templates')
available_funcs = {}
cwd = os.getcwd()


def exposed_function(func):
    available_funcs[getattr(func, '__name__')] = func


'''
####
Example #44
from active_particles.init import get_env, slurm_output, mkdir
from active_particles.dat import Dat, Gsd
from active_particles.maths import normalise1D, amplogwidth

from os import getcwd
from os import environ as envvar
from os.path import join as joinpath

import sys

from math import ceil

import pickle

import numpy as np
np.seterr(divide='ignore')

import matplotlib as mpl
if not(get_env('SHOW', default=False, vartype=bool)):
	mpl.use('Agg')	# avoids crash if launching without display
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize as ColorsNormalise
from matplotlib.cm import ScalarMappable
from mpl_toolkits.axes_grid1 import make_axes_locatable

from datetime import datetime

from collections import OrderedDict

import subprocess
Example #45
 def setUp(self):
     self.image_dir = os.path.join(os.path.dirname(__file__), 'images')
     self.nperr = np.geterr()
     np.seterr(all='ignore')
Example #46
from __future__ import division
import numpy as np
np.seterr(invalid='raise')
from matplotlib import pyplot as plt
import copy

from pybasicbayes import models, distributions
from pybasicbayes.util.text import progprint_xrange

alpha_0 = 5.0
obs_hypparams = dict(mu_0=np.zeros(2),
                     alphas_0=2 * np.ones(2),
                     betas_0=np.ones(2),
                     nus_0=0.1 * np.ones(2))

priormodel = models.Mixture(alpha_0=alpha_0,
                            components=[
                                distributions.DiagonalGaussian(**obs_hypparams)
                                for itr in range(30)
                            ])

data, _ = priormodel.generate(500)
data2, _ = priormodel.generate(500)

del priormodel

posteriormodel = models.Mixture(
    alpha_0=alpha_0,
    components=[
        distributions.DiagonalGaussian(**obs_hypparams) for itr in range(30)
    ])
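
# Assumed continuation (typical pybasicbayes usage, not shown in this
# excerpt): feed the generated data to the posterior model and Gibbs-sample.
posteriormodel.add_data(data)
posteriormodel.add_data(data2)
for itr in progprint_xrange(100):
    posteriormodel.resample_model()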
Example #47
0
"""
Generates simple DNA structures.
"""

import base
try:
    import numpy as np
except ImportError:
    import mynumpy as np
import math
import sys
import utils

# Suppress the RuntimeWarning about invalid division raised at the line dir /= np.sqrt(np.dot(dir, dir))
try:
    np.seterr(invalid='ignore')
except Exception:
    pass
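
# A narrower alternative (illustrative, not part of the project): scope the
# suppression to the offending statement with np.errstate instead of
# changing the global error state at import time.
_direction = np.array([0.0, 0.0, 0.0])
with np.errstate(invalid='ignore'):
    _direction /= np.sqrt(np.dot(_direction, _direction))  # nan, but no warning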

BP = "bp"
DEGREES = "degrees"
DEG = "degrees"
RAD = "radiants"
RADIANTS = "radiants"
BASE_PAIRS = "bp"


class StrandGenerator(object):
    """
    Strand generator object.
    """
Example #48
0
 def tearDown(self):
     np.seterr(**self.nperr)
Example #49
0
def tune(model_loader: typing.Tuple[str, str, tuple, dict],
         dataset_loader: typing.Tuple[str, str, tuple, dict],
         loss_loader: typing.Tuple[str, str, tuple, dict],
         accuracy_style: str,
         folder: str, cores: int,
         settings: HyperparameterSettings,
         store_up_to: AnalysisSettings,
         logger: logging.Logger = None):
    r"""Finds the optimal learning rate and batch size for the specified model
    on the specified dataset trained with the given loss. Stores the following
    information:

    .. code:: none

        folder/
            final.json
                {'lr_start': float, 'lr_end': float, 'batch_size': float,
                 'cycle_size_epochs': int, 'epochs': int}
            misc.json
                Variables that went into the final output. Typically selected
                via heuristics, constants, or come from the hyperparameter
                settings. Some may be deduced from the numpy array files
                directly

                {
                    'initial_batch_size': int,
                    'initial_cycle_time': int,
                    'initial_min_lr': float,
                    'initial_max_lr': float,
                    'initial_lr_num_to_val': int,
                    'initial_lr_num_trials': int,
                    'initial_lr_window_size': int,
                    'initial_lr_sweep_result_min': float,
                    'initial_lr_sweep_result_max': float,
                    'second_min_lr': float,
                    'second_max_lr': float
                }

            lr_vs_perf.npz
                lrs=np.ndarray[number of batches]
                perfs=np.ndarray[trials, number of batches]
                smoothed_perfs=np.ndarray[trials, number of batches]
                lse_smoothed_perfs=np.ndarray[trials, number of batches]
                perf_derivs=np.ndarray[trials, number_of_batches]
                smoothed_perf_derivs=np.ndarray[trials, number of batches]
                mean_smoothed_perf_derivs=np.ndarray[number of batches]
                lse_smoothed_perf_then_derivs=np.ndarray[number of batches]
                    lse = log sum exp. When there are many trials, the mean
                    gets overly pessimistic from bad initializations, so the
                    LSE is more stable. However, we can't take the LSE of the
                    smoothed derivatives, because then the derivatives tend
                    to be positive everywhere; so we smooth first, then take
                    the LSE, then differentiate
                lse_smoothed_perf_then_derivs_then_smooth=np.ndarray[
                    number of batches]. The smoothed variant of the previous
                lr_range=np.ndarray[2]
                    min, max for the good range of learning rates
            bs_vs_perf.npz  (bs=batch_size)
                Where a single batch is tried multiple times, we take the
                mean over those times to ensure bss contains only unique
                values and hence can be treated like lrs

                bss=np.ndarray[number of batches]
                perfs=np.ndarray[trials, number of batches]
                smoothed_perfs=np.ndarray[number of batches]
                lse_smoothed_perfs=np.ndarray[number of batches]
                perf_derivs=np.ndarray[trials, number_of_batches]
                smoothed_perf_derivs=np.ndarray[trials, number of batches]
                mean_smoothed_perf_derivs=np.ndarray[number of batches]
                lse_smoothed_perf_then_derivs=np.ndarray[number of batches]
                bs_range=np.ndarray[2]
                    min, max for the good range of batch sizes
            lr_vs_perf2.npz
                only stored if settings.rescan_lr_after_bs. looks exactly
                like lr_vs_perf.npz, except these runs are performed with
                the newly selected batch size
            bs_sampled.npz
                only stored if settings.batch_pts > 0

                bss=np.ndarray[num bs attempted]
                final=np.ndarray[num bs attempted, trials]
                    final performance for batch size i for each trial
                lse_final=np.ndarray[num bs attempted]
                    final logsumexp performance for each batch size, argmax
                    is the selected batch size. If you want this to sit
                    nicely below the maximum, subtract log(trials); note
                    that this does not affect the argmax

                raw_i=np.ndarray[trials, number of batches]
                    only if store_up_to.hparam_selection_specific_imgs,
                    same for the *_raw_i

                    i is a sampled batch size and raw_i[t, j] is the
                    performance of the model after iteration j for
                    batch size i on trial t.
                smoothed_raw_i=np.ndarray[trials, number of batches]
                lse_smoothed_raw_i=np.ndarray[number of batches]


    :param model_loader: describes which module and corresponding attribute can
        be passed what arguments and keyword arguments to produce the
        nn.Module with a random initialization which can be trained

        .. code:: python

            model_loader = ('torch.nn', 'Linear', (20, 10),
                            {'bias': True})

    :param dataset_loader: describes which module and corresponding attribute
        can be passed what arguments and keyword arguments to produce the
        training dataset and validation dataset.
    :param loss_loader: describes which module and corresponding attribute can
        be passed what arguments and keyword arguments to produce the nn.Module
        that converts (y_pred, y) to a scalar which should be minimized
    :param folder: where to save the output to
    :param cores: how many cores to use; 1 for just the main process
    :param settings: the settings to use to tune the learning rate and batch
        size
    :param store_up_to: the information stored should be at least what is
        required to produce this analysis
    """
    if logger is None:
        logger = logging.getLogger(__name__)
    os.makedirs(folder)

    train_set, _ = ignite_simple.utils.invoke(dataset_loader)

    logger.info('Performing initial learning rate sweep...')
    init_batch_size = 64
    init_cycle_time = int(np.clip(300000 // len(train_set), 2, 10) * 2)
    lr_sweep_epochs = settings.lr_sweep_len
    if isinstance(lr_sweep_epochs, int):
        lr_sweep_epochs = lr_sweep_epochs / len(train_set)
    lr_sweep_epochs = int(lr_sweep_epochs)

    if isinstance(settings.warmup_pts, float):
        warmup_pts = int(len(train_set) * settings.warmup_pts)
    else:
        warmup_pts = settings.warmup_pts
    warmup_batch = settings.warmup_batch
    warmup_lr = settings.warmup_lr

    lr_min, lr_max, lr_initial_window_size, lr_initial_trials = (
        _select_lr_from(
            model_loader, dataset_loader, loss_loader, accuracy_style,
            os.path.join(folder, 'lr_vs_perf.npz'), cores, settings,
            store_up_to, logger, lr_sweep_epochs * 2, init_batch_size,
            settings.lr_start, settings.lr_end, settings.lr_width_only_gradients,
            warmup_lr, warmup_batch, warmup_pts
        )
    )
    initial_lr_sweep_result_min, initial_lr_sweep_result_max = lr_min, lr_max
    initial_lr_num_to_val = min(NUM_TO_VAL_MAX, len(train_set))

    logger.info('Performing initial batch size sweep...')

    # The trick is that increasing the batch size requires a corresponding
    # increase in learning rate. We don't want the lr range to enter into
    # this beyond accounting for that scaling, as otherwise these results
    # would be even muddier than they already are

    lr_avg_over_batch = ((lr_min + lr_max) / 2) / init_batch_size

    bs_sweep_lr_min = lr_avg_over_batch * settings.batch_start
    bs_sweep_lr_max = lr_avg_over_batch * settings.batch_end

    result = _run_and_collate(
        _batch_vs_perf, {
            'model_loader': model_loader,
            'dataset_loader': dataset_loader,
            'loss_loader': loss_loader,
            'accuracy_style': accuracy_style,
            'batch_start': settings.batch_start,
            'batch_end': settings.batch_end,
            'lr_start': bs_sweep_lr_min,
            'lr_end': bs_sweep_lr_max,
            'cycle_time_epochs': init_cycle_time
        }, cores, settings.batch_rn_min_inits
    )

    logger.debug('Organizing and interpreting batch size sweep...')

    bss = result['bss'][0]
    bs_perfs = result['perfs']
    if np.sum(np.isnan(bs_perfs)) > 0:
        logger.debug('Batch size sweep exploded on some initializations')
        logger.debug('Forcibly enabling second LR sweep')
        settings.rescan_lr_after_bs = True

        bs_perfs[np.isnan(bs_perfs)] = 0

    bs_sweep_trials = int(bs_perfs.shape[0])

    window_size = smooth_window_size(bs_perfs.shape[1])

    smoothed_bs_perf = scipy.signal.savgol_filter(
        bs_perfs, window_size, 1
    )
    old_settings = np.seterr(under='ignore')
    lse_smoothed_bs_perf = scipy.special.logsumexp(
        smoothed_bs_perf, axis=0
    )
    np.seterr(**old_settings)
    lse_smoothed_bs_perf_then_derivs = np.gradient(
        lse_smoothed_bs_perf, axis=0)

    bs_perf_derivs = np.gradient(bs_perfs, axis=-1)
    smoothed_bs_perf_derivs = scipy.signal.savgol_filter(
        bs_perfs, window_size, 1, deriv=1)

    mean_smoothed_bs_perf_derivs = smoothed_bs_perf_derivs.mean(0)

    bs_min, bs_max = find_with_derivs(bss, lse_smoothed_bs_perf_then_derivs)
    bs_min, bs_max = int(bs_min), int(bs_max)

    logger.info('Batch size range: [%s, %s) (found from %s trials)',
                bs_min, bs_max, bs_perfs.shape[0])

    np.savez_compressed(
        os.path.join(folder, 'bs_vs_perf.npz'),
        bss=bss, perfs=bs_perfs,
        perf_derivs=bs_perf_derivs,
        smoothed_perfs=smoothed_bs_perf,
        smoothed_perf_derivs=smoothed_bs_perf_derivs,
        mean_smoothed_perf_derivs=mean_smoothed_bs_perf_derivs,
        lse_smoothed_perf_then_derivs=lse_smoothed_bs_perf_then_derivs,
        bs_range=np.array([bs_min, bs_max]))

    if settings.batch_pts > 1:
        batch_size, batch_pts_checked, num_batch_loops = _select_batch_size_from(
            model_loader, dataset_loader, loss_loader, accuracy_style, folder,
            cores, settings, store_up_to, logger, init_cycle_time, bss,
            lse_smoothed_bs_perf_then_derivs, bs_min, bs_max,
            lr_min / init_batch_size, lr_max / init_batch_size)
    else:
        batch_size = (bs_min + bs_max) // 2
        batch_pts_checked = []
        num_batch_loops = -1
        logger.info('Choosing mean batch size: %s', batch_size)

    if settings.rescan_lr_after_bs and batch_size != init_batch_size:
        logger.info('Finding learning rate range on new batch size...')
        second_min_lr = (settings.lr_start / init_batch_size) * batch_size
        second_max_lr = (settings.lr_end / init_batch_size) * batch_size
        lr_min, lr_max, second_lr_window_size, second_lr_num_trials = _select_lr_from(
            model_loader, dataset_loader, loss_loader, accuracy_style,
            os.path.join(folder, 'lr_vs_perf2.npz'), cores, settings,
            store_up_to, logger, lr_sweep_epochs * 2, batch_size,  # use the new batch size
            second_min_lr, second_max_lr, settings.lr_width_only_gradients,
            warmup_lr, warmup_batch, warmup_pts
        )
    else:
        second_min_lr = float('nan')
        second_max_lr = float('nan')
        second_lr_window_size = float('nan')
        second_lr_num_trials = float('nan')
        lr_min = (lr_min / init_batch_size) * batch_size
        lr_max = (lr_max / init_batch_size) * batch_size

    with open(os.path.join(folder, 'final.json'), 'w') as outfile:
        json.dump({'lr_start': lr_min, 'lr_end': lr_max,
                   'batch_size': batch_size,
                   'cycle_size_epochs': init_cycle_time,
                   'epochs': init_cycle_time * 4}, outfile)

    with open(os.path.join(folder, 'misc.json'), 'w') as outfile:
        json.dump(
            {
                'initial_batch_size': init_batch_size,
                'initial_cycle_time': init_cycle_time,
                'initial_lr_sweep_epochs': lr_sweep_epochs,
                'initial_min_lr': settings.lr_start,
                'initial_max_lr': settings.lr_end,
                'initial_lr_num_to_val': initial_lr_num_to_val,
                'initial_lr_num_trials': lr_initial_trials,
                'initial_lr_window_size': lr_initial_window_size,
                'initial_lr_sweep_result_min': initial_lr_sweep_result_min,
                'initial_lr_sweep_result_max': initial_lr_sweep_result_max,
                'initial_avg_lr': (initial_lr_sweep_result_min + initial_lr_sweep_result_max) / 2,
                'initial_min_batch': settings.batch_start,
                'initial_max_batch': settings.batch_end,
                'initial_batch_num_to_val': initial_lr_num_to_val,
                'initial_batch_num_trials': bs_sweep_trials,
                'batch_sweep_result_min': bs_min,
                'batch_sweep_result_max': bs_max,
                'batch_sweep_result': batch_size,
                'batch_sweep_num_pts': len(batch_pts_checked),
                'batch_sweep_pts_list': list(int(i) for i in batch_pts_checked),
                'batch_sweep_trials_each': num_batch_loops,
                'second_min_lr': second_min_lr,
                'second_max_lr': second_max_lr,
                'second_lr_num_trials': second_lr_num_trials,
                'second_lr_window_size': second_lr_window_size,
                'lr_sweep_result_min': lr_min,
                'lr_sweep_result_max': lr_max,
                'warmup_pts': warmup_pts,
                'warmup_lr': warmup_lr,
                'warmup_batch': warmup_batch,
                'lr_width_only_gradients': settings.lr_width_only_gradients
            },
            outfile
        )

    logger.debug('Tuning completed successfully')
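
# Minimal sketch of the smooth-then-LSE-then-differentiate pipeline the
# docstring above motivates (my own illustration on synthetic data):
import numpy as np
import scipy.signal
import scipy.special

perfs = np.random.rand(5, 101)                       # [trials, batches]
smoothed = scipy.signal.savgol_filter(perfs, 11, 1)  # smooth each trial
lse = scipy.special.logsumexp(smoothed, axis=0)      # collapse trials stably
derivs = np.gradient(lse)                            # differentiate last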
Example #50
0
import argparse
import csv
from ...transcripts.transcriptcomputer import TranscriptComputer
from .. import to_gff
from ...loci import Transcript, Gene
from ...parsers.GFF import GFF3
import numpy
from collections import namedtuple, Counter
from ...utilities.log_utils import create_default_logger
from collections import defaultdict
import sklearn.utils.extmath

__author__ = "Luca Venturini"

# pylint: disable=E1101
numpy.seterr(all="ignore")  # Suppress warnings
numpy.warnings.filterwarnings("ignore")
# pylint: enable=E1101


def itemize(counter):
    """
    Convert a counter into a 2-dimensional numpy array.
    :param counter: the counter to transform
    :type counter: dict
    :return: Numpy array of the counter
    :rtype: array
    """

    if len(counter) == 0:
        return numpy.array([[], []])  # Empty 2-dimensional array
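    # Plausible completion (the original snippet is truncated here): unzip
    # the counter's (key, count) pairs into two parallel rows.
    return numpy.array(list(zip(*sorted(counter.items()))))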
Example #51
0
def _select_lr_from(model_loader, dataset_loader, loss_loader,
                    accuracy_style, outfile, cores, settings,
                    store_up_to, logger, cycle_time_epochs,
                    batch_size, lr_start, lr_end,
                    grads_width_only,
                    warmup_lr, warmup_batch, warmup_pts) -> typing.Tuple[float, float, int, int]:
    result = _run_and_collate(
        _lr_vs_perf, {
            'model_loader': model_loader,
            'dataset_loader': dataset_loader,
            'loss_loader': loss_loader,
            'accuracy_style': accuracy_style,
            'lr_start': lr_start,
            'lr_end': lr_end,
            'batch_size': batch_size,
            'cycle_time_epochs': cycle_time_epochs,
            'warmup_lr': warmup_lr,
            'warmup_batch': warmup_batch,
            'warmup_pts': warmup_pts
        }, cores, settings.lr_min_inits
    )

    logger.debug('Organizing and interpreting learning rate sweep...')

    lrs = result['lrs']
    lr_perfs = result['perfs']
    if np.isnan(lrs.sum()):
        clip_at = np.isnan(lrs.sum(0)).argmax()
        if clip_at > 0:
            new_lr_end = lrs[0, clip_at - 1]
        else:
            new_lr_end = (lr_start + 0.01 * (lr_end - lr_start))

        clip_at //= 2
        new_lr_end = max(lr_start + 0.05 * (lr_end - lr_start),
                         lr_start + 0.5 * (new_lr_end - lr_start))

        if new_lr_end < lr_start + 0.1 * (lr_end - lr_start):
            logger.debug(
                'Got too many nans, resweeping with lr range reduced to '
                + f'{lr_start}/{new_lr_end}')
            return _select_lr_from(
                model_loader, dataset_loader, loss_loader, accuracy_style,
                outfile, cores, settings, store_up_to, logger,
                cycle_time_epochs, batch_size, lr_start, new_lr_end,
                grads_width_only,
                warmup_lr, warmup_batch, warmup_pts)

        lrs = lrs[:, :clip_at]
        lr_perfs = lr_perfs[:, :clip_at]

    lrs = lrs[0]
    num_trials = lr_perfs.shape[0]
    window_size = smooth_window_size(lrs.shape[0])

    lr_smoothed_perfs = scipy.signal.savgol_filter(
        lr_perfs, window_size, 1)

    old_settings = np.seterr(under='ignore')
    lse_smoothed_lr_perfs = scipy.special.logsumexp(
        lr_smoothed_perfs, axis=0
    )
    np.seterr(**old_settings)
    lse_smoothed_lr_perf_then_derivs = np.gradient(lse_smoothed_lr_perfs)
    lr_perf_derivs = np.gradient(lr_perfs, axis=-1)
    smoothed_lr_perf_derivs = scipy.signal.savgol_filter(
        lr_perfs, window_size, 1, deriv=1)
    mean_smoothed_lr_perf_derivs = smoothed_lr_perf_derivs.mean(0)

    lse_smoothed_lr_perf_then_derivs_then_smooth = scipy.signal.savgol_filter(
        lse_smoothed_lr_perf_then_derivs, window_size, 1)
    lr_min, lr_max = find_with_derivs(lrs, lse_smoothed_lr_perf_then_derivs_then_smooth,
                                      grads_width_only)

    np.savez_compressed(
        outfile,
        lrs=lrs, perfs=lr_perfs,
        smoothed_perfs=lr_smoothed_perfs,
        lse_smoothed_perfs=lse_smoothed_lr_perfs,
        perf_derivs=lr_perf_derivs,
        smoothed_perf_derivs=smoothed_lr_perf_derivs,
        mean_smoothed_perf_derivs=mean_smoothed_lr_perf_derivs,
        lse_smoothed_perf_then_derivs=lse_smoothed_lr_perf_then_derivs,
        lse_smoothed_perf_then_derivs_then_smooth=lse_smoothed_lr_perf_then_derivs_then_smooth,
        lr_range=np.array([lr_min, lr_max]))

    logger.info('Learning rate range: [%s, %s) (found from %s trials)',
                lr_min, lr_max, num_trials)
    return lr_min, lr_max, window_size, num_trials
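
# smooth_window_size is not shown in this excerpt; a plausible stand-in
# (an assumption): scipy.signal.savgol_filter needs an odd window length,
# so take roughly a tenth of the series, forced odd and at least 3.
def smooth_window_size(n: int) -> int:
    w = max(3, n // 10)
    return w if w % 2 == 1 else w + 1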
Example #52
0
    def test_case1(self):

        self.prob = Problem()
        self.prob.model.set_input_defaults('fl_start.P', 17., units='psi')
        self.prob.model.set_input_defaults('fl_start.T', 500., units='degR')
        self.prob.model.set_input_defaults('fl_start.MN', 0.5)
        self.prob.model.set_input_defaults('fl_start.W', 100., units='lbm/s')

        self.prob.model.add_subsystem(
            'fl_start',
            FlowStart(thermo_data=species_data.janaf, elements=AIR_MIX))

        self.prob.set_solver_print(level=-1)
        self.prob.setup(check=False)

        np.seterr(divide='raise')
        # 6 cases to check against
        for i, data in enumerate(ref_data):
            self.prob['fl_start.P'] = data[h_map['Pt']]
            self.prob['fl_start.T'] = data[h_map['Tt']]
            self.prob['fl_start.W'] = data[h_map['W']]
            self.prob['fl_start.MN'] = data[h_map['MN']]

            self.prob.run_model()

            # check outputs
            tol = 1.0e-3

            # The Mach 2.0 case is at a ridiculously low temperature, so
            # accuracy is questionable there
            if data[h_map['MN']] >= 2.:
                tol = 5e-2

            print('Case: ', data[h_map['Pt']], data[h_map['Tt']],
                  data[h_map['W']], data[h_map['MN']])
            npss = data[h_map['Pt']]
            pyc = self.prob['fl_start.Fl_O:tot:P']
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['Tt']]
            pyc = self.prob['fl_start.Fl_O:tot:T']
            rel_err = abs(npss - pyc) / npss
            print('Tt:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['W']]
            pyc = self.prob['fl_start.Fl_O:stat:W']
            rel_err = abs(npss - pyc) / npss
            print('W:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['ht']]
            pyc = self.prob['fl_start.Fl_O:tot:h']
            rel_err = abs(npss - pyc) / npss
            print('ht:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['s']]
            pyc = self.prob['fl_start.Fl_O:tot:S']
            rel_err = abs(npss - pyc) / npss
            print('S:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['rhot']]
            pyc = self.prob['fl_start.Fl_O:tot:rho']
            rel_err = abs(npss - pyc) / npss
            print('rhot:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['gamt']]
            pyc = self.prob['fl_start.Fl_O:tot:gamma']
            rel_err = abs(npss - pyc) / npss
            print('gamt:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['MN']]
            pyc = self.prob['fl_start.Fl_O:stat:MN']
            rel_err = abs(npss - pyc) / npss
            print('MN:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['Ps']]
            pyc = self.prob['fl_start.Fl_O:stat:P']
            rel_err = abs(npss - pyc) / npss
            print('Ps:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['Ts']]
            pyc = self.prob['fl_start.Fl_O:stat:T']
            rel_err = abs(npss - pyc) / npss
            print('Ts:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['hs']]
            pyc = self.prob['fl_start.Fl_O:stat:h']
            rel_err = abs(npss - pyc) / npss
            print('hs:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['rhos']]
            pyc = self.prob['fl_start.Fl_O:stat:rho']
            rel_err = abs(npss - pyc) / npss
            print('rhos:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['gams']]
            pyc = self.prob['fl_start.Fl_O:stat:gamma']
            rel_err = abs(npss - pyc) / npss
            print('gams:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['V']]
            pyc = self.prob['fl_start.Fl_O:stat:V']
            rel_err = abs(npss - pyc) / npss
            print('V:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            npss = data[h_map['A']]
            pyc = self.prob['fl_start.Fl_O:stat:area']
            rel_err = abs(npss - pyc) / npss
            print('A:', npss, pyc, rel_err)
            assert_near_equal(pyc, npss, tol)
            print()
Example #53
0
def desire(choice, distance1, feromones):
    np.seterr(divide='ignore', invalid='ignore')  # ignore divide-by-zero and invalid (NaN) errors
    desire = total_desire(distance1, feromones)[choice, :] / np.nansum(
        total_desire(distance1, feromones)[choice, :])
    return desire
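
# Hedged refactor of the function above (a suggestion, not the source):
# compute the desire matrix once and scope the error suppression with a
# context manager rather than a process-wide np.seterr call.
def desire_scoped(choice, distance1, feromones):
    row = total_desire(distance1, feromones)[choice, :]
    with np.errstate(divide='ignore', invalid='ignore'):
        return row / np.nansum(row)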
Example #54
0
def _select_batch_size_from(model_loader, dataset_loader, loss_loader,
                            accuracy_style, mainfolder, cores, settings,
                            store_up_to, logger, cycle_time_epochs, bss,
                            collated_smoothed_bs_perf_derivs,
                            bs_min, bs_max, lr_min_over_batch,
                            lr_max_over_batch) -> typing.Tuple[int, typing.Iterable[int], int]:
    settings: HyperparameterSettings
    store_up_to: AnalysisSettings

    bs_min_ind = int((bss == bs_min).argmax())
    bs_max_ind = int((bss == bs_max).argmax())

    incl_raw = store_up_to.hparam_selection_specific_imgs

    if bs_min_ind == bs_max_ind:
        logger.info('Only found a single good batch size, using that without '
                    + 'further investigation')
        return bs_min, [bs_min], -1  # match the (size, points checked, loops) contract

    if bs_max_ind - bs_min_ind <= settings.batch_pts:
        logger.debug('Found %s good batch sizes and willing to try up to %s, '
                     + 'so testing all of them.', bs_max_ind - bs_min_ind,
                     settings.batch_pts)
        test_pts = bss[bs_min_ind:bs_max_ind]
    else:
        probs = collated_smoothed_bs_perf_derivs[bs_min_ind:bs_max_ind]
        old_settings = np.seterr(under='ignore')
        probs = scipy.special.softmax(probs)

        iters = 0
        while (probs < 1e-6).sum() != 0:
            if iters > 10:
                probs[:] = 1 / probs.shape[0]
                break
            probs[probs < 1e-6] = 1e-6
            probs = scipy.special.softmax(probs)
            iters += 1

        np.seterr(**old_settings)

        test_pts = np.random.choice(
            np.arange(bs_min_ind, bs_max_ind), settings.batch_pts,
            replace=False, p=probs)
        test_pts = bss[test_pts]

        logger.debug('Comparing batch sizes: %s', test_pts)

    # Here we could naively just loop over the test_pts, but that would be a
    # very inefficient use of our cores on fast settings with many cores.
    # Furthermore, some batch sizes will almost certainly run faster than
    # others. So, in the name of performance, this is going to look a lot
    # like _run_and_collate while being dissimilar enough that reusing it
    # isn't worth it

    folder = str(uuid.uuid4())
    os.makedirs(folder)

    loops = 0  # number spawned // test_pts.shape[0]
    last_loop_printed = 0
    cur_ind = 0  # in test_pts
    current_processes = []
    target_num_loops = max(
        settings.batch_pt_min_inits,
        cores // test_pts.shape[0]
    )
    while loops < target_num_loops:
        while len(current_processes) == cores:
            if last_loop_printed < loops:
                logger.debug('On loop %s/%s',
                             loops + 1, settings.batch_pt_min_inits)
                last_loop_printed = loops
            time.sleep(0.1)

            for i in range(len(current_processes) - 1, -1, -1):
                if not current_processes[i].is_alive():
                    current_processes.pop(i)

        fname = os.path.join(folder, f'{cur_ind}_{loops}.npz')
        bs = int(test_pts[cur_ind])
        proc = mp.Process(
            target=_train_with_perf,
            args=(
                model_loader, dataset_loader, loss_loader, fname,
                accuracy_style, bs, lr_min_over_batch * bs,
                lr_max_over_batch * bs, cycle_time_epochs,
                cycle_time_epochs, incl_raw
            )
        )
        proc.start()
        current_processes.append(proc)

        cur_ind += 1
        if cur_ind >= test_pts.shape[0]:
            cur_ind = 0
            loops += 1

    logger.debug('Waiting for %s currently running trials to end...',
                 len(current_processes))

    for proc in current_processes:
        proc.join()

    logger.debug('Organizing and interpreting batch size performance info...')

    all_final_perfs = np.zeros((test_pts.shape[0], loops))
    all_final_lse_perfs = np.zeros(test_pts.shape[0])

    raws_dict = dict()

    for i, bs in enumerate(test_pts):
        trials = []
        trials_raw = [] if incl_raw else None
        for trial in range(loops):
            fname = os.path.join(folder, f'{i}_{trial}.npz')
            with np.load(fname) as infile:
                final_perf = infile['final_perf']
                if np.isnan(final_perf).sum() > 0:
                    logger.debug('Found some nans, treating them as inf bad')
                    final_perf[np.isnan(final_perf)] = 0
                trials.append(final_perf)

                if incl_raw:
                    perf = infile['perf']
                    if np.isnan(perf).sum() > 0:
                        logger.debug('Found some nans in raw perfs')
                        perf[np.isnan(perf)] = 0
                    trials_raw.append(perf)
            os.remove(fname)
        trials = np.concatenate(trials)

        old_settings = np.seterr(under='ignore')
        lse_trials = scipy.special.logsumexp(trials)
        np.seterr(**old_settings)

        all_final_perfs[i] = trials
        all_final_lse_perfs[i] = lse_trials

        if incl_raw:
            trials_raw = np.stack(trials_raw)
            smoothed_trials_raw = scipy.signal.savgol_filter(
                trials_raw, smooth_window_size(trials_raw.shape[1]), 1
            )
            old_settings = np.seterr(under='ignore')
            lse_smoothed_trials_raw = scipy.special.logsumexp(
                smoothed_trials_raw, axis=0)
            np.seterr(**old_settings)

            raws_dict[f'raw_{bs}'] = trials_raw
            raws_dict[f'smoothed_raw_{bs}'] = smoothed_trials_raw
            raws_dict[f'lse_smoothed_raw_{bs}'] = lse_smoothed_trials_raw

    os.rmdir(folder)

    best_ind = np.argmax(all_final_lse_perfs)
    best_bs = int(test_pts[best_ind])

    np.savez_compressed(
        os.path.join(mainfolder, 'bs_sampled.npz'),
        bss=test_pts, final=all_final_perfs, lse_final=all_final_lse_perfs,
        **raws_dict
    )

    logger.info('Found best batch size of those tested: %s', best_bs)

    return best_bs, test_pts, loops
Example #55
0
from logging import basicConfig, INFO
from unittest import main, TestCase

from numpy import seterr

from pyrad.optics.gas import Gas
from pyrad.utils.grids import UniformGrid1D


class TestGasOptics(TestCase):
    def test_gas_optics(self):
        # Surface layer of CIRC case 1.
        abundance = {"H2O": 0.006637074}
        for formula, concentration in abundance.items():
            gas = Gas(formula)
            gas.absorption_coefficient(temperature=288.99,
                                       pressure=98388.,
                                       volume_mixing_ratio=concentration,
                                       spectral_grid=UniformGrid1D(
                                           1., 3000., 0.1).points)


if __name__ == "__main__":
    basicConfig(
        format="%(asctime)-15s - %(pathname)s(%(lineno)d):\n\t%(message)s",
        level=INFO)
    seterr(divide="raise", over="raise", invalid="raise")
    main()
Example #56
0
import numpy as np
np.seterr(over='ignore', divide='raise')
from matplotlib import pyplot as plt

class SimpleNeuralNet():
    def activation_function(self, x):
        return 1/(1+np.exp(-x))
    
    def deepcopy(self):
        new_net = SimpleNeuralNet(self.num_inputs, self.num_outputs, self.layer_node_counts)
        new_net.layers = [np.copy(layer) for layer in self.layers]
        return new_net
    
    def execute(self, input_vector):
        assert len(input_vector) == self.num_inputs, \
            "wrong input vector size"

        next_v = input_vector

        # iterate through layers, computing the activation
        # of the weighted inputs from the previous layer
        for layer in self.layers:
            # add a bias to each layer [1]
            next_v = np.append(next_v, 1)
            
            # pump the input vector through the matrix multiplication
            # and our activation function
            next_v = self.activation_function(np.dot(next_v, layer))
            
        return next_v
        
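# The class above omits its constructor; deepcopy() and execute() reference
# num_inputs, num_outputs, layer_node_counts, and layers. A plausible
# __init__ (an assumption, not from the source) wires up random weight
# matrices, with one extra input row per layer for the bias appended in
# execute().
class SimpleNeuralNetSketch(SimpleNeuralNet):
    def __init__(self, num_inputs, num_outputs, layer_node_counts=()):
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.layer_node_counts = list(layer_node_counts)
        sizes = [num_inputs] + self.layer_node_counts + [num_outputs]
        # m + 1 rows: execute() appends a bias of 1 to each layer's input
        self.layers = [np.random.randn(m + 1, n)
                       for m, n in zip(sizes, sizes[1:])]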
Example #57
0
File: mods.py Project: cphyc/yt
# also attempt to parse the command line and set up the global state of various
# operations.  The variable unparsed_args is not used internally but is
# provided as a convenience for users who wish to parse arguments in scripts.
# https://mail.python.org/archives/list/[email protected]/thread/L6AQPJ3OIMJC5SNKVM7CJG32YVQZRJWA/
import yt.startup_tasks as __startup_tasks
from yt import *
from yt._maintenance.deprecation import issue_deprecation_warning
from yt.config import ytcfg, ytcfg_defaults
from yt.utilities.logger import _level

issue_deprecation_warning(
    "The yt.mods module is deprecated.", since="4.1.0", removal="4.2.0"
)

unparsed_args = __startup_tasks.unparsed_args


if _level >= int(ytcfg_defaults["yt"]["log_level"]):
    # This won't get displayed.
    mylog.debug("Turning off NumPy error reporting")
    np.seterr(all="ignore")

# We load plugins.  Keep in mind, this can be fairly dangerous -
# the primary purpose is to allow people to have a set of functions
# that get used every time that they don't have to *define* every time.
# This way, other command-line tools can be used very simply.
# Unfortunately, for now, I think the easiest and simplest way of doing
# this is also the most dangerous way.
if ytcfg.get("yt", "load_field_plugins"):
    enable_plugins()
Example #58
0
import util
import numpy as np
import matplotlib.pyplot as plt

np.seterr(all="raise")


factor = 2.0


class LinearModel(object):
    """Base class for linear models."""

    def __init__(self, theta=None):
        """
        Args:
            theta: Weights vector for the model.
        """
        self.theta = theta

    def fit(self, X, y):
        """Run solver to fit linear model. You have to update the value of
        self.theta using the normal equations.

        Args:
            X: Training example inputs. Shape (n_examples, dim).
            y: Training example labels. Shape (n_examples,).
        """
        # *** START CODE HERE ***
        # Normal equation : X^T X theta = X^T y
        self.theta = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))

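# Quick sanity check of the normal-equation fit above (synthetic data with
# exact labels, so the recovered weights should match):
X = np.random.randn(100, 3)
true_theta = np.array([1.0, -2.0, 0.5])
model = LinearModel()
model.fit(X, X @ true_theta)
assert np.allclose(model.theta, true_theta)
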
Example #59
0
 Z1 = np.random.randint(0,10,10)
 Z2 = np.random.randint(0,10,10)
 print(np.intersect1d(Z1,Z2))

# =============================================================================
# 31. How do you ignore all numpy warnings (not recommended)? (★☆☆)
# (hint: np.seterr, np.errstate)
#
# =============================================================================



# Suicide mode on
defaults = np.seterr(all="ignore")
Z = np.ones(1) / 0

# Back to sanity
_ = np.seterr(**defaults)

# An equivalent way, with a context manager:

with np.errstate(divide='ignore'):
    Z = np.ones(1) / 0

# =============================================================================
# 32. Is the following expression true? (★☆☆)
# (hint: imaginary number)
# =============================================================================
Example #60
0
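# Assumed imports for this excerpt (inferred from usage; the original module
# also provides project-internal helpers such as _rotate, logger, and
# KrakenInputException, which are not reproduced here):
import warnings
from typing import Any, Dict
import numpy as np
from PIL import Image
from scipy.stats import linregress
from shapely import geometry as geom
from skimage import draw
from skimage.measure import approximate_polygon, subdivide_polygon
from skimage.transform import PiecewiseAffineTransform, warp
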
def extract_polygons(im: Image.Image, bounds: Dict[str, Any]):
    """
    Yields the subimages of image im defined in the list of bounding polygons
    with baselines, preserving order.

    Args:
        im (PIL.Image.Image): Input image
        bounds (dict): Either a dictionary with a 'boxes' list of
            (x1, y1, x2, y2) tuples, or, when bounds['type'] is 'baselines',
            one with a 'lines' list of baseline/boundary records.

    Yields:
        (PIL.Image.Image) the extracted subimage, together with its line
        record or box
    """
    if 'type' in bounds and bounds['type'] == 'baselines':
        old_settings = np.seterr(all='ignore')

        siz = np.array(im.size, dtype=float)
        # select proper interpolation scheme depending on shape
        if im.mode == '1':
            order = 0
            im = im.convert('L')
        else:
            order = 1
        im = np.array(im)

        for line in bounds['lines']:
            pl = np.array(line['boundary'])
            baseline = np.array(line['baseline'])
            c_min, c_max = int(pl[:,0].min()), int(pl[:,0].max())
            r_min, r_max = int(pl[:,1].min()), int(pl[:,1].max())

            # fast path for straight baselines requiring only rotation
            if len(baseline) == 2:
                baseline = baseline.astype(float)
                # calculate direction vector
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore', RuntimeWarning)
                    slope, _, _, _, _ = linregress(baseline[:, 0], baseline[:, 1])
                if np.isnan(slope):
                    p_dir = np.array([0., np.sign(np.diff(baseline[(0, -1),1])).item()*1.])
                else:
                    p_dir = np.array([1, np.sign(np.diff(baseline[(0, -1),0])).item()*slope])
                    p_dir = (p_dir.T / np.sqrt(np.sum(p_dir**2,axis=-1)))
                angle = np.arctan2(p_dir[1], p_dir[0])
                patch = im[r_min:r_max+1, c_min:c_max+1].copy()
                offset_polygon = pl - (c_min, r_min)
                r, c = draw.polygon(offset_polygon[:,1], offset_polygon[:,0])
                mask = np.zeros(patch.shape[:2], dtype=bool)
                mask[r, c] = True
                patch[~mask] = 0
                extrema = offset_polygon[(0,-1),:]
                # scale line image to max 600 pixel width
                tform, rotated_patch = _rotate(patch, angle, center=extrema[0], scale=1.0, cval=0)
                i = Image.fromarray(rotated_patch.astype('uint8'))
            # normal slow path with piecewise affine transformation
            else:
                if len(pl) > 50:
                    pl = approximate_polygon(pl, 2)
                full_polygon = subdivide_polygon(pl, preserve_ends=True)
                pl = geom.MultiPoint(full_polygon)

                bl = zip(baseline[:-1:], baseline[1::])
                bl = [geom.LineString(x) for x in bl]
                cum_lens = np.cumsum([0] + [l.length for l in bl])
                # distance of intercept from start point and number of line segment
                control_pts = []
                for point in pl.geoms:
                    npoint = np.array(point)
                    line_idx, dist, intercept = min(((idx, line.project(point), np.array(line.interpolate(line.project(point)))) for idx, line in enumerate(bl)), key=lambda x: np.linalg.norm(npoint-x[2]))
                    # absolute distance from start of line
                    line_dist = cum_lens[line_idx] + dist
                    intercept = np.array(intercept)
                    # side of line the point is at
                    side = np.linalg.det(np.array([[baseline[line_idx+1][0]-baseline[line_idx][0],
                                                    npoint[0]-baseline[line_idx][0]],
                                                   [baseline[line_idx+1][1]-baseline[line_idx][1],
                                                    npoint[1]-baseline[line_idx][1]]]))
                    side = np.sign(side)
                    # signed perpendicular distance from the rectified distance
                    per_dist = side * np.linalg.norm(npoint-intercept)
                    control_pts.append((line_dist, per_dist))
                # calculate baseline destination points
                bl_dst_pts = baseline[0] + np.dstack((cum_lens, np.zeros_like(cum_lens)))[0]
                # calculate bounding polygon destination points
                pol_dst_pts = np.array([baseline[0] + (line_dist, per_dist) for line_dist, per_dist in control_pts])
                # extract bounding box patch
                c_dst_min, c_dst_max = int(pol_dst_pts[:,0].min()), int(pol_dst_pts[:,0].max())
                r_dst_min, r_dst_max = int(pol_dst_pts[:,1].min()), int(pol_dst_pts[:,1].max())
                output_shape = np.around((r_dst_max - r_dst_min + 1, c_dst_max - c_dst_min + 1))
                patch = im[r_min:r_max+1,c_min:c_max+1].copy()
                # offset src points by patch shape
                offset_polygon = full_polygon - (c_min, r_min)
                offset_baseline = baseline - (c_min, r_min)
                # offset dst point by dst polygon shape
                offset_bl_dst_pts = bl_dst_pts - (c_dst_min, r_dst_min)
                offset_pol_dst_pts = pol_dst_pts - (c_dst_min, r_dst_min)
                # mask out points outside bounding polygon
                mask = np.zeros(patch.shape[:2], dtype=bool)
                r, c = draw.polygon(offset_polygon[:,1], offset_polygon[:,0])
                mask[r, c] = True
                patch[~mask] = 0
                # estimate piecewise transform
                src_points = np.concatenate((offset_baseline, offset_polygon))
                dst_points = np.concatenate((offset_bl_dst_pts, offset_pol_dst_pts))
                tform = PiecewiseAffineTransform()
                tform.estimate(src_points, dst_points)
                o = warp(patch, tform.inverse, output_shape=output_shape, preserve_range=True, order=order)
                i = Image.fromarray(o.astype('uint8'))
            yield i.crop(i.getbbox()), line
    else:
        if bounds['text_direction'].startswith('vertical'):
            angle = 90
        else:
            angle = 0
        for box in bounds['boxes']:
            if isinstance(box, tuple):
                box = list(box)
            if (box < [0, 0, 0, 0] or box[::2] > [im.size[0], im.size[0]] or
                    box[1::2] > [im.size[1], im.size[1]]):
                logger.error('bbox {} is outside of image bounds {}'.format(box, im.size))
                raise KrakenInputException('Line outside of image bounds')
            yield im.crop(box).rotate(angle, expand=True), box