Example #1
    def test_init(self):
        import numpy as np
        import math
        import sys

        assert np.intp() == np.intp(0)
        assert np.intp("123") == np.intp(123)
        raises(TypeError, np.intp, None)
        assert np.float64() == np.float64(0)
        assert math.isnan(np.float64(None))
        assert np.bool_() == np.bool_(False)
        assert np.bool_("abc") == np.bool_(True)
        assert np.bool_(None) == np.bool_(False)
        assert np.complex_() == np.complex_(0)
        # raises(TypeError, np.complex_, '1+2j')
        assert math.isnan(np.complex_(None))
        for c in ["i", "I", "l", "L", "q", "Q"]:
            assert np.dtype(c).type().dtype.char == c
        for c in ["l", "q"]:
            assert np.dtype(c).type(sys.maxint) == sys.maxint
        for c in ["L", "Q"]:
            assert np.dtype(c).type(sys.maxint + 42) == sys.maxint + 42
        assert np.float32(np.array([True, False])).dtype == np.float32
        assert type(np.float32(np.array([True]))) is np.ndarray
        assert type(np.float32(1.0)) is np.float32
        a = np.array([True, False])
        assert np.bool_(a) is a
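A few of these constructor behaviors are easy to check directly (note this is a PyPy app-level test; on CPython's NumPy, np.float64(None) raises TypeError rather than returning nan):

import numpy as np
assert np.intp("123") == 123      # numeric strings are parsed
assert np.bool_("abc") == True    # any non-empty string is truthy
assert np.float64() == 0.0        # no-argument constructors give zero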
Example #2
    def test_custom_array_like(self):

        class MyThing(object):
            __array_priority__ = 1000

            rmul_count = 0
            getitem_count = 0

            def __init__(self, shape):
                self.shape = shape

            def __len__(self):
                return self.shape[0]

            def __getitem__(self, i):
                MyThing.getitem_count += 1
                if not isinstance(i, tuple):
                    i = (i,)
                if len(i) > len(self.shape):
                    raise IndexError("boo")

                return MyThing(self.shape[len(i):])

            def __rmul__(self, other):
                MyThing.rmul_count += 1
                return self

        np.float64(5)*MyThing((3, 3))
        assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
        assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
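A minimal standalone sketch of the mechanism this test exercises: a large __array_priority__ makes NumPy's scalar __mul__ return NotImplemented, so Python dispatches to the custom class's reflected operator.

import numpy as np

class Deferring(object):
    __array_priority__ = 1000  # tell NumPy to defer to this class

    def __rmul__(self, other):
        return "deferred to Deferring.__rmul__"

print(np.float64(5) * Deferring())   # deferred to Deferring.__rmul__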
Example #3
    def grad_EVzxVzxT_by_Z(self, EVzxVzxT_list_this, Z, A, B, p, r):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        ainv = 1 / (self.length_scale * self.length_scale)
        siginv = 1 / (B[0, 0] * B[0, 0])

        dZthis = np.zeros([1, R])

        dZthis[0, r] = 1

        res1 = -0.5 * (dZthis.dot(Z[p, :]) + Z[p, :].dot(dZthis.T)) * (ainv - ainv * (1 / (siginv + 2 * ainv)) * ainv)

        res2 = np.tile(dZthis.dot(A.T) * (ainv * (1 / (siginv + 2 * ainv)) * siginv), [P, 1])

        res3 = np.tile(dZthis.dot(Z.T) * (ainv * (1 / (siginv + 2 * ainv)) * ainv), [N, 1])

        dZ = np.zeros([N, P, P])

        dZ[:, p, :] += np.float64(res1) + res2.T + res3
        dZ[:, :, p] += np.float64(res1) + res2.T + res3

        # set the diagonal
        # dZ[:,p,p] = dZ[:,p,p]/2.

        res = np.sum(EVzxVzxT_list_this * dZ, axis=0)

        return res
Example #4
def eaxis_ELANEX(y, res, etay=None, etapy=None, ypinch=None, img=None, ymotor=None):
    ymotor = np.float64(ymotor)
    y      = y+ymotor/res

    #  y_motor_calibrated = np.float64(-1e-3)
    #  y_pinch_calibrated = np.float64(130)
    #  y_pixel_size       = np.float64(4.65e-6)
    #  E0 = 20.35

    #  y_motor_calibrated = np.float64(-0.00677574370709)
    #  y_pinch_calibrated = np.float64(211)
    #  y_pixel_size       = np.float64(8.9185e-6)
    E0 = 23.737805394397343771

    # ypinch = y_pinch_calibrated + y_motor_calibrated/y_pixel_size

    # E0=20.35 observed at 130px, motor position -1mm
    #  E0=20.35

    theta  = np.float64(6e-3)
    Lmag   = np.float64(2*4.889500000E-01)
    Ldrift = np.float64(8.792573)

    logger.log(level=loggerlevel, msg='ypinch is: {}'.format(ypinch))
    logger.log(level=loggerlevel, msg='ymotor is: {}'.format(ymotor))

    out = E_no_eta(y, ypinch, res, Ldrift, Lmag, E0, theta)
    return out
Example #5
def do_rangecheck(cf,ds,section='',series='',code=2):
    '''Applies a range check to data series listed in the control file.  Data values that
       are less than the lower limit or greater than the upper limit are replaced with
       c.missing_value and the corresponding QC flag element is set to 2.'''
    if 'RangeCheck' not in cf[section][series].keys(): return
    if 'Lower' in cf[section][series]['RangeCheck'].keys():
        lwr = numpy.array(eval(cf[section][series]['RangeCheck']['Lower']))
        valid_lower = numpy.min(lwr)
        lwr = lwr[ds.series['Month']['Data']-1]
        index = numpy.where((abs(ds.series[series]['Data']-numpy.float64(c.missing_value))>c.eps)&
                                (ds.series[series]['Data']<lwr))
        ds.series[series]['Data'][index] = numpy.float64(c.missing_value)
        ds.series[series]['Flag'][index] = numpy.int32(code)
        ds.series[series]['Attr']['rangecheck_lower'] = cf[section][series]['RangeCheck']['Lower']
    if 'Upper' in cf[section][series]['RangeCheck'].keys():
        upr = numpy.array(eval(cf[section][series]['RangeCheck']['Upper']))
        valid_upper = numpy.min(upr)
        upr = upr[ds.series['Month']['Data']-1]
        index = numpy.where((abs(ds.series[series]['Data']-numpy.float64(c.missing_value))>c.eps)&
                                (ds.series[series]['Data']>upr))
        ds.series[series]['Data'][index] = numpy.float64(c.missing_value)
        ds.series[series]['Flag'][index] = numpy.int32(code)
        ds.series[series]['Attr']['rangecheck_upper'] = cf[section][series]['RangeCheck']['Upper']
        # valid_lower only exists if a 'Lower' limit was also given above
        if 'Lower' in cf[section][series]['RangeCheck'].keys():
            ds.series[series]['Attr']['valid_range'] = str(valid_lower)+','+str(valid_upper)
    if 'RangeCheck' not in ds.globalattributes['Functions']:
        ds.globalattributes['Functions'] = ds.globalattributes['Functions']+',RangeCheck'
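The masking pattern above, as a self-contained sketch with hypothetical missing_value and eps constants standing in for the module's c:

import numpy as np
missing_value, eps = -9999.0, 1e-10
data = np.array([-9999.0, 2.0, 50.0, 7.0])
lwr, upr = 0.0, 10.0
is_real = np.abs(data - np.float64(missing_value)) > eps     # ignore existing gaps
bad = np.where(is_real & ((data < lwr) | (data > upr)))      # out-of-range values
data[bad] = np.float64(missing_value)
print(data)   # [-9999.    2. -9999.    7.]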
Example #6
def calc_pca(feature):
    # eigs (sparse iterative eigensolver) is used below for the leading eigenvectors
    from scipy.sparse.linalg import eigs
    # Filter out super high numbers due to some instability in the network
    feature[feature>5] = 5
    feature[feature<-5] = -5
    #### Missing an image guided filter with the image as input
    ##
    ##########
    # change to double precision
    feature = np.float64(feature)
    # retrieve size of feature array
    shape = feature.shape
    [h, w, d] = feature.shape
    # resize to a two-dimensional array
    feature = np.reshape(feature, (h*w,d))
    # calculate average of each column
    featmean = np.average(feature,0)
    onearray = np.ones((h*w,1))
    featmeanarray = np.multiply(np.ones((h*w,1)),featmean)
    feature = np.subtract(feature,featmeanarray)
    feature_transpose = np.transpose(feature)
    cover = np.dot(feature_transpose, feature)
    # get largest eigenvectors of the array
    val,vecs = eigs(cover, k=3, which='LI')
    pcafeature = np.dot(feature, vecs)
    pcafeature = np.reshape(pcafeature,(h,w,3))
    pcafeature = np.float64(pcafeature)
    return pcafeature
Example #7
def process_waasmaierdb(data, root):
    if os.path.isfile(os.path.join(root, 'waasmaierdb.h5')):
        return

    try:
        archive = tarfile.open(data)
        lines = archive.extractfile(archive.getnames()[0]).readlines()[19:]
    finally:
        archive.close()

    with h5py.File(os.path.join(root, 'waasmaierdb.h5'), 'w') as elements:
        while 1:
            id = lines.pop(0).split()[0]
            if id == 'END':
                break

            el = elements.create_group(id)

            line = lines.pop(0)
            el['a'] = np.zeros(5, dtype='d')
            for i in range(5):
                el['a'][i], line = np.float64(line[:10]), line[10:]
            el['c'] = np.float64(line[:10])

            line = lines.pop(0)
            el['b'] = np.zeros(5, dtype='d')
            for i in range(5):
                el['b'][i], line = np.float64(line[:10]), line[10:]

            line = lines.pop(0) # skip empty line
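The np.float64(line[:10]), line[10:] idiom above consumes fixed-width 10-character columns; a standalone sketch:

import numpy as np
line = "  1.234000  5.678000"
vals = []
for _ in range(2):
    v, line = np.float64(line[:10]), line[10:]
    vals.append(v)
print(vals[0], vals[1])   # 1.234 5.678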
Example #8
def eaxis(y, uid, camname, hdf5_data, E0=20.35, etay=0, etapy=0):
    logger.log(level=loggerlevel, msg='Getting energy axis...')

    # eaxis     = E200.eaxis(camname=camname, y=y, res=res, E0=20.35, etay=0, etapy=0, ymotor=ymotor)
    imgstr = hdf5_data['raw']['images'][str(camname)]
    res    = np.float64(imgstr['RESOLUTION'][0, 0])
    res    = res*np.float64(1.0e-6)

    logger.log(level=loggerlevel, msg='Camera detected: {}'.format(camname))
    if camname == 'ELANEX':
        ymotor = hdf5_data['raw']['scalars']['XPS_LI20_DWFA_M5']['dat']
        ymotor = mt.derefdataset(ymotor, hdf5_data.file)
        ymotor = ymotor[0]*1e-3
        logger.log(level=loggerlevel, msg='Original ymotor is: {}'.format(ymotor))

        raw_rf     = hdf5_data['raw']
        scalars_rf = raw_rf['scalars']
        setQS_str  = scalars_rf['step_value']
        setQS_dat  = E200.E200_api_getdat(setQS_str, uid).dat[0]
        setQS      = mt.hardcode.setQS(setQS_dat)

        logger.log(level=loggerlevel, msg="Eaxis's setQS is: {}".format(setQS_dat))

        ymotor = setQS.elanex_y_motor()*1e-3
        logger.log(level=loggerlevel, msg='Reconstructed ymotor is: {ymotor}'.format(ymotor=ymotor))

        return eaxis_ELANEX(y=y, res=res, etay=etay, etapy=etapy, ymotor=ymotor)

    elif camname == 'CMOS_FAR':
        return eaxis_CMOS_far(y=y, res=res, E0=E0, etay=etay, etapy=etapy)

    else:
        msg = 'No energy axis available for camera: {}'.format(camname)
        logger.log(level=loggerlevel, msg=msg)
        raise NotImplementedError(msg)
Example #9
    def test_cublasDgemmBatched(self):
        l, m, k, n = 11, 7, 5, 3
        A = np.random.rand(l, m, k).astype(np.float64)
        B = np.random.rand(l, k, n).astype(np.float64)

        C_res = np.einsum('nij,njk->nik',A,B)

        a_gpu = gpuarray.to_gpu(A)
        b_gpu = gpuarray.to_gpu(B)
        c_gpu = gpuarray.empty((l, m, n), np.float64)

        alpha = np.float64(1.0)
        beta = np.float64(0.0)

        a_arr = bptrs(a_gpu)
        b_arr = bptrs(b_gpu)
        c_arr = bptrs(c_gpu)

        cublas.cublasDgemmBatched(self.cublas_handle, 'n','n',
                                  n, m, k, alpha,
                                  b_arr.gpudata, n,
                                  a_arr.gpudata, k,
                                  beta, c_arr.gpudata, n, l)

        assert np.allclose(C_res, c_gpu.get())
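Passing b_arr before a_arr (with n and m exchanged) is the usual row-major trick for the column-major cuBLAS API; the identity it relies on, checked in NumPy:

import numpy as np
A = np.random.rand(7, 5)
B = np.random.rand(5, 3)
# Column-major GEMM sees row-major A as A^T, so computing B^T A^T
# column-major leaves C = A @ B laid out row-major.
assert np.allclose((B.T @ A.T).T, A @ B)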
Example #10
    def test_compile_helperlib(self):
        with self.check_cc_compiled(self._test_module.cc_helperlib) as lib:
            res = lib.power(2, 7)
            self.assertPreciseEqual(res, 128)
            for val in (-1, -1 + 0j, np.complex128(-1)):
                res = lib.sqrt(val)
                self.assertPreciseEqual(res, 1j)
            for val in (4, 4.0, np.float64(4)):
                res = lib.np_sqrt(val)
                self.assertPreciseEqual(res, 2.0)
            res = lib.spacing(1.0)
            self.assertPreciseEqual(res, 2**-52)
            # Implicit seeding at startup should guarantee a non-pathological
            # start state.
            self.assertNotEqual(lib.random(-1), lib.random(-1))
            res = lib.random(42)
            expected = np.random.RandomState(42).random_sample()
            self.assertPreciseEqual(res, expected)
            res = lib.size(np.float64([0] * 3))
            self.assertPreciseEqual(res, 3)

            code = """if 1:
                from numpy.testing import assert_equal, assert_allclose
                res = lib.power(2, 7)
                assert res == 128
                res = lib.random(42)
                assert_allclose(res, %(expected)s)
                res = lib.spacing(1.0)
                assert_allclose(res, 2**-52)
                """ % {'expected': expected}
            self.check_cc_compiled_in_subprocess(lib, code)
Example #11
    def _combine_match_index(self, other, func, level=None, fill_value=None):
        new_data = {}

        if fill_value is not None:
            raise NotImplementedError
        if level is not None:
            raise NotImplementedError

        new_index = self.index.union(other.index)
        this = self
        if self.index is not new_index:
            this = self.reindex(new_index)

        if other.index is not new_index:
            other = other.reindex(new_index)

        for col, series in compat.iteritems(this):
            new_data[col] = func(series.values, other.values)

        # fill_value is a function of our operator
        if isnull(other.fill_value) or isnull(self.default_fill_value):
            fill_value = np.nan
        else:
            fill_value = func(np.float64(self.default_fill_value),
                              np.float64(other.fill_value))

        return self._constructor(new_data,
                                 index=new_index,
                                 columns=self.columns,
                                 default_fill_value=fill_value,
                                 fill_value=self.default_fill_value).__finalize__(self)
Example #12
    def loadModel(self):
        f = open(self.modelPath)
        list = f.readlines()
        f.close()
        self.vocabSize = int(list[0].strip())
        self.topicK = int(list[1].strip())
        # self.alpha = np.asarray( [float(list[2].strip())] * self.topicK )
        # self.eta = float(list[3].strip())
        self.topicMatrix = np.zeros((self.topicK, self.vocabSize))
        self.wordMatrix = np.zeros((self.vocabSize, self.topicK))
        j = -1
        location = 0
        for i in range(len(list)):
            if i < 2:
                continue
            if "#TOPIC#" in list[i]:
                j = j + 1
                continue
            l = list[i].split("\t")
            if self.index.has_key(l[0].strip()):
                self.topicMatrix[j][self.index[l[0].strip()]] = np.float64(l[1].strip())
            else:
                self.index[l[0].strip()] = location
                location = location + 1
                self.topicMatrix[j][self.index[l[0].strip()]] = np.float64(l[1].strip())
        self.wordMatrix = np.transpose(self.topicMatrix)
        self.indexList = sorted(self.index.iteritems(), key=lambda d: d[1])

        self.eta = 1.0 / 200
        self.alpha = np.asarray([1.0 / self.topicK for i in xrange(self.topicK)])
        self.minimum_probability = 2.0 / self.topicK
        self.stats = self.topicMatrix
        self.expElogbeta = np.exp(dirichlet_expectation(self.eta + self.stats))
Example #13
def get_ymdhms_from_datetime(ds):
    '''
    PURPOSE:
     Gets the year, month, day, hour, minute and second from a list of
     Python datetimes.  The Python datetime series is read from
     the input data structure and the results are written back to the
     data structure.
    USAGE:
     qcutils.get_ymdhms_from_datetime(ds)
    ASSUMPTIONS:
     None
    AUTHOR: PRI
    '''
    nRecs = numpy.int32(ds.globalattributes["nc_nrecs"])
    dt = ds.series["DateTime"]["Data"]
    flag = numpy.zeros(nRecs,dtype=numpy.int32)
    Year = [dt[i].year for i in range(0,nRecs)]
    Month = [dt[i].month for i in range(0,nRecs)]
    Day = [dt[i].day for i in range(0,nRecs)]
    Hour = [dt[i].hour for i in range(0,nRecs)]
    Minute = [dt[i].minute for i in range(0,nRecs)]
    Second = [dt[i].second for i in range(0,nRecs)]
    Hdh = [numpy.float64(Hour[i])+numpy.float64(Minute[i])/60. for i in range(0,nRecs)]
    Ddd = [(dt[i] - datetime.datetime(Year[i],1,1)).days+1+Hdh[i]/24. for i in range(0,nRecs)]
    CreateSeries(ds,'Year',Year,Flag=flag,Attr=MakeAttributeDictionary(long_name='Year',units='none'))
    CreateSeries(ds,'Month',Month,Flag=flag,Attr=MakeAttributeDictionary(long_name='Month',units='none'))
    CreateSeries(ds,'Day',Day,Flag=flag,Attr=MakeAttributeDictionary(long_name='Day',units='none'))
    CreateSeries(ds,'Hour',Hour,Flag=flag,Attr=MakeAttributeDictionary(long_name='Hour',units='none'))
    CreateSeries(ds,'Minute',Minute,Flag=flag,Attr=MakeAttributeDictionary(long_name='Minute',units='none'))
    CreateSeries(ds,'Second',Second,Flag=flag,Attr=MakeAttributeDictionary(long_name='Second',units='none'))
    CreateSeries(ds,'Hdh',Hdh,Flag=flag,Attr=MakeAttributeDictionary(long_name='Decimal hour of the day',units='none'))
    CreateSeries(ds,'Ddd',Ddd,Flag=flag,Attr=MakeAttributeDictionary(long_name='Decimal day of the year',units='none'))
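The decimal-hour and decimal-day formulas used above, applied to a single timestamp:

import datetime
import numpy
dt = datetime.datetime(2020, 3, 1, 14, 30)
hdh = numpy.float64(dt.hour) + numpy.float64(dt.minute)/60.
ddd = (dt - datetime.datetime(dt.year, 1, 1)).days + 1 + hdh/24.
print(hdh, ddd)   # 14.5 61.604166...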
Example #14
    def test_maybe_convert_scalar(self):

        # pass thru
        result = maybe_convert_scalar('x')
        assert result == 'x'
        result = maybe_convert_scalar(np.array([1]))
        assert result == np.array([1])

        # leave scalar dtype
        result = maybe_convert_scalar(np.int64(1))
        assert result == np.int64(1)
        result = maybe_convert_scalar(np.int32(1))
        assert result == np.int32(1)
        result = maybe_convert_scalar(np.float32(1))
        assert result == np.float32(1)
        result = maybe_convert_scalar(np.float64(1))
        assert result == np.float64(1)

        # coerce
        result = maybe_convert_scalar(1)
        assert result == np.int64(1)
        result = maybe_convert_scalar(1.0)
        assert result == np.float64(1)
        result = maybe_convert_scalar(Timestamp('20130101'))
        assert result == Timestamp('20130101').value
        result = maybe_convert_scalar(datetime(2013, 1, 1))
        assert result == Timestamp('20130101').value
        result = maybe_convert_scalar(Timedelta('1 day 1 min'))
        assert result == Timedelta('1 day 1 min').value
Example #15
    def hsdiffplot(self, terncomps, terncomps2, descriptor='o', **kwargs):
        comps=numpy.float64(terncomps)
        comps2=numpy.float64(terncomps2)
        compsdiff=comps2-comps
        rgb_arr=self.rgb_compdiff(compsdiff)
        compdist = ((compsdiff**2).sum(axis=1)/2.)**.5
        
        self.colorcompplot(comps, descriptor=descriptor, colors=rgb_arr, hollow=False, markeredgecolor='none', **kwargs)

        # color wheel axes
        self.ax.figure.subplots_adjust(left=.05, right=.7)
        self.cwax=self.ax.figure.add_axes([0.6, 0.45, 0.3, 0.45], projection='polar')
        N = 1024
        x = numpy.linspace(-1, 1, N)
        y = numpy.linspace(-1, 1, N)
        X,Y = numpy.meshgrid(x,y)
        R = numpy.sqrt(X*X + Y*Y)
        PHI = numpy.arctan2(Y, X) - numpy.pi/2
        colorgrid=self.complex_to_rgb_grid(R*numpy.exp(-1j*PHI)  * (R<1), invert=True)
        self.cwax.imshow(colorgrid, extent=[0,2*numpy.pi, 0,1024])
        self.cwax.set_rgrids([1,N/3,2*N/3], angle=45)
        self.cwax.set_xticks([numpy.pi/2, 7*numpy.pi/6, 11*numpy.pi/6])
        self.cwax.set_yticks([N/3, 2*N/3, N])
        self.cwax.set_xticklabels(['%s' % ('G'),\
                                    '%s' % ('R'),\
                                    '%s' % ('B')])
        self.cwax.set_yticklabels([\
                                    '%.3f' % (max(compdist)/3.),\
                                    '%.3f' % (2.*max(compdist)/3.),\
                                    '%.3f' % (max(compdist))])
Example #16
def GetDateIndex(datetimeseries,date,ts=30,default=0,match='exact'):
    # return the index of a date/datetime string in an array of datetime objects
    #  datetimeseries - array of datetime objects
    #  date - a date or date/time string in a format dateutils can parse
    #  default - default value, integer
    try:
        if len(date)!=0:
            i = datetimeseries.index(dateutil.parser.parse(date))
        else:
            if default==-1:
                i = len(datetimeseries)-1
            else:
                i = default
    except ValueError:
        if default==-1:
            i = len(datetimeseries)-1
        else:
            i = default
    if match=='startnextday':
        while abs(datetimeseries[i].hour+numpy.float64(datetimeseries[i].minute)/60-numpy.float64(ts)/60)>c.eps:
            i = i + 1
    if match=='endpreviousday':
        while abs(datetimeseries[i].hour+numpy.float64(datetimeseries[i].minute)/60)>c.eps:
            i = i - 1
    return i
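A hypothetical usage sketch on a synthetic 30-minute series (the function itself relies on module-level dateutil and c.eps):

import datetime
series = [datetime.datetime(2020, 1, 1) + datetime.timedelta(minutes=30*i)
          for i in range(48)]
print(GetDateIndex(series, "2020-01-01 12:00"))   # 24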
Example #17
def testNumpyTypeCoercion():
    t = emzed.utils.toTable("a", [np.int32(1)])
    t.info()
    assert t.getColTypes() == [int], t.getColTypes()
    t = emzed.utils.toTable("a", [None, np.int32(1)])
    t.info()
    assert t.getColTypes() == [int], t.getColTypes()

    t.addColumn("b", np.int32(1))
    assert t.getColTypes() == [int, int], t.getColTypes()
    t.replaceColumn("b", [None, np.int32(1)])
    assert t.getColTypes() == [int, int], t.getColTypes()

    t.replaceColumn("b", np.int64(1))
    assert t.getColTypes() == [int, int], t.getColTypes()
    t.replaceColumn("b", [None, np.int64(1)])
    assert t.getColTypes() == [int, int], t.getColTypes()

    t.replaceColumn("b", np.float32(1.0))
    assert t.getColTypes() == [int, float], t.getColTypes()
    t.replaceColumn("b", [None, np.float32(1.0)])
    assert t.getColTypes() == [int, float], t.getColTypes()

    t.replaceColumn("b", np.float64(2.0))
    assert t.getColTypes() == [int, float], t.getColTypes()
    t.replaceColumn("b", [None, np.float64(2.0)])
    assert t.getColTypes() == [int, float], t.getColTypes()
Example #18
def rp_gumbel_original(p_zero, loc, scale, flvol, max_return_period=1e9):
    """
    Transforms a unique, or array of flood volumes into the belonging return
    periods, according to gumbel parameters (belonging to non-zero part of the
    distribution) and a zero probability
    Inputs:
        p_zero:        probability that flood volume is zero
        loc:           Gumbel location parameter (of non-zero part of distribution)
        scale:         Gumbel scale parameter (of non-zero part of distribution)
        flvol:         Flood volume that will be transformed to return period
        max_return_period: maximum return period considered. This maximum is needed to prevent that floating point
                        precision becomes a problem (default: 1e9)
    This function is copied from: https://repos.deltares.nl/repos/Hydrology/trunk/GLOFRIS/src/rp_bias_corr.py
    """
    
    np.seterr(divide='ignore')
    np.seterr(invalid='ignore')
    max_p = 1-1./max_return_period
    max_p_residual = np.minimum(np.maximum((max_p-np.float64(p_zero))/(1-np.float64(p_zero)), 0), 1)
    max_reduced_variate = -np.log(-np.log(np.float64(max_p_residual)))
    # compute the gumbel reduced variate belonging to the Gumbel distribution (excluding any zero-values)
    # make sure that the reduced variate does not exceed the one, resembling the 1,000,000 year return period
    reduced_variate = np.minimum((flvol-loc)/scale, max_reduced_variate)
    # reduced_variate = (flvol-loc)/scale
    # transform the reduced variate into a probability (residual after removing the zero volume probability)
    p_residual = np.minimum(np.maximum(np.exp(-np.exp(-np.float64(reduced_variate))), 0), 1)
    # tranform from non-zero only distribution to zero-included distribution
    p = np.minimum(np.maximum(p_residual*(1-p_zero) + p_zero, p_zero), max_p)  # Never larger than max_p
    # transform into a return period    
    return_period = 1./(1-p)
    test_p = p == 1    
    return return_period, test_p
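A toy call with made-up Gumbel parameters, runnable against the function above:

import numpy as np
rp, capped = rp_gumbel_original(p_zero=0.3, loc=100., scale=50.,
                                flvol=np.array([50., 150., 400.]))
print(rp, capped)   # return periods, plus a flag for probabilities saturating at 1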
Example #19
  def testVarianceAndCovarianceMatrix(self):
    amp = np.float64(.5)
    len_scale = np.float64(.2)
    jitter = np.float64(1e-4)
    observation_noise_variance = np.float64(3e-3)

    kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)

    index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)

    gp = tfd.GaussianProcess(
        kernel,
        index_points,
        observation_noise_variance=observation_noise_variance,
        jitter=jitter)

    def _kernel_fn(x, y):
      return amp * np.exp(-.5 * (np.squeeze((x - y)**2)) / (len_scale**2))

    expected_covariance = (
        _kernel_fn(np.expand_dims(index_points, 0),
                   np.expand_dims(index_points, 1)) +
        (observation_noise_variance + jitter) * np.eye(10))

    self.assertAllClose(expected_covariance,
                        self.evaluate(gp.covariance()))
    self.assertAllClose(np.diag(expected_covariance),
                        self.evaluate(gp.variance()))
Example #20
  def test_passing_in_laplace_plus_defaults_is_same_as_laplace(self):
    d = 10
    scale_diag = rng.rand(d)
    scale_identity_multiplier = np.float64(1.2)
    loc = rng.randn(d)
    with self.test_session() as sess:
      vlap = ds.VectorLaplaceDiag(
          loc=loc,
          scale_diag=scale_diag,
          scale_identity_multiplier=scale_identity_multiplier,
          validate_args=True)
      sasvlap = ds.VectorSinhArcsinhDiag(
          loc=loc,
          scale_diag=scale_diag,
          scale_identity_multiplier=scale_identity_multiplier,
          distribution=ds.Laplace(np.float64(0.), np.float64(1.)),
          validate_args=True)

      x = rng.randn(5, d)
      vlap_pdf, sasvlap_pdf = sess.run([vlap.prob(x), sasvlap.prob(x)])
      self.assertAllClose(vlap_pdf, sasvlap_pdf)

      vlap_samps, sasvlap_samps = sess.run(
          [vlap.sample(10000, seed=0),
           sasvlap.sample(10000, seed=0)])
      self.assertAllClose(loc, sasvlap_samps.mean(axis=0), atol=0.1)
      self.assertAllClose(
          vlap_samps.mean(axis=0), sasvlap_samps.mean(axis=0), atol=0.1)
      self.assertAllClose(
          vlap_samps.std(axis=0), sasvlap_samps.std(axis=0), atol=0.1)
Example #21
def arc_duration_fraction(defined, nt):
    """
    :param defined: boolean array of where there is a detection
    :param nt: number of possible detections
    :return: fraction of the full duration that the detection exists
    """
    return np.float64(np.sum(defined)) / np.float64(nt)
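For instance, runnable against the function above:

import numpy as np
detected = np.array([True, False, True, True])
print(arc_duration_fraction(detected, detected.size))   # 0.75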
Example #23
def get_field_rotation_BB_integral(lsamp, clcurl, cl_E_unlensed_sp, lmax=None, lsamp_out=None, acc=1, raw_cl=False):
    CurlSp = InterpolatedUnivariateSpline(lsamp, clcurl)
    lmax = lmax or lsamp[-1]
    if lsamp_out is None: lsamp_out = np.array([L for L in lsamp if L <= lmax // 2])
    Bcurl = np.zeros(lsamp_out.shape)

    for i, ll in enumerate(lsamp_out):
        l = np.float64(ll)
        for llp in range(10, lmax):
            lp = np.float64(llp)
            if abs(ll - llp) > 200 and lp > 200:
                nphi = 2 * int(min(lp / 10 * acc, 200)) + 1
            elif ll > 2000:
                nphi = 2 * int(lp / 10 * acc) + 1
            else:
                nphi = 2 * int(lp) + 1
            dphi = 2 * np.pi / nphi
            phi = np.linspace(dphi, (nphi - 1) / 2 * dphi, (nphi - 1) // 2)
            w = 2 * np.ones(phi.size)
            cosphi = np.cos(phi)
            sinphi = np.sin(phi)
            sin2phi = np.sin(2 * phi)
            lpp = np.sqrt(lp ** 2 + l ** 2 - 2 * cosphi * l * lp)
            w[lpp < 2] = 0
            w[lpp > lmax] = 0
            curls = CurlSp(lpp)
            dCEs = cl_E_unlensed_sp(lp) * lp * dphi
            crossterm = sinphi * l * lp / lpp ** 2
            Bcurl[i] += np.dot(w, curls * (crossterm * sin2phi) ** 2) * dCEs

    Bcurl *= 4 / (2 * np.pi) ** 2
    if not raw_cl: Bcurl *= lsamp_out * (lsamp_out + 1) / (2 * np.pi)
    return lsamp_out, Bcurl
Example #24
    def test_maybe_convert_scalar(self):

        # pass thru
        result = com._maybe_convert_scalar('x')
        self.assertEqual(result, 'x')
        result = com._maybe_convert_scalar(np.array([1]))
        self.assertEqual(result, np.array([1]))

        # leave scalar dtype
        result = com._maybe_convert_scalar(np.int64(1))
        self.assertEqual(result, np.int64(1))
        result = com._maybe_convert_scalar(np.int32(1))
        self.assertEqual(result, np.int32(1))
        result = com._maybe_convert_scalar(np.float32(1))
        self.assertEqual(result, np.float32(1))
        result = com._maybe_convert_scalar(np.float64(1))
        self.assertEqual(result, np.float64(1))

        # coerce
        result = com._maybe_convert_scalar(1)
        self.assertEqual(result, np.int64(1))
        result = com._maybe_convert_scalar(1.0)
        self.assertEqual(result, np.float64(1))
        result = com._maybe_convert_scalar(pd.Timestamp('20130101'))
        self.assertEqual(result, pd.Timestamp('20130101').value)
        result = com._maybe_convert_scalar(datetime(2013, 1, 1))
        self.assertEqual(result, pd.Timestamp('20130101').value)
        result = com._maybe_convert_scalar(pd.Timedelta('1 day 1 min'))
        self.assertEqual(result, pd.Timedelta('1 day 1 min').value)
Example #25
    def testMulArray(self):
        from numpy import arange, round, float64
        a = arange(0, 60, dtype = 'd').reshape(3,4,5) + .3
        r1 = self.rxns['NTRplOH']
        r2 = r1 * a

        for spcn in "NTR OH".split():
            spc = self.spcs[spcn]
            self.assertTrue((r2[spc] == -a).all())

        for spcn in "HNO3 HO2".split():
            spc = self.spcs[spcn]
            self.assertTrue((r2[spc] == a).all())

        for spcn in "FORM ALD2 ALDX".split():
            spc = self.spcs[spcn]
            self.assertTrue((r2[spc] == (a * float64(.33))).all())

        for spcn in ["ALD"]:
            spc = self.spcs[spcn]
            # Value is not exact because of addition
            self.assertTrue((round(r2[spc], decimals = 8) == round(a * float64(.99), decimals = 8)).all())

        for spcn in ["PAR"]:
            spc = self.spcs[spcn]
            self.assertTrue((r2[spc] == (-a * .66)).all())
Example #26
    def _get_op_result_fill_value(self, other, func):
        own_default = self.default_fill_value

        if isinstance(other, DataFrame):
            # i.e. called from _combine_frame

            other_default = getattr(other, 'default_fill_value', np.nan)

            # if the fill values are the same use them? or use a valid one
            if own_default == other_default:
                # TOOD: won't this evaluate as False if both are np.nan?
                fill_value = own_default
            elif np.isnan(own_default) and not np.isnan(other_default):
                fill_value = other_default
            elif not np.isnan(own_default) and np.isnan(other_default):
                fill_value = own_default
            else:
                fill_value = None

        elif isinstance(other, SparseSeries):
            # i.e. called from _combine_match_index

            # fill_value is a function of our operator
            if isna(other.fill_value) or isna(own_default):
                fill_value = np.nan
            else:
                fill_value = func(np.float64(own_default),
                                  np.float64(other.fill_value))

        else:
            raise NotImplementedError(type(other))

        return fill_value
Example #27
def test_get_dimensions():
    '''
    Test various ways of getting/comparing the dimensions of a quantity.
    '''
    q = 500 * ms
    assert get_dimensions(q) is get_or_create_dimension(q.dimensions._dims)
    assert get_dimensions(q) is q.dimensions
    assert q.has_same_dimensions(3 * second)
    dims = q.dimensions
    assert_equal(dims.get_dimension('time'), 1.)
    assert_equal(dims.get_dimension('length'), 0)
    
    assert get_dimensions(5) is DIMENSIONLESS
    assert get_dimensions(5.0) is DIMENSIONLESS
    assert get_dimensions(np.array(5, dtype=np.int)) is DIMENSIONLESS
    assert get_dimensions(np.array(5.0)) is DIMENSIONLESS
    assert get_dimensions(np.float32(5.0)) is DIMENSIONLESS
    assert get_dimensions(np.float64(5.0)) is DIMENSIONLESS
    assert is_scalar_type(5)
    assert is_scalar_type(5.0)
    assert is_scalar_type(np.array(5, dtype=np.int))
    assert is_scalar_type(np.array(5.0))
    assert is_scalar_type(np.float32(5.0))
    assert is_scalar_type(np.float64(5.0))
    assert_raises(TypeError, lambda: get_dimensions('a string'))
    # wrong number of indices
    assert_raises(TypeError, lambda: get_or_create_dimension([1, 2, 3, 4, 5, 6]))
    # not a sequence
    assert_raises(TypeError, lambda: get_or_create_dimension(42))
Example #28
 def Provisional(self,N,provis,unknowns):    
     j=0    
     finalX=Points('New Provisional Points')
     orientations={}
     for i in unknowns:
         name= i[0:-2]
         variable = i[-1]
         if variable=="o":
             orientations[name]= float64(N[j])
             j+=1
             continue
         x=provis[name].x
         y=provis[name].y
         h=provis[name].h
         if not finalX.has_key(name):
             finalX[name]= Point(0,0,0,False,name)
         if variable=="x":
             finalX[name].ChangeVariable(variable,float64(x+N[j]))
             j+=1
             continue
         if variable=="y":
             finalX[name].ChangeVariable(variable,float64(y+N[j]))
             j+=1
             continue
         if variable=="h":
             finalX[name].ChangeVariable(variable,float64(h+N[j]))
             j+=1
             continue
     return finalX
Example #29
def read_as_sigmas(options, infiles):
    """Read overdispersion parameters for allele-specific test
    (Beta-Binomial). Expect one for each individual."""

    if (options.as_disp):
        disp_file = open(options.as_disp)
        line = disp_file.readline()
        as_sigmas = []
        while line:
            val = np.float64(line.strip())
            if val < 0.0 or val > 1.0:
                raise ValueError("expected as_sigma values to be "
                                 " in range 0.0-1.0, but got %g" % 
                                 val)
            as_sigmas.append(np.float64(line.strip()))
            line = disp_file.readline()
            
        disp_file.close()

        if len(as_sigmas) != len(infiles):
            raise ValueError("expected %d values in as_disp file "
                             "(one for each input file) but got "
                             "%d" % (len(infiles), len(as_sigmas)))
        
    else:
        as_sigmas = [0.001] * len(infiles)

    return as_sigmas
Example #30
def imageGradientY(image):
    """ This function differentiates an image in the Y direction.

    Note: See lectures 02-06 (Differentiating an image in X and Y) for a good
    explanation of how to perform this operation.

    The Y direction means that you are subtracting rows:
    der. F(x, y) = F(x, y+1) - F(x, y)
    This corresponds to image[r,c] = image[r+1,c] - image[r,c]

    You should compute the absolute value of the differences in order to avoid
    setting a pixel to a negative value which would not make sense.

    We want you to iterate the image to complete this function. You may NOT use
    any functions that automatically do this for you.

    Args:
        image (numpy.ndarray): A grayscale image represented in a numpy array.

    Returns:
        output (numpy.ndarray): The image gradient in the Y direction. The shape
                                of the output array should have a height that is
                                one less than the original since no calculation
                                can be done once the last row is reached.
    """
    # WRITE YOUR CODE HERE.
    shift1 = np.float64(np.roll(image,-1,axis=0))
    
    diff = np.uint8(np.absolute(np.float64(image) - shift1))[0:-1,:]
    
    return diff
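The docstring asks for explicit iteration, while the body above vectorizes with np.roll; a loop-based sketch that matches the stated constraint:

import numpy as np

def imageGradientY_loops(image):
    h, w = image.shape
    out = np.zeros((h - 1, w), dtype=np.uint8)
    for r in range(h - 1):
        for c in range(w):
            # absolute difference of the row below and the current row
            out[r, c] = abs(np.float64(image[r + 1, c]) - np.float64(image[r, c]))
    return out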
Example #31
    def _train(self):
        config = self.config
        sample_time, sync_time, learn_time, apply_time = 0, 0, 0, 0
        iter_init_timesteps = self.cur_timestep

        num_loop_iters = 0
        steps_per_iter = config["sample_batch_size"] * len(self.workers)
        while (self.cur_timestep - iter_init_timesteps <
               config["timesteps_per_iteration"]):
            dt = time.time()
            ray.get([
                w.do_steps.remote(
                    config["sample_batch_size"], self.cur_timestep)
                for w in self.workers])
            num_loop_iters += 1
            self.cur_timestep += steps_per_iter
            self.steps_since_update += steps_per_iter
            sample_time += time.time() - dt

            if self.cur_timestep > config["learning_starts"]:
                dt = time.time()
                # Minimize the error in Bellman's equation on a batch sampled
                # from replay buffer.
                self._update_worker_weights()
                sync_time += (time.time() - dt)
                dt = time.time()
                gradients = ray.get(
                    [w.get_gradient.remote(self.cur_timestep)
                        for w in self.workers])
                learn_time += (time.time() - dt)
                dt = time.time()
                for grad in gradients:
                    self.actor.apply_gradients(grad)
                apply_time += (time.time() - dt)

            if (self.cur_timestep > config["learning_starts"] and
                    self.steps_since_update >
                    config["target_network_update_freq"]):
                self.actor.dqn_graph.update_target(self.actor.sess)
                # Update target network periodically.
                self._update_worker_weights()
                self.steps_since_update -= config["target_network_update_freq"]
                self.num_target_updates += 1

        mean_100ep_reward = 0.0
        mean_100ep_length = 0.0
        num_episodes = 0
        buffer_size_sum = 0
        for mean_rew, mean_len, episodes, exploration, buf_sz in ray.get(
              [w.stats.remote(self.cur_timestep) for w in self.workers]):
            mean_100ep_reward += mean_rew
            mean_100ep_length += mean_len
            num_episodes += episodes
            buffer_size_sum += buf_sz
        mean_100ep_reward /= len(self.workers)
        mean_100ep_length /= len(self.workers)

        info = [
            ("mean_100ep_reward", mean_100ep_reward),
            ("exploration_frac", exploration),
            ("steps", self.cur_timestep),
            ("episodes", num_episodes),
            ("buffer_sizes_sum", buffer_size_sum),
            ("target_updates", self.num_target_updates),
            ("sample_time", sample_time),
            ("weight_sync_time", sync_time),
            ("apply_time", apply_time),
            ("learn_time", learn_time),
            ("samples_per_s",
                num_loop_iters * np.float64(steps_per_iter) / sample_time),
            ("learn_samples_per_s",
                num_loop_iters * np.float64(config["train_batch_size"]) *
                np.float64(config["num_workers"]) / learn_time),
        ]

        for k, v in info:
            logger.record_tabular(k, v)
        logger.dump_tabular()

        result = TrainingResult(
            episode_reward_mean=mean_100ep_reward,
            episode_len_mean=mean_100ep_length,
            timesteps_this_iter=self.cur_timestep - iter_init_timesteps,
            info=info)

        return result
Example #32
def load_gfile_mds(shot,
                   time,
                   tree="EFIT04",
                   exact=False,
                   connection=None,
                   tunnel=True,
                   verbal=True):
    """
    This is scavenged from the load_gfile_d3d script on the EFIT repository,
    except updated to run on python3.

    shot:       Shot to get gfile for.
    time:       Time of the shot to load gfile for, in ms.
    tree:       One of the EFIT trees to get the data from.
    exact:      If True will raise error if time does not exactly match any gfile
                times. False will grab the closest time.
    connection: An MDSplus connection to atlas.
    tunnel:     Set to True if accessing outside DIII-D network.

    returns:    The requested gfile as a dictionary.
    """

    # Connect to server, open tree and go to g-file
    if connection is None:
        if tunnel is True:
            connection = mds.Connection("localhost")
        else:
            connection = mds.Connection('atlas.gat.com')
    connection.openTree(tree, shot)

    base = 'RESULTS:GEQDSK:'

    # get time slice
    if verbal:
        print("Loading gfile:")
        print("  Shot: " + str(shot))
        print("  Tree: " + tree)
        print("  Time: " + str(time))
    signal = 'GTIME'
    k = np.argmin(np.abs(connection.get(base + signal).data() - time))
    time0 = int(connection.get(base + signal).data()[k])

    if (time != time0):
        if exact:
            raise RuntimeError(tree + ' does not exactly contain time %.2f'\
                               %time + '  ->  Abort')
        else:
            if verbal:
                print('Warning: Closest time is ' + str(time0) + '.')
                #print('Fetching time slice ' + str(time0))
            time = time0

    # store data in dictionary
    g = {'shot': shot, 'time': time}

    # get header line
    try:
        header = connection.get(base + 'ECASE').data()[k]
    except:
        print("  No header line.")

    # get all signals, use same names as in read_g_file
    translate = {
        'MW': 'NR',
        'MH': 'NZ',
        'XDIM': 'Xdim',
        'ZDIM': 'Zdim',
        'RZERO': 'R0',
        'RMAXIS': 'RmAxis',
        'ZMAXIS': 'ZmAxis',
        'SSIMAG': 'psiAxis',
        'SSIBRY': 'psiSep',
        'BCENTR': 'Bt0',
        'CPASMA': 'Ip',
        'FPOL': 'Fpol',
        'PRES': 'Pres',
        'FFPRIM': 'FFprime',
        'PPRIME': 'Pprime',
        'PSIRZ': 'psiRZ',
        'QPSI': 'qpsi',
        'NBBBS': 'Nlcfs',
        'LIMITR': 'Nwall'
    }
    for signal in translate:
        try:
            g[translate[signal]] = connection.get(base + signal).data()[k]
        except:
            print("  Node not found: " + base + signal)

    g['R1'] = connection.get(base + 'RGRID').data()[0]
    g['Zmid'] = 0.0

    RLIM = connection.get(base + 'LIM').data()[:, 0]
    ZLIM = connection.get(base + 'LIM').data()[:, 1]
    g['wall'] = np.vstack((RLIM, ZLIM)).T

    RBBBS = connection.get(base + 'RBBBS').data()[k][:int(g['Nlcfs'])]
    ZBBBS = connection.get(base + 'ZBBBS').data()[k][:int(g['Nlcfs'])]
    g['lcfs'] = np.vstack((RBBBS, ZBBBS)).T

    KVTOR = 0
    RVTOR = 1.7
    NMASS = 0
    RHOVN = connection.get(base + 'RHOVN').data()[k]

    # convert floats to integers
    for item in ['NR', 'NZ', 'Nlcfs', 'Nwall']:
        g[item] = int(g[item])

    # convert single (float32) to double (float64) and round
    for item in [
            'Xdim', 'Zdim', 'R0', 'R1', 'RmAxis', 'ZmAxis', 'psiAxis',
            'psiSep', 'Bt0', 'Ip'
    ]:
        g[item] = np.round(np.float64(g[item]), 7)

    # convert single arrays (float32) to double arrays (float64)
    for item in [
            'Fpol', 'Pres', 'FFprime', 'Pprime', 'psiRZ', 'qpsi', 'lcfs',
            'wall'
    ]:
        g[item] = np.array(g[item], dtype=np.float64)

    # Construct (R,Z) grid for psiRZ
    g['dR'] = g['Xdim'] / (g['NR'] - 1)
    g['R'] = g['R1'] + np.arange(g['NR']) * g['dR']

    g['dZ'] = g['Zdim'] / (g['NZ'] - 1)
    NZ2 = int(np.floor(0.5 * g['NZ']))
    g['Z'] = g['Zmid'] + np.arange(-NZ2, NZ2 + 1) * g['dZ']

    # normalize psiRZ
    g['psiRZn'] = (g['psiRZ'] - g['psiAxis']) / (g['psiSep'] - g['psiAxis'])

    return g
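A hypothetical usage sketch (shot, time and tree are made up; assumes an MDSplus tunnel on localhost):

g = load_gfile_mds(shot=167196, time=3000, tree="EFIT01")
print(g['NR'], g['NZ'], g['psiRZn'].shape)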
Example #33
            val_dataframe = pd.read_csv(file_train, encoding='utf-8')
            val_dataframe = val_dataframe.ix[val_in]
            val_dataframe['predicte'] = val_pre_value
            val_dataframe['pre_label'] = val_pre
            val_dataframe.to_csv(file_val_pre,
                                 encoding='utf-8-sig',
                                 index=None)

    print("-------------Finally-------------")
    print("f1_score: %f " % (np.mean(f1_sco_list)))
    print("precision: %f " % (np.mean(precision_list)))
    print("recall: %f " % (np.mean(recall_list)))

    #sample 100 test_data: evaluation
    for i in range(5):
        test_pre_list[i] = np.float64(test_pre_list[i])

    test_pre_fin = np.mean(test_pre_list, axis=0)
    test_data = pd.read_csv(file_test)
    label = list(test_data['manual_label'])
    predict = np.where(test_pre_fin > 0.5, 1, 0)

    precision = precision_score(label, predict, average=None)
    recall = recall_score(label, predict, average=None)
    f1_sco = f1_score(label, predict, average=None)
    print("-------------Test data-------------")
    print("0 f1_score: %f " % (f1_sco[0]))
    print("0 precision: %f " % (precision[0]))
    print("0 recall: %f " % (recall[0]))
    print("1 f1_score: %f " % (f1_sco[1]))
    print("1 precision: %f " % (precision[1]))
 def Interval(min_val, max_val):
     assert min_val <= max_val
     return (numpy.float64(min_val), numpy.float64(max_val))
Example #35
                       n_iter=100,
                       verbose=True,
                       name='mhmm')
mhmm.fit(data.flatten()[:, np.newaxis], y, lengths)
pi_nk, transitions = mhmm._compute_mixture_posteriors(data.flatten()[:, np.newaxis], y, lengths)

trans = transitions.reshape(M, S, S)
print(trans)

print(np.exp(pi_nk))

predicted_cluster = []
label_0 = [0 for i in range(len(data1[:, 0]))]
label_1 = [1 for i in range(len(data2[:, 0]))]
for n in range(num_of_cells):
    cell = np.float64(pi_nk[n])
    predicted_cluster = np.append(predicted_cluster, np.where(cell == max(cell))[0][0])

label = label_0 + label_1

print(label)
print(predicted_cluster)

v = v_measure_score(label, predicted_cluster)
print('oMHMM v-measure: {}'.format(v))

print('entropy: {}'.format((calculate_entropy(trans[0]) + calculate_entropy(trans[1]))/2))

###################################################################################################

mhmm = MHMM.SpaMHMM(n_nodes=1,
Example #36
class TypedJSONDecoder(json.JSONDecoder):
    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(self,
                                  object_hook=self.object_hook,
                                  *args,
                                  **kwargs)

    _custom_supported_types = {
        Enum: deserialize_enum,
        pint.Measurement: deserialize_measurement,
        pint.Quantity: deserialize_quantity,
        set: deserialize_set,
        frozenset: deserialize_frozen_set,
        np.float16: lambda x: np.float16(x["value"]),
        np.float32: lambda x: np.float32(x["value"]),
        np.float64: lambda x: np.float64(x["value"]),
        np.int32: lambda x: np.int32(x["value"]),
        np.int64: lambda x: np.int64(x["value"]),
        np.ndarray: lambda x: np.array(x["value"]),
        datetime: lambda x: dateutil.parser.parse(x["value"]),
    }

    @staticmethod
    def object_hook(object_dictionary):

        if "@type" not in object_dictionary:
            return object_dictionary

        type_string = object_dictionary["@type"]
        class_type = _type_string_to_object(type_string)

        custom_decoder = None

        for decoder_type in TypedJSONDecoder._custom_supported_types:

            if isinstance(decoder_type, str):

                if decoder_type != class_type.__qualname__:
                    continue

            elif not issubclass(class_type, decoder_type):
                continue

            custom_decoder = TypedJSONDecoder._custom_supported_types[
                decoder_type]
            break

        if custom_decoder is not None:

            try:
                deserialized_object = custom_decoder(object_dictionary)

            except Exception as e:

                raise ValueError(
                    "{} ({}) could not be deserialized "
                    "using a specialized custom decoder: {}".format(
                        object_dictionary, type(class_type), e))

        elif hasattr(class_type, "__setstate__"):

            class_init_signature = inspect.signature(class_type)

            for parameter in class_init_signature.parameters.values():

                if (parameter.default != inspect.Parameter.empty
                        or parameter.kind == inspect.Parameter.VAR_KEYWORD
                        or parameter.kind == inspect.Parameter.VAR_POSITIONAL):

                    continue

                raise ValueError(
                    f"Cannot deserialize objects ({class_type}) which have non-"
                    f"optional arguments {parameter.name} in the constructor.")

            deserialized_object = class_type()
            deserialized_object.__setstate__(object_dictionary)

        else:

            raise ValueError(
                "Objects of type {} are not deserializable, please either "
                "add a __setstate__ method, or add the object to the list "
                "of custom supported types.".format(type(class_type)))

        return deserialized_object
Example #37
 def loss(x, c):
     x = x.astype(dtype)
     v1 = distance(x)
     v2 = crossentropy(x)
     return np.float64(v1 + c * v2)
Example #38
from typing import Optional

import numpy as np

NP_ZERO = np.float64(0)


class Vector:
    def __init__(
        self,
        x: Optional[np.float64] = NP_ZERO,
        y: Optional[np.float64] = NP_ZERO,
        z: Optional[np.float64] = NP_ZERO,
    ):
        self.x = x
        self.y = y
        self.z = z

    @property
    def magnitude(self):
        return np.sqrt(self * self)

    def __str__(self):
        return f"<{self.x:.1f} {self.y:.1f} {self.z:.1f}>"

    def __repr__(self):
        return str(self)

    def __add__(self, other):
        return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
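The excerpt ends at __add__, but magnitude above evaluates self * self, so the class presumably also defines a dot-product __mul__; a hypothetical sketch:

    def __mul__(self, other):
        # dot product, so self * self is the squared magnitude
        return self.x * other.x + self.y * other.y + self.z * other.z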
Example #39
def imaging(
        initimage, imagewin=None,
        vistable=None, amptable=None, bstable=None, catable=None,
        lambl1=-1., lambtv=-1, lambtsv=-1, normlambda=True, nonneg=True,
        logreg=False, niter=1000,
        totalflux=None, fluxconst=False,
        istokes=0, ifreq=0):
    '''
    Solve for an image from the input visibility tables (full complex
    visibilities, amplitudes, closure phases, closure amplitudes) with
    L1/TV/TSV regularization, via the L-BFGS-B solver in fortlib.dftim2d.
    '''
    # Check Arguments
    if ((vistable is None) and (amptable is None) and
            (bstable is None) and (catable is None)):
        print("Error: No data are input.")
        return -1

    # Total Flux constraint: Sanity Check
    dofluxconst = False
    if ((vistable is None) and (amptable is None) and (totalflux is None)):
        print("Error: No absolute amplitude information in the input data.")
        print("       You need to set the total flux constraint by totalflux.")
        return -1
    elif ((totalflux is None) and (fluxconst is True)):
        print("Error: No total flux is specified, although you set fluxconst=True.")
        print("       You need to set the total flux constraint by totalflux.")
        return -1
    elif ((vistable is None) and (amptable is None) and
          (totalflux is not None) and (fluxconst is False)):
        print("Warning: No absolute amplitude information in the input data.")
        print("         The total flux will be constrained, although you do not set fluxconst=True.")
        dofluxconst = True
    elif fluxconst is True:
        dofluxconst = True

    # get initial images
    Iin = np.float64(initimage.data[istokes, ifreq])

    # size of images
    Nx = np.int32(initimage.header["nx"])
    Ny = np.int32(initimage.header["ny"])
    Nyx = Nx * Ny

    # pixel coordinates
    x, y = initimage.get_xygrid(twodim=True, angunit="rad")
    x = np.float64(x)
    y = np.float64(y)
    xidx = np.int32(np.arange(Nx) + 1)
    yidx = np.int32(np.arange(Ny) + 1)
    xidx, yidx = np.meshgrid(xidx, yidx)

    # apply the imaging area
    if imagewin is None:
        print("Imaging Window: Not Specified. We solve the image on all the pixels.")
        Iin = Iin.reshape(Nyx)
        x = x.reshape(Nyx)
        y = y.reshape(Nyx)
        xidx = xidx.reshape(Nyx)
        yidx = yidx.reshape(Nyx)
    else:
        print("Imaging Window: Specified. Images will be solved on specified pixels.")
        idx = np.where(imagewin)
        Iin = Iin[idx]
        x = x[idx]
        y = y[idx]
        xidx = xidx[idx]
        yidx = yidx[idx]

    # dammy array
    dammyreal = np.zeros(1, dtype=np.float64)

    # Full Complex Visibility
    Ndata = 0
    if dofluxconst:
        print("Total Flux Constraint: set to %g" % (totalflux))
        totalfluxdata = {
            'u': [0.],
            'v': [0.],
            'amp': [totalflux],
            'phase': [0.],
            'sigma': [1.]
        }
        totalfluxdata = pd.DataFrame(totalfluxdata)
        fcvtable = pd.concat([totalfluxdata, vistable], ignore_index=True)
    else:
        print("Total Flux Constraint: disabled.")
        if vistable is None:
            fcvtable = None
        else:
            fcvtable = vistable.copy()

    if fcvtable is None:
        isfcv = False
        vfcvr = dammyreal
        vfcvi = dammyreal
        varfcv = dammyreal
    else:
        isfcv = True
        phase = np.deg2rad(np.array(fcvtable["phase"], dtype=np.float64))
        amp = np.array(fcvtable["amp"], dtype=np.float64)
        vfcvr = amp * np.cos(phase)
        vfcvi = amp * np.sin(phase)
        varfcv = np.square(np.array(fcvtable["sigma"], dtype=np.float64))
        Ndata += len(vfcvr)
        del phase, amp

    # Visibility Amplitude
    if amptable is None:
        isamp = False
        vamp = dammyreal
        varamp = dammyreal
    else:
        isamp = True
        vamp = np.array(amptable["amp"], dtype=np.float64)
        varamp = np.square(np.array(amptable["sigma"], dtype=np.float64))
        Ndata += len(vamp)

    # Closure Phase
    if bstable is None:
        iscp = False
        cp = dammyreal
        varcp = dammyreal
    else:
        iscp = True
        cp = np.deg2rad(np.array(bstable["phase"], dtype=np.float64))
        varcp = np.square(
            np.array(bstable["sigma"] / bstable["amp"], dtype=np.float64))
        Ndata += len(cp)

    # Closure Amplitude
    if catable is None:
        isca = False
        ca = dammyreal
        varca = dammyreal
    else:
        isca = True
        ca = np.array(catable["logamp"], dtype=np.float64)
        varca = np.square(np.array(catable["logsigma"], dtype=np.float64))
        Ndata += len(ca)

    # Sigma for the total flux
    if dofluxconst:
        varfcv[0] = np.square(fcvtable.loc[0, "amp"] / (Ndata - 1.))

    # Normalize Lambda
    if normlambda:
        # Guess Total Flux
        if totalflux is None:
            fluxscale = []
            if vistable is not None:
                fluxscale.append(vistable["amp"].max())
            if amptable is not None:
                fluxscale.append(amptable["amp"].max())
            fluxscale = np.max(fluxscale)
            print("Flux Scaling Factor for lambda: The expected total flux is not given.")
            print("                                The scaling factor will be %g" % (fluxscale))
        else:
            fluxscale = np.float64(totalflux)
            print("Flux Scaling Factor for lambda: The scaling factor will be %g" % (fluxscale))
        if logreg:
            lambl1_sim = lambl1 / (len(xidx)*np.log(1+fluxscale/len(xidx)))
            lambtv_sim = lambtv / (len(xidx)*np.log(1+fluxscale/len(xidx)))
            lambtsv_sim = lambtsv / (len(xidx)*np.log(1+fluxscale/len(xidx)))**2
        else:
            lambl1_sim = lambl1 / fluxscale
            lambtv_sim = lambtv / fluxscale
            lambtsv_sim = lambtsv / fluxscale**2
    else:
        lambl1_sim = lambl1
        lambtv_sim = lambtv
        lambtsv_sim = lambtsv

    # get uv coordinates and uv indice
    u, v, uvidxfcv, uvidxamp, uvidxcp, uvidxca = _get_uvlist(
        fcvtable=fcvtable, amptable=amptable, bstable=bstable, catable=catable
    )

    # run imaging
    Iout = fortlib.dftim2d.imaging(
        # Images
        iin=Iin, x=x, y=y, xidx=xidx, yidx=yidx, nx=Nx, ny=Ny,
        # UV coordinates,
        u=u, v=v,
        # Imaging Parameters
        lambl1=lambl1_sim, lambtv=lambtv_sim, lambtsv=lambtsv_sim,
        nonneg=nonneg, logreg=logreg, niter=niter,
        # Full Complex Visibilities
        isfcv=isfcv, uvidxfcv=uvidxfcv, vfcvr=vfcvr, vfcvi=vfcvi, varfcv=varfcv,
        # Visibility Ampltiudes
        isamp=isamp, uvidxamp=uvidxamp, vamp=vamp, varamp=varamp,
        # Closure Phase
        iscp=iscp, uvidxcp=uvidxcp, cp=cp, varcp=varcp,
        # Closure Amplituds
        isca=isca, uvidxca=uvidxca, ca=ca, varca=varca,
        # Following 3 parameters are for L-BFGS-B
        m=np.int32(lbfgsbprms["m"]), factr=np.float64(lbfgsbprms["factr"]),
        pgtol=np.float64(lbfgsbprms["pgtol"])
    )

    outimage = copy.deepcopy(initimage)
    outimage.data[istokes, ifreq] = 0.
    for i in np.arange(len(xidx)):
        outimage.data[istokes, ifreq, yidx[i] - 1, xidx[i] - 1] = Iout[i]
    outimage.update_fits()
    return outimage
Example #40
    estimate = np.mean(estimates[:sample_i+1], dtype=np.float64)
    bin_estimate = np.sign(np.sum(estimates[:sample_i+1], dtype=np.float64))

    # print('Estimate from %d clicks, estimate: %0.05f squared error: %0.09f error: %0.09f' % (
    #         sample_i+1, estimate,
    #         (estimate - expected_train_reward)**2.,
    #         (estimate - expected_train_reward),
    #         ))

    # print('Deviation from true mean in estimate: %0.09f' % np.mean((estimates[:sample_i] - expected_train_reward)**2.) )
    # print('Old squared error: %0.09f' % (estimate_1 - expected_train_reward)**2.)
    # print('Mean error from true mean in estimate: %0.05f' % np.mean((estimates[:sample_i] - expected_train_reward)) )
    # print('Deviation from validation mean in estimate: %0.05f' % np.mean((estimates[:click_i] - expected_vali_reward)**2.) )

    results['results'].append(
      {
        'num queries': sample_i+1,
        'binary error': float(bin_estimate != np.sign(test_diff)),
        'binary train error': float(bin_estimate != np.sign(expected_train_reward)),
        'squared error': (estimate - expected_train_reward)**2.,
        'absolute error': np.abs(estimate - expected_train_reward),
        'estimate': estimate,
        'mean squared error': np.mean((estimates[:sample_i] - expected_train_reward)**2.),
        'logging policy CTR': total_clicks/np.float64(sample_i+1),
      }
    )

print('Writing results to %s' % args.output_path)
with open(args.output_path, 'w') as f:
  json.dump(results, f)
Example #41
def _log_partition_infinity_is_accurate(self, float_dtype):
    """Tests that the partition function is accurate at infinity."""
    alpha = float_dtype(float('inf'))
    log_z_true = np.float64(0.70526025442)  # From Mathematica.
    log_z = distribution.log_base_partition_function(alpha)
    np.testing.assert_allclose(log_z, log_z_true, atol=1e-7, rtol=1e-7)
Example #42
def statistics(
        initimage, imagewin=None,
        vistable=None, amptable=None, bstable=None, catable=None,
        lambl1=1., lambtv=-1, lambtsv=1, logreg=False, normlambda=True,
        totalflux=None, fluxconst=False,
        istokes=0, ifreq=0, fulloutput=True, **args):
    '''
    Compute imaging statistics (cost function value, chi-squares, and
    regularization terms) for the given image and data tables.
    '''
    # Check Arguments
    if ((vistable is None) and (amptable is None) and
            (bstable is None) and (catable is None)):
        print("Error: No data are input.")
        return -1

    # Total Flux constraint: Sanity Check
    dofluxconst = False
    if ((vistable is None) and (amptable is None) and (totalflux is None)):
        print("Error: No absolute amplitude information in the input data.")
        print("       You need to set the total flux constraint by totalflux.")
        return -1
    elif ((totalflux is None) and (fluxconst is True)):
        print("Error: No total flux is specified, although you set fluxconst=True.")
        print("       You need to set the total flux constraint by totalflux.")
        return -1
    elif ((vistable is None) and (amptable is None) and
          (totalflux is not None) and (fluxconst is False)):
        print("Warning: No absolute amplitude information in the input data.")
        print("         The total flux will be constrained, although you do not set fluxconst=True.")
        dofluxconst = True
    elif fluxconst is True:
        dofluxconst = True

    # get initial images
    Iin = np.float64(initimage.data[istokes, ifreq])

    # size of images
    Nx = np.int32(initimage.header["nx"])
    Ny = np.int32(initimage.header["ny"])
    Nyx = Nx * Ny

    # pixel coordinates
    x, y = initimage.get_xygrid(twodim=True, angunit="rad")
    x = np.float64(x)
    y = np.float64(y)
    xidx = np.int32(np.arange(Nx) + 1)
    yidx = np.int32(np.arange(Ny) + 1)
    xidx, yidx = np.meshgrid(xidx, yidx)

    # apply the imaging area
    if imagewin is None:
        print("Imaging Window: Not Specified. We solve the image on all the pixels.")
        Iin = Iin.reshape(Nyx)
        x = x.reshape(Nyx)
        y = y.reshape(Nyx)
        xidx = xidx.reshape(Nyx)
        yidx = yidx.reshape(Nyx)
    else:
        print("Imaging Window: Specified. Images will be solved on specified pixels.")
        idx = np.where(imagewin)
        Iin = Iin[idx]
        x = x[idx]
        y = y[idx]
        xidx = xidx[idx]
        yidx = yidx[idx]

    # dummy array ("dammyreal" is a placeholder passed when a data product is absent)
    dammyreal = np.zeros(1, dtype=np.float64)

    # Full Complex Visibility
    Ndata = 0
    if vistable is None:
        fcvtable = None
    else:
        fcvtable = vistable.copy()

    if fcvtable is None:
        isfcv = False
        vfcvr = dammyreal
        vfcvi = dammyreal
        varfcv = dammyreal
    else:
        isfcv = True
        phase = np.deg2rad(np.array(fcvtable["phase"], dtype=np.float64))
        amp = np.array(fcvtable["amp"], dtype=np.float64)
        vfcvr = amp * np.cos(phase)
        vfcvi = amp * np.sin(phase)
        varfcv = np.square(np.array(fcvtable["sigma"], dtype=np.float64))
        Ndata += len(vfcvr)
        del phase, amp

    # Visibility Amplitude
    if amptable is None:
        isamp = False
        vamp = dammyreal
        varamp = dammyreal
    else:
        isamp = True
        vamp = np.array(amptable["amp"], dtype=np.float64)
        varamp = np.square(np.array(amptable["sigma"], dtype=np.float64))
        Ndata += len(vamp)

    # Closure Phase
    if bstable is None:
        iscp = False
        cp = dammyreal
        varcp = dammyreal
    else:
        iscp = True
        cp = np.deg2rad(np.array(bstable["phase"], dtype=np.float64))
        varcp = np.square(
            np.array(bstable["sigma"] / bstable["amp"], dtype=np.float64))
        Ndata += len(cp)

    # Closure Amplitude
    if catable is None:
        isca = False
        ca = dammyreal
        varca = dammyreal
    else:
        isca = True
        ca = np.array(catable["logamp"], dtype=np.float64)
        varca = np.square(np.array(catable["logsigma"], dtype=np.float64))
        Ndata += len(ca)

    # Normalize Lambda
    if normlambda:
        # Guess Total Flux
        if totalflux is None:
            fluxscale = []
            if vistable is not None:
                fluxscale.append(vistable["amp"].max())
            if amptable is not None:
                fluxscale.append(amptable["amp"].max())
            fluxscale = np.max(fluxscale)
            print("Flux Scaling Factor for lambda: The expected total flux is not given.")
            print("The scaling factor will be %g" % (fluxscale))
        else:
            fluxscale = np.float64(totalflux)
            print("Flux Scaling Factor for lambda: The scaling factor will be %g" % (fluxscale))
        if logreg:
            lambl1_sim = lambl1 / (len(xidx)*np.log(1+fluxscale/len(xidx)))
            lambtv_sim = lambtv / (len(xidx)*np.log(1+fluxscale/len(xidx)))
            lambtsv_sim = lambtsv / (len(xidx)*np.log(1+fluxscale/len(xidx)))**2
        else:
            lambl1_sim = lambl1 / fluxscale
            lambtv_sim = lambtv / fluxscale
            lambtsv_sim = lambtsv / fluxscale**2
    else:
        lambl1_sim = lambl1
        lambtv_sim = lambtv
        lambtsv_sim = lambtsv

    # get uv coordinates and uv indices
    u, v, uvidxfcv, uvidxamp, uvidxcp, uvidxca = _get_uvlist(
        fcvtable=fcvtable, amptable=amptable, bstable=bstable, catable=catable
    )

    # calculate all
    out = fortlib.dftim2d.statistics(
        # Images
        iin=Iin, x=x, y=y, xidx=xidx, yidx=yidx, nx=Nx, ny=Ny,
        # UV coordinates,
        u=u, v=v,
        # Imaging Parameters
        lambl1=lambl1_sim, lambtv=lambtv_sim, lambtsv=lambtsv_sim,
        logreg=logreg,
        # Full Complex Visibilities
        isfcv=isfcv, uvidxfcv=uvidxfcv, vfcvr=vfcvr, vfcvi=vfcvi, varfcv=varfcv,
        # Visibility Amplitudes
        isamp=isamp, uvidxamp=uvidxamp, vamp=vamp, varamp=varamp,
        # Closure Phase
        iscp=iscp, uvidxcp=uvidxcp, cp=cp, varcp=varcp,
        # Closure Amplitudes
        isca=isca, uvidxca=uvidxca, ca=ca, varca=varca
    )
    stats = collections.OrderedDict()
    # Cost and Chisquares
    stats["cost"] = out[0]
    stats["chisq"] = out[3] + out[4] + out[5] + out[6]
    stats["rchisq"] = stats["chisq"] / Ndata
    stats["isfcv"] = isfcv
    stats["isamp"] = isamp
    stats["iscp"] = iscp
    stats["isca"] = isca
    stats["chisqfcv"] = out[3]
    stats["chisqamp"] = out[4]
    stats["chisqcp"] = out[5]
    stats["chisqca"] = out[6]
    stats["rchisqfcv"] = out[3] / len(vfcvr)
    stats["rchisqamp"] = out[4] / len(vamp)
    stats["rchisqcp"] = out[5] / len(cp)
    stats["rchisqca"] = out[6] / len(ca)

    # Regularization functions
    if lambl1 > 0:
        stats["lambl1"] = lambl1
        stats["lambl1_sim"] = lambl1_sim
        stats["l1"] = out[7]
        stats["l1cost"] = out[7] * lambl1_sim
    else:
        stats["lambl1"] = 0.
        stats["lambl1_sim"] = 0.
        stats["l1"] = out[7]
        stats["l1cost"] = 0.

    if lambtv > 0:
        stats["lambtv"] = lambtv
        stats["lambtv_sim"] = lambtv_sim
        stats["tv"] = out[8]
        stats["tvcost"] = out[8] * lambtv_sim
    else:
        stats["lambtv"] = 0.
        stats["lambtv_sim"] = 0.
        stats["tv"] = out[8]
        stats["tvcost"] = 0.

    if lambtsv > 0:
        stats["lambtsv"] = lambtsv
        stats["lambtsv_sim"] = lambtsv_sim
        stats["tsv"] = out[9]
        stats["tsvcost"] = out[9] * lambtsv_sim
    else:
        stats["lambtsv"] = 0.
        stats["lambtsv_sim"] = 0.
        stats["tsv"] = out[9]
        stats["tsvcost"] = 0.

    if fulloutput:
        # gradcost
        gradcostimage = initimage.data[istokes, ifreq, :, :].copy()
        gradcostimage[:, :] = 0.
        for i in np.arange(len(xidx)):
            gradcostimage[yidx[i] - 1, xidx[i] - 1] = out[1][i]
        stats["gradcost"] = gradcostimage
        del gradcostimage

        if isfcv:
            stats["fcvampmod"] = np.sqrt(out[10] * out[10] + out[11] * out[11])
            stats["fcvphamod"] = np.angle(out[10] + 1j * out[11], deg=True)
            stats["fcvrmod"] = out[10]
            stats["fcvimod"] = out[11]
            stats["fcvres"] = out[12]
        else:
            stats["fcvampmod"] = None
            stats["fcvphamod"] = None
            stats["fcvrmod"] = None
            stats["fcvimod"] = None
            stats["fcvres"] = None

        if isamp:
            stats["ampmod"] = out[13]
            stats["ampres"] = out[14]
        else:
            stats["ampmod"] = None
            stats["ampres"] = None

        if iscp:
            stats["cpmod"] = np.rad2deg(out[15])
            stats["cpres"] = np.rad2deg(out[16])
        else:
            stats["cpmod"] = None
            stats["cpres"] = None

        if isca:
            stats["camod"] = out[17]
            stats["cares"] = out[18]
        else:
            stats["camod"] = None
            stats["cares"] = None

    return stats
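A hedged usage sketch for the statistics function above: assuming initimage and vistable objects produced by the surrounding imaging library, the returned ordered dict can be inspected like this.

stats = statistics(initimage, vistable=vistable,
                   lambl1=1., lambtv=-1, lambtsv=1, fulloutput=False)
print("cost: %g" % stats["cost"])
print("total chi-square: %g (reduced: %g)" % (stats["chisq"], stats["rchisq"]))
if stats["isfcv"]:
    print("full complex visibility chi-square: %g" % stats["chisqfcv"])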
Example #43
import numpy as np
from random import randint, seed


def RandBinList(n):
    a = [0] * n
    for b in range(n):  # fixed off-by-one: range(0, n - 1) left the last element unset
        seed(None)
        a[b] = np.float64(randint(0, 1))
    return a
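As an aside, the same random 0/1 vector can be drawn in a single vectorized call; this alternative is not part of the original example.

import numpy as np

def rand_bin_list_np(n):
    # Draw n independent 0/1 values and cast them to float64 in one shot.
    return np.random.randint(0, 2, size=n).astype(np.float64)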
Example #44
    def __init__(self,
                 name,
                 env_spec,
                 hidden_sizes=(32, 32),
                 learn_std=True,
                 init_std=1.0,
                 adaptive_std=False,
                 std_share_network=False,
                 std_hidden_sizes=(32, 32),
                 min_std=1e-6,
                 std_modifier=1.0,
                 std_hidden_nonlinearity=tf.nn.tanh,
                 hidden_nonlinearity=tf.nn.tanh,
                 output_nonlinearity=tf.identity,
                 mean_network=None,
                 std_network=None,
                 std_parametrization='exp',
                 grad_step_size=1.0,
                 stop_grad=False,
                 init_flr_full=1.0,
                 latent_dim=None):
        """
        :param env_spec:
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: Is std trainable
        :param init_std: Initial std
        :param adaptive_std:
        :param std_share_network:
        :param std_hidden_sizes: list of sizes for the fully-connected layers for std
        :param min_std: whether to make sure that the std is at least some threshold value, to avoid numerical issues
        :param std_hidden_nonlinearity:
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param std_parametrization: how the std should be parametrized. There are a few options:
            - exp: the logarithm of the std will be stored, and applied a exponential transformation
            - softplus: the std will be computed as log(1+exp(x))
        :param grad_step_size: the step size taken in the learner's gradient update, sample uniformly if it is a range e.g. [0.1,1]
        :param stop_grad: whether or not to stop the gradient through the gradient.
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)
        self.latent_dim = latent_dim
        self.init_flr_full = init_flr_full
        self.obs_dim = env_spec.observation_space.flat_dim
        self.action_dim = env_spec.action_space.flat_dim
        self.n_hidden = len(hidden_sizes)
        self.hidden_nonlinearity = hidden_nonlinearity
        self.output_nonlinearity = output_nonlinearity
        self.input_shape = (
            None,
            self.obs_dim + self.latent_dim,
        )

        self.step_size = grad_step_size
        self.stop_grad = stop_grad
        if type(self.step_size) == list:
            raise NotImplementedError('removing this since it didnt work well')

        # create network
        if mean_network is None:
            self.all_params = self.create_MLP(  # TODO: this should not be a method of the policy! --> helper
                name="mean_network",
                output_dim=self.action_dim,
                hidden_sizes=hidden_sizes,
            )
            self.input_tensor, _ = self.forward_MLP(
                'mean_network',
                self.all_params,
                reuse=None  # Need to run this for batch norm
            )
            forward_mean = lambda x, params, is_train: self.forward_MLP(
                'mean_network', params, input_tensor=x, is_training=is_train)[1
                                                                              ]
        else:
            raise NotImplementedError('Not supported.')

        if std_network is not None:
            raise NotImplementedError('Not supported.')
        else:
            if adaptive_std:
                raise NotImplementedError('Not supported.')
            else:
                if std_parametrization == 'exp':
                    init_std_param = np.log(init_std)
                elif std_parametrization == 'softplus':
                    init_std_param = np.log(np.exp(init_std) - 1)
                else:
                    raise NotImplementedError
                self.all_params['std_param'] = make_param_layer(
                    num_units=self.action_dim,
                    param=tf.constant_initializer(init_std_param),
                    name="output_std_param",
                    trainable=learn_std,
                )
                self.all_params['std_param_stepsize'] = tf.Variable(
                    self.init_flr_full *
                    tf.ones_like(self.all_params['std_param']),
                    name="output_std_param_stepsize")

                forward_std = lambda x, params: forward_param_layer(
                    x, params['std_param'])
            self.all_param_vals = None

            # unify forward mean and forward std into a single function
            self._forward = lambda obs, params, is_train: (forward_mean(
                obs, params, is_train), forward_std(obs, params))

            self.std_parametrization = std_parametrization

            if std_parametrization == 'exp':
                min_std_param = np.log(min_std)
            elif std_parametrization == 'softplus':
                min_std_param = np.log(np.exp(min_std) - 1)
            else:
                raise NotImplementedError

            self.min_std_param = min_std_param
            self.std_modifier = np.float64(std_modifier)

            self._dist = DiagonalGaussian(self.action_dim)

            self._cached_params = {}

            super(MAMLGaussianMLPPolicy, self).__init__(env_spec)

            dist_info_sym = self.dist_info_sym(self.input_tensor,
                                               dict(),
                                               is_training=False)
            mean_var = dist_info_sym["mean"]
            log_std_var = dist_info_sym["log_std"]

            # pre-update policy
            self._init_f_dist = tensor_utils.compile_function(
                inputs=[self.input_tensor],
                outputs=[mean_var, log_std_var],
            )
            self._cur_f_dist = self._init_f_dist
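A small standalone sketch of the two std parametrizations named in the docstring above ('exp' and 'softplus'), showing that the stored parameter round-trips back to the requested initial std; plain numpy stands in for the TensorFlow graph.

import numpy as np

init_std = 1.0
p_exp = np.log(init_std)               # 'exp': store log(std)
assert np.isclose(np.exp(p_exp), init_std)
p_sp = np.log(np.exp(init_std) - 1)    # 'softplus': store the pre-activation
assert np.isclose(np.log(1 + np.exp(p_sp)), init_std)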
Example #45
    def generateGroupAdditivityValues(self, trainingSet):
        """
        Generate the group additivity values using the given `trainingSet`,
        a list of 2-tuples of the form ``(template, kinetics)``. You must also
        specify the `kunits` for the family and the `method` to use when
        generating the group values. Returns ``True`` if the group values have
        changed significantly since the last time they were fitted, or ``False``
        otherwise.
        """
        # keep track of previous values so we can detect if they change
        old_entries = dict()
        for label, entry in list(self.entries.items()):
            if entry.data is not None:
                old_entries[label] = entry.data

        # Determine a complete list of the entries in the database, sorted as
        # in the tree
        groupEntries = self.top[:]

        for entry in self.top:
            # Entries in the TS_group.py tree
            groupEntries.extend(self.descendants(entry))

        # Determine a unique list of the groups we will be able to fit
        # parameters for
        groupList = []
        for template, distances in trainingSet:
            for group in template:
                if isinstance(group, str):
                    group = self.entries[group]
                if group not in self.top:
                    groupList.append(group)
                    groupList.extend(self.ancestors(group)[:-1])
        groupList = list(set(groupList))
        groupList.sort(key=lambda x: x.index)

        if True:  # should remove this IF block, as we only have one method.
            # Initialize dictionaries of fitted group values and uncertainties
            groupValues = {}
            groupUncertainties = {}
            groupCounts = {}
            groupComments = {}
            for entry in groupEntries:
                groupValues[entry] = []
                groupUncertainties[entry] = []
                groupCounts[entry] = []
                groupComments[entry] = set()

            # Generate least-squares matrix and vector
            A = []
            b = []

            # ['d12', 'd13', 'd23']
            distance_keys = sorted(trainingSet[0][1].distances.keys())
            distance_data = []
            for template, distanceData in trainingSet:
                d = [distanceData.distances[key] for key in distance_keys]
                distance_data.append(d)

                # Create every combination of each group and its ancestors with
                # each other
                combinations = []
                for group in template:
                    groups = [group]
                    # Groups from the group.py tree
                    groups.extend(self.ancestors(group))
                    combinations.append(groups)
                combinations = getAllCombinations(combinations)
                # Add a row to the matrix for each combination
                for groups in combinations:
                    Arow = [1 if group in groups else 0 for group in groupList]
                    Arow.append(1)
                    brow = d
                    A.append(Arow)
                    b.append(brow)

                    for group in groups:
                        if isinstance(group, str):
                            group = self.entries[group]
                        groupComments[group].add("{0!s}".format(template))

            if len(A) == 0:
                logging.warning(
                    'Unable to fit kinetics groups for family "{0}"; no valid data found.'.format(
                        self.label))
                return
            A = np.array(A)
            b = np.array(b)
            distance_data = np.array(distance_data)

            x, residues, rank, s = np.linalg.lstsq(A, b, rcond=None)

            for t, distance_key in enumerate(distance_keys):

                # Determine error in each group
                stdev = np.zeros(len(groupList) + 1, np.float64)
                count = np.zeros(len(groupList) + 1, int)  # np.int is deprecated; use the builtin

                for index in range(len(trainingSet)):
                    template, distances = trainingSet[index]
                    d = np.float64(distance_data[index, t])
                    dm = x[-1, t] + sum([x[groupList.index(group), t]
                                         for group in template if group in groupList])
                    variance = (dm - d)**2
                    for group in template:
                        groups = [group]
                        groups.extend(self.ancestors(group))
                        for g in groups:
                            if g.label not in [top.label for top in self.top]:
                                ind = groupList.index(g)
                                stdev[ind] += variance
                                count[ind] += 1
                    stdev[-1] += variance
                    count[-1] += 1

                import scipy.stats
                ci = np.zeros(len(count))
                for i in range(len(count)):
                    if count[i] > 1:
                        stdev[i] = np.sqrt(stdev[i] / (count[i] - 1))
                        ci[i] = scipy.stats.t.ppf(
                            0.975, count[i] - 1) * stdev[i]
                    else:
                        stdev[i] = np.nan  # None cannot be stored in a float array; NaN is the intent
                        ci[i] = np.nan
                # Update dictionaries of fitted group values and uncertainties
                for entry in groupEntries:
                    if entry == self.top[0]:
                        groupValues[entry].append(x[-1, t])
                        groupUncertainties[entry].append(ci[-1])
                        groupCounts[entry].append(count[-1])
                    elif entry.label in [group.label for group in groupList]:
                        index = [group.label for group in groupList].index(
                            entry.label)
                        groupValues[entry].append(x[index, t])
                        groupUncertainties[entry].append(ci[index])
                        groupCounts[entry].append(count[index])
                    else:
                        groupValues[entry] = None
                        groupUncertainties[entry] = None
                        groupCounts[entry] = None

            # Store the fitted group values and uncertainties on the associated
            # entries
            for entry in groupEntries:
                if groupValues[entry] is not None:
                    if not any(
                        np.isnan(
                            np.array(
                                groupUncertainties[entry]))):
                        # should be entry.data.* (e.g.
                        # entry.data.uncertainties)
                        uncertainties = np.array(groupUncertainties[entry])
                        uncertaintyType = '+|-'
                    else:
                        uncertainties = {}
                    # should be entry.*
                    shortDesc = "Fitted to {0} distances.\n".format(
                        groupCounts[entry][0])
                    longDesc = "\n".join(groupComments[entry])  # keyed by entry, not entry.label
                    distances_dict = {key: distance for key, distance in zip(
                        distance_keys, groupValues[entry])}
                    uncertainties_dict = {
                        key: distance for key, distance in zip(
                            distance_keys, uncertainties)}
                    entry.data = DistanceData(
                        distances=distances_dict,
                        uncertainties=uncertainties_dict)
                    entry.shortDesc = shortDesc
                    entry.longDesc = longDesc
                else:
                    entry.data = DistanceData()

        changed = False
        for label, entry in list(self.entries.items()):
            if entry.data is not None:
                continue  # because this is broken:
                if label in old_entries:
                    old_entry = old_entries[label][label][0]
                    for key, distance in entry.data.items():
                        diff = 0
                        for k in range(3):
                            diff += abs(distance[0][k] / old_entry[k] - 1)
                        if diff > 0.01:
                            changed = True
                            entry.history.append(event)
            else:
                changed = True
                entry.history.append(event)
        return True  # because the thing above is broken
        return changed
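The fit above is ordinary least squares: each row of A is a 0/1 indicator of the groups appearing in one template plus a trailing 1 for the family-wide baseline, and each row of b holds the observed distances. A toy sketch with hypothetical data (two groups, three observations, one distance key):

import numpy as np

A = np.array([[1, 0, 1],    # template containing group 0
              [0, 1, 1],    # template containing group 1
              [1, 1, 1]],   # template containing both groups
             dtype=np.float64)
b = np.array([1.4, 1.6, 1.9])  # observed distances
x, residues, rank, s = np.linalg.lstsq(A, b, rcond=None)
print("group contributions:", x[:-1], "baseline:", x[-1])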
Example #46
#
# Written by Alejandro Aguilar ([email protected])
#
# CMPS 142
# Homework 1, problem 5
#
# This is an implementation of Stochastic Gradient descent
#-------------------------------------------------------------------------------
import numpy as np
from random import *

randBinList = lambda n: [np.float64(randint(0, 1)) for b in range(1, n + 1)]


def RandBinList(n):
    a = [0] * n
    for b in range(n):  # fixed off-by-one: range(0, n - 1) left the last element unset
        seed(None)
        a[b] = np.float64(randint(0, 1))
    return a


def uniformBinList(n):
    a = [0] * n
    for b in range(0, n):
        seed(None)
        num = np.float64(uniform(0, 1))
        if num > 0.5:
            a[b] = 1
        else:
            a[b] = -1
    return a
Example #47
import csv

import numpy as np
from numpy.linalg import inv

filename_r_train = 'normal_train_data.csv'
filename_r_test = 'normal_test_data.csv'

x_train = []
y_train = []
x_test = []
y_test = []

# read train data
with open( filename_r_train ) as csv_file:
    csv_reader = csv.reader( csv_file, delimiter = ',' )
    #np.float64(row[0]),
    for row in csv_reader:
        x_train.append([np.float64(v) for v in row[1:9]])
        y_train.append( np.float64(row[0]) )

# read test data
with open( filename_r_test ) as csv_file:
    csv_reader = csv.reader( csv_file, delimiter = ',' )

    for row in csv_reader:
        x_test.append([np.float64(v) for v in row[1:9]])
        y_test.append( np.float64(row[0]) )

print( 'DONE READING' )

def closed_form(x, y):
    #print( x.shape() )
    #temp_x = [ [x[i] * 2 .append(1)] for i in range( len(x) ) ]
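The body of closed_form is cut off in the source. Given the inv import at the top and the appended bias term hinted at in the commented-out line, it presumably computes the normal-equations estimator; the following reconstruction is an assumption, not the original code.

import numpy as np
from numpy.linalg import inv

def closed_form_sketch(x, y):
    # Append a bias column of ones, then solve w = (X^T X)^-1 X^T y.
    X = np.hstack([np.asarray(x, dtype=np.float64), np.ones((len(x), 1))])
    y = np.asarray(y, dtype=np.float64)
    return inv(X.T @ X) @ (X.T @ y)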
Example #48
    rval, image = cap.read()
    if rval:
        orig = image.copy()

        # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
        image = image.astype("float") / 255.0
        image = img_to_array(image)
        image = np.expand_dims(image, axis=0)

        tic = time.time()
        fire_prob = model.predict(image)[0][0] * 100
        toc = time.time()
        print("Time taken = ", toc - tic)
        print("FPS: ", 1 / np.float64(toc - tic))
        print("Fire Probability: ", fire_prob)
        print("Predictions: ", model.predict(image))
        print(image.shape)

        label = "Fire Probability: " + str(fire_prob)
        cv2.putText(orig, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)

        cv2.imshow("Output", orig)

        key = cv2.waitKey(10)
        if key == 27:  # exit on ESC
            break
    else:
        break
Example #49
def test_exact_float_check(self):
    code = 'def exact_float_check(i): return i'
    return self.run_test(code, np.float64(1.1), exact_float_check=[float])
Example #50
np.uint16(A())  # E: incompatible type
np.uint32(A())  # E: incompatible type
np.uint64(A())  # E: incompatible type

np.void("test")  # E: incompatible type

np.generic(1)  # E: Cannot instantiate abstract class
np.number(1)  # E: Cannot instantiate abstract class
np.integer(1)  # E: Cannot instantiate abstract class
np.signedinteger(1)  # E: Cannot instantiate abstract class
np.unsignedinteger(1)  # E: Cannot instantiate abstract class
np.inexact(1)  # E: Cannot instantiate abstract class
np.floating(1)  # E: Cannot instantiate abstract class
np.complexfloating(1)  # E: Cannot instantiate abstract class
np.character("test")  # E: Cannot instantiate abstract class
np.flexible(b"test")  # E: Cannot instantiate abstract class

np.float64(value=0.0)  # E: Unexpected keyword argument
np.int64(value=0)  # E: Unexpected keyword argument
np.uint64(value=0)  # E: Unexpected keyword argument
np.complex128(value=0.0j)  # E: Unexpected keyword argument
np.str_(value='bob')  # E: No overload variant
np.bytes_(value=b'test')  # E: No overload variant
np.void(value=b'test')  # E: Unexpected keyword argument
np.bool_(value=True)  # E: Unexpected keyword argument
np.datetime64(value="2019")  # E: No overload variant
np.timedelta64(value=0)  # E: Unexpected keyword argument

np.bytes_(b"hello", encoding='utf-8')  # E: No overload variant
np.str_("hello", encoding='utf-8')  # E: No overload variant
Example #51
def nrlmsise00(doy, year, sec, alt, g_lat, g_long, lst, f107A, f107, ap):
    """
    nrlmsise00 calculates atmospheric quantities using the NRLMSISE-00
    atmosphere published in 2001 by Mike Picone, Alan Hedin, and Doug Drob.
    Originally written in FORTRAN, it was later implemented in C by Dominik 
    Brodowski.
    
    This function calls a Python port of Brodowski's C implementation originally
    written by Joshua Milas in 2013. This software was released under an MIT 
    license (see the license file in the atmosphere_models directory).
    
    The NRLMSISE-00 model uses a number of switches (contained in the flags 
    class) to modify the model output.  At the moment, these defaults are hard-
    wired into PETra. Later revisions will give the user the ability to select
    these switches. For more detailed information about the inputs/outputs/switches
    used in this model, the user is directed to the docstrings of the functions
    contained in the model files (nrlmsise_00_header.py and nrlmsise_00.py).
    
    Inputs: 
        doy: day of year
        year: year (currently ignored)
        sec: seconds in day
        alt: altitude
        g_lat: geodetic latitude
        g_long: geodetic longitude
        lst: local apparent solar time (hours)
        f107A: 81 day average of F10.7 flux (centred on doy)
        f107: daily f10.7 flux (for previous day)
        ap: magnetic index (daily)
     
    Outputs:
        rho: density at the requested altitude
        pressure_mixture: pressure at the requested altitude
        temperature: temperature at the requested altitude
        R_mixture: the gas constant of the mixture
        mean_free_path: mean free path of the air at the requested altitude. 
                        In contrast to the other outputs of this function, the
                        mean free path calculation assumes a single molecule 
                        gas (assumed to be an 'average' air molecule)
        eta: viscosity (calculated using Sutherland's law)
        molecular_weight_mixture: the molecular weight of the air at the
                                  requested altitude
        SoS: speed of sound (assume ratio of specific heats is constant 1.4
             everywhere in the atmosphere)
    
    """

    output = nrlmsise_output()
    Input = nrlmsise_input()
    #    output = [nrlmsise_output() for _ in range(17)]
    #    Input = [nrlmsise_input() for _ in range(17)]
    flags = nrlmsise_flags()
    aph = ap_array()  # For more detailed ap data (i.e more than daily)

    flags.switches[0] = 1  # to have results in m rather than cm
    for i in range(1, 24):
        flags.switches[i] = 1

    # below 80 km solar & magnetic effects not well established so set to defaults
    if alt < 80e3:
        f107 = 150.
        f107A = 150.
        ap = 4.

    # fill out Input class
    Input.year = year
    Input.doy = doy
    Input.sec = sec
    Input.alt = alt * 1e-3  #change input to km
    Input.g_lat = g_lat * 180 / np.pi
    Input.g_long = g_long * 180 / np.pi
    Input.lst = lst
    Input.f107A = f107A
    Input.f107 = f107
    Input.ap = ap

    if alt > 500e3:
        gtd7d(Input, flags, output)
    else:
        gtd7(Input, flags, output)

    d = output.d
    t = output.t
    """
    DEFAULT OUTPUT VARIABLES:
    d[0] - HE NUMBER DENSITY(CM-3)
    d[1] - O NUMBER DENSITY(CM-3)
    d[2] - N2 NUMBER DENSITY(CM-3)
    d[3] - O2 NUMBER DENSITY(CM-3)
    d[4] - AR NUMBER DENSITY(CM-3)                       
    d[5] - TOTAL MASS DENSITY(GM/CM3) [includes d[8] in td7d]
    d[6] - H NUMBER DENSITY(CM-3)
    d[7] - N NUMBER DENSITY(CM-3)
    d[8] - Anomalous oxygen NUMBER DENSITY(CM-3)
    t[0] - EXOSPHERIC TEMPERATURE
    t[1] - TEMPERATURE AT ALT
    """

    #Now process output to get required values
    kb = 1.38064852e-23  # Boltzmann constant (m**2 kg)/(s**2 K)
    Na = 6.022140857e26  # avogadro number (molecules per kilomole)
    R0 = kb * Na  # universal gas constant

    #Molecular weights of different components (kg/kmole)
    molecular_weights = np.zeros(8)
    molecular_weights[0] = 4.002602  #He
    molecular_weights[1] = 15.9994  #O
    molecular_weights[2] = 28.0134  #N2
    molecular_weights[3] = 31.9988  #O2
    molecular_weights[4] = 39.948  #AR
    molecular_weights[5] = 1.00794  #H
    molecular_weights[6] = 14.0067  #N
    molecular_weights[7] = 15.9994  #anomalous O

    # Calculate partial pressures
    partial_p = np.zeros(8)
    partial_p[0] = d[0] * kb * t[1]  #He
    partial_p[1] = d[1] * kb * t[1]  #O
    partial_p[2] = d[2] * kb * t[1]  #N2
    partial_p[3] = d[3] * kb * t[1]  #O2
    partial_p[4] = d[4] * kb * t[1]  #AR
    partial_p[5] = d[6] * kb * t[1]  #H
    partial_p[6] = d[7] * kb * t[1]  #N
    partial_p[7] = d[8] * kb * t[1]  #anomalous O

    #Assuming perfect gas, calculate atmospheric pressure
    pressure_mixture = np.sum(partial_p)

    temperature = t[1]

    mole_fraction = np.divide(partial_p, pressure_mixture)

    molecular_weight_mixture = np.sum(
        np.multiply(mole_fraction, molecular_weights))  #kg/kmol

    mass_fractions = np.multiply(
        mole_fraction, np.divide(molecular_weights, molecular_weight_mixture))

    specific_gas_constants = R0 / molecular_weights

    R_mixture = np.sum(np.multiply(mass_fractions, specific_gas_constants))

    number_density_mixture = np.sum(d) - d[5]  # exclude d[5], the total mass density, from the number-density sum

    mean_free_path = (np.sqrt(2) * np.pi * 4.15e-10**2 *
                      number_density_mixture)**-1

    eta = np.float64(
        1.458e-6 * temperature**1.5 /
        (temperature + 110.4))  # dynamic viscosity via sutherland law

    SoS = np.float64(np.sqrt(1.4 * R_mixture * temperature))

    rho = d[5]

    return rho, pressure_mixture, temperature, R_mixture, mean_free_path, eta, molecular_weight_mixture, SoS
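A hedged usage sketch for nrlmsise00: latitude and longitude go in as radians (the function converts to degrees internally) and altitude in metres. It assumes numpy (np) and the model imports used inside the function are available; the input values below are illustrative only.

rho, p, T, R, mfp, eta, mw, sos = nrlmsise00(
    doy=172, year=2019, sec=43200., alt=400e3,
    g_lat=np.deg2rad(45.), g_long=np.deg2rad(-75.),
    lst=12., f107A=150., f107=150., ap=4.)
print("density: %g kg/m^3, temperature: %g K" % (rho, T))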
Example #52
        for month in monthAhead:
            for feat in features:
                yFiles = sorted(glob.glob(dirArray[dataInd][0]))  # Change Cell
                xFiles = sorted(glob.glob(dirArray[dataInd][0]))  # Change Cell

                DataX, DataY = makingDataset(xFiles, yFiles, lag, month)
                print(dirArray[dataInd][1])
                print()

                DataX = np.array(DataX)
                DataY = np.array(DataY)
                print(DataX.shape)
                print(DataY.shape)
                DataXX, DataYY, pixelPOS = makingDataset2(DataX, DataY, feat)

                DataXX = np.float64(np.array(DataXX))
                DataYY = np.sqrt(np.float64(np.array(DataYY)))

                ################################## splitting the Data #####################################
                XtestImg = DataXX[-(70 * 40 * 60):]
                YtestImg = DataYY[-(70 * 40 * 60):]
                indexImg = pixelPOS[-(70 * 40 * 60):]

                Xtrain = DataXX[0:-(70 * 40 * 60)]
                Ytrain = DataYY[0:-(70 * 40 * 60)]

                scaler.fit(Xtrain)
                Xtrain = scaler.transform(Xtrain)
                XtestImg = scaler.transform(XtestImg)
                ###################################################################
                print(DataXX.shape)
Example #53
def stEnergy(frame):
    """Computes signal energy of frame"""
    return numpy.sum(frame**2) / numpy.float64(len(frame))
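A quick check of stEnergy on a synthetic frame (hypothetical usage; any 1-D numpy array works):

import numpy
frame = numpy.array([0.5, -0.5, 0.25, -0.25])
print(stEnergy(frame))  # mean squared sample value: 0.15625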
Example #54
def setPrimaryElevationAngle(self, primaryElevationAngle):
    self.primaryElevationAngle = numpy.float64(primaryElevationAngle)
Example #55
def read_float64(field: str) -> np.float64:
    """Read a float64."""
    return np.float64(field) if field != "" else np.nan
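Usage is straightforward; empty fields map to NaN so downstream numpy code can mask them (hypothetical calls):

import numpy as np
assert read_float64("3.5") == np.float64(3.5)
assert np.isnan(read_float64(""))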
Example #56
    #Status of 1 means that the particle is a stable product
    stable_cond = frame["Status"] == 1
    #All even leptons are neutrinos which we can't measure
    notNeutrino_cond = frame["PID"] % 2 == 1
    parton_cond = np.abs(frame["PID"]) <= 8  # note: computed but not applied in the filter below
    #Get all entries that satisfy the conditions
    frame = frame[stable_cond & notNeutrino_cond]
    #Drop the Status frame since we only needed it to see if the particle was stable
    frame = frame.drop(["Status"], axis=1)
    return frame


#Define the speed of light C
#Turns out C=1 since everything is in natural units :/
#C = np.float64(2.99792458e8); #m/s
mass_of_electron = np.float64(0.0005109989461)  # GeV/c^2
mass_of_muon = np.float64(0.1056583715)  # GeV/c^2
#def four_vec_func(inputs):
#        E = inputs[0]
#        Eta = inputs[1]
#        Phi = inputs[2]
#        PT = inputs[3]
#        E_over_c = E/C
#        px = E_over_c * np.sin(Phi) * np.cos(Eta)
#        py = E_over_c * np.sin(Phi) * np.sin(Eta)
#        pz = E_over_c * np.cos(Phi)
#        print(np.sqrt(px*px + py*py), PT/C)
#        return [E_over_c, px, py, pz]

#def getPandasPhotons(filename):
#    four_vec_inputs, dummy = leaves_from_obj("Photon", ["PT", "Eta", "Phi"])
Example #57
def z(self):
    return numpy.float64(self._data[2])
Example #58
def stZCR(frame):
    """Computes zero crossing rate of frame"""
    count = len(frame)
    countZ = numpy.sum(numpy.abs(numpy.diff(numpy.sign(frame)))) / 2
    return (numpy.float64(countZ) / numpy.float64(count - 1.0))
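A worked check of stZCR: the frame below flips sign at every step, so all count - 1 transitions are crossings and the rate is exactly 1.0 (hypothetical usage):

import numpy
frame = numpy.array([1.0, -1.0, 1.0, -1.0])
# diff(sign) is +/-2 at each of the 3 transitions: countZ = 3, rate = 3/3
print(stZCR(frame))  # 1.0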
Example #59
def y(self):
    return numpy.float64(self._data[1])
Example #60
def US62_76(r, RE):
    """
    US62_76 is a very simple atmosphere model that uses the US76 standard 
    atmosphere below 80 km and the US62 standard atmosphere above 80km
    
    Inputs: 
        r: altitude 
        RE: radius of the Earth
    
    Outputs:
        rho: density 
        P: pressure
        T: temperature
        mfp: mean free path
        eta: viscosity (Sutherland's law)
        MolW: molecular weight
        SoS: speed of sound  
    """
    #Some constants:
    #RE = 6378.137e3
    Na = np.float64(6.0220978e23)
    sig = np.float64(3.65e-10)

    # Sea level standard values:
    P0 = 101325.0  #Pa
    T0 = 288.15  #K
    M = np.array([
        28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.962, 28.962,
        28.88, 28.56, 28.07, 26.92, 26.66, 26.4, 25.85, 24.70, 22.66, 19.94,
        17.94, 16.84, 16.17
    ])  # Molecular masses with altitude g/mol
    R0 = 8.31432  # J/mol-K
    g0 = 9.806658  # m/s2
    GM_R = g0 * M / R0  # GM/R K/km
    Z = (r - RE) * 1e-3  # convert radius in m to altitude in km
    H = me2po(RE, Z)  # geopotential altitude

    BLH = np.array([
        0., 11., 20., 32., 47., 51., 71.,
        me2po(RE, 86.),
        me2po(RE, 100.),
        me2po(RE, 110.),
        me2po(RE, 120.),
        me2po(RE, 150.),
        me2po(RE, 160.),
        me2po(RE, 170.),
        me2po(RE, 190.),
        me2po(RE, 230.),
        me2po(RE, 300.),
        me2po(RE, 400.),
        me2po(RE, 500.),
        me2po(RE, 600.),
        me2po(RE, 700.)
    ])

    L = np.array([
        0., -6.5, 0., 1., 2.8, 0., -2.8, -2., 1.693, 5., 10., 20., 15., 10.,
        7., 5., 4., 3.3, 2.6, 1.7, 1.1
    ])
    BLT = np.zeros((21, ))
    BLP = np.zeros((21, ))
    BLT[0] = T0
    BLP[0] = P0

    for i in range(0, 20):
        # Calculate base temperatures
        BLT[i + 1] = BLT[i] + L[i + 1] * (BLH[i + 1] - BLH[i])

        # Calculate base pressures
        if (i + 1 == 0) or (i + 1 == 2) or (i + 1 == 5):
            BLP[i + 1] = BLP[i] * np.exp(-GM_R[i + 1] *
                                         (BLH[i + 1] - BLH[i]) / BLT[i])
        else:
            BLP[i + 1] = BLP[i] * (
                (BLT[i] + L[i + 1] *
                 (BLH[i + 1] - BLH[i])) / BLT[i])**(-GM_R[i + 1] / L[i + 1])

        # Calculate values at requested altitude
        if H > BLH[i] and H <= BLH[i + 1]:
            # Molecular weight (interpolate)
            MolW = M[i] + (M[i + 1] - M[i]) * (H - BLH[i]) / (BLH[i + 1] -
                                                              BLH[i])
            gmrtemp = g0 * MolW / R0

            # Molecular scale Temperature
            T = np.float64(BLT[i] + L[i + 1] * (H - BLH[i]))
            # Convert molecular scale temperature to kinetic temperature
            T = MolW * T / M[0]

            # Pressure
            if i + 1 == 0 or i + 1 == 2 or i + 1 == 5:
                P = np.float64(BLP[i] * np.exp(-gmrtemp *
                                               (H - BLH[i]) / BLT[i]))
            else:
                P = np.float64(
                    BLP[i] * ((BLT[i] + L[i + 1] *
                               (H - BLH[i])) / BLT[i])**(-gmrtemp / L[i + 1]))

            # Density
            rho = np.float64(MolW * 1e-3 * P / (R0 * T))
            mfp = np.float64(
                MolW * 1e-3 /
                (2**0.5 * np.pi * sig**2 * rho * Na))  # mean free path
            eta = np.float64(
                1.458e-6 * T**1.5 /
                (T + 110.4))  # dynamic viscosity via sutherland law
            SoS = np.float64(np.sqrt(1.4 * 287.085 * T))

    return rho, P, T, mfp, eta, MolW, SoS
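A hedged usage sketch for US62_76; the RE value is the one quoted in the commented-out constant above, and the helper me2po is assumed to be importable from the same module.

RE = 6378.137e3                # Earth radius in metres
r = RE + 50e3                  # a point 50 km above the surface
rho, P, T, mfp, eta, MolW, SoS = US62_76(r, RE)
print("rho = %g kg/m^3, T = %g K, SoS = %g m/s" % (rho, T, SoS))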