Example #1
		def fisher(theta, i, j = None):
			"""
			Fisher information using the first order derivative

			:param theta: the theta of the density
			:param i: The ith component of the diagonal of the fisher information matrix will be returned (if j is None)
			:param j: The i,j th component of the fisher information matrix will be returned
			"""

			# Bring it into a form that we can differentiate
			fh = lambda ti, t0, tn, x: np.log(self.density(x, list(t0) + [ti] + list(tn)))

			# The derivative
			f_d_theta_i = lambda x: derivative(fh, theta[i], dx=1e-5, n=1, args=(theta[0:i], theta[i + 1:], x))

			if j is not None:
				f_d_theta_j = lambda x: derivative(fh, theta[j], dx=1e-5, n=1, args=(theta[0:j], theta[j + 1:], x))
				f = lambda x: np.float128(0) if fabs(self.density(x, theta)) < 1e-5 else f_d_theta_i(x) * f_d_theta_j(x) * self.density(x, theta)
			else:
				# The function to integrate
				f = lambda x: np.float128(0) if fabs(self.density(x, theta)) < 1e-5 else f_d_theta_i(x) ** 2 * self.density(x, theta)


			#First order
			result = integrate(f, self.support)
			return result
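A standalone sanity check of the same recipe (a sketch, not part of the original class): for a normal density with unit variance, the Fisher information in the mean is exactly 1. A hand-rolled central difference stands in for the derivative helper (presumably scipy.misc.derivative) used above:

import numpy as np
from scipy import integrate

def log_density(x, mu):
    # log of the N(mu, 1) density
    return -0.5 * (x - mu) ** 2 - 0.5 * np.log(2.0 * np.pi)

def fisher_diag(mu, support=(-30.0, 30.0), dx=1e-5):
    # central difference of the log-density in the parameter (the score)
    score = lambda x: (log_density(x, mu + dx) - log_density(x, mu - dx)) / (2.0 * dx)
    # integrate score(x)**2 * p(x) over the support
    f = lambda x: score(x) ** 2 * np.exp(log_density(x, mu))
    return integrate.quad(f, *support)[0]

print(fisher_diag(0.0))   # ~1.0, the exact Fisher information of N(mu, 1)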
Example #2
def sec_ord_corr(chains,t1,emission_symbol_1,t2,emission_symbol_2):
    
    ones_at_t1_t2 = 0
    ones_at_t1 = 0
    ones_at_t2 = 0
    
    for i in range(0,len(chains)): 
        a_t1 = chains[i][t1]
        b_t1 = emission_symbol_1
        
        a_t2 = chains[i][t2]
        b_t2 = emission_symbol_2

        if a_t1 == b_t1:
            ones_at_t1 += 1
        
        if a_t2 == b_t2:
            ones_at_t2 += 1

        if a_t1 == b_t1 and a_t2 == b_t2:
            ones_at_t1_t2 += 1
    try:
        soc = (np.float128(ones_at_t1_t2) / ones_at_t1) /  (np.float128(ones_at_t2) / len(chains))
    except ZeroDivisionError:
        soc = np.Inf
    return soc
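A quick usage sketch, assuming sec_ord_corr above is in scope and numpy is imported as np; chains can be plain strings of emission symbols, since only indexing is used. Under independence the ratio is close to 1:

import numpy as np

chains = ["11", "10", "01", "11"]
# ones_at_t1 = 3, ones_at_t2 = 3, ones_at_t1_t2 = 2
# soc = (2/3) / (3/4) = 8/9
print(sec_ord_corr(chains, 0, "1", 1, "1"))   # ~0.889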
Example #3
    def calcPosteriorProb(self, dir, priors, condProb):

         totalCmap = {}
         # vocabCalc = VocabularyCalculator()
         # vocabCalc.calculate(dir)
         #
         # print "Test class vocab count: ", vocabCalc.getClassVocabCount()
         testDir = "earn"
         for classDir in os.listdir(dir):
             # if classDir == testDir:
             if 1:
                 numberOfFiles = len(os.listdir(os.path.join(dir, classDir)))
                 correctClassEstimation = 0
                 for filename in os.listdir(os.path.join(dir, classDir)):
                     docVocab = {}
                     currentCmap = {}
                     testFile = open(os.path.join(dir, classDir, filename), "r")

                     for line in testFile:
                         words = line.split()

                         for word in words:
                             if word in docVocab:
                                 docVocab[word] += 1
                             else:
                                 docVocab[word] = 1


                     # calculate currentCmap
                     totalCmap[filename] = {}
                     tempFile = open("temp.txt", "w")
                     for trainClass, classPrior in priors.iteritems():
                         prod = np.float128(1.0)

                         for word, count in docVocab.iteritems():
                             # print word, count
                             if word in condProb[trainClass]:
                                 prod = prod * condProb[trainClass][word]
                                 # print prod
                                 # if condProb[trainClass][word] == 0.0:
                                 # print trainClass, word, condProb[trainClass][word]
                                 tempFile.write(trainClass + " " + word + " " + str(condProb[trainClass][word]) + " "
                                                + str(prod))
                                 tempFile.write("\n")


                         currentCmap[trainClass] = prod * np.float128(classPrior)

                     totalCmap[filename] = currentCmap
                     estimatedClass = max(currentCmap.iteritems(), key=operator.itemgetter(1))[0]
                     # print "Actual file class: ", classDir, ", Estimated file class: " , estimatedClass
                     # print "\n"
                     if(estimatedClass == testDir):
                         correctClassEstimation += 1

                 accuracy = (correctClassEstimation / float(numberOfFiles)) * 100
                 print "Accuracy for ", classDir, ": ", accuracy, "%"


         return totalCmap
Example #4
def alignRec(record, template=template, bgfile='image_Ch1.nrrd', alignSet='', threshold=0.6):
    record = checkDir(record)
    record['last_host'] = host
    print 'Finalising alignment for: ' + record['name']
    # bgfile = record['original_nrrd'][('Ch' + str(record['background_channel']) + '_file')]
    record['aligned_bg'], r = cmtk.align(bgfile, template=template, settings=alignSet)
    record['aligned_avgslice_score'] = str(
        ci.rateOne(record['aligned_bg'], results=None, methord=slicescore.avgOverlapCoeff, template=template))
    record['aligned_slice_score'] = str(
        ci.rateOne(record['aligned_bg'], results=None, methord=slicescore.OverlapCoeff, template=template))
    record['aligned_score'] = str(
        np.mean([np.float128(record['aligned_avgslice_score']), np.float128(record['aligned_slice_score'])]))
    # Note: np.float128 array score converted to string as mongoDB only supports float(64/32 dependant on machine).
    record['aligned_bg'] = str(record['aligned_bg']).replace(tempfolder, '')
    print 'Result: ' + record['aligned_score']
    if float(record['aligned_score']) > threshold:  # the score was stringified above
        record['alignment_stage'] = 6
        print 'Passed!'
    else:
        record['alignment_stage'] = 0
        print 'Failed!'
    if r > 0:
        print 'Error Code:' + str(r)
        record['alignment_stage'] = 0
    record['max_stage'] = 6
    return record
Example #5
 def test_precision(self):
     """
     Tests if the errors due to limited floating point precision are reduced
     by the more complex averaging algorithms.
     Somehow the SumDirectly algorithm seems to be more precise than the
     slightly more sophisticated SortedSum algorithm. Maybe this is because
     Python can optimize simple algorithms by itself. But since this test
     expects the SortedSum algorithm to be more exact, it fails almost every
     time it is run, so it is marked as incomplete.
     """
     # create some random values
     values = []
     for i in range(2 ** 10 - 1):
         values.append(random.gauss(0.0, 1.0))
     # initialize some averages with the random values
     avg1 = sumpf.helper.average.SumDirectly(values=values)
     avg2 = sumpf.helper.average.SortedSum(values=values)
     avg3 = sumpf.helper.average.SumList(values=values)
     # calculate a very precise average
     summed = numpy.float128(0.0)
     for v in sorted(values):
         summed += numpy.float128(v)
     average = summed / numpy.float128(len(values))
     # calculate the errors of the averaging algorithms
     e1 = abs(avg1.GetAverage() - average)
     e2 = abs(avg2.GetAverage() - average)
     e3 = abs(avg3.GetAverage() - average)
     # compare their performance
     self.assertGreaterEqual(e1, e2)     # the SortedSum algorithm is expected to be more precise than the SumDirectly algorithm
     self.assertGreaterEqual(e2, e3)     # the SumList algorithm is expected to be more precise than the SortedSum algorithm
Example #6
def _merge_trigger_into(trigger, cluster):
    trigger_snr = trigger["snr"]

    cluster['time_min'] = min(trigger['time_min'], cluster['time_min'])
    cluster['time_max'] = max(trigger['time_max'], cluster['time_max'])
    cluster['freq_min'] = min(trigger['freq_min'], cluster['freq_min'])
    cluster['freq_max'] = max(trigger['freq_max'], cluster['freq_max'])

    if trigger_snr > cluster['snr']:
        cluster['time'] = trigger['time']
        cluster['freq'] = trigger['freq']
        cluster['snr'] = trigger_snr
        cluster['amplitude'] = max(trigger['amplitude'], cluster['amplitude'])
        cluster['q'] = max(trigger['q'], cluster['q'])

    cluster["trigger_count"] += trigger.get("trigger_count", 1)
    cluster["weighted_time"] += trigger.get("weighted_time", 
                                            trigger_snr
                                            *np.float128(trigger["time"]))
    cluster["weighted_freq"] += trigger.get("weighted_freq", 
                                            trigger_snr
                                            *np.float128(trigger["freq"]))
    cluster["snr_sum"] += trigger.get("snr_sum", trigger_snr)

    return cluster
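A toy merge, assuming _merge_trigger_into above is in scope and numpy is imported as np; the keys are exactly the ones the function reads, the values are illustrative only:

import numpy as np

cluster = {'time': 10.0, 'freq': 100.0, 'snr': 5.0, 'amplitude': 1.0, 'q': 4.0,
           'time_min': 9.9, 'time_max': 10.1, 'freq_min': 90.0, 'freq_max': 110.0,
           'trigger_count': 1, 'snr_sum': 5.0,
           'weighted_time': 5.0 * np.float128(10.0),
           'weighted_freq': 5.0 * np.float128(100.0)}
trigger = {'time': 10.2, 'freq': 120.0, 'snr': 8.0, 'amplitude': 2.0, 'q': 5.0,
           'time_min': 10.1, 'time_max': 10.3, 'freq_min': 115.0, 'freq_max': 125.0}

cluster = _merge_trigger_into(trigger, cluster)
print(cluster['snr'], cluster['trigger_count'])   # 8.0 2 -- the louder trigger wins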
Example #7
    def test_numpy(self):
        assert chash(np.bool_(True)) == chash(np.bool_(True))

        assert chash(np.int8(1)) == chash(np.int8(1))
        assert chash(np.int16(1))
        assert chash(np.int32(1))
        assert chash(np.int64(1))

        assert chash(np.uint8(1))
        assert chash(np.uint16(1))
        assert chash(np.uint32(1))
        assert chash(np.uint64(1))

        assert chash(np.float32(1)) == chash(np.float32(1))
        assert chash(np.float64(1)) == chash(np.float64(1))
        assert chash(np.float128(1)) == chash(np.float128(1))

        assert chash(np.complex64(1+1j)) == chash(np.complex64(1+1j))
        assert chash(np.complex128(1+1j)) == chash(np.complex128(1+1j))
        assert chash(np.complex256(1+1j)) == chash(np.complex256(1+1j))

        assert chash(np.datetime64('2000-01-01')) == chash(np.datetime64('2000-01-01'))
        assert chash(np.timedelta64(1,'W')) == chash(np.timedelta64(1,'W'))

        self.assertRaises(ValueError, chash, np.object())

        assert chash(np.array([[1, 2], [3, 4]])) == \
            chash(np.array([[1, 2], [3, 4]]))
        assert chash(np.array([[1, 2], [3, 4]])) != \
            chash(np.array([[1, 2], [3, 4]]).T)
        assert chash(np.array([1, 2, 3])) == chash(np.array([1, 2, 3]))
        assert chash(np.array([1, 2, 3], dtype=np.int32)) != \
            chash(np.array([1, 2, 3], dtype=np.int64))
Example #8
def parse_vecfile(a_fname):
    """Parse files containing word vectors

    @param a_fname - name of the wordvec file

    @return \c dimension of the vectors
    """
    global POS, NEG
    ivec = None
    with codecs.open(a_fname, 'r', ENCODING) as ifile:
        fnr = True
        toks = None
        for iline in ifile:
            iline = iline.strip()
            if fnr:
                ndim = int(iline.split()[-1])
                fnr = False
                continue
            elif not iline:
                continue
            toks = iline.split()
            assert (len(toks) - 1) == ndim, "Wrong vector dimension: {:d}".format(\
                len(toks) - 1)
            if toks[0] in POS:
                ivec = np.array([np.float128(i) for i in toks[1:]])
                # ivec /= _get_vec_len(ivec)
                POS[toks[0]] = ivec
            elif toks[0] in NEG:
                ivec = np.array([np.float128(i) for i in toks[1:]])
                # ivec /= _get_vec_len(ivec)
                NEG[toks[0]] = ivec
    # prune words for which there were no vectors
    POS = {iword: ivec for iword, ivec in POS.iteritems() if ivec is not None}
    NEG = {iword: ivec for iword, ivec in NEG.iteritems() if ivec is not None}
    return ndim
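For reference, the parser above expects a word2vec-style text file: a header line whose last token is the dimension, then one word followed by its components per line (the header format is an assumption read off the code, which only uses the last token of the first line; the file name below is hypothetical):

sample = """2 3
good 0.1 0.2 0.3
bad -0.1 -0.2 -0.3
"""
with open("vectors.txt", "w") as ofile:
    ofile.write(sample)
# with POS = {"good": None} and NEG = {"bad": None} preloaded,
# parse_vecfile("vectors.txt") returns ndim == 3 and fills in the vectors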
Example #9
def main():
    g  = geolocation_table('128.173.90.68')
    g.start_db()

    l= []
    start = g.get_start()
    end = g.get_end()

    i = start
    while ( i <= end ):
        data = g.get_data(i)
        loc = data[6]
        loc = loc.strip(' ()')        # format location
        loc = loc.split(',')
        loc[0] = np.float128(loc[0])
        loc[1] = np.float128(loc[1])
        l.append(loc)
        i += 1

    g.stop_db()


    # print l[0]
    # print 'type(l[0]): ',type(l[0][0])

    kml_write = sdr_kml_writer.kml_writer()
    kml_write.add_colored_pushpin('red-pushpin','ff0000ff')
    n = 1
    for i in l:
        s = 'Guess' + str(n)
        kml_write.add_placemark(s,s,i,'red-pushpin')
        n += 1
    filename = 'guesses_degen.kml'
    kml_write.write_to_file(filename)
Example #10
    def _data_to_file(self, data):
        # process the datatypes
        if self.file_dtype is None:
            # load from data
            self.file_dtype = data.dtype
        else:
            # make sure it's a dtype
            if not isinstance(self.file_dtype, np.dtype):
                try:
                    self.file_dtype = np.dtype(self.file_dtype)
                except Exception:
                    raise ValueError("file_dtype should be a numpy dtype.")

        # process the gain
        if self.gain is None:
            # default to 1.0
            self.gain = 1.0
            # calc it if we are going from float to int
            if (self.file_dtype.kind == 'i') and (self.data_dtype.kind == 'f'):
                fr = np.float128(np.iinfo(self.file_dtype).max*2)
                dr = np.float128(np.abs(data).max()*2 * (1.+self.gain_buffer))
                self.gain = np.float64(dr/fr)
                
        # calc and apply gain if necessary
        if self.apply_gain and self.gain != 1.0:
            return np.asarray(data/self.gain,dtype=self.file_dtype)
        else:
            return np.asarray(data,dtype=self.file_dtype)
Example #11
 def calc_ev_dos(self, ev_from=-100, ev_to=20, delta=0.01, sigma=0.1):
   """
   Calculates dos of electronic eigenvalues    
   
   """
   
   success = True
   error = ""
   
   if ((self.eigenvalues is None) and (self.evs_up is None) and (self.evs_down is None)):
     success = False
     error = "Eigenvalues were not read in"
     
     return success, error
   
   _extraBins = 2
 
   # get min and max 
   ev_dos_min = np.float128(ev_from)
   ev_dos_max = np.float128(ev_to)
       
   # number of bins
   ev_dos_n_bins = np.around(int((ev_dos_max - ev_dos_min) / delta) + _extraBins, decimals=0)
       
   # array to hold the ev bin values
   ev_dos_bins = np.arange(ev_dos_min, np.around(ev_dos_min + ev_dos_n_bins * delta, decimals=4), delta)
   self.ev_dos_bins = copy.deepcopy(ev_dos_bins)
   
   if (self.eigenvalues is not None):
     # array for the dos values
     ev_dos = np.zeros(ev_dos_n_bins, dtype=np.float128) 
         
     # calculating DOS
     for i in range(ev_dos_n_bins):
       ev_dos[i] = np.sum((1/(sigma*np.pi**0.5)) * np.exp(-(ev_dos_bins[i] - self.eigenvalues)**2 / sigma**2))
     
     self.ev_dos = copy.deepcopy(ev_dos)
   
   if (self.evs_up is not None):
     # array for the dos values
     ev_dos = np.zeros(ev_dos_n_bins, dtype=np.float128) 
         
     # calculating DOS
     for i in range(ev_dos_n_bins):
       ev_dos[i] = np.sum((1/(sigma*np.pi**0.5)) * np.exp(-(ev_dos_bins[i] - self.evs_up)**2 / sigma**2))
     
     self.ev_up_dos = copy.deepcopy(ev_dos)
   
   if (self.evs_down is not None):
     # array for the dos values
     ev_dos = np.zeros(ev_dos_n_bins, dtype=np.float128) 
         
     # calculating DOS
     for i in range(ev_dos_n_bins):
       ev_dos[i] = np.sum((1/(sigma*np.pi**0.5)) * np.exp(-(ev_dos_bins[i] - self.evs_down)**2 / sigma**2))
     
     self.ev_down_dos = copy.deepcopy(ev_dos)
    
   return success, error
Example #12
def unpack_time(payload):
################################################################################
    (t_c,) = struct.unpack('!I', payload[0:4])
    (t_m,) = struct.unpack('!d', payload[4:12])

    t = repr(np.float128(t_c) + np.float128(t_m))

    return t
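A standalone round trip under the layout the function assumes, namely a network-order uint32 of coarse seconds followed by a network-order float64 fraction (unpack_time above must be in scope):

import struct
import numpy as np

def pack_time(t_c, t_m):
    # big-endian uint32 seconds + big-endian float64 fractional part
    return struct.pack('!I', t_c) + struct.pack('!d', t_m)

payload = pack_time(1700000000, 0.123456789)
print(unpack_time(payload))   # '1700000000.123456789' up to float128 precision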
Example #13
def get_I(k, i):
    assert k >= 0
    assert 1 >= i >= 0
    if k == 1:
        return i
    else:
        k = np.float128(k)
        i = np.float128(i)
        return (pow(k, i) - 1) / (k-1)
Example #14
def get_i(k, I):
    assert k >= 0
    assert 1 >= I >= 0
    if k == 1:
        return I
    else:
        k = np.float128(k)
        I = np.float128(I)
        return np.log(I * (k - 1) + 1) / np.log(k)
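get_i inverts get_I, since log(I*(k-1) + 1)/log(k) = log(k**i)/log(k) = i. A quick check, assuming both functions above are in scope:

import numpy as np

for k in (0.5, 2.0, 10.0):
    for i in (0.0, 0.25, 0.5, 1.0):
        # round trip recovers i up to floating-point error
        assert np.isclose(float(get_i(k, get_I(k, i))), i)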
Example #15
    def test_ppc64_ibm_double_double128(self):
        # check that the precision decreases once we get into the subnormal
        # range. Unlike float64, this starts around 1e-292 instead of 1e-308,
        # which happens when the first double is normal and the second is
        # subnormal.
        x = np.float128('2.123123123123123123123123123123123e-286')
        got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
        expected = [
            "1.06156156156156156156156156156157e-286",
            "1.06156156156156156156156156156158e-287",
            "1.06156156156156156156156156156159e-288",
            "1.0615615615615615615615615615616e-289",
            "1.06156156156156156156156156156157e-290",
            "1.06156156156156156156156156156156e-291",
            "1.0615615615615615615615615615616e-292",
            "1.0615615615615615615615615615615e-293",
            "1.061561561561561561561561561562e-294",
            "1.06156156156156156156156156155e-295",
            "1.0615615615615615615615615616e-296",
            "1.06156156156156156156156156e-297",
            "1.06156156156156156156156157e-298",
            "1.0615615615615615615615616e-299",
            "1.06156156156156156156156e-300",
            "1.06156156156156156156155e-301",
            "1.0615615615615615615616e-302",
            "1.061561561561561561562e-303",
            "1.06156156156156156156e-304",
            "1.0615615615615615618e-305",
            "1.06156156156156156e-306",
            "1.06156156156156157e-307",
            "1.0615615615615616e-308",
            "1.06156156156156e-309",
            "1.06156156156157e-310",
            "1.0615615615616e-311",
            "1.06156156156e-312",
            "1.06156156154e-313",
            "1.0615615616e-314",
            "1.06156156e-315",
            "1.06156155e-316",
            "1.061562e-317",
            "1.06156e-318",
            "1.06155e-319",
            "1.0617e-320",
            "1.06e-321",
            "1.04e-322",
            "1e-323",
            "0.0",
            "0.0"]
        assert_equal(got, expected)

        # Note: we follow glibc behavior, but it (or gcc) might not be right.
        # In particular we can get two values that print the same but are not
        # equal:
        a = np.float128('2')/np.float128('3')
        b = np.float128(str(a))
        assert_equal(str(a), str(b))
        assert_(a != b)
Example #16
 def foldlight(self,period,nbins,ephemeris=0):
     '''
     folds a lightcurve on a known linear ephemeris
     v1.0 Kieran O'Brien - Dec 2011
     '''
     phase=(self.obstimes-np.float128(ephemeris))/np.float128(period)
     phase=phase-phase.astype('int')
     self.phase,self.phasebins=np.histogram(phase,nbins)
     return
Example #17
def test_legendre_gauss_lobatto_nodes_weights():
    from polynomials import legendre_gauss_lobatto_nodes_weights as gll

    # n = 6 
    x_ref = numpy.float128([-1, -0.830223896278567, -0.468848793470714, 0])
    w_ref = numpy.float128( \
            [0.04761904761904762, 0.276826047361566, 0.431745381209863, 0.487619047619048])
    x, w = gll(6)
    aaae(x_ref, x[:4], 15, 'n=6, x')
    aaae(w_ref, w[:4], 15, 'n=6, w')
Example #18
def _loadTrailer(tree):
    output = []
    all_trailers_element = tree.xpath("/IRP_Roadef_Challenge_Instance/trailers/IRP_Roadef_Challenge_Instance_Trailers")
    for trailer_element in all_trailers_element:
        index = int(trailer_element.find("index").text)
        capacity = np.float128(trailer_element.find("Capacity").text)
        initial_quantity = np.float128(trailer_element.find("InitialQuantity").text)
        distance_cost = float(trailer_element.find("DistanceCost").text)
        output.append(Trailer(index, capacity, initial_quantity, distance_cost))
    return output
Example #19
def intsum(a,b):
    c = np.float128(0.0)
    d = np.float128(0.0)
    c = pairsum(a)
    d = pairsum(b)
#    for i in xrange(a.size):
#        c = c + a[i]
#        d = d + b[i]
    d = d / 1e12
    d = d + c
    return d
Example #20
def fixsum(a,b):
    c = np.int64(0)
    d = np.int64(0)
    e = np.float128(0.0)
    for i in xrange(a.size):
        c = c + a[i]
        d = d + b[i]
    e = np.float128(d)
    e = e / 1e20
    e = e + np.float128(c)
    return e
Example #21
    def run(self):
        self.data = self.read_input()

        self.num_per_mapper = len(self.data)
        self.avg_cluster_size = np.float128(self.num_per_mapper) / np.float128(self.no_clusters)
        self.keep_ratio = np.float128(self.out_per_mapper) / np.float128(self.num_per_mapper)

        np.random.shuffle(self.data)
        self.cluster_center_points = self.build_coresets()
        self.cluster_centers = [ClusterCenter(c) for c in self.cluster_center_points]
        self.sample_points()
Example #22
def bigkahansum(a):
    b = np.float128(0.0)
    c = np.float128(0.0)
    y = np.float128(0.0)
    t = np.float128(0.0)
    for i in xrange(a.size):
        y = a[i] - c
        t = b + y
        c = (t - b) - y
        b = t
    return b
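A standalone comparison of plain accumulation against the compensated scheme above (rewritten inline in Python 3), with math.fsum as the exact reference:

import math
import numpy as np

values = [0.1] * 10**5

naive = 0.0
for v in values:
    naive += v                      # plain float64 accumulation drifts

b = c = np.float128(0.0)            # Kahan loop, same scheme as bigkahansum above
for v in values:
    y = v - c
    t = b + y
    c = (t - b) - y
    b = t

exact = math.fsum(values)
print(abs(naive - exact), abs(float(b) - exact))   # compensation shrinks the error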
Example #23
def EqFTh(x,pars):
    Ncell = pars[0]
    #Nsrc = pars[2]
    Nc=Ncell
    FAP_ = pars[1]
    Nsrc = pars[2]
    #return 1.-(1.-(1.+x)*np.exp(-x))**(Nc)-FAP_
    sumx=(1.+x)
    for i in range(2,2*Nsrc):
        sumx = sumx + x**i * invfactorial[i]
    return Nc*np.log(np.float128(1.)-np.float128(sumx*np.exp(-x)))-np.log(1.-FAP_)
Example #24
def test_legendre_gauss_nodes_weights():
    from polynomials import legendre_gauss_nodes_weights as gl

    # n = 6 
    x_ref = numpy.float128( \
            [-0.949107912342759, -0.741531185599395, -0.405845151377397, 0])
    w_ref = numpy.float128( \
            [0.129484966168870, 0.279705391489277, 0.381830050505119, 0.417959183673469])
    x, w = gl(6)
    aaae(x_ref, x[:4], 15, 'n=6, x')
    aaae(w_ref, w[:4], 15, 'n=6, w')
Example #25
def convert_quad_sum(arr1, arr2):
    qsum1 = numpy.float128(0)
    qsum2 = numpy.float128(0)
    for i in xrange(nx):
        qsum1 += numpy.float128(arr1[i])
        qsum2 += numpy.float128(arr2[i])

    sum1 = numpy.float64(qsum1)
    sum2 = numpy.float64(qsum2)

    return sum1, sum2
Example #26
def foldlight_freq(light,freq,nbins,ephemeris=0,freqdot=0):
        if(freqdot == 0):
            phi=(light-np.float128(ephemeris))*np.float128(freq)
            phi=phi-phi.astype('int')
            phase,phasebins=np.histogram(phi,nbins)
        else:
            deltat=(light-np.float128(ephemeris))
            phi=deltat*(freq+freqdot*deltat)
            phi=phi-phi.astype('int')
            phase,phasebins=np.histogram(phi,nbins)
        return phasebins, phase
        
Example #27
def loadPackingData(raw):
    retval = {}
    raw_as_list = "[" + raw.replace("=", ",").replace("{", "(").replace("}", ")").replace("\r", "") + "]"
    raw_as_list = re.sub(r"([0-9]+\.[0-9]+)", r"ld('\1')", raw_as_list)

    variables = ['N', 'L', 'L1', 'L2', 'P', 'P0']
    f = np.longdouble
    dtypes    = [np.int32, f  , f   , f   , f  , f   ]
    
    eval_environ = dict((a,a) for a in variables)
    eval_environ['ld'] = parse_longdouble
    data = eval(raw_as_list, eval_environ)
    # data = ["N", 64, "L1", [...], ..., [particle positions]]        
    
    # first process the variables
    vardata = data[:-1] # ["N", 64, ..., "P0", 1e-7]
    keys = vardata[0::2] # ["N", ..., "P0"]
    values = vardata[1::2] # [64, ..., 1e-7]
    
    for key, dtype, value in zip(keys, dtypes, values):
        retval[key] = dtype(value)
                
    # now process the particles
    particles = np.array(data[-1]) # [x1, y1, r1, x2, ...]
    particles = particles.reshape((particles.size/3, 3)) # [[x1, y1, r1], [x2, ...]] 
    particles = particles.transpose()
    x = particles[0]
    y = particles[1]
    r = particles[2]

    #convertedVectors = jamBashbulk.convertLvectors(retval["L1"], retval["L2"])
    #retval['alpha'] = convertedVectors['alpha']
    #retval['delta'] = convertedVectors['delta']
    #for key, value in jamBashbulk.get_packing_data(retval["N"], retval["P0"], x, y, r, **convertedVectors).iteritems():
    #    retval[key+"_calc"] = value


    x_major = float64(x); x_minor = float64(x-float128(x_major))
    y_major = float64(y); y_minor = float64(y-float128(y_major))
    r = float64(r)

    particles = array(zip(x_major, x_minor, y_major, y_minor, r), dtype=[('x', float64), ('x_err', float64),
                                                                         ('y', float64), ('y_err', float64),
                                                                         ('r', float64)])

#    particles = particles[0] # hack around structured array creation; for some
                             # reason there is a wrapping [ ]; this removes that.
   
    # split particles in two float64 parts
    #particles.dtype.names = ['x', 'y', 'r']
    retval['particles'] = particles
    
    return retval
Example #28
def convert_quad_sum_pyramid(arr1, arr2, power):
    qarr1 = numpy.float128(arr1)
    qarr2 = numpy.float128(arr2)
    for p in [2**i for i in xrange(1,power+1)]:
        for i in xrange(nx//p):
            qarr1[p*i] += qarr1[p*i+p//2]
            qarr2[p*i] += qarr2[p*i+p//2]

    sum1 = numpy.float64(qarr1[0])
    sum2 = numpy.float64(qarr2[0])

    return sum1, sum2
Example #29
def normalize_padded(padded, means=None, stds=None):
    """Normalize by last dim of padded with means/stds or calculate them.

        .. TODO::
           * consider importing instead ex:

                from sklearn.preprocessing import StandardScaler, RobustScaler
                robust_scaler = RobustScaler()
                x_train = robust_scaler.fit_transform(x_train)
                x_test  = robust_scaler.transform(x_test)
                ValueError: Found array with dim 3. RobustScaler expected <= 2.

           * Don't normalize binary features
           * If events are sparse then this may lead to huge values.
    """
    # TODO epsilon choice is random
    epsilon = 1e-6
    original_dtype = padded.dtype

    is_flat = len(padded.shape) == 2
    if is_flat:
        padded = np.expand_dims(padded, axis=-1)

    n_features = padded.shape[2]
    n_obs = padded.shape[0] * padded.shape[1]

    if means is None:
        means = np.nanmean(np.float128(
            padded.reshape(n_obs, n_features)), axis=0)

    means = means.reshape([1, 1, n_features])
    padded = padded - means

    if stds is None:
        stds = np.nanstd(np.float128(
            padded.reshape(n_obs, n_features)), axis=0)

    stds = stds.reshape([1, 1, n_features])
    if (stds < epsilon).any():
        print('warning. Constant cols: ', np.where((stds < epsilon).flatten()))
        stds[stds < epsilon] = 1.0
        # should be (small number)/1.0 as mean is subtracted.
        # Possible prob depending on machine err

    # 128 float cast otherwise
    padded = (padded / stds).astype(original_dtype)

    if is_flat:
        # Return to flat
        padded = np.squeeze(padded)
    return padded, means, stds
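A usage sketch, assuming normalize_padded above is in scope; note that np.float128 only exists where numpy's long double has extended precision (it is absent on Windows builds):

import numpy as np

x = np.array([[1., 2., 3.],
              [4., 5., 6.]])               # flat input: (n_sequences, n_timesteps)
x_n, means, stds = normalize_padded(x)
print(x_n.shape, float(means.squeeze()), float(stds.squeeze()))
# (2, 3) 3.5 1.7078... -- means/stds come back shaped (1, 1, n_features)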
Example #30
def _botev_fixed_point(t, M, I, a2):
    l = 7
    I = np.float128(I)
    M = np.float128(M)
    a2 = np.float128(a2)
    f = 2 * np.pi ** (2 * l) * np.sum(I ** l * a2 *
                                      np.exp(-I * np.pi ** 2 * t))
    for s in irange(l, 1, -1):
        K0 = np.prod(np.arange(1, 2 * s, 2)) / np.sqrt(2 * np.pi)
        const = (1 + (1 / 2) ** (s + 1 / 2)) / 3
        time = (2 * const * K0 / M / f) ** (2 / (3 + 2 * s))
        f = 2 * np.pi ** (2 * s) * \
            np.sum(I ** s * a2 * np.exp(-I * np.pi ** 2 * time))
    return t - (2 * M * np.sqrt(np.pi) * f) ** (-2 / 5)
Example #31
    def testBijectorOverRange(self):
        with self.test_session():
            for dtype in (np.float32, np.float64):
                skewness = np.array([1.2, 5.], dtype=dtype)
                tailweight = np.array([2., 10.], dtype=dtype)
                # The inverse will be defined up to where sinh is valid, which is
                # arcsinh(np.finfo(dtype).max).
                log_boundary = np.log(
                    np.sinh(
                        np.arcsinh(np.finfo(dtype).max) / tailweight -
                        skewness))
                x = np.array([
                    np.logspace(-2, log_boundary[0], base=np.e, num=1000),
                    np.logspace(-2, log_boundary[1], base=np.e, num=1000)
                ],
                             dtype=dtype)
                # Ensure broadcasting works.
                x = np.swapaxes(x, 0, 1)

                y = np.sinh((np.arcsinh(x) + skewness) * tailweight)
                bijector = tfb.SinhArcsinh(skewness=skewness,
                                           tailweight=tailweight,
                                           validate_args=True)

                self.assertAllClose(y,
                                    bijector.forward(x).eval(),
                                    rtol=1e-4,
                                    atol=0.)
                self.assertAllClose(x,
                                    bijector.inverse(y).eval(),
                                    rtol=1e-4,
                                    atol=0.)

                # Do the numpy calculation in float128 to avoid inf/nan.
                y_float128 = np.float128(y)
                self.assertAllClose(np.log(
                    np.cosh(np.arcsinh(y_float128) / tailweight - skewness) /
                    np.sqrt(y_float128**2 + 1)) - np.log(tailweight),
                                    bijector.inverse_log_det_jacobian(
                                        y, event_ndims=0).eval(),
                                    rtol=1e-4,
                                    atol=0.)
                self.assertAllClose(-bijector.inverse_log_det_jacobian(
                    y, event_ndims=0).eval(),
                                    bijector.forward_log_det_jacobian(
                                        x, event_ndims=0).eval(),
                                    rtol=1e-4,
                                    atol=0.)
Example #32
def compute_markus_lyapunov(param):
    window_size, offset, scale, sampling, seed, x0, max_iter, max_init, \
        step_size, chunk = param

    results = np.zeros(step_size, dtype='i4')
    pos = 0

    while pos < step_size:
        step_pos = pos + chunk * step_size
        screen_coord = (step_pos / window_size[1], step_pos % window_size[1])
        c = np.complex128(
            complex(
                screen_coord[0] / scale[0] + offset[0],
                ((window_size[1] - screen_coord[1]) / scale[1] + offset[1])))
        markus_func = lambda x: c.real if seed[idx % len(seed)] == "A" \
                      else c.imag

        # Init
        x = np.float128(x0)
        try:
            for idx in range(0, max_init):
                r = markus_func(idx)
                with np.errstate(over='raise'):
                    x = r * x * (1 - x)
        except FloatingPointError:
            pass

        # Exponent
        total = np.float64(0)
        try:
            for idx in range(0, max_iter):
                r = markus_func(idx)
                with np.errstate(over='raise'):
                    x = r * x * (1 - x)
                v = abs(r - 2 * r * x)
                if v == 0:
                    break
                total = total + math.log(v) / math.log(1.23)
        except FloatingPointError:
            pass

        if total == 0 or total == float('Inf'):
            exponent = 0
        else:
            exponent = total / float(max_iter)
        results[pos] = exponent
        pos += sampling
    return results
Example #33
def calculate_distance_cluster_numpy(df, centr):
    av = (np.zeros(k))
    av_count = (np.zeros(k))
    for i in range(df.shape[0]):
        cluster = int(df[i][cluster_index])
        distance = np.float128(
            math.sqrt(
                sum([(a - b)**2
                     for a, b in zip(df[i][0:cluster_index], centr[cluster])
                     ])))
        df[i][distance_cluster_index] = (distance)
        av[cluster] += (distance)
        av_count[cluster] += 1
        # print("Iter: ",i,"        Sum: ", av[0])

    return df, av, av_count
Example #34
 def test_scalar(self):
     res = make_np(1.1)
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
     res = make_np((1 << 64) - 1)  # uint64_max
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
     res = make_np(np.float16(1.00000087))
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
     res = make_np(np.float128(1.00008 + 9))
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
     res = make_np(np.int64(100000000000))
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
Example #35
 def _execute(self, data):
     """ Normalizes the samples vector to norm one """
     if self.feature_names == []:
         self.feature_names = data.feature_names
     elif self.feature_names != data.feature_names:
         raise InconsistentFeatureVectorsException("Two feature vectors used during training do not contain the same features!")
     x = data.view(numpy.ndarray)
     a = x[0,:]
      if self.dim is None:
         self.dim = len(a)
     a = a*numpy.float128(1)/numpy.linalg.norm(a)
     if self.dimension_scale:
         a = FeatureVector([len(a)*a],self.feature_names)
         return a
     else:
         return FeatureVector([a],self.feature_names)
Example #36
def binding_energy(DG, Na, c0=1):
    """Calculate the binding free energy while considering the
    contributions due to the sodium concentration.

    Parameters
    ----------
    DG : floating point value collected from MD simulations.

    Na : Sodium concentration 

    Returns
    -------
    DG : floating point value of the free energy with added
    contributions from sodium concentration.
    """
    return np.float128(DG - (R * T) * np.log(Na / c0))
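R and T are read from module globals in the source; a usage sketch with assumed values (R in kcal/(mol*K) and T = 298.15 K are assumptions, pick them to match the units of DG):

import numpy as np

R = 0.0019872    # kcal/(mol*K), assumed
T = 298.15       # K, assumed

DG = -10.5       # illustrative free energy from MD, kcal/mol
print(binding_energy(DG, Na=0.15))   # applies the -R*T*ln(Na/c0) correction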
Example #37
def open_wave(fn):
  '''Returns a tuple of sync_samples, data_samples, and sample_rate.'''
  wavefile = wave.open(fn, 'r')
  raw_data = wavefile.readframes(wavefile.getnframes())
  samples = numpy.fromstring(raw_data, dtype=numpy.int16)
  # try to use 128-bit float for the renormalization
  try:
    normed_samples = samples / numpy.float128(numpy.iinfo(numpy.int16).max)
  except AttributeError:  # platforms without np.float128
    normed_samples = samples / float(numpy.iinfo(numpy.int16).max)
  sync_samples = normed_samples[0::2]
  data_samples = normed_samples[1::2]
  # Need to invert these to match Matlab code for some reason.
  sync_samples *= -1
  data_samples *= -1
  return sync_samples, data_samples, wavefile.getframerate()
Example #38
def test_trig_func_has_correct_abs_max(analyzed_trig_func):
    """Test that absolute_maximum() returns correct value.

    First, make sure that its approximation is correct. Then, compare
    the exact values.
    """
    trig_abs_max = analyzed_trig_func.absolute_maximum
    approximate_expected_max = [-46.35559793676238, 1.013176643861527]
    np.testing.assert_allclose(
        np.float128(trig_abs_max), approximate_expected_max
    )
    exact_expected_max = analyzed_trig_func.relative_maxima[10]
    np.testing.assert_equal(
        trig_abs_max,
        [exact_expected_max, analyzed_trig_func.func_real(exact_expected_max)],
    )
Example #39
def do_validation(sess, model, validation_iter, epoch, saver):
    tot = np.float128(0.0)
    loss, records = do_epoch(sess, model, validation_iter, "Validation", epoch)
    if do_validation.min_loss is None or do_validation.min_loss > loss:
        do_validation.min_loss = loss
        ckpt = "{}/deeprccar-model".format(args.checkpoint_dir)
        saver.save(sess, ckpt)
        validf = "{}/validation-epoch{:03d}.txt".format(args.checkpoint_dir,
                                                        epoch)
        with open(validf, "w") as valid_f:
            for imfile, record in records.items():
                print(imfile, record[0], record[1], record[2], file=valid_f)
                tot += record[-1]
        print("Unormalized Validation RMSE: ", np.sqrt(tot/len(records)))
        print("Model saved at ", ckpt)
    return loss
Example #40
def test_numpy_high_precision_float_downcasting(fake_run, record_q, records_util):
    # CLI: GH2255
    run = fake_run()
    run.log(dict(this=np.float128(0.0)))
    r = records_util(record_q)
    assert len(r.records) == 1
    assert len(r.summary) == 0
    history = r.history
    assert len(history) == 1

    found = False
    for item in history[0].item:
        if item.key == "this":
            assert item.value_json == "0.0"
            found = True
    assert found
Example #41
def ktda_s(task, higherPriorityTasks, ieq, s, mode=0):
    # This function reports an upper bound on the probability of one deadline miss

    kpoints = []
    # pick up k testing points here
    kpoints = findpoints(task, higherPriorityTasks, 0)

    # for loop checking k points time
    minP = np.float128(1.0)
    selecteds = 0
    minS = 0
    for t in kpoints:
        workload = determineWorkload(task, higherPriorityTasks, t)
        if workload <= t:
            return 0
        # the WCET check did not pass; see whether the miss probability is acceptable
        fy = float(t)
        if ieq == Chernoff_bounds:
            if mode == 0:
                probRes = ieq(task, higherPriorityTasks, fy, s)
            else:
                try:
                    res = minimize_scalar(
                        lambda x: ieq(task, higherPriorityTasks, fy, x),
                        method='bounded',
                        bounds=[0, s])
                    probRes = ieq(task, higherPriorityTasks, fy, res.x)
                    selecteds = res.x
                except TypeError:
                    print("TypeError")
                    probRes = 1
        elif ieq == SympyChernoff:
            ResList = []
            ResList = ieq(task, higherPriorityTasks, fy, s)
            probRes = ResList[0]
            selecteds = ResList[1]
        elif ieq == Hoeffding_inequality:
            probRes = ieq(task, higherPriorityTasks, fy)
        elif ieq == Bernstein_inequality:
            probRes = ieq(task, higherPriorityTasks, fy)
        else:
            raise NotImplementedError(
                "Error: You use a bound without implementation.")
        if minP > probRes:  #find out the minimum in k points
            minP = probRes
            minS = selecteds
    return [minP, minS]
Example #42
  def testBijectorOverRange(self):
    with self.cached_session():
      for dtype in (np.float32, np.float64):
        skewness = np.array([1.2, 5.], dtype=dtype)
        tailweight = np.array([2., 10.], dtype=dtype)
        # The inverse will be defined up to where sinh is valid, which is
        # arcsinh(np.finfo(dtype).max).
        log_boundary = np.log(
            np.sinh(np.arcsinh(np.finfo(dtype).max) / tailweight - skewness))
        x = np.array([
            np.logspace(-2, log_boundary[0], base=np.e, num=1000),
            np.logspace(-2, log_boundary[1], base=np.e, num=1000)
        ], dtype=dtype)
        # Ensure broadcasting works.
        x = np.swapaxes(x, 0, 1)

        y = np.sinh((np.arcsinh(x) + skewness) * tailweight)
        bijector = SinhArcsinh(
            skewness=skewness, tailweight=tailweight, validate_args=True)

        self.assertAllClose(y, bijector.forward(x).eval(), rtol=1e-4, atol=0.)
        self.assertAllClose(x, bijector.inverse(y).eval(), rtol=1e-4, atol=0.)

        # On IBM PPC systems, longdouble (np.float128) is same as double except that it can have more precision.
        # Type double being of 8 bytes, can't hold square of max of float64 (which is also 8 bytes) and
        # below test fails due to overflow error giving inf. So this check avoids that error by skipping square
        # calculation and corresponding assert.

        if np.amax(y) <= np.sqrt(np.finfo(np.float128).max) and \
           np.fabs(np.amin(y)) <= np.sqrt(np.fabs(np.finfo(np.float128).min)):

          # Do the numpy calculation in float128 to avoid inf/nan.
          y_float128 = np.float128(y)
          self.assertAllClose(
              np.log(np.cosh(
                  np.arcsinh(y_float128) / tailweight - skewness) / np.sqrt(
                      y_float128**2 + 1)) -
              np.log(tailweight),
              bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
              rtol=1e-4,
              atol=0.)
        self.assertAllClose(
            -bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
            bijector.forward_log_det_jacobian(x, event_ndims=0).eval(),
            rtol=1e-4,
            atol=0.)
Example #43
 def __init__(self, value=None, type=None, **kwargs):
     self.value = value
     if type is None:
         if isinstance(value, bool):
             # bool is a subclass of int, so check it before int
             self.type = ParamType.Flag
         elif isinstance(value, int):
             self.type = ParamType.Int
         elif isinstance(value, (float, numpy.float128)):
             self.type = ParamType.Float
             self.value = numpy.float128(value)
         elif isinstance(value, str):
             self.type = ParamType.Str
         else:
             raise ValueError(
                 'Could not guess datatype for {}'.format(value))
     else:
         self.type = ParamType(int(type))
Example #44
def expResAssign(size, n, minIndex, maxIndex):
    resArr = np.zeros(size)
    for switch in range(0, size):
        if switch <= minIndex:
            resArr[switch] = 0
        elif switch >= maxIndex:
            resArr[switch] = 1
        else:
            resArr[switch] = (np.float128(n)**np.float128(switch - minIndex) -
                              np.float128(1)) / (np.float128(n)**np.float128(
                                  maxIndex - minIndex) - np.float128(1))
    return resArr
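Between minIndex and maxIndex the ramp follows (n**(s - minIndex) - 1) / (n**(maxIndex - minIndex) - 1), an exponential ease-in from 0 to 1. A worked call, assuming expResAssign above is in scope:

print(expResAssign(5, 2.0, 1, 3))
# [0. 0. 0.33333333 1. 1.] -- at switch == 2: (2**1 - 1) / (2**2 - 1) = 1/3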
Example #45
    def var_stack(d1, x2, x1_mean, n1):
        d2 = np.diag(x2.T*x2)
        n2 = len(x2)
        x2_mean = np.mean(x2, axis=0)

        d1 = np.float128(d1)
        x1_mean = np.float128(x1_mean)
        n1 = np.float128(n1)
        d2 = np.float128(d2)
        x2_mean = np.float128(x2_mean)
        n2 = np.float128(n2)
        return np.float32((d1 + d2)/(n1 + n2) - np.matrix(np.array((n1*x1_mean + n2*x2_mean)/(n1+n2))**2))
Example #46
    def log_prob(sentence, v_lambda):
        total = np.float128(0)
        for w in range(len(sentence)):

            ptri, pbi, puni = np.float128(0), np.float128(0), np.float128(0)
            unigram = sentence[w]
            puni = np.float128(unigram_counts[unigram]) / train_token_count

            if w >= 1:
                bigram = (sentence[w - 1], sentence[w])
                unigram = sentence[w - 1]
                pbi = np.float128(0) if bigram not in bigram_counts \
                    else np.float128(bigram_counts[bigram])/unigram_counts[unigram]

            if w >= 2:
                trigram = (sentence[w - 2], sentence[w - 1], sentence[w])
                bigram = (sentence[w - 2], sentence[w - 1])
                ptri = np.float128(0) if trigram not in trigram_counts \
                    else np.float128(trigram_counts[trigram])/bigram_counts[bigram]

                total += np.log2(np.dot([ptri, pbi, puni], v_lambda))

        return total
Example #47
def st_dev(Img, i, j, m, N):

    a = int((N - 1) / 2)
    sd = 0

    valid_cnt = 0
    for k in range(-a, a):
        for h in range(-a, a):
            if ((k + i) > 0 and (k + i) < Img.shape[0] and (h + j) > 0
                    and (h + j) < Img.shape[1]):

                sd = sd + np.square(Img[k + i][h + j] - m)
                valid_cnt = valid_cnt + 1

    sd = np.float128(sd / (valid_cnt * valid_cnt))
    sd = np.sqrt(sd)

    return sd
Example #48
def wRoutine(l, mode=0):
    # assumes the lookup table has already been prepared
    # this is Lemma 1 in the paper
    resProb = np.float128(0.)
    listProb = []
    if l == 0:
        return 1.0
    else:
        for w in range(1, l + 1):
            if mode == 0:
                resProb = lookupTable[w] * wRoutine(l - w, mode)
            else:
                resProb = conlookupTable[w] * wRoutine(l - w, mode)
            # if resProb == 0.0:
            #     print "resProb is 0", w, lookupTable[w], wRoutine(l-w, mode)
            listProb.append(resProb)
    # print "l is ", l,  listProb
    return max(listProb)
Example #49
    def posterior(self, model_params, my_y, beta=1.0):

        tiny = np.finfo(np.float64).tiny
        mx = np.finfo(np.float64).max / (self.H)

        my_N = my_y.shape[0]

        log_posteriors = self.log_p_y(model_params, my_y, beta=beta)
        log_posteriors += np.log(model_params['pies'])[None, :] * beta
        posteriors = np.float64(np.exp(np.float128(log_posteriors)))
        posteriors[np.isnan(posteriors)] = tiny
        posteriors[posteriors < tiny] = tiny
        posteriors[np.isinf(posteriors)] = mx

        return {
            'posteriors_h': posteriors / np.sum(posteriors, 1)[:, None],
            'logpj': log_posteriors
        }
Example #50
def sec_ord_corr_Adam(chains, t1, emission_symbol_1, t2, emission_symbol_2):

    ones_at_t1_t2 = 0

    for i in range(0, len(chains)):
        a_t1 = chains[i][t1]
        b_t1 = emission_symbol_1

        a_t2 = chains[i][t2]
        b_t2 = emission_symbol_2

        if a_t1 == b_t1 and a_t2 == b_t2:
            ones_at_t1_t2 += 1
    soc = np.float128(ones_at_t1_t2) / len(chains)

    #    print '\r     ',np.float128(ones_at_t1_t2),'--',len(chains),'                                           \r',
    #    sys.exit('soc test!!!')
    return soc
Example #51
 def to_list(p):
     line = []
     for h in harr:
         if (h not in p) or (p[h] is None):
             line.append(-1)
         else:
             line.append(p[h])
     #timestamp
     line[0] = np.float128(line[0])
     #ip
     if line[1] != -1:
         line[1] = dottedQuadToNum(line[1])
     if line[2] != -1:
         line[2] = dottedQuadToNum(line[2])
     #everything else
     for i in range(3, 12):
         line[i] = int(line[i])
     return line
Example #52
def ans_one_model(index, mweights, mmeans, mcovs):
    #init
    nummods = mweights.shape[0]
    initgaus = mweights.shape[1]
    finalgaus = 64
    lenvars = mmeans.shape[2]
    weights = numpy.zeros(finalgaus, dtype="float128")
    means = numpy.zeros((finalgaus, lenvars), dtype="float128")
    covs = numpy.zeros((finalgaus, lenvars, lenvars), dtype="float128")
    h = numpy.float128(
        numpy.random.randint(1, 10000, (finalgaus, nummods, initgaus)))
    h = h / h.sum(0)

    for _ in range(80):
        #M-step
        weights = h.sum(1).sum(1) / (nummods * initgaus)
        temp = h * mweights
        temp = temp / temp.sum(1).sum(1).reshape(finalgaus, 1, 1)
        tempcovs = (mmeans.reshape(1, nummods, initgaus, lenvars, 1) -
                    means.reshape(finalgaus, 1, 1, lenvars, 1)).reshape(
                        finalgaus * nummods * initgaus, lenvars, 1)
        tempcovs = numpy.array([tt.dot(tt.transpose()) for tt in tempcovs
                                ]).reshape(finalgaus, nummods, initgaus,
                                           lenvars, lenvars)
        tempcovs = tempcovs + mcovs
        covs = (temp.reshape(finalgaus, nummods, initgaus, 1, 1) *
                tempcovs).sum(1).sum(1)
        means = (temp.reshape(finalgaus, nummods, initgaus, 1) *
                 mmeans.reshape(1, nummods, initgaus, lenvars)).sum(1).sum(1)
        #E-step
        for m in range(finalgaus):
            gaus = mnormal(means[m], covs[m])
            invcovs = numpy.linalg.inv(numpy.float64(covs[m]))
            temp = numpy.zeros((nummods, initgaus), dtype="float128")
            for j in range(nummods):
                for k in range(initgaus):
                    temp[j, k] = -0.5 * invcovs.dot(mcovs[j][k]).trace()
            h[m, :, :] = sc.power(
                gaus.pdf(mmeans) * sc.power(sc.e, temp),
                mweights) * weights[m] + 1e-20
        h = h / h.sum(0)

    with open("final_model/%d.pickle" % (index, ), "wb") as f:
        pickle.dump((weights, means, covs), f)
Example #53
def createROC_curve(dataset):
    loss_dic = dataset.loss
    weight_dic = dict(zip(dataset.SM_names, dataset.SM_val_weights))

    p_SM = np.logspace(base=10, start=-7, stop=0, num=100)
    p_SM[-1] = 0.999

    t_SM = np.concatenate((loss_dic['Wlnu'], loss_dic['qcd'], loss_dic['Zll'],
                           loss_dic['ttbar']))

    w_SM = np.concatenate(
        (np.full_like(loss_dic['Wlnu'], weight_dic['Wlnu'], np.float128),
         np.full_like(loss_dic['qcd'], weight_dic['qcd'], np.float128),
         np.full_like(loss_dic['Zll'], weight_dic['Zll'], np.float128),
         np.full_like(loss_dic['ttbar'], weight_dic['ttbar'], np.float128)))

    i_sort = np.argsort(t_SM)

    t_SM = t_SM[i_sort]
    w_SM = w_SM[i_sort]

    l = np.zeros(4)
    for i, n in enumerate(dataset.SM_names):
        l[i] = dataset.valSamples[n].shape[0]
    i_min = np.argmin(l / dataset.SM_fraction)

    cum_sum = np.cumsum(w_SM, dtype=np.float128) / np.float128(
        l[i_min] / dataset.SM_fraction[i_min])
    print('CumSum accuracy:', cum_sum[-1])

    idx_q = np.argmax(cum_sum > np.atleast_2d(1 - p_SM).T, axis=1)
    q_SM = t_SM[idx_q]

    dic_ROC = {}
    for n in dataset.BSM_names:
        out = loss_dic[n] > np.atleast_2d(q_SM).T
        p_BSM = np.float64(
            np.sum(out, axis=1, dtype=np.float128) / loss_dic[n].shape[0])

        roc_auc = np.trapz(p_BSM, p_SM)

        dic_ROC[n] = {'eff_BSM': p_BSM, 'eff_SM': p_SM, 'roc_auc': roc_auc}

    return dic_ROC
Example #54
    def convertLongToArray(self, Z, visitIndices):
        """
    takes a list Z of dimension [NR_BIOMK] x [NR_SUBJ] x array(NR_VISITS) and a similar list of
    visit indices where data is available. For instance, if for biomarker 2 subject 3 had data
    only in visits 0 and 2, then visitIndices[2][3] = array([0,2])
    :param Z:
    :param visitIndices:
    :return:
    Z_array - a biomarker-wise serialised version of Z, where len(Z[b]) = all possible elements
              in Z_array[b] dimension is [NR_BIOMK] x array(all_values_linearised)
    N_obs_per_sub - a list of dimensions [NR_BIOMKS] x array(NR_SUBJ), containing the number of
                    observations for each subject in each biomarker
    indFullToMissingArrayB - a list of indices which could map a potential array
                             Z_full with no missing entries to Z_array.
                             so Z_array = Z_full[indFullToMissingArrayB]
    """
        nrBiomk = len(Z)
        Z_array = [0 for b in range(nrBiomk)]
        N_obs_per_sub = [0 for b in range(nrBiomk)]

        indFullToMissingArrayB = [0 for b in range(nrBiomk)]

        for b in range(nrBiomk):
            # Creating 1d arrays of individuals' time points and observations
            Z_array[b] = np.array([
                np.float128(item) for sublist in Z[b] for item in sublist
            ]).reshape(-1, 1)
            N_obs_per_sub[b] = [len(Z[b][j]) for j in range(len(Z[b]))]

            visitsSoFar = 0
            indFullToMissingArrayS = [0 for s in range(len(Z[0]))]
            for s in range(len(Z[0])):
                indFullToMissingArrayS[s] = visitsSoFar + visitIndices[b][s]
                visitsSoFar += visitIndices[b][s].shape[0]

            indFullToMissingArrayB[b] = np.array(
                [i for subIdx in indFullToMissingArrayS for i in subIdx])

        for s in range(len(Z[0])):
            if np.sum([Z[b][s].shape[0] for b in range(nrBiomk)]) == 0:
                print('Z[b][s]', [Z[b][s].shape[0] for b in range(nrBiomk)])
                print('s', s)

        return Z_array, N_obs_per_sub, indFullToMissingArrayB
Example #55
def rhs_variation_of_parameters_stabilized(t, parameters):
    state, jac = kepler_three_body(*unpack_three_body(parameters))

    m_1, m_2, m_3 = [np.float128(m) for m in [state[6], state[13], state[20]]]
    m_i = m_1 + m_2
    x_1, x_2, x_3 = [
        x.astype(np.float128) for x in [state[:3], state[7:10], state[14:17]]
    ]
    x_i = (m_1 * x_1 + m_2 * x_2) / m_i

    x_3_i = x_3 - x_i
    r2_3_i = np.sum(x_3_i**2)
    a_i = G * m_3 * (x_3 - x_i) * r2_3_i**np.float128(-3. / 2)

    x_3_1 = x_3 - x_1
    r2_3_1 = np.sum(x_3_1**2)
    a_1 = G * m_3 * (x_3 - x_1) * r2_3_1**np.float128(-3. / 2)

    x_3_2 = x_3 - x_2
    r2_3_2 = np.sum(x_3_2**2)
    a_2 = G * m_3 * (x_3 - x_2) * r2_3_2**np.float128(-3. / 2)

    # FIXME: compute these differences without subtraction
    a_1_delta = a_1 - a_i
    a_2_delta = a_2 - a_i

    a_3_i = -G * m_i * (x_3 - x_i) * r2_3_i**np.float128(-3. / 2)
    a_3_1 = -G * m_1 * (x_3 - x_1) * r2_3_1**np.float128(-3. / 2)
    a_3_2 = -G * m_2 * (x_3 - x_2) * r2_3_2**np.float128(-3. / 2)

    a_3_delta = a_3_1 + a_3_2 - a_3_i

    extra_rhs = np.zeros(21)

    extra_rhs[3:6] = a_1_delta
    extra_rhs[10:13] = a_2_delta
    extra_rhs[17:20] = a_3_delta

    rhs = scipy.linalg.solve(jac, extra_rhs)

    rhs[6] = 0
    rhs[7] += -1
    rhs[14] += -1
    rhs[-6:] = 0

    return rhs
Example #56
def predict(x, P, Fs):
    # discretization period
    T = np.float128(1.0 / Fs)

    # total length of output signal
    L = len(x) + P

    # time axis of input + predicted samples
    tp = T * np.linspace(0, L - 1, L)
    # order of regression model
    N = P

    # convolve with leaky integrator to reduce noise
    #M = 10
    #lbd = float(M-1) / float(M)
    #h = (1 - lbd) * pow(lbd, np.arange(100))
    #x = np.convolve(x, h, 'valid')

    # compute regression coefficients.
    while(True):
        global A
        gotException = False
        try:
            [A, E, K] = arburg(x, N)
        except (ValueError, IndexError):
            gotException = True
            N = N // 2
        if gotException == False:
            break

    # allocate memory for output
    y = np.zeros(L)

    # fill part of the output with known part
    y[0:len(x)] = x

    # apply regression model to the signal.
    # actually this is IIR filter.
    # use lfilter func in future.
    for i in range(len(x), L):
        y[i] = -1 * np.sum(np.real(A) * y[i-1:i-1-N:-1])

    return tp, y
Example #57
def maineq(Xtest, Xerrtest, Xtrain, Xerrtrain):
    def Pi(di, sigdi, yi, sigyi):
        C1 = sigdi**-2
        C2 = sigyi**-2

        p1 = np.log((2 * np.pi * (C1 + C2))**-0.5 / (sigdi * sigyi))
        p2 = 0.5 * (C1 * di**2 + C2 * yi**2)
        p3 = 0.5 * ((di * C1 + yi * C2)**2) / (C1 + C2)
        return p1 - p2 + p3

    ntest = len(Xtest)
    ntrain = len(Xtrain)

    problist = np.zeros(ntest)
    for i in range(ntest):
        p = np.sum(Pi(Xtest[i], Xerrtest[i], Xtrain, Xerrtrain), axis=1)
        P = np.sum(np.exp(np.float128(p)))
        problist[i] = np.log(P) + np.log(0.5) - np.log(ntrain)
    return problist
Example #58
def NewApproximation(n, fr, J, k, tasks, mode=0):
    # mode 0 == EMR, 1 = CON
    # J is the bound of the idx
    prepareTable(n, fr, J, n - 1, tasks, mode)
    probsum = 0.
    phikl = np.float128(1.0)
    # this is the summation from 1 to J (prob * j)
    #print "which mode:", mode
    for x in range(1, J + 1):
        #print "for index:", x
        phikl = wRoutine(x, mode)
        # print phikl
        probsum += phikl * x

    if probsum == 0:
        return 0
    else:
        # to avoid numerical inconsistency
        return probsum / (1 + probsum - wRoutine(1, mode))
Example #59
def von_mises_deg(xx,mu,k,a,b):
  # make a von mises function over the range in xx
  # assume the input is 0-180 or 0-360 deg space.
  axis_size_deg = np.max(xx)+1
  if k<10**(-15):
    print('WARNING: k is too small, might get precision errors')
    
  xx_rad2pi = np.float128(xx/axis_size_deg*2*np.pi)
  mu_rad2pi = mu/axis_size_deg*2*np.pi
  yy = np.exp(k*(np.cos(xx_rad2pi-mu_rad2pi)-1))
  
  # first make the y values span from 0-1
  yy = yy-min(yy)
  yy = yy/max(yy)
  
  # then apply the entered amplitude and baseline.
  yy = a*yy+b
  
  return yy
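A usage sketch, assuming von_mises_deg above is in scope: a tuning curve over 0-179 deg, peaking at mu with amplitude a on top of baseline b:

import numpy as np

xx = np.arange(180)                          # 0-180 deg space
yy = von_mises_deg(xx, mu=90, k=3, a=2.0, b=0.5)
print(float(yy.max()), float(yy.min()))      # 2.5 0.5
print(int(np.argmax(yy)))                    # 90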
Example #60
    def calc_sampling_weight(self):
        if self.q is None:
            # Formula slide 33, dm-09
            center_dist_ratio = np.float128(1.0)
            if Helper.dist_func(self.cluster.center,
                                self.point) != np.float128(0.0):
                center_dist_ratio = Helper.dist_func(
                    self.cluster.center,
                    self.point) / self.cluster.dist_point_sum()

            self.q = np.ceil(
                ((np.float128(5.0) / np.float128(len(self.cluster))) +
                 center_dist_ratio) * np.float128(3.0)) - np.float128(2.0)
        return self.q