Example #1
def tresholdCreating(self):
    msec = self.frequency / 1000  # 1 msec == frequency/1000 samples
    self.defaultFrame = 4 * msec  # frame size for mean()/std(); must scale with frequency, assumed 4 msec
    self.stimulyDuration = int(0.8 * msec)  # threshold for stimuli filtering, ~20 points == 2 msec
    self.level = int(math.log(100.0 / self.frequency, 0.5) - 1)  # wavelet decomposition level, e.g. level 6 for a 10 kHz signal
    highNoiseFrequency = 14000.0
    self.highNoiseLevel = int(math.log(highNoiseFrequency / self.frequency, 0.5))
    self.localDelay = 1 * msec  # time delay before a spike (filters local responses)
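A log with base 0.5 is just a negated log base 2, so the level formula can be read as int(log2(frequency / 100) - 1); a quick check of that identity (the frequency value is hypothetical):

import math

frequency = 10000.0  # hypothetical sampling rate in Hz
assert abs(math.log(100.0 / frequency, 0.5) - math.log(frequency / 100.0, 2)) < 1e-9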
Example #2
def herrmann_data(X_, TF=True, cos=False, normalization=False):
    X = []
    for x in X_:
        data = {"%s" % i: 0 for i in range(1, 1515)}  # histogram over code magnitudes 1..1514
        for k in x:
            data["%s" % np.abs(k)] += 1
        tmp_ = []
        for i in range(1, 1515):  # i, not x: keep the outer loop variable intact
            tmp_.append(data["%s" % i])
        X.append(tmp_)

    if normalization:
        X_t = []
        for x in X:
            total = sum(x)  # invariant inside the inner loop, so compute it once
            x_T = []
            for x_ in x:
                x_T.append((x_ * 1.0) / total)
            X_t.append(x_T)
        X = X_t

    if TF is True:
        X_t = []
        for x in X:
            x_tmp_ = []
            for k in x:
                x_tmp_.append(math.log(1.0 + k, math.e))
            X_t.append(x_tmp_)
        X = X_t
        return X  # note: returning here means cos only takes effect when TF is False

    if cos is True:
        instances = X
        big_X = []
        for k, instance in enumerate(instances):
            X_T = []
            for x in instance:
                # Apply TF Transformation
                t_tmp = math.log(1.0 + x, 2)
                X_T.append(t_tmp)

            # Store Euclidean Length for Cosine Normalisation (Section 4.5.2)
            euclideanLength = 0
            for attribute in X_T:
                euclideanLength += 1.0 * attribute * attribute
            euclideanLength = math.sqrt(euclideanLength)

            X_T2 = []
            for attribute in X_T:
                # Apply Cosine Normalisation
                t_tmp = 1.0 * attribute / euclideanLength
                X_T2.append(t_tmp)
            big_X.append(X_T2)
        X = big_X

    return X
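A minimal usage sketch (the event codes below are hypothetical; valid magnitudes are 1..1514):

X_raw = [[3, -7, 3, 12], [7, 7, 1514]]
X_tf = herrmann_data(X_raw)                       # log(1 + count) per bin
X_cos = herrmann_data(X_raw, TF=False, cos=True)  # log2 TF plus cosine normalisation
print(len(X_tf), len(X_tf[0]))  # 2 instances x 1514 bins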
Example #3
def Haar(points, depth):
    # One level of the Haar transform per recursion; len(points) is assumed
    # to be even at every level (i.e., a power of two for full depth).
    if len(points) < 2 or depth < 1:
        return points
    if depth > math.log(len(points), 2):
        depth = int(math.log(len(points), 2))
    sum_sequence = []
    diff_sequence = []
    for i in range(0, len(points), 2):
        diff_sequence.append((points[i] - points[i + 1]) / math.sqrt(2))
        sum_sequence.append((points[i] + points[i + 1]) / math.sqrt(2))
    v = Haar(sum_sequence, depth - 1)
    v.extend(diff_sequence)
    return v
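A quick check on a power-of-two-length input (an odd length at any recursion level would index past the end of points):

coeffs = Haar([4.0, 6.0, 10.0, 12.0], depth=1)
# scaled pairwise sums first, then the detail coefficients:
# [10/sqrt(2), 22/sqrt(2), -2/sqrt(2), -2/sqrt(2)]
print(coeffs)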
Example #5
def calculate_entropy(self, sigA, sigB):
    '''Sum pointwise mutual information (base 2) over all pairs drawn from sigA x sigB.'''
    if sigA == [] or sigB == []:
        return None
    sum_xy = []
    N = float(sum(self.contingency))
    n = float(sum(self.signatcount))
    for a in sigA:
        for b in sigB:
            i = self.signature[a]
            j = self.signature[b]
            if i != j:
                p_xy = (self.contingency[i][j] + self.contingency[j][i]) / N
            else:
                p_xy = self.contingency[i][j] / N
            p_x = self.signatcount[i] / n
            p_y = self.signatcount[j] / n
            if p_xy != 0:
                sum_xy.append(math.log(p_xy / (p_x * p_y), 2))
            else:
                sum_xy.append(0.0)
    return sum(sum_xy)
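Each accumulated term is a pointwise mutual information value in bits; in isolation, with hypothetical probabilities:

import math

p_xy, p_x, p_y = 0.05, 0.1, 0.2
print(math.log(p_xy / (p_x * p_y), 2))  # ~1.32 bits: the pair co-occurs 2.5x more than independence predicts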
Example #6
def values_exact_sol(y0,a,b,h,tm):
    k = m.log(2)/tm  # decay constant from half-life tm
    x_range = np.arange(a,b,h)
    y = []
    for x in x_range:
        yn = exact_sol(k,y0,x)
        y.append(yn)
    return x_range,y
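exact_sol is not shown in this example; assuming exponential decay with half-life tm, a plausible definition is:

import math as m

def exact_sol(k, y0, x):
    return y0 * m.exp(-k * x)  # y(x) = y0 * e^(-kx); k = ln(2)/tm gives y(tm) = y0/2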
Example #7
def mainLevelFinding(self):
    self.mainLevel = int(
        math.log(
            (self.baseFrequency *
             (0.5 + sqrt(sqrt(self.snr)) / 2)) / self.frequency, 0.5) -
        1.4)  # empirically tuned ("magic") offset
    logger.warning(
        "mainLevelFinding # self.mainLevel: {0}, self.snr: {1}".format(
            self.mainLevel, self.snr))
Example #8
def get_retention(t):
    # earlier single-source constants were c = 1.8, d = 1.21
    c = average([1.8, 1.34, 0.9, 1.36])  # average() is assumed from numpy or a local helper
    d = average([1.21, 0.873, 0.9, 1.36])
    print(c, d)
    if t <= 1.0:
        return 1.0
    innr = math.pow(math.log(t, 10.0), d)
    return c / (innr + c)
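With the averaged constants (c = 1.35, d ~ 1.086), a worked value:

print(get_retention(10.0))  # 1.35 / (log10(10)**d + 1.35) = 1.35 / 2.35 ~ 0.574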
Example #9
def Norm_z(p):
    # rational approximation to the standard normal quantile (cf. Beasley & Springer, AS 111);
    # p is treated as the offset from the median, 0 <= p < 0.5
    a0, a1, a2, a3 = 2.5066282, -18.6150006, 41.3911977, -25.4410605
    b1, b2, b3, b4 = -8.4735109, 23.0833674, -21.0622410, 3.1308291
    c0, c1, c2, c3 = -2.7871893, -2.2979648, 4.8501413, 2.3212128
    d1, d2 = 3.5438892, 1.6370678
    if p > 0.42:
        r = math.sqrt(-math.log(0.5 - p))
        z = (((c3 * r + c2) * r + c1) * r + c0) / ((d2 * r + d1) * r + 1)
    else:
        r = p * p
        z = p * (((a3 * r + a2) * r + a1) * r + a0) / ((((b4 * r + b3) * r + b2) * r + b1) * r + 1)
    return z
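A sanity check against the familiar 97.5% point of the standard normal (the argument is the offset 0.975 - 0.5):

print(Norm_z(0.475))  # ~1.960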
Example #10
def dataFitting(data):
    try:
        dataLen = len(data)
        tmp = 2 ** math.ceil(math.log(dataLen, 2))  # next power of two
        delta = tmp - dataLen
        meanTmp = ar.histMean(data[:dataLen // 4])  # integer division for the slice
        newData = empty(tmp, dtype='float32')
        newData.fill(meanTmp)
        newData[delta:] = data
        return newData, delta
    except Exception:
        logger.error("dataFitting # Error: {0}".format(sys.exc_info()))
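A hypothetical usage, assuming ar.histMean returns something like the mean of its argument and numpy is imported as np:

padded, delta = dataFitting(np.random.randn(1000).astype('float32'))
print(len(padded), delta)  # 1024 24: front-padded up to the next power of two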
Example #12
def euler_method(x0,y0,a,b,h,tm):
    k = m.log(2)/tm  # decay constant from half-life tm
    xn = x0
    yn = y0
    x = [xn]
    y = [yn]
    while xn <= b:
        fn = EDO_dr(xn,yn,k)
        xn = xn + h
        yn = yn + h * fn
        x.append(xn)
        y.append(yn)
    return x,y
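EDO_dr is not shown; given k = ln(2)/tm, it is presumably the decay law dy/dx = -k*y (an assumption). With that definition:

def EDO_dr(x, y, k):
    return -k * y  # assumed right-hand side: exponential decay

xs, ys = euler_method(0.0, 100.0, 0.0, 10.0, 0.1, tm=5.0)
print(xs[-1], ys[-1])  # ~10.1, ~24.4: the while loop runs one step past b, and Euler undershoots the exact ~24.7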
Example #13
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2).float() *
            (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)
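The div_term line is the standard sinusoidal-encoding frequency term from "Attention Is All You Need": exp(-log(10000) * 2i/d_model) equals 1/10000^(2i/d_model). Equivalently:

import math
import torch

d_model = 8
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
direct = 10000.0 ** (-torch.arange(0, d_model, 2).float() / d_model)
print(torch.allclose(div_term, direct))  # True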
Example #14
def max_batch_size(gpu_ram_bytes:int,
                   model:models.Model, scalar_width:int=4,
                   default_max:int=32,
                   usable=0.95, verbose=True)->int:
    """
    See table 2 in https://www.microsoft.com/en-us/research/uploads/prod/2020/05/dnnmem.pdf for
    more categories of usage than are dealt with here; and for a proposal for a tool to do away
    with this estimation.
    See https://arxiv.org/1609.04836 for the suggestion that anything over 32 is probably
    bad anyway.
    :param gpu_ram_bytes: The RAM available to your graphics processor. For dual-GPU cards
    this should still be the memory of a single GPU unless you configure for multiple workers
    :param model: a keras Model which can be inspected to find its weights and inputs
    :param scalar_width: the width of your datatype in bytes, e.g. 4 for float32, 8 for float64
    :param default_max: Cut-off beyond which we assume that bigger batches will
    degrade generalisability (e.g. https://arxiv.org/1609.04836)
    :param usable: defaults to 0.95 The fraction of GPU memory that should be considered available
    for your model and inputs. Usually less than 100% because of framework, alignment loss,
    buffers, runtime context etc.
    :param verbose: print calculation
    :return: an integer which is our best guess for the biggest power of 2 batch size that will
    fit into your GPU memory at one go.
    """
    assert 0 < gpu_ram_bytes, 'required: 0 < gpu_ram_bytes, you said %r' % gpu_ram_bytes
    assert 0 < usable, 'required: 0 < usable, you said %r' % usable
    assert 0 < scalar_width, 'required: 0 < scalar_width, you said %r' % scalar_width
    assert model and model.layers, 'model.layers must not be None or empty'
    warnif(usable>1, "You've set usable GPU memory usage to more than 100%")
    all_inputs = sum([ reduce(operator.mul,[dim if dim else 1 for dim in l.input_shape])
                        for l in model.layers])
    outputs = reduce(operator.mul,
                     [dim if dim else 1 for dim in model.layers[-1].output_shape])
    tensors_size= all_inputs + outputs * 3 #outputs, labels, output vs loss gradients
    num_ephemeral=tensors_size # Actual value is ‘we have no idea, it depends on implementation’
    num_weights=sum(
            [ a.shape.num_elements()
              for a in model.trainable_weights + model.non_trainable_weights ])
    num_gradients=num_weights
    num_scalars=tensors_size + num_weights + num_gradients + num_ephemeral

    max_size= int(usable * gpu_ram_bytes / scalar_width / num_scalars)
    best_size= min( 2**int(math.floor(math.log(max_size, 2))), default_max)
    best_size=max(1,best_size)
    if verbose:
        print('Found Inputs+Outputs*3={} scalars. Doubling it for ephemerals. '
              'Weights,Gradients:{} scalars each. Scalar width={}. '
              'Given Usable={}, max batch size for {}GB is {}, best size is {}'\
              .format(tensors_size, num_weights, scalar_width,
                      int(usable*100), gpu_ram_bytes/GB,
                      max_size, best_size))
    return best_size
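A hypothetical usage sketch, assuming a tf.keras build where layers expose input_shape (which the code above requires) and the module's warnif helper is importable; verbose=False sidesteps the GB constant used only in the printout:

from tensorflow.keras import layers, models

model = models.Sequential([layers.Dense(64, input_shape=(100,)),
                           layers.Dense(10)])
batch = max_batch_size(8 * 2**30, model, scalar_width=4, verbose=False)  # 8 GB card, float32
print(batch)  # a power of two, capped at default_max=32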
Example #16
def add_random_edges_between(G, nodes1, p, nodes2 = None):
    """Generate random edges between specified nodes in a networkx graph w/probability p
    
    If nodes2 is None then just generate random undirected edges between nodes1
    If nodes2 is not None then generate random directed edges between nodes1 and nodes2
    
    This code is mostly taken from nx.generators.fast_gnp_random_graph. It generalizes
    that code to the situation of generating independent identically distributed
    edges between two (possibly distinct) sets of nodes.

    References
    ----------
    .. [1] Vladimir Batagelj and Ulrik Brandes, 
       "Efficient generation of large random networks",
       Phys. Rev. E, 71, 036113, 2005.
    """
    if p == 0:
        return G
    directed = nodes2 is not None

    v = 0  # Nodes in graph are from 0,n-1 (this is the first node index).
    w = -1
    lp = np.log(1.0 - p)
    n1 = len(nodes1)

    if directed:
        n2 = len(nodes2) 
        loop = not np.array_equal(nodes1, nodes2) # avoid self loops
        while v < n1: 
            lr = np.log(1.0 - random.random()) 
            w = w + 1 + int(lr/lp) 
            if not loop or v == w: # avoid self loops 
                w = w + 1 
            while  w >= n2 and v < n1:
                w = w - n2
                v = v + 1
                if not loop or v == w: # avoid self loops
                    w = w + 1
            if v < n1:
                G.add_edge(nodes1[v],nodes2[w])
    else:
        while v < n1:
            lr = math.log(1.0 - random.random())
            w = w + 1 + int(lr/lp)
            while w >= v and v < n1: 
                w = w - v 
                v = v + 1
            if v < n1:
                G.add_edge(nodes1[v],nodes1[w])
    return G
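A sketch of the undirected case (the function itself assumes math, numpy as np, and random are imported where it is defined):

import networkx as nx

G = nx.empty_graph(100)
G = add_random_edges_between(G, list(range(100)), p=0.05)
print(G.number_of_edges())  # random, but ~247 in expectation: p * 100 * 99 / 2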
Example #17
def calcu_idf(docs):
    words = set(chain.from_iterable(docs))

    df = {}
    for w in words:
        for doc in docs:
            if w in doc:
                df[w] = df.get(w, 0) + 1

    idf_dic = {}
    for word, freq in df.items():
        # BM25-style smoothed IDF
        idf_dic[word] = math.log((len(docs) - freq + 0.5) / (freq + 0.5))

    return idf_dic
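On a toy corpus (the function assumes chain from itertools and math are imported):

from itertools import chain
import math

docs = [["cat", "dog"], ["dog", "fish"], ["dog"]]
print(calcu_idf(docs)["fish"])  # log(2.5 / 1.5) ~ 0.51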
Example #18
def __init__(self, d_model, dropout=None, max_batch=32, max_len=500):
    super(PositionalEncoding, self).__init__()
    if dropout is not None:
        self.dropout = nn.Dropout(p=dropout)
    else:
        self.dropout = None
    pe = torch.zeros(max_batch, max_len, d_model)
    position = torch.arange(0, max_len).float().unsqueeze(1)  # len x 1
    div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-(math.log(10000.0) / d_model)))  # D/2
    for i in range(max_batch):  # fill every batch slot, including the last
        pe[i, :, 0::2] = torch.sin(position * div_term)
        pe[i, :, 1::2] = torch.cos(position * div_term)
    if use_cuda:  # use_cuda is assumed to be a module-level flag
        pe = pe.cuda()
    self.register_buffer('pe', pe)
Example #19
def runge_kutta_method(x0,y0,a,b,h,tm):
    k = m.log(2)/tm  # decay constant from half-life tm
    xn = x0
    yn = y0
    x = [xn]
    y = [yn]
    while xn <= b:
        k1 = EDO_dr(xn,yn,k)
        k2 = EDO_dr(xn + (1/2) * h,yn +(1/2) * h * k1,k)
        k3 = EDO_dr(xn + (1/2) * h,yn + (1/2) * h * k2,k)
        xn = xn + h
        k4 = EDO_dr(xn,yn + h*k3,k)
        yn = yn + (h/6) * (k1+ 2 * k2 + 2 * k3 + k4)
        x.append(xn)
        y.append(yn)
    return x,y
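With the same assumed EDO_dr as in the Euler example, RK4 tracks the exact solution far more closely at the same step size:

xs, ys = runge_kutta_method(0.0, 100.0, 0.0, 10.0, 0.1, tm=5.0)
print(xs[-1], ys[-1])  # ~10.1, ~24.66, matching 100 * 0.5**(xs[-1] / 5) to several digits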
Example #20
def convert_size(size_bytes):
    if size_bytes == 0:
        return "0B"  # pragma: no cover
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    i = int(math.floor(math.log(size_bytes, 1024)))
    return "%s %s" % (int(size_bytes / math.pow(1024, i)), size_name[i])
Example #22
import math
import numpy as np
from theano import tensor as tt

c = -0.5 * math.log(2 * math.pi)


def kl_divergence_normal_pair(mu1, mu2, sd1, sd2):
    # KL(N(mu1, sd1) || N(mu2, sd2)) = log(sd2/sd1) + (sd1^2 + (mu1 - mu2)^2) / (2 sd2^2) - 1/2
    if not any([
            isinstance(mu1, tt.Variable),
            isinstance(mu2, tt.Variable),
            isinstance(sd1, tt.Variable),
            isinstance(sd2, tt.Variable),
    ]):
        elemwise_kl = (math.log(sd2 / sd1) + (sd1**2 + (mu1 - mu2)**2) /
                       (2. * sd2**2) - 0.5)  # type: np.ndarray
        return np.sum(elemwise_kl)
    else:
        elemwise_kl = (tt.log(sd2 / sd1) + (sd1**2 + (mu1 - mu2)**2) /
                       (2. * sd2**2) - 0.5)
        return tt.sum(elemwise_kl)


def log_normal(x, mean, std, eps=0.0):
    std += eps
    return c - tt.log(tt.abs_(std)) - (x - mean)**2 / (2 * std**2)


def log_normal3(x, mean, rho, eps=0.0):
    std = ttrho2sd(rho)
    return log_normal(x, mean, std, eps)
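Two quick checks of the closed form on plain floats, which take the NumPy branch:

print(kl_divergence_normal_pair(0.0, 0.0, 1.0, 1.0))  # 0.0 for identical normals
print(kl_divergence_normal_pair(1.0, 0.0, 1.0, 1.0))  # 0.5 = (1 - 0)**2 / (2 * 1**2)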
Example #23
def protectedLog(var):
    # "protected" log, common in genetic programming: defined on every input
    if var > 0:
        return math.log(var)
    else:
        return 1
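The fallback value keeps evolved expressions defined on every input:

print(protectedLog(math.e))  # 1.0
print(protectedLog(-5))      # 1, instead of a math domain error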
Example #24
def customLog(number):
    return math.log(number, base)  # base is a free variable from the enclosing scope
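A self-contained variant (a sketch, not the original context) would capture base in a closure:

import math

def make_log(base):
    def customLog(number):
        return math.log(number, base)
    return customLog

log2 = make_log(2)
print(log2(8))  # 3.0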
Example #25
def extract_check_calc_specific_sites(recalc_data_oxides_cats_OX_list):

    print(
        "\n\tFORMULA=> extract_check_calc_specific_sites(recalc_data_oxides_cats_OX__list)"
    )

    print("\t\tDEVO SEPARARE PER LISTE DI MINERALE")
    print("\t\t\tPER OGNI LISTA DI MINERALE FARE I CALCOLI")
    print("\t\t\t\tRITORNARE LISTE DI LISTE DI MINERALI CON specific sites")
    print()

    a_args = []

    print(recalc_data_oxides_cats_OX_list)

    lista = list(recalc_data_oxides_cats_OX_list)  # shallow copy of the analyses
    print("LIST ", lista)

    for l in lista:
        print("l: ", l)
        if l['mineral'] not in a_args:
            a_args += [l['mineral']]

    new_list = [[]] * len(a_args)  # per-mineral buckets; each slot is rebound below, so the shared [] is harmless

    dict_of_list = {}
    for i in range(len(a_args)):

        for l in [l for l in lista if l['mineral'] == a_args[i]]:
            new_list[i] = new_list[i] + [l]

            sublist_list = new_list[i]

        dict_of_list[a_args[i]] = sublist_list

    for mine, value in dict_of_list.items():

        print("\nmineral group = ", mine, 'is: ', value)
        global zzz
        zzz = 100.00001

        if mine == 'grt':
            #GARNET#
            for single in value:
                alm = single['Fe2'] / (single['Fe2'] + single['Mg'] +
                                       single['Ca'] + single['Mn'])
                py = single['Mg'] / (single['Fe2'] + single['Mg'] +
                                     single['Ca'] + single['Mn'])
                gr = single['Ca'] / (single['Fe2'] + single['Mg'] +
                                     single['Ca'] + single['Mn'])
                sps = single['Mn'] / (single['Fe2'] + single['Mg'] +
                                      single['Ca'] + single['Mn'])
                XFe = single['Fe2'] / (single['Fe2'] + single['Mg'])
                XMg = single['Mg'] / (single['Fe2'] + single['Mg'])
                '''
                Fe3+ = 2*X*(1-T/S)
                X => oxygens in formula
                T => ideal number of cations
                S => observed cations
                '''
                if 'Fe3' in single:
                    print("good to know")
                else:
                    print("CATION SUM GRT: ", single['SUMcat'])
                    Fe3 = 2 * 12 * (1 - 8 / single['SUMcat'])
                    single.update({'Fe3': round(Fe3, 3)})
                single.update({'alm': round(alm, 3)})
                single.update({'py': round(py, 3)})
                single.update({'gr': round(gr, 3)})
                single.update({'sps': round(sps, 3)})
                single.update({'XFe': round(XFe, 3)})
                single.update({'XMg': round(XMg, 3)})

                print("every mineral analysis: ", single, "")

        elif mine == 'amph':
            #AMPH#
            for single in value:
                if 8 - single['Si'] > 0:
                    aliv = 8 - single['Si']
                else:
                    aliv = 0
                single.update({'aliv': aliv})

                alvi = single['Al'] - aliv
                single.update({'alvi': round(alvi, 3)})

                single.update({'T': zzz})

                if 'Fe3' in single:
                    print("good to know")
                else:
                    print("CATION SUM: ", single['SUMcat'])
                    Fe3 = 2 * 12 * (1 - 8 / single['SUMcat'])
                    single.update({'Fe3': round(Fe3, 3)})

                print("every mineral analysis: ", single, "")

        elif mine == 'px':
            #PYROXENE#
            for single in value:
                if 2 - single['Si'] > 0:
                    aliv = 2 - single['Si']
                else:
                    aliv = 0
                single.update({'aliv': round(aliv, 3)})

                alvi = single['Al'] - aliv
                single.update({'alvi': round(alvi, 3)})

                jd1 = single['Na'] * 2
                single.update({'jd1': round(jd1, 3)})

                if single['alvi'] > (single['Na'] + single['K']):
                    jd2 = single['alvi']
                else:
                    jd2 = single['Na'] + single['K']
                single.update({'jd2': round(jd2, 3)})

                if single['alvi'] > (single['Na'] + single['K']):
                    acm = single['Na'] + single['K'] - single['alvi']
                else:
                    acm = 0

                single.update({'acm': round(acm, 3)})

                if 'Fe3' in single:
                    print("good to know")
                else:
                    print("CATION SUM: ", single['SUMcat'])
                    Fe3 = 2 * 12 * (1 - 8 / single['SUMcat'])
                    single.update({'Fe3': round(Fe3, 3)})

                if (single['Fe3'] + single['Cr']) / 2 > single['acm']:
                    CaFeTs = (single['Fe3'] + single['Cr']) / 2
                else:
                    CaFeTs = 0
                single.update({'CaFeTs': round(CaFeTs, 3)})

                CaTiTs = single['Ti']
                single.update({'CaTiTs': round(CaTiTs, 3)})

                if ((single['aliv'] + single['alvi'] - single['jd2'] -
                     2 * single['Ti']) / 2) > 0:
                    CaTs = (single['aliv'] + single['alvi'] - single['jd2'] -
                            2 * single['Ti']) / 2
                else:
                    CaTs = 0
                single.update({'CaTs': CaTs})

                if (single['Ca'] - single['CaFeTs'] - single['CaTiTs'] -
                        single['CaTs']) > 0:
                    woll = single['Ca'] - single['CaFeTs'] - single[
                        'CaTiTs'] - single['CaTs']
                else:
                    woll = 0

                single.update({'woll': round(woll, 3)})

                if 'Ni' in single.keys():
                    en = (single['Mg'] + single['Ni']) / 2
                else:
                    en = (single['Mg'])
                single.update({'en': round(en, 3)})

                fs = (single['Mn'] + single['Fe2']) / 2
                single.update({'fs': round(fs, 3)})

                print("every mineral analysis: ", single, "")

        elif mine == 'bt':
            #BIOTITE#
            for single in value:
                T_henry2005 = None  # reset per analysis; never reuse a stale value

                #single.update({'Jdddd':zzz})
                print("every mineral analysis: ", single, "")
                #print("TIIIII: ", single['Ti'])
                #global T_henry2005
                if (single['Ti'] > 0.06 and single['Ti'] < 0.6):
                    #print("TIIIIIAAA: ", single['Ti'])
                    b = 4.6482E-09
                    a = -2.3594
                    #b = 4648200000
                    c = -1.7283
                    lnTi = round(math.log(single['Ti']), 3)
                    xmg = round(single['Mg'] / (single['Mg'] + single['Fe2']),
                                3)

                    print("lnTi ", lnTi)
                    print("xmg ", xmg)

                    primo = lnTi
                    secondo = a
                    terzo = round(c * (math.pow(xmg, 3)), 3)

                    print("terzo", terzo)

                    quarto = round((primo - secondo - terzo), 3)

                    quinto = round((quarto / b), 3)
                    print("quarto ", quarto)
                    print("quinto", quinto)

                    if quinto > 0:
                        finale = math.pow(quinto, 0.333)

                        T_henry2005 = finale

                    else:
                        print(
                            "cannot use Henry's calibration, see original paper"
                        )
                        single.update({'T_henry2005': 'OutOf_XMg_Range'})
                        pass
                else:
                    print("cannot use Henry's calibration, see original paper")
                    single.update({'T_henry2005': 'OutOf_Ti_Range'})
                    pass

                if T_henry2005 is not None:
                    if 400 < T_henry2005 < 800:
                        single.update({'T_henry2005': round(T_henry2005, 3)})
                    else:
                        print("cannot use Henry's calibration, see original paper")
                        single.update({'T_henry2005': 'OutOf_T_Range'})

    return dict_of_list  # lists of mineral lists with specific sites
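The biotite branch implements the Henry et al. (2005) Ti-in-biotite thermometer, T = ((ln(Ti) - a - c * XMg**3) / b)**(1/3); the same step in isolation, with hypothetical in-range values:

import math

a, b, c = -2.3594, 4.6482e-09, -1.7283
Ti, xmg = 0.25, 0.55  # hypothetical cations per formula unit
T = ((math.log(Ti) - a - c * xmg ** 3) / b) ** (1.0 / 3.0)
print(round(T, 1))  # ~647 deg C, inside the 400-800 calibration window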
Example #26
File: 53.py Project: tkmckenzie/pan
import math as npm


def choose(n, r):
    return npm.factorial(n) / (npm.factorial(r) * npm.factorial(n - r))


def log_sum(n):
    #Gives sum of log(1), ..., log(n)
    #Equivalent to log(n!)
    return sum(map(npm.log, range(2, n + 1)))


def log_choose(n, r):
    return log_sum(n) - log_sum(r) - log_sum(n - r)


threshold = npm.log(1e6)
count = 0
for n in range(1, 101):
    for r in range(n + 1):
        if log_choose(n, r) > threshold:
            count += 1

print(count)
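An equivalent cross-check via math.lgamma, which also yields log(n!) without large intermediates:

import math

n, r = 100, 50
log_ncr = math.lgamma(n + 1) - math.lgamma(r + 1) - math.lgamma(n - r + 1)
print(log_ncr > math.log(1e6))  # True: C(100, 50) ~ 1e29 dwarfs one million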
Example #28
#!/usr/bin/python

import math

#clusters = [[40.0, 20.0],[10.0, 30.0]]
#clusters=[[35.0, 15.0],[5.0, 5.0],[10.0, 30.0]]
clusters=[[50.0, 0.0],[0.0, 50.0]]

entropy = 0.0
for row in clusters:
    rowTotal = 0.0
    total = sum(row)
    for i in row:
        if i != 0:
            rowTotal += -1 * (i / total) * math.log(i / total, 2)
    entropy += (total / 100.0) * rowTotal

print("Entropy:", entropy)
Example #29
#!/usr/bin/python

import math

clusters = [[40.0, 20.0], [10.0, 30.0]]
#clusters=[[35.0, 15.0],[5.0, 5.0],[10.0, 30.0]]
#clusters=[[50.0, 0.0],[0.0, 50.0]]

N = sum([sum(cluster) for cluster in clusters])

h1 = 0.0
for row in clusters:
    h1 += -1.0 * sum(row) / N * math.log(sum(row) / N, 2)

classSums = [0.0 for x in range(len(clusters[0]))]

h2 = 0.0
for i in range(len(clusters[0])):
    for row in clusters:
        classSums[i] += row[i]
    h2 += -1.0 * classSums[i] / N * math.log(classSums[i] / N, 2)

print "Summed", classSums
print "H1:", h1, "H2:", h2

numerator = 0.0
for row in clusters:
    for i, val in enumerate(row):
        if val != 0.0:
            numerator += (val / N) * math.log(
                (val * N) / (sum(row) * classSums[i]), 2)
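The loop accumulates the mutual information I(C; K); the source is truncated here, but a common final step (an assumption, not shown above) normalizes by the cluster and class entropies:

print("MI:", numerator)
print("NMI:", numerator / math.sqrt(h1 * h2))  # geometric-mean normalization (assumed)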