Example #1
def allflesh(skill):
    # All Flesh Must Be Eaten style skill test: d10 + skill, with extra rolls
    # on a natural 10 and penalties on a natural 1.  Assumes rand and ceiling
    # are aliases for random.randint and math.ceil (see Example #8).
    die = rand(1, 10)
    more = die
    rol = []
    if die == 10:
        while die == 10:
            nextroll = rand(1, 10)
            rol.append(nextroll)
            additional = max(0, nextroll - 5)
            die = nextroll
            more += additional
    if die == 1:
        while die == 1:
            nextroll = rand(1, 10)
            rol.append(nextroll)
            additional = min(nextroll - 5, 0)
            if additional < 0 and more == die:
                more -= 1
            if nextroll == 1:
                additional -= 1
            die = nextroll
            more += additional
    rollage = skill + more
    if rollage <= 8:
        ret = 0
    elif 9 <= rollage <= 16:
        ret = int(ceiling((rollage - 8) / 2.))
    elif 17 <= rollage <= 20:
        ret = 5
    elif 21 <= rollage:
        ret = int(ceiling((rollage - 20) / 3.)) + 5
    if rol:
        return "%s (Total: %s, role of luck %s)" % \
                (ret, rollage, ', '.join(str(x) for x in rol))
    return "%s (Total: %s)" % (ret, rollage)
Example #2
def btvs(skill):
    rollage = skill + rand(1, 10)
    if rollage <= 8:
        ret = 0
    elif 9 <= rollage <= 16:
        ret = int(ceiling((rollage - 8) / 2.))
    elif 17 <= rollage <= 20:
        ret = 5
    elif 21 <= rollage:
        ret = int(ceiling((rollage - 20) / 3.)) + 5
    return "%s (Total: %s)" % (ret, rollage)
Example #3
def reHeap(aray):
  curr_ind = len(aray)-1
  parent_ind = ceiling(curr_ind/2) - 1
  while True:
    if curr_ind < 1:
      break
    if aray[parent_ind] <= aray[curr_ind]:
      curr_ind = parent_ind
    elif aray[parent_ind] > aray[curr_ind]:
      # Swap child and parent, then keep sifting up from the parent slot.
      aray[parent_ind], aray[curr_ind] = aray[curr_ind], aray[parent_ind]
      curr_ind = parent_ind
    parent_ind = ceiling(curr_ind/2) - 1
  return aray
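A usage sketch of my own (not from the source, Python 3, assuming ceiling =
math.ceil): reHeap sifts the last element of a list up into min-heap order, so
appending and re-heapifying repeatedly builds a heap.

heap = []
for value in [7, 3, 9, 1, 5]:
    heap.append(value)
    heap = reHeap(heap)
print(heap)   # [1, 3, 9, 7, 5] -- a valid min-heap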
Example #4
 def list_satchel(self):
     if len(a.satchel) > 79:
         page_counter = math.ceil(len(a.satchel) / 79)
     else:
         page_counter = 1
     while page_counter > 0:  # the satchel might not all fit on one page
         counter = 1  # item number within the listing
         for x in a.satchel:
             y = a.satchel[x]
             print(w.fill(colored(str(counter) + ". " + y.name,'magenta')))
             if y.sklass == 1:
                 print(colored(" Attack power: " + str(y.atk), 'magenta'))
                 print(colored(" Enchantments:", 'magenta'))
                 for z in y.ench:
                     if y.ench[z] != 0:
                         print(colored("   -  +" + str(y.ench[z]) + " to " + y.enchnm[z], 'magenta'))
                     else:
                         print(colored("none", 'magenta'))                                        
             elif a.satchel[x].sklass == 2:
                 print(colored("Defense:      " + str(y.dfn), 'magenta'))
                 print(colored("Enchantments:", 'magenta'))
                 for z in y.ench:
                     if y.ench[z] != 0:
                         print(colored("   -  +" + str(y.ench[z]) + " to " + y.enchnm[z], 'magenta'))
                     else:
                         print(colored("none", 'magenta'))
             elif a.satchel[x].stackable:
                 print(colored(" Quantity: " + str(y.qua), 'magenta'))
             counter += 1
             if counter > 79 or counter == len(a.satchel) + 1:
                 page_counter -= 1
                 dots()
Example #5
def lsh_matcher(signatures, rows_per_band=5):
    ''' Use locality-sensitive hashing to find candidate pairs
    
    LSH finds candidate related incidents. For each band, it hashes each column
    into a big hash table. Any two columns (observations/documents) that hash 
    to the same bucket for any band are a candidate pair.
    
    Args: 
        signatures (numpy 2-d array): A row for each hash and a column for
            each observation.
        rows_per_band (int): 5 by default.  If the number of rows of signatures
            is not a multiple of rows_per_band, the final band will have fewer
            rows than any preceding bands.
        
    Returns: a list of candidate groups.  Each element is a list of column
        indices of signatures that hashed to the same bucket in some band.
    '''
    n_hashes = signatures.shape[0]
    n_bands = int(ceiling(n_hashes / float(rows_per_band)))
    candidate_groups = []
    for b in xrange(n_bands):
        band_start = rows_per_band * b
        band_end = min(n_hashes, band_start + rows_per_band)
        hash_bins = {}
        for col in xrange(signatures.shape[1]):
            h = hash(tuple(signatures[band_start:band_end, col]))
            if h not in hash_bins:
                hash_bins[h] = []
            hash_bins[h].append(col)
        for key, bin in hash_bins.iteritems():
            if len(bin) > 1:
                candidate_groups.append(bin)
    return candidate_groups
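A small usage sketch of my own (the snippet is Python 2, hence xrange and
iteritems). Columns 0 and 2 share identical signatures, so they surface as a
candidate group in every band:

import numpy as np

sigs = np.array([[1, 7, 1],
                 [4, 2, 4],
                 [9, 5, 9],
                 [3, 8, 3]])
print(lsh_matcher(sigs, rows_per_band=2))   # [[0, 2], [0, 2]]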
Example #6
def valid_content_repetition(node, max_sample_size, repetition_threshold):
    """This function takes a sample of node's tweets and sees if they are
    repeated over 50% of the time.
    
    :param node: The node to test
    :returns: Is the tested node
        repeating the same content over 50% of the time?
    :rtype: Boolean
    
    """
    # Collect max_sample_size of the first statuses
    statuses = node.statuses[:max_sample_size]

    # Iterate only while enough statuses remain untested to reach the repetition threshold
    for index, status_a in enumerate(statuses[ceiling(max_sample_size *
                                                      repetition_threshold):]):
        repetition_count = 0

        for status_b in statuses[index:]:
            seq = difflib.SequenceMatcher(a=status_a.text.lower(),
                                          b=status_b.text.lower())
            # If status_a and status_b are over 75% similar
            if (seq.ratio() > 0.75):
                repetition_count += 1

        # If any tweet is repeated more than 50% of the user's tweets, too much repetition
        if (repetition_count > len(node.statuses) * repetition_threshold):
            return False
    return True
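A toy check of my own, with stand-in status objects (Python 3, assuming
ceiling = math.ceil and import difflib, as the function requires):

from types import SimpleNamespace

tweets = ["buy now!!!", "buy now!!", "buy now!!!", "something different", "buy now!!!"]
node = SimpleNamespace(statuses=[SimpleNamespace(text=t) for t in tweets])
print(valid_content_repetition(node, max_sample_size=5, repetition_threshold=0.5))  # False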
Example #7
def valid_content_repetition(node, max_sample_size, repetition_threshold):
    """This function takes a sample of node's tweets and sees if they are
    repeated over 50% of the time.
    
    :param node: The node to test
    :returns: Is the tested node
        repeating the same content over 50% of the time?
    :rtype: Boolean
    
    """
    # Collect max_sample_size of the first statuses
    statuses = node.statuses[:max_sample_size]
    
    # Iterate only while enough statuses remain untested to reach the repetition threshold
    for index, status_a in enumerate(
            statuses[ceiling(max_sample_size * repetition_threshold):]):
        repetition_count = 0
        
        for status_b in statuses[index:]:
            seq = difflib.SequenceMatcher(a=status_a.text.lower(), b=status_b.text.lower())
            # If status_a and status_b are over 75% similar
            if (seq.ratio() > 0.75):
                repetition_count += 1
        
        # If any tweet is repeated more than 50% of the user's tweets, too much repetition
        if (repetition_count > len(node.statuses) * repetition_threshold):
            return False
    return True
Example #8
 def hasher(self, data):
     hash_dict = {}
     from math import ceil as ceiling
     for x in range(ceiling(len(data) / 4096)):
         block = data[4096 * x:4096 * (x + 1)]
         hash = hashlib.sha256(block).hexdigest()
         #print(hash)
         hash_dict.update({hash: block})
     return hash_dict
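A standalone Python 3 sketch of the same block-hashing idea (my own rewrite,
not the author's class): split a byte string into 4096-byte blocks and key
each block by its SHA-256 digest.

import hashlib
from math import ceil as ceiling

def hash_blocks(data, block_size=4096):
    blocks = {}
    for x in range(ceiling(len(data) / block_size)):
        block = data[block_size * x:block_size * (x + 1)]
        blocks[hashlib.sha256(block).hexdigest()] = block
    return blocks

print(len(hash_blocks(b"a" * 10000)))   # 2, not 3: identical blocks share a digest and collapse onto one key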
Example #9
def maxPool(image, kernel):

    #width and height of the kernel
    width = kernel[0]
    height = kernel[1]

    #these define how far the kernel moves in each direction between intervals. Many maxPooling implementations allow arbitrary
    #step sizes, so they are kept as separate variables; for our purposes, the step size equals the corresponding kernel
    #dimension so that no cell of the image is missed or overlapped.
    hStep = kernel[0]
    vStep = kernel[1]

    #this array holds the results of the pooling operation at each interval, as the name suggests, it is what the function will output
    output = []
    #we have to account for the possibility that the kernel will not evenly cover the image, i.e. its width and height might not
    #evenly divide the width and height of the given image. If this occurs, the kernel will hang over the edge of the image
    #on some iterations. That is why ceiling() is used when sizing the output array below: the dimensions must round up so that
    #iterations where the kernel overhangs the edge still have a place to put the result of the pooling operation.
    for i in range(0, ceiling(len(image)/hStep)):
        #[0] * ceiling(len(image[0])/vStep) is weird python notation for an array of zeroes of length ceiling(len(image[0])/vStep)
        output.append([0] * ceiling(len(image[0])/vStep))

    #this nested for loop iterates the kernel across the image, max-pooling at each position and storing the result in output.
    #i and j are the indices in output where the result is placed (and are scaled by the step sizes when indexing image).
    for i in range(0, len(output)):
        for j in range(0, len(output[0])):
            #this array represents the values in image covered by the kernel in this iteration
            kernelCover = []
            #these nested for loops append all the values covered by the kernel to kernelCover
            #x and y can be thought of as the indices in the kernel
            for x in range(0, width):
                for y in range(0, height):
                    #this try except statement accounts for when the kernel overhangs the edge of the image. Note that if it does overhang,
                    #an IndexError will be thrown and nothing will be appended to kernelCover.
                    try:
                        kernelCover.append(image[i*hStep + x][j*vStep + y])
                    except IndexError:
                        pass

            output[i][j] = max(kernelCover)

    # returns output as a numpy array since other parts of the program use numpy arrays
    return np.array(output)
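A quick check of my own (Python 3, assuming numpy imported as np and
ceiling = math.ceil, both of which the snippet relies on):

img = [[1, 2, 3],
       [4, 5, 6],
       [7, 8, 9]]
print(maxPool(img, (2, 2)))
# [[5 6]
#  [8 9]]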
Example #10
    def populate_road(self, populate_type, vehicle_type="car"):
        # find out which spots to occupy
        if populate_type == "fixed_width":
            incrementer = int(math.ceil(1.0/self.density))
            filled_in_spots = [i*incrementer for i in xrange(self.num_lanes*self.length/incrementer+1)]
        elif populate_type == "random":
            filled_in_spots = [i for i in xrange(self.num_lanes*self.length) if (random.random()*self.density >= 0.5)]

        # set the spots to the desired vehicle type
        self.set_vehicles(filled_in_spots, vehicle_type)
Example #11
def create(frequency):
    """
    Create and return a guitar string of the given frequency, using a sampling
    rate given by SPS. A guitar string is represented as a ring buffer of
    of capacity N (SPS divided by frequency, rounded up to the nearest
    integer), with all values initialized to 0.0.
    """

    n = math.ceil(SPS / frequency)
    init = stdarray.create1D(n, 0.0)
    return create_from_samples(init)
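A worked example of the buffer sizing (SPS is not shown in the snippet; 44100 Hz
is assumed here as a typical audio sampling rate, with Python 3 division):

import math
print(math.ceil(44100 / 440))   # 101 samples in the ring buffer for concert A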
Example #12
def select(array, order=None):
    # A default argument cannot refer to `array`; default to the median rank.
    if order is None:
        order = (len(array) + 1) // 2
    pivot = array.pop(int(random.random() * len(array)))
    length = math.floor(len(array)/NUM_MAP_TASKS)
    map_results = {'less than or equal to pivot': [], 'greater than pivot': []}
    for i in range(0, NUM_MAP_TASKS):
        r = map(array[length*i:length*(i+1)], pivot)
        map_results['less than or equal to pivot'] += r['less than or equal to pivot']
        map_results['greater than pivot'] += r['greater than pivot']
        
    # when all map tasks are done
    candidate1 = reduce(map_results['less than or equal to pivot'], math.floor(order), pivot)
    candidate2 = reduce(map_results['greater than pivot'],
                        math.ceil(order - len(map_results['less than or equal to pivot']) - 1), pivot)
    return candidate1 or candidate2
Example #13
def startCallBack(data):
    px = data.pose.pose.position.x
    py = data.pose.pose.position.y
    quat = data.pose.pose.orientation
    q = [quat.x, quat.y, quat.z, quat.w]
    roll, pitch, yaw = euler_from_quaternion(q)
    global xInit
    global yInit
    global thetaInit
    xInit = px
    xInit = math.ceil(xInit * 20) / 20   # snap up to the nearest 0.05
    yInit = py
    yInit = math.ceil(yInit * 20) / 20
    thetaInit = yaw * 180.0 / math.pi
Example #14
    def pull_friend_network(self, user, limit):
        """This function pulls a user's friend network, returns immediately if
        enough of a user's network has been pulled
        
        :param user: The user to pull data for
        :param limit: How many of the user's friends to pull
        
        """
        scope_limit = ceiling(limit / self.scope_depth)

        # Check to see if network needs to be retrieved
        retrieved_count = len(user.friends)
        if (retrieved_count >= limit or retrieved_count >= user.friends_count):
            return

        self.pull_remote_graph(user, user.friends, scope_limit,
                               self.twitter.get_friends_list)
Example #15
 def pull_friend_network(self, user, limit):
     """This function pulls a user's friend network, returns immediately if
     enough of a user's network has been pulled
     
     :param user: The user to pull data for
     :param limit: How many of the user's friends to pull
     
     """
     scope_limit = ceiling(limit / self.scope_depth)
     
     # Check to see if network needs to be retrieved
     retrieved_count = len(user.friends)
     if (retrieved_count >= limit or retrieved_count >= user.friends_count):
         return
     
     self.pull_remote_graph(user, user.friends,
                            scope_limit, self.twitter.get_friends_list)
Example #16
def annotate_metadata(song):
    path = song.audio.path
    audio = None
    info = {}
    ext = path.lower()[len(path) - 3:]

    # Mutagen doesn't like unicode
    path_latin1 = path.encode('latin1')

    # TODO(XXX) Once mutagen 1.17 makes it everywhere we can do away
    # with this bother with extensions and figuring out the file
    # format. We can use mutagen.File(filename) and it will hand us
    # the correct Mutagen tag parse for it. We could do this today,
    # but it's awkward because you can't specify you want EasyTags,
    # it'll hand you the raw MP3 tags, which are butts. 1.17 is much
    # much nicer, but not yet released.

    if ext == "mp3":
        # Now, open up the MP3 file and save the tag data into the database.
        try:
            audio = MP3(path_latin1, ID3=EasyID3)
        except Exception as e:
            logging.exception(e.message)
            audio = {}
        try:
            info['title'] = audio['title'][0]
        except (KeyError, IndexError):
            info['title'] = 'Unnamed Song'
        try:
            info['album'] = audio['album'][0]
        except (KeyError, IndexError):
            info['album'] = ''
        try:
            info['artist'] = audio['artist'][0]
        except (KeyError, IndexError):
            info['artist'] = ''
        try:
            info['track'] = int(audio['tracknumber'][0].split('/')[0])
        except (KeyError, IndexError, ValueError):
            info['track'] = 0
        try:
            info['time'] = int(ceiling(audio.info.length))
        except AttributeError:
            info['time'] = 0
Example #17
def annotate_metadata(song):
    path = song.audio.path
    audio = None
    info = {}
    ext = path.lower()[len(path) - 3 :]

    # Mutagen doesn't like unicode
    path_latin1 = path.encode("latin1")

    # TODO(XXX) Once mutagen 1.17 makes it everywhere we can do away
    # with this bother with extensions and figuring out the file
    # format. We can use mutagen.File(filename) and it will hand us
    # the correct Mutagen tag parse for it. We could do this today,
    # but it's awkward because you can't specify you want EasyTags,
    # it'll hand you the raw MP3 tags, which are butts. 1.17 is much
    # much nicer, but not yet released.

    if ext == "mp3":
        # Now, open up the MP3 file and save the tag data into the database.
        try:
            audio = MP3(path_latin1, ID3=EasyID3)
        except Exception as e:
            logging.exception(e.message)
            audio = {}
        try:
            info["title"] = audio["title"][0]
        except (KeyError, IndexError):
            info["title"] = "Unnamed Song"
        try:
            info["album"] = audio["album"][0]
        except (KeyError, IndexError):
            info["album"] = ""
        try:
            info["artist"] = audio["artist"][0]
        except (KeyError, IndexError):
            info["artist"] = ""
        try:
            info["track"] = int(audio["tracknumber"][0].split("/")[0])
        except (KeyError, IndexError, ValueError):
            info["track"] = 0
        try:
            info["time"] = int(ceiling(audio.info.length))
        except AttributeError:
            info["time"] = 0
Example #18
def splitToClasses(sortedSlideList, nClasses):

    output = []

    maxTags = sortedSlideList[-1].tagNumber
    classSize = math.ceil(maxTags / nClasses)

    assert (maxTags >= nClasses)
    assert (classSize >= 1)

    currentClassMax = classSize

    sizeClass = []

    for slide in sortedSlideList:

        if slide.tagNumber < currentClassMax:
            sizeClass.append(slide)
        else:
            output.append(sizeClass)
            # Start the next class with the slide that crossed the boundary.
            sizeClass = [slide]
            currentClassMax += classSize

    # Don't drop the final, partially filled class.
    if sizeClass:
        output.append(sizeClass)

    return output
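A small check with hypothetical Slide stand-ins (Python 3, math imported):

from types import SimpleNamespace

slides = [SimpleNamespace(tagNumber=t) for t in [1, 2, 3, 5, 8]]
for sizeClass in splitToClasses(slides, 3):
    print([s.tagNumber for s in sizeClass])
# [1, 2]
# [3, 5]
# [8]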
Example #19
endofyear = 0
averagewavlist = [] #average weighted average per year
averagewav = 0
falllastindex = fallcount-1
winterlastindex = wintercount-1
springlastindex = springcount-1
summerlastindex = summercount-1
addfall = 1
addwinter = 1
addspring = 1
addsummer = 1

while (marker < totalnumberyears):
  if (addfall == 1):
    for i in reversed(xrange(fallcount+1)):
      if (math.ceil((fallarray[i][0])/365)>marker):
        averagewavarray = averagewavarray + fall.calculate_weighedaverage(fallarray, falllastindex, i)
        endofyear+=1
        falllastindex = i
        addfall = 0
        break
      else:
        continue
  if (addwinter == 1):
    for i in reversed(xrange(wintercount+1)):
      if (math.ceil((winterarray[i][0])/365)>marker):
        averagewavarray = averagewavarray + winter.calculate_weighedaverage(winterarray, winterlastindex, i)
        endofyear+=1
        winterlastindex = i
        addwinter = 0
        break
Example #20
def pipeintstallc(
        l
):  # Determines the cost of installing the total length of pipe required
    pic = 500 * ceiling(l)
    return pic
Example #21
def siteprepc(
    a
):  # Determines the cost of preparing the land required for the reservoir given the reservoir area
    spc = 0.25 * ceiling(a)
    return spc
Example #22
def wallc(
    resd, p
):  # Determines the cost per meter of reservoir wall for a given reservoir depth using
    # linear regression
    wc = (30 + (resd - 5) * (340 - 30) / (20 - 5)) * ceiling(p)
    return wc
Example #23
def pipec(
    p, d, l
):  # Determines the cost of the total length of pipe required for a given pipe
    # type, diameter, and length
    pc = pipe[p][d] * ceiling(l)
    return pc
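These cost helpers assume ceiling = math.ceil and a pipe price table indexed by
pipe type and diameter; the table and values below are hypothetical, just to
show how the functions are called:

from math import ceil as ceiling

pipe = {"steel": {0.5: 120, 1.0: 180}}   # hypothetical cost per metre

print(pipeintstallc(12.3))        # 500 * 13 = 6500
print(siteprepc(2000.4))          # 0.25 * 2001 = 500.25
print(wallc(10, 45.2))            # wall cost per metre at 10 m depth, times 46
print(pipec("steel", 1.0, 12.3))  # 180 * 13 = 2340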
Example #24
                info["album"] = audio["\xa9ALB"][0]
            except (KeyError, IndexError):
                info["album"] = ""
        try:
            info["artist"] = audio["\xa9art"][0]
        except (KeyError, IndexError):
            try:
                info["artist"] = audio["\xa9ART"][0]
            except (KeyError, IndexError):
                info["artist"] = ""
        try:
            info["track"] = int(audio["trkn"][0][0])
        except (KeyError, IndexError, ValueError, TypeError):
            info["track"] = 0
        try:
            info["time"] = int(ceiling(audio.info.length))
        except AttributeError:
            info["time"] = 0
    else:
        raise BadContent(ext)

    song.title = info["title"]
    song.album = info["album"]
    song.artist = info["artist"]
    song.track = info["track"]
    song.time = info["time"]

    if hasattr(audio, "info") and not hasattr(audio.info, "sketchy"):
        # Mutagen only checks mp3s for sketchiness
        audio.info.sketchy = False
Example #25
        bend1Index = i

for i in range(len(bendValues)):
    if bendCoeff2 == bendValues[i]:
        bend2Index = i

for i in range(len(turbineValues)):
    if tEff == turbineValues[i]:
        turbineIndex = i

for i in range(len(pipeDiameters)):
    if pDiameter == pipeDiameters[i]:
        diameterIndex = i

for i in range(len(performanceRatings)):
    if ceiling((rH + rDepth) / 10) * 10 == performanceRatings[i]:
        performanceIndex = i

# ---------------------------------------------------
#  Computations
# ---------------------------------------------------
pArea = pipearea(pDiameter)
vUp = velocity(pFlowRate, pArea)
vDown = velocity(tFlowRate, pArea)
effH = effelevation(rH, rDepth)
eOutJ = mwhtojoule(eOut)
waterMass = massreq(eOutJ, tEff, effH, pFCoeff, pL, vDown, pDiameter,
                    bendCoeff1, bendCoeff2)
eInJ = energyreq(waterMass, effH, pFCoeff, pL, vUp, pDiameter, bendCoeff1,
                 bendCoeff2, pEff)
rArea = reservoirarea(eOutJ, tEff, waterMass, pFCoeff, pL, vDown, pDiameter,
Example #26
import math

s = 'Qwerty'
half = math.ceil(len(s) / 2)   # midpoint index, rounded up for odd lengths
print(s[half:] + s[:half])     # swaps the two halves: 'rtyQwe'
Example #27
def main(A, a, b, z):
    # Recursive binary search on a sorted list A: assuming A[a] < z <= A[b],
    # returns the first index in (a, b] whose value is at least z.
    i = math.ceil((a + b) / 2)
    if b - a <= 1:
        return b
    if A[i] < z:
        return main(A, i, b, z)
    else:
        return main(A, a, i, z)
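For example (my own call, Python 3 with math imported):

print(main([1, 3, 5, 7, 9], 0, 4, 5))   # 2, the first index whose value is >= 5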
Example #28
def frange(lowerbound, upperbound, increment):
    iterationcount = 1 + ceiling((upperbound - lowerbound) / increment)
    for i in range(0, iterationcount):
        yield lowerbound + float(i) * increment
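For instance (assuming ceiling = math.ceil), the endpoint is included:

print(list(frange(0.0, 1.0, 0.25)))   # [0.0, 0.25, 0.5, 0.75, 1.0]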
Example #29
def trailingZeros(n):
    """Returns the number of trailing zeros in n factorial"""
    zeros = 0
    # The + 1 makes the range cover 5**floor(log5(n)) as well; without it,
    # exact powers of five (n = 25, 125, ...) are undercounted.
    for i in range(1, ceiling(log(n, 5)) + 1):
        zeros += n // (5**i)
    return zeros
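A quick check (Python 3, assuming log is math.log and ceiling = math.ceil):

print(trailingZeros(25))    # 6 -- 25! ends in six zeros
print(trailingZeros(100))   # 24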
Example #30
def ReadDataFile(njoy, wims_names, reactions):
    """
        Read in all of the specified data from the njoy file.
        
        Input:
                njoy         - open NJOY data file
                wims_names   - names of the nuclides to extract
    """    

    mean=True
    std=True
    cov=True

    materials,names=IsotopeToMAT(wims_names)

#    reactions=[2,18,102,452]
#    reactions=[2,4,16,17,18,102,452]
#    reactions=[2,4,16,18,102,452]
#    reactions=[2,4,16,17,18,102,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,452]


    nuclides={} #Dictionary of all the isotopes
    for mat,name in zip(materials,names):
        nuclides[str(mat)] = XS(name)

    line=njoy.readline()
    while line:

        split=line.split()
        if int(split[0])==1: #Read the mean values
                  
            NG=int(split[2]);MAT=int(split[6]);MT=int(split[7]);MAT1=int(split[8]);MT1=int(split[9])
            val_lines=int(ceiling(float(split[10])*10/80.0))
            box_lines=int(ceiling(float(split[12])*float(split[13])/80.0))
            sym_flag=int(split[16])            
            
            
            
            if not mean:
                for i in range(val_lines): njoy.readline()
                for i in range(box_lines): njoy.readline()
            
            elif MAT in materials and MT in reactions and MT1 in reactions:
                
                values=[];boxes=[]
                
                for i in range(val_lines):        
                    line=njoy.readline()
                    for val in chunk_float(line,10): values.append(val)
                for i in range(box_lines):
                    line=njoy.readline()
                    for box in chunk_int(line,3): boxes.append(box)
                    
                nuclides[str(MAT)].means[str(MT)]=vecboxer(NG,values,boxes)
                if nuclides[str(MAT)].NG==None: nuclides[str(MAT)].NG=NG

            else: 
                for i in range(val_lines): njoy.readline()
                for i in range(box_lines): njoy.readline()         


        elif int(split[0])==2: #Read the standard deviations (may be relative)
            NG=int(split[2]);MAT=int(split[6]);MT=int(split[7]);MAT1=int(split[8]);MT1=int(split[9])
            val_lines=int(ceiling(float(split[10])*10/80.0))
            box_lines=int(ceiling(float(split[12])*float(split[13])/80.0))
            sym_flag=int(split[16])     
            
            if not std:
                for i in range(val_lines): njoy.readline()
                for i in range(box_lines): njoy.readline()            
                   
            elif MAT in materials and MT in reactions and MT1 in reactions:
                
                values=[];boxes=[]
                
                for i in range(val_lines):        
                    line=njoy.readline()
                    for val in chunk_float(line,10): values.append(val)
                for i in range(box_lines):
                    line=njoy.readline()
                    for box in chunk_int(line,3): boxes.append(box)
                    
                nuclides[str(MAT)].stds[str(MT)]=vecboxer(NG,values,boxes)
                if nuclides[str(MAT)].NG==None: nuclides[str(MAT)].NG=NG

            else: 
                for i in range(val_lines): njoy.readline()
                for i in range(box_lines): njoy.readline()      

        elif int(split[0])==3: #Read the covariances
            val_lines=int(ceiling(float(split[10])*10/80.0))
            box_lines=int(ceiling(float(split[12])*float(split[13])/80.0))
            sym_flag=int(split[16])
            NG=int(split[2]);MAT=int(split[6]);MT=int(split[7]);MAT1=int(split[8]);MT1=int(split[9])
    

            if not cov:
                for i in range(val_lines): njoy.readline()
                for i in range(box_lines): njoy.readline()
            
            if MAT in materials and MT in reactions and MT1 in reactions:
                if MAT != MAT1:
                    print('correlated materials!!!')
                    continue
                values=[];boxes=[]
                
                for i in range(val_lines):        
                    line=njoy.readline()
                    for val in chunk_float(line,10): values.append(val)
                for i in range(box_lines):
                    line=njoy.readline()
                    for box in chunk_int(line,4): boxes.append(box)
                    
                nuclides[str(MAT)].covariances[str(MT)+'_'+str(MT1)]=boxer(NG,values,boxes,sym_flag)
#                if MT not in nuclides[str(MAT)].reactions:
#                    nuclides[str(MAT)].reactions.append(MT)
                if nuclides[str(MAT)].NG==None: nuclides[str(MAT)].NG=NG
                
            else: 
                for i in range(val_lines): njoy.readline()
                for i in range(box_lines): njoy.readline()         
        else: 
            MAT=int(split[6])
            val_lines=int(ceiling(float(split[10])*10/80.0))
            box_lines=int(ceiling(float(split[12])*float(split[13])/80.0))
            for i in range(val_lines): njoy.readline()
            for i in range(box_lines): njoy.readline()
            
        line=njoy.readline()

#       -------------------------
#       End of file reading

    return nuclides
Example #31
def calc_stt(iord, qty, buyp, sellp):
	if iord == "I":
		stt = 0.00025 * qty * sellp
	else:
		stt = 0.001 * qty * ( buyp + sellp )
	return(ceiling(stt))
Example #32
                info['album'] = audio['\xa9ALB'][0]
            except (KeyError, IndexError):
                info['album'] = ''
        try:
            info['artist'] = audio['\xa9art'][0]
        except (KeyError, IndexError):
            try:
                info['artist'] = audio['\xa9ART'][0]
            except (KeyError, IndexError):
                info['artist'] = ''
        try:
            info['track'] = int(audio['trkn'][0][0])
        except (KeyError, IndexError, ValueError, TypeError):
            info['track'] = 0
        try:
            info['time'] = int(ceiling(audio.info.length))
        except AttributeError:
            info['time'] = 0
    else:
        raise BadContent(ext)

    song.title = info['title']
    song.album = info['album']
    song.artist = info['artist']
    song.track = info['track']
    song.time = info['time']

    if hasattr(audio, 'info') and not hasattr(audio.info, 'sketchy'):
        # Mutagen only checks mp3s for sketchiness
        audio.info.sketchy = False
Example #33
def median(values):
    # Average the value(s) at the middle of the sorted data, not the indices.
    values = sorted(values)
    medindex_root = (len(values) - 1) / 2.0
    medindex1 = int(math.floor(medindex_root))
    medindex2 = int(math.ceil(medindex_root))
    return average([values[medindex1], values[medindex2]])
Example #34
def duration_to_framecount(duration_ms, fps):
    return ceiling(duration_ms * fps / 1000)
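For example (assuming ceiling = math.ceil and Python 3 division):

print(duration_to_framecount(1001, 30))   # ceil(30.03) -> 31 frames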
Example #35
# 1
# math, operator, time, numbers, json, ...
# I used the help() function and passed it 'modules' to print all the built-in modules.
# 2
import math
import operator
import time
from math import sin, sqrt, tan
from time import *
from math import factorial as calcFactorial
from math import exp as exponent
from math import ceil as ceiling

assert(calcFactorial(5) == 120)
print(exponent(3))
print(ceiling(3.2))
print(math.tan(90))

#d. the angle whose sine is 0.8660254037844386
import math
print(math.asin(0.8660254037844386))   # ~1.047 rad, i.e. 60 degrees

#e.5^8
import math
print(math.pow(5, 8))

#f.square root of 400
import math
print(math.sqrt(400))

#g.the value of 5^e
import math
print(math.pow(5, math.e))

#h.the value of log(1024),base 2
import math
print(math.log(1024, 2))

#i.the value of log(1024),base 10
import math
print(math.log(1024, 10))

#j. the floor and ceiling values of 23.56
import math
print("floor value of 23.56:", math.floor(23.56))
print("ceiling value of 23.56:", math.ceil(23.56))