Example #1
def rank_perm(file_path):
    # fabs, min_float and RankTerm are assumed to be imported/defined elsewhere in the module
    lines = [x.strip() for x in open(file_path).readlines()]
    value = -1
    i = 0
    rTermList = []

    while i < len(lines):
        val_list = lines[i].split(" ")

        if fabs(value - float(val_list[-1])) > min_float:
            value = float(val_list[-1])
            rTerm = RankTerm()
            rTerm.readRecord(lines[i])
            i = i + 1
            # consume the following lines that share (nearly) the same value
            while i < len(lines):
                val_list = lines[i].split(" ")
                if fabs(value - float(val_list[-1])) > min_float:
                    break
                rTerm.readRecord(lines[i])
                i = i + 1
            # append inside the if-branch so a skipped group is never re-appended
            rTermList.append(rTerm)
    return rTermList
Example #2
 def testGPSCalc2(self):
     self.test3 = standardcalc.GPSDistAway(self.source, 1000, 1000)
     self.test4 = standardcalc.GPSDistAway(self.source, -1000, -1000)
     self.assertTrue(math.fabs(self.result3.lat - self.test3.lat) <= 0.00004)
     self.assertTrue(math.fabs(self.result3.long - self.test3.long) <= 0.00004)
     self.assertTrue(math.fabs(self.result4.lat - self.test4.lat) <= 0.00004)
     self.assertTrue(math.fabs(self.result4.long - self.test4.long) <= 0.00004)
Example #3
    def nearly_same(self, xxs, yys, key='', absTol=1e-12, relTol=1e-6):
        """
        Compare two numbers or arrays, checking all elements are nearly equal.

        Parameters:
            - xxs, yys: two lists of numbers to compare
            - key: the key to the column we are comparing in output files
            - absTol: absolute tolerance
            - relTol: relative tolerance
        Returns: True or False, depending on whether the two lists are nearly
            the same or not
        Throws: AssertionError if xxs and yys are not nearly the same.
        """
        #
        # Coerce scalar to array if necessary.
        if( not hasattr(xxs, '__iter__') ):
            xxs = [xxs]
        if( not hasattr(yys, '__iter__') ):
            yys = [yys]
        lenXX = len(xxs)
        nearlySame = (len(yys) == lenXX)
        idx = 0
        while( nearlySame and idx<lenXX ):
            xx = xxs[idx]
            absDiff = math.fabs(yys[idx]-xx)
            if (absDiff>absTol and absDiff>relTol*math.fabs(xx)):
                self.assertFalse((absDiff>absTol and
                        absDiff>relTol*math.fabs(xx)),
                    key + ' is not nearly same: ' + str(xx) + ' '
                    + str(yys[idx]) + ' idx: ' + str(idx) + ' absDiff: '
                    + str(absDiff) + ' relDiff: ' + str(absDiff/math.fabs(xx)))
                nearlySame = False
            idx += 1
        return( nearlySame)
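A minimal standalone sketch of the same absolute/relative tolerance rule on plain floats (the values are illustrative, not from the original tests):

import math

def close_enough(xx, yy, absTol=1e-12, relTol=1e-6):
    # mirrors the check above: only fail when BOTH tolerances are exceeded
    absDiff = math.fabs(yy - xx)
    return not (absDiff > absTol and absDiff > relTol * math.fabs(xx))

print(close_enough(1.0, 1.0 + 1e-9))   # True: within the relative tolerance
print(close_enough(1.0, 1.01))         # False: exceeds both tolerances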
Example #4
def distance(p1, p2):
    # approximate distance in km between two (lat, lon) points (equirectangular approximation)
    p1lat, p1lon = float(p1[0]), float(p1[1])
    p2lat, p2lon = float(p2[0]), float(p2[1])
    latdiff = (p2lat + p1lat) / 2 * 0.01745  # mean latitude in radians (pi/180 ~ 0.01745)
    dlat = 111.3 * np.fabs(p1lat - p2lat)    # ~111.3 km per degree of latitude
    dlon = 111.3 * np.cos(latdiff) * np.fabs(p1lon - p2lon)
    return np.sqrt(np.power(dlat, 2) + np.power(dlon, 2))
Example #5
    def _check(self, statsE, stats):
        """
    Check earthquake stats.
    """
        attrs = ["timestamp", "ruparea", "potency", "moment", "avgslip", "mommag"]

        statsE.avgslip = statsE.potency / (statsE.ruparea + 1.0e-30)
        statsE.mommag = 2.0 / 3.0 * (numpy.log10(statsE.moment) - 9.05)

        for attr in attrs:
            valuesE = statsE.__getattribute__(attr)
            values = stats.__getattribute__(attr)
            msg = "Mismatch in number of snapshots for attribute '%s', %d != %d." % (attr, len(valuesE), len(values))
            self.assertEqual(len(valuesE), len(values), msg=msg)

            for (valueE, value) in zip(valuesE, values):
                msg = "Mismatch in value for attribute '%s', %g != %g." % (attr, valueE, value)
                if valueE != 0.0:
                    if math.isinf(math.fabs(valueE)):
                        self.assertAlmostEqual(1.0, math.fabs(value) / 1.0e30, places=6, msg=msg)
                    else:
                        self.assertAlmostEqual(1.0, value / valueE, places=6, msg=msg)
                else:
                    self.assertAlmostEqual(valueE, value, places=6, msg=msg)

        return
Example #6
 def em(self, nstep, grid, p):
     l = self.k - 1
     w, n, e, b = self.w, self.n, self.e, self.b
     if self.k == 1:
         s11 = (w * b / np.ones(n)).sum()
         s12 = (w * e / np.ones(n)).sum()
         grid[l] = s11 / s12
         p[l] = 1.
         mix = self.getMixedProb(grid)
         grad, mix_den = self.getGradient(mix, p)
         grad_max, grad_max_inx = self.getMaxGradient(grad)
         return {'accuracy': math.fabs(grad_max - 1), 'k': self.k, 'p': p, 'grid': grid, 'gradient': grad, 'mix_den': mix_den}
     else:
         res = {}
         for counter in range(nstep):
             mix = self.getMixedProb(grid)
             grad, mix_den = self.getGradient(mix, p)
             p = p * grad
             su = p[:-1].sum()
             p[l] = 1. - su
             for j in range(self.k):
                 mix_den_fil = mix_den > 1.E-10
                 f_len = len(mix_den_fil)
                 s11 = (w * e[mix_den_fil] / np.ones(f_len) * mix[mix_den_fil, j] / mix_den[mix_den_fil]).sum()
                 s12 = (w * b[mix_den_fil] * (mix[mix_den_fil, j] / np.ones(f_len)) / mix_den[mix_den_fil]).sum()
                 if s12 > 1.E-12:
                     grid[j] = s11 / s12
             grad_max, grad_max_inx = self.getMaxGradient(grad)
             res = {'accuracy': math.fabs(grad_max - 1.), 'step': counter + 1, 'k': self.k, 'p': p, 'grid': grid, 'gradient': grad, 'mix_den': mix_den}
             if res['accuracy'] < self.acc and counter > 10:
                 break
     return res
Example #7
def matrix_max_norm(matrix):
    maxNorm = math.fabs(matrix[0, 0])
    for rowIndex in range(0, matrix.shape[0]):
        for colIndex in range(0, matrix.shape[1]):
            if math.fabs(matrix[rowIndex, colIndex]) > math.fabs(maxNorm):
                maxNorm = matrix[rowIndex, colIndex]
    return math.fabs(maxNorm)
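As a quick check of the scan above against a vectorized equivalent (illustrative values; matrix_max_norm is the function defined just above and NumPy is assumed to be available):

import math
import numpy as np

matrix = np.array([[1.5, -7.25], [3.0, 2.0]])
print(matrix_max_norm(matrix))   # 7.25, element-by-element scan
print(np.abs(matrix).max())      # 7.25, same result in one vectorized call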
Example #8
def positionHelper(a, b, afstanden):
    if b.rect.bottom - a.rect.top < 0: #above

        if b.rect.left - a.rect.right > 0:
            afstand = (((b.rect.left - a.rect.right)**2 + (b.rect.bottom - a.rect.top)**2) **(1.0/2))/4
            afstanden.append(afstand)

        elif a.rect.left - b.rect.right > 0:
            afstand = (((a.rect.left - b.rect.right)**2 + (b.rect.bottom - a.rect.top)**2) **(1.0/2))/4
            afstanden.append(afstand)
        else:
            afstand = fabs((a.rect.bottom - b.rect.top)/4)
            afstanden.append(afstand)

    elif a.rect.bottom - b.rect.top < 0: #below

        if b.rect.left - a.rect.right > 0:
            afstand = (((b.rect.left - a.rect.right)**2 + (a.rect.bottom -b.rect.top)**2) **(1.0/2))/4
            afstanden.append(afstand)

        elif a.rect.left - b.rect.right > 0:
            afstand = (((a.rect.left - b.rect.right)**2 + (a.rect.bottom -b.rect.top)**2) **(1.0/2))/4
            afstanden.append(afstand)

        else:
            afstand = fabs((b.rect.bottom - a.rect.top)/4)
            afstanden.append(afstand)

    elif a.rect.left - b.rect.right > 0: #left
        afstand = fabs((a.rect.left - b.rect.right)/4)
        afstanden.append(afstand)

    elif b.rect.left - a.rect.right > 0: #right
        afstand = fabs((b.rect.left - a.rect.right)/4)
        afstanden.append(afstand)
Example #9
    def getStepsize(self, mix_den, ht):
        mix_den_fil = np.fabs(mix_den) > 1.E-7
        a = ht[mix_den_fil] / mix_den[mix_den_fil]
        b = 1.0 + a
        b_fil = np.fabs(b) > 1.E-7
        w = self.w
        sl = w * ht[b_fil] / b[b_fil]
        s11 = sl.sum()
        s0 = (w * ht).sum()

        step, oldstep = 0., 0.
        for i in range(50):
            grad1, grad2 = 0., 0.
            for j in range(self.n):
                a = mix_den[j] + step * ht[j]
                # accumulate first and second derivatives over all points
                if math.fabs(a) > 1.E-7:
                    b = ht[j] / a
                    grad1 = grad1 + w * b
                    grad2 = grad2 - w * b * b
            if math.fabs(grad2) > 1.E-10:
                step = step - grad1 / grad2  # Newton update of the step size
            if oldstep > 1.0 and step > oldstep:
                step = 1.
                break
            if grad1 < 1.E-7:
                break
            oldstep = step
        if step > 1.0:
            return 1.0
        return step
Example #10
    def __eq__(self, another):
        """Compare this attribute with another for value"""
        if another is None or not isinstance(another, VertexAttribute):
            return False

        if self.name != another.name:
            return False

        if isinstance(self.value, list) and isinstance(another.value, list):
            if len(self.value) == len(another.value):
                isEqual = True
                for pos in range(0, len(self.value)):
                    thisValue = ROUND_STRING.format(self.value[pos])
                    otherValue = ROUND_STRING.format(another.value[pos])

                    if thisValue != otherValue:
                        # handles cases where 0 and -0 are different when compared as strings
                        if math.fabs(self.value[pos]) - math.fabs(another.value[pos]) == math.fabs(self.value[pos]):
                            compareThisForZero = ROUND_STRING.format(math.fabs(self.value[pos]))
                            compareOtherForZero = ROUND_STRING.format(math.fabs(another.value[pos]))

                            if compareThisForZero != compareOtherForZero:
                                isEqual = False
                                break
                        else:
                            isEqual = False
                            break

                return isEqual
            else:
                return False
        else:
            return self.value == another.value
Example #11
def colormap(pixel):
    white=(221,221,221)
    orange=(219,125,62)
    magenta=(179,80,188)
    lightblue=(107,138,201)
    yellow=(177,166,39)
    lime=(65,174,56)
    pink=(208,132,153)
    gray=(64,64,64)
    lightgray=(154,161,161)
    cyan=(46,110,137)
    purple=(126,61,181)
    blue=(46,56,141)
    brown=(79,50,31)
    green=(53,70,27)
    red=(150,52,48)
    black=(25,22,22)
 
    colors=(white,orange,magenta,lightblue,yellow,lime,pink,gray,lightgray,cyan,purple,blue,brown,green,red,black)
 
    thecolor=0
    finalresult=256*256*256
    for idx,color in enumerate(colors):
       result=math.fabs(color[0]-pixel[0])+math.fabs(color[1]-pixel[1])+math.fabs(color[2]-pixel[2])
       if result < finalresult:
          finalresult=result
          thecolor=idx
    return thecolor
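A short usage sketch of the nearest-palette lookup above, which picks the index with the smallest summed per-channel absolute difference (an L1 distance in RGB space); the pixel values are made up, and colormap is the function defined just above:

import math

print(colormap((220, 220, 220)))   # 0  -> white (221, 221, 221) is closest
print(colormap((80, 55, 35)))      # 12 -> brown (79, 50, 31) is closest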
Example #12
def getMotionArrayRGB(image1, image2, threshold = 10):
    i1 = image1
    i2 = image2
    #both images need to be the same size in pixels
    if (i1.size[0] != i2.size[0]) or (i1.size[1] != i2.size[1]):
        return 0 #if not, we return 0
    size = i1.size

    imgArr1 = list(i1.getdata())
    imgArr2 = list(i2.getdata())

    t = threshold
    motionarray = [] #2D array to store motion areas

    i=0
    while i < len(imgArr1): #scan through the images
            p1 = imgArr1[i]
            p2 = imgArr2[i]

            if (fabs(p1[0]-p2[0]) > t) or (fabs(p1[1]-p2[1]) > t) or (fabs(p1[2]-p2[2]) > t): #compare each pixel in R,G,B channel
                    y = i // size[0]   # integer row index (floor division)
                    x = i - y*size[0]
                    motionarray.append((x,y,p2))

            i = i+1
 
    return motionarray
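A minimal usage sketch for the RGB motion test above, assuming Pillow is installed (getMotionArrayRGB is the function defined just above; the two tiny frames are synthetic):

from math import fabs   # getMotionArrayRGB above uses the bare fabs name
from PIL import Image

frame1 = Image.new('RGB', (2, 2), (10, 10, 10))
frame2 = Image.new('RGB', (2, 2), (10, 10, 10))
frame2.putpixel((1, 0), (200, 10, 10))   # red channel jumps past the threshold

print(getMotionArrayRGB(frame1, frame2))   # [(1, 0, (200, 10, 10))]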
Example #13
def get_rectangle(bounds):
    # This converts a latitude delta into an image delta. For USA, at zoom
    # level 19, we know that we have 0.21 meters/pixel. So, an image is showing
    # about 1280 pixels * 0.21 meters/pixel = 268.8 meters.
    # On the other hand we know that, at that latitude, a degree of lat/lon is:
    # https://en.wikipedia.org/wiki/Latitude
    # latitude = 111,132 m
    # longitude = 78,847 m
    latitude_factor = 111132.0 / 0.21
    longitude_factor = 78847.0 / 0.21

    # Feature size
    feature_width = longitude_factor * math.fabs(bounds[1] - bounds[3])
    feature_height = latitude_factor * math.fabs(bounds[0] - bounds[2])
    if feature_width > image_width or feature_height > image_height:
        print "** Warning ** The feature is bigger than the image."

    # CV params (int required)
    x = int((image_width / 2) - (feature_width / 2))
    y = int((image_height / 2) - (feature_height / 2))
    w = int(feature_width)
    h = int(feature_height)
    if w <= 25 or h <= 25:
        print "** Warning ** This image has very narrow bounds."
        print bounds
        print x, y, w, h
    if x <= 0 or y <= 0 or w <= 0 or h <= 0:
        print "** Warning ** There is something wrong with this image bounds."
        print bounds
        print x, y, w, h
    return x, y, w, h
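A small worked check of the conversion factors above (0.21 m/pixel at zoom 19; the 0.0005-degree span is made up for illustration):

# 1 degree of latitude ~ 111,132 m, so at 0.21 m/pixel:
latitude_factor = 111132.0 / 0.21      # ~529,200 pixels per degree
print(latitude_factor * 0.0005)        # a 0.0005-degree tall feature spans ~265 pixels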
Example #14
def getMotionArrayOLD(image1, image2, threshold = 10):
    i1 = image1
    i2 = image2
    #both images need to be the same size in pixels
    if (i1.size[0] != i2.size[0]) or (i1.size[1] != i2.size[1]):
        return 0 #if not, we return 0
    size = i1.size

    t = threshold
    motionarray = [] #2D array to store motion areas

    i=0
    while i < size[1]: #scan through the images
        j=0
        while j < size[0]:
            p1 = i1.getpixel((j,i))
            p2 = i2.getpixel((j,i))

            if (fabs(p1[0]-p2[0]) > t) or (fabs(p1[1]-p2[1]) > t) or (fabs(p1[2]-p2[2]) > t): #compare each pixel in R,G,B channel
                    motionarray.append((j,i))

            j = j+1
        i = i+1
 
    return motionarray
Example #15
def getMotionArrayRGBOLD(image1, image2, threshold = 10):
    i1 = image1
    i2 = image2
    #both images need to be the same size in pixels
    if (i1.size[0] != i2.size[0]) or (i1.size[1] != i2.size[1]):
        return 0 #if not, we return 0
    size = i1.size

    t = threshold
    motionarray = [] #2D array to store motion areas

    i=0
    while i < size[1]: #scan through the images
        j=0
        while j < size[0]:
            p1 = i1.getpixel((j,i))
            p2 = i2.getpixel((j,i))

            if (fabs(p1[0]-p2[0]) > t) or (fabs(p1[1]-p2[1]) > t) or (fabs(p1[2]-p2[2]) > t): #compare each pixel in R,G,B channel
                motionarray.append((j,i,p2)) #also return p2, the RGB value for the pixel from the second (newer) image

            j = j+1
        i = i+1

    return motionarray
Example #16
	def get_accel_offs(self):
		data_offs_min = self.get_accel_raw();
		data_offs_max = self.get_accel_raw();
		for num in range(0,10000):
			data = self.get_accel_raw()
			data_gyro = self.get_gyro()
			if math.fabs(data_gyro['x']) < 2 and  math.fabs(data_gyro['y']) < 2 and  math.fabs(data_gyro['z']) < 2 :
				if data['x'] > data_offs_max['x']:
					data_offs_max['x'] = data['x']

				if data['y'] > data_offs_max['y']:
					data_offs_max['y'] = data['y']

				if data['z'] > data_offs_max['z']:
					data_offs_max['z'] = data['z']

				if data['x'] < data_offs_min['x']:
					data_offs_min['x'] = data['x']

				if data['y'] < data_offs_min['y']:
					data_offs_min['y'] = data['y']

				if data['z'] < data_offs_min['z']:
					data_offs_min['z'] = data['z']

		data = {
		'x': data_offs_min['x']+(data_offs_max['x']-data_offs_min['x'])/2,
		'y': data_offs_min['y']+(data_offs_max['y']-data_offs_min['y'])/2,
		'z': data_offs_min['z']+(data_offs_max['z']-data_offs_min['z'])/2 }
		return data
Example #17
 def add_level(self):
     """ 
         Splits a tree node into four child nodes
     """
     # don't split if the current node is smaller than the minimum size
     if self.min_size:
         if(math.fabs(self.bounds.north - self.bounds.south) < self.min_size
            and math.fabs(self.bounds.west - self.bounds.east) < self.min_size):
             return None
     
     
     # get the boundary points for the new child nodes 
     ns_half = self.bounds.north - (self.bounds.north - self.bounds.south) / 2
     ew_half = self.bounds.east - (self.bounds.east - self.bounds.west) / 2 
        
     self.children = []
     self.children.append(QuadTreeNode(Point(self.bounds.west, self.bounds.north), Point(ew_half, ns_half)))
     self.children.append(QuadTreeNode(Point(ew_half, self.bounds.north), Point(self.bounds.east, ns_half)))
     self.children.append(QuadTreeNode(Point(self.bounds.west, ns_half), Point(ew_half, self.bounds.south)))
     self.children.append(QuadTreeNode(Point(ew_half, ns_half), Point(self.bounds.east, self.bounds.south)))
 
     # move items to children
     for item in self.items:
         self.add(item.point(), item.object())
     self.items = []
Example #18
    def fit(self, X, y):
        self.w1 = self.w2 = 0.0
        self.l = X.shape[0]
        for x in range(0, self.max_iter):
            sum1 = 0.0
            sum2 = 0.0
            for index, row in X.iterrows():
                x1 = row.data[0]
                x2 = row.data[1]
                y_label = y[index]
                sub = 1 - (1 / (1 + math.exp(-y_label * (self.w1*x1 + self.w2*x2))))

                sum1 += y_label * x1 * sub
                sum2 += y_label * x2 * sub

            w1_gradient = self.k * (1 / self.l) * sum1
            w2_gradient = self.k * (1 / self.l) * sum2
            if self.regularization:
                w1_gradient -= (self.k * self.c * self.w1)
                w2_gradient -= (self.k * self.c * self.w2)
            self.w1 += w1_gradient
            self.w2 += w2_gradient
            if math.fabs(w1_gradient) <= self.stop_value and math.fabs(w2_gradient) <= self.stop_value:
                self.n_iter = x
                print("in {} steps learned w1={} and w2 = {}".format(x, self.w1, self.w2))
                break
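The `sub` factor in the loop above is 1 - sigma(y * (w.x)); a tiny sketch of just that term (values are illustrative) shows why well-classified points contribute almost nothing to the gradient:

import math

def gradient_term(y_label, w_dot_x):
    # 1 - sigmoid(y * w.x): shrinks toward 0 as the example becomes well classified
    return 1.0 - 1.0 / (1.0 + math.exp(-y_label * w_dot_x))

print(gradient_term(+1, 5.0))    # ~0.0067, barely moves the weights
print(gradient_term(+1, -5.0))   # ~0.9933, pushes the weights hard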
Example #19
def SeBlock(a,first):                                 #read Ascii block of Integers
    line1 = a[first]
    first += 1
    val = ExtractFloat(a[first])               #Q,AMAX,HWHM
    Q = val[0]
    AMAX = val[1]
    HWHM = val[2]
    first += 1
    val = ExtractFloat(a[first])               #A0
    int0 = [AMAX*val[0]]
    first += 1
    val = ExtractFloat(a[first])                #AI,FWHM first peak
    fw = [2.*HWHM*val[1]]
    int = [AMAX*val[0]]
    first += 1
    val = ExtractFloat(a[first])                 #SIG0
    int0.append(val[0])
    first += 1
    val = ExtractFloat(a[first])                  #SIG3K
    int.append(AMAX*math.sqrt(math.fabs(val[0])+1.0e-20))
    first += 1
    val = ExtractFloat(a[first])                  #SIG1K
    fw.append(2.0*HWHM*math.sqrt(math.fabs(val[0])+1.0e-20))
    first += 1
    be = ExtractFloat(a[first])                  #EXPBET
    first += 1
    val = ExtractFloat(a[first])                  #SIG2K
    be.append(math.sqrt(math.fabs(val[0])+1.0e-20))
    first += 1
    return first,Q,int0,fw,int,be                                      #values as list
Example #20
def full_width_half_max (sigma_l, sigma_r=0):
    if (sigma_r == 0):
        sigma_r = sigma_l
    width_l = (2.35482/2.0)*m.fabs(sigma_l)
    width_r = (2.35482/2.0)*m.fabs(sigma_r)
    FWHM = width_l + width_r
    return FWHM
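The constant 2.35482 above is 2*sqrt(2*ln 2), the full width at half maximum of a Gaussian with unit standard deviation; a quick check (using the function defined just above, with math imported as m as it assumes):

import math as m

print(2.0 * m.sqrt(2.0 * m.log(2.0)))   # 2.3548200450309493
print(full_width_half_max(1.0))         # 2.35482 for a symmetric unit-sigma peak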
Example #21
def nearlySame(xxs, yys, absTol=1e-12, relTol=1e-6):
    """Compare two numbers or arrays, checking all elements are nearly equal."""
    #
    # Coerce scalar to array if necessary.
    if( not hasattr(xxs, '__iter__') ):
        xxs = [xxs]
    if( not hasattr(yys, '__iter__') ):
        yys = [yys]
    #
    # Initialize.
    lenXX = len(xxs)
    nearlySame = (len(yys) == lenXX)
    #
    idx = 0
    while( nearlySame and idx<lenXX ):
        xx = xxs[idx]
        absDiff = math.fabs(yys[idx]-xx)
        if( absDiff>absTol and absDiff>relTol*math.fabs(xx) ):
            print('Not nearly same:', xx, yys[idx], 'idx:',idx, 'absDiff:',absDiff, 'relDiff:',absDiff/math.fabs(xx))
            nearlySame = False
        #
        # Prepare for next iteration.
        idx += 1
    #
    return( nearlySame)
Example #22
def _scatter(actual, prediction, args):
    plt.figure()
    plt.plot(actual, prediction, 'b'+args['plot_scatter_marker'])
    xmin=min(actual)
    xmax=max(actual)
    ymin=min(prediction)
    ymax=max(prediction)
    diagxmin=min(math.fabs(x) for x in actual)
    diagymin=min(math.fabs(y) for y in prediction)
    diagpmin=min(diagxmin,diagymin)
    pmin=min(xmin,ymin)
    pmax=max(xmax,ymax)
    plt.plot([diagpmin,pmax],[diagpmin,pmax],'k-')
    if args['plot_identifier'] != 'NoName':
        plt.title(args['plot_identifier'])
    plt.xlabel('Observed')
    plt.ylabel('Modeled')
    if args['plot_performance_log'] == True:
        plt.yscale('log')
        plt.xscale('log')
    if args['plot_scatter_free'] != True:
        plt.axes().set_aspect('equal')
    if args['plot_dump'] == True:
        pfname=os.path.join(args['plot_dir'],args['plot_identifier']+'_eiger_scatter.pdf')
        plt.savefig(pfname,format="pdf")
    else:
        plt.show()
Example #23
 def moveByPlatform(self):
     for platform in map.platforms:
         for point in self.boxPoints:
                 if self.x+platform.fX+point.x>platform.x and self.x+platform.fX+point.x<platform.x+platform.sizeX and self.y+platform.fY+1+point.y>platform.y and self.y+platform.fY+1+point.y<platform.y+platform.sizeY or \
                 self.x+point.x>platform.x and self.x+point.x<platform.x+platform.sizeX and self.y+point.y>platform.y and self.y+point.y<platform.y+platform.sizeY:
                     for i in range(int(math.fabs(platform.fX)+1)):
                         if not self.touch():
                             if platform.fX>0:
                                 self.x += 1
                             if platform.fX<0:
                                 self.x -= 1
                     if platform.fX>0:
                         self.x -= 1
                     elif platform.fX<0:
                         self.x += 1
                     for i in range(int(math.fabs(platform.fY)+1)):
                         if not self.touch():
                             if platform.fY>0:
                                 self.y += 1
                             if platform.fY<0:
                                 self.y -= 1
                     if platform.fY>0:
                         self.y -= 1
                     elif platform.fY<0:
                         self.y += 1
                     break
Example #24
    def updatePWM(self):
        vl = self.leftSpeed*self.left_sgn
        vr = self.rightSpeed*self.right_sgn

        pwml = self.PWMvalue(vl, self.LEFT_MOTOR_MIN_PWM, self.LEFT_MOTOR_MAX_PWM)
        pwmr = self.PWMvalue(vr, self.RIGHT_MOTOR_MIN_PWM, self.RIGHT_MOTOR_MAX_PWM)

        if self.debug:
            print "vl = %5.3f, vr = %5.3f, pwml = %3d, pwmr = %3d" % (vl, vr, pwml, pwmr)

        if fabs(vl) < self.SPEED_TOLERANCE:
            leftMotorMode = Adafruit_MotorHAT.RELEASE
        elif vl > 0:
            leftMotorMode = Adafruit_MotorHAT.FORWARD
        elif vl < 0: 
            leftMotorMode = Adafruit_MotorHAT.BACKWARD

        if fabs(vr) < self.SPEED_TOLERANCE:
            rightMotorMode = Adafruit_MotorHAT.RELEASE
            pwmr = 0;
        elif vr > 0:
            rightMotorMode = Adafruit_MotorHAT.FORWARD
        elif vr < 0: 
            rightMotorMode = Adafruit_MotorHAT.BACKWARD

        self.leftMotor.setSpeed(pwml)
        self.leftMotor.run(leftMotorMode);
        self.rightMotor.setSpeed(pwmr)
        self.rightMotor.run(rightMotorMode);
Example #25
   def wmplugin_exec(self, m):
	
	axes = [None, None, None, None]


	self.acc = [self.NEW_AMOUNT*(new-zero)/(one-zero) + self.OLD_AMOUNT*old
	       for old,new,zero,one in zip(self.acc,m,self.acc_zero,self.acc_one)]
	a = math.sqrt(sum(map(lambda x: x**2, self.acc)))

	roll = math.atan(self.acc[cwiid.X]/self.acc[cwiid.Z])
	if self.acc[cwiid.Z] <= 0:
		if self.acc[cwiid.X] > 0: roll += math.pi
		else: roll -= math.pi

	pitch = math.atan(self.acc[cwiid.Y]/self.acc[cwiid.Z]*math.cos(roll))

	axes[0] = int(roll  * 1000 * self.Roll_Scale)
	axes[1] = int(pitch * 1000 * self.Pitch_Scale)

	if (a > 0.85) and (a < 1.15):
		if (math.fabs(roll)*(180/math.pi) > 10) and \
		   (math.fabs(pitch)*(180/math.pi) < 80):
			axes[2] = int(roll * 5 * self.X_Scale)

		if (math.fabs(pitch)*(180/math.pi) > 10):
			axes[3] = int(pitch * 10 * self.Y_Scale)

	return math.degrees(pitch), math.degrees(roll)
Example #26
    def rank_it(self,scores):
        sorted_S = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)

        if (math.fabs(sorted_S[0][1])+1) < math.fabs(sorted_S[1][1]) :
            return (sorted_S[0][0])
        else:
            return ('neutral')
Example #27
    def process_markers(self, msg):
        for marker in msg.markers:
            # do some filtering based on prior knowledge
            # we know the approximate z coordinate and that all angles but yaw should be close to zero
            euler_angles = euler_from_quaternion((marker.pose.pose.orientation.x,
                                                  marker.pose.pose.orientation.y,
                                                  marker.pose.pose.orientation.z,
                                                  marker.pose.pose.orientation.w))
            angle_diffs = TransformHelpers.angle_diff(euler_angles[0],pi), TransformHelpers.angle_diff(euler_angles[1],0)
            print angle_diffs, marker.pose.pose.position.z
            if (marker.id in self.marker_locators and
                3.0 <= marker.pose.pose.position.z <= 3.6 and
                fabs(angle_diffs[0]) <= .4 and
                fabs(angle_diffs[1]) <= .4):
                print "FOUND IT!"
                locator = self.marker_locators[marker.id]
                xy_yaw = list(locator.get_camera_position(marker))
                if self.is_flipped:
                    print "WE ARE FLIPPED!!!"
                    xy_yaw[2] += pi
                print self.pose_correction
                print self.phase_offset
                xy_yaw[0] += self.pose_correction*cos(xy_yaw[2]+self.phase_offset)
                xy_yaw[1] += self.pose_correction*sin(xy_yaw[2]+self.phase_offset)

                orientation_tuple = quaternion_from_euler(0,0,xy_yaw[2])
                pose = Pose(position=Point(x=-xy_yaw[0],y=-xy_yaw[1],z=0),
                            orientation=Quaternion(x=orientation_tuple[0], y=orientation_tuple[1], z=orientation_tuple[2], w=orientation_tuple[3]))
                # TODO: use markers timestamp instead of now() (unfortunately, not populated currently by ar_pose)
                pose_stamped = PoseStamped(header=Header(stamp=msg.header.stamp,frame_id="STAR"),pose=pose)
                # TODO: use frame timestamp instead of now()
                self.star_pose_pub.publish(pose_stamped)
                self.fix_STAR_to_odom_transform(pose_stamped)
Example #28
def is_square(contour):
    """
    Squareness checker

    Square contours should:
        -have 4 vertices after approximation, 
        -have relatively large area (to filter out noisy contours)
        -be convex.
        -have angles between sides close to 90deg (cos(ang) ~0 )
    Note: absolute value of an area is used because area may be
    positive or negative - in accordance with the contour orientation
    """

    area = math.fabs( cv.ContourArea(contour) )
    isconvex = cv.CheckContourConvexity(contour)
    s = 0
    if len(contour) == 4 and area > 1000 and isconvex:
        for i in range(1, 4):
            # find minimum angle between joint edges (maximum of cosine)
            pt1 = contour[i]
            pt2 = contour[i-1]
            pt0 = contour[i-2]

            t = math.fabs(angle(pt0, pt1, pt2))
            if s <= t:s = t

        # if cosines of all angles are small (all angles are ~90 degree) 
        # then its a square
        if s < 0.3:return True

    return False       
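The angle helper used above is not included in this excerpt; a plausible sketch, modeled on the classic OpenCV squares sample (an assumption: the vertex is passed as the last argument), returns the cosine of the corner angle, so values near 0 mean angles near 90 degrees:

import math

def angle(pt1, pt2, pt0):
    # cosine of the angle at pt0 between the segments pt0->pt1 and pt0->pt2
    dx1, dy1 = pt1[0] - pt0[0], pt1[1] - pt0[1]
    dx2, dy2 = pt2[0] - pt0[0], pt2[1] - pt0[1]
    return (dx1 * dx2 + dy1 * dy2) / math.sqrt(
        (dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10)

print(angle((1, 0), (0, 1), (0, 0)))   # 0.0 for a right angle at the origin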
Example #29
    def naive_setup(self,type):
        self.naive_classes_prior = {'neutral':0,'positive':0,'negative':0}
        self.naive_classes_count = {'neutral':0,'positive':0,'negative':0}
        self.naive_classes_words = {'neutral':[],'positive':[],'negative':[]}
        total = 0
        unique = []

        for some in self.all:
            status = some[type].split(",")
            if int(some[1]) == 0:
                typeC = 'neutral'
                self.naive_classes_prior['neutral'] += 1
                self.naive_classes_count['neutral'] += len(status)
                total += 1
            elif int(some[1]) > 0:
                typeC = 'positive'
                self.naive_classes_prior['positive'] += (math.fabs(int(some[1])))
                self.naive_classes_count['positive'] += len(status)
            else:
                typeC = 'negative'
                self.naive_classes_prior['negative'] += (math.fabs(int(some[1])))
                self.naive_classes_count['negative'] += len(status)

            total += math.fabs(int(some[1]))

            for word in status:
                self.naive_classes_words[typeC].append(word)
                if word not in unique:
                    unique.append(word)

        for i in self.naive_classes_prior:
            self.naive_classes_prior[i] = self.naive_classes_prior[i]/total

        self.unique = len(unique)
Example #30
def get_interval(data, numFrames):
    data = np.absolute(data)
    data = smooth_plus(data, 3000) #smooth(data, 300)
    std = 0.30*np.std(data) #+ np.mean(data) 
    interval = []
    coeff = data.shape[0] / numFrames
    i = 0
    while i < len(data):
        if math.fabs(data[i]) > std:
            beg, end = i, 0
            count = 0
            while (math.fabs(data[i]) > std or count < 3000) and ((i+1) <= (len(data)-1)):
                i += 1
                if (math.fabs(data[i]) < std):
                    count += 1
                else:
                    count = 0
                    end = i
            if(end - beg > 3000):
                if end + 10000 < len(data):
                    end += 10000
                    i += 6500
                else:
                    end = len(data) - 1
                interval.append(['', (1+(beg/coeff), 1+(end/coeff))])
        i += 1
    return interval
Example #31
 def Mutate(self):
     geneToMutate = random.randint(0, 3)
     # perturb the chosen gene with Gaussian noise scaled by its current magnitude
     self.genome[geneToMutate] = random.gauss(self.genome[geneToMutate],
                                              math.fabs(self.genome[geneToMutate]))
Example #32
def sample_rows(total, data, replace=True, accounting_column=None, max_iterations=50):
    """
    Samples and returns rows from a data frame while matching a desired control total. The total may
    represent a simple row count or may attempt to match a sum/quantity from an accounting column.

    Parameters
    ----------
    total : int
        The control total the sampled rows will attempt to match.
    data: pandas.DataFrame
        Table to sample from.
    replace: bool, optional, default True
        Indicates if sampling with or without replacement.
    accounting_column: string, optional
        Name of column with accounting totals/quantities to apply towards the control.
        If not provided then row counts will be used for accounting.
    max_iterations: int, optional, default 50
        When using an accounting attribute, the maximum number of sampling iterations
        that will be applied.

    Returns
    -------
    sample_rows : pandas.DataFrame
        Table containing the sample.
    """

    # simplest case, just return n random rows
    if accounting_column is None:
        if replace is False and total > len(data.index.values):
            raise ValueError('Control total exceeds the available samples')
        return data.loc[np.random.choice(data.index.values, total, replace=replace)].copy()

    # make sure this is even feasible
    if replace is False and total > data[accounting_column].sum():
        raise ValueError('Control total exceeds the available samples')

    # determine avg number of accounting items per sample (e.g. persons per household)
    per_sample = data[accounting_column].sum() / (1.0 * len(data.index.values))

    # do the initial sample
    num_samples = int(math.ceil(total / per_sample))
    if replace:
        sample_idx = data.index.values
        sample_ids = np.random.choice(sample_idx, num_samples)
    else:
        sample_idx = np.random.permutation(data.index.values)
        sample_ids = sample_idx[0:num_samples]
        sample_pos = num_samples

    sample_rows = data.loc[sample_ids].copy()
    curr_total = sample_rows[accounting_column].sum()

    # iteratively refine the sample until we match the accounting total
    for i in range(0, max_iterations):

        # keep going if we haven't hit the control
        remaining = total - curr_total
        if remaining == 0:
            break
        num_samples = int(math.ceil(math.fabs(remaining) / per_sample))

        if remaining > 0:
            # we're short, keep sampling
            if replace:
                curr_ids = np.random.choice(sample_idx, num_samples)
            else:
                curr_ids = sample_idx[sample_pos:sample_pos + num_samples]
                sample_pos += num_samples

            curr_rows = data.loc[curr_ids].copy()
            sample_rows = pd.concat([sample_rows, curr_rows])
            curr_total += curr_rows[accounting_column].sum()
        else:
            # we've overshot, remove from existing samples (FIFO)
            curr_rows = sample_rows[:num_samples]
            sample_rows = sample_rows[num_samples:]
            curr_total -= curr_rows[accounting_column].sum()
            if not replace:
                np.append(sample_idx, curr_rows.index.values)

    return sample_rows.copy()
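A minimal usage sketch for the accounting-column path of sample_rows (the table, index labels and 'persons' column are made up for illustration; math, numpy and pandas are assumed to be imported as in the function above):

import math
import numpy as np
import pandas as pd

# hypothetical table of households with a per-row person count used for accounting
hh = pd.DataFrame({'persons': [1, 2, 3, 4, 5]}, index=list('abcde'))

sampled = sample_rows(20, hh, replace=True, accounting_column='persons')
print(sampled['persons'].sum())   # iterates until the sampled person total is close to 20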
Example #33
def create_report_file(param_obj, multipliers, infectiousness, report_name,
                       debug):
    with open(report_name, "w") as outfile:
        success = True
        if not multipliers:
            outfile.write(sft.sft_no_test_data)
        sigma = param_obj[Param_keys.LOGNORMAL_SCALE]
        base_infectivity = param_obj[Param_keys.BASE_INFECTIVITY]
        if sigma > 0:
            mu = -sigma**2 / 2.0
            # test log_normal distribution
            success = sft.test_lognorm(multipliers,
                                       mu=mu,
                                       sigma=sigma,
                                       report_file=outfile,
                                       round=False)

            # test mean_l = 1
            mean_l = np.mean(multipliers)
            mean_infectiousness = np.mean(infectiousness)
            outfile.write(
                "mean of the multipliers is {}, expected 1.0.\n".format(
                    mean_l))
            outfile.write(
                "mean of the Infectiousness is {0}, while base infectivity is {1}.\n"
                .format(mean_infectiousness, base_infectivity))

            tolerance = 2e-2
            if math.fabs(mean_l - 1.0) > tolerance:
                outfile.write(
                    "BAD: mean of the multipliers is {}, expected 1.0.\n".
                    format(mean_l))
                success = False
            # plotting
            size = len(multipliers)
            outfile.write("size is {}\n".format(size))
            scale = math.exp(mu)
            dist_lognormal = stats.lognorm.rvs(sigma, 0, scale, size)
            sft.plot_data(multipliers,
                          dist_lognormal,
                          label1="Emod",
                          label2="Scipy",
                          ylabel="Multiplier",
                          xlabel="data point",
                          category="Emod_vs_Scipy",
                          title="Emod_vs_Scipy, sigma = {}".format(sigma),
                          show=True)
            sft.plot_probability(
                multipliers,
                dist_lognormal,
                precision=1,
                label1="Emod",
                label2="Scipy",
                category="Probability_mass_function_Emod_vs_Scipy",
                title="Emod_vs_Scipy, sigma = {}".format(sigma),
                show=True)
            sft.plot_cdf(multipliers,
                         dist_lognormal,
                         label1="Emod",
                         label2="Scipy",
                         category="cdf",
                         title="cdf, sigma = {}".format(sigma),
                         show=True,
                         line=False)
            if debug:
                with open("scipy_data.txt", "w") as file:
                    for n in sorted(dist_lognormal):
                        file.write(str(n) + "\n")
                with open("emod_data.txt", "w") as file:
                    for n in sorted(multipliers):
                        file.write(str(n) + "\n")
        else:
            # sigma = 0, this feature is disabled
            for multiplier in multipliers:
                if multiplier != 1.0:
                    success = False
                    outfile.write(
                        "BAD: multiplier is {0} when {1} set to {2}, expected 1.0.\n"
                        .format(multiplier, Param_keys.LOGNORMAL_SCALE, sigma))
            # plotting
            sft.plot_data(multipliers,
                          label1="Multiplier",
                          label2="NA",
                          category="Multiplier",
                          title="Multiplier_Sigma={}".format(sigma),
                          ylabel="Multiplier",
                          xlabel="data point",
                          show=True)
            sft.plot_data(
                infectiousness,
                label1="Infectiousness",
                label2="NA",
                category="Infectiousness",
                title="Infectiousness_Sigma={0}_BaseInfectivity={1}".format(
                    sigma, base_infectivity),
                ylabel="Infectiousness",
                xlabel="data point",
                show=True)
        outfile.write(sft.format_success_msg(success))

    if debug:
        print "SUMMARY: Success={0}\n".format(success)
    return success
Example #34
def visao_reg(ref, *keywords, **keysMap):

    imLists = visao_inventory()
    sci_imlist = imLists['sci_imlist']
    # variables I will use later on
    nims = len(sci_imlist)
    dummy_im = fits.open(sci_imlist[0])[0]
    dimY, dimX = dummy_im.shape
    xcen = (dimX - 1) / 2
    ycen = (dimY / 2 - 1) / 2

    if not 'indiv' in keywords:
        Line = fits.open('Line_flat_preproc.fits')[0]
        Cont = fits.open('Cont_flat_preproc.fits')[0]
        center_ref_line = Line.data[ref - 1, :, :]
        center_ref_cont = Cont.data[ref - 1, :, :]
        print('Registering against image: ' + sci_imlist[ref - 1])
    else:
        center_ref_line = fits.open('./indiv/Line_flat_' +
                                    str('%04d' % (ref, )) + '.fits')
        center_ref_cont = fits.open('./indiv/Cont_flat_' +
                                    str('%04d' % (ref, )) + '.fits')
        print('Registering against image: ' + './indiv/Line/Cont_flat_' +
              str('%04d' % (ref, )) + '.fits')

    if 'fwhm' not in keysMap:
        fwhm = 10
    else:
        fwhm = keysMap['fwhm']
    gauss_cen = make_Gaussian(dimX, dimY / 2, fwhm, (xcen, ycen))
    # smoothing to avoid cosmic rays
    center_ref_line_smooth = gaussian_filter(center_ref_line, sigma=5)
    center_ref_cont_smooth = gaussian_filter(center_ref_cont, sigma=5)

    # this is where we do most of the work
    # usfac is the precision so that if usfac=20, precision is 1/20 of pixel
    temp_arr1, temp_arr2 = np.zeros((Line.shape[1], Line.shape[2])), np.zeros(
        (Line.shape[1], Line.shape[2]))
    dxl, dyl = imreg.register_images(center_ref_line_smooth,
                                     gauss_cen,
                                     usfac=1000)
    sci.shift(center_ref_line, (dyl, dxl), order=1, output=temp_arr1)
    center_ref_line = temp_arr1
    dxc, dyc = imreg.register_images(center_ref_cont_smooth,
                                     gauss_cen,
                                     usfac=1000)
    sci.shift(center_ref_cont, (dyc, dxc), order=1, output=temp_arr2)
    center_ref_cont = temp_arr2

    if 'clip' in keysMap:
        clip = keysMap['clip']
    else:
        clip = dimY
    # print(broccoli)
    # Initializing eventual return lists and some other helpful lists
    Lnim = Line.shape[0]
    Line_reg, Cont_reg, SDI_im = np.zeros((Lnim, clip, clip)), np.zeros((Lnim, clip, clip)),\
                                 np.zeros((Lnim, clip, clip))
    Line_smooth, Cont_smooth = np.zeros(Line.shape), np.zeros(Line.shape)
    # main for loop
    for i in range(0, nims):
        Line_smooth[i, :, :] = gaussian_filter(Line.data[i, :, :], sigma=5)
        Cont_smooth[i, :, :] = gaussian_filter(Cont.data[i, :, :], sigma=5)
        temp_arr3, temp_arr4 = np.zeros(
            (Line.shape[1], Line.shape[2])), np.zeros(
                (Line.shape[1], Line.shape[2]))
        shiftXL, shiftYL = imreg.register_images(Line_smooth[i, :, :],
                                                 center_ref_line,
                                                 usfac=200)
        sci.shift(Line.data[i, :, :], (shiftYL, shiftXL),
                  order=1,
                  output=temp_arr3)
        Line.data[i, :, :] = temp_arr3
        shiftXC, shiftYC = imreg.register_images(Cont_smooth[i, :, :],
                                                 center_ref_cont,
                                                 usfac=200)
        sci.shift(Cont.data[i, :, :], (shiftYC, shiftXC),
                  order=1,
                  output=temp_arr4)
        Cont.data[i, :, :] = temp_arr4

        #Zoom option doesn't work with any nan values, will have to find other option
        if 'scl' in keysMap:
            scl = keysMap['scl']
            temp_arr5 = np.zeros((scl * Line.shape[1], scl * Line.shape[2]))
            sci.zoom(Cont.data[i, :, :], scl, output=temp_arr5, order=1)
            Cont.data[i, :, :] = temp_arr5[ycen * scl - dimY / 4:ycen * scl +
                                           dimY / 4, xcen * scl -
                                           dimX / 2:xcen * scl + dimX / 2]

        #print('Line x,y = (' + str(shiftXL) + ',' + str(shiftYL) + ')     Cont x,y = ' + str(shiftXC) + ' ' + str(
        #shiftYC))
        # making sure clip doesn't overshoot frame in some images for line/cont
        if dimX / 2 - math.fabs(shiftXL) < clip / 2:
            print('Line overshoots by ' +
                  str(dimX / 2 - math.fabs(shiftXL) - clip / 2) +
                  ' pixels in x.')
            Line_cropx = math.floor(dimX / 2 - math.fabs(shiftXL))
        else:
            Line_cropx = clip / 2
        if dimY / 4 - math.fabs(shiftYL) < clip / 2:
            print('Line overshoots by ' +
                  str(dimY / 4 - math.fabs(shiftYL) - clip / 2) +
                  ' pixels in y.')
            Line_cropy = math.floor(dimY / 4 - math.fabs(shiftYL))
        else:
            Line_cropy = clip / 2
        if dimX / 2 - math.fabs(shiftXC) < clip / 2:
            print('Cont overshoots by ' +
                  str(dimX / 2 - math.fabs(shiftXC) - clip / 2) +
                  ' pixels in x.')
            Cont_cropx = math.floor(dimX / 2 - math.fabs(shiftXC))
        else:
            Cont_cropx = clip / 2
        if dimY / 4 - math.fabs(shiftYC) < clip / 2:
            print('Cont overshoots by ' +
                  str(dimY / 4 - math.fabs(shiftYC) - clip / 2) +
                  ' pixels in y.')
            Cont_cropy = math.floor(dimY / 4 - math.fabs(shiftYC))
        else:
            Cont_cropy = clip / 2

        # reassigning Line_reg and Cont_reg (yes I know the assignment is confusing, don't think about it)
        Line_reg[i, clip / 2 - Line_cropy:clip / 2 + Line_cropy, clip / 2 - Line_cropx:clip / 2 + Line_cropx] = \
            Line.data[i, ycen - Line_cropy + 1:ycen + Line_cropy + 1, xcen - Line_cropx + 1:xcen + Line_cropx + 1]
        Cont_reg[i, clip / 2 - Cont_cropy:clip / 2 + Cont_cropy, clip / 2 - Cont_cropx:clip / 2 + Cont_cropx] = \
            Cont.data[i, ycen - Cont_cropy + 2:ycen + Cont_cropy + 2, xcen - Cont_cropx + 2:xcen + Cont_cropx + 2]

        # shifting the array if the clip value is odd numbered
        if clip % 2 != 0:
            temp_arr6, temp_arr7 = np.zeros(
                (Line.shape[1], Line.shape[2])), np.zeros(
                    (Line.shape[1], Line.shape[2]))
            sci.shift(Line_reg[:, :, i], 0.5, output=temp_arr6)
            Line_reg[:, :, i] = temp_arr6
            sci.shift(Cont_reg[:, :, i], 0.5, output=temp_arr7)
            Cont_reg[:, :, i] = temp_arr7

        print('Processed image ' + str(i + 1) + ' of ' + str(nims))

        if 'sdi' in keysMap:
            sdi = keysMap['sdi']
            SDI_im[:, :, i] = Line_reg[:, :, i] - sdi * Cont_reg[:, :, i]

    if 'clip' in keysMap:
        fits.writeto('Line_clip' + str('%03d' % (clip, )) + '_reg-2.fits',
                     Line_reg,
                     clobber=True)
        fits.writeto('Cont_clip' + str('%03d' % (clip, )) + '_reg-2.fits',
                     Cont_reg,
                     clobber=True)
    else:
        fits.writeto('Line_reg.fits', Line_reg, clobber=True)
        fits.writeto('Cont_reg.fits', Cont_reg, clobber=True)

    if 'sdi' in keysMap:
        if 'clip' in keysMap:
            fits.writeto('SDI_sc' + str('%.2f' % clip) + '_clip' +
                         str('%03d' % (clip, )) + 'reg.fits',
                         SDI_im,
                         clobber=True)
        else:
            fits.writeto('SDI_sc' + str('%.2f' % clip) + 'reg.fits',
                         SDI_im,
                         clobber=True)
Example #35
                                                      # all terms in the
                                                      # numerator and the
                                                      # denominator
           iexp = iexp + 1
           numerator = numerator + factor*float(train[0])
           denominator = denominator + factor


        # end for
        predicted_time = numerator / denominator
        if args.filename:
             out.write("AlaShefLen_word_mt\t{0}\t{1}".format(iexp,predicted_time)+os.linesep)   
        test_samples = test_samples + 1
        dev=predicted_time-float(test[0])
        forRMSE = forRMSE + dev*dev
        forMAE = forMAE + math.fabs(dev)

    RMSE = math.sqrt(forRMSE/test_samples)
    MAE = forMAE/test_samples
    if args.verbose :
         print currentalpha, currentbeta, RMSE, MAE 
    if args.mae : 
         err=MAE
    else :
         err=RMSE
    if err < besterr :
         besterr = err
         bestalpha = currentalpha
         bestbeta = currentbeta
         print bestalpha, bestbeta, "RMSE=", RMSE, "MAE=", MAE
    
Example #36
 match = []
 # check for lines
 for i in range(3):
     if board[i][0] == board[i][1] == board[i][2] != ' ':
         match.append(step_turn_sign)
 # check for columns
 for j in range(3):
     if board[0][j] == board[1][j] == board[2][j] != ' ':
         match.append(step_turn_sign)
 # check for diagonals
 if board[0][0] == board[1][1] == board[2][2] != ' ':
     match.append(step_turn_sign)
 elif board[0][2] == board[1][1] == board[2][0] != ' ':
     match.append(step_turn_sign)
 # result printing
 if len(match) > 1 or math.fabs(board.count('X') - board.count('O')) > 1:
     print('Impossible')
     continue
 elif len(match) == 0 and ' ' in board:
     print('Game not finished')
     continue
 elif len(match) == 0 and ' ' not in board and step_count == 9:
     print('Draw')
     break
 elif len(match) == 1:
     print('{} wins'.format(match[0]))
     break
 if step_turn_sign == 'X':
     step_turn_sign = 'O'
 else:
     step_turn_sign = 'X'
Example #37
def main():

    allEdges = [
        edge("Bath", "Bristol", 12.9, usedReason.NotUsed, 0, 0, 0),
        edge("Derby", "Nottingham", 14.9, usedReason.NotUsed, 0, 0, 0),
        edge("Liverpool", "Warrington", 18.4, usedReason.NotUsed, 0, 0, 0),
        edge("Chester", "Liverpool", 18.8, usedReason.NotUsed, 0, 0, 0),
        edge("Portsmouth", "Southampton", 19.5, usedReason.NotUsed, 0, 0, 0),
        edge("Manchester", "Warrington", 21.0, usedReason.NotUsed, 0, 0, 0),
        edge("Chester", "Warrington", 22.6, usedReason.NotUsed, 0, 0, 0),
        edge("Salisbury", "Southampton", 23.6, usedReason.NotUsed, 0, 0, 0),
        edge("Leicester", "Nottingham", 27.9, usedReason.NotUsed, 0, 0, 0),
        edge("Leeds", "York", 28.2, usedReason.NotUsed, 0, 0, 0),
        edge("Preston", "Warrington", 30.5, usedReason.NotUsed, 0, 0, 0),
        edge("Oxford", "Swindon", 30.6, usedReason.NotUsed, 0, 0, 0),
        edge("Derby", "Leicester", 32.2, usedReason.NotUsed, 0, 0, 0),
        edge("Leeds", "Sheffield", 35.5, usedReason.NotUsed, 0, 0, 0),
        edge("Derby", "Stoke", 36.2, usedReason.NotUsed, 0, 0, 0),
        edge("Hull", "York", 37.6, usedReason.NotUsed, 0, 0, 0),
        edge("Chester", "Stoke", 37.7, usedReason.NotUsed, 0, 0, 0),
        edge("Nottingham", "Sheffield", 37.8, usedReason.NotUsed, 0, 0, 0),
        edge("Stoke", "Warrington", 39.1, usedReason.NotUsed, 0, 0, 0),
        edge("Bath", "Salisbury", 39.4, usedReason.NotUsed, 0, 0, 0),
        edge("Bristol", "Gloucester", 39.6, usedReason.NotUsed, 0, 0, 0),
        edge("Bristol", "Swindon", 40.2, usedReason.NotUsed, 0, 0, 0),
        edge("Manchester", "Sheffield", 40.9, usedReason.NotUsed, 0, 0, 0),
        edge("Derby", "Sheffield", 41.5, usedReason.NotUsed, 0, 0, 0),
        edge("Cardiff", "Swansea", 41.6, usedReason.NotUsed, 0, 0, 0),
        edge("Birmingham", "Derby", 41.8, usedReason.NotUsed, 0, 0, 0),
        edge("Leicester", "Peterborough", 42.1, usedReason.NotUsed, 0, 0, 0),
        edge("Birmingham", "Leicester", 42.7, usedReason.NotUsed, 0, 0, 0),
        edge("Cambridge", "Peterborough", 43.1, usedReason.NotUsed, 0, 0, 0),
        edge("Bristol", "Cardiff", 44.1, usedReason.NotUsed, 0, 0, 0),
        edge("Leeds", "Manchester", 44.7, usedReason.NotUsed, 0, 0, 0),
        edge("Exeter", "Plymouth", 44.8, usedReason.NotUsed, 0, 0, 0),
        edge("Birmingham", "Stoke", 44.9, usedReason.NotUsed, 0, 0, 0),
        edge("Ipswich", "Norwich", 45.0, usedReason.NotUsed, 0, 0, 0),
        edge("Edinburgh", "Glasgow", 47.5, usedReason.NotUsed, 0, 0, 0),
        edge("Brighton", "Portsmouth", 50.0, usedReason.NotUsed, 0, 0, 0),
        edge("Brighton", "London", 53.3, usedReason.NotUsed, 0, 0, 0),
        edge("London", "Oxford", 55.9, usedReason.NotUsed, 0, 0, 0),
        edge("Carlisle", "Newcastle", 59.1, usedReason.NotUsed, 0, 0, 0),
        edge("Cambridge", "Norwich", 63.9, usedReason.NotUsed, 0, 0, 0),
        edge("Cambridge", "London", 64.0, usedReason.NotUsed, 0, 0, 0),
        edge("Hull", "Leeds", 64.4, usedReason.NotUsed, 0, 0, 0),
        edge("London", "Portsmouth", 74.7, usedReason.NotUsed, 0, 0, 0),
        edge("Birmingham", "Oxford", 78.5, usedReason.NotUsed, 0, 0, 0),
        edge("London", "Southampton", 79.2, usedReason.NotUsed, 0, 0, 0),
        edge("Bristol", "Exeter", 80.7, usedReason.NotUsed, 0, 0, 0),
        edge("Ipswich", "London", 81.9, usedReason.NotUsed, 0, 0, 0),
        edge("London", "Peterborough", 85.4, usedReason.NotUsed, 0, 0, 0),
        edge("Carlisle", "Preston", 88.3, usedReason.NotUsed, 0, 0, 0),
        edge("Exeter", "Salisbury", 91.6, usedReason.NotUsed, 0, 0, 0),
        edge("Carlisle", "Glasgow", 96.8, usedReason.NotUsed, 0, 0, 0),
        edge("Edinburgh", "Newcastle", 104.0, usedReason.NotUsed, 0, 0, 0),
        edge("Canterbury", "Dover", 17.1, usedReason.NotUsed, 0, 0, 0),
        edge("Canterbury", "London", 61.9, usedReason.NotUsed, 0, 0, 0),
        edge("Hull", "Lincoln", 48.2, usedReason.NotUsed, 0, 0, 0),
        edge("Lincoln", "Nottingham", 39.5, usedReason.NotUsed, 0, 0, 0),
        edge("Lincoln", "Sheffield", 46.8, usedReason.NotUsed, 0, 0, 0),
        edge("Lincoln", "Peterborough", 51.9, usedReason.NotUsed, 0, 0, 0),
        edge("Bangor", "Chester", 60.9, usedReason.NotUsed, 0, 0, 0),
        edge("Blackpool", "Preston", 16.0, usedReason.NotUsed, 0, 0, 0),
        edge("Edinburgh", "Perth", 43.4, usedReason.NotUsed, 0, 0, 0),
        edge("Dundee", "Perth", 22.4, usedReason.NotUsed, 0, 0, 0),
        edge("Inverness", "Perth", 112.0, usedReason.NotUsed, 0, 0, 0),
        edge("Aberdeen", "Dundee", 66.0, usedReason.NotUsed, 0, 0, 0),
        edge("Aberdeen", "Inverness", 104.0, usedReason.NotUsed, 0, 0, 0),
        edge("Chester", "Wrexham", 13.4, usedReason.NotUsed, 0, 0, 0),
        edge("Birmingham", "Shrewsbury", 47.2, usedReason.NotUsed, 0, 0, 0),
        edge("Shrewsbury", "Wrexham", 31.7, usedReason.NotUsed, 0, 0, 0),
        edge("Birmingham", "Worcester", 40.6, usedReason.NotUsed, 0, 0, 0),
        edge("Gloucester", "Worcester", 30.2, usedReason.NotUsed, 0, 0, 0),
        edge("Bournemouth", "Exeter", 84.6, usedReason.NotUsed, 0, 0, 0),
        edge("Bournemouth", "Southampton", 33.2, usedReason.NotUsed, 0, 0, 0),
        edge("Darlington", "Leeds", 61.3, usedReason.NotUsed, 0, 0, 0),
        edge("Darlington", "Newcastle", 37.3, usedReason.NotUsed, 0, 0, 0),
        edge("Darlington", "Middlesbrough", 16.4, usedReason.NotUsed, 0, 0, 0),
        edge("Burnley", "Leeds", 35.7, usedReason.NotUsed, 0, 0, 0),
        edge("Burnley", "Manchester", 29.0, usedReason.NotUsed, 0, 0, 0),
        edge("Burnley", "Preston", 21.5, usedReason.NotUsed, 0, 0, 0),
        edge("Cambridge", "Kings Lynn", 45.1, usedReason.NotUsed, 0, 0, 0),
        edge("Norwich", "Kings Lynn", 43.6, usedReason.NotUsed, 0, 0, 0),
        edge("Peterborough", "Kings Lynn", 36.4, usedReason.NotUsed, 0, 0, 0),
        edge("Scarborough", "York", 41.2, usedReason.NotUsed, 0, 0, 0),
        edge("Middlesbrough", "Scarborough", 48.9, usedReason.NotUsed, 0, 0,
             0),
        edge("Hull", "Scarborough", 42.5, usedReason.NotUsed, 0, 0, 0),
        edge("Aberystwyth", "Shrewsbury", 76.0, usedReason.NotUsed, 0, 0, 0),
        edge("Aberystwyth", "Cardigan", 38.3, usedReason.NotUsed, 0, 0, 0),
        edge("Cardigan", "Carmarthen", 26.1, usedReason.NotUsed, 0, 0, 0),
        edge("Carmarthen", "Swansea", 28.0, usedReason.NotUsed, 0, 0, 0),
        edge("Carlisle", "Kendal", 46.1, usedReason.NotUsed, 0, 0, 0),
        edge("Kendal", "Preston", 42.7, usedReason.NotUsed, 0, 0, 0),
        edge("Darlington", "Kendal", 63.3, usedReason.NotUsed, 0, 0, 0),
        edge("Barrow", "Kendal", 34.7, usedReason.NotUsed, 0, 0, 0),
        edge("Cambridge", "Luton", 41.0, usedReason.NotUsed, 0, 0, 0),
        edge("Leicester", "Luton", 69.5, usedReason.NotUsed, 0, 0, 0),
        edge("London", "Luton", 34.3, usedReason.NotUsed, 0, 0, 0),
        edge("Luton", "Oxford", 43.5, usedReason.NotUsed, 0, 0, 0),
        edge("London", "Newbury", 60.8, usedReason.NotUsed, 0, 0, 0),
        edge("Newbury", "Oxford", 27.2, usedReason.NotUsed, 0, 0, 0),
        edge("Newbury", "Southampton", 38.1, usedReason.NotUsed, 0, 0, 0),
        edge("Swindon", "Newbury", 26.0, usedReason.NotUsed, 0, 0, 0),
    ]

    g = createNodes(allEdges)
    dumpDotGraph(g.nodes, g.numNodes, 0)

    doBoruvkaAlgorithm(g)
    dumpDotGraph(g.nodes, g.numNodes, 0)
    Blength = CalculateTotalEdgeLength(g)

    disconnectGraph(g)
    doPrimAlgorithm(g)
    dumpDotGraph(g.nodes, g.numNodes, 0)
    Plength = CalculateTotalEdgeLength(g)

    disconnectGraph(g)
    doKruskalAlgorithm(g)
    dumpDotGraph(g.nodes, g.numNodes, 0)
    Klength = CalculateTotalEdgeLength(g)

    connectGraph(g)
    doReverseDeleteAlgorithm(g)
    dumpDotGraph(g.nodes, g.numNodes, 0)
    Rlength = CalculateTotalEdgeLength(g)

    if (math.fabs(Blength - Plength) < 1.0
            and math.fabs(Plength - Klength) < 1.0
            and math.fabs(Klength - Rlength) < 1.0):
        print("Passed")
    else:
        print("Failed")
Example #38
    def log_upload_progress(self):
        """
        Approximate and evolving method of computing the progress of the upload
        """
        if not globals.progress or not self.has_collected_evidence():
            return

        current_time = datetime.now()
        if self.start_time is None:
            self.start_time = current_time
        if self.last_time is not None:
            elapsed = (current_time - self.last_time)
        else:
            elapsed = timedelta()
        self.last_time = current_time

        # Detect (and report) a stall if no data has changed for more than 5 seconds
        if self.stall_last_time is None:
            self.stall_last_time = current_time
        if (current_time - self.stall_last_time).seconds > max(
                5, 2 * globals.progress_rate):
            log.TransferProgress(100.0 * self.progress_estimation,
                                 self.time_estimation, self.total_bytecount,
                                 (current_time - self.start_time).seconds,
                                 self.speed, True)
            return

        self.nsteps += 1
        """
        Compute the ratio of information being written for deltas vs file sizes
        Using Knuth's algorithm to estimate an approximate upper bound on the % of completion
        The progress is estimated on the current bytes written vs the total bytes to
        change as estimated by a first-dry-run. The weight is the ratio of changing
        data (Delta) against the total file sizes. (pessimistic estimation)
        The method computes the upper bound for the progress when using a sufficiently
        large volsize to accommodate all changes, as using a small volsize may inject
        statistical noise.
        """
        from duplicity import diffdir
        changes = diffdir.stats.NewFileSize + diffdir.stats.ChangedFileSize
        total_changes = self.total_stats.NewFileSize + self.total_stats.ChangedFileSize
        if total_changes == 0 or diffdir.stats.RawDeltaSize == 0:
            return

        # Snapshot current values for progress
        last_progress_estimation = self.progress_estimation

        if self.is_full:
            # Compute mean ratio of data transfer, assuming 1:1 data density
            self.current_estimation = float(
                self.total_bytecount) / float(total_changes)
        else:
            # Compute mean ratio of data transfer, estimating unknown progress
            change_ratio = float(self.total_bytecount) / float(
                diffdir.stats.RawDeltaSize)
            change_delta = change_ratio - self.change_mean_ratio
            self.change_mean_ratio += change_delta / float(
                self.nsteps)  # mean cumulated ratio
            self.change_r_estimation += change_delta * (change_ratio -
                                                        self.change_mean_ratio)
            change_sigma = math.sqrt(
                math.fabs(self.change_r_estimation / float(self.nsteps)))
            """
            Combine variables for progress estimation
            Fit a smoothed curve that covers the most common data density distributions,
            aiming for a large number of incremental changes.
            The computation is:
                Use 50% confidence interval lower bound during first half of the progression.
                Conversely, use 50% C.I. upper bound during the second half. Scale it to the
                changes/total ratio
            """
            self.current_estimation = float(changes) / float(total_changes) * (
                (self.change_mean_ratio - 0.67 * change_sigma) *
                (1.0 - self.current_estimation) +
                (self.change_mean_ratio + 0.67 * change_sigma) *
                self.current_estimation)
            """
            In case we overshot 100%, reduce the confidence interval and trust the mean more, as the
            sigma may be large.
            """
            if self.current_estimation > 1.0:
                self.current_estimation = float(changes) / float(
                    total_changes) * (
                        (self.change_mean_ratio - 0.33 * change_sigma) *
                        (1.0 - self.current_estimation) +
                        (self.change_mean_ratio + 0.33 * change_sigma) *
                        self.current_estimation)
            """
            Meh! If we again overshot 100%, drop the confidence interval to 0 and trust only the mean.
            """
            if self.current_estimation > 1.0:
                self.current_estimation = self.change_mean_ratio * float(
                    changes) / float(total_changes)
        """
        Lastly, just cap it... nothing else we can do to approximate it better.
        Cap it at 99%, as the remaining 1% is reserved for the final step of
        uploading the signature and manifests
        """
        self.progress_estimation = max(
            0.0,
            min(
                self.prev_estimation +
                (1.0 - self.prev_estimation) * self.current_estimation, 0.99))
        """
        Estimate the time just as a projection of the remaining time, fit to a
        [(1 - x) / x] curve
        """
        # As sum of timedeltas, so as to avoid clock skew in long runs
        # (adding also microseconds)
        self.elapsed_sum += elapsed
        projection = 1.0
        if self.progress_estimation > 0:
            projection = (1.0 -
                          self.progress_estimation) / self.progress_estimation
        self.time_estimation = int(projection *
                                   float(self.elapsed_sum.total_seconds()))

        # Apply values only when monotonic, so the estimates look more consistent to the human eye
        if self.progress_estimation < last_progress_estimation:
            self.progress_estimation = last_progress_estimation
        """
        Compute Exponential Moving Average of speed as bytes/sec of the last 30 probes
        """
        if elapsed.total_seconds() > 0:
            self.transfers.append(
                float(self.total_bytecount - self.last_total_bytecount) /
                float(elapsed.total_seconds()))
        self.last_total_bytecount = self.total_bytecount
        if len(self.transfers) > 30:
            self.transfers.popleft()
        self.speed = 0.0
        for x in self.transfers:
            self.speed = 0.3 * x + 0.7 * self.speed

        log.TransferProgress(100.0 * self.progress_estimation,
                             self.time_estimation, self.total_bytecount,
                             (current_time - self.start_time).seconds,
                             self.speed, False)
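The "Knuth algorithm" mentioned in the docstring above is the online mean/variance update applied to change_mean_ratio and change_r_estimation. A minimal standalone sketch of that update (Welford's method; the sample values are illustrative):

import math

def online_mean_sigma(samples):
    # Running mean and standard deviation in a single pass (Welford/Knuth update)
    n = 0
    mean = 0.0
    m2 = 0.0  # accumulated sum of squared deviations
    for x in samples:
        n += 1
        delta = x - mean
        mean += delta / float(n)
        m2 += delta * (x - mean)
    sigma = math.sqrt(m2 / float(n)) if n else 0.0
    return mean, sigma

# e.g. a handful of change ratios observed so far
print(online_mean_sigma([1.1, 0.9, 1.3, 1.0]))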
Example #39
0
        def format(self, obj, pattern=None):
            "See zope.i18n.interfaces.IFormat"
            # Make or get the binary form of the number pattern
            if pattern is not None:
                bin_pattern = parseNumberPattern(pattern)
            else:
                bin_pattern = self._bin_pattern
            # Get positive or negative sub-pattern
            if obj >= 0:
                bin_pattern = bin_pattern[0]
            else:
                bin_pattern = bin_pattern[1]

            if bin_pattern[EXPONENTIAL] != '':
                obj_int_frac = str(obj).split('.')
                # The exponential might have a mandatory sign; remove it from the
                # bin_pattern and remember the setting
                exp_bin_pattern = bin_pattern[EXPONENTIAL]
                plus_sign = u''
                if exp_bin_pattern.startswith('+'):
                    plus_sign = self.symbols['plusSign']
                    exp_bin_pattern = exp_bin_pattern[1:]
                # We have to remove the possible '-' sign
                if obj < 0:
                    obj_int_frac[0] = obj_int_frac[0][1:]
                if obj_int_frac[0] == '0':
                    # abs() of a number smaller than 1
                    if len(obj_int_frac) > 1:
                        res = re.match('(0*)[0-9]*',
                                       obj_int_frac[1]).groups()[0]
                        exponent = self._format_integer(
                            str(len(res) + 1), exp_bin_pattern)
                        exponent = self.symbols['minusSign'] + exponent
                        number = obj_int_frac[1][len(res):]
                    else:
                        # We have exactly 0
                        exponent = self._format_integer('0', exp_bin_pattern)
                        number = self.symbols['nativeZeroDigit']
                else:
                    exponent = self._format_integer(
                        str(len(obj_int_frac[0]) - 1), exp_bin_pattern)
                    number = ''.join(obj_int_frac)

                fraction, roundInt = self._format_fraction(
                    number[1:], bin_pattern[FRACTION])
                if roundInt:
                    number = str(int(number[0]) + 1) + fraction
                else:
                    number = number[0] + fraction

                # We might have a plus sign in front of the exponential integer
                if not exponent.startswith('-'):
                    exponent = plus_sign + exponent

                pre_padding = len(bin_pattern[FRACTION]) - len(number) + 2
                post_padding = len(exp_bin_pattern) - len(exponent)
                number += self.symbols['exponential'] + exponent

            else:
                obj_int_frac = str(obj).split('.')
                if len(obj_int_frac) > 1:
                    fraction, roundInt = self._format_fraction(
                        obj_int_frac[1], bin_pattern[FRACTION])
                else:
                    fraction = ''
                    roundInt = False
                if roundInt:
                    obj = round(obj)
                integer = self._format_integer(str(int(math.fabs(obj))),
                                               bin_pattern[INTEGER])
                # Adding grouping
                if bin_pattern[GROUPING] == 1:
                    help = ''
                    for pos in range(1, len(integer) + 1):
                        if (pos - 1) % 3 == 0 and pos != 1:
                            help = self.symbols['group'] + help
                        help = integer[-pos] + help
                    integer = help
                pre_padding = len(bin_pattern[INTEGER]) - len(integer)
                post_padding = len(bin_pattern[FRACTION]) - len(fraction) + 1
                number = integer + fraction

            # Put it all together
            text = ''
            if bin_pattern[PADDING1] is not None and pre_padding > 0:
                text += bin_pattern[PADDING1] * pre_padding
            text += bin_pattern[PREFIX]
            if bin_pattern[PADDING2] is not None and pre_padding > 0:
                if bin_pattern[PADDING1] is not None:
                    text += bin_pattern[PADDING2]
                else:
                    text += bin_pattern[PADDING2] * pre_padding
            text += number
            if bin_pattern[PADDING3] is not None and post_padding > 0:
                if bin_pattern[PADDING4] is not None:
                    text += bin_pattern[PADDING3]
                else:
                    text += bin_pattern[PADDING3] * post_padding
            text += bin_pattern[SUFFIX]
            if bin_pattern[PADDING4] is not None and post_padding > 0:
                text += bin_pattern[PADDING4] * post_padding

            # TODO: Need to make sure unicode is everywhere
            return unicode(text)
Example #40
0
 def isAt(self, x0, y0, err):
     if math.fabs(self.rect.centerx -
                  x0) < err and math.fabs(self.rect.centery - y0) < err:
         return True
     return False
Example #41
0
 def manhattan_distance(point_a, point_b):
     return math.fabs(point_a[ 0 ] - point_b[ 0 ]) + math.fabs(point_a[ 1 ] - point_b[ 1 ])
def __normalize_to_2byte(wave_data):
    amplitude_multiplier = min(1, (2**(8 * __SAMPLE_WIDTH_IN_BYTES - 1) - 1) /
                               max([fabs(signal) for signal in wave_data]))
    return [int(amplitude_multiplier * signal) for signal in wave_data]
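Assuming __SAMPLE_WIDTH_IN_BYTES == 2 (not shown in the excerpt above), the clamp value is 2**15 - 1 = 32767, and the multiplier only scales the signal down when its peak would otherwise clip; a quick, hedged check of that arithmetic:

from math import fabs

__SAMPLE_WIDTH_IN_BYTES = 2  # assumption: 16-bit output samples
wave_data = [10000.0, -40000.0, 25000.0]  # illustrative samples whose peak would clip
peak = max(fabs(signal) for signal in wave_data)
amplitude_multiplier = min(1, (2 ** (8 * __SAMPLE_WIDTH_IN_BYTES - 1) - 1) / peak)
print(amplitude_multiplier)  # ~0.819: the -40000.0 peak is mapped to -32767
print([int(amplitude_multiplier * signal) for signal in wave_data])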
Example #43
0
 def __areInRange(num1, num2, Range=1):
     for i in range(Range + 1):
         if math.fabs(num1 - num2) == i:
             return True
     return False
Example #44
0
def quantize(image, num, score_tone=100, score_hist=1000, score_diff=100):

    if num < 1 or num > 256: return  # nothing to do for an out-of-range color count

    score_tone *= image.size
    score_hist *= image.size
    score_diff *= image.size

    pixels = image.data

    class _Cls:
        color = 0
        hist = 0
        score = 0
        next = 0

    cls = [_Cls() for i in range(256)]

    # Get histogram
    i = 0
    while i < image.size:
        cls[ord(pixels[i])].hist += 1
        i += 1

    # Initialize all classes
    colors = 0
    last = -1
    first = -1
    for i in range(256):
        cls[i].color = i
        if cls[i].hist:
            if last == -1:
                last = i
                first = i
            else:
                cls[last].score = (fabs(cls[last].color-cls[i].color)*score_tone) \
                                + (cls[last].hist+cls[i].hist)*score_hist   \
                                - (abs(cls[last].hist-cls[i].hist)*score_diff)
                cls[last].next = i
                colors += 1
                last = i
        else:
            cls[i].color = -1
            cls[i].score = 0
            cls[i].next = -1

    # Register the last one
    cls[last].color = last
    cls[last].score = (1 * score_tone)  \
                + (cls[last].hist+cls[last].hist)*score_hist   \
                - (abs(cls[last].hist-cls[last].hist)*score_diff)

    cls[last].next = -1
    colors += 1

    # Remove one color at a time, until we have the required number of colors
    while (colors > num):
        i = first
        ant = -1
        last = i
        min = i
        while (i != -1):
            if (cls[i].score < cls[min].score):
                last = ant
                min = i
            ant = i
            i = cls[i].next

        if (min == first):  # First one
            i = min
            j = cls[min].next

            n = ((cls[i].color * cls[i].hist) +
                 (cls[j].color * cls[j].hist)) / float(cls[i].hist +
                                                       cls[j].hist)

            # Match colors
            c1 = cls[i].color
            c2 = cls[j].color
            for k in range(256):
                if (cls[k].color == c1):
                    cls[k].color = n
                if (cls[k].color == c2):
                    cls[k].color = n

            cls[j].hist += cls[i].hist
            cls[j].score=  (fabs(cls[j].color-cls[cls[j].next].color)*score_tone)  \
                        + (cls[j].hist+cls[cls[j].next].hist)*score_hist          \
                        - (abs(cls[j].hist-cls[cls[j].next].hist)*score_diff)

            # Remove the least one
            first = cls[i].next

        else:
            if (min == ant):  # last one
                i = cls[last].next
                j = last

                n = ((cls[i].color * cls[i].hist) +
                     (cls[j].color * cls[j].hist)) / float(cls[i].hist +
                                                           cls[j].hist)

                # Matching colors
                ant = -1
                c1 = cls[i].color
                c2 = cls[j].color
                for k in range(256):
                    if (cls[k].color == c1):
                        cls[k].color = n

                    if (cls[k].color == c2):
                        cls[k].color = n

                    # Get the previous to the last one
                    if (cls[k].next == last):
                        ant = k

                cls[j].hist += cls[i].hist
                if (ant != -1):
                    cls[ant].score= (fabs(cls[ant].color-cls[j].color)*score_tone) \
                                    + (cls[ant].hist+cls[j].hist)*score_hist \
                                    - (abs(cls[ant].hist-cls[j].hist)*score_diff)

                # Remove the least
                cls[j].next = -1

            else:  # In the middle
                i = min
                j = cls[min].next

                n = ((cls[i].color * cls[i].hist) +
                     (cls[j].color * cls[j].hist)) / float(cls[i].hist +
                                                           cls[j].hist)

                # Matching colors
                c1 = cls[i].color
                c2 = cls[j].color
                for k in range(256):
                    if (cls[k].color == c1):
                        cls[k].color = n
                    if (cls[k].color == c2):
                        cls[k].color = n

                cls[j].hist += cls[i].hist
                cls[last].score=  (fabs(cls[last].color-cls[j].color)*score_tone)   \
                            + (cls[last].hist+cls[j].hist)*score_hist   \
                            - (abs(cls[last].hist-cls[j].hist)*score_diff)
                cls[j].score=  (fabs(cls[cls[j].next].color-cls[j].color)*score_tone)   \
                            + (cls[cls[j].next].hist+cls[j].hist)*score_hist   \
                            - (abs(cls[cls[j].next].hist-cls[j].hist)*score_diff)

                # Remove the least
                cls[last].next = j

        colors -= 1

    # Fill in the color table (the first entries are left unfixed; it does not matter)
    c1 = -1
    for i in range(256):
        if (cls[i].color != -1):
            c1 = cls[i].color
        else:
            cls[i].color = c1

    # Modify tones in the original image
    i = 0
    while i < image.size:
        j = ord(pixels[i])
        pixels[i] = chr(int(cls[j].color + 0.5))
        i += 1
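Each adjacent pair of gray levels above gets a merge score built from the tone distance, the combined pixel count, and the histogram imbalance; the greedy loop repeatedly merges the pair with the lowest score. A small illustration of that scoring with made-up values and the unscaled default weights:

from math import fabs

# Illustrative adjacent classes: gray levels and their pixel counts (made-up values)
color_a, hist_a = 10, 500
color_b, hist_b = 12, 480
score_tone, score_hist, score_diff = 100, 1000, 100  # default weights before the image.size scaling

score = (fabs(color_a - color_b) * score_tone
         + (hist_a + hist_b) * score_hist
         - abs(hist_a - hist_b) * score_diff)
print(score)  # 978200.0: the greedy loop merges the lowest-scoring pair first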
Example #45
0
    def execute(self, context):
        # Enable for debugging code
        debugmode = False

        scene = context.scene
        mypoints = None
        clearangles = None

        if debugmode is True:
            print("======================================================================")
            print("==                                                                  ==")
            print("==  Grease pencil strokes analysis                                  ==")
            print("==                                                                  ==")
            print("======================================================================")

        # -----------------------------------
        # Get grease pencil points
        # -----------------------------------
        # noinspection PyBroadException
        try:

            # noinspection PyBroadException
            try:
                pencil = bpy.context.object.grease_pencil.layers.active
            except:
                pencil = bpy.context.scene.grease_pencil.layers.active

            if pencil.active_frame is not None:
                for i, stroke in enumerate(pencil.active_frame.strokes):
                    stroke_points = pencil.active_frame.strokes[i].points
                    allpoints = [(point.co.x, point.co.y)
                                 for point in stroke_points]

                    mypoints = []
                    idx = 0
                    x = 0
                    y = 0
                    orientation = None
                    old_orientation = None

                    for point in allpoints:
                        if idx == 0:
                            x = point[0]
                            y = point[1]
                        else:
                            abs_x = abs(point[0] - x)
                            abs_y = abs(point[1] - y)

                            if abs_y > abs_x:
                                orientation = "V"
                            else:
                                orientation = "H"

                            if old_orientation == orientation:
                                x = point[0]
                                y = point[1]
                            else:
                                mypoints.extend([(x, y)])
                                x = point[0]
                                y = point[1]
                                old_orientation = orientation

                        idx += 1
                    # Last point
                    mypoints.extend([(x, y)])

                    if debugmode is True:
                        print("\nPoints\n====================")
                        i = 0
                        for p in mypoints:
                            print(str(i) + ":" + str(p))
                            i += 1
                    # -----------------------------------
                    # Calculate distance between points
                    # -----------------------------------
                    if debugmode is True:
                        print("\nDistance\n====================")
                    i = len(mypoints)
                    distlist = []
                    for e in range(1, i):
                        d = sqrt(
                            ((mypoints[e][0] - mypoints[e - 1][0]) ** 2) + ((mypoints[e][1] - mypoints[e - 1][1]) ** 2))
                        # Imperial units if needed
                        if bpy.context.scene.unit_settings.system == "IMPERIAL":
                            d *= 3.2808399

                        distlist.extend([d])

                        if debugmode is True:
                            print(str(e - 1) + ":" + str(d))
                    # -----------------------------------
                    # Calculate angle of walls
                    # clamped to right angles
                    # -----------------------------------
                    if debugmode is True:
                        print("\nAngle\n====================")

                    i = len(mypoints)
                    anglelist = []
                    for e in range(1, i):
                        sinv = (mypoints[e][1] - mypoints[e - 1][1]) / sqrt(
                            ((mypoints[e][0] - mypoints[e - 1][0]) ** 2) + ((mypoints[e][1] - mypoints[e - 1][1]) ** 2))
                        a = asin(sinv)
                        # Clamp to 90 or 0 degrees
                        if fabs(a) > pi / 4:
                            b = pi / 2
                        else:
                            b = 0

                        anglelist.extend([b])
                        # Reverse the distance using angles (inverse angle to axis) for vertical lines
                        if a < 0.0 and b != 0:
                            distlist[e - 1] *= -1  # reverse distance

                        # Reverse the distance for horizontal lines
                        if b == 0:
                            if mypoints[e - 1][0] > mypoints[e][0]:
                                distlist[e - 1] *= -1  # reverse distance

                        if debugmode is True:
                            print(str(e - 1) + ":" + str((a * 180) / pi) + "...:" + str(
                                (b * 180) / pi) + "--->" + str(distlist[e - 1]))

                    # ---------------------------------------
                    # Verify duplications and reduce noise
                    # ---------------------------------------
                    if len(anglelist) >= 1:
                        clearangles = []
                        cleardistan = []
                        i = len(anglelist)
                        oldangle = anglelist[0]
                        olddist = 0
                        for e in range(0, i):
                            if oldangle != anglelist[e]:
                                clearangles.extend([oldangle])
                                cleardistan.extend([olddist])
                                oldangle = anglelist[e]
                                olddist = distlist[e]
                            else:
                                olddist += distlist[e]
                        # last
                        clearangles.extend([oldangle])
                        cleardistan.extend([olddist])

            # ----------------------------
            # Create the room
            # ----------------------------
            if len(mypoints) > 1 and len(clearangles) > 0:
                # Move cursor
                bpy.context.scene.cursor_location.x = mypoints[0][0]
                bpy.context.scene.cursor_location.y = mypoints[0][1]
                bpy.context.scene.cursor_location.z = 0  # always on grid floor

                # Add room mesh
                bpy.ops.mesh.archimesh_room()
                myroom = context.object
                mydata = myroom.RoomGenerator[0]
                # Number of walls
                mydata.wall_num = len(mypoints) - 1
                mydata.ceiling = scene.archimesh_ceiling
                mydata.floor = scene.archimesh_floor
                mydata.merge = scene.archimesh_merge

                i = len(mypoints)
                for e in range(0, i - 1):
                    if clearangles[e] == pi / 2:
                        if cleardistan[e] > 0:
                            mydata.walls[e].w = round(fabs(cleardistan[e]), 2)
                            mydata.walls[e].r = (fabs(clearangles[e]) * 180) / pi  # from radians
                        else:
                            mydata.walls[e].w = round(fabs(cleardistan[e]), 2)
                            mydata.walls[e].r = (fabs(clearangles[e]) * 180 * -1) / pi  # from radians

                    else:
                        mydata.walls[e].w = round(cleardistan[e], 2)
                        mydata.walls[e].r = (fabs(clearangles[e]) * 180) / pi  # from radians

                # Remove Grease pencil
                if pencil is not None:
                    for frame in pencil.frames:
                        pencil.frames.remove(frame)

                self.report({'INFO'}, "Archimesh: Room created from grease pencil strokes")
            else:
                self.report({'WARNING'}, "Archimesh: Not enough grease pencil strokes for creating room.")

            return {'FINISHED'}
        except:
            self.report({'WARNING'}, "Archimesh: No grease pencil strokes. Do strokes in top view before creating room")
            return {'CANCELLED'}
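The wall-angle step in the operator above clamps every stroke segment to 0 or 90 degrees and flips the sign of the distance depending on the stroke direction. A self-contained sketch of that per-segment logic (the two points are illustrative):

from math import sqrt, asin, fabs, pi

def clamp_segment(p0, p1):
    # Distance between two consecutive grease-pencil points
    d = sqrt((p1[0] - p0[0]) ** 2 + (p1[1] - p0[1]) ** 2)
    a = asin((p1[1] - p0[1]) / d)
    # Clamp the segment angle to 90 or 0 degrees
    b = pi / 2 if fabs(a) > pi / 4 else 0
    # Reverse the distance for downward vertical segments
    if a < 0.0 and b != 0:
        d *= -1
    # Reverse the distance for leftward horizontal segments
    if b == 0 and p0[0] > p1[0]:
        d *= -1
    return d, b

print(clamp_segment((0.0, 0.0), (0.2, 2.0)))  # ~(2.01, 1.571): treated as a vertical wall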
    listener=tf2_ros.TransformListener(tfBuffer)

    pospub=rospy.Publisher('Bot_Pos0',geometry_msgs.msg.Vector3, queue_size=1)
    rate=rospy.Rate(10)

    while not rospy.is_shutdown():
        try:
            trans_front=tfBuffer.lookup_transform('camera','front',rospy.Time(),rospy.Duration(1))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
            rate.sleep()
            continue
        timer=trans_front.header.stamp.secs

        # Create the Vector3 message before the branch so the stale-data path below
        # never references an unbound bot_pos on the first iteration
        bot_pos=geometry_msgs.msg.Vector3()

        if (rospy.get_time()-timer)<1.5:

            quat=[trans_front.transform.rotation.x,trans_front.transform.rotation.y,trans_front.transform.rotation.z,trans_front.transform.rotation.w]
            pitch,yaw,roll=euler_from_quaternion(quat)

            bot_pos.x=-trans_front.transform.translation.x-0.25
            bot_pos.y=4.7123+yaw #CCW angle off pos x from dump bucket corner, still needs stepper transform
            bot_pos.z=trans_front.transform.translation.z
            if math.fabs(roll)>2:
                bot_pos.x=-bot_pos.x
                bot_pos.y=1.5707-yaw
        else:
            bot_pos.y=-42

        pospub.publish(bot_pos)
        rate.sleep()
def optimize_neighborhoods(grouped_lists):

    # Step 1: make the grouped lists into a single list of homologs
    org_hlog_list = list(itertools.chain(*grouped_lists))

    neighborhoods_only = [i for i in grouped_lists if len(i) > 1]
    grouped_hlog_list = list(itertools.chain(*neighborhoods_only))

    # Step 2: determine the number of unique genes in both neighborhoods, and the organism.
    # To better explain: I need to know the number of unique genes the organism contains for the gene block.
    # I also need to know the number of unique genes found in neighborhoods.
    number_unique_genes_in_organism = len(
        make_unique(org_hlog_list, lambda x: x.blast_annotation()))
    number_unique_in_neighborhoods = len(
        make_unique(grouped_hlog_list, lambda x: x.blast_annotation()))
    '''
    # Debugging.  This does check out.  Kind of interesting, though: there are some inline tandem repeats where the gene name is different.
    # Everything looks like it works correctly though.
    print org_hlog_list[0].organism(), "number_unique_genes_in_organism", number_unique_genes_in_organism, "number_unique_in_neighborhoods", number_unique_in_neighborhoods
    if number_unique_in_neighborhoods == 1:
        print "neighborhoods_only", neighborhoods_only
        for group in neighborhoods_only:
            for gene in group:
                print gene.blast_annotation(), gene.genbank_annotation(), gene.locus(), gene.start(), gene.stop()
        #print "grouped_lists",grouped_lists
    '''
    # Step 3: greedy algorithm to determine the best neighborhoods to report as a final result

    optimal = False
    num_in_list = 1  # this is the number of elements per list returned
    best_duplicates = 0
    splits = number_unique_genes_in_organism - number_unique_in_neighborhoods
    while not optimal:
        for group in itertools.combinations(grouped_lists, num_in_list):
            #all_homologs_in_grouping = [item for sublist in group for item in sublist]
            all_homologs_in_grouping = list(itertools.chain(*group))

            #print all_homologs_in_grouping
            #unique_in_set = len(MakeUnique(all_homologs_in_grouping, lambda a: a.predicted_gene))
            unique_in_set = len(
                make_unique(all_homologs_in_grouping,
                            lambda x: x.blast_annotation()))
            #if unique_in_set == len_unique_grouped: # we have an optimal solution, perhaps not global optima
            if unique_in_set == number_unique_in_neighborhoods:  # we have an optimal solution, perhaps not global optima
                duplicates = int(
                    math.fabs(
                        len(all_homologs_in_grouping) -
                        number_unique_in_neighborhoods))
                if not optimal:
                    optimal = True
                    best_grouping = list(group)
                    best_duplicates = duplicates
                    best_split = splits
                elif duplicates < best_duplicates:
                    best_grouping = list(group)
                    best_duplicates = duplicates
        splits += 1
        num_in_list += 1
    #print "splits " , splits, ": best_split ", best_split
    #print "Best grouping as found by the program\n", best_grouping

    # Step 4: determine the best (if necessary) singleton genes to complete
    if number_unique_genes_in_organism != number_unique_in_neighborhoods:
        # This step takes time, so only perform it when you have to
        best_singletons = return_best_singleton_genes(grouped_lists)
        #print "Difference", number_unique_genes_in_organism - number_unique_in_neighborhoods, len(best_singletons)
        #print "singletons", best_singletons , ' '.join([i.blast_annotation() for i in list(itertools.chain(*best_singletons))])
        best_grouping = best_grouping + best_singletons

    return best_grouping, best_split, best_duplicates  #, len_unique_grouped
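optimize_neighborhoods depends on a make_unique(items, key) helper that is not shown in this excerpt; the sketch below is only an assumption of its behaviour (deduplicate by the key function, keeping first occurrences), which is all the counting logic above needs:

def make_unique(items, key):
    # Assumed behaviour: keep the first item seen for each distinct key(item)
    seen = set()
    unique = []
    for item in items:
        k = key(item)
        if k not in seen:
            seen.add(k)
            unique.append(item)
    return unique

# e.g. counting distinct annotations, as in Step 2 above
print(len(make_unique(["geneA", "geneB", "geneA"], lambda x: x)))  # 2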
Example #48
0
with open("tb_1.log") as f:
        content_1 = f.readlines()

for x in content_1:
    roots_1.append(re.search(r'square=x\s*([^\n]+)', x))

for x in content_1:
    entries_1.append(re.search(r'Entrada=x\s*([^\n]+),', x))

for idx, valx in enumerate(roots_1):
    for idy, valy in enumerate(entries_1):
        if idx == idy:
            #print('Roots',int(valx.group(1))," Entries",int(valy.group(1))," \n")
            #print('Raices iteradas',float(valx.group(1))/131072,"Calculo con entradas",1/math.sqrt(float(valy.group(1))/131072))
            #print('Errores',((float(valx.group(1))/131072 - 1/math.sqrt(float(valy.group(1))/131072))/(float(valx.group(1))/131072))*100)
            errors_1.append(math.fabs(((float(valx.group(1))/131072 - 1/math.sqrt(float(valy.group(1))/131072))/(float(valx.group(1))/131072))*100))

cont_1 = len(errors_1)
accumulator_1 = sum(float(i) for i in errors_1)
total_error_1 = float(accumulator_1)/float(cont_1)
print("Error promedio ",total_error_1) 

roots_2 = []
entries_2 = []
errors_2 = []

with open("tb_2.log") as f:
        content_2 = f.readlines()

for x in content_2:
    roots_2.append(re.search(r'square=x\s*([^\n]+)', x))
#since the correlation coefficient is very low (close to 0), indicating very little correlation, the t-statistic
# shows that there is little evidence of a relationship, resulting in a p-value greater than the 0.05 confidence level,
# ultimately stating that the gene is not abnormally expressed

#Does not hold: (CC)0.600022 (p-value)0.014003 (t-value)2.806403
#since the correlation coefficient is very high and close to one, there is a lot of correlation, thus giving a larger
# t-statistic since there is a lot of evidence to show the correlation; finally, the p-value is well below 0.05,
# indicating that the gene's abnormal expression marks it as a differentially expressed gene.
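The commentary above ties a correlation coefficient to a t-statistic and a p-value; for a Pearson correlation, t = r * sqrt(n - 2) / sqrt(1 - r**2). A hedged check of that relationship on invented data (scipy is assumed to be available):

import math
from scipy import stats

x = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]  # illustrative expression values
y = [2.1, 2.9, 4.2, 4.8, 6.3, 6.7]
r, p_value = stats.pearsonr(x, y)
n = len(x)
t_stat = r * math.sqrt(n - 2) / math.sqrt(1 - r ** 2)
print(r, t_stat, p_value)  # a high |r| gives a large |t| and a small p-value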


# In[206]:

# Credit given to group: Nick Dawes, Megan Chan, Shrey Desai, Elias Sanchez, Chirs Apgar, Jose Augilar
sig_gene_list = []  ##init list holding genes meeting the criteria: p-value <= 0.05 and |log-FC| >= 1 (fold change >= 2)
for i in range(0, len(resultTable.index)):##iterate through all genes
    if resultTable.ix[i, 'p-value'] <= .05 and math.fabs(resultTable.ix[i, 'log-FC']) >= 1:  ##test conditions
        sig_gene_list.append(resultTable.index[i])##append if pass condition
sig_gene_list
seaborn.clustermap(log.ix[sig_gene_list]).savefig("clustermapTAMALS.png")  ##go back to the main df (logRPM filtered by relevant samples) and pick out the significant genes


# In[199]:

# Written by group: Nick Dawes, Megan Chan, Shrey Desai, Elias Sanchez, Chirs Apgar, Jose Augilar
#The cluster map found by our group for c9ALS cerebellum samples as compared to healthy cerebellum samples seems 
#to mostly agree with the equivalent cluster map as shown in Figure 1g in the Petrucelli ALS paper. A small set 
#of genes that are significantly differentially expressed in the c9ALS samples show clustering,whereas the healthy 
#samples do not show anywhere close to the same levels of differential expression in the clustered genes. There is a 
#difference between the group cluster map and the Petrucelli cluster map, however, in the rotation of the map, where
#it appears that the genes were represented on the y-axis, and the samples on the x-axis in our clustermap. 
#The Petrucelli cluster map appears to have the genes on the x-axis while the various samples are represented on the 
Example #50
0
    def getLinacAccLatticeFromDA(self, names, acc_da):
        """
        Returns the linac accelerator lattice for specified sequence names.
        """
        if (len(names) < 1):
            msg = "The SNS_LinacLatticeFactory method getLinacAccLatticeFromDA(names,): you have to specify the names array!"
            msg = msg + os.linesep
            msg = msg + "Stop."
            msg = msg + os.linesep
            orbitFinalize(msg)
        #----- let's parse the XML DataAdaptor
        accSeq_da_arr = acc_da.childAdaptors()
        #-----let's filter and check that the names are in good order
        accSeq_da_arr = self.filterSequences_and_OptionalCheck(
            accSeq_da_arr, names)
        #----make linac lattice
        linacAccLattice = LinacAccLattice(acc_da.getName())

        #There are the following possible types of elements in the linac tree:
        #QUAD - quadrupole
        #RFGAP - RF Gap
        #DCH - horizontal dipole corrector
        #DCV - vertical dipole corrector
        #Marker - anything else with length equal to 0
        #Before putting everything into the linacAccLattice we will create sequences
        # with all nodes.
        #----------------------------------------------------------------------
        # The DRIFTS will be generated additionally and put into right places
        #----------------------------------------------------------------------
        def positionComp(node1_da, node2_da):
            if (node1_da.getParam("pos") > node2_da.getParam("pos")):
                return 1
            else:
                if (node1_da.getParam("pos") == node2_da.getParam("pos")):
                    return 0
            return -1

        accSeqs = []
        accRF_Cavs = []
        seqPosition = 0.
        for seq_da in accSeq_da_arr:
            #print "debug === seq=",seq_da.getName()
            accSeq = Sequence(seq_da.getName())
            accSeq.setLinacAccLattice(linacAccLattice)
            accSeq.setLength(seq_da.doubleValue("length"))
            accSeq.setPosition(seqPosition)
            seqPosition = seqPosition + accSeq.getLength()
            #---- BPM frequency for this sequence ----
            if (seq_da.hasAttribute("bpmFrequency")):
                bpmFrequency = seq_da.doubleValue("bpmFrequency")
                accSeq.addParam("bpmFrequency", bpmFrequency)
            #-----------------------------------------
            accSeqs.append(accSeq)
            #---- create RF Cavities
            if (len(seq_da.childAdaptors("Cavities")) == 1):
                cavs_da = seq_da.childAdaptors("Cavities")[0]
                cav_da_arr = cavs_da.childAdaptors("Cavity")
                for cav_da in cav_da_arr:
                    frequency = cav_da.doubleValue("frequency")
                    cav_amp = cav_da.doubleValue("ampl")
                    cav_name = cav_da.stringValue("name")
                    cav_pos = cav_da.doubleValue("pos")
                    cav = RF_Cavity(cav_name)
                    cav.setAmp(cav_amp)
                    cav.setFrequency(frequency)
                    cav.setPosition(cav_pos)
                    accSeq.addRF_Cavity(cav)
            #----------------------------
            #node_da_arr - array of nodes. These nodes are not AccNodes. They are XmlDataAdaptor class instances
            node_da_arr = seq_da.childAdaptors("accElement")
            #put nodes in order according to the position in the sequence
            for node_da in node_da_arr:
                node_da.setParam("pos", node_da.doubleValue("pos"))
            node_da_arr.sort(positionComp)
            #thinNodes - array of accNode nodes with zero length
            #They can be positioned inside the thick nodes, and this will be done at the end
            #of this method
            thinNodes = []
            for node_da in node_da_arr:
                params_da = node_da.childAdaptors("parameters")[0]
                node_type = node_da.stringValue("type")
                node_length = node_da.doubleValue("length")
                node_pos = node_da.getParam("pos")
                #------------QUAD-----------------
                if (node_type == "QUAD"):
                    accNode = Quad(node_da.stringValue("name"))
                    accNode.setParam("dB/dr", params_da.doubleValue("field"))
                    accNode.setParam("field", params_da.doubleValue("field"))
                    accNode.setLength(node_length)
                    if (params_da.hasAttribute("poles")):
                        #accNode.setParam("poles",[int(x) for x in eval(params_da.stringValue("poles"))])
                        accNode.setParam("poles",
                                         params_da.intArrayValue("poles"))
                    if (params_da.hasAttribute("kls")):
                        #accNode.setParam("kls", [x for x in eval(params_da.stringValue("kls"))])
                        accNode.setParam("kls",
                                         params_da.doubleArrayValue("kls"))
                    if (params_da.hasAttribute("skews")):
                        #accNode.setParam("skews",[int(x) for x in eval(params_da.stringValue("skews"))])
                        accNode.setParam("skews",
                                         params_da.intArrayValue("skews"))
                    if (0.5 * accNode.getLength() > self.maxDriftLength):
                        accNode.setnParts(
                            2 * int(0.5 * accNode.getLength() /
                                    self.maxDriftLength + 1.5 - 1.0e-12))
                    if (params_da.hasAttribute("aperture")
                            and params_da.hasAttribute("aprt_type")):
                        accNode.setParam("aprt_type",
                                         params_da.intValue("aprt_type"))
                        accNode.setParam("aperture",
                                         params_da.doubleValue("aperture"))
                    #---- possible parameters for a PMQ description in the Trace3D style
                    if (params_da.hasAttribute("radIn")
                            and params_da.hasAttribute("radOut")):
                        accNode.setParam("radIn",
                                         params_da.doubleValue("radIn"))
                        accNode.setParam("radOut",
                                         params_da.doubleValue("radOut"))
                    accNode.setParam("pos", node_pos)
                    accSeq.addNode(accNode)
                #------------BEND-----------------
                elif (node_type == "BEND"):
                    accNode = Bend(node_da.stringValue("name"))
                    if (params_da.hasAttribute("poles")):
                        #accNode.setParam("poles",[int(x) for x in eval(params_da.stringValue("poles"))])
                        accNode.setParam("poles",
                                         params_da.intArrayValue("poles"))
                    if (params_da.hasAttribute("kls")):
                        #accNode.setParam("kls", [x for x in eval(params_da.stringValue("kls"))])
                        accNode.setParam("kls",
                                         params_da.doubleArrayValue("kls"))
                    if (params_da.hasAttribute("skews")):
                        #accNode.setParam("skews",[int(x) for x in eval(params_da.stringValue("skews"))])
                        accNode.setParam("skews",
                                         params_da.intArrayValue("skews"))
                    accNode.setParam("ea1", params_da.doubleValue("ea1"))
                    accNode.setParam("ea2", params_da.doubleValue("ea2"))
                    accNode.setParam("theta", params_da.doubleValue("theta"))
                    if (params_da.hasAttribute("aperture_x")
                            and params_da.hasAttribute("aperture_y")
                            and params_da.hasAttribute("aprt_type")):
                        accNode.setParam("aprt_type",
                                         params_da.intValue("aprt_type"))
                        accNode.setParam("aperture_x",
                                         params_da.doubleValue("aperture_x"))
                        accNode.setParam("aperture_y",
                                         params_da.doubleValue("aperture_y"))
                    accNode.setLength(node_length)
                    if (accNode.getLength() > self.maxDriftLength):
                        accNode.setnParts(
                            2 * int(accNode.getLength() / self.maxDriftLength +
                                    1.5 - 1.0e-12))
                    accNode.setParam("pos", node_pos)
                    accSeq.addNode(accNode)
                #------------RF_Gap-----------------
                elif (node_type == "RFGAP"):
                    accNode = BaseRF_Gap(node_da.stringValue("name"))
                    accNode.setLength(0.)
                    accNode.setParam("E0TL", params_da.doubleValue("E0TL"))
                    accNode.setParam("E0L", params_da.doubleValue("E0L"))
                    accNode.setParam("mode", params_da.doubleValue("mode"))
                    accNode.setParam(
                        "gap_phase",
                        params_da.doubleValue("phase") * math.pi / 180.)
                    accNode.setParam("EzFile", params_da.stringValue("EzFile"))
                    cav_name = params_da.stringValue("cavity")
                    cav = accSeq.getRF_Cavity(cav_name)
                    cav.addRF_GapNode(accNode)
                    if (accNode.isFirstRFGap()):
                        cav.setPhase(accNode.getParam("gap_phase"))
                    #---- TTFs parameters
                    ttfs_da = node_da.childAdaptors("TTFs")[0]
                    beta_min = ttfs_da.doubleValue("beta_min")
                    beta_max = ttfs_da.doubleValue("beta_max")
                    accNode.setParam("beta_min", beta_min)
                    accNode.setParam("beta_max", beta_max)
                    (polyT, polyS, polyTp,
                     polySp) = accNode.getTTF_Polynimials()
                    polyT_da = ttfs_da.childAdaptors("polyT")[0]
                    polyS_da = ttfs_da.childAdaptors("polyS")[0]
                    polyTp_da = ttfs_da.childAdaptors("polyTP")[0]
                    polySp_da = ttfs_da.childAdaptors("polySP")[0]
                    polyT.order(polyT_da.intValue("order"))
                    polyS.order(polyS_da.intValue("order"))
                    polyTp.order(polyTp_da.intValue("order"))
                    polySp.order(polySp_da.intValue("order"))
                    #---- actually these polynomials are functions of kappa =  2*PI*Freq/(c*beta)
                    #---- but we will use these parameters to keep beta
                    polyT.minX(beta_min)
                    polyT.maxX(beta_max)
                    polyS.minX(beta_min)
                    polyS.maxX(beta_max)
                    polyTp.minX(beta_min)
                    polyTp.maxX(beta_max)
                    polySp.minX(beta_min)
                    polySp.maxX(beta_max)
                    coef_arr = polyT_da.doubleArrayValue("pcoefs")
                    for coef_ind in range(len(coef_arr)):
                        polyT.coefficient(coef_ind, coef_arr[coef_ind])
                    coef_arr = polyS_da.doubleArrayValue("pcoefs")
                    for coef_ind in range(len(coef_arr)):
                        polyS.coefficient(coef_ind, coef_arr[coef_ind])
                    coef_arr = polyTp_da.doubleArrayValue("pcoefs")
                    for coef_ind in range(len(coef_arr)):
                        polyTp.coefficient(coef_ind, coef_arr[coef_ind])
                    coef_arr = polySp_da.doubleArrayValue("pcoefs")
                    for coef_ind in range(len(coef_arr)):
                        polySp.coefficient(coef_ind, coef_arr[coef_ind])
                    if (params_da.hasAttribute("aperture")
                            and params_da.hasAttribute("aprt_type")):
                        accNode.setParam("aprt_type",
                                         params_da.intValue("aprt_type"))
                        accNode.setParam("aperture",
                                         params_da.doubleValue("aperture"))
                    accNode.setParam("pos", node_pos)
                    accSeq.addNode(accNode)
                #------------THICK KICK-----------------
                elif (node_type == "THICK_KICK"):
                    accNode = ThickKick(node_da.stringValue("name"))
                    accNode.setParam("Bx", params_da.doubleValue("Bx"))
                    accNode.setParam("By", params_da.doubleValue("By"))
                    accNode.setLength(node_length)
                    accNode.setParam("pos", node_pos)
                    accSeq.addNode(accNode)
                else:
                    if (node_length != 0.):
                        msg = "The LinacLatticeFactory method getLinacAccLattice(names): there is a strange element!"
                        msg = msg + os.linesep
                        msg = msg + "name=" + node_da.stringValue("name")
                        msg = msg + os.linesep
                        msg = msg + "type=" + node_type
                        msg = msg + os.linesep
                        msg = msg + "length(should be 0.)=" + str(node_length)
                        orbitFinalize(msg)
                    #------ thin nodes analysis
                    accNode = None
                    if (node_type == "DCV" or node_type == "DCH"):
                        if (node_type == "DCV"):
                            accNode = DCorrectorV(node_da.stringValue("name"))
                        if (node_type == "DCH"):
                            accNode = DCorrectorH(node_da.stringValue("name"))
                        accNode.setParam("effLength",
                                         params_da.doubleValue("effLength"))
                        if (params_da.hasAttribute("B")):
                            accNode.setParam("B", params_da.doubleValue("B"))
                    else:
                        accNode = MarkerLinacNode(node_da.stringValue("name"))
                    accNode.setParam("pos", node_pos)
                    thinNodes.append(accNode)
            #----- assign the thin nodes that are inside the thick nodes
            unusedThinNodes = []
            for thinNode in thinNodes:
                thinNode_pos = thinNode.getParam("pos")
                isInside = False
                for accNode in accSeq.getNodes():
                    length = accNode.getLength()
                    if (length > 0.):
                        pos = accNode.getParam("pos")
                        if (thinNode_pos >= (pos - length / 2)
                                and thinNode_pos <= (pos + length / 2)):
                            isInside = True
                            delta_pos = thinNode_pos - (pos - length / 2)
                            s_path = 0.
                            part_ind_in = -1
                            for part_ind in range(accNode.getnParts()):
                                part_ind_in = part_ind
                                s_path += accNode.getLength(part_ind)
                                if (delta_pos <= s_path + self.zeroDistance):
                                    break
                            accNode.addChildNode(thinNode,
                                                 place=AccNode.BODY,
                                                 part_index=part_ind_in,
                                                 place_in_part=AccNode.AFTER)
                            thinNode.setParam("pos",
                                              (pos - length / 2) + s_path)
                if (not isInside):
                    unusedThinNodes.append(thinNode)
            thinNodes = unusedThinNodes
            newAccNodes = accSeq.getNodes()[:] + thinNodes
            newAccNodes.sort(positionComp)
            accSeq.setNodes(newAccNodes)
            #insert the drifts ======================start ===========================
            #-----now check the integrity: quads and rf_gaps should not overlap
            #-----and create drifts
            copyAccNodes = accSeq.getNodes()[:]
            firstNode = copyAccNodes[0]
            lastNode = copyAccNodes[len(copyAccNodes) - 1]
            driftNodes_before = []
            driftNodes_after = []
            #insert the drift before the first element if its half-length is less than its position
            if (math.fabs(firstNode.getLength() / 2.0 -
                          firstNode.getParam("pos")) > self.zeroDistance):
                if (firstNode.getLength() / 2.0 > firstNode.getParam("pos")):
                    msg = "The LinacLatticeFactory method getLinacAccLattice(names): the first node is too long!"
                    msg = msg + os.linesep
                    msg = msg + "name=" + firstNode.getName()
                    msg = msg + os.linesep
                    msg = msg + "type=" + firstNode.getType()
                    msg = msg + os.linesep
                    msg = msg + "length=" + str(firstNode.getLength())
                    msg = msg + os.linesep
                    msg = msg + "pos=" + str(firstNode.getParam("pos"))
                    orbitFinalize(msg)
                else:
                    driftNodes = []
                    driftLength = firstNode.getParam(
                        "pos") - firstNode.getLength() / 2.0
                    nDrifts = int(driftLength / self.maxDriftLength) + 1
                    driftLength = driftLength / nDrifts
                    for idrift in range(nDrifts):
                        drift = Drift(accSeq.getName() + ":START:" +
                                      str(idrift + 1) + ":drift")
                        drift.setLength(driftLength)
                        drift.setParam("pos",
                                       0. + drift.getLength() * (idrift + 0.5))
                        driftNodes.append(drift)
                    driftNodes_before = driftNodes
            #insert the drift after the last element if its half-length plus its position is less than the sequence length
            if (math.fabs(lastNode.getLength() / 2.0 +
                          lastNode.getParam("pos") - accSeq.getLength()) >
                    self.zeroDistance):
                if (lastNode.getLength() / 2.0 + lastNode.getParam("pos") >
                        accSeq.getLength()):
                    msg = "The LinacLatticeFactory method getLinacAccLattice(names): the last node is too long!"
                    msg = msg + os.linesep
                    msg = msg + "name=" + lastNode.getName()
                    msg = msg + os.linesep
                    msg = msg + "type=" + lastNode.getType()
                    msg = msg + os.linesep
                    msg = msg + "length=" + str(lastNode.getLength())
                    msg = msg + os.linesep
                    msg = msg + "pos=" + str(lastNode.getParam("pos"))
                    msg = msg + os.linesep
                    msg = msg + "sequence name=" + accSeq.getName()
                    msg = msg + os.linesep
                    msg = msg + "sequence length=" + str(accSeq.getLength())
                    orbitFinalize(msg)
                else:
                    driftNodes = []
                    driftLength = accSeq.getLength() - (
                        lastNode.getParam("pos") + lastNode.getLength() / 2.0)
                    nDrifts = int(driftLength / self.maxDriftLength) + 1
                    driftLength = driftLength / nDrifts
                    for idrift in range(nDrifts):
                        drift = Drift(accSeq.getName() + ":" +
                                      lastNode.getName() + ":" +
                                      str(idrift + 1) + ":drift")
                        drift.setLength(driftLength)
                        drift.setParam(
                            "pos",
                            lastNode.getParam("pos") +
                            lastNode.getLength() / 2.0 + drift.getLength() *
                            (idrift + 0.5))
                        driftNodes.append(drift)
                    driftNodes_after = driftNodes
            #now move on and generate drifts between (i,i+1) nodes from copyAccNodes
            newAccNodes = driftNodes_before
            for node_ind in range(len(copyAccNodes) - 1):
                accNode0 = copyAccNodes[node_ind]
                newAccNodes.append(accNode0)
                accNode1 = copyAccNodes[node_ind + 1]
                dist = accNode1.getParam("pos") - accNode1.getLength() / 2 - (
                    accNode0.getParam("pos") + accNode0.getLength() / 2)
                if (dist < 0.):
                    msg = "The LinacLatticeFactory method getLinacAccLattice(names): two nodes are overlapping!"
                    msg = msg + os.linesep
                    msg = msg + "sequence name=" + accSeq.getName()
                    msg = msg + os.linesep
                    msg = msg + "node 0 name=" + accNode0.getName(
                    ) + " pos=" + str(accNode0.getParam("pos")) + " L=" + str(
                        accNode0.getLength())
                    msg = msg + os.linesep
                    msg = msg + "node 1 name=" + accNode1.getName(
                    ) + " pos=" + str(accNode1.getParam("pos")) + " L=" + str(
                        accNode1.getLength())
                    msg = msg + os.linesep
                    orbitFinalize(msg)
                elif (dist > self.zeroDistance):
                    driftNodes = []
                    nDrifts = int(dist / self.maxDriftLength) + 1
                    driftLength = dist / nDrifts
                    for idrift in range(nDrifts):
                        drift = Drift(accSeq.getName() + ":" +
                                      accNode0.getName() + ":" +
                                      str(idrift + 1) + ":drift")
                        drift.setLength(driftLength)
                        drift.setParam(
                            "pos",
                            accNode0.getParam("pos") +
                            accNode0.getLength() * 0.5 + drift.getLength() *
                            (idrift + 0.5))
                        driftNodes.append(drift)
                    newAccNodes += driftNodes
                else:
                    pass
            newAccNodes.append(lastNode)
            newAccNodes += driftNodes_after
            accSeq.setNodes(newAccNodes)
            #insert the drifts ======================stop ===========================
            #add all AccNodes to the linac lattice
            for accNode in accSeq.getNodes():
                linacAccLattice.addNode(accNode)
        #------- finalize the lattice construction
        linacAccLattice.initialize()
        return linacAccLattice
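# A hedged, library-independent sketch (not the pyORBIT API) of the drift-filling
# idea used above: each gap between consecutive positioned elements is split into
# pieces no longer than maxDriftLength, centred inside the gap.
def split_gap(gap_start, gap_length, max_drift_length):
    # Returns (center_position, piece_length) tuples covering the gap, using the
    # same rule as the factory: n = int(L / max) + 1 pieces of length L / n.
    n = int(gap_length / max_drift_length) + 1
    piece = gap_length / n
    return [(gap_start + piece * (k + 0.5), piece) for k in range(n)]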
Example #51
0
def normalize_angle(angle):
    # Shift the angle by one full revolution toward zero.
    # Note: a single subtraction only normalizes angles with |angle| <= 3*pi.
    if (math.fabs(angle) > math.pi):
        angle = angle - (2 * math.pi * angle) / (math.fabs(angle))
    return angle
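# A hedged sketch (not part of the original example, assuming math is imported
# as above): a wrap that maps any angle, however many revolutions it spans,
# into (-pi, pi].
def normalize_angle_general(angle):
    wrapped = math.fmod(angle + math.pi, 2 * math.pi)
    if wrapped <= 0:
        wrapped += 2 * math.pi
    return wrapped - math.pi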
def loopFilter(gamma, LoopBW, PM, CPGain, KVCO, Fout, Fcomp, T31):
    LoopBWRads = 2 * math.pi * LoopBW

    #######
    #Numerical solution of T1 using bisection method
    #######
    def T1est(T1guess):
        wcT1 = LoopBWRads * T1guess
        #return wcT1,math.atan(wcT1)
        return PM - (180 / math.pi) * (math.atan(gamma / wcT1 / (1 + T31)) -
                                       math.atan(wcT1) - math.atan(wcT1 * T31))

    #Approximate value from Banerjee
    T1approx = ((1 / math.cos(PM * math.pi / 180)) -
                math.tan(PM * math.pi / 180)) / LoopBWRads / (1 + T31)
    #Create a bracket such that T1est(a) and T1est(b) have opposite signs,
    #so that the bisection method converges to a solution.
    #Since T1est(T1approx) will be small, if it's negative and we double it we will have a good bracket.
    #If it's positive and we halve it, we'll also have a good bracket.
    if T1est(T1approx) < 0:
        a = T1approx
        b = T1approx * 2.0
        # print a, b
    else:
        a = T1approx * 0.5
        b = T1approx
        #print a, b
    tol = 0.01
    c = (a + b) / 2.0  #Mid point. First guess
    #First guess will be worse than T1approx but the algorithm should still converge quickly.
    while math.fabs(T1est(c)) > tol:
        # print a,b,c
        if (T1est(a) < 0 and T1est(c) < 0) or (T1est(a) > 0 and T1est(c) > 0):
            a = c
        else:
            b = c
        c = (a + b) / 2.0
        # print c, T1est(c,gamma,LoopBWRads,T31,PM)
    T1approx = c
    #######
    #Rest of calculations
    #######

    T3 = T1approx * T31
    T2 = gamma / ((LoopBWRads)**2) / (T1approx + T3)
    #print "T1approx = ",T1approx," T2 = ",T2," T3 = ",T3
    N = float(Fout / Fcomp)
    P = 8.0
    Ndig = float(N / P)
    A0_sqrt = math.sqrt(
        (1 + (LoopBWRads * T2)**2) / (1 + (LoopBWRads * T1approx)**2) /
        (1 + (LoopBWRads * T3)**2))
    A0_coeff = CPGain * KVCO / ((LoopBWRads)**2) / N
    A0 = A0_coeff * A0_sqrt
    A1 = A0 * (T1approx + T3)
    A2 = A0 * T1approx * T3
    #print "A0 = ",A0," A1 = ",A1," A2 = ",A2
    C1_sqrt = math.sqrt(1 + T2 * (T2 * A0 - A1) / A2)
    C1 = A2 * (1 + C1_sqrt) / (T2**2)
    C3 = (-(T2**2) * (C1**2) + T2 * A1 * C1 - A2 * A0) / ((T2**2) * C1 - A2)
    C2 = A0 - C1 - C3
    R2 = T2 / C2
    R3 = A2 / C1 / C3 / T2
    #print "C1 = ",C1," C2 = ",C2," C3 = ",C3," R2 = ",R2," R3 = ",R3
    #return C1/1e-9,C2/1e-9,C3/1e-9,R2/1e3,R3/1e3,A2,A1,A0,N
    f = np.logspace(2, 8, 31)
    f2 = []
    for i in range(len(f)):
        f2.append(f[i] * 2 * math.pi)
    K = KVCO * CPGain / N
    num = []
    R = []
    ROL = []
    XOL = []
    X = []
    den3Real = []
    den3Imag = []
    den3 = []
    den3OLReal = []
    den3OLImag = []
    den3OL = []
    constantCL = K * N
    magCL = []
    phaseCL = []
    magOL = []
    phaseOL = []
    vcoTFNumR = []
    vcoTFNumX = []
    vcoTFNumReal = []
    vcoTFNumImag = []
    vcoTFNum = []
    magvcoTF = []
    magprescalerTF = []
    magpfdcpTF = []
    denR2 = []
    denR2_R = []
    denR2_X = []
    magR2TF = []
    magLFTF_num_R = []
    magLFTF_num_X = []
    magLFTF_num = []
    magLFTF_den_R = []
    magLFTF_den_X = []
    magLFTF_den = []
    magLFTF = []
    magLFTFR2 = []
    numR3_R = []
    numR3_X = []
    numR3 = []
    denR3 = []
    denR3_R = []
    denR3_X = []
    magR3TF = []
    magLFTFR3 = []
    for i in range(len(f)):
        #Expand the denominator of Eq. 16.2 on page 127 to get real and imag components.
        #A3 = 0
        R.append(A2 * ((f2[i])**4) - A0 * ((f2[i])**2) +
                 K)  #Real comp. of CL denom
        X.append(K * T2 * f2[i] - A1 * ((f2[i])**3))  #Imag comp. of CL denom
        #Expand denominator of Z(s)/s for 3rd order
        ROL.append(A2 * ((f2[i])**4) - A0 *
                   ((f2[i])**2))  #Real comp. of OL denom
        XOL.append(-A1 * ((f2[i])**3))  #Imag comp. of OL denom
        den3Real.append(R[i])
        den3Imag.append(X[i])
        den3OLReal.append(ROL[i])
        den3OLImag.append(XOL[i])
        den3.append(complex(den3Real[i], den3Imag[i]))
        den3OL.append(complex(den3OLReal[i], den3OLImag[i]))
        #Transfer function for VCO noise
        vcoTFNumR.append(A2 * ((f2[i])**4) - A0 * ((f2[i])**2))
        vcoTFNumX.append(-A1 * ((f2[i])**3))
        vcoTFNumReal.append(vcoTFNumR[i])
        vcoTFNumImag.append(vcoTFNumX[i])
        vcoTFNum.append(complex(vcoTFNumReal[i], vcoTFNumImag[i]))
        #The denominator is the same as that of the CL transfer function
        #constant.append(K*N)
        #num.append(math.sqrt(1.0+((f[i]/(1/T2))**2)))
        num.append(complex(1.0, f2[i] / (1 / T2)))
        magCL.append(20 * np.log10(constantCL) +
                     20 * np.log10(np.abs(num[i])) -
                     20 * np.log10(np.abs(den3[i])))
        phaseCL.append(
            (180 / math.pi) * (np.angle(num[i]) - np.angle(den3[i])))
        magOL.append(20 * np.log10(K) + 20 * np.log10(np.abs(num[i])) -
                     20 * np.log10(np.abs(den3OL[i])))
        phaseOL.append((180 / math.pi) *
                       (np.angle(num[i]) - np.angle(den3OL[i])) - 180)
        magvcoTF.append(20 * np.log10(np.abs(vcoTFNum[i])) -
                        20 * np.log10(np.abs(den3[i])))
        magprescalerTF.append(magCL[i] + 20 * np.log10(1 / Ndig))
        magpfdcpTF.append(magCL[i] + 20 * np.log10(1 / CPGain))
        denR2_R.append((C1 + C2 + C3) -
                       ((2 * math.pi * f[i])**2) * C3 * C2 * C1 * R2 * R3)
        denR2_X.append(2 * math.pi * f[i] * (C3 * R3 * (C1 + C2) + C2 * R2 *
                                             (C1 + C3)))
        denR2.append(complex(denR2_R[i], denR2_X[i]))
        magR2TF.append(20 * np.log10(C2) - 20 * np.log10(np.abs(denR2[i])))
        magLFTF_num_R.append(-KVCO * A1 * (f2[i])**2)
        magLFTF_num_X.append(A0 * KVCO * f2[i] - A2 * KVCO * (f2[i])**3)
        magLFTF_num.append(complex(magLFTF_num_R[i], magLFTF_num_X[i]))
        magLFTF_den_R.append(A2 * (f2[i])**4 - A0 * (f2[i])**2 + K)
        magLFTF_den_X.append(K * T2 * f2[i] - A1 * (f2[i])**3)
        magLFTF_den.append(complex(magLFTF_den_R[i], magLFTF_den_X[i]))
        magLFTF.append(20 * np.log10(np.abs(magLFTF_num[i])) -
                       20 * np.log10(np.abs(magLFTF_den[i])))
        magLFTFR2.append(magLFTF[i] + magR2TF[i])  #adds the R2 TF and the LFTF
        numR3_R.append(C1 + C2)
        numR3_X.append(2 * math.pi * f[i] * C1 * C2 * R2)
        numR3.append(complex(numR3_R[i], numR3_X[i]))
        denR3_R.append((C1 + C2 + C3) -
                       ((2 * math.pi * f[i])**2) * C3 * C2 * C1 * R2 * R3)
        denR3_X.append(2 * math.pi * f[i] * (C3 * R3 * (C1 + C2) + C2 * R2 *
                                             (C1 + C3)))
        denR3.append(complex(denR3_R[i], denR3_X[i]))
        magR3TF.append(20 * np.log10(np.abs(numR3[i])) -
                       20 * np.log10(np.abs(denR3[i])))
        magLFTFR3.append(magLFTF[i] + magR3TF[i])  #adds the R3 TF and the LFTF
    return C1 / 1e-9, C2 / 1e-9, C3 / 1e-9, R2 / 1e3, R3 / 1e3, f, magCL, magOL, phaseOL, magvcoTF, magprescalerTF, magpfdcpTF, magLFTFR2, magLFTFR3
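# Hedged usage sketch (not from the original): the design inputs below are
# illustrative PLL values only. The first five return values are the loop-filter
# components in nF and kOhm; the remaining arrays describe the closed-loop,
# open-loop, VCO, prescaler, PFD/CP and loop-filter responses over f.
(C1_nF, C2_nF, C3_nF, R2_k, R3_k, f, magCL, magOL, phaseOL, magvcoTF,
 magprescalerTF, magpfdcpTF, magLFTFR2, magLFTFR3) = loopFilter(
     gamma=1.0,      # loop optimization factor
     LoopBW=10e3,    # loop bandwidth, Hz
     PM=50.0,        # phase margin, degrees
     CPGain=5e-3,    # charge-pump current, A
     KVCO=30e6,      # VCO gain, Hz/V
     Fout=2.4e9,     # output frequency, Hz
     Fcomp=10e6,     # comparison frequency, Hz
     T31=0.6)        # T3/T1 ratio
print("C1=%.2f nF C2=%.2f nF C3=%.2f nF R2=%.2f kOhm R3=%.2f kOhm"
      % (C1_nF, C2_nF, C3_nF, R2_k, R3_k))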
Example #53
0
def absDiff(one, two, ignoreSign=False):
    if ignoreSign:
        (one, two) = (math.fabs(one), math.fabs(two))
    return max(one, two) - min(one, two)
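# Equivalent one-liner (sketch, not from the original example; the name
# abs_diff_oneliner is hypothetical): max(a, b) - min(a, b) equals abs(a - b).
def abs_diff_oneliner(one, two, ignoreSign=False):
    return abs(abs(one) - abs(two)) if ignoreSign else abs(one - two)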
Example #54
0
	def distance(self, latA, lonA, latB, lonB):
		dy = 0.113 * math.fabs(latA - latB) 	# approximate scale factor for a latitude difference
		dx = 0.0715 * math.fabs(lonA - lonB)	# approximate scale factor for a longitude difference (latitude dependent)
		return math.sqrt(dx * dx + dy * dy)
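# A hedged, general-purpose alternative (not from the original example): the
# haversine great-circle distance, assuming latitudes and longitudes in degrees.
import math

def haversine_m(latA, lonA, latB, lonB):
    R = 6371000.0  # mean Earth radius, meters
    phiA, phiB = math.radians(latA), math.radians(latB)
    dphi = math.radians(latB - latA)
    dlam = math.radians(lonB - lonA)
    a = (math.sin(dphi / 2) ** 2
         + math.cos(phiA) * math.cos(phiB) * math.sin(dlam / 2) ** 2)
    return 2 * R * math.asin(math.sqrt(a))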
Distance = float('-inf')
for i in range(len(container['sceneCenLat'])):
    lines_number = float(container['sceneCenLine_number'][i])
    pixel_number = float(container['sceneCenPixel_number'][i])

    if Distance < (lines_number - cen_line)**2 + (cen_pixel - pixel_number)**2:
        Distance = (lines_number - cen_line)**2 + (cen_pixel - pixel_number)**2
for i in range(len(container['sceneCenLat'])):
    lines_number = float(container['sceneCenLine_number'][i])
    pixel_number = float(container['sceneCenPixel_number'][i])

    if Distance == ((lines_number - cen_line)**2 +
                    (cen_pixel - pixel_number)**2):
        norm_orbit, norm_orbit_line = intrp_orbit(
            int(math.fabs(cen_line - Line_location_index_up)), container,
            image_num - 1)
        coord_xyz = lph2xyz(int(math.fabs(cen_line - Line_location_index_up)),
                            cen_pixel, container, norm_orbit_line,
                            float(container['sceneCenLon'][i]),
                            float(container['sceneCenLat'][i]),
                            float(container['height'][i]))
        phi_lam_height = xyz2ell(coord_xyz)
        container['Image_centroid_lon'] = phi_lam_height[1]
        container['Image_centroid_lat'] = phi_lam_height[0]
        height = phi_lam_height[2]

# ---------------------------------------------------------------------------------------------------------
outStream.write('===============================================\n')
outStream.write('MASTER RESULTFILE:                    %s\n' %
                outputFile_Name_res)
outStream.write('Created by:                           %s\n')
outStream.write(
    'InSAR Processor: Doris (Delft o-o Radar Interferometric Software)\n')
 def onEnterOk(self, position):
     order = position.getEntryOrder()
     if order.getAction() == broker.Order.Action.BUY:
         self.logOp("COMPRA", order)
         if self._stopTrailing:
             stopOrder = TrailingStopOrder(broker.Order.Action.SELL, position, self._stopPer, position.getShares(), order.getInstrumentTraits())
             stopOrder.setGoodTillCanceled(True)
             position._Position__submitAndRegisterOrder(stopOrder)
             position._Position__exitOrder = stopOrder
         else:
             position.exitStop(order.getExecutionInfo().getPrice() * (1 - self._stopPer), True)
     else:
         self.logOp("VENTA CORTA", order)
         if self._stopTrailing:
             stopOrder = TrailingStopOrder(broker.Order.Action.BUY_TO_COVER, position, self._stopPer, math.fabs(position.getShares()), order.getInstrumentTraits())
             stopOrder.setGoodTillCanceled(True)
             position._Position__submitAndRegisterOrder(stopOrder)
             position._Position__exitOrder = stopOrder
         else:
             position.exitStop(order.getExecutionInfo().getPrice() * (1 + self._stopPer), True)
Example #57
0
def decisionProcess(partsList,
                    notePrediction,
                    beginningData,
                    lastNotePosition,
                    countdown,
                    firstNotePage=None,
                    lastNotePage=None):
    '''
    It decides which of the given parts of the score has a better matching with
    the recorded part of the song.
    If there is no part of the score with a high probability of being the correct part,
    it starts a "countdown" in order to stop the score following if the bad matching persists.
    In this case, it does not match the recorded part of the song with any part of the score.

    Inputs: partsList contains all the possible parts of the score, sorted from the
    highest probability of being the best match down to the lowest.
    notePrediction is the position of the score in which the next note should start.
    beginningData is a list with all the beginnings of the used fragments of the score to find
    the best matching.
    lastNotePosition is the position of the score in which the last matched fragment of the
    score finishes.
    Countdown is a counter of consecutive errors in the matching process.

    Outputs: It returns the beginning of the best matching fragment of
    score and the countdown.


    >>> scNotes = corpus.parse('luca/gloria').parts[0].flat.notes.stream()
    >>> scoreStream = scNotes
    >>> import os #_DOCS_HIDE
    >>> sfp = common.getSourceFilePath() #_DOCS_HIDE
    >>> readPath = sfp + os.path.sep + 'audioSearch' + os.path.sep + 'test_audio.wav' #_DOCS_HIDE
    >>> freqFromAQList = audioSearch.getFrequenciesFromAudioFile(waveFilename=readPath) #_DOCS_HIDE
    
    >>> tf = 'test_audio.wav'
    >>> #_DOCS_SHOW freqFromAQList = audioSearch.getFrequenciesFromAudioFile(waveFilename=tf)
    >>> chrome = scale.ChromaticScale('C4')
    >>> detectedPitchesFreq = audioSearch.detectPitchFrequencies(freqFromAQList, useScale=chrome)
    >>> detectedPitchesFreq = audioSearch.smoothFrequencies(detectedPitchesFreq)
    >>> (detectedPitches, listplot) = audioSearch.pitchFrequenciesToObjects(
    ...                                             detectedPitchesFreq, useScale=chrome)
    >>> (notesList, durationList) = audioSearch.joinConsecutiveIdenticalPitches(detectedPitches)
    >>> transcribedScore, qle = audioSearch.notesAndDurationsToStream(notesList, durationList, 
    ...                                             scNotes=scNotes, qle=None)
    >>> hop = 6
    >>> tn_recording = 24
    >>> totScores = []
    >>> beginningData = []
    >>> lengthData = []
    >>> for i in range(4):
    ...     scNotes = scoreStream[i * hop + 1:i * hop + tn_recording + 1]
    ...     name = "%d" % i
    ...     beginningData.append(i * hop + 1)
    ...     lengthData.append(tn_recording)
    ...     scNotes.id = name
    ...     totScores.append(scNotes)
    >>> listOfParts = search.approximateNoteSearch(transcribedScore.flat.notes.stream(), totScores)
    >>> notePrediction = 0
    >>> lastNotePosition = 0
    >>> countdown = 0
    >>> positionInList, countdown = audioSearch.decisionProcess(
    ...          listOfParts, notePrediction, beginningData, lastNotePosition, countdown)
    >>> print(positionInList)
    0
    
    The countdown result is 1 because the song used is completely different from the score!!
    
    >>> print(countdown)
    1
    '''
    i = 0
    position = 0
    while i < len(partsList) and beginningData[int(partsList[i].id)] < notePrediction:
        i = i + 1
        position = i
    if len(partsList) == 1:  # it happens when you don't play anything during a recording period
        position = 0

    dist = math.fabs(beginningData[0] - notePrediction)
    for i in range(len(partsList)):
        if (partsList[i].matchProbability >= 0.9 * partsList[0].matchProbability
                and beginningData[int(partsList[i].id)] > lastNotePosition):  #let's take a 90%
            if math.fabs(beginningData[int(partsList[i].id)] - notePrediction) < dist:
                dist = math.fabs(beginningData[int(partsList[i].id)] - notePrediction)
                position = i
                environLocal.printDebug("NICE")

    #print("ERRORS", position, len(partsList), lastNotePosition,
    #      partsList[position].matchProbability , beginningData[int(partsList[position].id)])
    if position < len(partsList) and beginningData[int(partsList[position].id)] <= lastNotePosition:
        environLocal.printDebug(
            " error ? %d, %d" %
            (beginningData[int(partsList[position].id)], lastNotePosition))
    if partsList[position].matchProbability < 0.6 or len(partsList) == 1:
        #the latter for the all-rest case
        environLocal.printDebug("Are you sure you are playing the right song?")
        countdown = countdown + 1
    elif dist > 20 and countdown == 0:
        countdown += 1
        environLocal.printDebug("Excessive distance....? dist=%d" % dist)

    elif dist > 30 and countdown == 1:
        countdown += 1
        environLocal.printDebug("Excessive distance....? dist=%d" % dist)

    elif ((firstNotePage is not None and lastNotePage is not None)
          and ((beginningData[int(partsList[position].id)] < firstNotePage
                or beginningData[int(partsList[position].id)] > lastNotePage)
               and countdown < 2)):
        countdown += 1
        environLocal.printDebug('playing in a not shown part')
    else:
        countdown = 0
    environLocal.printDebug([
        '****????**** DECISION PROCESS: dist from expected:', dist,
        'beginning data:', beginningData[int(partsList[i].id)], 'lastNotePos',
        lastNotePosition
    ])
    return position, countdown
Example #58
0
 def shadowRotate(self, ribfile, angle, x, y, z):
     """
     To place the cam for shadow map
     """
     if math.fabs(angle) > 0.001:
         ribfile.write("Rotate %0.2f %0.2f %0.2f %0.2f\n"% (angle, x, y, z))
import random
from numpy import double

times = int(input("How many times to try? "))
r = 200
circle_num = 0
total_num = 0
oo = (double(r), double(r))

i = 0
while i < times:
    i += 1
    # random point in the square [0, 2r] x [0, 2r]; the circle of radius r
    # centred at (r, r) covers pi/4 of that square
    x = double(random.random() * 2 * r)
    y = double(random.random() * 2 * r)
    distance_square = double(x - oo[0]) ** 2 + double(y - oo[1]) ** 2
    total_num += 1
    if distance_square < double(r ** 2):
        circle_num += 1

# compute the estimate once, after sampling, so pi is defined even if no
# point landed inside the circle
pi = 4 * double(circle_num) / total_num if total_num else float('nan')

print(pi)
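# A hedged vectorized sketch of the same estimator with numpy (not part of the
# original example): sample points in the unit square and count how many fall
# inside the quarter circle of radius 1.
import numpy as np

def estimate_pi(n_samples, seed=None):
    rng = np.random.default_rng(seed)
    pts = rng.random((n_samples, 2))
    inside = np.count_nonzero((pts ** 2).sum(axis=1) < 1.0)
    return 4.0 * inside / n_samples

# print(estimate_pi(1000000))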

Example #60
0
# newtonsqrt.py -- Newton's method for square roots
import math

c = float(input("Enter a non-negative number: "))
t = c
eps = 1e-15  # tolerance; the original's 1*(math.e)-15 evaluates to e - 15, not 1e-15
if c > 0:
    while math.fabs(t - c / t) > eps * t:
        t = (c / t + t) / 2  # Newton update: t <- (t + c/t) / 2
print(t)
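# Hedged sanity check (not part of the original example): the Newton iterate
# should agree with math.sqrt for positive inputs.
def newton_sqrt(c, eps=1e-15):
    t = c
    if c > 0:
        while math.fabs(t - c / t) > eps * t:
            t = (c / t + t) / 2
    return t

for value in (2.0, 10.0, 12345.6):
    assert math.isclose(newton_sqrt(value), math.sqrt(value), rel_tol=1e-12)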