def get_dist_avg(self, ignore_attr=None):
     if ignore_attr is None:
         attr_vals = [x for x in self.get_dist_vals() if x is not None]
         return sum(attr_vals) / len(attr_vals)
     else:
         attr_vals = [dist for key, dist in self.dist_dict.items() if dist is not None and key != ignore_attr]
         return sum(attr_vals) / len(attr_vals)
Example no. 2
    def test_flatten_url_tree_url_import_with_routers(self):

        class MockApiViewSet(ModelViewSet):
            serializer_class = CommentSerializer
            model = User

        class AnotherMockApiViewSet(ModelViewSet):
            serializer_class = CommentSerializer
            model = User

        router = DefaultRouter()
        router.register(r'other_views', MockApiViewSet)
        router.register(r'more_views', AnotherMockApiViewSet)

        urls_app = patterns('', url(r'^', include(router.urls)))
        urls = patterns(
            '',
            url(r'api/', include(urls_app)),
            url(r'test/', include(urls_app))
        )
        urlparser = UrlParser()
        apis = urlparser.get_apis(urls)

        self.assertEqual(sum(api['path'].find('api') != -1 for api in apis), 4)
        self.assertEqual(sum(api['path'].find('test') != -1 for api in apis), 4)
Example no. 3
def knapsack_unbounded_dp(items, C):
    # order by max value per item size
    items = sorted(items, key=lambda item: item[VALUE]/float(item[SIZE]), reverse=True)
 
    # Sack keeps track of max value so far as well as the count of each item in the sack
    sack = [(0, [0 for i in items]) for i in range(0, C+1)]   # value, [item counts]
    for i,item in enumerate(items): 
        name, size, value = item
        for c in range(size, C+1):
            sackwithout = sack[c-size]  # previous max sack to try adding this item to
            trial = sackwithout[0] + value
            used = sackwithout[1][i]
            if sack[c][0] < trial:
                # the best sack of size c-size, plus one more of this item, beats the current best
                sack[c] = (trial, sackwithout[1][:])
                sack[c][1][i] +=1   # use one more
 
    value, bagged = sack[C]
    numbagged = sum(bagged)
    size = sum(items[i][1]*n for i,n in enumerate(bagged))
    # convert to (item, count) pairs in name order
    bagged = sorted((items[i][NAME], n) for i,n in enumerate(bagged) if n)
 
    return value, size, numbagged, bagged
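# Hypothetical usage sketch for the function above. NAME, SIZE and VALUE are
# module-level tuple-index constants it relies on but does not define here;
# the values below are assumptions.
NAME, SIZE, VALUE = 0, 1, 2
items = [("gold", 4, 90), ("silver", 3, 55), ("bronze", 1, 10)]
print(knapsack_unbounded_dp(items, 10))
# -> (200, 10, 3, [('gold', 1), ('silver', 2)])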
Example no. 4
def run_merge(filenames):
    """Merges all Skype databases to a new database."""
    dbs = [skypedata.SkypeDatabase(f) for f in filenames]
    db_base = dbs.pop()
    counts = collections.defaultdict(lambda: collections.defaultdict(int))
    postbacks = Queue.Queue()
    postfunc = lambda r: postbacks.put(r)
    worker = workers.MergeThread(postfunc)

    name, ext = os.path.splitext(os.path.split(db_base.filename)[-1])
    now = datetime.datetime.now().strftime("%Y%m%d")
    filename_final = util.unique_path("%s.merged.%s%s" %  (name, now, ext))
    print("Creating %s, using %s as base." % (filename_final, db_base))
    shutil.copyfile(db_base.filename, filename_final)
    db2 = skypedata.SkypeDatabase(filename_final)
    chats2 = db2.get_conversations()
    db2.get_conversations_stats(chats2)

    for db1 in dbs:
        chats = db1.get_conversations()
        db1.get_conversations_stats(chats)
        bar_total = sum(c["message_count"] for c in chats)
        bar_text = " Processing %.*s.." % (30, db1)
        bar = ProgressBar(max=bar_total, afterword=bar_text)
        bar.start()
        args = {"db1": db1, "db2": db2, "chats": chats,
                "type": "diff_merge_left"}
        worker.work(args)
        while True:
            result = postbacks.get()
            if "error" in result:
                print("Error merging %s:\n\n%s" % (db1, result["error"]))
                worker = None # Signal for global break
                break # break while True
            if "done" in result:
                break # break while True
            if "diff" in result:
                counts[db1]["chats"] += 1
                counts[db1]["msgs"] += len(result["diff"]["messages"])
                msgcounts = sum(c["message_count"] for c in result["chats"])
                bar.update(bar.value + msgcounts)
            if result["output"]:
                log(result["output"])
        if not worker:
            break # break for db1 in dbs
        bar.stop()
        bar.afterword = " Processed %s." % db1
        bar.update(bar_total)
        print

    if not counts:
        print("Nothing new to merge.")
        db2.close()
        os.unlink(filename_final)
    else:
        for db1 in dbs:
            print("Merged %s in %s from %s." %
                  (util.plural("message", counts[db1]["msgs"]),
                   util.plural("chat", counts[db1]["chats"]), db1))
        print("Merge into %s complete." % db2)
Example no. 5
    def _format_for_solver(self, constr_map, solver):
        """Formats the problem for the solver.

        Parameters
        ----------
        constr_map : dict
            A map of constraint type to a list of constraints.
        solver : str
            The solver being targeted.

        Returns
        -------
        dict
            The dimensions of the cones.
        """
        # Initialize dimensions.
        dims = {}
        dims[s.EQ_DIM] = sum(c.size[0]*c.size[1] for c in constr_map[s.EQ])
        dims[s.LEQ_DIM] = sum(c.size[0]*c.size[1] for c in constr_map[s.LEQ])
        dims[s.SOC_DIM] = []
        dims[s.SDP_DIM] = []
        dims[s.EXP_DIM] = 0
        # Formats SOC, SOC_EW, SDP, and EXP constraints for the solver.
        nonlin = constr_map[s.SOC] + constr_map[s.SDP] + constr_map[s.EXP]
        for constr in nonlin:
            constr.format(constr_map[s.EQ], constr_map[s.LEQ], dims, solver)

        return dims
Example no. 6
def parcntrd(im, subR, xg, yg):
    '''Finds more exact coordinates of a star center, by fitting a parabola
    to the row-wise and col-wise sums, the three points including the max and
    its immediate neighbors. yg,xg are the initial guesses for the center, subR
    is the radius of the subframe and yc,xc are better centroid coords.'''
    
    # Step 1: extract a subframe and build row-wise and col-wise sums
    sf = im[(yg-subR):(yg+subR),(xg-subR):(xg+subR)]
    rS = sum(sf, axis=1)
    cS = sum(sf, axis=0)
    
    # Step 2: Find the maxima
    rP = argmax(rS) # index of the peak in the row-sum
    cP = argmax(cS) # index of the peak in the col-sum
    
    # Step 3: Fit parabolas to the maxima.
    iV = arange(2*subR)
    a_r = ((rS[rP-1]-rS[rP])/(iV[rP-1]-iV[rP]) - (rS[rP]-rS[rP+1])/(iV[rP]-iV[rP+1]))/(iV[rP-1]-iV[rP+1])
    b_r = (rS[rP-1]-rS[rP])/(iV[rP-1]-iV[rP]) - a_r * (iV[rP-1]+iV[rP])
    yc = -0.5 * b_r / a_r
    
    a_c = ((cS[cP-1]-cS[cP])/(iV[cP-1]-iV[cP]) - (cS[cP]-cS[cP+1])/(iV[cP]-iV[cP+1]))/(iV[cP-1]-iV[cP+1])
    b_c = (cS[cP-1]-cS[cP])/(iV[cP-1]-iV[cP]) - a_c * (iV[cP-1]+iV[cP])
    xc = -0.5 * b_c / a_c
    
    return(xc + xg - subR, yc + yg - subR)
Example no. 7
    def exportSheetToImage(self, fileName):
        """ exportSheetToImage() -> None
        Montage all the cell images and export to a file

        """
        (rCount, cCount) = self.getDimension()
        if rCount < 1 or cCount < 1:
            return
        cellHeights = [self.getCellRect(r, 0).height() for r in xrange(rCount)]
        cellWidths = [self.getCellRect(0, c).width() for c in xrange(cCount)]
        finalImage = QtGui.QImage(sum(cellWidths), sum(cellHeights), QtGui.QImage.Format_ARGB32)
        finalImage.fill(0xFFFFFFFF)
        painter = QtGui.QPainter(finalImage)
        y = 0
        for r in xrange(rCount):
            x = 0
            for c in xrange(cCount):
                widget = self.getCell(r, c)
                if widget:
                    pix = widget.grabWindowPixmap()
                    cx = (cellWidths[c] - pix.width()) // 2
                    cy = (cellHeights[r] - pix.height()) // 2
                    painter.drawPixmap(x + cx, y + cy, pix)
                x += cellWidths[c]
            y += cellHeights[r]
        painter.end()

        # forcing png format if no extension was provided
        (_, ext) = os.path.splitext(fileName)
        if ext == "":
            finalImage.save(fileName, "png")
        else:
            # try to guess based on the extension
            finalImage.save(fileName)
Example no. 8
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0
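# Hypothetical usage sketch: classifyNB above expects NumPy arrays (for the
# element-wise products) and a log() in scope, e.g. numpy's; neither is shown in the excerpt.
import numpy as np
from numpy import log

p0V = np.log(np.array([0.5, 0.4, 0.1]))  # per-token log-probabilities, class 0
p1V = np.log(np.array([0.1, 0.2, 0.7]))  # per-token log-probabilities, class 1
vec = np.array([0, 1, 1])                # token-presence vector to classify
print(classifyNB(vec, p0V, p1V, 0.5))    # -> 1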
Example no. 9
def apPhot(im, xg, yg, fwhm, apR, inR, outR):
    '''Performs aperture photometry for the object near xg, yg. A more precise center
    is found, then the flux within apR is found and a robust estimate of the backgnd
    for that aperture is found.'''
    
    #STEP 0: Estimate the mean and sdev of the image & subtract the mean
    thresh = 5.0
    mn, sd = robomad(im, thresh)
    im2 = im - mn
    
    # STEP 1: Get a better centroid.
    # xc, yc = gcntrd(im2, fwhm, xg, yg)
    xc, yc = cntrd(im2, fwhm, xg, yg)
    #print "Found centers at x,y = ", xc, yc    
    
    # STEP 2: Now make the aperture mask using pixwt()
    sf = 10.0
    apM = pixwt(im2, xc, yc, apR, sf) # inner aperture
    nApPix = sum(apM) # number of pixels in the aperture mask
    
    # STEP 3: Make a list of pixels that lie between inR and outR. Estimate the backgnd
    # from the robust mean of these pixels. No fractional pixels in the backgnd calculation.
    
    (y,x) = indices(im2.shape)
    r = sqrt((x-xc)**2 + (y-yc)**2)
    bkList = logical_and((r >= inR), (r <= outR))
    
    bkAve, bkSDev = robomad(im2[bkList], thresh)  # robust background estimate from the annulus pixels only
    
    
    # STEP 4: Get flux in apR and return 
    sFlux = sum(im2 * apM)
    
    return(sFlux, bkAve*nApPix)
Example no. 10
def calcThreshMeasure(goPrediction):
	#print "calcing top N measure"
	returnVals = []
	
	inputs = [(target, goPrediction) for target in goPrediction.targetToTermToScore.keys()]
	
	p=None
#	if len(inputs) > 1000: 
#		pass
##		p=Pool(processes=10)
##		results = p.map(topNmeasure, inputs,chunksize=50)
#	else:
	results = map(threshMeasure, inputs)
	
	for i in range(100):
		allPrec = []
		allRecs = []
		for result in results:
			if result is not None and len(result) >= i+1 and result[i] != (-1,-1):
				allRecs.append(result[i][0])
				allPrec.append(result[i][1])
		
#		if i==0:
#			print "First prec: " + str(allPrec)

		if len(allPrec) == 0:
			returnVals.append((0, 0))
		else:
			returnVals.append((sum(allPrec) / len(allPrec), sum(allRecs) / goPrediction.numberOfTargets))
	
	print "\n".join([str(tuplei) for tuplei in returnVals])
	return returnVals
def sim_pearson(prefs,p1,p2):
  # Get the list of mutually rated items
  si={}
  for item in prefs[p1]:
    if item in prefs[p2]: si[item]=1

  # if there are no ratings in common, return 0
  if len(si)==0: return 0

  # Sum calculations
  n=len(si)

  # Sums of all the preferences
  sum1=sum([prefs[p1][it] for it in si])
  sum2=sum([prefs[p2][it] for it in si])

  # Sums of the squares
  sum1Sq=sum([pow(prefs[p1][it],2) for it in si])
  sum2Sq=sum([pow(prefs[p2][it],2) for it in si])

  # Sum of the products
  pSum=sum([prefs[p1][it]*prefs[p2][it] for it in si])

  # Calculate r (Pearson score)
  num=pSum-(sum1*sum2/n)
  den=sqrt((sum1Sq-pow(sum1,2)/n)*(sum2Sq-pow(sum2,2)/n))
  if den==0: return 0

  r=num/den

  return r
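# Small hypothetical usage sketch; sim_pearson assumes sqrt is in scope
# (e.g. `from math import sqrt`). The ratings below are made up.
from math import sqrt

prefs = {
    'Lisa': {'Snakes': 4.5, 'Dupree': 2.5, 'Superman': 4.0},
    'Gene': {'Snakes': 4.0, 'Dupree': 3.5, 'Superman': 5.0},
}
print(sim_pearson(prefs, 'Lisa', 'Gene'))  # ~0.58, a mild positive correlation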
Example no. 12
def modularity(data, COST=4, distDict={}, edgeDict={}, medoids={},
               edges_are_shortest_paths = False):  # as yet, only for named points
    '''
    Algorithm for modularity computation in order
    to assess clustering performance
    '''
    # modularity itself
    Q = 0.0
    # total number of edges (times 2, since edges are counted twice)
    M = 0.0
    degree = {}

    # if edges of the graph in hand are constructed from shortest paths in the
    # original graph ...   (rarely used)
    if edges_are_shortest_paths:
        iDist = 0.0
        iFuzzyAff = {}   # kinda affinity (not 1/(1/aff),  but reciprocal to average shortest paths on edges 1/aff)
        for i in data:
            iDist = distDict[i].values()
            iFuzzyAff[i] = [1.0/j for j in iDist if j!=0]
            # weighted degree
            degree[i] = sum(iFuzzyAff[i])
            # since edges are shortest paths on affinities:
            M += degree[i]

        for med in medoids:
            for pointA in medoids[med]:
                for pointB in medoids[med]:    # Should one include pointA == pointB terms ??? FIX IT
                    if pointA != pointB:
                        # 0.5 due to double computation (modularity of node pairs
                        # ab and ba enter the sum separately)
                        Q += 0.5*( 1.0 / distDict[pointA][pointB] - degree[pointA]*degree[pointB] / (2*0.5*M) )
        Q = Q / (2*0.5*M)

    # if edges of the graph in hand are edges from original graph
    else:
        for i in data:
            # weighted degree
            degree[i] = sum(edgeDict[i].values())
            M += degree[i]
        print "M: ", M

        for med in medoids:
            #print "current Q: ", Q
            for pointA in medoids[med]:
                #print "current Q: ", Q
                for pointB in medoids[med]:    # Should one include pointA == pointB terms ??? FIX IT ...FIXED! yes, it should!
                    #if pointA != pointB:
                    try:
                        Aab = edgeDict[pointA][pointB]
                    except KeyError:
                        Aab = 0
                    # 0.5 due to double computation (modularity of node pairs
                    # ab and ba enter the sum separately)
                    Q += 0.5*( Aab - degree[pointA]*degree[pointB] / (2*0.5*M) )
                    #print "current delta Q: ", 0.5*( Aab - degree[pointA]*degree[pointB] / (2*0.5*M) )

        Q = Q / (2*0.5*M)
        print "final modularity Q: ", Q
    return(Q)
Example no. 13
    def find_cones(self, raw_laser_data):
        step = 10
        data = np.array(raw_laser_data)
        mask = (data == 0)
        data[mask] = 65535  # workaround to simplify arg_min

        arr = []
        for i in range(0, len(data), step):
            arr.append(min_dist(data[i:i+step]))

        ret = []
        for i in range(1, len(arr) - 3):
            if (arr[i] is not None and
               (arr[i-1] is None or arr[i] < arr[i-1] - ZONE_RADIUS) and
               (arr[i+1] is None or arr[i] < arr[i+1] - ZONE_RADIUS)):
                ii = data[i*step:(i+1)*step].argmin() + i*step
                width = sum(data[i*step:(i+1)*step] < data[ii] + 1000)
                ret.append( (ii, data[ii], width) )
            elif (arr[i] is not None and arr[i+1] is not None and
                 (abs(arr[i] - arr[i+1]) < MAX_CONE_SIZE) and
                 (arr[i-1] is None or arr[i] < arr[i-1] - ZONE_RADIUS) and
                 (arr[i+2] is None or arr[i] < arr[i+2] - ZONE_RADIUS)):
                ii = data[i*step:(i+2)*step].argmin() + i*step
                width = sum(data[i*step:(i+2)*step] < data[ii] + 1000)
                ret.append( (ii, data[ii], width) )
        return ret
Example no. 14
def main(verbose=False):
    ans = all_base10_base2_palindromes(10 ** 6)
    if verbose:
        return '%s.\nThe full list of palindromes is: %s' % (
            sum(ans), ', '.join(str(number) for number in ans))
    else:
        return sum(ans)
Example no. 15
def foo(model, X, evidence, T, json, methods, runs):
    E = [[],[],[]]
    R = [{},{},{}]
    for m in methods:
        if m != 'VE':
            for i in range(runs):
                e, p = do_experiment(model, X, evidence, T, m, json, R)
                if m != 'GS':
                    E[0].append(quant_array(evidence_mse(e['E'],mean_e)))
                E[1].append(quant_array(posterior_mse(p,mean_p)))
                E[2].append(quant_dict(p))
            if m != 'GS':
                R[0][m] = {'E':map(lambda y: y/runs, [sum(map(lambda x: x[i], E[0])) for i in range(len(E[0][0]))])}

            R[1][m] = map(lambda y: y/runs, [sum(map(lambda x: x[i], E[1])) for i in range(len(E[1][0]))])
            for dic in E[2]:
                R[2][m] = {}
                for key, value in dic.iteritems():
                    R[2][m][key] = map(lambda y: y/runs, [sum(map(lambda x: x[key][i], E[2])) for i in range(len(value))])
            E = [[],[],[]]
        else:
            mean_e, mean_p = do_experiment(model, X, evidence, T, m, json, R)
            R[0][m] = mean_e
            R[1][m] = mean_p
            R[2][m] = mean_p
    return R
Example no. 16
def animate_yard(input, num_cycles, part2=False):
    light_grid = init_light_grid(input)
    if part2: turn_corners_on(light_grid)
    for i in range(num_cycles):
        light_grid = update_light_grid(light_grid)
        if part2: turn_corners_on(light_grid)
    return sum([sum(row) for row in light_grid])
Example no. 17
def compute_ks_by_contained(contigs_by_lib_name, sinks, sources):
    # compute median of maxmin as well as ks p-value of contained maxmin
    for lib_snk in contigs_by_lib_name:
        # for a fixed lib_snk; do all source libs together
        # contained_ctg: contig names of all source libraries stored by source library names
        contained_ctg=collections.defaultdict(set)
        for snkCtg in contigs_by_lib_name[lib_snk].itervalues():
            for srcCtg in snkCtg.contained_in:
                contained_ctg[srcCtg.lib].add(srcCtg.name)
        for lib_src in contigs_by_lib_name:
            if lib_src in contained_ctg:
                contained=[]
                not_contained=[]
                for ctg in contigs_by_lib_name[lib_src]:
                    if ctg in contained_ctg[lib_src]:
                        contained.append(contigs_by_lib_name[lib_src][ctg].maxmin)
                    else:
                        not_contained.append(contigs_by_lib_name[lib_src][ctg].maxmin)
 #               contained=[contigs_by_lib_name[lib_src][ctg].maxmin for ctg in contigs_by_lib_name[lib_src] if ctg in contained_ctg[lib_src]]
 #               not_contained=[contigs_by_lib_name[lib_src][ctg].maxmin for ctg in contigs_by_lib_name[lib_src] if ctg not in contained_ctg[lib_src]]
                ks_pvalue = stats.ks_2samp(contained, not_contained)[1]
                print lib_src, lib_snk, ks_pvalue, sum(contained)/len(contained), sum(not_contained)/len(not_contained)
                if ks_pvalue < 0.05 and np.median(contained) > np.median(not_contained):
                    sources[lib_snk] |= {lib_src}
                    sinks[lib_src] |= {lib_snk}
Example no. 18
def test_image_edges():
    f = plt.figure(figsize=[1, 1])
    ax = f.add_axes([0, 0, 1, 1], frameon=False)

    data = np.tile(np.arange(12), 15).reshape(20, 9)

    im = ax.imshow(data, origin='upper',
                   extent=[-10, 10, -10, 10], interpolation='none',
                   cmap='gray'
                   )

    x = y = 2
    ax.set_xlim([-x, x])
    ax.set_ylim([-y, y])

    ax.set_xticks([])
    ax.set_yticks([])

    buf = io.BytesIO()
    f.savefig(buf, facecolor=(0, 1, 0))

    buf.seek(0)

    im = plt.imread(buf)
    r, g, b, a = sum(im[:, 0])
    r, g, b, a = sum(im[:, -1])

    assert g != 100, 'Expected a non-green edge - but sadly, it was.'
Example no. 19
    def threeSum(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        
        s = sorted(nums)
        N = len(nums)
        sol = []
        for i in range(0, N-2):
            lo = i+1
            hi = N-1
            
            while lo < hi:
                
                tup = [s[i], s[lo], s[hi]]

                if sum(tup) == 0:
                    if tup not in sol:
                        sol.append(tup)
                    hi = hi - 1
                
                elif sum(tup) < 0:
                    lo = lo + 1
                    
                else:
                    hi = hi - 1
                    
        return sol
Example no. 20
def combine_gaussians(mean_var_list):
    """@mean_var_list is like [(mean1, var1), (mean2, var2), ... ]
    returns a (mean, variance) that is the "product" of the input gaussians."""
    variance = 1.0 / sum([1.0 / v  for (m, v) in mean_var_list])
    mean_top = sum([m   / (2.0*v)  for (m, v) in mean_var_list])
    mean_bot = sum([1.0 / (2.0*v)  for (m, v) in mean_var_list])
    return (mean_top/mean_bot, variance)
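# Quick hypothetical sanity check: combining two unit-variance Gaussians centred
# at 0 and 2 should give mean 1.0 and variance 0.5.
print(combine_gaussians([(0.0, 1.0), (2.0, 1.0)]))  # -> (1.0, 0.5)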
Example no. 21
def main(verbose=False):
    message = get_data(59).split(',')

    message = [int(char) for char in message]

    possible_keys = []
    for ascii1 in range(97, 123):
        for ascii2 in range(97, 123):
            for ascii3 in range(97, 123):
                possible_keys.append([ascii1, ascii2, ascii3])

    for key in possible_keys:
        curr = translate(message, key)
        if (curr.upper().find('THE') != -1
                and curr.upper().find('IS') != -1
                and curr.upper().find('AND') != -1
                and curr.upper().find('OF') != -1
                and curr.upper().find('ARE') != -1):
            break

    key_as_word = ''.join(chr(val) for val in key)
    result = '\n\nActual Message:\n%s\n\nThe key is: %s or %s.' % (
        curr, key_as_word, key)

    if verbose:
        return '%s%s' % (sum(ord(letter) for letter in curr), result)
    else:
        return sum(ord(letter) for letter in curr)
Example no. 22
    def woodCut_wrong(self, L, k):
        # write your code here
        if not L or k < 1:
            return -1
        # length = 0
        # for i in range(len(L)):
        #     length = max(length, L[i])
        length = max(L)
        left, right = 1, length
        while left < right:
            mid = (left+right)/2
            cnt = sum(wood/mid for wood in L)
            if cnt < k:
                right = mid - 1
            elif cnt > k:
                #left = mid + 1  # wrong for case 6, after mid+1, cnt will be bigger(a lot bigger), but max will be excluded??
                left = mid # after modified, dead loop here for case 5 and 6
            else:
                if mid == left:
                    #return mid  # can NOT directly return here, check case 3, right maybe be lost out of possible right answer
                    if sum(wood/right for wood in L) == k:
                        return right
                    else:
                        return left
                else:
                    #left = mid + 1  # can't skip here, mid may still be included in answer, check case 1,2
                    left = mid

        #print ("reached out side")
        #return left #wrong for case 4, out when left > right
        # return min(left, right) #wrong for case 5
        return min(mid, right)
def compute_cost( X, y, theta, lam ):

    '''Compute cost for logistic regression.'''
    
    # Number of training examples
    m = y.shape[0]

    # Compute the prediction based on theta and X
    predictions = X.dot( theta )

    # Clip the argument before sending it to the sigmoid function:
    # arguments >= 20 give a sigmoid of effectively 1, and very negative
    # arguments are clipped at -500 to avoid overflow.
    predictions[ where( predictions >= 20 ) ] = 20
    predictions[ where( predictions <= -500 ) ] = -500
    hypothesis = sigmoid( predictions )

    hypothesis[ where( hypothesis == 1.0 ) ] = 0.99999

    # Part of the cost function without regularization
    J1 = ( -1.0 / m ) * sum( ( y * np.log( hypothesis ) ) + 
                            ( ( 1.0 - y ) * np.log( 1.0 - hypothesis ) ) ) 

    # Computing the regularization term
    J2 = lam / ( 2.0 * m ) * sum( theta[ 1:, ] * theta[ 1:, ] )
    error = hypothesis - y

    return J1 + J2
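# compute_cost above depends on numpy's `where`, `np`, and a `sigmoid` helper that
# are not part of this excerpt; the stand-ins and data below are assumptions, shown
# only to make one call concrete.
import numpy as np
from numpy import where

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

X = np.array([[1.0, 0.5], [1.0, -1.2], [1.0, 2.3]])
y = np.array([1.0, 0.0, 1.0])
theta = np.zeros(2)
print(compute_cost(X, y, theta, lam=0.1))  # ~0.693 (= log 2) for an all-zero theta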
def maxpresent_mincost(mydict,tup,tschedule):
     """get max number presenter with min cost"""
     presenter=[]
     for pres in combinations(mydict,3):
       thour=sum(int(mydict[i][0]) for i in pres)
       tcost =sum(int(mydict[i][1][1:]) for i in pres)
       presenter.append((pres,thour,tcost))
     mydict['presenter']=presenter
     output = OrderedDict()
     count = 0
     minlist=[]
     for pres,thour,tcost in mydict['presenter']:  
        if thour==tschedule:
            count +=1
            if count==1:
                minlist.append(tup(pres,thour,tcost))
                output['mincost']=minlist
            elif count:
               #print pres,thour,tcost
               #print output['mincost'][0].cost
               if output['mincost'][0].cost == tcost:
                 minlist.append(tup(pres,thour,tcost))
                 output['mincost']=minlist
               if output['mincost'][0].cost >tcost:
                   minlist=[]
                   minlist.append(tup(pres,thour,tcost))
                   output['mincost']=minlist
               
     
     result_pattern(output)
     return output
Example no. 25
def get_value(k):
    p = MixedIntegerLinearProgram(maximization=True)

    a = p.new_variable(real=True)

    p.add_constraint(sum(a[i] * (i - _sage_const_1) for i in xrange(_sage_const_1, k + _sage_const_2)) == _sage_const_1)
    for j in xrange(_sage_const_1, k + _sage_const_1):
        L = []
        for t in xrange(_sage_const_1, j):
            L.append(-k + t)
        L.append(_sage_const_0)
        for t in xrange(j + _sage_const_1, k + _sage_const_2):
            L.append(t - _sage_const_1)
        print(L)
        p.add_constraint(
            sum(L[i - _sage_const_1] * a[i] for i in xrange(_sage_const_1, k + _sage_const_2)) >= _sage_const_0
        )

    for j in xrange(_sage_const_1, k + _sage_const_1):
        p.add_constraint(a[j] >= _sage_const_0)

    cur = p.get_backend()
    cur.set_verbosity(_sage_const_3)
    # for j in xrange(k):
    #     for i in xrange(j+1):
    #         p.add_constraint(res[(i,j)] >= alpha[i] - d[j])
    # for i in xrange(k):
    #     p.add_constraint(sum(res[(i,j)] for j in range(i,k)) <= f[0])

    print(p.constraints())
    p.set_objective(sum(a[j] for j in range(_sage_const_1, k + _sage_const_2)))
    print(p.solve())
    x = p.get_values(a)
    for key in x.keys():
        print(key, x[key])
Example no. 26
def buildOCTree(volume, nodecenter=cam.Point(0,0,0), level=0):
    # build octree of volume, return root node
    
    node = OCTNode( level, center = nodecenter , type = 1, childlist=None)
    
    flags = []
    for n in xrange(0,9): # test all points
        flags.append( volume.isInside( node.nodePoint(n) ) )
    
    if (sum(flags) == 0): # nothing is inside
        node.type = 0
        #print "nothing inside!"
        return node
    
    if (sum(flags) == 9): # everything is inside
        node.type = 2
        #print "all inside!"
        return node
        
    if level== OCTMax: # reached max levels
        return node #OCTNode(level, center= nodecenter, type = 2, childlist = None)
    
    
    # have to subdivide:
    childs = []
    child_centers = []
    for n in xrange(1,9):
        child_center = node.childCenter(n) 
        childs.append( buildOCTree( volume , nodecenter = child_center, level= level+1) )
    node.setChildren(childs)
    
    return node
Example no. 27
 def logValue(self,x,subset=None): 
   """Evaluate log F(x) = \sum_r log f_r(x_r) for some (full) configuration x
        if optional subset != None, uses *only* the factors in the Markov blanket of subset
   """
   factors = self.factors if subset is None else self.factorsWithAny(subset)
   if self.isLog: return sum( [ f.valueMap(x) for f in factors ] ) 
   else:          return sum( [ np.log(f.valueMap(x)) for f in factors ] )
Example no. 28
 def woodCut(self, L, k):
     # write your code here
     if not L or k < 1:
         return 0 # expect 0, not -1 here
     # length = 0
     # for i in range(len(L)):
     #     length = max(length, L[i])
     length = max(L)
     left, right = 0, length  # starting point should be 0, not 1, see case 7
     while left < right:
         mid = (left+right)/2
         if mid == left:
             # return mid  # can NOT directly return here, check case 3, right maybe be lost out of possible right answer
             if sum(wood / right for wood in L) >= k:
                 return right
             else:
                 return left
         cnt = sum(wood/mid for wood in L)
         if cnt < k:
             right = mid - 1
         elif cnt >= k:
             #left = mid + 1  # wrong for case 6, after mid+1, cnt will be bigger(a lot bigger), but max will be excluded??
             left = mid
     #print ("reached out side")
     #return left #wrong for case 4, out when left > right
     # return min(left, right) #wrong for case 5
     return min(mid, right)
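# Quick check (Python 2, where / on ints is floor division, which this code relies on).
# self is unused, so passing None works if the method is pasted at module level:
print(woodCut(None, [232, 124, 456], 7))  # -> 114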
Example no. 29
    def update_word_vectors(self, words, rate, batchUpdates):

        noiseWords = get_noise_words(words, self.numNoisySamples, self.vocabList)
        for i, word in enumerate(words):
            if i < self.windowSize: contextWords = words[0:i] + words[i+1:i+self.windowSize+1]
            else: contextWords = words[i-self.windowSize:i] + words[i+1:i+self.windowSize+1]

            wordContextScore = logistic(diff_score_word_and_noise(word, contextWords, self.numNoisySamples, self.noiseDist, self.wordBiases, self.wordVectors, self.vocab))
            noiseScores = [logistic(diff_score_word_and_noise(noiseWord, contextWords, self.numNoisySamples, self.noiseDist, self.wordBiases, self.wordVectors, self.vocab)) for noiseWord in noiseWords]

            updateInWordBias = 1-wordContextScore-sum(noiseScores)
            updateInWordVector = (1-wordContextScore)*grad_word(word, contextWords, self.wordVectors, self.vocab) - \
                                sum([noiseScores[j]*grad_word(noiseWord, contextWords, self.wordVectors, self.vocab) for j, noiseWord in enumerate(noiseWords)])

            wordIndex = self.vocab[word][0]
            if wordIndex not in batchUpdates: 
                batchUpdates[wordIndex] = [updateInWordVector, updateInWordBias, 1.]
            else: 
                batchUpdates[wordIndex][0] += updateInWordVector
                batchUpdates[wordIndex][1] += updateInWordBias
                batchUpdates[wordIndex][2] += 1.
        
        #self.add_gradient_to_words_adagrad(batchUpdates, rate)

        return batchUpdates
Example no. 30
def compute_output_csvs():
    payees = [Payee(rec) for rec in csv.reader(open(INPUT_CSV))]
    payees.sort(key=lambda o: o.gross, reverse=True)

    total_fees = sum([payee.assess_fee() for payee in payees])  # side-effective!
    total_net = sum([p.net for p in payees])
    total_gross = sum([p.gross for p in payees])
    assert total_fees + total_net == total_gross

    paypal_csv = csv.writer(open(PAYPAL_CSV, 'w+'))
    gittip_csv = csv.writer(open(GITTIP_CSV, 'w+'))
    print_rule()
    print("{:<24}{:<32} {:^7} {:^7} {:^7}".format("username", "email", "gross", "fee", "net"))
    print_rule()
    for payee in payees:
        paypal_csv.writerow((payee.email, payee.net, "usd"))
        gittip_csv.writerow(( payee.username
                            , payee.email
                            , payee.gross
                            , payee.fee
                            , payee.net
                            , payee.additional_note
                             ))
        print("{username:<24}{email:<32} {gross:>7} {fee:>7} {net:>7}".format(**payee.__dict__))

    print(" "*56, "-"*23)
    print("{:>64} {:>7} {:>7}".format(total_gross, total_fees, total_net))
Example no. 31
                    area = cv2.contourArea(i)
                    #print "cONTOUR_AREA", area
                    rect = cv2.boundingRect(i)
                    #print "RECT_CO-ORDINATES", rect
                    radius = rect[2] / 2
                    if 10 <= area <= 40:  # and abs(1-(rect[2]/rect[3]))<=0.2 and abs(1-(area/3.14159*(radius**2)))<=0.2):
                        cv2.circle(img, (x + ex + rect[0] + radius,
                                         y + ey + rect[1] + radius), radius,
                                   (255, 0, 0), 2)
                        ael_x.append(x + ex + rect[0] + radius)
                        ael_y.append(y + ey + rect[1] + radius)

            if (len(ael_x) != 0 and len(ael_y) != 0):
                mouse_x = sum(ael_x) / len(ael_x)
                mouse_y = sum(ael_y) / len(ael_y)
                m.move((mouse_x * (1366.0 / 640)), (mouse_y * (768.0 / 480)))
                print "mouse_x", mouse_x, "\tmouse_y", mouse_y

        cv2.imshow('frame', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        ret, frame = cap.read()

    cap.release()
    cv2.destroyAllWindows()

    #while(True):
    #key = cv2.waitKey(20)
    #if key in [27, ord('Q'), ord('q')]:
def average(list):
    return int((sum(list)-max(list)-min(list))/(len(list)-2))
q = []
t = []
rep = int(input())
for i in range(rep):
    quality, time = (input()).split()
    q.append(quality)
    t.append(time)

index = 0
s = []
while index < len(q):
    ans = float(q[index]) * float(t[index])
    s.append(ans)
    index += 1

Sum = sum(s)
print('%.3f' % Sum)
Example no. 34
 def i2len(self, pkt, val):
     return sum( self.field.i2len(pkt,v) for v in val )
Example no. 35
 def i2len(self, pkt, val):
     return sum( len(p) for p in val )
Example no. 36
db = [1,3,3.4,5.678,34,78.0009]
print("The List in Python")
print(db[0])
db[0] = db[0] + db[1]
print(db[0])
print("Add in the list")
db.append(111)
print(db)
print("Remove in the list")
db.remove(3)
print(db)
print("Sort in the list")
db.sort()
print(db)
db.reverse()
print(db)
print("Len in the list")
print(len(db))
print("For loop in the list")
for n_db in db:
	print(n_db)
print(min(db))
print(max(db))
print(sum(db))
my_food = ['rice', 'fish', 'meat']
friend_food = my_food
friend_food.append('ice cream')
print(my_food)
print(friend_food)
def RNG(nbits, a, b):
	nbytes = nbits // 8
	B = os.urandom(nbytes)
	return a * sum([B[i] * b ** i for i in range(len(B))]) % 2**nbits
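# RNG above needs `import os` (not shown in the excerpt) and Python 3, where
# indexing bytes yields ints; a hypothetical call:
import os
print(RNG(128, 3, 7))  # 128-bit value mixed from os.urandom bytes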
Example no. 38
test_df.head(5)

# Note: There is no target variable for the hold out data (i.e. "Survival" column is missing), so there's no way to use this as our cross validation sample. Refer to Section 5.

# ## 2. Data Quality & Missing Value Assessment

# In[ ]:

# check missing values in train dataset
titanic_df.isnull().sum()

# ### 2.1    Age - Missing Values

# In[ ]:

sum(pd.isnull(titanic_df['Age']))

# In[ ]:

# proportion of "Age" missing
round(177 / (len(titanic_df["PassengerId"])), 4)

# ~20% of entries for passenger age are missing. Let's see what the 'Age' variable looks like in general.

# In[ ]:

titanic_df["Age"].hist(bins=15, color='teal', alpha=0.8)

# Since "Age" is (right) skewed, using the mean might give us biased results by filling in ages that are older than desired.  To deal with this, we'll use the median to impute the missing values.

# In[ ]:
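# The imputation cell itself is cut off in this excerpt; a minimal sketch of the step
# described above (column name as in the Kaggle Titanic data):
titanic_df["Age"].fillna(titanic_df["Age"].median(skipna=True), inplace=True)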
Example no. 39
 def __calculate_lyapunov(self):
     return 0.5 * sum(
         [(station_snapshot.available_bike_count_before_rebalance -
           station_snapshot.target_bike_count)**2
          for station_snapshot in self.station_snapshots.values()])
Example no. 40
bestk = []
kc = 0
for n_neighbors in range(1, 900, 2):
    kf = KFold(n_splits=10)
    #n_neighbors = 85
    kscore = []
    k = 0
    for train, test in kf.split(X):
        #print("%s %s" % (train, test))
        X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]

        #time.sleep(100)

        # we create an instance of Neighbors Classifier and fit the data.
        clf = neighbors.KNeighborsRegressor(n_neighbors, weights='distance')
        clf.fit(X_train, y_train)

        kscore.append(abs(clf.score(X_test, y_test)))
        #print kscore[k]
        k = k + 1

    print(n_neighbors)
    bestk.append(sum(kscore) / len(kscore))
    print bestk[kc]
    kc += 1

# to do here: given this array of E_outs in CV, find the max, its
# corresponding index, and its corresponding value of n_neighbors
# Prints the sorted data, in descending order.
print sorted(bestk, reverse=True)
Example no. 41
	def __init__(self, data=[], N=None, missingfn=None):
		for key,count in data:
			self[key] = self.get(key, 0) + int(count)   # accumulate counts per key (dedupes repeated keys)
		self.N = float(N or sum(self.itervalues()))
		self.missingfn = missingfn or (lambda k, N: 1./N)
Example no. 42
 def __init__(self, Ar, Di, Co, Ca, Ta, In, Th):
     # Assigning initial properties of object
     self.Ar = Ar
     self.Di = Di
     self.Co = Co
     self.Ca = Ca
     self.Ta = Ta
     self.In = In
     self.Th = Th
     self.subtours = []  #empty subtours to start
     self.objective = -1  #objective values
     self.pairs = None  #where to stuff the matched areas
     # Creating inequality metrics
     SumCalls = sum(Ca.values())
     MaxIneq = (SumCalls / Ta) * (1 + In)
     MinIneq = (SumCalls / Ta) * (1 - In)
     self.ineq = [MaxIneq, MinIneq]
     # Creating contiguity graph
     G = networkx.Graph()
     for i in Ar:
         for j in Co[i]:
             G.add_edge(i, j)
     self.co_graph = G
     # Creating threshold vectors for decision variables
     NearAreas = {}
     Thresh = []
     for s in Ar:
         NearAreas[s] = []
         for d in Ar:
             if Di[s][d] < Th:
                 Thresh.append((s, d))
                 NearAreas[s].append(d)
     self.NearAreas = NearAreas
     self.Thresh = Thresh
     # Setting up the pulp problem
     P = pulp.LpProblem("P-Median", pulp.LpMinimize)
     # Decision variables
     assign_areas = pulp.LpVariable.dicts("SD",
                                          [(s, d) for (s, d) in Thresh],
                                          lowBound=0,
                                          upBound=1,
                                          cat=pulp.LpInteger)
     # Just setting the y_vars as the diagonal sources/destinations
     y_vars = {s: assign_areas[(s, s)] for s in Ar}
     tot_constraints = 0
     self.assign_areas = assign_areas
     self.y_vars = y_vars
     # Function to minimize
     P += pulp.lpSum(Ca[d] * Di[s][d] * assign_areas[(s, d)]
                     for (s, d) in Thresh)
     # Constraint on max number of areas
     P += pulp.lpSum(y_vars[s] for s in Ar) == Ta
     tot_constraints += 1
     # Constraint (1): no assignment to a destination if the source area is not selected
     # Second is contiguity constraint
     for s, d in Thresh:
         P += assign_areas[(s, d)] - y_vars[s] <= 0
         tot_constraints += 1
         if s != d:
             # Identifying locations contiguous in nearest path
             both = set(networkx.shortest_path(G, s, d)) & set(Co[d])
             # Or if nearer to the source
             nearer = [a for a in Co[d] if Di[s][a] < Di[s][d]]
             # Combining, should always have at least 1
             comb = list(both | set(nearer))
             # Contiguity constraint
             P += pulp.lpSum(assign_areas[(s, a)] for a in comb
                             if a in NearAreas[s]) >= assign_areas[(s, d)]
             tot_constraints += 1
     # Constraint every destination covered once
     # Then Min/Max inequality constraints
     for (sl, dl) in zip(Ar, Ar):
         P += pulp.lpSum(assign_areas[(s, dl)] for s in NearAreas[dl]) == 1
         P += pulp.lpSum(assign_areas[(sl, d)] * Ca[d]
                         for d in NearAreas[sl]) <= MaxIneq
         P += pulp.lpSum(assign_areas[(sl, d)] * Ca[d]
                         for d in NearAreas[sl]) >= MinIneq * y_vars[sl]
         tot_constraints += 3
     self.model = P
     print(f'Total number of decision variables {len(Thresh)}')
     print(f'Total number of constraints {tot_constraints}')
     av_solv = pulp.listSolvers(onlyAvailable=True)
     print(f'Available solvers from pulp, {av_solv}')
Example no. 43
n=int(input())
coke=input().split()
capacity=input().split()
for i in range(len(coke)):
    coke[i]=int(coke[i])
for i in range(len(capacity)):
    capacity[i]=int(capacity[i])
capacity.sort()
if (capacity[-1]+capacity[-2])>sum(coke):
    print('YES')
else:
    print('NO')
Example no. 44
        p = MPI[p]
    MPV.append(tuple([0 for i in range(r)]))
    MPV.reverse()

    apx_perm = []  #permutation given by the DP
    for i in range(1, len(MPV)):
        idx = [list(MPV[i])[j] - list(MPV[i - 1])[j]
               for j in range(r)].index(1)
        apx_perm.append(NI[idx][MPV[i - 1][idx]])
    end_time = time.time()

    tm.append(end_time - start_time)
    soln.append(MV[-1])
    vec.append(len(M))

print("avg_time: ", sum(tm) / num, '\n')
print('avg_num_vec:', sum(vec) / num, '\n')
print('soln:', soln, '\n')
'''
######################### find exact solution ##########
start_time = time.time()
l = list(permutations(range(n))) 

opt_dist = 0
opt_perm = []
for perm in l:
    #dist = sum(V[perm[j]]/sum((1+eps)**(1+Class[perm[k]]) for k in range(j+1)) for j in range(len(perm)))
    dist = sum(V[perm[j]]/sum(C[perm[k]] for k in range(j+1)) for j in range(len(perm)))
    if dist > opt_dist:
        opt_dist = dist
        opt_perm = perm
ct=[]
schedule_array.sort()
for arr in schedule_array:
    ct.append(arr[2])
print("CT : ",ct)

tat=[]
for i in range(n):
    tat.append(ct[i]-tempat[i])
print("TAT : ",tat)

wt=[]
for i in range(n):
    wt.append(tat[i]-tempbt[i])
print("WT : ",wt)

rt=[]
for i in range(n):
    if schedule_array[i][1]==0:
        rt.append(0)
    else:
        rt.append(ct[i-1]-tempat[i])
print("RT : ",rt)

avgtat=sum(tat)/n
avgwt=sum(wt)/n
throughput=float(n)/ct[len(ct)-1]
print("Average TAT : ",avgtat)
print("Average WT : ",avgwt)
print("Throughput",throughput)
Example no. 46
def polyharmonic(params, N):
    signal = []
    for i in range(N):
        signal.append(sum(harmonic_single(amplitude=param[0], frequency=param[1], phase=param[2], N=N, i=i) for param in params))
    return list(signal)
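# harmonic_single is not part of this excerpt; a plausible stand-in plus one call
# (its signature below is inferred from the keyword arguments used above):
import math

def harmonic_single(amplitude, frequency, phase, N, i):
    return amplitude * math.sin(2 * math.pi * frequency * i / N + phase)

print(polyharmonic([(1.0, 5, 0.0), (0.5, 13, math.pi / 4)], N=64)[:4])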
Example no. 47
                        for i in range(0,65):
                            pudict[seqid][i]=[]
                        pudict[seqid][hexcount].append(float(val))
                    except ValueError:
                        #catch any remaining '-' out there
                        sys.stderr.write('theres a dash here: %s\n' % tmpline)
                    hexcount+=1
                    if hexcount >= 65:
                        break
metadict={}
with open('meanpu.byseq.out','w') as outfile:
    for id,posdict in pudict.items():
        sourcefile='_'.join(id.split('_')[:-1])
        for pos,puvals in posdict.items():
            try:
                meanpu=sum(puvals)/float(len(puvals))
                sempu=ss.sem(puvals)
            except ZeroDivisionError:
                sys.stderr.write('%s,%s,%s\n' % (id,pos,puvals))
                continue 
            try:
                metadict[sourcefile][pos].append(meanpu)
            except KeyError:
                metadict[sourcefile]={}
                for i in range(0,65):
                     metadict[sourcefile][i]=[]
                # append once, after all 65 position lists exist
                metadict[sourcefile][pos].append(meanpu)
            outfile.write('%s\t%s\t%s\t%s\n' % (id,pos,meanpu,sempu))

with open('meanpu.bypos.out','w') as outfile:    
    for sourcefile,poslist in metadict.items():
Example no. 48
def _consteq(str1, str2):
    """ Constant-time string comparison. Suitable to compare bytestrings of fixed,
        known length only, because a length mismatch returns early and is observable. """
    return len(str1) == len(str2) and sum(ord(x)^ord(y) for x, y in zip(str1, str2)) == 0
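# Quick sanity checks (Python 2, where iterating a str yields one-character strings for ord()):
assert _consteq("secret", "secret")
assert not _consteq("secret", "secrex")
assert not _consteq("secret", "longer-secret")  # different length short-circuits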
Example no. 49
import math  # needed for math.pow below

a = 10  # assumed initial value; this excerpt starts mid-script
b = 20
a, b = b, a
print("a is {}, b is {}".format(a, b))

## Python is case-sensitive
n = 10
N = 20

## Numeric types
num = 10
num = num + 10
num += 10
num -= 10
num *= 10

## Exponentiation
print(math.pow(10, 3))

## Max / min values
print(min(1, 2, 3))
print(max(1, 2, 3))

## sum
print(sum([1, 2, 3]))

## List Dict Tuple
ll = [1, 2, 3]
dd = {"zhang": 10, "zhao": 10}

tt = (a, 1, 100, "str11")
Example no. 50
                cmp = np.array(cmp.data.cpu().numpy(), dtype=bool)
                batch_near_list = np.array(batch_near_list)
                batch_aid = np.array(batch_aid)
                batch_qid = np.array(batch_qid)
                qlist = batch_qid[cmp]
                alist = batch_aid[cmp]
                nlist = batch_near_list[cmp]
                for k in range(len(batch_qid[cmp])):
                    pair = (index2qid[qlist[k]], index2aid[alist[k]], index2aid[nlist[k]])
                    if pair in false_samples:
                        false_samples[pair] += 1
                    else:
                        false_samples[pair] = 1

                cmp = output[:, 0] > output[:, 1]
                acc += sum(cmp.data.cpu().numpy())
                tot += true_batch_size

                loss = marginRankingLoss(output[:, 0], output[:, 1], torch.autograd.Variable(torch.ones(1)))
                loss_num = loss.data.numpy()[0]
                loss.backward()
                optimizer.step()

        # Evaluate performance on validation set
        if iterations % args.dev_every == 1 and epoch != 1:
            # switch model into evaluation mode
            pw_model.eval()
            dev_iter.init_epoch()
            n_dev_correct = 0
            n_dev_total = 0
            dev_losses = []
Example no. 51
 def total_score(self):
     return sum(self.scores)
Example no. 52
 def size(self):
     return sum([node.size for node in self._loop.body_nodes])
Example no. 53
def count_clear():
    return sum(x.count('.') for x in room)
import Bio
from Bio import SeqIO

threshold=20

count=0
for seq in SeqIO.parse("Desktop/Downloads/sample.fastq", "fastq"):
    ave=sum(seq.letter_annotations["phred_quality"])/len(seq)
    if ave <threshold:
        count+=1

print(count)
def accuracy(real, predict):
    return sum(real == predict) / float(real.shape[0])
Example no. 56
def p1(a):
    while len(a) > 1:
        a = [str(eval(a.pop(0) + a.pop(0) + a.pop(0))), *a]
    return a[0]


def p2(a):
    while "+" in a:
        i = a.index("+")
        a[i - 1:i + 2] = [str(eval("".join(a[i - 1:i + 2])))]
    return p1(a)


def parse(a, calc):
    if a.count("(") == 0 and a.count(")") == 0: return int(calc([*a]))
    else:
        while "(" in a:
            i, prev = 0, 0
            while "(" in a[i:] and (low := a.index("(", i)) < (high := a.index(
                    ")", i)):
                i, prev = i + 1, low
            a[prev:high + 1] = [calc(a[prev + 1:high])]
        return int(calc([*a]))


with open("input.txt") as file:
    data = [
        x.replace("(", "( ").replace(")", " )")
        for x in file.read().split("\n")
    ]
    print(*[sum(parse(x.split(" "), p) for x in data) for p in [p1, p2]],
          sep="\n")
Example no. 57
def count_upper_case(message):
    if isinstance(message, str):
        return sum([1 for c in message if c.isupper()])
    else:
        return 0
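# Trivial usage sketch:
print(count_upper_case("Hello World"))  # -> 2
print(count_upper_case(42))             # -> 0 for non-string input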
Example no. 58
 def sum(self, *args):
     return sum(args)
def mini_max_sum(n):
    # remove .split()'s to accept array as input per HR test cases
    sums = [sum([int(j) for j in n.split()]) - int(i) for i in n.split()]

    print sorted(sums)[0], sorted(sums)[-1]
Example no. 60
            fingerSum = fingerSum + length
        fingerSum = (fingerSum - scaler.mean_[1]) / scaler.scale_[1]

        #Normalized longest X distance between fingertips feature
        shortIndex = 0
        longIndex = 0
        n = 0
        for finger in fingerPos:
            if finger[0] < fingerPos[shortIndex][0]:
                shortIndex = n
            if finger[0] > fingerPos[longIndex][0]:
                longIndex = n
            n += 1

        longestDist = hr.dist(fingerPos[shortIndex][0], fingerPos[longIndex][0], fingerPos[shortIndex][1], fingerPos[longIndex][1])
        longestDist = (longestDist - scaler.mean_[2]) / scaler.scale_[2]
        sampleFeatures = np.array([numFingers, fingerSum, longestDist])
        if model.predict(sampleFeatures)[0] == label_array[image]:
            success += 1
        else:
            failure += 1
            
    accuracy_array.append(float(success)/float(success + failure))

#Calculate final accuracy and standard deviation
accuracy_average = sum(accuracy_array)/len(accuracy_array)
np_accuracy_array = np.array(accuracy_array)
std = np.std(np_accuracy_array)
print "Average accuracy:", accuracy_average
print "Standard Deviation:", std