Example #1
def weixin_knowledges_reply(age_by_day, number, msg, weatherinfo):
    age = int(age_by_day)
    knowls_all = Knowledge.objects.using('wjbbserverdb').filter(max__gte = age, min__lte = age)
    knowls = None
    count = knowls_all.count()
    if number >= count:
        knowls = knowls_all
        number = count
    else:
        knowls = random.sample(list(knowls_all), number)
    picindexes = random.sample((0,1,2,3,4,5,6,7,8,9), number)
    for i in range(0, number):
        knowls[i].picurl = 'http://wjbb.cloudapp.net:8001/pic/'+str(picindexes[i])+'.jpg'
        knowls[i].url = 'http://wjbb.cloudapp.net/weixin/knowledge/%d/'%(knowls[i].id)
    context = {}
    context['knowls'] = knowls
    context['fromUser'] = msg['ToUserName']
    context['toUser'] = msg['FromUserName']
    context['number'] = str(number+1)
    context['create_time'] = str(int(time.time()))
    context["weather_info"] = weatherinfo
    context['temperature'] = weatherinfo['temperature']
    context['wind'] = weatherinfo['wind']
    context['windstrong'] = weatherinfo['windstrong']
    context['detailinfo'] = weatherinfo['detailinfo']
    context['weather_picurl'] = 'http://wjbb.cloudapp.net:8001/pic/'+str(picindexes[0])+'.jpg'
    context['weather_url'] = 'http://wjbb.cloudapp.net/weixin/knowledge/%d/'%(knowls[0].id)
    print("weather info", weatherinfo)
    t = get_template('weixin/knowledges_msg.xml')
    return t.render(Context(context))
Example #2
    def __init__(self):
        self.commandGiven = False
        self.map = lab10_map.Map("lab10.map")
        print("distance from wall = ", self.map.closest_distance([0.5, 0.5], math.pi*0.0))

        self.sigma = 0.05
        self.distanceVariance = 0.1 #math.sqrt(0.05)  #distance variance is 5 cm
        self.directionVariance = 0.1 #math.sqrt(0.05)    #direction variance is 5 degrees
        self.scale = 100

        self.randomNumbers = random.sample(range(300), 200)
        self.randomTheta = random.sample(range(360), 200)
        self.numOfParticles = 10

        # for i in range(0, self.numOfParticles, 1):
        #     self.randomTheta[i] = math.radians(self.randomTheta[i])

        self.particles = []
        self.particleWeights = []

        for i in range(0, self.numOfParticles*2, 2):
            particle = Particle()
            particle.x = self.randomNumbers[i]/100
            particle.y = self.randomNumbers[i+1]/100
            particle.theta = 0 #math.radians(self.randomTheta[i])
            # print("theta = ", particle.theta)

            # tuple = (, self.randomNumbers[i + 1] / 100, 0.1, self.randomTheta[i])
            self.particles.append(particle)
Example #3
def ciaoruozi():
    print("@" + username + ": /ciaoruozi")
    # Ciao Ruozi.
    if username.lower() == "ruozir":
        # Send a random message from the list
        chosen_msg = random.sample(["Ciao me!",
                                    "Ciao ciao ciao!",
                                    "1 ciaoruozi = 1000€ per me",
                                    "Ruozi si dice: #CiaoRuozi",
                                    "Ciao eh me!",
                                    "Ehi me, ma ciao!",
                                    "Ma ciao me stesso!",
                                    "Me me me! Ciao!"], 1)[0]
        telegram.sendmessage(chosen_msg, sentin, source)
    else:
        # Send a random message from the list
        chosen_msg = random.sample(["Ciao Ruozi!",
                                    "Ciao ciao ciao!",
                                    "1 ciaoruozi = 1 prayer",
                                    "La RYG dice: #CiaoRuozi",
                                    "Ciao eh Ruozi!",
                                    "Ehi Ruozi, ma ciao!",
                                    "Ma ciao Ruozi!",
                                    "Ruozi ruozi ruozi! Ciao!"], 1)[0]
        telegram.sendmessage(chosen_msg, sentin, source)
Example #4
def Fitch(rooted_tree):
    # apply the Fitch algorithm
    
    # traverse Tree in post-order
    for node in rooted_tree.traverse('postorder'):
        if not node.is_leaf():
            children = node.get_children()
            intersect = (children[0].data['split']).intersection(children[1].data['split'])
            if len(intersect) == 0:
                node.data['split'] = (children[0].data['split']).union(children[1].data['split'])
            else:
                node.data['split'] = intersect
    # traverse top-down 
    
    for node in rooted_tree.traverse('levelorder'):
        if node.is_root(): # for the root 
            # if the root has two or more candidate states, randomly choose one
            node.data['split'] = random.sample(list(node.data['split']), 1)[0]
        else:
            # for children node, first check the data from the ancestor
            ancestors = node.get_ancestors() # get the list of ancestor
            data = ancestors[0].data['split'] # get the data from the parent
            if data in node.data['split']:  # check whether the node has a candidate equal to its parent's state
                node.data['split'] = data
            else:
                node.data['split'] = random.sample(list(node.data['split']), 1)[0]
    return rooted_tree
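A minimal usage sketch (not part of the original example): it assumes an ete3-style tree where every node carries a 'data' feature holding a candidate-state set, which is what Fitch() expects.

from ete3 import Tree

t = Tree("((A,B),C);")
states = {"A": {"x"}, "B": {"y"}, "C": {"y"}}
for node in t.traverse():
    # leaves start with their observed state; internal nodes get an empty set to fill
    node.add_feature("data", {"split": states.get(node.name, set())})
t = Fitch(t)
print(t.data['split'])  # the state assigned to the root, here "y"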
Example #5
def convert(snippet, phrase):
    class_names = [w.capitalize() for w in
               random.sample(WORDS, snippet.count("%%%"))]
    other_names = random.sample(WORDS, snippet.count("***"))
    results = []
    param_names = []

    for i in range(0, snippet.count("@@@")):
        param_count = random.randint(1,3)
        param_names.append(', '.join(random.sample(WORDS, param_count)))

    for sentence in snippet, phrase:
        result = sentence[:]

        for word in class_names:
            result = result.replace("%%%", word, 1)

        for word in other_names:
            result = result.replace("***", word, 1)

        for word in param_names:
            result = result.replace("@@@", word, 1)

        results.append(result)

    return results
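A minimal call sketch for convert(); the WORDS list and the snippet/phrase pair below are made-up stand-ins for the module-level data the original relies on.

import random

WORDS = ["apple", "banana", "cherry", "date", "fig", "grape", "kiwi"]

# one "%%%" placeholder is swapped for a capitalized random word in both strings
question, answer = convert("class %%%(object):",
                           "Make a class named %%% that is-a object.")
print(question)  # e.g. "class Fig(object):"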
Example #6
def generateTrials(myDict, nTestTrials):
    '''
    generates a list of trials to write to csv file
    supply 3 columns to be factorially combined
    myDict = {'contrast': [1.0, 0.75, 0.5, 0.25], 'orientation':[225, 315],
        'probeMatch':[1, 0], 'correctKey':[1]}
    '''
    columnHeaders = myDict.keys()
    trialList = data.createFactorialTrialList(myDict)
    for item in trialList:
        if item['probeMatch'] == 1:
            item['correctKey'] = expInfo['sameKey']
        else:
            item['correctKey'] = expInfo['differentKey']

    # write trial list for experiment loop:       
    if expInfo['experimentType']=='practise':
        trialList = rand.sample(trialList, nTestTrials)
        
    with open('mainTrials.csv', 'w', newline='') as mainTrialsFile:
        writer = csv.writer(mainTrialsFile, dialect='excel')
        writer.writerow(columnHeaders)
        for row in trialList:
            writer.writerow(row.values())

    if expInfo['experimentType']=='practise':
        # generate trial list for practise loop:
        practiseTrialList = rand.sample(trialList, nTestTrials)
        # write trial list for practise loop:       
        with open('practiseTrials.csv', 'w', newline='') as practiseTrialsFile:
            writer = csv.writer(practiseTrialsFile, dialect='excel')
            writer.writerow(columnHeaders)
            for row in practiseTrialList:
                writer.writerow(row.values())
Example #7
def convert(snippet, phrase):
	# snippet = question, phrase = answer
	class_names = [w.capitalize() for w in random.sample(WORDS, snippet.count("%%%"))]
	other_names = random.sample(WORDS, snippet.count("***"))
	results = []
	param_names = []
	
	for i in range(0, snippet.count("@@@")):
		param_count = random.randint(1,3)
		param_names.append(', '.join(random.sample(WORDS, param_count)))
	
	for sentence in snippet, phrase:
		# copy sentence in result
		result = sentence[:]
		
		# fake class names
		for word in class_names:
			result = result.replace("%%%", word, 1)
		
		# fake other names
		for word in other_names:
			result = result.replace("***", word, 1)
		
		# fake parameter lists
		for word in param_names:
			result = result.replace("@@@", word, 1)
			
		results.append(result)
	
	return results
Example #8
 def repaint2(self, event):
     """ Period. """
     s = float(random.sample(range(0,5), 1)[0])
     d = float(random.sample(range(1,20), 1)[0])/10.
     self.GetTopLevelParent().GetStatusBar().SetStatusText('New period: %f,%f'%(s,s+d))
     self.t1.SetTime(s,s+d)
     self.t2.SetTime(s,s+d)
Example #9
def generateRandomIndices(r, c, p, t):
	l = []
	while len(l) < p:
		randomIndex = (random.sample(range(r),1)[0], random.sample(range(c),1)[0])
		if randomIndex not in t and randomIndex not in l:
			l += [randomIndex]
	return l
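A quick usage sketch (board size and taken cells are made up): draw three distinct random cells on a 4x5 grid, avoiding cells already in t.

taken = [(0, 0)]
cells = generateRandomIndices(4, 5, 3, taken)
print(cells)  # e.g. [(2, 1), (0, 3), (3, 4)]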
Example #10
def create_sample_MAS_instance(node_count=100, optimum_lower_bound=0.8, density=0.4):
	"""
	Creates a directed graph, subject to the constraints:
		- Some solution must contain at least (optimum_lower_bound)% of the edges.
		- The graph has density (density)
	"""
	
	# Create directed graph on nodes 1,...,node_count
	graph = networkx.DiGraph()
	graph.add_nodes_from(range(1, node_count+1))

	# Generate all possible directed edges, forward and backward
	possible_forward_edges = [(u,v) for u in graph.nodes() for v in graph.nodes() if u < v]
	possible_backward_edges = [(u,v) for u in graph.nodes() for v in graph.nodes() if u > v]

	# Compute balance of forward and backward edges
	edge_count = density * node_count * (node_count - 1) / 2
	
	# Sample subsets of forward and backward edges
	chosen_forward_edges = random.sample(possible_forward_edges, int(optimum_lower_bound * edge_count))
	chosen_backward_edges = random.sample(possible_backward_edges, int( (1 - optimum_lower_bound) * edge_count ))
	graph.add_edges_from(chosen_forward_edges)
	graph.add_edges_from(chosen_backward_edges)

	return permute_graph(graph)
Example #11
def disturb(g,cl):
    ng=g.copy()
    ng.remove_edges_from(ng.edges())
    for i in range(len(cl)-1):  # keep the edges between clusters unchanged
        j=i+1
        while j<len(cl):
            for x in itertools.product(cl[i],cl[j]):  # Cartesian product of node pairs across clusters cl[i] and cl[j]
                if g.has_edge(x[0],x[1]):
                    ng.add_edge(x[0],x[1])
            j+=1
    sub=[]
    for i in range(len(cl)):  # shuffle the edges inside each cluster
        sub=g.subgraph(cl[i]).copy()  # copy, since newer networkx returns a frozen view
        edges=sub.edges()
        numOfe=sub.number_of_edges()
        sub.remove_edges_from(edges)
        setE=[]
        tupleE=[]
        for k in range(numOfe):  # generate numOfe new edges
            l=set(random.sample(cl[i],2))  # draw two random nodes from cl[i]; store as a set so the unordered pair is easy to test for duplicates
            while l in setE:
                l=set(random.sample(cl[i],2))
            setE.append(l)
        
        for item in setE:  # convert each set to a tuple so it can be added as an edge
            tupleE.append(tuple(item))
        ng.add_edges_from(tupleE)
    return ng
Example #12
    def cpl_2_change(self):
        """Change the playlist with random deletions, additions and reordering."""
        p_id = self.playlists['playlist to change'][-1]
        tracks = self.api.get_playlist_songs(p_id)

        #Apply random modifications.
        delete, add_dupe, add_blank, reorder = [random.choice([True, False]) for i in range(4)]

        if tracks and delete:
            log.debug("deleting tracks")
            track_is = range(len(tracks))
            #Select a random number of indices to delete.
            del_is = set(random.sample(track_is, random.choice(track_is)))
            tracks = [track for i, track in enumerate(tracks) if not i in del_is]

        if add_dupe:
            log.debug("adding dupe tracks from same playlist")
            tracks.extend(random.sample(tracks, random.randrange(len(tracks))))

        if add_blank:
            log.debug("adding random tracks with no eid")
            tracks.extend(random.sample(self.library, random.randrange(len(tracks))))

        if reorder:
            log.debug("shuffling tracks")
            random.shuffle(tracks)

        self.api.change_playlist(p_id, tracks)

        server_tracks = self.api.get_playlist_songs(p_id)

        self.assertEqual(len(tracks), len(server_tracks))

        for local_t, server_t in zip(tracks, server_tracks):
            self.assertEqual(local_t['id'], server_t['id'])
Example #13
def set_list_prob(dirr): #2.2.(1)
    line_1 = list(range(1, 10001))
    if dirr[1] == 0:
        list_prob_1 = []
    else:
        list_prob_1 = random.sample(line_1, dirr[1])
        for i in list_prob_1:
            line_1.remove(i)
    if dirr[2] == 0:
        list_prob_2 = []
    else:
        list_prob_2 = random.sample(line_1, dirr[2])
        for i in list_prob_2:
            line_1.remove(i)
    if dirr[3] == 0:
        list_prob_3 = []
    else:   
        list_prob_3 = random.sample(line_1, dirr[3])
        for i in list_prob_3:
            line_1.remove(i)
    if dirr[4] == 0:
        list_prob_4 = []
    else:
        list_prob_4 = random.sample(line_1, dirr[4])
        for i in list_prob_4:
            line_1.remove(i)
    list_prob_0 = line_1
    return list_prob_0,list_prob_1,list_prob_2,list_prob_3,list_prob_4
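A small usage sketch (the dirr mapping is made up): dirr[k] says how many of the 10000 line numbers to draw for list k, and whatever is left over becomes list_prob_0.

dirr = {1: 10, 2: 20, 3: 30, 4: 40}
p0, p1, p2, p3, p4 = set_list_prob(dirr)
print(len(p0), len(p1), len(p2))  # 9900 10 20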
Example #14
def numberFactsLikeThis(klass, nf, rseed=None):
#    tolerances=[0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10, 25, 50, 100]
    if rseed is not None:
        seed(rseed)
    tolerances=[1.0, 2.5, 5.0, 10, 25, 50, 100]
    for tolerance in tolerances:
        ce=closeEnoughNumberFact(klass, nf.magnitude, nf.scale, tolerance, nf.measure)
        ce.remove(nf)
        candidates = []
        for nf_a in ce:
            duplicate = False
            for nf_b in candidates:
                if nf_b.value == nf_a.value:
                    duplicate = True
                    break
            if not duplicate:
                candidates.append(nf_a)

        if len(candidates)>=4:
            bestTolerance = tolerance
            bestComparisons = sample(candidates[1:-1],2)
            bestComparisons.append(candidates[0])
            bestComparisons.append(candidates[-1])
            bestComparisons = sample(bestComparisons,4)
            break
        bestTolerance = tolerance
        bestComparisons = sample(candidates,len(candidates))
    score = round(1*log10(bestTolerance/1000)**2)*(len(bestComparisons)-1)
    return bestComparisons, bestTolerance, score
Example #15
def doge(phenny,input):
    text = input.groups()
    if not text[1]:
        phenny.reply('  no word       such fail             wow               bad say')
        return
    syn = get_from_thesaurus(text[1])
    if not syn:
        phenny.reply('  no word       such fail             wow               bad say')
        return
    syn = [(x.split())[0] for x in syn]
    syn = set(syn)
    n = min([random.randint(3,6), len(syn)])
    dog = random.sample(shibe, n)
    ss = random.sample(list(syn), n)  # syn is a set; make it a sequence for sampling
    out = []
    wow = 0
    i = 0
    while i < n:
        sp = ' ' * random.randint(5, 20)
        if not wow and random.randint(0, 1):
            out.append(sp + 'wow')  # interject a single "wow" without consuming a pair
            wow = 1
        else:
            out.append(sp + dog[i] + ss[i])
            i += 1
    phenny.reply(' '.join(out))
Example #16
def convert(snippet, phrase):   # input: a key/value pair from PHRASES
    class_names = [w.capitalize() for w in random.sample(WORDS, snippet.count("###"))]
    other_names = random.sample(WORDS, snippet.count("***"))    # draw that many distinct words from WORDS
    results = []
    param_names = []

    for i in range(0, snippet.count("@@@")):
        param_count = random.randint(1,3)
        param_names.append(', '.join(random.sample(WORDS, param_count)))
        

    for sentence in snippet, phrase:
        result = sentence[:]


        for word in class_names:
            result = result.replace("###", word, 1)


        for word in other_names:
            result = result.replace("***", word, 1)

        for word in param_names:
            result = result.replace("@@@", word, 1)

        results.append(result)
    return results
Example #17
    def change_to_Random_Dir(self):
        self.logger.debug("Current DIR: %s" % self.client.pwd())
        level1_dirs = []
        for file_name in self.client.nlst():
            try:
                self.client.cwd('/' + file_name)
                level1_dirs.append(file_name)
                self.logger.debug("Directory [L-1]: %s" % file_name)
            except ftplib.error_perm as detail:
                self.logger.debug("It's probably not a directory [L-1]: %s : %s" % (file_name, detail))
        self.logger.debug("Number of L1-Dirs: %i" % len(level1_dirs))

        randomly_sampled_L1_dir = random.sample(level1_dirs, 1)[0]
        self.client.cwd('/' + randomly_sampled_L1_dir)
        self.logger.debug("Current Level-1 DIR selected: %s" % self.client.pwd())

        level2_dirs = []
        for file_name_l2 in self.client.nlst():
            try:
                self.client.cwd('/' + randomly_sampled_L1_dir + '/' +file_name_l2)
                level2_dirs.append(file_name_l2)
                self.logger.debug("Directory [L-2]: %s" % file_name_l2)
            except ftplib.error_perm as detail:
                self.logger.debug("It's probably not a directory [L-2]: %s : %s" % (file_name_l2, detail))
        self.logger.debug("Number of L2-Dirs: %i" % len(level2_dirs))

        rand_L2_dir = random.sample(level2_dirs, 1)[0]
        self.client.cwd('/' + randomly_sampled_L1_dir + '/' + rand_L2_dir)
        self.logger.debug("Current Level-2 DIR selected: %s" % self.client.pwd())
        return self.client.pwd() + '/'
Example #18
def name():
  colors = ['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet']
  shapes = ['Triangle', 'Square', 'Pentagon', 'Hexagon', 'Septagon', 'Octagon']
  return ('%s %s #%d' %
          (random.choice(colors),
           random.choice(shapes),
           random.randint(0, 255)))
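For instance (output varies per call):

print(name())  # e.g. "Blue Hexagon #137"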
Example #19
def get_fake_cdr():
    cdr = dict()
    cdr['posting_time'] = str(datetime.datetime.now())
    cdr['order_time'] = str(datetime.datetime.now())
    cdr['call_flow'] = "[(324, '2013-09-09 00:00:07.421090'), (324, '2013-09-09 00:00:07.431099', 0),(314, '2013-09-09 00:00:07.432024'), (314, '2013-09-09 00:00:07.472149', 0),(300, '2013-09-09 00:00:07.472315'), (302, '2013-09-09 00:00:16.160595'),(302, '2013-09-09 00:00:16.249476', 0), (300, '2013-09-09 00:00:16.249529', 0),(325, '2013-09-09 00:00:16.250241'), (325, '2013-09-09 00:00:16.260295', 0)]"
    cdr['events'] = '{"input2":"None","input3":"None","input0":"+911149426200","input1":"+911143790236","input6":"None","cdr_si":true,"input4":"None","input5":"None","input8":"None","input9":"None","voicemailurl":null,"POSTPROCESS":false,"recordingFile":[],"input7":"None","hangup_time":"2013-09-09 00:00:16.000000","conf_called_to":"+911143790236","answered":true,"status_from_bridge_api":{"Legb_Start_Time":"2013-09-09 00:00:07","Hangup_Cause":"NORMAL_CLEARING","HangupBy":"LegB","HangupTime":"2013-09-09 00:00:16","other_leg_cdr":[],"Channel_Num":"FreeTDM/6:3/01143790236","LegB_UUID":"c5e4a6c9-d48d-4868-b1b5-475932673c80","Bridge_Number":"+911143790236","JoinTime":"2013-09-09 00:00:08"},"session_uuid":"05eb3730-9b86-48cd-b495-15216a536fd2","confFile":"05eb3730-9b86-48cd-b495-15216a536fd2","hangup_cause":900,"member_info":{"Hangup_Cause":"NORMAL_CLEARING","HangupBy":"LegB","HangupTime":"2013-09-09 00:00:16","LegB_UUID":"c5e4a6c9-d48d-4868-b1b5-475932673c80","Bridge_Number":"+911143790236","JoinTime":"2013-09-09 00:00:08"},"action_id":"1116","direction":"_incoming","incall_answered":true,"start_time":"2013-09-09 00:00:07.391113","pickup_time":"2013-09-09 00:00:07.000000","LegB_join_time":"2013-09-09 00:00:08","session_knumber":"43790229","ivr_disp_number":"+911143790229","display_number":"+911143790229","reference_number":800056382,"INCOMING":true,"recordingFileUUID":[],"caller":"+911149426200","LegB_hangup_time":"2013-09-09 00:00:16","debug":false,"record_custom":[],"called":"+911143790229","unique_id":"05eb3730-9b86-48cd-b495-15216a536fd2"}'
    cdr['fs_chan_info'] = "FreeTDM/3:2/43790229"
    cdr['ref_uuid'] = str(uuid.uuid4())
    cdr['hangup_time'] = str(datetime.datetime.now())
    cdr['pickup_time'] = str(datetime.datetime.now())
    cdr['ivr_refnum'] = random.randint(81200345, 812000000)
    cdr['did_number'] = "+9111"+str(random.randint(43790229, 45000000))
    cdr['call_type'] = random.choice(['incoming','outgoing'])
    cdr['attempt_number'] = random.choice(['1', '2', '3'])
    cdr['fs'] = "fs7prod"
    cdr['data'] = 'null'
    cdr['hangup_cause'] = str(random.randint(400, 404))
    cdr['priority'] = str(random.randint(1, 6))
    cdr['end_time'] = str(datetime.datetime.now()+datetime.timedelta(minutes = 4))
    cdr['start_time'] = str(datetime.datetime.now())
    cdr['called'] = "+91"+str(random.randint(9654000000, 9654999999))
    cdr['caller'] = "+91"+str(random.randint(9654000000, 9654999999))
    cdr['unique_id'] = cdr['ref_uuid']
    cdr = json.dumps(cdr)
    return cdr
Example #20
def init_m(coef,set,sol):
    if set['initial_m'] == 'rectangular_1_tip':
        y = set['Ny'] // 2 + 5  # integer grid index
        x = 5
        if y % 2 == 0:
            y += 1
        sol['m'][x,y] = 1
    elif set['initial_m'] == 'rectangular_tip':
        '''Randomly spotted in domain'''
        for tt in range(0,250):
            idx_m_1 = random.sample(range(1,set['Nx'],2),15)
            idx_m_2 = random.sample(range(1,set['Ny'],2),15)
            for id in range(0,len(idx_m_1)):
                if not sol['m'][idx_m_1[id], idx_m_2[id]] == 1 and not sol['n'][idx_m_1[id], idx_m_2[id]] == 1:
                    sol['m'][idx_m_1[id], idx_m_2[id]] = 1
        del idx_m_1
        del idx_m_2
    elif set['initial_m'] == 'retina_1_tip':
        sol['m'][187,213] = 1
    elif set['initial_m'] == 'retina_tip':
        '''Randomly spotted in domain'''
        for tt in range(0,250):
            idx_m_1 = random.sample(range(1,set['Nx'],2),100)
            idx_m_2 = random.sample(range(1,set['Ny'],2),100)
            for id in range(0,len(idx_m_1)):
                r_f = numpy.sqrt((idx_m_1[id]*set['Hh']-set['O_x'])**2 + (idx_m_2[id]*set['Hh']-set['O_y'])**2)
                if not sol['m'][idx_m_1[id], idx_m_2[id]] == 1 and not sol['n'][idx_m_1[id], idx_m_2[id]] == 1  and r_f > set['R_min']:
                    sol['m'][idx_m_1[id], idx_m_2[id]] = 1
        del idx_m_1
        del idx_m_2
    return sol
Example #21
def train_sample(feature_str, label, pos_train=0.5, neg_train=1000):
    """Perform training and testing using disjont samples from the full
    set of label_values. This is equivalent to doing one round of cross
    validation (see classipy.cross_validation) only it keeps the values around
    for display.

    Args:
        feature_str: feature name whose values are fetched for training
        label: label whose rows provide the positive examples
        pos_train: count (or fraction, if <= 1) of positive training examples
        neg_train: count (or fraction, if <= 1) of negative training examples
    """
    all_hashes = list(cass.get_image_hashes())
    pos_hashes = [_[0] for _ in cass.buffered_get_row(cf_labels, label)]
    neg_hashes = list(set(all_hashes) - set(pos_hashes))

    if 0 < pos_train <= 1: pos_train = int(pos_train * len(pos_hashes))
    if 0 < neg_train <= 1: neg_train = int(neg_train * len(neg_hashes))

    # Choose a training sample and a testing sample
    if len(pos_hashes) < pos_train:
        raise ValueError('Not enough positive examples %s(%d)' % \
                         (label, len(pos_hashes)))
    if len(neg_hashes) < neg_train:
        raise ValueError('Not enough negative examples %s(%d)' % \
                         (label, len(neg_hashes)))

    pos_sample = random.sample(pos_hashes, pos_train)
    neg_sample = random.sample(neg_hashes, neg_train)

    labels = [-1 for _ in neg_sample] + [1 for _ in pos_sample]
    values = cass.get_feature_values(feature_str, neg_sample+pos_sample)

    global label_values
    label_values = list(zip(labels, values))  # materialize for len() and reuse

    print('Training classifier with sample %d' % len(label_values))
    train_classifier(label_values)
Example #22
 def setUp(self):
     IMP.test.TestCase.setUp(self)
     self.numbers = random.sample(range(0, 1000), 100)
     self.keys = random.sample(range(0, 1000), 100)
     self.dictionary = dict()
     for key, val in zip(self.keys, self.numbers):
         self.dictionary[key] = val
Example #23
def split_gtf(gtf, sample_size=None, out_dir=None):
    """
    split a GTF file into two equal parts, randomly selecting genes.
    sample_size will select up to sample_size genes in total
    """
    if out_dir:
        part1_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part1.gtf"
        part2_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part2.gtf"
        part1 = os.path.join(out_dir, part1_fn)
        part2 = os.path.join(out_dir, part2_fn)
        if file_exists(part1) and file_exists(part2):
            return part1, part2
    else:
        part1 = tempfile.NamedTemporaryFile(delete=False, suffix=".part1.gtf").name
        part2 = tempfile.NamedTemporaryFile(delete=False, suffix=".part2.gtf").name

    db = get_gtf_db(gtf)
    gene_ids = set([x['gene_id'][0] for x in db.all_features()])
    if not sample_size or (sample_size and sample_size > len(gene_ids)):
        sample_size = len(gene_ids)
    gene_ids = set(random.sample(list(gene_ids), sample_size))
    part1_ids = set(random.sample(list(gene_ids), sample_size // 2))
    part2_ids = gene_ids.difference(part1_ids)
    with open(part1, "w") as part1_handle:
        for gene in part1_ids:
            for feature in db.children(gene):
                part1_handle.write(str(feature) + "\n")
    with open(part2, "w") as part2_handle:
        for gene in part2_ids:
            for feature in db.children(gene):
                part2_handle.write(str(feature) + "\n")
    return part1, part2
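A call sketch (the GTF path and sizes are hypothetical): split up to 1000 randomly chosen genes from annotation.gtf into two halves.

part1, part2 = split_gtf("annotation.gtf", sample_size=1000, out_dir="out")
print(part1, part2)  # out/annotation.part1.gtf out/annotation.part2.gtf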
Example #24
def convert(snippet, phrase):
    class_names=[w.capitalize() for w in random.sample(words,snippet.count("%%%"))]
    #print class_names
    other_names=random.sample(words,snippet.count("***"))
    #print other_names
    results=[]
    param_names=[]
    
    for i in range(0,snippet.count("@@@")):
        param_count=random.randint(1,3)
        param_names.append(','.join(random.sample(words,param_count)))
        #print param_names
        
    for sentence in [snippet, phrase]:
        #print sentence
        result=sentence[:]
        #print result
    #result=[snippet,phrase]     
        for word in class_names:
            result=result.replace('%%%',word,1)
            #print word
            #print result+"a class names"
        for word in other_names :
            result=result.replace("***",word,1)
            #print word
            #print result+"a other names"
        for word in param_names:
            result=result.replace("@@@",word,1)
            #print word
            #print result+"a param names"

        results.append(result)
    #print results
    #print result
    return results
Example #25
    def __init__(self, n=1000, k=10, p=0.02947368):
        self.n = n
        self.k = k
        self.p = p
        self.ws = nx.watts_strogatz_graph(self.n, self.k, self.p, seed='nsll')
        nx.set_node_attributes(self.ws, 'SIR', 'S')
        self.clustering = nx.clustering(self.ws)
        self.betweenness = nx.betweenness_centrality(self.ws)
        p_r_0 = 0.001
        r_0 = int(self.n * p_r_0)
        if r_0 < 1:
            r_0 = 1
        random.seed('nsll')
        self.r = random.sample(self.ws.nodes(), r_0)

        i_0 = 4
        if i_0 < r_0:
            i_0 += 1
        random.seed('nsll')
        self.infected = random.sample(self.ws.nodes(), i_0)
        for n in self.infected:
            self.ws.node[n]['SIR'] = 'I'
        for n in self.r:
            self.ws.node[n]['SIR'] = 'R'
        self.s = self.n - len(self.infected) - len(self.r)
        print(self.r)
        print(self.infected)
Example #26
def createCrossValidationFiles(n):
    # Make copies of the original positive and negative review files
    copyFile('hotelPosT-train.txt', 'postrain-reviews.txt')
    copyFile('hoteNegT-train.txt', 'negtrain-reviews.txt')
    
    # Read the positive and negative reviews into two separate lists
    posReviews = readFileIntoList('postrain-reviews.txt')
    negReviews = readFileIntoList('negtrain-reviews.txt')    
    
    # Use n random reviews for the positive review test set
    # Use the remaining reviews for the positive training set
    testPosReviews = random.sample(posReviews, n)
    trainingPosReviews = [review for review in posReviews if review not in testPosReviews]
    
    # Use n random reviews for the negative review test set
    # Use the remaining reviews for the negative training set
    testNegReviews = random.sample(negReviews, n)
    trainingNegReviews = [review for review in negReviews if review not in testNegReviews]
    
    # Write the test reviews to the test set file
    writeListToFile('test-reviews.txt', testPosReviews, False)
    writeListToFile('test-reviews.txt', testNegReviews, True)
    
    # Write the training reviews to the test set file
    writeListToFile('postrain-reviews.txt', trainingPosReviews, False)
    writeListToFile('negtrain-reviews.txt', trainingNegReviews, False) 
Example #27
    def pick_one(table):
        """Return one random element of a sequence or dict"""

        try:
            return table[random.sample(list(table.keys()), 1)[0]]
        except AttributeError:
            return random.sample(list(table), 1)[0]
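A usage sketch (assuming pick_one is in scope): the same helper works for both mappings and plain sequences.

print(pick_one({"a": 1, "b": 2}))   # 1 or 2
print(pick_one(["red", "green"]))   # "red" or "green"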
Example #28
def ReWeightedRW(G, incomes, sample_size):
	node = random.sample(list(G), 1)[0]  # dicts are not directly samplable in Python 3

	sampling = list()
	node_degrees = list()
	node_incomes = list()

	for i in range(sample_size):
		sampling.append(node)
		node_degrees.append(len(G[node]))
		node_incomes.append(incomes[node])

		# select a random neighbor of node
		node = random.sample(G.get(node), 1)[0]

	# the normal random walk. biased, without correction.
	biased_average_degrees = numpy.average(node_degrees)
	biased_average_incomes = numpy.average(node_incomes)

	# correct the random-walk sampling with inverse-node-degree probabilities
	normalization_constant = 0.0
	for x in node_degrees:
		normalization_constant += (1.0 / x)

	prob = list()
	for x in node_degrees:
		temp = (1.0 / x) / normalization_constant
		prob.append(temp)

	reweighted_average_degrees = sum(i*j for i, j in zip(prob,node_degrees))
	reweighted_average_incomes = sum(i*j for i, j in zip(prob,node_incomes))
	
	return [biased_average_degrees, reweighted_average_degrees, biased_average_incomes, reweighted_average_incomes]
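A toy invocation (graph and incomes are made up): G is an adjacency dict and incomes maps each node to a value.

G = {1: [2, 3], 2: [1, 3], 3: [1, 2, 4], 4: [3]}
incomes = {1: 30000, 2: 45000, 3: 52000, 4: 38000}
biased_deg, rw_deg, biased_inc, rw_inc = ReWeightedRW(G, incomes, sample_size=1000)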
Example #29
def convert(snippet, phrase):
    '''Fill the %%%, *** and @@@ placeholders in snippet and phrase with random words.

    :param snippet: template string containing the placeholders
    :param phrase: description string containing the same placeholders
    :return: list with the two filled-in strings
    '''
    class_names = [w.capitalize() for w in
                   random.sample(WORDS, snippet.count("%%%"))]
    other_names = random.sample(WORDS, snippet.count("***"))
    results = []
    param_names = []

    for i in range(0, snippet.count("@@@")):
        param_count = random.randint(1,3)
        param_names.append(', '.join(random.sample(WORDS, param_count)))

    for sentence in snippet, phrase:
        result = sentence[:]  # a copy of the sentence string

        # fake class names
        for word in class_names:
            result = result.replace("%%%", word, 1) #.replace replaces a string.

        # fake other names
        for word in other_names:
            result = result.replace("***", word, 1)

        # fake parameter lists
        for word in param_names:
            result = result.replace("@@@", word, 1)

        results.append(result)

    return results
Example #30
def sequential_rotor_estimation_cuda(reference_model_array, query_model_array, n_samples, n_objects_per_sample, mutation_probability=None):

    # Build a random permutation of all match indices
    total_matches = n_samples*n_objects_per_sample
    sample_indices = random.sample(range(total_matches), total_matches)

    n_mvs = reference_model_array.shape[0]
    sample_indices = [i%n_mvs for i in sample_indices]

    if mutation_probability is not None:
        reference_model_array_new = []
        mutation_flag = np.random.binomial(1,mutation_probability,total_matches)
        for mut, i in zip(mutation_flag, sample_indices):
            if mut:
                ref_ind = random.sample(range(len(reference_model_array)), 1)[0]
            else:
                ref_ind = i
            reference_model_array_new.append(reference_model_array[ref_ind,:])
        reference_model_array_new = np.array(reference_model_array_new)
    else:
        reference_model_array_new = np.array([reference_model_array[i,:] for i in sample_indices], dtype=np.float64)
    query_model_array_new = np.array([query_model_array[i, :] for i in sample_indices], dtype=np.float64)

    output = np.zeros((n_samples,32), dtype=np.float64)
    cost_array = np.zeros(n_samples, dtype=np.float64)

    blockdim = 64
    griddim = int(math.ceil(reference_model_array_new.shape[0] / blockdim))

    sequential_rotor_estimation_kernel[griddim, blockdim](reference_model_array_new, query_model_array_new, output, cost_array)

    return output, cost_array
Example #31
def equalSample(ageF, out1F, out2F, g):
    userAge = dict()
    for line in open(ageF):
        items = line.split("::")
        userAge[items[0]] = int(items[1])
    
    userGroup = dict()
    
    # We have six groups
    groups = g
    for i in range(groups):
        userGroup[i + 1] = set()
        
    for user in userAge:
        if 1 <= g and userAge[user] <= 18:
            userGroup[1].add(user)
        elif 2 <= g and userAge[user] <= 22:
            userGroup[2].add(user)
        elif 3 <= g and userAge[user] <= 33:
            userGroup[3].add(user)
        elif 4 <= g and userAge[user] <= 45:
            userGroup[4].add(user)
        elif 5 <= g and userAge[user] <= 65:
            userGroup[5].add(user)
        elif 6 <= g:
            userGroup[6].add(user)
    
    glen = []
    for i in range(groups):
        glen.append(len(userGroup[i + 1]))

    print(glen)
    
    benchmark = len(userGroup[3])    
    
    outf1 = open(out1F, 'w')
    outf2 = open(out2F, 'w')
    distR = []
    for i in range(groups):
        distR.append(0) 
    sampledUser = set()
    for ag in userGroup:
        users = userGroup[ag]
        if len(users) > benchmark:
            users = set(random.sample(users, benchmark))
        sampledUser |= users
        
        outList = []
        for i in range(groups):
            if i == (ag - 1):
                outList.append('1')
            else:
                outList.append('0')
        for user in users:
            print(' '.join(outList), file=outf2)
        distR[ag - 1] = len(users) 

    for line in open(ageF):
        user = line.split("::")[0]
        if user in sampledUser:
            print(line.rstrip('\r\n'), file=outf1)
            
    outf1.close()
    outf2.close()
    
    print('---------------Finish the equal sampling------------------')
    print(distR)
Example #32
 def _get_mprankmap(self, prankconn, rinfo):
     return random.sample(range(len(rinfo)), len(rinfo))  # a random permutation of the rank indices
Example #33
        lista[int(relations.index(name))].append(model)
    #for v in lista:
    #   print(v[0])
    #   print('------')
    in_file.close()

    iterations = 100
    acc_svm = 0
    acc_apost = 0
    for ite in range(0, iterations):
        lista_aTratar = []
        pert = []
        lista_samples = np.zeros(len(lista), dtype=object)
        for i in range(0, len(lista)):
            if len(lista[i]) > 500:
                lista_samples[i] = random.sample(lista[i], 500)
            else:
                lista_samples[i] = lista[i]
            for item in lista_samples[i]:
                lista_aTratar.append(item)
                pert.append(i + 1)
        for v in lista_samples:
            print(len(v))
        X_train, X_test, y_train, y_test = train_test_split(lista_aTratar,
                                                            pert,
                                                            test_size=0.4,
                                                            random_state=0)
        [datos, muestras,
         muestras_posteriori] = modelar(X_train, y_train, X_test)
        print(muestras_posteriori[0])
        from sklearn import svm
Example #34
    def __init__(self):
        self.WIDTH = 600
        self.HEIGHT = 700

        self.WIN = pygame.display.set_mode((self.WIDTH, self.HEIGHT))

        pygame.display.set_caption("Minesweeper")
        pygame.init()
        pygame.font.init()
        self.FONT = pygame.font.SysFont('comicsans', 30)
        self.END_FONT = pygame.font.SysFont('comicsans', 60)

        self.FPS = 60
        self.clock = pygame.time.Clock()
        self.runWIN = True
        self.end_won = False
        self.end_lost = False
        self.first_move = True

        icon = pygame.image.load("images/bomb.png")
        pygame.display.set_icon(icon)

        #objects2D - IMAGES
        self.stone = pygame.image.load("images/stone.png")
        self.flag = pygame.image.load("images/flag.png")
        self.not_flag = pygame.image.load("images/not_flag.png")
        self.num_1 = pygame.image.load("images/1.png")
        self.num_2 = pygame.image.load("images/2.png")
        self.num_3 = pygame.image.load("images/3.png")
        self.num_4 = pygame.image.load("images/4.png")
        self.num_5 = pygame.image.load("images/5.png")
        self.num_6 = pygame.image.load("images/6.png")
        self.num_7 = pygame.image.load("images/7.png")
        self.bomb = pygame.image.load("images/bomb.png")
        self.bomb_exp = pygame.image.load("images/bomb_exp.png")
        self.nothing = pygame.image.load("images/nothing.png")
        self.smiley = pygame.image.load("images/smiley.png")
        self.lost_smiley = pygame.image.load("images/lost.png")

        self.size_imgs = (30, 30)
        self.stone = pygame.transform.scale(self.stone, self.size_imgs)
        self.flag = pygame.transform.scale(self.flag, self.size_imgs)
        self.not_flag = pygame.transform.scale(self.not_flag, self.size_imgs)
        self.num_1 = pygame.transform.scale(self.num_1, self.size_imgs)
        self.num_2 = pygame.transform.scale(self.num_2, self.size_imgs)
        self.num_3 = pygame.transform.scale(self.num_3, self.size_imgs)
        self.num_4 = pygame.transform.scale(self.num_4, self.size_imgs)
        self.num_5 = pygame.transform.scale(self.num_5, self.size_imgs)
        self.num_6 = pygame.transform.scale(self.num_6, self.size_imgs)
        self.num_7 = pygame.transform.scale(self.num_7, self.size_imgs)
        self.bomb = pygame.transform.scale(self.bomb, self.size_imgs)
        self.bomb_exp = pygame.transform.scale(self.bomb_exp, self.size_imgs)
        self.nothing = pygame.transform.scale(self.nothing, self.size_imgs)
        self.smiley = pygame.transform.scale(self.smiley, (int(self.size_imgs[0] * 1.5), int(self.size_imgs[1] * 1.5)))
        self.lost_smiley = pygame.transform.scale(self.lost_smiley, (int(self.size_imgs[0] * 1.5), int(self.size_imgs[1] * 1.5)))


        #colors
        self.BLACK = (0, 0, 0)
        self.WHITE = (255, 255, 255)
        self.GRAY = (150, 150, 150)
        self.RED = (255, 0, 0)

        #offset for grid
        self.y_offset = 100

        self.objects = []
        self.field_x, self.field_y = 20, 20

        for elem in range(self.field_x * self.field_y):
            self.objects.append(object2D("nothing"))

        self.number_of_bombs = self.field_x * self.field_y // 10
        random_indices = random.sample(range(0, len(self.objects)), self.number_of_bombs)

        for index in random_indices:
            self.objects[index].bomb = True
            self.objects[index].object_name = "bomb"

        for index, object in enumerate(self.objects):
            if not object.bomb:
                nb_indexes = self.get_neighbours(object, index)

                num_bombs = 0
                for nb_index in nb_indexes:
                    if self.objects[nb_index].bomb == True:
                        num_bombs += 1

                self.objects[index].bomb_neighbours = num_bombs

                object.pic = self.stone

                # tile image for 0-7 neighbouring bombs (the image set has no tile for 8)
                num_imgs = [self.nothing, self.num_1, self.num_2, self.num_3,
                            self.num_4, self.num_5, self.num_6, self.num_7]
                if num_bombs < len(num_imgs):
                    object.pic_not_hidden = num_imgs[num_bombs]
                    if num_bombs > 0:
                        object.object_name = "neighbour"
Example #35
 def sample(self):
     return random.sample(self.buffer, self.batch_size)
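For context, a minimal replay-buffer sketch this method could belong to (the class shape is assumed, not taken from the original):

import random
from collections import deque

class ReplayBuffer:
    def __init__(self, capacity=10000, batch_size=32):
        self.buffer = deque(maxlen=capacity)
        self.batch_size = batch_size

    def push(self, transition):
        self.buffer.append(transition)

    def sample(self):
        # uniform sampling without replacement, as in the example above
        return random.sample(self.buffer, self.batch_size)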
Example #36
def run(
    local_rank: int,
    device: str,
    experiment_name: str,
    gpus: Optional[Union[int, List[int], str]] = None,
    dataset_root: str = "./dataset",
    log_dir: str = "./log",
    model: str = "fasterrcnn_resnet50_fpn",
    epochs: int = 13,
    batch_size: int = 4,
    lr: float = 0.01,
    download: bool = False,
    image_size: int = 256,
    resume_from: Optional[dict] = None,
) -> None:
    bbox_params = A.BboxParams(format="pascal_voc")
    train_transform = A.Compose(
        [A.HorizontalFlip(p=0.5), ToTensorV2()],
        bbox_params=bbox_params,
    )
    val_transform = A.Compose([ToTensorV2()], bbox_params=bbox_params)

    download = local_rank == 0 and download
    train_dataset = Dataset(root=dataset_root,
                            download=download,
                            image_set="train",
                            transforms=train_transform)
    val_dataset = Dataset(root=dataset_root,
                          download=download,
                          image_set="val",
                          transforms=val_transform)
    vis_dataset = Subset(val_dataset,
                         random.sample(range(len(val_dataset)), k=16))

    train_dataloader = idist.auto_dataloader(train_dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             collate_fn=collate_fn,
                                             num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                collate_fn=collate_fn,
                                num_workers=4)
    vis_dataloader = DataLoader(vis_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                collate_fn=collate_fn,
                                num_workers=4)

    model = idist.auto_model(model)
    scaler = GradScaler()
    optimizer = SGD(lr=lr, params=model.parameters())
    optimizer = idist.auto_optim(optimizer)
    scheduler = OneCycleLR(optimizer,
                           max_lr=lr,
                           total_steps=len(train_dataloader) * epochs)

    def update_model(engine, batch):
        model.train()
        images, targets = batch
        images = list(image.to(device) for image in images)
        targets = [{
            k: v.to(device)
            for k, v in t.items() if isinstance(v, torch.Tensor)
        } for t in targets]

        with torch.autocast(device, enabled=True):
            loss_dict = model(images, targets)
            loss = sum(loss for loss in loss_dict.values())

        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        loss_items = {k: v.item() for k, v in loss_dict.items()}
        loss_items["loss_average"] = loss.item() / 4

        return loss_items

    @torch.no_grad()
    def inference(engine, batch):
        model.eval()
        images, targets = batch
        images = list(image.to(device) for image in images)
        outputs = model(images)
        outputs = [{k: v.to("cpu") for k, v in t.items()} for t in outputs]
        return {
            "y_pred": outputs,
            "y": targets,
            "x": [i.cpu() for i in images]
        }

    trainer = Engine(update_model)
    evaluator = Engine(inference)
    visualizer = Engine(inference)

    aim_logger = AimLogger(
        repo=os.path.join(log_dir, "aim"),
        experiment=experiment_name,
    )

    CocoMetric(convert_to_coco_api(val_dataset)).attach(evaluator, "mAP")

    @trainer.on(Events.EPOCH_COMPLETED)
    @one_rank_only()
    def log_validation_results(engine):
        evaluator.run(val_dataloader)
        visualizer.run(vis_dataloader)

    @trainer.on(Events.ITERATION_COMPLETED)
    def step_scheduler(engine):
        scheduler.step()
        aim_logger.log_metrics({"lr": scheduler.get_last_lr()[0]},
                               step=engine.state.iteration)

    @visualizer.on(Events.EPOCH_STARTED)
    def reset_vis_images(engine):
        engine.state.model_outputs = []

    @visualizer.on(Events.ITERATION_COMPLETED)
    def add_vis_images(engine):
        engine.state.model_outputs.append(engine.state.output)

    @visualizer.on(Events.ITERATION_COMPLETED)
    def submit_vis_images(engine):
        aim_images = []
        for outputs in engine.state.model_outputs:
            for image, target, pred in zip(outputs["x"], outputs["y"],
                                           outputs["y_pred"]):
                image = (image * 255).byte()
                pred_labels = [
                    Dataset.class2name[label.item()]
                    for label in pred["labels"]
                ]
                pred_boxes = pred["boxes"].long()
                image = draw_bounding_boxes(image,
                                            pred_boxes,
                                            pred_labels,
                                            colors="red")

                target_labels = [
                    Dataset.class2name[label.item()]
                    for label in target["labels"]
                ]
                target_boxes = target["boxes"].long()
                image = draw_bounding_boxes(image,
                                            target_boxes,
                                            target_labels,
                                            colors="green")

                aim_images.append(aim.Image(image.numpy().transpose(
                    (1, 2, 0))))
        aim_logger.experiment.track(aim_images,
                                    name="vis",
                                    step=trainer.state.epoch)

    losses = [
        "loss_classifier", "loss_box_reg", "loss_objectness",
        "loss_rpn_box_reg", "loss_average"
    ]
    for loss_name in losses:
        # bind loss_name now; a late-binding closure would make every
        # RunningAverage read only the last loss in the list
        RunningAverage(output_transform=lambda x, name=loss_name: x[name]).attach(
            trainer, loss_name)
    ProgressBar().attach(trainer, losses)
    ProgressBar().attach(evaluator)

    objects_to_checkpoint = {
        "trainer": trainer,
        "model": model,
        "optimizer": optimizer,
        "lr_scheduler": scheduler,
        "scaler": scaler,
    }
    checkpoint = Checkpoint(
        to_save=objects_to_checkpoint,
        save_handler=DiskSaver(log_dir, require_empty=False),
        n_saved=3,
        score_name="mAP",
        global_step_transform=lambda *_: trainer.state.epoch,
    )
    evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpoint)
    if resume_from:
        Checkpoint.load_objects(objects_to_checkpoint, torch.load(resume_from))

    aim_logger.log_params({
        "lr": lr,
        "image_size": image_size,
        "batch_size": batch_size,
        "epochs": epochs,
    })
    aim_logger.attach_output_handler(trainer,
                                     event_name=Events.ITERATION_COMPLETED,
                                     tag="train",
                                     output_transform=lambda loss: loss)
    aim_logger.attach_output_handler(
        evaluator,
        event_name=Events.EPOCH_COMPLETED,
        tag="val",
        metric_names=["mAP"],
        global_step_transform=global_step_from_engine(
            trainer, Events.ITERATION_COMPLETED),
    )

    trainer.run(train_dataloader, max_epochs=epochs)
Example #37
print(random.randrange(10))

# Syntax: randrange(min, max) returns a random int between min and max (max exclusive)
print(random.randrange(10, 20))

# random.choice(list) randomly selects one element from the list
print(random.choice([1, 2, 3, 4, 5]))

# random.shuffle(list) shuffles the list's elements in place
desserts = ["ice cream", "pancakes", "brownies", "cookies", "candy"]
random.shuffle(desserts)
print(desserts)

# random.sample(list, k) draws k random elements from the list
print(random.sample([1, 2, 3, 4, 5], k=2))

# Exercise: arithmetic quiz game -> create random_1.py
# Exercise: typing game -> create typing.py
# Exercise: using the turtle graphics module -> create turtle_1.py

import sys

print(sys.getwindowsversion())
print("-------")
print(sys.copyright)
print("-------")
print(sys.version)

# force-quit the program
# sys.exit()
Example #38
def createName():
	# draw 10 distinct lowercase letters for a random name
	name = "".join(random.sample(string.ascii_lowercase, 10))
	return name
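For instance (output varies per call):

print(createName())  # e.g. "kqgfnbopce"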
Example #39
temp_xml = glob(r'F:\RM\DJI_ROCO\robo*\*\*')
saveBasePath = r"F:\RM\DJI_ROCO"

trainval_percent = 1
train_percent = 1

total_xml = []
for xml in temp_xml:
    if xml.endswith(".xml"):
        total_xml.append(xml)

num = len(total_xml)
indices = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)

print("train and val size", tv)
print("train size", tr)
ftrainval = open(os.path.join(saveBasePath, 'trainval.txt'), 'w')
ftest = open(os.path.join(saveBasePath, 'test.txt'), 'w')
ftrain = open(os.path.join(saveBasePath, 'train.txt'), 'w')
fval = open(os.path.join(saveBasePath, 'val.txt'), 'w')

for i in indices:
    name = total_xml[i][:-4] + '\n'
    if i in trainval:
        ftrainval.write(name)
        if i in train:
            ftrain.write(name)
Example #40
    "StormRider",
    "Gold",
    "Door",
    "Close",
]

# current working directory
data_path = getcwd()

train_percent = 0.9
Annotations_path = 'Annotations'
total_xml = os.listdir(Annotations_path)
num = len(total_xml)
indices = range(num)
num_train = int(num * train_percent)
train_list = random.sample(indices, num_train)
ftrain = open('train.txt', 'w')
fval = open('val.txt', 'w')
for i in indices:
    xml_path = os.path.join(getcwd(), Annotations_path, total_xml[i])
    xml_content = open(xml_path, 'r')
    write_content = xml_path.replace('Annotations',
                                     'JPGImages').replace('xml', 'jpg')
    tree = ET.parse(xml_path)
    root = tree.getroot()

    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
Example #41
def CoordPlot(image_dir, coord_file, nplot=None, 
              save_w=4000, save_h=3000, tile_size=100, 
              makegrid=True, random_select=0, plotfile='coordplot.png'):
    """
    Plot individual images as tiles according to provided coordinates
    """
    
    # read data
    coords = np.genfromtxt(coord_file, delimiter=',')
    filenames = sorted(os.listdir(image_dir))

    if nplot is None:
        nplot = len(filenames)

    # subsample if necessary
    if nplot < len(filenames):
        if random_select:
            smpl = random.sample(range(len(filenames)), nplot)
        else:
            smpl = [i for i in range(nplot)]
        filenames = [filenames[s] for s in smpl]
        coords = coords[smpl,:]
    
    # min-max tsne coordinate scaling
    for i in range(2):
        coords[:,i] = coords[:,i] - coords[:,i].min()
        coords[:,i] = coords[:,i] / coords[:,i].max()
    tx = coords[:,0]
    ty = coords[:,1]
    
    
    full_image = Image.new('RGB', (save_w, save_h))
    for fn, x, y in zip(filenames, tx, ty):
        
        img = Image.open(os.path.join(image_dir, fn)) 	# load raw png image
        
        npi = np.array(img, np.uint8)  # convert to a uint8 numpy array
        rsz = resize(npi, (tile_size, tile_size),						# resize, which converts to float64
                    mode='constant', 
                    anti_aliasing=True)
        npi = (2**8 - 1) * rsz / rsz.max() 					# rescale back up to original 8 bit res, with max
        npi = npi.astype(np.uint8) 						# recast as uint8
        img = Image.fromarray(npi) 						# convert back to image
        full_image.paste(img, (int((save_w - tile_size) * x), int((save_h - tile_size) * y)))

    full_image.save(plotfile)


    def Cloud2Grid(coords, gridw, gridh):
        """ convert points into a grid
        """
        nx = coords[:,0]
        ny = coords[:,1]
        nx = nx - nx.min()
        ny = ny - ny.min()
        nx = gridw * nx // nx.max()
        ny = gridh * ny // ny.max()
        nc = np.column_stack((nx, ny))
        return(nc)
               

    if makegrid:
        grid_assignment = Cloud2Grid(coords, gridw=save_w/tile_size, gridh=save_h/tile_size)
        grid_assignment = grid_assignment * tile_size

        grid_image = Image.new('RGB', (save_w, save_h))
        for img, grid_pos in zip(filenames, grid_assignment):
            x, y = grid_pos
            
            tile = Image.open(os.path.join(image_dir, img))
            
            tile = tile.resize((tile_size, tile_size), Image.LANCZOS)  # ANTIALIAS was renamed LANCZOS in Pillow
            grid_image.paste(tile, (int(x), int(y)))

        grid_image.save(plotfile[:len(plotfile)-4] + '_grid' + plotfile[-4:])
Example #42
def random_batch():
    return random.sample(list(zip(sentences[:10000],labels[:10000])),50)
Example #43
 def _get_pin(self,length=5):
     """ Return a numeric PIN with length digits """
     return str(random.sample(range(10**(length-1), 10**length), 1)[0])
Example #44
    região = random.choice(
        [r for lista in regiões for r in lista if r.habitável])
    return Nômade(nome=nome,
                  região=região,
                  população=POPULAÇÃO,
                  natalidade=random.random() * NATALIDADE,
                  mortalidade=random.random() * MORTALIDADE)


# Define the environment's initial conditions:
regiões = [[região_aleatória() for _ in range(NUM_REGIÕES)]
           for _ in range(NUM_REGIÕES)]

nômades = [
    tribo_aleatória(nome, regiões)
    for nome in random.sample(TRIBOS_BRASILEIRAS, NUM_TRIBOS)
]

for época in range(100):
    for lista in regiões:
        for região in lista:
            região.regenera()

    for tribo in nômades:
        if tribo.população > 0:
            tribo.subsistência()
            tribo.atualiza_população()
            if tribo.vai_migrar():
                região = tribo.seleciona(vizinhança(tribo.região))
                migração(tribo, região)
Example #45
os.chdir('D:/Documents/1-ENSAE/3A/S2/Data_storytelling/data_storytelling/website')

df = pd.read_csv('../Data/df_sub_sirene.csv', ',')

df = df[df["montant"]>0]
df = df[df["Géolocalisation de l'établissement"].notna()]

df["nom_beneficiaire"] = df['nom_beneficiaire'].str.replace('[^\w\s]','')
df["nom_beneficiaire"] = df['nom_beneficiaire'].apply(unidecode.unidecode)

df[['latitude','longitude']]  = df["Géolocalisation de l'établissement"].str.split(',', expand=True)


# Randomly draw 100 associations to plot on the map
noms_random = random.sample(list(df["nom_beneficiaire"]), 100)

map_noms = folium.Map(location=[48.86, 2.34], zoom_start=12)
mc = folium.plugins.MarkerCluster()

# keep only the entries that have a latitude and a longitude
for nom in noms_random:
    lat = df['latitude'][df["nom_beneficiaire"]==nom].iloc[0]
    lon = df['longitude'][df["nom_beneficiaire"]==nom].iloc[0]
    mc.add_child(folium.Marker(location=[lat,lon], popup=(folium.Popup(nom, parse_html=True))))
                   
map_noms.add_child(mc)
map_noms


map_noms.save('map_noms.html')
Example #46
"""
Compares two lists and outputs a list that has common elements
"""

import random as rn
a = rn.sample(range(1, 30), 12)
b = rn.sample(range(1, 30), 16)
newlist = []
for n1 in a:
    if n1 in b and n1 not in newlist:
        newlist.append(n1)
print(sorted(newlist))
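Since both draws already contain distinct values, a set intersection gives the same result more directly:

print(sorted(set(a) & set(b)))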
Example #47
motifs = [
    # seq.start, seq.end, shape, width, height, fgcolor, bgcolor
    [120, 130, ">", 34, 13, "black", "red", None],
    [145, 150, "<", 60, 5, "black", "green", None],
    [10, 30, "o", 100, 10, "black", "rgradient:blue", None],
    [20, 50, "[]", 100, 10, "blue", "pink", None],
    [55, 80, "()", 100, 10, "blue", "rgradient:purple", None],
    [160, 170, "^", 50, 14, "black", "yellow", None],
    [172, 180, "v", 20, 12, "black", "rgradient:orange", None],
    [185, 190, "o", 12, 12, "black", "brown", None],
    [198, 200, "<>", 15, 15, "black", "rgradient:gold", None],
    [210, 240, "compactseq", 2, 10, None, None, None],
    [300, 320, "seq", 10, 10, None, None, None],
    [340, 350, "<>", 15, 15, "black", "rgradient:black", None],
]
# Show usage help for SeqMotifFace
print(SeqMotifFace.__doc__)
 
# Create a random tree and add to each leaf a random set of motifs
# from the original set
t = Tree()
t.populate(40)
for l in t.iter_leaves():
    seq_motifs = sample(motifs, randint(2, len(motifs)))

    # `seq` is undefined in this fragment; the original ete3 example uses a long
    # amino-acid string, so a placeholder of sufficient length stands in here:
    seq = "A" * 350
    seqFace = SeqMotifFace(seq, seq_motifs, intermotif_format="line", seqtail_format="compactseq")
    seqFace.margin_bottom = 4
    f = l.add_face(seqFace, 0, "aligned")
    
t.show()
Exemple #48
 def aSmallSet(a, b):
     # return 100 distinct random integers drawn from [a, b)
     return random.sample(range(a, b), 100)
Exemple #49
    while True:
        n = p - m + 1
        for i in range(1, n // r + 1):
            insertion_sort(A, m + (i - 1) * r, m + i * r - 1)
            # collect the medians into the front part of A[m:p]
            inter_change(A, m + i - 1, m + (i - 1) * r + r // 2 - 1)

        j = SEL(A, m, m + n // r - 1, math.ceil(n / r / 2))  # median of medians
        inter_change(A, m, j)  # make it the partitioning element
        j = p
        j = partition(A, m, j)
        if j - m + 1 == k:
            return j
        elif j - m + 1 > k:
            p = j - 1
        else:
            k -= j - m + 1
            m = j + 1


# test code
if __name__ == '__main__':
    for i in range(3):
        A = random.sample(range(0, 100), 30)  # randomly generate a list of 30 distinct numbers
        A.insert(0, 'START')

        i = SEL(A, 1, 30, 8)

        print('Original array A:', A)
        print('The 8th smallest element is:', A[i])
Exemple #50
    return model


model = autoencoder(hidden_layer_size=154)

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit(x_train, x_train, epochs=10)
output = model.predict(x_test)

import matplotlib.pyplot as plt
import random
fig, ((ax1,ax2,ax3,ax4,ax5),(ax6,ax7,ax8,ax9,ax10)) = \
    plt.subplots(2,5,figsize=(20,7))

# pick five images at random.
random_images = random.sample(range(output.shape[0]), 5)

# draw the original (input) images on the top row.
for i, ax in enumerate([ax1, ax2, ax3, ax4, ax5]):
    ax.imshow(x_test[random_images[i]].reshape(28, 28), cmap='gray')
    if i == 0:
        ax.set_ylabel("INPUT", size=20)
    ax.grid(False)
    ax.set_xticks([])
    ax.set_yticks([])

# draw the images output by the autoencoder on the bottom row.
for i, ax in enumerate([ax6, ax7, ax8, ax9, ax10]):
    ax.imshow(output[random_images[i]].reshape(28, 28), cmap='gray')
    if i == 0:
        ax.set_ylabel("OUTPUT", size=20)
Exemple #51
def neutralizer(vasp_folder, atoms, cell, struct_list, file, struct_type,
                wrongs, bcodes):
    struct_num = 0
    max_trials = 1000  # maximum number of removal attempts
    max_structures = 50  # maximum number of structures to generate
    Num = [0, 0]  # number of Ni and S in structure
    remove_ids = []  # id of all undercoordinated atoms of type_to_remove
    remove_bcodes = []  # bcodes of all undercoordinated atoms of type_to_remove
    Num[1] = sum(at[2][0] for at in atoms)  # S ids are 1
    Num[0] = len(atoms) - Num[1]
    remove_num = np.abs(Num[1] - Num[0])  # atoms to be removed to retain charge neutrality

    if remove_num != 0:
        if Num[1] > Num[0]:
            type_to_remove = 1  # S should be removed
            print(remove_num, "S atoms will be removed from", end=" ")
        else:
            type_to_remove = 0  # Ni should be removed
            print(remove_num, "Ni atoms will be removed from", end=" ")
        i = 0
        for at in atoms:
            if at[2][0] == type_to_remove and at[3] != 132 and at[3] != 53:
                remove_ids.append([
                    at[0], at[3]
                ])  # all undercoordinate atom of type_to_remove are candidates
                remove_bcodes.append(at[3])  # store their bcodes
                i += 1
        print(i, "undercoordinated atoms")
        remove_bcodes_set = list(
            set(remove_bcodes))  # unique bcodes in removal candidates
        remove_ids_sorted = sorted(remove_ids, key=lambda x: x[1])
        # removed ids are grouped according to their bcodes
        remove_ids_grouped = [[key, [g[0] for g in group]]
                              for key, group in itertools.groupby(
                                  remove_ids_sorted, lambda x: x[1])]
        structures = 1
        trial = 0
        while trial <= max_trials and structures <= max_structures:
            trial += 1
            rm_ids = []
            i = [''] * remove_num
            # """select remove_num elements from remove_bcodes_set randomly. There are bcodes removed atoms will be
            # selected from. If we select atoms to remove directly, the choice will be biased towards bcodes with large
            # numbers"""
            flag = 1
            while flag == 1:
                flag = 0
                for ii in range(remove_num):
                    i[ii] = random.choice(remove_bcodes_set)
                # each bcode must not occur in i more often than it does in remove_bcodes
                for elem in i:
                    if i.count(elem) > remove_bcodes.count(elem):
                        flag = 1
            for b in remove_bcodes_set:
                n = i.count(b)
                if n > 0:
                    for sub_list in remove_ids_grouped:
                        if b == sub_list[0]:
                            temp = random.sample(sub_list[1], n)
                            rm_ids.append(temp)
            flat_list = [item for sublist in rm_ids for item in sublist]
            neut_atoms = [x for x in atoms if x[0] not in flat_list]
            neut_atoms = CN(neut_atoms, cell)
            zero_bond = any((x[3] == 0 or x[3] == 81) for x in neut_atoms)
            neut_code = struc_code(neut_atoms)
            if any(elem == neut_code for elem in struct_list) or zero_bond:
                pass
            else:
                flag = 0
                for id, atom in enumerate(neut_atoms):
                    bcodes.add(atom[3])
                    if (any(x > 2
                            for x in atom[2]) or atom[2][1] > 1) and flag == 0:
                        wrongs.append([file, len(struct_list), struct_type])
                        flag = 1
                        break
                struct_list.append(neut_code)
                struct_num += 1
                write_poscar(vasp_folder, file, len(struct_list), struct_num,
                             neut_atoms, cell, struct_type)
                structures += 1
                path = vasp_folder / str(len(struct_list)) / 'atoms.json'
                write_to_json(path, neut_atoms)
    else:
        neut_code = struc_code(atoms)
        neut_atoms = atoms
        flag = 0
        for id, atom in enumerate(neut_atoms):
            bcodes.add(atom[3])
            if (any(x > 2 for x in atom[2]) or atom[2][1] > 1) and flag == 0:
                wrongs.append([file, len(struct_list), struct_type])
                flag = 1
                break
        if any(elem == neut_code for elem in struct_list):
            pass
        else:
            struct_list.append(neut_code)
            struct_num += 1
            # print(struct_num, ": ", neut_code)
            write_poscar(vasp_folder, file, len(struct_list), struct_num,
                         neut_atoms, cell, struct_type)
            path = vasp_folder / str(len(struct_list)) / 'atoms.json'
            write_to_json(path, neut_atoms)

    return struct_list, neut_atoms, wrongs, bcodes
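# A self-contained sketch of the two-stage sampling used in neutralizer():
# choosing a group (bcode) first and then an atom inside it keeps every group
# equally likely, whereas sampling atoms directly favours large groups. The
# toy `groups` dict is illustrative only:
import random

groups = {"b1": list(range(8)), "b2": [8], "b3": [9, 10]}

def pick_unbiased(groups):
    bcode = random.choice(list(groups))  # every group equally likely
    return random.choice(groups[bcode])  # then one member of that group

def pick_biased(groups):
    all_ids = [i for ids in groups.values() for i in ids]
    return random.choice(all_ids)        # large groups dominate the draw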
Exemple #52
    def __cloneFilesSources(self):
        SOURCE_URL = FileManager.datasets['source']['url']
        TRAINING_URL = FileManager.datasets['training']['url']
        TESTING_URL = FileManager.datasets['testing']['url']

        # foreach directory in '/Lang' folder ...
        languagesExamplesCounter = {}
        for languageFolder in [f for f in os.scandir(SOURCE_URL) if f.is_dir()]:
            language = str(languageFolder.name).lower()
            languagesExamplesCounter[language] = 0
            # parse only selected languages
            if language in ConfigurationManager.getLanguages():
                # preparing empty {languageFolder.name} for each dataset
                if not (os.path.isdir(os.path.join(TRAINING_URL, language))):
                    os.mkdir(os.path.join(TRAINING_URL, language))
                if not (os.path.isdir(os.path.join(TESTING_URL, language))):
                    os.mkdir(os.path.join(TESTING_URL, language))

                # count example foreach language
                for exampleFolder in FileManager.getExamplesFolders(languageFolder.path):
                    for _ in FileManager.getExampleFiles(exampleFolder.path):
                        languagesExamplesCounter[language] += 1

                # print languages with examples counter less than {TRAINING_EXAMPLES_NUMBER}
                if languagesExamplesCounter[language] < TRAINING_EXAMPLES_NUMBER:
                    print(' >  [dataset] the total number of examples for the '
                          + language + ' is less than ' + str(TRAINING_EXAMPLES_NUMBER))
                    continue

                # pick TRAINING_EXAMPLES_NUMBER random example indexes for the training
                # set (exampleIndex below runs from 1 to the counter, so the range must
                # include its upper end)
                indexesOfTrainingExamples = random.sample(
                    range(1, languagesExamplesCounter[language] + 1),
                    TRAINING_EXAMPLES_NUMBER
                )

                # list all examples in {languageFolder.name} folder
                exampleIndex = 0
                for exampleFolder in FileManager.getExamplesFolders(languageFolder.path):
                    # list all examples versions in {exampleFolder.name} folder
                    for exampleVersionFile in FileManager.getExampleFiles(exampleFolder.path):
                        exampleIndex += 1
                        # move file to right dataset
                        if exampleIndex in indexesOfTrainingExamples:
                            DATASET_TYPE = TRAINING_URL
                        else:
                            DATASET_TYPE = TESTING_URL

                        # prepare destination folder
                        example = str(exampleVersionFile.name).lower()
                        exampleFolderUri = os.path.join(DATASET_TYPE, language, example)
                        os.mkdir(exampleFolderUri)
                        # copy the ORIGINAL source file content
                        originalFileUri = FileManager.getOriginalFileUrl(exampleFolderUri)
                        FileManager.createFile(originalFileUri)
                        shutil.copyfile(exampleVersionFile.path, originalFileUri)
                        # create the 'PARSED' version of the original file
                        parsedFileUri = FileManager.getParsedFileUrl(exampleFolderUri)
                        FileManager.createFile(parsedFileUri)
                        parser = Parser()
                        parser.initialize(originalFileUri, parsedFileUri)
                        parser.parse()

        return self
Exemple #53
 def sample(self, batch_size):  # sample a random batch of experiences from memory
     return random.sample(self.memory, batch_size)
Exemple #54
 def sample(self, batch_size=64):
     """Randomly sample a batch of experiences from memory."""
     return random.sample(self.memory, k=batch_size)  # use the argument, not self.batch_size
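# Context sketch: both `sample` methods above belong to experience-replay
# buffers. A minimal, self-contained version (an assumed structure, not the
# original classes) could look like this:
import random
from collections import deque

class ReplayMemory:
    def __init__(self, capacity=10000):
        self.memory = deque(maxlen=capacity)  # oldest experiences drop off

    def push(self, experience):
        self.memory.append(experience)

    def sample(self, batch_size=64):
        """Randomly sample a batch of experiences from memory."""
        # a deque supports len() and indexing, so random.sample accepts it
        return random.sample(self.memory, k=batch_size)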
def extract_2d_blocks_training(inputul, outputul, iteration, block_size_input,
                               block_size_output, dim_output):

    ## inputul -- shape (num_batch, width, height, num_imaging_modalities)
    ## outputul -- shape (num_batch, width, height, num_imaging_modalities)

    #### extracts up to 4 training patches per scan (2 random + 2 tumour-centred) from 2 scans ####

    lista = np.arange(inputul.shape[0])
    np.random.seed(iteration)
    np.random.shuffle(lista)
    current_index = lista[:2]
    semi_block_size_input = int(block_size_input // 2)
    semi_block_size_input2 = block_size_input - semi_block_size_input
    semi_block_size_output = int(block_size_output // 2)
    semi_block_size_output2 = block_size_output - semi_block_size_output
    list_blocks_input = []
    list_blocks_segmentation = []

    for _ in current_index:

        ##### iterating over 2D images #####
        ### pad current input and output scan to avoid problems ####

        current_input = inputul[_, ...]
        current_output = outputul[_, ...]

        #### shape of current scan ####
        current_shape = inputul[_, ...].shape

        #################################################################################################################
        #### random places being extracted -- most likely not containing any segmentation besides background class ######
        #################################################################################################################

        list_of_random_places1 = random.sample(
            range(semi_block_size_output,
                  current_shape[0] - semi_block_size_output2), 2)
        list_of_random_places2 = random.sample(
            range(semi_block_size_output,
                  current_shape[1] - semi_block_size_output2), 2)

        for __ in range(2):

            #### iterate over the 2 locations of the 3D cubes #####
            central_points = [
                list_of_random_places1[__], list_of_random_places2[__]
            ]

            current_input_padded, current_output_padded, central_points = check_and_add_zero_padding_2d_image(
                current_input, current_output, central_points,
                semi_block_size_input, semi_block_size_input2)

            list_blocks_segmentation.append(
                crop_2D_block(current_output_padded, central_points,
                              semi_block_size_output, semi_block_size_output2))
            list_blocks_input.append(
                crop_2D_block(current_input_padded, central_points,
                              semi_block_size_input, semi_block_size_input2))

        ###############################################################################################
        ##### specifically extract 2D patches with a non-background class #############################
        ###############################################################################################

        #########################
        ##### Class number 1 ####
        #########################

        indices_tumor = np.where(current_output[..., 0] == 1.0)
        indices_tumor_dim1 = indices_tumor[0]
        indices_tumor_dim2 = indices_tumor[1]

        if len(indices_tumor_dim1) == 0:

            print('tumor not found')

        else:

            list_of_random_places = random.sample(
                range(0, len(indices_tumor_dim1)), 2)

            for __ in range(2):

                central_points = [
                    indices_tumor_dim1[list_of_random_places[__]],
                    indices_tumor_dim2[list_of_random_places[__]]
                ]

                current_input_padded, current_output_padded, central_points = check_and_add_zero_padding_2d_image(
                    current_input, current_output, central_points,
                    semi_block_size_input, semi_block_size_input2)

                list_blocks_segmentation.append(
                    crop_2D_block(current_output_padded, central_points,
                                  semi_block_size_output,
                                  semi_block_size_output2))
                list_blocks_input.append(
                    crop_2D_block(current_input_padded, central_points,
                                  semi_block_size_input,
                                  semi_block_size_input2))

    list_blocks_input = np.stack(list_blocks_input)
    list_blocks_segmentation = np.stack(list_blocks_segmentation)

    shape_of_seg = list_blocks_segmentation.shape
    list_blocks_segmentation = list_blocks_segmentation.reshape((-1, 1))
    #list_blocks_segmentation = output_transformation(list_blocks_segmentation)
    #enc = preprocessing.OneHotEncoder()
    #enc.fit(list_blocks_segmentation)
    #list_blocks_segmentation = enc.transform(list_blocks_segmentation).toarray()
    #list_blocks_segmentation = list_blocks_segmentation.reshape((-1,1))
    list_blocks_segmentation = OneHotEncoder(list_blocks_segmentation)
    list_blocks_segmentation = list_blocks_segmentation.reshape(
        (shape_of_seg[0], shape_of_seg[1], shape_of_seg[2], dim_output))

    return list_blocks_input, list_blocks_segmentation
Exemple #56
async def monitor(session):
    ret = subprocess.getoutput('top -b -n1')
    fn = ''.join(random.sample(string.ascii_letters + string.digits,
                               16)) + '.png'
    draw_image(ret, os.path.join('..', 'data', 'image', fn))
    await session.send("[CQ:image,file=" + fn + "]")
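# A hedged aside on the file name above: random.sample draws 16 *distinct*
# characters, which slightly shrinks the name space; sampling with
# replacement is the more common idiom for throwaway names. Either line
# below is equivalent in spirit to the original:
import random
import secrets
import string

fn = ''.join(random.choices(string.ascii_letters + string.digits, k=16)) + '.png'
fn = secrets.token_hex(8) + '.png'  # 16 hex chars, cryptographically random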
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import pickle
from matplotlib.colors import ListedColormap

# load values and pdf
values = np.array(pd.read_pickle("X.p")).T
density = np.array(pd.read_pickle("Y.p"))

plot = True

if plot:
    # take k random samples to make runtime manageable
    k = 10000
    indices = random.sample(range(len(density)), k)
    values = values[:,indices]
    density = density[indices]
    density /= max(density)

    
    # Make custom colormap so alpha value is 0 at min of range
    cmap = matplotlib.colormaps["Greys"]  # plt.cm.get_cmap was removed in Matplotlib 3.9
    alpha_cmap = cmap(np.arange(cmap.N))
    alpha_cmap[:,-1] = np.linspace(0, 1, cmap.N)
    alpha_cmap = ListedColormap(alpha_cmap)

    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 16), subplot_kw=dict(projection='3d'))
    im = axes[0].scatter(*values, c=[d**2 for d in density], cmap=alpha_cmap, marker='.')
    fig.colorbar(im, ax=axes[0])
    axes[0].set_title("Before KDE")
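# A hedged aside: the subsampling above mixes random.sample with NumPy fancy
# indexing; assuming an equivalent uniform draw is all that is wanted, it can
# stay inside NumPy:
import numpy as np

rng = np.random.default_rng()
indices = rng.choice(len(density), size=k, replace=False)  # k distinct indices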
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        vm.start()

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = utils.count_cpus()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpulist = random.choice(utils.cpu_online_map())
        elif cpulist == "x-y":
            # By default, the emulator is pinned to all cpus, and the element
            # 'cputune/emulatorpin' may not exist in the VM's XML.
            # libvirt will do nothing if we pin the emulator to the same
            # cpus, which means the VM's XML would still lack that element.
            # So for testing, we should avoid that value (0-$cpu_max).
            if cpu_max < 2:
                cpulist = "0-0"
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpulist = ','.join(random.sample(utils.cpu_online_map(), 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            raise error.TestNAError("CPU-list=%s is not recognized."
                                    % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
def get_shuffled_names():
    all_name = ["".join(cs) for cs in itertools.product(*chrsets)]
    return random.sample(all_name, len(all_name))
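# Usage note sketch: random.sample(seq, len(seq)) returns a shuffled *copy*;
# random.shuffle reorders the list in place instead:
import random

names = ["aa", "ab", "ba", "bb"]
shuffled_copy = random.sample(names, len(names))  # `names` stays intact
random.shuffle(names)                             # reorders `names` itself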
Exemple #60
def generateInitialSet(choices, k):
    # k distinct elements drawn uniformly at random from choices
    return random.sample(choices, k)