Example 1
from random import randint, uniform as uf
from fractions import Fraction as fr


def expression():  # randomly generate an arithmetic expression
    symbol = ['+', '-', '*', '/']
    brackets = ['(', '', ')']
    # randomly pick the three operators
    s1 = randint(0, 2)
    s2 = randint(0, 3)
    s3 = randint(0, 3)
    # randomly place brackets: bt* picks a left bracket (or none), br* a right bracket (or none)
    bt1 = randint(0, 1)
    bt2 = randint(0, 1)
    bt3 = randint(0, 1)
    br1 = randint(1, 2)
    br2 = randint(1, 2)
    br3 = randint(1, 2)
    if bt1 == 0:
        bt2 = 1
        bt3 = 1
        if br1 == 2:
            br2 = 1
            br3 = 1
        else:
            br2 = 2
            br3 = 1
    else:
        if bt2 == 0:
            bt3 = 1
            br1 = 1
            if br2 == 2:
                br3 = 1
            else:
                br3 = 2
        else:
            bt3 = 0
            br1 = 1
            br2 = 1
            br3 = 2
    num1 = uf(0, 1)
    # cap the numerator/denominator of the random fractions at 10
    num1 = fr(num1).limit_denominator(10)
    num2 = uf(0, 1)
    num2 = fr(num2).limit_denominator(10)
    num3 = randint(1, 10)
    num4 = randint(1, 10)
    # assemble the random expression
    ran_exp = brackets[bt1] + str(num1) + symbol[s1] + brackets[bt2] + str(num2) + brackets[br1] + \
              symbol[s2] + brackets[bt3] + str(num3) + brackets[br2] + symbol[s3] + str(num4) + brackets[br3]
    return ran_exp
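A minimal driver for this generator (not part of the original snippet): evaluate the returned string with Python's own parser and skip the occasional division by zero. eval() is tolerable here only because the string is assembled from known tokens.

if __name__ == '__main__':  # hypothetical driver
    for _ in range(5):
        exp = expression()
        try:
            print(exp, '=', eval(exp))  # tokens are machine-generated, so eval is acceptable here
        except ZeroDivisionError:
            print(exp, 'divides by zero, skipped')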
Example 2
    def test(self, state_dict=None):
        if state_dict is not None:
            print('load')
            self.policy_net.load_state(state_dict)
        for _ in range(50):  # run 50 evaluation episodes from randomized initial states
            self.rocket.set_state(uf(-100, 100), uf(650, 750), uf(-2.5, 2.5),
                                  uf(-20, 20), uf(-120, -80), uf(-0.6, 0.6))
            while not self.rocket.dead:
                state_0 = torch.tensor(self.rocket.get_state(),
                                       device=self.device).float()
                # select next action
                with torch.no_grad():
                    out = self.policy_net(state_0)
                action_idx = torch.max(out, dim=0)[1].item()

                # execute action (state_0, action, reward, state_1, terminal)
                action = ACTION[action_idx]  # to feed the rocket
                transition = self.rocket.update(*action)
                self.render.frame(self.rocket, transition[2], realtime=True)
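In the evaluation loop above, torch.max(out, dim=0)[1].item() extracts the index of the largest Q-value; torch.argmax expresses the same selection more directly. A tiny standalone check (the tensor values are made up):

import torch

out = torch.tensor([0.1, 0.7, 0.2])    # mock Q-values for three actions
action_idx = torch.argmax(out).item()  # same result as torch.max(out, dim=0)[1].item()
print(action_idx)                      # -> 1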
Example 3
from random import uniform as uf


def random_longitude() -> str:
    return '%.7f' % uf(-180, 180)
Example 4
from random import uniform as uf


def random_latitude() -> str:
    return '%.7f' % uf(-90, 90)
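Taken together, the two helpers above produce a plausible random coordinate pair; '%.7f' keeps roughly centimetre-level precision. A minimal usage sketch (random_point is an invented name, and the printed pair is only illustrative):

def random_point():
    # hypothetical helper combining the two generators above
    return random_latitude(), random_longitude()

print(random_point())  # e.g. ('-23.5505199', '-46.6333094')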
Example 5
import datetime
from random import randrange as rg, uniform as uf  # inferred imports: rg(30) takes one argument, so rg is randrange


def mkjson():
    global now_time
    global data
    global time_mac
    global start_id
    global data_s

    temp = {}
    stime = datetime.datetime.strptime(data['time'], '%a %b %d %H:%M:%S %Y')  # previous timestamp
    time = stime + datetime.timedelta(seconds=60)  # advance one minute
    now_time = time
    etime = time.strftime('%a %b %d %H:%M:%S %Y')
    hour = time.hour
    mac_count = 0  # total number of MACs
    mac_incount = len(time_mac.keys())  # customers in the shop from the previous state
    mac_outcount = 0  # MACs that never entered the shop
    mac_list = []  # new data list
    mac0 = '00:00:00:00'
    mac1 = 0
    mac2 = 0
    mac_tmp = mac_incount
    if 6 <= hour <= 18:
        mac_count = rg(20, 30)
        mac_tmp = rg(8, 12)  # random number of customers entering
    elif 18 < hour <= 24:
        mac_count = rg(18, 28)
        mac_tmp = rg(6, 10)
    elif 0 <= hour < 6:
        mac_count = rg(12, 24)
        mac_tmp = rg(4, 8)
    if len(time_mac.keys()) != 0:
        for x in list(time_mac.keys()):  # copy the keys: expired entries are popped mid-loop
            if (time_mac[x] - time).total_seconds() <= 0:  # expired
                time_mac.pop(x)
                mac_incount -= 1
            else:  # keep unexpired entries in the new list
                s = {}
                s['range'] = rg(30)
                s['mac'] = x
                s['rssi'] = '-' + str(rg(40, 100))
                mac_list.append(s)
    mac_outcount = mac_count - mac_incount
    if mac_tmp - mac_incount > 4:  # random entry count exceeds the actual count by more than 4
        for x in range(rg(mac_tmp - mac_incount)):  # top up the in-shop count
            mac = ''
            while True:
                mac1 = str(rg(99)).zfill(2)
                mac2 = str(rg(99)).zfill(2)
                mac = mac0 + ':' + mac1 + ':' + mac2
                if mac not in time_mac:
                    break
            t = rg(3, 20)  # how long this MAC stays in the shop
            time_mac[mac] = time + datetime.timedelta(minutes=t)  # expiry time for this MAC
            s = {}
            s['mac'] = mac
            s['range'] = rg(30)
            s['rssi'] = '-' + str(rg(40, 100))
            mac_list.append(s)
    else:
        mac_tmp = mac_incount
    mac_outcount = mac_count - mac_tmp
    for x in range(mac_outcount):  # customers passing by without entering
        mac = ''
        while True:
            mac1 = str(rg(99)).zfill(2)
            mac2 = str(rg(99)).zfill(2)
            mac = mac0 + ':' + mac1 + ':' + mac2
            if mac not in time_mac:
                break
        s = {}
        s['mac'] = mac
        s['range'] = rg(30, 100)
        s['rssi'] = '-' + str(rg(40, 100))
        mac_list.append(s)

    temp['time'] = etime
    temp['data'] = mac_list[:]
    # note: the original swapped the two ranges; 125.29 can only be a longitude, 43.86 a latitude
    temp['lat'] = str(round(uf(43.860149, 43.860461), 6))  # latitude
    temp['lon'] = str(round(uf(125.293995, 125.300265), 6))  # longitude
    temp['location'] = [temp['lon'], temp['lat']]
    time = datetime.datetime.strptime(temp['time'], '%a %b %d %H:%M:%S %Y')
    temp['@timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S.000Z')  # storage timestamp
    index = 'sou'
    doc_type = 'tz_{number}'.format(number=start_id)
    _id = time.strftime('%a %b %d %H:%M:%S %Y')
    dic = {'_id': _id, '_index': index, '_type': doc_type, '_source': temp}
    data_s.append(dic)
    data = temp
Example 6
from random import uniform as uf
r = 0.5                # radius of a circle inscribed in the unit square
caiu_dentro = 0        # samples that fell inside the circle
caiu_fora = 0          # samples that fell outside
picalculado = float()  # the estimate of pi
piReal = 3.141         # low-precision reference value; math.pi would be fairer
testes = 50000         # number of random samples

for x in range(0, testes):
    xAleatorio = uf(-0.5, 0.5)
    yAleatorio = uf(-0.5, 0.5)

    soma = (xAleatorio**2 + yAleatorio**2)**0.5  # distance of the point from the origin

    if soma <= r:
        caiu_dentro += 1

    else:
        caiu_fora += 1
print('\n')
print('The point was within the circle ' + str(caiu_dentro) + ' times.')
print('The point was outside the circle ' + str(caiu_fora) + ' times.')

picalculado = caiu_dentro * 4 / testes  # inside/total approximates the area ratio pi/4

print('\n')
print('The estimated pi was ' + str(picalculado))
print('\n')
print('And that corresponds to an error of ' + str(picalculado - piReal))
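Because piReal is hard-coded to 3.141, the reported error is biased by the reference itself; math.pi gives a fairer comparison. The same estimator condensed into a reusable function (the name estimate_pi is invented here):

import math
from random import uniform as uf

def estimate_pi(n):
    # hypothetical wrapper around the loop above: count samples inside the r=0.5 circle
    inside = sum((uf(-0.5, 0.5)**2 + uf(-0.5, 0.5)**2)**0.5 <= 0.5 for _ in range(n))
    return 4 * inside / n

est = estimate_pi(50000)
print('estimate:', est, 'error vs math.pi:', est - math.pi)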
Example 7
import datetime
from random import randrange as rg, uniform as uf  # inferred imports: rg(30) takes one argument, so rg is randrange


def mkjson():

    global now_time
    global data
    global time_mac
    global start_id
    global data_s

    temp = {}
    stime = datetime.datetime.strptime(data['time'],
                                       '%a %b %d %H:%M:%S %Y')  # previous timestamp
    time = stime + datetime.timedelta(seconds=60)  # advance one minute
    now_time = time
    etime = time.strftime('%a %b %d %H:%M:%S %Y')
    hour = time.hour
    mac_count = 0  # total number of MACs
    mac_incount = len(time_mac.keys())  # customers in the shop from the previous state
    mac_outcount = 0  # MACs that never entered the shop
    mac_list = []  # new data list
    mac0 = '00:00:00:00'
    mac1 = 0
    mac2 = 0
    mac_tmp = mac_incount
    if hour >= 6 and hour <= 18:
        mac_count = rg(80, 120)
        mac_tmp = rg(8, 18)  # random number of customers entering
    elif hour > 18 and hour <= 24:
        mac_count = rg(70, 110)
        mac_tmp = rg(6, 15)
    elif hour >= 0 and hour < 6:
        mac_count = rg(40, 80)
        mac_tmp = rg(4, 14)
    if len(time_mac.keys()) != 0:
        for x in list(time_mac.keys()):  # copy the keys: expired entries are popped mid-loop
            if (time_mac[x] - time).total_seconds() <= 0:  # expired
                time_mac.pop(x)
                mac_incount -= 1
            else:  # keep unexpired entries in the new list
                s = {}
                s['range'] = rg(30)
                s['mac'] = x
                s['rssi'] = '-' + str(rg(40, 100))
                mac_list.append(s)
    mac_outcount = mac_count - mac_incount
    if mac_tmp - mac_incount > 4:  # random entry count exceeds the actual count by more than 4
        for x in range(rg(mac_tmp - mac_incount)):  # top up the in-shop count
            mac = ''
            while True:
                mac1 = str(rg(99)).zfill(2)
                mac2 = str(rg(99)).zfill(2)
                mac = mac0 + ':' + mac1 + ':' + mac2
                if mac not in time_mac:
                    break
            t = rg(3, 20)  # how long this MAC stays in the shop
            time_mac[mac] = time + datetime.timedelta(minutes=t)  # expiry time for this MAC
            s = {}
            s['mac'] = mac
            s['range'] = rg(30)
            s['rssi'] = '-' + str(rg(40, 100))
            mac_list.append(s)
    else:
        mac_tmp = mac_incount
    mac_outcount = mac_count - mac_tmp
    for x in range(mac_outcount):  # customers passing by without entering
        mac = ''
        while True:
            mac1 = str(rg(99)).zfill(2)
            mac2 = str(rg(99)).zfill(2)
            mac = mac0 + ':' + mac1 + ':' + mac2
            if mac not in time_mac:
                break
        s = {}
        s['mac'] = mac
        s['range'] = rg(30, 100)
        s['rssi'] = '-' + str(rg(40, 100))
        mac_list.append(s)

    temp['time'] = etime
    temp['data'] = mac_list[:]
    # note: the original swapped the two ranges; 125.29 can only be a longitude, 43.86 a latitude
    temp['lat'] = str(round(uf(43.860149, 43.860461), 6))  # latitude
    temp['lon'] = str(round(uf(125.293995, 125.300265), 6))  # longitude
    temp['location'] = [temp['lon'], temp['lat']]
    time = datetime.datetime.strptime(temp['time'], '%a %b %d %H:%M:%S %Y')
    temp['@timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S.000Z')  # storage timestamp
    index = 'test'
    doc_type = 'tz_{number}'.format(number=start_id)
    _id = time.strftime('%a %b %d %H:%M:%S %Y')
    dic = {'_id': _id, '_index': index, '_type': doc_type, '_source': temp}
    data_s.append(dic)
    data = temp
    if not es.indices.exists(index=index):
        es.indices.create(index=index)
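Example 7 only appends documents to data_s and makes sure the index exists; presumably the surrounding script flushes them in one go. A sketch of that missing step, assuming es is a pre-7.x elasticsearch.Elasticsearch client (the '_type' field implies an older cluster) and local connection settings:

from elasticsearch import Elasticsearch, helpers

es = Elasticsearch(['http://localhost:9200'])  # assumed connection settings

# each dict in data_s already carries _id/_index/_type/_source, which is the
# action format helpers.bulk() consumes directly
helpers.bulk(es, data_s)
data_s.clear()  # start the next batch empty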
Example 8
    def iteration(self, m):
        # keep stats
        losses = []
        scores = []

        for game in range(m):
            eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(
                -1. * game / EPS_DECAY)

            # randomly set state_0, reset score
            game_duration = 0
            score = 0
            loss = 0
            self.rocket.set_state(uf(-100, 100), uf(650, 750), uf(-2.5, 2.5),
                                  uf(-20, 20), uf(-120, -80), uf(-0.6, 0.6))
            state_0 = torch.tensor(self.rocket.get_state(),
                                   device=self.device).float()
            while not self.rocket.dead:
                game_duration += 1
                # select next action
                if random.random() < eps_threshold:
                    action_idx = random.randint(0, len(ACTION) - 1)
                else:
                    with torch.no_grad():
                        out = self.policy_net(state_0)
                    action_idx = torch.max(out, dim=0)[1].item()

                # execute action (state_0, action, reward, state_1, terminal)
                action = ACTION[action_idx]  # to feed the rocket
                transition = self.rocket.update(*action)
                score += transition[2]

                if self.vis:
                    if game % 50 == 0:
                        self.render.frame(self.rocket,
                                          transition[2],
                                          realtime=True)
                    else:
                        self.render.clear(game)

                # keep transition in memory (state_0, action, reward, state_1, terminal)
                self.memory.push(transition, action_idx)

                # train minibatch
                loss += self.train()
                state_0 = torch.tensor(transition[3],
                                       device=self.device).float()

            # stats
            losses.append(loss / game_duration)
            scores.append(score)

            mean_score = sum(scores[-100:]) / len(scores[-100:])
            print(
                'game: {} score: {:.2f} mean: {:.2f} loss: {:.3f} eps: {:.2f}'.
                format(game, score, mean_score, losses[-1], eps_threshold))

            # save state, update target net weights
            if (game + 1) % 100 == 0:
                dump = {'scores': scores, 'losses': losses}
                self.policy_net.save_state()
                # pickle.dump(dump, open('runs/' + self.date + '_stats.pkl', 'wb'))
            if game % TARGET_UPDATE == 0:
                self.target_net.load_state(
                    deepcopy(self.policy_net.get_state()))
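The exponentially decaying epsilon-greedy threshold above is easier to judge with concrete numbers. A standalone sketch; the EPS_START/EPS_END/EPS_DECAY values here are illustrative only, since the original constants are defined elsewhere:

import math

EPS_START, EPS_END, EPS_DECAY = 0.9, 0.05, 200  # illustrative values only

for game in (0, 100, 500, 1000):
    eps = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * game / EPS_DECAY)
    print(f'game {game:4d}: eps = {eps:.3f}')  # decays from 0.9 toward 0.05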