def click_boss_notice(self, mode, queue):
    """
    Click the notice that a super demon king (超鬼王) has been discovered.
    :param mode: detection mode (1: single reference hash, 2: two reference hashes)
    :param queue: queue object used to control the running state
    :return:
    """
    num = 1
    while True:
        if not queue.empty():
            self._running = queue.get()
        if self._running == 1:
            catch_img = ImageGrab.grab(self.bossobj.scan_area)
            img_hash = get_hash(catch_img)
            num += 1
            r1 = False
            if mode == 1:
                r1, r2 = hamming(img_hash, self.bossobj.hash[0], 15)
                if self.debug:
                    logging('%s boss %s:%s:%s' % (num, img_hash, r1, r2))
            elif mode == 2:
                r11, r12 = hamming(img_hash, self.bossobj.hash[0], 15)
                r21, r22 = hamming(img_hash, self.bossobj.hash[1], 15)
                if r11 or r21:
                    r1 = True
            if r1:
                queue.put(0)
                self.bossobj.custom_click(0)
                return
            time.sleep(1)
        elif self._running == 0:
            return
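# The phase methods in this file rely on two image helpers that are defined elsewhere in
# the project: get_hash(), which reduces a screenshot to a perceptual hash, and hamming(),
# which compares a hash against a reference and returns (match, distance), used above as
# (r1, r2). The following module-level sketch is only an assumption inferred from how they
# are called here (an average hash over a small grayscale thumbnail plus a Hamming-distance
# threshold); the real implementations may differ.

from PIL import Image


def get_hash(img, hash_size=8):
    # Assumed: shrink to hash_size x hash_size, convert to grayscale, and emit one bit per
    # pixel depending on whether it is brighter than the mean.
    small = img.convert('L').resize((hash_size, hash_size), Image.LANCZOS)
    pixels = list(small.getdata())
    avg = sum(pixels) / float(len(pixels))
    return ''.join('1' if p > avg else '0' for p in pixels)


def hamming(img_hash, ref_hash, threshold):
    # Assumed: count differing bits; return (within-threshold flag, raw distance).
    distance = sum(a != b for a, b in zip(img_hash, ref_hash))
    return distance <= threshold, distance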
def settle_phase(self, queue):
    """
    Battle settlement phase.
    :param queue: queue object used to control the running state
    :return:
    """
    battle_button_is_appear = False
    for battle_round in range(0, 20):
        if not queue.empty():
            self._running = queue.get()
        if self._running == 1:
            # The settlement screen is considered reached once the battle-data button appears.
            catch_img = ImageGrab.grab(self.battledataobj.scan_area)
            img_hash = get_hash(catch_img)
            r1, r2 = hamming(img_hash, self.battledataobj.hash, 40)
            if self.debug:
                logging('[%s]round%s %s:%s:%s' % ('settle_phase', battle_round, img_hash, r1, r2))
            if r1:
                battle_button_is_appear = True
                # Move the mouse to a random position near the right edge and click 1-3 times.
                self.rewardobj.area_click(int(random.uniform(1, 3)))
            else:
                if battle_button_is_appear:
                    time.sleep(2)
                    break
                else:
                    self.special_settle_phase()
                    break
        elif self._running == 0:
            break
        time.sleep(round(random.uniform(0.5, 1.0), 2))
def special_settle_phase(self):
    """
    Settlement flow for battles without a battle-data button.
    :return:
    """
    for battle_round in range(0, 10):
        # Once the camera pan ends and the settlement daruma appears,
        # the settlement screen is considered reached.
        catch_img = ImageGrab.grab(self.rewardobj.scan_area)
        img_hash = get_hash(catch_img)
        r1, r2 = hamming(img_hash, self.rewardobj.hash, 40)
        if self.debug:
            logging('[%s]round%s %s:%s:%s' % ('special_settle_phase1', battle_round, img_hash, r1, r2))
        if r1:
            break
        else:
            # Move the mouse to a random position near the right edge and click 1-3 times.
            self.rewardobj.area_click(int(random.uniform(1, 3)))
        time.sleep(round(random.uniform(0.5, 1.0), 2))
    for battle_round in range(0, 10):
        catch_img = ImageGrab.grab(self.rewardobj.scan_area)
        img_hash = get_hash(catch_img)
        # Settlement is considered finished once the settlement daruma disappears.
        r1, r2 = hamming(img_hash, self.rewardobj.hash, 40)
        if self.debug:
            logging('[%s]round%s %s:%s:%s' % ('special_settle_phase2', battle_round, img_hash, r1, r2))
        if not r1:
            break
        else:
            # Keep clicking 1-3 times at random right-edge positions until settlement ends.
            self.rewardobj.area_click(int(random.uniform(1, 3)))
        time.sleep(round(random.uniform(0.5, 1.0), 2))
def train_batch(input_batch, target):
    model.train()
    input1, input2 = input_batch
    sim = target
    output0, output1, output = model(input1, input2)
    loss = criterion(output, sim)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    loss = loss / input1.size()[0]
    loss_sum = loss.data.sum()
    inf = float("inf")
    if loss_sum == inf or loss_sum == -inf:
        logging("WARNING: received an inf loss, setting loss value to 0")
        loss_value = 0
    else:
        loss_value = loss.data[0]
    return loss_value
def validation(input_batch, target):
    model.eval()
    input1, input2 = input_batch
    sim = target
    output0, output1, output = model(input1, input2)
    # print torch.cat((output, target), 1)
    loss = criterion(output, sim)
    loss = loss / input1.size()[0]
    loss_sum = loss.data.sum()
    inf = float("inf")
    if loss_sum == inf or loss_sum == -inf:
        logging("WARNING: received an inf loss, setting loss value to 0")
        loss_value = 0
    else:
        loss_value = loss.data[0]
    # accuracy = 1.0 * nCorrect / nFrame
    logging('Test loss = {}, accuracy = {}'.format(loss_value, 0))
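# train_batch() and validation() call model(input1, input2) and expect three return values:
# the two embeddings plus a per-sample output that is fed to nn.HingeEmbeddingLoss (whose
# targets are +1 for similar pairs and -1 for dissimilar ones). The Siamese class itself is
# not defined in this file; the sketch below only illustrates a network with that interface
# and is an assumption, not the project's actual architecture.

import torch.nn as nn
import torch.nn.functional as F


class SiameseSketch(nn.Module):
    def __init__(self, embedding_dim=128):
        super(SiameseSketch, self).__init__()
        # Shared encoder applied to both inputs; the weights are tied by construction.
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d((4, 4)),
        )
        self.fc = nn.Linear(32 * 4 * 4, embedding_dim)

    def forward_once(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)

    def forward(self, input1, input2):
        output0 = self.forward_once(input1)
        output1 = self.forward_once(input2)
        # One distance per pair, matching what HingeEmbeddingLoss consumes.
        output = F.pairwise_distance(output0, output1)
        return output0, output1, output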
def wait_fight_finish_phase(self, mode, clear_time, queue):
    """
    Wait for the fight to finish.
    :param mode: team mode
    :param clear_time: average clear time in seconds
    :param queue: queue object used to control the running state
    :return:
    """
    if mode == '乘客':  # passenger
        clear_time = clear_time - 3
    t = 0
    while t < clear_time:
        if not queue.empty():
            self._running = queue.get()
        if self._running == 1:
            time.sleep(1)
            t = t + 1
        elif self._running == 0:
            break
    while True:
        if not queue.empty():
            self._running = queue.get()
        if self._running == 1:
            catch_img = ImageGrab.grab(self.exitobj.scan_area)
            img_hash = get_hash(catch_img)
            # The fight is considered over once the "exit battle" button disappears.
            r1, r2 = hamming(img_hash, self.exitobj.hash, 30)
            if self.debug:
                logging('[%s]%s %s:%s:%s' % ('wait_fight_finish_phase', mode, img_hash, r1, r2))
            if not r1:
                break
        elif self._running == 0:
            return
        time.sleep(0.5)
def form_team_phase(self, mode, fight_num, queue):
    """
    Team-forming phase controller.
    :param mode: team mode ('单刷' solo / '司机' driver / '乘客' passenger)
    :param fight_num: number of players in the team
    :param queue: queue object used to control the running state
    :return:
    """
    if mode == '单刷':  # solo
        # Move to the challenge button and click; each move adds a random offset inside the button area.
        if self.debug:
            logging('[%s]%s' % ('form_team_phase', mode))
        self.singleobj.area_click()
        return
    elif mode == '司机':  # driver
        # Check whether the team-forming screen has been entered.
        while True:
            if not queue.empty():
                self._running = queue.get()
            if self._running == 1:
                catch_img = ImageGrab.grab(self.formteamobj.scan_area)
                img_hash = get_hash(catch_img)
                r1, r2 = hamming(img_hash, self.formteamobj.hash, 30)
                if self.debug:
                    logging('[%s]%s %s:%s:%s' % ('form_team_phase1', mode, img_hash, r1, r2))
                if r1:
                    break
                time.sleep(0.5)
            elif self._running == 0:
                return
        # Check the team size; only press "start fight" once it matches the expected number.
        while True:
            if not queue.empty():
                self._running = queue.get()
            if self._running == 1:
                num = 0
                for i in range(1, 3):
                    catch_img = ImageGrab.grab(self.mutipleobj.scan_area[i])
                    img_hash = get_hash(catch_img)
                    r1, r2 = hamming(img_hash, self.mutipleobj.hashes[i], 10)
                    if self.debug:
                        logging('[%s]%s passenger%s %s:%s:%s' % ('form_team_phase2', mode, num, img_hash, r1, r2))
                    if not r1:
                        num = num + 1
                if num == fight_num - 1:
                    break
                time.sleep(0.5)
            elif self._running == 0:
                return
        # Move to the "start fight" button and click; each move adds a random offset inside the button area.
        self.mutipleobj.area_click(2)
    elif mode == '乘客':  # passenger
        # Check whether the battle has started.
        while True:
            if not queue.empty():
                self._running = queue.get()
            if self._running == 1:
                catch_img = ImageGrab.grab(self.exitobj.scan_area)
                img_hash = get_hash(catch_img)
                r1, r2 = hamming(img_hash, self.exitobj.hash, 30)
                if self.debug:
                    logging('[%s]%s %s:%s:%s' % ('form_team_phase', mode, img_hash, r1, r2))
                if r1:
                    break
                time.sleep(0.5)
            elif self._running == 0:
                return
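# Each phase method above polls `queue` for a running flag: 1 keeps the phase alive, 0 asks
# it to stop (click_boss_notice also puts 0 back once it has clicked the notice). A minimal
# sketch of driving one phase from a controller; `bot` is a hypothetical instance of the
# class these methods belong to, not something defined in this file.

import threading
from queue import Queue

control = Queue()
control.put(1)  # start in the "running" state

worker = threading.Thread(target=bot.wait_fight_finish_phase,
                          args=('单刷', 60, control))
worker.start()

# ... later, ask the running phase to stop:
control.put(0)
worker.join()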
    **kwargs)

manualSeed = 9302  # random.randint(1, 10000)  # fix seed
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)

g_config = get_config()
model_dir = args.model_dir
setupLogger(os.path.join(model_dir, 'log.txt'))
g_config.model_dir = model_dir

criterion = nn.HingeEmbeddingLoss()
model = Siamese()

# load model snapshot
load_path = args.load_path
if load_path != '':
    snapshot = torch.load(load_path)
    # loadModelState(model, snapshot)
    model.load_state_dict(snapshot['state_dict'])
    logging('Model loaded from {}'.format(load_path))

train_model(model, criterion, train_loader, test_loader, g_config, use_cuda=False)
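# train_loader / test_loader come from a call whose beginning is missing above (only the
# trailing `**kwargs)` survives). The training loop expects each batch to unpack as
# (data0, data1, target), with target +1 for a similar pair and -1 for a dissimilar one,
# as nn.HingeEmbeddingLoss requires. Below is a minimal sketch of a dataset with that
# shape; PairDataset and its random pairing are assumptions, not the project's loader.

import random
import torch
from torch.utils.data import Dataset, DataLoader


class PairDataset(Dataset):
    def __init__(self, images, labels):
        self.images = images  # list of CxHxW float tensors
        self.labels = labels  # list of class ids

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        img0, label0 = self.images[index], self.labels[index]
        j = random.randrange(len(self.images))
        img1, label1 = self.images[j], self.labels[j]
        target = torch.tensor(1.0 if label0 == label1 else -1.0)
        return img0, img1, target


# Example construction (hypothetical data), mirroring the (data0, data1, target) batches
# consumed by train_model():
# train_loader = DataLoader(PairDataset(train_images, train_labels), batch_size=32, shuffle=True)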
print('Loading model...')
model_dir = 'models/snapshot/'
model_load_path = os.path.join(model_dir, 'snapshot_epoch_1.pt')

gConfig = get_config()
gConfig.model_dir = model_dir

criterion = nn.HingeEmbeddingLoss()
model = Siamese()
package = torch.load(model_load_path)
model.load_state_dict(package['state_dict'])
model.eval()
print('Model loaded from {}'.format(model_load_path))

logging('Model configuration:\n{}'.format(model))
model_size, nParamsEachLayer = modelSize(model)
logging('Model size: {}\n{}'.format(model_size, nParamsEachLayer))

params = model.parameters()
for i, a_param in enumerate(params):
    print(a_param)
exit(0)

imagePath = '../data/demo.png'
img = loadAndResizeImage(imagePath)
text, raw = recognizeImageLexiconFree(model, img)
print('Recognized text: {} (raw: {})'.format(text, raw))
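# The inspection script above calls modelSize(model), which is not defined in this file.
# Judging from how its two return values are logged, it returns a total parameter count
# plus a per-layer breakdown. A minimal sketch under that assumption:

def modelSize(model):
    nParamsEachLayer = []
    total = 0
    for name, param in model.named_parameters():
        n = param.numel()
        nParamsEachLayer.append((name, n))
        total += n
    return total, nParamsEachLayer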
def train_model(model, criterion, train_loader, test_loader, g_config, use_cuda=True):
    optimizer = g_config.optimizer(model.parameters(), lr=0.01, momentum=0.9)

    def train_batch(input_batch, target):
        model.train()
        input1, input2 = input_batch
        sim = target
        output0, output1, output = model(input1, input2)
        loss = criterion(output, sim)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss = loss / input1.size()[0]
        loss_sum = loss.data.sum()
        inf = float("inf")
        if loss_sum == inf or loss_sum == -inf:
            logging("WARNING: received an inf loss, setting loss value to 0")
            loss_value = 0
        else:
            loss_value = loss.data[0]
        return loss_value

    def validation(input_batch, target):
        model.eval()
        input1, input2 = input_batch
        sim = target
        output0, output1, output = model(input1, input2)
        # print torch.cat((output, target), 1)
        loss = criterion(output, sim)
        loss = loss / input1.size()[0]
        loss_sum = loss.data.sum()
        inf = float("inf")
        if loss_sum == inf or loss_sum == -inf:
            logging("WARNING: received an inf loss, setting loss value to 0")
            loss_value = 0
        else:
            loss_value = loss.data[0]
        # accuracy = 1.0 * nCorrect / nFrame
        logging('Test loss = {}, accuracy = {}'.format(loss_value, 0))

    # train loop
    avg_loss = 0
    epoch = 0
    while True:
        # validation
        for data0, data1, target in test_loader:
            if use_cuda:
                data0, data1, target = data0.cuda(), data1.cuda(), target.cuda()
            data0, data1, target = Variable(data0, volatile=True), Variable(data1, volatile=True), Variable(target)
            validation((data0, data1), target)
        # train batch
        for batch_idx, (data0, data1, target) in enumerate(train_loader):
            if use_cuda:
                data0, data1, target = data0.cuda(), data1.cuda(), target.cuda()
            data0, data1, target = Variable(data0), Variable(data1), Variable(target)
            avg_loss += train_batch((data0, data1), target)
            # display
            if batch_idx % g_config.displayInterval == 0:
                avg_loss = avg_loss / g_config.displayInterval
                logging('Batch {} - train loss = {}'.format(batch_idx, avg_loss))
                diagnoseGradients(model.parameters())
                avg_loss = 0
        # save snapshot
        save_path = os.path.join(g_config.model_dir, 'snapshot_epoch_{}.pt'.format(epoch))
        torch.save(checkpoint(model, epoch), save_path)
        logging('Snapshot saved to {}'.format(save_path))
        # terminate
        if epoch > g_config.maxIterations:
            logging('Maximum epoch reached, terminating ...')
            break
        epoch += 1
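# train_model() relies on a g_config object (an optimizer factory plus displayInterval,
# maxIterations and model_dir) and on checkpoint() / diagnoseGradients(), none of which
# are shown in this file. The sketch below is inferred only from how they are used above
# and is an assumption about their shape, not the project's actual implementation.

import torch.optim as optim


class ConfigSketch(object):
    def __init__(self):
        self.optimizer = optim.SGD      # called as g_config.optimizer(params, lr=..., momentum=...)
        self.displayInterval = 100      # batches between loss reports
        self.maxIterations = 10         # epochs before the training loop terminates
        self.model_dir = 'models/snapshot/'


def checkpoint(model, epoch):
    # Matches the 'state_dict' key read back by the snapshot-loading code above.
    return {'epoch': epoch, 'state_dict': model.state_dict()}


def diagnoseGradients(params):
    # Assumed: log a simple summary of gradient magnitudes for debugging.
    for i, p in enumerate(params):
        if p.grad is not None:
            logging('param {}: grad norm = {}'.format(i, p.grad.norm().item()))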