def last_skill(bot, update):
    """Replay the user's last saved query and answer with the matching cards.

    Reads 'user_questions.txt' (written by query_to_base_get_skill as
    "<chat_id>:<query>"); if the stored chat id matches the current chat,
    the database lookup is re-run and one message per card is sent.
    A missing file simply means there is no history yet.
    """
    print("last_skill")
    try:
        # Context manager guarantees the handle is closed (the original
        # leaked it); utf-8 matches the encoding used by the writer.
        with open('user_questions.txt', encoding='utf-8') as f:
            user_questions = f.read()
    except FileNotFoundError:
        return  # nothing saved yet — silently do nothing, as before
    user_questions = user_questions.split(":")
    chat_id = user_questions[0]
    questions = user_questions[1].strip().replace("'", "")
    if chat_id == str(update.message.chat.id):
        selector = DataBaseSelector(questions)
        link = selector.query_to_base_skill()
        cards = get_cards(link)
        for card in cards:
            # Show the VERIFIED marker only for cards flagged as verified.
            pay_metod = VERIFIED if card['verified'] else ''
            url = card['link']
            update.message.reply_text(PAY_MESSAGE.format(
                title=card['title'],
                time=card['time'],
                description=card['description'],
                list_skill=card['list_skill'],
                price=card['price'],
                pay_metod=pay_metod,
                bids=card['bids']), reply_markup=card_link_kb(url))
def main():
    """Automated battle loop: wait for a turn to start, press attack,
    fire all Noble Phantasms once past turn nine, then read the command
    cards off the screen and pick them.

    Runs until interrupted with Ctrl-C.
    """
    turns_seen = 0
    try:
        while True:
            time.sleep(3)
            turn_start = detect_start_turn()
            print('testing_turn', turn_start)
            if turn_start != True:
                continue
            turns_seen += 1
            # Press the attack button (fixed screen coordinates).
            pyautogui.moveTo(1378, 696)
            pyautogui.click()
            time.sleep(2.0)
            if turns_seen > 9:
                # Late game: click all three NP slots before carding.
                time.sleep(1)
                for np_slot in ("NP1", "NP2", "NP3"):
                    click_location(np_slot)
            snapshot = grab_screen_fgo()
            cards, brave_imgs = get_cards(snapshot)
            cards, brave_imgs = brave_chain_check(cards, brave_imgs)
            pick_cards_from_card_list(cards)
    except KeyboardInterrupt:
        print('interrupted!')
def query_to_base_get_skill(bot, update, user_data):
    """Run the user's query against the database, launch the parser and
    send the answers to Telegram.

    On a failed lookup (AttributeError from the selector) a spelling
    suggestion is offered and the conversation stays in the 'skill' state;
    otherwise the matching cards are sent, the query is persisted for
    last_skill() to replay, and the conversation ends.
    """
    logging.info('in def query_to_base_get_skill')
    user_skill = update.message.text
    try:
        selector = DataBaseSelector(user_skill)
        link = selector.query_to_base_skill()
    except AttributeError:
        # Query not found: try to suggest a correction from the key words.
        logging.info('AttributeError')
        corrector = DataBaseSelector(user_skill)
        user_tip = corrector.find_in_key_words()
        logging.info(user_tip)
        if user_tip == '[]':  # the corrector returns a stringified empty list
            message = CORRECTOR_NONE
        else:
            message = CORRECTOR.format(user_tip)
        update.message.reply_text(message, reply_markup=get_keyboard())
        return 'skill'
    # Run the parser over the link and post one message per card.
    cards = get_cards(link)
    for card in cards:
        pay_metod = VERIFIED if card['verified'] else ''
        url = card['link']
        update.message.reply_text(PAY_MESSAGE.format(
            title=card['title'],
            time=card['time'],
            description=card['description'],
            list_skill=card['list_skill'],
            price=card['price'],
            pay_metod=pay_metod,
            bids=card['bids']), reply_markup=card_link_kb(url))
    # Persist the user and the query so last_skill() can replay them.
    chat_id = update.message.chat.id
    chat_text = update.message.text
    # BUG FIX: the template previously had no placeholders ("******"), so
    # the file never contained the "<chat_id>:<query>" record that
    # last_skill() splits on ':'.
    user = "{}:{}".format(chat_id, chat_text)
    with open('user_questions.txt', 'w', encoding='utf-8') as f:
        f.write(user)
    return ConversationHandler.END
def process_sprint(unplanned, sprint, list_, bugs):
    """Reconcile a Trello list with the sprint's planned bug ids.

    Cards already on the list are matched by their leading '#NNNN' tag;
    unmatched cards are reported, and any planned bug with no card gets a
    new card added (summary fetched from Trac when not cached in *bugs*).

    Note: *unplanned* is currently unused — kept for interface
    compatibility with callers.
    """
    missing = set(sprint)
    for card in utils.get_cards(list_):
        # Raw string for the regex (the original '#(\d{4})' relied on an
        # invalid escape); also converted Python 2 print statements to
        # print() calls, matching the rest of the file.
        match = re.match(r'#(\d{4})', card.name)
        if match:
            bug_id = int(match.group(1))
            if bug_id in missing:
                missing.remove(bug_id)
            else:
                # Card carries a bug id that is not in the sprint plan.
                print(4 * ' ', bug_id, card.name)
    for bug_id in sorted(missing):
        if bug_id in bugs:
            bug = bugs[bug_id]
        else:
            bug_data = utils.get_trac_bug(bug_id)
            bug = bug_data['summary']
        # Ensure the card name starts with the zero-padded '#NNNN' tag.
        if not bug.startswith('#%04d' % bug_id):
            bug = '#%04d %s' % (bug_id, bug)
        print(4 * ' ', 'GOING TO insert', bug)
        list_.add_card(bug)
def main(skill_usage_list):
    """Main battle loop driven by per-hero RL agents.

    Each detected turn: sample the round counter, ask every hero agent for
    a skill action, click the chosen skills (or just the attack button),
    optionally fire the NP barrage, then let the card-picker agent choose
    which attack cards to click.  Stop with Ctrl-C.

    skill_usage_list: battle-plan commands; when its first entry is
    'brute_force' (i.e. no real battle plan) the bot fires all NPs every
    turn from the start — the original pre-update behavior.
    """
    skill_usage_num = 0
    turn_counter = 0
    np_barrage = skill_usage_list[0] == 'brute_force'
    try:
        while True:
            time.sleep(3)
            turn_start = detect_start_turn()
            print('testing_turn', turn_start)
            if turn_start != True:
                continue
            # Round detection: sample three times and take the majority to
            # avoid a single OCR false positive.
            round_samples = [detect_round_counter() for _ in range(3)]
            round_number = max(set(round_samples), key=round_samples.count)
            # Default to 1 so an unexpected reading cannot leave round_int
            # unbound (the original if-chain could raise NameError).
            round_int = {'one': 1, 'two': 2, 'three': 3}.get(round_number, 1)
            print(round_number, round_samples)
            # Still tracking the turn counter via the attack-button detect.
            turn_counter += 1
            skill_list1 = []
            # State is recomputed per hero because apply_buffs() mutates
            # TEAM, so later heroes see earlier heroes' buffs.
            for hero_idx in (1, 2, 3):
                state = ([round_int / 3, turn_counter / 15] +
                         get_buff_game_state(TEAM) +
                         get_skill_use_game_state(TEAM))
                skill_list1 += _hero_skill_commands(hero_idx, state)
            if len(skill_list1) > 0:
                # Execute the chosen skills, then the attack button
                # ('attack_2' sleeps 2 s after the click).
                skill_list1.append('attack_2')
                skill_usage_num += _click_commands(skill_list1)
                if round_int > 1:
                    _fire_np_barrage()
            elif len(skill_list1) == 0:
                # No skills chosen: press attack directly, and fall back to
                # the brute-force NP barrage when no battle plan exists.
                pyautogui.moveTo(skill_dict['attack'][0],
                                 skill_dict['attack'][1])
                pyautogui.click()
                time.sleep(2.0)
                if np_barrage is True:
                    if round_int > 1:
                        _fire_np_barrage()
            increment_turns_for_buffs_gather_mods(TEAM)
            # Previous core for Pendragon Alter: screen-grab the cards and
            # let the RL card picker choose which ones to click (the
            # siamese-NN brave-chain check is disabled for now).
            screen = grab_screen_fgo()
            card_list, brave_chain_raw_img_list = get_cards(screen)
            converted_cards = convert_card_list(card_list)
            card_picker_state = converted_cards + get_buff_game_state(TEAM)
            print('card_bot')
            card_indices = TEAM.card_picker.get_action(card_picker_state)
            print(card_indices)
            for card_index in card_indices:
                # Indices 0-3 map to cards c1-c4; anything else falls to c5.
                click_location({0: 'c1', 1: 'c2', 2: 'c3', 3: 'c4'}
                               .get(card_index, 'c5'))
    except KeyboardInterrupt:
        print('interrupted!')


def _hero_skill_commands(hero_idx, state):
    """Ask one hero agent for its action and apply the bookkeeping.

    Replaces three copy-pasted hero blocks.  When the agent does not pass,
    its buffs are applied to TEAM, the used_skill flag is set, and the
    click command 's<hero>s<skill>' is returned (empty list on 'pass').
    """
    hero_key = 'hero%d' % hero_idx
    hero = TEAM.hero_dict[hero_key]
    action, pred_class, preds = hero.get_action(state)
    print('%s action: ' % hero_key, action[0])
    commands = []
    if action[0] != 'pass':
        apply_buffs(TEAM, action[1])
        if action[0] == 'sk1':
            hero.used_skill_1 = 1
            commands.append('s%ds1' % hero_idx)
        elif action[0] == 'sk2':
            hero.used_skill_2 = 1
            commands.append('s%ds2' % hero_idx)
        elif action[0] == 'sk3':
            hero.used_skill_3 = 1
            commands.append('s%ds3' % hero_idx)
    return commands


def _click_commands(commands):
    """Click each 'name[_sleep]' command at its skill_dict location.

    A missing '_sleep' suffix means 3.0 s after the click.  Returns the
    number of commands executed so the caller can track skill usage.
    """
    for command_ in commands:
        parts = command_.split('_')
        command = parts[0]
        sleep_seconds = 3.0 if len(parts) == 1 else float(parts[1])
        print(command)
        pyautogui.moveTo(skill_dict[command][0], skill_dict[command][1])
        pyautogui.click()
        time.sleep(sleep_seconds)
    return len(commands)


def _fire_np_barrage():
    """Click all three Noble Phantasm buttons (NP2, NP3, NP1 order)."""
    for np_name in ("NP2", "NP3", "NP1"):
        click_location(np_name)
def train():
    """Train a Senta sentiment model (CNN/BOW/GRU/BiGRU) in dygraph mode.

    Configuration comes from the module-level ``args``.  Logs a running
    loss/accuracy every ``skip_steps``, evaluates on the dev set every
    ``validation_steps``, checkpoints every ``save_steps`` and, in CE
    (continuous-evaluation) mode, prints kpi lines for the CE harness.
    """
    with fluid.dygraph.guard(place):
        if args.ce:
            # CE mode: pin all seeds so runs are reproducible.
            print("ce mode")
            seed = 90
            np.random.seed(seed)
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
        processor = reader.SentaProcessor(data_dir=args.data_dir,
                                          vocab_path=args.vocab_path,
                                          random_seed=args.random_seed)
        num_labels = len(processor.get_labels())  # NOTE(review): unused
        num_train_examples = processor.get_num_examples(phase="train")
        max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count  # NOTE(review): unused
        # Shuffle training data only outside CE mode, keeping CE runs
        # deterministic; dev data is never shuffled.
        if not args.ce:
            train_data_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='train',
                epoch=args.epoch,
                shuffle=True)
            eval_data_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='dev',
                epoch=args.epoch,
                shuffle=False)
        else:
            train_data_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='train',
                epoch=args.epoch,
                shuffle=False)
            eval_data_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='dev',
                epoch=args.epoch,
                shuffle=False)
        if args.model_type == 'cnn_net':
            model = nets.CNN(args.vocab_size, args.batch_size,
                             args.padding_size)
        elif args.model_type == 'bow_net':
            model = nets.BOW(args.vocab_size, args.batch_size,
                             args.padding_size)
        elif args.model_type == 'gru_net':
            model = nets.GRU(args.vocab_size, args.batch_size,
                             args.padding_size)
        elif args.model_type == 'bigru_net':
            model = nets.BiGRU(args.vocab_size, args.batch_size,
                               args.padding_size)
        sgd_optimizer = fluid.optimizer.Adagrad(
            learning_rate=args.lr, parameter_list=model.parameters())
        steps = 0
        total_cost, total_acc, total_num_seqs = [], [], []
        gru_hidden_data = np.zeros((args.batch_size, 128), dtype='float32')  # NOTE(review): unused
        ce_time, ce_infor = [], []
        for eop in range(args.epoch):
            time_begin = time.time()
            for batch_id, data in enumerate(train_data_generator()):
                enable_profile = steps > args.profile_steps
                with profile_context(enable_profile):
                    steps += 1
                    # Pad/truncate each doc to padding_size; the pad value
                    # is vocab_size so it can be masked out of the word
                    # count below.
                    doc = to_variable(
                        np.array([
                            np.pad(x[0][0:args.padding_size],
                                   (0, args.padding_size -
                                    len(x[0][0:args.padding_size])),
                                   'constant',
                                   constant_values=(args.vocab_size))
                            for x in data
                        ]).astype('int64').reshape(-1))
                    label = to_variable(
                        np.array([x[1] for x in data]).astype('int64').reshape(
                            args.batch_size, 1))
                    model.train()
                    avg_cost, prediction, acc = model(doc, label)
                    avg_cost.backward()
                    # Count real (non-padding) tokens to weight the averages.
                    np_mask = (doc.numpy() != args.vocab_size).astype('int32')
                    word_num = np.sum(np_mask)
                    sgd_optimizer.minimize(avg_cost)
                    model.clear_gradients()
                    total_cost.append(avg_cost.numpy() * word_num)
                    total_acc.append(acc.numpy() * word_num)
                    total_num_seqs.append(word_num)
                    if steps % args.skip_steps == 0:
                        time_end = time.time()
                        used_time = time_end - time_begin
                        print("step: %d, ave loss: %f, "
                              "ave acc: %f, speed: %f steps/s" %
                              (steps,
                               np.sum(total_cost) / np.sum(total_num_seqs),
                               np.sum(total_acc) / np.sum(total_num_seqs),
                               args.skip_steps / used_time))
                        ce_time.append(used_time)
                        ce_infor.append(
                            np.sum(total_acc) / np.sum(total_num_seqs))
                        total_cost, total_acc, total_num_seqs = [], [], []
                        time_begin = time.time()
                    if steps % args.validation_steps == 0:
                        # Full pass over the dev set (model.train() at the
                        # top of the next step restores training mode).
                        total_eval_cost, total_eval_acc, total_eval_num_seqs = [], [], []
                        model.eval()
                        eval_steps = 0
                        gru_hidden_data = np.zeros((args.batch_size, 128),
                                                   dtype='float32')
                        for eval_batch_id, eval_data in enumerate(
                                eval_data_generator()):
                            eval_np_doc = np.array([
                                np.pad(x[0][0:args.padding_size],
                                       (0, args.padding_size -
                                        len(x[0][0:args.padding_size])),
                                       'constant',
                                       constant_values=(args.vocab_size))
                                for x in eval_data
                            ]).astype('int64').reshape(-1)
                            eval_label = to_variable(
                                np.array([x[1] for x in eval_data
                                          ]).astype('int64').reshape(
                                              args.batch_size, 1))
                            eval_doc = to_variable(eval_np_doc)
                            eval_avg_cost, eval_prediction, eval_acc = model(
                                eval_doc, eval_label)
                            eval_np_mask = (
                                eval_np_doc != args.vocab_size).astype('int32')
                            eval_word_num = np.sum(eval_np_mask)
                            total_eval_cost.append(eval_avg_cost.numpy() *
                                                   eval_word_num)
                            total_eval_acc.append(eval_acc.numpy() *
                                                  eval_word_num)
                            total_eval_num_seqs.append(eval_word_num)
                            eval_steps += 1
                        time_end = time.time()
                        used_time = time_end - time_begin
                        print(
                            "Final validation result: step: %d, ave loss: %f, "
                            "ave acc: %f, speed: %f steps/s" %
                            (steps, np.sum(total_eval_cost) /
                             np.sum(total_eval_num_seqs),
                             np.sum(total_eval_acc) /
                             np.sum(total_eval_num_seqs),
                             eval_steps / used_time))
                        time_begin = time.time()
                        if args.ce:
                            print("kpis\ttrain_loss\t%0.3f" %
                                  (np.sum(total_eval_cost) /
                                   np.sum(total_eval_num_seqs)))
                            print("kpis\ttrain_acc\t%0.3f" %
                                  (np.sum(total_eval_acc) /
                                   np.sum(total_eval_num_seqs)))
                    if steps % args.save_steps == 0:
                        save_path = args.checkpoints + "/" + "save_dir_" + str(
                            steps)
                        print('save model to: ' + save_path)
                        fluid.dygraph.save_dygraph(model.state_dict(),
                                                   save_path)
                    if enable_profile:
                        print('save profile result into /tmp/profile_file')
                        return
        if args.ce:
            card_num = get_cards()
            _acc = 0
            _time = 0
            # Narrowed from a bare `except:`; only an empty history (no
            # skip_steps boundary hit) can fail these indexings.
            try:
                _time = ce_time[-1]
                _acc = ce_infor[-1]
            except IndexError:
                print("ce info error")
            print("kpis\ttrain_duration_card%s\t%s" % (card_num, _time))
            print("kpis\ttrain_acc_card%s\t%f" % (card_num, _acc))
# NOTE(review): this chunk appears mid-script — the leading sys.exit() was
# presumably the tail of an earlier (unseen) conditional branch; executed
# sequentially as shown, everything below it would be unreachable. Confirm
# against the full file.
sys.exit()
# Handshake: echo the server banner and announce readiness.
sys.stdout.write(sock.recv(4096) + 'start\n')
sock.send('start\n')
input_data = ''
sys.stdout.write(sock.recv(4096))
# Main CTF loop: accumulate server output until a full question has
# arrived, solve it, and send the numeric answer back.
while True:
    data = sock.recv(4096)
    if not data:
        # Empty recv: peer closed the connection.
        print('-' * 20 + ' Connection closed ' + '-' * 20)
        break
    else:
        sys.stdout.write(data)
        if 'A resposta é:' in data:  # Portuguese: "The answer is:"
            # Question is complete: parse the cards out of the buffered
            # text and reply with the solver's integer result.
            input_data += data
            valores = get_cards(input_data)
            resp = '%d\n' % (solver(valores))
            sys.stdout.write(resp)
            sock.send(resp)
            input_data = ''
        elif 'HACKAFLAG{' in data:  # flag received — challenge solved
            break
        else:
            # Partial question: keep buffering.
            input_data += data
sock.close()
def train():
    """Train the Senta CNN model with plain SGD in Paddle dygraph mode.

    Variant of the training entry point that zeroes row 0 of the embedding
    (the padding index), dumps the initial parameters to ``./paramters.npz``
    and tracks data-reader throughput separately from compute throughput.
    In CE mode it prints kpi lines for the continuous-evaluation harness.
    """
    with fluid.dygraph.guard():
        if args.ce:
            # CE mode: pin all seeds so runs are reproducible.
            print("ce mode")
            seed = args.random_seed
            np.random.seed(seed)
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
        processor = reader.SentaProcessor(data_dir=args.data_dir,
                                          vocab_path=args.vocab_path,
                                          random_seed=args.random_seed)
        num_labels = len(processor.get_labels())  # NOTE(review): unused
        # Shuffle training data only outside CE mode, keeping CE runs
        # deterministic; dev data is never shuffled.
        if not args.ce:
            train_data_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='train',
                epoch=args.epoch,
                shuffle=True)
            eval_data_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='dev',
                epoch=args.epoch,
                shuffle=False)
        else:
            train_data_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='train',
                epoch=args.epoch,
                shuffle=False)
            eval_data_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='dev',
                epoch=args.epoch,
                shuffle=False)
        model = nets.CNN(args.vocab_size)
        # Save the initial parameters; for embedding tables, zero out row 0
        # (the padding index) and push the modified weights back into the
        # model before snapshotting.
        param_dict = {}
        for param_name in model.state_dict():
            param_dict[param_name] = model.state_dict()[param_name].numpy()
            if 'embedding' in param_name:
                state_dict = model.state_dict()
                param_dict[param_name][0] = 0
                state_dict[param_name] = paddle.to_tensor(
                    param_dict[param_name])
                model.set_dict(state_dict)
        # NOTE(review): filename typo ("paramters") kept byte-for-byte —
        # downstream consumers may depend on it; confirm before renaming.
        np.savez('./paramters.npz', **param_dict)
        for parameters in model.named_parameters():
            print(parameters[0])
            if 'embedding' in parameters[0]:
                print(model.state_dict()[parameters[0]][0].shape)
        sgd_optimizer = paddle.fluid.optimizer.SGD(
            learning_rate=args.lr, parameter_list=model.parameters())
        steps = 0
        total_cost, total_acc, total_num_seqs = [], [], []
        gru_hidden_data = np.zeros((args.batch_size, 128), dtype='float64')  # NOTE(review): unused
        ce_time, ce_infor = [], []
        reader_time = 0.0
        num_train_examples = processor.get_num_examples(phase="train")  # NOTE(review): unused
        for eop in range(args.epoch):
            time_begin = time.time()
            for batch_id, data in enumerate(train_data_generator()):
                reader_begin = time.time()
                seq_len_arr = np.array([len(x[0]) for x in data],
                                       dtype="int64")
                steps += 1
                seq_len = paddle.to_tensor(seq_len_arr)
                # Pad/truncate each doc to padding_size with pad id 0
                # (matching the zeroed embedding row above).
                doc = paddle.to_tensor(
                    np.array([
                        np.pad(x[0][0:args.padding_size],
                               (0, args.padding_size -
                                len(x[0][0:args.padding_size])),
                               'constant',
                               constant_values=0) for x in data
                    ]).astype('int64'))
                label = paddle.to_tensor(
                    np.array([x[1] for x in data
                              ]).astype('int64').reshape(args.batch_size, 1))
                reader_end = time.time()
                reader_time += (reader_end - reader_begin)
                model.train()
                avg_cost, prediction, acc = model(doc, seq_len,
                                                  args.padding_size, label)
                # NOTE(review): gradients are cleared *before*
                # backward/minimize; per-step this is equivalent to
                # clearing after the update, but confirm it was intentional.
                model.clear_gradients()
                avg_cost.backward()
                sgd_optimizer.minimize(avg_cost)
                # Token count from the true sequence lengths (pre-padding).
                word_num = np.sum(seq_len_arr)
                total_cost.append(avg_cost.numpy() * word_num)
                total_acc.append(acc.numpy() * word_num)
                total_num_seqs.append(word_num)
                if steps % args.skip_steps == 0:
                    time_end = time.time()
                    used_time = time_end - time_begin
                    print(
                        "step: %d, ave loss: %f, "
                        "ave acc: %f, speed: %f steps/s, reader speed: %f steps/s"
                        % (steps,
                           np.sum(total_cost) / np.sum(total_num_seqs),
                           np.sum(total_acc) / np.sum(total_num_seqs),
                           args.skip_steps / used_time,
                           args.skip_steps / reader_time))
                    reader_time = 0.0
                    ce_time.append(used_time)
                    ce_infor.append(
                        np.sum(total_acc) / np.sum(total_num_seqs))
                    total_cost, total_acc, total_num_seqs = [], [], []
                    time_begin = time.time()
                # Per-step validation and checkpointing were disabled
                # (commented out) in this variant; the dead code has been
                # removed — recover it from version control if needed.
        if args.ce:
            card_num = get_cards()
            _acc = 0
            _time = 0
            # Narrowed from a bare `except:`; only an empty history (no
            # skip_steps boundary hit) can fail these indexings.
            try:
                _time = ce_time[-1]
                _acc = ce_infor[-1]
            except IndexError:
                print("ce info error")
            print("kpis\ttrain_duration_card%s\t%s" % (card_num, _time))
            print("kpis\ttrain_acc_card%s\t%f" % (card_num, _acc))