def get_logger(args, train_iters, val_iters):
    if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
        logger_backends = [
            log.JsonBackend(os.path.join(args.save, 'raport.json'), log_level=1),
            log.StdOut1LBackend(train_iters, val_iters, args.epochs, log_level=0),
        ]
        try:
            import wandb
            run = wandb.init(project=args.project, entity="jianfeic",
                             config=args, name=args.save)
            code = wandb.Artifact('project-source', type='code')
            for path in glob.glob('*.py', recursive=True):
                code.add_file(path)
            run.log_artifact(code)
            logger_backends.append(log.WandbBackend(wandb))
            print('Logging to wandb...')
        except ImportError:
            print('Wandb not found, logging to stdout and json...')
        logger = log.Logger(args.print_freq, logger_backends)
        for k, v in args.__dict__.items():
            logger.log_run_tag(k, v)
    else:
        logger = None
    return logger
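# Minimal usage sketch for get_logger (assumes the project-local `log` module and a
# torch.distributed setup as in the snippet above; the argparse defaults and the
# iteration counts below are illustrative assumptions, only `save`, `project`,
# `epochs`, and `print_freq` are actually read by get_logger):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--save', default='./runs/exp0')
parser.add_argument('--project', default='classification')
parser.add_argument('--epochs', type=int, default=90)
parser.add_argument('--print-freq', type=int, default=10)
args = parser.parse_args()

logger = get_logger(args, train_iters=500, val_iters=50)
if logger is not None:                  # only rank 0 gets a real logger
    logger.log_run_tag('note', 'rank-0 logging only')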
def __init__(self, common_cnn=CNN_structures.zeronn8,
             policy_cnn=CNN.Params([[2, 1]], []),
             value_cnn=CNN.Params([[1, 1], 1], [256]),
             kp=0.5, lr_init=0.0001, lr_dec_rate=0.95, batch_size=256,
             ckpt_idx=-1, save_epochs=2, epoch=10, verbose=None,
             act=tf.nn.relu, l2=1e-4, path=None, lock_model_path=None,
             num_samples=None, logger=None):
    """
    verbose: set to an integer to output the training history, or None to suppress output
    logger: if a logger is provided, all output goes through the given logger
    """
    self.common_cnn = common_cnn
    self.policy_cnn = policy_cnn
    self.value_cnn = value_cnn
    self.kp = kp
    self.num_samples = num_samples
    self.lr_init = lr_init
    self.lr_dec_rate = lr_dec_rate
    self.batch_size = batch_size
    self.epoch = epoch
    self.verbose = verbose
    self.lock_model_path = lock_model_path
    self.act = act
    self.save_epochs = save_epochs
    self.l2 = l2
    self.path = None if path is None else mkdir(path)
    self.logger = log.Logger(
        None if self.path is None else join(self.path, logfn('ZeroNN-' + curr_time_str())),
        verbose is not None)
    if logger is not None:
        self.logger = logger
    self.sess = None
    self.ts = {}
    self.ckpt_idx = ckpt_idx
    # collections of variables
    self.var_names = [
        'x', 'kp', 'y_value', 'y_policy', 'is_train', 'loss_policy',
        'loss_value', 'pred_value', 'acc_value', 'pred_policy', 'loss_l2',
        'loss_total', 'global_step', 'train_step'
    ]
    self.trained_model_paths = []
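# Construction sketch. The enclosing class is assumed to be named ZeroNN (as the
# 'ZeroNN-' log file prefix above suggests); only parameters that appear in the
# __init__ signature are used, and the values are illustrative:
zero_nn = ZeroNN(
    kp=0.5,                  # dropout keep probability
    lr_init=1e-4,
    batch_size=256,
    epoch=10,
    verbose=1,               # any integer enables training-history output
    path='zero_nn_ckpts',    # checkpoints and the ZeroNN-*.log file go here
)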
# -*- coding: utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from global_ import global_cls
from utils import log

mylogger = log.Logger(logger='send_email').getlog()

# Third-party SMTP service
mail_host = global_cls.email_server          # SMTP server
mail_user = global_cls.email_user            # user name
mail_pass = global_cls.email_pass            # password
sender = mail_user                           # sender address
receivers = global_cls.email_receivers       # recipients, e.g. your QQ mailbox or any other mailbox

message = MIMEMultipart()                    # create a message that can carry attachments
message['From'] = sender                     # sender address
message['To'] = 'my fans'                    # recipient address
subject = 'Automation Test Result'           # mail subject
testCase_path = global_cls.testCase_path


def send_mail(report_path, log_path):
    message['Subject'] = Header(subject, 'utf-8')
    # mail body
    mes = 'Dear All: <br/>' \
          '    This is the automated test report; please download the attachment. <br/>' \
          '    Thanks <br/>' \
          'FengHuJie<br/>'
    html_start = '<font face="Courier New, Courier, monospace"><pre>'
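# A hedged sketch of how the report could be attached and sent with the standard
# library; the original send_mail body is truncated above, so the helper name
# send_report, the attachment filename, and the SSL port are assumptions:
def send_report(report_path):
    with open(report_path, 'r', encoding='utf-8') as f:
        report_html = f.read()

    body = MIMEText('Dear All,<br/>Please find the automation test report attached.', 'html', 'utf-8')
    attachment = MIMEText(report_html, 'html', 'utf-8')
    attachment.add_header('Content-Disposition', 'attachment', filename='report.html')

    message.attach(body)
    message.attach(attachment)

    smtp = smtplib.SMTP_SSL(mail_host, 465)   # 465 is the usual SMTPS port; adjust for your server
    smtp.login(mail_user, mail_pass)
    smtp.sendmail(sender, receivers, message.as_string())
    smtp.quit()
    mylogger.info('test report mailed')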
from utils import log

logger = log.Logger(Misc)

while True:
    if not Player.BuffsExist('Lightning Strike'):
        Spells.CastBushido('Lightning Strike')
        Misc.Pause(300)
        continue
    if not Player.BuffsExist('Curse Weapon'):
        scroll = Items.FindByID(8803, -1, Player.Backpack.Serial)
        Items.UseItem(scroll)
        Misc.Pause(2000)
        continue
    if not Player.BuffsExist('Enemy Of One'):
        Spells.CastChivalry('Enemy Of One')
        Misc.Pause(2000)
        continue
    if not Player.BuffsExist('Divine Fury'):
        Spells.CastChivalry('Divine Fury')
        Misc.Pause(2000)
        continue
    if not Player.BuffsExist('Consecrate Weapon'):
        Spells.CastChivalry('Consecrate Weapon')
        Misc.Pause(2000)
        continue
    logger.info('no cast needed')
    Misc.Pause(1000)
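# A possible table-driven variant of the same buff-maintenance loop (a sketch:
# the buff names, item ID, and pauses are copied from the script above, while
# the BUFF_CASTS table itself is new):
BUFF_CASTS = [
    ('Lightning Strike',  lambda: Spells.CastBushido('Lightning Strike'),   300),
    ('Curse Weapon',      lambda: Items.UseItem(Items.FindByID(8803, -1, Player.Backpack.Serial)), 2000),
    ('Enemy Of One',      lambda: Spells.CastChivalry('Enemy Of One'),      2000),
    ('Divine Fury',       lambda: Spells.CastChivalry('Divine Fury'),       2000),
    ('Consecrate Weapon', lambda: Spells.CastChivalry('Consecrate Weapon'), 2000),
]

while True:
    for buff, cast, pause in BUFF_CASTS:
        if not Player.BuffsExist(buff):
            cast()
            Misc.Pause(pause)
            break
    else:
        # no break: every buff is already up
        logger.info('no cast needed')
        Misc.Pause(1000)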
def __init__(self, folder, board_rows=int(rcn[0]), board_cols=int(rcn[1]),
             n_in_row=int(rcn[2]), train_ratio=0.6, mcts_sims=512,
             self_play_cnt=10000, batch_size=128, verbose=True,
             n_eval_processes=2, best_player_path='106343',
             n_play_processes=16, plays=8, start_nozero=False,
             do_opt=True, mp_parallel=True):
    self.mp_parallel = mp_parallel
    self.Lock = mp.Lock if mp_parallel else threading.Lock
    self.Process = mp.Process if mp_parallel else threading.Thread
    # not shared
    self.n_play_processes = n_play_processes
    self.n_eval_processes = n_eval_processes
    self.do_opt = do_opt
    self.folder = mkdir(folder)
    self.manager = mp.Manager()
    self.lock_train_data = self.Lock()
    self.lock_model_paths = self.Lock()
    self.lock_model_best = self.Lock()
    self.unchecked_model_paths = self.manager.list([]) if mp_parallel else list([])
    self.loss_hists = []
    # shared across threads/processes
    self.batch_size = batch_size
    self.logger = log.init_LoggerMP(join(self.folder, logfn('ZeroNNTrainer-' + curr_time_str())), verbose) \
        if mp_parallel else log.Logger(join(self.folder, logfn('ZeroNNTrainer-' + curr_time_str())), verbose)
    self.train_ratio = train_ratio
    # shared constants
    self.folder_selfplay = mkdir(join(self.folder, 'selfplay'))
    # self.data_path = [join(self.folder_selfplay, npfn('selfplay' + str(i))) for i in range(3)]
    self.data_path = join(self.folder_selfplay, 'selfplay')
    self.board_rows = board_rows if board_rows > n_in_row else board_rows + 10
    self.board_cols = board_cols if board_cols > n_in_row else board_cols + 10
    self.n_in_row = n_in_row
    self.mcts_sims = mcts_sims
    self.folder_NNs = mkdir(join(self.folder, 'NNs'))
    self.path_loss_hists = join(self.folder_selfplay, npfn('selfplay' + '_loss', False))
    self.shared_constants = {
        'data_path': self.data_path,
        'board_rows': self.board_rows,
        'board_cols': self.board_cols,
        'n_in_row': self.n_in_row,
        'mcts_sims': self.mcts_sims,
        'folder_NNs': self.folder_NNs,
        'folder_selfplay': self.folder_selfplay,
        'path_loss_hists': self.path_loss_hists
    }
    # shared variables
    curr_generation = 0
    # self.nozero_mcts is the initial MCTS: it searches without a ZeroNN and is used
    # to generate the initial training data instead of relying on ZeroNNs with
    # randomly initialized parameters. Once the first generation of ZeroNN has been
    # trained, self.nozero_mcts is set to None since it is no longer needed.
    if exists(self.path_loss_hists):
        self.loss_hists = np.load(self.path_loss_hists).tolist()
        try:
            curr_generation = self.loss_hists[-1][0]
        except:
            curr_generation = 0
    else:
        self.loss_hists = []
    self.loss_hists = self.manager.list(self.loss_hists) if mp_parallel else list(self.loss_hists)
    self.shared_vars = {
        'self_play_cnt': self_play_cnt,
        'plays': plays,
        'best_player_path': join(self.shared_constants['folder_NNs'],
                                 'model.ckpt-' + str(best_player_path)),
        'model_avail': not start_nozero,
        'data_avail': False,
        'resign_val': 100,
        'curr_generation': curr_generation,
        'nozero_mcts_sims': int(mcts_sims * 2),
        'nozero_mcts': start_nozero,
        'noise': 0.25,
    }
    self.shared_vars = self.manager.dict(self.shared_vars) if mp_parallel else dict(self.shared_vars)
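# Self-contained sketch of the coordination pattern the trainer uses above: a
# Manager-backed shared dict plus Lock/Process aliases that switch between
# multiprocessing and threading. The names `worker` and `demo_shared_state` are
# illustrative, not part of the trainer:
import multiprocessing as mp
import threading


def worker(shared_vars, lock):
    # each worker bumps the shared generation counter under the lock
    with lock:
        shared_vars['curr_generation'] += 1


def demo_shared_state(mp_parallel=True):
    Lock = mp.Lock if mp_parallel else threading.Lock
    Process = mp.Process if mp_parallel else threading.Thread
    manager = mp.Manager()
    shared_vars = manager.dict({'curr_generation': 0}) if mp_parallel else {'curr_generation': 0}
    lock = Lock()
    procs = [Process(target=worker, args=(shared_vars, lock)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    return shared_vars['curr_generation']   # 4 after all workers finish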
    self.webserver.run_app()


def spawn_chrome():
    time.sleep(2)
    if platform.system() == 'Windows':
        subprocess.Popen(["start", "chrome", "http://localhost:8080"], shell=True)
    elif platform.system() == 'Darwin':
        # webbrowser.get('macosx').open("http://localhost:8080")
        pass


if __name__ == "__main__":
    __builtins__.profile = "--profile" in sys.argv
    debuglogger = log.Logger(DEBUGLOG)
    __builtins__.dlog = debuglogger
    profilelogger = log.Logger(PROFILELOG)
    __builtins__.plog = profilelogger
    dbops = SQLite_DBOps(DBPATH)
    __builtins__.dbops = dbops
    __builtins__.cache = {}
    __builtins__.ignore_scrips = IGNORE_SCRIPS
    dlog.info("====================== Version 0.1 =========================")
    dlog.info("Starting server at port %s" % ("8080",))
    # TODO: Ideally request logs should go in DB.
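# Likely wiring for spawn_chrome (an assumption; the __main__ block above is
# truncated before the server is started): launch the browser from a daemon
# thread so the 2-second delayed open does not block the web server.
import threading

threading.Thread(target=spawn_chrome, daemon=True).start()
# ...the blocking run_app() call on the web server would follow here.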
            if timestamp in csvs_db:
                continue
            try:
                p = Populator(self.dbops)
                p.start([csv])
            except:
                error = True
                traceback.print_exc()
                dlog.info("Population failed for %s" % (csv,))
            else:
                self.put_csv_in_db(timestamp)
                dlog.info("Population done for %s" % (csv,))
        if error:
            dlog.info("Daily update done with errors.")
        else:
            dlog.info("Daily update done successfully.")
        dlog.info("Press Enter to Quit ...")
        raw_input()


if __name__ == "__main__":
    __builtins__.dlog = log.Logger('stdout', logname='stdout')
    # __builtins__.rlog = log.Logger(NULLLOG)
    dbname = "/Users/amitkulkarni/temp_Derivatives/daily_update_1.db"
    updater = DailyUpdate(DBPATH, CSVSTORE)
    updater.start()
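# raw_input() above is Python 2 only; a small compatibility shim if this script
# should also run under Python 3 (the target interpreter is an assumption):
try:
    wait_for_enter = raw_input   # Python 2
except NameError:
    wait_for_enter = input       # Python 3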