# ---- LR scheduler, parameter count, and run-logging setup ----
scheduler = L.CosineAnnealingLR(optim.optimizer, T_max=config.epoch)

# Total number of parameters; numel() is the idiomatic (and view()-free)
# equivalent of param.view(-1).size()[0].
param_count = sum(param.numel() for param in model.parameters())

# Resolve this run's log directory (config.log presumably ends with '/' —
# TODO confirm). makedirs(exist_ok=True) replaces the racy
# exists()/mkdir() check-then-create pattern and also creates config.log
# itself when it is missing.
if opt.log == '':
    log_path = config.log + utils.format_time(time.localtime()) + '/'
else:
    log_path = config.log + opt.log + '/'
os.makedirs(log_path, exist_ok=True)

# NOTE: this pattern is worth copying — a standalone logging helper that
# both prints and appends to the log file, called like a function.
logging = utils.logging(log_path + 'log.txt')
logging_csv = utils.logging_csv(log_path + 'record.csv')

# Record the full config, model architecture, and run metadata.
for k, v in config.items():
    logging("%s:\t%s\n" % (str(k), str(v)))
logging("\n")
logging(repr(model) + "\n\n")
logging('total number of parameters: %d\n\n' % param_count)
logging('score function is %s\n\n' % opt.score)

# Resume the update counter when restoring from a checkpoint.
if opt.restore:
    updates = checkpoints['updates']
else:
    updates = 0

total_loss, start_time = 0, time.time()
# ---- optional LR scheduler, parameter count, and run-logging setup ----
if config.schedule:
    scheduler = L.CosineAnnealingLR(optim.optimizer, T_max=config.epoch)

# Total number of parameters; numel() is the idiomatic (and view()-free)
# equivalent of param.view(-1).size()[0].
param_count = sum(param.numel() for param in model.parameters())

# Resolve this run's log directory (config.log presumably ends with '/' —
# TODO confirm). makedirs(exist_ok=True) replaces the racy
# exists()/mkdir() check-then-create pattern and also creates config.log
# itself when it is missing.
if opt.log == '':
    log_path = config.log + utils.format_time(time.localtime()) + '/'
else:
    log_path = config.log + opt.log + '/'
os.makedirs(log_path, exist_ok=True)

# utils.logging returns a callable that writes to the given file.
logging = utils.logging(log_path + 'model_config.txt')
logging_csv = utils.logging_csv(log_path + 'record.csv')

# Record the full config, model architecture, and parameter count.
for k, v in config.items():
    logging("%s:\t%s\n" % (str(k), str(v)))
logging("\n")
logging(repr(model) + "\n\n")
logging('total number of parameters: %d\n\n' % param_count)

# Resume the update counter when restoring from a checkpoint.
if opt.restore:
    updates = checkpoints['updates']
else:
    updates = 0

# Load the label dictionary used for decoding/evaluation.
with open(opt.label_dict_file, 'r') as f:
    label_dict = json.load(f)
# ---- optimizer binding, parameter count, logging, and checkpoint state ----
optim.set_parameters(model.parameters())

# Total number of parameters; numel() is the idiomatic (and view()-free)
# equivalent of param.view(-1).size()[0].
param_count = sum(param.numel() for param in model.parameters())

# log: resolve this run's directory (config.log presumably ends with '/' —
# TODO confirm); a millisecond timestamp names the run when opt.log is
# empty. makedirs(exist_ok=True) replaces the racy exists()/mkdir()
# check-then-create pattern and also creates config.log when missing.
if opt.log == '':
    log_path = config.log + str(int(time.time() * 1000)) + '/'
else:
    log_path = config.log + opt.log + '/'
os.makedirs(log_path, exist_ok=True)

logging = utils.logging(log_path + 'log.txt')

# Record the full config, model architecture, and run metadata.
for k, v in config.items():
    logging("%s:\t%s\n" % (str(k), str(v)))
logging("\n")
logging(repr(model) + "\n\n")
logging('total number of parameters: %d\n\n' % param_count)
logging('score function is %s\n\n' % opt.score)

# checkpoint: resume the update counter when restoring.
if opt.restore:
    updates = checkpoints['updates']
else:
    updates = 0

total_loss, start_time = 0, time.time()
report_total, report_correct = 0, 0
# ---- parameter count, run-logging setup, and checkpoint state ----

# Total number of parameters; numel() is the idiomatic (and view()-free)
# equivalent of param.view(-1).size()[0].
param_count = sum(param.numel() for param in model.parameters())

# log is the record file.
# config.log is the record directory and must end with '/'.
# opt.log is the name of this run's record directory.
# makedirs(exist_ok=True) replaces the racy exists()/mkdir()
# check-then-create pattern and also creates config.log when missing.
if opt.log == '':
    log_path = config.log + utils.format_time(time.localtime()) + '/'
else:
    log_path = config.log + opt.log + '/'
os.makedirs(log_path, exist_ok=True)

logging = utils.logging(log_path + 'log.txt')            # writes records to this file
logging_csv = utils.logging_csv(log_path + 'record.csv')  # writes records to this file

for k, v in config.items():
    logging("%s:\t%s\n" % (str(k), str(v)))
logging("\n")
logging(repr(model) + "\n\n")  # record the model architecture
logging('total number of parameters: %d\n\n' % param_count)
logging('score function is %s\n\n' % opt.score)

# updates counts completed training steps so far, so a run interrupted
# mid-way can resume from its checkpoint.
if opt.restore:
    updates = checkpoints['updates']
else:
    updates = 0