# total number of parameters
param_count = 0
for param in model.parameters():
    param_count += param.view(-1).size()[0]

if not os.path.exists(config.log):
    os.mkdir(config.log)
if opt.log == '':
    log_path = config.log + utils.format_time(time.localtime()) + '/'
else:
    log_path = config.log + opt.log + '/'
if not os.path.exists(log_path):
    os.mkdir(log_path)
# This pattern is worth learning: a standalone logging helper that, when called,
# both prints the message and appends it to the log file.
logging = utils.logging(log_path + 'log.txt')
logging_csv = utils.logging_csv(log_path + 'record.csv')
for k, v in config.items():
    logging("%s:\t%s\n" % (str(k), str(v)))
logging("\n")
logging(repr(model) + "\n\n")
logging('total number of parameters: %d\n\n' % param_count)
logging('score function is %s\n\n' % opt.score)

if opt.restore:
    updates = checkpoints['updates']
else:
    updates = 0

total_loss, start_time = 0, time.time()
report_total, report_correct = 0, 0
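# A minimal sketch of what utils.logging and utils.logging_csv might look like,
# assuming they return closures that print and/or append to the given file
# (the real utils module is not shown here, so names and behaviour are assumptions):
import csv


def logging(log_file):
    def write_log(s):
        print(s, end='')                  # echo to stdout
        with open(log_file, 'a') as f:
            f.write(s)                    # and append the same text to the log file
    return write_log


def logging_csv(csv_file):
    def write_csv(row):
        with open(csv_file, 'a', newline='') as f:
            csv.writer(f).writerow(row)   # one row of values per call
    return write_csv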
else:
    updates = 0

# ======================================================================================================================
"""log config"""
if not os.path.exists(config.log):
    os.mkdir(config.log)
if opt.log == '':
    log_path = config.log + utils.format_time(time.localtime()) + '/'
else:
    log_path = config.log + opt.log + '/'
if not os.path.exists(log_path):
    os.mkdir(log_path)
logging = utils.logging(log_path + 'model_config.txt')
logging_csv = utils.logging_csv(log_path + 'model_record.csv')
for k, v in config.items():
    logging("%s:\t%s\n" % (str(k), str(v)))
logging("\n")
logging(repr(model) + "\n\n")
logging('total number of parameters: %d\n\n' % param_count)

# ======================================================================================================================
"""train"""
scores = [[] for metric in config.metric]
scores = collections.OrderedDict(zip(config.metric, scores))
loss_function = nn.CrossEntropyLoss()


def train(epoch):
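    # The body of train() is truncated above. A generic sketch of one epoch of a
    # classification loop with the CrossEntropyLoss defined above, using assumed
    # names (trainloader, optim, a model that returns raw logits), might look like:
    global updates
    model.train()
    for x, y in trainloader:              # assumed iterable of (input, label) batches
        model.zero_grad()
        logits = model(x)                 # [batch, num_classes] raw scores
        loss = loss_function(logits, y)   # nn.CrossEntropyLoss expects class indices
        loss.backward()
        optim.step()
        updates += 1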
for param in model.parameters():
    param_count += param.view(-1).size()[0]

if not os.path.exists(config.log):
    os.makedirs(config.log)
if config.log.endswith('/'):
    log_path = config.log
else:
    log_path = config.log + '/'
if not os.path.exists(log_path):
    os.mkdir(log_path)
logging = utils.logging(log_path + 'log.txt')
if not opt.notrain:
    logging_train_loss = utils.logging_csv(log_path + 'train_loss.csv', ['epoch', 'updates', 'log_loss'])
    logging_valid_loss = utils.logging_csv(log_path + 'valid_loss.csv', ['epoch', 'updates', 'log_loss'])
    logging_metric = utils.logging_dict_csv(log_path + 'metrics.csv', ['epoch', 'updates'] + all_metrics)
for k, v in config.items():
    logging("%s:\t%s\n" % (str(k), str(v)))
logging("\n")
logging(repr(model) + "\n\n")
logging('total number of parameters: %d\n\n' % param_count)

if opt.restore:
    updates = checkpoints['updates']
else:
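# In this variant the CSV loggers take an explicit header. A minimal sketch of the
# dict-based one, assuming it writes the header once at creation and one row per call
# (utils.logging_dict_csv itself is not shown, so this is only an illustration):
import csv


def logging_dict_csv(path, fieldnames):
    with open(path, 'w', newline='') as f:
        csv.DictWriter(f, fieldnames=fieldnames).writeheader()    # header row, written once

    def write_row(record):                 # record: dict keyed by the fieldnames
        with open(path, 'a', newline='') as f:
            csv.DictWriter(f, fieldnames=fieldnames).writerow(record)
    return write_row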
for param in model.parameters():
    param_count += param.view(-1).size()[0]

# `log` is the record file.
# config.log is the directory for records; it must end with '/'.
# opt.log is the name of the record directory for this particular run.
if not os.path.exists(config.log):
    os.mkdir(config.log)
if opt.log == '':
    log_path = config.log + utils.format_time(time.localtime()) + '/'
else:
    log_path = config.log + opt.log + '/'
if not os.path.exists(log_path):
    os.mkdir(log_path)
logging = utils.logging(log_path + 'log.txt')             # plain-text records go to this file
logging_csv = utils.logging_csv(log_path + 'record.csv')  # CSV records go to this file
for k, v in config.items():
    logging("%s:\t%s\n" % (str(k), str(v)))
logging("\n")
logging(repr(model) + "\n\n")                             # record the model architecture
logging('total number of parameters: %d\n\n' % param_count)
logging('score function is %s\n\n' % opt.score)

# updates is how many epochs have already been run; it lets training resume
# if the program is interrupted midway.
if opt.restore:
    updates = checkpoints['updates']
else:
    updates = 0

total_loss, start_time = 0, time.time()
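# The loop above counts parameters by flattening each tensor with view(-1); an
# equivalent, simpler form (a sketch, not part of the original script) is:
param_count = sum(p.numel() for p in model.parameters())

# checkpoints['updates'] implies the checkpoint is a dict. A hedged sketch of how
# such a checkpoint might be saved and restored (the file name and keys are assumptions):
import torch

torch.save({'model': model.state_dict(), 'updates': updates}, log_path + 'checkpoint.pt')
checkpoints = torch.load(log_path + 'checkpoint.pt')
model.load_state_dict(checkpoints['model'])
updates = checkpoints['updates']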