        args.rand_seed)
    # save the final (last-epoch) architecture config
    configure2str(arch_genotypes["last"], str(last_config_path))
    logger.log("save the last config into {:} :\n{:}".format(
        last_config_path, arch_genotypes["last"]))

    # among all recorded genotypes, pick the one with the best validation
    # accuracy whose FLOP ratio lies within the tolerated range
    best_arch, valid_acc = arch_genotypes["best"], valid_accuracies["best"]
    for key, config in arch_genotypes.items():
        if key == "last":
            continue
        FLOP_ratio = config["estimated_FLOP"] / MAX_FLOP
        if abs(FLOP_ratio - args.FLOP_ratio) <= args.FLOP_tolerant:
            if valid_acc < valid_accuracies[key]:
                best_arch, valid_acc = config, valid_accuracies[key]
    print("Best-Arch : {:}\nRatio={:}, Valid-ACC={:}".format(
        best_arch, best_arch["estimated_FLOP"] / MAX_FLOP, valid_acc))
    best_config_path = logger.path("log") / "seed-{:}-best.config".format(
        args.rand_seed)
    configure2str(best_arch, str(best_config_path))
    logger.log("save the best config into {:} :\n{:}".format(
        best_config_path, best_arch))

    logger.log("\n" + "-" * 200)
    logger.log(
        "Finish training/validation in {:}, and save final checkpoint into {:}"
        .format(convert_secs2time(epoch_time.sum, True), logger.path("info")))
    logger.close()


if __name__ == "__main__":
    args = obtain_args()
    main(args)
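# ---------------------------------------------------------------------------
# The fragment above calls `configure2str` to persist an architecture config.
# Below is a minimal sketch of what such a serializer could look like,
# ASSUMING configs are plain JSON-serializable dicts; this is an illustration
# (`configure2str_sketch` is a hypothetical name), not the project's actual
# implementation.
import json


def configure2str_sketch(config, xpath):
    """Write `config` to `xpath` as pretty-printed JSON and return the text."""
    text = json.dumps(config, indent=2, sort_keys=True)
    with open(xpath, "w") as stream:
        stream.write(text)
    return text


# Usage sketch (hypothetical values):
#   configure2str_sketch({"estimated_FLOP": 283.6}, "/tmp/seed-1-best.config")
# ---------------------------------------------------------------------------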
                'epoch': epoch,
                'args': deepcopy(args),
                'arch': model_config.arch,
                'state_dict': net.state_dict(),
                'detector': detector.state_dict(),
                'scheduler': scheduler.state_dict(),
                'optimizer': optimizer.state_dict(),
            },
            logger.path('model') / '{:}-{:}.pth'.format(model_config.arch, epoch_str),
            logger)
        # record where the latest checkpoint lives so training can resume
        last_info = save_checkpoint(
            {
                'epoch': epoch,
                'last_checkpoint': save_path,
            }, logger.last_info(), logger)

        # evaluate the detector on every evaluation loader for this epoch
        eval_results = eval_all(args, eval_loaders, detector, criterion,
                                epoch_str, logger, opt_config)

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.close()


if __name__ == '__main__':
    args = obtain_args(True)
    main(args)
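# ---------------------------------------------------------------------------
# The fragment above relies on a `save_checkpoint(state, path, logger)` helper.
# A minimal sketch of such a helper, assuming PyTorch-style serialization and
# a logger exposing `.log()` (`save_checkpoint_sketch` is a hypothetical name;
# the project's real helper may differ):
import torch


def save_checkpoint_sketch(state, save_path, logger):
    """Serialize `state` to `save_path` via torch.save and log the location."""
    torch.save(state, str(save_path))
    logger.log('save checkpoint into {:}'.format(save_path))
    return save_path
# ---------------------------------------------------------------------------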