def __init__(self) -> None:
    # Wire up logging, the API, and the shared metadata manager, then enter
    # the main processing loop immediately.
    self._logger = create_logger()
    self._create_api()
    # Singleton accessor — presumably all manager instances share one
    # MetadataManager; confirm against MetadataManager.get_instance().
    self.metadata_manager = MetadataManager.get_instance()
    # Flag read by run(); setting it False elsewhere stops the loop.
    self.run_loop = True
    self._logger.info("Manager set up and waiting for data.")
    # NOTE(review): calling run() from __init__ blocks construction until the
    # loop exits — confirm callers expect the constructor not to return.
    self.run()
def __init__(self,
             working_directories: Dict[Type[theory_base.TheoryBase], str],
             keep_full_history: bool = False):
    """
    Set up a RoboScientist run: logging, result bookkeeping, and (optionally)
    full per-pair history tracking.

    :param working_directories: Mapping from theory classes to working
        directory. Current directory will be changed accordingly.
    :param keep_full_history: whether to keep the full history for each
        (env, theory) pair or not
    """
    self._logger = logger_config.create_logger('rs')
    # Best theory found so far, keyed by (environment class, theory class):
    # Dict[Tuple[Type[env_base.EnvironmentBase], Type[theory_base.TheoryBase]], theory_base.TheoryBase]
    self._best_results = {}
    self._logger.info((
        'Creating RoboScientist object with the following configurations:\n'
        'working directories: {}\nkeep_full_history: {}').format(
            working_directories, keep_full_history))
    self._working_directories = working_directories
    self._keep_full_history = keep_full_history
    # History per (env, theory) pair when enabled; None disables tracking
    # entirely (callers must check before appending).
    self._history = {} if self._keep_full_history else None
    # --- tail of get_parser(); the def line and earlier options are above this chunk ---
    parser.add_argument("-p", type=int, default=1)
    parser.add_argument("--ntrain", type=int, default=1000)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument(
        "--root_data", type=str,
        default="/private/home/asablayrolles/code/projects/whitebox_blackbox/")
    # boolify is a project helper — presumably parses truthy/falsy CLI strings;
    # verify its semantics where it is defined.
    parser.add_argument("--normed", type=boolify, default=True)
    return parser


if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    logger = create_logger()
    # Dedicated RandomState so the shuffle below is reproducible from --seed.
    state = np.random.RandomState(args.seed)

    # Loading data
    X = np.load(join(args.root_data, "cifar10_cnn.npy"))
    label = np.load(join(args.root_data, "cifar10_label.npy")).astype(int).ravel()
    if args.normed:
        # L2-normalize each feature row in place.
        X /= np.linalg.norm(X, axis=1, keepdims=True)

    # Shuffling data: keep only samples of classes 8 and 9, permute them,
    # and build a binary target (1 for class 9, 0 for class 8).
    indices = np.nonzero(label >= 8)[0]
    indices = indices[state.permutation(indices.shape[0])]
    X = X[indices]
    y = (label[indices] == 9).astype(int)
# random seed — make NumPy and torch deterministic from the config before any
# stochastic work happens.
import torch
import numpy as np
np.random.seed(cfg["seed"])
torch.manual_seed(cfg["seed"])

assert cfg["cuda"], "only support gpu"

# set env — FIX: CUDA_VISIBLE_DEVICES must be exported BEFORE the first CUDA
# call; torch.cuda.manual_seed() lazily initializes the CUDA context, so the
# original ordering (env var set afterwards) never restricted device
# visibility.
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, cfg["gpu_ids"]))

if cfg["cuda"]:
    torch.cuda.manual_seed(cfg["seed"])

import time
timestamp = time.strftime('%Y-%m-%d-%H-%M')

# create logger
import logging
from lib.logger import create_logger
_ = create_logger(cfg["output_path"], cfg["comment"], timestamp)

# backup config — copy the config file next to the run outputs, tagged with
# the run timestamp, so the run stays reproducible.
import shutil
shutil.copyfile(
    args.cfg,
    os.path.join(cfg["output_path"],
                 "{}_{}".format(timestamp, cfg["config_fn"])))

from optimizer.adam import Adam
from eval import cal_avg_auc
from lib.user_pigeonhole import save_tag


def parse_args():
    """Parse command-line arguments; the only option is --cfg (config path)."""
    parser = argparse.ArgumentParser(description='Train setting')
    # The command line carries the location of the config file.
    parser.add_argument('--cfg',
                        help='configure file name',
                        required=True,
                        type=str)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    # Refresh the global cfg from the config file given on the command line.
    update_config(cfg, args)
    logger, _ = create_logger(cfg)
    logger.info(cfg)
    if cfg.TYPE == 'tag':
        for tag in cfg.TAG:
            save_tag(cfg, str(tag))
    # NOTE(review): 'USER' is upper-case while 'tag' is lower-case — confirm
    # the config files really use this mixed casing, otherwise this branch
    # can never match.
    elif cfg.TYPE == 'USER':
        for user in cfg.USER:
            person_blog(cfg, str(user))
    else:
        logger.info('=> failed in cfg TYPE. Please enter correct configure.')