# Fragment: CLI setup and config post-processing for a training entry point.
# NOTE(review): this chunk starts mid-`add_argument` call and ends inside an
# unterminated f-string; the surrounding code is outside this view.
                  help='config file path (default: None)')
args.add_argument('--resume', help='path to latest checkpoint (default: None)')
args.add_argument('--device', type=str, help="indices of GPUs to enable")
args.add_argument('--mini_train', action="store_true")
args.add_argument('--disable_workers', action="store_true")
args.add_argument('--train_single_epoch', action="store_true")
args.add_argument('--seeds', default="0", help="comma separated list of seeds")
# Dotted path of the callable invoked by breakpoint() (PEP 553).
args.add_argument("--dbg", default="ipdb.set_trace")
args.add_argument(
    '--purge_exp_dir',
    action="store_true",
    help="remove all previous experiments with the given config")
args = ConfigParser(args)  # rebinds `args`: raw flags now live on args._args
os.environ["PYTHONBREAKPOINT"] = args._args.dbg
if args._args.disable_workers:
    # Single-process data loading (useful when stepping through a debugger).
    print("Disabling data loader workers....")
    args["data_loader"]["args"]["num_workers"] = 0
if args._args.train_single_epoch:
    # NOTE(review): "Restring" is likely a typo for "Restricting".
    print("Restring training to a single epoch....")
    args["trainer"]["epochs"] = 1
    args["trainer"]["save_period"] = 1
    args["trainer"]["skip_first_n_saves"] = 0
msg = (
    # NOTE(review): no space between the two f-string pieces, so the message
    # renders "...epochs)to exceed..."; text continues beyond this fragment.
    f"Expected the number of training epochs ({args['trainer']['epochs']})"
    f"to exceed the save period ({args['trainer']['save_period']}), otherwise
# Tail of main(): build optimizer/scheduler and launch training, followed by
# the script entry point. (Original indentation was lost; the first statements
# presumably sat inside main() — TODO confirm against the full file.)
optimizer = config.init_obj('optimizer', torch.optim, trainable_params)
lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler,
                               optimizer)
trainer = Trainer(model, criterion, metrics, optimizer,
                  config=config,
                  device=device,
                  data_loader=data_loader,
                  valid_data_loader=valid_data_loader,
                  lr_scheduler=lr_scheduler)
trainer.train()

if __name__ == '__main__':
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path (default: None)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')
    # custom cli options to modify configuration from default values given in json file.
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = [
        # `target` is a ';'-separated key path into the JSON config.
        CustomArgs(['--lr', '--learning_rate'], type=float,
                   target='optimizer;args;lr'),
        CustomArgs(['--bs', '--batch_size'], type=int,
                   target='data_loader;args;batch_size')
    ]
    config = ConfigParser.from_args(args, options)
    main(config)
# Tail of test(): persist the score matrix next to the checkpoint, plus the
# evaluation entry point. The chunk starts inside a dict literal.
    'meta_info': meta_infos
}
# NOTE(review): the f-string below has no placeholders — a plain
# 'test_score.pt' literal would be equivalent.
save_fname = ckpt_path.parent / f'test_score.pt'
tic = time.time()
logger.info("Saving score matrix: {} ...".format(save_fname))
torch.save(metric, save_fname)
logger.info(f"Done in {time.time() - tic:.3f}s")

if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument('--config', default='configs/ce/test.json', type=str,
                      help='config file path')
    args.add_argument('--resume', default=None, type=str,
                      help='path to checkpoint for test')
    args.add_argument('--device', default=None, type=str,
                      help='indices of GPUs to enable')
    test_config = ConfigParser(args)
    msg = "For evaluation, a model checkpoint must be specified via the --resume flag"
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would enforce this required flag more robustly.
    assert test_config._args.resume, msg
    test(test_config)
# total_metrics[i] += metric(output, target) * batch_size # n_samples = len(data_loader.sampler) # log = {'loss': total_loss / n_samples} # log.update({ # met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns) # }) # logger.info(log) if __name__ == '__main__': args = argparse.ArgumentParser(description='PyTorch Template') args.add_argument('-c', '--config', default=None, type=str, help='config file path (default: None)') args.add_argument('-r', '--resume', default=None, type=str, help='path to latest checkpoint (default: None)') args.add_argument('-d', '--device', default=None, type=str, help='indices of GPUs to enable (default: all)') config = ConfigParser.from_args(args) main(config)
def __init__(
    self,
    model: Module,
    loss_fn: Callable,
    loss_args: Dict[str, Any],
    metric_fns: List[Callable],
    metric_args: List[Dict[str, Any]],
    optimizer: Optimizer,
    config: ConfigParser,
):
    """Set up the trainer from a parsed config.

    Handles device placement (optionally DataParallel), stores loss/metric
    handles with their kwargs, reads the epochs/save/monitor policy from the
    "trainer" section, creates the tensorboard writer, and resumes from a
    checkpoint when `config.resume` is set.
    """
    self.config: ConfigParser = config
    self.logger: Logger = config.get_logger("trainer",
                                            config["trainer"]["verbosity"])

    # Setup GPU device if available.
    self.device: torch.device
    device_ids: List[int]
    self.device, device_ids = self._prepare_device(config["n_gpu"])

    # Move model into configured device(s).
    self.model: Module = model.to(self.device)
    if len(device_ids) > 1:
        # NOTE(review): wraps the original `model` rather than the moved
        # `self.model`; DataParallel replicates onto device_ids itself.
        self.model = DataParallel(model, device_ids=device_ids)

    # Set loss function and arguments.
    self.loss_fn: Callable = loss_fn
    self.loss_args: Dict[str, Any] = loss_args

    # Set all metric functions and associated arguments.
    self.metric_fns: List[Callable] = metric_fns
    self.metric_args: List[Dict[str, Any]] = metric_args

    # Set optimizer.
    self.optimizer: Optimizer = optimizer

    # Set training configuration.
    cfg_trainer: Dict[str, Any] = config["trainer"]
    self.epochs: int = cfg_trainer["epochs"]
    self.save_period: int = cfg_trainer["save_period"]
    self.monitor: str = cfg_trainer.get("monitor", "off")

    # Configuration to monitor model performance and save best.
    if self.monitor == "off":
        self.mnt_mode: str = "off"
        self.mnt_best: float = 0
    else:
        # e.g. "min val_loss" -> comparison mode and monitored metric name.
        self.mnt_metric: str
        self.mnt_mode, self.mnt_metric = self.monitor.split()
        assert self.mnt_mode in ["min", "max"]
        # Start from the worst value for the chosen direction.
        self.mnt_best = inf if self.mnt_mode == "min" else -inf
        self.early_stop: float = cfg_trainer.get("early_stop", inf)

    self.start_epoch: int = 1
    self.checkpoint_dir: Path = config.save_dir

    # Setup visualization writer instance.
    self.writer = TensorboardWriter(config.log_dir, self.logger,
                                    cfg_trainer["tensorboard"])

    if config.resume is not None:
        self._resume_checkpoint(config.resume)
# custom cli options to modify configuration from default values given in json file. CustomArgs = collections.namedtuple('CustomArgs', 'flags type target') options = [ CustomArgs(['--lr', '--learning_rate'], type=float, target=('optimizer', 'args', 'lr')), CustomArgs(['--bs', '--batch_size'], type=int, target=('data_loader', 'args', 'batch_size')), CustomArgs(['--lamb', '--lamb'], type=float, target=('train_loss', 'args', 'lambda')), CustomArgs(['--beta', '--beta'], type=float, target=('train_loss', 'args', 'beta')), CustomArgs(['--percent', '--percent'], type=float, target=('trainer', 'percent')), CustomArgs(['--asym', '--asym'], type=bool, target=('trainer', 'asym')), CustomArgs(['--name', '--exp_name'], type=str, target=('name', )), CustomArgs(['--seed', '--seed'], type=int, target=('seed', )) ] config = ConfigParser.get_instance(args, options) random.seed(config['seed']) torch.manual_seed(config['seed']) torch.cuda.manual_seed_all(config['seed']) main(config)
# Tail of main(): run the joint trainer and log wall-clock duration, plus the
# training entry point.
trainer = Trainer.TrainerJoint(
    model,
    loss,
    optimizer,
    config=config,
    data_loaders=data_loaders,
    lr_scheduler=lr_scheduler,
)
trainer.train()
# NOTE(review): best_ckpt_path is computed but never used in this fragment.
best_ckpt_path = config.save_dir / "trained_model.pth"
duration = time.strftime('%Hh%Mm%Ss', time.gmtime(time.time() - tic))
logger.info(f"Training took {duration}")

if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument('--config', default='configs/ce/train.json', type=str)
    args.add_argument('--logdir', default='base', type=str)
    args.add_argument('--device', default=None, type=str)
    args.add_argument('--resume', default=None, type=str)
    args.add_argument('--seeds', default="0", type=str)
    args = ConfigParser(args, 'train')  # rebinds `args` to the parsed config
    print("Launching experiment with config:")
    print(args)
    main(args)
def main(config: ConfigParser):
    """Train the configured model.

    Builds train/valid/test loaders reflectively from the config, constructs
    four optimizer parameter groups with per-group lr/weight-decay overrides,
    and runs the Trainer.
    """
    logger = config.get_logger('train')

    # Training loader: class looked up by name on the data module.
    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=config['data_loader']['args']['batch_size'],
        shuffle=config['data_loader']['args']['shuffle'],
        validation_split=config['data_loader']['args']['validation_split'],
        num_batches=config['data_loader']['args']['num_batches'],
        training=True,
        num_workers=config['data_loader']['args']['num_workers'],
        pin_memory=config['data_loader']['args']['pin_memory'])
    valid_data_loader = data_loader.split_validation()

    # Test loader uses fixed settings: batch 128, no shuffle, no split.
    test_data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=128,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2).split_validation()

    # build model architecture, then print to console
    model = config.initialize('arch', module_arch)

    train_loss = getattr(module_loss, config['train_loss'])
    val_loss = getattr(module_loss, config['val_loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]
    # Logs only the last line of the model repr.
    logger.info(str(model).split('\n')[-1])

    # build optimizer, learning rate scheduler.
    # delete every lines containing lr_scheduler for disabling scheduler
    # Four parameter groups: (1) default params; (2) bin gates at 10x lr,
    # no weight decay; (3) srelu biases, no decay; (4) bin thetas at base
    # lr, no decay. Membership is flagged via attributes set on the tensors.
    trainable_params = [{
        'params': [
            p for p in model.parameters()
            if (not getattr(p, 'bin_gate', False)) and (
                not getattr(p, 'bin_theta', False)) and (
                    not getattr(p, 'srelu_bias', False))
            and getattr(p, 'requires_grad', False)
        ]
    }, {
        'params': [
            p for p in model.parameters()
            if getattr(p, 'bin_gate', False)
            and getattr(p, 'requires_grad', False)
        ],
        'lr': config['optimizer']['args']['lr'] * 10,
        'weight_decay': 0
    }, {
        'params': [
            p for p in model.parameters()
            if getattr(p, 'srelu_bias', False)
            and getattr(p, 'requires_grad', False)
        ],
        'weight_decay': 0
    }, {
        'params': [
            p for p in model.parameters()
            if getattr(p, 'bin_theta', False)
            and getattr(p, 'requires_grad', False)
        ],
        'lr': config['optimizer']['args']['lr'],
        'weight_decay': 0
    }]
    optimizer = config.initialize('optimizer', torch.optim, trainable_params)
    lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler,
                                     optimizer)

    trainer = Trainer(model, train_loss, metrics, optimizer,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      test_data_loader=test_data_loader,
                      lr_scheduler=lr_scheduler,
                      val_criterion=val_loss)
    trainer.train()
    # NOTE(review): the two statements below run after training and their
    # results are never used in this fragment.
    logger = config.get_logger('trainer', config['trainer']['verbosity'])
    cfg_trainer = config['trainer']
# Tail of a file-dispatch helper plus the Deep-MVLM entry point. The chunk
# starts inside an if/elif chain whose opening condition is outside this view.
    process_file_list(config, name)
elif os.path.isdir(name):
    process_files_in_dir(config, name)
else:
    print(
        'Cannot process (not a mesh file, a filelist (.txt) or a directory)',
        name)

if __name__ == '__main__':
    args = argparse.ArgumentParser(description='Deep-MVLM')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')
    args.add_argument(
        '-n',
        '--name',
        default=None,
        type=str,
        help='name of file, filelist (.txt) or directory to be processed')
    global_config = ConfigParser(args)
    main(global_config)
def pred(self, paths, metas, m_cfg, id):
    """Start an asynchronous prediction job for `paths`/`metas` using the
    model described by `m_cfg`, returning a Response status object.

    Dispatches on the model-type name embedded in m_cfg["name"] (the token
    after the first '-'): segmentation models (Deeplab/UNet) or CycleGAN.
    Returns res.code 0 on success, -1 on any exception, -2 on length
    mismatch. NOTE(review): `id` shadows the builtin of the same name.
    """
    print('pred')
    self.cfg = m_cfg
    res = Response()
    if len(paths) != len(metas):
        res.code = -2
        res.msg = "The length of images and meta is not same."
        return res
    # if self.pred_th is not None:
    #     if self.pred_th.is_alive():
    #         res.code = -3
    #         res.msg = "There is a task running, please wait it finish."
    #         return res
    try:
        m_typename = m_cfg["name"].split("-")[1]
        if m_typename == "Deeplab" or m_typename == "UNet":
            from .predthread import SegPredThread
            # NOTE(review): device is hard-coded to cuda:0 when any GPU is
            # requested, and set_grad_enabled(False) flips a process-wide
            # autograd switch — confirm both are intended here.
            self.device = torch.device(
                'cuda:0' if self.n_gpu_use > 0 else 'cpu')
            torch.set_grad_enabled(False)
            m_cfg["save_dir"] = str(self.tmp_path)
            config = ConfigParser(m_cfg, Path(m_cfg["path"]))
            self.logger = config.get_logger('PredServer')
            self.model = config.init_obj('arch', module_arch)
            self.logger.info('Loading checkpoint: {} ...'.format(
                config.resume))
            if self.n_gpu_use > 1:
                self.model = torch.nn.DataParallel(self.model)
            # Load on CPU when no GPU is in use.
            if self.n_gpu_use > 0:
                checkpoint = torch.load(config.resume)
            else:
                checkpoint = torch.load(config.resume,
                                        map_location=torch.device('cpu'))
            state_dict = checkpoint['state_dict']
            self.model.load_state_dict(state_dict)
            self.model = self.model.to(self.device)
            self.model.eval()
            if "crop_size" in config["tester"]:
                self.crop_size = config["tester"]["crop_size"]
            # Optional CRF-style postprocessor, built reflectively.
            if 'postprocessor' in config["tester"]:
                module_name = config["tester"]['postprocessor']['type']
                module_args = dict(
                    config["tester"]['postprocessor']['args'])
                self.postprocessor = getattr(postps_crf,
                                             module_name)(**module_args)
            self.tmp_path.mkdir(parents=True, exist_ok=True)
            self.pred_ths.append(
                SegPredThread(self, paths, metas, self.tmp_path, id))
        elif m_typename == "CycleGAN":
            from .predthread import CycleGANPredThread
            from model import CycleGANOptions, CycleGANModel
            # config = ConfigParser(m_cfg, Path(m_cfg["path"]))
            opt = CycleGANOptions(**m_cfg["arch"]["args"])
            opt.batch_size = self.batch_size
            opt.serial_batches = True
            opt.no_flip = True  # no flip;
            # no visdom display; the test code saves the results to a HTML file.
            opt.display_id = -1
            opt.isTrain = False
            opt.gpu_ids = []
            for i in range(0, self.n_gpu_use):
                opt.gpu_ids.append(i)
            opt.checkpoints_dir = str(self.tmp_path)
            opt.preprocess = "none"
            opt.direction = 'AtoB'
            self.model = CycleGANModel(opt)
            # Temporarily clear save_dir so load_networks resolves the
            # checkpoint path from m_cfg["path"] directly.
            orig_save_dir = self.model.save_dir
            self.model.save_dir = ""
            self.model.load_networks(m_cfg["path"])
            self.model.save_dir = orig_save_dir
            torch.set_grad_enabled(False)
            self.model.set_requires_grad(
                [self.model.netG_A, self.model.netG_B], False)
            self.pred_ths.append(
                CycleGANPredThread(self, paths, metas, self.tmp_path, id))
        else:
            raise NotImplementedError("Model type:", m_typename,
                                      "is not supported.")
        print('NotifyStartThread')
        self.pred_ths[-1].start()
        # self.pred_th.is_alive()
    except Exception as e:
        # Broad catch is deliberate: this is a service boundary and errors
        # are reported to the caller via the Response object.
        res.code = -1
        res.msg = str(e)
        return res
    res.code = 0
    res.msg = "Success"
    return res
# Evaluation entry-point fragment; ends mid-call to initialize_bert_model
# (its remaining arguments are outside this view).
if __name__ == '__main__':
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')
    config = ConfigParser(args)
    logger = config.get_logger('test')

    # setup data_loader instances
    processor = config.initialize('processor', module_processor, logger,
                                  config)

    # build model architecture, then print to console
    # Use an explicit BERT config file when one is supplied; otherwise fall
    # back to the config's bert-model initializer.
    if config.bert_config_path:
        bert_config = BertConfig(config.bert_config_path)
        model = config.initialize('arch', module_arch, config=bert_config,
                                  num_labels=processor.nums_label())
    else:
        model = config.initialize_bert_model('arch', module_arch,
def parse(self):
    """Register static and dynamic CLI arguments, then build the config.

    Returns the ConfigParser produced from both argument groups.
    """
    # Populate both argument groups before handing them to the parser.
    self.add_static_arguments()
    self.add_dynamic_arguments()
    return ConfigParser.from_args(self.static_arguments,
                                  self.dynamic_arguments)
# Tail of an evaluation main(): build loader/model/trainer and run eval,
# plus the Semantic Segmentation entry point.
val_data_loader = config.init_obj('val_data_loader', module_data_loader,
                                  image_transforms=val_transform,
                                  target_transforms=target_transforms)
criterion = config.init_obj('supervised_loss', module_loss)
metrics = [getattr(module_metric, x) for x in config['metrics']]
model = config.init_obj('model', module_model)
optimizer = config.init_obj('optimizer', module_optimizer,
                            model.parameters())
lr_scheduler = config.init_obj('lr_scheduler', module_lr_scheduler,
                               optimizer)
logger.info(model)

# Dispatch on the trainer name declared in the config.
if config['trainer']['name'] == 'SegmentationTrainer':
    trainer = SegmentationTrainer(model, criterion, metrics, optimizer,
                                  config, lr_scheduler)
elif config['trainer']['name'] == 'AdversarialTrainer':
    trained_model = config.init_obj('pre_trained', module_model)
    trainer = AdversarialTrainer(model, trained_model, criterion, metrics,
                                 optimizer, config, lr_scheduler)
else:
    raise NotImplementedError("Unsupported trainer")

# Only a validation loader is supplied; train/test slots are left empty.
trainer.setup_loader(None, val_data_loader, None)
trainer.eval(save_result=False, save_for_visual=True)

if __name__ == '__main__':
    args_parser = argparse.ArgumentParser(description='Semantic Segmentation')
    args_parser.add_argument('-c', '--config', default='config.json',
                             type=str,
                             help='config file path, default: config.json')
    args = args_parser.parse_args()
    config = ConfigParser(args.config)
    main(config)
# transformation and registration modules transformation_module, registration_module = config.init_transformation_and_registration_modules( ) # losses metrics = config.init_metrics() # run the model trainer = Trainer(config, data_loader, losses, transformation_module, registration_module, metrics) trainer.run() if __name__ == '__main__': # parse arguments parser = argparse.ArgumentParser(description='MCMC') parser.add_argument('-c', '--config', default=None, type=str, help='config file path (default: None)') args = parser.parse_args() # config timestamp = datetime.now().strftime(r'%m%d_%H%M%S') config = ConfigParser.from_args(parser, timestamp=timestamp) # run the model run(config)
# S5p super-resolution entry fragment: CLI setup plus config-driven launch.
parser = argparse.ArgumentParser(description='S5p Superres')
# NOTE(review): test sequences are purged before any arguments are parsed,
# so even `--help` deletes data/seqs/test — confirm this ordering is intended.
clean_files('data/seqs/test')

# Training settings
parser.add_argument('-c', '--config', default=None, type=str,
                    help='config file path (default: None)')
parser.add_argument('-r', '--resume', default=None, type=str,
                    help='path to latest checkpoint (default: None)')
parser.add_argument('-d', '--device', default=None, type=str,
                    help='indices of GPUs to enable (default: all)')

# custom cli options to modify configuration from default values given in json file.
CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
options = [
    CustomArgs(['--lr', '--learning_rate'], type=float,
               target='optimizer;args;lr'),
    CustomArgs(['--bs', '--batch_size'], type=int,
               target='data_loader;args;batch_size')
]
config = ConfigParser.from_args(parser, options)
main(config)
# Flask serving script setup: imports, app construction, checkpoint loading,
# and (truncated) per-GPU model replication. Fragment ends mid-loop.
import tensorflow as tf
from tensorflow.keras.utils import Progbar
from datas.list_generator import ListGenerator
from language_model.char_rnn_lm import CharRnnLmWrapperSingleton
from lip_model.training_graph import TransformerTrainGraph
from lip_model.inference_graph import TransformerInferenceGraph
import json
import shutil
import threading
import copy
import queue

app = Flask(__name__)
args = argparse.ArgumentParser()
config = ConfigParser(args)
model = config.init('arch', module_arch)
logger = config.get_logger('test')

# Load pretrained KWS-Net weights from a fixed relative path.
tic = time.time()
with open(os.path.join('./misc/pretrained_models', 'KWS_Net.pth'),
          'rb') as f:
    checkpoint = torch.load(f)
state_dict = canonical_state_dict_keys(checkpoint['state_dict'])
model.load_state_dict(state_dict)
logger.info(f"Finished loading ckpt in {time.time() - tic:.3f}s")
logger.info(f"CUDA device count: {torch.cuda.device_count()}")

# One model replica per visible CUDA device.
device_count = torch.cuda.device_count()
models = []
for device_ind in range(device_count):
    device = f"cuda:{device_ind}"
# Attention-plot tail and test entry point; chunk starts mid plotting call.
                yticklabels=tgt_seq)
fig.xaxis.set_label_position('top')
fig.figure.show()

if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument('-c', '--config', default="config.json", type=str,
                      help='config file path (default: None)')
    # NOTE(review): the help text below is missing its closing parenthesis
    # ("(default: None") — cosmetic CLI typo.
    args.add_argument('--model-path', type=str, required=True,
                      help='path to model.pth to test (default: None')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-de', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')
    # custom cli options to modify configuration from default values given in json file.
    # NOTE(review): CustomArgs is defined but no options list is built from
    # it here — dead definition, presumably left over from the template.
    CustomArgs = collections.namedtuple('CustomArgs',
                                        'flags type target help')
    config, args = ConfigParser.from_args(args)
    main(config, args.model_path)
_, label = predict( data['query'], data['target']) # 从json数据里面读取text字段,生成返回 response = {'label': list(map(int, label))} except (KeyError, TypeError, ValueError): # 捕获数据类型异常 raise JsonError(description='Invalid value.') # 将异常反馈会调用 return response # 正常返回,这个response的内容会被转成json格式 if __name__ == '__main__': args = argparse.ArgumentParser(description='PyTorch Template') args.add_argument('-r', '--resume', default=None, type=str, help='path to latest checkpoint (default: None)') args.add_argument('-d', '--device', default=None, type=str, help='indices of GPUs to enable (default: all)') config = ConfigParser(args) logger = config.get_logger('test') # setup data_loader instances processor = config.initialize( 'processor', module_processor, logger, config) # build model architecture, then print to console model = config.initialize( 'arch', module_arch, vocab_size=processor.vocab_size, num_labels=processor.nums_label()) # logger.info(model) agent = Agent(model, config=config) app.run(host='0.0.0.0', port=5000, debug=True)
def main(config: ConfigParser):
    """Training entry for a noisy-label setup.

    Builds one training loader and a fixed test loader (validation is
    disabled), a loss parameterized by the raw example count, and runs the
    Trainer.
    """
    logger = config.get_logger('train')

    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=config['data_loader']['args']['batch_size'],
        shuffle=config['data_loader']['args']['shuffle'],
        validation_split=config['data_loader']['args']['validation_split'],
        num_batches=config['data_loader']['args']['num_batches'],
        training=True,
        num_workers=config['data_loader']['args']['num_workers'],
        pin_memory=config['data_loader']['args']['pin_memory'])

    # Validation deliberately disabled in this variant.
    # valid_data_loader = data_loader.split_validation()
    valid_data_loader = None

    # test_data_loader = None
    test_data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=128,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2)  # .split_validation()

    # build model architecture, then print to console
    model = config.initialize('arch', module_arch)

    # get function handles of loss and metrics
    logger.info(config.config)
    # Prefer the raw example count when the dataset exposes it; the loss
    # below is sized by number of examples.
    if hasattr(data_loader.dataset, 'num_raw_example'):
        num_examp = data_loader.dataset.num_raw_example
    else:
        num_examp = len(data_loader.dataset)
    train_loss = getattr(module_loss, config['train_loss']['type'])(
        num_examp=num_examp,
        num_classes=config['num_classes'],
        beta=config['train_loss']['args']['beta'])
    val_loss = getattr(module_loss, config['val_loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build optimizer, learning rate scheduler.
    # delete every lines containing lr_scheduler for disabling scheduler
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.initialize('optimizer', torch.optim, [{
        'params': trainable_params
    }])
    lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler,
                                     optimizer)

    trainer = Trainer(model, train_loss, metrics, optimizer,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      test_data_loader=test_data_loader,
                      lr_scheduler=lr_scheduler,
                      val_criterion=val_loss)
    trainer.train()
    # NOTE(review): the results of the two statements below are never used
    # in this fragment.
    logger = config.get_logger('trainer', config['trainer']['verbosity'])
    cfg_trainer = config['trainer']
# Tail of evaluation(): per-word score ranking and retrieval metrics, plus
# the evaluation entry point. Chunk starts inside a per-word loop whose
# header is outside this view.
Iscores = np.argsort(-scores_word)  # indices in descending-score order
labels_word = labels_word[Iscores]
recall_k_all_words_1.append(
    recall_at_k(labels_word, 1, original_labels_word))
recall_k_all_words_5.append(
    recall_at_k(labels_word, 5, original_labels_word))
recall_k_all_words_10.append(
    recall_at_k(labels_word, 10, original_labels_word))

print("R@1 {}".format(np.mean(np.asarray(recall_k_all_words_1))))
print("R@5 {}".format(np.mean(np.asarray(recall_k_all_words_5))))
print("R@10 {}".format(np.mean(np.asarray(recall_k_all_words_10))))
print("mAP {}".format(np.mean(np.asarray(map_all_words))))
print("EER {}".format(eer))

if __name__ == '__main__':
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('--config', default=None, type=str,
                      help="config file path")
    args.add_argument(
        '--resume',
        help='path to checkpoint for evaluation',
        default="data/lili-ckpts/localization_loss_bilstm.pth.tar")
    args.add_argument('--device', type=str, help="indices of GPUs to enable")
    eval_config = ConfigParser(args)
    msg = "For evaluation, a model checkpoint must be specified via the --resume flag"
    # NOTE(review): --resume has a non-empty default, so this assert can only
    # fire when the default is explicitly overridden with an empty value.
    assert eval_config._args.resume, msg
    evaluation(eval_config)
# Entry-point fragment for an evaluation script with enum-typed options.
args.add_argument('-c', '--config', default=None, type=str,
                  help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str,
                  help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str,
                  help='indices of GPUs to enable (default: all)')
args.add_argument(
    '-e',
    '--evaluate',
    default=Evaluate.TEST,
    # Enum class used as the argparse type: constructs the member from the
    # CLI string.
    type=Evaluate,
    help=
    'Either "training" or "test"; Determines the prefix of the folders to use'
)
args.add_argument('-m', '--dataset_type', default=Dataset.ISBI, type=Dataset,
                  help='Dataset to use')
config = ConfigParser(*parse_cmd_args(args))
main(config)
# Taxonomy-evaluation entry fragment: boolean flags, then load-or-compute
# the optimal and model-based taxonomies. Chunk starts mid add_argument call.
    help='if set, we compute optimal taxonomy (default: True)')
# NOTE(review): argparse `type=bool` makes ANY non-empty string truthy
# ("--model False" is still True); store_true or a str2bool converter is the
# usual fix.
args.add_argument(
    '-m',
    '--model',
    default=True,
    type=bool,
    help='if set, we compute model-based taxonomy (default: True)')
args.add_argument(
    '-i',
    '--insert',
    default=True,
    type=bool,
    help='if set, we do sequential insertion evaluation (default: True)')
args_outer = args.parse_args()
config = ConfigParser(args)

if args_outer.optimal:
    vocab_optimal, T_opt = get_optimal_ordering(config, args_outer)
else:
    # NOTE(review): pickle.load on a user-supplied path executes arbitrary
    # code if the file is untrusted.
    with open(args_outer.optimal_taxo_path, "rb") as f:
        T_opt = pickle.load(f)
    # Vocabulary ordered by a topological sort of the taxonomy DAG.
    vocab_optimal = list(nx.topological_sort(T_opt))

if args_outer.model:
    vocab_model, T_model = get_insertion_ordering(config, args_outer)
else:
    with open(args_outer.model_taxo_path, "rb") as f:
        T_model = pickle.load(f)
if __name__ == '__main__':
    # Entry point: standard template CLI plus two config-override shortcuts.
    parser = argparse.ArgumentParser(description='PyTorch Template')
    # All three base options share default-less str typing; register them
    # from one table to keep flag/help pairs adjacent.
    base_options = (
        (('-c', '--config'), 'config/ae_config.json',
         'config file path (default: None)'),
        (('-r', '--resume'), None,
         'path to latest checkpoint (default: None)'),
        (('-d', '--device'), None,
         'indices of GPUs to enable (default: all)'),
    )
    for flags, default_value, help_text in base_options:
        parser.add_argument(*flags, default=default_value, type=str,
                            help=help_text)

    # custom cli options to modify configuration from default values given in json file.
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = [
        CustomArgs(['--lr', '--learning_rate'], type=float,
                   target=('optimizer', 'args', 'lr')),
        CustomArgs(['--bs', '--batch_size'], type=int,
                   target=('data_loader', 'args', 'batch_size')),
    ]
    main(ConfigParser(parser, options))
# Tail of main(): finish constructing the trainer and train, plus the entry
# point with short-flag config overrides. Chunk starts mid Trainer(...) call.
                  train_loader,
                  test_loader=test_loader,
                  scheduler=scheduler)
trainer.train()

if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument('-c', '--config', default='config.yaml', type=str,
                      help='config file path (default: None)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = [
        CustomArgs(['-l', '--learning_rate'], type=float,
                   target=('optimizer', 'args', 'lr')),
        CustomArgs(['-b', '--batch_size'], type=int,
                   target=('data_loader', 'args', 'batch_size'))
    ]
    config_parser = ConfigParser(args, options)
    main(config_parser)
# Tail of a training loop: early stopping with a text-file summary, then the
# latent-classifier entry point. Chunk starts inside the epoch loop of a
# function whose header is outside this view.
if n_not_improve >= early_stop:
    print(
        "Performance has not improved for %d epochs, stop training." %
        early_stop)
    print("The best score: %f" % best_score)
    txt_file.write("Epoch %d \n" % epoch)
    txt_file.write("Train score: %f, Train loss: %f \n" %
                   (train_score, train_loss))
    txt_file.write("Valid score: %f, Valid loss: %f \n" %
                   (valid_score, valid_loss))
    txt_file.write("Best score: %f, Best loss: %f" %
                   (best_score, best_loss))
    txt_file.close()
    break
return best_score

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', default=None, type=str)
    parser.add_argument('-r', '--resume', default=None, type=str)
    parser.add_argument('-t', '--target', default=None, type=str)
    args = parser.parse_args()
    # NOTE(review): assert is stripped under `python -O`; argparse
    # `choices=[...]` would enforce the allowed targets more robustly.
    assert args.target in ['instrument', 'pitch', 'family', 'dynamic']
    config = ConfigParser.from_args(args, testing=True)
    latent_classifer(config, args.target)
def main(config: ConfigParser):
    """Co-training entry point.

    Builds two training loaders (differing only in batch size), two models
    (each with an EMA copy and a spare), per-model losses and optimizers,
    and runs them jointly through a single Trainer.
    """
    logger = config.get_logger('train')
    logger.info(config.config)

    # setup data_loader instances
    data_loader1 = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=config['data_loader']['args']['batch_size'],
        shuffle=config['data_loader']['args']['shuffle'],
        validation_split=config['data_loader']['args']['validation_split'],
        num_batches=config['data_loader']['args']['num_batches'],
        training=True,
        num_workers=config['data_loader']['args']['num_workers'],
        pin_memory=config['data_loader']['args']['pin_memory'])
    # Second loader differs only in batch size (batch_size2).
    data_loader2 = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=config['data_loader']['args']['batch_size2'],
        shuffle=config['data_loader']['args']['shuffle'],
        validation_split=config['data_loader']['args']['validation_split'],
        num_batches=config['data_loader']['args']['num_batches'],
        training=True,
        num_workers=config['data_loader']['args']['num_workers'],
        pin_memory=config['data_loader']['args']['pin_memory'])
    valid_data_loader = data_loader1.split_validation()

    test_data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=128,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2).split_validation()

    # build model architecture: each network plus an EMA copy and a spare.
    model1 = config.initialize('arch1', module_arch)
    model_ema1 = config.initialize('arch1', module_arch)
    model_ema1_copy = config.initialize('arch1', module_arch)
    model2 = config.initialize('arch2', module_arch)
    model_ema2 = config.initialize('arch2', module_arch)
    model_ema2_copy = config.initialize('arch2', module_arch)

    # get function handles of loss and metrics
    # NOTE(review): with zero visible CUDA devices this list is empty and
    # device_id[0]/device_id[-1] below raise IndexError — GPU assumed.
    device_id = list(range(min(torch.cuda.device_count(), config['n_gpu'])))
    if hasattr(data_loader1.dataset, 'num_raw_example') and hasattr(
            data_loader2.dataset, 'num_raw_example'):
        num_examp1 = data_loader1.dataset.num_raw_example
        num_examp2 = data_loader2.dataset.num_raw_example
    else:
        num_examp1 = len(data_loader1.dataset)
        num_examp2 = len(data_loader2.dataset)
    # Each loss is pinned to one end of the visible-device list.
    train_loss1 = getattr(module_loss, config['train_loss']['type'])(
        num_examp=num_examp1,
        num_classes=config['num_classes'],
        device='cuda:' + str(device_id[0]),
        config=config.config,
        beta=config['train_loss']['args']['beta'])
    train_loss2 = getattr(module_loss, config['train_loss']['type'])(
        num_examp=num_examp2,
        num_classes=config['num_classes'],
        device='cuda:' + str(device_id[-1]),
        config=config.config,
        beta=config['train_loss']['args']['beta'])
    val_loss = getattr(module_loss, config['val_loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build optimizer, learning rate scheduler.
    # delete every lines containing lr_scheduler for disabling scheduler
    trainable_params1 = filter(lambda p: p.requires_grad,
                               model1.parameters())
    trainable_params2 = filter(lambda p: p.requires_grad,
                               model2.parameters())
    optimizer1 = config.initialize('optimizer1', torch.optim, [{
        'params': trainable_params1
    }])
    optimizer2 = config.initialize('optimizer2', torch.optim, [{
        'params': trainable_params2
    }])
    lr_scheduler1 = config.initialize('lr_scheduler',
                                      torch.optim.lr_scheduler, optimizer1)
    lr_scheduler2 = config.initialize('lr_scheduler',
                                      torch.optim.lr_scheduler, optimizer2)

    trainer = Trainer(model1, model2, model_ema1, model_ema2,
                      train_loss1, train_loss2,
                      metrics,
                      optimizer1, optimizer2,
                      config=config,
                      data_loader1=data_loader1,
                      data_loader2=data_loader2,
                      valid_data_loader=valid_data_loader,
                      test_data_loader=test_data_loader,
                      lr_scheduler1=lr_scheduler1,
                      lr_scheduler2=lr_scheduler2,
                      val_criterion=val_loss,
                      model_ema1_copy=model_ema1_copy,
                      model_ema2_copy=model_ema2_copy)
    trainer.train()
    # NOTE(review): the results of the two statements below are never used
    # in this fragment.
    logger = config.get_logger('trainer', config['trainer']['verbosity'])
    cfg_trainer = config['trainer']
# Tail of a test main(): normalize accumulated loss/metrics by sample count
# and log them, plus the entry point.
n_samples = len(data_loader.sampler)
log = {'loss': total_loss / n_samples}
log.update({
    met.__name__: total_metrics[i].item() / n_samples
    for i, met in enumerate(metric_fns)
})
logger.info(log)

if __name__ == '__main__':
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path (default: None)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')
    # Singleton accessor variant of the template's constructor.
    config = ConfigParser.get_instance(args, '')
    # config = ConfigParser(args)
    main(config)
# Tail of a latent-interpolation helper: decode evenly spaced codes between
# start_code and end_code into image grids and write a GIF, plus the entry
# point. Chunk starts inside a function whose header is outside this view.
output_list = []
with torch.no_grad():
    # i sweeps 0..1; code is the linear blend of the two latent codes.
    for i in torch.linspace(0, 1, total_steps):
        code = i * end_code + (1 - i) * start_code
        output = model(z=code).sigmoid().cpu()
        output_list.append(make_grid(output, nrow=4))
gif(filename, output_list)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='PyTorch Template')
    parser.add_argument('filename', type=str)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('-t', default=5, type=float)
    parser.add_argument('-r', '--resume', default=None, type=str,
                        help='path to latest checkpoint (default: None)')
    parser.add_argument('-d', '--device', default=None, type=str,
                        help='indices of GPUs to enable (default: all)')
    args = parser.parse_args()
    config = ConfigParser(parser)
    # NOTE(review): seeding happens after ConfigParser construction —
    # confirm no randomness is consumed before this point.
    torch.manual_seed(args.seed)
    main(config, args.resume, args.t, args.filename)
# Trailing trainer.train() from the previous fragment, then the fold-aware
# sleep-staging training entry point.
trainer.train()

if __name__ == '__main__':
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-c', '--config', default="config.json", type=str,
                      help='config file path (default: None)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d', '--device', default="0", type=str,
                      help='indices of GPUs to enable (default: all)')
    args.add_argument('-f', '--fold_id', type=str, help='fold_id')
    args.add_argument('-da', '--np_data_dir', type=str,
                      help='Directory containing numpy files')
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = []
    args2 = args.parse_args()
    fold_id = int(args2.fold_id)
    config = ConfigParser.from_args(args, fold_id, options)
    # SHHS datasets use a dedicated fold loader.
    if "shhs" in args2.np_data_dir:
        folds_data = load_folds_data_shhs(
            args2.np_data_dir, config["data_loader"]["args"]["num_folds"])
    else:
        folds_data = load_folds_data(
            args2.np_data_dir, config["data_loader"]["args"]["num_folds"])
    main(config, fold_id)
# Tail of a BU-3DFE batch-test loop, the main() wrapper, and the Deep-MVLM
# entry point. Chunk starts inside a progress-print call.
        str(datetime.timedelta(seconds=time_left)))
else:
    print('File', wrl_name, ' does not exists')


def main(config):
    """Run the BU-3DFE evaluation described by `config`."""
    test_on_bu_3d_fe(config)


if __name__ == '__main__':
    args = argparse.ArgumentParser(description='Deep-MVLM')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path (default: None)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')
    cfg_global = ConfigParser(args)
    main(cfg_global)