import logging
from pathlib import Path
from typing import Tuple

import torch

# NOTE: the project-relative imports below follow the layout this code appears
# to come from (a pytorch-template style repo); adjust the module paths if your
# tree differs.
import data_loader.data_loaders as module_data
import model.model as module_arch
from parse_config import ConfigParser
from utils.util import compute_dims, compute_trn_config


def get_model_and_data_loaders(
        config: ConfigParser,
        logger: logging.Logger,
        ckpt_path: Path,
) -> Tuple[torch.nn.Module, module_data.ExpertDataLoader]:
    expert_dims, raw_input_dims, text_dim = compute_dims(config)
    data_loaders = config.init(
        name='data_loader',
        module=module_data,
        logger=logger,
        raw_input_dims=raw_input_dims,
        challenge_mode=config.get("challenge_mode", False),
        text_dim=text_dim,
        text_feat=config["experts"]["text_feat"],
        text_agg=config["experts"]["text_agg"],
        use_zeros_for_missing=config["experts"].get("use_zeros_for_missing", False),
        task=config.get("task", "retrieval"),
        eval_only=True,
        distil_params=config.get("distil_params", None),
        training_file=config.get("training_file", None),
        caption_masks=config.get("caption_masks", None),
        ce_shared_dim=config["experts"].get("ce_shared_dim", None),
    )
    trn_config = compute_trn_config(config)
    model = config.init(
        name='arch',
        module=module_arch,
        trn_config=trn_config,
        expert_dims=expert_dims,
        text_dim=text_dim,
        disable_nan_checks=config["disable_nan_checks"],
        task=config.get("task", "retrieval"),
        ce_shared_dim=config["experts"].get("ce_shared_dim", None),
        feat_aggregation=config["data_loader"]["args"]["feat_aggregation"],
        trn_cat=config["data_loader"]["args"].get("trn_cat", 0),
    )
    # Use the checkpoint path supplied by the caller. The original body
    # immediately reassigned ckpt_path = config._args.resume, which made the
    # ckpt_path argument dead; that shadowing is removed here.
    logger.info(f"Loading checkpoint: {ckpt_path} ...")
    checkpoint = torch.load(ckpt_path)
    state_dict = checkpoint['state_dict']

    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)

    # Support backwards compatibility: drop weights belonging to modules that
    # no longer exist in the current architecture before loading.
    deprecated = ["ce.moe_fc_bottleneck1", "ce.moe_cg", "ce.moe_fc_proj"]
    for mod in deprecated:
        for suffix in ("weight", "bias"):
            key = f"{mod}.{suffix}"
            if key in state_dict:
                logger.warning(f"Removing deprecated key {key} from model")
                state_dict.pop(key)

    model.load_state_dict(state_dict)
    return model, data_loaders
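# --- Checkpoint-compatibility sketch (hypothetical helper) ------------------
# Checkpoints saved from a torch.nn.DataParallel-wrapped model carry a
# "module." prefix on every state_dict key, so loading one into a bare
# single-GPU model fails with missing-key errors. A common workaround is to
# strip the prefix first; the helper below is an illustrative sketch, not part
# of this project.
def strip_data_parallel_prefix(state_dict: dict) -> dict:
    """Return a copy of state_dict with any leading 'module.' prefix removed."""
    return {
        key[len("module."):] if key.startswith("module.") else key: value
        for key, value in state_dict.items()
    }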
# A second, slimmer variant of the same loader, kept from the source: it omits
# the distillation/challenge-specific arguments and the deprecated-key
# handling, and reads text_dim from the config rather than from compute_dims.
# Note that if both variants live in one module, this later definition shadows
# the earlier one.
def get_model_and_data_loaders(
        config: ConfigParser,
        logger: logging.Logger,
        ckpt_path: Path,
) -> Tuple[torch.nn.Module, module_data.ExpertDataLoader]:
    expert_dims, raw_input_dims = compute_dims(config)
    trn_config = compute_trn_config(config)
    data_loaders = config.init(
        name='data_loader',
        module=module_data,
        logger=logger,
        raw_input_dims=raw_input_dims,
        challenge_mode=config.get("challenge_mode", False),
        text_feat=config["experts"]["text_feat"],
        text_dim=config["experts"]["text_dim"],
        text_agg=config["experts"]["text_agg"],
        use_zeros_for_missing=config["experts"].get("use_zeros_for_missing", False),
        task=config.get("task", "retrieval"),
        eval_only=True,
    )
    model = config.init(
        name='arch',
        module=module_arch,
        trn_config=trn_config,
        expert_dims=expert_dims,
        text_dim=config["experts"]["text_dim"],
        disable_nan_checks=config["disable_nan_checks"],
        task=config.get("task", "retrieval"),
        ce_shared_dim=config["experts"].get("ce_shared_dim", None),
        feat_aggregation=config["data_loader"]["args"]["feat_aggregation"],
        trn_cat=config["data_loader"]["args"].get("trn_cat", 0),
    )
    # As above, honour the caller-supplied checkpoint path instead of shadowing
    # it with config._args.resume.
    logger.info(f"Loading checkpoint: {ckpt_path} ...")
    checkpoint = torch.load(ckpt_path)
    state_dict = checkpoint['state_dict']

    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)

    model.load_state_dict(state_dict)
    return model, data_loaders
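# --- Usage sketch (hypothetical) ---------------------------------------------
# A minimal example of how an evaluation entry point might call the loader
# above. The argparse flags, the ConfigParser(args) constructor, and the
# config.get_logger helper mirror the common pytorch-template layout this code
# appears to follow; they are assumptions, not the project's actual CLI.
def main():
    import argparse

    parser = argparse.ArgumentParser(description="evaluation entry point (sketch)")
    parser.add_argument("--config", type=Path,
                        help="path to the experiment JSON config (assumed flag)")
    parser.add_argument("--resume", type=Path,
                        help="path to the checkpoint to evaluate (assumed flag)")
    args = parser.parse_args()

    config = ConfigParser(args)  # assumed constructor, per pytorch-template
    logger = config.get_logger("test")  # assumed ConfigParser helper
    model, data_loaders = get_model_and_data_loaders(
        config=config,
        logger=logger,
        ckpt_path=args.resume,
    )
    model.eval()  # switch to inference mode before evaluation


if __name__ == "__main__":
    main()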