def main(cfg: DictConfig):
    """Run sequence-model inference over a list of data directories.

    Resolves relative paths in the configuration, locates the trained
    sequence-model weights and the per-class decision thresholds saved at
    training time, then calls ``extract`` to write probabilities into each
    directory's output HDF5 file.

    Args:
        cfg: project configuration. Must resolve to a sequence weight file
            (explicitly, or via ``reload.latest=True``) and provide
            ``inference.directory_list``.

    Raises:
        ValueError: if no inference directories are specified.
    """
    # turn "models" in your project configuration to "full/path/to/models"
    cfg = utils.get_absolute_paths_from_cfg(cfg)
    log.info('configuration used: ')
    log.info(cfg.pretty())

    weights = projects.get_weightfile_from_cfg(cfg, model_type='sequence')
    assert weights is not None, 'Must either specify a weightfile or use reload.latest=True'

    if cfg.sequence.latent_name is None:
        # find the latent name used in the weight file you loaded
        rundir = os.path.dirname(weights)
        loaded_cfg = utils.load_yaml(os.path.join(rundir, 'config.yaml'))
        latent_name = loaded_cfg['sequence']['latent_name']
        # if this latent name is also None, fall back to the arch of the
        # feature extractor; this should never happen in practice
        if latent_name is None:
            latent_name = loaded_cfg['feature_extractor']['arch']
    else:
        latent_name = cfg.sequence.latent_name

    # the output name will be a group in the output hdf5 dataset containing
    # probabilities, etc.
    output_name = cfg.sequence.output_name if cfg.sequence.output_name is not None else cfg.sequence.arch

    directory_list = cfg.inference.directory_list
    if directory_list is None or len(directory_list) == 0:
        # FIX: original message misspelled "command"
        raise ValueError('must pass list of directories from command line. '
                         'Ex: directory_list=[path_to_dir1,path_to_dir2] or directory_list=all')
    elif isinstance(directory_list, str) and directory_list == 'all':
        basedir = cfg.project.data_path
        directory_list = utils.get_subfiles(basedir, 'directory')

    # each directory must already contain a feature-extractor output file
    outputfiles = []
    for directory in directory_list:
        assert os.path.isdir(directory), 'Not a directory: {}'.format(directory)
        record = projects.get_record_from_subdir(directory)
        assert record['output'] is not None
        outputfiles.append(record['output'])

    # NOTE(review): 1024 is presumably the feature dimensionality expected by
    # the sequence model — confirm against the feature extractor's latent size
    model = build_model_from_cfg(cfg, 1024, len(cfg.project.class_names))
    log.info('model: {}'.format(model))
    model = utils.load_weights(model, weights)

    # per-class thresholds optimized on the validation set during training
    metrics_file = os.path.join(os.path.dirname(weights), 'classification_metrics.h5')
    with h5py.File(metrics_file, 'r') as f:
        thresholds = f['threshold_curves']['val']['optimum'][:]
    log.info('thresholds: {}'.format(thresholds))

    device = 'cuda:{}'.format(cfg.compute.gpu_id)
    class_names = np.array(cfg.project.class_names)

    extract(model,
            outputfiles,
            thresholds,
            cfg.feature_extractor.final_activation,
            latent_name,
            output_name,
            cfg.sequence.sequence_length,
            None,
            True,
            device,
            cfg.inference.ignore_error,
            cfg.inference.overwrite,
            class_names=class_names)
def main(cfg: DictConfig) -> None:
    """Resolve config paths, fill in a default latent name, and launch training."""
    log.debug(f'cwd: {os.getcwd()}')
    # Only two custom overwrites of the configuration happen here.
    # First: rewrite project paths from relative to absolute.
    cfg = utils.get_absolute_paths_from_cfg(cfg)
    # Second: default the sequence latent name to the feature extractor arch.
    if cfg.sequence.latent_name is None:
        cfg.sequence.latent_name = cfg.feature_extractor.arch
    # SHOULD NEVER MODIFY / MAKE ASSIGNMENTS TO THE CFG OBJECT AFTER RIGHT HERE!
    log.info('Configuration used: ')
    log.info(cfg.pretty())
    model = train_from_cfg(cfg)
def main(cfg: DictConfig) -> None:
    """Resolve config paths, log the configuration, and launch training.

    Frees cached GPU memory before propagating a keyboard interrupt so a
    cancelled run does not leave the device occupied.
    """
    log.debug(f'cwd: {os.getcwd()}')
    # The only custom overwrite of the configuration: rewrite project paths
    # from relative to absolute.
    cfg = utils.get_absolute_paths_from_cfg(cfg)
    # SHOULD NEVER MODIFY / MAKE ASSIGNMENTS TO THE CFG OBJECT AFTER RIGHT HERE!
    log.info('Configuration used: ')
    log.info(cfg.pretty())
    try:
        model = train_from_cfg(cfg)
    except KeyboardInterrupt:
        # Release cached GPU memory, then let the interrupt propagate.
        torch.cuda.empty_cache()
        raise
def main(cfg: DictConfig):
    """Run feature-extractor inference over a list of video directories.

    Resolves relative paths in the configuration, finds each directory's RGB
    video via its records file, loads the feature extractor and its per-class
    thresholds, runs ``extract`` on every video, and finally refreshes the
    per-directory record files to register the new output files.

    Args:
        cfg: project configuration; must provide ``inference.directory_list``
            and resolve to a feature-extractor weight file.

    Raises:
        ValueError: if no inference directories are specified.
    """
    # turn "models" in your project configuration to "full/path/to/models"
    cfg = utils.get_absolute_paths_from_cfg(cfg)
    log.info('configuration used in inference: ')
    log.info(cfg.pretty())

    if cfg.sequence.latent_name is None:
        latent_name = cfg.feature_extractor.arch
    else:
        latent_name = cfg.sequence.latent_name

    directory_list = cfg.inference.directory_list
    if directory_list is None or len(directory_list) == 0:
        # FIX: original message misspelled "command"
        raise ValueError('must pass list of directories from command line. '
                         'Ex: directory_list=[path_to_dir1,path_to_dir2]')
    elif isinstance(directory_list, str) and directory_list == 'all':
        basedir = cfg.project.data_path
        directory_list = utils.get_subfiles(basedir, 'directory')

    # video files are found in your input list of directories using the
    # records.yaml file that should be present in each directory
    records = []
    for directory in directory_list:
        assert os.path.isdir(directory), 'Not a directory: {}'.format(directory)
        record = projects.get_record_from_subdir(directory)
        assert record['rgb'] is not None
        records.append(record)

    assert cfg.feature_extractor.n_flows + 1 == cfg.flow_generator.n_rgb, 'Flow generator inputs must be one greater ' \
                                                                          'than feature extractor num flows '
    input_images = cfg.feature_extractor.n_flows + 1
    # NOTE(review): assumes any 3D-conv arch has '3d' in its name — confirm
    mode = '3d' if '3d' in cfg.feature_extractor.arch.lower() else '2d'

    # get the validation transforms; should have resizing, etc.
    transform = get_transforms(cfg.augs, input_images, mode)['val']

    rgb = [record['rgb'] for record in records]

    model = build_feature_extractor(cfg)
    device = 'cuda:{}'.format(cfg.compute.gpu_id)

    feature_extractor_weights = projects.get_weightfile_from_cfg(cfg, 'feature_extractor')
    # per-class thresholds optimized on the validation set during training
    metrics_file = os.path.join(os.path.dirname(feature_extractor_weights), 'classification_metrics.h5')
    assert os.path.isfile(metrics_file)
    with h5py.File(metrics_file, 'r') as f:
        thresholds = f['threshold_curves']['val']['optimum'][:]
    log.info('thresholds: {}'.format(thresholds))

    class_names = np.array(list(cfg.project.class_names))

    extract(rgb,
            model,
            final_activation=cfg.feature_extractor.final_activation,
            thresholds=thresholds,
            fusion=cfg.feature_extractor.fusion,
            num_rgb=input_images,
            latent_name=latent_name,
            device=device,
            transform=transform,
            ignore_error=cfg.inference.ignore_error,
            overwrite=cfg.inference.overwrite,
            class_names=class_names,
            conv_2d=mode == '2d')

    # update each record file in the subdirectory to add our new output files
    projects.write_all_records(cfg.project.data_path)