class BirdsBasicTrainerCV:
    '''
    classdocs
    '''
    # Number of intermediate models to save
    # during training:
    MODEL_ARCHIVE_SIZE = 20

    # For some tensorboard displays:
    # for how many epochs in the past
    # to display data:
    DISPLAY_HISTORY_LEN = 10

    #------------------------------------
    # Constructor
    #-------------------

    def __init__(self, config_info, device=0, percentage=None, debugging=False):
        '''
        :param config_info: all path and training parameters
        :type config_info: NeuralNetConfig
        :param debugging: output lots of debug info
        :type debugging: bool
        :param device: number of GPU to use; default is dev 0
            if any GPU is available
        :type device: {None | int}
        :param percentage: percentage of training data to use
        :type percentage: {int | float}
        '''
        self.log = LoggingService()
        if debugging:
            self.log.logging_level = DEBUG

        if percentage is not None:
            # Integrity check:
            if type(percentage) not in [int, float]:
                raise TypeError(
                    f"Percentage must be int or float, not {type(percentage)}")
            if percentage < 1 or percentage > 100:
                raise ValueError(
                    f"Percentage must be between 1 and 100, not {percentage}")

        if device is None:
            device = 0
            torch.cuda.set_device(device)
        else:
            available_gpus = torch.cuda.device_count()
            if available_gpus == 0:
                self.log.info("No GPU available; running on CPU")
            else:
                if device > available_gpus - 1:
                    raise ValueError(
                        f"Asked to operate on device {device}, but only {available_gpus} are available")
                torch.cuda.set_device(device)

        self.curr_dir = os.path.dirname(os.path.abspath(__file__))

        try:
            self.config = self.initialize_config_struct(config_info)
        except Exception as e:
            msg = f"During config init: {repr(e)}"
            self.log.err(msg)
            raise RuntimeError(msg) from e

        try:
            self.root_train_test_data = self.config.getpath(
                'Paths',
                'root_train_test_data',
                relative_to=self.curr_dir)
        except ValueError as e:
            raise ValueError(
                "Config file must contain an entry 'root_train_test_data' in section 'Paths'") from e

        self.batch_size = self.config.getint('Training', 'batch_size')
        self.kernel_size = self.config.getint('Training', 'kernel_size')
        self.min_epochs = self.config.Training.getint('min_epochs')
        self.max_epochs = self.config.Training.getint('max_epochs')
        self.lr = self.config.Training.getfloat('lr')
        self.net_name = self.config.Training.net_name
        self.pretrained = self.config.Training.getboolean('pretrained', False)
        self.num_folds = self.config.Training.getint('num_folds')
        self.freeze = self.config.Training.getint('freeze', 0)
        self.to_grayscale = self.config.Training.getboolean('to_grayscale', True)

        self.set_seed(42)

        self.log.info("Parameter summary:")
        self.log.info(f"network {self.net_name}")
        self.log.info(f"pretrained {self.pretrained}")
        if self.pretrained:
            self.log.info(f"freeze {self.freeze}")
        self.log.info(f"min epochs {self.min_epochs}")
        self.log.info(f"max epochs {self.max_epochs}")
        self.log.info(f"batch_size {self.batch_size}")

        self.fastest_device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.device = self.fastest_device

        self.num_classes = self.find_num_classes(self.root_train_test_data)

        self.initialize_model()

        sample_width = self.config.getint('Training', 'sample_width', 400)
        sample_height = self.config.getint('Training', 'sample_height', 400)

        self.train_loader = self.get_dataloader(sample_width,
                                                sample_height,
                                                perc_data_to_use=percentage)
        self.log.info(f"Expecting {len(self.train_loader)} batches per epoch")

        num_train_samples = len(self.train_loader.dataset)
        num_classes = len(self.train_loader.dataset.class_names())
        self.log.info(
            f"Training set contains {num_train_samples} samples across {num_classes} classes")
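        # Illustrative only: the config entries read above assume a file
        # roughly like the sketch below. The section and option names come
        # from this constructor, get_dataloader(), and initialize_model();
        # the values and the net name are assumptions, not project defaults:
        #
        #   [Paths]
        #   root_train_test_data = /path/to/train_test_data
        #
        #   [Training]
        #   net_name      = resnet18
        #   pretrained    = True
        #   freeze        = 0
        #   min_epochs    = 15
        #   max_epochs    = 100
        #   batch_size    = 32
        #   kernel_size   = 7
        #   lr            = 0.001
        #   optimizer     = Adam
        #   num_folds     = 10
        #   to_grayscale  = True
        #   sample_width  = 400
        #   sample_height = 400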
        self.class_names = self.train_loader.dataset.class_names()

        log_dir = os.path.join(self.curr_dir, 'runs')
        raw_data_dir = os.path.join(self.curr_dir, 'runs_raw_results')

        self.setup_tensorboard(log_dir, raw_data_dir=raw_data_dir)

        # Log a few example spectrograms to tensorboard;
        # one per class:
        TensorBoardPlotter.write_img_grid(
            self.writer,
            self.root_train_test_data,
            len(self.class_names),  # Num of train examples
            )

        # All ResultTally instances are
        # collected here: (num_folds * num_epochs)
        # each for training and validation steps.
        self.step_results = ResultCollection()

        self.log.debug(
            f"Just before train: \n{'none--on CPU' if self.fastest_device.type == 'cpu' else torch.cuda.memory_summary()}")

        try:
            final_step = self.train()
            self.visualize_final_epoch_results(final_step)
        finally:
            self.close_tensorboard()

    #------------------------------------
    # train
    #-------------------

    def train(self):

        overall_start_time = datetime.datetime.now()

        # Just for sanity: keep track
        # of number of batches...
        total_batch_num = 0

        # Note: since we are cross validating, the
        # data loader's set_epoch() method is only
        # called once (automatically) during instantiation
        # of the associated sampler. Moving from split
        # to split includes shuffling if the caller
        # specified that.

        # Training
        for split_num in range(self.train_loader.num_folds):

            split_start_time = datetime.datetime.now()
            self.initialize_model()

            for epoch in range(self.max_epochs):

                # Set model to train mode:
                self.model.train()

                epoch_start_time = datetime.datetime.now()

                self.log.info(f"Starting epoch {epoch} training")

                # Sanity check record: will record
                # how many samples from each class were
                # used:
                self.class_coverage = {}

                # Sanity records: will record number
                # of samples of each class that are used
                # during training and validation:
                label_distrib = {}
                batch_num = 0

                self.log.info(
                    f"Train epoch {epoch}/{self.max_epochs} split {split_num}/{self.train_loader.num_folds}")

                try:
                    for batch, targets in self.train_loader:

                        # Update the sanity check
                        # num of batches seen, and distribution
                        # of samples across classes:
                        batch_num += 1
                        total_batch_num += 1

                        # Update sanity check records:
                        for lbl in targets:
                            lbl = int(lbl)
                            try:
                                label_distrib[lbl] += 1
                            except KeyError:
                                label_distrib[lbl] = 1
                            try:
                                self.class_coverage[lbl]['train'] += 1
                            except KeyError:
                                self.class_coverage[lbl] = {'train': 1, 'val': 0}

                        self.log.debug(
                            f"Top of training loop: \n{'none--on CPU' if self.fastest_device.type == 'cpu' else torch.cuda.memory_summary()}")

                        images = FileUtils.to_device(batch, 'gpu')
                        labels = FileUtils.to_device(targets, 'gpu')

                        outputs = self.model(images)
                        loss = self.loss_fn(outputs, labels)
                        self.optimizer.zero_grad()
                        loss.backward()
                        self.optimizer.step()
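                        # Worked example of the step numbering used just
                        # below (see step_number()): with num_folds=10,
                        # epoch=2 and split_num=3, the step is 2*10 + 3 = 23.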
                        # Remember the last batch's train result of this
                        # split (results for earlier batches of
                        # the same split will be overwritten). This statement
                        # must sit before deleting outputs and labels:
                        step_num = self.step_number(epoch, split_num, self.num_folds)
                        self.remember_results(LearningPhase.TRAINING,
                                              step_num,
                                              outputs,
                                              labels,
                                              loss)

                        self.log.debug(
                            f"Just before clearing gpu: \n{'none--on CPU' if self.fastest_device.type == 'cpu' else torch.cuda.memory_summary()}")

                        images = FileUtils.to_device(images, 'cpu')
                        outputs = FileUtils.to_device(outputs, 'cpu')
                        labels = FileUtils.to_device(labels, 'cpu')
                        loss = FileUtils.to_device(loss, 'cpu')

                        del images
                        del outputs
                        del labels
                        del loss
                        torch.cuda.empty_cache()

                        self.log.debug(
                            f"Just after clearing gpu: \n{'none--on CPU' if self.fastest_device.type == 'cpu' else torch.cuda.memory_summary()}")

                except EndOfSplit:

                    end_time = datetime.datetime.now()
                    train_time_duration = end_time - epoch_start_time
                    # A human readable duration, down to minutes:
                    duration_str = FileUtils.time_delta_str(train_time_duration,
                                                            granularity=4)

                    self.log.info(
                        f"Done training epoch {epoch} of split {split_num} (duration: {duration_str})")

                    #***********
                    #print(f"****** num_batches in split: {batch_num}" )
                    #print(f"****** LblDist: {label_distrib}")
                    #***********

                    self.validate_split(step_num)
                    self.visualize_step(step_num)
                    # Save model, keeping self.model_archive_size models:
                    self.model_archive.save_model(self.model, epoch)

                    self.log.debug(
                        f"After eval: \n{'none--on CPU' if self.fastest_device.type == 'cpu' else torch.cuda.memory_summary()}")

                # Next epoch
                continue

            end_time = datetime.datetime.now()
            train_time_duration = end_time - split_start_time
            # A human readable duration, down to minutes:
            duration_str = FileUtils.time_delta_str(train_time_duration,
                                                    granularity=4)

            self.log.info(
                f"Done training split {split_num} (duration: {duration_str})")

            # Next split
            continue

        end_time = datetime.datetime.now()
        epoch_duration = end_time - epoch_start_time
        epoch_dur_str = FileUtils.time_delta_str(epoch_duration,
                                                 granularity=4)

        cumulative_dur = end_time - overall_start_time
        cum_dur_str = FileUtils.time_delta_str(cumulative_dur,
                                               granularity=4)

        msg = f"Done epoch {epoch} (epoch duration: {epoch_dur_str}; cumulative: {cum_dur_str})"
        self.log.info(msg)

        #******self.scheduler.step()

        # Fresh results tallying
        #self.results.clear()

        self.log.info(
            f"Training complete after {self.train_loader.num_folds} splits")

        # Report the sanity checks:
        self.log.info(f"Total batches processed: {total_batch_num}")
        for cid in self.class_coverage.keys():
            train_use = self.class_coverage[cid]['train']
            val_use = self.class_coverage[cid]['val']
            self.log.info(
                f"{self.class_names[cid]} Training: {train_use}, Validation: {val_use}")

        # All seems to have gone well. Report the
        # overall result of the final epoch for the
        # hparams config used in this process:

        self.report_hparams_summary(self.latest_result)

        # The final epoch number:
        return epoch
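    # Control-flow sketch for train() above, assuming num_folds=3 and
    # max_epochs=2 (made-up numbers): for each split, a fresh model is
    # trained until the loader raises EndOfSplit for the epoch, at which
    # point the split's held-out fold is validated and the step is
    # visualized. step_number(epoch, split_num, num_folds) then yields:
    #
    #   split 0: steps 0, 3     split 1: steps 1, 4     split 2: steps 2, 5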
    #------------------------------------
    # validate_split
    #-------------------

    def validate_split(self, step):
        '''
        Validate one split, using that split's
        validation fold. Return time taken. Record
        results for tensorboard and other record keeping.

        :param step: current combination of epoch
            and split
        :type step: int
        :return: time taken by the validation
        :rtype: datetime.timedelta
        '''
        # Validation

        self.log.debug(
            f"Start of validation: \n{'none--on CPU' if self.fastest_device.type == 'cpu' else torch.cuda.memory_summary()}")

        start_time = datetime.datetime.now()
        self.log.info(f"Starting validation for step {step}")

        self.model.eval()
        with torch.no_grad():
            for img_tensor, target in self.train_loader.validation_samples():
                expanded_img_tensor = unsqueeze(img_tensor, dim=0)
                expanded_target = unsqueeze(target, dim=0)

                # Update sanity record:
                self.class_coverage[int(target)]['val'] += 1

                images = FileUtils.to_device(expanded_img_tensor, 'gpu')
                label = FileUtils.to_device(expanded_target, 'gpu')

                outputs = self.model(images)
                loss = self.loss_fn(outputs, label)

                images = FileUtils.to_device(images, 'cpu')
                outputs = FileUtils.to_device(outputs, 'cpu')
                label = FileUtils.to_device(label, 'cpu')
                loss = FileUtils.to_device(loss, 'cpu')

                self.remember_results(LearningPhase.VALIDATING,
                                      step,
                                      outputs,
                                      label,
                                      loss)
                del images
                del outputs
                del label
                del loss
                torch.cuda.empty_cache()

        end_time = datetime.datetime.now()
        val_time_duration = end_time - start_time
        # A human readable duration, down to minutes:
        duration_str = FileUtils.time_delta_str(val_time_duration,
                                                granularity=4)
        self.log.info(f"Done validation (duration: {duration_str})")

        return val_time_duration

    # ------------- Utils -----------

    #------------------------------------
    # report_acc_loss
    #-------------------

    def report_acc_loss(self, phase, epoch, accumulated_loss):

        self.writer.add_scalar(f"loss/{phase}",
                               accumulated_loss,
                               epoch)

    #------------------------------------
    # remember_results
    #-------------------

    def remember_results(self,
                         phase,
                         step,
                         outputs,
                         labels,
                         loss,
                         ):

        # Add the results
        tally = ResultTally(step,
                            phase,
                            outputs,
                            labels,
                            loss,
                            self.num_classes,
                            self.batch_size)
        # Add result to intermediate results collection of
        # tallies:
        self.results[step] = tally

        # Same with the session-wide
        # collection:
        self.step_results.add(tally)

    #------------------------------------
    # visualize_step
    #-------------------

    def visualize_step(self, step):
        '''
        Take the ResultTally instances in the train and
        val ResultCollections in self.results, and report
        appropriate aggregates to tensorboard. Computes
        f1 scores, accuracies, etc. for the given step.

        Separately for train and validation results: build
        one long array of predictions, and a corresponding
        array of labels. Also, average the loss across all
        instances. Writes the preds and labels as rows to
        csv files.
        '''
        val_tally = self.results[(step, str(LearningPhase.VALIDATING))]
        train_tally = self.results[(step, str(LearningPhase.TRAINING))]

        result_coll = ResultCollection()
        result_coll.add(val_tally, step)
        result_coll.add(train_tally, step)

        self.latest_result = {'train': train_tally, 'val': val_tally}

        # If we are to write preds and labels to
        # .csv for later additional processing:
        if self.csv_writer is not None:
            self.csv_writer.writerow([step,
                                      train_tally.preds,
                                      train_tally.labels,
                                      val_tally.preds,
                                      val_tally.labels
                                      ])

        TensorBoardPlotter.visualize_step(
            result_coll,
            self.writer,
            [LearningPhase.TRAINING, LearningPhase.VALIDATING],
            step,
            self.class_names)

        # History of learning rate adjustments:
        lr_this_step = self.optimizer.param_groups[0]['lr']
        self.writer.add_scalar('learning_rate',
                               lr_this_step,
                               global_step=step)

    #------------------------------------
    # visualize_final_epoch_results
    #-------------------

    def visualize_final_epoch_results(self, epoch):
        '''
        Reports to tensorboard just for the
        final epoch. Expect self.latest_result
        to be the latest ResultTally.
        '''
        # DISPLAY_HISTORY_LEN holds the number
        # of historic epochs we will show. Two
        # results per epoch --> need
        # 2*DISPLAY_HISTORY_LEN results. But check
        # that there are that many, and show fewer
        # if needed:
        num_res_to_show = min(len(self.step_results),
                              2 * self.DISPLAY_HISTORY_LEN)

        f1_hist = self.step_results[-num_res_to_show:]

        # First: the table of train and val f1-macro
        # scores for the past few epochs:
        #
        #      |phase|ep0  |ep1 |ep2 |
        #      |-----|-----|----|----|
        #      |train| f1_0|f1_1|f1_2|
        #      |  val| f1_0|f1_1|f1_2|

        f1_macro_tbl = TensorBoardPlotter.make_f1_train_val_table(f1_hist)
        self.writer.add_text('f1/history', f1_macro_tbl)

        # Now, in the same tensorboard row: the
        # per-class train/val f1 scores for each
        # class separately:
        #
        #      |class|weighted mean f1 train|weighted mean f1 val|
        #      |-----|----------------------|--------------------|
        #      |  c1 |         0.1          |        0.6         |
        #      |  c2 |         0.1          |        0.6         |
        #      |  c3 |         0.1          |        0.6         |
        #      |-----|----------------------|--------------------|

        f1_all_classes = TensorBoardPlotter.make_all_classes_f1_table(
            self.latest_result,
            self.class_names)
        self.writer.add_text('f1/per-class', f1_all_classes)

    #------------------------------------
    # report_hparams_summary
    #-------------------

    def report_hparams_summary(self, latest_result):
        '''
        Called at the end of training. Constructs
        a summary to report for the hyperparameters
        used in this process. Reports to the tensorboard.

        Hyperparameters reported:

           o lr
           o optimizer
           o batch_size
           o kernel_size

        Included in the measures are:

           o balanced_accuracy      (train and val)
           o mean_accuracy_train    (train and val)
           o epoch_prec_weighted
           o epoch_recall_weighted
           o epoch_mean_loss        (train and val)

        :param latest_result: dict with keys 'train' and 'val',
            holding the respective most recent (i.e. last-epoch)
            ResultTally
        :type latest_result: {'train' : ResultTally,
                              'val'   : ResultTally}
        '''
        # Get the latest validation tally:
        train_tally = latest_result['train']
        val_tally = latest_result['val']

        hparms_vals = OrderedDict({
            'net': self.net_name,
            'pretrained': f"{self.pretrained}",
            'lr_initial': self.config.Training.lr,
            'optimizer': self.config.Training.opt_name,
            'batch_size': self.config.getint('Training', 'batch_size'),
            'kernel_size': self.config.getint('Training', 'kernel_size'),
            'to_grayscale': self.to_grayscale
            })

        metric_results = {
            'zz_balanced_adj_acc_train': train_tally.balanced_acc,
            'zz_balanced_adj_acc_val': val_tally.balanced_acc,
            'zz_acc_train': train_tally.accuracy,
            'zz_acc_val': val_tally.accuracy,
            'zz_epoch_weighted_prec': val_tally.prec_weighted,
            'zz_epoch_weighted_recall': val_tally.recall_weighted,
            'zz_epoch_mean_loss_train': train_tally.mean_loss,
            'zz_epoch_mean_loss_val': val_tally.mean_loss
            }

        self.writer.add_hparams(hparms_vals, metric_results)

    #------------------------------------
    # get_dataloader
    #-------------------

    def get_dataloader(self, sample_width, sample_height, perc_data_to_use=None):
        '''
        Returns a cross validating dataloader.
        If perc_data_to_use is None, all samples
        under self.root_train_test_data will be used
        for training. Else the percentage indicates
        how many of those samples to use. The selection
        is random.

        :param sample_width: pixel width of returned images
        :type sample_width: int
        :param sample_height: pixel height of returned images
        :type sample_height: int
        :param perc_data_to_use: amount of available training
            data to use.
        :type perc_data_to_use: {None | int | float}
        :return: a data loader that serves batches of
            images and their associated labels
        :rtype: CrossValidatingDataLoader
        '''
        data_root = self.root_train_test_data

        train_dataset = SingleRootImageDataset(data_root,
                                               sample_width=sample_width,
                                               sample_height=sample_height,
                                               percentage=perc_data_to_use,
                                               to_grayscale=True)

        sampler = SKFSampler(train_dataset,
                             num_folds=self.num_folds,
                             seed=42,
                             shuffle=True,
                             drop_last=True)

        train_loader = CrossValidatingDataLoader(train_dataset,
                                                 batch_size=self.batch_size,
                                                 shuffle=True,
                                                 drop_last=True,
                                                 sampler=sampler,
                                                 num_folds=self.num_folds)
        return train_loader
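    # Sketch of how the returned loader is consumed elsewhere in this
    # class (see train() and validate_split()); 'loader' is a hypothetical
    # local name, not part of the class:
    #
    #   loader = self.get_dataloader(400, 400, perc_data_to_use=20)
    #   for split_num in range(loader.num_folds):
    #       try:
    #           for batch, targets in loader:          # training folds
    #               ...
    #       except EndOfSplit:
    #           for img, target in loader.validation_samples():  # held-out fold
    #               ...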
    #------------------------------------
    # initialize_model
    #-------------------

    def initialize_model(self):
        self.model = NetUtils.get_net(self.net_name,
                                      num_classes=self.num_classes,
                                      pretrained=self.pretrained,
                                      freeze=self.freeze,
                                      to_grayscale=self.to_grayscale)
        self.log.debug(
            f"Before any gpu push: \n{'none--on CPU' if self.fastest_device.type == 'cpu' else torch.cuda.memory_summary()}")

        FileUtils.to_device(self.model, 'gpu')

        self.log.debug(
            f"Just after model push: \n{'none--on CPU' if self.fastest_device.type == 'cpu' else torch.cuda.memory_summary()}")

        self.opt_name = self.config.Training.get('optimizer', 'Adam')  # Default
        self.optimizer = self.get_optimizer(self.opt_name,
                                            self.model,
                                            self.lr)

        self.loss_fn = nn.CrossEntropyLoss()
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer,
                                                              self.min_epochs)

    #------------------------------------
    # find_num_classes
    #-------------------

    def find_num_classes(self, data_root):
        '''
        Expect two subdirectories under data_root:
        train and validation. Underneath each are
        further subdirectories whose names are the
        classes:

                    train                     validation
            class1 class2 class3        class1 class2 class3
             imgs   imgs   imgs          imgs   imgs   imgs

        No error checking to confirm this structure

        :param data_root: path to parent of train/validation
        :type data_root: str
        :return: number of unique classes as obtained
            from the directory names
        :rtype: int
        '''
        self.classes = FileUtils.find_class_names(data_root)
        return len(self.classes)

    #------------------------------------
    # setup_tensorboard
    #-------------------

    def setup_tensorboard(self, logdir, raw_data_dir=True):
        '''
        Initialize tensorboard. To easily compare experiments,
        use runs/exp1, runs/exp2, etc.

        Method creates the dir if needed.

        Additionally, sets self.csv_pred_writer and
        self.csv_label_writer to None, or to open CSV
        writers, depending on the value of raw_data_dir;
        see create_csv_writer()

        :param logdir: root for tensorboard events
        :type logdir: str
        '''

        if not os.path.isdir(logdir):
            os.makedirs(logdir)

        # For storing train/val preds/labels
        # for every epoch. Used to create charts
        # after run is finished:
        self.csv_writer = self.create_csv_writer(raw_data_dir)

        # Place to store intermediate models:
        self.model_archive = \
            self.create_model_archive(self.config,
                                      self.num_classes)

        # Use SummaryWriterPlus to avoid confusing
        # directory creations when calling add_hparams()
        # on the writer:
        self.writer = SummaryWriterPlus(log_dir=logdir)

        # Intermediate storage for train and val results:
        self.results = ResultCollection()

        self.log.info(
            f"To view tensorboard charts: in shell: tensorboard --logdir {logdir}; then browser: localhost:6006")

    #------------------------------------
    # create_csv_writer
    #-------------------

    def create_csv_writer(self, raw_data_dir):
        '''
        Create a csv_writer that will fill a csv
        file during training/validation as follows:

            epoch   train_preds   train_labels   val_preds   val_labels

        Cols after the integer 'epoch' col will each be
        an array of ints:

                  train_preds     train_lbls     val_preds  val_lbls
            2,    "[2,5,1,2,3]", "[2,6,1,2,1]",  "[1,2]",   "[1,3]"

        If raw_data_dir is provided as a str, it is
        taken as the directory where the csv file with
        predictions and labels is to be written. The dir
        is created if necessary.

        If the arg is instead set to True, a dir 'runs_raw_results'
        is created under this script's directory if it does not
        exist. Then a subdirectory is created for this run,
        using the hparam settings to build a file name. The dir
        is created if needed. Result ex.:

              <script_dir>
                   runs_raw_results
                       Run_lr_0.001_br_32
                           run_2021_05_ ... _lr_0.001_br_32.csv

        Then the file name is created, again from the run
        hparam settings. If this file exists, the user is asked
        whether to remove or append.

        The inst var self.csv_writer is initialized to:

           o None if the csv file exists, but is not to be
             overwritten nor appended-to
           o A file descriptor for a file open for either
             'write' or 'append'

        :param raw_data_dir: If simply True, create dir and file names
            from hparams, and create as needed. If a string, it is
            assumed to be the directory where a .csv file is to be
            created. If None, self.csv_writer is set to None.
        :type raw_data_dir: {None | True | str}
        :return: CSV writer ready for action. Set either to
            write a fresh file, or append to an existing file.
            Unless the file exists, and the user decided not to overwrite
        :rtype: {None | csv.writer}
        '''
        # Ensure the csv file root dir exists if
        # we'll do a csv dir and run-file below it:

        if type(raw_data_dir) == str:
            raw_data_root = raw_data_dir
        else:
            raw_data_root = os.path.join(self.curr_dir, 'runs_raw_results')

        if not os.path.exists(raw_data_root):
            os.mkdir(raw_data_root)

        # Can rely on raw_data_root being defined and existing:

        if raw_data_dir is None:
            return None

        # Create both a raw dir sub-directory and a .csv file
        # for this run:
        csv_subdir_name = FileUtils.construct_filename(self.config.Training,
                                                       prefix='Run',
                                                       incl_date=True)
        os.makedirs(csv_subdir_name)

        # Create a csv file name:
        csv_file_nm = FileUtils.construct_filename(self.config.Training,
                                                   prefix='run',
                                                   suffix='.csv',
                                                   incl_date=True)
        csv_path = os.path.join(raw_data_root, csv_file_nm)

        # Get csv_raw_fd appropriately:

        if os.path.exists(csv_path):
            do_overwrite = FileUtils.user_confirm(
                f"File {csv_path} exists; overwrite?", default='N')
            if not do_overwrite:
                do_append = FileUtils.user_confirm("Append instead?",
                                                   default='N')
                if not do_append:
                    return None
                else:
                    mode = 'a'
            else:
                mode = 'w'
        else:
            mode = 'w'

        csv_writer = CSVWriterCloseable(csv_path,
                                        mode=mode,
                                        delimiter=',')

        header = ['epoch', 'train_preds', 'train_labels',
                  'val_preds', 'val_labels']
        csv_writer.writerow(header)

        return csv_writer

    #------------------------------------
    # create_model_archive
    #-------------------

    def create_model_archive(self, config, num_classes):
        '''
        Creates facility for saving partially trained
        models along the way.

        :param config:
        :type config:
        :param num_classes:
        :type num_classes:
        :return: ModelArchive instance ready
            for calls to save_model()
        :rtype: ModelArchive
        '''
        model_archive = ModelArchive(config,
                                     num_classes,
                                     history_len=self.MODEL_ARCHIVE_SIZE,
                                     log=self.log)
        return model_archive

    #------------------------------------
    # close_tensorboard
    #-------------------

    def close_tensorboard(self):
        if self.csv_writer is not None:
            try:
                self.csv_writer.close()
            except Exception as e:
                self.log.warn(f"Could not close csv file: {repr(e)}")
        try:
            self.writer.close()
        except AttributeError:
            self.log.warn(
                "Method close_tensorboard() called before setup_tensorboard()?")
        except Exception as e:
            raise RuntimeError(f"Problem closing tensorboard: {repr(e)}") from e

    #------------------------------------
    # get_optimizer
    #-------------------

    def get_optimizer(self, optimizer_name, model, lr):

        optimizer_name = optimizer_name.lower()

        if optimizer_name == 'adam':
            optimizer = optim.Adam(model.parameters(),
                                   lr=lr,
                                   eps=1e-3,
                                   amsgrad=True)
            return optimizer

        if optimizer_name == 'sgd':
            optimizer = optim.SGD(model.parameters(),
                                  lr=lr,
                                  momentum=0.9)
            return optimizer

        if optimizer_name == 'rmsprop':
            optimizer = optim.RMSprop(model.parameters(),
                                      lr=lr,
                                      momentum=0.9)
            return optimizer

        raise ValueError(f"Optimizer {optimizer_name} not supported")

    #------------------------------------
    # initialize_config_struct
    #-------------------

    def initialize_config_struct(self, config_info):
        '''
        Initialize a config dict of dicts with
        the application's configurations.
        Sections will be:

          config['Paths']       -> dict[attr : val]
          config['Training']    -> dict[attr : val]
          config['Parallelism'] -> dict[attr : val]

        The config read method will handle config_info
        being None.

        If config_info is a string, it is assumed either
        to be a file containing the configuration, or
        a JSON string that defines the config.

        Else config_info is assumed to be a NeuralNetConfig.
        The latter is relevant only if using this file
        as a library, rather than a command line tool.
        If given a NeuralNetConfig instance, it is
        returned unchanged.

        :param config_info: the information needed to construct
            the structure
        :type config_info: {NeuralNetConfig | str}
        :return: a NeuralNetConfig instance with all parms initialized
        :rtype: NeuralNetConfig
        '''
        if isinstance(config_info, str):
            # Is it a JSON str? Should have a better test!
            if config_info.startswith('{'):
                # JSON String:
                config = NeuralNetConfig.from_json(config_info)
            else:
                config = self.read_configuration(config_info)
        elif isinstance(config_info, NeuralNetConfig):
            config = config_info
        else:
            msg = f"Error: must have a config file, not {config_info}. See config.cfg.Example in project root"
            # Since logdir may be in config, need to use print here:
            print(msg)
            raise ConfigError(msg)

        return config

    #------------------------------------
    # read_configuration
    #-------------------

    def read_configuration(self, conf_file):
        '''
        Parses the config file that describes training parameters,
        various file paths, and how many GPUs different machines have.
        Syntax follows Python's configparser package, which includes
        sections, and attr/val pairs in each section.

        Expected sections:

           o Paths: various file paths for the application
           o Training: holds batch sizes, number of epochs, etc.
           o Parallelism: holds number of GPUs on different machines

        For Parallelism, expect entries like:

           foo.bar.com  = 4
           127.0.0.1    = 5
           localhost    = 3
           172.12.145.1 = 6

        Method identifies which of the entries is
        'localhost' by comparing against the local hostname.
        Though 'localhost' or '127.0.0.1' may be provided.

        Returns a dict of dicts:

            config[section-names][attr-names-within-section]

        Types of standard entries, such as epochs, batch_size,
        etc. are coerced, so that, e.g. config['Training']['epochs']
        will be an int. Clients may add non-standard entries.
        For those the client must convert values from string
        (the type in which values are stored by default) to
        the required type.
        This can be done the usual way: int(...), or
        using one of the configparser's retrieval methods
        getboolean(), getint(), and getfloat():

            config['Training'].getfloat('learning_rate')

        :param conf_file: path to configuration file
        :type conf_file: str
        :return: a dict of dicts mirroring the config file sections/entries
        :rtype: dict[dict]
        :raises ValueError
        :raises TypeError
        '''
        if conf_file is None:
            return self.init_defaults()

        config = DottableConfigParser(conf_file)

        if len(config.sections()) == 0:
            # Config file exists, but is empty:
            return self.init_defaults(config)

        # Do type conversion also in other entries that
        # are standard:

        types = {
            'epochs': int,
            'batch_size': int,
            'kernel_size': int,
            'sample_width': int,
            'sample_height': int,
            'seed': int,
            'pytorch_comm_port': int,
            'num_pretrained_layers': int,
            'root_train_test_data': str,
            'net_name': str,
            }
        for section in config.sections():
            for attr_name in config[section].keys():
                try:
                    str_val = config[section][attr_name]
                    required_type = types[attr_name]
                    config[section][attr_name] = required_type(str_val)
                except KeyError:
                    # Current attribute is not standard;
                    # users of the corresponding value need
                    # to do their own type conversion when
                    # accessing this configuration entry:
                    continue
                except TypeError:
                    raise ValueError(
                        f"Config file error: {section}.{attr_name} should be convertible to {required_type}")

        return config

    #------------------------------------
    # set_seed
    #-------------------

    def set_seed(self, seed):
        '''
        Set the seed across all different necessary platforms
        to allow for comparison of different models
        and runs

        :param seed: random seed to set for all random
            num generators
        :type seed: int
        '''
        torch.manual_seed(seed)
        cuda.manual_seed_all(seed)
        # Not totally sure what these two do!
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        np.random.seed(seed)
        os.environ['PYTHONHASHSEED'] = str(seed)
        random.seed(seed)

    #------------------------------------
    # time_delta_str
    #-------------------

    def time_delta_str(self, epoch_delta, granularity=2):
        '''
        Takes the difference between two datetime times:

               start_time = datetime.datetime.now()
               <some time elapses>
               end_time = datetime.datetime.now()

               delta = end_time - start_time
               time_delta_str(delta)

        Depending on granularity, returns a string like:

            Granularity:
                      1  '160.0 weeks'
                      2  '160.0 weeks, 4.0 days'
                      3  '160.0 weeks, 4.0 days, 6.0 hours'
                      4  '160.0 weeks, 4.0 days, 6.0 hours, 42.0 minutes'
                      5  '160.0 weeks, 4.0 days, 6.0 hours, 42.0 minutes, 13.0 seconds'

        For smaller time deltas, such as 10 seconds,
        does not include leading zero times. For any
        granularity:

                      '10.0 seconds'

        If the duration is less than a second, returns '< 1sec>'

        :param epoch_delta:
        :type epoch_delta:
        :param granularity:
        :type granularity:
        '''
        intervals = (
            ('weeks', 604800),   # 60 * 60 * 24 * 7
            ('days', 86400),     # 60 * 60 * 24
            ('hours', 3600),     # 60 * 60
            ('minutes', 60),
            ('seconds', 1),
            )

        secs = epoch_delta.total_seconds()
        result = []
        for name, count in intervals:
            value = secs // count
            if value:
                secs -= value * count
                if value == 1:
                    name = name.rstrip('s')
                result.append("{} {}".format(value, name))

        dur_str = ', '.join(result[:granularity])
        if len(dur_str) == 0:
            dur_str = '< 1sec>'
        return dur_str
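    # Worked example for time_delta_str() above: a delta of 3725 seconds
    # yields '1.0 hour, 2.0 minutes' at granularity=2, and
    # '1.0 hour, 2.0 minutes, 5.0 seconds' at granularity=3
    # (values are floats because total_seconds() returns a float).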
    #------------------------------------
    # step_number
    #-------------------

    def step_number(self, epoch, split_num, num_folds):
        '''
        Combines an epoch with a split number into
        a single integer series as epochs increase,
        and split_num cycles from 0 to num_folds.

        :param epoch: epoch to encode
        :type epoch: int
        :param split_num: split number to encode
        :type split_num: int
        :param num_folds: number of folds for CV splitting;
            must be constant!
        :type num_folds: int
        :return: an integer that combines epoch and split-num
        :rtype: int
        '''
        step_num = epoch * num_folds + split_num
        return step_num

    #------------------------------------
    # cleanup
    #-------------------

    def cleanup(self):
        '''
        Recover resources taken by collaborating
        processes. OK to call multiple times.
        '''
        # self.clear_gpu()
        try:
            self.writer.close()
        except Exception as e:
            self.log.err(f"Could not close tensorboard writer: {repr(e)}")
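
# A minimal usage sketch for BirdsBasicTrainerCV (illustrative only; the
# config path and percentage below are assumptions, not project defaults).
# Note that training starts from the constructor itself: it reads the
# configuration, builds the cross-validating dataloader, runs train(),
# and closes tensorboard when done:
#
#     trainer = BirdsBasicTrainerCV('config.cfg', device=0, percentage=20)
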
class TrainScriptLauncher:

    #------------------------------------
    # Constructor
    #-------------------

    # Use distributed torch default port:
    COMM_PORT = '5678'

    def __init__(self, unittesting=False):

        self.hostname = socket.getfqdn()

        if unittesting:
            # Let unittests create an instance
            # and call individual methods:
            return

        # Logging to console during launch:
        self.log = LoggingService()

        # Convenience: directory of this
        # script, and project root directory
        curr_dir = Path(__file__).parent
        proj_root = curr_dir.joinpath('../..').resolve()
        self.curr_dir = str(curr_dir)
        self.proj_root = str(proj_root)

        args_parser = BirdsTrainingArgumentsParser(
            formatter_class=BirdsTrainingArgumentsParser.BlankLinesHelpFormatter,
            description="PyTorch distributed training launch "
                        "helper to spawn multiple distributed "
                        "birds_train_parallel.py processes")

        all_args = args_parser.parse_args()
        # Separate the args for this launch script
        # from the args destined for the copies of
        # the train script:
        self.launch_args = all_args['launch_args']
        self.script_args = all_args['script_args']

        # Build the gpu_landscape dict:
        self.gather_world_layout(self.launch_args)

        self.GPUS_USED_THIS_MACHINE = self.gpu_landscape[self.hostname]['num_gpus']

    #------------------------------------
    # gather_world_layout
    #-------------------

    def gather_world_layout(self, launch_args):
        '''
        Compute a unique number for each GPU within
        the group of nodes (machines), starting with
        the master node's first GPU as 0 (if the master
        node has a GPU). The resulting GPU layout is
        assigned to instance variable gpu_landscape.

        :param launch_args:
        :type launch_args:
        '''

        try:
            config_file = launch_args['config']
            if not os.path.exists(config_file):
                raise ConfigError(
                    f"Configuration file {config_file} that was provided as command line arg does not exist.")
        except KeyError:
            raise RuntimeError(
                "Error: launch args must include a config file. See config.cfg.Example in project root")

        self.config = DottableConfigParser(config_file)

        # Ensure that the launch_args contains
        # the path to the training script. It
        # will be there if provided on the cmd line.
        # But it may instead be under Path:train_script
        # in the configuration:

        try:
            self.launch_args['training_script']
        except KeyError:
            # The training script was not specified
            # on the command line.
            # Is it in the config file?
            try:
                self.launch_args['training_script'] = self.config.getpath(
                    'Paths',
                    'train_script',
                    relative_to=self.curr_dir)
            except KeyError:
                raise ValueError(
                    "No training script specified on command line or in config file")

        try:
            self.world_map_path = self.config.getpath('Paths',
                                                      'world_map',
                                                      relative_to=self.curr_dir)
        except KeyError:
            raise RuntimeError(
                f"Could not find entry for 'world_map' in config file {config_file}")

        self.world_map = self.read_world_map(self.world_map_path)

        # Ensure that this machine has an
        # entry in the world_map:
        try:
            # Get this machine's info (sub)dict:
            _my_world_info = self.world_map[self.hostname]
        except KeyError:
            raise ConfigError(
                f"World map file does not contain entry for this machine ({self.hostname})")

        self.compute_landscape = {}

        # Whether or not the machine running this
        # code is the master node:
        self.am_master_node = False

        # Build gpu_landscape, which maps
        # machine names to the rank range
        # that they occupy via the number of
        # their GPUs:
        #
        #    {machine_name1 : [1],
        #     machine_name2 : [0],
        #     machine_name3 : [1,2,3],
        #    }

        self.gpu_landscape = self.build_compute_landscape(self.world_map)

        if self.master_hostname is None:
            raise ConfigError(
                f'No master machine in {self.world_map_path}; one entry needs to be "master" : 1')

        # Common pytorch port is either in the config file,
        # or we use the pytorch default:
        self.MASTER_PORT = self.config.getint('Parallelism',
                                              'master_port',
                                              self.COMM_PORT)

        # Handle special case: no GPUs anywhere, and
        # we are on node 0: in that case start a single
        # copy of the training script. If it is written
        # properly, it will detect the absence of a GPU,
        # and use the CPU. This happens during debugging
        # on a laptop:

        if self.WORLD_SIZE == 0 and self.am_master_node:
            self.WORLD_SIZE += 1

        # If trying to launch on a node without GPUs,
        # when GPUs are available elsewhere, refuse to
        # start the script (is this needed?):
        if not TESTING:
            if self.my_gpus == 0 and self.WORLD_SIZE > 0:
                raise RuntimeError(
                    "This machine does not have any GPU, but others do; training script not started.")

    #------------------------------------
    # launch_scripts
    #-------------------

    def launch_scripts(self):
        '''
        Launch (possibly) multiple copies of
        the training script. Use world_map.json
        to know how many, and which GPUs this
        machine is to use.

        Each copy is told:

            o MASTER_ADDR             # Where to reach the coordinating process
            o MASTER_PORT             # Corresponding port
            o RANK                    # The copy's sequence number, which is
                                      #   unique across all participating machines
            o LOCAL_RANK              # Which of this machine's GPUs to use (0-origin)
            o WORLD_SIZE              # How many GPUs are used on all machines together
            o GPUS_USED_THIS_MACHINE  # Number of GPUs *used* on this
                                      #   machine, according to the world_map.
        '''

        # This machine's range of ranks:
        rank_range = self.gpu_landscape[self.hostname]['rank_range']
        this_machine_gpu_ids = self.gpu_landscape[self.hostname]['gpu_device_ids']
        min_rank_this_machine = self.gpu_landscape[self.hostname]['start_rank']

        local_rank = 0

        # Map from process object to rank (for debug msgs):
        self.who_is_who = OrderedDict()
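        # For orientation, a hypothetical example of the command that
        # training_script_start_cmd() builds in the loop below (addresses,
        # ranks, and file names are made up):
        #
        #   python -u birds_train_parallel.py \
        #       --MASTER_ADDR=10.0.0.1 --MASTER_PORT=5678 \
        #       --RANK=1 --LOCAL_RANK=1 --MIN_RANK_THIS_MACHINE=0 \
        #       --WORLD_SIZE=4 --GPUS_USED_THIS_MACHINE=2 \
        #       config.cfg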
        for rank in rank_range:

            cmd = self.training_script_start_cmd(rank,
                                                 len(this_machine_gpu_ids),
                                                 local_rank,
                                                 min_rank_this_machine,
                                                 self.launch_args,
                                                 self.script_args)

            # Copy stdin, and give the copy to the subprocess.
            # This enables the subprocess to ask user whether
            # to save training state in case of a cnt-C:
            newstdin = os.fdopen(os.dup(sys.stdin.fileno()))

            # Spawn one training script:
            process = subprocess.Popen(cmd,
                                       stdin=newstdin,
                                       stdout=None,  # Script inherits this launch
                                       stderr=None   # ... script's stdout/stderr
                                       )
            self.who_is_who[process] = rank
            local_rank += 1

        if not self.launch_args['quiet']:
            print(
                f"Node {self.hostname} {os.path.basename(sys.argv[0])}: Num processes launched: {len(self.who_is_who)}")
            if self.am_master_node:
                print(f"Awaiting {self.WORLD_SIZE} process(es) to finish...")
            else:
                print(f"Awaiting {self.my_gpus} process(es) to finish...")

        failed_processes = []
        try:
            for process in self.who_is_who.keys():
                process.wait()
                if process.returncode != 0:
                    failed_processes.append(process)
                continue
        except KeyboardInterrupt:
            # Gently kill the training scripts:
            self.handle_cnt_c()
            pass  # See which processes got the interrupt

        num_failed = len(failed_processes)
        if num_failed > 0:
            print(f"Number of failed training scripts: {num_failed}")
            for failed_process in failed_processes:
                train_script = self.launch_args['training_script']
                script_rank = self.who_is_who[failed_process]
                msg = (f"Training script {train_script} (rank {script_rank}) encountered error(s); see logfile")
                print(msg)

    #------------------------------------
    # training_script_start_cmd
    #-------------------

    def training_script_start_cmd(self,
                                  rank,
                                  gpus_used_this_machine,
                                  local_rank,
                                  min_rank_this_machine,
                                  launch_args,
                                  script_args):
        '''
        From provided information, creates a legal
        command string for starting the training script.

        :param rank: rank of the script; i.e. its process'
            place in the sequence of all train script
            processes across all machines
        :type rank: int
        :param gpus_used_this_machine: number of GPU devices to
            be used, according to the world_map; may be less than
            the number of available GPUs
        :type gpus_used_this_machine: int
        :param local_rank: index into the local sequence of GPUs
            for the GPU that the script is to use
        :type local_rank: int
        :param min_rank_this_machine: the lowest of the ranks among
            the training scripts on this machine
        :type min_rank_this_machine: int
        :param launch_args: command line arguments intended for the
            launch script, as opposed to being destined
            for the train script
        :type launch_args: {str : Any}
        :param script_args: additional args for the train script
        :type script_args: {str : Any}
        '''

        # Build the shell command line,
        # starting with 'python -u':
        cmd = [sys.executable, "-u"]

        cmd.append(launch_args['training_script'])

        # Add the args for the script that were
        # in the command line:
        for arg_name in script_args.keys():
            script_arg_val = script_args[arg_name]
            if script_arg_val is None or arg_name == 'config':
                # Skip over non-specified CLI args:
                continue
            cmd.append(f"--{arg_name}={script_args[arg_name]}")

        # Add the 'secret' args that tell the training
        # script all the communication parameters:

        cmd.extend([f"--MASTER_ADDR={self.MASTER_ADDR}",
                    f"--MASTER_PORT={self.MASTER_PORT}",
                    f"--RANK={rank}",
                    f"--LOCAL_RANK={local_rank}",
                    f"--MIN_RANK_THIS_MACHINE={min_rank_this_machine}",
                    f"--WORLD_SIZE={self.WORLD_SIZE}",
                    f"--GPUS_USED_THIS_MACHINE={gpus_used_this_machine}"
                    ])

        # Finally, the obligatory non-option arg
        # to the training script: the configuration
        # file:

        config_file_name = script_args['config']
        cmd.append(config_file_name)

        self.log.debug(f"****** Launch: the cmd is {cmd}")
        return cmd

    #------------------------------------
    # read_world_map
    #-------------------

    def read_world_map(self, path):
        '''
        Read the JSON5 world map file, and return
        a corresponding dict. JSON5 allows something
        like:

            /*
               This is a block comment.
               Notice the lacking quote chars around
               the keys below. They are optional in JSON5.
            */
            {quintus.stanford.edu : {
                "master" : Yes,
                "gpus"   : 2
                },
             quatro.stanford.edu : {
                "gpus"    : 2,
                "devices" : [1,2]
                }
            }

        BUT: JSON5 gets angry at dots in the keys.
        So we first read the file, and try to find
        the machine names. We temporarily replace
        them with an acceptable marker, and then
        convert back.

        :param path: path to world map file
        :type path: string
        '''
        dot_substitute = '___'

        try:
            # Read all the world map file lines:
            with open(path, 'r') as world_map_fd:
                tmp_world_map = world_map_fd.readlines()
        except IOError as e:
            raise IOError(f"World map file at {path} not found") from e

        # Replace occurrences of '.' with dot_substitute:
        new_text = []
        for line in tmp_world_map:
            new_text.append(line.replace('.', dot_substitute))

        # ... and make one string from all the lines:
        json_str = '\n'.join(new_text)

        try:
            # Hopefully, JSON5 will eat it now:
            world_map_almost = json5.loads(json_str)
        except JSONError as e:
            raise JSONError(f"World map file at {path} contains bad JSON") from e

        # Need to fix all the dot substitutions.
        # At this point the data structure is
        #     { <machine_name> : {spec_attr1 : val1,
        #                         spec_attr2 : val2,
        #                        }
        #     }

        # Fix the machine names first:
        mach_names_fixed = [machine_name.replace(dot_substitute, '.')
                            for machine_name in world_map_almost.keys()]

        machine_specs_fixed = []

        # Now dig into each of the nested machine spec
        # dicts, and fix attrs and values there:
        for spec in world_map_almost.values():
            # Spec is a dict nested inside the outer one:
            spec_fixed = {key.replace(dot_substitute, '.'):
                          val.replace(dot_substitute, '.')
                          if isinstance(val, str) else val
                          for key, val in spec.items()}
            machine_specs_fixed.append(spec_fixed)

        # Put it all together:
        world_map = {machine_name: spec_dict
                     for machine_name, spec_dict
                     in zip(mach_names_fixed, machine_specs_fixed)}

        return world_map
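    # Example (made-up host names) of what build_compute_landscape() below
    # derives for a two-machine world map in which the master and one
    # worker each have 2 GPUs and no explicit 'devices' list:
    #
    #   {'master.example.edu': {'num_gpus': 2,
    #                           'gpu_device_ids': [0, 1],
    #                           'start_rank': 0,
    #                           'rank_range': [0, 1]},
    #    'worker.example.edu': {'num_gpus': 2,
    #                           'gpu_device_ids': [0, 1],
    #                           'start_rank': 2,
    #                           'rank_range': [2, 3]}}
    #
    # WORLD_SIZE would be 4 in this case.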
    #------------------------------------
    # build_compute_landscape
    #-------------------

    def build_compute_landscape(self, world_map):
        '''
        Using the world_map.json config file, build
        a dict self.gpu_landscape like this:

            {'machine_name1' : {'start_rank'     : <int>,
                                'num_gpus'       : <int>,
                                'gpu_device_ids' : [<int>,<int>,...]
                                },
             'machine_name2' : {'start_rank'     : <int>,
                                'num_gpus'       : <int>,
                                'gpu_device_ids' : [<int>,<int>,...]
                                }
            }

        Also sets:

           o self.master_hostname, the hostname
             running the one process that coordinates all others.
           o self.WORLD_SIZE, number of GPUs used across all machines
           o self.my_gpus, the number of GPUs on this machine

        :param world_map:
        :type world_map:
        :return: information about how many GPUs are on each node
        :rtype: OrderedDict
        '''

        if not self.hostname in world_map.keys():
            raise ConfigError(
                f"World map does not contain an entry for this machine {self.hostname}")

        # World size is the number of training script processes,
        # which is equal to the number of GPUs used on all
        # participating machines combined:

        # Number of GPUs across all machines:
        self.WORLD_SIZE = 0

        self.master_hostname = None

        # Go through the world map, machine (a.k.a. node)
        # one at a time, in alpha order of the machine
        # names to ensure all copies of this script
        # come to the same conclusions about ranks.
        #
        # Build gpu_landscape with the structure shown in the
        # docstring above. It is an OrderedDict(), containing
        # machines alphabetically by name. This discipline
        # is required so that all copies of this launch script
        # (one copy per machine) arrive at the same ordering of
        # GPUs:

        gpu_landscape = OrderedDict({})

        for machine_name in sorted(world_map.keys()):

            # Get dict of info about the machine:
            machine_info = world_map[machine_name]

            try:
                machine_gpus = machine_info['gpus']
            except KeyError:
                print("World map must include a 'gpus' entry; the value may be 0")

            gpu_landscape[machine_name] = {}
            gpu_landscape[machine_name]['num_gpus'] = machine_gpus

            # List of GPU numbers to use is optional
            # in world_maps:

            machine_gpus_to_use = machine_info.get('devices', None)
            if machine_gpus_to_use is None:
                # Use all GPUs on that machine:
                machine_gpus_to_use = list(range(machine_gpus))

            gpu_landscape[machine_name]['gpu_device_ids'] = machine_gpus_to_use

            # Accept all kinds of affirmatives as values
            # for identification of the master node entry:
            is_master_node = machine_info.get('master', False) \
                in [1, 'True', 'true', 'Yes', 'yes']

            if is_master_node:
                self.master_hostname = machine_name
                if machine_name == self.hostname:
                    self.am_master_node = True
                try:
                    self.MASTER_ADDR = socket.gethostbyname(machine_name)
                except socket.gaierror:
                    # For machines that have no
                    # findable IP address:
                    self.MASTER_ADDR = '127.0.0.1'

            self.WORLD_SIZE += machine_gpus

        # Go through the machine entries in gpu_landscape, and
        # assign rank ranges to each. Must start with
        # the master node, b/c it must start with rank 0.
        # For the master node, it is possible that it has
        # no GPUs:

        master_info = gpu_landscape[self.master_hostname]
        master_info['rank_range'] = list(range(master_info['num_gpus']))
        master_info['start_rank'] = 0
        if len(master_info['rank_range']) == 0:
            # Master has no GPU; it still occupies rank 0:
            master_info['rank_range'] = [0]

        # Start assigning more ranks after
        # the GPUs of the master:

        running_rank = master_info['rank_range'][-1] + 1

        for machine_name in gpu_landscape.keys():
            if machine_name == self.master_hostname:
                # We already did the master node
                continue
            mach_info = gpu_landscape[machine_name]
            mach_info['start_rank'] = running_rank
            num_gpus = mach_info['num_gpus']
            range_bound = running_rank + (num_gpus if num_gpus > 0 else 1)
            mach_info['rank_range'] = list(range(running_rank, range_bound))
            running_rank += (num_gpus if num_gpus > 0 else 1)

        self.my_gpus = gpu_landscape[self.hostname]['num_gpus']
        self.gpu_landscape = gpu_landscape

        return gpu_landscape

    #------------------------------------
    # handle_cnt_c
    #-------------------

    def handle_cnt_c(self):
        '''
        Terminate the training script processes in
        self.who_is_who by sending them SIGTERM,
        highest rank first, master process last.
        '''

        # Line processes up, highest rank first,
        # master process last:
        procs_terminate = sorted([proc for proc in self.who_is_who.keys()],
                                 key=lambda obj: self.who_is_who[obj],
                                 reverse=True)

        for process in procs_terminate:
            # If process is no longer running,
            # forget about it:
            if process.poll() is not None:
                # Process dead:
                continue
            process.send_signal(signal.SIGTERM)
            process.wait()
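
# Minimal launch sketch (an assumption, not necessarily how the full module
# wires up its entry point; argument parsing happens in the constructor):
#
#     if __name__ == '__main__':
#         launcher = TrainScriptLauncher()
#         launcher.launch_scripts()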