def get_optimizer(model, hps):
    # Optimizer
    betas = (hps.beta1, hps.beta2)
    if hps.fp16_opt:
        opt = FP16FusedAdam(model.parameters(), lr=hps.lr, weight_decay=hps.weight_decay,
                            betas=betas, eps=hps.eps)
    else:
        opt = FusedAdam(model.parameters(), lr=hps.lr, weight_decay=hps.weight_decay,
                        betas=betas, eps=hps.eps)

    # lr scheduler
    shd = get_lr_scheduler(opt, hps)

    restore_path = hps.restore_prior if hps.prior else hps.restore_vqvae
    restore_opt(opt, shd, restore_path)

    # fp16 dynamic loss scaler
    scalar = None
    if hps.fp16:
        rank = dist.get_rank()
        local_rank = rank % 8
        scalar = LossScalar(hps.fp16_loss_scale, scale_factor=2 ** (1. / hps.fp16_scale_window))
        if local_rank == 0:
            print(scalar.__dict__)

    zero_grad(model)
    return opt, shd, scalar
def get_ddp(model, hps):
    rank = dist.get_rank()
    local_rank = rank % 8
    ddp = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank,
                                  broadcast_buffers=False, bucket_cap_mb=hps.bucket)
    return ddp
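# Usage sketch (an assumption, not part of the original source): how the two
# helpers above are typically wired together in one training process. The
# `make_model` constructor is hypothetical; _setup_dist_from_mpi and get_ema
# are defined further below in this file.
def _example_training_setup(hps, make_model):
    rank, local_rank, device = _setup_dist_from_mpi(
        master_addr="127.0.0.1", backend="nccl", port=29500, n_attempts=5, verbose=False)
    model = make_model(hps).to(device)
    ddp_model = get_ddp(model, hps)
    opt, shd, scalar = get_optimizer(ddp_model, hps)
    ema = get_ema(model, hps)
    return ddp_model, opt, shd, scalar, ema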
def init_dataset(self, hps):
    # Load list of files and starts/durations
    files = librosa.util.find_files(f'{hps.audio_files_dir}', ['mp3', 'opus', 'm4a', 'aac', 'wav'])
    print_all(f"Found {len(files)} files. Getting durations")
    cache = dist.get_rank() % 8 == 0 if dist.is_available() else True
    durations = np.array([get_duration_sec(file, cache=cache) * self.sr for file in files])  # Could be approximate
    self.filter(files, durations)

    if self.labels:
        self.labeller = Labeller(hps.max_bow_genre_size, hps.n_tokens, self.sample_length, v3=hps.labels_v3)
def lr_lambda(step):
    if hps.lr_use_linear_decay:
        lr_scale = hps.lr_scale * min(1.0, step / hps.lr_warmup)
        decay = max(0.0, 1.0 - max(0.0, step - hps.lr_start_linear_decay) / hps.lr_decay)
        if decay == 0.0:
            if dist.get_rank() == 0:
                print("Reached end of training")
        return lr_scale * decay
    else:
        return hps.lr_scale * (hps.lr_gamma ** (step // hps.lr_decay)) * min(1.0, step / hps.lr_warmup)
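# Minimal sketch (an assumption, not the original source): the free `hps` in
# lr_lambda above suggests it was nested inside the get_lr_scheduler that
# get_optimizer calls, and handed to torch.optim.lr_scheduler.LambdaLR,
# PyTorch's hook for scaling the base LR by a per-step multiplier. The body
# here is abbreviated to the warmup term only.
def get_lr_scheduler(opt, hps):
    def lr_lambda(step):
        return hps.lr_scale * min(1.0, step / hps.lr_warmup)  # full decay logic as above
    return torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda)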
def get_ema(model, hps):
    mu = hps.mu or (1. - (hps.bs * hps.ngpus / 8.) / 1000)
    ema = None
    if hps.ema and hps.train:
        if hps.cpu_ema:
            if dist.get_rank() == 0:
                print("Using CPU EMA")
            ema = CPUEMA(model.parameters(), mu=mu, freq=hps.cpu_ema_freq)
        elif hps.ema_fused:
            ema = FusedEMA(model.parameters(), mu=mu)
        else:
            ema = EMA(model.parameters(), mu=mu)
    return ema
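# Usage sketch (an assumption): the step/swap protocol these EMA helpers follow,
# as used by train() further below. step() folds the live weights into the
# running average after each optimizer step; swap() exchanges live and averaged
# weights, so you swap in for evaluation or sampling and swap back for training.
def _example_eval_with_ema(model, ema):
    ema.swap()   # load the averaged weights into the model
    model.eval()
    # ... run evaluation / sampling with the smoothed weights ...
    model.train()
    ema.swap()   # restore the live training weights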
def load_checkpoint(path):
    restore = path
    if restore[:5] == 'gs://':
        gs_path = restore
        local_path = os.path.join(os.path.expanduser("~/.cache"), gs_path[5:])
        # Only one rank per node downloads; the others reuse the local copy
        if dist.get_rank() % 8 == 0:
            print("Downloading from gce")
            if not os.path.exists(os.path.dirname(local_path)):
                os.makedirs(os.path.dirname(local_path))
            if not os.path.exists(local_path):
                download(gs_path, local_path)
        restore = local_path
    # Wait until the download finishes before any rank loads the file
    dist.barrier()
    checkpoint = t.load(restore, map_location=t.device('cpu'))
    print("Restored from {}".format(restore))
    return checkpoint
def _setup_dist_from_mpi(master_addr, backend, port, n_attempts, verbose):
    from mpi4py import MPI  # This must be imported in order to get errors from all ranks to show up

    mpi_rank = MPI.COMM_WORLD.Get_rank()
    mpi_size = MPI.COMM_WORLD.Get_size()

    os.environ["RANK"] = str(mpi_rank)
    os.environ["WORLD_SIZE"] = str(mpi_size)
    os.environ["MASTER_ADDR"] = master_addr
    os.environ["MASTER_PORT"] = str(port)
    os.environ["NCCL_LL_THRESHOLD"] = "0"
    os.environ["NCCL_NSOCKS_PERTHREAD"] = "2"
    os.environ["NCCL_SOCKET_NTHREADS"] = "8"

    # Pin this rank to a specific GPU on the node
    local_rank = mpi_rank % 8
    if torch.cuda.is_available():
        torch.cuda.set_device(local_rank)

    if verbose:
        print(f"Connecting to master_addr: {master_addr}")

    # There is a race condition when initializing NCCL with a large number of ranks (e.g. 500),
    # so we guard against the failure and then retry
    for attempt_idx in range(n_attempts):
        try:
            dist.init_process_group(backend=backend, init_method="env://")
            assert dist.get_rank() == mpi_rank

            use_cuda = torch.cuda.is_available()
            print(f'Using cuda {use_cuda}')
            local_rank = mpi_rank % 8
            device = torch.device("cuda", local_rank) if use_cuda else torch.device("cpu")
            if use_cuda:
                torch.cuda.set_device(local_rank)

            return mpi_rank, local_rank, device
        except RuntimeError as e:
            print(f"Caught error during NCCL init (attempt {attempt_idx} of {n_attempts}): {e}")
            sleep(1 + (0.01 * mpi_rank))  # Sleep to avoid thundering herd
    raise RuntimeError("Failed to initialize NCCL")
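# Hedged sketch (an assumption): a public wrapper with defaults, showing how
# _setup_dist_from_mpi is typically invoked. The default values here are
# illustrative, not taken from the original source.
def setup_dist_from_mpi(master_addr="127.0.0.1", backend="nccl", port=29500,
                        n_attempts=5, verbose=False):
    return _setup_dist_from_mpi(master_addr, backend, port, n_attempts, verbose)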
def __init__(self, n_in, n_depth, m_conv=1.0, dilation_growth_rate=1, dilation_cycle=None,
             zero_out=False, res_scale=False, reverse_dilation=False, checkpoint_res=False):
    super().__init__()

    def _get_depth(depth):
        if dilation_cycle is None:
            return depth
        else:
            return depth % dilation_cycle

    blocks = [ResConv1DBlock(n_in, int(m_conv * n_in),
                             dilation=dilation_growth_rate ** _get_depth(depth),
                             zero_out=zero_out,
                             res_scale=1.0 if not res_scale else 1.0 / math.sqrt(n_depth))
              for depth in range(n_depth)]
    if reverse_dilation:
        blocks = blocks[::-1]
    self.checkpoint_res = checkpoint_res
    if self.checkpoint_res == 1:
        if dist.get_rank() == 0:
            print("Checkpointing convs")
        self.blocks = nn.ModuleList(blocks)
    else:
        self.model = nn.Sequential(*blocks)
def calculate_bandwidth(dataset, hps, duration=600):
    hps = DefaultSTFTValues(hps)
    n_samples = int(dataset.sr * duration)
    l1, total, total_sq, n_seen, idx = 0.0, 0.0, 0.0, 0.0, dist.get_rank()
    spec_norm_total, spec_nelem = 0.0, 0.0
    while n_seen < n_samples:
        x = dataset[idx]
        if isinstance(x, (tuple, list)):
            x, y = x
        samples = x.astype(np.float64)
        stft = librosa.core.stft(np.mean(samples, axis=1), hps.n_fft,
                                 hop_length=hps.hop_length, win_length=hps.window_size)
        spec = np.absolute(stft)
        spec_norm_total += np.linalg.norm(spec)
        spec_nelem += 1
        n_seen += int(np.prod(samples.shape))
        l1 += np.sum(np.abs(samples))
        total += np.sum(samples)
        total_sq += np.sum(samples ** 2)
        idx += max(16, dist.get_world_size())

    if dist.is_available():
        from utils.dist_utils import allreduce
        n_seen = allreduce(n_seen)
        total = allreduce(total)
        total_sq = allreduce(total_sq)
        l1 = allreduce(l1)
        spec_nelem = allreduce(spec_nelem)
        spec_norm_total = allreduce(spec_norm_total)

    mean = total / n_seen
    bandwidth = dict(l2=total_sq / n_seen - mean ** 2,
                     l1=l1 / n_seen,
                     spec=spec_norm_total / spec_nelem)
    print_once(bandwidth)
    return bandwidth
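# Usage sketch (an assumption): the returned dict gives per-sample l1/l2 stats
# and an average spectral norm, which a caller would stash on hps to normalize
# reconstruction and spectral losses. The attribute name `bandwidth` is
# illustrative, not taken from the original source.
def _example_set_bandwidth(dataset, hps):
    hps.bandwidth = calculate_bandwidth(dataset, hps, duration=600)
    return hps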
def print_once(msg):
    if (not dist.is_available()) or dist.get_rank() == 0:
        print(msg)
def print_all(msg):
    if (not dist.is_available()):
        print(msg)
    elif dist.get_rank() % 8 == 0:
        print(f'{dist.get_rank() // 8}: {msg}')
def train(model, orig_model, opt, shd, scalar, ema, logger, metrics, data_processor, hps):
    model.train()
    orig_model.train()
    if hps.prior:
        _print_keys = dict(l="loss", bpd="bpd", gn="gn", g_l="gen_loss", p_l="prime_loss")
    else:
        _print_keys = dict(l="loss", sl="spectral_loss", rl="recons_loss", e="entropy",
                           u="usage", uc="used_curr", gn="gn", pn="pn", dk="dk")

    for i, x in logger.get_range(data_processor.train_loader):
        if isinstance(x, (tuple, list)):
            x, y = x
        else:
            y = None

        x = x.to('cuda', non_blocking=True)
        if y is not None:
            y = y.to('cuda', non_blocking=True)
        x_in = x = audio_preprocess(x, hps)
        log_input_output = (logger.iters % hps.save_iters == 0)

        if hps.prior:
            forw_kwargs = dict(y=y, fp16=hps.fp16, decode=log_input_output)
        else:
            forw_kwargs = dict(loss_fn=hps.loss_fn, hps=hps)

        # Forward
        x_out, loss, _metrics = model(x, **forw_kwargs)

        # Backward
        loss, scale, grad_norm, overflow_loss, overflow_grad = backward(
            loss=loss, params=list(model.parameters()), scalar=scalar,
            fp16=hps.fp16, logger=logger)

        # Skip step if overflow
        grad_norm = allreduce(grad_norm, op=dist.ReduceOp.MAX)
        if overflow_loss or overflow_grad or grad_norm > hps.ignore_grad_norm > 0:
            zero_grad(orig_model)
            continue

        # Step opt. Divide by scale to include clipping and fp16 scaling
        logger.step()
        opt.step(scale=clipped_grad_scale(grad_norm, hps.clip, scale))
        zero_grad(orig_model)
        lr = hps.lr if shd is None else shd.get_lr()[0]
        if shd is not None:
            shd.step()
        if ema is not None:
            ema.step()
        next_lr = hps.lr if shd is None else shd.get_lr()[0]
        finished_training = (next_lr == 0.0)

        # Logging
        for key, val in _metrics.items():
            _metrics[key] = val.item()
        _metrics["loss"] = loss = loss.item() * hps.iters_before_update  # Make sure to call to free graph
        _metrics["gn"] = grad_norm
        _metrics["lr"] = lr
        _metrics["lg_loss_scale"] = np.log2(scale)

        # Average and log
        for key, val in _metrics.items():
            _metrics[key] = metrics.update(key, val, x.shape[0])
            if logger.iters % hps.log_steps == 0:
                logger.add_scalar(key, _metrics[key])

        # Save checkpoint
        with t.no_grad():
            if hps.save and (logger.iters % hps.save_iters == 1 or finished_training):
                if ema is not None:
                    ema.swap()
                orig_model.eval()
                name = 'latest' if hps.prior else f'step_{logger.iters}'
                if dist.get_rank() % 8 == 0:
                    save_checkpoint(logger, name, orig_model, opt, dict(step=logger.iters), hps)
                orig_model.train()
                if ema is not None:
                    ema.swap()

        # Sample
        with t.no_grad():
            if (logger.iters % 12000) in list(range(1, 1 + hps.iters_before_update)) or finished_training:
                if hps.prior:
                    sample_prior(orig_model, ema, logger, x_in, y, hps)

        # Input/Output
        with t.no_grad():
            if log_input_output:
                log_inputs(orig_model, logger, x_in, y, x_out, hps)

        logger.set_postfix(**{print_key: _metrics[key] for print_key, key in _print_keys.items()})
        if finished_training:
            dist.barrier()
            exit()

    logger.close_range()
    return {key: metrics.avg(key) for key in _metrics.keys()}
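# Driver sketch (an assumption, not from the original source): one way the
# train() loop above is invoked per epoch. `logger`, `metrics`, and
# `data_processor` are constructed elsewhere; only the call shape is shown.
def _example_run(model, orig_model, opt, shd, scalar, ema, logger, metrics, data_processor, hps):
    for epoch in range(hps.epochs):
        avg_metrics = train(model, orig_model, opt, shd, scalar, ema,
                            logger, metrics, data_processor, hps)
        print_once(f"Epoch {epoch}: {avg_metrics}")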
def get_range(x):
    if dist.get_rank() == 0:
        return def_tqdm(x)
    else:
        return x