def __init__(self, n_in, n_depth, m_conv=1.0, dilation_growth_rate=1, dilation_cycle=None,
             zero_out=False, res_scale=False, reverse_dilation=False, checkpoint_res=False):
    super().__init__()

    def _get_depth(depth):
        # Wrap the dilation exponent around dilation_cycle, if one is set
        if dilation_cycle is None:
            return depth
        return depth % dilation_cycle

    blocks = [ResConv1DBlock(n_in, int(m_conv * n_in),
                             dilation=dilation_growth_rate ** _get_depth(depth),
                             zero_out=zero_out,
                             res_scale=1.0 if not res_scale else 1.0 / math.sqrt(n_depth))
              for depth in range(n_depth)]
    if reverse_dilation:
        blocks = blocks[::-1]
    self.checkpoint_res = checkpoint_res
    if self.checkpoint_res == 1:
        if dist.get_rank() == 0:
            print("Checkpointing convs")
        # Keep the blocks in a ModuleList so forward() can checkpoint them one by one
        self.blocks = nn.ModuleList(blocks)
    else:
        self.model = nn.Sequential(*blocks)
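# A minimal sketch (not part of the module) of how the dilation schedule above
# evolves with depth; the numeric values are illustrative assumptions.
def dilation_schedule(n_depth, dilation_growth_rate=3, dilation_cycle=None):
    def _get_depth(depth):
        return depth if dilation_cycle is None else depth % dilation_cycle
    return [dilation_growth_rate ** _get_depth(depth) for depth in range(n_depth)]

# dilation_schedule(6, 3, dilation_cycle=3) -> [1, 3, 9, 1, 3, 9]
# dilation_schedule(4, 3)                   -> [1, 3, 9, 27]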
def init_dataset(self, hps):
    # Load the list of audio files and their durations
    files = librosa.util.find_files(f'{hps.audio_files_dir}', ['mp3', 'opus', 'm4a', 'aac', 'wav'])
    print_all(f"Found {len(files)} files. Getting durations")
    # Only one rank per 8-GPU node writes the duration cache
    cache = dist.get_rank() % 8 == 0 if dist.is_available() else True
    durations = np.array([get_duration_sec(file, cache=cache) * self.sr for file in files])  # Could be approximate
    self.filter(files, durations)

    if self.labels:
        self.labeller = Labeller(hps.max_bow_genre_size, hps.n_tokens, self.sample_length, v3=hps.labels_v3)
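# Hedged sketch of the duration caching that get_duration_sec is assumed to do
# here: read a sidecar file if present, else probe the audio and (on caching
# ranks only) write the sidecar. The '.dur' filename is an assumption.
def get_duration_sec_sketch(file, cache=False):
    cache_path = file + '.dur'  # hypothetical sidecar cache file
    if os.path.exists(cache_path):
        with open(cache_path) as f:
            return float(f.read())
    duration = librosa.get_duration(filename=file)
    if cache:
        with open(cache_path, 'w') as f:
            f.write(str(duration))
    return duration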
def load_checkpoint(path):
    restore = path
    if restore.startswith('gs://'):
        gs_path = restore
        local_path = os.path.join(os.path.expanduser("~/.cache"), gs_path[5:])
        # Only one rank per node downloads; the rest wait at the barrier below
        if dist.get_rank() % 8 == 0:
            print("Downloading from gce")
            if not os.path.exists(os.path.dirname(local_path)):
                os.makedirs(os.path.dirname(local_path))
            if not os.path.exists(local_path):
                download(gs_path, local_path)
        restore = local_path
    dist.barrier()
    checkpoint = t.load(restore, map_location=t.device('cpu'))
    print("Restored from {}".format(restore))
    return checkpoint
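# Hedged usage sketch: the function caches gs:// checkpoints under ~/.cache,
# then loads onto CPU. The bucket path and the 'model' key are assumptions.
checkpoint = load_checkpoint('gs://some-bucket/prior_level_2/checkpoint.pth.tar')
model.load_state_dict(checkpoint['model'])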
def _setup_dist_from_mpi(master_addr, backend, port, n_attempts, verbose):
    from mpi4py import MPI  # This must be imported in order to get errors from all ranks to show up

    mpi_rank = MPI.COMM_WORLD.Get_rank()
    mpi_size = MPI.COMM_WORLD.Get_size()

    os.environ["RANK"] = str(mpi_rank)
    os.environ["WORLD_SIZE"] = str(mpi_size)
    os.environ["MASTER_ADDR"] = master_addr
    os.environ["MASTER_PORT"] = str(port)
    os.environ["NCCL_LL_THRESHOLD"] = "0"
    os.environ["NCCL_NSOCKS_PERTHREAD"] = "2"
    os.environ["NCCL_SOCKET_NTHREADS"] = "8"

    # Pin this rank to a specific GPU on the node
    local_rank = mpi_rank % 8
    if torch.cuda.is_available():
        torch.cuda.set_device(local_rank)

    if verbose:
        print(f"Connecting to master_addr: {master_addr}")

    # There is a race condition when initializing NCCL with a large number of ranks (e.g. 500 ranks).
    # We guard against the failure and retry.
    for attempt_idx in range(n_attempts):
        try:
            dist.init_process_group(backend=backend, init_method="env://")
            assert dist.get_rank() == mpi_rank

            use_cuda = torch.cuda.is_available()
            print(f'Using cuda {use_cuda}')
            device = torch.device("cuda", local_rank) if use_cuda else torch.device("cpu")
            if use_cuda:
                torch.cuda.set_device(local_rank)
            return mpi_rank, local_rank, device
        except RuntimeError as e:
            print(f"Caught error during NCCL init (attempt {attempt_idx} of {n_attempts}): {e}")
            sleep(1 + (0.01 * mpi_rank))  # Sleep to avoid thundering herd

    raise RuntimeError("Failed to initialize NCCL")
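# Hedged usage sketch: this helper is typically driven by an MPI launcher with
# one process per GPU, e.g. `mpiexec -n 8 python train.py`. The values below
# are illustrative assumptions.
rank, local_rank, device = _setup_dist_from_mpi(master_addr="127.0.0.1", backend="nccl",
                                                port=29500, n_attempts=5, verbose=True)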
def calculate_bandwidth(dataset, hps, duration=600):
    hps = DefaultSTFTValues(hps)
    n_samples = int(dataset.sr * duration)
    l1, total, total_sq, n_seen, idx = 0.0, 0.0, 0.0, 0.0, dist.get_rank()
    spec_norm_total, spec_nelem = 0.0, 0.0
    while n_seen < n_samples:
        x = dataset[idx]
        if isinstance(x, (tuple, list)):
            x, y = x
        samples = x.astype(np.float64)
        # Average channels to mono before taking the magnitude spectrogram
        stft = librosa.core.stft(np.mean(samples, axis=1), n_fft=hps.n_fft,
                                 hop_length=hps.hop_length, win_length=hps.window_size)
        spec = np.absolute(stft)
        spec_norm_total += np.linalg.norm(spec)
        spec_nelem += 1
        n_seen += int(np.prod(samples.shape))
        l1 += np.sum(np.abs(samples))
        total += np.sum(samples)
        total_sq += np.sum(samples ** 2)
        idx += max(16, dist.get_world_size())

    if dist.is_available():
        from jukebox.utils.dist_utils import allreduce
        n_seen = allreduce(n_seen)
        total = allreduce(total)
        total_sq = allreduce(total_sq)
        l1 = allreduce(l1)
        spec_nelem = allreduce(spec_nelem)
        spec_norm_total = allreduce(spec_norm_total)

    mean = total / n_seen
    bandwidth = dict(l2=total_sq / n_seen - mean ** 2,
                     l1=l1 / n_seen,
                     spec=spec_norm_total / spec_nelem)
    print_once(bandwidth)
    return bandwidth
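# A minimal sketch of the three statistics gathered above on a toy stereo
# signal, to make the l1/l2/spec definitions concrete; shapes and the n_fft
# value are assumptions.
x = np.random.randn(22050, 2)                 # ~1s of stereo audio, shape (T, C)
mean = x.sum() / x.size
l2 = (x ** 2).sum() / x.size - mean ** 2      # per-sample second moment minus mean^2
l1 = np.abs(x).sum() / x.size                 # mean absolute amplitude
spec = np.abs(librosa.core.stft(x.mean(axis=1), n_fft=1024))
spec_norm = np.linalg.norm(spec)              # Frobenius norm of magnitude STFT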
def sample(self, n_samples, z=None, z_conds=None, y=None, fp16=False, temp=1.0,
           top_k=0, top_p=0.0, chunk_size=None, sample_tokens=None):
    N = n_samples
    if z is not None:
        assert z.shape[0] == N, f"Expected shape ({N},**), got shape {z.shape}"
    if y is not None:
        assert y.shape[0] == N, f"Expected shape ({N},**), got shape {y.shape}"
    if z_conds is not None:
        for z_cond in z_conds:
            assert z_cond.shape[0] == N, f"Expected shape ({N},**), got shape {z_cond.shape}"

    no_past_context = (z is None or z.shape[1] == 0)
    if dist.get_rank() == 0:
        name = {True: 'Ancestral', False: 'Primed'}[no_past_context]
        print(f"{name} sampling {n_samples} samples with temp={temp}, top_k={top_k}, top_p={top_p}")

    with t.no_grad():
        # Currently x_cond only uses the immediately-above level
        x_cond, y_cond, prime = self.get_cond(z_conds, y)
        if self.single_enc_dec:
            # assert chunk_size % self.prime_loss_dims == 0. TODO: Check if needed
            if no_past_context:
                z, x_cond = self.prior_preprocess([prime], [None, x_cond])
            else:
                z, x_cond = self.prior_preprocess([prime, z], [None, x_cond])
            if sample_tokens is not None:
                sample_tokens += self.n_tokens
            z = self.prior.primed_sample(n_samples, z, x_cond, y_cond, fp16=fp16, temp=temp,
                                         top_k=top_k, top_p=top_p, chunk_size=chunk_size,
                                         sample_tokens=sample_tokens)
            z = self.prior_postprocess(z)
        else:
            encoder_kv = self.get_encoder_kv(prime, fp16=fp16, sample=True)
            if no_past_context:
                z = self.prior.sample(n_samples, x_cond, y_cond, encoder_kv, fp16=fp16,
                                      temp=temp, top_k=top_k, top_p=top_p,
                                      sample_tokens=sample_tokens)
            else:
                z = self.prior.primed_sample(n_samples, z, x_cond, y_cond, encoder_kv,
                                             fp16=fp16, temp=temp, top_k=top_k, top_p=top_p,
                                             chunk_size=chunk_size, sample_tokens=sample_tokens)
    if sample_tokens is None:
        assert_shape(z, (N, *self.z_shape))
    return z
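# Hedged usage sketch: ancestral sampling from a top-level prior. `top_prior`
# and `labels` are placeholders; the temp/top_k/top_p values are assumptions.
with t.no_grad():
    zs = top_prior.sample(n_samples=4, z=None, z_conds=None, y=labels,
                          fp16=True, temp=0.98, top_k=0, top_p=0.0)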
def __init__(self, z_shapes, l_bins, encoder, decoder, level,
             downs_t, strides_t, labels, prior_kwargs, x_cond_kwargs, y_cond_kwargs,
             prime_kwargs, copy_input, labels_v3=False,
             merged_decoder=False, single_enc_dec=False):
    super().__init__()

    self.use_tokens = prime_kwargs.pop('use_tokens')
    self.n_tokens = prime_kwargs.pop('n_tokens')
    self.prime_loss_fraction = prime_kwargs.pop('prime_loss_fraction')

    self.copy_input = copy_input
    if self.copy_input:
        prime_kwargs['bins'] = l_bins

    self.z_shapes = z_shapes
    self.levels = len(self.z_shapes)
    self.z_shape = self.z_shapes[level]
    self.level = level
    assert level < self.levels, f"Total levels {self.levels}, got level {level}"

    self.l_bins = l_bins

    # Passing functions instead of the vqvae module to avoid getting its params
    self.encoder = encoder
    self.decoder = decoder

    # X conditioning: condition on the level immediately above, if any
    self.x_cond = (level != (self.levels - 1))
    self.cond_level = level + 1

    # Y conditioning
    self.y_cond = labels

    self.single_enc_dec = single_enc_dec

    # X conditioning
    if self.x_cond:
        self.conditioner_blocks = nn.ModuleList()
        conditioner_block = lambda _level: Conditioner(input_shape=z_shapes[_level],
                                                       bins=l_bins,
                                                       down_t=downs_t[_level],
                                                       stride_t=strides_t[_level],
                                                       **x_cond_kwargs)
        if dist.get_rank() == 0:
            print("Conditioning on 1 above level(s)")
        self.conditioner_blocks.append(conditioner_block(self.cond_level))

    # Y conditioning
    if self.y_cond:
        self.n_time = self.z_shape[0]  # Assuming STFT=TF order and raw=T1 order, so T is first dim
        self.y_emb = LabelConditioner(n_time=self.n_time, include_time_signal=not self.x_cond, **y_cond_kwargs)

    # Lyric conditioning
    if single_enc_dec:
        # Single encoder-decoder transformer over the concatenated lyric and music tokens
        self.prior_shapes = [(self.n_tokens,), prior_kwargs.pop('input_shape')]
        self.prior_bins = [prime_kwargs['bins'], prior_kwargs.pop('bins')]
        self.prior_dims = [np.prod(shape) for shape in self.prior_shapes]
        self.prior_bins_shift = np.cumsum([0, *self.prior_bins])[:-1]
        self.prior_width = prior_kwargs['width']
        print_once(f'Creating cond. autoregress with prior bins {self.prior_bins}, ')
        print_once(f'dims {self.prior_dims}, ')
        print_once(f'shift {self.prior_bins_shift}')
        print_once(f'input shape {sum(self.prior_dims)}')
        print_once(f'input bins {sum(self.prior_bins)}')
        print_once(f'Self copy is {self.copy_input}')

        self.prime_loss_dims, self.gen_loss_dims = self.prior_dims[0], self.prior_dims[1]
        self.total_loss_dims = self.prime_loss_dims + self.gen_loss_dims
        self.prior = ConditionalAutoregressive2D(input_shape=(sum(self.prior_dims),),
                                                 bins=sum(self.prior_bins),
                                                 x_cond=(self.x_cond or self.y_cond), y_cond=True,
                                                 prime_len=self.prime_loss_dims,
                                                 **prior_kwargs)
    else:
        # Separate encoder-decoder transformer
        if self.n_tokens != 0 and self.use_tokens:
            from jukebox.transformer.ops import Conv1D
            prime_input_shape = (self.n_tokens,)
            self.prime_loss_dims = np.prod(prime_input_shape)
            self.prime_acts_width, self.prime_state_width = prime_kwargs['width'], prior_kwargs['width']
            self.prime_prior = ConditionalAutoregressive2D(input_shape=prime_input_shape,
                                                           x_cond=False, y_cond=False,
                                                           only_encode=True, **prime_kwargs)
            self.prime_state_proj = Conv1D(self.prime_acts_width, self.prime_state_width,
                                           init_scale=prime_kwargs['init_scale'])
            self.prime_state_ln = LayerNorm(self.prime_state_width)
            self.prime_bins = prime_kwargs['bins']
            self.prime_x_out = nn.Linear(self.prime_state_width, self.prime_bins, bias=False)
            nn.init.normal_(self.prime_x_out.weight, std=0.02 * prior_kwargs['init_scale'])
        else:
            self.prime_loss_dims = 0
        self.gen_loss_dims = np.prod(self.z_shape)
        self.total_loss_dims = self.prime_loss_dims + self.gen_loss_dims
        self.prior = ConditionalAutoregressive2D(x_cond=(self.x_cond or self.y_cond), y_cond=self.y_cond,
                                                 encoder_dims=self.prime_loss_dims,
                                                 merged_decoder=merged_decoder, **prior_kwargs)

    self.n_ctx = self.gen_loss_dims
    self.downsamples = calculate_strides(strides_t, downs_t)
    self.cond_downsample = self.downsamples[level + 1] if level != self.levels - 1 else None
    self.raw_to_tokens = np.prod(self.downsamples[:level + 1])
    self.sample_length = self.n_ctx * self.raw_to_tokens
    if labels:
        self.labels_v3 = labels_v3
        self.labeller = Labeller(self.y_emb.max_bow_genre_size, self.n_tokens, self.sample_length, v3=self.labels_v3)
    else:
        self.labeller = EmptyLabeller()

    print(f"Level:{level}, Cond downsample:{self.cond_downsample}, Raw to tokens:{self.raw_to_tokens}, Sample length:{self.sample_length}")
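# A minimal sketch (illustrative bin counts) of the bin shifting behind the
# single encoder-decoder path: lyric tokens and music codes are merged into
# one vocabulary by offsetting each stream with prior_bins_shift.
prior_bins = [80, 2048]                               # [lyric bins, music bins]
prior_bins_shift = np.cumsum([0, *prior_bins])[:-1]   # -> array([0, 80])
lyric_tokens = np.array([5, 17]) + prior_bins_shift[0]    # stay in [0, 80)
music_tokens = np.array([3, 1024]) + prior_bins_shift[1]  # shift to [80, 2128)
merged = np.concatenate([lyric_tokens, music_tokens])     # one token stream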
def get_range(x):
    if dist.get_rank() == 0:
        return def_tqdm(x)
    else:
        return x
def print_all(msg):
    if not dist.is_available():
        print(msg)
    elif dist.get_rank() % 8 == 0:
        print(f'{dist.get_rank() // 8}: {msg}')
def print_once(msg):
    if (not dist.is_available()) or dist.get_rank() == 0:
        print(msg)
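# Usage note: print_once keeps multi-process logs readable by letting only
# global rank 0 print, while print_all prints once per 8-GPU node, prefixed
# by the node index.
print_once("starting training")          # rank 0 only
print_all("dataset ready on this node")  # one line per node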