# Assumed module header: the imports below are reconstructed from the names
# used in this file and taiyaki's package layout. `parser`, `save_model`,
# `_setup_and_logs` and `_load_data` are defined elsewhere in the original
# module and are not reproduced here.
import sys
import time
from collections import defaultdict

import numpy as np
import torch

from taiyaki import chunk_selection, ctc, helpers, layers, optim
from taiyaki.flipflopfings import flipflop_code
from taiyaki.helpers import get_model_device
def calculate_loss(net_info, batch_gen, sharpen, mod_cat_weights=None,
                   mod_factor=None, calc_grads=False):
    can_mods_offsets = net_info.metadata.can_mods_offsets
    total_chunk_count = total_fval = total_samples = total_bases = \
        n_subbatches = 0
    rejection_dict = defaultdict(int)
    for (indata, seqs, seqlens, mod_cats, sub_batch_size,
         batch_rejections) in batch_gen:
        n_subbatches += 1
        # Update counts of reasons for rejection
        for k, v in batch_rejections.items():
            rejection_dict[k] += v
        total_chunk_count += sub_batch_size

        with torch.set_grad_enabled(calc_grads):
            outputs = net_info.net(indata.to(
                get_model_device(net_info.net), non_blocking=True))
            nblk = float(outputs.shape[0])
            ntrans = outputs.shape[2]
            if net_info.metadata.is_cat_mod:
                lossvector = ctc.cat_mod_flipflop_loss(
                    outputs, seqs, seqlens, mod_cats, can_mods_offsets,
                    mod_cat_weights * mod_factor, sharpen)
                ntrans -= can_mods_offsets[-1]
            else:
                lossvector = ctc.crf_flipflop_loss(
                    outputs, seqs, seqlens, sharpen)
            lossvector += layers.flipflop_logpartition(
                outputs[:, :, :ntrans]) / nblk

            # In multi-GPU mode, gradients are synchronised when
            # loss.backward() is called. We need to make sure we are
            # calculating a gradient that can be synchronised across
            # processes - so loss must be per-block-in-batch
            loss = lossvector.mean()

        if calc_grads:
            loss.backward()

        fval = float(loss)
        total_fval += fval
        total_samples += int(indata.nelement())
        total_bases += int(seqlens.sum())

    if calc_grads:
        for p in net_info.net.parameters():
            if p.grad is not None:
                p.grad /= n_subbatches

    return total_chunk_count, total_fval / n_subbatches, \
        total_samples, total_bases, rejection_dict
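# Usage sketch (illustrative, not part of the original script): one optimiser
# step with gradient accumulation over sub-batches. `make_subbatch_gen` is a
# hypothetical generator factory yielding the (indata, seqs, seqlens,
# mod_cats, sub_batch_size, batch_rejections) tuples consumed above, and
# `net_info` is assumed to expose `.net` and `.metadata` as `calculate_loss`
# expects.
def train_step(net_info, optimizer, make_subbatch_gen, sharpen,
               mod_cat_weights, mod_factor):
    optimizer.zero_grad()
    # calculate_loss calls loss.backward() once per sub-batch, so gradients
    # accumulate in p.grad and are averaged over the number of sub-batches
    # before the optimiser steps.
    (chunk_count, mean_loss, n_samples, n_bases,
     rejections) = calculate_loss(
        net_info, make_subbatch_gen(), sharpen,
        mod_cat_weights=mod_cat_weights, mod_factor=mod_factor,
        calc_grads=True)
    optimizer.step()
    return mean_loss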
def main():
    args = parser.parse_args()

    log, loss_log, chunk_log, device = _setup_and_logs(args)

    read_data, alphabet_info = _load_data(args, log)

    # Get parameters for filtering by sampling a subset of the reads.
    # Result is a tuple (median(mean_dwell), mad(mean_dwell)).
    # Choose a chunk length in the middle of the range for this.
    filter_parameters = chunk_selection.sample_filter_parameters(
        read_data, args.sample_nreads_before_filtering,
        (args.chunk_len_min + args.chunk_len_max) // 2,
        args, log, chunk_log=chunk_log)
    log.write(("* Sampled {} chunks: median(mean_dwell)={:.2f}, " +
               "mad(mean_dwell)={:.2f}\n").format(
                   args.sample_nreads_before_filtering, *filter_parameters))

    log.write('* Reading network from {}\n'.format(args.model))
    model_kwargs = {
        'insize': 1,
        'winlen': args.winlen,
        'stride': args.stride,
        'size': args.size,
        'alphabet_info': alphabet_info
    }
    network = helpers.load_model(args.model, **model_kwargs).to(device)
    if not isinstance(network.sublayers[-1], layers.GlobalNormFlipFlopCatMod):
        log.write(
            'ERROR: Model must end with GlobalNormFlipFlopCatMod layer, ' +
            'not {}.\n'.format(str(network.sublayers[-1])))
        sys.exit(1)
    can_mods_offsets = network.sublayers[-1].can_mods_offsets
    flipflop_can_labels = network.sublayers[-1].can_labels
    flipflop_mod_labels = network.sublayers[-1].mod_labels
    flipflop_ncan_base = network.sublayers[-1].ncan_base
    log.write('* Loaded categorical modifications flip-flop model.\n')
    log.write('* Network has {} parameters.\n'.format(
        sum(p.nelement() for p in network.parameters())))

    optimizer = torch.optim.Adam(network.parameters(), lr=args.lr_max,
                                 betas=args.adam,
                                 weight_decay=args.weight_decay)
    lr_scheduler = optim.CosineFollowedByFlatLR(optimizer, args.lr_min,
                                                args.lr_cosine_iters)

    if args.scale_mod_loss:
        try:
            mod_cat_weights = alphabet_info.compute_mod_inv_freq_weights(
                read_data, args.num_inv_freq_reads)
            log.write('* Modified base weights: {}\n'.format(
                str(mod_cat_weights)))
        except NotImplementedError:
            log.write(
                '* WARNING: Some mods not found when computing inverse ' +
                'frequency weights. Consider raising ' +
                '[--num_inv_freq_reads].\n')
            mod_cat_weights = np.ones(alphabet_info.nbase, dtype=np.float32)
    else:
        mod_cat_weights = np.ones(alphabet_info.nbase, dtype=np.float32)

    log.write('* Dumping initial model\n')
    save_model(network, args.outdir, 0)

    total_bases = 0
    total_chunks = 0
    total_samples = 0

    # To count the numbers of different sorts of chunk rejection
    rejection_dict = defaultdict(int)

    score_smoothed = helpers.WindowedExpSmoother()

    t0 = time.time()
    log.write('* Training\n')

    for i in range(args.niteration):
        lr_scheduler.step()

        mod_factor_t = torch.tensor(args.mod_factor, dtype=torch.float32)

        # Chunk length is chosen randomly in the range given, but forced to
        # be a multiple of the stride
        batch_chunk_len = (
            np.random.randint(args.chunk_len_min, args.chunk_len_max + 1) //
            args.stride) * args.stride
        # We choose the batch size so that the size of the data in the batch
        # is about the same as args.min_batch_size chunks of length
        # args.chunk_len_max
        target_batch_size = int(args.min_batch_size * args.chunk_len_max /
                                batch_chunk_len + 0.5)
        # ...but it can't be more than the number of reads.
        batch_size = min(target_batch_size, len(read_data))

        # If the logging threshold is 0 then we log all chunks, including
        # those rejected, so pass the log object into assemble_batch
        if args.chunk_logging_threshold == 0:
            log_rejected_chunks = chunk_log
        else:
            log_rejected_chunks = None
        # chunk_batch is a list of dicts.
        chunk_batch, batch_rejections = chunk_selection.assemble_batch(
            read_data, batch_size, batch_chunk_len, filter_parameters,
            args, log, chunk_log=log_rejected_chunks)
        total_chunks += len(chunk_batch)

        # Update counts of reasons for rejection
        for k, v in batch_rejections.items():
            rejection_dict[k] += v

        # Shape of input tensor must be:
        #     (timesteps) x (batch size) x (input channels)
        # in this case:
        #     batch_chunk_len x batch_size x 1
        stacked_current = np.vstack([d['current'] for d in chunk_batch]).T
        indata = torch.tensor(stacked_current, device=device,
                              dtype=torch.float32).unsqueeze(2)

        seqs, mod_cats, seqlens = [], [], []
        for chunk in chunk_batch:
            chunk_labels = chunk['sequence']
            seqlens.append(len(chunk_labels))
            chunk_seq = flipflop_code(
                np.ascontiguousarray(flipflop_can_labels[chunk_labels]),
                flipflop_ncan_base)
            chunk_mod_cats = np.ascontiguousarray(
                flipflop_mod_labels[chunk_labels])
            seqs.append(chunk_seq)
            mod_cats.append(chunk_mod_cats)
        seqs, mod_cats = np.concatenate(seqs), np.concatenate(mod_cats)
        seqs = torch.tensor(seqs, dtype=torch.float32, device=device)
        seqlens = torch.tensor(seqlens, dtype=torch.long, device=device)
        mod_cats = torch.tensor(mod_cats, dtype=torch.long, device=device)

        optimizer.zero_grad()

        outputs = network(indata)
        lossvector = ctc.cat_mod_flipflop_loss(
            outputs, seqs, seqlens, mod_cats, can_mods_offsets,
            mod_cat_weights, mod_factor_t, args.sharpen)
        loss = lossvector.sum() / (seqlens > 0.0).float().sum()
        loss.backward()
        optimizer.step()

        fval = float(loss)
        score_smoothed.update(fval)

        # Check for a poison chunk, and save losses and chunk locations if
        # we are poisoned. If args.chunk_logging_threshold is set to zero
        # then we log everything.
        if fval / score_smoothed.value >= args.chunk_logging_threshold:
            chunk_log.write_batch(i, chunk_batch, lossvector)

        total_bases += int(seqlens.sum())
        total_samples += int(indata.nelement())

        del indata, seqs, mod_cats, seqlens, outputs, loss, lossvector
        if device.type == 'cuda':
            torch.cuda.empty_cache()

        loss_log.write('{}\t{:.10f}\t{:.10f}\n'.format(
            i, fval, score_smoothed.value))

        if (i + 1) % args.save_every == 0:
            save_model(network, args.outdir, (i + 1) // args.save_every)
            log.write('C')
        else:
            log.write('.')

        if (i + 1) % 50 == 0:
            # In case of super batching, additional functionality must be
            # added here
            learning_rate = lr_scheduler.get_lr()[0]
            tn = time.time()
            dt = tn - t0
            log.write((' {:5d} {:5.3f} {:5.2f}s ({:.2f} ksample/s ' +
                       '{:.2f} kbase/s) lr={:.2e}').format(
                           (i + 1) // 50, score_smoothed.value, dt,
                           total_samples / 1000 / dt,
                           total_bases / 1000 / dt, learning_rate))
            # Write summary of chunk rejection reasons
            for k, v in rejection_dict.items():
                log.write(" {}:{} ".format(k, v))
            log.write("\n")
            total_bases = 0
            total_samples = 0
            t0 = tn

    save_model(network, args.outdir)
    return
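# Assumed script entry point: the original section ends at `main()`, so this
# standard guard is added on the assumption that the module is run directly.
if __name__ == '__main__':
    main()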