def test_grad(self):
    """Check that gradient accurately gives result of small change in output
    (transition weight) matrix"""
    self.outputs.requires_grad = True
    for seq_name, seq in self.sequences.items():
        # Loss (negative log prob per block) for this sequence.
        lossvector = ctc.crf_flipflop_loss(
            self.outputs, seq, self.seqlens, self.sharpen)
        print("Sequence: {}: P={:3.4f}, loss={:3.4f}".format(
            seq_name,
            float(torch.exp(-lossvector * self.nblocks)),
            float(lossvector)))
        loss = torch.sum(lossvector)

        # Reset any gradient left over from a previous iteration, then
        # back-propagate to get d(loss)/d(outputs).
        if self.outputs.grad is not None:
            self.outputs.grad.data.zero_()
        loss.backward()

        # Perturb the outputs by a small random amount and recompute the
        # loss; compare the actual loss change with the first-order
        # estimate from the gradient.
        perturbation = torch.randn_like(self.outputs) * self.dx_size
        perturbed_outputs = self.outputs.detach() + perturbation
        perturbed_lossvector = ctc.crf_flipflop_loss(
            perturbed_outputs, seq, self.seqlens, self.sharpen)
        perturbed_loss = torch.sum(perturbed_lossvector)

        actual_change = float(perturbed_loss - loss)
        predicted_change = float(
            torch.sum(perturbation * self.outputs.grad))
        print((" Change in loss = {:3.7f}, est from " +
               "grad = {:3.7f}").format(actual_change, predicted_change))
        # Compare relative (per-unit-loss) changes to the configured
        # number of decimal places.
        self.assertAlmostEqual(actual_change / float(loss),
                               predicted_change / float(loss),
                               places=self.grad_dp)
def calculate_loss(network, network_is_catmod, batch_gen, sharpen,
                   can_mods_offsets=None, mod_cat_weights=None,
                   mod_factor_t=None, calc_grads=False):
    """Run the network over every sub-batch from ``batch_gen``, accumulate
    the flipflop CTC loss, and (optionally) accumulate gradients.

    :param network: callable model mapping input data to transition weights
    :param network_is_catmod: if True, use the categorical-modification loss
        (``ctc.cat_mod_flipflop_loss``) instead of the plain flipflop loss
    :param batch_gen: iterable yielding tuples
        (indata, seqs, seqlens, mod_cats, sub_batch_size, batch_rejections)
    :param sharpen: sharpening factor passed to the loss functions
    :param can_mods_offsets: offsets for modified bases (cat-mod loss only)
    :param mod_cat_weights: per-category weights (cat-mod loss only)
    :param mod_factor_t: modification loss scale factor (cat-mod loss only)
    :param calc_grads: if True, call ``loss.backward()`` per sub-batch and
        finally divide each parameter gradient by the number of sub-batches

    :returns: tuple (total_chunk_count, mean loss per sub-batch,
        total_samples, total_bases, rejection_dict)

    NOTE(review): if ``batch_gen`` yields nothing, the final division by
    ``n_subbatches`` raises ZeroDivisionError — presumably callers guarantee
    a non-empty generator; confirm.
    """
    total_chunk_count = 0
    total_fval = 0
    total_samples = 0
    total_bases = 0
    # Counts of reasons why chunks were rejected, accumulated over batches.
    rejection_dict = defaultdict(int)
    n_subbatches = 0
    for (indata, seqs, seqlens, mod_cats, sub_batch_size,
         batch_rejections) in batch_gen:
        n_subbatches += 1
        # Update counts of reasons for rejection
        for k, v in batch_rejections.items():
            rejection_dict[k] += v
        total_chunk_count += sub_batch_size

        # Only build the autograd graph when gradients are requested.
        with torch.set_grad_enabled(calc_grads):
            outputs = network(indata)
            if network_is_catmod:
                lossvector = ctc.cat_mod_flipflop_loss(
                    outputs, seqs, seqlens, mod_cats, can_mods_offsets,
                    mod_cat_weights, mod_factor_t, sharpen)
            else:
                lossvector = ctc.crf_flipflop_loss(
                    outputs, seqs, seqlens, sharpen)
            non_zero_seqlens = (seqlens > 0.0).float().sum()
            # In multi-GPU mode, gradients are synchronised when
            # loss.backward() is called. We need to make sure we are
            # calculating a gradient that can be synchronised across processes
            # - so loss must be per-block-in-batch
            loss = lossvector.sum() / non_zero_seqlens
            fval = float(loss)
        total_fval += fval
        total_samples += int(indata.nelement())
        total_bases += int(seqlens.sum())

        if calc_grads:
            # Gradients accumulate across sub-batches here; normalised below.
            loss.backward()
    if calc_grads:
        # Average the accumulated gradients over the sub-batches.
        for p in network.parameters():
            if p.grad is not None:
                p.grad /= n_subbatches
    return total_chunk_count, total_fval / n_subbatches, \
        total_samples, total_bases, rejection_dict
def calculate_loss(net_info, batch_gen, sharpen, mod_cat_weights=None,
                   mod_factor=None, calc_grads=False):
    """Run the network over every sub-batch from ``batch_gen``, accumulate
    the flipflop CTC loss (plus a log-partition normalisation term), and
    optionally accumulate gradients.

    :param net_info: object carrying ``net`` (the model) and ``metadata``
        (with ``can_mods_offsets`` and ``is_cat_mod``)
    :param batch_gen: iterable yielding tuples
        (indata, seqs, seqlens, mod_cats, sub_batch_size, batch_rejections)
    :param sharpen: sharpening factor passed to the loss functions
    :param mod_cat_weights: per-category weights (cat-mod loss only);
        multiplied by ``mod_factor`` before use
    :param mod_factor: modification loss scale factor (cat-mod loss only)
    :param calc_grads: if True, call ``loss.backward()`` per sub-batch and
        finally divide each parameter gradient by the number of sub-batches

    :returns: tuple (total_chunk_count, mean loss per sub-batch,
        total_samples, total_bases, rejection_dict)

    NOTE(review): an empty ``batch_gen`` leaves ``n_subbatches`` at 0 and the
    final division raises ZeroDivisionError — confirm callers prevent this.
    """
    can_mods_offsets = net_info.metadata.can_mods_offsets
    total_chunk_count = total_fval = total_samples = total_bases = \
        n_subbatches = 0
    # Counts of reasons why chunks were rejected, accumulated over batches.
    rejection_dict = defaultdict(int)
    for (indata, seqs, seqlens, mod_cats, sub_batch_size,
         batch_rejections) in batch_gen:
        n_subbatches += 1
        # Update counts of reasons for rejection
        for k, v in batch_rejections.items():
            rejection_dict[k] += v
        total_chunk_count += sub_batch_size

        # Only build the autograd graph when gradients are requested.
        with torch.set_grad_enabled(calc_grads):
            # Move input to the model's device before the forward pass.
            outputs = net_info.net(
                indata.to(get_model_device(net_info.net),
                          non_blocking=True))
            nblk = float(outputs.shape[0])
            ntrans = outputs.shape[2]
            if net_info.metadata.is_cat_mod:
                lossvector = ctc.cat_mod_flipflop_loss(
                    outputs, seqs, seqlens, mod_cats, can_mods_offsets,
                    mod_cat_weights * mod_factor, sharpen)
                # Exclude the modified-base columns from the transition
                # block used for the log-partition term below.
                ntrans -= can_mods_offsets[-1]
            else:
                lossvector = ctc.crf_flipflop_loss(
                    outputs, seqs, seqlens, sharpen)
            # Normalise by the flipflop log-partition (per block).
            lossvector += layers.flipflop_logpartition(
                outputs[:, :, :ntrans]) / nblk

            # In multi-GPU mode, gradients are synchronised when
            # loss.backward() is called. We need to make sure we are
            # calculating a gradient that can be synchronised across processes
            # - so loss must be per-block-in-batch
            loss = lossvector.mean()
            if calc_grads:
                # Gradients accumulate across sub-batches; normalised below.
                loss.backward()
        fval = float(loss)
        total_fval += fval
        total_samples += int(indata.nelement())
        total_bases += int(seqlens.sum())

    if calc_grads:
        # Average the accumulated gradients over the sub-batches.
        for p in net_info.net.parameters():
            if p.grad is not None:
                p.grad /= n_subbatches

    return total_chunk_count, total_fval / n_subbatches, \
        total_samples, total_bases, rejection_dict
def test_loss(self):
    """Test that loss = exp(-sequence probability)"""
    self.outputs.requires_grad = False

    # First check normalisation of output matrix
    logpart = float(layers.log_partition_flipflop(self.outputs))
    # Print output will appear only if test fails
    print("Check normalisation: exp(log_partition_flipflop) =" +
          " {:3.4f}, log={:3.4f}".format(np.exp(logpart), logpart))
    self.assertAlmostEqual(logpart, 0.0)

    # Now check probabilities for three sequences
    for seq_name, seq in self.sequences.items():
        expected_prob = self.path_probabilities[seq_name]
        print("Sequence {} P={:3.3f} ".format(seq_name, expected_prob))
        lossvector = ctc.crf_flipflop_loss(
            self.outputs, seq, self.seqlens, self.sharpen)
        # Loss is negative log probability per block, so invert it.
        ctc_prob = float(torch.exp(-lossvector * self.nblocks))
        print("Pctc={:3.4f}, loss={:3.4f}".format(ctc_prob,
                                                  float(lossvector)))
        self.assertAlmostEqual(expected_prob, ctc_prob)
def main():
    """Entry point: parse arguments, load mapped reads, build the flipflop
    network and train it, periodically checkpointing and logging progress.

    Side effects: creates/overwrites the output directory, writes model
    checkpoints, a chunk log, and a training log; exits the process on
    setup errors.
    """
    args = parser.parse_args()

    np.random.seed(args.seed)

    device = torch.device(args.device)
    if device.type == 'cuda':
        try:
            torch.cuda.set_device(device)
        except AttributeError:
            sys.stderr.write('ERROR: Torch not compiled with CUDA enabled ' +
                             'and GPU device set.')
            sys.exit(1)

    if not os.path.exists(args.output):
        os.mkdir(args.output)
    elif not args.overwrite:
        # BUGFIX: previously `.format` bound only to the second string
        # literal (operator precedence), so the `{}` placeholder was printed
        # literally and the directory name was never interpolated.
        sys.stderr.write(('Error: Output directory {} exists but --overwrite '
                          'is false\n').format(args.output))
        sys.exit(1)

    if not os.path.isdir(args.output):
        sys.stderr.write('Error: Output location {} is not directory\n'.format(
            args.output))
        sys.exit(1)

    copyfile(args.model, os.path.join(args.output, 'model.py'))

    # Create a logging file to save details of chunks.
    # If args.chunk_logging_threshold is set to 0 then we log all chunks
    # including those rejected.
    chunk_log = chunk_selection.ChunkLog(args.output)

    log = helpers.Logger(os.path.join(args.output, 'model.log'), args.quiet)
    log.write('* Taiyaki version {}\n'.format(__version__))
    log.write('* Command line\n')
    log.write(' '.join(sys.argv) + '\n')
    log.write('* Loading data from {}\n'.format(args.input))
    log.write('* Per read file MD5 {}\n'.format(helpers.file_md5(args.input)))

    if args.input_strand_list is not None:
        read_ids = list(set(helpers.get_read_ids(args.input_strand_list)))
        log.write(('* Will train from a subset of {} strands, determined ' +
                   'by read_ids in input strand list\n').format(len(read_ids)))
    else:
        log.write('* Reads not filtered by id\n')
        read_ids = 'all'

    if args.limit is not None:
        log.write('* Limiting number of strands to {}\n'.format(args.limit))

    with mapped_signal_files.HDF5Reader(args.input) as per_read_file:
        alphabet, _, _ = per_read_file.get_alphabet_information()
        read_data = per_read_file.get_multiple_reads(read_ids,
                                                     max_reads=args.limit)
        # read_data now contains a list of reads
        # (each an instance of the Read class defined in
        # mapped_signal_files.py, based on dict)

    if len(read_data) == 0:
        log.write('* No reads remaining for training, exiting.\n')
        sys.exit(1)
    log.write('* Loaded {} reads.\n'.format(len(read_data)))

    # Get parameters for filtering by sampling a subset of the reads
    # Result is a tuple median mean_dwell, mad mean_dwell
    # Choose a chunk length in the middle of the range for this
    sampling_chunk_len = (args.chunk_len_min + args.chunk_len_max) // 2
    filter_parameters = chunk_selection.sample_filter_parameters(
        read_data, args.sample_nreads_before_filtering, sampling_chunk_len,
        args, log, chunk_log=chunk_log)
    medmd, madmd = filter_parameters

    log.write(
        "* Sampled {} chunks: median(mean_dwell)={:.2f}, "
        "mad(mean_dwell)={:.2f}\n".format(
            args.sample_nreads_before_filtering, medmd, madmd))
    log.write('* Reading network from {}\n'.format(args.model))
    nbase = len(alphabet)
    model_kwargs = {
        'stride': args.stride,
        'winlen': args.winlen,
        # Number of input features to model e.g. was >1 for event-based
        # models (level, std, dwell)
        'insize': 1,
        'size': args.size,
        'outsize': flipflopfings.nstate_flipflop(nbase)
    }
    network = helpers.load_model(args.model, **model_kwargs).to(device)
    log.write('* Network has {} parameters.\n'.format(
        sum(p.nelement() for p in network.parameters())))

    optimizer = torch.optim.Adam(network.parameters(), lr=args.lr_max,
                                 betas=args.adam,
                                 weight_decay=args.weight_decay)
    lr_scheduler = optim.CosineFollowedByFlatLR(optimizer, args.lr_min,
                                                args.lr_cosine_iters)

    score_smoothed = helpers.WindowedExpSmoother()

    log.write('* Dumping initial model\n')
    helpers.save_model(network, args.output, 0)

    total_bases = 0
    total_samples = 0
    total_chunks = 0
    # To count the numbers of different sorts of chunk rejection
    rejection_dict = defaultdict(int)

    t0 = time.time()
    log.write('* Training\n')

    for i in range(args.niteration):
        lr_scheduler.step()
        # Chunk length is chosen randomly in the range given but forced to
        # be a multiple of the stride
        batch_chunk_len = (
            np.random.randint(args.chunk_len_min, args.chunk_len_max + 1) //
            args.stride) * args.stride
        # We choose the batch size so that the size of the data in the batch
        # is about the same as args.min_batch_size chunks of length
        # args.chunk_len_max
        target_batch_size = int(args.min_batch_size * args.chunk_len_max /
                                batch_chunk_len + 0.5)
        # ...but it can't be more than the number of reads.
        batch_size = min(target_batch_size, len(read_data))

        # If the logging threshold is 0 then we log all chunks, including
        # those rejected, so pass the log object into assemble_batch
        if args.chunk_logging_threshold == 0:
            log_rejected_chunks = chunk_log
        else:
            log_rejected_chunks = None
        # Chunk_batch is a list of dicts.
        chunk_batch, batch_rejections = chunk_selection.assemble_batch(
            read_data, batch_size, batch_chunk_len, filter_parameters, args,
            log, chunk_log=log_rejected_chunks)
        total_chunks += len(chunk_batch)
        # Update counts of reasons for rejection
        for k, v in batch_rejections.items():
            rejection_dict[k] += v

        # Shape of input tensor must be:
        #     (timesteps) x (batch size) x (input channels)
        # in this case:
        #     batch_chunk_len x batch_size x 1
        stacked_current = np.vstack([d['current'] for d in chunk_batch]).T
        indata = torch.tensor(stacked_current, device=device,
                              dtype=torch.float32).unsqueeze(2)
        # Sequence input tensor is just a 1D vector, and so is seqlens
        seqs = torch.tensor(np.concatenate([
            flipflopfings.flipflop_code(d['sequence'], nbase)
            for d in chunk_batch
        ]), device=device, dtype=torch.long)
        seqlens = torch.tensor([len(d['sequence']) for d in chunk_batch],
                               dtype=torch.long, device=device)

        optimizer.zero_grad()
        outputs = network(indata)
        lossvector = ctc.crf_flipflop_loss(outputs, seqs, seqlens,
                                           args.sharpen)
        # Normalise by the number of non-empty sequences in the batch.
        loss = lossvector.sum() / (seqlens > 0.0).float().sum()
        loss.backward()
        optimizer.step()

        fval = float(loss)
        score_smoothed.update(fval)

        # Check for poison chunk and save losses and chunk locations if we're
        # poisoned. If args.chunk_logging_threshold set to zero then we log
        # everything
        if fval / score_smoothed.value >= args.chunk_logging_threshold:
            chunk_log.write_batch(i, chunk_batch, lossvector)

        total_bases += int(seqlens.sum())
        total_samples += int(indata.nelement())

        # Doing this deletion leads to less CUDA memory usage.
        del indata, seqs, seqlens, outputs, loss, lossvector
        if device.type == 'cuda':
            torch.cuda.empty_cache()

        if (i + 1) % args.save_every == 0:
            helpers.save_model(network, args.output,
                               (i + 1) // args.save_every)
            log.write('C')
        else:
            log.write('.')

        if (i + 1) % DOTROWLENGTH == 0:
            # In case of super batching, additional functionality must be
            # added here
            learning_rate = lr_scheduler.get_lr()[0]
            tn = time.time()
            dt = tn - t0
            t = (' {:5d} {:5.3f} {:5.2f}s ({:.2f} ksample/s {:.2f} kbase/s) ' +
                 'lr={:.2e}')
            log.write(
                t.format((i + 1) // DOTROWLENGTH, score_smoothed.value, dt,
                         total_samples / 1000.0 / dt,
                         total_bases / 1000.0 / dt, learning_rate))
            # Write summary of chunk rejection reasons
            for k, v in rejection_dict.items():
                log.write(" {}:{} ".format(k, v))
            log.write("\n")
            total_bases = 0
            total_samples = 0
            t0 = tn

    helpers.save_model(network, args.output)
# Training-loop fragment: samples random chunk batches and performs one
# optimisation step per iteration. Relies on names defined outside this
# view: `chunks`, `seq_dict`, `args`, `device`, `network`, `optimizer`,
# `score_smoothed`, `total_bases`, `total_samples`, and the `ctc` module.
log.write('* Training\n')
for i in range(args.niteration):
    # Pick a random batch of chunk indices (with replacement).
    idx = np.random.randint(len(chunks), size=args.batch_size)
    # Transpose to (timesteps) x (batch size); add a trailing channel axis.
    indata = chunks[idx].transpose(1, 0)
    indata = torch.tensor(indata[..., np.newaxis], device=device,
                          dtype=torch.float32)

    # Sequences are concatenated into one flat 1D tensor; seqlens records
    # each sequence's length so the loss can split them apart again.
    seqs = [seq_dict[i] for i in idx]
    seqlens = torch.tensor([len(seq) for seq in seqs], dtype=torch.long,
                           device=device)
    seqs = torch.tensor(np.concatenate(seqs), device=device,
                        dtype=torch.long)

    optimizer.zero_grad()

    outputs = network(indata)
    # sharpen fixed at 1.0 here (no sharpening).
    lossvector = ctc.crf_flipflop_loss(outputs, seqs, seqlens, 1.0)
    # Normalise by the number of non-empty sequences in the batch.
    loss = lossvector.sum() / (seqlens > 0.0).float().sum()
    loss.backward()
    optimizer.step()

    fval = float(loss)
    score_smoothed.update(fval)

    total_bases += int(seqlens.sum())
    total_samples += int(indata.nelement())

    # Doing this deletion leads to less CUDA memory usage.
    del indata, seqs, seqlens, outputs, loss, lossvector
    if device.type == 'cuda':
        torch.cuda.empty_cache()
indata = torch.tensor(stacked_current, device=device, dtype=torch.float32).unsqueeze(2) # Sequence input tensor is just a 1D vector, and so is seqlens seqs = torch.tensor(np.concatenate([ flipflopfings.flip_flop_code(d['sequence']) for d in chunk_batch ]), device=device, dtype=torch.long) seqlens = torch.tensor([len(d['sequence']) for d in chunk_batch], dtype=torch.long, device=device) optimizer.zero_grad() outputs = network(indata) lossvector = ctc.crf_flipflop_loss(outputs, seqs, seqlens, args.sharpen) loss = lossvector.sum() / (seqlens > 0.0).float().sum() loss.backward() optimizer.step() fval = float(loss) score_smoothed.update(fval) # Check for poison chunk and save losses and chunk locations if we're poisoned # If args.chunk_logging_threshold set to zero then we log everything if fval / score_smoothed.value >= args.chunk_logging_threshold: chunk_log.write_batch(i, chunk_batch, lossvector) total_bases += int(seqlens.sum()) total_samples += int(indata.nelement())