def worker_init(device, modelname, chunk_size, overlap, read_params, alphabet,
                max_concurrent_chunks, fastq, qscore_scale, qscore_offset,
                beam, posterior, temperature):
    global all_read_params
    global process_read_partial

    all_read_params = read_params
    device = helpers.set_torch_device(device)
    model = load_model(modelname).to(device)
    stride = guess_model_stride(model)
    chunk_size = chunk_size * stride
    overlap = overlap * stride

    n_can_base = len(alphabet)
    n_can_state = nstate_flipflop(n_can_base)

    def process_read_partial(read_filename, read_id, read_params):
        res = process_read(read_filename, read_id, model, chunk_size, overlap,
                           read_params, n_can_state, stride, alphabet,
                           max_concurrent_chunks, fastq, qscore_scale,
                           qscore_offset, beam, posterior, temperature)
        return (read_id, *res)
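# A minimal sketch of how an initializer like worker_init is typically wired
# into a multiprocessing pool (not taken from this file; the Pool/imap usage,
# the `jobs` iterable of (filename, read_id, params) tuples and
# `handle_result` are illustrative assumptions):
#
#     import multiprocessing as mp
#
#     def run_job(job):
#         read_filename, read_id, read_params = job
#         # process_read_partial is the module-level closure set by worker_init
#         return process_read_partial(read_filename, read_id, read_params)
#
#     with mp.Pool(n_workers, initializer=worker_init,
#                  initargs=(device, modelname, chunk_size, overlap,
#                            read_params, alphabet, max_concurrent_chunks,
#                            fastq, qscore_scale, qscore_offset, beam,
#                            posterior, temperature)) as pool:
#         for read_id, *res in pool.imap_unordered(run_job, jobs):
#             handle_result(read_id, res)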
def main():
    args = parser.parse_args()

    device = helpers.set_torch_device(args.device)
    # TODO convert to logging
    sys.stderr.write("* Loading model.\n")
    model = load_model(args.model).to(device)
    is_cat_mod = isinstance(model.sublayers[-1],
                            layers.GlobalNormFlipFlopCatMod)
    do_output_mods = args.modified_base_output is not None
    if do_output_mods and not is_cat_mod:
        sys.stderr.write(
            "Cannot output modified bases from a canonical-base-only model.\n")
        sys.exit(1)
    n_can_states = nstate_flipflop(model.sublayers[-1].nbase)
    stride = guess_model_stride(model)
    chunk_size = args.chunk_size * stride
    chunk_overlap = args.overlap * stride

    sys.stderr.write("* Initializing reads file search.\n")
    fast5_reads = list(
        fast5utils.iterate_fast5_reads(args.input_folder, limit=args.limit,
                                       strand_list=args.input_strand_list,
                                       recursive=args.recursive))
    sys.stderr.write("* Found {} reads.\n".format(len(fast5_reads)))

    if args.scaling is not None:
        sys.stderr.write("* Loading read scaling parameters from {}.\n".format(
            args.scaling))
        all_read_params = get_per_read_params_dict_from_tsv(args.scaling)
        input_read_ids = frozenset(rec[1] for rec in fast5_reads)
        scaling_read_ids = frozenset(all_read_params.keys())
        sys.stderr.write("* {} / {} reads have scaling information.\n".format(
            len(input_read_ids & scaling_read_ids), len(input_read_ids)))
        fast5_reads = [rec for rec in fast5_reads
                       if rec[1] in scaling_read_ids]
    else:
        all_read_params = {}

    mods_fp = None
    if do_output_mods:
        mods_fp = h5py.File(args.modified_base_output, 'w')
        mods_fp.create_group('Reads')
        mod_long_names = model.sublayers[-1].ordered_mod_long_names
        sys.stderr.write("* Preparing modified base output: {}.\n".format(
            ', '.join(map(str, mod_long_names))))
        mods_fp.create_dataset(
            'mod_long_names', data=np.array(mod_long_names, dtype='S'),
            dtype=h5py.special_dtype(vlen=str))

    sys.stderr.write("* Calling reads.\n")
    nbase, ncalled, nread, nsample = 0, 0, 0, 0
    t0 = time.time()
    progress = Progress(quiet=args.quiet)
    startcharacter = '@' if args.fastq else '>'
    try:
        with open_file_or_stdout(args.output) as fh:
            for read_filename, read_id in fast5_reads:
                read_params = (all_read_params[read_id]
                               if read_id in all_read_params else None)
                basecall, qstring, read_nsample = process_read(
                    read_filename, read_id, model, chunk_size, chunk_overlap,
                    read_params, n_can_states, stride, args.alphabet,
                    is_cat_mod, mods_fp, args.max_concurrent_chunks,
                    args.fastq, args.qscore_scale, args.qscore_offset)
                if basecall is not None:
                    fh.write("{}{}\n{}\n".format(
                        startcharacter, read_id,
                        basecall[::-1] if args.reverse else basecall))
                    nbase += len(basecall)
                    ncalled += 1
                    if args.fastq:
                        fh.write("+\n{}\n".format(
                            qstring[::-1] if args.reverse else qstring))
                nread += 1
                nsample += read_nsample
                progress.step()
    finally:
        if mods_fp is not None:
            mods_fp.close()
    total_time = time.time() - t0

    sys.stderr.write("* Called {} reads in {:.2f}s\n".format(
        nread, total_time))
    sys.stderr.write("* {:7.2f} kbase / s\n".format(
        nbase / total_time / 1000.0))
    sys.stderr.write("* {:7.2f} ksample / s\n".format(
        nsample / total_time / 1000.0))
    sys.stderr.write("* {} reads failed.\n".format(nread - ncalled))
    return
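# For reference, the records written above follow the standard layouts
# (read_id, BASECALL and QSTRING are illustrative; flag names assume the
# argparse options match the args attribute names used above):
#
#     FASTA (default):   >read_id
#                        BASECALL
#     FASTQ (--fastq):   @read_id
#                        BASECALL
#                        +
#                        QSTRING
#
# Both the basecall and the quality string are reversed when --reverse is set.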
        basename = 'model_final'
    else:
        basename = 'model_checkpoint_{:05d}'.format(index)
    model_file = os.path.join(outdir, basename + '.checkpoint')
    torch.save(network, model_file)
    params_file = os.path.join(outdir, basename + '.params')
    torch.save(network.state_dict(), params_file)


if __name__ == '__main__':
    args = parser.parse_args()

    np.random.seed(args.seed)

    device = helpers.set_torch_device(args.device)

    helpers.prepare_outdir(args.outdir, args.overwrite)
    copyfile(args.model, os.path.join(args.outdir, 'model.py'))

    log = helpers.Logger(os.path.join(args.outdir, 'model.log'), args.quiet)
    log.write(helpers.formatted_env_info(device))

    log.write('* Loading data from {}\n'.format(args.chunks))
    log.write('* Per read file MD5 {}\n'.format(helpers.file_md5(args.chunks)))
    if args.limit is not None:
        log.write('* Limiting number of strands to {}\n'.format(args.limit))

    with h5py.File(args.chunks, 'r') as h5:
def main():
    args = parser.parse_args()

    is_multi_gpu = (args.local_rank is not None)
    is_lead_process = (not is_multi_gpu) or args.local_rank == 0

    if is_multi_gpu:
        # Use distributed parallel processing to run one process per GPU
        try:
            torch.distributed.init_process_group(backend='nccl')
        except Exception:
            raise Exception(
                "Unable to start multiprocessing group. " +
                "The most likely reason is that the script is running with " +
                "local_rank set but without the set-up for distributed " +
                "operation. local_rank should be used " +
                "only by torch.distributed.launch. See the README.")
        device = helpers.set_torch_device(args.local_rank)
        if args.seed is not None:
            # Make sure processes get different random picks of training data
            np.random.seed(args.seed + args.local_rank)
    else:
        device = helpers.set_torch_device(args.device)
        np.random.seed(args.seed)

    if is_lead_process:
        helpers.prepare_outdir(args.outdir, args.overwrite)
        if args.model.endswith('.py'):
            copyfile(args.model, os.path.join(args.outdir, 'model.py'))
        batchlog = helpers.BatchLog(args.outdir)
        logfile = os.path.join(args.outdir, 'model.log')
    else:
        logfile = None

    log = helpers.Logger(logfile, args.quiet)
    log.write(helpers.formatted_env_info(device))

    log.write('* Loading data from {}\n'.format(args.input))
    log.write('* Per read file MD5 {}\n'.format(helpers.file_md5(args.input)))

    if args.input_strand_list is not None:
        read_ids = list(set(helpers.get_read_ids(args.input_strand_list)))
        log.write(('* Will train from a subset of {} strands, determined ' +
                   'by read_ids in input strand list\n').format(
                       len(read_ids)))
    else:
        log.write('* Reads not filtered by id\n')
        read_ids = 'all'

    if args.limit is not None:
        log.write('* Limiting number of strands to {}\n'.format(args.limit))

    with mapped_signal_files.HDF5Reader(args.input) as per_read_file:
        alphabet_info = per_read_file.get_alphabet_information()
        read_data = per_read_file.get_multiple_reads(read_ids,
                                                     max_reads=args.limit)
        # read_data now contains a list of reads
        # (each an instance of the Read class defined in
        # mapped_signal_files.py, based on dict)

    log.write('* Using alphabet definition: {}\n'.format(str(alphabet_info)))

    if len(read_data) == 0:
        log.write('* No reads remaining for training, exiting.\n')
        exit(1)
    log.write('* Loaded {} reads.\n'.format(len(read_data)))

    # Get parameters for filtering by sampling a subset of the reads
    # Result is a tuple (median mean_dwell, mad mean_dwell)
    # Choose a chunk length in the middle of the range for this
    sampling_chunk_len = (args.chunk_len_min + args.chunk_len_max) // 2
    filter_params = chunk_selection.sample_filter_parameters(
        read_data, args.sample_nreads_before_filtering, sampling_chunk_len,
        args.filter_mean_dwell, args.filter_max_dwell)

    log.write("* Sampled {} chunks".format(
        args.sample_nreads_before_filtering))
    log.write(": median(mean_dwell)={:.2f}".format(
        filter_params.median_meandwell))
    log.write(", mad(mean_dwell)={:.2f}\n".format(
        filter_params.mad_meandwell))

    log.write('* Reading network from {}\n'.format(args.model))
    model_kwargs = {
        'stride': args.stride,
        'winlen': args.winlen,
        # Number of input features to model e.g. was >1 for event-based
        # models (level, std, dwell)
        'insize': 1,
        'size': args.size,
        'alphabet_info': alphabet_info
    }

    if is_lead_process:
        # Under pytorch's DistributedDataParallel scheme, we
        # need a clone of the start network to use as a template for saving
        # checkpoints. Necessary because DistributedParallel makes the class
        # structure different.
        network_save_skeleton = helpers.load_model(args.model, **model_kwargs)
        log.write('* Network has {} parameters.\n'.format(
            sum([p.nelement() for p in network_save_skeleton.parameters()])))

        if not alphabet_info.is_compatible_model(network_save_skeleton):
            sys.stderr.write(
                '* ERROR: Model and mapped signal files contain incompatible ' +
                'alphabet definitions (including modified bases).\n')
            sys.exit(1)
        if is_cat_mod_model(network_save_skeleton):
            log.write('* Loaded categorical modified base model.\n')
            if not alphabet_info.contains_modified_bases():
                sys.stderr.write(
                    '* ERROR: Modified bases model specified, but mapped ' +
                    'signal file does not contain modified bases.\n')
                sys.exit(1)
        else:
            log.write('* Loaded standard (canonical bases-only) model.\n')
            if alphabet_info.contains_modified_bases():
                sys.stderr.write(
                    '* ERROR: Standard (canonical bases only) model ' +
                    'specified, but mapped signal file does contain ' +
                    'modified bases.\n')
                sys.exit(1)

        log.write('* Dumping initial model\n')
        helpers.save_model(network_save_skeleton, args.outdir, 0)

    if is_multi_gpu:
        # so that processes 1,2,3.. don't try to load before process 0 has saved
        torch.distributed.barrier()
        log.write('* MultiGPU process {}'.format(args.local_rank))
        log.write(': loading initial model saved by process 0\n')
        saved_startmodel_path = os.path.join(
            args.outdir, 'model_checkpoint_00000.checkpoint')
        network = helpers.load_model(saved_startmodel_path).to(device)

        # Wrap network for training in the DistributedDataParallel structure
        network = torch.nn.parallel.DistributedDataParallel(
            network, device_ids=[args.local_rank],
            output_device=args.local_rank)
    else:
        network = network_save_skeleton.to(device)
        network_save_skeleton = None

    optimizer = torch.optim.Adam(network.parameters(), lr=args.lr_max,
                                 betas=args.adam,
                                 weight_decay=args.weight_decay,
                                 eps=args.eps)

    if args.lr_warmup is None:
        lr_warmup = args.lr_min
    else:
        lr_warmup = args.lr_warmup

    if args.lr_frac_decay is not None:
        lr_scheduler = optim.ReciprocalLR(optimizer, args.lr_frac_decay,
                                          args.warmup_batches, lr_warmup)
        log.write('* Learning rate schedule lr_max*k/(k+t)')
        log.write(', k={}, t=iterations.\n'.format(args.lr_frac_decay))
    else:
        lr_scheduler = optim.CosineFollowedByFlatLR(optimizer, args.lr_min,
                                                    args.lr_cosine_iters,
                                                    args.warmup_batches,
                                                    lr_warmup)
        log.write('* Learning rate goes like cosine from lr_max to lr_min ')
        log.write('over {} iterations.\n'.format(args.lr_cosine_iters))
    log.write('* At start, train for {} '.format(args.warmup_batches))
    log.write('batches at warm-up learning rate {:3.2}\n'.format(lr_warmup))

    score_smoothed = helpers.WindowedExpSmoother()

    # prepare modified base parameter tensors
    network_is_catmod = is_cat_mod_model(network)
    mod_factor_t = torch.tensor(args.mod_factor,
                                dtype=torch.float32).to(device)
    can_mods_offsets = (network.sublayers[-1].can_mods_offsets
                        if network_is_catmod else None)
    # mod cat inv freq weighting is currently disabled. Compute and set this
    # value to enable mod cat weighting
    mod_cat_weights = np.ones(alphabet_info.nbase, dtype=np.float32)

    # Generating list of batches for standard loss reporting
    reporting_chunk_len = (args.chunk_len_min + args.chunk_len_max) // 2
    reporting_batch_list = list(
        prepare_random_batches(device, read_data, reporting_chunk_len,
                               args.min_sub_batch_size,
                               args.reporting_sub_batches, alphabet_info,
                               filter_params, network, network_is_catmod,
                               log))
    log.write(
        ('* Standard loss report: chunk length = {} & sub-batch size ' +
         '= {} for {} sub-batches.\n').format(
             reporting_chunk_len, args.min_sub_batch_size,
             args.reporting_sub_batches))

    # Set cap at very large value (before we have any gradient stats).
    gradient_cap = constants.LARGE_VAL
    if args.gradient_cap_fraction is None:
        log.write('* No gradient capping\n')
    else:
        rolling_quantile = maths.RollingQuantile(args.gradient_cap_fraction)
        log.write('* Gradient L2 norm cap will be upper' +
                  ' {:3.2f} quantile of the last {} norms.\n'.format(
                      args.gradient_cap_fraction, rolling_quantile.window))

    total_bases = 0
    total_samples = 0
    total_chunks = 0
    # To count the numbers of different sorts of chunk rejection
    rejection_dict = defaultdict(int)

    t0 = time.time()
    log.write('* Training\n')

    for i in range(args.niteration):

        # Chunk length is chosen randomly in the range given but forced to
        # be a multiple of the stride
        batch_chunk_len = (
            np.random.randint(args.chunk_len_min, args.chunk_len_max + 1) //
            args.stride) * args.stride

        # We choose the size of a sub-batch so that the size of the data in
        # the sub-batch is about the same as args.min_sub_batch_size chunks of
        # length args.chunk_len_max
        sub_batch_size = int(args.min_sub_batch_size * args.chunk_len_max /
                             batch_chunk_len + 0.5)

        optimizer.zero_grad()

        main_batch_gen = prepare_random_batches(
            device, read_data, batch_chunk_len, sub_batch_size,
            args.sub_batches, alphabet_info, filter_params, network,
            network_is_catmod, log)

        chunk_count, fval, chunk_samples, chunk_bases, batch_rejections = \
            calculate_loss(network, network_is_catmod, main_batch_gen,
                           args.sharpen, can_mods_offsets, mod_cat_weights,
                           mod_factor_t, calc_grads=True)

        gradnorm_uncapped = torch.nn.utils.clip_grad_norm_(
            network.parameters(), gradient_cap)
        if args.gradient_cap_fraction is not None:
            gradient_cap = rolling_quantile.update(gradnorm_uncapped)

        optimizer.step()
        if is_lead_process:
            batchlog.record(
                fval, gradnorm_uncapped,
                None if args.gradient_cap_fraction is None else gradient_cap)

        total_chunks += chunk_count
        total_samples += chunk_samples
        total_bases += chunk_bases

        # Update counts of reasons for rejection
        for k, v in batch_rejections.items():
            rejection_dict[k] += v

        score_smoothed.update(fval)

        if (i + 1) % args.save_every == 0 and is_lead_process:
            helpers.save_model(network, args.outdir,
                               (i + 1) // args.save_every,
                               network_save_skeleton)
            log.write('C')
        else:
            log.write('.')

        if (i + 1) % DOTROWLENGTH == 0:

            _, rloss, _, _, _ = calculate_loss(network, network_is_catmod,
                                               reporting_batch_list,
                                               args.sharpen,
                                               can_mods_offsets,
                                               mod_cat_weights, mod_factor_t)

            # In case of super batching, additional functionality must be
            # added here

            learning_rate = lr_scheduler.get_lr()[0]
            tn = time.time()
            dt = tn - t0
            t = (' {:5d} {:7.5f} {:7.5f} {:5.2f}s ({:.2f} ksample/s {:.2f} ' +
                 'kbase/s) lr={:.2e}')
            log.write(
                t.format((i + 1) // DOTROWLENGTH, score_smoothed.value,
                         rloss, dt, total_samples / 1000.0 / dt,
                         total_bases / 1000.0 / dt, learning_rate))

            # Write summary of chunk rejection reasons
            if args.full_filter_status:
                for k, v in rejection_dict.items():
                    log.write(" {}:{} ".format(k, v))
            else:
                n_tot = n_fail = 0
                for k, v in rejection_dict.items():
                    n_tot += v
                    if k != 'pass':
                        n_fail += v
                log.write(" {:.1%} chunks filtered".format(n_fail / n_tot))
            log.write("\n")

            total_bases = 0
            total_samples = 0
            t0 = tn

            # Uncomment the lines below to check synchronisation of models
            # between processes in multi-GPU operation
            #for p in network.parameters():
            #    v = p.data.reshape(-1)[:5].to('cpu')
            #    u = p.data.reshape(-1)[-5:].to('cpu')
            #    break
            #if args.local_rank is not None:
            #    log.write("* GPU{} params:".format(args.local_rank))
            #log.write("{}...{}\n".format(v, u))

        lr_scheduler.step()

    if is_lead_process:
        helpers.save_model(network, args.outdir,
                           model_skeleton=network_save_skeleton)
def parse_init_args(args):
    is_multi_gpu = (args.local_rank is not None)
    is_lead_process = (not is_multi_gpu) or args.local_rank == 0

    # if seed is provided use this, else generate a random seed value
    seed = (np.random.randint(0, np.iinfo(np.uint32).max, dtype=np.uint32)
            if args.seed is None else args.seed)

    main_log_fn = os.path.join(args.outdir, MODEL_LOG_FILENAME)
    if is_lead_process:
        helpers.prepare_outdir(args.outdir, args.overwrite)
        if args.model.endswith('.py'):
            copyfile(args.model, os.path.join(args.outdir, 'model.py'))
        # note buffering=1 to enforce line buffering and enable
        # inspection/plotting during a run
        logs = LOGS(main=helpers.Logger(main_log_fn, args.quiet),
                    batch=open(os.path.join(args.outdir, BATCH_LOG_FILENAME),
                               'w', buffering=1),
                    validation=open(os.path.join(args.outdir,
                                                 VAL_LOG_FILENAME),
                                    'w', buffering=1))
        logs.batch.write(BATCH_HEADER)
        logs.validation.write(VAL_HEADER)

        if args.save_every % DOTROWLENGTH != 0:
            # Illegal save_every; round up to a multiple of DOTROWLENGTH
            se2 = int(math.ceil(args.save_every / DOTROWLENGTH)) * DOTROWLENGTH
            logs.main.write('* --save_every {} not a multiple of {}, rounding '
                            'to {}\n'.format(args.save_every, DOTROWLENGTH,
                                             se2))
            args.save_every = se2
        if args.chunk_len_min > args.chunk_len_max:
            # Illegal chunk length parameters
            raise ValueError('--chunk_len_min greater than --chunk_len_max')

        logs.main.write('* Using random seed: {}\n'.format(seed))
    else:
        logs = LOGS(main=helpers.Logger(main_log_fn, args.quiet))

    if is_multi_gpu:
        # Use distributed parallel processing to run one process per GPU
        try:
            torch.distributed.init_process_group(backend='nccl')
        except Exception:
            raise Exception(
                'Unable to start multiprocessing group. The most likely ' +
                'reason is that the script is running with local_rank set ' +
                'but without the set-up for distributed operation. ' +
                'local_rank should be used only by torch.distributed.' +
                'launch. See the README.')
        device = helpers.set_torch_device(args.local_rank)
        # offset seeds so different GPUs get different data streams
        seed += args.local_rank
    else:
        device = helpers.set_torch_device(args.device)
    logs.main.write(helpers.formatted_env_info(device))

    # set random seed for this process
    np.random.seed(seed)
    torch.manual_seed(seed)
    if _MAKE_TORCH_DETERMINISTIC and device.type == 'cuda':
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    return RESOURCE_INFO(is_multi_gpu, is_lead_process, device), logs
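# Multi-GPU training is expected to be launched via torch.distributed.launch,
# which starts one process per GPU and supplies --local_rank to each of them;
# single-process runs simply omit --local_rank. A hypothetical invocation
# (the script name and remaining arguments are placeholders, not defined here):
#
#     python -m torch.distributed.launch --nproc_per_node=<ngpus> \
#         train_flipflop.py <model.py> <mapped_signal.hdf5> ...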
def main():
    args = parser.parse_args()

    np.random.seed(args.seed)

    helpers.prepare_outdir(args.outdir, args.overwrite)

    device = helpers.set_torch_device(args.device)

    log = helpers.Logger(os.path.join(args.outdir, 'model.log'), args.quiet)
    log.write(helpers.formatted_env_info(device))

    if args.input_strand_list is not None:
        read_ids = list(set(helpers.get_read_ids(args.input_strand_list)))
        log.write('* Will train from a subset of {} strands\n'.format(
            len(read_ids)))
    else:
        log.write('* Reads not filtered by id\n')
        read_ids = 'all'

    if args.limit is not None:
        log.write('* Limiting number of strands to {}\n'.format(args.limit))

    with mapped_signal_files.HDF5Reader(args.input) as per_read_file:
        alphabet_info = per_read_file.get_alphabet_information()
        assert alphabet_info.nbase == 4, (
            'Squiggle prediction with modified base training data is ' +
            'not currently supported.')
        read_data = per_read_file.get_multiple_reads(read_ids,
                                                     max_reads=args.limit)
        # read_data now contains a list of reads
        # (each an instance of the Read class defined in
        # mapped_signal_files.py, based on dict)

    if len(read_data) == 0:
        log.write('* No reads remaining for training, exiting.\n')
        exit(1)
    log.write('* Loaded {} reads.\n'.format(len(read_data)))

    # Get parameters for filtering by sampling a subset of the reads
    # Result is a tuple (median mean_dwell, mad mean_dwell)
    filter_parameters = chunk_selection.sample_filter_parameters(
        read_data, args.sample_nreads_before_filtering, args.target_len,
        args.filter_mean_dwell, args.filter_max_dwell)

    log.write(
        "* Sampled {} chunks: median(mean_dwell)={:.2f}, "
        "mad(mean_dwell)={:.2f}\n".format(
            args.sample_nreads_before_filtering,
            filter_parameters.median_meandwell,
            filter_parameters.mad_meandwell))

    conv_net = create_convolution(args.size, args.depth, args.winlen)
    nparam = sum([p.data.detach().numpy().size
                  for p in conv_net.parameters()])
    log.write('* Created network. {} parameters\n'.format(nparam))
    log.write('* Depth {} layers ({} residual layers)\n'.format(
        args.depth + 2, args.depth))
    log.write('* Window width {}\n'.format(args.winlen))
    log.write('* Context +/- {} bases\n'.format(
        (args.depth + 2) * (args.winlen // 2)))

    conv_net = conv_net.to(device)

    optimizer = torch.optim.Adam(conv_net.parameters(), lr=args.lr_max,
                                 betas=args.adam,
                                 weight_decay=args.weight_decay,
                                 eps=args.eps)
    lr_scheduler = optim.ReciprocalLR(optimizer, args.lr_decay)

    # To count the numbers of different sorts of chunk rejection
    rejection_dict = defaultdict(int)

    t0 = time.time()
    score_smoothed = helpers.WindowedExpSmoother()
    total_chunks = 0

    for i in range(args.niteration):
        # If the logging threshold is 0 then we log all chunks, including
        # those rejected, so pass the log object into assemble_batch.
        # chunk_batch is a list of dicts.
        chunk_batch, batch_rejections = chunk_selection.assemble_batch(
            read_data, args.batch_size, args.target_len, filter_parameters,
            chunk_len_means_sequence_len=True)
        if len(chunk_batch) < args.batch_size:
            log.write('* Warning: only {} chunks passed filters.\n'.format(
                len(chunk_batch)))
        total_chunks += len(chunk_batch)

        # Update counts of reasons for rejection
        for k, v in batch_rejections.items():
            rejection_dict[k] += v

        # Shape of input needs to be seqlen x batchsize x embedding_dimension
        embedded_matrix = [embed_sequence(d['sequence'], alphabet=None)
                           for d in chunk_batch]
        seq_embed = torch.tensor(embedded_matrix).permute(1, 0, 2).to(device)
        # Shape of labels is a flat vector
        batch_signal = torch.tensor(
            np.concatenate([d['current'] for d in chunk_batch])).to(device)
        # Shape of lens is also a flat vector
        batch_siglen = torch.tensor([len(d['current'])
                                     for d in chunk_batch]).to(device)

        #print("First 10 elements of first sequence in batch",
        #      seq_embed[:10, 0, :])
        #print("First 10 elements of signal batch", batch_signal[:10])
        #print("First 10 lengths", batch_siglen[:10])

        optimizer.zero_grad()

        predicted_squiggle = conv_net(seq_embed)
        batch_loss = squiggle_match_loss(predicted_squiggle, batch_signal,
                                         batch_siglen, args.back_prob)
        # Normalise the summed loss by the total number of signal samples so
        # reported values are per-sample and comparable across batches
        fval = batch_loss.sum() / float(batch_siglen.sum())

        fval.backward()
        optimizer.step()

        score_smoothed.update(float(fval))

        if (i + 1) % args.save_every == 0:
            helpers.save_model(conv_net, args.outdir,
                               (i + 1) // args.save_every)
            log.write('C')
        else:
            log.write('.')

        if (i + 1) % DOTROWLENGTH == 0:
            tn = time.time()
            dt = tn - t0
            t = ' {:5d} {:7.5f} {:5.2f}s'
            log.write(t.format((i + 1) // DOTROWLENGTH,
                               score_smoothed.value, dt))
            t0 = tn
            # Write summary of chunk rejection reasons
            if args.full_filter_status:
                for k, v in rejection_dict.items():
                    log.write(" {}:{} ".format(k, v))
            else:
                n_tot = n_fail = 0
                for k, v in rejection_dict.items():
                    n_tot += v
                    if k != 'pass':
                        n_fail += v
                log.write(" {:.1%} chunks filtered".format(n_fail / n_tot))
            log.write("\n")

        lr_scheduler.step()

    helpers.save_model(conv_net, args.outdir)