def main():
    #initialize the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    utils_logging.change_log_stdout_to_log_stderr()
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if options.mandatory_list_samples:
        mandatory_list_samples = options.mandatory_list_samples.split(',')
    else:
        mandatory_list_samples = None
    if options.list_samples:
        list_samples = options.list_samples.split(',')
    else:
        list_samples = None
    run_fine = vcf_to_simple_genotype(options.vcf_file, mandatory_list_samples, list_samples,
                                      options.nb_of_required_high_qual_genotype,
                                      options.print_all_genotype)
    if run_fine:
        logging.info('Run completed')
    else:
        logging.error('Run Failed')
        sys.exit(1)
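# Note: every main() in this collection relies on two helpers that the
# excerpts do not show: _prepare_optparser() builds the option parser and
# _verifyOption() checks mandatory arguments. A minimal sketch, assuming
# Python 2's optparse; the option name, help message, and check below are
# illustrative assumptions, not the original code.
from optparse import OptionParser

def _prepare_optparser():
    parser = OptionParser(usage="usage: %prog [options]")
    # hypothetical option; each real script defines its own flags
    parser.add_option("-i", "--vcf_file", dest="vcf_file",
                      help="Path to the input VCF file.")
    return parser

def _verifyOption(options):
    # Reject the run when a mandatory option is missing (hypothetical check).
    return options.vcf_file is not None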
def main():
    # initialize the logging
    utils_logging.init_logging(logging.INFO)
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    bam_files = [options.bam_file]
    if len(args) > 0:
        bam_files.extend(args)
    command_runner.set_command_to_run_localy()
    if options.output_dir:
        list_consensus = read_white_list(options.white_list_file)
        extract_reads_from_all_bam_files_set_of_consensus(
            bam_files, list_consensus, options.output_dir, genome_loader=None,
            all_read1_consensus_file=options.read1_consensus_file)
    else:
        iter_dir_and_file = split_whitelist(white_list_file=options.white_list_file,
                                            nb_consensus_per_dir=options.nb_consensus_per_dir)
        for output_dir, sub_whitelist in iter_dir_and_file:
            command = 'python %s -g %s -w %s -o %s -b %s' % (
                sys.argv[0], options.read1_consensus_file, sub_whitelist,
                output_dir, ' '.join(bam_files))
            print command
def main():
    #initialize the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    utils_logging.change_log_stdout_to_log_stderr()
    if options.print_commands:
        utils_logging.change_log_stdout_to_log_stderr()
    else:
        command_runner.set_command_to_run_localy()
    run_fine = run_BWA_Command(options.genome_file, options.fastq_file1, options.fastq_file2,
                               options.output_dir, options.name,
                               analysis_type=options.analysis, sort=options.sort,
                               thread=options.thread, read_group=options.read_group,
                               illumina=options.illumina)
    if run_fine:
        logging.info('Run completed')
    else:
        logging.error('Run Failed')
        sys.exit(1)
def main():
    # initialize the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    if options.type_output == OUTPUT_TYPE_PHYLIP or options.type_output == OUTPUT_TYPE_NEXUS:
        vcf_to_phylip_or_nexus(options.input_vcf_file, options.output_file, options.pop_file,
                               options.geno_qual_threshold, options.max_prop_missing,
                               options.type_output)
    elif options.type_output == OUTPUT_TYPE_STRUCTURE:
        vcf_to_structure(options.input_vcf_file, options.output_file, options.pop_file,
                         options.geno_qual_threshold, options.max_prop_missing, options.phased)
    elif options.type_output == OUTPUT_TYPE_GENEPOP:
        vcf_2_genepop(options.input_vcf_file, options.output_file, options.pop_file,
                      options.geno_qual_threshold, options.max_prop_missing, options.phased)
    elif options.type_output == OUTPUT_TYPE_VCF:
        vcf_2_vcf(options.input_vcf_file, options.output_file,
                  options.geno_qual_threshold, options.max_prop_missing, options.phased)
def main():
    #initialize the logging
    utils_logging.init_logging(logging.INFO)
    #utils_logging.init_logging(logging.CRITICAL)
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    if not options.print_command:
        command_runner.set_command_to_run_localy()
    start_time = time.time()
    #comment = '%s: contig_align:%s, mismatches:%s site with 1 contig:%s site with more than 1 contig:%s'
    all_assembler_to_try = options.assembler_name.split(',')
    assembly_function_list = []
    for assembler_name in all_assembler_to_try:
        assembly_function = get_assembly_function(assembler_name)
        assembly_function_list.append(assembly_function)
    if options.rg_ids:
        rg_ids = options.rg_ids.split()
    else:
        rg_ids = []
    if options.fastq_dir:
        run_all_fastq_files(options.fastq_dir, assembly_function_list, options.estimated_size,
                            options.subsample_nb_read, rg_ids, options.force_merge,
                            adapter_file=options.adapter_file)
    elif options.fastq_file:
        output_dir = os.path.dirname(options.fastq_file)
        name = os.path.basename(options.fastq_file)[:-len("_2.fastq")]
        read1_fasta = os.path.join(output_dir, name + "_1.fa")
        contig_file = run_one_fastq_file(options.fastq_file, output_dir, assembly_function_list,
                                         estimated_size=options.estimated_size,
                                         subsample_nb_read=options.subsample_nb_read,
                                         read1_fasta=read1_fasta, name=name,
                                         force_merge=options.force_merge,
                                         adapter_file=options.adapter_file)
    logging.info("Elapsed time:%.1f seconds" % (time.time() - start_time))
def main():
    # initialize the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    command_runner.set_command_to_run_localy()
    shift_reads(options.bam_file, options.fasta_file, options.output_file)
def main():
    #initialize the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    command_runner.set_command_to_run_localy()
    mark_duplicate(options.input_bam)
def main():
    #initialize the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    print detect_hemizygous_markers(options.pop_file, options.coverage_file)
def main():
    # initialize the logging
    utils_logging.init_logging(logging.INFO)
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    command_runner.set_command_to_run_localy()
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    run_all_snps_call(options.consensus_dir)
def main():
    #initialize the logging
    utils_logging.init_logging(logging.INFO)
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    coverage_files = [options.coverage_files]
    if len(args) > 0:
        coverage_files.extend(args)
    merge_coverage(coverage_files, options.output_file)
def main():
    #initialize the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if options.pair:
        set_read1_consensus_to_read1_and_read2(sys.stdin, sys.stdout)
    else:
        set_read1_consensus_to_read2(sys.stdin, sys.stdout)
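# This script is a stdin-to-stdout filter, so it is meant to sit in a shell
# pipeline. A hypothetical invocation, assuming the script is named
# set_read1_consensus.py and exposes a --pair flag (both names are
# assumptions, not taken from the original source):
#
#   samtools view -h input.bam \
#       | python set_read1_consensus.py --pair \
#       | samtools view -bS - > output.bam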
def main():
    #initialize the logging
    utils_logging.init_logging(logging.INFO)
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if not options.print_command:
        command_runner.set_command_to_run_localy()
    assess_read2_contigs(options.contig_file, options.genome_file,
                         options.sites_file, options.output_sites)
def main():
    # initialize the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    vcf_to_lepmap(options.input_vcf_file, options.output_file, options.sex_file,
                  options.mother_name, options.father_name, options.family_name,
                  options.geno_qual_threshold, options.max_prop_missing)
def main():
    #initialise the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    utils_logging.change_log_stdout_to_log_stderr()
    if options.wiki:
        parse_process_radtag_for_wiki(options.log_process_radtags, options.pools)
    else:
        parse_process_radtag(options.log_process_radtags, options.pools, options.project_id)
def main():
    #initialize the logging
    utils_logging.init_logging()
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    utils_logging.change_log_stdout_to_log_stderr()
    bam_files = [options.bam_files]
    for file in args:
        if os.path.exists(file):
            bam_files.append(file)
    RAD_median_coverage(bam_files, options.output_file)
def main(args):
    cfg = get_config(args.config)
    cfg.graph = args.graph
    rank = flow.env.get_rank()
    world_size = flow.env.get_world_size()
    placement = flow.env.all_device_placement("cuda")
    os.makedirs(cfg.output, exist_ok=True)
    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)
    # root dir of loading checkpoint
    load_path = None
    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))
    trainer = Trainer(cfg, placement, load_path, world_size, rank)
    trainer()
def main():
    # initialize the logging
    utils_logging.init_logging(logging.INFO)
    #utils_logging.init_logging(logging.CRITICAL)
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    if not options.print_command:
        command_runner.set_command_to_run_localy()
    start_time = time.time()
    run_all_fastq_files(options.consensus_dir, readgroup_file=options.readgroup_file,
                        snp_call=options.snp_call)
    logging.info("Elapsed time:%.1f seconds" % (time.time() - start_time))
def main():
    #initialize the logging
    utils_logging.init_logging(logging.INFO)
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if not options.print_commands:
        command_runner.set_command_to_run_localy()
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    if options.final_merge:
        code = merge_all_results(options.consensus_dir)
    else:
        code = merge_results(options.consensus_dir)
    sys.exit(code)
def main():
    #initialize the logging
    utils_logging.init_logging(logging.INFO)
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    utils_logging.init_logging(output_level=None,
                               log_file_name=os.path.join(options.output_dir,
                                                          "extract_and_assemble.log"))
    bam_files = [options.bam_files]
    if len(args) > 0:
        bam_files.extend(args)
    all_assembler_to_try = options.assembler_name.split(',')
    assembly_function_list = []
    for assembler_name in all_assembler_to_try:
        assembly_function = RAD_assemble_read2.get_assembly_function(assembler_name)
        assembly_function_list.append(assembly_function)
    command_runner.set_command_to_run_localy()
    if options.rg_ids:
        rg_ids = options.rg_ids.split()
    else:
        rg_ids = []
    extract_and_assemble(bam_files, options.read1_consensus_file, options.white_list_file,
                         options.output_dir, assembly_function_list,
                         subsample_nb_read=options.subsample_nb_read, rg_ids=rg_ids,
                         adapter_file=options.adapter_file)
def main():
    #initialize the logging
    utils_logging.init_logging(logging.INFO)
    #Setup options
    optparser = _prepare_optparser()
    (options, args) = optparser.parse_args()
    #verify options
    arg_pass = _verifyOption(options)
    if not arg_pass:
        logging.warning(optparser.get_usage())
        logging.critical("Non valid arguments: exit")
        sys.exit(1)
    if not options.print_commands:
        command_runner.set_command_to_run_localy()
    if options.debug:
        utils_logging.init_logging(logging.DEBUG)
    bam_files = [options.bam_files]
    if len(args) > 0:
        bam_files.extend(args)
    code = merge_bam_files(bam_files, options.output_file, options.genome_file,
                           options.read_group_file, options.per_sample)
    sys.exit(code)
def main(args):
    dist.init_process_group(backend='nccl', init_method='env://')
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    # cfg is assumed to be a module-level config object imported in the source file
    if not os.path.exists(cfg.output) and rank == 0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)
    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)
    trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank, dataset=trainset,
                               batch_size=cfg.batch_size, sampler=train_sampler,
                               num_workers=0, pin_memory=True, drop_last=True)
    # 'is' tests identity, not equality; use == for value comparisons
    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = eval("backbones.{}".format(args.network))(False, dropout=dropout,
                                                         fp16=cfg.fp16).to(local_rank)
    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume fail, backbone init successfully!")
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()
    margin_softmax = eval("losses.{}".format(args.loss))()
    module_partial_fc = PartialFC(rank=rank, local_rank=local_rank, world_size=world_size,
                                  resume=args.resume, batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax, num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size, prefix=cfg.output)
    opt_backbone = torch.optim.SGD(params=[{'params': backbone.parameters()}],
                                   lr=cfg.lr / 512 * cfg.batch_size * world_size,
                                   momentum=0.9, weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{'params': module_partial_fc.parameters()}],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9, weight_decay=cfg.weight_decay)
    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_backbone,
                                                           lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=cfg.lr_func)
    start_epoch = 0
    total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0:
        logging.info("Total Step is: %d" % total_step)
    callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
    loss = AverageMeter()
    global_step = 0
    grad_scaler = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size,
                                    growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            features = F.normalize(backbone(img))
            x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc)
            if cfg.fp16:
                features.backward(grad_scaler.scale(x_grad))
                grad_scaler.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_scaler.step(opt_backbone)
                grad_scaler.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)
        scheduler_backbone.step()
        scheduler_pfc.step()
    dist.destroy_process_group()
tf.local_variables_initializer().run()
tf.global_variables_initializer().run()
# restore checkpoint
if checkpoint_dir:
    checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir + 'save_models/')
    saver.restore(session, checkpoint_path)
    print('loaded saved model ', checkpoint_path)
losses = []
train_perpl = []
start_time = time()
# Print log headers
fmt = init_logging(cfg['logpath'])
try:
    for epoch in range(cfg['epochs']):
        for _ in trange(cfg['train_size'] // hp['batch_size']):
            fd = {x: get_images(session, subset='train')}
            if model_type == 'VQVAE':
                _, pp = session.run([train_op, bottleneck_output['perplexity']], fd)
                train_perpl.append(pp)
            else:
                session.run(train_op, fd)
def main(args):
    # rank and world_size are assumed to be set up at module level
    # (e.g. from the torch.distributed environment variables) in the source file
    seed = 2333
    seed = seed + rank
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.set_device(args.local_rank)
    cfg = get_config(args.config)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)
    summary_writer = (SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard"))
                      if rank == 0 else None)
    train_loader = get_dataloader(cfg.rec, local_rank=args.local_rank,
                                  batch_size=cfg.batch_size, dali=cfg.dali)
    backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16,
                         num_features=cfg.embedding_size).cuda()
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[args.local_rank],
        bucket_cap_mb=16, find_unused_parameters=True)
    backbone.train()
    # FIXME using gradient checkpoint if there are some unused parameters will cause error
    backbone._set_static_graph()
    margin_loss = CombinedMarginLoss(64, cfg.margin_list[0], cfg.margin_list[1],
                                     cfg.margin_list[2], cfg.interclass_filtering_threshold)
    if cfg.optimizer == "sgd":
        module_partial_fc = PartialFC(margin_loss, cfg.embedding_size, cfg.num_classes,
                                      cfg.sample_rate, cfg.fp16)
        module_partial_fc.train().cuda()
        opt = torch.optim.SGD(params=[{"params": backbone.parameters()},
                                      {"params": module_partial_fc.parameters()}],
                              lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay)
    elif cfg.optimizer == "adamw":
        module_partial_fc = PartialFCAdamW(margin_loss, cfg.embedding_size, cfg.num_classes,
                                           cfg.sample_rate, cfg.fp16)
        module_partial_fc.train().cuda()
        opt = torch.optim.AdamW(params=[{"params": backbone.parameters()},
                                        {"params": module_partial_fc.parameters()}],
                                lr=cfg.lr, weight_decay=cfg.weight_decay)
    else:
        # a bare 'raise' outside an except block is itself an error;
        # raise an explicit exception for an unknown optimizer instead
        raise ValueError("unsupported optimizer: %s" % cfg.optimizer)
    cfg.total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = cfg.num_image // cfg.total_batch_size * cfg.warmup_epoch
    cfg.total_step = cfg.num_image // cfg.total_batch_size * cfg.num_epoch
    lr_scheduler = PolyScheduler(optimizer=opt, base_lr=cfg.lr, max_steps=cfg.total_step,
                                 warmup_steps=cfg.warmup_step)
    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))
    callback_verification = CallBackVerification(val_targets=cfg.val_targets,
                                                 rec_prefix=cfg.rec,
                                                 summary_writer=summary_writer)
    callback_logging = CallBackLogging(frequent=cfg.frequent, total_step=cfg.total_step,
                                       batch_size=cfg.batch_size, writer=summary_writer)
    loss_am = AverageMeter()
    start_epoch = 0
    global_step = 0
    amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100)
    for epoch in range(start_epoch, cfg.num_epoch):
        if isinstance(train_loader, DataLoader):
            train_loader.sampler.set_epoch(epoch)
        for _, (img, local_labels) in enumerate(train_loader):
            global_step += 1
            local_embeddings = backbone(img)
            loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels, opt)
            if cfg.fp16:
                amp.scale(loss).backward()
                amp.unscale_(opt)
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                amp.step(opt)
                amp.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                opt.step()
            opt.zero_grad()
            lr_scheduler.step()
            with torch.no_grad():
                loss_am.update(loss.item(), 1)
                callback_logging(global_step, loss_am, epoch, cfg.fp16,
                                 lr_scheduler.get_last_lr()[0], amp)
                if global_step % cfg.verbose == 0 and global_step > 200:
                    callback_verification(global_step, backbone)
        path_pfc = os.path.join(cfg.output, "softmax_fc_gpu_{}.pt".format(rank))
        torch.save(module_partial_fc.state_dict(), path_pfc)
        if rank == 0:
            path_module = os.path.join(cfg.output, "model.pt")
            torch.save(backbone.module.state_dict(), path_module)
        if cfg.dali:
            train_loader.reset()
    if rank == 0:
        path_module = os.path.join(cfg.output, "model.pt")
        torch.save(backbone.module.state_dict(), path_module)
        from torch2onnx import convert_onnx
        convert_onnx(backbone.module.cpu().eval(), path_module,
                     os.path.join(cfg.output, "model.onnx"))
    distributed.destroy_process_group()
def main(args):
    # dist
    world_size = int(os.environ['WORLD_SIZE'])
    local_rank = args.local_rank
    rank = int(os.environ['RANK'])
    dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])
    dist.init_process_group(backend='nccl', init_method=dist_url,
                            rank=rank, world_size=world_size)
    torch.cuda.set_device(local_rank)
    # logging (cfg is assumed to be a module-level config object)
    if not os.path.exists(cfg.output) and rank == 0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)
    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)
    # data
    trainset = MXFaceDataset(root_dir=cfg.rec)
    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    train_loader = DataLoader(trainset, cfg.batch_size, shuffle=False, num_workers=8,
                              pin_memory=True, sampler=train_sampler, drop_last=True)
    # backbone and DDP
    f_ = open(args.pruned_info)
    cfg_ = [int(x) for x in f_.read().split()]
    f_.close()
    backbone = backbones.__dict__[args.network](dropout=cfg.dropout, fp16=cfg.fp16, cfg=cfg_)
    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume fail, backbone init successfully!")
    backbone = backbone.cuda()
    backbone = torch.nn.SyncBatchNorm.convert_sync_batchnorm(backbone)
    backbone = DDP(module=backbone, device_ids=[local_rank])
    # fc and loss
    margin_softmax = losses.__dict__[args.loss]()
    module_partial_fc = PartialFC(rank=rank, local_rank=local_rank, world_size=world_size,
                                  resume=args.resume, batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax, num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size, prefix=cfg.output)
    # optimizer
    opt_backbone = torch.optim.SGD(params=[{'params': backbone.parameters()}],
                                   lr=cfg.lr / 512 * cfg.batch_size * world_size,
                                   momentum=0.9, weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{'params': module_partial_fc.parameters()}],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9, weight_decay=cfg.weight_decay)
    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_backbone,
                                                           lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=cfg.lr_func)
    # train and valid
    start_epoch = 0
    total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0:
        logging.info("Total Step is: %d" % total_step)
    callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
    loss = AverageMeter()
    global_step = 0
    grad_scaler = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size,
                                    growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            img = img.cuda()
            label = label.cuda()
            global_step += 1
            features = F.normalize(backbone(img))
            x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc)
            if cfg.fp16:
                features.backward(grad_scaler.scale(x_grad))
                grad_scaler.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_scaler.step(opt_backbone)
                grad_scaler.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            lr = opt_backbone.state_dict()['param_groups'][0]['lr']
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler, lr)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)
        scheduler_backbone.step()
        scheduler_pfc.step()
    # release dist
    dist.destroy_process_group()
'''
Created on 5 Mar 2010

@author: tcezard
'''
import logging
from utils import DNA_tools, utils_logging
from annotation_loader import Exon_annotation_Retriver

if __name__ == "__main__":
    utils_logging.init_logging()
    gff_file = '/home/tcezard/projects/2010053_Tom_Little_RNAseq_2/for tim/dmag_ep24augmap2an2.gff'
    exon_reader = Exon_annotation_Retriver(annotation_file=gff_file)
    annotations = exon_reader.get_annotation_from_chr('scaffold03376')
    for anno in annotations:
        start, end, transcript, gene, transcript_start, transcript_end, \
            cds_start, cds_end, chr, dummy = anno
        #if transcript=='AUGep24bs03376g85t1':
        print anno
def main(args):
    cfg = get_config(args.config)
    cfg.device_num_per_node = args.device_num_per_node
    cfg.total_batch_size = cfg.batch_size * cfg.device_num_per_node * cfg.num_nodes
    cfg.steps_per_epoch = math.ceil(cfg.num_image / cfg.total_batch_size)
    cfg.total_step = cfg.num_epoch * cfg.steps_per_epoch
    cfg.lr_steps = (np.array(cfg.decay_epoch) * cfg.steps_per_epoch).tolist()
    lr_scales = [0.1, 0.01, 0.001, 0.0001]
    cfg.lr_scales = lr_scales[:len(cfg.lr_steps)]
    cfg.output = os.path.join("work_dir", cfg.output, cfg.loss)
    world_size = cfg.num_nodes
    os.makedirs(cfg.output, exist_ok=True)
    log_root = logging.getLogger()
    init_logging(log_root, cfg.output)
    flow.config.gpu_device_num(cfg.device_num_per_node)
    logging.info("gpu num: %d" % cfg.device_num_per_node)
    if cfg.num_nodes > 1:
        assert cfg.num_nodes <= len(cfg.node_ips), \
            "The number of nodes should not be greater than length of node_ips list."
        flow.env.ctrl_port(12138)
        nodes = []
        for ip in cfg.node_ips:
            addr_dict = {}
            addr_dict["addr"] = ip
            nodes.append(addr_dict)
        flow.env.machine(nodes)
    flow.env.log_dir(cfg.output)
    for key, value in cfg.items():
        num_space = 35 - len(key)
        logging.info(": " + key + " " * num_space + str(value))
    train_func = make_train_func(cfg)
    val_infer = Validator(cfg)
    callback_verification = CallBackVerification(3000, cfg.val_targets, cfg.eval_ofrecord_path)
    callback_logging = CallBackLogging(50, cfg.total_step, cfg.total_batch_size,
                                       world_size, None)
    if cfg.resume and os.path.exists(cfg.model_load_dir):
        logging.info("Loading model from {}".format(cfg.model_load_dir))
        variables = flow.checkpoint.get(cfg.model_load_dir)
        flow.load_variables(variables)
    start_epoch = 0
    global_step = 0
    lr = cfg.lr
    for epoch in range(start_epoch, cfg.num_epoch):
        for steps in range(cfg.steps_per_epoch):
            train_func().async_get(callback_logging.metric_cb(global_step, epoch, lr))
            callback_verification(global_step, val_infer.get_symbol_val_fn)
            global_step += 1
        if epoch in cfg.decay_epoch:
            lr *= 0.1
            logging.info("lr_steps: %d" % global_step)
            logging.info("lr change to %f" % lr)
        # snapshot
        path = os.path.join(cfg.output, "snapshot_" + str(epoch))
        flow.checkpoint.save(path)
        logging.info("oneflow Model Saved in '{}'".format(path))
        # (fragment: tail of the PhasedVcfReader generator method)
            previous_reference = vcf_records.get_reference()
        if len(phased_vcf_record) > 0:
            self.marker_created += 1
            yield PhasedVcfRecord(phased_vcf_record, self.all_samples)

    def get_sample_names(self):
        """return the sample name from the header"""
        return self.reader.get_sample_names()

    def get_header_lines(self):
        """return the header lines in one string"""
        return self.reader.get_header_lines()

    def get_nb_marker_created(self):
        return self.marker_created

    def get_discard_quality_parent(self):
        return self.discard_quality_parent

    def get_nb_snps(self):
        return self.nb_snps


if __name__ == "__main__":
    utils_logging.init_logging(logging.DEBUG)
    input_vcf = sys.argv[1]
    reader = PhasedVcfReader(input_vcf, reference_samples=None,
                             number_mandatory_sample=None, geno_qual_threshold=20)
    for val in reader:
        print val
        print
def test_all():
    import utils_param
    utils_logging.init_logging()
    test_pass = True
    wtss_param = utils_param.get_wtss_parameter()
    # Each result is accumulated into test_pass. The test call comes first so
    # every check still runs after an earlier failure; the original version
    # discarded the result of each 'test_pass and test_...' expression.
    test_pass = test_is_dir(wtss_param.get_maq_dir(), "maq") and test_pass
    test_pass = test_is_dir(wtss_param.get_script_dir(), "script") and test_pass
    test_pass = test_is_dir(wtss_param.get_samtools_dir(), "samtools") and test_pass
    test_pass = test_is_dir(wtss_param.get_snvmix2_dir(), "snvmix2") and test_pass
    test_pass = test_is_not_None(wtss_param.get_snvmix2_filter_type(),
                                 "snvmix2 filter type") and test_pass
    test_pass = test_is_int(wtss_param.get_snvmix2_base_qual(),
                            "snvmix2 base quality") and test_pass
    test_pass = test_is_int(wtss_param.get_snvmix2_map_qual(),
                            "snvmix2 mapping quality") and test_pass
    test_pass = test_is_float(wtss_param.get_snvmix2_prob(),
                              "snvmix2 probability") and test_pass
    test_pass = test_is_dir(wtss_param.get_fp_dir(), "FindPeak") and test_pass
    test_pass = test_is_int(wtss_param.get_fp_qualityfilter(),
                            "FindPeak quality filter") and test_pass
    test_pass = test_is_comma_separated_list_int(
        wtss_param.get_fp_pet_flag_se(), "FindPeak pet flag for single end") and test_pass
    test_pass = test_is_comma_separated_list_int(
        wtss_param.get_fp_pet_flag_pe(), "FindPeak pet flag for paired end") and test_pass
    if test_is_not_None(wtss_param.get_available_species(), "available species"):
        for sp_code in wtss_param.get_available_species().keys():
            test_pass = test_is_dir(wtss_param.get_bfa_dir(sp_code),
                                    "bfa dir for %s" % sp_code) and test_pass
            test_pass = test_is_dir(wtss_param.get_bfa_independent_dir(sp_code),
                                    "bfa independent dir for %s" % sp_code) and test_pass
            test_pass = test_is_file(wtss_param.get_index_file(sp_code),
                                     "index file for %s" % sp_code) and test_pass
            test_pass = test_is_dir(wtss_param.get_ensembl_dir(sp_code),
                                    "ensembl dir for %s" % sp_code) and test_pass
            test_pass = test_is_dir(wtss_param.get_intron_ensembl_dir(sp_code),
                                    "intron ensembl dir for %s" % sp_code) and test_pass
            test_pass = test_is_file(wtss_param.get_known_snps(sp_code),
                                     "known snps file for %s" % sp_code) and test_pass
            test_pass = test_is_file(wtss_param.get_list_contig_file(sp_code),
                                     "list of contig file for %s" % sp_code) and test_pass
            test_pass = test_is_valid_database(wtss_param.get_database_name(sp_code),
                                               "database for %s" % sp_code) and test_pass
            test_pass = test_is_file(wtss_param.get_coding_regions(sp_code),
                                     "coding region file for %s" % sp_code) and test_pass
            test_pass = test_is_file(wtss_param.get_standard_chromosomes(sp_code),
                                     "standard chromosomes file for %s" % sp_code) and test_pass
            test_pass = test_is_file(wtss_param.get_translate_chr_names(sp_code),
                                     "translate chr names file for %s" % sp_code) and test_pass
    wtss_param.get_env_name()
    wtss_param.get_env_shortname()
    wtss_param.get_env_version()
    wtss_param.get_bio_apps_analysis()
    wtss_param.get_wtss_analysis()
    wtss_param.get_resource_conf()
    wtss_param.get_mysql_host()
    wtss_param.get_mysql_user()
    wtss_param.get_mysql_password()
    return test_pass
        # (fragment: tail of the GenomeLoader record reader, which returns
        # None once the file is exhausted)
            return fasta_seq
        return None

    def __iter__(self):
        return iter(self.next, None)

    def __del__(self):
        self.open_genome_file.close()
        self.all_chr = None

    def close(self):
        self.__del__()


# "1__main__" never matches __name__, so this benchmark block is disabled
if __name__ == "1__main__":
    utils_logging.init_logging(logging.DEBUG)
    import benchmarck_timer
    genome_file = '/home/tcezard/genomes/homo_sapiens/hg19/hg19.fa'
    genome_loader = GenomeLoader(genome_file)
    benchmarck_timer.start('load')
    sequence = genome_loader.get_chr('dfhgiduh')
    benchmarck_timer.stop('load')
    benchmarck_timer.start('retrieve')
    sequence = genome_loader.get_chr('1')
    benchmarck_timer.stop('retrieve')
    benchmarck_timer.print_all()

if __name__ == "__main__":
    utils_logging.init_logging(logging.DEBUG)
    import benchmarck_timer
    genome_file = '/home/tcezard/genomes/homo_sapiens/hg19/hg19.fa'
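# GenomeLoader.__iter__ above uses the two-argument iter(callable, sentinel)
# form: self.next is called repeatedly until it returns the sentinel None,
# which is why the reader returns None once the FASTA file is exhausted.
# Hypothetical usage sketch (process() is illustrative, not from the source):
#
#   for fasta_seq in GenomeLoader(genome_file):
#       process(fasta_seq)  # iteration stops automatically at end of file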
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    cfg = get_config(args.config)
    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist.init_process_group('nccl')
    except KeyError:
        world_size = 1
        rank = 0
        dist.init_process_group(backend='nccl', init_method="tcp://127.0.0.1:12584",
                                rank=rank, world_size=world_size)
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)
    if cfg.rec == "synthetic":
        train_set = SyntheticDataset(local_rank=local_rank)
    else:
        train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank, dataset=train_set,
                               batch_size=cfg.batch_size, sampler=train_sampler,
                               num_workers=2, pin_memory=True, drop_last=True)
    backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16,
                         num_features=cfg.embedding_size).to(local_rank)
    # NOTE: these two lines print the model summary and then stop the program;
    # the training code below is unreachable until they are removed.
    summary(backbone, input_size=(3, 112, 112))
    exit()
    if cfg.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            if rank == 0:
                logging.info("resume fail, backbone init successfully!")
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()
    if cfg.loss == 'magface':
        margin_softmax = losses.get_loss(cfg.loss, lambda_g=cfg.lambda_g)
    elif cfg.loss == 'mag_cosface':
        margin_softmax = losses.get_loss(cfg.loss)
    else:
        margin_softmax = losses.get_loss(cfg.loss, s=cfg.s, m1=cfg.m1, m2=cfg.m2, m3=cfg.m3)
    module_partial_fc = PartialFC(rank=rank, local_rank=local_rank, world_size=world_size,
                                  resume=cfg.resume, batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax, num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size, prefix=cfg.output)
    opt_backbone = torch.optim.SGD(params=[{'params': backbone.parameters()}],
                                   lr=cfg.lr / 512 * cfg.batch_size * world_size,
                                   momentum=0.9, weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{'params': module_partial_fc.parameters()}],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9, weight_decay=cfg.weight_decay)
    num_image = len(train_set)
    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = num_image // total_batch_size * cfg.num_epoch

    def lr_step_func(current_step):
        cfg.decay_step = [x * num_image // total_batch_size for x in cfg.decay_epoch]
        if current_step < cfg.warmup_step:
            return current_step / cfg.warmup_step
        else:
            return 0.1**len([m for m in cfg.decay_step if m <= current_step])

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_backbone,
                                                           lr_lambda=lr_step_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=lr_step_func)
    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))
    val_target = cfg.val_targets
    callback_verification = CallBackVerification(2000, rank, val_target, cfg.rec)
    callback_logging = CallBackLogging(50, rank, cfg.total_step, cfg.batch_size,
                                       world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
    loss = AverageMeter()
    start_epoch = 0
    global_step = 0
    grad_amp = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size,
                                 growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            x = backbone(img)
            features = F.normalize(x)
            x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc, x)
            if cfg.fp16:
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16,
                             scheduler_backbone.get_last_lr()[0], grad_amp)
            callback_verification(global_step, backbone)
        scheduler_backbone.step()
        scheduler_pfc.step()
        callback_checkpoint(global_step, backbone, module_partial_fc)
    callback_verification('last', backbone)
    dist.destroy_process_group()
def main(args):
    # rank and world_size are assumed to be set up at module level
    # (e.g. from the torch.distributed environment variables) in the source file
    torch.cuda.set_device(args.local_rank)
    cfg = get_config(args.config)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)
    summary_writer = (SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard"))
                      if rank == 0 else None)
    train_loader = get_dataloader(cfg.rec, local_rank=args.local_rank,
                                  batch_size=cfg.batch_size, dali=cfg.dali)
    backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16,
                         num_features=cfg.embedding_size).cuda()
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[args.local_rank])
    backbone.train()
    if cfg.loss == "arcface":
        margin_loss = ArcFace()
    elif cfg.loss == "cosface":
        margin_loss = CosFace()
    else:
        # a bare 'raise' outside an except block is itself an error;
        # raise an explicit exception for an unknown loss instead
        raise ValueError("unsupported loss: %s" % cfg.loss)
    module_partial_fc = PartialFC(margin_loss, cfg.embedding_size, cfg.num_classes,
                                  cfg.sample_rate, cfg.fp16)
    module_partial_fc.train().cuda()
    # TODO the params of partial fc must be last in the params list
    opt = torch.optim.SGD(params=[{"params": backbone.parameters()},
                                  {"params": module_partial_fc.parameters()}],
                          lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay)
    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = cfg.num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = cfg.num_image // total_batch_size * cfg.num_epoch
    lr_scheduler = PolyScheduler(optimizer=opt, base_lr=cfg.lr, max_steps=cfg.total_step,
                                 warmup_steps=cfg.warmup_step)
    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))
    callback_verification = CallBackVerification(val_targets=cfg.val_targets,
                                                 rec_prefix=cfg.rec,
                                                 summary_writer=summary_writer)
    callback_logging = CallBackLogging(frequent=cfg.frequent, total_step=cfg.total_step,
                                       batch_size=cfg.batch_size, writer=summary_writer)
    loss_am = AverageMeter()
    start_epoch = 0
    global_step = 0
    amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100)
    for epoch in range(start_epoch, cfg.num_epoch):
        if isinstance(train_loader, DataLoader):
            train_loader.sampler.set_epoch(epoch)
        for _, (img, local_labels) in enumerate(train_loader):
            global_step += 1
            local_embeddings = backbone(img)
            loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels, opt)
            if cfg.fp16:
                amp.scale(loss).backward()
                amp.unscale_(opt)
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                amp.step(opt)
                amp.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                opt.step()
            opt.zero_grad()
            lr_scheduler.step()
            with torch.no_grad():
                loss_am.update(loss.item(), 1)
                callback_logging(global_step, loss_am, epoch, cfg.fp16,
                                 lr_scheduler.get_last_lr()[0], amp)
                if global_step % cfg.verbose == 0 and global_step > 200:
                    callback_verification(global_step, backbone)
        path_pfc = os.path.join(cfg.output, "softmax_fc_gpu_{}.pt".format(rank))
        torch.save(module_partial_fc.state_dict(), path_pfc)
        if rank == 0:
            path_module = os.path.join(cfg.output, "model.pt")
            torch.save(backbone.module.state_dict(), path_module)
        if cfg.dali:
            train_loader.reset()
    if rank == 0:
        path_module = os.path.join(cfg.output, "model.pt")
        torch.save(backbone.module.state_dict(), path_module)
    distributed.destroy_process_group()
def main(args):
    cfg = get_config(args.config)
    if not cfg.tf32:
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])
    except KeyError:
        world_size = 1
        rank = 0
        dist_url = "tcp://127.0.0.1:12584"
    dist.init_process_group(backend='nccl', init_method=dist_url,
                            rank=rank, world_size=world_size)
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    if not os.path.exists(cfg.output) and rank == 0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)
    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)
    if rank == 0:
        logging.info(args)
        logging.info(cfg)
    train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank, dataset=train_set,
                               batch_size=cfg.batch_size, sampler=train_sampler,
                               num_workers=2, pin_memory=True, drop_last=True)
    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
    backbone_onnx = get_model(cfg.network, dropout=dropout, fp16=False)
    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume fail, backbone init successfully!")
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()
    cfg_vpl = cfg.vpl
    vpl_momentum = cfg_vpl['momentum']
    if vpl_momentum:
        # momentum encoder: a frozen copy of the backbone updated by EMA below
        backbone_w = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
        backbone_w.train()
        for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()):
            param_w.data.copy_(param_b.data)
            param_w.requires_grad = False
    margin_softmax = losses.get_loss(cfg.loss)
    module_fc = VPL(rank=rank, local_rank=local_rank, world_size=world_size,
                    resume=args.resume, batch_size=cfg.batch_size,
                    margin_softmax=margin_softmax, num_classes=cfg.num_classes,
                    sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size,
                    prefix=cfg.output, cfg=cfg_vpl)
    opt_backbone = torch.optim.SGD(params=[{'params': backbone.parameters()}],
                                   lr=cfg.lr / 512 * cfg.batch_size * world_size,
                                   momentum=0.9, weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{'params': module_fc.parameters()}],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9, weight_decay=cfg.weight_decay)
    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_backbone,
                                                           lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=cfg.lr_func)
    start_epoch = 0
    total_step = int(len(train_set) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0:
        logging.info("Total Step is: %d" % total_step)
    #for epoch in range(start_epoch, cfg.num_epoch):
    #    _lr = cfg.lr_func(epoch)
    #    logging.info('%d:%f' % (epoch, _lr))
    callback_verification = CallBackVerification(10000, rank, cfg.val_targets, cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
    loss = AverageMeter()
    global_step = 0
    grad_amp = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size,
                                 growth_interval=100) if cfg.fp16 else None
    use_batch_shuffle = True
    alpha = 0.999
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            #img = img.to(memory_format=torch.channels_last)
            features = F.normalize(backbone(img))
            feature_w = None
            if vpl_momentum:
                with torch.no_grad():
                    # EMA update of the momentum encoder
                    for param_b, param_w in zip(backbone.module.parameters(),
                                                backbone_w.parameters()):
                        param_w.data = param_w.data * alpha + param_b.data * (1. - alpha)
                    if use_batch_shuffle:
                        img_w, idx_unshuffle = batch_shuffle_ddp(img, rank, world_size)
                    feature_w = F.normalize(backbone_w(img_w))
                    if use_batch_shuffle:
                        feature_w = batch_unshuffle_ddp(feature_w, idx_unshuffle,
                                                        rank, world_size)
                    feature_w = feature_w.detach()
            x_grad, loss_v = module_fc.forward_backward(label, features, opt_pfc, feature_w)
            if cfg.fp16:
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()
            opt_pfc.step()
            module_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_amp)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_fc, backbone_onnx)
        scheduler_backbone.step()
        scheduler_pfc.step()
    dist.destroy_process_group()