def main() -> None:
    torch.backends.cudnn.benchmark = True

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--ckpt-dir', metavar='DIR', help='run directory')
    parser.add_argument('--pdb', action='store_true', help='pdb')
    parser.add_argument('--gpu', type=str, help='gpu ids', default=None)
    parser.add_argument('--print-configs', action='store_true',
                        help='print ALL configs')
    args, opts = parser.parse_known_args()

    configs.load(args.config, recursive=True)
    configs.update(opts)

    if configs.debug.pdb or args.pdb:
        pdb.set_trace()

    if configs.debug.set_seed:
        torch.manual_seed(configs.debug.seed)
        np.random.seed(configs.debug.seed)

    if args.print_configs:
        print_conf = configs
    else:
        print_conf = get_cared_configs(configs, 'train')

    logger.info(f"Generate subnet started: \n{print_conf}")

    model = builder.make_model()
    sampler = ArchSampler(
        model,
        strategy=configs.model.sampler.strategy,
        n_layers_per_block=configs.model.arch.n_layers_per_block)

    arch_all = []
    for _ in range(100):
        arch = sampler.get_random_sample_arch()
        arch_all.append(arch)

    arch_all.sort(key=lambda x: x[-1])

    for arch in arch_all:
        logger.info(f"blk {arch[-1]} arch: {arch}")
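# Distributed image-classification training entry point: builds per-split
# DistributedSampler dataflows, wraps the model in DistributedDataParallel,
# and trains with top-1/top-5 accuracy inference callbacks and checkpoint
# saving.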
def main() -> None:
    dist.init()
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(dist.local_rank())

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--run-dir', metavar='DIR', help='run directory')
    args, opts = parser.parse_known_args()

    configs.load(args.config, recursive=True)
    configs.update(opts)

    if args.run_dir is None:
        args.run_dir = auto_set_run_dir()
    else:
        set_run_dir(args.run_dir)

    logger.info(' '.join([sys.executable] + sys.argv))
    logger.info(f'Experiment started: "{args.run_dir}".' + '\n' + f'{configs}')

    dataset = builder.make_dataset()
    dataflow = {}
    for split in dataset:
        sampler = torch.utils.data.DistributedSampler(
            dataset[split],
            num_replicas=dist.size(),
            rank=dist.rank(),
            shuffle=(split == 'train'),
        )
        dataflow[split] = torch.utils.data.DataLoader(
            dataset[split],
            batch_size=configs.batch_size // dist.size(),
            sampler=sampler,
            num_workers=configs.workers_per_gpu,
            pin_memory=True,
        )

    model = builder.make_model()
    model = torch.nn.parallel.DistributedDataParallel(
        model.cuda(),
        device_ids=[dist.local_rank()],
    )

    criterion = builder.make_criterion()
    optimizer = builder.make_optimizer(model)
    scheduler = builder.make_scheduler(optimizer)

    trainer = ClassificationTrainer(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        amp_enabled=configs.amp.enabled,
    )
    trainer.train_with_defaults(
        dataflow['train'],
        num_epochs=configs.num_epochs,
        callbacks=[
            SaverRestore(),
            InferenceRunner(
                dataflow['test'],
                callbacks=[
                    TopKCategoricalAccuracy(k=1, name='acc/top1'),
                    TopKCategoricalAccuracy(k=5, name='acc/top5'),
                ],
            ),
            MaxSaver('acc/top1'),
            Saver(),
        ],
    )
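# Single-process layer-regression training entry point: selects the device
# from the config, builds RandomSampler dataflows, and trains a
# LayerRegressionTrainer with an inference runner on the 'valid' split.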
def main() -> None:
    # dist.init()
    torch.backends.cudnn.benchmark = True
    # torch.cuda.set_device(dist.local_rank())

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--run-dir', metavar='DIR', help='run directory')
    parser.add_argument('--pdb', action='store_true', help='pdb')
    parser.add_argument('--gpu', type=str, help='gpu ids', default=None)
    args, opts = parser.parse_known_args()

    configs.load(args.config, recursive=True)
    configs.update(opts)

    if configs.debug.pdb or args.pdb:
        pdb.set_trace()

    if args.gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if configs.debug.set_seed:
        torch.manual_seed(configs.debug.seed)
        np.random.seed(configs.debug.seed)

    if configs.run.device == 'gpu':
        device = torch.device('cuda')
    elif configs.run.device == 'cpu':
        device = torch.device('cpu')
    else:
        raise ValueError(configs.run.device)

    if isinstance(configs.optimizer.lr, str):
        configs.optimizer.lr = eval(configs.optimizer.lr)

    if args.run_dir is None:
        args.run_dir = auto_set_run_dir()
    else:
        set_run_dir(args.run_dir)

    logger.info(' '.join([sys.executable] + sys.argv))
    logger.info(f'Experiment started: "{args.run_dir}".' + '\n' + f'{configs}')

    dataset = builder.make_dataset()
    dataflow = dict()
    for split in dataset:
        sampler = torch.utils.data.RandomSampler(dataset[split])
        dataflow[split] = torch.utils.data.DataLoader(
            dataset[split],
            batch_size=configs.run.bsz,
            sampler=sampler,
            num_workers=configs.run.workers_per_gpu,
            pin_memory=True)

    model = builder.make_model()
    model.to(device)
    # model = torch.nn.parallel.DistributedDataParallel(
    #     model.cuda(),
    #     device_ids=[dist.local_rank()],
    #     find_unused_parameters=True)

    total_params = sum(p.numel() for p in model.parameters())
    logger.info(f'Model Size: {total_params}')
    # logger.info(f'Model MACs: {profile_macs(model, inputs)}')

    criterion = builder.make_criterion()
    optimizer = builder.make_optimizer(model)
    scheduler = builder.make_scheduler(optimizer)

    trainer = LayerRegressionTrainer(model=model,
                                     criterion=criterion,
                                     optimizer=optimizer,
                                     scheduler=scheduler)
    trainer.train_with_defaults(
        dataflow['train'],
        num_epochs=configs.run.n_epochs,
        callbacks=[
            # SaverRestore(),
            InferenceRunner(dataflow=dataflow['valid'], callbacks=[]),
            MaxSaver('loss/valid'),
            # Saver(),
        ])
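# Quantum model training entry point: optionally restores a checkpointed
# architecture/solution, transpiles the q_layer to the basis gate set (with
# optional noise-aware training), sets a sampled sub-architecture, and trains
# with the trainer and callbacks created by the builder.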
def main() -> None:
    # dist.init()
    torch.backends.cudnn.benchmark = True
    # torch.cuda.set_device(dist.local_rank())

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--ckpt-dir', metavar='DIR', help='run directory')
    parser.add_argument('--pdb', action='store_true', help='pdb')
    parser.add_argument('--gpu', type=str, help='gpu ids', default=None)
    parser.add_argument('--print-configs', action='store_true',
                        help='print ALL configs')
    args, opts = parser.parse_known_args()

    configs.load(args.config, recursive=True)
    configs.update(opts)

    if configs.debug.pdb or args.pdb:
        pdb.set_trace()

    if args.gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if configs.debug.set_seed:
        torch.manual_seed(configs.debug.seed)
        np.random.seed(configs.debug.seed)

    if configs.run.device == 'gpu':
        device = torch.device('cuda')
    elif configs.run.device == 'cpu':
        device = torch.device('cpu')
    else:
        raise ValueError(configs.run.device)

    if isinstance(configs.optimizer.lr, str):
        configs.optimizer.lr = eval(configs.optimizer.lr)

    # set the run dir according to the config file's name
    args.run_dir = 'runs/' + args.config.replace('/', '.').replace(
        'examples.', '').replace('.yml', '').replace('configs.', '')
    set_run_dir(args.run_dir)

    logger.info(' '.join([sys.executable] + sys.argv))

    if args.print_configs:
        print_conf = configs
    else:
        print_conf = get_cared_configs(configs, 'train')

    logger.info(f'Training started: "{args.run_dir}".' + '\n' + f'{print_conf}')

    dataset = builder.make_dataset()
    dataflow = dict()

    # for split in dataset:
    #     sampler = torch.utils.data.distributed.DistributedSampler(
    #         dataset[split],
    #         num_replicas=dist.size(),
    #         rank=dist.rank(),
    #         shuffle=(split == 'train'))
    #     dataflow[split] = torch.utils.data.DataLoader(
    #         dataset[split],
    #         batch_size=configs.run.bsz // dist.size(),
    #         sampler=sampler,
    #         num_workers=configs.run.workers_per_gpu,
    #         pin_memory=True)

    for split in dataset:
        if split == 'train':
            sampler = torch.utils.data.RandomSampler(dataset[split])
            batch_size = configs.run.bsz
        else:
            # for valid and test, use SequentialSampler to make the train.py
            # and eval.py results consistent
            sampler = torch.utils.data.SequentialSampler(dataset[split])
            batch_size = getattr(configs.run, 'eval_bsz', configs.run.bsz)
        dataflow[split] = torch.utils.data.DataLoader(
            dataset[split],
            batch_size=batch_size,
            sampler=sampler,
            num_workers=configs.run.workers_per_gpu,
            pin_memory=True)

    model = builder.make_model()

    state_dict = {}
    solution = None
    score = None
    if configs.ckpt.load_ckpt:
        logger.warning('Loading checkpoint!')
        state_dict = io.load(os.path.join(args.ckpt_dir, configs.ckpt.name),
                             map_location='cpu')
        if state_dict.get('model_arch', None) is not None:
            model_load = state_dict['model_arch']
            for module_load, module in zip(model_load.modules(),
                                           model.modules()):
                if isinstance(module, tq.RandomLayer):
                    # random layer, need to restore the architecture
                    module.rebuild_random_layer_from_op_list(
                        n_ops_in=module_load.n_ops,
                        wires_in=module_load.wires,
                        op_list_in=module_load.op_list,
                    )

        if not configs.ckpt.weight_from_scratch:
            model.load_state_dict(state_dict['model'], strict=False)
        else:
            logger.warning('DO NOT load weight, train weights from scratch!')

        if 'solution' in state_dict.keys():
            solution = state_dict['solution']
            logger.info(f"Loading the solution {solution}")
            logger.info(f"Original score: {state_dict['score']}")
            model.set_sample_arch(solution['arch'])
            score = state_dict['score']

        if 'v_c_reg_mapping' in state_dict.keys():
            try:
                model.measure.set_v_c_reg_mapping(
                    state_dict['v_c_reg_mapping'])
            except AttributeError:
                logger.warning('Cannot set v_c_reg_mapping.')

        if configs.model.load_op_list:
            assert state_dict['q_layer_op_list'] is not None
            logger.warning('Loading the op_list, will replace the q_layer '
                           'in the original model!')
            q_layer = build_module_from_op_list(state_dict['q_layer_op_list'])
            model.q_layer = q_layer

    if configs.model.transpile_before_run:
        # transpile the q_layer
        logger.warning('Transpile the q_layer to basis gate set before '
                       'training, will replace the q_layer!')
        processor = builder.make_qiskit_processor()

        if getattr(model, 'q_layer', None) is not None:
            circ = tq2qiskit(model.q_device, model.q_layer)
            # add measure because the transpile process may permute the
            # wires, so we need to get the final q reg to c reg mapping
            circ.measure(list(range(model.q_device.n_wires)),
                         list(range(model.q_device.n_wires)))

            logger.info('Transpiling circuit...')
            if solution is not None:
                processor.set_layout(solution['layout'])
                logger.warning(
                    f"Set layout {solution['layout']} for transpile!")

            circ_transpiled = processor.transpile(circs=circ)
            q_layer = qiskit2tq(circ=circ_transpiled)

            model.measure.set_v_c_reg_mapping(
                get_v_c_reg_mapping(circ_transpiled))
            model.q_layer = q_layer

            if configs.trainer.add_noise:
                # noise-aware training
                noise_model_tq = builder.make_noise_model_tq()
                noise_model_tq.is_add_noise = True
                noise_model_tq.v_c_reg_mapping = get_v_c_reg_mapping(
                    circ_transpiled)
                noise_model_tq.p_c_reg_mapping = get_p_c_reg_mapping(
                    circ_transpiled)
                noise_model_tq.p_v_reg_mapping = get_p_v_reg_mapping(
                    circ_transpiled)
                model.set_noise_model_tq(noise_model_tq)
        elif getattr(model, 'nodes', None) is not None:
            # every node has its own noise model because different nodes may
            # run on different quantum computers
            for node in model.nodes:
                circ = tq2qiskit(node.q_device, node.q_layer)
                circ.measure(list(range(node.q_device.n_wires)),
                             list(range(node.q_device.n_wires)))
                circ_transpiled = processor.transpile(circs=circ)
                q_layer = qiskit2tq(circ=circ_transpiled)

                node.measure.set_v_c_reg_mapping(
                    get_v_c_reg_mapping(circ_transpiled))
                node.q_layer = q_layer

                if configs.trainer.add_noise:
                    # noise-aware training
                    noise_model_tq = builder.make_noise_model_tq()
                    noise_model_tq.is_add_noise = True
                    noise_model_tq.v_c_reg_mapping = get_v_c_reg_mapping(
                        circ_transpiled)
                    noise_model_tq.p_c_reg_mapping = get_p_c_reg_mapping(
                        circ_transpiled)
                    noise_model_tq.p_v_reg_mapping = get_p_v_reg_mapping(
                        circ_transpiled)
                    node.set_noise_model_tq(noise_model_tq)

    if getattr(configs.model.arch, 'sample_arch', None) is not None and \
            not configs.model.transpile_before_run:
        sample_arch = configs.model.arch.sample_arch
        logger.warning(f"Setting sample arch {sample_arch} from config file!")
        if isinstance(sample_arch, str):
            # this is the name of an arch
            sample_arch = get_named_sample_arch(model.arch_space, sample_arch)
            logger.warning(f"Decoded sample arch: {sample_arch}")
        model.set_sample_arch(sample_arch)

    if configs.trainer.name == 'pruning_trainer':
        # in pruning, convert the super layers to a module list, otherwise
        # the pruning ratio is difficult to set
        logger.warning('Convert sampled layer to module list layer!')
        model.q_layer = build_module_from_op_list(
            build_module_op_list(model.q_layer))

    model.to(device)
    # model = torch.nn.parallel.DistributedDataParallel(
    #     model.cuda(),
    #     device_ids=[dist.local_rank()],
    #     find_unused_parameters=True)

    if getattr(model, 'sample_arch', None) is not None and \
            not configs.model.transpile_before_run and \
            not configs.trainer.name == 'pruning_trainer':
        n_params = model.count_sample_params()
        logger.info(f"Number of sampled params: {n_params}")

    total_params = sum(p.numel() for p in model.parameters())
    logger.info(f'Model Size: {total_params}')
    # logger.info(f'Model MACs: {profile_macs(model, inputs)}')

    criterion = builder.make_criterion()
    optimizer = builder.make_optimizer(model)
    scheduler = builder.make_scheduler(optimizer)

    trainer = builder.make_trainer(model=model,
                                   criterion=criterion,
                                   optimizer=optimizer,
                                   scheduler=scheduler)
    trainer.solution = solution
    trainer.score = score

    # the trainer state_dict will be loaded in a callback
    callbacks = builder.make_callbacks(dataflow, state_dict)

    trainer.train_with_defaults(dataflow['train'],
                                num_epochs=configs.run.n_epochs,
                                callbacks=callbacks)
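# Evolutionary search entry point: loads a trained checkpoint, evaluates
# candidate sub-architectures in an EvolutionEngine/Evaluator loop, logs the
# best solution to TensorBoard, and saves the model and solution after every
# iteration.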
def main() -> None:
    torch.backends.cudnn.benchmark = True

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--run-dir', metavar='DIR', help='run directory')
    parser.add_argument('--pdb', action='store_true', help='pdb')
    parser.add_argument('--gpu', type=str, help='gpu ids', default=None)
    parser.add_argument('--jobs', type=int, default=None,
                        help='max parallel job on qiskit')
    parser.add_argument('--print-configs', action='store_true',
                        help='print ALL configs')
    args, opts = parser.parse_known_args()

    configs.load(os.path.join(args.run_dir, 'metainfo', 'configs.yaml'))
    configs.load(args.config, recursive=True)
    configs.update(opts)

    if configs.debug.pdb or args.pdb:
        pdb.set_trace()

    if args.gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.jobs is not None:
        configs.qiskit.max_jobs = args.jobs

    # if qiskit is used, there is no need to estimate the success rate
    assert not (configs.es.est_success_rate and configs.qiskit.use_qiskit)

    if configs.run.device == 'gpu':
        device = torch.device('cuda')
    elif configs.run.device == 'cpu':
        device = torch.device('cpu')
    else:
        raise ValueError(configs.run.device)

    if args.print_configs:
        print_conf = configs
    else:
        print_conf = get_cared_configs(configs, 'es')

    logger.info(f'Evolutionary Search started: "{args.run_dir}".' + '\n' +
                f'{print_conf}')

    if configs.qiskit.use_qiskit:
        IBMQ.load_account()
        if configs.run.bsz == 'qiskit_max':
            provider = get_provider(configs.qiskit.backend_name)
            configs.run.bsz = provider.get_backend(
                configs.qiskit.backend_name).configuration().max_experiments

    dataset = builder.make_dataset()
    sampler = torch.utils.data.SequentialSampler(
        dataset[configs.dataset.split])
    dataflow = torch.utils.data.DataLoader(
        dataset[configs.dataset.split],
        sampler=sampler,
        batch_size=configs.run.bsz,
        num_workers=configs.run.workers_per_gpu,
        pin_memory=True)

    state_dict = io.load(os.path.join(args.run_dir, configs.ckpt.name),
                         map_location=device)
    model_load = state_dict['model_arch']

    model = builder.make_model()
    for module_load, module in zip(model_load.modules(), model.modules()):
        if isinstance(module, tq.RandomLayer):
            # random layer, need to restore the architecture
            module.rebuild_random_layer_from_op_list(
                n_ops_in=module_load.n_ops,
                wires_in=module_load.wires,
                op_list_in=module_load.op_list,
            )
    model.load_state_dict(state_dict['model'], strict=False)

    if configs.legalization.legalize:
        legalize_unitary(model)

    model.to(device)
    model.eval()

    es_dir = 'es_runs/' + args.config.replace('/', '.').replace(
        'examples.', '').replace('.yml', '').replace('configs.', '')
    io.save(os.path.join(es_dir, 'metainfo/configs.yaml'), configs.dict())

    writer = SummaryWriter(os.path.normpath(os.path.join(es_dir, 'tb')))

    if configs.qiskit.use_qiskit or configs.es.est_success_rate:
        IBMQ.load_account()
        provider = get_provider(configs.qiskit.backend_name)
        properties = provider.get_backend(
            configs.qiskit.backend_name).properties()
        if configs.qiskit.backend_name == 'ibmq_qasm_simulator':
            if configs.qiskit.noise_model_name == 'ibmq_16_melbourne':
                n_available_wires = 15
            else:
                n_available_wires = len(properties.qubits)
        qiskit_processor = builder.make_qiskit_processor()
        model.set_qiskit_processor(qiskit_processor)
    else:
        n_available_wires = model.q_device.n_wires

    total_params = sum(p.numel() for p in model.parameters())
    logger.info(f'Model Size: {total_params}')

    es_engine = EvolutionEngine(
        population_size=configs.es.population_size,
        parent_size=configs.es.parent_size,
        mutation_size=configs.es.mutation_size,
        mutation_prob=configs.es.mutation_prob,
        crossover_size=configs.es.crossover_size,
        n_wires=model.q_device.n_wires,
        n_available_wires=n_available_wires,
        arch_space=model.arch_space,
        gene_mask=configs.es.gene_mask,
        random_search=configs.es.random_search,
    )
    evaluator = Evaluator()

    logger.info("Start Evolution Search")
    for k in range(configs.es.n_iterations):
        logger.info(f"ES iteration {k}:")
        solutions = es_engine.ask()
        scores, best_solution_accuracy, best_solution_loss, \
            best_solution_success_rate, best_solution_score = \
            evaluator.evaluate_all(model, dataflow, solutions, writer, k,
                                   configs.es.population_size)
        es_engine.tell(scores)
        logger.info(f"Best solution: {es_engine.best_solution}")
        logger.info(f"Best score: {es_engine.best_score}")
        assert best_solution_score == es_engine.best_score

        writer.add_text('es/best_solution_arch',
                        str(es_engine.best_solution), k)
        writer.add_scalar('es/best_solution_accuracy',
                          best_solution_accuracy, k)
        writer.add_scalar('es/best_solution_loss', best_solution_loss, k)
        writer.add_scalar('es/best_solution_success_rate',
                          best_solution_success_rate, k)
        writer.add_scalar('es/best_solution_score', es_engine.best_score, k)

        # store the model and solution after every iteration
        state_dict = dict()
        if not configs.qiskit.use_real_qc:
            state_dict['model_arch'] = model
        state_dict['model'] = model.state_dict()
        state_dict['solution'] = es_engine.best_solution
        state_dict['score'] = es_engine.best_score
        if not configs.dataset.name == 'vqe':
            state_dict['encoder_func_list'] = model.encoder.func_list
        state_dict['q_layer_op_list'] = build_module_op_list(model.q_layer)
        io.save(os.path.join(es_dir, 'checkpoints/best_solution.pt'),
                state_dict)

    logger.info("\nBest solution evaluation on tq:")
    # evaluate the best solution
    evaluator.evaluate_all(model, dataflow, [es_engine.best_solution])
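# SemanticKITTI evaluation entry point: restores a checkpoint via
# SaverRestore, maps voxel predictions back to the original points with the
# inverse map, and accumulates MeanIoU through the trainer callbacks.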
def main() -> None:
    dist.init()
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(dist.local_rank())

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--run-dir', metavar='DIR', help='run directory')
    args, opts = parser.parse_known_args()

    configs.load(args.config, recursive=True)
    configs.update(opts)

    if args.run_dir is None:
        args.run_dir = auto_set_run_dir()
    else:
        set_run_dir(args.run_dir)

    logger.info(' '.join([sys.executable] + sys.argv))
    logger.info(f'Experiment started: "{args.run_dir}".' + '\n' + f'{configs}')

    dataset = builder.make_dataset()
    dataflow = dict()
    for split in dataset:
        sampler = torch.utils.data.distributed.DistributedSampler(
            dataset[split],
            num_replicas=dist.size(),
            rank=dist.rank(),
            shuffle=(split == 'train'))
        dataflow[split] = torch.utils.data.DataLoader(
            dataset[split],
            batch_size=configs.batch_size,  # if split == 'train' else 1,
            sampler=sampler,
            num_workers=configs.workers_per_gpu,
            pin_memory=True,
            collate_fn=dataset[split].collate_fn)

    # model = spvnas_specialized('35G')
    model = builder.make_model()
    model = torch.nn.parallel.DistributedDataParallel(
        model.cuda(),
        device_ids=[dist.local_rank()],
        find_unused_parameters=True)
    model.eval()

    criterion = builder.make_criterion()
    optimizer = builder.make_optimizer(model)
    scheduler = builder.make_scheduler(optimizer)
    meter = MeanIoU(configs.data.num_classes, configs.data.ignore_label)

    trainer = SemanticKITTITrainer(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
    )
    callbacks = Callbacks([
        SaverRestore(),
        MeanIoU(configs.data.num_classes, configs.data.ignore_label)
    ])
    callbacks._set_trainer(trainer)
    trainer.callbacks = callbacks
    trainer.dataflow = dataflow

    trainer.before_train()
    trainer.before_epoch()

    # important
    model.eval()

    for feed_dict in tqdm(dataflow['test'], desc='eval'):
        _inputs = dict()
        for key, value in feed_dict.items():
            if 'name' not in key:
                _inputs[key] = value.cuda()

        inputs = _inputs['lidar']
        targets = feed_dict['targets'].F.long().cuda(non_blocking=True)
        outputs = model(inputs)

        invs = feed_dict['inverse_map']
        all_labels = feed_dict['targets_mapped']
        _outputs = []
        _targets = []
        for idx in range(invs.C[:, -1].max() + 1):
            cur_scene_pts = (inputs.C[:, -1] == idx).cpu().numpy()
            cur_inv = invs.F[invs.C[:, -1] == idx].cpu().numpy()
            cur_label = (all_labels.C[:, -1] == idx).cpu().numpy()
            outputs_mapped = outputs[cur_scene_pts][cur_inv].argmax(1)
            targets_mapped = all_labels.F[cur_label]
            _outputs.append(outputs_mapped)
            _targets.append(targets_mapped)
        outputs = torch.cat(_outputs, 0)
        targets = torch.cat(_targets, 0)
        output_dict = {'outputs': outputs, 'targets': targets}
        trainer.after_step(output_dict)

    trainer.after_epoch()
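# Quantum model evaluation entry point: rebuilds the trained architecture and
# solution from the checkpoint, optionally transpiles the q_layer, restores
# noise models and activation quantization, and evaluates on the torchquantum
# simulator or a Qiskit backend.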
def main() -> None:
    torch.backends.cudnn.benchmark = True

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--run-dir', metavar='DIR', help='run directory')
    parser.add_argument('--pdb', action='store_true', help='pdb')
    parser.add_argument('--verbose', action='store_true', help='verbose')
    parser.add_argument('--gpu', type=str, help='gpu ids', default=None)
    parser.add_argument('--print-configs', action='store_true',
                        help='print ALL configs')
    parser.add_argument('--jobs', type=int, default=None,
                        help='max parallel job on qiskit')
    parser.add_argument('--hub', type=str, default=None,
                        help='IBMQ provider')
    args, opts = parser.parse_known_args()

    configs.load(os.path.join(args.run_dir, 'metainfo', 'configs.yaml'))
    configs.load(args.config, recursive=True)
    configs.update(opts)

    # for eval, always load the weights
    configs.ckpt.weight_from_scratch = False

    if configs.debug.pdb or args.pdb:
        pdb.set_trace()

    configs.verbose = args.verbose
    configs.qiskit.hub = args.hub

    if args.gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.jobs is not None:
        configs.qiskit.max_jobs = args.jobs

    if configs.run.device == 'gpu':
        device = torch.device('cuda')
    elif configs.run.device == 'cpu':
        device = torch.device('cpu')
    else:
        raise ValueError(configs.run.device)

    if args.print_configs:
        print_conf = configs
    else:
        print_conf = get_cared_configs(configs, 'eval')

    logger.info(f'Evaluation started: "{args.run_dir}".' + '\n' +
                f'{print_conf}')

    eval_config_dir = args.config.replace('/', '.').replace(
        'examples.', '').replace('.yml', '').replace('configs.', '')
    configs.eval_config_dir = eval_config_dir
    configs.run_dir = args.run_dir

    # if configs.qiskit.use_qiskit:
    #     IBMQ.load_account()
    #     if configs.run.bsz == 'qiskit_max':
    #         configs.run.bsz = IBMQ.get_provider(hub='ibm-q').get_backend(
    #             configs.qiskit.backend_name).configuration().max_experiments

    dataset = builder.make_dataset()
    sampler = torch.utils.data.SequentialSampler(
        dataset[configs.dataset.split])
    dataflow = torch.utils.data.DataLoader(
        dataset[configs.dataset.split],
        sampler=sampler,
        batch_size=configs.run.bsz,
        num_workers=configs.run.workers_per_gpu,
        pin_memory=True)

    state_dict = io.load(os.path.join(args.run_dir, configs.ckpt.name),
                         map_location='cpu')
    model_load = state_dict['model_arch']

    model = builder.make_model()
    for module_load, module in zip(model_load.modules(), model.modules()):
        if isinstance(module, tq.RandomLayer):
            # random layer, need to restore the architecture
            module.rebuild_random_layer_from_op_list(
                n_ops_in=module_load.n_ops,
                wires_in=module_load.wires,
                op_list_in=module_load.op_list,
            )
    model.load_state_dict(state_dict['model'], strict=False)

    solution = None
    if 'solution' in state_dict.keys():
        solution = state_dict['solution']
        logger.info(f"Evaluate the solution {solution}")
        logger.info(f"Original score: {state_dict['score']}")
        model.set_sample_arch(solution['arch'])

    if 'v_c_reg_mapping' in state_dict.keys():
        if getattr(model, 'q_layer', None) is not None:
            try:
                model.measure.set_v_c_reg_mapping(
                    state_dict['v_c_reg_mapping'])
            except AttributeError:
                logger.warning('Cannot set v_c_reg_mapping.')
        elif getattr(model, 'nodes', None) is not None:
            for k, node in enumerate(model.nodes):
                node.measure.set_v_c_reg_mapping(
                    state_dict['v_c_reg_mapping'][k])

    if state_dict.get('q_layer_op_list', None) is not None and \
            not configs.model.load_op_list:
        logger.warning('The model has an op_list but it is not loaded!')

    if configs.model.load_op_list:
        assert state_dict['q_layer_op_list'] is not None
        logger.warning('Loading the op_list, will replace the q_layer in '
                       'the original model!')
        if getattr(model, 'q_layer', None) is not None:
            q_layer = build_module_from_op_list(
                op_list=state_dict['q_layer_op_list'],
                remove_ops=configs.prune.eval.remove_ops,
                thres=configs.prune.eval.remove_ops_thres)
            model.q_layer = q_layer
        elif getattr(model, 'nodes', None) is not None:
            for k, node in enumerate(model.nodes):
                q_layer = build_module_from_op_list(
                    op_list=state_dict['q_layer_op_list'][k],
                    remove_ops=configs.prune.eval.remove_ops,
                    thres=configs.prune.eval.remove_ops_thres)
                node.q_layer = q_layer

    if state_dict.get('noise_model_tq', None) is not None:
        # the readout error is ALSO applied during eval and test, so the
        # noise_model_tq needs to be loaded
        if getattr(model, 'q_layer', None) is not None:
            model.set_noise_model_tq(state_dict['noise_model_tq'])
            if getattr(configs, 'add_noise', False):
                model.noise_model_tq.mode = 'train'
                model.noise_model_tq.noise_total_prob = \
                    configs.noise_total_prob
            else:
                model.noise_model_tq.mode = 'test'
        elif getattr(model, 'nodes', None) is not None:
            for k, node in enumerate(model.nodes):
                node.set_noise_model_tq(state_dict['noise_model_tq'][k])
                if getattr(configs, 'add_noise', False):
                    node.noise_model_tq.mode = 'train'
                    node.noise_model_tq.noise_total_prob = \
                        configs.noise_total_prob
                else:
                    node.noise_model_tq.mode = 'test'

    if configs.model.transpile_before_run:
        # transpile the q_layer
        logger.warning('Transpile the q_layer to basis gate set before '
                       'evaluation, will replace the q_layer!')
        processor = builder.make_qiskit_processor()

        circ = tq2qiskit(model.q_device, model.q_layer)
        # add measure because the transpile process may permute the wires,
        # so we need to get the final q reg to c reg mapping
        circ.measure(list(range(model.q_device.n_wires)),
                     list(range(model.q_device.n_wires)))

        if solution is not None:
            processor.set_layout(solution['layout'])
            logger.warning(f"Set layout {solution['layout']} for transpile!")

        logger.info('Transpiling circuit...')
        circ_transpiled = processor.transpile(circs=circ)
        q_layer = qiskit2tq(circ=circ_transpiled)

        model.measure.set_v_c_reg_mapping(get_v_c_reg_mapping(circ_transpiled))
        model.q_layer = q_layer

    if configs.legalization.legalize:
        legalize_unitary(model)

    if configs.act_quant.add_in_eval:
        quantizers = []
        assert getattr(model, 'nodes', None) is not None
        if getattr(configs.act_quant, 'act_quant_bit', None) is not None:
            # settings from the config file have higher priority
            act_quant_bit = configs.act_quant.act_quant_bit
            act_quant_ratio = configs.act_quant.act_quant_ratio
            act_quant_level = configs.act_quant.act_quant_level
            act_quant_lower_bound = configs.act_quant.act_quant_lower_bound
            act_quant_upper_bound = configs.act_quant.act_quant_upper_bound
            logger.warning('Get act_quant setting from config file!')
        elif state_dict.get('act_quant', None) is not None:
            act_quant_bit = state_dict['act_quant']['act_quant_bit']
            act_quant_ratio = state_dict['act_quant']['act_quant_ratio']
            act_quant_level = state_dict['act_quant']['act_quant_level']
            act_quant_lower_bound = state_dict['act_quant'][
                'act_quant_lower_bound']
            act_quant_upper_bound = state_dict['act_quant'][
                'act_quant_upper_bound']
            logger.warning('Get act_quant setting from ckpt file!')
        elif getattr(configs.trainer, 'act_quant_bit', None) is not None:
            # if the act_quant info is not stored in the ckpt, use the info
            # from the training config file
            act_quant_bit = configs.trainer.act_quant_bit
            act_quant_ratio = configs.trainer.act_quant_ratio
            act_quant_level = configs.trainer.act_quant_level
            act_quant_lower_bound = configs.trainer.act_quant_lower_bound
            act_quant_upper_bound = configs.trainer.act_quant_upper_bound
            logger.warning('Get act_quant setting from previous training '
                           'config file!')
        else:
            raise NotImplementedError('No act_quant info specified!')

        logger.warning(f"act_quant_bit={act_quant_bit}, "
                       f"act_quant_ratio={act_quant_ratio}, "
                       f"act_quant_level={act_quant_level}, "
                       f"act_quant_lower_bound={act_quant_lower_bound}, "
                       f"act_quant_upper_bound={act_quant_upper_bound}")

        for k, node in enumerate(model.nodes):
            if configs.trainer.act_quant_skip_last_node and \
                    k == len(model.nodes) - 1:
                continue
            quantizer = PACTActivationQuantizer(
                module=node,
                precision=act_quant_bit,
                level=act_quant_level,
                alpha=1.0,
                backprop_alpha=False,
                quant_ratio=act_quant_ratio,
                device=device,
                lower_bound=act_quant_lower_bound,
                upper_bound=act_quant_upper_bound,
            )
            quantizers.append(quantizer)

        for quantizer in quantizers:
            quantizer.register_hook()

    if getattr(configs, 'pre_specified_mean', None) is not None and \
            configs.pre_specified_std is not None:
        for k, node in enumerate(model.nodes):
            node.pre_specified_mean_std = {
                'mean': configs.pre_specified_mean[k],
                'std': configs.pre_specified_std[k],
            }

    model.to(device)
    model.eval()

    if configs.qiskit.use_qiskit:
        qiskit_processor = builder.make_qiskit_processor()
        if configs.qiskit.initial_layout is not None:
            layout = configs.qiskit.initial_layout
            logger.warning(f"Use layout {layout} from config file")
        elif 'solution' in state_dict.keys():
            layout = state_dict['solution']['layout']
            logger.warning(f"Use layout {layout} from checkpoint file")
        else:
            layout = None
            logger.warning('No specified layout')
        qiskit_processor.set_layout(layout)
        model.set_qiskit_processor(qiskit_processor)

    if getattr(configs.model.arch, 'sample_arch', None) is not None:
        sample_arch = configs.model.arch.sample_arch
        logger.warning(f"Setting sample arch {sample_arch} from config file!")
        if isinstance(sample_arch, str):
            # this is the name of an arch
            sample_arch = get_named_sample_arch(model.arch_space, sample_arch)
            logger.warning(f"Decoded sample arch: {sample_arch}")
        model.set_sample_arch(sample_arch)

    if configs.get_n_params:
        n_params = model.count_sample_params()
        logger.info(f"Number of sampled params: {n_params}")
        exit(0)

    if configs.qiskit.est_success_rate:
        circ_parameterized, params = tq2qiskit_parameterized(
            model.q_device, model.encoder.func_list)
        circ_fixed = tq2qiskit(model.q_device, model.q_layer)
        circ = circ_parameterized + circ_fixed
        transpiled_circ = model.qiskit_processor.transpile(circ)

        success_rate = get_success_rate(model.qiskit_processor.properties,
                                        transpiled_circ)
        logger.info(f"Success rate: {success_rate}")
        logger.info(f"Size: {transpiled_circ.size()}")
        logger.info(f"Depth: {transpiled_circ.depth()}")
        logger.info(f"Width: {transpiled_circ.width()}")
        exit(0)

    total_params = sum(p.numel() for p in model.parameters())
    logger.info(f'Model Size: {total_params}')

    if hasattr(model, 'sample_arch') and not configs.model.load_op_list:
        n_params = model.count_sample_params()
        logger.info(f"Number of sampled params: {n_params}")

    with torch.no_grad():
        target_all = None
        output_all = None
        for feed_dict in tqdm.tqdm(dataflow):
            if configs.run.device == 'gpu':
                inputs = feed_dict[configs.dataset.input_name].cuda(
                    non_blocking=True)
                targets = feed_dict[configs.dataset.target_name].cuda(
                    non_blocking=True)
            else:
                inputs = feed_dict[configs.dataset.input_name]
                targets = feed_dict[configs.dataset.target_name]

            outputs = model(inputs,
                            verbose=configs.verbose,
                            use_qiskit=configs.qiskit.use_qiskit)

            if target_all is None:
                target_all = targets
                output_all = outputs
            else:
                target_all = torch.cat([target_all, targets], dim=0)
                output_all = torch.cat([output_all, outputs], dim=0)
            # if configs.verbose:
            #     logger.info(f"Measured log_softmax: {outputs}")

            if not configs.dataset.name == 'vqe':
                log_acc(output_all, target_all)

    logger.info("Final:")
    if not configs.dataset.name == 'vqe':
        log_acc(output_all, target_all)
    else:
        logger.info(f"Eigenvalue: {output_all.detach().cpu().numpy()}")
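# Distributed SemanticKITTI training entry point: derives a per-rank seed,
# builds DistributedSampler dataflows with the dataset collate_fn, and trains
# a SemanticKITTITrainer with a MeanIoU inference callback on the 'test'
# split.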
def main() -> None:
    dist.init()
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(dist.local_rank())

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--run-dir', metavar='DIR', help='run directory')
    args, opts = parser.parse_known_args()

    configs.load(args.config, recursive=True)
    configs.update(opts)

    if args.run_dir is None:
        args.run_dir = auto_set_run_dir()
    else:
        set_run_dir(args.run_dir)

    logger.info(' '.join([sys.executable] + sys.argv))
    logger.info(f'Experiment started: "{args.run_dir}".' + '\n' + f'{configs}')

    # seed
    if ('seed' not in configs.train) or (configs.train.seed is None):
        configs.train.seed = torch.initial_seed() % (2**32 - 1)

    seed = configs.train.seed + dist.rank() * \
        configs.workers_per_gpu * configs.num_epochs
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

    dataset = builder.make_dataset()
    dataflow = dict()
    for split in dataset:
        sampler = torch.utils.data.distributed.DistributedSampler(
            dataset[split],
            num_replicas=dist.size(),
            rank=dist.rank(),
            shuffle=(split == 'train'))
        dataflow[split] = torch.utils.data.DataLoader(
            dataset[split],
            batch_size=configs.batch_size,
            sampler=sampler,
            num_workers=configs.workers_per_gpu,
            pin_memory=True,
            collate_fn=dataset[split].collate_fn)

    model = builder.make_model()
    model = torch.nn.parallel.DistributedDataParallel(
        model.cuda(),
        device_ids=[dist.local_rank()],
        find_unused_parameters=True)

    criterion = builder.make_criterion()
    optimizer = builder.make_optimizer(model)
    scheduler = builder.make_scheduler(optimizer)

    trainer = SemanticKITTITrainer(model=model,
                                   criterion=criterion,
                                   optimizer=optimizer,
                                   scheduler=scheduler,
                                   num_workers=configs.workers_per_gpu,
                                   seed=seed)
    trainer.train_with_defaults(
        dataflow['train'],
        num_epochs=configs.num_epochs,
        callbacks=[
            InferenceRunner(
                dataflow[split],
                callbacks=[
                    MeanIoU(name=f'iou/{split}',
                            num_classes=configs.data.num_classes,
                            ignore_label=configs.data.ignore_label)
                ]) for split in ['test']
        ] + [
            MaxSaver('iou/test'),
            Saver(),
        ])