def make_model_tf(path: str, mode: str) -> transformers.TFGPT2LMHeadModel:
    """Prepare the model for generation.

    Loads the model architecture from the Hugging Face pre-trained model, then
    loads a checkpoint.
    TODO: There must be a way to just load from config + checkpoint, with no
    pretrained weights.
    """
    with utils.log_duration(LOGGER, make_model_tf.__name__, "Load model."):
        if mode == constants.SaveModeChoices.hfh5:
            config_path = os.path.join(path, "config.json")
            model_path = os.path.join(path, "tf_model.h5")
            utils.check_exists(config_path)
            utils.check_exists(model_path)
            config = transformers.GPT2Config.from_pretrained(config_path)
            return transformers.TFGPT2LMHeadModel.from_pretrained(
                model_path, config=config)
        elif mode == constants.SaveModeChoices.ckpt:
            model = transformers.TFGPT2LMHeadModel.from_pretrained(
                _FLAG_HF_MODEL_KEY.value,
            )
            ckpt = tf.train.Checkpoint(model=model)
            ckpt.restore(_FLAG_CKPT_MODEL_PATH.value)
            return model
        else:
            raise RuntimeError(f"Unsupported save mode: {mode}")
def main(face_root, scene_root, seed, epoch):
    mask_path = './checkpoints/multi_view_face_scene/mask_index_file_{}.pickle'.format(seed)
    assert check_exists(mask_path)
    with open(mask_path, 'rb') as fin:
        face_mask_index, scene_mask_index = pickle.load(fin, encoding='bytes')

    model_path = './checkpoints/multi_view_face_scene/demo_arcface_face+scene_nan_{}_model_{:0>4d}.pth' \
        .format(seed, epoch)
    assert check_exists(model_path)

    dataset = IQiYiFaceSceneDataset(face_root, scene_root, 'test', num_frame=40,
                                    transform=sep_cat_qds_select_face_scene_transforms,
                                    face_mask=face_mask_index, scene_mask=scene_mask_index)
    data_loader = DataLoader(dataset, batch_size=16384, shuffle=False, num_workers=4)

    model = ArcFaceSceneModel(len(face_mask_index) + 2, len(scene_mask_index), 10034 + 1, )
    metric_func = torch.nn.Softmax(-1)

    logger.info('load model from {}'.format(model_path))
    state_dict = torch.load(model_path, map_location='cpu')
    model.load_state_dict(state_dict)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    logger.info('test model on {}'.format(device))
    model.eval()

    all_outputs = []
    all_video_names = []
    with torch.no_grad():
        for batch_idx, (feats1, feats2, _, video_names) in enumerate(data_loader):
            logger.info('Test Model: {}/{}'.format(batch_idx, len(data_loader)))
            feats1 = feats1.to(device)
            feats2 = feats2.to(device)
            output = model(feats1, feats2)
            output = metric_func(output)

            all_outputs.append(output.cpu())
            all_video_names += video_names

    all_outputs = torch.cat(all_outputs, dim=0)
    return all_outputs, all_video_names
def make_model_tf(path):
    with utils.log_duration(LOGGER, make_model_tf.__name__, "Load model."):
        if os.path.exists(path):
            config_path = os.path.join(path, "config.json")
            model_path = os.path.join(path, "tf_model.h5")
            utils.check_exists(config_path)
            utils.check_exists(model_path)
            config = transformers.GPT2Config.from_pretrained(config_path)
            return transformers.TFGPT2LMHeadModel.from_pretrained(
                model_path, config=config)
        else:
            return transformers.TFGPT2LMHeadModel.from_pretrained(path)
def process(self):
    if not check_exists(self.raw_folder):
        raise RuntimeError('Dataset not found')
    train_set, test_set, meta = self.make_data()
    save(train_set, os.path.join(self.processed_folder, 'train.pt'))
    save(test_set, os.path.join(self.processed_folder, 'test.pt'))
    save(meta, os.path.join(self.processed_folder, 'meta.pt'))
    return
def process(self):
    if not check_exists(self.raw_folder):
        self.download()
    train_set, test_set, meta = self.make_data()
    save(train_set, os.path.join(self.processed_folder, 'train.pt'))
    save(test_set, os.path.join(self.processed_folder, 'test.pt'))
    save(meta, os.path.join(self.processed_folder, 'meta.pt'))
    return
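# Hedged sketch of the small I/O helpers (check_exists, save, load) that the
# dataset-processing snippets above and below rely on. The real project
# implementations may differ; this only illustrates the assumed semantics,
# using os.path and torch serialization.
import os

import torch


def check_exists(path):
    # Assumed semantics: report whether a file or directory exists on disk.
    return os.path.exists(path)


def save(obj, path):
    # Assumed semantics: serialize an arbitrary Python object to `path`.
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    torch.save(obj, path)


def load(path):
    # Assumed semantics: deserialize an object previously written by save().
    return torch.load(path)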
def __init__(self, root, split):
    self.root = os.path.expanduser(root)
    self.split = split
    if not check_exists(self.processed_folder):
        self.process()
    self.token = load(
        os.path.join(self.processed_folder, '{}.pt'.format(split)))
    self.vocab = load(
        os.path.join(self.processed_folder, 'meta.pt'))
def read_file():
    """Load the file named on the command line."""
    file_name = sys.argv[1]
    if check_exists(file_name):
        print("Reading file")
        read_data(file_name)
        parse_data()
    else:
        print("File does not exist: {}".format(file_name))
def wrapper(args):
    # Check executable requirements
    for req in requires:
        assert check_exists(req), f"'{req}' is required for {name} step"

    # Save previous steps for 'archive' step's postfix
    if "prev_steps" not in args:
        args.prev_steps = args.postfix

    ret = f(args)
    args.prev_steps += f"-{name}"
    return ret
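# The `wrapper` above reads like the inner function of a decorator factory.
# A minimal sketch of how such a factory might look and be applied; the name
# `register_step` and the example step are hypothetical, shown only to
# illustrate how `name`, `requires`, `f`, and `args` fit together.
import functools


def register_step(name, requires=()):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(args):
            # Same pattern as above: verify required executables, track the
            # chain of step names in args.prev_steps, then run the step.
            for req in requires:
                assert check_exists(req), f"'{req}' is required for {name} step"
            if "prev_steps" not in args:
                args.prev_steps = args.postfix
            ret = f(args)
            args.prev_steps += f"-{name}"
            return ret
        return wrapper
    return decorator


@register_step("archive", requires=("tar",))
def archive_step(args):
    # Hypothetical step body; real pipeline steps are project specific.
    return args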
def __init__(self, root, split, subset, transform=None):
    self.root = os.path.expanduser(root)
    self.split = split
    self.subset = subset
    self.transform = transform
    if not check_exists(self.processed_folder):
        self.process()
    self.img, self.target = load(os.path.join(self.processed_folder, '{}.pt'.format(self.split)))
    self.target = self.target[self.subset]
    self.classes_counts = make_classes_counts(self.target)
    self.classes_to_labels, self.classes_size = load(os.path.join(self.processed_folder, 'meta.pt'))
    self.classes_to_labels, self.classes_size = self.classes_to_labels[self.subset], self.classes_size[self.subset]
def main(args):
    if not check_exists(args.save_dir):
        os.makedirs(args.save_dir)

    dataset = IQiYiFaceSceneDataset(args.face_root, args.scene_root, 'train+val-noise',
                                    num_frame=args.num_frame, )
    data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)

    log_step = len(data_loader) // 10 if len(data_loader) > 10 else 1

    model = ArcFaceSceneModel(args.face_feat_dim, args.scene_feat_dim, args.num_classes, )
    metric_func = ArcMarginProduct()
    loss_func = FocalLoss(gamma=2.)

    optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=1e-5)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epoch)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    for epoch_idx in range(args.epoch):
        total_loss = .0
        for batch_idx, (face_feats, scene_feats, labels, _) in enumerate(data_loader):
            face_feats = face_feats.to(device)
            scene_feats = scene_feats.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            outputs = model(face_feats, scene_feats)
            outputs_metric = metric_func(outputs, labels)
            local_loss = loss_func(outputs_metric, labels)

            local_loss.backward()
            optimizer.step()

            total_loss += local_loss.item()

            if batch_idx % log_step == 0 and batch_idx != 0:
                print('Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'
                      .format(epoch_idx, batch_idx * args.batch_size, len(dataset),
                              100.0 * batch_idx / len(data_loader), local_loss.item()))

        log = {'epoch': epoch_idx, 'lr': optimizer.param_groups[0]['lr'],
               'loss': total_loss / len(data_loader)}
        for key, value in sorted(log.items(), key=lambda item: item[0]):
            print(' {:20s}: {:6f}'.format(str(key), value))

        lr_scheduler.step()

    save_model(model, args.save_dir, 'demo_arcface_face+scene_nan_model', args.epoch)
def gotResult(self, data, kid, keyword, page, totalpage, t, max_interval):
    tp, feeds = data
    if kid in self.key_statuses:
        statuses = self.key_statuses[kid]
    else:
        statuses = [set(), set(), set()]
        self.key_statuses[kid] = statuses

    log.info("Keyword: %s page: %d/%d result: %d"
             % (repr(keyword), page, totalpage, len(feeds)))

    fds = []
    new_statuses = statuses[0]
    collided_feeds = 0
    next_request = False
    for feed in feeds:
        feed['kid'] = str(kid)
        if check_exists(feed):
            fds.append(feed)
        else:
            collided_feeds += 1

    # Send the collected feeds to the Kafka queue
    send_messages(kid, keyword, fds)

    # Decide whether crawling should continue
    if collided_feeds * 3 > len(feeds):
        log.info("collided_feeds: %d with total: %d" % (collided_feeds, len(feeds)))
    else:
        page += 1
        totalpage = max(tp, totalpage)
        log.info("Page: %d with totalpage: %d" % (page, totalpage))
        if page < totalpage:
            next_request = True

    if next_request:
        statuses[1] |= new_statuses
        if len(statuses[1]) > 100:
            self.key_statuses[kid] = [set(), set(), statuses[1]]
        else:
            self.key_statuses[kid][0] = set()
        #log.info("Keyword: %s result: %d" % (str(kid), len(new_statuses)))
        self.key_queue.add_key((kid, page, totalpage, None, None), t)
    else:
        self.recalcKeywordFreq(kid, len(new_statuses), max_interval)
def main(data_root, epoch):
    load_path = './checkpoints/demo_arcface_scene_model_{:0>4d}.pth'.format(epoch)
    assert check_exists(load_path)

    dataset = IQiYiSceneFeatDataset(data_root, 'test', )
    data_loader = DataLoader(dataset, batch_size=2048, shuffle=False, num_workers=0)

    model = ArcSceneFeatModel(2048, 10034 + 1)
    metric_func = torch.nn.Softmax(-1)

    logger.info('load model from {}'.format(load_path))
    state_dict = torch.load(load_path, map_location='cpu')
    model.load_state_dict(state_dict)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    logger.info('test model on {}'.format(device))
    model.eval()

    all_outputs = []
    all_video_names = []
    with torch.no_grad():
        for batch_idx, (feats, _, video_names) in enumerate(data_loader):
            logger.info('Test Model: {}/{}'.format(batch_idx, len(data_loader)))
            feats = feats.to(device)
            output = model(feats)
            output = metric_func(output)

            all_outputs.append(output.cpu())
            all_video_names += video_names

    all_outputs = torch.cat(all_outputs, dim=0)
    return all_outputs, all_video_names
def main(in_target: utils.PathType, out_target: utils.PathType):
    utils.check_exists(in_target)
    parent_dir_out = os.path.dirname(os.path.abspath(out_target))
    utils.check_exists(parent_dir_out)
    utils.check_exists(out_target, inverse=True)

    all_code = ""
    with open(in_target) as fin:
        input_json = json.load(fin)
        cells = input_json["cells"]
        code_cells = lambda: (c for c in cells if c["cell_type"] == "code")
        for cell in code_cells():
            all_code += "".join(cell["source"]) + "\n\n"

    with open(out_target, "w") as fout:
        fout.write(all_code)
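# Hedged usage sketch for the notebook-to-script converter above; the file
# names are hypothetical, and utils.PathType is assumed to accept plain strings.
if __name__ == "__main__":
    main("analysis.ipynb", "analysis_extracted.py")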
def main(face_root, scene_root):
    load_path = './checkpoints/demo_arcface_face+scene_nan_model_0100.pth'
    assert check_exists(load_path)

    dataset = IQiYiFaceSceneDataset(face_root, scene_root, 'test', num_frame=40, )
    data_loader = DataLoader(dataset, batch_size=16384, shuffle=False, num_workers=4)

    model = ArcFaceSceneModel(512 + 2, 2048, 10034 + 1, )
    metric_func = torch.nn.Softmax(-1)

    logger.info('load model from {}'.format(load_path))
    state_dict = torch.load(load_path, map_location='cpu')
    model.load_state_dict(state_dict)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    logger.info('test model on {}'.format(device))
    model.eval()

    all_outputs = []
    all_video_names = []
    with torch.no_grad():
        for batch_idx, (feats1, feats2, _, video_names) in enumerate(data_loader):
            logger.info('Test Model: {}/{}'.format(batch_idx, len(data_loader)))
            feats1 = feats1.to(device)
            feats2 = feats2.to(device)
            output = model(feats1, feats2)
            output = metric_func(output)

            all_outputs.append(output.cpu())
            all_video_names += video_names

    all_outputs = torch.cat(all_outputs, dim=0)
    return all_outputs, all_video_names
def main(argv): if len(argv) > 1: raise RuntimeError(argv[1:]) absl_logging.use_python_logging() utils.check_contained(_FLAG_APPROACH_TYPE.value, _ACCEPTABLE_APPROACHES) utils.check_operator(operator.xor, bool(_FLAG_H5_MODEL_PATH.value), bool(_FLAG_CKPT_MODEL_PATH.value)) if _FLAG_H5_MODEL_PATH.value: model_path = _FLAG_H5_MODEL_PATH.value mode = constants.SaveModeChoices.hfh5 elif _FLAG_CKPT_MODEL_PATH.value: model_path = _FLAG_CKPT_MODEL_PATH.value mode = constants.SaveModeChoices.ckpt else: raise RuntimeError("Logically should never happen.") utils.check_exists(model_path) device_type = tf_utils.devices_to_use()[0].device_type # ONLY GPU IS SUPPORTED utils.check_equal(device_type, "GPU") #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Build the distribution strategy #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if device_type == "TPU": # ONLY LOCAL TPU IS "SUPPORTED" utils.check_isinstance(_FLAG_IS_LOCAL_TPU.value, bool) assert _FLAG_IS_LOCAL_TPU.value tpu_config = tf_utils.init_tpus(local=True) utils.check_isinstance(tpu_config, tf_utils.TpuConfigType) utils.check_not_none(tpu_config) strategy = tf.distribute.TPUStrategy(tpu_config.resolver) elif device_type == "GPU": strategy = tf.distribute.MirroredStrategy( devices=tf.config.experimental.list_logical_devices('GPU')) else: raise RuntimeError(device_type) # ONLY GPU IS SUPPORTED print(tf.config.list_logical_devices()) utils.check_isinstance(strategy, tf.distribute.MirroredStrategy) ############################################################################## # Load Model ############################################################################## with utils.log_duration(LOGGER, main.__name__, "All of model preparation"): with strategy.scope(): # HF isn't able to read directly from GCS if (model_path.startswith("gs://") and mode == constants.SaveModeChoices.hfh5): with utils.log_duration(LOGGER, main.__name__, "Download model from GS"): with tempfile.TemporaryDirectory() as td: td += os.path.sep if os.path.exists("/root/google-cloud-sdk/bin/gsutil"): exec_ = "/root/google-cloud-sdk/bin/gsutil" else: exec_ = "gsutil" command = [ exec_, "-m", "cp", "-r", os.path.join(model_path, "*"), td, ] LOGGER.debug("Running bash command: %s", " ".join(command)) subprocess.check_call(command) LOGGER.debug("Files at the temp dir(%s): %s", td, str(os.listdir(td))) model = make_model_tf(td, mode=mode) else: model = make_model_tf(model_path, mode=mode) utils.check_not_none(model) ############################################################################## # Load Dataset Pipeline ############################################################################## utils.check_contained( _FLAG_APPROACH_TYPE.value, { constants.ApproachTypeChoices.naked_lm, constants.ApproachTypeChoices.cached_pretok }) devices = tf_utils.devices_to_use() num_replicas = (len(devices) if devices[0].device_type in {"GPU", "TPU"} else 1) utils.check_equal(devices[0].device_type, "GPU") # Only a batch size of 1 is currently supported. 
We need attention masks batch_size = _FLAG_BATCH_SIZE.value * num_replicas approach_type = _FLAG_APPROACH_TYPE.value logging.debug("Loading dataset.") tokenizer = transformers.GPT2TokenizerFast.from_pretrained("gpt2-xl") ds = prep_ds_for_generation( dict( tokenizer=tokenizer, context_window_size=1024, dataset_name="kilt_eli5", batch_size=1, # >> We set our own batch size elsewhere db_path=None, # None, random_seed=0, use_subset=False, subset_size=-1, use_helper_words=True, approach_type=approach_type, num_retrievals=5, # Will never change retrieval_temperature=1., retriever=None, # Cached retrievals don't need a retriever repeat=False, # Will never change split=_FLAG_SPLIT.value, enable_debug_checks=False, retrieval_bank_size=5, # Will never change dataset_type=_FLAG_DATASET_TYPE.value, tfr_prefix=_FLAG_TFR_PREFIX.value, qty_shuffle=1, # Will never change max_length_generation=350), tokenizer, _FLAG_SPLIT.value) ds = strategy.experimental_distribute_dataset(ds) ############################################################################## # Generate ############################################################################## LOGGER.debug("Generating.") generations = [] num_entries_in_split = ( task_specific.DATASET_CARDINALITIES["kilt_eli5"][_FLAG_SPLIT.value]) entries_counter = tqdm.tqdm(total=num_entries_in_split) for batch_no, batch in enumerate(ds): # Calling model.generate. We should make a config file with the # hyperparameters for generation, or make a facility in the one we already # have. I feel like a separate one would be better, separating concerns. output = strategy.run( model.generate, kwargs=dict( input_ids=batch, max_length=_FLAG_GENERATION_LENGTH_LIMIT.value, use_cache=True, attention_mask=tf.cast(batch != tokenizer.eos_token_id, tf.int32), repetition_penalty=2., num_beams=5, )) output = tf_utils.process_strat_output(strategy_outputs=output, current_batch_size=batch_size, strategy=strategy, name="generations") #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Display the inputs and outputs. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ rich_console = rich.console.Console(color_system="256") print_sample = make_print_sample() with utils.log_duration(LOGGER, "main", "all of tokenizer.decode for a batch."): for i in range(batch_size): input_text = tokenizer.decode(batch.numpy()[i]) output_text = tokenizer.decode(output.numpy()[i]) print("#" * 1000) print(f"Batch {batch_no} Generation {i}") print_sample(input_text, f"input batch_no {batch_no}", rich_console) print_sample(output_text, f"output batch_no {batch_no}", rich_console) generations.append(output_text) print("#" * 1000) entries_counter.update(batch.shape[0]) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Save the output to a JSON File. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ utils.to_json_file( os.path.join(_FLAG_OUTPUT_PATH.value, _FLAG_SPLIT.value, _FLAG_APPROACH_TYPE.value, time.strftime("%Y%m%d-%H%M%S.json")), dict(flags={ flag.name: flag.value for flag in flags.FLAGS.flags_by_module_dict()[argv[0]] }, generations=generations)) logging.debug("Saved to: %s", _FLAG_OUTPUT_PATH.value)
def make_data(self):
    if not check_exists(os.path.join(self.raw_folder, 'base')):
        train_path = os.path.join(self.raw_folder, 'ILSVRC2012_img_train')
        test_path = os.path.join(self.raw_folder, 'ILSVRC2012_img_val')
        meta_path = os.path.join(self.raw_folder, 'ILSVRC2012_devkit_t12')
        extract_file(os.path.join(self.raw_folder, 'ILSVRC2012_img_train.tar'), train_path)
        extract_file(os.path.join(self.raw_folder, 'ILSVRC2012_img_val.tar'), test_path)
        extract_file(os.path.join(self.raw_folder, 'ILSVRC2012_devkit_t12.tar'), meta_path)
        for archive in [os.path.join(train_path, archive) for archive in os.listdir(train_path)]:
            extract_file(archive, os.path.splitext(archive)[0], delete=True)
        classes_to_labels, classes_size = make_meta(meta_path)
        with open(os.path.join(meta_path, 'data', 'ILSVRC2012_validation_ground_truth.txt'), 'r') as f:
            test_id = f.readlines()
        test_id = [int(i) for i in test_id]
        test_img = sorted([os.path.join(test_path, file) for file in os.listdir(test_path)])
        test_wnid = []
        for test_id_i in test_id:
            test_node_i = anytree.find_by_attr(classes_to_labels['label'], name='id', value=test_id_i)
            test_wnid.append(test_node_i.name)
        for test_wnid_i in set(test_wnid):
            os.mkdir(os.path.join(test_path, test_wnid_i))
        # Use a distinct loop variable so the test_img list is not shadowed.
        for test_wnid_i, test_img_i in zip(test_wnid, test_img):
            shutil.move(test_img_i, os.path.join(test_path, test_wnid_i, os.path.basename(test_img_i)))
        shutil.move(train_path, os.path.join(self.raw_folder, 'base', 'ILSVRC2012_img_train'))
        shutil.move(test_path, os.path.join(self.raw_folder, 'base', 'ILSVRC2012_img_val'))
        shutil.move(meta_path, os.path.join(self.raw_folder, 'base', 'ILSVRC2012_devkit_t12'))
    if not check_exists(os.path.join(self.raw_folder, str(self.size))):
        raise ValueError('Need to run resizer')
    classes_to_labels, classes_size = make_meta(os.path.join(self.raw_folder, 'base', 'ILSVRC2012_devkit_t12'))
    train_img, train_label = make_img(
        os.path.join(self.raw_folder, str(self.size), 'ILSVRC2012_img_train'),
        classes_to_labels['label'])
    test_img, test_label = make_img(
        os.path.join(self.raw_folder, str(self.size), 'ILSVRC2012_img_val'),
        classes_to_labels['label'])
    train_target = {'label': train_label}
    test_target = {'label': test_label}
    return (train_img, train_target), (test_img, test_target), (classes_to_labels, classes_size)
# Read arguments
specification = sys.argv[1]
container = sys.argv[2]
version = sys.argv[3]
mozart_rest_url = sys.argv[4]
storage = sys.argv[5]

# Generate product name
product = utils.get_product_id(specification, version)

# Prepare dataset metadata
metadata = {
    "container": container,
    "job-version": version,
    "resource": "jobspec"
}
if not utils.check_exists(container, mozart_rest_url):
    print("[ERROR] Container, {0}, does not exist. Cannot create job specification.".format(container),
          file=sys.stderr)
    sys.exit(-2)

# Read specification metadata and merge it
with open(specification, "r") as fp:
    payload = json.load(fp)
resolve_dependency_images(payload, storage)
metadata.update(payload)
metadata["id"] = product
endpoint = os.path.join(mozart_rest_url, "job_spec/add")
r = requests.post(endpoint,
def process(self):
    if not check_exists(self.raw_folder):
        raise RuntimeError('Dataset not found')
    data_set = self.make_data()
    save(data_set, os.path.join(self.processed_folder, 'data.pt'))
    return
# Read arguments
specification = sys.argv[1]
job_spec = sys.argv[2]
version = sys.argv[3]
mozart_rest_url = sys.argv[4]
grq_rest_url = sys.argv[5]

# Generate product name
product = utils.get_product_id(specification, version)

# Prepare dataset metadata
metadata = {
    "job-specification": job_spec,
    "job-version": version,
    "resource": "hysds-io-specification"
}
if not utils.check_exists(job_spec, mozart_rest_url):
    print("[ERROR] Job Specification, {0}, does not exist. Cannot create HySDS-IO.".format(job_spec),
          file=sys.stderr)
    sys.exit(-2)

# Read specification metadata and merge it
with open(specification, "r") as fp:
    metadata.update(json.load(fp))
metadata["id"] = product
if metadata.get("component", "tosca") == "mozart":
    add_hysds_io(mozart_rest_url, metadata)
else:
    add_hysds_io(grq_rest_url, metadata)
sys.exit(0)
def main(argv): ####################################################################### # Initial Setup. Logging, Flags, Random seeds. ####################################################################### if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") absl_logging.use_python_logging() flags_dict = { flag.name: flag.value for flag in FLAGS.flags_by_module_dict()[argv[0]] } if FLAGS.use_subset: message = (f"{colorama.Back.RED}{colorama.Fore.WHITE}" f"{colorama.Style.BRIGHT}USING A SUBSET OF THE DATASET" f"{colorama.Style.RESET_ALL}") LOGGER.warning(message) utils.log_module_args(LOGGER, argv[0]) if not FLAGS.output_dir.startswith("gs://"): utils.check_exists(FLAG_OUTPUT_DIR.value) if not tf.io.gfile.isdir(FLAG_OUTPUT_DIR.value): raise RuntimeError("Output dir needs to be a directory.") tf.random.set_seed(FLAG_RANDOM_SEED.value) np.random.seed(FLAG_RANDOM_SEED.value) # Prepare the instance output directory path and save the config there folder_name = time.strftime( f"{FLAG_RUN_NAME.value}_{FLAG_APPROACH_TYPE.value}_%Y%m%d-%H%M%S") instance_output_dir = os.path.join(FLAG_OUTPUT_DIR.value, folder_name).strip() if not instance_output_dir.endswith("/"): instance_output_dir += "/" json_target = os.path.join(instance_output_dir, "training_params.json") if not json_target.strip().startswith("gs://"): subprocess.check_call(["mkdir", "-p", instance_output_dir]) utils.to_json_file(json_target, instance_output_dir) ############################################################################## # Initialization and Configuration of the Devices. ############################################################################## tpu_setup = None # current_acelerator_type is always "CPU" in the beginning with TPUs if tf_utils.current_accelerator_type() == "CPU": tpu_setup = tf_utils.init_tpus() LOGGER.debug("Devices we are computing on:\n%s", utils.wrap_iterable(map(str, tf_utils.devices_to_use()))) LOGGER.debug("All devices:") LOGGER.debug(tf_utils.device_mapping()) if tf_utils.current_accelerator_type() == "GPU": tf.config.set_soft_device_placement(True) if tf_utils.current_accelerator_type() != "TPU": tf.debugging.set_log_device_placement(True) if FLAG_DISTRIBUTE_MODE.value in constants.PURE_DATA_PARALLEL_STRATEGIES: actual_num_replicas = len(tf_utils.devices_to_use()) elif FLAG_DISTRIBUTE_MODE.value in constants.DATA_PARALLEL_DMC: actual_num_replicas = FLAG_NUM_REPLICAS.value else: actual_num_replicas = 1 ############################################################################## # We load the retriever model if it is needed. ############################################################################## # Not currently used. 
retriever = None # if (FLAG_APPROACH_TYPE.value == # constants.ApproachTypeChoices.lm_and_realm): # raise NotImplementedError("This part needs to be tested anew.") # config_path = FLAG_RETRIEVER_CONFIG_PATH.value # realm_save = tf_utils.REALMSave(**utils.from_json_file(config_path)) # # # Approx 15 min when not in dev mode, on CPU # with utils.log_duration(LOGGER, "main", # "whole of BERTScaNNRetriever.__init__", # logging.INFO): # scann_config = retrievers.ScannConfig( # **utils.from_json_file(FLAG_SCANN_CONFIG_PATH.value)) # retriever = retrievers.BERTScaNNRetriever( # retriever_module_path=realm_save.query_embedder_path, # block_records_path=realm_save.text_records, # num_block_records=realm_save.num_block_records, # mode=tf.estimator.ModeKeys.EVAL, # scann_config=scann_config) # elif (FLAG_APPROACH_TYPE.value == # constants.ApproachTypeChoices.cached_realm): # raise NotImplementedError("This part needs to be tested anew.") # config_path = FLAG_RETRIEVER_CONFIG_PATH.value # realm_save = tf_utils.REALMSave(**utils.from_json_file(config_path)) # # # Approx 15 min when not in dev mode, on CPU # with utils.log_duration(LOGGER, "main", # "whole of FullyCachedRetriever.__init__", # logging.INFO): # # retriever = retrievers.FullyCachedRetriever( # db_path=FLAG_FULLYCACHED_H5_PATH.value, # block_records_path=realm_save.text_records, # num_block_records=realm_save.num_block_records, # ) ############################################################################## # Distributed training task ############################################################################## if FLAG_TASK.value == constants.TaskChoices.train: with utils.log_duration(LOGGER, "main", "Load model"): utils.print_mem("before loading model", LOGGER) model_specific = task_specific.load_model( FLAG_MODEL_LOAD_PATH.value, FLAG_MODEL_KEY.value, FLAG_DISTRIBUTE_MODE.value, tpu_setup, FLAG_NUM_REPLICAS.value) utils.print_mem("after loading model", LOGGER) model_or_replicas = model_specific.model if isinstance(model_or_replicas, list): model_or_replicas: List[transformers.TFGPT2LMHeadModel] else: model_or_replicas: transformers.TFGPT2LMHeadModel tokenizer = model_specific.tokenizer def make_optimizer(): return tensor2tensor.utils.adafactor.AdafactorOptimizer( learning_rate=FLAG_LEARNING_RATE.value) if model_specific.strategy: with model_specific.strategy.scope(): optimizer = make_optimizer() else: optimizer = make_optimizer() ############################################################################ # Prepare the dataset functions ############################################################################ rg = np.random.default_rng(FLAG_RANDOM_SEED.value) def call_lm_preproc(repeat, split, random_seed): """Using functools.partial prevents the linter from doing its job.""" if FLAG_DATASET_NAME.value == constants.DatasetNameChoices.kilt_eli5: return task_specific.create_lm_ds_kilt_eli5( tokenizer=tokenizer, context_window_size=( model_or_replicas[0].config.n_positions if isinstance( model_or_replicas, list) else model_or_replicas.config.n_positions), dataset_name=FLAG_DATASET_NAME.value, # Batches are split over the replicas: batch_size=FLAG_BATCH_SIZE.value * actual_num_replicas, db_path=FLAG_DB_PATH.value, random_seed=random_seed, use_subset=FLAG_USE_SUBSET.value, subset_size=FLAG_SUBSET_SIZE.value, use_helper_words=FLAG_USE_HELPER_WORDS.value, approach_type=FLAG_APPROACH_TYPE.value, num_retrievals=FLAG_NUM_RETRIEVALS.value, retrieval_temperature=FLAG_RETRIEVAL_TEMPERATURE.value, retriever=retriever, repeat=repeat, split=split, 
enable_debug_checks=FLAG_DATASET_DEBUG.value, retrieval_bank_size=FLAG_RETRIEVAL_BANK_SIZE.value, dataset_type=FLAG_DATASET_TYPE.value, qty_shuffle=FLAG_QTY_SHUFFLE.value, tfr_prefix=FLAG_TFR_PREFIX.value, max_length_generation=FLAG_MAX_LENGTH_GENERATION.value, ) else: raise NotImplementedError( f"FLAG_DATASET_NAME.value unsupported: `{FLAG_DATASET_NAME.value}`" ) make_training_dataset: Callable[Ellipsis, tf.data.Dataset] = functools.partial( call_lm_preproc, split="train", repeat=False, ) make_eval_dataset: Callable[Ellipsis, tf.data.Dataset] = functools.partial( call_lm_preproc, split="eval", repeat=True, ) ############################################################################ # Prepare the step functions ############################################################################ utils.check_contained(FLAG_DISTRIBUTE_MODE.value, constants.DistributeModeChoices.choices()) tf_function_flags = dict( experimental_compile=FLAG_EXPERIMENTAL_COMPILE.value, experimental_relax_shapes=not FLAG_INPUT_FIXED_SIZE.value) if (FLAG_DISTRIBUTE_MODE.value == constants.DistributeModeChoices.split_and_data_parallel): if not isinstance(model_or_replicas, list): raise RuntimeError(type(model_or_replicas)) training_step = build_manual_data_parallel_training_step( model_or_replicas, optimizer, tf_function_flags) else: training_step = build_regular_training_step( model_or_replicas, optimizer, strategy=model_specific.strategy, tf_function_kwargs=tf_function_flags) evaluation_step = build_evaluation_step(model_or_replicas, tf_function_flags) secs_since_last_ckpt = time.time() # Model checkpoints are saved to the tmp_directory and then rsynced to GCS ########################################################################## # Prepare the different logging facilities ########################################################################## train_log_dir = os.path.join(instance_output_dir, "tensorboard", "train") eval_log_dir = os.path.join(instance_output_dir, "tensorboard", "eval") flags_log_dir = os.path.join(instance_output_dir, "tensorboard", "params") writers = dict(train=tf.summary.create_file_writer(train_log_dir), eval=tf.summary.create_file_writer(eval_log_dir), flags=tf.summary.create_file_writer(flags_log_dir)) with writers["flags"].as_default(): tf.summary.text( "Flags", # Tensorboard takes Markdown: json.dumps(flags_dict, indent=4).replace("\n", "\n\n"), step=0) ma_loss = dict(train=utils.MovingAverage(0.9), eval=utils.MovingAverage(0.9)) step_counters = dict(train=0, eval=0) batch_counters = dict(train=0, eval=0) prev_batch_end = time.time() # The eval ds has no real concept of epoch, repeats forever, shuffling # each time it reaches its end with utils.log_duration(LOGGER, "main", "All of make_eval_dataset"): eval_ds_instance = make_eval_dataset(random_seed=rg.integers( -2**63, 2**63 - 1), ) LOGGER.debug("Distributing the eval dataset to the replicas.") if FLAG_DATASET_TYPE.value == "tfr": eval_ds_instance = ( model_specific.strategy.experimental_distribute_dataset( eval_ds_instance)) LOGGER.debug("Done distributing the eval dataset to the replcias.") eval_ds_instance = iter(eval_ds_instance) ########################################################################## # Training Loop ########################################################################## for epoch in itertools.count(): #################################################################### # Epoch Setup #################################################################### LOGGER.debug("EPOCH %d START", epoch) # Shuffle differently 
every epoch with utils.log_duration(LOGGER, "main", "All of make_training_dataset"): train_ds_instance = make_training_dataset( random_seed=rg.integers(-2**63, 2**63 - 1), ) LOGGER.debug( "Attempting to distribute the training dataset to the replicas." ) if FLAG_DATASET_TYPE.value == "tfr": train_ds_instance = ( model_specific.strategy.experimental_distribute_dataset( train_ds_instance)) LOGGER.debug( "Done distributing the training dataset to the replicas.") train_ds_instance = iter(train_ds_instance) # This allows us to see if we reached the end of the training iterator, # in which case "did_at_least_one_training_batch == False". # We could also test that it did all the batches, to similar results. did_at_least_one_training_batch = True split = "eval" while did_at_least_one_training_batch: # Invert split if split == "train": split = "eval" else: split = "train" # Prepare to test if we did at least one training batch if split == "train": did_at_least_one_training_batch = False if split == "train": dataset_iterator = itertools.islice( train_ds_instance, FLAG_BATCHES_BETWEEN_EVALS.value) else: # The evaluation DS is tiny, so we reshuffle and take a random dataset_iterator = itertools.islice( eval_ds_instance, FLAG_NUMBER_EVAL_BATCHES.value) LOGGER.debug("Batching") for batch in dataset_iterator: # LOGGER.debug("Input sentence:\n\"%s\"", # tokenizer.decode([x for x in batch["input_ids"][0] # if x != tokenizer.eos_token_id])) # LOGGER.debug("Label:\n\"%s\"", # tokenizer.decode([(x if x != -100 else 0) # for x in batch["label_ids"][0]])) if FLAG_DATASET_TYPE.value != "tfr": batch = (model_specific.strategy. experimental_distribute_values_from_function( tf_utils.make_dict_distribute_fn(batch))) # We only care about training epochs as, obviously, we don't train # over eval samples; the number of eval samples seen only # contributes to lowering the variance in the evaluation of when to # do early stopping. if split == "train": did_at_least_one_training_batch = True input_ids = batch["input_ids"] label_ids = batch["label_ids"] #################################################################### # Training Step #################################################################### step_counters[split] += (FLAG_BATCH_SIZE.value * actual_num_replicas) if split == "train": batch_counters[split] += 1 training_kwargs = dict( input_ids=input_ids, label_ids=label_ids, ) if model_specific.strategy: utils.print_mem("before running", LOGGER) LOGGER.debug("Training, Calling strategy.run") loss = model_specific.strategy.run( training_step, kwargs=training_kwargs) LOGGER.debug("Training, Done with strategy.run") utils.print_mem("after running", LOGGER) else: loss = training_step(**training_kwargs) # pytype: disable=wrong-arg-count # If we are in the strategy-free data parallel mode, we need # to change the weights of all replicas to those of the model at # index 0 if (FLAG_DISTRIBUTE_MODE.value == constants.DistributeModeChoices. 
split_and_data_parallel): for replica in model_or_replicas[1:]: replica.set_weights( model_or_replicas[0].get_weights()) #################################################################### # Evaluation Step #################################################################### elif split == "eval": evaluation_kwargs = dict( input_ids=input_ids, label_ids=label_ids, ) if model_specific.strategy: loss = model_specific.strategy.run( evaluation_step, kwargs=evaluation_kwargs) else: loss = evaluation_step(**evaluation_kwargs) else: raise ValueError( f"Unexpected value for split: {split}") #################################################################### # Logging #################################################################### if (FLAG_DISTRIBUTE_MODE.value in constants.PURE_DATA_PARALLEL_STRATEGIES): utils.check_equal(len(loss.values), actual_num_replicas) LOGGER.debug("Split: %s", split) LOGGER.debug("Real num replicas: %s", actual_num_replicas) LOGGER.debug("Loss: %s", loss) LOGGER.debug("Loss values: %s", loss.values) average_loss = float( tf.math.reduce_mean(loss.values).numpy()) else: average_loss = float(loss.numpy()) # tf.debugging.check_numerics(loss) now = time.time() batch_duration = now - prev_batch_end prev_batch_end = now ma_loss[split].update(average_loss) # Actual logging LOGGER.info("Epoch: # %d", epoch) LOGGER.info("Tensorboard_dir: %s", instance_output_dir) LOGGER.info("Batch: %s # %d", split, batch_counters[split]) LOGGER.info("Step: %s # %d", split, step_counters[split]) if FLAG_USE_SUBSET.value: LOGGER.warning(">> USING A SUBSET OF THE DATASET <<") LOGGER.info("%(split)s Batch loss: %(metric)f", dict(split=split, metric=average_loss)) LOGGER.info( "%(split)s Moving average loss: %(metric)f", dict(split=split, metric=ma_loss[split].average)) LOGGER.info( "%(split)s Moving average ppl: %(metric)f", dict(split=split, metric=np.exp(ma_loss[split].average))) LOGGER.info( "%(split)s Batch duration: %(duration)s", dict(split=split, duration=utils.TimeStamp.from_seconds( batch_duration).format())) if FLAG_DISTRIBUTE_MODE.value in constants.DATA_PARALLEL_DMC: LOGGER.info( "%(split)s Duration per sample: %(duration)s", dict(split=split, duration=utils.TimeStamp.from_seconds( batch_duration / (FLAG_BATCH_SIZE.value * actual_num_replicas)))) # Write to Tensorboard with writers[split].as_default(): tf.summary.scalar(f"Loss/{split}", average_loss, step_counters[split]) tf.summary.scalar(f"PPL/{split}", np.exp(average_loss), step_counters[split]) writers[split].flush() # Save every 5 min if (time.time() - secs_since_last_ckpt) / (60 * 20) >= 1: secs_since_last_ckpt = time.time() save_model(train_steps=step_counters["train"], model_or_replicas=model_or_replicas, instance_output_dir=instance_output_dir) secs_since_last_ckpt = time.time() save_model(train_steps=step_counters["train"], model_or_replicas=model_or_replicas, instance_output_dir=instance_output_dir) ############################################################# # Post Training Cleanup ####################################################################### for writer in writers.values(): writer.close()
args = parser.parse_args()

if args.device:
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device

SEED = 0
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)

log_root = args.log_root
log_path = os.path.join(log_root, 'log.txt')
if check_exists(log_path):
    os.remove(log_path)
init_logging(log_path)

main(args)

with open(os.path.join(args.save_dir, 'scene_infos_{}.pickle'.format(args.tvt)), 'rb') as fin:
    scene_infos = pickle.load(fin, encoding='bytes')
assert isinstance(scene_infos, dict)
def main(argv): if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") absl_logging.use_python_logging() utils.log_module_args(LOGGER, argv[0]) # Some checks for the flags utils.check_exists(FLAGS.source_text_path) utils.check_exists(os.path.dirname(FLAGS.subset_text_path)) utils.check_exists(os.path.dirname(FLAGS.subset_embeddings_ds_path)) utils.check_operator(operator.lt, FLAGS.subset_total, FLAGS.source_total) utils.check_glob_prefix(FLAGS.source_embeddings_prefix) # Select a random subset with utils.log_duration(LOGGER, "main", "preparing indices"): indices = np.random.choice(FLAGS.source_total, FLAGS.subset_total, replace=False) indices.sort() # Process the textual data # Much (5 min vs 2 h) faster than iterating through the records and writing # only those we want. An hypothesis for this is that # get_single_element would allow to get elements without parsing all of the # elements along the way, like simply iterating through the records would. # Or did they get constant time indexing in TFRecords? # Inspired by the ORQA codebase: # https://github.com/google-research/language/blob/master/language/orqa/models/orqa_model.py#L147 with utils.log_duration(LOGGER, "main", "preparing data"): text_ds = tf.data.TFRecordDataset(FLAGS.source_text_path, buffer_size=512 * 1024 * 1024, num_parallel_reads=os.cpu_count()) text_ds = text_ds.batch(FLAGS.source_total) text_ds = tf.data.experimental.get_single_element(text_ds) subset = tf.gather(text_ds, tf.constant(indices)) with utils.log_duration(LOGGER, "main", "writing text data"): with tf.io.TFRecordWriter(FLAGS.subset_text_path) as text_writer: for text in tqdm.tqdm(subset, total=FLAGS.subset_total): text = text.numpy() # REALM's data uses no packaging of the data into features, etc. text_writer.write(text) with utils.log_duration(LOGGER, "main", "All of the embedding task"): # Process the embeddings data with tf.device("/cpu:0"): with utils.log_duration(LOGGER, "main", "Loading the checkpoint"): embs = tf.train.load_checkpoint( FLAGS.source_embeddings_prefix).get_tensor("block_emb") utils.check_equal(embs.shape[0], FLAGS.source_total) with utils.log_duration(LOGGER, "main", "taking a subset of the indices"): subset = embs[indices] tf_db = tf.Variable(subset, shape=subset.shape) ckpt = tf.train.Checkpoint(block_emb=tf_db) with utils.log_duration(LOGGER, "main", "Saving the checkpoint"): ckpt.save(FLAGS.subset_embeddings_ds_path) LOGGER.debug("Done")
def __init__(self, root, transform=None):
    self.root = os.path.expanduser(root)
    self.transform = transform
    if not check_exists(self.processed_folder):
        self.process()
    self.img = load(os.path.join(self.processed_folder, 'data.pt'))
def main(argv): # Arguments and logging boilerplate if len(argv) > 1: raise RuntimeError(argv) absl_logging.use_python_logging() utils.log_module_args(LOGGER, argv[0]) # Load a retriever config. retriever_config = tf_utils.REALMConfig( **utils.from_json_file(_FLAG_RETRIEVER_CONFIG_PATH.value)) assert not _FLAG_USE_SUBSET.value # Preparation of the output path time_stamp = time.strftime("%Y%m%d-%H%M%S") target_path = os.path.join(_FLAG_OUTPUT_PATH.value, time_stamp.strip()) if target_path[-1] != "/": target_path += "/" ############################################################################## # Setup devices and strategy ############################################################################## # Duration is pretty much instantaneous with utils.log_duration(LOGGER, "main", "Initializing devices"): tpu_config = tf_utils.init_tpus(local=_FLAG_TPU_IS_LOCAL.value, tpu_name=_FLAG_TPU_NAME.value) device_type = tf_utils.current_accelerator_type() LOGGER.debug("Devices: %s", str(tf_utils.devices_to_use())) if _FLAG_TPU_NAME.value and device_type == "CPU": raise RuntimeError("Device is CPU and we expected a TPU.") if device_type == "TPU": if tpu_config is None: raise RuntimeError("We should have a tpu_config.") strategy = tf.distribute.TPUStrategy(tpu_config.resolver) batch_size = len( tf_utils.devices_to_use()) * _FLAG_BATCH_SIZE.value elif device_type == "GPU" or device_type == "CPU": strategy = tf.distribute.MirroredStrategy() batch_size = len( tf_utils.devices_to_use()) * _FLAG_BATCH_SIZE.value else: raise RuntimeError(device_type) ############################################################################## # Load the KILT ELI5 dataset. ############################################################################## # Takes a while eli5 = {} keys = ["train", "validation", "test"] gpt2_tokenizer = transformers.GPT2TokenizerFast.from_pretrained("gpt2-xl") gpt2_tokenizer.pad_token = gpt2_tokenizer.eos_token with utils.log_duration(LOGGER, "main", "Loading the ELI5 datasets."): if _FLAG_DATASET_ROOT.value: for split in tqdm.tqdm(keys): load_path = os.path.join(_FLAG_DATASET_ROOT.value, "HuggingfaceDatasets", f"{split}_kilt_eli5.hf") with tf.device("/job:localhost"): eli5[split] = datasets.load_from_disk(load_path) else: eli5 = datasets.load_dataset("kilt_tasks", "eli5") ############################################################################## # Load the dataset of the text that will be retrieved. ############################################################################## # Takes a long time with utils.log_duration(LOGGER, "Main", "Load the textual dataset"): # Extract the appropriate text # The buffer_size is taken from the original ORQA code. blocks_dataset = tf.data.TFRecordDataset(retriever_config.text_records, buffer_size=512 * 1024 * 1024) blocks_dataset = blocks_dataset.batch( retriever_config.num_block_records, drop_remainder=False) blocks: tf.Tensor = tf.data.experimental.get_single_element( blocks_dataset) ############################################################################ # Increase the number of maximum open file descriptors to make space # for all the shards. ############################################################################ max_num_fd = _FLAG_NUM_SHARDS.value * 3 + _MIN_N_FD resource.setrlimit(resource.RLIMIT_NOFILE, (max_num_fd, max_num_fd)) ############################################################################ # Prepare the output files. 
############################################################################ writers = {} all_paths = {} for split in keys: maybe_subset = "_subset" if _FLAG_USE_SUBSET.value else "" # Prepare paths. They can't be in a generator. A function generator would be # fine though. paths = [ os.path.join(target_path + maybe_subset, f"{split}_{i}.tfr") for i in range(_FLAG_NUM_SHARDS.value) ] all_paths[split] = paths writers[split] = [] # Create The TFR writers. for i, path in enumerate(paths): writers[split].append(tf.io.TFRecordWriter(path)) # Load the reference DB. We used to accidentally do this once per split :O with utils.log_duration(LOGGER, "main", "Loading the reference db."): checkpoint_path = os.path.join(retriever_config.query_embedder_path, "encoded", "encoded.ckpt") reference_db_device = tf_utils.device_mapping().CPUs[0].name with tf.device(reference_db_device): reference_db = tf_utils.load_reference_db( checkpoint_path, variable_name="block_emb", ) ############################################################################ # Prep the encoder and the tokenizer ############################################################################ with utils.log_duration(LOGGER, "main", "Loading the encoder model and the tokenizer."): with strategy.scope(): query_encoder = hub.load(retriever_config.query_embedder_path, tags={}) encode_fn = _make_encode_fn(query_encoder) encode_fn_strategy_run = make_encode_fn_strategy_run_fn( strategy=strategy, encode_fn=encode_fn, ) vocab_file = os.path.join(retriever_config.query_embedder_path, "assets", "vocab.txt") utils.check_exists(vocab_file) do_lower_case = query_encoder.signatures["tokenization_info"]( )["do_lower_case"] tokenization_info = dict(vocab_file=vocab_file, do_lower_case=do_lower_case) tokenizer, vocab_lookup_table = bert_utils.get_tf_tokenizer( query_encoder, tokenization_info) ############################################################################ # Preprocess the dataset ############################################################################ cls_token_id = tf.cast(vocab_lookup_table.lookup(tf.constant("[CLS]")), tf.int32) sep_token_id = tf.cast(vocab_lookup_table.lookup(tf.constant("[SEP]")), tf.int32) transform = _make_transform_fn( bert_tokenizer=tokenizer, bert_cls_token_id=cls_token_id, bert_sep_token_id=sep_token_id, ) feature_dtypes = { constants.CTH5Fields.distances: tf.float32, constants.CTH5Fields.gpt2_retrieved_ids: tf.int32, constants.CTH5Fields.gpt2_answer_ids_inputs: tf.int32, constants.CTH5Fields.gpt2_question_ids_inputs: tf.int32, } with utils.log_duration(LOGGER, "main", "generating codes"): for split in keys: sample_count = 0 eli5: Dict[str, datasets.Dataset] if split != "test": for_slices = dict(sample_id=eli5[split]["id"], question=eli5[split]["input"], answer=[ sample[0]["answer"] for sample in eli5[split]["output"] ]) else: for_slices = dict( sample_id=eli5[split]["id"], question=eli5[split]["input"], ) ds = tf.data.Dataset.from_tensor_slices(for_slices) ds = ds.map(transform, num_parallel_calls=tf.data.experimental.AUTOTUNE) ds = ds.apply( tf.data.experimental.dense_to_ragged_batch(batch_size)) ds = ds.map(_squeeze, num_parallel_calls=tf.data.experimental.AUTOTUNE) tqdm_inner = tqdm.tqdm(enumerate(ds), total=len(eli5[split]["id"]) // _FLAG_BATCH_SIZE.value, desc=f"Split `{split}`: Batches") for i, batch in tqdm_inner: features = collections.defaultdict(list) ###################################################################### # Enforce the current real batch size 
###################################################################### current_batch_size = batch["sample_id"].shape[0] for k, v in batch.items(): utils.check_equal(v.shape[0], current_batch_size) ###################################################################### gpt2_question_ids_inputs = _prep_field(batch["question"], gpt2_tokenizer) utils.check_equal(gpt2_question_ids_inputs.dtype, np.int32) utils.check_equal(gpt2_question_ids_inputs.shape[0], current_batch_size) if split != "test": gpt2_answer_ids_inputs = _prep_field( batch["answer"], gpt2_tokenizer) utils.check_equal(gpt2_answer_ids_inputs.dtype, np.int32) utils.check_equal(gpt2_answer_ids_inputs.shape[0], current_batch_size) assert len(gpt2_answer_ids_inputs.shape) == 2, ( gpt2_answer_ids_inputs.shape) ###################################################################### # Save the gpt2 tokenized question and answer ###################################################################### features[constants.CTH5Fields.gpt2_question_ids_inputs].extend( gpt2_question_ids_inputs) if split != "test": features[ constants.CTH5Fields.gpt2_answer_ids_inputs].extend( gpt2_answer_ids_inputs) ###################################################################### # Encode the samples. ###################################################################### batch = strategy.experimental_distribute_values_from_function( tf_utils.make_dict_distribute_fn(batch)) embeddings = encode_fn_strategy_run(batch) embeddings = tf_utils.process_strat_output( embeddings, "embeddings", strategy, current_batch_size) utils.check_isinstance(embeddings, ops.EagerTensor) utils.check_equal(embeddings.shape[0], current_batch_size) # pytype doesn't seem to see that we check the type utils.check_equal(embeddings.shape[1], _FLAG_EMBEDDING_DEPTH.value) # pytype: disable=attribute-error ###################################################################### # Retrieve. 
###################################################################### # Do exact retrieval with tf.device(reference_db_device): top_k, inner_prods = tf_utils.mips_exact_search( embeddings, _FLAG_NUM_RETRIEVALS.value, reference_db) # Collate the results top_k = tf_utils.process_strat_output(top_k, "top_k", strategy, current_batch_size) # Check the shapes utils.check_equal( inner_prods.shape, (current_batch_size, _FLAG_NUM_RETRIEVALS.value)) utils.check_equal( top_k.shape, (current_batch_size, _FLAG_NUM_RETRIEVALS.value)) # Save the distances features[constants.CTH5Fields.distances].extend(inner_prods) # Retrieve the text fields associated to the indices gathered = tf.gather(blocks, top_k).numpy() utils.check_equal(gathered.shape[0], current_batch_size) utils.check_equal(gathered.shape[1], _FLAG_NUM_RETRIEVALS.value) retrievals = [] for index_in_batch in range(current_batch_size): # Put the appropriate byte strings in a list local_gathered = gathered[index_in_batch].tolist() utils.check_equal(len(local_gathered), _FLAG_NUM_RETRIEVALS.value) # Decode to utf-8 local_gathered = [ sample.decode() for sample in local_gathered ] # Encode to GPT2 BPE token_ids = np.array( gpt2_tokenizer.batch_encode_plus( local_gathered, padding="max_length", truncation=True, ).input_ids) # Make sure no line is empty # TODO(julesgm): Maybe optional for line in token_ids: assert not np.all(line == 0), line # Convert the eos_tokens token_ids[token_ids == gpt2_tokenizer.eos_token_id] = -1 # Save the retrievals retrievals.append(token_ids) # Save the feature features[constants.CTH5Fields.gpt2_retrieved_ids] = retrievals utils.check_equal( retrievals[0].shape, (_FLAG_NUM_RETRIEVALS.value, _FLAG_CONTEXT_SIZE.value)) for k, v in features.items(): utils.check_equal(len(v), current_batch_size) for index_in_batch in range(current_batch_size): feature_dict = {} for feature_k, feature_v in features.items(): # Cast the feature to its appropriate dtype casted_feats = tf.cast(feature_v[index_in_batch], feature_dtypes[feature_k]) # Serialize the tensor to bytes feature_bytes = tf.io.serialize_tensor(casted_feats) # Build a bytes list tf.train.Feature object, # the serialization tree node feature_dict[feature_k] = _bytes_feature(feature_bytes) # Create the serialization tree root # Expects a list of features feature = tf.train.Features(feature=feature_dict) # Expects a tf.train.Features object example_obj = tf.train.Example(features=feature) # Serialize that to bytes serialized_example = example_obj.SerializeToString() # Write the bytes # TODO(julesgm): Parallelize this with a thread or a process pool & # futures. writers[split][sample_count % _FLAG_NUM_SHARDS.value].write( serialized_example) sample_count += 1 if sample_count % 1000 == 0: LOGGER.debug("Paths: %s", str(all_paths[split][0])) LOGGER.debug("Flushing and closing the `%s` writers", split) for writer in tqdm.tqdm(writers[split]): writer.flush() writer.close() LOGGER.debug("Done.")
# Read arguments
specification = sys.argv[1]
job_spec = sys.argv[2]
version = sys.argv[3]

# Generate product name
product = utils.get_product_id(specification, version)

# Prepare dataset metadata
metadata = {
    "job-specification": job_spec,
    "job-version": version,
    "resource": "hysds-io-specification"
}
if not utils.check_exists(job_spec):
    print("[ERROR] Job Specification, {0}, does not exist. Cannot create HySDS-IO.".format(job_spec),
          file=sys.stderr)
    sys.exit(-2)

# Read specification metadata and merge it
with open(specification, "r") as fp:
    metadata.update(json.load(fp))
metadata["id"] = product
if metadata.get("component", "tosca") == "mozart":
    add_hysds_io(app.conf["MOZART_REST_URL"], metadata)
else:
    add_hysds_io(app.conf["GRQ_REST_URL"], metadata)
sys.exit(0)
def main(argv): ############################################################################## # Initial Setup. Logging, Flags, Random seeds. ############################################################################## if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") absl_logging.use_python_logging() flags_dict = { flag.name: flag.value for flag in FLAGS.flags_by_module_dict()[argv[0]] } if FLAGS.use_subset: message = (f"{colorama.Back.RED}{colorama.Fore.WHITE}" f"{colorama.Style.BRIGHT}USING A SUBSET OF THE DATASET" f"{colorama.Style.RESET_ALL}") LOGGER.warning(message) utils.log_module_args(LOGGER, argv[0]) if not FLAGS.output_dir.startswith("gs://"): utils.check_exists(FLAG_OUTPUT_DIR.value) if not tf.io.gfile.isdir(FLAG_OUTPUT_DIR.value): raise RuntimeError("Output dir needs to be a directory.") tf.random.set_seed(FLAG_RANDOM_SEED.value) np.random.seed(FLAG_RANDOM_SEED.value) # Prepare the instance output directory path and save the config there # Prepare the path folder_name = time.strftime( f"{FLAG_RUN_NAME.value}_{FLAG_APPROACH_TYPE.value}_%Y%m%d-%H%M%S") instance_output_dir = os.path.join(FLAG_OUTPUT_DIR.value, folder_name).strip() if not instance_output_dir.endswith("/"): instance_output_dir += "/" json_target = os.path.join(instance_output_dir, "training_params.json") # Make the folder if we're not on gcloud if not json_target.strip().startswith("gs://"): subprocess.check_call(["mkdir", "-p", instance_output_dir]) # Safe the config file utils.to_json_file(json_target, flags_dict) ############################################################################## # Initialization and Configuration of the Devices. ############################################################################## tpu_setup = None accel = tf_utils.current_accelerator_type() if FLAG_TPU_IS_LOCAL.value: assert accel == "TPU", accel if accel == "TPU": assert FLAG_TPU_IS_LOCAL.value, FLAG_TPU_IS_LOCAL.value if tf_utils.current_accelerator_type() in {"CPU", "TPU"}: tpu_setup = tf_utils.init_tpus(tpu_name=FLAG_TPU_NAME.value, local=FLAG_TPU_IS_LOCAL.value) LOGGER.debug("Devices we are computing on:\n%s", utils.wrap_iterable(map(str, tf_utils.devices_to_use()))) LOGGER.debug("All devices:") LOGGER.debug(tf_utils.device_mapping()) if tf_utils.current_accelerator_type() == "GPU": tf.config.set_soft_device_placement(True) if tf_utils.current_accelerator_type() != "TPU": tf.debugging.set_log_device_placement(True) utils.check_operator(operator.ne, tf_utils.current_accelerator_type(), "CPU") assert FLAG_TPU_NAME.value == socket.gethostname(), ( "This is a configuration choice. You can remove this. " "There will be no side effects.") if FLAG_DISTRIBUTE_MODE.value in constants.PURE_DATA_PARALLEL_STRATEGIES: actual_num_replicas = len(tf_utils.devices_to_use()) elif FLAG_DISTRIBUTE_MODE.value in constants.DATA_PARALLEL_DMC: actual_num_replicas = FLAG_NUM_REPLICAS.value else: actual_num_replicas = 1 ############################################################################## # We load the retriever model if it is needed. ############################################################################## # Not currently used. See old commits. 
retriever = None ############################################################################## # Distributed training task ############################################################################## if FLAG_TASK.value == constants.TaskChoices.train: with utils.log_duration(LOGGER, "main", "Load model"): utils.print_mem("before loading model", LOGGER) model_specific = task_specific.load_model( FLAG_MODEL_KEY.value, FLAG_DISTRIBUTE_MODE.value, tpu_setup, FLAG_NUM_REPLICAS.value) utils.print_mem("after loading model", LOGGER) model = model_specific.model if isinstance(model, list): model: List[transformers.TFGPT2LMHeadModel] else: model: transformers.TFGPT2LMHeadModel tokenizer = model_specific.tokenizer def make_optimizer(): if FLAG_OPTIMIZER_TYPE.value == constants.OptimizerTypes.adafactor: return tensor2tensor.utils.adafactor.AdafactorOptimizer( learning_rate=FLAG_LEARNING_RATE.value) elif FLAG_OPTIMIZER_TYPE.value == constants.OptimizerTypes.adam: return tf.keras.optimizers.Adam( learning_rate=FLAG_LEARNING_RATE.value) else: raise ValueError(FLAG_OPTIMIZER_TYPE.value) if model_specific.strategy: with model_specific.strategy.scope(): optimizer = make_optimizer() else: optimizer = make_optimizer() ############################################################################ # Prepare the dataset functions ############################################################################ rg = np.random.default_rng(FLAG_RANDOM_SEED.value) def call_lm_preproc(repeat, split, random_seed): """Using functools.partial prevents the linter from doing its job.""" if FLAG_DATASET_NAME.value == constants.DatasetNameChoices.kilt_eli5: return task_specific.create_lm_ds_kilt_eli5( tokenizer=tokenizer, context_window_size=model.config.n_positions, dataset_name=FLAG_DATASET_NAME.value, # Batches are split over the replicas: batch_size=FLAG_BATCH_SIZE.value * actual_num_replicas, db_path=FLAG_DB_PATH.value, random_seed=random_seed, use_subset=FLAG_USE_SUBSET.value, subset_size=FLAG_SUBSET_SIZE.value, use_helper_words=FLAG_USE_HELPER_WORDS.value, approach_type=FLAG_APPROACH_TYPE.value, num_retrievals=FLAG_NUM_RETRIEVALS.value, retrieval_temperature=FLAG_RETRIEVAL_TEMPERATURE.value, retriever=retriever, repeat=repeat, split=split, enable_debug_checks=FLAG_DATASET_DEBUG.value, retrieval_bank_size=FLAG_RETRIEVAL_BANK_SIZE.value, dataset_type=FLAG_DATASET_TYPE.value, qty_shuffle=FLAG_QTY_SHUFFLE.value, tfr_prefix=FLAG_TFR_PREFIX.value, max_length_generation=FLAG_MAX_LENGTH_GENERATION.value, ) else: raise NotImplementedError( f"FLAG_DATASET_NAME.value unsupported: `{FLAG_DATASET_NAME.value}`" ) make_training_dataset: Callable[..., tf.data.Dataset] = functools.partial( call_lm_preproc, split="train", repeat=False, ) make_eval_dataset: Callable[..., tf.data.Dataset] = functools.partial( call_lm_preproc, split="eval", repeat=True, ) ############################################################################ # Prepare the step functions ############################################################################ utils.check_contained(FLAG_DISTRIBUTE_MODE.value, constants.DistributeModeChoices.choices()) tf_function_flags = dict( experimental_compile=FLAG_EXPERIMENTAL_COMPILE.value, experimental_relax_shapes=not FLAG_INPUT_FIXED_SIZE.value) training_step = build_regular_training_step( model, optimizer, strategy=model_specific.strategy, tf_function_kwargs=tf_function_flags) evaluation_step = build_evaluation_step(model, tf_function_flags) timestamp_last_ckpt_secs = time.time() # Model checkpoints are saved to the 
tmp_directory and then rsynced to GCS ############################################################################ # Prepare the statistics and the logging facilities. ############################################################################ # Tensorboard with model_specific.strategy.scope(): checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) saver = Saver(instance_output_dir, checkpoint) train_log_dir = os.path.join(instance_output_dir, "tensorboard", "train") eval_log_dir = os.path.join(instance_output_dir, "tensorboard", "eval") flags_log_dir = os.path.join(instance_output_dir, "tensorboard", "params") writers = dict(train=tf.summary.create_file_writer(train_log_dir), eval=tf.summary.create_file_writer(eval_log_dir), flags=tf.summary.create_file_writer(flags_log_dir)) with writers["flags"].as_default(): tf.summary.text( "Flags", # Tensorboard takes Markdown: json.dumps(flags_dict, indent=4).replace("\n", "\n\n"), step=0) # Different information to log. ma_loss = dict(train=utils.MovingAverage(0.9), eval=utils.MovingAverage(0.9)) step_counters = dict(train=0, eval=0) batch_counters = dict(train=0, eval=0) prev_batch_end = time.time() ############################################################################ # Create the Eval DS object. # ========================================================================== # The eval ds has no real concept of epoch, repeats forever, shuffling # each time it reaches its end. ############################################################################ # Create with utils.log_duration(LOGGER, "main", "All of make_eval_dataset"): eval_ds_instance = make_eval_dataset(random_seed=rg.integers( -2**63, 2**63 - 1), ) # Maybe distribute LOGGER.debug("Distributing the eval dataset to the replicas.") if FLAG_DATASET_TYPE.value == "tfr": eval_ds_instance = ( model_specific.strategy.experimental_distribute_dataset( eval_ds_instance)) # Start the iteration. We step by calling `next(...)`. LOGGER.debug("Done distributing the eval dataset to the replicas.") eval_ds_instance = iter(eval_ds_instance) step_function = dict(train=training_step, eval=evaluation_step) ############################################################################ # Training Loop # ========================================================================== # Create a new training dataset object that lasts for one epoch. # This is different from the eval training dataset object, which loops # forever. ############################################################################ for epoch in itertools.count(): ########################################################################## # Epoch Setup ########################################################################## LOGGER.debug("EPOCH %d START", epoch) # Shuffle differently every epoch with utils.log_duration(LOGGER, "main", "All of make_training_dataset"): train_ds_instance = make_training_dataset( random_seed=rg.integers(-2**63, 2**63 - 1), ) LOGGER.debug( "Attempting to distribute the training dataset to the replicas." ) if FLAG_DATASET_TYPE.value == "tfr": train_ds_instance = ( model_specific.strategy.experimental_distribute_dataset( train_ds_instance)) LOGGER.debug( "Done distributing the training dataset to the replicas.") train_ds_instance = iter(train_ds_instance) # To change splits, we use `itertools.islice` over the dataset generator. 
# When the training dataset generator is done, a new loop of the following # while loop occurs, but no training batch is done because we are taking # an `islice` of a generator that is done. did_at_least_one_training_batch = True split = "eval" while did_at_least_one_training_batch: utils.check_operator(operator.ne, tf_utils.current_accelerator_type(), "CPU") # Invert split if split == "train": split = "eval" else: split = "train" # Prepare to test if we did at least one training batch if split == "train": did_at_least_one_training_batch = False ######################################################################## # Take slices from the dataset iterator # ====================================================================== # We only want to do a certain number of batches before switching splits # We do this by using an `itertools.islice` of the dataset iterators. ######################################################################## if split == "train": dataset_iterator = toolz.take( FLAG_BATCHES_BETWEEN_EVALS.value, train_ds_instance) else: # The evaluation dataset generator is infinite, reshuffles every time # it gets to its end. # Still, we take a fixed-size slice from that infinite generator. dataset_iterator = toolz.take( FLAG_NUMBER_EVAL_BATCHES.value, eval_ds_instance) LOGGER.debug("Batching") for batch in dataset_iterator: if FLAG_LOG_SAMPLES.value: #################################################################### # Print elements of the dataset #################################################################### # Make ourselves resistant to values possibly being a PerReplica # object LOGGER.warning( "%(red)sLOGGING SAMPLES. THIS IS VERY SLOW.%(reset)s", dict( red=colorama.Fore.RED, reset=colorama.Style.RESET_ALL, )) is_distributed = isinstance(batch["input_ids"], values.PerReplica) for in_batch_idx in range(FLAG_BATCH_SIZE.value): for replica_idx in (range(actual_num_replicas) if is_distributed else [0]): if is_distributed: sample = { k: batch[k].values[replica_idx] for k in batch } else: sample = batch # input_sentence = tokenizer.decode( # [x for x in sample["input_ids"][i] if x != tokenizer.eos_token_id] # ) # LOGGER.debug( # "%sInput [%d / %d]%s:\n\"%s\"", # colorama.Fore.GREEN, # replica_idx + 1, # actual_num_replicas, # colorama.Style.RESET_ALL, # input_sentence, # ) # # answer = tokenizer.decode( # [(x if x != -100 else 0) for x in sample["label_ids"][i]] # ) # LOGGER.debug( # "%sLabel [%d / %d]%s:\n\"%s\"", # colorama.Fore.GREEN, # replica_idx + 1, # actual_num_replicas, # colorama.Style.RESET_ALL, # answer, # ) cons = console.Console() sentences = table.Table() sentences.add_column("BPE Index", justify="center") sentences.add_column("Inputs", justify="center") sentences.add_column("Labels", justify="center") for bpe_idx, (x, y) in enumerate( itertools.zip_longest( sample["input_ids"][in_batch_idx].numpy(), sample["label_ids"][in_batch_idx].numpy(), fillvalue=None, )): # Guard against `None` fill values from zip_longest before comparing. x_w = tokenizer.decode( [x]) if (x is not None and x >= 0) else f"[ {x} ]" y_w = tokenizer.decode( [y]) if (y is not None and y >= 0) else f"[ {y} ]" sentences.add_row(str(bpe_idx), x_w, y_w) cons.print(sentences) # We only care about training epochs as, obviously, we don't train # over eval samples; the number of eval samples seen only # contributes to lowering the variance in the evaluation of when to # do early stopping.
if split == "train": did_at_least_one_training_batch = True input_ids = batch["input_ids"] label_ids = batch["label_ids"] # Per split step counter step_counters[ split] += FLAG_BATCH_SIZE.value * actual_num_replicas batch_counters[split] += 1 ###################################################################### # Model step function. ###################################################################### step_function_kwargs = dict( input_ids=input_ids, label_ids=label_ids, ) utils.print_mem(f"[{split}] - Mem before `strategy.run`", LOGGER) LOGGER.debug("[%s] - Calling `strategy.run`", split) loss = model_specific.strategy.run( step_function[split], kwargs=step_function_kwargs) LOGGER.debug("[%s] - Done `strategy.run`", split) utils.print_mem(f"[{split}] - Mem after `strategy.run`", LOGGER) #################################################################### # End of logging step code / Logging and saving the model. #################################################################### if (FLAG_DISTRIBUTE_MODE.value in constants.PURE_DATA_PARALLEL_STRATEGIES): utils.check_equal(len(loss.values), actual_num_replicas) LOGGER.debug("[%s] - Real num replicas: %s", split, actual_num_replicas) average_loss = float( tf.math.reduce_mean(loss.values).numpy()) LOGGER.debug("[%s] - Loss: %s", str(split), str(average_loss)) else: average_loss = float(loss.numpy()) tf.debugging.check_numerics( loss.values if isinstance(loss, values.PerReplica) else loss, "Numerics failed.") now = time.time() batch_duration = now - prev_batch_end prev_batch_end = now ma_loss[split].update(average_loss) LOGGER.info("[%s] - Epoch: # %d", split, epoch) LOGGER.info("[%s] - Tensorboard_dir: %s", split, instance_output_dir) LOGGER.info("[%s] - Batch: # %d", split, batch_counters[split]) LOGGER.info("[%s] - Step: # %d", split, step_counters[split]) if FLAG_USE_SUBSET.value: LOGGER.warning(">> USING A SUBSET OF THE DATASET <<") LOGGER.info( "[%(split)s] - Batch loss: %(metric)f", dict(split=split, metric=average_loss)) LOGGER.info( "[%(split)s] - Moving average loss: %(metric)f", dict(split=split, metric=ma_loss[split].average)) LOGGER.info( "[%(split)s] - Moving average ppl: %(metric)f", dict(split=split, metric=np.exp(ma_loss[split].average))) LOGGER.info( "[%(split)s] - Batch duration: %(duration)s", dict(split=split, duration=utils.TimeStamp.from_seconds( batch_duration).format())) # Write to Tensorboard with writers[split].as_default(): tf.summary.scalar(f"Loss/{split}", average_loss, step_counters[split]) tf.summary.scalar(f"PPL/{split}", np.exp(average_loss), step_counters[split]) writers[split].flush() ###################################################################### # Save every `FLAG_SAVE_PERIOD_MIN.value` minutes. 
###################################################################### delta_sec = time.time() - timestamp_last_ckpt_secs utils.check_operator(operator.gt, delta_sec, 0) period_sec = 60 * FLAG_SAVE_PERIOD_MIN.value utils.check_operator(operator.gt, period_sec, 0) ratio = delta_sec / period_sec LOGGER.info( "[%(split)s] - RATIO: %(ratio)s", dict(split=split, ratio=str(ratio))) LOGGER.info( "[%(split)s] - Target: %(target)s, Present: %(present)s", dict( split=split, target=str(period_sec), present=str(delta_sec), )) if ratio >= 1: dur = delta_sec / 60 timestamp_last_ckpt_secs = time.time() LOGGER.debug( "SAVING MODEL - CAUSE: DURATION - %0.2f min", dur) # checkpoint.save(ckpt_prefix) saver.save_model( train_steps=step_counters["train"], model_or_replicas=model, optimizer=optimizer, ) ############################################################################ # Post Training Cleanup ############################################################################ for writer in writers.values(): writer.close()
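# The training loop above relies on `build_regular_training_step(...)`, whose
# definition is not part of this excerpt. The sketch below is only an
# illustration of what such a builder could look like for a
# TFGPT2LMHeadModel under a tf.distribute strategy: the masking of
# `label_ids == -100`, the mean reduction, and the tuple-vs-dataclass output
# handling are assumptions, not the original implementation.
import tensorflow as tf

def build_regular_training_step_sketch(model, optimizer, strategy=None,
                                       tf_function_kwargs=None):
  tf_function_kwargs = tf_function_kwargs or {}

  @tf.function(**tf_function_kwargs)
  def training_step(input_ids, label_ids):
    with tf.GradientTape() as tape:
      outputs = model(input_ids, training=True)
      logits = outputs.logits if hasattr(outputs, "logits") else outputs[0]
      mask = tf.cast(tf.not_equal(label_ids, -100), tf.float32)
      safe_labels = tf.where(tf.equal(label_ids, -100),
                             tf.zeros_like(label_ids), label_ids)
      per_token_loss = tf.keras.losses.sparse_categorical_crossentropy(
          safe_labels, logits, from_logits=True)
      # Average only over non-masked tokens of this replica's shard.
      loss = (tf.reduce_sum(per_token_loss * mask) /
              tf.maximum(tf.reduce_sum(mask), 1.0))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    # The caller reduces the per-replica losses itself (see `loss.values`
    # in the training loop above), so the per-replica loss is returned.
    return loss

  return training_step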
def main(argv): if len(argv) > 1: raise RuntimeError(argv) absl_logging.use_python_logging() retriever_config = tf_utils.REALMSave( **utils.from_json_file(_FLAG_RETRIEVER_CONFIG_PATH.value)) extra = "_FROM_SUBSET" if _FLAG_USE_SUBSET.value else "" time_stamp = time.strftime("%Y%m%d-%H%M%S") target_path = os.path.join(_FLAG_OUTPUT_PATH.value, time_stamp + extra).strip() if target_path[-1] != "/": target_path += "/" ############################################################################## # Setup devices and strategy ############################################################################## with utils.log_duration(LOGGER, "main", "Initializing devices"): tpu_config = tf_utils.init_tpus() device_type = tf_utils.current_accelerator_type() LOGGER.debug("Devices: %s", str(tf_utils.devices_to_use())) if device_type == "TPU": if tpu_config is None: raise RuntimeError("We should have a tpu_config.") strategy = tf.distribute.TPUStrategy(tpu_config.resolver) batch_size = len( tf_utils.devices_to_use()) * _FLAG_BATCH_SIZE.value elif device_type == "GPU" or device_type == "CPU": strategy = tf.distribute.MirroredStrategy() batch_size = len( tf_utils.devices_to_use()) * _FLAG_BATCH_SIZE.value else: raise RuntimeError(device_type) ############################################################################## # Load the dataset. ############################################################################## eli5 = {} keys = ["train", "eval", "test"] gpt2_tokenizer = transformers.GPT2TokenizerFast.from_pretrained("gpt2-xl") gpt2_tokenizer.pad_token = gpt2_tokenizer.eos_token with utils.log_duration(LOGGER, "main", "Loading the ELI5 datasets."): for split in tqdm.tqdm(keys): load_path = os.path.join(_FLAGS_DATASET_ROOT.value, "HuggingfaceDatasets", f"{split}_kilt_eli5.hf") with tf.device("/job:localhost"): eli5[split] = datasets.load_from_disk(load_path) if _FLAG_USE_SUBSET.value: _warn_subset() ############################################################################## # ############################################################################## with utils.log_duration(LOGGER, "Main", "Load the textual dataset"): # Extract the appropriate text # The buffer_size is taken from the original ORQA code. blocks_dataset = tf.data.TFRecordDataset(retriever_config.text_records, buffer_size=512 * 1024 * 1024) blocks_dataset = blocks_dataset.batch( retriever_config.num_block_records, drop_remainder=True) blocks = tf.data.experimental.get_single_element(blocks_dataset) with tempfile.TemporaryDirectory() as tmp_dir: ############################################################################ # Prepare the output file. 
############################################################################ tmp_dir = pathlib.Path(tmp_dir) h5_output_path = tmp_dir / "codes.h5" output_file = h5py.File(h5_output_path, "w") flags_dict = { flag.name: flag.value for flag in flags.FLAGS.flags_by_module_dict()[argv[0]] } utils.to_json_file(tmp_dir / "params.json", flags_dict) for split in keys: with utils.log_duration( LOGGER, "main", "Creating the output hdf5 file, embeddings."): num_entries = len(eli5[split]["id"]) if _FLAG_USE_SUBSET.value: num_entries = min(num_entries, _FLAG_SUBSET_AMOUNT.value) split_group = output_file.create_group(split) with utils.log_duration( LOGGER, "main", "Creating the output hdf5 file, retrieval."): split_group.create_dataset( constants.CTH5Fields.distances, shape=(num_entries, _FLAG_NUM_RETRIEVALS.value), dtype=np.float32, ) split_group.create_dataset( constants.CTH5Fields.gpt2_question_ids_inputs, shape=(num_entries, _FLAG_CONTEXT_SIZE.value), dtype=np.int32) if split != "test": split_group.create_dataset( constants.CTH5Fields.gpt2_answer_ids_inputs, shape=(num_entries, _FLAG_CONTEXT_SIZE.value), dtype=np.int32) split_group.create_dataset( constants.CTH5Fields.gpt2_retrieved_ids, shape=( num_entries, _FLAG_NUM_RETRIEVALS.value, _FLAG_MAX_LENGTH_RETRIEVALS.value, ), dtype=np.int32) with utils.log_duration(LOGGER, "main", "Loading the reference db."): checkpoint_path = os.path.join( retriever_config.query_embedder_path, "encoded", "encoded.ckpt") reference_db_device = tf_utils.device_mapping().CPUs[0].name with tf.device(reference_db_device): reference_db = tf_utils.load_reference_db( checkpoint_path, variable_name="block_emb", ) ############################################################################ # Prep the encoder and the tokenizer ############################################################################ with utils.log_duration( LOGGER, "main", "Loading the encoder model and the tokenizer."): with strategy.scope(): query_encoder = hub.load(retriever_config.query_embedder_path, tags={}) encode_fn = _make_encode_fn(query_encoder) encode_fn_strategy_run = _make_encode_fn_strategy_run_fn( strategy=strategy, encode_fn=encode_fn, ) vocab_file = os.path.join(retriever_config.query_embedder_path, "assets", "vocab.txt") utils.check_exists(vocab_file) do_lower_case = query_encoder.signatures["tokenization_info"]( )["do_lower_case"] tokenization_info = dict(vocab_file=vocab_file, do_lower_case=do_lower_case) tokenizer, vocab_lookup_table = bert_utils.get_tf_tokenizer( query_encoder, tokenization_info) ############################################################################ # Preprocess the dataset ############################################################################ cls_token_id = tf.cast(vocab_lookup_table.lookup(tf.constant("[CLS]")), tf.int32) sep_token_id = tf.cast(vocab_lookup_table.lookup(tf.constant("[SEP]")), tf.int32) transform = _make_transform_fn( bert_tokenizer=tokenizer, bert_cls_token_id=cls_token_id, bert_sep_token_id=sep_token_id, ) with utils.log_duration(LOGGER, "main", "generating codes"): tqdm_splits = tqdm.tqdm(keys) for split in tqdm_splits: tqdm_splits.set_description(f"Split `{split}`") eli5: Dict[str, datasets.Dataset] write_start = 0 if _FLAG_USE_SUBSET.value: _warn_subset(tqdm_splits) eli5[split] = eli5[split][:_FLAG_SUBSET_AMOUNT.value] utils.check_operator(operator.le, len(eli5[split]["id"]), _FLAG_SUBSET_AMOUNT.value) utils.check_operator(operator.le, len(eli5[split]["input"]), _FLAG_SUBSET_AMOUNT.value) else: utils.check_equal(len(eli5[split]), 
len(eli5[split]["id"])) utils.check_equal(len(eli5[split]), len(eli5[split]["input"])) if split != "test": for_slices = dict(sample_id=eli5[split]["id"], question=eli5[split]["input"], answer=[ sample["answer"][0] for sample in eli5[split]["output"] ]) else: for_slices = dict( sample_id=eli5[split]["id"], question=eli5[split]["input"], ) ds = tf.data.Dataset.from_tensor_slices(for_slices) ds = ds.map(transform, num_parallel_calls=tf.data.experimental.AUTOTUNE) ds = ds.apply( tf.data.experimental.dense_to_ragged_batch(batch_size)) ds = ds.map(_squeeze, num_parallel_calls=tf.data.experimental.AUTOTUNE) tqdm_inner = tqdm.tqdm(enumerate(ds), total=len(eli5[split]["id"]) // _FLAG_BATCH_SIZE.value, desc=f"Split `{split}`: Batches") for i, batch in tqdm_inner: ###################################################################### # Enforce the current real batch size ###################################################################### current_batch_size = batch["sample_id"].shape[0] for k, v in batch.items(): utils.check_equal(v.shape[0], current_batch_size) ###################################################################### gpt2_question_ids_inputs = _prep_field( batch["question"], gpt2_tokenizer) utils.check_equal(gpt2_question_ids_inputs.dtype, np.int32) utils.check_equal(gpt2_question_ids_inputs.shape[0], current_batch_size) if split != "test": gpt2_answer_ids_inputs = _prep_field( batch["answer"], gpt2_tokenizer) utils.check_equal(gpt2_answer_ids_inputs.dtype, np.int32) utils.check_equal(gpt2_answer_ids_inputs.shape[0], current_batch_size) assert len(gpt2_answer_ids_inputs.shape) == 2, ( gpt2_answer_ids_inputs.shape) ###################################################################### # Save the gpt2 tokenized question and answer ###################################################################### end = write_start + current_batch_size utils.check_equal( output_file[split][ constants.CTH5Fields.gpt2_question_ids_inputs] [write_start:end].shape[0], current_batch_size) output_file[split][ constants.CTH5Fields.gpt2_question_ids_inputs][ write_start:end] = gpt2_question_ids_inputs if split != "test": output_file[split][ constants.CTH5Fields.gpt2_answer_ids_inputs][ write_start:end] = gpt2_answer_ids_inputs ###################################################################### # Encode the samples. ###################################################################### batch = strategy.experimental_distribute_values_from_function( tf_utils.make_dict_distribute_fn(batch)) embeddings = encode_fn_strategy_run(batch) embeddings = tf_utils.process_strat_output( embeddings, "embeddings", strategy, current_batch_size) utils.check_isinstance(embeddings, ops.EagerTensor) utils.check_equal(embeddings.shape[0], current_batch_size) # pytype doesn't seem to see that we check the type utils.check_equal(embeddings.shape[1], _FLAG_EMBEDDING_DEPTH.value) # pytype: disable=attribute-error ###################################################################### # Retrieve. 
###################################################################### with tf.device(reference_db_device): top_k, inner_prods = tf_utils.mips_exact_search( embeddings, _FLAG_NUM_RETRIEVALS.value, reference_db) top_k = tf_utils.process_strat_output( top_k, "top_k", strategy, current_batch_size) utils.check_equal( inner_prods.shape, (current_batch_size, _FLAG_NUM_RETRIEVALS.value)) utils.check_equal( top_k.shape, (current_batch_size, _FLAG_NUM_RETRIEVALS.value)) output_file[split]["distances"][ write_start:end] = inner_prods gathered = tf.gather(blocks, top_k).numpy() utils.check_equal(gathered.shape[0], current_batch_size) utils.check_equal(write_start + gathered.shape[0], end) for j in range(gathered.shape[0]): local_gathered = gathered[j].tolist() utils.check_equal(len(local_gathered), _FLAG_NUM_RETRIEVALS.value) local_gathered = [ sample.decode() for sample in local_gathered ] token_ids = np.array( gpt2_tokenizer.batch_encode_plus( local_gathered, padding="max_length", truncation=True, ).input_ids) for line in token_ids: assert not np.all(line == 0), line token_ids[token_ids == gpt2_tokenizer.eos_token_id] = -1 output_file[split][ constants.CTH5Fields.gpt2_retrieved_ids][ write_start + j] = token_ids[:, :_FLAG_MAX_LENGTH_RETRIEVALS. value] write_start += current_batch_size ############################################################################ # Upload the results to GCS ############################################################################ LOGGER.debug("DONE WITH THE PRODUCTION") output_file.close() with utils.log_duration(LOGGER, "main", "gsutil transfer"): command = [ "/root/google-cloud-sdk/bin/gsutil", "-m", "cp", "-r", str(tmp_dir / "*"), target_path ] LOGGER.debug("Command: %s", " ".join(command)) subprocess.check_call(command) LOGGER.debug("ALL DONE")
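# `_prep_field(batch["question"], gpt2_tokenizer)` is used above but not
# defined in this excerpt. Judging from the surrounding checks (int32 dtype,
# batch-sized first axis, hdf5 datasets of width _FLAG_CONTEXT_SIZE.value),
# it plausibly tokenizes a batch of strings and pads/truncates them to the
# context size. Everything below, including the name and the eos -> -1
# marking, is an assumption for illustration only.
import numpy as np

def _prep_field_sketch(field, gpt2_tokenizer):
  # `field` is an eager string tensor coming out of the tf.data pipeline.
  texts = [t.decode() if isinstance(t, bytes) else str(t)
           for t in field.numpy()]
  ids = np.array(
      gpt2_tokenizer.batch_encode_plus(
          texts,
          padding="max_length",
          truncation=True,
          max_length=_FLAG_CONTEXT_SIZE.value,  # module-level flag assumed
      ).input_ids,
      dtype=np.int32)
  # Mark padding positions, mirroring what the retrieval branch above does
  # with `token_ids[token_ids == gpt2_tokenizer.eos_token_id] = -1`.
  ids[ids == gpt2_tokenizer.eos_token_id] = -1
  return ids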
def main(argv): if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") absl_logging.use_python_logging() utils.log_module_args(LOGGER, argv[0]) utils.check_exists(FLAGS.scann_config_path) utils.check_glob_prefix(FLAGS.embeddings_ckpt_path) utils.check_exists(FLAGS.output_dir) if not tf.io.gfile.isdir(FLAGS.output_dir): raise RuntimeError("Output dir needs to be a directory.") ############################################################################## # Setup: Build the ScaNN (Scam) searcher ############################################################################## with utils.log_duration(LOGGER, "main", "load_scann_searcher"): checkpoint_path = os.path.join(FLAGS.embeddings_ckpt_path) # The conversion to a ScannConfig object enforces that all the fields we # expect are present in the json file. scann_config = retrievers.ScannConfig( **utils.from_json_file(FLAGS.scann_config_path)) block_emb, scann_searcher = scann_utils.load_scann_searcher( var_name="block_emb", checkpoint_path=checkpoint_path, **vars(scann_config)) utils.check_operator(operator.ge, block_emb.shape[0], FLAGS.test_how_many) ############################################################################## # Recall Computation ############################################################################## LOGGER.debug(block_emb.shape) utils.check_operator(operator.ge, block_emb.shape[0], FLAGS.test_how_many) with utils.log_duration(LOGGER, "main", "all retrievals & comparisons"): LOGGER.debug("block_emb.shape: %s", str(block_emb.shape)) LOGGER.debug("FLAGS.test_how_many: %d", FLAGS.test_how_many) all_indices = np.random.choice(block_emb.shape[0], FLAGS.test_how_many, replace=False) count_total = 0 count_good = 0 for i, idx_start in tqdm.tqdm( enumerate(range(0, len(all_indices), FLAGS.batch_size))): indices = all_indices[idx_start:idx_start + FLAGS.batch_size] vectors = tf.gather(block_emb, indices) if FLAGS.mode == "all": with utils.log_duration(LOGGER, "main", "exact_search"): labels = exact_search(FLAGS.num_neighbors, vectors, block_emb) elif FLAGS.mode == "any": labels = tf.cast(tf.expand_dims(indices, -1), tf.int32) else: raise RuntimeError(FLAGS.mode) with utils.log_duration(LOGGER, "main", "scann_search"): predictions, _ = scann_searcher.search_batched(vectors) good = tf.sets.intersection(labels, predictions) count_good += len(good.values) count_total += tf.math.reduce_prod(labels.shape) ratio = count_good / count_total if i % FLAGS.print_every_n_batches == 0 and i != 0: LOGGER.debug("Recall so far: %f %%", 100 * ratio) final_recall = count_good / count_total LOGGER.debug( "Final recall for mode `%(mode)s` with `%(num_neighbors)d` " "neighbors: %(recall)f %%", dict(mode=FLAGS.mode, num_neighbors=FLAGS.num_neighbors, recall=100 * final_recall)) LOGGER.debug("%d true positives over %d points.", count_good, count_total) ############################################################################## # Build the output object and save it. ############################################################################## output = {} output["flags"] = { flag.name: flag.value for flag in FLAGS.flags_by_module_dict()[argv[0]] } output["recall"] = float(final_recall) # Redundant but easier to read output["count_goods"] = int(count_good) output["count_total"] = int(count_total) output_path = os.path.join( FLAGS.output_dir, "test_recall_" + time.strftime("results_%Y%m%d-%H%M%S.json")) utils.to_json_file(output_path, output)
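# The recall test above calls `exact_search(FLAGS.num_neighbors, vectors,
# block_emb)`, which is not shown in this excerpt. A brute-force
# maximum-inner-product search with the same calling convention could look
# like the sketch below; the real helper may chunk the matmul to bound
# memory on large block embeddings.
import tensorflow as tf

def exact_search_sketch(num_neighbors, queries, db_emb):
  # queries: [batch, dim]; db_emb: [num_blocks, dim]
  inner_products = tf.matmul(queries, db_emb, transpose_b=True)
  _, top_indices = tf.math.top_k(inner_products, k=num_neighbors)
  return tf.cast(top_indices, tf.int32)  # [batch, num_neighbors]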
def main(argv): if len(argv) > 1: raise RuntimeError(argv) absl_logging.use_python_logging() utils.log_module_args(LOGGER, argv[0]) retriever_config = tf_utils.REALMSave( **utils.from_json_file(_FLAG_RETRIEVER_CONFIG_PATH.value)) assert not _FLAG_USE_SUBSET.value time_stamp = time.strftime("%Y%m%d-%H%M%S") target_path = os.path.join(_FLAG_OUTPUT_PATH.value, time_stamp.strip()) if target_path[-1] != "/": target_path += "/" ############################################################################## # Setup devices and strategy ############################################################################## with utils.log_duration(LOGGER, "main", "Initializing devices"): tpu_config = tf_utils.init_tpus() device_type = tf_utils.current_accelerator_type() LOGGER.debug("Devices: %s", str(tf_utils.devices_to_use())) if device_type == "TPU": if tpu_config is None: raise RuntimeError("We should have a tpu_config.") strategy = tf.distribute.TPUStrategy(tpu_config.resolver) batch_size = len( tf_utils.devices_to_use()) * _FLAG_BATCH_SIZE.value elif device_type == "GPU" or device_type == "CPU": strategy = tf.distribute.MirroredStrategy() batch_size = len( tf_utils.devices_to_use()) * _FLAG_BATCH_SIZE.value else: raise RuntimeError(device_type) ############################################################################## # Load the dataset. ############################################################################## eli5 = {} keys = ["train", "eval", "test"] gpt2_tokenizer = transformers.GPT2TokenizerFast.from_pretrained("gpt2-xl") gpt2_tokenizer.pad_token = gpt2_tokenizer.eos_token with utils.log_duration(LOGGER, "main", "Loading the ELI5 datasets."): for split in tqdm.tqdm(keys): load_path = os.path.join(_FLAG_DATASET_ROOT.value, "HuggingfaceDatasets", f"{split}_kilt_eli5.hf") with tf.device("/job:localhost"): eli5[split] = datasets.load_from_disk(load_path) ############################################################################## # ############################################################################## with utils.log_duration(LOGGER, "Main", "Load the textual dataset"): # Extract the appropriate text # The buffer_size is taken from the original ORQA code. blocks_dataset = tf.data.TFRecordDataset(retriever_config.text_records, buffer_size=512 * 1024 * 1024) blocks_dataset = blocks_dataset.batch( retriever_config.num_block_records, drop_remainder=True) blocks = tf.data.experimental.get_single_element(blocks_dataset) ############################################################################ # Prepare the output file. 
############################################################################ writers = {} all_paths = {} for split in keys: maybe_subset = "_subset" if _FLAG_USE_SUBSET.value else "" paths = [ os.path.join(target_path + maybe_subset, f"{split}_{i}.tfr") for i in range(_FLAG_NUM_SHARDS.value) ] all_paths[split] = paths writers[split] = [tf.io.TFRecordWriter(filename) for filename in paths] with utils.log_duration(LOGGER, "main", "Loading the reference db."): checkpoint_path = os.path.join( retriever_config.query_embedder_path, "encoded", "encoded.ckpt") reference_db_device = tf_utils.device_mapping().CPUs[0].name with tf.device(reference_db_device): reference_db = tf_utils.load_reference_db( checkpoint_path, variable_name="block_emb", ) ############################################################################ # Prep the encoder and the tokenizer ############################################################################ with utils.log_duration(LOGGER, "main", "Loading the encoder model and the tokenizer."): with strategy.scope(): query_encoder = hub.load(retriever_config.query_embedder_path, tags={}) encode_fn = _make_encode_fn(query_encoder) encode_fn_strategy_run = make_encode_fn_strategy_run_fn( strategy=strategy, encode_fn=encode_fn, ) vocab_file = os.path.join(retriever_config.query_embedder_path, "assets", "vocab.txt") utils.check_exists(vocab_file) do_lower_case = query_encoder.signatures["tokenization_info"]( )["do_lower_case"] tokenization_info = dict(vocab_file=vocab_file, do_lower_case=do_lower_case) tokenizer, vocab_lookup_table = bert_utils.get_tf_tokenizer( query_encoder, tokenization_info) ############################################################################ # Preprocess the dataset ############################################################################ cls_token_id = tf.cast(vocab_lookup_table.lookup(tf.constant("[CLS]")), tf.int32) sep_token_id = tf.cast(vocab_lookup_table.lookup(tf.constant("[SEP]")), tf.int32) transform = _make_transform_fn( bert_tokenizer=tokenizer, bert_cls_token_id=cls_token_id, bert_sep_token_id=sep_token_id, ) feature_dtypes = { constants.CTH5Fields.distances: tf.float32, constants.CTH5Fields.gpt2_retrieved_ids: tf.int32, constants.CTH5Fields.gpt2_answer_ids_inputs: tf.int32, constants.CTH5Fields.gpt2_question_ids_inputs: tf.int32, } with utils.log_duration(LOGGER, "main", "generating codes"): for split in keys: sample_count = 0 eli5: Dict[str, datasets.Dataset] if split != "test": for_slices = dict(sample_id=eli5[split]["id"], question=eli5[split]["input"], answer=[ sample["answer"][0] for sample in eli5[split]["output"] ]) else: for_slices = dict( sample_id=eli5[split]["id"], question=eli5[split]["input"], ) ds = tf.data.Dataset.from_tensor_slices(for_slices) ds = ds.map(transform, num_parallel_calls=tf.data.experimental.AUTOTUNE) ds = ds.apply( tf.data.experimental.dense_to_ragged_batch(batch_size)) ds = ds.map(_squeeze, num_parallel_calls=tf.data.experimental.AUTOTUNE) tqdm_inner = tqdm.tqdm(enumerate(ds), total=len(eli5[split]["id"]) // _FLAG_BATCH_SIZE.value, desc=f"Split `{split}`: Batches") for i, batch in tqdm_inner: features = collections.defaultdict(list) ###################################################################### # Enforce the current real batch size ###################################################################### current_batch_size = batch["sample_id"].shape[0] for k, v in batch.items(): utils.check_equal(v.shape[0], current_batch_size) 
###################################################################### gpt2_question_ids_inputs = _prep_field(batch["question"], gpt2_tokenizer) utils.check_equal(gpt2_question_ids_inputs.dtype, np.int32) utils.check_equal(gpt2_question_ids_inputs.shape[0], current_batch_size) if split != "test": gpt2_answer_ids_inputs = _prep_field( batch["answer"], gpt2_tokenizer) utils.check_equal(gpt2_answer_ids_inputs.dtype, np.int32) utils.check_equal(gpt2_answer_ids_inputs.shape[0], current_batch_size) assert len(gpt2_answer_ids_inputs.shape) == 2, ( gpt2_answer_ids_inputs.shape) ###################################################################### # Save the gpt2 tokenized question and answer ###################################################################### features[constants.CTH5Fields.gpt2_question_ids_inputs].extend( gpt2_question_ids_inputs) if split != "test": features[ constants.CTH5Fields.gpt2_answer_ids_inputs].extend( gpt2_answer_ids_inputs) ###################################################################### # Encode the samples. ###################################################################### batch = strategy.experimental_distribute_values_from_function( tf_utils.make_dict_distribute_fn(batch)) embeddings = encode_fn_strategy_run(batch) embeddings = tf_utils.process_strat_output( embeddings, "embeddings", strategy, current_batch_size) utils.check_isinstance(embeddings, ops.EagerTensor) utils.check_equal(embeddings.shape[0], current_batch_size) # pytype doesn't seem to see that we check the type utils.check_equal(embeddings.shape[1], _FLAG_EMBEDDING_DEPTH.value) # pytype: disable=attribute-error ###################################################################### # Retrieve. ###################################################################### with tf.device(reference_db_device): top_k, inner_prods = tf_utils.mips_exact_search( embeddings, _FLAG_NUM_RETRIEVALS.value, reference_db) top_k = tf_utils.process_strat_output(top_k, "top_k", strategy, current_batch_size) utils.check_equal( inner_prods.shape, (current_batch_size, _FLAG_NUM_RETRIEVALS.value)) utils.check_equal( top_k.shape, (current_batch_size, _FLAG_NUM_RETRIEVALS.value)) features[constants.CTH5Fields.distances].extend(inner_prods) gathered = tf.gather(blocks, top_k).numpy() utils.check_equal(gathered.shape[0], current_batch_size) retrievals = [] for j in range(gathered.shape[0]): local_gathered = gathered[j].tolist() utils.check_equal(len(local_gathered), _FLAG_NUM_RETRIEVALS.value) local_gathered = [ sample.decode() for sample in local_gathered ] token_ids = np.array( gpt2_tokenizer.batch_encode_plus( local_gathered, padding="max_length", truncation=True, ).input_ids) for line in token_ids: assert not np.all(line == 0), line token_ids[token_ids == gpt2_tokenizer.eos_token_id] = -1 retrievals.append(token_ids) features[constants.CTH5Fields.gpt2_retrieved_ids] = retrievals utils.check_equal( retrievals[0].shape, (_FLAG_NUM_RETRIEVALS.value, _FLAG_CONTEXT_SIZE.value)) for k, v in features.items(): utils.check_equal(len(v), current_batch_size) for k in range(current_batch_size): feature = tf.train.Features( feature={ k: _bytes_feature( tf.io.serialize_tensor( tf.cast(v[k], feature_dtypes[k]))) for k, v in features.items() }) writers[split][ sample_count % _FLAG_NUM_SHARDS.value].write( tf.train.Example( features=feature).SerializeToString()) sample_count += 1 if sample_count % 1000 == 0: LOGGER.debug("Paths: %s", str(all_paths[split][0])) LOGGER.debug("Done.")
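# The TFRecord writing above serializes each field with
# `_bytes_feature(tf.io.serialize_tensor(...))`. `_bytes_feature` is not
# shown in this excerpt; the standard pattern from the TensorFlow TFRecord
# guide is sketched below, together with an assumed reader that inverts the
# serialization. The dtype map would be the same `feature_dtypes` mapping
# built in main(), restricted to the fields present in the split being read.
import tensorflow as tf

def _bytes_feature_sketch(value):
  if isinstance(value, tf.Tensor):
    value = value.numpy()  # tf.io.serialize_tensor yields a scalar string tensor
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _parse_example_sketch(serialized, dtype_map):
  spec = {name: tf.io.FixedLenFeature([], tf.string) for name in dtype_map}
  parsed = tf.io.parse_single_example(serialized, spec)
  return {name: tf.io.parse_tensor(parsed[name], out_type=dtype)
          for name, dtype in dtype_map.items()}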
def main(args): if not check_exists(args.save_dir): os.makedirs(args.save_dir) load_path = './checkpoints/demo_arcface_fine_tune_model_{:0>4d}.pth'.format( args.epoch) dataset = IQiYiExtractSceneDataset(args.data_root, args.tvt, image_root='/home/dcq/img', num_frame=1) if len(dataset) <= 0: logger.error( 'the size of the dataset for extract scene feat cannot be {}'. format(len(dataset))) else: logger.info( 'the size of the dataset for extract scene feat is {}'.format( len(dataset))) data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True) log_step = len(data_loader) // 100 if len(data_loader) > 100 else 1 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') logger.info('extract scene feat on {}'.format(device)) model = ArcFaceSEResNeXtModel(args.num_classes, include_top=False) state_dict = torch.load(load_path, map_location='cpu') model.load_state_dict(state_dict) model = model.to(device) all_video_names = [] all_image_index = [] all_scene_feat = [] start = time.time() with torch.no_grad(): for batch_idx, (image_data, video_names, image_indexes) in enumerate(data_loader): image_data = image_data.to(device) outputs = model(image_data) all_video_names += list(video_names) all_image_index += image_indexes.tolist() all_scene_feat.append(outputs.cpu()) if batch_idx % log_step == 0: end = time.time() log_info = '[{}/{} ({:.0f}%)] Time: {}' \ .format(batch_idx * args.batch_size, len(dataset), 100.0 * batch_idx / len(data_loader), (end - start)) logger.info(log_info) print(log_info) start = time.time() all_scene_feat = torch.cat(all_scene_feat, dim=0).numpy() scene_infos = {} for idx, video_name in enumerate(all_video_names): scene_infos.setdefault(video_name, []).append( (all_image_index[idx], all_scene_feat[idx])) with open( os.path.join(args.save_dir, 'scene_infos_{}.pickle'.format(args.tvt)), 'wb') as fout: pickle.dump(scene_infos, fout)
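# The function above pickles `scene_infos`, a dict mapping video_name to a
# list of (image_index, scene_feat) tuples. One plausible way to consume the
# file is to mean-pool the frame features per video, as sketched below; the
# function name and the pooling choice are illustrative assumptions, not
# part of the original code.
import pickle
import numpy as np

def load_mean_scene_feats(pickle_path):
    with open(pickle_path, 'rb') as fin:
        scene_infos = pickle.load(fin)
    video_feats = {}
    for video_name, entries in scene_infos.items():
        feats = np.stack([feat for _, feat in entries], axis=0)
        video_feats[video_name] = feats.mean(axis=0)
    return video_feats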
def main(args): if not check_exists(args.save_dir): os.makedirs(args.save_dir) dataset = IQiYiFineTuneSceneDataset(args.data_root, 'train+val-noise', image_root='/home/dcq/img') data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=4) log_step = len(data_loader) // 10 if len(data_loader) > 10 else 1 model = ArcFaceSEResNeXtModel(args.num_classes, include_top=True) metric_func = ArcMarginProduct() loss_func = FocalLoss(gamma=2.) trainable_params = [ { 'params': model.base_model.parameters(), "lr": args.learning_rate / 100 }, { 'params': model.weight }, ] optimizer = optim.SGD(trainable_params, lr=args.learning_rate, momentum=0.9, weight_decay=1e-5) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( optimizer, args.epoch) device, device_ids = prepare_device() model = model.to(device) if len(device_ids) > 1: model = torch.nn.DataParallel(model, device_ids=device_ids) for epoch_idx in range(args.epoch): total_loss = .0 for batch_idx, (images, labels, _) in enumerate(data_loader): images = images.view(-1, *images.size()[-3:]) images = images.to(device) labels = labels.to(device) optimizer.zero_grad() outputs = model(images) outputs = outputs.view(outputs.size(0) // 3, 3, -1) outputs = torch.mean(outputs, dim=1) outputs_metric = metric_func(outputs, labels) local_loss = loss_func(outputs_metric, labels) local_loss.backward() optimizer.step() total_loss += local_loss.item() if batch_idx % log_step == 0 and batch_idx != 0: print('Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format( epoch_idx, batch_idx * args.batch_size, len(dataset), 100.0 * batch_idx / len(data_loader), local_loss.item())) log = { 'epoch': epoch_idx, 'lr': optimizer.param_groups[0]['lr'], 'loss': total_loss / len(data_loader) } for key, value in sorted(log.items(), key=lambda item: item[0]): print(' {:20s}: {:6f}'.format(str(key), value)) lr_scheduler.step() # Only unwrap `.module` when the model was actually wrapped in DataParallel. save_model(model.module if len(device_ids) > 1 else model, args.save_dir, 'demo_arcface_fine_tune_model', args.epoch)
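# `prepare_device()` is called above but not defined in this excerpt. From
# its usage (it returns a device plus the list of GPU ids used to decide
# whether to wrap the model in DataParallel), a minimal version could look
# like the sketch below; the real helper may differ.
import torch

def prepare_device_sketch():
    n_gpu = torch.cuda.device_count()
    device = torch.device('cuda:0' if n_gpu > 0 else 'cpu')
    device_ids = list(range(n_gpu))
    return device, device_ids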