def iterate_over(query, doc, doc_mask, total_doc_len, segment_len, step_size):
    """Slides a window over `doc` and builds one BERT-style input per segment.

    Each segment is [CLS] query [SEP] doc[st:ed] [SEP], zero-padded to a fixed
    width, and the segments are concatenated along the batch axis.
    """
    query_input_mask = tf.ones_like(query, tf.int32)
    query_segment_ids = tf.zeros_like(query, tf.int32)
    batch_size, _ = get_shape_list2(query)
    idx = 0
    input_ids_list = []
    input_masks_list = []
    input_segments_list = []
    n_segment = 0
    edge_shape = [batch_size, 1]
    cls_arr = tf.ones(edge_shape, tf.int32) * CLS_ID
    sep_arr = tf.ones(edge_shape, tf.int32) * SEP_ID
    edge_one = tf.ones(edge_shape, tf.int32)
    edge_zero = tf.zeros(edge_shape, tf.int32)
    while idx < total_doc_len:
        st = idx
        ed = idx + segment_len
        # Zero-pad the last window when it runs past the end of the document.
        pad_len = ed - total_doc_len if ed > total_doc_len else 0
        padding = tf.zeros([batch_size, pad_len], tf.int32)
        doc_seg_input_ids = tf.concat([doc[:, st:ed], sep_arr, padding],
                                      axis=1)
        doc_seg_input_mask = tf.concat([doc_mask[:, st:ed], edge_one, padding],
                                       axis=1)
        # Document tokens get segment id 1 wherever the mask is set.
        doc_seg_segment_ids = tf.ones_like(doc_seg_input_ids,
                                           tf.int32) * doc_seg_input_mask
        input_ids = tf.concat([cls_arr, query, sep_arr, doc_seg_input_ids],
                              axis=1)
        input_mask = tf.concat(
            [edge_one, query_input_mask, edge_one, doc_seg_input_mask], axis=1)
        segment_ids = tf.concat(
            [edge_zero, query_segment_ids, edge_zero, doc_seg_segment_ids],
            axis=1)
        input_ids_list.append(input_ids)
        input_masks_list.append(input_mask)
        input_segments_list.append(segment_ids)
        idx += step_size
        n_segment += 1
    # Stack all segments along the batch axis: [n_segment * batch, seq_len].
    all_input_ids = tf.concat(input_ids_list, axis=0)
    all_input_mask = tf.concat(input_masks_list, axis=0)
    all_segment_ids = tf.concat(input_segments_list, axis=0)
    return all_input_ids, all_input_mask, all_segment_ids, n_segment
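
# Usage sketch (illustrative, not part of the original file): assuming CLS_ID
# and SEP_ID are the tokenizer's special-token ids and get_shape_list2 returns
# static shapes, a 40-token document with segment_len=16 and step_size=8 yields
# five overlapping windows, each 1 + query_len + 1 + segment_len + 1 wide.
def _demo_iterate_over():
    query = tf.zeros([2, 10], tf.int32)    # [batch, query_len]
    doc = tf.zeros([2, 40], tf.int32)      # [batch, total_doc_len]
    doc_mask = tf.ones([2, 40], tf.int32)
    ids, mask, seg, n = iterate_over(query, doc, doc_mask,
                                     total_doc_len=40,
                                     segment_len=16,
                                     step_size=8)
    # n == 5; ids has shape [5 * 2, 29] with 29 = 1 + 10 + 1 + 16 + 1.
    return ids, mask, seg, n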
def compute_unreduced_loss(labels, logits):
    """See `_RankingLoss`."""
    alpha = 10.0
    is_valid = utils.is_label_valid(labels)
    # Zero out invalid labels and push the corresponding logits to the bottom
    # of the list so they cannot affect the approximate ranks.
    labels = tf.compat.v1.where(is_valid, labels, tf.zeros_like(labels))
    logits = tf.compat.v1.where(
        is_valid, logits,
        -1e3 * tf.ones_like(logits) +
        tf.reduce_min(input_tensor=logits, axis=-1, keepdims=True))
    label_sum = tf.reduce_sum(input_tensor=labels, axis=1, keepdims=True)
    # Lists whose labels are all zero carry no ranking signal; give them tiny
    # uniform labels and mask them out via the returned weights.
    nonzero_mask = tf.greater(tf.reshape(label_sum, [-1]), 0.0)
    labels = tf.compat.v1.where(nonzero_mask, labels,
                                _EPSILON * tf.ones_like(labels))
    gains = tf.pow(2., tf.cast(labels, dtype=tf.float32)) - 1.
    ranks = utils.approx_ranks(logits, alpha=alpha)
    discounts = 1. / tf.math.log1p(ranks)
    dcg = tf.reduce_sum(input_tensor=gains * discounts, axis=-1, keepdims=True)
    # Negative normalized DCG: minimizing this cost maximizes ApproxNDCG.
    cost = -dcg * utils.inverse_max_dcg(labels)
    return cost, tf.reshape(tf.cast(nonzero_mask, dtype=tf.float32), [-1, 1])
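
# For reference, a minimal sketch of what `utils.approx_ranks` computes,
# following the TF-Ranking formulation this loss is based on: the rank of item
# i is approximated by 0.5 + sum_j sigmoid(alpha * (s_j - s_i)), a
# differentiable surrogate for counting how many items score above it. Sketch
# only; defer to the actual utils module.
def approx_ranks_sketch(logits, alpha=10.):
    list_size = tf.shape(input=logits)[1]
    x = tf.tile(tf.expand_dims(logits, 2), [1, 1, list_size])  # s_i
    y = tf.tile(tf.expand_dims(logits, 1), [1, list_size, 1])  # s_j
    pairs = tf.sigmoid(alpha * (y - x))
    # The j == i term contributes sigmoid(0) = 0.5, so adding 0.5 makes the
    # top-scoring item's approximate rank tend to 1.
    return tf.reduce_sum(input_tensor=pairs, axis=-1) + .5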
def get_label_indices(input_ids):
    """Finds label tokens, masks them with LABEL_UNK on test instances."""
    test_label = [LABEL_0, LABEL_1, LABEL_2]
    # Mark every position holding one of the label tokens.
    test_label_mask = tf.cast(tf.zeros_like(input_ids), tf.bool)
    for token in test_label:
        test_label_mask = tf.logical_or(tf.equal(input_ids, token),
                                        test_label_mask)
    # top_k over the {0, 1} mask returns one label position per row.
    _, masked_lm_positions = tf.math.top_k(
        tf.cast(test_label_mask, tf.float32),
        k=1,
        sorted=False,
        name="masking_top_k")
    is_test_inst_bool = tf.reduce_any(test_label_mask, axis=1)
    is_test_inst = tf.cast(is_test_inst_bool, tf.float32)
    masked_label_ids = gather_index2d(input_ids, masked_lm_positions)
    # Test instances get their label token replaced by LABEL_UNK; other rows
    # scatter the original token back, which is a no-op.
    is_test_inst_int = tf.cast(is_test_inst_bool, tf.int32)
    not_is_test_inst_int = tf.cast(tf.logical_not(is_test_inst_bool), tf.int32)
    scatter_vals = LABEL_UNK * is_test_inst_int \
        + tf.reshape(masked_label_ids, [-1]) * not_is_test_inst_int
    masked_input_ids = scatter_multiple(input_ids, masked_lm_positions,
                                        scatter_vals)
    return masked_input_ids, masked_lm_positions, masked_label_ids, is_test_inst
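
# `gather_index2d` and `scatter_multiple` are helpers assumed from elsewhere
# in the repo. Minimal sketches of the behavior this function relies on (an
# assumption about their contract, not the repo's definitions): a per-row
# gather of token ids, and a per-row scatter that overwrites one position.
def gather_index2d_sketch(values, indices):
    # values: [batch, seq_len], indices: [batch, k] -> [batch, k]
    return tf.gather(values, indices, batch_dims=1)


def scatter_multiple_sketch(values, positions, scatter_vals):
    # values: [batch, seq_len], positions: [batch, 1], scatter_vals: [batch]
    onehot = tf.one_hot(tf.reshape(positions, [-1]),
                        depth=tf.shape(values)[1],
                        dtype=values.dtype)
    return values * (1 - onehot) + tf.expand_dims(scatter_vals, 1) * onehot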
def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf_logging.info("*** Features ***")
    for name in sorted(features.keys()):
        tf_logging.info("  name = %s, shape = %s" %
                        (name, features[name].shape))
    q_input_ids_1 = features["q_input_ids_1"]
    q_input_mask_1 = features["q_input_mask_1"]
    d_input_ids_1 = features["d_input_ids_1"]
    d_input_mask_1 = features["d_input_mask_1"]
    q_input_ids_2 = features["q_input_ids_2"]
    q_input_mask_2 = features["q_input_mask_2"]
    d_input_ids_2 = features["d_input_ids_2"]
    d_input_mask_2 = features["d_input_mask_2"]
    # Stack the two paired examples along a leading axis.
    q_input_ids = tf.stack([q_input_ids_1, q_input_ids_2], axis=0)
    q_input_mask = tf.stack([q_input_mask_1, q_input_mask_2], axis=0)
    q_segment_ids = tf.zeros_like(q_input_ids, tf.int32)
    d_input_ids = tf.stack([d_input_ids_1, d_input_ids_2], axis=0)
    d_input_mask = tf.stack([d_input_mask_1, d_input_mask_2], axis=0)
    d_segment_ids = tf.zeros_like(d_input_ids, tf.int32)
    label_ids = features["label_ids"]
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    if "is_real_example" in features:
        is_real_example = tf.cast(features["is_real_example"],
                                  dtype=tf.float32)
    else:
        is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
    # Dual encoder: separate variable scopes give the query and document
    # towers independent weights.
    with tf.compat.v1.variable_scope("query"):
        model_q = model_class(
            config=model_config,
            is_training=is_training,
            input_ids=q_input_ids,
            input_mask=q_input_mask,
            token_type_ids=q_segment_ids,
            use_one_hot_embeddings=train_config.use_one_hot_embeddings,
        )
    with tf.compat.v1.variable_scope("document"):
        model_d = model_class(
            config=model_config,
            is_training=is_training,
            input_ids=d_input_ids,
            input_mask=d_input_mask,
            token_type_ids=d_segment_ids,
            use_one_hot_embeddings=train_config.use_one_hot_embeddings,
        )
    pooled_q = model_q.get_pooled_output()
    pooled_d = model_d.get_pooled_output()
    # Relevance score is the dot product of the pooled representations.
    logits = tf.matmul(pooled_q, pooled_d, transpose_b=True)
    # Hinge loss on {-1, +1} targets derived from the binary labels.
    y = tf.cast(label_ids, tf.float32) * 2 - 1
    losses = tf.maximum(1.0 - logits * y, 0)
    loss = tf.reduce_mean(losses)
    pred = tf.cast(logits > 0, tf.int32)
    tvars = tf.compat.v1.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if train_config.init_checkpoint:
        initialized_variable_names, init_fn = get_init_fn(train_config, tvars)
        scaffold_fn = get_tpu_scaffold_or_init(init_fn, train_config.use_tpu)
    log_var_assignments(tvars, initialized_variable_names)
    TPUEstimatorSpec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec
    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
        if "simple_optimizer" in special_flags:
            tf_logging.info("using simple optimizer")
            train_op = create_simple_optimizer(loss,
                                               train_config.learning_rate,
                                               train_config.use_tpu)
        else:
            train_op = optimization.create_optimizer_from_config(
                loss, train_config, tvars)
        output_spec = TPUEstimatorSpec(mode=mode,
                                       loss=loss,
                                       train_op=train_op,
                                       scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:
        eval_metrics = (classification_metric_fn,
                        [pred, label_ids, is_real_example])
        output_spec = TPUEstimatorSpec(mode=mode,
                                       loss=loss,
                                       eval_metrics=eval_metrics,
                                       scaffold_fn=scaffold_fn)
    else:
        predictions = {
            "q_input_ids": q_input_ids,
            "d_input_ids": d_input_ids,
            "score": logits
        }
        useful_inputs = ["data_id", "input_ids2", "data_ids"]
        for input_name in useful_inputs:
            if input_name in features:
                predictions[input_name] = features[input_name]
        output_spec = TPUEstimatorSpec(mode=mode,
                                       predictions=predictions,
                                       scaffold_fn=scaffold_fn)
    return output_spec
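
# A small numeric check of the hinge formulation above (illustrative only):
# labels {0, 1} map to targets {-1, +1}, and any (logit, target) pair with
# logit * target >= 1 contributes zero loss, so only scores inside the margin
# produce gradient.
def _demo_hinge():
    logits = tf.constant([2.0, 0.3, -1.5])
    label_ids = tf.constant([1, 1, 0])
    y = tf.cast(label_ids, tf.float32) * 2 - 1  # [ 1.,  1., -1.]
    losses = tf.maximum(1.0 - logits * y, 0)    # [ 0., 0.7,  0.]
    return tf.reduce_mean(losses)               # ~0.2333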
def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf_logging.info("*** Features ***")
    for name in sorted(features.keys()):
        tf_logging.info("  name = %s, shape = %s" %
                        (name, features[name].shape))
    q_input_ids = features["q_input_ids"]
    q_input_mask = features["q_input_mask"]
    d_input_ids = features["d_input_ids"]
    d_input_mask = features["d_input_mask"]
    input_shape = get_shape_list(q_input_ids, expected_rank=2)
    batch_size = input_shape[0]
    doc_length = model_config.max_doc_length
    num_docs = model_config.num_docs
    # Each example packs `num_docs` document windows into one row; flatten
    # them so the document tower sees [batch * num_docs, doc_length].
    d_input_ids_unpacked = tf.reshape(d_input_ids, [-1, num_docs, doc_length])
    d_input_mask_unpacked = tf.reshape(d_input_mask,
                                       [-1, num_docs, doc_length])
    d_input_ids_flat = tf.reshape(d_input_ids_unpacked, [-1, doc_length])
    d_input_mask_flat = tf.reshape(d_input_mask_unpacked, [-1, doc_length])
    q_segment_ids = tf.zeros_like(q_input_ids, tf.int32)
    d_segment_ids = tf.zeros_like(d_input_ids_flat, tf.int32)
    label_ids = features["label_ids"]
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    if "is_real_example" in features:
        is_real_example = tf.cast(features["is_real_example"],
                                  dtype=tf.float32)
    else:
        is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
    with tf.compat.v1.variable_scope(dual_model_prefix1):
        q_model_config = copy.deepcopy(model_config)
        q_model_config.max_seq_length = model_config.max_sent_length
        model_q = model_class(
            config=q_model_config,
            is_training=is_training,
            input_ids=q_input_ids,
            input_mask=q_input_mask,
            token_type_ids=q_segment_ids,
            use_one_hot_embeddings=train_config.use_one_hot_embeddings,
        )
    with tf.compat.v1.variable_scope(dual_model_prefix2):
        d_model_config = copy.deepcopy(model_config)
        d_model_config.max_seq_length = model_config.max_doc_length
        model_d = model_class(
            config=d_model_config,
            is_training=is_training,
            input_ids=d_input_ids_flat,
            input_mask=d_input_mask_flat,
            token_type_ids=d_segment_ids,
            use_one_hot_embeddings=train_config.use_one_hot_embeddings,
        )
    pooled_q = model_q.get_pooled_output()  # [batch, vector_size]
    pooled_d_flat = model_d.get_pooled_output()  # [batch * num_docs, vector_size]
    pooled_d = tf.reshape(pooled_d_flat, [batch_size, num_docs, -1])
    pooled_q_t = tf.expand_dims(pooled_q, 1)
    pooled_d_t = tf.transpose(pooled_d, [0, 2, 1])
    # Score the query against every document window.
    all_logits = tf.matmul(pooled_q_t, pooled_d_t)  # [batch, 1, num_docs]
    if "hinge_all" in special_flags:
        apply_loss_modeling = hinge_all
    elif "sigmoid_all" in special_flags:
        apply_loss_modeling = sigmoid_all
    else:
        apply_loss_modeling = hinge_max
    logits, loss = apply_loss_modeling(all_logits, label_ids)
    pred = tf.cast(logits > 0, tf.int32)
    tvars = tf.compat.v1.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if train_config.init_checkpoint:
        initialized_variable_names, init_fn = get_init_fn(train_config, tvars)
        scaffold_fn = get_tpu_scaffold_or_init(init_fn, train_config.use_tpu)
    log_var_assignments(tvars, initialized_variable_names)
    TPUEstimatorSpec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec
    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
        if "simple_optimizer" in special_flags:
            tf_logging.info("using simple optimizer")
            train_op = create_simple_optimizer(loss,
                                               train_config.learning_rate,
                                               train_config.use_tpu)
        else:
            train_op = optimization.create_optimizer_from_config(
                loss, train_config, tvars)
        output_spec = TPUEstimatorSpec(mode=mode,
                                       loss=loss,
                                       train_op=train_op,
                                       scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:
        eval_metrics = (classification_metric_fn,
                        [pred, label_ids, is_real_example])
        output_spec = TPUEstimatorSpec(mode=mode,
                                       loss=loss,
                                       eval_metrics=eval_metrics,
                                       scaffold_fn=scaffold_fn)
    else:
        predictions = {
            "q_input_ids": q_input_ids,
            "d_input_ids": d_input_ids,
            "logits": logits
        }
        useful_inputs = ["data_id", "input_ids2", "data_ids"]
        for input_name in useful_inputs:
            if input_name in features:
                predictions[input_name] = features[input_name]
        output_spec = TPUEstimatorSpec(mode=mode,
                                       predictions=predictions,
                                       scaffold_fn=scaffold_fn)
    return output_spec
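
# `hinge_all`, `sigmoid_all`, and `hinge_max` are loss heads defined elsewhere
# in the repo; each maps ([batch, 1, num_docs] window scores, labels) to
# (per-example logits, scalar loss). A minimal sketch of what a
# `hinge_max`-style head could look like under that contract (an assumption,
# not the repo's definition):
def hinge_max_sketch(all_logits, label_ids):
    # Score each query-document pair by its best-matching window, then apply
    # the same margin-1 hinge loss as the pairwise model above.
    logits = tf.reduce_max(all_logits, axis=[1, 2])  # [batch]
    y = tf.cast(label_ids, tf.float32) * 2 - 1
    loss = tf.reduce_mean(tf.maximum(1.0 - logits * y, 0))
    return logits, loss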