def test_filter_by_length(self):

  def seq_len(seq):
    return tf.reduce_sum(tf.cast(self.vocab.padding_mask(seq), tf.int32))

  n, len1, len2 = 10, 20, 30
  max_len = 25
  x1 = tf.pad(
      self.sampler.sample([n, len1]),
      [[0, 0], [0, len2 - len1]],
      constant_values=self.vocab.padding_code)
  x2 = self.sampler.sample([n, len2])
  x = tf.concat([x1, x2], 0)

  # Filters encoded sequences, computing their lengths on-the-fly.
  filter_fn = transforms.FilterByLength(
      on='seq', vocab=self.vocab, max_len=max_len, precomputed=False)
  ds_in = tf.data.Dataset.from_tensor_slices({'seq': x})
  ds_out = ds_in.apply(filter_fn.call)
  for ex in ds_out:
    self.assertLessEqual(seq_len(ex['seq']), max_len)

  # Filters string (decoded) sequences, computing their lengths on-the-fly.
  filter_fn = transforms.FilterByLength(
      on='seq', vocab=self.vocab, max_len=max_len, precomputed=False)
  ds_in = tf.data.Dataset.from_tensor_slices(
      {'seq': [tf.convert_to_tensor(self.vocab.decode(x_i)) for x_i in x]})
  ds_out = ds_in.apply(filter_fn.call)
  for ex in ds_out:
    self.assertLessEqual(tf.strings.length(ex['seq']), max_len)

  # Filters using precomputed sequence lengths stored under 'seq_len'.
  seq_lens = tf.convert_to_tensor([seq_len(x_i) for x_i in x], tf.int32)
  filter_fn = transforms.FilterByLength(
      on='seq_len', vocab=self.vocab, max_len=max_len, precomputed=True)
  ds_in = tf.data.Dataset.from_tensor_slices({'seq': x, 'seq_len': seq_lens})
  ds_out = ds_in.apply(filter_fn.call)
  for ex in ds_out:
    self.assertLessEqual(seq_len(ex['seq']), max_len)
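# Hedged sketch (not part of the test above): the behaviour being verified can
# be reproduced with plain tf.data primitives, assuming padded integer
# sequences with padding code 0. `transforms.FilterByLength` exposes this kind
# of length predicate through the `DatasetTransform` interface used elsewhere
# in the builders.
import tensorflow as tf

padding_code = 0
max_len = 25
seqs = tf.ragged.constant([[3, 1, 4], [1] * 30]).to_tensor(padding_code)
ds = tf.data.Dataset.from_tensor_slices({'seq': seqs})

def keep_short(ex):
  # Counts non-padding positions and keeps only short-enough sequences.
  length = tf.reduce_sum(
      tf.cast(tf.not_equal(ex['seq'], padding_code), tf.int32))
  return length <= max_len

ds = ds.filter(keep_short)  # Plays the role of ds_in.apply(filter_fn.call).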
def main(_):
  has_context = FLAGS.task.endswith('with_ctx')

  logging.info('seed: %d', FLAGS.seed)
  logging.info('task: %s', FLAGS.task)
  logging.info('split: %s', FLAGS.split)
  logging.info('max_len: %d', FLAGS.max_len)
  logging.info('index_keys: %s', FLAGS.index_keys)
  logging.info('smoothing: %s', FLAGS.smoothing)
  logging.info('branch_key: %s', FLAGS.branch_key)
  logging.info('n_pairs: %d', FLAGS.n_pairs)
  logging.info('has_context: %s', has_context)

  tf.random.set_seed(FLAGS.seed)

  # Configures the fields to be read from the input TFRecords.
  extra_keys = ['cla_key', 'seq_key', 'seq_len'] + FLAGS.index_keys
  if has_context:
    extra_keys.extend(['start', 'end'])
  ds_loader = LOADERS[FLAGS.loader_cls](root_dir=FLAGS.data_dir,
                                        sub_dir='',
                                        extra_keys=extra_keys,
                                        task=FLAGS.task)
  ds = ds_loader.load(FLAGS.split)

  # Filters by length (reserving one position for EOS), pairs sequences by
  # stratified sampling and serializes the resulting examples.
  filter_fn = transforms.FilterByLength(max_len=FLAGS.max_len - 1)
  pair_fn = align_transforms.StratifiedSamplingPairing(
      index_keys=FLAGS.index_keys,
      branch_key=FLAGS.branch_key,
      smoothing=FLAGS.smoothing)
  ds = ds.apply(filter_fn).apply(pair_fn).take(FLAGS.n_pairs)
  ds = ds.map(make_serialize_fn(), num_parallel_calls=tf.data.AUTOTUNE)
  ds = ds.prefetch(tf.data.AUTOTUNE)

  # Writes one TFRecord shard per seed.
  out_dir = os.path.join(FLAGS.out_dir, FLAGS.task, FLAGS.branch_key,
                         FLAGS.split)
  filename = f'{FLAGS.seed}.tfrecords'
  tf.io.gfile.makedirs(out_dir)
  path = os.path.join(out_dir, filename)
  logging.info('Writing TFRecords to %s...', path)
  with tf.io.TFRecordWriter(path) as writer:
    for i, serialized_example in enumerate(ds):
      if (i % 1024) == 0:
        logging.info('Seed: %d, progress: %d/%d.', FLAGS.seed, i,
                     FLAGS.n_pairs)
      writer.write(serialized_example.numpy())
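# Hedged sketch (assumed, not part of the script above): each invocation writes
# one shard named `<seed>.tfrecords` under
# `<out_dir>/<task>/<branch_key>/<split>/`, so shards produced with different
# seeds can later be read back together with standard tf.data APIs. The glob
# pattern below is hypothetical.
import tensorflow as tf

def count_records(pattern):
  """Counts serialized examples across all shards matching `pattern`."""
  filenames = tf.io.gfile.glob(pattern)
  ds = tf.data.TFRecordDataset(filenames)
  return sum(1 for _ in ds)

# count_records('/out_dir/task/branch_key/split/*.tfrecords')  # hypothetical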
def make_tape_builder(root_dir,
                      task,
                      target,
                      weights=None,
                      metadata=(),
                      max_len=1024,
                      input_sequence_key='primary',
                      output_sequence_key='sequence'):
  """Creates a DatasetBuilder for TAPE's benchmark."""
  supported_tasks = list(TAPE_NUM_OUTPUTS)
  if task not in supported_tasks:
    raise ValueError(f'Task {task} not recognized. '
                     f'Supported tasks: {", ".join(supported_tasks)}.')
  num_outputs = TAPE_NUM_OUTPUTS[task].get(target, 1)

  used_keys = [input_sequence_key, target]
  if weights is not None:
    used_keys.append(weights)
  if metadata:
    used_keys.extend(metadata)
  unused_keys = [k for k in TAPE_SPECS[task] if k not in used_keys]

  ds_transformations = []
  if max_len is not None:
    ds_transformations.append(
        transforms.FilterByLength(on=output_sequence_key,
                                  precomputed=False,
                                  max_len=max_len - 1))

  transformations = [
      transforms.Pop(on=unused_keys),
      transforms.Reshape(on=output_sequence_key, shape=[]),
      transforms.Encode(on=output_sequence_key),
      transforms.EOS(on=output_sequence_key),
      transforms.CropOrPad(on=output_sequence_key, size=max_len),
  ]
  if target in TAPE_MULTI_CL_TASKS:
    transformations.append(transforms.OneHot(on=target, depth=num_outputs))
  elif target in TAPE_BACKBONE_ANGLE_TASKS:
    transformations.append(transforms.BackboneAngleTransform(on=target))
  elif target in TAPE_PROT_ENGINEERING_TASKS:
    transformations.append(transforms.Reshape(on=target, shape=[-1]))
  if target in TAPE_SEQ2SEQ_TASKS:
    transformations.extend([
        transforms.Reshape(on=target, shape=[-1, num_outputs]),
        transforms.CropOrPadND(on=target, size=max_len, axis=0),
    ])
  if weights is not None:  # Note: no seq-level TAPE task has weights.
    transformations.extend([
        transforms.Reshape(on=weights, shape=[-1]),
        transforms.CropOrPadND(on=weights, size=max_len),
    ])

  embeddings_labels = [target] if weights is None else [(target, weights)]
  return builder.DatasetBuilder(
      data_loader=make_tape_loader(root_dir=root_dir, task=task),
      ds_transformations=ds_transformations,
      transformations=transformations,
      labels=multi_task.Backbone(embeddings=embeddings_labels),
      metadata=metadata,
      sequence_key=output_sequence_key)
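# Hedged sketch of why the length filter above uses `max_len - 1`: the
# pipeline appends an EOS token and then crops/pads to `max_len`, so only raw
# sequences of at most `max_len - 1` residues keep their EOS after padding.
# The token codes and helper below are illustrative, not the library's API.
import tensorflow as tf

def append_eos_then_pad(seq, max_len, eos_code=1, pad_code=0):
  """Appends EOS and crops/pads the result to exactly `max_len` tokens."""
  seq = tf.concat([seq, [eos_code]], axis=0)
  seq = seq[:max_len]
  return tf.pad(seq, [[0, max_len - tf.shape(seq)[0]]],
                constant_values=pad_code)

print(append_eos_then_pad(tf.constant([5, 7, 9]), max_len=6))  # [5 7 9 1 0 0]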
def make_pair_builder(max_len=512,
                      index_keys=('fam_key', 'ci_100'),
                      process_negatives=True,
                      gap_token='-',
                      sequence_key='sequence',
                      context_sequence_key='full_sequence',
                      loader_cls=make_pfam_pairs_loader,
                      pairing_cls=None,
                      lm_cls=None,
                      has_context=False,
                      append_eos=True,
                      append_eos_context=True,
                      add_random_tails=False,
                      **kwargs):
  """Creates a dataset for pairs of sequences."""
  # Convenience function to index key pairs.
  paired_keys = lambda k: tuple(f'{k}_{i}' for i in (1, 2))

  def stack_and_pop(on):
    stack = transforms.Stack(on=paired_keys(on), out=on)
    pop = transforms.Pop(on=paired_keys(on))
    return [stack, pop]

  # Defines fields to be read from the TFRecords.
  metadata_keys = ['cla_key', 'seq_key'] + list(index_keys)
  extra_keys = metadata_keys.copy()
  # Pre-paired datasets have already been filtered by length; seq_len is only
  # needed when pairing sequences on-the-fly.
  if pairing_cls is not None:
    extra_keys.append('seq_len')
  # Optionally, adds fields needed by the `AddAlignmentContext` `Transform`.
  if has_context:
    extra_keys.extend(['start', 'end'])
  add_alignment_context_extra_args = (paired_keys(context_sequence_key) +
                                      paired_keys('start') +
                                      paired_keys('end'))

  # Accounts for EOS token if necessary.
  max_len_eos = max_len - 1 if append_eos else max_len

  ### Sets up the `DatasetTransform`s.
  ds_transformations = []
  if pairing_cls is not None:
    filter_by_length = transforms.FilterByLength(max_len=max_len_eos)
    # NOTE(fllinares): pairing on-the-fly is memory intensive on TPU for some
    # reason not yet understood...
    pair_sequences = pairing_cls(index_keys=index_keys)
    ds_transformations.extend([filter_by_length, pair_sequences])

  ### Sets up the `Transform`s applied *before* batching.
  project_msa_rows = align_transforms.ProjectMSARows(
      on=paired_keys(sequence_key), token=gap_token)
  append_eos_to_context = transforms.EOS(
      on=paired_keys(context_sequence_key))
  add_alignment_context = align_transforms.AddAlignmentContext(
      on=paired_keys(sequence_key) + add_alignment_context_extra_args,
      out=paired_keys(sequence_key),
      max_len=max_len_eos,
      gap_token=gap_token)
  trim_alignment = align_transforms.TrimAlignment(
      on=paired_keys(sequence_key), gap_token=gap_token)
  pop_add_alignment_context_extra_args = transforms.Pop(
      on=add_alignment_context_extra_args)
  add_random_prefix_and_suffix = align_transforms.AddRandomTails(
      on=paired_keys(sequence_key), max_len=max_len_eos)
  create_alignment_targets = align_transforms.CreateAlignmentTargets(
      on=paired_keys(sequence_key),
      out='alignment/targets',
      gap_token=gap_token,
      n_prepend_tokens=0)
  pid1 = align_transforms.PID(
      on=paired_keys(sequence_key),
      out='alignment/pid1',
      definition=1,
      token=gap_token)
  pid3 = align_transforms.PID(
      on=paired_keys(sequence_key),
      out='alignment/pid3',
      definition=3,
      token=gap_token)
  remove_gaps = transforms.RemoveTokens(
      on=paired_keys(sequence_key), tokens=gap_token)
  append_eos_to_sequence = transforms.EOS(on=paired_keys(sequence_key))
  pad_sequences = transforms.CropOrPad(
      on=paired_keys(sequence_key), size=max_len)
  pad_alignment_targets = transforms.CropOrPadND(
      on='alignment/targets', size=2 * max_len)

  transformations = [project_msa_rows]
  if has_context:
    if append_eos_context:
      transformations.append(append_eos_to_context)
    transformations.extend([
        add_alignment_context,
        trim_alignment,
        pop_add_alignment_context_extra_args,
    ])
  if add_random_tails:
    transformations.append(add_random_prefix_and_suffix)
  transformations.append(create_alignment_targets)
  transformations.extend([pid1, pid3, remove_gaps])
  if append_eos:
    transformations.append(append_eos_to_sequence)
  transformations.extend([pad_sequences, pad_alignment_targets])
  for key in [sequence_key] + metadata_keys:
    transformations.extend(stack_and_pop(key))

  ### Sets up the `Transform`s applied *after* batching.
  flatten_sequence_pairs = transforms.Reshape(
      on=sequence_key, shape=[-1, max_len])
  flatten_metadata_pairs = transforms.Reshape(on=metadata_keys, shape=[-1])
  create_homology_targets = align_transforms.CreateHomologyTargets(
      on='fam_key',
      out='homology/targets',
      process_negatives=process_negatives)
  create_alignment_weights = align_transforms.CreateBatchedWeights(
      on='alignment/targets', out='alignment/weights')
  add_neg_alignment_targets_and_weights = align_transforms.PadNegativePairs(
      on=('alignment/targets', 'alignment/weights'))
  pad_neg_pid = align_transforms.PadNegativePairs(
      on=('alignment/pid1', 'alignment/pid3'), value=-1.0)

  batched_transformations = [
      flatten_sequence_pairs, flatten_metadata_pairs, create_homology_targets
  ]
  if process_negatives:
    batched_transformations.extend([
        create_alignment_weights,
        add_neg_alignment_targets_and_weights,
        pad_neg_pid,
    ])
  if lm_cls is not None:
    create_lm_targets = lm_cls(
        on=sequence_key,
        out=(sequence_key, 'masked_lm/targets', 'masked_lm/weights'))
    batched_transformations.append(create_lm_targets)

  ### Sets up the remainder of the `DatasetBuilder` configuration.
  masked_lm_labels = ('masked_lm/targets', 'masked_lm/weights')
  alignment_labels = ('alignment/targets' if not process_negatives
                      else ('alignment/targets', 'alignment/weights'))
  homology_labels = 'homology/targets'
  embeddings = () if lm_cls is None else (masked_lm_labels,)
  alignments = (alignment_labels, homology_labels)
  return builder.DatasetBuilder(
      data_loader=loader_cls(extra_keys),
      ds_transformations=ds_transformations,
      transformations=transformations,
      batched_transformations=batched_transformations,
      labels=multi_task.Backbone(embeddings=embeddings,
                                 alignments=alignments),
      metadata=('seq_key', 'alignment/pid1', 'alignment/pid3'),
      sequence_key=sequence_key,
      **kwargs)
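# Hedged sketch of the `stack_and_pop` convention above: per-pair features are
# stored as `<key>_1` / `<key>_2`, stacked into a single `<key>` entry with
# leading dimension 2, and the paired keys are then dropped. After batching,
# `Reshape(shape=[-1, max_len])` flattens [batch, 2, max_len] into
# [2 * batch, max_len]. The dict below is illustrative data, not library code.
import tensorflow as tf

example = {
    'sequence_1': tf.constant([3, 1, 4, 0]),  # already padded to max_len=4
    'sequence_2': tf.constant([1, 5, 9, 2]),
}
example['sequence'] = tf.stack(
    [example.pop('sequence_1'), example.pop('sequence_2')], axis=0)
print(example['sequence'].shape)  # (2, 4); batching later gives (batch, 2, 4)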