def run_from_flags(melody_encoder_decoder):
  """Build a MelodyRNNPipeline from command-line flags and run it serially.

  Args:
    melody_encoder_decoder: encoder/decoder object forwarded to
      MelodyRNNPipeline.
  """
  melody_pipeline = MelodyRNNPipeline(melody_encoder_decoder, FLAGS.eval_ratio)
  record_iter = pipeline.tf_record_iterator(FLAGS.input,
                                            melody_pipeline.input_type)
  pipeline.run_pipeline_serial(melody_pipeline, record_iter, FLAGS.output_dir)
def build_dataset(pipeline_config, pipeline_graph_def):
    """Run a pipeline graph over every .tfrecord file in the source dir.

    Args:
        pipeline_config: dict providing at least 'data_source_dir' and
            'data_target_dir'.
        pipeline_graph_def: callable(collection_name=..., config=...) that
            returns a pipeline graph exposing `input_type`.
    """
    source_dir = pipeline_config['data_source_dir']
    output_dir = pipeline_config['data_target_dir']

    print('INFO: Target {}.'.format(output_dir))
    print('INFO: Collated data sourced from {}.'.format(source_dir))

    tfrecord_files = [name for name in os.listdir(source_dir)
                      if name.endswith('.tfrecord')]
    for src_file in tfrecord_files:
        collection_name, _ = os.path.splitext(src_file)
        src_file_path = os.path.join(source_dir, src_file)

        print('\nINFO: Building {} dataset...'.format(collection_name))

        # Construct the pipeline graph for this collection.
        pipeline_graph = pipeline_graph_def(
            collection_name=collection_name, config=pipeline_config)

        # Run the graph over the record file; results land in output_dir.
        run_pipeline_text(
            pipeline_graph,
            pipeline.tf_record_iterator(src_file_path,
                                        pipeline_graph.input_type),
            output_dir)
def main(unused_argv):
    """Build the conditional-RNN dataset pipeline from flags and run it.

    When FLAGS.csv is supplied, composer metadata is loaded from that CSV
    (constrained or unconstrained set depending on FLAGS.constrained).
    """
    tf.logging.set_verbosity(FLAGS.log)

    data = None
    composers = None
    if FLAGS.csv:
        # Renamed from `csv` to avoid shadowing the stdlib module name.
        csv_path = os.path.expanduser(FLAGS.csv)
        tf.logging.info("CSV file provided, populating metadata")
        data = pd.read_csv(csv_path)
        if FLAGS.constrained:
            tf.logging.info("Constrained")
            composers, _ = models.get_composers_constrained()
        else:
            tf.logging.info("Unconstrained")
            composers, _ = models.get_composers(csv_path)

    pipeline_instance = condrnn_pipeline.get_pipeline(
        min_events=32,
        max_events=512,
        eval_ratio=FLAGS.eval_ratio,
        config=models.get_config_with_csv(composers),
        data=data)

    # Expand '~' in user-supplied paths before running the pipeline.
    source = os.path.expanduser(FLAGS.input)
    target = os.path.expanduser(FLAGS.output_dir)
    pipeline.run_pipeline_serial(
        pipeline_instance,
        pipeline.tf_record_iterator(source, pipeline_instance.input_type),
        target)
Example #4
0
def run_from_flags(melody_encoder_decoder):
    """Assemble a MelodyRNNPipeline from flags and run it over FLAGS.input."""
    rnn_pipeline = MelodyRNNPipeline(melody_encoder_decoder, FLAGS.eval_ratio)
    records = pipeline.tf_record_iterator(FLAGS.input, rnn_pipeline.input_type)
    pipeline.run_pipeline_serial(rnn_pipeline, records, FLAGS.output_dir)
def main(unused_argv):
  """Run the melody RNN dataset pipeline configured from command-line flags."""
  tf.logging.set_verbosity(FLAGS.log)
  config = melody_rnn_config_flags.config_from_flags()
  pipeline_instance = get_pipeline(config, FLAGS.eval_ratio)
  # Expand '~' in user-supplied paths before use.
  FLAGS.input = os.path.expanduser(FLAGS.input)
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
  record_iter = pipeline.tf_record_iterator(FLAGS.input,
                                            pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, record_iter,
                               FLAGS.output_dir)
Example #6
0
def run_from_flags(pipeline_instance):
    """Run `pipeline_instance` serially over FLAGS.input into FLAGS.output_dir."""
    tf.logging.set_verbosity(FLAGS.log)
    # Expand '~' in user-supplied paths.
    FLAGS.input = os.path.expanduser(FLAGS.input)
    FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
    records = pipeline.tf_record_iterator(FLAGS.input,
                                          pipeline_instance.input_type)
    pipeline.run_pipeline_serial(pipeline_instance, records, FLAGS.output_dir)
 def testTFRecordIterator(self):
     """tf_record_iterator yields one MockStringProto per stored record."""
     tfrecord_file = os.path.join(
         tf.resource_loader.get_data_files_path(),
         '../testdata/tfrecord_iterator_test.tfrecord')
     expected = [MockStringProto(s)
                 for s in [b'hello world', b'12345', b'success']]
     actual = list(pipeline.tf_record_iterator(tfrecord_file, MockStringProto))
     self.assertEqual(expected, actual)
def run_from_flags(pipeline_instance):
  """Run `pipeline_instance` serially over the records in FLAGS.input."""
  tf.logging.set_verbosity(FLAGS.log)
  # Normalize user-supplied paths (expand '~').
  FLAGS.input = os.path.expanduser(FLAGS.input)
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
  record_iter = pipeline.tf_record_iterator(FLAGS.input,
                                            pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, record_iter,
                               FLAGS.output_dir)
Example #9
0
 def testTFRecordIterator(self):
   """The iterator converts each raw record into a MockStringProto."""
   tfrecord_file = os.path.join(
       tf.resource_loader.get_data_files_path(),
       '../testdata/tfrecord_iterator_test.tfrecord')
   raw_strings = [b'hello world', b'12345', b'success']
   self.assertEqual(
       [MockStringProto(s) for s in raw_strings],
       list(pipeline.tf_record_iterator(tfrecord_file, MockStringProto)))
Example #10
0
def partition(config: str, input: str, output_dir: str, eval_ratio: float):
  """Split a NoteSequence TFRecord into eval/train partitions and validate each.

  Args:
    config: model configuration name forwarded to each TensorValidator.
    input: path to the input TFRecord of NoteSequences. NOTE(review): the
      parameter name shadows the `input` builtin; kept unchanged so existing
      keyword callers are not broken.
    output_dir: directory that receives one output per mode ("eval", "train").
    eval_ratio: fraction of sequences routed to the "eval" partition.
      FIX: annotation corrected from `int` to `float` — a partition ratio is
      fractional (other call sites here pass FLAGS.eval_ratio, a ratio).
  """
  modes = ["eval", "train"]
  partitioner = RandomPartition(NoteSequence, modes, [eval_ratio])
  dag = {partitioner: DagInput(NoteSequence)}
  for mode in modes:
    validator = TensorValidator(NoteSequence, f"{mode}_TensorValidator", config)
    dag[validator] = partitioner[f"{mode}"]
    dag[DagOutput(f"{mode}")] = validator
  # Renamed local from `pipeline` to avoid shadowing the `pipeline` module
  # name used throughout this file.
  dag_pipeline = DAGPipeline(dag)
  run_pipeline_serial(
      dag_pipeline, tf_record_iterator(input, dag_pipeline.input_type),
      output_dir)
def run_from_flags():
  """Create the melody pipeline from flag-derived config and run it serially."""
  tf.logging.set_verbosity(FLAGS.log)

  config = melody_rnn_config.config_from_flags()
  pipeline_instance = get_pipeline(config.encoder_decoder)
  # Expand '~' in user-supplied paths.
  FLAGS.input = os.path.expanduser(FLAGS.input)
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
  records = pipeline.tf_record_iterator(FLAGS.input,
                                        pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, records, FLAGS.output_dir)
def main(unused_argv):
  """Entry point: build the drums RNN dataset pipeline and run it serially."""
  tf.logging.set_verbosity(FLAGS.log)

  config = drums_rnn_config_flags.config_from_flags()
  pipeline_instance = get_pipeline(config, FLAGS.eval_ratio)

  # Expand '~' in user-supplied paths.
  FLAGS.input = os.path.expanduser(FLAGS.input)
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
  record_iter = pipeline.tf_record_iterator(FLAGS.input,
                                            pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, record_iter,
                               FLAGS.output_dir)
Example #13
0
def main(unused_argv):
    """Build and run the pianoroll RNN-NADE dataset pipeline serially."""
    tf.logging.set_verbosity(FLAGS.log)

    rnn_nade_pipeline = pianoroll_rnn_nade_pipeline.get_pipeline(
        min_steps=80,  # 5 measures
        max_steps=2048,
        eval_ratio=FLAGS.eval_ratio,
        config=pianoroll_rnn_nade_model.default_configs[FLAGS.config])

    # Expand '~' in user-supplied paths before handing them to the pipeline.
    records = pipeline.tf_record_iterator(os.path.expanduser(FLAGS.input),
                                          rnn_nade_pipeline.input_type)
    pipeline.run_pipeline_serial(rnn_nade_pipeline, records,
                                 os.path.expanduser(FLAGS.output_dir))
def main(unused_argv):
  """Build the polyphony RNN dataset pipeline and run it serially."""
  tf.logging.set_verbosity(FLAGS.log)

  poly_pipeline = polyphony_rnn_pipeline.get_pipeline(
      min_steps=80,  # 5 measures
      max_steps=512,
      eval_ratio=FLAGS.eval_ratio,
      config=polyphony_model.default_configs['polyphony'])

  # Expand '~' in user-supplied paths.
  records = pipeline.tf_record_iterator(os.path.expanduser(FLAGS.input),
                                        poly_pipeline.input_type)
  pipeline.run_pipeline_serial(poly_pipeline, records,
                               os.path.expanduser(FLAGS.output_dir))
def main(unused_argv):
  """Build the performance RNN dataset pipeline and run it serially."""
  tf.logging.set_verbosity(FLAGS.log)

  perf_pipeline = get_pipeline(
      min_events=32,
      max_events=512,
      eval_ratio=FLAGS.eval_ratio,
      config=performance_model.default_configs[FLAGS.config])

  # Expand '~' in user-supplied paths.
  source = os.path.expanduser(FLAGS.input)
  target = os.path.expanduser(FLAGS.output_dir)
  records = pipeline.tf_record_iterator(source, perf_pipeline.input_type)
  pipeline.run_pipeline_serial(perf_pipeline, records, target)
Example #16
0
def main(unused_argv):
  """Build a polyphony dataset pipeline (4 steps per quarter) and run it."""
  tf.logging.set_verbosity(FLAGS.log)

  poly_pipeline = get_pipeline(
      steps_per_quarter=4,
      min_steps=80,  # 5 measures
      max_steps=512,
      eval_ratio=FLAGS.eval_ratio,
      config=polyphony_model.default_configs['polyphony'])

  # Expand '~' in user-supplied paths.
  source = os.path.expanduser(FLAGS.input)
  target = os.path.expanduser(FLAGS.output_dir)
  records = pipeline.tf_record_iterator(source, poly_pipeline.input_type)
  pipeline.run_pipeline_serial(poly_pipeline, records, target)
Example #17
0
    def _process_tfrecord(src_file):
        """Build and run a pipeline graph for one .tfrecord source file.

        Non-.tfrecord files are ignored. Reads `pipeline_config`,
        `pipeline_graph_def` and `output_dir` from the enclosing scope.
        """
        if not src_file.endswith('.tfrecord'):
            return

        collection_name, _ = os.path.splitext(src_file)
        src_file_path = os.path.join(pipeline_config['data_source_dir'],
                                     src_file)

        print('\nINFO: Building {} dataset...'.format(collection_name))

        # Construct the pipeline graph for this collection.
        pipeline_graph = pipeline_graph_def(
            collection_name=collection_name, config=pipeline_config)

        # Run the graph over the record file; results land in output_dir.
        run_pipeline_text(
            pipeline_graph,
            pipeline.tf_record_iterator(src_file_path,
                                        pipeline_graph.input_type),
            output_dir)
Example #18
0
def main(unused_argv):
    """Build the melody dataset and optionally dump id→filename mappings.

    Runs the pipeline built from flag config; when the model learns an
    initial state, also writes a CSV mapping melody ids to source files
    into FLAGS.output_dir.
    """
    tf.logging.set_verbosity(FLAGS.log)

    config = melody_rnn_config_flags.config_from_flags()
    pipeline_instance, id_pipeline_instance = get_pipeline(
        config, FLAGS.eval_ratio)

    # Expand '~' in user-supplied paths.
    FLAGS.input = os.path.expanduser(FLAGS.input)
    FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
    pipeline.run_pipeline_serial(
        pipeline_instance,
        pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type),
        FLAGS.output_dir)

    # Write id/file mappings
    if config.learn_initial_state:
        # BUG FIX: dict.iteritems() is Python 2 only and raises
        # AttributeError on Python 3 (this file uses f-strings elsewhere,
        # so it targets Python 3) -> use items(). Also use a context
        # manager so the file is closed even on error, and avoid shadowing
        # the builtins `file` and `id`.
        mappings_path = os.path.join(FLAGS.output_dir, 'melody-ids.csv')
        with open(mappings_path, 'w') as mappings_file:
            for melody_id, filename in id_pipeline_instance.mappings.items():
                mappings_file.write('%d, %s\n' % (melody_id, filename))
def run_from_flags(pipeline_instance):
    """Run `pipeline_instance` over FLAGS.input, writing to FLAGS.output_dir."""
    tf.logging.set_verbosity(tf.logging.INFO)
    records = pipeline.tf_record_iterator(FLAGS.input,
                                          pipeline_instance.input_type)
    pipeline.run_pipeline_serial(pipeline_instance, records, FLAGS.output_dir)
Example #20
0
    # Convert every MIDI file in the directory to a NoteSequence proto and
    # write it via `writer`. NOTE(review): this loop belongs to a function
    # whose header is outside this view; `files_in_dir`, `input_dir`,
    # `work_dir`, `output_dir` and `writer` come from that enclosing scope.
    for file_in_dir in files_in_dir:
        full_file_path = os.path.join(input_dir, file_in_dir)
        print(full_file_path)
        try:
            sequence = midi_io.midi_to_sequence_proto(
                tf.io.gfile.GFile(full_file_path, 'rb').read())
        except midi_io.MIDIConversionError as e:
            tf.logging.warning('Could not parse midi file %s. Error was: %s',
                               full_file_path, e)
            # BUG FIX: without `continue`, a failed conversion fell through
            # and reused the previous iteration's `sequence` (or raised
            # NameError on the very first file).
            continue

        sequence.collection_name = os.path.basename(work_dir)
        sequence.filename = os.path.join(output_dir,
                                         os.path.basename(full_file_path))
        sequence.id = note_sequence_io.generate_note_sequence_id(
            sequence.filename, sequence.collection_name, 'midi')

        if sequence:
            writer.write(sequence)

# Feed the anthems TFRecord through the attention-RNN melody pipeline.
# NOTE(review): `anthems_file` and `output_dir` are defined outside this
# view; `dataset` is created here but not used below — presumably consumed
# later in the script, verify.
filenames = [anthems_file]
dataset = tf.data.TFRecordDataset(filenames)

config = melody_rnn_model.default_configs['attention_rnn']
# eval_ratio=0.0: every sequence goes to the training partition.
pipeline_instance = melody_rnn_pipeline.get_pipeline(config, eval_ratio=0.0)

pipeline.run_pipeline_serial(
    pipeline_instance,
    pipeline.tf_record_iterator(anthems_file, pipeline_instance.input_type),
    output_dir)
def run_from_flags(pipeline_instance):
  """Serially run `pipeline_instance` on FLAGS.input into FLAGS.output_dir."""
  tf.logging.set_verbosity(tf.logging.INFO)
  record_iter = pipeline.tf_record_iterator(FLAGS.input,
                                            pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, record_iter,
                               FLAGS.output_dir)