def run_from_flags(melody_encoder_decoder):
  """Build a MelodyRNNPipeline from flags and run it serially.

  Args:
    melody_encoder_decoder: encoder/decoder handed to MelodyRNNPipeline.
  """
  melody_pipeline = MelodyRNNPipeline(melody_encoder_decoder, FLAGS.eval_ratio)
  record_iter = pipeline.tf_record_iterator(
      FLAGS.input, melody_pipeline.input_type)
  pipeline.run_pipeline_serial(melody_pipeline, record_iter, FLAGS.output_dir)
def run_from_flags(pipeline_instance):
  """Run `pipeline_instance` serially over FLAGS.input into FLAGS.output_dir.

  Args:
    pipeline_instance: pipeline object exposing `input_type`.
  """
  tf.logging.set_verbosity(FLAGS.log)
  # Resolve '~' in the user-supplied paths before handing them to the pipeline.
  FLAGS.input = os.path.expanduser(FLAGS.input)
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
  records = pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, records, FLAGS.output_dir)
def main(unused_argv):
  """Entry point: build the drums-RNN dataset pipeline from flags and run it."""
  tf.logging.set_verbosity(FLAGS.log)

  config = drums_rnn_config_flags.config_from_flags()
  pipeline_instance = get_pipeline(config, FLAGS.eval_ratio)

  # Resolve '~' in the user-supplied paths.
  FLAGS.input = os.path.expanduser(FLAGS.input)
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
  record_iter = pipeline.tf_record_iterator(
      FLAGS.input, pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, record_iter, FLAGS.output_dir)
def main(unused_argv):
  """Entry point: run the performance-RNN dataset pipeline from flags."""
  tf.logging.set_verbosity(FLAGS.log)

  config = performance_model.default_configs[FLAGS.config]
  pipeline_instance = get_pipeline(
      min_events=32,
      max_events=512,
      eval_ratio=FLAGS.eval_ratio,
      config=config)

  # Resolve '~' in the user-supplied paths.
  input_dir = os.path.expanduser(FLAGS.input)
  output_dir = os.path.expanduser(FLAGS.output_dir)
  source = pipeline.tf_record_iterator(input_dir, pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, source, output_dir)
def main(unused_argv):
  """Entry point: run the polyphony-RNN dataset pipeline from flags."""
  tf.logging.set_verbosity(FLAGS.log)

  config = polyphony_model.default_configs['polyphony']
  pipeline_instance = polyphony_rnn_pipeline.get_pipeline(
      min_steps=80,  # 5 measures
      max_steps=512,
      eval_ratio=FLAGS.eval_ratio,
      config=config)

  # Resolve '~' in the user-supplied paths.
  input_dir = os.path.expanduser(FLAGS.input)
  output_dir = os.path.expanduser(FLAGS.output_dir)
  source = pipeline.tf_record_iterator(input_dir, pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, source, output_dir)
    def testRunPipelineSerial(self):
        """run_pipeline_serial writes one TFRecord per dataset with all outputs."""
        strings = ['abcdefg', 'helloworld!', 'qwerty']
        root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
        pipeline.run_pipeline_serial(MockPipeline(), iter(strings), root_dir)

        first_path = os.path.join(root_dir, 'dataset_1.tfrecord')
        second_path = os.path.join(root_dir, 'dataset_2.tfrecord')
        self.assertTrue(tf.gfile.Exists(first_path))
        self.assertTrue(tf.gfile.Exists(second_path))

        # dataset_1 receives both the _A and _B outputs for every input string.
        expected_first = {('serialized:%s_%s' % (s, suffix)).encode('utf-8')
                          for s in strings for suffix in ('A', 'B')}
        self.assertEqual(expected_first,
                         set(tf.python_io.tf_record_iterator(first_path)))

        # dataset_2 receives only the _C outputs.
        expected_second = {('serialized:%s_C' % s).encode('utf-8')
                           for s in strings}
        self.assertEqual(expected_second,
                         set(tf.python_io.tf_record_iterator(second_path)))
Example #7
0
  def testRunPipelineSerial(self):
    """Serial run writes two TFRecord files containing all expected outputs."""
    strings = ['abcdefg', 'helloworld!', 'qwerty']
    root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    pipeline.run_pipeline_serial(MockPipeline(), iter(strings), root_dir)

    path_1 = os.path.join(root_dir, 'dataset_1.tfrecord')
    path_2 = os.path.join(root_dir, 'dataset_2.tfrecord')
    self.assertTrue(tf.gfile.Exists(path_1))
    self.assertTrue(tf.gfile.Exists(path_2))

    # dataset_1 holds the union of the _A and _B outputs for every string.
    expected_1 = set()
    for suffix in ('A', 'B'):
      for s in strings:
        expected_1.add(('serialized:%s_%s' % (s, suffix)).encode('utf-8'))
    self.assertEqual(expected_1, set(tf.python_io.tf_record_iterator(path_1)))

    # dataset_2 holds only the _C outputs.
    expected_2 = {('serialized:%s_C' % s).encode('utf-8') for s in strings}
    self.assertEqual(expected_2, set(tf.python_io.tf_record_iterator(path_2)))
    # Convert each MIDI file in the input directory to a NoteSequence proto
    # and write it out. Files that fail to parse are logged and skipped.
    for file_in_dir in files_in_dir:
        full_file_path = os.path.join(input_dir, file_in_dir)
        print(full_file_path)
        try:
            sequence = midi_io.midi_to_sequence_proto(
                tf.io.gfile.GFile(full_file_path, 'rb').read())
        except midi_io.MIDIConversionError as e:
            tf.logging.warning('Could not parse midi file %s. Error was: %s',
                               full_file_path, e)
            # BUG FIX: skip this file on conversion failure. Without `continue`,
            # `sequence` would be undefined on the first iteration (NameError)
            # or left over from the previous file, writing the wrong data.
            continue

        sequence.collection_name = os.path.basename(work_dir)
        sequence.filename = os.path.join(output_dir,
                                         os.path.basename(full_file_path))
        sequence.id = note_sequence_io.generate_note_sequence_id(
            sequence.filename, sequence.collection_name, 'midi')

        # `sequence` is always truthy here after a successful conversion;
        # the guard is kept from the original as a cheap safety check.
        if sequence:
            writer.write(sequence)

# Build a melody-RNN preprocessing pipeline and run it serially over the
# anthems TFRecord file, writing the resulting datasets under `output_dir`
# (defined elsewhere in this script).
filenames = [anthems_file]
# NOTE(review): `dataset` is never used in this snippet — the pipeline below
# reads `anthems_file` directly via pipeline.tf_record_iterator. Presumably
# leftover from an earlier tf.data-based version; confirm before removing.
dataset = tf.data.TFRecordDataset(filenames)

config = melody_rnn_model.default_configs['attention_rnn']
# eval_ratio=0.0: all examples go to the training dataset, none held out.
pipeline_instance = melody_rnn_pipeline.get_pipeline(config, eval_ratio=0.0)

pipeline.run_pipeline_serial(
    pipeline_instance,
    pipeline.tf_record_iterator(anthems_file, pipeline_instance.input_type),
    output_dir)
def run_from_flags(pipeline_instance):
  """Run the given pipeline serially over FLAGS.input into FLAGS.output_dir."""
  tf.logging.set_verbosity(tf.logging.INFO)
  source = pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type)
  pipeline.run_pipeline_serial(pipeline_instance, source, FLAGS.output_dir)
def run_from_flags(pipeline_instance):
    """Drive `pipeline_instance` over the TFRecords named by FLAGS.input."""
    tf.logging.set_verbosity(tf.logging.INFO)
    records = pipeline.tf_record_iterator(
        FLAGS.input, pipeline_instance.input_type)
    pipeline.run_pipeline_serial(pipeline_instance, records, FLAGS.output_dir)