Example #1
	def evaluate(self):
		# Load graph
		model = Transformer(trainable=False)
		print("Graph loaded")

		# Load data
		X, Sources, Targets = model.data_helper.load_test_datasets()

		# Start testing
		sv = tf.train.Supervisor()
		saver = sv.saver
		with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
			saver.restore(sess, tf.train.latest_checkpoint(pm.logdir))
			print("Restored!")

			# Read the latest model name from the checkpoint file
			with codecs.open(pm.logdir + '/checkpoint', 'r', encoding='utf-8') as ckpt:
				mname = ckpt.read().split('"')[1]

			# Inference
			if not os.path.exists('results'):
				os.mkdir('results')
			with codecs.open("results/" + mname, "w", encoding="utf-8") as f:
				list_of_refs, hypothesis = [], []
				num_batch = len(X) // pm.batch_size
				for i in range(num_batch):
					# Get mini batches
					x = X[i * pm.batch_size: (i + 1) * pm.batch_size]
					sources = Sources[i * pm.batch_size: (i + 1) * pm.batch_size]
					targets = Targets[i * pm.batch_size: (i + 1) * pm.batch_size]

					# Auto-regressive inference
					preds = np.zeros((pm.batch_size, pm.maxlen), dtype=np.int32)
					for j in range(pm.maxlen):
						pred = sess.run(model.predicts, {model.x: x, model.y: preds})
						preds[:, j] = pred[:, j]

					for source, target, pred in zip(sources, targets, preds):
						res = " ".join(self.idx2en[idx] for idx in pred).split("<EOS>")[0].strip()
						f.write("- source: {}\n".format(source))
						f.write("- ground truth: {}\n".format(target))
						f.write("- predict: {}\n\n".format(res))
						f.flush()

						# Bleu Score
						ref = target.split()
						predicts = res.split()
						if len(ref) > pm.min_word_count and len(predicts) > pm.min_word_count:
							list_of_refs.append([ref])
							hypothesis.append(predicts)

				score = corpus_bleu(list_of_refs, hypothesis)
				f.write("Bleu Score = {}".format(100 * score))

		print("MSG : Done for testing!")
Example #2
	def train(self):
		# Construct model
		model = Transformer()
		print("Graph loaded")
		init = tf.global_variables_initializer()

		config = tf.ConfigProto()
		config.gpu_options.allow_growth = True

		# Start training
		sv = tf.train.Supervisor(logdir=pm.logdir, save_model_secs=0, init_op=init)
		saver = sv.saver
		with sv.managed_session(config=config) as sess:
			for epoch in range(1, pm.num_epochs + 1):
				if sv.should_stop():
					break
				for _ in tqdm(range(model.num_batch), total=model.num_batch, ncols=70, leave=False, unit='b'):
					sess.run(model.optimizer)

				gs = sess.run(model.global_step)
				saver.save(sess, pm.logdir + '/model_epoch_{}_global_step_{}'.format(epoch, gs))

		print("MSG : Done for training!")
Example #3
def init_app(block_attention, hierarchical, nade, num_layers, dropout,
             input_dropout, per_head_dim, num_heads,
             local_position_embedding_dim, position_ff_dim, suffix,
             subdivision, sequence_size, velocity_quantization,
             max_transposition, port):
    global metadatas
    global _subdivision
    global _batch_size
    global _banned_instruments
    global _temperature
    global _lowest_entropy_first
    global _context_size

    _subdivision = subdivision
    _batch_size = 1
    _banned_instruments = []
    _temperature = 1.2
    _lowest_entropy_first = True

    gpu_ids = [int(gpu) for gpu in range(torch.cuda.device_count())]
    print(gpu_ids)

    dataset_manager = DatasetManager()
    arrangement_dataset_kwargs = {
        'transpose_to_sounding_pitch': True,
        'subdivision': subdivision,
        'sequence_size': sequence_size,
        'velocity_quantization': velocity_quantization,
        'max_transposition': max_transposition,
        'compute_statistics_flag': False
    }
    dataset: ArrangementDataset = dataset_manager.get_dataset(
        name='arrangement', **arrangement_dataset_kwargs)

    reducer_input_dim = num_heads * per_head_dim

    processor_encoder = ArrangementDataProcessor(
        dataset=dataset,
        embedding_dim=reducer_input_dim - local_position_embedding_dim,
        reducer_input_dim=reducer_input_dim,
        local_position_embedding_dim=local_position_embedding_dim,
        flag_orchestra=False,
        block_attention=False)

    processor_decoder = ArrangementDataProcessor(
        dataset=dataset,
        embedding_dim=reducer_input_dim - local_position_embedding_dim,
        reducer_input_dim=reducer_input_dim,
        local_position_embedding_dim=local_position_embedding_dim,
        flag_orchestra=True,
        block_attention=block_attention)

    _context_size = processor_decoder.num_frames_orchestra - 1

    global model
    model = Transformer(
        dataset=dataset,
        data_processor_encoder=processor_encoder,
        data_processor_decoder=processor_decoder,
        num_heads=num_heads,
        per_head_dim=per_head_dim,
        position_ff_dim=position_ff_dim,
        hierarchical_encoding=hierarchical,
        block_attention=block_attention,
        nade=nade,
        num_layers=num_layers,
        dropout=dropout,
        input_dropout=input_dropout,
        conditioning=True,
        lr=0,
        gpu_ids=gpu_ids,
        suffix=suffix,
    )

    model.load_overfit()
    model.cuda()

    # TODO: the piano source should be modifiable (e.g. by dropping a MusicXML file?)
    filepath = "/home/leo/Recherche/Databases/Orchestration/arrangement_mxml/source_for_generation/chopin_Prel_Op28_20_xs.xml"
    global _piano
    global _rhythm_piano
    global _orchestra
    global _orchestra_silenced_instruments
    _piano, _rhythm_piano, _orchestra, _orchestra_silenced_instruments = \
        model.data_processor_encoder.init_generation_filepath(_batch_size, filepath,
                                                              banned_instruments=_banned_instruments,
                                                              subdivision=_subdivision)

    # launch the script
    # use threaded=True to fix Chrome/Chromium engine hanging on requests
    # [https://stackoverflow.com/a/30670626]
    local_only = False
    if local_only:
        # accessible only locally:
        app.run(threaded=True)
    else:
        # accessible from outside:
        app.run(host='0.0.0.0', port=port, threaded=True)
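`init_app` keeps its state in module-level globals so the Flask request handlers can reach it. A common alternative, sketched here with only the standard library and hypothetical names, is to bundle those fields into a single config object and hand that to the app:

from dataclasses import dataclass, field

@dataclass
class GenerationConfig:
    # Same fields the example stores as module-level globals
    subdivision: int = 4
    batch_size: int = 1
    banned_instruments: list = field(default_factory=list)
    temperature: float = 1.2
    lowest_entropy_first: bool = True
    context_size: int = 0

config = GenerationConfig(subdivision=8, temperature=1.2)
# e.g. app.config['generation'] = config, read back inside the handlers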
Example #4
# Get dataset
dataset_manager = DatasetManager()
dataset, processor_decoder, processor_encoder, processor_encodencoder = \
    dataset_import.get_dataset(dataset_manager, args.dataset_type, args.subdivision, args.sequence_size,
                               args.velocity_quantization, args.max_transposition,
                               args.num_heads, args.per_head_dim, args.local_position_embedding_dim,
                               args.block_attention,
                               group_instrument_per_section, args.nade, cpc_config_name, args.double_conditioning,
                               args.instrument_presence_in_encoder)

# Load model
model = Transformer(dataset=dataset,
                    data_processor_encodencoder=processor_encodencoder,
                    data_processor_encoder=processor_encoder,
                    data_processor_decoder=processor_decoder,
                    num_heads=args.num_heads,
                    per_head_dim=args.per_head_dim,
                    position_ff_dim=args.position_ff_dim,
                    enc_dec_conditioning=args.enc_dec_conditioning,
                    hierarchical_encoding=args.hierarchical,
                    block_attention=args.block_attention,
                    nade=args.nade,
                    conditioning=args.conditioning,
                    double_conditioning=args.double_conditioning,
                    num_layers=args.num_layers,
                    dropout=dropout,
                    input_dropout=input_dropout,
                    input_dropout_token=input_dropout_token,
                    lr=lr,
                    reduction_flag=reduction_flag,
                    gpu_ids=gpu_ids,
                    suffix=args.suffix,
                    mixup=mixup,
                    scheduled_training=scheduled_training)

model.load_overfit(device=device)
model.to(device)
model = model.eval()
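`model.eval()` only switches layers such as dropout into inference behaviour; it does not disable gradient tracking. For pure inference it is usually paired with `torch.no_grad()`, as in this generic sketch (the linear layer is a placeholder for the loaded Transformer):

import torch

model = torch.nn.Linear(4, 2)  # placeholder model
model = model.eval()           # inference mode for dropout/batch-norm layers

with torch.no_grad():          # skip autograd bookkeeping during generation
    out = model(torch.randn(1, 4))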
Example #5
def main(block_attention, hierarchical, nade, num_layers, dropout,
         input_dropout, input_dropout_token, per_head_dim, num_heads,
         local_position_embedding_dim, position_ff_dim, enc_dec_conditioning,
         lr, batch_size, num_epochs, action, loss_on_last_frame, mixup,
         midi_input, temperature, num_batches, label_smoothing,
         scheduled_training, dataset_type, conditioning, double_conditioning,
         instrument_presence_in_encoder, cpc_config_name, num_examples_sampled,
         suffix, subdivision, sequence_size, velocity_quantization,
         max_transposition, group_instrument_per_section):
    # Use all gpus available
    gpu_ids = [int(gpu) for gpu in range(torch.cuda.device_count())]
    print(gpu_ids)

    config = get_config()

    num_layers_l = [2, 3, 4, 5, 6]
    enc_dec_conditionings_l = ['split', 'single']
    sequence_sizes_l = [3, 5, 7]
    grid_search = False
    if grid_search:
        configs = list(
            itertools.product(
                *[num_layers_l, enc_dec_conditionings_l, sequence_sizes_l]))
        write_suffix = True
    else:
        configs = [(num_layers, enc_dec_conditioning, sequence_size)]
        write_suffix = False

    for this_config in configs:
        num_layers, enc_dec_conditioning, sequence_size = this_config
        if write_suffix:
            this_suffix = f'{suffix}_{num_layers}_{enc_dec_conditioning}_{sequence_size}'
        else:
            this_suffix = suffix

        # Get dataset
        dataset_manager = DatasetManager()
        dataset, processor_decoder, processor_encoder, processor_encodencoder = \
            dataset_import.get_dataset(dataset_manager, dataset_type, subdivision, sequence_size, velocity_quantization,
                                       max_transposition,
                                       num_heads, per_head_dim, local_position_embedding_dim, block_attention,
                                       group_instrument_per_section, nade, cpc_config_name, double_conditioning,
                                       instrument_presence_in_encoder)

        reduction_flag = dataset_type in [
            'reduction', 'reduction_small', 'reduction_large',
            'reduction_categorical', 'reduction_categorical_small',
            'reduction_midiPiano', 'reduction_midiPiano_small'
        ]

        if not conditioning:
            print("NO CONDITIONING ????!!!!!!!!!!!!")

        model = Transformer(dataset=dataset,
                            data_processor_encodencoder=processor_encodencoder,
                            data_processor_encoder=processor_encoder,
                            data_processor_decoder=processor_decoder,
                            num_heads=num_heads,
                            per_head_dim=per_head_dim,
                            position_ff_dim=position_ff_dim,
                            enc_dec_conditioning=enc_dec_conditioning,
                            hierarchical_encoding=hierarchical,
                            block_attention=block_attention,
                            nade=nade,
                            conditioning=conditioning,
                            double_conditioning=double_conditioning,
                            num_layers=num_layers,
                            dropout=dropout,
                            input_dropout=input_dropout,
                            input_dropout_token=input_dropout_token,
                            lr=lr,
                            reduction_flag=reduction_flag,
                            gpu_ids=gpu_ids,
                            suffix=this_suffix,
                            mixup=mixup,
                            scheduled_training=scheduled_training)

        if action in ['generate', 'visualize']:
            model.load()
            overfit_flag = False
        elif action in [
                'generate_overfit', 'train_from_checkpoint',
                'visualize_overfit'
        ]:
            model.load_overfit()
            overfit_flag = True

        model.cuda()

        if action in ['train', 'train_from_checkpoint']:
            print(f"Train the model on gpus {gpu_ids}")
            model.train_model(cache_dir=dataset_manager.cache_dir,
                              batch_size=batch_size,
                              num_epochs=num_epochs,
                              num_batches=num_batches,
                              label_smoothing=label_smoothing,
                              loss_on_last_frame=loss_on_last_frame)
            overfit_flag = True

        if action in ['generate', 'generate_overfit']:
            print('Generation')
            ascii_melody = MARIO_MELODY
            # score, tensor_chorale, tensor_metadata = mode.generation_from_ascii(
            #     ascii_melody=ascii_melody
            # )
            # score.show()
            # score, tensor_chorale, tensor_metadata = model.generation(
            #     num_tokens_per_beat=8,
            #     num_beats=64 * 4,
            #     temperature=1.
            # )
            # score, tensor_chorale, tensor_metadata = model.generation(
            #     num_tokens_per_beat=8,
            #     num_beats=64 * 4,
            #     num_experiments=4,
            #     link_experiments=False,
            #     temperature=1.2
            # )
            # score, tensor_chorale, tensor_metadata = model.plot_attentions()
            # score, tensor_chorale, tensor_metadata = model.unconstrained_generation(
            #     num_tokens_per_beat=8,
            #     num_beats=64 * 4)

            if dataset_type in [
                    'arrangement', 'arrangement_small',
                    'arrangement_midiPiano', 'arrangement_midiPiano_small',
                    'arrangement_voice', 'arrangement_voice_small'
            ]:
                # (opposite to the standard convention) increasing temperature reduces agitation:
                # cold means all events eventually get almost the same probability,
                # hot accentuates spikes

                # Number of complete passes over all time frames in non-auto-regressive sampling schemes
                number_sampling_steps = 1
                # Allows overriding the dataset quantization for generation
                subdivision_generation = subdivision
                # banned_instruments = ["Violin_1", "Violin_2", "Violoncello", "Contrabass", "Viola"]
                banned_instruments = []
                # Used for instruments_presence model
                unknown_instruments = []
                source_folder = f"{config['datapath']}/source_for_generation/"
                sources = [
                    {
                        "source_path":
                        source_folder + "mouss_tableaux_small.xml",
                        "writing_name": "mouss_tableaux_small",
                        "writing_tempo": "adagio",
                    },
                    # {"source_path": source_folder + "guillaume_1.mid",
                    #  "writing_name": "guillaume_1",
                    #  "writing_tempo": "adagio"
                    #  },
                    # {"source_path": source_folder + "guillaume_2.xml",
                    #  "writing_name": "guillaume_2",
                    #  "writing_tempo": "adagio"
                    #  },
                    {
                        "source_path":
                        source_folder + "chopin_Prel_Op28_20.xml",
                        "writing_name": "chopin_Prel_Op28_20",
                        "writing_tempo": "largo"
                    },
                    {
                        "source_path": source_folder + "b_1_1.xml",
                        "writing_name": "b_1_1",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path": source_folder + "b_3_3.xml",
                        "writing_name": "b_3_3",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path": source_folder + "b_3_4.xml",
                        "writing_name": "b_3_4",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path": source_folder + "b_7_2.xml",
                        "writing_name": "b_7_2",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path": source_folder + "testpiano.xml",
                        "writing_name": "testpiano",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path": source_folder + "schubert_21_1.xml",
                        "writing_name": "schubert_21_1",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path": source_folder + "schubert_20_1.xml",
                        "writing_name": "schubert_20_1",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path": source_folder + "Mozart_Nachtmusik.xml",
                        "writing_name": "Mozart_Nachtmusik",
                        "writing_tempo": "adagio"
                    },
                ]
                if overfit_flag:
                    write_dir = model.log_dir_overfitted
                else:
                    write_dir = model.log_dir

                if midi_input is not None:
                    sources = [{
                        'source_path': f'midi_inputs/{midi_input}',
                        'writing_name': f'{midi_input}',
                        'writing_tempo': 'adagio'
                    }]
                    write_dir = 'midi_inputs'

                for source in sources:
                    generation_from_file(
                        model=model,
                        temperature=temperature,
                        batch_size=num_examples_sampled,
                        filepath=source["source_path"],
                        write_dir=write_dir,
                        write_name=source["writing_name"],
                        banned_instruments=banned_instruments,
                        unknown_instruments=unknown_instruments,
                        writing_tempo=source["writing_tempo"],
                        subdivision=subdivision_generation,
                        number_sampling_steps=number_sampling_steps)

            elif dataset_type in [
                    'reduction', 'reduction_large', 'reduction_small',
                    'reduction_categorical', 'reduction_categorical_small'
            ]:
                # Allows overriding the dataset quantization for generation
                subdivision_generation = 8
                source_folder = f"{config['datapath']}/source_for_generation/"
                sources = [
                    {
                        "source_path": source_folder + "b_7_2_orch.xml",
                        "writing_name": "b_7_2_orch",
                        "writing_tempo": "adagio"
                    },
                    # {"source_path": source_folder + "mouss_tableaux_orch.xml",
                    #  "writing_name": "mouss_tableaux_orch",
                    #  "writing_tempo": "adagio"
                    #  },
                    # {"source_path": source_folder + "Debussy_SuiteBergam_Passepied_orch.xml",
                    #  "writing_name": "Debussy_SuiteBergam_Passepied_orch",
                    #  "writing_tempo": "adagio"
                    #  },
                    # {
                    #     "source_path": source_folder + "Romantic Concert Piece for Brass Orchestra_orch.xml",
                    #     "writing_name": "Romantic Concert Piece for Brass Orchestra_orch",
                    #     "writing_tempo": "adagio"
                    #  },
                    # {
                    #     "source_path": source_folder + "mozart_25_1.xml",
                    #     "writing_name": "mozart_25_1",
                    #     "writing_tempo": "adagio"
                    # },
                    # {
                    #     "source_path": source_folder + "mozart_25_2.xml",
                    #     "writing_name": "mozart_25_2",
                    #     "writing_tempo": "adagio"
                    # },
                    # {
                    #     "source_path": source_folder + "mozart_25_3.xml",
                    #     "writing_name": "mozart_25_3",
                    #     "writing_tempo": "adagio"
                    # },
                    {
                        "source_path":
                        source_folder + "brahms_symphony_2_1.xml",
                        "writing_name": "brahms_symphony_2_1",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path":
                        source_folder + "haydn_symphony_91_1.xml",
                        "writing_name": "haydn_symphony_91_1",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path":
                        source_folder + "mozart_symphony_183_4.xml",
                        "writing_name": "mozart_symphony_183_4",
                        "writing_tempo": "adagio"
                    },
                    {
                        "source_path":
                        source_folder + "mozart_symphony_183_2.xml",
                        "writing_name": "mozart_symphony_183_2",
                        "writing_tempo": "adagio"
                    },
                ]
                for source in sources:
                    reduction_from_file(model=model,
                                        temperature=temperature,
                                        batch_size=num_examples_sampled,
                                        filepath=source["source_path"],
                                        write_name=source["writing_name"],
                                        overfit_flag=overfit_flag,
                                        writing_tempo=source["writing_tempo"],
                                        subdivision=subdivision_generation)

            elif dataset_type == "lsdb":
                score, tensor_chorale, tensor_metadata = model.generation()
                score.write('xml', 'results/test.xml')

            elif dataset_type in ['bach', 'bach_small']:
                if nade and (not conditioning):
                    scores = generation_bach_nade(
                        model=model,
                        temperature=temperature,
                        ascii_melody=ascii_melody,
                        batch_size=num_examples_sampled,
                        force_melody=False,
                    )
                else:
                    scores = generation_bach(model=model,
                                             temperature=temperature,
                                             ascii_melody=ascii_melody,
                                             batch_size=num_examples_sampled,
                                             force_melody=False)

                if overfit_flag:
                    writing_dir = model.log_dir_overfitted
                else:
                    writing_dir = model.log_dir

                for batch_index, score in enumerate(scores):
                    score.write('xml', f'{writing_dir}/{batch_index}.xml')
        elif action in ['visualize', 'visualize_overfit']:
            log_dir = model.log_dir if action == 'visualize' else model.log_dir_overfitted
            visualize_arrangement(model, batch_size, log_dir)
    return
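The temperature comment in the arrangement branch notes that this model's convention is inverted relative to standard softmax sampling, where logits are divided by T so that low T sharpens the distribution and high T flattens it toward uniform. A minimal sketch of the standard convention, for comparison:

import numpy as np

def sample_with_temperature(logits, temperature=1.0, rng=np.random.default_rng()):
    # Standard convention: T < 1 accentuates spikes, T > 1 evens out probabilities
    # (the example's model maps temperature the other way around)
    scaled = logits / temperature
    probs = np.exp(scaled - scaled.max())  # numerically stable softmax
    probs /= probs.sum()
    return rng.choice(len(logits), p=probs)

token = sample_with_temperature(np.array([2.0, 1.0, 0.1]), temperature=1.2)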
Example #6
    def __init__(self,
                 writing_dir,
                 corpus_it_gen,
                 subdivision_model=2,
                 subdivision_read=4,
                 sequence_size=3,
                 velocity_quantization=2,
                 temperature=1.2):
        """
        :param subdivision: number of sixteenth notes per beat
        """
        self.subdivision_read = subdivision_read
        self.sequence_size = sequence_size
        self.velocity_quantization = velocity_quantization
        self.writing_dir = writing_dir

        #################################################################
        #  Need the old db used to train the model (yes it sucks...)
        dataset_manager = DatasetManager()
        arrangement_dataset_kwargs = {
            'transpose_to_sounding_pitch': True,
            'subdivision': subdivision_model,
            'sequence_size': sequence_size,
            'velocity_quantization': velocity_quantization,
            'max_transposition': 12,
            'compute_statistics_flag': False
        }
        dataset = dataset_manager.get_dataset(name='arrangement_large',
                                              **arrangement_dataset_kwargs)

        #  Model params (need to know them :))
        num_heads = 8
        per_head_dim = 64
        local_position_embedding_dim = 8
        position_ff_dim = 1024
        hierarchical = False
        block_attention = False
        nade = False
        conditioning = True
        double_conditioning = False
        num_layers = 2
        suffix = 'TEST'

        reducer_input_dim = num_heads * per_head_dim

        processor_encoder = ReductionDataProcessor(
            dataset=dataset,
            embedding_dim=reducer_input_dim - local_position_embedding_dim,
            reducer_input_dim=reducer_input_dim,
            local_position_embedding_dim=local_position_embedding_dim,
            flag='orchestra',
            block_attention=False)

        processor_decoder = ReductionDataProcessor(
            dataset=dataset,
            embedding_dim=reducer_input_dim - local_position_embedding_dim,
            reducer_input_dim=reducer_input_dim,
            local_position_embedding_dim=local_position_embedding_dim,
            flag='piano',
            block_attention=block_attention)

        processor_encodencoder = None
        #################################################################

        #################################################################
        # Init model
        # Use all gpus available
        gpu_ids = [int(gpu) for gpu in range(torch.cuda.device_count())]
        print(gpu_ids)

        self.model = Transformer(
            dataset=dataset,
            data_processor_encodencoder=processor_encodencoder,
            data_processor_encoder=processor_encoder,
            data_processor_decoder=processor_decoder,
            num_heads=num_heads,
            per_head_dim=per_head_dim,
            position_ff_dim=position_ff_dim,
            hierarchical_encoding=hierarchical,
            block_attention=block_attention,
            nade=nade,
            conditioning=conditioning,
            double_conditioning=double_conditioning,
            num_layers=num_layers,
            dropout=0.1,
            input_dropout=0.2,
            reduction_flag=True,
            lr=1e-4,
            gpu_ids=gpu_ids,
            suffix=suffix)
        #################################################################

        self.corpus_it_gen = corpus_it_gen

        self.temperature = temperature

        return
Example #7
class Reducter:
    def __init__(self,
                 writing_dir,
                 corpus_it_gen,
                 subdivision_model=2,
                 subdivision_read=4,
                 sequence_size=3,
                 velocity_quantization=2,
                 temperature=1.2):
        """
        :param subdivision: number of sixteenth notes per beat
        """
        self.subdivision_read = subdivision_read
        self.sequence_size = sequence_size
        self.velocity_quantization = velocity_quantization
        self.writing_dir = writing_dir

        #################################################################
        #  Need the old db used to train the model (yes it sucks...)
        dataset_manager = DatasetManager()
        arrangement_dataset_kwargs = {
            'transpose_to_sounding_pitch': True,
            'subdivision': subdivision_model,
            'sequence_size': sequence_size,
            'velocity_quantization': velocity_quantization,
            'max_transposition': 12,
            'compute_statistics_flag': False
        }
        dataset = dataset_manager.get_dataset(name='arrangement_large',
                                              **arrangement_dataset_kwargs)

        #  Model params (need to know them :))
        num_heads = 8
        per_head_dim = 64
        local_position_embedding_dim = 8
        position_ff_dim = 1024
        hierarchical = False
        block_attention = False
        nade = False
        conditioning = True
        double_conditioning = False
        num_layers = 2
        suffix = 'TEST'

        reducer_input_dim = num_heads * per_head_dim

        processor_encoder = ReductionDataProcessor(
            dataset=dataset,
            embedding_dim=reducer_input_dim - local_position_embedding_dim,
            reducer_input_dim=reducer_input_dim,
            local_position_embedding_dim=local_position_embedding_dim,
            flag='orchestra',
            block_attention=False)

        processor_decoder = ReductionDataProcessor(
            dataset=dataset,
            embedding_dim=reducer_input_dim - local_position_embedding_dim,
            reducer_input_dim=reducer_input_dim,
            local_position_embedding_dim=local_position_embedding_dim,
            flag='piano',
            block_attention=block_attention)

        processor_encodencoder = None
        #################################################################

        #################################################################
        # Init model
        # Use all gpus available
        gpu_ids = [int(gpu) for gpu in range(torch.cuda.device_count())]
        print(gpu_ids)

        self.model = Transformer(
            dataset=dataset,
            data_processor_encodencoder=processor_encodencoder,
            data_processor_encoder=processor_encoder,
            data_processor_decoder=processor_decoder,
            num_heads=num_heads,
            per_head_dim=per_head_dim,
            position_ff_dim=position_ff_dim,
            hierarchical_encoding=hierarchical,
            block_attention=block_attention,
            nade=nade,
            conditioning=conditioning,
            double_conditioning=double_conditioning,
            num_layers=num_layers,
            dropout=0.1,
            input_dropout=0.2,
            reduction_flag=True,
            lr=1e-4,
            gpu_ids=gpu_ids,
            suffix=suffix)
        #################################################################

        self.corpus_it_gen = corpus_it_gen

        self.temperature = temperature

        return

    def iterator_gen(self):
        return (score for score in self.corpus_it_gen())

    def __call__(self, model_path):
        #  Load model weights
        self.model.load_overfit(model_path)

        for arr_pair in self.iterator_gen():
            filepath = arr_pair['Orchestra']

            context_size = self.model.data_processor_decoder.num_frames_piano - 1

            #  Load input piano score
            piano_init, rhythm_orchestra, orchestra = \
                self.model.data_processor_encoder.init_reduction_filepath(batch_size=1,
                                                                          filepath=filepath,
                                                                          subdivision=self.subdivision_read)

            piano = self.model.generation_reduction(
                piano_init=piano_init,
                orchestra=orchestra,
                temperature=self.temperature,
                batch_size=1,
                plot_attentions=False)

            piano_cpu = piano[:, context_size:-context_size].cpu()
            # Last duration will be a quarter length
            duration_piano = np.asarray(
                list(rhythm_orchestra[1:]) + [self.subdivision_read]
            ) - np.asarray(list(rhythm_orchestra[:-1]) + [0])

            generated_piano_score = self.model.dataset.piano_tensor_to_score(
                piano_cpu,
                durations=duration_piano,
                subdivision=self.subdivision_read)

            # Copy the whole source folder to the writing dir
            src_dir = os.path.dirname(filepath)
            shutil.copytree(src_dir, self.writing_dir)
            # Remove the old (midi) file; use a plain string replace so the
            # path is not interpreted as a regex (as re.sub would)
            new_midi_path = filepath.replace(src_dir, self.writing_dir)
            os.remove(new_midi_path)
            # Write generated piano score and orchestra in xml
            generated_piano_score.write(
                fp=f"{self.writing_dir}/{filepath}_piano.xml", fmt='musicxml')
            orchestra.write(fp=f"{self.writing_dir}/{filepath}_orch.xml",
                            fmt='musicxml')

        return
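The duration computation in `__call__` subtracts each frame offset from the next one, giving one duration per event, and pads the final event with one beat (`subdivision_read` frames). `np.diff` expresses the same arithmetic more directly; a small sketch with made-up frame offsets:

import numpy as np

rhythm = [0, 4, 6, 12]   # frame offsets of successive events
subdivision_read = 4     # frames per beat

# Equivalent to the example's paired-list subtraction
durations = np.diff(np.asarray(rhythm + [rhythm[-1] + subdivision_read]))
# -> [4 2 6 4]: gaps between events, with the last event lasting one beat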
Example #8
def main(args):
    """

    :param args:
    :return:
    """
    dropout = 0.
    input_dropout = 0.
    input_dropout_token = 0.
    mixup = False
    scheduled_training = 0.
    group_instrument_per_section = False
    reduction_flag = False
    lr = 1.
    cpc_config_name = None
    subdivision = args.subdivision

    # Use all gpus available
    gpu_ids = [int(gpu) for gpu in range(torch.cuda.device_count())]
    print(f'Using GPUs {gpu_ids}')
    if len(gpu_ids) == 0:
        device = 'cpu'
    else:
        device = 'cuda'

    # Get dataset
    dataset_manager = DatasetManager()
    dataset, processor_decoder, processor_encoder, processor_encodencoder = \
        dataset_import.get_dataset(dataset_manager, args.dataset_type, args.subdivision, args.sequence_size,
                                   args.velocity_quantization, args.max_transposition,
                                   args.num_heads, args.per_head_dim, args.local_position_embedding_dim,
                                   args.block_attention,
                                   group_instrument_per_section, args.nade, cpc_config_name, args.double_conditioning,
                                   args.instrument_presence_in_encoder)

    # Load model
    model = Transformer(dataset=dataset,
                        data_processor_encodencoder=processor_encodencoder,
                        data_processor_encoder=processor_encoder,
                        data_processor_decoder=processor_decoder,
                        num_heads=args.num_heads,
                        per_head_dim=args.per_head_dim,
                        position_ff_dim=args.position_ff_dim,
                        enc_dec_conditioning=args.enc_dec_conditioning,
                        hierarchical_encoding=args.hierarchical,
                        block_attention=args.block_attention,
                        nade=args.nade,
                        conditioning=args.conditioning,
                        double_conditioning=args.double_conditioning,
                        num_layers=args.num_layers,
                        dropout=dropout,
                        input_dropout=input_dropout,
                        input_dropout_token=input_dropout_token,
                        lr=lr,
                        reduction_flag=reduction_flag,
                        gpu_ids=gpu_ids,
                        suffix=args.suffix,
                        mixup=mixup,
                        scheduled_training=scheduled_training)

    model.load_overfit(device=device)
    model.to(device)
    model = model.eval()

    # Dir for writing generated files
    writing_dir = f'{os.getcwd()}/generation'
    os.makedirs(writing_dir, exist_ok=True)

    # Create server
    server_address = (args.ip, args.port)
    server = OrchestraServer(server_address, model, subdivision, writing_dir)
    print(f'[Server listening to {args.ip} on port {args.port}]')
    server.serve_forever()
Example #9
def main(
    midi_input,
    temperature,
    num_examples_sampled,
    suffix,
):
    # Use all gpus available
    gpu_ids = [int(gpu) for gpu in range(torch.cuda.device_count())]
    print(gpu_ids)

    hierarchical = False
    nade = False
    num_layers = 6
    dropout = 0.
    input_dropout = 0.
    input_dropout_token = 0.
    per_head_dim = 64
    num_heads = 8
    local_position_embedding_dim = 8
    position_ff_dim = 2048
    enc_dec_conditioning = 'split'
    lr = 1
    mixup = None
    scheduled_training = 0
    dataset_type = 'arrangement_voice'
    conditioning = True
    double_conditioning = None
    subdivision = 16
    sequence_size = 7
    velocity_quantization = 2
    max_transposition = 12
    group_instrument_per_section = False
    reduction_flag = False
    instrument_presence_in_encoder = False
    cpc_config_name = None
    block_attention = False

    # Get dataset
    dataset_manager = DatasetManager()
    dataset, processor_decoder, processor_encoder, processor_encodencoder = \
        dataset_import.get_dataset(dataset_manager, dataset_type, subdivision, sequence_size, velocity_quantization,
                                   max_transposition,
                                   num_heads, per_head_dim, local_position_embedding_dim, block_attention,
                                   group_instrument_per_section, nade, cpc_config_name, double_conditioning,
                                   instrument_presence_in_encoder)

    model = Transformer(dataset=dataset,
                        data_processor_encodencoder=processor_encodencoder,
                        data_processor_encoder=processor_encoder,
                        data_processor_decoder=processor_decoder,
                        num_heads=num_heads,
                        per_head_dim=per_head_dim,
                        position_ff_dim=position_ff_dim,
                        enc_dec_conditioning=enc_dec_conditioning,
                        hierarchical_encoding=hierarchical,
                        block_attention=block_attention,
                        nade=nade,
                        conditioning=conditioning,
                        double_conditioning=double_conditioning,
                        num_layers=num_layers,
                        dropout=dropout,
                        input_dropout=input_dropout,
                        input_dropout_token=input_dropout_token,
                        lr=lr,
                        reduction_flag=reduction_flag,
                        gpu_ids=gpu_ids,
                        suffix=suffix,
                        mixup=mixup,
                        scheduled_training=scheduled_training)

    model.load_overfit()
    model.cuda()

    print('Generation')
    # Allows overriding the dataset quantization for generation
    subdivision_generation = subdivision

    source = {
        'source_path': f'midi_inputs/{midi_input}',
        'writing_name': f'{midi_input}',
        'writing_tempo': 'adagio'
    }

    write_dir = 'midi_inputs'

    generation_from_file(model=model,
                         temperature=temperature,
                         batch_size=num_examples_sampled,
                         filepath=source["source_path"],
                         write_dir=write_dir,
                         write_name=source["writing_name"],
                         banned_instruments=[],
                         unknown_instruments=[],
                         writing_tempo=source["writing_tempo"],
                         subdivision=subdivision_generation,
                         number_sampling_steps=1)
    return
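The bare signature of `main` suggests a CLI decorator (click or similar) was stripped when the snippet was extracted. A hypothetical argparse shim to drive it, with made-up defaults:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--midi_input', required=True)
parser.add_argument('--temperature', type=float, default=1.2)
parser.add_argument('--num_examples_sampled', type=int, default=1)
parser.add_argument('--suffix', default='')
args = parser.parse_args()
main(args.midi_input, args.temperature, args.num_examples_sampled, args.suffix)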