def learn(self, *training_set: Union[List[Note], ChordProgression], epochs=None, callback=None):
    """Build the network and train it on the given training set.

    :param training_set: alternating ``List[Note]`` and ``ChordProgression``
        items; consumed pairwise via ``nwise_disjoint(training_set, 2)``.
    :param epochs: number of training epochs; falls back to ``self.epochs``
        when falsy.
    :param callback: optional Keras callback; when given, per-epoch console
        output is silenced (``verbose=0``).
    :return: ``True`` if training ran to completion (was not stopped early).
    """
    # One-hot encoding sizes are derived from the data before the net is
    # built, so _build_net() can size its output layers accordingly.
    self.maxtsbq = max(n.ticks_since_beat_quantised for notes, _ in nwise_disjoint(training_set, 2) for n in notes)
    self.maxdq = max(n.duration_quantised for notes, changes in nwise_disjoint(training_set, 2) for n in notes)
    self.maxbeatdiff = max(m.beat - n.beat for notes, _ in nwise_disjoint(training_set, 2) for n, m in nwise(notes, 2))
    self.model = self._build_net()
    # Single-output views over the shared model, one per prediction head.
    self.pitch_model = keras.models.Model(inputs=self.model.inputs, outputs=self.model.outputs[0])
    self.tsbq_model = keras.models.Model(inputs=self.model.inputs, outputs=self.model.outputs[1])
    self.dq_model = keras.models.Model(inputs=self.model.inputs, outputs=self.model.outputs[2])
    x, y = self._all_training_data(training_set)
    # Stateful recurrent layers carry hidden state between consecutive
    # samples, so they need batch size 1 and unshuffled data.
    stateful = any(
        isinstance(layer, keras.layers.Recurrent) and layer.stateful for layer in self.model.layers)
    kwargs = {'batch_size': 1, 'shuffle': False} if stateful else {}
    if callback:
        kwargs.update({'callbacks': [callback], 'verbose': 0})
    self.model.fit(x, y, epochs=epochs or self.epochs, **kwargs)
    return not self.model.stop_training
def _all_training_data(self, training_set: Iterable[Union[List[Note], ChordProgression]]):
    """Encode the training set as network inputs and one-hot targets,
    reporting progress as it goes.

    :param training_set: alternating note lists and chord progressions,
        paired up by ``nwise_disjoint(training_set, 2)``.
    :return: ``(inputs, [pitch-class, tsbq, duration, octave, beat-diff])``,
        all boolean one-hot numpy arrays.
    """
    # Use the GUI progressbar's text area when one is attached, else stdout.
    msg_callback = self.progressbar.set_text \
        if isinstance(self.progressbar, gui.viewmodel.KerasProgressbar) \
        else print
    msg_callback('Processing training data...')
    # nwise(notes, self.order + 1) yields len(notes) - self.order windows;
    # the previous "- self.order + 1" overcounted the target by one per song.
    # max(0, ...) guards against songs shorter than the window.
    len_data = sum(max(0, len(notes) - self.order) for notes in training_set[::2])
    if self.progressbar is None:
        self.progressbar = keras.utils.Progbar(len_data)
    else:
        self.progressbar.set_params({'samples': len_data})
    x, p, t, d, o, b = [], [], [], [], [], []
    for notes, changes in nwise_disjoint(training_set, 2):
        # The first `order` notes of each window are the network input;
        # the last note supplies the prediction targets.
        for v in nwise(notes, self.order + 1):
            self.progressbar.add(1)
            xx = self._encode_network_input(v[:self.order], changes.unique(v[-1].beat - 1, self.chord_radius), changes)
            # Lazily create one accumulator per network input tensor.
            if not x:
                x = [[] for _ in xx]
            for xi, xxi in zip(x, xx):
                xi.append(xxi)
            p.append(encode_int(v[-1].abcnote.value, 12))
            t.append(encode_int(v[-1].ticks_since_beat_quantised, self.maxtsbq + 1))
            d.append(encode_int(v[-1].duration_quantised, self.maxdq + 1))
            o.append(encode_int(v[-1].octave, NUM_OCTAVES))
            b.append(encode_int(v[-1].beat - v[-2].beat, self.maxbeatdiff + 1))
    self.progressbar.update(self.progressbar.target, force=True)
    msg_callback('Starting training...')
    return [np.array(xi, dtype=bool) for xi in x],\
        [np.array(arr, dtype=bool) for arr in [p, t, d, o, b]]
def _all_training_pairs(self, notes: List[Note]) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    1-of-N binary encoding of all pairs of inputs (past notes and current
    chord) and outputs (next note) on the training set
    """
    inputs, targets = [], []
    for window in nwise(notes, self.order + 1):
        current = window[-1]
        chord = self.changes[current.beat - 1]
        inputs.append(self._encode_network_input(window[:-1], chord))
        targets.append(current.pitch)
    return numpy.array(inputs), numpy.array(targets)
def _all_training_pairs(
        self, notes: List[Note]) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    1-of-N binary encoding of all pairs of inputs (past notes and current
    chord) and outputs (next note) on the training set
    """
    windows = list(nwise(notes, self.order + 1))
    x = [
        self._encode_network_input(w[:self.order],
                                   self.changes[w[-1].beat - 1])
        for w in windows
    ]
    y = [w[-1].pitch for w in windows]
    return numpy.array(x), numpy.array(y)
def learn(self, seq: Sequence[State]):
    """
    High-level interface to train the Markov-chain.

    :param seq: The complete sequence on which to train.
    """
    prepared = self._ensure_seq(seq)
    self.training_prep(prepared)
    # Feed every sliding window of order + 1 states to the chain.
    for window in nwise(prepared, self.order + 1):
        self.training_step(window)
    self.training_finish_normalize()
def learn(self, notes: List[Note], *_, **__):
    """Train one Markov chain per chord in the progression.

    Each chain is prepped on the full pitch sequence, then each
    (order + 1)-gram is fed to the chain of the chord active at the
    gram's last note. Extra positional/keyword arguments are ignored
    so the signature matches the other ``learn`` implementations.

    :param notes: the complete note sequence to train on.
    """
    # Hoisted: the pitch list was rebuilt inside the per-chord loop (and
    # again for self.past) although it never changes.
    pitches = [n.pitch for n in notes]
    self.markovs_by_chord = {
        chord: Markov(self.order)
        for chord in self.changes
    }
    for markov in self.markovs_by_chord.values():
        # Each chain gets its own copy, as before, in case training_prep
        # keeps or mutates the list it is given.
        markov.training_prep(list(pitches))
    for np1gram in nwise(notes, self.order + 1):
        chord = self.changes[np1gram[-1].beat - 1]
        self.markovs_by_chord[chord].training_step(
            [n.pitch for n in np1gram])
    for markov in self.markovs_by_chord.values():
        markov.training_finish_normalize()
    # Seed generation context with the first `order` pitches.
    self.past = pitches[:self.order]
def _all_training_data(self, training_set: Iterable[Union[List[Note], ChordProgression]]):
    """
    1-of-N binary encoding of all pairs of inputs (past notes and current
    chord) and outputs (next note) on the training set
    """
    inputs = []
    pitches, tsbqs, durations = [], [], []
    for notes, changes in nwise_disjoint(training_set, 2):
        for window in nwise(notes, self.order + 1):
            current = window[-1]
            encoded = self._encode_network_input(
                window[:self.order],
                changes.unique(current.beat - 1, self.chord_radius), changes)
            # One accumulator list per network input tensor, created lazily.
            if not inputs:
                inputs = [[] for _ in encoded]
            for column, part in zip(inputs, encoded):
                column.append(part)
            # NOTE(review): MIDI pitch spans 0..127, which needs 128
            # classes — confirm encode_int(pitch, 127) handles pitch 127.
            pitches.append(encode_int(current.pitch, 127))
            tsbqs.append(encode_int(current.ticks_since_beat_quantised, self.maxtsbq + 1))
            durations.append(encode_int(current.duration_quantised, self.maxdq + 1))
    return [np.array(column, dtype=bool) for column in inputs], \
        [np.array(pitches, dtype=bool), np.array(tsbqs, dtype=bool), np.array(durations, dtype=bool)]
def _all_training_data(self, training_set: Iterable[Union[List[Note], ChordProgression]]):
    """
    1-of-N binary encoding of all pairs of inputs (past notes and current
    chord) and outputs (next note) on the training set

    :param training_set: alternating note lists and chord progressions,
        paired up by ``nwise_disjoint(training_set, 2)``.
    :return: ``(inputs, [pitch, tsbq, duration])`` — lists of boolean
        one-hot numpy arrays.
    """
    x, p, t, d = [], [], [], []
    for notes, changes in nwise_disjoint(training_set, 2):
        # The first `order` notes of each window are the network input;
        # the last note supplies the prediction targets.
        for v in nwise(notes, self.order + 1):
            xx = self._encode_network_input(
                v[:self.order],
                changes.unique(v[-1].beat - 1, self.chord_radius), changes)
            # Lazily create one accumulator list per network input tensor.
            if not x:
                x = [[] for _ in xx]
            for xi, xxi in zip(x, xx):
                xi.append(xxi)
            # NOTE(review): MIDI pitch spans 0..127, which needs 128
            # classes — confirm encode_int(v[-1].pitch, 127) handles 127.
            p.append(encode_int(v[-1].pitch, 127))
            t.append(
                encode_int(v[-1].ticks_since_beat_quantised, self.maxtsbq + 1))
            d.append(encode_int(v[-1].duration_quantised, self.maxdq + 1))
    return [np.array(xi, dtype=bool) for xi in x], \
        [np.array(p, dtype=bool), np.array(t, dtype=bool), np.array(d, dtype=bool)]
def _all_training_data(self, training_set: Iterable[Union[List[Note], ChordProgression]]):
    """Encode the training set as network inputs and one-hot targets,
    reporting progress as it goes.

    :param training_set: alternating note lists and chord progressions,
        paired up by ``nwise_disjoint(training_set, 2)``.
    :return: ``(inputs, [pitch-class, tsbq, duration, octave, beat-diff])``,
        all boolean one-hot numpy arrays.
    """
    # Use the GUI progressbar's text area when one is attached, else stdout.
    msg_callback = self.progressbar.set_text \
        if isinstance(self.progressbar, gui.viewmodel.KerasProgressbar) \
        else print
    msg_callback('Processing training data...')
    # nwise(notes, self.order + 1) yields len(notes) - self.order windows;
    # the previous "- self.order + 1" overcounted the target by one per
    # song. max(0, ...) guards against songs shorter than the window.
    len_data = sum(
        max(0, len(notes) - self.order) for notes in training_set[::2])
    if self.progressbar is None:
        self.progressbar = keras.utils.Progbar(len_data)
    else:
        self.progressbar.set_params({'samples': len_data})
    x, p, t, d, o, b = [], [], [], [], [], []
    for notes, changes in nwise_disjoint(training_set, 2):
        # The first `order` notes of each window are the network input;
        # the last note supplies the prediction targets.
        for v in nwise(notes, self.order + 1):
            self.progressbar.add(1)
            xx = self._encode_network_input(
                v[:self.order],
                changes.unique(v[-1].beat - 1, self.chord_radius), changes)
            # Lazily create one accumulator list per network input tensor.
            if not x:
                x = [[] for _ in xx]
            for xi, xxi in zip(x, xx):
                xi.append(xxi)
            p.append(encode_int(v[-1].abcnote.value, 12))
            t.append(
                encode_int(v[-1].ticks_since_beat_quantised, self.maxtsbq + 1))
            d.append(encode_int(v[-1].duration_quantised, self.maxdq + 1))
            o.append(encode_int(v[-1].octave, NUM_OCTAVES))
            b.append(
                encode_int(v[-1].beat - v[-2].beat, self.maxbeatdiff + 1))
    self.progressbar.update(self.progressbar.target, force=True)
    msg_callback('Starting training...')
    return [np.array(xi, dtype=bool) for xi in x],\
        [np.array(arr, dtype=bool) for arr in [p, t, d, o, b]]
def learn(self, *training_set: Union[List[Note], ChordProgression], epochs=None, callback=None):
    """Build the network and fit it to the training set.

    :param training_set: alternating note lists and chord progressions.
    :param epochs: epoch count override; ``self.epochs`` is used when falsy.
    :param callback: optional Keras callback; silences per-epoch output.
    :return: ``True`` if training was not stopped early.
    """
    pairs = list(nwise_disjoint(training_set, 2))
    # Derive the one-hot encoding sizes from the data.
    self.maxtsbq = max(note.ticks_since_beat_quantised
                       for notes, _ in pairs for note in notes)
    self.maxdq = max(note.duration_quantised
                     for notes, _ in pairs for note in notes)
    self.maxbeatdiff = max(later.beat - earlier.beat
                           for notes, _ in pairs
                           for earlier, later in nwise(notes, 2))
    self.model = self._build_net()
    # One single-output view per prediction head, sharing the same weights.
    self.pitch_model, self.tsbq_model, self.dq_model = (
        keras.models.Model(inputs=self.model.inputs, outputs=head)
        for head in self.model.outputs[:3])
    x, y = self._all_training_data(training_set)
    # Stateful RNN layers must see samples one at a time, in order.
    stateful = any(isinstance(layer, keras.layers.Recurrent)
                   and layer.stateful
                   for layer in self.model.layers)
    fit_kwargs = {'batch_size': 1, 'shuffle': False} if stateful else {}
    if callback:
        fit_kwargs['callbacks'] = [callback]
        fit_kwargs['verbose'] = 0
    self.model.fit(x, y, epochs=epochs or self.epochs, **fit_kwargs)
    return not self.model.stop_training
def back_to_the_future():
    """
    Find out how common it is that a later note has a smaller tick_abs
    """
    song = 'Eb_therewill'
    notes = notes_from_file(r"input/{}.mid".format(song))
    # Count adjacent pairs whose absolute ticks go backwards in time.
    out_of_order = sum(
        1 for earlier, later in nwise(notes, 2)
        if earlier.tick_abs > later.tick_abs)
    print(out_of_order)