Example #1
File: Tuner.py Project: netay/PyTuner
    def update(self) -> None:
        try:
            wf_data = self.stream.read(flags.chunk,
                                       exception_on_overflow=False)
            wf_data = np.frombuffer(wf_data, dtype=float)
            if np.size(wf_data) == 0:
                return
            self.input_data = np.hstack(
                (self.input_data[wf_data.shape[0]:], wf_data))

            input_last = self.input_data[:self.fftWindow]
            # Normalization
            input_last = input_last / np.sqrt(
                (np.sum(np.square(input_last)) + 1e-6))
            self.spectral_data = np.fft.fft(input_last)
            self.spectral_data = np.abs(self.spectral_data[:self.fftWindow // 2])
            if np.sum(self.spectral_data) > 0:
                self.spectrum_trace2 = self.spectrum_trace
                self.spectrum_trace = self.spectrum_plot.plot(pen='m', width=3)
                self.spectrum_trace.setData(self.f, self.spectral_data)
                self.spectrum_plot.setLogMode(x=True, y=True)
                self.spectrum_plot.setYRange(-13, 2, padding=0.001)
                self.spectrum_plot.setXRange(np.log10(Note.C(4).freq),
                                             np.log10(Note.C(8).freq),
                                             padding=0.005)
                if self.spectrum_trace2 is not None:
                    self.spectrum_plot.removeItem(self.spectrum_trace2)
        except IOError:
            pass  # underflow, waiting for data
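The heart of update() is unit-energy normalization followed by an FFT magnitude spectrum. A minimal standalone sketch of that step, with a hypothetical window size and a synthetic 440 Hz signal in place of the stream data:

import numpy as np

fft_window = 4096                       # hypothetical window size
t = np.arange(fft_window) / 44100.0
signal = np.sin(2 * np.pi * 440.0 * t)  # synthetic 440 Hz input

# Normalize to unit energy; the 1e-6 guards against division by zero on silence
signal = signal / np.sqrt(np.sum(np.square(signal)) + 1e-6)
spectrum = np.abs(np.fft.fft(signal)[:fft_window // 2])
print(spectrum.argmax())                # bin index of the 440 Hz peak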
Example #2
    def next_pitch(self) -> int:
        n = Note()
        n.pitch = numpy.argmax(
            self.net_fn(
                [self._encode_network_input(self.past, self.current_chord)]))
        self.past.append(n)
        # Keep only the most recent `order` notes as context
        self.past = self.past[-self.order:]
        return n.pitch
Example #3
File: Tuner.py Project: netay/PyTuner
    def draw_note_lines(self):
        for i in range(-48, 94):
            # Naturals in red, accidentals in blue
            if i % 12 in {0, 2, 4, 5, 7, 9, 11}:
                color = 'r'
            else:
                color = 'b'
            # Slightly thicker line at the start of each octave
            if i % 12 == 0:
                thickness = 0.5
            else:
                thickness = 0.35
            inf_line = pg.InfiniteLine(movable=True,
                                       angle=90,
                                       label=Note(i).name(),
                                       pen=pg.mkPen(color, width=thickness))
            inf_line.setPos([np.log10(Note(i).freq), 0])
            self.spectrum_plot.addItem(inf_line)
Example #4
def midi2piece(file_name):
    piece = Piece(file_name)
    file_path = MIDI_PATH / Path(file_name + '.mid')
    midi = mid.MidiFile(file_path)

    has_pedal = check_pedal(midi)

    if not has_pedal:
        time_ticks = 0
        for m, msg in enumerate(midi.tracks[0]):
            time_ticks += msg.time
            if msg.type == 'note_on':
                if msg.velocity != 0:
                    # Scan forward for the matching note off
                    # (encoded as a note_on with velocity 0)
                    m_end = m + 1
                    delta_ticks = 0
                    while True:
                        end_msg = midi.tracks[0][m_end]
                        delta_ticks += end_msg.time
                        if end_msg.note == msg.note and end_msg.velocity == 0:
                            note = Note.from_midi(msg.note, msg.velocity,
                                                  time_ticks,
                                                  time_ticks + delta_ticks)
                            piece.append(note)
                            break
                        m_end += 1
    else:
        raise NotImplementedError("Pedal handling is not implemented yet")

    return piece
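A hypothetical call, assuming MIDI_PATH contains a pedal-free file named 'prelude.mid' (the name is a placeholder):

piece = midi2piece('prelude')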
Example #5
def notes_from_file(filename: str) -> List[Note]:
    midifile_rel = midi.read_midifile(filename)
    midifile_abs = copy.deepcopy(midifile_rel)
    midifile_abs.make_ticks_abs()

    # Convert MIDI events to our music representation: a list of Note objects
    notes = []
    active_notes = {}

    for ev_rel, ev_abs in zip(midifile_rel[-1], midifile_abs[-1]):
        if isinstance(ev_rel, midi.NoteOnEvent) and ev_rel.data[1]:
            n = Note()
            n.resolution = midifile_rel.resolution
            n.tick_abs = ev_abs.tick
            n.pitch = ev_rel.data[0]
            n.velocity = ev_rel.data[1]
            if n.pitch not in active_notes:
                active_notes[n.pitch] = {n}
            else:
                active_notes[n.pitch].add(n)
        # A NoteOn event with velocity 0 is equivalent to a NoteOff
        elif isinstance(ev_rel, midi.NoteOffEvent) or (
                isinstance(ev_rel, midi.NoteOnEvent) and ev_rel.data[1] == 0):
            n = active_notes[ev_rel.data[0]].pop()
            n.duration = ev_abs.tick - n.tick_abs
            notes.append(n)
    assert not any(active_notes.values()), "Some notes were not released"
    return sorted(notes, key=lambda note: note.tick_abs)
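A hedged usage sketch; the file name is a placeholder:

# Load a MIDI file and inspect the first note
notes = notes_from_file('solo.mid')
first = notes[0]
print(first.pitch, first.velocity, first.tick_abs, first.duration)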
Example #6
File: Tuner.py Project: netay/PyTuner
    def __init__(self,
                 stream,
                 rate: int,
                 lowest: Note = Note.C(2),
                 highest: Note = Note.C(8),
                 notes=()) -> None:
        super().__init__(notes)
        self.rate = rate
        self.stream = stream
        self.lowest = lowest
        self.highest = highest
        self.fftWindow = flags.fftWindow
        self.savingRange = 44100  # 1 second
        self.timer = Qt.QTimer()
        self.timer.timeout.connect(self.update)
        self.app = Qt.QApplication(sys.argv)
        self.window = pg.GraphicsWindow(title="PyTuner")
        self.window.setGeometry(5, 115, 1910, 1070)

        sp_labels = [(np.log10(Note(i).freq), Note(i).name())
                     for i in range(-48, 94)]
        sp_x_axis = pg.AxisItem(orientation='bottom')
        sp_x_axis.setTicks([sp_labels])
        self.spectrum_plot = self.window.addPlot(
            title="Sound spectrum",
            row=1,
            col=1,
            axisItems={'bottom': sp_x_axis})
        self.spectrum_trace = None
        self.spectrum_trace2 = None
        self.spectrum_plot.setXRange(np.log10(self.lowest.freq),
                                     np.log10(self.highest.freq),
                                     padding=0.005)

        self.f = np.linspace(1, self.rate / 2, self.fftWindow // 2)
        self.input_data = np.zeros((self.savingRange, ), dtype=float)
        self.spectral_data = np.zeros((self.fftWindow // 2, ), dtype=float)

        self.draw_note_lines()
        self.start()
Example #7
def generate(past: List[Note], changes: ChordProgression,
             melody_generator: Union[MelodyGenerator, MelodyAndRhythmGenerator,
                                     UniversalGenerator],
             rhythm_generator: Optional[RhythmGenerator],
             measures: int) -> List[Note]:
    """ Improvise a melody using two models for the melody and the rhythm, and one chord progression

        :param melody_generator:
        :param rhythm_generator:
        :param past: the seed
        :param changes: the chord progression.
            It can be the same as the melody generator, or equivalently None.
        :param measures: The number of measures to generate
        """
    if rhythm_generator is None:
        rhythm_generator = melody_generator  # type: RhythmGenerator
    universal = isinstance(melody_generator, UniversalGenerator)
    melody = past
    beat = melody[-1].beat
    chord = changes[beat]
    melody_generator.start(beat)
    while beat < measures * Note.meter:
        n = Note()
        n.resolution = past[0].resolution
        rest = None
        if universal:
            n.pitch, tsbq, dq, *rest = melody_generator.next()  # in the LSTM case, rest[0] is the beat diff
        else:
            tsbq, dq = rhythm_generator.next_rhythm()
        tsbq *= Note.ticks_quantisation_rate
        n.duration = dq * Note.duration_quantisation_rate
        if melody and (rest or melody[-1].ticks_since_beat > tsbq):
            beat_diff = rest[0] if rest else 1 + melody[-1].duration // n.resolution
            for _ in range(beat_diff):
                beat += 1
                # If the chord changed, inform the melody generator
                newchord = changes[beat]
                if newchord != chord:
                    melody_generator.start(beat)
                    chord = newchord
        # Prevent overlapping notes
        n.tick_abs = tsbq + n.resolution * \
            (beat if rest else max(beat, math.floor((melody[-1].tick_abs + melody[-1].duration) / n.resolution)))
        if not universal:
            n.pitch = melody_generator.next_pitch()
        melody.append(n)
        if melody_generator == rhythm_generator:
            melody_generator.add_past(n)
    return melody
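A hypothetical call, assuming a parsed seed, a chord progression object and a trained generator; my_changes and my_lstm are placeholder names, not part of the example above:

seed = notes_from_file('seed.mid')
melody = generate(seed, my_changes, melody_generator=my_lstm,
                  rhythm_generator=None, measures=12)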
Example #8
def convert_song(song: SongMetadata):
    """ Combine the quantized and the unquantized MIDI files 
    into one that aligns to measures but attempts to retain the original phrasing """
    from file_handlers import notes_to_file
    from file_handlers import notes_from_file
    Note.default_resolution = 960
    quantized = notes_from_file('weimardb/midi_from_db_quant/{}.mid'.format(song.name))
    original = notes_from_file('weimardb/midi_from_db/{}.mid'.format(song.name))
    lilypond = notes_from_file('weimardb/midi_from_ly/{}.mid'.format(song.name))
    # Histogram of the measure-number offsets between lilypond and quantized notes
    d = {}
    for n, m in zip(quantized, lilypond):
        if m.measure - n.measure not in d:
            d[m.measure - n.measure] = 1
        else:
            d[m.measure - n.measure] += 1
        if len(d) == 2:
            break
    meas_start = min(d.keys())  # type: int
    meas_size = Note.meter * Note.default_resolution
    meas_no = 0
    a, b, c = [], [], []  # quantized measure, original measure, combined output
    lp = []  # lilypond measure: only needed for durations
    for q, o, l in zip(quantized, original, lilypond):
        if q.measure != meas_no:
            if len(a) > 1:
                r = (a[-1].tick_abs - a[0].tick_abs) / (b[-1].tick_abs - b[0].tick_abs)  # stretch ratio
                a_m = (meas_no + 0.5) * meas_size  # middle of quantized measure
                b_m = b[0].tick_abs + (a_m - a[0].tick_abs) / r  # estimated middle of unquantized measure
                for a_j, b_j, l_j in zip(a, b, lp):
                    n = Note()
                    n.pitch = b_j.pitch
                    n.resolution = b_j.resolution
                    n.velocity = b_j.velocity
                    n.tick_abs = int(a_m + r * (b_j.tick_abs - b_m)) + (meas_start * meas_size)
                    n.duration = int(r * b_j.duration) or a_j.duration or l_j.duration
                    c.append(n)
            else:
                c += a
            meas_no = q.measure
            a, b, lp = [], [], []
        a.append(q)
        b.append(o)
        lp.append(l)
    notes_to_file(sorted(c, key=lambda p: p.tick_abs), 'weimardb/midi_combined/{}.mid'.format(song.name))
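The remapping above is a linear stretch around measure midpoints. A toy numeric check with made-up tick values (not from any real file):

# Quantized measure spans ticks 960-1920; the unquantized one spans 1000-2000
r = (1920 - 960) / (2000 - 1000)    # stretch ratio, here 0.96
a_m = 1440                          # middle of the quantized measure
b_m = 1000 + (a_m - 960) / r        # estimated middle of the unquantized measure
print(int(a_m + r * (1500 - b_m)))  # a tick at the unquantized middle maps to 1440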
Example #9
    def _build_net(self) -> keras.models.Model:
        dummy_input = self._encode_network_input(
            [Note()] * self.order, [Chord('C7')] * self.chord_order,
            self.changes)
        if self.stateful:
            in_notes = keras.layers.Input(batch_shape=(1,) + dummy_input[0].shape)
            in_chords = keras.layers.Input(batch_shape=(1,) + dummy_input[1].shape)
        else:
            in_notes = keras.layers.Input(shape=dummy_input[0].shape)
            in_chords = keras.layers.Input(shape=dummy_input[1].shape)

        lstm_out = keras.layers.LSTM(
            512, stateful=self.stateful,
            implementation=self._implementation)(in_notes)
        x = keras.layers.concatenate([lstm_out, in_chords])
        x = keras.layers.Dense(512)(x)

        pitch_tensor = keras.layers.Dense(12, activation=softmax)(x)
        tsbq_tensor = keras.layers.Dense(self.maxtsbq + 1,
                                         activation=softmax)(x)
        dq_tensor = keras.layers.Dense(self.maxdq + 1, activation=softmax)(x)
        octave_tensor = keras.layers.Dense(NUM_OCTAVES, activation=softmax)(x)
        beatdiff_tensor = keras.layers.Dense(self.maxbeatdiff + 1,
                                             activation=softmax)(x)

        model = keras.models.Model(inputs=[in_notes, in_chords],
                                   outputs=[
                                       pitch_tensor, tsbq_tensor, dq_tensor,
                                       octave_tensor, beatdiff_tensor
                                   ])
        model.compile(optimizer=rmsprop(), loss=categorical_crossentropy)

        self.octave_model = keras.models.Model(inputs=model.inputs,
                                               outputs=model.outputs[3])
        self.epochs = 30
        self.outfuns = (sampler(.1),) + (weighted_nlargest(2),) * 2 + (np.argmax,) * 2

        return model
Example #10
from music import Note, Pitch
from music import Buzzer, BuzzerBand
import oak


def main():
    b1 = Buzzer(oak.PINS[5])
    b2 = Buzzer(oak.PINS[6])

    band = BuzzerBand(b1, b2)
    band.play_tunes(IN_THE_HALL_OF_THE_MOUNTAIN_KING_1,
                    IN_THE_HALL_OF_THE_MOUNTAIN_KING_2, speed=2)


IN_THE_HALL_OF_THE_MOUNTAIN_KING_1 = (
    Note(Pitch.E[3], 10, 4, 0),

    Note(Pitch.A[2], 10, 0.5, 1), Note(Pitch.B[2], 10, 0.5, 1),
    Note(Pitch.C[3], 10, 0.5, 1), Note(Pitch.D[3], 10, 0.5, 1),
    Note(Pitch.E[3], 20, 0.5, 1), Note(Pitch.C[3], 10, 0.5, 1),
    Note(Pitch.E[3], 10, 1, 1),

    Note(Pitch.Eb[3], 20, 0.5, 1), Note(Pitch.B[2], 10, 0.5, 1),
    Note(Pitch.Eb[3], 10, 1, 1),
    Note(Pitch.D[3], 20, 0.5, 1), Note(Pitch.Bb[3], 10, 0.5, 1),
    Note(Pitch.D[3], 10, 1, 1),

    Note(Pitch.A[2], 10, 0.5, 1), Note(Pitch.B[2], 10, 0.5, 1),
    Note(Pitch.C[3], 10, 0.5, 1), Note(Pitch.D[3], 10, 0.5, 1),
    Note(Pitch.E[3], 20, 0.5, 1), Note(Pitch.C[3], 10, 0.5, 1),
    Note(Pitch.E[3], 10, 0.5, 1), Note(Pitch.A[3], 10, 0.5, 1),
Example #11
major_scales = {
    'Db': ['Db', 'Eb', 'F', 'Gb', 'Ab', 'Bb', 'C', 'Db'],
    'Ab': ['Ab', 'Bb', 'C', 'Db', 'Eb', 'F', 'G', 'Ab'],
    'Eb': ['Eb', 'F', 'G', 'Ab', 'Bb', 'C', 'D', 'Eb'],
    'Bb': ['Bb', 'C', 'D', 'Eb', 'F', 'G', 'A', 'Bb'],
    'F': ['F', 'G', 'A', 'Bb', 'C', 'D', 'E', 'F'],
    'C': ['C', 'D', 'E', 'F', 'G', 'A', 'B', 'C'],
    'G': ['G', 'A', 'B', 'C', 'D', 'E', 'F#', 'G'],
    'D': ['D', 'E', 'F#', 'G', 'A', 'B', 'C#', 'D'],
    'A': ['A', 'B', 'C#', 'D', 'E', 'F#', 'G#', 'A'],
    'E': ['E', 'F#', 'G#', 'A', 'B', 'C#', 'D#', 'E'],
    'B': ['B', 'C#', 'D#', 'E', 'F#', 'G#', 'A#', 'B'],
    'F#': ['F#', 'G#', 'A#', 'B', 'C#', 'D#', 'E#', 'F#'],
    'C#': ['C#', 'D#', 'E#', 'F#', 'G#', 'A#', 'B#', 'C#'],
}
major_scales = {
    key: [Note(n) for n in notes]
    for key, notes in major_scales.items()
}


@pytest.mark.parametrize(('key', 'notes'), major_scales.items())
def test_major_scales_mode_I(key, notes):
    assert Scale(key, 'major', mode=1).notes == notes


@pytest.mark.parametrize(('key', 'notes'), major_scales.items())
@pytest.mark.parametrize('mode', range(1, 8))
def test_major_scales_modes(key, notes, mode):
    assert set(Scale(key, 'major', mode=1).notes) == set(
        Scale(notes[mode - 1], 'major', mode=mode).notes)
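A concrete instance of the identity the second test checks, assuming the Scale and Note API used above: D dorian (mode 2 built on the second degree of C major) contains the same pitch set as C major.

assert set(Scale('C', 'major', mode=1).notes) == set(
    Scale(Note('D'), 'major', mode=2).notes)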
Example #12
File: _base.py Project: marczellm/algorimp
    def inputshape(self) -> Tuple[int, ...]:
        """ Generates a dummy input matrix for the network and returns its shape. """
        return self._encode_network_input([Note()] * self.order,
                                          [Chord('C7')] * self.chord_order,
                                          self.changes)[0].shape
Example #13
from music import Note
import sys
# Create a note from a MIDI pitch number (0-127) given on the command line
note1 = Note(int(sys.argv[1]), 1)
print('\n--Result--')
print(note1.getPitch())
print('--EndResult--')
sys.exit()
Example #14
from music import Phrase
from music import Note
import sys
import time

phr = Phrase()
note1 = Note(10, 1)
note2 = Note(20, 1)
note3 = Note(30, 1)
note4 = Note(40, 1)
phr.addNote(note1)
phr.addNote(note2)
phr.addNote(note3)
phr.addNote(note4)
phr.empty()  # remove all notes from the phrase
time.sleep(1)
print('\n--Result--')
print(phr.getSize())
print('--EndResult--')
sys.exit()
Example #15
from music import Phrase
from music import Note
import sys

phr = Phrase()
note1 = Note(10, 5)
phr.addNote(note1)
print('--Result--')
print(phr.getEndTime() == 5.0)
print('--EndResult--')
sys.exit()
Example #16
    def get_note(self, transform=Fft, **transform_args):
        freq = self.get_freq(transform, **transform_args)
        return Note.from_frequency(freq)
Example #17
    def detectNotes(self):
        """Detect the notes in the audio recording self.data.  Plots a
        spectrogram with detected notes labelled."""
        
        fig = pylab.figure()
        fig.suptitle('Track Detection')

        # SPECTROGRAM
        # documentation at
        # http://matplotlib.org/api/pyplot_api.html?highlight=specgram#matplotlib.pyplot.specgram
        # 
        # this call to specgram is precise with regard to frequencies, but
        # blurry in time domain

        smoothAudio = Transcriber.smooth( self.data )

        (Pxx, freqs, bins, im) = pylab.specgram( smoothAudio, Fs=self.rate,
            NFFT=2**12, noverlap=2**8, sides='onesided', scale_by_freq=True)


        print "SHAPE OF freqs", freqs.shape

        # ------------------------
        # [BEGIN] identify runs of notes 
        # ------------------------

        # how many instantaneous spectra did we calculate
        (numBins, numSpectra) = Pxx.shape
        print "SHAPE OF Pxx:", Pxx.shape

        # how many seconds in entire audio recording
        numSeconds = float(self.data.size) / self.rate

        scaledPxx = 10 * np.log10(Pxx)


        def findPeaks(sample, minPeakVal=None):
            peakPos = []
            peakVal = []
            lastSlopeNeg = None
            for i in range(0, len(sample) - 1):
                # We compare two adjacent points on the frequency spectrum at
                # a time.

                # If the two points are equal, the slope is 0, so we move on
                # to the next point. For example, in the list [0 1 2 2 1 0]
                # there is clearly a peak, but it lies between the two 2s.
                if sample[i] != sample[i + 1]:
                    thisSlopeNeg = sample[i] > sample[i + 1]

                    # A rising slope followed by a falling one means a peak at i
                    if lastSlopeNeg is not None and not lastSlopeNeg and thisSlopeNeg:
                        if minPeakVal is not None and sample[i] >= minPeakVal:
                            peakPos.append(i)
                            peakVal.append(sample[i])

                    lastSlopeNeg = thisSlopeNeg

            # TODO some sort of statistical analysis of the significance of the
            # peaks found. For now, a magic number represents the minimum peak
            # value.

            return (peakPos, peakVal)


        # TODO find the optimal value of minPeakVal based on the spectrogram
        # determine thresholding value
        thresh = np.mean( Pxx )

        FREQ_THRESH = 10 
        lastNotes = [] 
        prevF0NoteNamesSciPitchQueue = deque(maxlen=5)
        def recentlySaw( noteNameSciPitch ):
            #print prevF0NoteNamesSciPitchQueue
            for prevF0NoteNames in prevF0NoteNamesSciPitchQueue:
                if noteNameSciPitch in prevF0NoteNames:
                    return True
            return False

        for t in range(0, numSpectra):

            print "-----------------------"

            # extract a block from the spectrogram
            sample = Pxx[:, t]
            sample = Transcriber.smooth( sample )

            #print "SHAPE OF sample:", sample.shape

            # find the peaks in this profile (peaks represent notes)
            (peakPos, peakVal) = findPeaks(sample, minPeakVal=thresh)


            noteNames = []

            def freqsAreSameNote( f1, oct1, f2, oct2):
                # scale freqs
                f1 = f1 / 2**oct1
                f2 = f2 / 2**oct2
                return abs(f1-f2) < FREQ_THRESH


            prevF0NoteNames = []
            # Go through notes backwards, from high to low.
            # Variable i represents peak number.
            for i in reversed(range(len(peakPos))):

                # Variable pos represents at which y-value in spectrogram this
                # peak was found. Variable intensity contains the intensity at
                # that peak; how much energy at that frequency.
                pos = peakPos[i]
                intensity = peakVal[i]


                f = freqs[pos]
                if 20 <= f <= 20000: # if it is audible to a human...

                    (noteName, octave, sciPitchNoteName) = Note.getNoteName( f )
                    noteNames.append(sciPitchNoteName)
    
                                     
                    # -------------------------------------------------------
                    # attempt to find fundamental frequency of this note
                    f0 = None
                    f0Pos = None
                    for j in range( i ):
                        otherFreq = freqs[ peakPos[j] ]
                        otherIntensity = peakVal[j]
                        (otherNoteName, otherOctave, otherSciPitch) = Note.getNoteName( otherFreq )

                        if freqsAreSameNote( f, octave, otherFreq, otherOctave) \
                           and intensity <= 0.5*otherIntensity \
                           and 20 <= otherFreq <= 20000:

                            # compute frequency of f moved down some number of
                            # octaves
                            f0 = f / 2.0**(octave-otherOctave)
                            f0Pos = j
                            break
                    # -------------------------------------------------------



                    if f0 is not None:

                        (f0NoteName, f0Octave, f0SciPitch) = Note.getNoteName(f0)
                        print("f0 = %f" % f0)
                        print("recently saw %s? %s" % (f0SciPitch, recentlySaw(f0SciPitch)))

                        # If we haven't recently handled this f0
                        if (f0SciPitch not in prevF0NoteNames) \
                          and (not recentlySaw( f0SciPitch )) \
                          and (f0SciPitch not in lastNotes):

                            prevF0NoteNames.append(f0SciPitch)

                            #print "%s @ %.2fHz" % (f0SciPitch, f0)
                    
                            # Annotate the spectrogram. Note that circles plotted actually
                            # appear as horizontal lines thanks to our logarithmic y scale.
                            time = (1.0 * t / numSpectra) * numSeconds
                            circle = plt.Circle( (time, f), 0.01, color='w')
                            fig.gca().add_artist(circle)

                            xPos = 1.0*t / numSpectra
                            yPos = f / freqs[-1]       # y position = f0 / max freq

                            plt.text( xPos, yPos, f0SciPitch, transform=fig.gca().transAxes)


            lastNotes = noteNames

            # save the F0s encountered in this time slice
            prevF0NoteNamesSciPitchQueue.append( prevF0NoteNames )




        # ------------------------
        # END [identify runs of notes]
        # ------------------------

        pylab.show()
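The slope-sign-change rule in findPeaks, shown as a compact standalone variant on a toy list (a sketch of the same idea, not a call into the nested function):

sample = [0, 1, 3, 3, 2, 0, 4, 1]
peaks = [i for i in range(1, len(sample) - 1)
         if sample[i - 1] < sample[i] >= sample[i + 1]]
print(peaks)  # [2, 6]: a plateau counts once, at its left edge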
Example #18
File: exercise.py Project: wairton/mr-ear
def get_random_note(lower=None, higher=None):
    lower = lower or Note('A', 0)
    higher = higher or Note('C', 8)
    n = random.randint(0, higher - lower)
    Player().perform([lower + Semitones(n)])
Example #19
from music import Phrase
from music import Note
import sys
inary = sys.argv[1].split(' ')
phr = Phrase()
note1 = Note(int(inary[0]), 1)
note2 = Note(int(inary[1]), 1)
note3 = Note(int(inary[2]), 1)
note4 = Note(int(inary[3]), 1)
phr.addNote(note1)
phr.addNote(note2)
phr.addNote(note3)
phr.addNote(note4)
print('\n--Result--')
print(phr.getSize())
print('--EndResult--')
sys.exit()
Example #20
velocity = None
start_seconds = None

# THE loop
piece_reconstructed = Piece(piece.name + ' reconstructed')

for f in tqdm(range(close_onsets.shape[0])):
    for t in range(close_onsets.shape[1] - 1):
        if close_onsets[f, t] < threshold_detection <= close_onsets[f, t + 1]:
            # Note on: convert the bin's frequency to a MIDI note number
            frequency = int(round(hz_to_midi(FREQUENCIES[f])))
            velocity = db_to_velocity(close_onsets[f, t + 1])
            start_seconds = float(time_vector[t + 1])
        elif close_onsets[f, t] >= threshold_detection > close_onsets[f, t + 1]:
            # Note off
            # TODO: decide between t and t+1 index to note off
            end_seconds = float(time_vector[t + 1])
            note = Note(frequency,
                        velocity=velocity,
                        start_seconds=start_seconds,
                        end_seconds=end_seconds)
            piece_reconstructed.append(note)

# Re-synthesize piece
signal_reconstructed = samples_set.synthesize(piece_reconstructed)
sd.play(signal_reconstructed, FS)

if __name__ == '__main__':
    pass
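Toy illustration of the edge detection used in the loop above (hypothetical values): a note on is where the envelope crosses the threshold upward, a note off where it crosses downward.

env = [0, 0, 5, 9, 8, 2, 0]
threshold_detection = 3
ons = [t + 1 for t in range(len(env) - 1) if env[t] < threshold_detection <= env[t + 1]]
offs = [t + 1 for t in range(len(env) - 1) if env[t] >= threshold_detection > env[t + 1]]
print(ons, offs)  # [2] [5]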