Code Example #1
import random

from midiutil import MIDIFile

MidiFinal = MIDIFile(
    1
)  # One track, defaults to format 1 (tempo track is created automatically)
MidiFinal2 = MIDIFile(
    1
)  # One track, defaults to format 1 (tempo track is created automatically)
degrees = [60, 62, 64, 65, 67, 69, 71, 72]  # MIDI note number
track = 0
channel = 0
time = 0  # In beats
duration = 1  # In beats
tempo = 60  # In BPM
volume = 100  # 0-127, as per the MIDI standard
pos = 0
fitness = 0
j = 0

# NOTES
aBem = 56  # Bem = flat (bemol)
a = 57
aSus = 58  # Sus = sharp (sostenido)
bBem = 58
b = 59
cBem = 59
bSus = 60
c = 60
cSus = 61
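The excerpt ends in the middle of its note-name table. A minimal, hypothetical continuation, assuming the same midiutil calls the later examples on this page use (the output file name is made up), would look like:

MidiFinal.addTempo(track, time, tempo)
MidiFinal2.addTempo(track, time, tempo)

for pitch in degrees:
    MidiFinal.addNote(track, channel, pitch, time, duration, volume)
    time = time + 1

with open("final.mid", "wb") as output_file:  # hypothetical file name
    MidiFinal.writeFile(output_file)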
Code Example #2
            for r in staff_boxes:
                r.draw(img, (0, 0, 255), 2)
            for r in sharp_recs:
                r.draw(img, (0, 0, 255), 2)
            flat_recs_img = img.copy()
            for r in flat_recs:
                r.draw(img, (0, 0, 255), 2)

            cv2.imwrite('res.png', img)
            open_file('res.png')

            for note_group in note_groups:
                print([note.note + " " + note.sym for note in note_group])

            midi = MIDIFile(1)

            track = 0
            time = 0
            channel = 0
            volume = 100

            midi.addTrackName(track, time, "Track")
            midi.addTempo(track, time, 140)

            for note_group in note_groups:
                duration = None
                for note in note_group:
                    note_type = note.sym
                    if note_type == "1":
                        duration = 4
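The excerpt stops at the start of the duration table. A plausible continuation, assuming half/quarter/eighth symbols map to 2, 1 and 0.5 beats and that each recognized note exposes a MIDI pitch (the `note.pitch` attribute and the output file name below are assumptions):

                    elif note_type == "2":
                        duration = 2
                    elif note_type == "4":
                        duration = 1
                    elif note_type == "8":
                        duration = 0.5
                    midi.addNote(track, channel, note.pitch, time, duration, volume)
                    time += duration

            with open("res.mid", "wb") as output_file:  # hypothetical output name
                midi.writeFile(output_file)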
Code Example #3
    def handle(self, argv=None):
        """
        Main function.

        Parses command, load settings and dispatches accordingly.

        """
        help_message = "Please supply chord progression!. See --help for more options."
        parser = argparse.ArgumentParser(
            description=
            'chords2midi - Create MIDI files from written chord progressions.\n'
        )
        parser.add_argument('progression',
                            metavar='U',
                            type=str,
                            nargs='*',
                            help=help_message)
        parser.add_argument('-B',
                            '--bassline',
                            action='store_true',
                            default=False,
                            help='Throw an extra bassline on the pattern')
        parser.add_argument('-b',
                            '--bpm',
                            type=int,
                            default=80,
                            help='Set the BPM (default 80)')
        parser.add_argument('-t',
                            '--octave',
                            type=str,
                            default='4',
                            help='Set the octave(s) (ex: 3,4) (default 4)')
        parser.add_argument('-i',
                            '--input',
                            type=str,
                            default=None,
                            help='Read from an input file.')
        parser.add_argument('-k',
                            '--key',
                            type=str,
                            default='C',
                            help='Set the key (default C)')
        parser.add_argument('-n',
                            '--notes',
                            type=int,
                            default=99,
                            help='Notes in each chord (default all)')
        parser.add_argument('-d',
                            '--duration',
                            type=float,
                            default=1.0,
                            help='Set the chord duration (default 1)')
        parser.add_argument(
            '-D',
            '--directory',
            action='store_true',
            default=False,
            help=
            'Output the contents to the directory of the input progression.')
        parser.add_argument(
            '-H',
            '--humanize',
            type=float,
            default=0.0,
            help=
            'Set the amount to "humanize" (strum) a chord, in ticks - try .11 (default 0.0)'
        )
        parser.add_argument(
            '-o',
            '--output',
            type=str,
            help=
            'Set the output file path. Default is the current key and progression in the current location.'
        )
        parser.add_argument(
            '-O',
            '--offset',
            type=float,
            default=0.0,
            help='Set the amount to offset each chord, in ticks. (default 0.0)'
        )
        parser.add_argument('-p',
                            '--pattern',
                            type=str,
                            default=None,
                            help='Set the pattern. Available patterns: ' +
                            (', '.join(patterns.keys())))
        parser.add_argument(
            '-r',
            '--reverse',
            action='store_true',
            default=False,
            help='Reverse a progression from C-D-E format into I-II-III format'
        )
        parser.add_argument('-v',
                            '--version',
                            action='store_true',
                            default=False,
                            help='Display the current version of chords2midi')

        args = parser.parse_args(argv)
        self.vargs = vars(args)

        if self.vargs['version']:
            version = pkg_resources.require("chords2midi")[0].version
            print(version)
            return

        # Support `c2m I III V` and `c2m I,III,V` formats.
        if not self.vargs['input']:
            if len(self.vargs['progression']) < 1:
                print("You need to supply a progression! (ex I V vi IV)")
                return
            if len(self.vargs['progression']) < 2:
                progression = self.vargs['progression'][0].split(',')
            else:
                progression = self.vargs['progression']
        else:
            with open(self.vargs['input']) as fn:
                content = ''.join(fn.readlines()).strip()
                content = content.replace('\n', ' ').replace(',', '  ')
                progression = content.split(' ')
        og_progression = progression

        # If we're reversing, we don't need any of the MIDI stuff.
        if self.vargs['reverse']:
            result = ""
            key = self.vargs['key']
            for item in progression:
                comps = pychord.Chord(item).components()
                position = determine(comps, key, True)[0]
                if 'M' in position:
                    position = position.upper()
                    position = position.replace('M', '')
                if 'm' in position:
                    position = position.lower()
                    position = position.replace('m', '')
                if 'B' in position:
                    position = position + "b"
                    position = position.replace('B', '')

                result = result + position + " "
            print(result)
            return

        track = 0
        channel = 0
        ttime = 0
        duration = self.vargs['duration']  # In beats
        tempo = self.vargs['bpm']  # In BPM
        volume = 100  # 0-127, as per the MIDI standard
        bar = 0
        humanize_interval = self.vargs['humanize']
        directory = self.vargs['directory']
        num_notes = self.vargs['notes']
        offset = self.vargs['offset']
        key = self.vargs['key']
        octaves = self.vargs['octave'].split(',')
        root_lowest = self.vargs.get('root_lowest', False)
        bassline = self.vargs['bassline']
        pattern = self.vargs['pattern']

        # Could be interesting to do multiple parts at once.
        midi = MIDIFile(1)
        midi.addTempo(track, ttime, tempo)

        ##
        # Main generator
        ##
        has_number = False
        progression_chords = []

        # Apply patterns
        if pattern:
            if pattern not in patterns.keys():
                print("Invalid pattern! Must be one of: " +
                      (', '.join(patterns.keys())))
                return

            new_progression = []
            input_progression = progression[:]  # 2.7 copy
            pattern_mask = patterns[pattern]
            pattern_mask_index = 0
            current_chord = None

            while True:
                pattern_instruction = pattern_mask[pattern_mask_index]

                if pattern_instruction == "N":
                    if len(input_progression) == 0:
                        break
                    current_chord = input_progression.pop(0)
                    new_progression.append(current_chord)
                elif pattern_instruction == "S":
                    new_progression.append(current_chord)
                elif pattern_instruction == "X":
                    new_progression.append("X")

                if pattern_mask_index == len(pattern_mask) - 1:
                    pattern_mask_index = 0
                else:
                    pattern_mask_index = pattern_mask_index + 1
            progression = new_progression

        # We do this to allow blank spaces
        for chord in progression:

            # This is for # 'I', 'VI', etc
            progression_chord = to_chords(chord, key)
            if progression_chord != []:
                has_number = True

            # This is for 'C', 'Am', etc.
            if progression_chord == []:
                try:
                    progression_chord = [pychord.Chord(chord).components()]
                except Exception:
                    # This is an 'X' input
                    progression_chord = [None]

            chord_info = {}
            chord_info['notes'] = progression_chord[0]
            if has_number:
                chord_info['number'] = chord
            else:
                chord_info['name'] = chord

            if progression_chord[0]:
                chord_info['root'] = progression_chord[0][0]
            else:
                chord_info['root'] = None
            progression_chords.append(chord_info)

        # For each input..
        previous_pitches = []
        for chord_index, chord_info in enumerate(progression_chords):

            # Unpack object
            chord = chord_info['notes']
            # NO_OP
            if chord is None:
                bar = bar + 1
                continue
            root = chord_info['root']
            root_pitch = pychord.utils.note_to_val(
                notes.int_to_note(notes.note_to_int(root)))

            # Reset internals
            humanize_amount = humanize_interval
            pitches = []
            all_new_pitches = []

            # Turns out this algorithm was already written in the 1800s!
            # https://en.wikipedia.org/wiki/Voice_leading#Common-practice_conventions_and_pedagogy

            # a) When a chord contains one or more notes that will be reused in the chords immediately following, then these notes should remain, that is retained in the respective parts.
            # b) The parts which do not remain, follow the law of the shortest way (Gesetze des nachsten Weges), that is that each such part names the note of the following chord closest to itself if no forbidden succession XXX GOOD NAME FOR A BAND XXX arises from this.
            # c) If no note at all is present in a chord which can be reused in the chord immediately following, one must apply contrary motion according to the law of the shortest way, that is, if the root progresses upwards, the accompanying parts must move downwards, or inversely, if the root progresses downwards, the other parts move upwards and, in both cases, to the note of the following chord closest to them.
            root = None
            for i, note in enumerate(chord):

                # Sanitize notes
                sanitized_notes = notes.int_to_note(notes.note_to_int(note))
                pitch = pychord.utils.note_to_val(sanitized_notes)

                if i == 0:
                    root = pitch

                if root:
                    if root_lowest and pitch < root:  # or chord_index is 0:
                        pitch = pitch + 12  # Start with the root lowest

                all_new_pitches.append(pitch)

                # Reuse notes
                if pitch in previous_pitches:
                    pitches.append(pitch)

            no_melodic_fluency = False  # XXX: vargify
            if previous_pitches == [] or all_new_pitches == [] or pitches == [] or no_melodic_fluency:
                pitches = all_new_pitches
            else:
                # Detect the root direction
                root_upwards = None
                if pitches[0] >= all_new_pitches[0]:
                    root_upwards = True
                else:
                    root_upwards = False

                # Move the shortest distance
                if pitches != []:
                    new_remaining_pitches = list(all_new_pitches)
                    old_remaining_pitches = list(previous_pitches)
                    for i, new_pitch in enumerate(all_new_pitches):
                        # We're already there
                        if new_pitch in pitches:
                            new_remaining_pitches.remove(new_pitch)
                            old_remaining_pitches.remove(new_pitch)
                            continue

                    # Okay, so need to find the overall shortest distance from the remaining pitches - including their permutations!
                    while len(new_remaining_pitches) > 0:
                        nearest_distance = 9999
                        previous_index = None
                        new_index = None
                        pitch_to_add = None
                        for i, pitch in enumerate(new_remaining_pitches):
                            # XXX: DRY

                            # The Pitch
                            pitch_to_test = pitch
                            nearest = min(old_remaining_pitches,
                                          key=lambda x: abs(x - pitch_to_test))
                            old_nearest_index = old_remaining_pitches.index(
                                nearest)
                            if nearest < nearest_distance:
                                nearest_distance = nearest
                                previous_index = old_nearest_index
                                new_index = i
                                pitch_to_add = pitch_to_test

                            # +12
                            pitch_to_test = pitch + 12
                            nearest = min(old_remaining_pitches,
                                          key=lambda x: abs(x - pitch_to_test))
                            old_nearest_index = old_remaining_pitches.index(
                                nearest)
                            if nearest < nearest_distance:
                                nearest_distance = nearest
                                previous_index = old_nearest_index
                                new_index = i
                                pitch_to_add = pitch_to_test

                            # -12
                            pitch_to_test = pitch - 12
                            nearest = min(old_remaining_pitches,
                                          key=lambda x: abs(x - pitch_to_test))
                            old_nearest_index = old_remaining_pitches.index(
                                nearest)
                            if nearest < nearest_distance:
                                nearest_distance = nearest
                                previous_index = old_nearest_index
                                new_index = i
                                pitch_to_add = pitch_to_test

                        # Before we add it - just make sure that there isn't a better place for it.
                        pitches.append(pitch_to_add)
                        del old_remaining_pitches[previous_index]
                        del new_remaining_pitches[new_index]

                        # This is for the C E7 type scenario
                        if len(old_remaining_pitches) == 0:
                            for x, extra_pitch in enumerate(
                                    new_remaining_pitches):
                                pitches.append(extra_pitch)
                                del new_remaining_pitches[x]

                    # Final check - can the highest and lowest be safely folded inside?
                    max_pitch = max(pitches)
                    min_pitch = min(pitches)
                    index_max = pitches.index(max_pitch)
                    folded_max = max_pitch - 12
                    if (folded_max > min_pitch) and (folded_max
                                                     not in pitches):
                        pitches[index_max] = folded_max

                    max_pitch = max(pitches)
                    min_pitch = min(pitches)
                    index_min = pitches.index(min_pitch)

                    folded_min = min_pitch + 12
                    if (folded_min < max_pitch) and (folded_min
                                                     not in pitches):
                        pitches[index_min] = folded_min

                    # Make sure the average can't be improved
                    # XXX: DRY
                    if len(previous_pitches) != 0:
                        previous_average = sum(previous_pitches) / len(
                            previous_pitches)

                        # Max
                        max_pitch = max(pitches)
                        min_pitch = min(pitches)
                        index_max = pitches.index(max_pitch)
                        folded_max = max_pitch - 12

                        current_average = sum(pitches) / len(pitches)
                        hypothetical_pitches = list(pitches)
                        hypothetical_pitches[index_max] = folded_max
                        hypothetical_average = sum(hypothetical_pitches) / len(
                            hypothetical_pitches)
                        if abs(previous_average -
                               hypothetical_average) <= abs(previous_average -
                                                            current_average):
                            pitches[index_max] = folded_max
                        # Min
                        max_pitch = max(pitches)
                        min_pitch = min(pitches)
                        index_min = pitches.index(min_pitch)
                        folded_min = min_pitch + 12

                        current_average = sum(pitches) / len(pitches)
                        hypothetical_pitches = list(pitches)
                        hypothetical_pitches[index_min] = folded_min
                        hypothetical_average = sum(hypothetical_pitches) / len(
                            hypothetical_pitches)
                        if abs(previous_average -
                               hypothetical_average) <= abs(previous_average -
                                                            current_average):
                            pitches[index_min] = folded_min

                # Apply contrary motion
                else:
                    print("Applying contrary motion!")
                    for i, new_pitch in enumerate(all_new_pitches):
                        if i == 0:
                            pitches.append(new_pitch)
                            continue

                        # Root upwards, the rest move down.
                        if root_upwards:
                            if new_pitch < previous_pitches[i]:
                                pitches.append(new_pitch)
                            else:
                                pitches.append(new_pitch - 12)
                        else:
                            if new_pitch > previous_pitches[i]:
                                pitches.append(new_pitch)
                            else:
                                pitches.append(new_pitch + 12)

            # Bassline
            if bassline:
                pitches.append(root_pitch - 24)

            # Melody

            # Octave is a simple MIDI offset counter
            for octave in octaves:
                for note in pitches:
                    pitch = int(note) + (int(octave.strip()) * 12)

                    # Don't humanize bassline note
                    if bassline and (pitches.index(note) == len(pitches) - 1):
                        midi_time = offset + bar
                    else:
                        midi_time = offset + bar + humanize_amount

                    # Write the note
                    midi.addNote(track=track,
                                 channel=channel,
                                 pitch=pitch,
                                 time=midi_time,
                                 duration=duration,
                                 volume=volume)

                humanize_amount = humanize_amount + humanize_interval
                if i + 1 >= num_notes:
                    break
            bar = bar + 1
            previous_pitches = pitches

        ##
        # Output
        ##

        if self.vargs['output']:
            filename = self.vargs['output']
        elif self.vargs['input']:
            filename = self.vargs['input'].replace('.txt', '.mid')
        else:
            if has_number:
                key_prefix = key + '-'
            else:
                key_prefix = ''

            filename = key_prefix + '-'.join(og_progression) + '-' + str(tempo)
            if bassline:
                filename = filename + "-bassline"
            if pattern:
                filename = filename + "-" + pattern
            if os.path.exists(filename):
                filename = key_prefix + '-'.join(og_progression) + '-' + str(
                    tempo) + '-' + str(int(time.time()))
            filename = filename + '.mid'

            if directory:
                directory_to_create = '-'.join(og_progression)
                try:
                    os.makedirs(directory_to_create)
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(
                            directory_to_create):
                        pass
                    else:
                        raise
                filename = directory_to_create + '/' + filename

        with open(filename, "wb") as output_file:
            midi.writeFile(output_file)
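The voice-leading section above implements the 19th-century "law of the shortest way" described in its comments. As an aid to reading it, here is a much smaller, independent sketch of the same idea; the function and the names in it are illustrative only and are not part of chords2midi:

def lead_voices(previous_pitches, new_pitches):
    """Keep common tones, then move each remaining voice to the nearest
    candidate pitch, allowing octave shifts of +/- 12 semitones."""
    kept = [p for p in new_pitches if p in previous_pitches]  # rule (a): retain common tones
    remaining_old = [p for p in previous_pitches if p not in kept]
    result = list(kept)

    for pitch in new_pitches:
        if pitch in kept:
            continue
        # rule (b): law of the shortest way - choose the octave placement
        # of this pitch that lies closest to a still-unmatched old voice
        candidates = [pitch - 12, pitch, pitch + 12]
        if remaining_old:
            best = min(candidates,
                       key=lambda c: min(abs(c - o) for o in remaining_old))
            nearest_old = min(remaining_old, key=lambda o: abs(o - best))
            remaining_old.remove(nearest_old)
        else:
            best = pitch
        result.append(best)
    return result

# Example: C major (60, 64, 67) followed by G major given as (67, 71, 62)
# keeps the common tone 67 and moves the other two voices by a step:
# lead_voices([60, 64, 67], [67, 71, 62]) -> [67, 59, 62]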
Code Example #4
File: main.py Project: xcode2010/midi-transcripter
print('tempo: ', tempo)

print('Computing features...')
xs = []
for t in onsets:
    samples = amplitude[int((t - clf_window[0]) * sample_rate): int((t + clf_window[1]) * sample_rate)]
    x = classifier.compute_features(samples.astype(float),
                                    sample_rate,
                                    window_size=feature_window_size,
                                    hop_length=hop_length)
    xs.append(x)

print('\n\nPredicting instruments...')
predictions = classifier.predict_intruments(xs)

mf = MIDIFile(1)     # only 1 track
track = 0 
time     = 0    # In beats
channel = 9
volume = 100
mf.addTrackName(track, time, "Sample Track")
mf.addTempo(track, time, tempo)

print('Generating midi...')
for time in range(len(onsets)):
    for idx, instrument in classifier.INSTRUMENT_NAME_MAPPING.items():
        if predictions[instrument][time]:
            pitch = classifier.INSTRUMENT_KEY_MAPPING[idx]
            duration = 1  # 1 beat long
            mf.addNote(track, channel, pitch, time, duration, volume)
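The excerpt stops before the file is written; presumably it ends the way the other examples on this page do, roughly:

with open('drums.mid', 'wb') as output_file:  # hypothetical file name
    mf.writeFile(output_file)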
Code Example #5
File: text2sound.py Project: jy4uk/text2sound
Text to Sound project
"""

from midiutil import MIDIFile
from tqdm import tqdm
import random

#creating a midi object
track = 0
channel = 0
time = 0  # In beats
duration = 1  # In beats
tempo = 60  # In BPM
volume = 100  # 0-127, as per the MIDI standard

MyMIDI = MIDIFile(1)
MyMIDI.addTempo(track, time, tempo)
"""function that takes in a character and then outputs different values that will affect
 a note's specific parameters like midi-note, length, volume"""


# calculate the midi note value for what the pitch of the note should be
def get_pitch(character):
    first_letter = int(ord(character[0]))
    if first_letter > 108:
        # diff_fl = first_letter - 90
        first_letter = first_letter - 36
    return first_letter


# calculate the duration of the note
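The excerpt is cut off right after announcing the duration helper. A hypothetical version in the same spirit (the project's actual mapping is not shown, so the rule below is invented purely for illustration):

def get_duration(character):
    # Invented rule: vowels ring twice as long as consonants.
    if character.lower() in "aeiou":
        return 2
    return 1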
Code Example #6
        old, new = new, old + new
    return new


scale = major_scale(start=58)  # Bb3 (MIDI 58)
harmony_scale = major_scale(start=34)  # Bb1 (MIDI 34)

duration_scale = [0.5, 1.0, 1.5, 2.0]

channel = 0
time = 0  # In beats
duration = 1  # In beats
tempo = 140  # In BPM
volume = 90  # 0-127, as per the MIDI standard

midi = MIDIFile(numTracks=2)
#midi.addTempo(0, time, tempo)

next_note = 0.

for i in range(72):
    fib_num = fib(i)

    if i in range(24, 48):
        scale_index = fib_num % len(scale) - 2
        duration = duration_scale[fib_num % len(duration_scale)]
    else:
        scale_index = -1 * (fib_num % len(scale))
        duration = duration_scale[fib_num % len(duration_scale)]

    # melody
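The loop is cut off right where the notes would be written, picking up after the "# melody" comment. A plausible continuation, assuming the melody goes on track 0 and the harmony on track 1 (the layout and file name are guesses):

    midi.addNote(0, channel, scale[scale_index], next_note, duration, volume)
    # harmony
    midi.addNote(1, channel, harmony_scale[scale_index], next_note, duration, volume)
    next_note += duration

with open("fibonacci.mid", "wb") as output_file:  # hypothetical file name
    midi.writeFile(output_file)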
Code Example #7
for i in range(7, 1000):
    y = model.predict(X)
    note = get_note_from_one_hot(
        y)  # try sample from a probability distribution
    if i > 8 and (note == lst[i - 8] or note == lst[i - 7] or note
                  == lst[i - 6] or note == lst[i - 5] or note == lst[i - 4]):
        y[0][np.argmax(y)] = -1
    note = get_note_from_one_hot(y)
    lst.append(note)
    for i in range(num_input_note - 1):
        X[0][i] = X[0][i + 1]
    X[0][num_input_note - 1] = get_note_as_one_hot(note)
    # print(get_note_from_one_hot(X[0][0]))
    print(note)

MyMIDI = MIDIFile(1, adjust_origin=False)
MyMIDI.addTempo(track, time, tempo)

degree_list = get_note_list(lst)
print(np.shape(degree_list))
for i in range(len(degree_list)):
    for j in range(len(degree_list[i])):
        for pitch in degree_list[i][j]:
            # print(pitch)
            MyMIDI.addNote(track, channel, pitch, time, duration, volume)
            time = time + 1 / (12 if (len(degree_list[i][j])) > 12 else
                               (len(degree_list[i][j])))

with open("sample.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)
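The inline comment above ("try sample from a probability distribution") suggests replacing the argmax-plus-penalty trick with sampling. A hedged sketch of what that could look like, assuming y[0] holds the model's per-note scores (the helper name is hypothetical):

import numpy as np

def sample_note_index(y, temperature=1.0):
    """Sample an index from the model output instead of always taking the argmax."""
    logits = np.asarray(y[0], dtype=float) / temperature
    probs = np.exp(logits - logits.max())  # numerically stable softmax
    probs /= probs.sum()
    return int(np.random.choice(len(probs), p=probs))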
Code Example #8
File: composer.py Project: jholmes/auto-drummer
def _write_midi(hit_list, fill_list, save_path, file_name, humanisation):
    midi_patttern = MIDIFile(
        1)  # One track, defaults to format 1 (tempo track is created
    # automatically)
    midi_patttern.addTempo(0, 0, tempo)

    #iterator
    j = 0
    #Add kicks to middle C, snares to C#3 and hats to D3.
    for i in hit_list:
        if i[0] == 'k':
            if i[1] == '1':
                midi_patttern.addNote(0, 0, 60,
                                      float(i[1]) / 4 - offset, duration,
                                      int(volume - abs(humanisation[j]) * 300))
            else:
                midi_patttern.addNote(
                    0, 0, 60,
                    float(i[1]) / 4 - offset + humanisation[j], duration,
                    int(volume - abs(humanisation[j]) * 300))
        if i[0] == 's':
            if i[1] == '1':
                midi_patttern.addNote(0, 0, 61,
                                      float(i[1]) / 4 - offset, duration,
                                      int(volume - abs(humanisation[j]) * 300))
            else:
                midi_patttern.addNote(
                    0, 0, 61,
                    float(i[1]) / 4 - offset + humanisation[j], duration,
                    int(volume - abs(humanisation[j]) * 300))
        if i[0] == 'g':
            if i[1] == '1':
                midi_patttern.addNote(0, 0, 61,
                                      float(i[1]) / 4 - offset, duration,
                                      int(40 - abs(humanisation[j]) * 300))
            else:
                midi_patttern.addNote(
                    0, 0, 61,
                    float(i[1]) / 4 - offset + humanisation[j], duration,
                    int(40 - abs(humanisation[j]) * 300))
        if i[0] == 'h':
            if i[1] == '1':
                midi_patttern.addNote(0, 0, 62,
                                      float(i[1]) / 4 - offset, duration,
                                      int(volume - abs(humanisation[j]) * 300))
            else:
                midi_patttern.addNote(
                    0, 0, 62,
                    float(i[1]) / 4 - offset + humanisation[j], duration,
                    int(volume - abs(humanisation[j]) * 300))
        if i[0] == 'p':
            if i[1] == '1':
                midi_patttern.addNote(0, 0, 63,
                                      float(i[1]) / 4 - offset, duration,
                                      int(volume - abs(humanisation[j]) * 300))
            else:
                midi_patttern.addNote(
                    0, 0, 63,
                    float(i[1]) / 4 - offset + humanisation[j], duration,
                    int(volume - abs(humanisation[j]) * 300))
        j += 1

    for i in fill_list:
        if i[0] == 'k':
            midi_patttern.addNote(
                0, 0, 60,
                float(i[1]) / 4 - offset + humanisation[j], duration,
                int(volume - 15 - abs(humanisation[j]) * 500))
        if i[0] == 's':
            midi_patttern.addNote(
                0, 0, 61,
                float(i[1]) / 4 - offset + humanisation[j], duration,
                int(volume - 10 - abs(humanisation[j]) * 500))
        if i[0] == 'h':
            midi_patttern.addNote(
                0, 0, 62,
                float(i[1]) / 4 - offset + humanisation[j], duration,
                int(volume - 12 - abs(humanisation[j]) * 500))

    if osname == "posix":
        # Unwrap QString obj because it doesn't work with posixpath() in os.path.join()
        file_name = str(file_name)
    file_path = pjoin(save_path, file_name)
    with open(file_path, "wb") as output_file:
        midi_patttern.writeFile(output_file)
Code Example #9
File: train.py Project: GerbenRienk/eartrainer
degrees  = [59, 60, 61, 62, 63, 64]
track    = 0
channel  = 0
time     = 1   # In beats
duration = 1   # In beats
tempo    = 60  # In BPM
volume   = 100 # 0-127, as per the MIDI standard
previous_note = -1  # previous note to compare with new one to avoid duplicates

total_number_of_sequences = 1000
notes_per_sequence = 4
note_counter = 0
sequence_counter = 0

random.seed()
MyMIDI = MIDIFile(1, adjust_origin=True)  # One track, defaults to format 1 (tempo track automatically created)
MyMIDI.addTempo(track, time, tempo)

i = previous_note
while sequence_counter <= total_number_of_sequences:
    # get a random number
    
    while previous_note == i: 
        i = random.randint(0,len(degrees)-1)
    
    print(time, degrees[i])
    previous_note = i
    # find the corresponding note
    random_note = degrees[i]
    MyMIDI.addNote(track, channel, random_note, time, duration, volume)
    time = time + 1
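As excerpted, the loop never advances note_counter or sequence_counter, so it would run forever; the missing tail presumably does something like the following before the file is written (the grouping logic and file name are assumptions):

    note_counter += 1
    if note_counter == notes_per_sequence:
        note_counter = 0
        sequence_counter += 1
        time = time + 1  # leave a one-beat rest between sequences

with open("eartrainer.mid", "wb") as output_file:  # hypothetical file name
    MyMIDI.writeFile(output_file)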
Code Example #10
def main(argv):
    usernames = ''
    proxy = ''
    filename = 'gitbeats.mid'
    try:
        opts, args = getopt.getopt(
            argv, 'hu:p:t:o:',
            ['username=', 'proxy=', 'tempo=', 'output-filename='])
    except getopt.GetoptError:
        print(
            'gitbeats.py -u <username> -p <http-proxy> -t <tempo> -o <output-filename>'
        )
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('gitbeats.py -u <username> -p <http-proxy> -t <tempo>')
            sys.exit()
        elif opt in ('-u', '--username'):
            usernames = arg
        elif opt in ('-p', '--proxy'):
            proxy = arg
        elif opt in ('-t', '--tempo'):
            tempo = int(arg)
        elif opt in ('-o', '--output-filename'):
            filename = arg

    print('username: {}'.format(usernames))
    print('proxy: {}'.format(proxy))
    print('tempo: {}'.format(tempo))
    print('filename: {}'.format(filename))

    proxies = {}

    if proxy != '':
        proxies = {'http': proxy, 'https': proxy}

    midi = MIDIFile(1)
    midi.addTempo(track, time, tempo)

    for channel, username in enumerate(usernames.split(',')):
        url = requests.urllib3.util.url.Url('https', None, 'github.com', None,
                                            username, None)
        print('requesting data from {}'.format(url))
        request = requests.get(url, proxies=proxies)

        print('parsing data from {}'.format(url))
        parser = GitHubSvgActivityParser()
        parser.feed(request.text)

        print('processing data from {}'.format(url))
        for i, count in enumerate(parser.dataArray):
            pitch = (count + (channel * 12))
            if pitch > 255:
                pitch = 255

            midi.addNote(track, channel, pitch, time + i, duration, volume)

    path = os.path.dirname(__file__)
    outputfile = os.path.join(path, filename)

    print('exporting track to {}'.format(outputfile))

    with open(outputfile, 'wb') as data:
        midi.writeFile(data)

    print('exported track to {}'.format(outputfile))
    print('done')
Code Example #11
from midiutil import MIDIFile
from math import sin

midi_file = MIDIFile(1) # Create MIDI file with one track.
midi_file.addTempo(0, 0, 240) # set the tempo to 240 BPM on track 0, at time 0.

multiples = {
    2: 60, # C4
    3: 62, # D4
    4: 65, # F4
    5: 67, # G4
    6: 67, # G4
    7: 69, # A4
    8: 72, # C5
    9: 72, # C5
    10: 74 # D5
}

N = 200
time = 0

# N is some positive integer greater than one.
for i in range(1, N + 1):
    duration = 0.45 * sin(i) + 0.55
    for multiple in multiples:
        if i % multiple == 0:
            note = multiples[multiple]

            midi_file.addNote(0, 0, note, time, duration, 100)

    time += duration
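The excerpt never writes the file it builds; the closing step would presumably be the usual one:

with open("multiples.mid", "wb") as output_file:  # hypothetical file name
    midi_file.writeFile(output_file)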
Code Example #12
def jpg_to_midi(img_name, grid_name, result_name):

    # First we have to convert everything to string from symbol (pd datastructure)
    img_name = str(img_name)
    grid_name = str(grid_name)
    result_name = str(result_name)

    img = Image.open(img_name)
    grid_width, grid_height = get_grid_dimensions(grid_name)
    unit_width, unit_height = get_unit_dimensions(img.width, img.height, grid_width, grid_height)

    pixels = img.load()

    result_MIDI = MIDIFile(numTracks=grid_height, adjust_origin=False)
    result_MIDI.addTempo(0, 0, 60)

    # Now need to work along rows of grid and average region of image appropriately
    with open(grid_name) as grid:
        colours = []

        # For each row in the grid...
        for y_index, line in enumerate(grid):

            # Get starting and ending y coords
            starting_y = y_index * unit_height
            ending_y = (y_index + 1) * unit_height

            x_count = 0
            row_colours = []
            regions = line.split(",")

            # r = each number in a row in the grid
            for r in regions:
                starting_beat = x_count

                starting_x = x_count * unit_width
                x_count += int(r)                
                ending_x = x_count * unit_width
                # row_colours.append(get_average_colour(pixels, (float(starting_x), float(starting_y)), (float(ending_x), float(ending_y))))

                average_colour = get_average_colour(pixels, (float(starting_x), float(starting_y)), (float(ending_x), float(ending_y)))
                pov = hsv2pov(rgb2hsv(average_colour))

                #print pov
                #print get_midi_pitch(pov)
                #print "Beat start " + str(starting_beat)
                #print "Length " + str(r)

                # y_index = line number, which corresponds to track number of note
                # notes all placed on channel 0
                # midi_pitch worked out by passing average colour (pov tuple) to function
                # starting beat is the x_count at the start of this inner loop iteration; this is the sum of all previous numbers on the line
                # int(r) is the duration of the note in beats; given by the length of the cell in the grid
                # volume of note is given by velocity from pov tuple
                result_MIDI.addNote(y_index, 0, get_midi_pitch(pov), starting_beat, int(r), pov["velocity"])

    #povs = []

    #for c in colours:
        #hs = map(rgb2hsv, c)
        #ps = map(hsv2pov, hs)
        #povs.append(ps)

    with open(result_name, "wb") as output_file:
        result_MIDI.writeFile(output_file)
Code Example #13
File: midi_ops.py Project: Hadryan/CMSC190_CJLDadios
def convert_to_midi(input_dict_list, bpm, filename):
    print("\nConverting predictions to midi...")
    
    midi_mapping = {
        "bass": 36,                     # C2
        "stick": 37,                    # C#2
        "snare": 38,                    # D2
        "floor": 41,                    # F2
        "hihat": 42,                    # F#2
        "hihatp": 44, # pedal           # G#2
        "hihat-open": 46,               # A#2  
        "tom": 47,                      # B2     
        "crash": 49,                    # D#3
        "ride": 51,                     # E#3
        "bell": 53                      # F3
    }

    # volume_mapping = {
    #     "bass": 120,
    #     "stick": 115,
    #     "snare": 105,
    #     "floor": 110,
    #     "tom": 110,
    #     "hihat": 100,
    #     "hihat-open": 100,
    #     "hihatp": 105, # pedal
    #     "crash": 100,
    #     "ride": 100,
    #     "bell": 100,
    #     "rest": 0,
    # }

    volume_mapping = {
        "bass": 127,
        "stick": 127,
        "snare": 127,
        "floor": 127,
        "tom": 127,
        "hihat": 127,
        "hihat-open": 127,
        "hihatp": 127, # pedal
        "crash": 127,
        "ride": 127,
        "bell": 127,
        "rest": 0,
    }


    # degrees  = [60, 62, 64, 65, 67, 69, 71, 72] # MIDI note number
    track    = 0
    channel  = 10
    time     = 0   # In beats
    # duration = 1   # In beats
    duration = 1/4   # In beats
    tempo    = bpm  # In BPM
    volume   = 115 # 0-127, as per the MIDI standard

    MyMIDI = MIDIFile(1) # One track, defaults to format 1 (tempo track
                         # automatically created)
    if bpm==0:
        tempo = 1 # prevent division by zero
    
    MyMIDI.addTempo(track,time, tempo)

    # for pitch in degrees:
    #     MyMIDI.addNote(track, channel, pitch, time, duration, volume)
    #     time = time + 1


    """
        Try to add multiple notes at a time: Loop addNote
        while instruments identified by the name are not exhausted
        
        To use foot hihat, if there are already four instruments, use "hihatp"
    """

    # input_dict_list is a list of dictionaries
    # The dictionaries have keys "1", "e", "&", "a"
    # where the values are class names, eg. "bass tom hihat ride"

    rhythm_start_time_dict = {
        "1": 0,
        "2": 0,
        "3": 0,
        "4": 0,
        # "5": 0,
        "e": 1/16,
        "&": 1/8,
        "a": 3/16
    }

    print("\nInside midi_ops...")
    for quarter_dict in input_dict_list:
        for key, val in quarter_dict.items():
            print("{}: {}".format(key, val))

    # halt()

    for quarter_dict in input_dict_list:
        prev_key = "0 1" # initialization of previous rhythm

        for key, val in quarter_dict.items():

            # Advance the time depending on the rhythm, identified by the key
            # prev_rhy = rhythm_start_time_dict[prev_key]
            # curr_rhy = rhythm_start_time_dict[key]
            # time = time + curr_rhy - prev_rhy

            split_key = key.split()
            # split_prev_key = prev_key.split()

            # curr_time = int(split_key[0]) + rhythm_start_time_dict[split_key[1]]
            # prev_time = int(split_prev_key[0]) \
                # + rhythm_start_time_dict[split_prev_key[1]]
            # time += curr_time - prev_time

            quarter_id = split_key[0] # e.g. "0"
            sixteenth_id = split_key[1] # e.g. "a"

            time = (int(quarter_id) * 0.25 \
                + rhythm_start_time_dict[sixteenth_id]) * 4

            prev_key = key # update prev_key

            has_hihat = False # add the hihat last
            has_hihat_open = False
            has_bass = False
            four_hits_at_a_time = False
            hit_count = 0

            for instrument in val.split():
                volume = volume_mapping[instrument]

                if instrument == "hihat":
                    has_hihat = True
                elif instrument == "hihat-open":
                    has_hihat_open = True
                elif instrument == "rest":
                    hit_count += 1
                else:
                    if instrument == "bass":
                        has_bass = True

                    pitch = midi_mapping[instrument]
                    MyMIDI.addNote(track, channel, pitch, 
                                    time, duration, volume)
                    hit_count += 1
            # End for every instrument hit per rhythm

            # Add hihat last if there's hihat
            if has_hihat_open or has_hihat:
                if hit_count == 3: # If there's a limb available
                    pitch = midi_mapping["hihatp"] # No other hihat option
                    MyMIDI.addNote(track, channel, pitch, 
                                    time, duration, volume)
                elif hit_count == 2: # If two limbs were occupied
                    if has_bass: # but one of that limb is a bass
                        # A hand is available, so option if open is available
                        if has_hihat_open:
                            pitch = midi_mapping["hihat-open"]
                            MyMIDI.addNote(track, channel, pitch, 
                                    time, duration, volume)
                        else: # Closed hihat
                            pitch = midi_mapping["hihat"]
                            MyMIDI.addNote(track, channel, pitch, 
                                    time, duration, volume)
                    else: # If two instruments were hit, and not one of them
                        # is a bass. It means that both hands were already
                        # used. Therefore, we can only use the left foot
                        # to play the hihat.
                        pitch = midi_mapping["hihatp"] # No other hihat option
                        MyMIDI.addNote(track, channel, pitch, 
                                                    time, duration, volume)
                else: # At most one limb will be occupied.
                    # Therefore, at least one limb (required) will be able
                    # to play the hihat.
                    if has_hihat_open:
                        pitch = midi_mapping["hihat-open"]
                        MyMIDI.addNote(track, channel, pitch, 
                                time, duration, volume)
                    else: # Closed hihat
                        pitch = midi_mapping["hihat"]
                        MyMIDI.addNote(track, channel, pitch, 
                                time, duration, volume)

            # End adding hihat
        # End for each rhythm
    # End for all the hits on each rhythm

    with open(filename + ".mid", "wb") as output_file:
        MyMIDI.writeFile(output_file)
Code Example #14
File: image2melody.py Project: fmented/Image2MIDI
def makemidi(img, midi, lo=74, hi=110):
    if int(lo) < 0:
        lo = 0
    if int(hi) > 127:
        hi = 127

    filename = img
    if filename.endswith('.png'):
        a= cv2.imread(filename, cv2.IMREAD_UNCHANGED)

        trans=a[:,:,3]==0
        a[trans]= [255,255,255,255]

        a=cv2.cvtColor(a, cv2.COLOR_BGRA2BGR)
    else:
        a= cv2.imread(filename, cv2.IMREAD_UNCHANGED)


    h, w = a.shape[:2]

    scale=w/h

    w=int(127*scale)

    dim= (w, 127)
    res= cv2.resize( a, dim)

    row=[]
    col=[]
    vel=[]
    for i in range(res.shape[1]):
        for j in range(res.shape[0]):
            r=res[j][i]
            
            row.append(int(pick(r)))
        col.append(int(valmap(row[random.randint(0, len(row) - 1)], 0, 255, 36, 72)))
        vel.append(int(valmap(row[random.randint(0, len(row) - 1)], 0, 255, lo, hi)))
    notes = []

    music_scale=get_octave(major, 0, 127)
    for x in col:
        n=snap(music_scale, x)
        notes.append(n)

    from midiutil import  MIDIFile

    MyMIDI = MIDIFile(1)

    t=random.randint(120, 180)
    MyMIDI.addTempo(0, 0, t*scale)


    for i, (note, velo) in enumerate(zip(notes, vel)):
        try:
            MyMIDI.addNote(0, 0, note, i/2, random.randint(1,2), velo)
        except Exception:
            MyMIDI.addNote(0, 0, note, i/2, 1, velo)


    with open(midi, "wb") as output_file:
        MyMIDI.writeFile(output_file)
    exit("write {} successful".format(midi))
Code Example #15
# ---------- Initialization ---------- #
# list of audiofiles
audioFiles = []
listOfFiles = os.listdir('./Samples')
pattern = "*.wav"
pause = False
for entry in listOfFiles:
    if fnmatch.fnmatch(entry, pattern):
        audioFiles.append(entry)

# General
bpm = 120
keepPlaying = True                                                                                                      # check if sequencer should loop

# MIDI Information
mf = MIDIFile(1)
track = 0
channel = 0
pitch = 0
PitchOut = 32 + pitch
volume = 127
noteValues = []                                                                                                         # Array of all note MIDI Values in the complete sequence.


# ---------- Definitions ---------- #
def thread1():                                                                                                          # data for thread 1
    if p1.choice == 'random':
        p1.randomSeq()
    elif p1.choice == 'static':
        p1.staticSeq()
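The initialization above never reaches the point where notes are added or the file is written. A hypothetical sketch of how these variables could come together later (everything not defined above is a guess):

mf.addTempo(track, 0, bpm)
for step, value in enumerate(noteValues):  # noteValues is filled in elsewhere
    mf.addNote(track, channel, PitchOut + value, step, 1, volume)

with open("sequence.mid", "wb") as output_file:  # hypothetical file name
    mf.writeFile(output_file)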
Code Example #16
    def createMidi(self):
        self.midif = MIDIFile(3)
        self.midif.addTempo(0, 0, self.tempo)
Code Example #17
params = None

for i, fileName in enumerate(os.listdir(rootDir)):
    # This is for implementing all songs
    print(i, ': ', fileName)
    vsqxPath = rootDir + fileName
    path = PurePath(vsqxPath)
    vsqx = xml.dom.minidom.parse(str(path))
    try:
        TEMPO = int(
            vsqx.getElementsByTagName('tempo')
            [0].childNodes[1].firstChild.data[:-2])
    except Exception:
        print('   Failure with ', fileName)
        continue
    mf = MIDIFile(len(vsqx.getElementsByTagName('vsTrack')),
                  removeDuplicates=False)
    time = 0

    for trackNo, track in enumerate(vsqx.getElementsByTagName('vsTrack')):
        mf.addTrackName(trackNo, time, "Track {}".format(str(trackNo)))
        i = 0
        timeOffset = 0

        prevTime = 0
        durTime = 0

        #params = None
        #print(len(track.getElementsByTagName('note')))
        for note in track.getElementsByTagName('note'):
            params = createNote(note, params)
            #mf.addNote(trackNo, 0, getNoteData(note, 'n'), currTime / 480, getNoteData(note, 'dur') / 480, 64)
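The note loop is cut off here; the commented-out call above hints at the eventual shape of the note writing. Roughly, and with the 't' position field below being an assumption about the project's getNoteData helper:

            posTick = getNoteData(note, 't') + timeOffset  # assumed field name for the note's start tick
            durTick = getNoteData(note, 'dur')
            mf.addNote(trackNo, 0, getNoteData(note, 'n'),
                       posTick / 480, durTick / 480, 64)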
Code Example #18
def make_midi(tbon, outfile, firstbar=0, quiet=False, metronome=0):
    """
    Parse and evaluate the source string. Write the output
    to the specified outfile name.

    kwargs:
      transpose -- Number of semitones to transpose the output.
                   May be positive or negative.
      volume -- MIDI track volume
      track  -- Midi file track number
      channel -- MIDI channel number
      octave  -- Initial MIDI octave number (0 - 10)
      numeric -- tbon notation can be either named pitches (cdefgab) or
                 numbers (1234567) with 1 corresponding to 'c'.
    """

    parts = tbon.output
    numparts = len(parts)
    print("Found {} parts".format(numparts))
    metronotes = tbon.metronome_output
    if metronome == 0:
        numTracks = numparts
    elif metronome == 1:
        numTracks = 1
    else:
        numTracks = 1 + numparts
    meta = tbon.meta_output
    beat_map = tbon.beat_map
    MyMIDI = MIDIFile(numTracks,
                      adjust_origin=True,
                      removeDuplicates=False,
                      deinterleave=False)
    #MyMIDI.addTempo(track, 0, tempo)
    trk0 = 0
    for m in meta:
        if m[0] == 'T':
            MyMIDI.addTempo(trk0, m[1], m[2])
        elif m[0] == 'K' and metronome != 1:
            time = m[1]
            sf, mi = m[2]
            track = m[3]
            mode = MINOR if mi == 1 else MAJOR
            accidentals = abs(sf)
            acc_type = SHARPS if sf > 0 else FLATS
            #print("Inserting key signature at time {}".format(time))
            #print(accidentals, acc_type, mode)
            MyMIDI.addKeySignature(track, time, accidentals, acc_type, mode)
        elif m[0] == 'M':
            ## Time signature
            time = m[1]
            numerator = m[2]
            denominator = m[3]
            track = m[4]
            ## midi denominator specified a power of 2
            midi_denom = {2: 1, 4: 2, 8: 3, 16: 4}[denominator]
            ## We want to make the midi metronome match beat duration.
            ## This requires recognizing compound meters
            ## See http://midiutil.readthedocs.io/en/1.1.3/class.html
            ## for discussion of arguments to addTimeSignature()
            ## including clocks_per_tick.
            if denominator == 16 and (numerator % 3 == 0):
                metro_clocks = 18
            elif denominator == 16:
                metro_clocks = 6
            elif denominator == 8 and (numerator % 3 == 0):
                metro_clocks = 36
            elif denominator == 8:
                metro_clocks = 12
            elif denominator == 4:
                metro_clocks = 24
            elif denominator == 2:
                metro_clocks = 48
            MyMIDI.addTimeSignature(track,
                                    time,
                                    numerator,
                                    denominator=midi_denom,
                                    clocks_per_tick=metro_clocks)
        elif m[0] == 'I' and metronome != 1:
            ## Instrument change
            time = m[1]
            instrument = m[2] - 1  ## convert to 0 index
            track = m[3] + 1
            chan = m[4] - 1
            MyMIDI.addProgramChange(track, chan, time, instrument)

    def add_notes(source, trk):
        """ Add all notes in source to trk on chan. """
        for pitch, start, stop, velocity, chan in source:
            if pitch is not None:
                MyMIDI.addNote(trk, chan - 1, pitch, start, stop - start,
                               int(velocity * 127))

    if metronome == 0:
        for track, notes in enumerate(parts):
            add_notes(notes, track)
    elif metronome == 1:
        ## Metronome output only.
        add_notes(metronotes, trk0)
    else:
        ## Both
        for track, notes in enumerate(parts):
            add_notes(notes, track)
        metrotrack = numparts  ## because 0-indexing
        add_notes(metronotes, metrotrack)

    with open(outfile, "wb") as output_file:
        MyMIDI.writeFile(output_file)

    if not quiet:
        for partnum, pmap in beat_map.items():
            print_beat_map(partnum, pmap, first_bar_number=firstbar)
Code Example #19
File: midi_gen.py Project: modbrin/music-pso
from midiutil import MIDIFile
import datetime

now = datetime.datetime.now()
if __name__ == "__main__":

    track = 0
    channel = 0
    duration = 1  # In beats
    tempo = 120  # In BPM
    volume = 80  # 0-127, as per the MIDI standard

    MyMIDI = MIDIFile(2, adjust_origin=True)
    MyMIDI.addTempo(track, 0, tempo)
    with open('data.txt') as f:
        lines = f.readlines()
        for i in range(0, 16):
            chord = [int(n) for n in lines[2*i].split()]
            MyMIDI.addNote(track, channel, chord[0], i, duration, volume)
            MyMIDI.addNote(track, channel, chord[1], i, duration, volume)
            MyMIDI.addNote(track, channel, chord[2], i, duration, volume)
        counter = 0
        volume = 110
        for i in range(16, 48):
            melody = [int(n) for n in lines[2*i].split()]
            MyMIDI.addNote(track, channel, melody[0], counter, duration, volume)
            counter+=0.5

        MyMIDI.addNote(track, channel, 200, 17, duration, volume)
    name = "music_"+str(now.month)+"-"+str(now.day)+"_"+str(now.hour)+"-"+str(now.minute)+".mid"
    with open(name, "wb") as output_file:
Code Example #20
def create_midi_with_melody(clean_data, alert_data, name, arpegiation):
    midiObj = MIDIFile(4)  # four tracks
    midiObj.addTempo(0, 0, 150)

    ####################################################################
    ##################  CREATE ARPEGGIATION  ###########################
    ####################################################################

    if arpegiation:
        clean_rhythm_length = round(len(clean_data) / 16)
        note_cutoff = 0.02

        # The 16-step arpeggio cycles through the same four notes: c1, g2, c3, g2.
        arp_pattern = [(0, note["c1"]), (1, note["g2"]), (2, note["c3"]), (1, note["g2"])]
        for i in range(clean_rhythm_length):
            forward = i * 4
            #               track, channel, pitch, time, duration, volume
            for step, (trk, pitch) in enumerate(arp_pattern * 4):
                midiObj.addNote(trk, 0, pitch, forward + SIXTEEN_NOTE * step,
                                SIXTEEN_NOTE - note_cutoff, 100)

    ####################################################################
    ##################  CREATE DRONE ###################################
    ####################################################################

    else:
        duration = len(clean_data) / 4
        #               track, channel, pitch, time      , duration    , volume
        midiObj.addNote(0, 0, note["c2"], 0, duration, 100)
        midiObj.addNote(1, 0, note["g2"], 0, duration, 100)
        midiObj.addNote(2, 0, note["c1"], 0, duration, 100)

    ####################################################################
    ##################  CREATE PITCH BEND ##############################
    ####################################################################

    pitch_bend_ceiling = [0, 0, 0]
    max_shift = 8192
    previous_data_point = 0
    for i, data_point in enumerate(clean_data):
        note_position = i / 4

        if (previous_data_point == 0.0 and data_point > 0.0):
            pitch_bend_ceiling[0] = int(
                round(max_shift * random.uniform(-1.0, 1.0)))
            pitch_bend_ceiling[1] = int(
                round(max_shift * random.uniform(-1.0, 1.0)))
            pitch_bend_ceiling[2] = int(
                round(max_shift * random.uniform(-1.0, 1.0)))
        elif (previous_data_point > 0 and data_point == 0.0):
            pitch_bend_ceiling = [0, 0, 0]

        ######## HIGH TONIC NOTE PITCHBEND ####################
        #                          track, channel, time         , pitchWheelValue
        midiObj.addPitchWheelEvent(0, 0, note_position, pitch_bend_ceiling[0])
        midiObj.addPitchWheelEvent(1, 0, note_position, pitch_bend_ceiling[1])
        midiObj.addPitchWheelEvent(2, 0, note_position, pitch_bend_ceiling[2])

        previous_data_point = data_point

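    # Added illustration (not part of the original): midiutil's pitch wheel
    # value is a signed offset in the range -8192..8191, with 0 meaning no
    # bend. Assuming the common +/-2 semitone bend range on the receiving
    # synth, a bend expressed in semitones maps to a wheel value like this:
    def semitones_to_wheel(semitones, bend_range=2.0):
        return int(max(-8192, min(8191, round(semitones / bend_range * 8192))))
    # e.g. semitones_to_wheel(1.0) == 4096 (bend up one semitone),
    #      semitones_to_wheel(-2.0) == -8192 (full bend down).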
    ####################################################################
    ##################  CREATE MELODY ##################################
    ####################################################################

    melody_pitches1 = [note["c5"]] * 8 + [note["c6"]] * 8  # eight c5s, then eight c6s

    melody_rhythm1 = [SIXTEEN_NOTE] * 16

    melody_pitches2 = [
        note["c5"], note["cs5"], note["d5"], note["ds5"], note["e5"],
        note["f5"], note["fs5"], note["c6"], note["cs6"], note["d6"],
        note["ds6"], note["e6"], note["f6"], note["fs6"],
    ] * 2  # the 14-note run is played twice

    melody_rhythm2 = [SIXTEEN_NOTE] * 28

    melody_pitches3 = [
        note["c5"], note["fs5"], note["cs6"], note["gs6"], note["ds7"],
        note["as7"], note["ds7"], note["gs6"], note["cs6"], note["gs5"],
        note["c5"], note["f4"]
    ]

    melody_rhythm3 = [EIGHTH_NOTE] * 12

    midiObj = createMelody(melody_pitches2, melody_rhythm2, alert_data,
                           midiObj, 3)
    # Alternative: swap in melody_pitches1/melody_rhythm1, or the line below.
    # midiObj = createMelody(melody_pitches3, melody_rhythm3, alert_data, midiObj, 3)

    with open("midiFiles/" + name + ".mid", "wb") as midiFile:
        midiObj.writeFile(midiFile)
Code example #21
0
import random

import audio
from midiutil import MIDIFile

if __name__ == "__main__":
    track = 0
    channel = 0
    time = 0  # In beats
    duration = 1  # In beats
    tempo = 80  # In BPM
    volume = 100  # 0-127, as per the MIDI standard

    correct = 0
    N = 20
    for i in range(0, N):
        MyMIDI = MIDIFile(
            1
        )  # One track, defaults to format 1 (tempo track is created automatically)
        MyMIDI.addTempo(track, time, tempo)
        #program = 40 # A Violin
        program = 0  # A piano
        MyMIDI.addProgramChange(track, channel, time, program)

        midiNote = 69  # A4 = 440 Hz
        noteTimes = [1.0, 2.0, 4.0, 8.0]  # in eighth notes
        restLength = random.choice(noteTimes)

        MyMIDI.addNote(track, channel, midiNote, time + restLength / 2, 1,
                       volume)
        MyMIDI.addNote(track, channel, midiNote, time + restLength / 2 + 1, 1,
                       volume)
Code example #22
0
File: MyFirstSong.py Project: savetent/pythonoise
ch0B = [81, 81, 80]
ch0C = [83, 81, 80, 75, 76]
ch0D = [80, 80, 78]
track = 0
channel = 0
time = 0  # In beats
duration = 1  # In beats
qnote = 1  # quarter note
enote = 0.5  # eighth note
snote = 0.25  # sixteenth note
tempo = 36  # Integer in BPM
volume = 100

# MIDIFile() creates the midi file ---------------------------------------------------------------
# removeDuplicates -> If set to True (the default), duplicate notes will be removed from the file.
mf = MIDIFile(1, removeDuplicates=False)
# MIDIFileArgs:(numTracks=1, removeDuplicates=True, deinterleave=True, adjust_origin=False, file_format=1, ticks_per_quarternote=960, eventtime_is_ticks=False)

# Adds the tempo to the file
mf.addTempo(track, time, tempo)
# TempoArgs:(track, time, tempo)

# ProgramChange -> Change the voice (instrument) of the pitch
# Have to do it for each channel being used and they can be different
mf.addProgramChange(track, 0, time, 0)
mf.addProgramChange(track, 1, time, 0)
mf.addProgramChange(track, 2, time, 0)
# ProgramChangeArgs:(track, channel, time, program)

# ControllerChange -> Controls various dynamics of the sound, e.g. mod wheel (1), pan (10), and sustain (64)
mf.addControllerEvent(track, 0, time, 10, 0)
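# Added illustration (not from the original file): the same addControllerEvent
# call can write continuous controller data. Assuming controller 10 (pan,
# 0 = hard left, 127 = hard right), this sweeps channel 0 across the stereo
# field over four beats; the step size is an arbitrary choice.
PAN_CONTROLLER = 10
for step in range(17):
    pan_value = min(127, step * 8)  # 0, 8, ..., 120, 127
    mf.addControllerEvent(track, 0, time + step * 0.25, PAN_CONTROLLER, pan_value)
# ControllerEventArgs:(track, channel, time, controller_number, parameter)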
Code example #23
0
def main(argv):
    argv = argv[0].split(",")
    sigmoidVal = float(argv[3])
    windStart = int(argv[1])
    windEnd = int(argv[2])
    file_name = ''.join(map(str, argv[0]))
    file_path = "./uploads/" + file_name
    print(file_name)
    print(file_path)
    model = Model()
    checkpoint = torch.load('./machine_learning/model/LSTM_model.pt',
                            map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    model.eval()

    preds = []

    sr, song = wavfile.read(file_path)  # Loading your audio
    song = song / abs(max(song.min(), song.max(), key=abs))
    same_segs = len(song) // 441

    for window in range(windStart, windEnd):
        pred = np.zeros(88)
        segs = len(song) // (441 * window)
        seg = song[:(len(song) // (441 * window)) * (441 * window)]
        x = seg.mean(1)  # Converting Stereo to Mono
        x = torch.tensor(
            x, device=device,
            dtype=torch.float)  # casting the array into a PyTorch Tensor
        x = x.contiguous().view(len(seg) // (441 * window), 441 * window)
        y_pred = model(x)
        to_numpy = y_pred.cpu().detach().numpy()
        for i in range(len(to_numpy)):
            pred = np.vstack((pred, to_numpy[i]))
        pred = pred[1:]
        pred = np.vstack((pred, np.zeros((same_segs - segs * window, 88))))
        preds.append(pred)

    preds = np.array(preds)
    pred = np.amax(preds, axis=0)
    pred[pred < sigmoidVal] = 0

    # The 88 prediction columns correspond to the 88 piano keys 🎹 (MIDI notes 21 to 108)
    new_notes = []
    for i in pred:
        x = np.where(i != 0)[0]
        x2 = x + 21  # shift column index to MIDI note number
        time = {}
        for j in range(len(x)):
            time[x2[j]] = round(i[x[j]], 5)
        new_notes.append(time)

    degrees = new_notes  # per-frame dicts of {MIDI note number: confidence}
    track = 0
    channel = 0
    time = 0.01  # In beats
    duration = 1  # In beats
    tempo = 60  # In BPM
    volume = 127  # 0-127, as per the MIDI standard

    MyMIDI = MIDIFile(
        1)  # One track, defaults to format 1 (tempo track is created
    # automatically)
    MyMIDI.addTempo(track, time, tempo)

    # Only add a note at its onset: skip pitches that were already active in
    # the previous frame (note that for i == 0 this compares against the last
    # frame, degrees[-1]).
    for i, pitches in enumerate(degrees):
        if degrees[i] != {}:
            for pitch in pitches:
                checkFirst = pitch not in degrees[i - 1]
                if checkFirst:
                    MyMIDI.addNote(track, channel, pitch, time * i, duration,
                                   volume)

    with open("./transcribed/outputtest.midi", "wb") as output_file:
        MyMIDI.writeFile(output_file)

    print(argv)
Code example #24
0
        if random.randint(1, 100) <= tomPercentage[i]:  # fire this step with probability tomPercentage[i] %
            events.append(make_event(allSteps[i], tom))


def reRollMid(amountSixTeenthNote):
    for i in range(amountSixTeenthNote):
        if random.randint(1, 100) <= midPercentage[i]:  # fire this step with probability midPercentage[i] %
            events.append(make_event(allSteps[i], mid))


def sortEvents():
    events.sort(key=lambda x: x['timeStamp'])


#___________MIDI____________
midiFile = MIDIFile(1)
track = 0
time = 0
duration = 1
volume = 100
midiFile.addTrackName(track, time, "eindopdracht")
midiFile.addTempo(track, time, BPM)


# Unpack an event and turn it into a MIDI note number
def retrievePitch(event):
    for i in range(3):
        if event['instrumentName'] == instrumentNames[i]:
            return instrumentMidiNums[i]

Code example #25
0
File: main.py Project: brandonfl/RythmML
    def build_midi(self, output_name):
        sections_config = [
            e.get_notes(self.notePatterns) for e in self.track.sections_config
        ]
        instruments_set = list()

        for section_config in sections_config:
            for value in section_config.values():
                if type(value) is list:
                    instruments_set += value
        instruments_set = {e.instrument for e in instruments_set}

        midi_tracks = dict()

        time = 0  # In beats
        duration = 1  # In beats
        volume = 100

        for i, instrument in enumerate(instruments_set):
            midi_tracks[instrument] = i

        MyMIDI = MIDIFile(
            len(instruments_set)
        )  # One track per instrument (the tempo track is created automatically)

        channels = {}
        channel_number = 0
        for instrument in instruments_set:
            if channel_number > 15:  # only 16 channels from 0 to 15
                return
            if instrument == 'drum':  # channel 9 reserved for drum
                channels[instrument] = 9
            else:
                if channel_number == 9:
                    channel_number += 1
                channels[instrument] = channel_number
            channel_number += 1

        for track in midi_tracks.values():
            MyMIDI.addTempo(track, time, self.bpm)

        for section_config in sections_config:
            for key in section_config.keys():
                if type(key) is int or type(key) is float:
                    for note_list in section_config[key]:
                        for note in note_list.notes:
                            track = midi_tracks[note_list.instrument]
                            channel = channels[note_list.instrument]
                            midi_number = (drum_notes[note]
                                           if note_list.instrument == 'drum'
                                           else notes[note])
                            if not note_list.instrument == 'drum':
                                program = instruments[note_list.instrument]
                                MyMIDI.addProgramChange(
                                    int(track), int(channel), 0, int(program))

                            MyMIDI.addNote(int(track), int(channel),
                                           int(midi_number), key,
                                           note_list.duration, volume)

        self.name = output_name.replace('.rml', '')
        with open("out/{}.mid".format(self.name), "wb") as output_file:
            MyMIDI.writeFile(output_file)
        print("Midi file saved in out/{}.mid".format(self.name))
Code example #26
0
File: fft_piano.py Project: 0xff800000/rawmusic2MIDI
def freq2note(f):
    return 69 + np.round(12 * np.log2(f / 440))
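# Quick sanity check, added for illustration: concert A (440 Hz) maps to
# MIDI note 69 and the octave above (880 Hz) to 81.
assert freq2note(440.0) == 69
assert freq2note(880.0) == 81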


## Decompose signal procedure
# 1. Decompose the signal into N windows
# 2. for each window[i]:
# 3.    compute the window's FFT: F(f)
# 4.    compute the corresponding notes and write them down:
#       MIDIFile.addNote(track, channel, pitch=freq2note(f), time=i*T_window, duration=T_window, volume=|F(f)|)
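# A minimal sketch of the procedure above, added for illustration. The input
# names (`samples`, `sample_rate`, `n_windows`) are assumptions rather than
# this script's real variables, and each window keeps only its single loudest
# non-DC bin instead of the full spectrum.
def decompose_to_midi(samples, sample_rate, n_windows, midi,
                      track=0, channel=0, t_window=0.5):
    window_len = len(samples) // n_windows
    for i in range(n_windows):
        window = samples[i * window_len:(i + 1) * window_len]
        spectrum = np.abs(np.fft.rfft(window))
        if spectrum.max() == 0:  # silent window, nothing to add
            continue
        freqs = np.fft.rfftfreq(window_len, d=1.0 / sample_rate)
        peak = spectrum[1:].argmax() + 1  # loudest bin, skipping DC
        pitch = int(max(0, min(127, freq2note(freqs[peak]))))
        volume = int(max(1, min(127, 127 * spectrum[peak] / spectrum.max())))
        midi.addNote(track, channel, pitch, i * t_window, t_window, volume)
# e.g. decompose_to_midi(mono_samples, 44100, 256, MyMIDI) would add one note
# per window to MyMIDI before it is written out (mono_samples being a 1-D
# numpy array of audio samples).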

#           C , D , E , F , G,  A , B , C
#           C , D , E , F , G, 440 , B , C , D , E , F , G , 880
degrees = [60, 62, 64, 65, 67, 69, 71, 72]  # MIDI note number
track = 0
channel = 0
time = 0  # In beats
duration = 1  # In beats
tempo = Tempo  # In BPM
volume = 100  # 0-127, as per the MIDI standard

MyMIDI = MIDIFile(1)  # One track
MyMIDI.addTempo(track, time, tempo)

for i, pitch in enumerate(degrees):
    MyMIDI.addNote(track, channel, pitch, time + i * T_window, T_window,
                   volume)

with open("major-scale.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)
Code example #27
0
fMinor = [
    notes['f'], notes['g'], notes['g#'], notes['a#'], notes['c'], notes['c#'],
    notes['d#']
]
drums = [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]

sequencerTrack = 0
drumTrack = 1
channel = 0
time = 0  # In beats
duration = .5  # In beats (half a quarter note, i.e. an eighth note)
tempo = 145  # In BPM
volume = 100  # 0-127, as per the MIDI standard

MyMIDI = MIDIFile(2)  # Two tracks, defaults to format 1 (tempo track
# automatically created)
MyMIDI.addTempo(sequencerTrack, time, tempo)
MyMIDI.addTempo(drumTrack, time, tempo)


def parseProtein(proteinFilePath):
    with open(proteinFilePath) as f:
        while True:
            c = f.read(1)
            if not c or c.isspace():
                print "End of file"
                writeFile()
                break
            parseCharacter(c)
Code example #28
0
        print("Please choose a bpm higher than 59.")
        bpm = input("-> ")
    elif int(bpm) > 200:
        print("Please choose a bpm lower than 200.")
        bpm = input("-> ")
    else:
        correctBpm = 1
bpm = int(bpm)
velocity = 100

#initialize midifile
track = 0
channel = 9
Mtime = 0
duration = 0.25
MyMIDI = MIDIFile(2)
MyMIDI.addTempo(track, Mtime, bpm)
sleepTime = 60 / (bpm * 4)  # seconds per sixteenth note (e.g. 0.125 s at 120 BPM)

#set how many times the beat should be played
print("how many repititions?")
repetitions = input("-> ")
while repetitions.isdigit() == False:
    print("Please input integer.")
    repetitions = input("-> ")
repetitions = int(repetitions)

#make three empty lists for Kick, Snare and Misc sound
listKick = []
listSnare = []
listMisc = []
Code example #29
0
pool.map(f, notes4)

### MIDI CREATION ####

from midiutil import MIDIFile

degrees = notes_trans2  # MIDI note number
# degrees  = [60, 62, 64, 65, 67, 69, 71, 72]  # MIDI note number
track = 0
channel = 0
time = 0  # In beats
duration = 1  # In beats
tempo = 60  # In BPM
volume = 100  # 0-127, as per the MIDI standard

MyMIDI = MIDIFile(1)  # One track, defaults to format 1 (tempo track is created
# automatically)

MyMIDI.addTempo(track, time, tempo)

for i, pitch in enumerate(degrees):
    MyMIDI.addNote(track, channel, pitch, time + i, duration, volume)

with open("wheat1test.random.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)

##### PROCESS GENE SEQUENCES ####

aaseq = open(
    "/mnt/e/phd.project.main/rotation1scripts_v4/original_data/datasonification/genes.fa",
    "r")
aaseq2 = aaseq.read()
Code example #30
0
#!/usr/bin/env python

from midiutil import MIDIFile

# degrees  = [60, 62, 64, 65, 67, 69, 71, 72]  # MIDI note number
degrees = [65] * 3600
track = 0
channel = 0
time = 0  # In beats
duration = 1  # In beats
tempo = 60  # In BPM
volume = 100  # 0-127, as per the MIDI standard

MyMIDI = MIDIFile(
    1, file_format=0
)  # One track, written as a format 0 file (tempo and notes share that single track)
MyMIDI.addTempo(track, time, tempo)

for i, pitch in enumerate(degrees):
    MyMIDI.addNote(track, channel, pitch, time + i, duration, volume)

with open("test.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)