Example #1
def testQuantizeToStep(self):
  # 8.0001 seconds at 4 steps per second is 32.0004 steps: rounds down to 32.
  self.assertEqual(
      32, sequences_lib.quantize_to_step(8.0001, 4))
  # 33.9996 steps rounds up to 34 under the default quantize_cutoff of 0.5.
  self.assertEqual(
      34, sequences_lib.quantize_to_step(8.4999, 4))
  # A quantize_cutoff of 1.0 never rounds up, so 33.9996 steps becomes 33.
  self.assertEqual(
      33, sequences_lib.quantize_to_step(8.4999, 4, quantize_cutoff=1.0))
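
The assertions follow from a cutoff-based rounding rule: a time in seconds is scaled to steps, and the result rounds up only once its fractional part reaches quantize_cutoff. A minimal sketch of that rule, mirroring the signature used above (an illustration, not Magenta's actual implementation):

def quantize_to_step_sketch(unquantized_seconds, steps_per_second,
                            quantize_cutoff=0.5):
  # Scale seconds to fractional steps, then shift by (1 - cutoff) so that
  # truncation rounds up exactly when the fraction reaches the cutoff.
  unquantized_steps = unquantized_seconds * steps_per_second
  return int(unquantized_steps + (1 - quantize_cutoff))

assert quantize_to_step_sketch(8.0001, 4) == 32  # 32.0004 -> 32
assert quantize_to_step_sketch(8.4999, 4) == 34  # 33.9996 -> 34
assert quantize_to_step_sketch(8.4999, 4, quantize_cutoff=1.0) == 33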
Example #2
def process_note_on(addr, tags, args, source):
    """Handler for `/processnoteon` messages from SuperCollider.

    This processes a key press on the MIDI controller, as detected by
    SuperCollider. Depending on the server's state it does different things:
    in 'bass' and 'chords' modes it records the quantized note, and in
    'improv' mode it either substitutes notes from the generated melody or
    accumulates primer notes for generation.

    Args:
      addr: Address the message was sent to.
      tags: Tags in the message.
      args: Arguments passed in the message.
      source: Source of the sender.
    """
    global accumulated_primer_melody
    global generated_melody
    global min_primer_length
    global note_mapping
    global improv_status
    global mode
    global qpm
    global time_signature
    global start_time
    global last_first_beat
    global last_first_beat_for_record
    global bass_line
    global playable_notes
    global improv_volume
    sound = 'wurly'
    note = list(args)
    if mode == 'bass':
        sound = 'bass'
        if (last_first_beat_for_record is None
                or last_first_beat == last_first_beat_for_record):
            curr_time = time.time()
            steps_per_second = -(-qpm * 4 // 60)  # Negated floor division yields the ceiling.
            curr_time_step = sequences_lib.quantize_to_step(
                curr_time - last_first_beat, steps_per_second)
            playable_notes.add(
                PlayableNote(type='bass',
                             note=note,
                             instrument='bass',
                             onset=curr_time_step))
            bass_line.append(
                PlayableNote(type='drums',
                             note=note,
                             instrument='bass',
                             onset=curr_time_step))
            last_first_beat_for_record = last_first_beat
    elif mode == 'chords':
        sound = 'chords'
        if (last_first_beat_for_record is None
                or last_first_beat == last_first_beat_for_record):
            curr_time = time.time()
            steps_per_second = -(-qpm * 4 // 60)  # Negated floor division yields the ceiling.
            curr_time_step = sequences_lib.quantize_to_step(
                curr_time - last_first_beat, steps_per_second)
            playable_notes.add(
                PlayableNote(type='chords',
                             note=note,
                             instrument='chords',
                             onset=curr_time_step))
            last_first_beat_for_record = last_first_beat
    elif mode == 'improv':
        sound = 'wurly'
        # If we have data in the generated melody, substitute the human's
        # notes with generated ones.
        if generated_melody:
            if improv_status != 'robot':
                improv_status = 'robot'
                print_status()
            # To avoid stuck notes, send a note off for previous mapped note.
            prev_note = list(args)
            prev_note[0] = note_mapping[args[0]]
            send_stopnote(prev_note)
            # Map the played pitch to the next generated pitch so the matching
            # note off can be translated later, then advance the melody.
            note_mapping[args[0]] = generated_melody[0]
            note[0] = generated_melody[0]
            note[1] *= improv_volume
            generated_melody = generated_melody[1:]
        else:
            if improv_status != 'psc':
                improv_status = 'psc'
                print_status()
            note[1] *= improv_volume
            # Accumulate the human's pitches as primer material for generation.
            accumulated_primer_melody.append(args[0])
        # Once enough primer notes have accumulated, generate a melody
        # continuation on a background thread so this handler stays responsive.
        if len(accumulated_primer_melody) >= min_primer_length:
            magenta_thread = threading.Thread(target=generate_melody)
            magenta_thread.start()
    send_playnote(note, sound)
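
The handler relies on names defined elsewhere in the demo (PlayableNote, playable_notes, send_playnote, generate_melody, and the various globals). The field names visible in the constructor calls suggest a simple record kept in onset order; the sketch below is an assumption for illustration, not the demo's actual definitions:

import collections

from sortedcontainers import SortedList

# Hypothetical record matching the keyword arguments used in the handler.
PlayableNote = collections.namedtuple(
    'PlayableNote', ['type', 'note', 'instrument', 'onset'])

# A SortedList keyed on onset supports the .add() calls above and keeps the
# recorded notes in playback order (the notes hold unhashable lists, so a
# plain set would not work here).
playable_notes = SortedList(key=lambda playable: playable.onset)
playable_notes.add(
    PlayableNote(type='bass', note=[36, 100], instrument='bass', onset=4))
playable_notes.add(
    PlayableNote(type='chords', note=[60, 90], instrument='chords', onset=0))
print([p.onset for p in playable_notes])  # [0, 4]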