Example #1
    def __new__(cls, sample, mode=Modes.ALL_NOTES, key=Notes.C):
        if isinstance(sample, float):
            value = sample
        else:
            # assume it's something to be analysed
            value = analyse.musical_detect_pitch(sample) or 0.0
        return float.__new__(cls, value)
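This `__new__` comes from a `float` subclass, so a detected pitch behaves like an ordinary number. A minimal sketch of the enclosing class, assuming `Modes` and `Notes` are enums defined in the source project and `analyse` is the SoundAnalyse module:

import analyse  # SoundAnalyse

class Pitch(float):
    # Hypothetical reconstruction; the Modes/Notes defaults are omitted here.
    def __new__(cls, sample):
        if isinstance(sample, float):
            value = sample
        else:
            value = analyse.musical_detect_pitch(sample) or 0.0
        return float.__new__(cls, value)

# Usage: Pitch(60.0) or Pitch(samps), where samps is an int16 NumPy buffer.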
Example #2
def pitch_detection(data):
    samps = np.fromstring(data, dtype=np.int16)
    pitch = analyse.musical_detect_pitch(samps)
    if analyse.loudness(samps) > -25 and pitch is not None:
        return pitch
    else:
        return -1
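Here `data` is a raw byte buffer. A hedged usage sketch, assuming a PyAudio input stream opened elsewhere with a matching format:

# Assumed setup; not part of the original example.
import numpy as np
import pyaudio
import analyse

stream = pyaudio.PyAudio().open(format=pyaudio.paInt16, channels=1,
                                rate=44100, input=True)
print pitch_detection(stream.read(1024))  # MIDI pitch, or -1 when quiet or unpitched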
Example #3
def main():
    # Initial values.
    loudness = -40
    loop_count = 0

    # Main control loop.
    while True:
        loop_count += 1

        # Read raw microphone data
        rawsamps = INPUT_STREAM.read(1024)
        # Convert raw data to NumPy array
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
        # Measure the volume and pitch
        loudness, pitch = (analyse.loudness(samps),
                           analyse.musical_detect_pitch(samps))

        # Poll for config changes.
        if loop_count % 100 == 0:
            print '\n\n Updating config...\n\n\n'
            # request new config and update.

        # Visualize the volume and pitch.
        print loudness, pitch
        show_loudness(loudness)

        if loudness > -7:
            INPUT_STREAM.stop_stream()
            shush()
            INPUT_STREAM.start_stream()
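`INPUT_STREAM`, `show_loudness`, and `shush` live outside this excerpt. A plausible setup, with the two helpers as assumed stand-ins:

import numpy
import pyaudio
import analyse

pyaud = pyaudio.PyAudio()
INPUT_STREAM = pyaud.open(format=pyaudio.paInt16, channels=1,
                          rate=44100, input=True)

def show_loudness(loudness):
    # Assumed stand-in: a text meter for the roughly -40..0 dB range.
    print '#' * int(max(0, loudness + 40))

def shush():
    # Hypothetical reaction to the room getting too loud.
    print 'Shhh!'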
Example #4
def sampleAudio(s):
    "stream and process s seconds from the microphone"
    
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=1, rate=RATE,
        input=True, output=True,
        input_device_index=0,
        frames_per_buffer=CHUNK_SIZE)

    try:
        raw_data = stream.read(int(s * RATE))  # s seconds of frames
        # little endian, signed short
        snd_data = numpy.fromstring(raw_data, dtype=numpy.int16)
    except IOError:
        print "You ruined EVERYTHING!"
        sys.exit()

    if byteorder == 'big':
        snd_data.byteswap()

    sample_width = p.get_sample_size(FORMAT) # size in bytes
    stream.stop_stream()
    stream.close()
    p.terminate()

    return analyse.loudness(snd_data), analyse.musical_detect_pitch(snd_data)
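`FORMAT`, `RATE`, `CHUNK_SIZE`, and `byteorder` are module-level names in the source. Assumed definitions with typical values:

import sys
import numpy
import pyaudio
import analyse
from sys import byteorder

FORMAT = pyaudio.paInt16  # assumed
RATE = 44100              # assumed
CHUNK_SIZE = 1024         # assumed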
Example #5
def getNote():
    global last_pitch, last_loudness
    while True:
        try:
            # Read raw microphone data
            rawsamps = stream.read(1024)
            # Convert raw data to NumPy array
            samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
            # Measure the volume and pitch
            loudness = analyse.loudness(samps)
            pitch = analyse.musical_detect_pitch(samps)
        except IOError:
            continue
        if not pitch:
            continue

        # Offset so that level 0 sits near middle C (MIDI 60).
        level = pitch - 60.018

        if pitch and last_pitch:
            pitch_diff = pitch - last_pitch
        else:
            pitch_diff = 100
        loudness_diff = loudness - last_loudness

        last_pitch = pitch
        last_loudness = loudness
        # Skip notes that are fading out while the pitch jumps upward.
        if loudness_diff < 0 and pitch_diff > 2.0:
            continue

        print 'OK', round(level), pitch
        return level
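`stream`, `last_pitch`, and `last_loudness` are module globals in the source. An assumed setup:

import numpy
import pyaudio
import analyse

stream = pyaudio.PyAudio().open(format=pyaudio.paInt16, channels=1,
                                rate=44100, input=True)
last_pitch = 0
last_loudness = -40  # dB; assumed quiet-room baseline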
Example #6
def main():
    # Initial values.
    loudness = -40
    loop_count = 0

    # Main control loop.
    while True:
        loop_count += 1

        # Read raw microphone data
        rawsamps = INPUT_STREAM.read(1024)
        # Convert raw data to NumPy array
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
        # Measure the volume and pitch
        loudness, pitch = (
            analyse.loudness(samps),
            analyse.musical_detect_pitch(samps)
        )

        # Poll for config changes.
        if loop_count % 100 == 0:
            print '\n\n Updating config...\n\n\n'
            # request new config and update.

        # Visualize the volume and pitch.
        print loudness, pitch
        show_loudness(loudness)

        if loudness > -7:
            INPUT_STREAM.stop_stream()
            shush()
            INPUT_STREAM.start_stream()
Example #7
def record():
    """
    Record a word or words from the microphone and 
    return the data as an array of signed shorts.

    Normalizes the audio, trims silence from the 
    start and end, and pads with 0.5 seconds of 
    blank sound to make sure VLC et al can play 
    it without getting chopped off.
    """
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=1, rate=RATE,
        input=True, output=True,
        input_device_index=0,
        frames_per_buffer=CHUNK_SIZE)

    num_silent = 0
    snd_started = False

    r = array('h')

    while True:
        try:
            raw_data = stream.read(CHUNK_SIZE)
            # little endian, signed short
            snd_data = numpy.fromstring(raw_data, dtype=numpy.int16)
        except IOError:
            print "You ruined EVERYTHING!"
            sys.exit()

        if byteorder == 'big':
            snd_data.byteswap()
        r.extend(snd_data)

        silent = is_silent(snd_data)

        if silent and snd_started:
            num_silent += 1
        elif not silent and not snd_started:
            #print "TRIGGERED: ",analyse.loudness(snd_data), analyse.musical_detect_pitch(snd_data)
            snd_started = True

        if snd_started and num_silent > 30:
            break

        if not snd_started:
            print analyse.loudness(snd_data), analyse.musical_detect_pitch(snd_data)

    sample_width = p.get_sample_size(FORMAT)
    stream.stop_stream()
    stream.close()
    p.terminate()

    r = normalize(r)
    r = trim(r)
    #r = add_silence(r, 0.5)
    return sample_width, r
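`is_silent`, `normalize`, and `trim` follow the well-known PyAudio recording recipe this example builds on. Minimal sketches, with `THRESHOLD` and `MAXIMUM` as assumed tuning constants:

from array import array

THRESHOLD = 500   # assumed silence threshold, in int16 units
MAXIMUM = 16384   # assumed normalization peak

def is_silent(snd_data):
    "Return True if the chunk stays below the volume threshold."
    return max(snd_data) < THRESHOLD

def normalize(snd_data):
    "Scale the samples so the peak hits MAXIMUM."
    times = float(MAXIMUM) / max(abs(i) for i in snd_data)
    return array('h', (int(i * times) for i in snd_data))

def trim(snd_data):
    "Trim leading and trailing silence."
    def _trim(sd):
        started = False
        r = array('h')
        for i in sd:
            if not started and abs(i) > THRESHOLD:
                started = True
            if started:
                r.append(i)
        return r
    snd_data = _trim(snd_data)   # left side
    snd_data.reverse()
    snd_data = _trim(snd_data)   # right side
    snd_data.reverse()
    return snd_data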
Example #8
    def run(self):
        inputObj = self.get_input("input")            # input interface object
        outputGraph = self.get_output("outputGraph")  # output interface object
        outputPitch = self.get_output("outputPitch")  # output interface object
        prev_note = 0

        #init midi
        track = 0
        time = 0
        MyMIDI = MIDIFile(1)
        MyMIDI.addTrackName(track,time,"Sample Track")
        MyMIDI.addTempo(track,time,120)

        try:
            while self.running():
                data_input = inputObj.read()

                N = data_input["N"]
                audioData = base64.b64decode(data_input["data"])
                MAX_y = data_input["MAX_y"]

                y = np.array(struct.unpack("%dh" % (N * CHANNELS), audioData)) / MAX_y
                y_L = y[::2]
                y_R = y[1::2]

                Y_L = np.fft.fft(y_L, nFFT)
                Y_R = np.fft.fft(y_R, nFFT)

                # Combine the FFT channels; the DC bin comes from the right channel
                Y = abs(np.hstack((Y_L[-nFFT/2:-1], Y_R[:nFFT/2])))

                samples = np.fromstring(audioData, dtype=np.int16)

                # detect the pitch
                rawnote = analyse.musical_detect_pitch(samples)

                if rawnote is not None:
                    note = np.rint(rawnote)

                    # send the note to the output
                    outputPitch.send(note)

                    if note != prev_note:

                        #MyMIDI.addNote(track,channel,pitch,time,duration,volume)
                        MyMIDI.addNote(0,0,note,time,1,100)
                        time+=1
                        prev_note = note

                output = {"db_table": list(Y)}
                outputGraph.send(output)

        #save midi on exit
        except:
            binfile = open("output.mid", 'wb')
            MyMIDI.writeFile(binfile)
            binfile.close()
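`CHANNELS`, `nFFT`, and the imports are defined elsewhere in the source module. An assumed setup consistent with the stereo unpacking above:

import base64
import struct
import numpy as np
import analyse
from midiutil.MidiFile import MIDIFile  # assumed import path

CHANNELS = 2  # stereo: y[::2] and y[1::2] split left and right
nFFT = 512    # assumed FFT size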
Example #9
def raypitch():
    global strm
    global chunk
    thenote = 0.0
    try:
        if True:#0 != whattype[whoami]:
            thischunk = 8192
            sdata = strm.read(thischunk)
            swidth = pyaudio.paInt16
            window = numpy.blackman(thischunk)
            RATE = 44100
            # unpack the int16 samples and multiply by the Blackman window
            indata = numpy.array(wave.struct.unpack("%dh"%(len(sdata)*4/swidth),sdata))*window
            # Take the fft and square each value
            fftData=abs(numpy.fft.rfft(indata))**2
            # find the maximum
            which = fftData[1:].argmax() + 1
            # use quadratic interpolation around the max
            if which != len(fftData)-1:
                y0,y1,y2 = numpy.log(fftData[which-1:which+2:])
                x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
                # find the frequency and output it
                thefreq = (which+x1)*RATE/thischunk
            else:
                thefreq = which*RATE/thischunk
            thenote = pitchtools.f2m(thefreq)
            #if thenote > 32 and thenote < 90:
                #print "The freq is %f Hz. Note is %f" % (thefreq, thenote)
                #pass
            #else:
                #thenote = 0.0
        else:
            sdata = strm.read(chunk)
            samps = numpy.fromstring(sdata, dtype=numpy.int16) # Convert raw data to NumPy array
            rayfeq = analyse.musical_detect_pitch(samps)
            #print (rayfeq)
            if rayfeq > 0:
                #strm.stop_stream()
                #rayint = round(rayfeq,1)
                if True:#rayint <= 83:
                    #rayloud = analyse.loudness(samps)
                    #rayampval = rayloud + 100 #rayampval = raymap(rayloud, -127, 0, 0, 127)
                    #print (rayfeq, rayampval)
                    thenote = rayfeq
                #strm.start_stream()
    except IOError, e:
        if e.args[1] == pyaudio.paInputOverflowed:
            pass
        else:
            raise
    return thenote
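`pitchtools.f2m` converts a frequency in Hz to a (fractional) MIDI note number. The standard conversion, as a stand-in if pitchtools is unavailable:

import math

def f2m(freq, a4=440.0):
    # MIDI 69 is A4; 12 semitones per octave.
    return 69 + 12 * math.log(freq / a4, 2)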
Example #10
def start_stream ():
    """This function should be executed in a separated thread because of infinite loop"""
    inp = alsaaudio.PCM(type= alsaaudio.PCM_CAPTURE, mode=alsaaudio.PCM_NORMAL,device='default')
    inp.setchannels(1)
    inp.setrate(44100)
    inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    inp.setperiodsize(3072)
     
    while True:
        length, data = inp.read()
        samps = numpy.fromstring(data, dtype='int16')

        pitch = analyse.musical_detect_pitch(samps)
        hz = analyse.detect_pitch(samps)
        # CallAfter is necessary for making GUI method calls from non-GUI threads
        wx.CallAfter(show_everything, pitch, hz)  # pitch and hz are passed as arguments
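`show_everything` is a GUI callback defined elsewhere. An assumed minimal stand-in:

def show_everything(pitch, hz):
    # The source presumably updates wx widgets here; printing keeps the sketch runnable.
    print "MIDI: %s   Hz: %s" % (pitch, hz)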
Example #11
    def checkLevels(self):
        ok = False

        try:
            ok, data = self.mics[0].read()
        except Exception:
            print "error reading from microphone"

        if not ok or len(data) < 2048:
            return (False, 0, 0)

        samps = numpy.fromstring(data, dtype=numpy.int16)

        pitch = analyse.musical_detect_pitch(samps)
        loudness = min(0, max(-32, analyse.loudness(samps)))

        return (True, pitch, loudness)
Example #12
def inputSound():
    # Initialize PyAudio
    pyaud = pyaudio.PyAudio()
    
    # Open input stream, 16-bit mono at 44100 Hz
    # On my system, device 2 is a USB microphone, your number may differ.
    stream = pyaud.open(
        format = pyaudio.paInt16,
        channels = 1,
        rate = 44100,
        input_device_index = 1,
        input = True)
    times = []
    end = []
    notes = cleanup.notes
    notesSung = []  # starts empty so the opening silence counts as a pause
    print "*listening*"
    start = time.time()
    fun = []
    while cleanup.silentEnd(notesSung):
        times.append(time.time()-start)
        fun.append("*")
        # Read raw microphone data
        rawsamps = stream.read(1024)
        # Convert raw data to NumPy array
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
        # Detect the pitch and map it to a note name
        freq = analyse.musical_detect_pitch(samps)
        notesSung.append(cleanup.getNote(freq, notes))
    print "*Done*"
    print times
    leng = len(notesSung)
    cleanup.cleanUp(notesSung)
    print notesSung
    print len(times), leng, len(notesSung), len(fun)
    print
    print
    cleanup.removeRepeats(notesSung)
    print notesSung
Example #13
def read():
  INPUT_INDEX = int(sys.argv[1])

  # Open input stream, 16-bit mono at 8000 Hz
  # On my system, device 4 is a USB microphone
  stream = pyaud.open(
    format = pyaudio.paInt16,
    channels = 1,
    rate = 8000,
    input_device_index = INPUT_INDEX,
    input = True)

  while True:
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Show the volume and pitch
    print analyse.loudness(samps), analyse.musical_detect_pitch(samps)
Example #14
def inputSound():
    # Initialize PyAudio
    pyaud = pyaudio.PyAudio()
    global recording 
    recording = True

    # Open input stream, 16-bit mono at 44100 Hz
    # On my system, device 2 is a USB microphone, your number may differ.
    stream = pyaud.open(
        format = pyaudio.paInt16,
        channels = 1,
        rate = 44100,
        input_device_index = 1,
        input = True)
    times = []
    end = []
    notes = cleanup.notes
    global notesSung
    notesSung = []
    print "*listening*"
    start = time.time()
    # while not cleanup.silentEnd(notesSung):
    while not cleanup.ended(notesSung):
        # Read raw microphone data
        rawsamps = stream.read(1024)
        # Convert raw data to NumPy array
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
        # Detect the pitch and map it to a note name
        freq = analyse.musical_detect_pitch(samps)
        notesSung.append(cleanup.getNote(freq, notes))
        
    print "*Done*"
    recording = False
    # print notesSung
    cleanup.cleanUp(notesSung)
    # print notesSung
    # print
    cleanup.removeRepeats(notesSung)
    return notesSung
Example #15
def inputSound():
    # Initialize PyAudio
    pyaud = pyaudio.PyAudio()

    # Open input stream, 16-bit mono at 44100 Hz
    # On my system, device 2 is a USB microphone, your number may differ.
    stream = pyaud.open(format=pyaudio.paInt16,
                        channels=1,
                        rate=44100,
                        input_device_index=1,
                        input=True)
    times = []
    end = []
    notes = cleanup.notes
    notesSung = []  # starts empty so the opening silence counts as a pause
    print "*listening*"
    start = time.time()
    fun = []
    while cleanup.silentEnd(notesSung):
        times.append(time.time() - start)
        fun.append("*")
        # Read raw microphone data
        rawsamps = stream.read(1024)
        # Convert raw data to NumPy array
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
        # Detect the pitch and map it to a note name
        freq = analyse.musical_detect_pitch(samps)
        notesSung.append(cleanup.getNote(freq, notes))
    print "*Done*"
    print times
    leng = len(notesSung)
    cleanup.cleanUp(notesSung)
    print notesSung
    print len(times), leng, len(notesSung), len(fun)
    print
    print
    cleanup.removeRepeats(notesSung)
    print notesSung
Example #16
def main():
    pyaud = pyaudio.PyAudio()

    stream = pyaud.open(format=pyaudio.paInt16,
                        channels=2,
                        rate=44100,
                        input_device_index=1,
                        input=True)

    last_note = last_vol = last_time = 0

    while True:
        t = timing.get_time()

        rawsamps = stream.read(SAMPLE_SIZE)
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)

        event = ''

        midi_note = analyse.musical_detect_pitch(samps, min_note=28.0)

        if midi_note:
            midi_note += OCTAVE_CORRECTION

            latest_note = notes.midi_to_note(midi_note)
            latest_vol = analyse.loudness(samps)

            attacked = latest_vol - last_vol > ATTACK_THRESHOLD

            if latest_note != last_note or attacked:
                if latest_vol > MINIMUM_VOLUME:
                    event = {'note': latest_note, 'time': t}
                    last_time = t

                last_note = latest_note
                last_vol = latest_vol

        elif last_note:
            last_note = None

        print event
        sys.stdout.flush()
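`SAMPLE_SIZE`, `OCTAVE_CORRECTION`, `ATTACK_THRESHOLD`, and `MINIMUM_VOLUME` are project constants, and `timing`/`notes` are project helpers. Assumed values for a runnable sketch:

import sys
import numpy
import pyaudio
import analyse

SAMPLE_SIZE = 1024        # frames per read (assumed)
OCTAVE_CORRECTION = 12    # semitone shift applied to detections (assumed)
ATTACK_THRESHOLD = 10     # dB jump that counts as a new attack (assumed)
MINIMUM_VOLUME = -32      # dB floor below which notes are ignored (assumed)
# timing.get_time and notes.midi_to_note are project helpers; time.time()
# and a MIDI-number-to-name table would be plausible stand-ins.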
Example #17
def raypitch():
    global strm
    try:
        rawsamps = strm.read(1024) # Read raw microphone data
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16) # Convert raw data to NumPy array
        rayfeq = analyse.musical_detect_pitch(samps)
        if rayfeq > 0:
            #strm.stop_stream()
            rayint = round(rayfeq,1)
            if True:#rayint <= 83:

                rayloud = analyse.loudness(samps)
                rayampval = rayloud + 100 #rayampval = raymap(rayloud, -127, 0, 0, 127)
                #print (rayfeq, rayampval)
                return rayint, rayampval

            #strm.start_stream()
    except IOError, e:
        if e.args[1] == pyaudio.paInputOverflowed:
            rawsamps = '\x00'
        else:
            raise
Example #18
def main():
    pyaud = pyaudio.PyAudio()

    stream = pyaud.open(format=pyaudio.paInt16, channels=2, rate=44100, input_device_index=1, input=True)

    last_note = last_vol = last_time = 0

    while True:
        t = timing.get_time()

        rawsamps = stream.read(SAMPLE_SIZE)
        samps = numpy.fromstring(rawsamps, dtype=numpy.int16)

        event = ""

        midi_note = analyse.musical_detect_pitch(samps, min_note=28.0)

        if midi_note:
            midi_note += OCTAVE_CORRECTION

            latest_note = notes.midi_to_note(midi_note)
            latest_vol = analyse.loudness(samps)

            attacked = latest_vol - last_vol > ATTACK_THRESHOLD

            if latest_note != last_note or attacked:
                if latest_vol > MINIMUM_VOLUME:
                    event = {"note": latest_note, "time": t}
                    last_time = t

                last_note = latest_note
                last_vol = latest_vol

        elif last_note:
            last_note = None

        print event
        sys.stdout.flush()
Example #19
    def run(self):
        notes_array = [0] * 12
        # Index of each note name in the count array (A first).
        note_order = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']
        count = 0

        p = pyaudio.PyAudio()

        stream = p.open(format=FORMAT,   # open the stream once, outside the loop
                        channels=CHANNELS,
                        rate=RATE,
                        input=True)

        while True:
            rawsamps = stream.read(CHUNK)  # read a chunk of raw sound input
            samps = numpy.fromstring(rawsamps, dtype=numpy.int16)  # convert to a NumPy int16 array

            midi = analyse.musical_detect_pitch(samps)  # find the associated MIDI pitch, and so the note
            print midi
            if midi is not None:
                note = midi_dict[int(midi)]  # pull the note name from the dictionary
                notes_array[note_order.index(note)] += 1  # update the note array
                count += 1
                if count == 50:  # after 50 pitched samples, hand the array to key recognition
                    action.put(notes_array)
                    count = 0
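`midi_dict`, the stream constants, and the `action` queue come from outside this method; judging from Example #25, `midi_dict` maps MIDI numbers to note names chromatically from C. A compact stand-in:

import Queue
import pyaudio

NOTES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
midi_dict = dict((i, NOTES[i % 12]) for i in range(128))

FORMAT = pyaudio.paInt16  # assumed
CHANNELS = 1              # assumed
RATE = 44100              # assumed
CHUNK = 1024              # assumed
action = Queue.Queue()    # assumed hand-off queue to the key recognizer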
Example #20
import numpy
import pyaudio
import analyse
import matplotlib.pyplot as plt

# Initialize PyAudio
pyaud = pyaudio.PyAudio()

# Open input stream, 16-bit mono at 44100 Hz
stream = pyaud.open(format=pyaudio.paInt16, channels=1, rate=44100, input_device_index=1, input=True)


vols = []
pitches = []
for _ in range(100):
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Record the volume and pitch (0.0 when no pitch is detected)
    vols.append(analyse.loudness(samps))
    pitches.append(analyse.musical_detect_pitch(samps) or 0.0)

    print vols[-1]

plt.plot(vols)
plt.plot(pitches, "ro")
plt.show()
Example #21
        re.refresh()

    t = timing.get_time()

    available = stream.get_read_available()
    sample_size = int(paramaters.sample_size)
    if available <= sample_size:
        time.sleep(0.01)
        continue

    rawsamps = stream.read(available)
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16, count=sample_size)

    event = ''

    midi_note = analyse.musical_detect_pitch(samps, min_note=28.0)

    if midi_note:
        midi_note += paramaters.octave_correction * 12

        latest_note = notes.midi_to_note(midi_note)
        latest_vol = analyse.loudness(samps)

        attacked = latest_vol - last_vol > paramaters.attack_threshold

        if latest_note != last_note or attacked:
            if latest_vol > paramaters.minimum_volume:
                event = {'note': latest_note, 'time': t}
                last_time = t

            last_note = latest_note
Example #22
        re.refresh()

    t = timing.get_time()
    
    available = stream.get_read_available()
    sample_size = int(paramaters.sample_size)
    if available <= sample_size:
        time.sleep(0.01)
        continue

    rawsamps = stream.read(available)
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16, count=sample_size)

    event = ''
    
    midi_note = analyse.musical_detect_pitch(samps, min_note=28.0)
    
    if midi_note:
        midi_note += paramaters.octave_correction * 12

        latest_note = notes.midi_to_note(midi_note)
        latest_vol = analyse.loudness(samps)

        attacked = latest_vol - last_vol > paramaters.attack_threshold

        if latest_note != last_note or attacked:
            if latest_vol > paramaters.minimum_volume:
                event = {'note': latest_note, 'time': t}
                last_time = t

            last_note = latest_note
Example #23
 
    tm += clk.tick()
    if tm > 8000:
        tm = 0
        history = []
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Visualize the volume and pitch
    screen.fill((0, 0, 0))
    tx = int((tm / 8000.0) * 1024.0)
    if tx > 1024: tx -= 1024
    pygame.draw.rect(screen, (0, 255, 0), (tx, 0, 3, 768))
    m, v = None, None
    m = analyse.musical_detect_pitch(samps, samplerate=samplerate)
    v = analyse.loudness(samps)
    if m is not None:
        # m is in the range of about 40-80
        ty = (m - 40.0) / 40.0
        # ty is in range 0-1
        ty = int(768 - ty * 768.0)
        # now ty is between 0 and 768
        pygame.draw.rect(screen, (0, 255, 255), (0, ty, 1024, 3))
        history.append((tx, ty))
    for (x, y) in history:
        pygame.draw.rect(screen, (255, 0, 0), (x, y, 3, 3))
    pygame.display.flip()
    for evt in pygame.event.get():
        if evt.type == pygame.KEYDOWN:
            sys.exit()
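This fragment is a main-loop body; `clk`, `screen`, `history`, `tm`, `stream`, and `samplerate` come from setup code outside the excerpt. An assumed initialization:

import sys
import numpy
import pygame
import pyaudio
import analyse

samplerate = 44100
stream = pyaudio.PyAudio().open(format=pyaudio.paInt16, channels=1,
                                rate=samplerate, input=True)

pygame.init()
screen = pygame.display.set_mode((1024, 768))
clk = pygame.time.Clock()
history = []
tm = 0
# while True:  # the fragment above is the body of this loop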
Example #24
import thread

timeout = True

def timer(numSeconds, dummy=True):
    global timeout
    time.sleep(numSeconds)
    timeout = True

# Initialize PyAudio
pyaud = pyaudio.PyAudio()

# Open input stream, 16-bit mono at 48000 Hz
# CSC: device 0 is the built-in mic?
# expected loudness output: -1 dB for very loud; down to -36 dB for typical silence
chunk=1024
stream = pyaud.open(
    format = pyaudio.paInt16,
    channels = 1,
    rate = 48000,
    input_device_index = 0,
    input = True)

while True:
    # Read raw microphone data
    rawsamps = stream.read(chunk)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Show the volume and pitch
    print analyse.loudness(samps), analyse.musical_detect_pitch(samps)
Example #25
    def run(self):
        CHUNK = 1024
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 22400

        # Map every MIDI number (0-127) to its chromatic note name.
        NOTES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
        midi_dict = dict((i, NOTES[i % 12]) for i in range(128))
        # Note counts, ordered A, A#, B, C, ... G#.
        note_order = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']
        notes_array = [0] * 12
        note_count = 0
        step_count = 0
        player_delay = False
        sound = "chord"  # default arguments for the player
        time = .5
        note = "C"
        vel = 120
        a_sy = autosynth.synth_Constructor(bpm)  # initialize the synth automaton
        p = pyaudio.PyAudio()

        stream = p.open(format=FORMAT,   # open stream
                        channels=CHANNELS,
                        rate=RATE,
                        input=True)
        while True:

            rawsamps = stream.read(CHUNK)  # read a chunk of raw sound input
            samps = numpy.fromstring(rawsamps, dtype=numpy.int16)  # convert to a NumPy int16 array

            midi = analyse.musical_detect_pitch(samps)  # find the associated MIDI pitch, and so the note
            step_count += 1
            if midi is not None:
                note = midi_dict[int(midi)]  # pull the note name from the dictionary
                print note
                notes_array[note_order.index(note)] += 1  # update the note array
                note_count += 1
            # After 35 pitched samples (or every 50 steps once the player has
            # started), update key recognition and play something.
            if note_count == 35 or (step_count == 50 and player_delay):
                step_count = 0
                if note_count == 35:
                    a_sy.update(notes_array)  # on new sounds, update the key, then return a sound
                    note_count = 0
                    player_delay = True
                t = random.randint(0, 5)  # random int decides whether the chord is arpeggiated
                if t == 3:
                    sound = "arp"
                    time = .5 / 4
                else:
                    sound = 'chord'
                    time = .5
                note = a_sy.player()
                play.put([sound, time, note, vel])  # call the actual player function
                if sound == 'arp':
                    sound = 'chord'
                    time = .5 / 4
                    play.put([sound, time, note, vel])
Example #26
CHUNK = 8192
lastfeq = 0

# Open input stream, 16-bit mono at 44100 Hz
# On my system, device 2 is a USB microphone, your number may differ.
stream = pyaud.open(
    format = pyaudio.paInt16,
    channels = 1,
    rate = 44100,
    input_device_index = 1,
    input = True,
    frames_per_buffer=CHUNK)

while True:
    # Read raw microphone data
    rawsamps = stream.read(CHUNK)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Show the volume and pitch
    rayfeq = analyse.musical_detect_pitch(samps)
    if rayfeq > 0 and math.fabs(rayfeq-lastfeq) > 2:
        stream.stop_stream()
        title=commands.getoutput("echo \"with_fx :reverb, mix: 0.9, phase: 0.25, room: 1 do sample :guit_em9, rate: 0.5 end\" | sonic_pi")
        time.sleep(30)
        title=commands.getoutput("echo \"stop\" | sonic_pi")
        stream.start_stream()
        lastfeq = rayfeq
        print (analyse.loudness(samps), rayfeq)
    else:
        lastfeq = 0
Example #27
#!/usr/bin/env python

import numpy
import pyaudio
import analyse

# Initialize PyAudio
pyaud = pyaudio.PyAudio()

# Open input stream, 16-bit mono at 44100 Hz
# On my system, device 2 is a USB microphone, your number may differ.
stream = pyaud.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=44100,
                    input_device_index=2,
                    input=True)

while True:
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Show the volume and pitch
    print analyse.loudness(samps), analyse.musical_detect_pitch(samps)
Example #28
    tm += clk.tick()
    if tm > 8000:
        tm = 0
        history = []
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Visualize the volume and pitch
    screen.fill((0, 0, 0))
    tx = int((tm / 8000.0) * 1024.0)
    if tx > 1024: tx -= 1024
    pygame.draw.rect(screen, (0, 255, 0), (tx, 0, 3, 768))
    m, v = None, None
    m = analyse.musical_detect_pitch(samps, samplerate=samplerate)
    v = analyse.loudness(samps)
    if m is not None:
        # m is in the range of about 40-80
        ty = (m - 40.0) / 40.0
        # ty is in range 0-1
        ty = int(768 - ty * 768.0)
        # now ty is between 0 and 768
        pygame.draw.rect(screen, (0, 255, 255), (0, ty, 1024, 3))
        history.append((tx, ty))
    for (x, y) in history:
        pygame.draw.rect(screen, (255, 0, 0), (x, y, 3, 3))
    pygame.display.flip()
    for evt in pygame.event.get():
        if evt.type == pygame.KEYDOWN:
            sys.exit()
Example #29
# Map every MIDI number (0-127) to its chromatic note name.
NOTES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
midi_dict = dict((i, NOTES[i % 12]) for i in range(128))
# Initialize PyAudio
pyaud = pyaudio.PyAudio()

# Open input stream, 16-bit mono at 44100 Hz
# On my system, device 2 is a USB microphone, your number may differ.
stream = pyaud.open(
    format = pyaudio.paInt16,
    channels = 1,
    rate = 44100,
    input = True)

while True:
    # Read raw microphone data
    rawsamps = stream.read(1024)
    # Convert raw data to NumPy array
    samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
    # Show the volume and the detected note
    midi = analyse.musical_detect_pitch(samps)
    print analyse.loudness(samps)
    if midi is not None:
        print midi_dict[int(midi)]