Example #1
 def __db_level(self, rms_mode: bool = False) -> Tuple[float, float]:
     """
     Returns the average audio volume level measured in dB (range -60 dB to 0 dB).
     If the sample is stereo, you get back a tuple: (left_level, right_level).
     If the sample is mono, you still get a tuple, but both values will be the same.
     This method is mainly useful when run on a sequence of very short sample fragments,
     so the dB levels can drive a level meter for the duration of the sample.
     """
     maxvalue = 2**(8 * self.__samplewidth - 1)
     if self.nchannels == 1:
         if rms_mode:
             peak_left = peak_right = (audioop.rms(
                 self.__frames, self.__samplewidth) + 1) / maxvalue
         else:
             peak_left = peak_right = (audioop.max(
                 self.__frames, self.__samplewidth) + 1) / maxvalue
     else:
         left_frames = audioop.tomono(self.__frames, self.__samplewidth, 1,
                                      0)
         right_frames = audioop.tomono(self.__frames, self.__samplewidth, 0,
                                       1)
         if rms_mode:
             peak_left = (audioop.rms(left_frames, self.__samplewidth) +
                          1) / maxvalue
             peak_right = (audioop.rms(right_frames, self.__samplewidth) +
                           1) / maxvalue
         else:
             peak_left = (audioop.max(left_frames, self.__samplewidth) +
                          1) / maxvalue
             peak_right = (audioop.max(right_frames, self.__samplewidth) +
                           1) / maxvalue
     # cut off at the bottom at -60 instead of all the way down to -infinity
     return max(20.0 * math.log(peak_left, 10),
                -60.0), max(20.0 * math.log(peak_right, 10), -60.0)
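For reference, here is a minimal standalone sketch of the same peak-to-dB conversion (an illustration, not part of the example above), assuming frames holds raw 16-bit little-endian PCM bytes:

import audioop
import math

def peak_db(frames, samplewidth=2):
    # scale the absolute peak to 0..1; the +1 offset keeps silence away from log(0)
    maxvalue = 2 ** (8 * samplewidth - 1)
    peak = (audioop.max(frames, samplewidth) + 1) / maxvalue
    # clamp at -60 dB instead of letting near-silence go to -infinity
    return max(20.0 * math.log10(peak), -60.0)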
Example #2
    def play(self, fileName):
        # Initialise matrix
        matrix = [0, 0, 0, 0, 0, 0, 0, 0]

        # Set up audio
        wavfile = wave.open(fileName, 'r')
        chunk = 1024
        output = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL)
        output.setchannels(1)
        output.setrate(22050)
        output.setformat(aa.PCM_FORMAT_S16_LE)
        output.setperiodsize(chunk)

        data = wavfile.readframes(chunk)
        try:
            while data:
                output.write(data)
                # Split channel data and find maximum volume
                channel_l = audioop.tomono(data, 2, 1.0, 0.0)
                channel_r = audioop.tomono(data, 2, 0.0, 1.0)
                max_vol_factor = 5000
                max_l = audioop.max(channel_l, 2) // max_vol_factor
                max_r = audioop.max(channel_r, 2) // max_vol_factor

                for i in range(1, 8):
                    self.generateMouthSignal((1 << max_r) - 1)

                data = wavfile.readframes(chunk)
        except:
            data = None

        os.system('/etc/init.d/alsa-utils restart')
        sleep(.25)
Example #3
def capture_audio():
    inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL)

    inp.setchannels(1)
    inp.setrate(16000)
    inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    inp.setperiodsize(512)

    loops = 290
    silence_counter = 80
    silence_threshold = 2500
    rf = open(path + 'recording.raw', 'wb')
    while loops > 0:
        loops -= 1
        l, data = inp.read()
        print(audioop.max(data, 2))
        if audioop.max(data, 2) < silence_threshold:
            silence_counter -= 1
            if silence_counter == 0:
                print("Silence detected ")
                break
        else:
            silence_counter = 60
        if l:
            rf.write(data)

    rf.close()
Example #4
def testmax(data):
    if verbose:
        print('max')
    if audioop.max(data[0], 1) != 2 or \
              audioop.max(data[1], 2) != 2 or \
              audioop.max(data[2], 4) != 2:
        return 0
    return 1
Example #5
def db_level(data, samplewidth=2, rms_mode=False):
    maxvalue = 2**(8 * samplewidth - 1)
    left_frames = audioop.tomono(data, samplewidth, 1, 0)
    right_frames = audioop.tomono(data, samplewidth, 0, 1)
    if rms_mode:
        peak_left = (audioop.rms(left_frames, samplewidth) + 1) / maxvalue
        peak_right = (audioop.rms(right_frames, samplewidth) + 1) / maxvalue
    else:
        peak_left = (audioop.max(left_frames, samplewidth) + 1) / maxvalue
        peak_right = (audioop.max(right_frames, samplewidth) + 1) / maxvalue
    return peak_left * 1000, peak_right * 1000
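Note that despite its name, this helper returns linear peak values scaled by 1000, not decibels. A hypothetical way to drive it, assuming a 16-bit stereo WAV file (the file name meter_test.wav is illustrative):

import wave

with wave.open('meter_test.wav', 'rb') as wf:
    # read roughly 100 ms of frames at a time and print a crude meter reading
    chunk = wf.getframerate() // 10
    frames = wf.readframes(chunk)
    while frames:
        left, right = db_level(frames, samplewidth=wf.getsampwidth())
        print(left, right)
        frames = wf.readframes(chunk)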
Example #6
def main():

    # Open the device in nonblocking capture mode. The last argument could
    # just as well have been zero for blocking mode. Then we could have
    # left out the sleep call in the bottom of the loop
    inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)

    # Set attributes: Mono, 8000 Hz, 16 bit little endian samples
    inp.setchannels(1)
    inp.setrate(8000)
    inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)

    # The period size controls the internal number of frames per period.
    # The significance of this parameter is documented in the ALSA api.
    # For our purposes, it is sufficient to know that reads from the device
    # will return this many frames. Each frame being 2 bytes long.
    # This means that the reads below will return either 320 bytes of data
    # or 0 bytes of data. The latter is possible because we are in nonblocking
    # mode.
    inp.setperiodsize(160)

    ringing = 0
    notified = False

    # pause here for the mic to calm down
    time.sleep(2)

    print("Listening...")
    while True:
        try:
            # Read data from device
            l, data = inp.read()
            if l:
                print(audioop.max(data, 2), end="\r")
                # Check the maximum of the absolute value of all samples in a fragment.
                if audioop.max(data, 2) > VOLUME_THRESHOLD:
                    ringing += 1 if ringing < RINGING_TIME else 0
                else:
                    ringing -= 1 if ringing > 0 else 0

            if ringing >= RINGING_TIME and notified == False:
                notified = True
                notify()

            # reset once quiet
            elif ringing == 0 and notified == True:
                print("Reset")
                notified = False

            time.sleep(.001)
        except:
            pass
Example #7
    def start(self):
        while True:
            # Read data from device
            l, data = self.inp.read()
            if l:
                # Return the maximum of the absolute value of all samples in a fragment.
                for i in range(106, 146):
                    self.strip.setPixelColor(i, 0)

                try:
                    volume = audioop.max(data, 2)
                except:
                    volume = 3
                scaled = volume // 5000
                if scaled > 8:
                    scaled = 8

                scaled = scaled - 1
                if scaled < 0:
                    scaled = 0

                pix = self.pixels[scaled]
                for pixel in pix:
                    index = 105 + pixel
                    self.strip.setPixelColor(index, Color(100, 100, 100))
                self.strip.show()
            time.sleep(.001)
Example #8
def AudioLevel(format):
    try:
        # Open the device in nonblocking capture mode. The last argument could
        # just as well have been zero for blocking mode. Then we could have
        # left out the sleep call in the bottom of the loop
        # print(alsaaudio.pcms())
        # print(alsaaudio.cards())
        if format == alsaaudio.PCM_FORMAT_S16_LE:  # select sound card automatically
            inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)
        else:  # select second sound card (the USB PnP microphone)
            inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK,
                                'whatever', 1)
        inp.setchannels(1)
        inp.setrate(8000)
        inp.setformat(
            format)  # PCM_FORMAT_S16_LE for laptop, PCM_FORMAT_U8 for pi
        bytes = 1  # 2 for laptop, 1 for pi
        if format == alsaaudio.PCM_FORMAT_S16_LE:
            bytes = 2
        inp.setperiodsize(160)
        volume = 0.0
        for i in range(0, 100):
            # Read data from device
            l, data = inp.read()
            if l:
                # Return the maximum of the absolute value of all samples in a fragment.
                volume += audioop.max(data, bytes)
            time.sleep(0.002)
        return (int(volume * volume / 1000.0 / 1000.0 / 2))
    except:
        return -1  # probably no microphone
Example #9
    def sense(self):
        ret_val = None
        data = self.__stream.read(1024)  # read one chunk
        a = abs(audioop.max(data, 2))
        #print a
        self.__queue.append(a)

        #check for silence
        silence = not any(x > self.__threshold for x in self.__queue)

        if self.__capture_on:
            if silence:
                self.__capture_on = False
                # perform translation
                #print "Stop capturing"
                ret_val = self.__do_stt()
                self.__audio_buffer = []
                self.__queue.clear()
            else:
                self.__audio_buffer.append(data)
        else:
            if not (silence):
                #print "Start capturing..."
                self.__capture_on = True
                self.__audio_buffer.append(data)

        return ret_val
Example #10
def play_sound(filename):
    color_provider = ColorProvider(20)

    f = wave.open(filename, 'rb')

    periodsize = int(f.getframerate() / 16)
    print("framerate: %f, periodsize: %f" % (f.getframerate(), periodsize))
    data = f.readframes(periodsize)

    period_length = 1 / 16
    counter = 1

    next_timestamp = time.time()

    while data:
        if time.time() >= next_timestamp:
            device.write(data)
            next_timestamp = time.time() + period_length
            vu = (math.log(float(max(audioop.max(data, 2), 1))) -
                  log_lo) / (log_hi - log_lo)
            volume = (min(max(int(vu * 100), 0), 100))
            print("%i: %f" % (counter, volume))
            r, g, b = color_provider.give_color()
            blinkt.set_all(r, g, b, brightness=volume / 200.0)
            blinkt.show()
            counter += 1
            data = f.readframes(periodsize)
    f.close()
    blinkt.clear()
    blinkt.show()
Example #11
def get_max_value(block):
    """Return maximum absolute sample value in block.

    The value is normalized to 0..1.
    """
    max_sample = 32768
    return audioop.max(block, sample_size) / max_sample
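sample_size above is assumed to be a module-level constant giving the sample width in bytes; the 32768 divisor implies 16-bit audio. A width-agnostic variant might look like this sketch:

import audioop

def get_max_value_for_width(block, width):
    # normalize the absolute peak to 0..1 for any sample width audioop supports
    return audioop.max(block, width) / float(2 ** (8 * width - 1))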
Example #12
def get_sound(inp):
	while True:
		l, data = inp.read()
		if l>0:
			val = audioop.max(data, 2)
			return({'sound': val})
		time.sleep(0.001)
Example #13
def removeSilence(data):
	# Get global variables
	global CurrentSilence, WavSplit, directory
	# Get max audio level for segment
	level = audioop.max(data, 2)
	# If the max level is below the threshold the silence counter is incremented
	if level < SilenceThreshold:
		CurrentSilence = CurrentSilence + 1
	# If the current silence is above the limit for silence
	if CurrentSilence >=  SilenceLimit and audio_array:
		# If the current audio array holds at least 10 (seconds) of data
		if len(audio_array) >= 10:
			# Generate new .wav directory path
			directName = str(sys.argv[2]) + ('%s' % strftime("%d_%H"))
			directory = '/home/Project/split/' + directName
			if not os.path.isdir(directory):
				os.makedirs(directory)
			# Create new .wav file containing current array data
			filename = directory + ('/%s.wav' % (strftime("%Y-%m-%d_%H%M%S") + str(WavSplit)))
			newFile = wave.open(filename, 'w')
			newFile.setparams((1, 2, 44100, 44100, 'NONE', 'not compressed'))
			newFile.writeframes(b''.join(audio_array))
			newFile.close()
		# Increment wavsplit counter and reset variables
		WavSplit = WavSplit + 1
		CurrentSilence = 0
		del audio_array[:]
	# If the current level is above the threshold then append the array
	if level > SilenceThreshold:
		audio_array.append(data)
Example #14
    def run(self):
        absolute_threshold = 10030
        self.calibrate()
        while not rospy.is_shutdown():
            if self.threshold > absolute_threshold:
                print('Threshold too high.  Recalibrating')
                self.calibrate()

            soundbite = 0
            bite_length = 0
            peak_volume = 0

            cont = False
            while (cont == False) or peak_volume == 0:
                #only breaks out of loop when peak volume conditions are met
                l, data = self.mic.read()
                if l:
                    level = audioop.max(data, 2)
                    if level > self.threshold:
                        if cont:
                            soundbite += 1
                            cont = False
                        if level > peak_volume:
                            peak_volume = level
                    elif level < self.threshold:  #Once hit under threshold, it will recognize next sound.
                        cont = True
                time.sleep(.01)
                if peak_volume > 0:
                    bite_length += .01

            print "BYTE LENGTH: ", bite_length
            self.pub.publish(str(bite_length) + ' ' + str(peak_volume))
Example #15
def main():
  inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE,alsaaudio.PCM_NONBLOCK)
  # Set attributes: Mono, 8000 Hz, 16 bit little endian samples
  inp.setchannels(1)
  inp.setrate(8000)
  inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
  inp.setperiodsize(160)

  # Loop forever, look for claps
  while True:
    # Read data from device
    l,data = inp.read()
    if l:
        if audioop.max(data, 2) > 28000:
          call(['cmus-remote', '-n'])
          print "Song changed | Amp:", audioop.max(data,2)
    time.sleep(.01)
Example #16
def get_dr(filename, floats=False):
    with wave.open(filename, "rb") as f:
        channels = f.getnchannels()
        if channels not in (1, 2):
            # TODO unpack n channels
            raise NotImplementedError(
                "We only handle mono or stereo at the moment")
        framesize = f.getsampwidth()
        if framesize != 2:
            # TODO map framesize to struct module constants
            raise NotImplementedError(
                "We only handle 16 bit formats at the moment")
        framerate = f.getframerate()
        total = f.getnframes()
        read = 0
        peaks = [[] for i in range(channels)]
        rmss = [[] for i in range(channels)]
        while True:
            # read three seconds of data
            block = f.readframes(framerate * 3)
            expected = framerate * 3 * channels * framesize
            if len(block) < expected:
                # EOF
                break
            read += 3 * framerate
            # unpack
            if channels == 2:
                chansamples = [
                    audioop.tomono(block, framesize, 1, 0),
                    audioop.tomono(block, framesize, 0, 1)
                ]
            else:
                chansamples = [block]
            for i, chan in enumerate(chansamples):
                peak = audioop.max(chan, framesize) / NORM
                rms = math.sqrt(2) * audioop.rms(chan, framesize) / NORM
                peaks[i].append(peak)
                rmss[i].append(rms)

        drs = []
        for c in range(channels):
            peaks[c].sort()
            rmss[c].sort()
            p2 = peaks[c][-2]
            if p2 == 0:
                raise SilentTrackError
            N = int(0.2 * len(peaks[c]))
            if N == 0:
                raise TooShortError
            r = math.sqrt(sum(i**2 for i in rmss[c][-N:]) / N)
            dr = -to_db(r / p2)
            drs.append(dr)

        if not floats:
            fdr = round(sum(drs) / len(drs))
        else:
            fdr = sum(drs) / len(drs)
        return fdr
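A hypothetical call, assuming NORM, to_db, SilentTrackError and TooShortError are defined elsewhere in the same module, and that track.wav (an illustrative file name) is a 16-bit mono or stereo file:

dr = get_dr('track.wav')
print('DR%d' % dr)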
Example #17
def get_sound(inp):
    #print('in get_sound function')
    while True:
        #print('in while loop')
        l, data = inp.read()
        if l > 0:
            val = audioop.max(data, 2)
            return ({'sound': val})
        time.sleep(0.001)
Example #18
 def test_max(self):
     for w in 1, 2, 3, 4:
         self.assertEqual(audioop.max(b'', w), 0)
         self.assertEqual(audioop.max(bytearray(), w), 0)
         self.assertEqual(audioop.max(memoryview(b''), w), 0)
         p = packs[w]
         self.assertEqual(audioop.max(p(5), w), 5)
         self.assertEqual(audioop.max(p(5, -8, -1), w), 8)
         self.assertEqual(audioop.max(p(maxvalues[w]), w), maxvalues[w])
         self.assertEqual(audioop.max(p(minvalues[w]), w), -minvalues[w])
         self.assertEqual(audioop.max(datas[w], w), -minvalues[w])
Example #19
    def update_peak_meter(self):
        p = self.rec.params
        sum = audioop.add(self.rec.input, self.rec.output, p.sampwidth)
        maxval = audioop.max(sum, p.sampwidth) / float(p.maxval)
        self.peak_meter.update(maxval)

        w = int(self.peak_canvas['width'])
        h = int(self.peak_canvas['height'])

        self.peak_canvas.coords('peak', 0, h, w, h-(h*self.peak_meter.peak))
Example #20
 def is_sound(self, data):
     result = False
     if len(data):
         # Return the maximum of the absolute value of all samples in a fragment.
         audio_val = audioop.max(data, 2)
         #print("audio_val=" + str(audio_val))
         if audio_val > self.audio_threshold:
             result = True
     #print('is_sound();result=' + str(result))
     return result
Example #21
 def amplify_max(self):
     """Amplify the sample to maximum volume without clipping or overflow happening."""
     assert not self.__locked
     max_amp = audioop.max(self.__frames, self.samplewidth)
     max_target = 2**(8 * self.samplewidth - 1) - 2
     if max_amp > 0:
         factor = max_target / max_amp
         self.__frames = audioop.mul(self.__frames, self.samplewidth,
                                     factor)
     return self
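The same normalization trick, sketched on raw frames outside any class (16-bit PCM assumed by default):

import audioop

def amplify_max_frames(frames, width=2):
    # scale so the loudest sample almost reaches full scale without clipping
    peak = audioop.max(frames, width)
    if peak == 0:
        return frames
    target = 2 ** (8 * width - 1) - 2
    return audioop.mul(frames, width, target / peak)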
Example #22
    def musicAnalyzerFunc(self, cube):
        # PyAudio was utilized to take
        # input from the USB mic.
        chunk = 10000
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 44100
        RECORD_SECONDS = 2

        p = pyaudio.PyAudio()

        # Each one of the columns is turned on initially;
        # this is to minimize the amount of code executed
        # each time the cube goes to update its LEDs
        cols = [19, 26, 14, 15, 11, 5, 6, 13, 27, 22, 10, 9, 2, 3, 4, 17]
        IO.output(cols, 0)

        s = p.open(format=FORMAT,
                   channels=CHANNELS,
                   rate=RATE,
                   input=True,
                   frames_per_buffer=chunk)

        while True:
            data = s.read(chunk)
            mx = audioop.max(data, 2)
            time.sleep(.2)

            if (mx < 1000):
                pass

            elif (mx > 3000 and mx < 3750):
                IO.output(24, 1)
                IO.output(23, 0)
                IO.output(18, 0)
                IO.output(21, 0)

            elif (mx > 3750 and mx < 4500):
                IO.output(24, 1)
                IO.output(23, 1)
                IO.output(18, 0)
                IO.output(21, 0)

            elif (mx > 4500 and mx < 5250):
                IO.output(24, 1)
                IO.output(23, 1)
                IO.output(18, 1)
                IO.output(21, 0)

            else:
                IO.output(24, 1)
                IO.output(23, 1)
                IO.output(18, 1)
                IO.output(21, 1)
Example #23
 def amplify_max(self) -> 'Sample':
     """Amplify the sample to maximum volume without clipping or overflow happening."""
     if self.__locked:
         raise RuntimeError("cannot modify a locked sample")
     max_amp = audioop.max(self.__frames, self.samplewidth)
     max_target = 2**(8 * self.samplewidth - 1) - 2
     if max_amp > 0:
         factor = max_target / max_amp
         self.__frames = audioop.mul(self.__frames, self.samplewidth,
                                     factor)
     return self
Example #24
    def draw_on_path(self):
        mic_data = self.stream.read(CHUNK, exception_on_overflow=False)
        fft_data = apply_fft(mic_data)

        for index_to_check in range(self.index,
                                    self.index + self.drawing_step):
            index_to_delete = ((index_to_check + DELETE_SPACING) %
                               len(self.path))
            if index_to_delete > self.index or self.lap == 0:
                self.delete_index_drawing(index_to_delete)

        self.line_width = min(
            int((audioop.max(mic_data, 2) / MAX_MIC_READING) * 15), 15)
        rgb = [
            min(255,
                int(self.max_bass_level(fft_data) / MAX_FFT_READING * 255)),
            min(
                255,
                int(
                    self.max_harmonics_level(fft_data) / MAX_FFT_READING *
                    255)),
            min(255,
                int(self.max_high_level(fft_data) / MAX_FFT_READING * 255))
        ]
        rgb = [rgb[i] for i in self.color_order]
        self.color = self.rgb_to_hex(rgb)

        if self.line_width > 2:
            if self.shape == 'square':
                self.drawings[
                    self.index] = self.create_square_centered_and_rotated(
                        self.path[self.index],
                        self.line_width,
                        random.randint(0, 360),
                        fill=self.color,
                        outline='')
            elif self.shape == 'circle':
                self.drawings[self.index] = self.create_circle_centered(
                    self.path[self.index],
                    self.line_width,
                    fill=self.color,
                    outline='')

        self.index += self.drawing_step
        if self.index >= len(self.path):
            self.lap += 1
            self.index = self.index % len(self.path)

        if self.lap > 1:
            self.index = 0
            self.stream.stop_stream()
            self.after(STATIC_DRAWING_TIME, self.delete_drawings)
        else:
            self.after(DRAWING_STEP_DELAY, self.draw_on_path)
Example #25
    def get_level(self):
        # Read data from device
        l, data = self._in.read()
        if l:
            # Return the maximum of the absolute value of
            # all samples in a fragment.
            # For visualization purposes shrink it by dividing
            self.value = audioop.max(data, 2) / 50

        # Call this function again
        self._ioloop.add_callback(self.get_level)
Example #26
def record_audio(pcm_in, data, vol):
    print('Recording. Two seconds of silence will commit the command.')
    buf = open(filename, 'w+b')
    time_start = time.time()
    while ((time.time() - time_start) < 2) or (vol > volume_threshold):
        if vol > volume_threshold:
            time_start = time.time()
        buf.write(data)
        l, data = pcm_in.read()
        vol = audioop.max(data, 2)
        time.sleep(.001)
    buf.close()
Example #27
    def __listen(self):
        while self.running:
            l, data = self.inp.read()  # Read data from mic

            if l:  # data was read
                max = audioop.max(data, 2)
                # max abs input from mic
                if max > ClapListener.clapThreshold and not self.hold:
                    self.relay.switch()
                    self.hold = True
                elif max < ClapListener.clapThreshold:
                    self.hold = False
Example #28
def writeSounds(xar, yar, dic):
    if dic['rootNot']:
        root = tk.Tk()
        root.withdraw()
        dic['rootNot'] = False
    inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)
    # Set attributes: Mono, 8000 Hz, 16 bit little endian samples
    inp.setchannels(1)
    inp.setrate(8000)
    inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)

    # The period size controls the internal number of frames per period.
    # The significance of this parameter is documented in the ALSA api.
    # For our purposes, it is sufficient to know that reads from the device
    # will return this many frames. Each frame being 2 bytes long.
    # This means that the reads below will return either 320 bytes of data
    # or 0 bytes of data. The latter is possible because we are in nonblocking
    # mode.
    inp.setperiodsize(160)
    i = 0
    while True:
        l, data = inp.read()
        if dic['curr'] == "RECORDING" or dic['curr'] == "ANALYZING":
            root = tk.Tk()
            root.geometry("300x224")
            root.resizable(0, 0)
            root.withdraw()
            messagebox.showwarning("", "%s IN PROGRESS" % dic['curr'])
            root.destroy()
        if dic['curr'] == 'Done':
            root = tk.Tk()
            root.geometry("300x224")
            root.resizable(0, 0)
            root.withdraw()
            if dic['analysis'] == 'normal':
                text = 'Normal: %s' % dic['normal']
            else:
                text = 'Abnormal: %s' % dic['abnormal']
            messagebox.showinfo('Analysis Complete', text)
            dic['curr'] = ""
            root.destroy()
        if l:
            # plt.text(0.05,0.1, dic[curr])

            try:
                val = dic['gain'] * audioop.max(data, 2)
            except:
                val = 0
            i += 1
            xar[i] = i
            yar[i] = val
            if (i >= 399):
                i = 0
Example #29
    def is_sound(self):
        logging.debug("is_sound")
        # Read data from device
        length, data = self.inp.read()

        if length:
            # Return the maximum of the absolute value of all samples in a fragment.
            audio_val = audioop.max(data, 2)
            logging.debug("audio_val=" + str(audio_val))
            if audio_val > 1000:
                return True
        return False
Example #30
    def detect_clap(self, retries):
        rtnval = False
        retry = 0
        while retry < retries:
            l, data = self.audinp.read()
            if l and self.in_clap_session:
                try:
                    lchannel = audioop.tomono(data, 2, 1, 0)
                    rchannel = audioop.tomono(data, 2, 0, 1)
                    lvu = (math.log(float(max(audioop.max(lchannel, 2), 1))) -
                           LO) / (HI - LO)
                    rvu = (math.log(float(max(audioop.max(rchannel, 2), 1))) -
                           LO) / (HI - LO)
                    lval = min(max(int(lvu * VAL_MAX), VAL_MIN), VAL_MAX)
                    rval = min(max(int(rvu * VAL_MAX), VAL_MIN), VAL_MAX)
                    if rval >= VAL_TRIGGER or lval >= VAL_TRIGGER:
                        rtnval = True
                        break
                except:
                    retry += 1

        return rtnval