Example #1

# chord progression; each tuple holds pitch constants (presumably imported note values)
CHORDS = [
    (A2, B3, G5),
    (G2, B3, A5),
    (F2, C, A5),
    (F2, D, G5),
    (E2, D, B5),
    (F2, C, B5),
    (F2, B3, A5),
    (A2, B3, G5),
]

db = crashdb.load("data.json")

max_t = max(db['heartbeats'][-1], db['pedals'][-1], db['breaths'][-1])
log.info("MAX TIME %s" % util.format_time(max_t))

notes = []

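# for each heartbeat, pick a pitch by choosing at random (weighted by the eased position)
# between the first notes of the two chords that bracket its position in the timeline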
for heartbeat in db['heartbeats']:
    pos = heartbeat / max_t
    chord_pos = pos * (len(CHORDS) - 1)
    p1 = CHORDS[int(math.floor(chord_pos))][0]
    p2 = CHORDS[int(math.ceil(chord_pos))][0]
    chord_pos -= int(chord_pos)
    chord_pos = ease_in_out(chord_pos)
    pitch = p1 if random.random() > chord_pos else p2
    notes.append((heartbeat, [1, pitch, 127, util.format_time(heartbeat), chord_pos, p1, p2, get_note_name(pitch)]))
    # notes.append((heartbeat + 0.1, [HEARTBEAT_CHANNEL, pitch, 110, util.format_time(heartbeat)]))

for pedal in db['pedals']:
    pos = pedal / max_t
Example #2

def process(t):

    log.info("////////// process %s //////////" % t)
    filename = "audio_tmp/%s.wav" % t
    sample_rate, signal = wavfile.read(filename)
    log.info("AUDIO SAMPLES %s" % len(signal))
    log.info("SAMPLE RATE %s" % sample_rate)
    duration = float(len(signal)) / sample_rate
    log.info("AUDIO DURATION %ss" % util.format_time(duration))
    signal = np.array(signal).astype('float') / (2**16 * 0.5)   # assuming 16-bit PCM; scale to roughly -1..1

    log.info("--> preprocessing")
    magnitude = abs(signal)
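    # keep only samples louder than THRESHOLD; everything else drops to zero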
    thresholded_magnitude = (magnitude > THRESHOLD) * magnitude
    # level = sp.smooth(thresholded_magnitude, size=10000)      # shit -- smooth is too expensive for raspi
    level = thresholded_magnitude

    log.info("--> scanning")
    TOLERANCE = sample_rate / 10    # within a tenth of a second, same sound (poor man's smoothing?)
    indexes = []
    maxes = []
    durations = []
    zeros = 0
    on_chunk = False
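    # group above-threshold samples into chunks: record each chunk's start index, peak level,
    # and length; a chunk only closes after TOLERANCE consecutive zero samples, so short gaps are absorbed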
    for index, sample in enumerate(level):
        if sample > 0.0:
            if not on_chunk:
                indexes.append(index)                
                durations.append(0)
                maxes.append(0)
                on_chunk = True              
            durations[-1] += 1
            if sample > maxes[-1]:
                maxes[-1] = sample
            zeros = 0            
        if sample == 0.0:            
            if on_chunk:
                zeros += 1
                if zeros == TOLERANCE:
                    on_chunk = False
    events = []
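    # convert each chunk into (peak value, t plus onset offset in seconds, duration in seconds)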
    for i in xrange(len(indexes)):
        value, t_, duration = maxes[i], t + int(float(indexes[i]) / sample_rate), float(durations[i]) / sample_rate
        events.append((value, t_, duration))
    for event in events:
        log.debug(event)

    if 'draw' in config and config['draw']:   
        from housepy import drawing    
        log.info("--> drawing")
        ctx = drawing.Context(width=2000, height=500, background=(0., 0., 1.), hsv=True, flip=True, relative=True)
        ctx.line([(float(i) / len(magnitude), sample) for (i, sample) in enumerate(magnitude)], thickness=1, stroke=(0., 0., 0.5))
        ctx.line([(float(i) / len(thresholded_magnitude), sample) for (i, sample) in enumerate(thresholded_magnitude)], thickness=1, stroke=(0., 0., 0.))
        ctx.line([(float(i) / len(level), sample) for (i, sample) in enumerate(level)], thickness=1, stroke=(0., 1., 1.))
        level = sp.normalize(level)
        ctx.line([(float(i) / len(level), sample) for (i, sample) in enumerate(level)], thickness=1, stroke=(0.15, 1., 1.))
        ctx.line(0.0, THRESHOLD, 1.0, THRESHOLD, thickness=1, stroke=(0.55, 1., 1.))
        ctx.show()

    try:
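        # package the detected events and send them to the configured server as JSON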
        data = []
        for event in events:
            value, t_, duration = event
            data.append({'device': config['device'], 'kind': "sound", 'value': value, 't': t_, 'duration': duration})
        response = net.read("http://%s:%s" % (config['server']['host'], config['server']['port']), json.dumps(data))
        log.info(response)
    except Exception as e:
        log.error(log.exc(e))

    if config['device'] != "Granu":
        os.remove(filename)
Example #3
    ts.append(t)
    try:
        heartrate = float(trackpoint.findtext("%sHeartRateBpm/%sValue" % tuple([ns]*2)))
    except Exception:
        heartrate = heartrates[-1] if len(heartrates) else 0.0  # carry over; heart rate doesn't go to 0 (hopefully)
    heartrates.append(heartrate)
    try:
        cadence = float(trackpoint.findtext("%sCadence" % tuple([ns])))
    except Exception as e:
        cadence = 0.0   # drop to 0, probably stopped
    cadences.append(cadence * 2)    # two feet!
log.info("DATA START TIME %s UTC" % datetime.datetime.utcfromtimestamp(start_t).strftime("%Y-%m-%d %H:%M:%S"))
num_samples = len(ts)
log.info("NUM DATA SAMPLES %s" % num_samples)
if len(ts) != int(ts[-1]):
    log.warning("%s != %s" % (util.format_time(len(ts)), util.format_time(ts[-1])))
log.info("DURATION %s" % (util.format_time(ts[-1])))

log.info("CONVERTING AND SAMPLING")

# clean data
cadences = science.filter_deviations(cadences, positive_only=True)
# heartrates = science.filter_deviations(heartrates)

# normalize data
cadences_norm = science.normalize(cadences)
heartrates_norm = science.normalize(heartrates)

# show
ctx = drawing.Context(2000, 250, relative=True, flip=True)
ctx.line([(float(i) / num_samples, cadences_norm[i]) for i in range(num_samples)], stroke=(0, 0, 255), thickness=2)
Example #4
log.info("GPX %s" % gpx_filename)
log.info("WAV %s" % wav_filename)

audio_start_dt = datetime.datetime.strptime(wav_filename.split('.')[0].split('/')[-1].replace('_smp', ''), "%Y%m%d %H%M%S")
audio_start_dt = util.to_utc(audio_start_dt)

# get video times
video_start_t, video_end_t = compile_gpx.get_video_times(gpx_filename)

log.info("AUDIO START %s" %  audio_start_dt)
audio_start_t = float(calendar.timegm(audio_start_dt.timetuple()))
sample_rate, data = wavfile.read(wav_filename)
log.info("AUDIO SAMPLE RATE %s" % sample_rate)
log.info("AUDIO LENGTH (samples) %s" % len(data))
seconds = float(len(data)) / sample_rate
log.info("AUDIO DURATION %s" % util.format_time(seconds))
skip = video_start_t - audio_start_t
log.info("AUDIO SKIP %s%s" % ('-' if skip < 0 else '', util.format_time(abs(skip))))

# downsample to 60hz
target_sample_rate = 60.0
signal = science.downsample(data, int(sample_rate / target_sample_rate))
log.info("NEW LENGTH (samples) %s" % len(signal))
average = np.average(signal)
reduced = signal - average
reduced = [x if x >= 0 else 0 for x in reduced]
reduced = science.smooth(reduced, window_len=50)
reduced = science.normalize(reduced)
signal = science.normalize(signal)

log.info("DETECTING PEAKS")