Example #1
def main(session_id):

    ctx = drawing.Context(2000, 1000)

    for sensor in [2, 3, 4]:
        result = db.branches.find({'session': session_id, 'sensor': sensor}).sort([('t', ASCENDING)])
        if not result.count():
            continue

        result = list(result)
        ts = [r['t'] for r in result]
        xs = [r['sample'][0] for r in result]
        ys = [r['sample'][1] for r in result]
        zs = [r['sample'][2] for r in result]
        rms = [r['sample'][3] for r in result]

        duration = ts[-1] - ts[0]
        SAMPLING_RATE = 50  # Hz

        x_signal = (sp.resample(ts, xs, duration * SAMPLING_RATE) - RANGE[0]) / (RANGE[1] - RANGE[0])
        y_signal = (sp.resample(ts, ys, duration * SAMPLING_RATE) - RANGE[0]) / (RANGE[1] - RANGE[0])
        z_signal = (sp.resample(ts, zs, duration * SAMPLING_RATE) - RANGE[0]) / (RANGE[1] - RANGE[0])
        rms_signal = (sp.resample(ts, rms, duration * SAMPLING_RATE) - RANGE[0]) / (RANGE[1] - RANGE[0])

        # ctx.plot(x_signal, stroke=(1.0, 0.0, 0.0, 1.0), thickness=2.0)
        # ctx.plot(y_signal, stroke=(0.0, 1.0, 0.0, 1.0), thickness=2.0)
        # ctx.plot(z_signal, stroke=(0.0, 0.0, 1.0, 1.0), thickness=2.0)
        rms_signal = sp.normalize(rms_signal)
        ctx.plot(rms_signal, stroke=colors[sensor], thickness=2.0)
    ctx.output("graphs")
Example #2
    def run(self):
        spectrum_sum = None
        num_spectrums = 0
        while True:
            # filename = "robin_chat_sample_11k_16_mono.wav"                        
            # sound = Sound().load(filename)            
            sound = Sound().record(10, keep_file=False)
            block_size = 512
            block_overlap = block_size // 2  # power of two, default is 128; integer division so plt.specgram gets an int
            spectrum, freqs, ts, image = plt.specgram(sound.signal, NFFT=block_size, Fs=sound.rate, noverlap=block_overlap)            
            log.info("--> freq bins %s" % len(freqs))
            log.info("--> time columns %s" % len(ts))

            spectrum = sp.normalize(np.sqrt(spectrum), 0.0, 100.0)  # sqrt compresses, good for power; 100.0 is the clipping threshold
            spectrum = sp.rescale(spectrum, 0, 255)

            s = np.sum(spectrum)
            log.info("--> SUM %s" % s)
            if s < THRESHOLD:
                continue

            num_spectrums += 1
            if spectrum_sum is None:
                spectrum_sum = spectrum
            else:
                sum_multiplier = (num_spectrums - 1) / num_spectrums
                spectrum_sum *= sum_multiplier
                add_multiplier = 1 / num_spectrums
                spectrum *= add_multiplier
                spectrum_sum += spectrum

            ds = np.copy(spectrum_sum).astype(np.uint32)  # convert to uint32
            ds = ((ds & 0xFF) << 8) + ((ds & 0xFF) << 16) + (ds & 0xFF)  # replicate the 8-bit level into R, G, B to make white
            # print(ds[80])
            display_spectrums.put(ds)
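
The running average above is the standard incremental mean: after n blocks, mean_n = mean_(n-1) * (n - 1) / n + x_n / n, which equals the batch mean of all blocks seen so far. A minimal sketch with made-up toy arrays, verifying the equivalence:

import numpy as np

blocks = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
mean = None
for n, x in enumerate(blocks, start=1):
    if mean is None:
        mean = x.copy()
    else:
        mean = mean * (n - 1) / n + x / n  # same update as spectrum_sum above

assert np.allclose(mean, np.mean(blocks, axis=0))  # matches the batch mean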
Example #3
def process_readings(kind, color, thickness=5):
    global t, db, ctx
    data = model.fetch_readings(kind, t - DURATION, t)
    if not len(data):
        return False
    data.sort(key=lambda d: d['t'])
    ts = [d['t'] for d in data]
    vs = [d['v'] for d in data]
    signal = sp.resample(ts, vs, DURATION)
    signal = sp.normalize(signal, RANGE[kind][0], RANGE[kind][1])
    db[kind] = list(signal)
    if config['draw']:
        ctx.line([(float(i) / DURATION, sample) for (i, sample) in enumerate(signal)], thickness=thickness, stroke=color)
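
These snippets all lean on housepy's sp module without showing it. As a rough sketch of the semantics the calls appear to assume -- an approximation for reading the examples, not the library's actual implementation -- sp.resample(ts, values, n) interpolates irregular samples onto a uniform grid, and sp.normalize(signal, lo, hi) min-max scales into [0, 1]:

import numpy as np

def resample(ts, values, num_samples=None):
    # interpolate irregularly spaced (ts, values) pairs onto a uniform grid
    ts = np.asarray(ts, dtype=float)
    values = np.asarray(values, dtype=float)
    if num_samples is None:
        num_samples = len(ts)
    grid = np.linspace(ts[0], ts[-1], int(num_samples))
    return np.interp(grid, ts, values)

def normalize(signal, lo=None, hi=None):
    # min-max scale into [0, 1]; lo/hi default to the signal's own extrema
    signal = np.asarray(signal, dtype=float)
    lo = signal.min() if lo is None else lo
    hi = signal.max() if hi is None else hi
    return (signal - lo) / (hi - lo)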
Example #4
    def run(self):
        while True:
            # filename = "robin_chat_sample_11k_16_mono.wav"                        
            # sound = Sound().load(filename)            
            sound = Sound().record(10)
            block_size = 512
            block_overlap = block_size // 2  # power of two, default is 128; integer division so plt.specgram gets an int
            spectrum, freqs, ts, image = plt.specgram(sound.signal, NFFT=block_size, Fs=sound.rate, noverlap=block_overlap)            
            log.info("--> freq bins %s" % len(freqs))
            log.info("--> time columns %s" % len(ts))

            spectrum = sp.normalize(np.sqrt(spectrum), 0.0, 100.0)  # sqrt compresses, good for power; 100.0 is the clipping threshold
            spectrum = sp.rescale(spectrum, 0, 255).astype(np.uint32)  # convert to uint32
            spectrum = ((spectrum & 0xFF) << 8) + ((spectrum & 0xFF) << 16) + (spectrum & 0xFF)  # replicate the 8-bit level into R, G, B

            print(spectrum[80])
            spectrums.put(spectrum)
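
The bit arithmetic above packs one 8-bit level into the G, R, and B bytes of a 24-bit pixel, so every pixel comes out gray-to-white; a gray level v is equivalent to v * 0x010101. A quick check with a made-up value:

import numpy as np

v = np.uint32(200)  # one 8-bit brightness level
packed = ((v & 0xFF) << 8) + ((v & 0xFF) << 16) + (v & 0xFF)
assert packed == v * 0x010101  # 0xC8C8C8: equal R, G, B channels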
Example #5
def get_data(hidden=False):

    log.debug("HIDDEN %s" % hidden)

    data = {}

    try:

        walks = model.fetch_walks(hidden=hidden)

        notes = []
        v = 0
        ids = []
        for walk in walks:
            sequence = model.fetch_sequence(walk['id'])
            if len(sequence) < config['min_steps']:
                continue
            for step in sequence:  #[:config['max_steps']]:
                notes.append((step[0], v, 0 if step[1] == 'left' else 1))
            v += 1
            ids.append(walk['id'])

        # sort and normalize onsets
        notes.sort(key=lambda x: x[0])
        onsets = [note[0] for note in notes]
        onsets = sp.normalize(onsets)
        notes = [(onsets[i], note[1], note[2])
                 for (i, note) in enumerate(notes)]

        log.info("NOTES %s" % len(notes))

        data['notes'] = notes
        data['walk_ids'] = ids

    except Exception as e:
        log.error(log.exc(e))
        return json.dumps({})  # keep the return type consistent (a JSON string)

    return json.dumps(data)
Example #6
def get_data(hidden=False):

    log.debug("HIDDEN %s" % hidden)

    data = {}

    try:

        walks = model.fetch_walks(hidden=hidden)

        notes = []
        v = 0
        ids = []
        for walk in walks:
            sequence = model.fetch_sequence(walk['id'])
            if len(sequence) < config['min_steps']:
                continue
            for step in sequence:#[:config['max_steps']]:
                notes.append((step[0], v, 0 if step[1] == 'left' else 1))
            v += 1
            ids.append(walk['id'])

        # sort and normalize onsets
        notes.sort(key=lambda x: x[0])
        onsets = [note[0] for note in notes]
        onsets = sp.normalize(onsets)
        notes = [(onsets[i], note[1], note[2]) for (i, note) in enumerate(notes)]

        log.info("NOTES %s" % len(notes))

        data['notes'] = notes
        data['walk_ids'] = ids

    except Exception as e:
        log.error(log.exc(e))
        return json.dumps({})  # keep the return type consistent (a JSON string)

    return json.dumps(data)
Example #7
def main(session_id):

    ctx = drawing.Context(2000, 1000)

    for sensor in [2, 3, 4]:
        result = db.branches.find({
            'session': session_id,
            'sensor': sensor
        }).sort([('t', ASCENDING)])
        if not result.count():
            continue

        result = list(result)
        ts = [r['t'] for r in result]
        xs = [r['sample'][0] for r in result]
        ys = [r['sample'][1] for r in result]
        zs = [r['sample'][2] for r in result]
        rms = [r['sample'][3] for r in result]

        duration = ts[-1] - ts[0]
        SAMPLING_RATE = 50  # Hz

        x_signal = (sp.resample(ts, xs, duration * SAMPLING_RATE) - RANGE[0]) / (RANGE[1] - RANGE[0])
        y_signal = (sp.resample(ts, ys, duration * SAMPLING_RATE) - RANGE[0]) / (RANGE[1] - RANGE[0])
        z_signal = (sp.resample(ts, zs, duration * SAMPLING_RATE) - RANGE[0]) / (RANGE[1] - RANGE[0])
        rms_signal = (sp.resample(ts, rms, duration * SAMPLING_RATE) - RANGE[0]) / (RANGE[1] - RANGE[0])

        # ctx.plot(x_signal, stroke=(1.0, 0.0, 0.0, 1.0), thickness=2.0)
        # ctx.plot(y_signal, stroke=(0.0, 1.0, 0.0, 1.0), thickness=2.0)
        # ctx.plot(z_signal, stroke=(0.0, 0.0, 1.0, 1.0), thickness=2.0)
        rms_signal = sp.normalize(rms_signal)
        ctx.plot(rms_signal, stroke=colors[sensor], thickness=2.0)
    ctx.output("graphs")
Example #8
def generate():

    # load data into t and count arrays per species
    species = OrderedDict()
    start_t = util.timestamp(util.parse_date(str(config['start'])))
    end_t = util.timestamp(util.parse_date(str(config['end'])))
    max_count = 0
    with open("data.csv") as f:
        data = csv.reader(f)
        for r, row in enumerate(data):
            if r == 0:
                continue
            plot = row[1]
            name = row[2]
            if len(config['species_list']) and name not in config['species_list']:
                continue
            dt = datetime.datetime(int(row[3]), 1,
                                   1) + datetime.timedelta(int(row[4]) - 1)
            t = util.timestamp(dt)
            if t < start_t or t > end_t:
                continue
            count = 0 if row[5] == "NA" else int(row[5])
            if count > max_count:
                max_count = count
            if name not in species:
                species[name] = {'ts': [start_t, t - 1], 'counts': [0, 0]}
            species[name]['ts'].append(t)
            species[name]['counts'].append(count)
    species = OrderedDict(sorted(species.items()))
    print("--> loaded")

    # add a zero count at the start and end of every year
    yts = [
        util.timestamp(datetime.datetime(y, 1, 1)) for y in range(1974, 2017)
    ]
    for name in species:
        ts = species[name]['ts']
        for yt in yts:
            i = 0
            while i < len(ts) and ts[i] < yt:
                i += 1
            if i > 0:
                end_season_t = ts[i - 1]
                if i < len(ts):
                    start_season_t = ts[i]
                    ts.insert(i, start_season_t - config['tail'])
                    species[name]['counts'].insert(i, 0)
                ts.insert(i, end_season_t + config['tail'])
                species[name]['counts'].insert(i, 0)
        species[name]['ts'].append(end_t)
        species[name]['counts'].append(0)
    print("--> onsets added")

    # create and draw signals
    signals = []
    names = []
    i = 0
    for name, data in species.items():
        print("Processing %s..." % name)

        # create signal from bloom counts
        signal = sp.resample(data['ts'], data['counts'])
        if config['normalize']:
            signal = sp.normalize(signal)
        else:
            signal = sp.normalize(signal, 0, max_count)
        signal = sp.smooth(signal, size=8)
        signal = sp.limit(signal, max(signal))  # get rid of noise below 0 for onset detection

        # add spikes for peaks
        if config['peak_spikes']:
            peaks, valleys = sp.detect_peaks(signal, lookahead=50)
            peak_signal = np.zeros(len(signal))
            for peak in peaks:
                peak_signal[peak[0]] = 1.0
            signal += peak_signal

        # add spikes for onsets
        if config['onset_spikes']:
            onsets = sp.detect_onsets(signal)
            onset_signal = np.zeros(len(signal))
            for onset in onsets:
                onset_signal[onset] = 0.5
                onset_signal[onset + 1] = 0.4
                onset_signal[onset + 2] = 0.25
            signal += onset_signal

        # limit
        signal = sp.limit(signal, 1.0)
        signal *= 0.9  # hack, just controlling gain
        signals.append(signal)

        names.append(name)

        i += 1

    return signals, names
Example #9
def process_walk(walk_id, force=False):

    if not model.process_check(walk_id):
        log.error("Walk %s already processed" % walk_id)
        if force:
            log.info("--> forcing...")
            model.remove_sequence(walk_id)
        else:
            return
    log.info("Processing walk %s" % walk_id)

    # fetch data
    data = model.fetch_accels(walk_id)
    data = [(reading['t'], reading['x'], reading['y'], reading['z'])
            for reading in data]

    # let's sample every millisecond, so the time of the last reading is how many samples we need
    data = np.array(data)
    ts = data[:, 0]
    total_samples = int(ts[-1])

    # need at least 10s of data
    # add 2000 for trimming at the end
    if total_samples < 10000 + 2000:
        log.info("No footsteps detected (too short)")
        model.hide(walk_id)
        return

    # resample the values
    xs = sp.resample(ts, data[:, 1], total_samples)
    ys = sp.resample(ts, data[:, 2], total_samples)
    zs = sp.resample(ts, data[:, 3], total_samples)

    # skip for accelerometer startup and for phone out of pocket at end
    skipin, skipout = 0, 2000
    xs = xs[skipin:-skipout]
    ys = ys[skipin:-skipout]
    zs = zs[skipin:-skipout]
    total_samples -= (skipin + skipout)
    log.info("TOTAL SAMPLES %s (%fs)" % (total_samples,
                                         (total_samples / 1000.0)))

    # get 3d magnitude (not RMS) -- orientation shouldn't matter
    ds = np.sqrt(np.power(xs, 2) + np.power(ys, 2) + np.power(zs, 2))

    # prep the raw values for display
    # normalize the values to a given range  (this is Gs)
    MIN = -10.0
    MAX = 10.0
    xs = (xs - MIN) / (MAX - MIN)
    ys = (ys - MIN) / (MAX - MIN)
    zs = (zs - MIN) / (MAX - MIN)
    # smooth them
    xs = sp.smooth(xs, 300)
    ys = sp.smooth(ys, 300)
    zs = sp.smooth(zs, 300)

    # process the magnitude signal
    ds = sp.smooth(ds, 500)
    ds = np.clip(ds, -10.0, 10.0)  # limit the signal to +-10 Gs
    ds = sp.normalize(ds)
    ds = 1 - ds
    ds = sp.compress(ds, 3.0)
    ds = sp.normalize(ds)

    # detect peaks
    peaks, valleys = sp.detect_peaks(ds, lookahead=50, delta=0.10)
    peaks = np.array(peaks)
    valleys = np.array(valleys)
    log.info("PEAKS %s" % len(peaks))
    if not len(peaks):
        log.info("No footsteps detected")
        model.hide(walk_id)
        return

    # get foot separator line
    fxs = [int(peak[0]) for peak in peaks]
    fys = [peak[1] for peak in peaks]
    avs = np.average([peak[1] for peak in peaks])
    fys[0] = avs  # it's going to start with a peak, so we need to bring it up or down accordingly
    fxs.append(total_samples - 1)
    fys.append(avs)
    fs = sp.resample(fxs, fys, total_samples)
    fs = sp.smooth(fs, 3000)

    # print out
    log.info("Saving sequence (%s)..." % walk_id)
    sequence = []
    for p, peak in enumerate(peaks):
        foot = 'right' if peak[1] > fs[int(peak[0])] else 'left'
        t = peak[0]
        t += 250  # turns out the peak hits just before the step
        sequence.append((t, foot))

    # fix triples
    for i in range(len(sequence) - 2):
        if sequence[i][1] == sequence[i + 1][1] == sequence[i + 2][1]:
            other_foot = 'right' if sequence[i + 1][1] == 'left' else 'left'
            sequence[i + 1] = (sequence[i + 1][0], other_foot)

    model.insert_sequence(walk_id, sequence)

    plot(walk_id, xs, ys, zs, ds, peaks, total_samples, fs)
Example #10
def generate():

    # load data into t and count arrays per species
    species = OrderedDict()
    start_t = util.timestamp(util.parse_date(str(config['start'])))
    end_t = util.timestamp(util.parse_date(str(config['end'])))
    max_count = 0
    with open("data.csv") as f:
        data = csv.reader(f)
        for r, row in enumerate(data):
            if r == 0:
                continue
            plot = row[1]        
            name = row[2]        
            if len(config['species_list']) and name not in config['species_list']:
                continue
            dt = datetime.datetime(int(row[3]), 1, 1) + datetime.timedelta(int(row[4]) - 1)
            t = util.timestamp(dt)
            if t < start_t or t > end_t:
                continue
            count = 0 if row[5] == "NA" else int(row[5]) 
            if count > max_count:
                max_count = count
            if name not in species:
                species[name] = {'ts': [start_t, t - 1], 'counts': [0, 0]}
            species[name]['ts'].append(t)
            species[name]['counts'].append(count)
    species = OrderedDict(sorted(species.items()))
    print("--> loaded")


    # add a zero count at the start and end of every year
    yts = [util.timestamp(datetime.datetime(y, 1, 1)) for y in range(1974, 2017)]
    for name in species:
        ts = species[name]['ts']
        for yt in yts:
            i = 0        
            while i < len(ts) and ts[i] < yt:
                i += 1
            if i > 0:
                end_season_t = ts[i-1]
                if i < len(ts):
                    start_season_t = ts[i]
                    ts.insert(i, start_season_t - config['tail'])
                    species[name]['counts'].insert(i, 0)
                ts.insert(i, end_season_t + config['tail'])
                species[name]['counts'].insert(i, 0)
        species[name]['ts'].append(end_t)
        species[name]['counts'].append(0)
    print("--> onsets added")


    # create and draw signals
    signals = []
    names = []
    i = 0
    for name, data in species.items():
        print("Processing %s..." % name)

        # create signal from bloom counts
        signal = sp.resample(data['ts'], data['counts'])
        if config['normalize']:
            signal = sp.normalize(signal)
        else:
            signal = sp.normalize(signal, 0, max_count)    
        signal = sp.smooth(signal, size=8)
        signal = sp.limit(signal, max(signal))  # get rid of noise below 0 for onset detection

        # add spikes for peaks
        if config['peak_spikes']:
            peaks, valleys = sp.detect_peaks(signal, lookahead=50)
            peak_signal = np.zeros(len(signal))    
            for peak in peaks:
                peak_signal[peak[0]] = 1.0
            signal += peak_signal

        # add spikes for onsets
        if config['onset_spikes']:
            onsets = sp.detect_onsets(signal)
            onset_signal = np.zeros(len(signal))    
            for onset in onsets:
                onset_signal[onset] = 0.5
                onset_signal[onset+1] = 0.4
                onset_signal[onset+2] = 0.25
            signal += onset_signal

        # limit
        signal = sp.limit(signal, 1.0)
        signal *= 0.9   # hack, just controlling gain
        signals.append(signal)   

        names.append(name)
   
        i += 1

    return signals, names
Example #11
def process(t):

    log.info("////////// process %s //////////" % t)
    filename = "audio_tmp/%s.wav" % t
    sample_rate, signal = wavfile.read(filename)
    log.info("AUDIO SAMPLES %s" % len(signal))
    log.info("SAMPLE RATE %s" % sample_rate)
    duration = float(len(signal)) / sample_rate
    log.info("AUDIO DURATION %ss" % util.format_time(duration))
    signal = (np.array(signal).astype('float') / (2**16 * 0.5))  # assuming 16-bit PCM; scale to [-1, 1]

    log.info("--> preprocessing")
    magnitude = abs(signal)
    thresholded_magnitude = (magnitude > THRESHOLD) * magnitude
    # level = sp.smooth(thresholded_magnitude, size=10000)    # smooth is too expensive for the Raspberry Pi
    level = thresholded_magnitude

    log.info("--> scanning")
    TOLERANCE = sample_rate // 10    # within a tenth of a second counts as the same sound (poor man's smoothing); integer so the zeros == TOLERANCE check below can match
    indexes = []
    maxes = []
    durations = []
    zeros = 0
    on_chunk = False
    for index, sample in enumerate(level):
        if sample > 0.0:
            if not on_chunk:
                indexes.append(index)                
                durations.append(0)
                maxes.append(0)
                on_chunk = True              
            durations[-1] += 1
            if sample > maxes[-1]:
                maxes[-1] = sample
            zeros = 0            
        if sample == 0.0:            
            if on_chunk:
                zeros += 1
                if zeros == TOLERANCE:
                    on_chunk = False
    events = []
    for i in range(len(indexes)):
        value, t_, duration = maxes[i], t + int(float(indexes[i]) / sample_rate), float(durations[i]) / sample_rate
        events.append((value, t_, duration))
    for event in events:
        log.debug(event)

    if 'draw' in config and config['draw']:   
        from housepy import drawing    
        log.info("--> drawing")
        ctx = drawing.Context(width=2000, height=500, background=(0., 0., 1.), hsv=True, flip=True, relative=True)
        ctx.line([(float(i) / len(magnitude), sample) for (i, sample) in enumerate(magnitude)], thickness=1, stroke=(0., 0., 0.5))
        ctx.line([(float(i) / len(thresholded_magnitude), sample) for (i, sample) in enumerate(thresholded_magnitude)], thickness=1, stroke=(0., 0., 0.))
        ctx.line([(float(i) / len(level), sample) for (i, sample) in enumerate(level)], thickness=1, stroke=(0., 1., 1.))
        level = sp.normalize(level)
        ctx.line([(float(i) / len(level), sample) for (i, sample) in enumerate(level)], thickness=1, stroke=(0.15, 1., 1.))
        ctx.line(0.0, THRESHOLD, 1.0, THRESHOLD, thickness=1, stroke=(0.55, 1., 1.))
        ctx.show()

    try:
        data = []
        for event in events:
            value, t_, duration = event
            data.append({'device': config['device'], 'kind': "sound", 'value': value, 't': t_, 'duration': duration})
        response = net.read("http://%s:%s" % (config['server']['host'], config['server']['port']), json.dumps(data))
        log.info(response)
    except Exception as e:
        log.error(log.exc(e))

    if config['device'] != "Granu":
        os.remove(filename)
Example #12
def main(session_id):
    result = db.branches.find({'session': session_id}).sort([('t', ASCENDING)])
    if not result.count():
        print("NO DATA!")
        exit()

    log.info("Start processing...")

    result = list(result)
    ts = [r['t'] for r in result]
    rms = [r['sample'][3] for r in result]
    duration = ts[-1] - ts[0]
    SAMPLING_RATE = 60  # Hz
    log.info("DURATION %fs" % duration)

    signal = sp.resample(ts, rms, duration * SAMPLING_RATE)
    signal = sp.remove_shots(signal)
    signal = sp.normalize(signal)    
    signal = sp.smooth(signal, 15)

    # this number should match some lower frequency bound, i.e., put this in Hz.
    # the smaller the number, the more it will affect small motion,
    # so this should be higher than the slowest motion we care about --
    # i.e., we don't care about motion slower than 0.5 Hz, which is 120 samples at 60 Hz
    trend = sp.smooth(signal, 120)
    signal -= trend
    signal += 0.5

    atrend = sp.smooth(signal, 500)



    ## autocorrelation

    auto = sp.autocorrelate(signal)
    # this should be small -- if 60hz, fastest gesture would reasonably be half of that, so 30
    peaks, valleys = sp.detect_peaks(auto, 10)
    peaks = [peak for peak in peaks[1:] if peak[1] > 0.5]
    partials = []
    for peak in peaks:    
        frequency = SAMPLING_RATE / peak[0]
        partial = frequency * 1000
        partials.append([partial, float(peak[1])])
        log.info("%d samps\t%fhz\t%f magnitude\t%f map" % (peak[0], frequency, peak[1], partial))
    log.info(partials)    

    ctx = drawing.Context(2000, 750)
    ctx.plot(auto, stroke=(0.0, 0.0, 0.0, 1.0), thickness=2.0)
    for peak in peaks:
        x = peak[0] / len(auto)
        ctx.line(x, 0.0, x, peak[1], stroke=(1.0, 0.0, 0.0, 1.0))
    ctx.output("graphs")


    ## audio

    audio_signal = sp.make_audio(signal)
    spectrum(audio_signal, SAMPLING_RATE)

    AUDIO_RATE = 11025
    filename = "%s.wav" % util.timestamp()
    sound.write_audio(audio_signal, filename, AUDIO_RATE)
    subprocess.call(["open", filename])
    log.info("AUDIO DURATION %fs" % (duration / (AUDIO_RATE / SAMPLING_RATE)))

    ctx = drawing.Context(2000, 750)
    ctx.plot(signal, stroke=(0.0, 0.0, 0.0, 1.0), thickness=2.0)
    ctx.plot(trend, stroke=(1.0, 0.0, 0.0, 1.0), thickness=2.0)
    ctx.plot(atrend, stroke=(0.0, 0.0, 1.0, 1.0), thickness=2.0)
    ctx.output("graphs")


    log.info("--> done") # around 300ms
Example #13
points, rates = util.load("data/last_snap.pkl")
log.info("INPUT: %s POINTS, %s DIMENSIONS" % points.shape)
# points = manifold.Isomap().fit_transform(points)
# points = manifold.LocallyLinearEmbedding(method="modified").fit_transform(points)
# points = manifold.SpectralEmbedding().fit_transform(points)
# points = manifold.MDS().fit_transform(points)
# points = manifold.TSNE(n_iter=2000).fit_transform(points)
# points = decomposition.PCA(n_components=2).fit_transform(points)
# points = manifold.TSNE().fit_transform(points)
points = decomposition.PCA(n_components=2).fit_transform(points)
log.info("OUTPUT: %s POINTS, %s DIMENSIONS" % points.shape)

# labels = cluster.DBSCAN(eps=0.1, min_samples=5).fit_predict(points)
clusterer = cluster.KMeans(n_clusters=8)
labels = clusterer.fit_predict(points)
centroids = clusterer.cluster_centers_
labels += abs(min(labels))
max_label = max(labels)
log.info("CENTROIDS\n%s" % centroids)

centroids = np.column_stack((sp.normalize(centroids[:, 0], np.min(points[:, 0]), np.max(points[:, 0])),
                             sp.normalize(centroids[:, 1], np.min(points[:, 1]), np.max(points[:, 1]))))
points = np.column_stack((sp.normalize(points[:, 0], np.min(points[:, 0]), np.max(points[:, 0])),
                          sp.normalize(points[:, 1], np.min(points[:, 1]), np.max(points[:, 1]))))

chart.plot(points, sample_axis=True, scatter=False, c=(0., 0., 1., 1.), linewidth=2)
chart.plot(centroids, sample_axis=True, scatter=True, c=(1., 0., 0., 1.), linewidth=0, s=100)

chart.show("charts/")

Example #14
def spectrum(signal, rate):

    log.info("Computing spectrogram...")

    block_size = 512
    block_overlap = block_size // 2  # power of two, default is 128; integer division so plt.specgram gets an int

    # freqs, ts, spectrum = spectrogram(sound.signal, fs=sound.rate, noverlap=block_overlap, nfft=block_size, detrend='constant', return_onesided=True, scaling='density', axis=-1, mode='psd', window=('tukey', 0.25), nperseg=block_overlap*2)
    spectrum, freqs, ts, image = plt.specgram(signal,
                                              NFFT=block_size,
                                              Fs=rate,
                                              noverlap=block_overlap)

    # (plt is 3k smaller)

    # print("spectrum", spectrum) # freq rows of time columns.
    # print()
    # print(freqs)
    # print()
    # print(ts)

    log.info("--> done")
    log.info("--> freq bins %s" % len(freqs))
    log.info("--> time columns %s" % len(ts))

    log.info("Drawing...")

    # with gzip.open("spectrum.pklz", 'wb') as f:
    #     f.write(pickle.dumps(spectrum))

    ctx = drawing.Context(
        len(ts) * 1, len(freqs) * 1,
        relative=True)  # if it's not an even multiple, artifacts happen

    pixel_width = ctx.width / len(spectrum[0])
    pixel_height = ctx.height / len(spectrum)

    # for y, row in enumerate(spectrum):
    #     for x, value in enumerate(row):
    #         v = min(value / (allmax / 500), 1.0)
    #         v = 1 - v
    #         # print((x * pixel_width) / ctx.width, (y * pixel_height) / ctx.height, pixel_width / ctx.width, pixel_height / ctx.height)
    #         ctx.rect((x * pixel_width) / ctx.width, (y * pixel_height) / ctx.height, pixel_width / ctx.width, pixel_height / ctx.height, fill=(v, v, v, 1.), stroke=(1., 0., 0., 0.), thickness=0.0)

    # mx = spectrum.flatten().max()
    # print("maximum", spectrum.flatten().max())
    # print("minimum", spectrum.flatten().min())

    spectrum = sp.normalize(np.sqrt(spectrum), 0.0, 200.0)  # sqrt compresses, good for power. 200 is a clipping threshold.

    for y, row in enumerate(spectrum):
        for x, v in enumerate(row):
            ctx.line((x * pixel_width) / ctx.width,
                     (y * pixel_height) / ctx.height,
                     ((x * pixel_width) + pixel_width) / ctx.width,
                     (y * pixel_height) / ctx.height,
                     stroke=(v, v, v, 1.),
                     thickness=pixel_height)

    log.info("--> done")
    ctx.output("charts/")
Example #15
def sample(draw=False):
    log.info("START SAMPLE")
    # get the time
    # dt = timeutil.get_dt(tz=config['tz'])
    # dt -= datetime.timedelta(days=300)  # time adjustment if necessary for testing
    # t_utc = timeutil.t_utc(dt)
    t_utc = timeutil.t_utc()
    dt = timeutil.get_dt(t_utc, tz=config['tz'])
    log.info("CURRENT TIME %s" % timeutil.get_string(t_utc, tz=config['tz']))

    # pull the last 24 hours worth -- we're going to normalize over that to set our dynamic levels
    log.info(config['sites'][config['sample']])

    # # this is the real-time last 24 hours
    # query = {'site': config['sample'], 't_utc': {'$gt': t_utc - 86400, '$lt': t_utc}}
    # log.info(query)
    # results = db.entries.find(query)

    # this is the last 24 hours we have
    # assume updating every 15 minutes, last 24 hours is the last 96 results
    results = db.entries.find({
        'site': config['sample']
    }).sort([('t_utc', DESCENDING)]).limit(96)
    results = list(results)
    results.reverse()
    log.info("%s results" % len(results))  # should be 96
    log.info(json.dumps(results[-1], indent=4,
                        default=lambda d: str(d)))  # show the last one

    # resample signals for each
    ts = [d['t_utc'] for d in results]
    duration = ts[-1] - ts[0]
    log.info("DURATION %s %s" % (duration, timeutil.format_seconds(duration)))
    signals = []
    rates = []
    labels = list(config['labels'].values())
    labels.sort()
    for i, label in enumerate(labels):
        # log.debug(label)
        try:
            values = [d[label] if label in d else None for d in results]
            values = sp.remove_shots(values,
                                     nones=True)  # repair missing values
            signal = sp.resample(ts, values)
            num_samples = len(signal)
            sample_rate = num_samples / duration
            rates.append(sample_rate)
            signal = sp.normalize(signal)
            signal = sp.smooth(signal, 15)
            signals.append(signal)
        except KeyError as e:
            log.error(log.exc(e))
            log.error(values)

    # draw if desired
    if draw:
        from housepy import drawing
        ctx = drawing.Context(1200, 500, margin=20, hsv=True)
        for i, label in enumerate(labels):
            color = i / len(labels), .8, .8, 1.
            signal = signals[i]
            ctx.plot(signal, stroke=color, thickness=2)
        ctx.output("charts/")

    # collapse into n-dimensional points
    points = []
    for i in range(len(signals[0])):
        point = [signal[i] for signal in signals]
        points.append(point)

    # PCA to 4D -- this takes whatever data we've got and maximizes variation for our four panels
    points = np.array(points)
    # log.debug("INPUT: %s POINTS, %s DIMENSIONS" % points.shape)
    points = decomposition.PCA(n_components=4).fit_transform(points)
    # log.debug("OUTPUT: %s POINTS, %s DIMENSIONS" % points.shape)

    # normalize each dimension independently, again amplifying dynamics
    points = np.column_stack((sp.normalize(points[:, 0], np.min(points[:, 0]), np.max(points[:, 0])),
                              sp.normalize(points[:, 1], np.min(points[:, 1]), np.max(points[:, 1])),
                              sp.normalize(points[:, 2], np.min(points[:, 2]), np.max(points[:, 2])),
                              sp.normalize(points[:, 3], np.min(points[:, 3]), np.max(points[:, 3]))))

    # now, for each time this is queried, we want to return an interpolation between the last two points
    # this essentially implements a delay that closes in on the most recent query
    # ...hopefully to be refreshed with a new USGS reading when it gets there
    # if that reading doesn't come, it's ok, it just hovers there until we proceed
    # and actually we want a couple of hours' delay, because these come in in bulk every 1-4 hours

    # we know we have 96 points; four hours back is 16 points
    # interpolating between points -17 and -16 should give the most recent guaranteed smooth transitions
    # transduction takes time, after all

    point_a = points[-17]
    point_b = points[-16]
    # log.debug(point_a)
    # log.debug(point_b)

    # linear interpolation over 15 minutes
    position = (((dt.minute % 15) * 60) + dt.second) / (15 * 60)
    # log.debug(position)
    point = [(point_a[i] * (1.0 - position)) + (point_b[i] * position)
             for i in range(len(point_a))]

    log.info("RESULT: %s" % point)

    return point
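
A vectorized alternative for the per-column normalization above (assuming plain min-max scaling, as elsewhere in these examples), which sidesteps the column-index bookkeeping entirely:

import numpy as np

def normalize_columns(points):
    # min-max scale every column of an (n, d) array into [0, 1] independently
    lo = points.min(axis=0)
    hi = points.max(axis=0)
    return (points - lo) / (hi - lo)

points = np.array([[0.0, 10.0], [5.0, 20.0], [10.0, 40.0]])
print(normalize_columns(points))  # each column now spans exactly [0, 1]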
Example #16
def process_walk(walk_id, force=False):

    if not model.process_check(walk_id):
        log.error("Walk %s already processed" % walk_id)        
        if force:
            log.info("--> forcing...")
            model.remove_sequence(walk_id)
        else:
            return
    log.info("Processing walk %s" % walk_id)

    # fetch data
    data = model.fetch_accels(walk_id)
    data = [(reading['t'], reading['x'], reading['y'], reading['z']) for reading in data]

    # let's sample every millisecond, so the time of the last reading is how many samples we need
    data = np.array(data)
    ts = data[:,0]
    total_samples = int(ts[-1])

    # need at least 10s of data
    # add 2000 for trimming at the end
    if total_samples < 10000 + 2000: 
        log.info("No footsteps detected (too short)")
        model.hide(walk_id)        
        return

    # resample the values
    xs = sp.resample(ts, data[:,1], total_samples)
    ys = sp.resample(ts, data[:,2], total_samples)
    zs = sp.resample(ts, data[:,3], total_samples)

    # skip for accelerometer startup and for phone out of pocket at end 
    skipin, skipout = 0, 2000
    xs = xs[skipin:-skipout]
    ys = ys[skipin:-skipout]
    zs = zs[skipin:-skipout]
    total_samples -= (skipin + skipout)
    log.info("TOTAL SAMPLES %s (%fs)" % (total_samples, (total_samples / 1000.0)))

    # get 3d magnitude (not RMS) -- orientation shouldn't matter
    ds = np.sqrt(np.power(xs, 2) + np.power(ys, 2) + np.power(zs, 2))    

    # prep the raw values for display
    # normalize the values to a given range  (this is Gs)
    MIN = -10.0
    MAX = 10.0
    xs = (xs - MIN) / (MAX - MIN)
    ys = (ys - MIN) / (MAX - MIN)
    zs = (zs - MIN) / (MAX - MIN)
    # smooth them
    xs = sp.smooth(xs, 300)
    ys = sp.smooth(ys, 300)
    zs = sp.smooth(zs, 300)

    # process the magnitude signal
    ds = sp.smooth(ds, 500)
    ds = np.clip(ds, -10.0, 10.0)   # limit the signal to +-10 Gs
    ds = sp.normalize(ds)
    ds = 1 - ds
    ds = sp.compress(ds, 3.0)
    ds = sp.normalize(ds)

    # detect peaks
    peaks, valleys = sp.detect_peaks(ds, lookahead=50, delta=0.10)
    peaks = np.array(peaks)
    valleys = np.array(valleys)
    log.info("PEAKS %s" % len(peaks))
    if not len(peaks):
        log.info("No footsteps detected")
        model.hide(walk_id)
        return

    # get foot separator line
    fxs = [int(peak[0]) for peak in peaks]
    fys = [peak[1] for peak in peaks]
    avs = np.average([peak[1] for peak in peaks])
    fys[0] = avs    # it's going to start with a peak, so we need to bring it up or down accordingly
    fxs.append(total_samples-1)
    fys.append(avs)
    fs = sp.resample(fxs, fys, total_samples)
    fs = sp.smooth(fs, 3000)

    # print out
    log.info("Saving sequence (%s)..." % walk_id)
    sequence = []
    for p, peak in enumerate(peaks):
        foot = 'right' if peak[1] > fs[int(peak[0])] else 'left'
        t = peak[0]
        t += 250   # turns out the peak hits just before the step
        sequence.append((t, foot))

    # fix triples
    for i in range(len(sequence) - 2):
        if sequence[i][1] == sequence[i+1][1] == sequence[i+2][1]:
            sequence[i+1] = (sequence[i+1][0], 'right') if sequence[i+1][1] == 'left' else (sequence[i+1][0], 'left')

    model.insert_sequence(walk_id, sequence)

    plot(walk_id, xs, ys, zs, ds, peaks, total_samples, fs)
Example #17
duration = ts[-1] - ts[0]
print("DURATION %s %s" % (duration, strings.format_time(duration)))
signals = []
rates = []
labels = list(config['labels'].values())
labels.sort()
for i, label in enumerate(labels):
    log.info(label)
    try:
        values = [d[label] if label in d else None for d in results]
        values = sp.remove_shots(values, nones=True)  # repair missing values
        signal = sp.resample(ts, values)
        num_samples = len(signal)
        sample_rate = num_samples / duration
        rates.append(sample_rate)
        signal = sp.normalize(signal)
        signal = sp.smooth(signal, 15)
        signals.append(signal)        
        # color = colors[i]
        color = i / len(labels), .8, .8, 1.
        ctx.plot(signal, stroke=color, thickness=2)
        ctx.line(10 / ctx.width, 1 - ((10 + (i * 10)) / ctx.height), 30 / ctx.width, 1 - ((10 + (i * 10)) / ctx.height), stroke=color, thickness=2)
        ctx.label(35 / ctx.width, 1 - ((13 + (i * 10)) / ctx.height), label.upper(), size=8)            
    except KeyError as e:
        log.error(log.exc(e))
        log.error(values)

ctx.output("charts/")

points = []
for i in range(len(signals[0])):
    point = [signal[i] for signal in signals]  # collapse the signals into one n-dimensional point per sample
    points.append(point)
Example #18
results = list(
    db.stream.find(
        {
            "t_utc": {
                "$gt": util.timestamp(util.parse_date(START, tz="America/New_York")),
                "$lt": util.timestamp(util.parse_date(END, tz="America/New_York")),
            }
        }
    ).sort([("t_utc", pymongo.ASCENDING)])
)
log.info("--> done")

##

ts = [r["t_utc"] for r in results]
xs = [r["x"] for r in results]

duration = ts[-1] - ts[0]
SAMPLING_RATE = 100

log.info("Resampling...")
signal = sp.resample(ts, xs, duration * SAMPLING_RATE)
signal += 1  # change -1,1 to 0,2
signal = sp.normalize(signal, 0, 2)
log.info("--> done")

log.info("Drawing...")
ctx = drawing.Context(1000, 400)
ctx.plot(signal, stroke=(0.0, 0.0, 0.0, 1.0), thickness=1.0)
ctx.output("graphs")
log.info("--> done")
Example #19
log.info("T_MAX %s" % t_max)

signals = []
labels = list(streams.keys())
log.info("LABELS %s" % labels)
for label in labels:
    log.info(label)
    ts = tses[label]
    ts = [t_min] + ts + [t_max]
    values = [d[label] if label in d else None for d in streams[label]]
    values = [values[0]] + values + [values[-1]]
    values = sp.remove_shots(values, nones=True)  # repair missing values
    signal = sp.resample(ts, values)
    num_samples = len(signal)
    sample_rate = num_samples / duration
    signal = sp.normalize(signal)
    signal = sp.smooth(signal, 15)
    signals.append(signal)    

log.info("Drawing...")
ctx = drawing.Context(1200, 500, margin=20, hsv=True)
for b in range(12):
    ctx.line(b / 12, 0, b / 12, 1, stroke=(0.5, 0.5, 0.5, 0.5), thickness=0.5)
ctx.line(1, 0, 1, 1, stroke=(0.5, 0.5, 0.5, 0.5), thickness=0.5)
for i, signal in enumerate(signals):
    color = i / (len(signals) + 4) + .1, 1., .8, 1.
    ctx.plot(signal, stroke=color, thickness=1.5)
    ctx.line(10 / ctx.width, 1 - ((10 + (i * 10)) / ctx.height), 30 / ctx.width, 1 - ((10 + (i * 10)) / ctx.height), stroke=color, thickness=2)
    ctx.label(35 / ctx.width, 1 - ((13 + (i * 10)) / ctx.height), labels[i].upper(), size=8)            

ctx.output("charts/")
Example #20
notes = []
voices = []
v = 0
for walk in walks:
    sequence = model.fetch_sequence(walk['id'])
    if len(sequence) < MIN_STEPS:
        continue    
    voices.append(Swerve(v + 1))
    for step in sequence:
        notes.append((step[0], v, 0 if step[1] == 'left' else 1))
    v += 1   
    if v == len(steps):
        break

# sort and normalize onsets
notes.sort(key=lambda x: x[0])
onsets = [note[0] for note in notes]
onsets = sp.normalize(onsets)
notes = [(onsets[i], note[1], note[2]) for (i, note) in enumerate(notes)]


for voice in voices:
    voice.synth = 'cycle'
    voice.attack = 350
    # voice.sustain = 350
    voice.sustain = 2000
    voice.decay = 600
    voice.reverb = 0.5, 0.5, 0.45, 0.5, 0.0
    # voice.chord = C3

# DURATION = 20 * 60.0
DURATION = 10 * 60.0