Example 1
def retrieve(db, start, end, type_):
    start = util.parse_date(start, tz=config['tz'])
    start_t = util.timestamp(start)
    end = util.parse_date(end, tz=config['tz'])
    end_t = util.timestamp(end)
    results = db.entries.find({'t_utc': {'$gt': start_t, '$lt': end_t}, 'type': type_}).sort('t_utc')
    return list(results)
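
Every example in this collection leans on the same housepy idiom: util.parse_date turns a date string into a datetime (optionally localized with tz), and util.timestamp converts it to epoch seconds for the t_utc field. A minimal sketch of the window query above, assuming housepy is installed and using a timezone string that appears in the later examples:

from housepy import util

start_t = util.timestamp(util.parse_date("2014-08-01", tz="America/New_York"))
end_t = util.timestamp(util.parse_date("2014-09-01", tz="America/New_York"))
query = {'t_utc': {'$gt': start_t, '$lt': end_t}, 'type': 'beacon'}
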
Example 2
def assemble(self, search, limit, order, resolution):
    log.info("expeditions.assemble")
    expeditions = {}
    try:
        results = self.db.features.find(search).distinct('properties.Expedition')
        for expedition in results:
            start_date = util.parse_date(str(config['start_date'][expedition]))
            last_feature = list(self.db.features.find({'properties.Expedition': expedition}).sort([('properties.t_utc', DESCENDING)]).limit(1))[0]
            end_date = util.parse_date(last_feature['properties']['DateTime'])
            duration = end_date - start_date
            expeditions[expedition] = {'StartDate': start_date, 'Days': duration.days}
    except Exception as e:
        return self.error(log.exc(e))
    total = returned = len(expeditions)
    return expeditions, total, returned
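
The sort-and-limit(1) round trip above is one way to fetch the newest feature; PyMongo's find_one also accepts a sort parameter that does the same in a single call. A sketch with hypothetical database and expedition names:

from pymongo import MongoClient, DESCENDING

db = MongoClient().expedition  # hypothetical database name
last_feature = db.features.find_one(
    {'properties.Expedition': 'okavango'},  # hypothetical expedition name
    sort=[('properties.t_utc', DESCENDING)])
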
Example 3
def parse(request):
    log.info("ambit_geo.parse")
    sample = ingest_json_body(request)
    if sample is None:
        return sample, "Could not parse"

    data = {}
    for key, value in sample.items():
        if key == "UTC":
            dt = util.parse_date(sample['UTC']) # these are marked UTC in the data
            t = util.timestamp(dt)
            data['t_utc'] = t
            continue
        if key == "Longitude":
            data['longitude'] = math.degrees(float(sample['Longitude']))
            continue                       
        if key == "Latitude":
            data['latitude'] = math.degrees(float(sample['Latitude']))
            continue
        if key == "GPSAltitude":
            data['altitude'] = float(sample['GPSAltitude'])
            continue
        if type(value) != str:
            continue                            
        data[key] = strings.as_numeric(value) 

    try:
        log.debug("%s %s %s" % (data['longitude'], data['latitude'], data['altitude']))
    except KeyError:
        log.error("MISSING GEO")

    return data
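
The math.degrees calls suggest the Ambit samples record Longitude and Latitude in radians. A tiny illustration of the conversion this parser performs, with invented values:

import math

sample = {'Longitude': 0.40142, 'Latitude': 1.19381}  # radians, hypothetical values
print(math.degrees(sample['Longitude']))  # ~23.0 degrees
print(math.degrees(sample['Latitude']))   # ~68.4 degrees
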
Example 4
def ingest_geo_feature(path, kind):
    log.info("ingest_geo_feature %s" % path)
    t_protect = model.get_protect(kind)
    sightings = []
    headings = {}
    with open(path) as f:
        rows = csv.reader(f)
        for r, row in enumerate(rows):
            if r == 0:
                for i, item in enumerate(row):
                    headings[item] = i
                continue
            try:
                dt = util.parse_date("%s %s" % (row[headings['Date']], row[headings['Time']]), tz=config['local_tz'], dayfirst=True)
                t = util.timestamp(dt)
                if t <= t_protect:
                    log.warning("Protected t, skipping...")
                    continue                
                try:
                    coordinates = strings.as_numeric(row[headings['Longitude']]), strings.as_numeric(row[headings['Latitude']]), strings.as_numeric(row[headings['Altitude']])
                except Exception as e:
                    log.error("Missing coordinates! Skipping...")
                    continue
                properties = {'DateTime': dt.strftime("%Y-%m-%dT%H:%M:%S%z"), 't_utc': t, 'ContentType': kind}
                for heading in headings:
                    if heading not in ['Date', 'Time', 'Latitude', 'Longitude', 'Altitude']:
                        try:
                            properties[heading] = strings.as_numeric(row[headings[heading]])
                        except IndexError:
                            pass
                feature = geojson.Feature(geometry={'type': "Point", 'coordinates': coordinates}, properties=properties)
                model.insert_feature(kind, t, geojson.dumps(feature))
            except Exception as e:
                log.error("Row failed: " + log.exc(e))
                continue
Example 5
def retrieve(db, source, start, end, filters, page=None):
    if filters is None:
        filters = {}
    sources = [clean(source) for source in source.split(",")]    
    start_t = 0 if start == "*" else util.timestamp(util.parse_date(start, tz=config['tz']))
    end_t = min(2147483647, sys.maxsize) if end == "*" else util.timestamp(util.parse_date(end, tz=config['tz']))
    template = {'t_utc': {'$gt': start_t, '$lt': end_t}, '$or': [{'source': source} for source in sources]}
    template.update(filters)
    log.info("QUERY %s" % template)    
    results = db.entries.find(template).sort('t_utc')
    count = results.count()
    if page is None:
        page = (count // 100) + 1
    skip = (page - 1) * 100
    log.debug("page %s, skip %s" % (page, skip))
    results = results.skip(skip).limit(100)    
    log.info("--> done")
    return list(results), start_t, end_t, count, page
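
Note that Cursor.count(), used above, was removed in PyMongo 4. Under a current driver the same count-then-paginate flow can be written with count_documents; a minimal standalone sketch, with a hypothetical database name:

from pymongo import MongoClient

db = MongoClient().expedition  # hypothetical database name
template = {'t_utc': {'$gt': 0, '$lt': 2147483647}}
count = db.entries.count_documents(template)
page = (count // 100) + 1  # default to the last page, as above
results = db.entries.find(template).sort('t_utc').skip((page - 1) * 100).limit(100)
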
Example 6
def main(): ## called via tweet_grabber.py

    for a, account in enumerate(ACCOUNTS):
        log.info("Checking %s..." % account)
        try: 
            feed = "https://medium.com/feed/@%s" % account
            data = feedparser.parse(feed)['entries']
        except Exception as e:
            log.error(log.exc(e))
            continue    
        for entry in data:
            try:
                entry = {strings.camelcase(key): value for (key, value) in entry.items() if key in ['title', 'link', 'summary', 'published']}
                entry['Member'] = MEMBERS[a]
                entry['t_utc'] = util.timestamp(util.parse_date(entry['Published']))
                if entry['t_utc'] < (util.timestamp(util.parse_date(str(config['start_date'][config['expedition']])))) - (3 * 24 * 60 * 60): ## hack, minus three days to get jer's post
                    log.info("--> skipping too early blog post")
                    continue
                del entry['Published']
                entry['Url'] = entry['Link']
                del entry['Link']                
                entry['Summary'] = strings.strip_html(entry['Summary']).replace("Continue reading on Medium \u00bb", "")
                entry['FeatureType'] = "blog"
                dup = db.features.find_one({'properties.FeatureType': 'blog', 'properties.Url': entry['Url']})
                if dup is not None:
                    log.info("--> skipping duplicate blog post")
                    continue
                log.info("--> %s" % entry)
                success, value = ingest_data("tweet", entry)
                if not success:
                    log.error("--> failed: %s" % value)
                else:
                    log.info("--> %s" % value)
            except Exception as e:
                log.error(log.exc(e))
                continue
Example 7
def get_timeline(self):
    skip = self.get_argument('skip', 1)
    kinds = self.get_argument('types', "beacon").split(',')
    kinds = [kind.rstrip('s') for kind in kinds if kind.rstrip('s') in ['ambit', 'ambit_geo', 'sighting', 'breadcrumb', 'image', 'audio', 'beacon', 'heart_spike']]   # sanitizes
    try:
        dt = self.get_argument('date', datetime.datetime.now(pytz.timezone(config['local_tz'])).strftime("%Y-%m-%d"))
        log.debug(dt)
        dt = util.parse_date(dt, tz=config['local_tz'])
        days = int(self.get_argument('days', 1))
    except Exception as e:
        return self.error("Bad parameters: %s" % log.exc(e))
    t = util.timestamp(dt)
    log.debug("--> search for kinds: %s" % kinds)
    features = model.fetch_features(kinds, t, t + (days * (24 * 60 * 60)), skip)
    feature_collection = geojson.FeatureCollection(features)
    return self.json(feature_collection)
Example 8
def parse(request):
    log.info("ambit.parse")
    sample = ingest_json_body(request)
    if sample is None:
        return sample, "Could not parse"
 
    data = {}
    for key, value in sample.items():
        if key == "UTC":
            dt = util.parse_date(value) # these are marked UTC in the data
            t = util.timestamp(dt)
            data['t_utc'] = t
            continue
        if type(value) != str:
            continue                                    
        data[key] = strings.as_numeric(value)

    return data
Example 9
def ingest_hydrosensor(hydrosensor_id, content, dt):
    log.info("ingest_hydrosensor")
    t_protect = model.get_protect('hydrosensor')    
    lat, lon = model.get_drop_by_id(hydrosensor_id)
    #HACK
    coordinates = [0, 0, 0]
    t = util.timestamp(dt)
    #properties = {'DateTime': dt.strftime("%Y-%m-%dT%H:%M:%S%z"), 't_utc': t, 'ContentType': "hydrosensor"}  
    properties = {'ContentType': "hydrosensor"}  
    try:
        lines = content.split('\n')
        for line in lines:
            if not len(line.strip()):
                continue
            try:
                #Date: Sat, Sep 13, 2014 at 5:23 AM
                if "Date" in line:
                    dt = util.parse_date(line.replace("Date: ", "").strip(), tz=config['local_tz'])
                    t = util.timestamp(dt)
                    properties['DateTime'] = dt.strftime("%Y-%m-%dT%H:%M:%S%z")
                    properties['t_utc'] = t
                    log.info(dt.strftime("%Y-%m-%dT%H:%M:%S%z"))
                if "Temp" in line:
                    temperature = strings.as_numeric(line.replace("Temp (deg C) = (", "").replace(")", "").strip())
                    properties['temperature'] = temperature
                if "pH" in line:
                    ph = strings.as_numeric(line.replace("pH = (", "").replace(")", "").strip())
                    properties['ph'] = ph
                if "Conductivity" in line:
                    conductivity = line.replace("Conductivity (Cond,TDS,Sal,SG) = (", "").replace(")", "").strip()
                    conductivity = [strings.as_numeric(element) for element in conductivity.split(",")]
                    properties['conductivity'] = conductivity                    
            except Exception as e:
                log.error(log.exc(e))
                continue
        # if t <= t_protect:
        #     log.warning("Protected t, skipping...")
        #     return         
        feature = geojson.Feature(geometry={'type': "Point", 'coordinates': coordinates}, properties=properties)
        feature_id = model.insert_feature('hydrosensor', t, geojson.dumps(feature))
    except Exception as e:
        log.error(log.exc(e))
Example 10
def ingest_beacon(content):
    log.info("ingest_beacon")
    t_protect = model.get_protect('beacon')    
    properties = {}
    coordinates = [None, None, None]
    t = None
    try:
        lines = content.split('\n')
        for line in lines:
            log.debug("%s" % line)
            try:
                if "Position Time:" in line:
                    line = line.replace("Position Time:", "").strip()
                    dt = util.parse_date(line)
                    t = util.timestamp(dt)
                    properties['DateTime'] = dt.astimezone(pytz.timezone(config['local_tz'])).strftime("%Y-%m-%dT%H:%M:%S%z")
                    properties['t_utc'] = t
                if "Map:" in line:
                    line = line.split('?')[1].strip()
                    result = net.urldecode(line)
                    lat, lon = result['q'].split(' ')[0].split(',')
                    coordinates[0], coordinates[1] = strings.as_numeric(lon), strings.as_numeric(lat)
                if "Altitude:" in line:
                    altitude = strings.as_numeric(line.replace("Altitude:", "").replace("meters", "").strip())
                    coordinates[2] = altitude
                if "Speed:" in line:
                    speed = strings.as_numeric(line.replace("Speed:", "").replace("Knots", "").strip())
                    properties['Speed'] = speed
                if "Heading:" in line:
                    heading = strings.as_numeric(line.replace("Heading:", "").replace("°", "").strip())
                    properties['Heading'] = heading
            except Exception as e:
                log.error(log.exc(e))
                continue
        # if t <= t_protect:
        #     log.warning("Protected t, skipping...")
        #     return                                
        feature = geojson.Feature(geometry={'type': "Point", 'coordinates': coordinates}, properties=properties)
        feature_id = model.insert_feature('beacon', t, geojson.dumps(feature))
    except Exception as e:
        log.error(log.exc(e))
Example 11
def process_image(path, member=None, t_utc=None):
    # try to get EXIF data
    log.info("process_image %s..." % path)
    data = {}
    if member is not None:
        data['Member'] = member
    if t_utc is not None:
        data['t_utc'] = t_utc
    try:    
        image = Image.open(path)  
        width, height = image.size
        data['Dimensions'] = width, height
        try:
            exif = {ExifTags.TAGS[k]: v for (k, v) in image._getexif().items() if k in ExifTags.TAGS}
        except Exception as e:
            log.warning("--> no EXIF data in image: %s" % e)            
            if 't_utc' not in data:
                log.warning("--> substituting current time for t_utc")
                data['t_utc'] = util.timestamp()
        else:
            # log.debug(json.dumps(exif, indent=4, default=lambda x: str(x)))
            date_field = exif['DateTimeOriginal'] if 'DateTimeOriginal' in exif else exif['DateTime']
            if date_field[4] == ":" and date_field[7] == ":":
                date_field = list(date_field)
                date_field[4] = "-"
                date_field[7] = "-"
                date_field = ''.join(date_field)
            date = util.parse_date(date_field, tz=config['local_tz'])
            data['t_utc'] = util.timestamp(date)                            ## careful about this overriding
            data['DateTime'] = util.datestring(data['t_utc'], tz=config['local_tz'])    
            data['Make'] = exif['Make'].replace("\u0000", '').strip() if 'Make' in exif else None
            data['Model'] = exif['Model'].replace("\u0000", '').strip() if 'Model' in exif else None
        filename = "%s_%s.jpg" % (data['t_utc'], str(uuid.uuid4()))
        new_path = os.path.join(os.path.dirname(__file__), "..", "static", "data", "images", filename)
        shutil.copy(path, new_path)
        data['Url'] = "/static/data/images/%s" % filename
    except Exception as e:
        log.error(log.exc(e))
        return None
    return data
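
EXIF stores timestamps as "YYYY:MM:DD HH:MM:SS", which most date parsers reject; the character-list surgery above swaps the first two colons for dashes. A compact slicing equivalent, with a hypothetical EXIF value:

date_field = "2014:08:02 11:30:00"  # hypothetical EXIF DateTimeOriginal
if date_field[4] == ":" and date_field[7] == ":":
    date_field = date_field[:4] + "-" + date_field[5:7] + "-" + date_field[8:]
print(date_field)  # 2014-08-02 11:30:00
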
Example 12
def parse(request):
    log.info("beacon.parse")
    content = ingest_plain_body(request)
    if content is None:
        return content, "Could not parse"

    data = {}
    lines = content.split('\n')
    for line in lines:
        log.debug("%s" % line)
        try:
            if "sat4rent" in line.lower():
                data['Satellite'] = line[-8:].upper()
            if "Position Time:" in line:
                line = line.replace("Position Time:", "").strip()
                dt = util.parse_date(line)
                t = util.timestamp(dt)
                data['t_utc'] = t
            if "Map:" in line:
                line = line.split('?')[1].strip()
                result = net.urldecode(line)
                lat, lon = result['q'].split(' ')[0].split(',')
                data['longitude'], data['latitude'] = strings.as_numeric(lon), strings.as_numeric(lat)
            if "Altitude:" in line:
                altitude = strings.as_numeric(line.replace("Altitude:", "").replace("meters", "").strip())
                data['altitude'] = altitude
            if "Speed:" in line:
                speed = strings.as_numeric(line.replace("Speed:", "").replace("Knots", "").strip())
                data['Speed'] = speed
            if "Heading:" in line:
                heading = strings.as_numeric(line.replace("Heading:", "").replace("°", "").strip())
                data['Heading'] = heading
        except Exception as e:
            log.error(log.exc(e))
            continue

    return data
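
net.urldecode is presumably parsing the query string of the "Map:" URL; the standard library can do the same. A sketch under that assumption, with a hypothetical beacon URL:

from urllib.parse import urlparse, parse_qs

result = parse_qs(urlparse("http://maps.google.com/maps?q=45.5,-73.6+(beacon)").query)
lat, lon = result['q'][0].split(' ')[0].split(',')
print(lon, lat)  # -73.6 45.5
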
Example 13
def traverse(pd):
    log.info("Checking %s..." % pd)
    for i, filename in enumerate(os.listdir(pd)):
        if filename[0] == ".":
            continue
        elif os.path.isdir(os.path.join(pd, filename)):
            traverse(os.path.join(pd, filename))
        elif filename[-3:] == "sml":
            try:
                log.info("Reading %s..." % os.path.join(pd, filename))
                with open(os.path.join(pd, filename)) as f:
                    content = f.read()
            except Exception as e:
                log.error("Could not read file: %s" % log.exc(e))
            else:
                try:
                    log.info("Parsing...")
                    data = xmltodict.parse(content)
                    # log.debug(json.dumps(data, indent=4))
                    serial_number = str(data['sml']['DeviceLog']['Device']['SerialNumber'])
                    try:
                        member = config['ambits'][serial_number]
                    except KeyError:
                        log.warning("Ambit serial number not linked to a Member")
                        log.debug(serial_number)
                        log.debug(config['ambits'])
                        continue
                    log.info("Member: %s" % member)
                    samples = data['sml']['DeviceLog']['Samples']['Sample']
                    start_t = None
                    for s, sample in enumerate(samples):
                        if s == 0:
                            dt = util.parse_date(sample['UTC']) # these are marked UTC in the data
                            start_t = util.timestamp(dt)
                        sample['Member'] = member
                        if 'Satellites' in sample:  # ingest satellite location data
                            try:
                                url = "%s/ingest/ambit_geo" % config['url']
                                log.info("Sending to %s..." % url)
                                response = net.read(url, str(json.dumps(sample)).encode('utf-8'))
                                log.info("--> %s" % response)
                            except Exception as e:
                                log.error(log.exc(e))
                        else: # ingest energy data sample
                            ## bh16
                            # this data is not interesting, and mucks up the estimating
                            continue
                            # try:
                            #     url = "%s/ingest/ambit" % config['url']
                            #     log.info("Sending to %s..." % url)
                            #     response = net.read(url, str(json.dumps(sample)).encode('utf-8'))
                            #     log.info("--> %s" % response)
                            # except Exception as e:
                            #     log.error(log.exc(e))
                    try:
                        beats = [strings.as_numeric(beat) for beat in data['sml']['DeviceLog']['R-R']['Data'].split()]
                        d = {'Member': member, 't_utc': start_t, 'Beats': beats}
                        url = "%s/ingest/ambit_hr" % config['url']
                        log.info("Sending to %s..." % url)
                        response = net.read(url, str(json.dumps(d)).encode('utf-8'))
                        log.info("--> %s" % response)
                    except Exception as e:
                        log.error(log.exc(e))
                except Exception as e:
                    log.error("Parsing error: %s" % log.exc(e))
        else:
            log.warning("--> unknown file type %s, skipping..." % filename)
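
The hand-rolled recursion in traverse mirrors what os.walk provides. A sketch of the same .sml discovery with the standard library, pruning dotfiles and dot-directories as traverse does:

import os

def find_sml(pd):
    for root, dirs, files in os.walk(pd):
        dirs[:] = [d for d in dirs if not d.startswith(".")]  # prune hidden directories in place
        for filename in files:
            if not filename.startswith(".") and filename.endswith(".sml"):
                yield os.path.join(root, filename)
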
Example 14
def ingest_ambit(path, t_protect):    
    log.info("ingest_ambit %s" % path)
    with open(path, 'r') as f:
        content = f.read()        
        content = content.split("<IBI>")[0]
        parts = content.split("</header>")
        header = parts[0] + "</header>"
        header = xmltodict.parse(header.encode('utf-8'))
        person = header['header']['Activity'].replace("OWBS ", "") 
        person = "Chris" if person == "Trail running" else person
        person = "Jer" if person == "John" else person
        content = parts[-1].encode('utf-8')
        samples = xmltodict.parse(content)['Samples']['Sample']
        c = 0

        for s, sample in enumerate(samples):            
            try:
                #if 'VerticalSpeed' not in sample:
                if 'Satellites' in sample:
                    # satellite data sample          
                    lon, lat, alt = None, None, None      
                    t, dt = None, None
                    for key, value in list(sample.items()):  # iterate over a copy; keys are deleted below
                        if key == "UTC":
                            dt = util.parse_date(sample['UTC']) # these are marked UTC in the data
                            t = util.timestamp(dt)
                            del sample[key]
                            continue
                        if key == "Longitude":
                            lon = math.degrees(float(sample['Longitude']))
                            del sample[key]                
                            continue                       
                        if key == "Latitude":
                            lat = math.degrees(float(sample['Latitude']))
                            del sample[key]               
                            continue
                        if key == "GPSAltitude":
                            alt = float(sample['GPSAltitude'])
                            del sample[key]               
                            continue
                        if key[:3] == "Nav":
                            del sample[key]
                            continue
                        if type(value) != str:
                            del sample[key]
                            continue                            
                        sample[key] = strings.as_numeric(value) 
                    # if t <= t_protect:
                    #     log.warning("Protected t, skipping...")
                    #     continue
                    sample['DateTime'] = dt.astimezone(pytz.timezone(config['local_tz'])).strftime("%Y-%m-%dT%H:%M:%S%z")
                    sample['t_utc'] = t
                    sample['ContentType'] = 'ambit_geo'
                    sample['Person'] = person         
                    feature = geojson.Feature(geometry={'type': "Point", 'coordinates': [lon, lat, alt]}, properties=sample)            
                    model.insert_feature('ambit_geo', t, geojson.dumps(feature))
                   

                elif 'VerticalSpeed' in sample:
                    # energy data sample
                    for key, value in list(sample.items()):  # iterate over a copy; keys are deleted below
                        if key == "UTC":
                            dt = util.parse_date(value) # these are marked UTC in the data
                            t = util.timestamp(dt)
                            del sample[key]
                            continue
                        if type(value) != str:
                            del sample[key]
                            continue
                        sample[key] = strings.as_numeric(value)
                    # if t <= t_protect:
                    #     log.warning("Protected t, skipping...")
                    #     continue                        
                    sample['DateTime'] = dt.astimezone(pytz.timezone(config['local_tz'])).strftime("%Y-%m-%dT%H:%M:%S%z")
                    sample['t_utc'] = t
                    sample['ContentType'] = 'ambit'
                    sample['Person'] = person
                    if (person == 'Chris' and c % 10 == 0) or person != 'Chris':
                        feature = geojson.Feature(properties=sample)
                        model.insert_feature('ambit', t, geojson.dumps(feature))
                    c = c + 1

                else: 
                    log.info("extra ambit field")

            except Exception as e:
                log.error(log.exc(e))
Example 15
import json, time
import numpy as np
from housepy import net, science, drawing, util
from openpaths_video import *

LON = 0
LAT = 1
T = 2
X = 3
Y = 4

ZOOM = 1000
DURATION = 60*30    # if points are within 30 minutes, group them
THRESHOLD = 3       # fewer than THRESHOLD points isn't much of a path


points = json.loads(open("thief_points.json").read())
points = np.array([(float(point['lon']), float(point['lat']), time.mktime(util.parse_date(point['time']).timetuple()), None, None) for point in points])

median_lon = np.median(points[:,0])
median_lat = np.median(points[:,1])
points = np.array([point for point in points if abs(point[0] - median_lon) < ZOOM and abs(point[1] - median_lat) < ZOOM])    
max_lon = np.max(points[:,0])
min_lon = np.min(points[:,0])
max_lat = np.max(points[:,1])
min_lat = np.min(points[:,1])
points = list(points)

for point in points:
    point[X] = util.scale(point[LON], min_lon, max_lon)
    point[Y] = util.scale(point[LAT], min_lat, max_lat)

all_points = []
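
util.scale here presumably maps a value into 0..1 across a [low, high] range; a plain-Python equivalent of that assumption:

def scale(value, low, high):
    # linear map of value from [low, high] onto [0, 1]
    return (value - low) / (high - low)
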
Example 16
#!/usr/bin/env python3

import pymongo, json
import signal_processing as sp
from housepy import drawing, config, log, util
from mongo import db

START = "2016-07-05 00:00:00"
END = "2016-07-06"

log.info("Retrieving data...")
results = list(
    db.stream.find(
        {
            "t_utc": {
                "$gt": util.timestamp(util.parse_date(START, tz="America/New_York")),
                "$lt": util.timestamp(util.parse_date(END, tz="America/New_York")),
            }
        }
    ).sort([("t_utc", pymongo.ASCENDING)])
)
log.info("--> done")

##

ts = [r["t_utc"] for r in results]
xs = [r["x"] for r in results]

duration = ts[-1] - ts[0]
SAMPLING_RATE = 100
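
Continuing from the variables above: with duration in seconds and a target SAMPLING_RATE in Hz, the irregular stream can be put on a uniform grid with numpy's interp (the query's sort guarantees ts is ascending, which interp requires):

import numpy as np

grid = np.linspace(ts[0], ts[-1], int(duration * SAMPLING_RATE))
resampled = np.interp(grid, ts, xs)
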
Example 17
def generate():

    # load data into t and count arrays per species
    species = OrderedDict()
    start_t = util.timestamp(util.parse_date(str(config['start'])))
    end_t = util.timestamp(util.parse_date(str(config['end'])))
    max_count = 0
    with open("data.csv") as f:
        data = csv.reader(f)
        for r, row in enumerate(data):
            if r == 0:
                continue
            plot = row[1]        
            name = row[2]        
            if len(config['species_list']) and name not in config['species_list']:
                continue
            dt = datetime.datetime(int(row[3]), 1, 1) + datetime.timedelta(int(row[4]) - 1)
            t = util.timestamp(dt)
            if t < start_t or t > end_t:
                continue
            count = 0 if row[5] == "NA" else int(row[5]) 
            if count > max_count:
                max_count = count
            if name not in species:
                species[name] = {'ts': [start_t, t - 1], 'counts': [0, 0]}
            species[name]['ts'].append(t)
            species[name]['counts'].append(count)
    species = OrderedDict(sorted(species.items()))
    print("--> loaded")


    # add a zero count at the start and end of every year
    yts = [util.timestamp(datetime.datetime(y, 1, 1)) for y in range(1974, 2017)]
    for name in species:
        ts = species[name]['ts']
        for yt in yts:
            i = 0        
            while i < len(ts) and ts[i] < yt:
                i += 1
            if i > 0:
                end_season_t = ts[i-1]
                if i < len(ts):
                    start_season_t = ts[i]
                    ts.insert(i, start_season_t - config['tail'])
                    species[name]['counts'].insert(i, 0)
                ts.insert(i, end_season_t + config['tail'])
                species[name]['counts'].insert(i, 0)
        species[name]['ts'].append(end_t)
        species[name]['counts'].append(0)
    print("--> onsets added")


    # create and draw signals
    signals = []
    names = []
    i = 0
    for name, data in species.items():
        print("Processing %s..." % name)

        # create signal from bloom counts
        signal = sp.resample(data['ts'], data['counts'])
        if config['normalize']:
            signal = sp.normalize(signal)
        else:
            signal = sp.normalize(signal, 0, max_count)    
        signal = sp.smooth(signal, size=8)
        signal = sp.limit(signal, max(signal))  # get rid of noise below 0 for onset detection

        # add spikes for peaks
        if config['peak_spikes']:
            peaks, valleys = sp.detect_peaks(signal, lookahead=50)
            peak_signal = np.zeros(len(signal))    
            for peak in peaks:
                peak_signal[peak[0]] = 1.0
            signal += peak_signal

        # add spikes for onsets
        if config['onset_spikes']:
            onsets = sp.detect_onsets(signal)
            onset_signal = np.zeros(len(signal))    
            for onset in onsets:
                onset_signal[onset] = 0.5
                onset_signal[onset+1] = 0.4
                onset_signal[onset+2] = 0.25
            signal += onset_signal

        # limit
        signal = sp.limit(signal, 1.0)
        signal *= 0.9   # hack, just controlling gain
        signals.append(signal)   

        names.append(name)
   
        i += 1

    return signals, names
Example 18
    def get(self, view_name=None, output=None):

        # add a header for unrestricted access
        self.set_header("Access-Control-Allow-Origin", "*")
        csv = False

        # do the routing and load view module
        if not view_name:
            log.info("Listing views...")
            views = ["/api/%s" % filename.split('.')[0] for filename in os.listdir(os.path.abspath(os.path.dirname(__file__))) if filename[0] != "_" and filename[-3:] == ".py"]
            response = {'description': "API view endpoints", "views": views}
            return self.json(response)
        module_name = "api.%s" % view_name
        try:
            view = importlib.import_module(module_name)
            log.info("Loaded %s module" % module_name)
        except ImportError as e:
            log.error(log.exc(e))
            return self.error("View \"%s\" not recognized" % view_name)
        if output:
            if output == "csv":
                csv = True
            else:
                feature_type = self.get_argument('FeatureType', None)
                try:
                    return self.render("api/%s.html" % output, query=(self.request.uri).replace("/%s" % output, ""), feature_type=feature_type)
                except Exception as e:
                    return self.error("Could not render %s" % output)

        # time to build our search filter
        search = {}

        # special parsing for startDate and endDate
        start_string = self.get_argument('startDate', None) 
        if start_string is not None:
            try:            
                start_dt = util.parse_date(start_string, tz=config['local_tz'])
                start_t = util.timestamp(start_dt)        
                end_string = self.get_argument('endDate', (start_dt + datetime.timedelta(days=1)).strftime("%Y-%m-%d"))
                end_dt = util.parse_date(end_string, tz=config['local_tz'])
                end_t = util.timestamp(end_dt)        
                log.debug("startDate %s" % start_dt)            
                log.debug("endDate %s" % end_dt)    
                search['t_utc'] = {'$gt': start_t, '$lt': end_t}
            except Exception as e:
                log.error(log.exc(e))
                return self.error("Bad dates")          

        # special parsing for location
        # expecting bounds (upper left (NW), lower right (SE)): lon_1,lat_1,lon_2,lat_2
        # oka: 20,-17,26,-22 nyc: -75,41,-71,40
        geo_bounds = self.get_argument('geoBounds', None)
        if geo_bounds is not None:
            try:
                lon_1, lat_1, lon_2, lat_2 = [float(coord) for coord in geo_bounds.split(',')]
                log.debug("geo_bounds %f,%f %f,%f" % (lon_1, lat_1, lon_2, lat_2))
                search['geometry'] = {'$geoWithin': {'$geometry': {'type': "Polygon", 'coordinates': [[ [lon_1, lat_1], [lon_2, lat_1], [lon_2, lat_2], [lon_1, lat_2], [lon_1, lat_1] ]]}}}
            except Exception as e:
                log.error(log.exc(e))
                return self.error("Bad geometry")

        # special parsing for expeditionDay (overrides startDate / endDate)
        expedition_day = self.get_argument('expeditionDay', None)
        if expedition_day is not None:
            try:
                expedition = self.get_argument('expedition', config['expedition'])
                expedition = self.get_argument('Expedition', expedition)
                start_dt = util.parse_date(str(config['start_date'][expedition]), tz=config['local_tz'])
                expedition_day = int(expedition_day) - 1
                log.debug("%s days after %s" % (expedition_day, start_dt))
                gt_t = util.timestamp(start_dt + datetime.timedelta(days=expedition_day))
                lt_t = util.timestamp(start_dt + datetime.timedelta(days=expedition_day + 1))
                search['t_utc'] = {'$gt': gt_t, '$lt': lt_t}
            except Exception as e:
                log.error(log.exc(e))
                return self.error("Bad day")

        # special parsing for resolution
        resolution = strings.as_numeric(self.request.arguments['resolution'][0]) if 'resolution' in self.request.arguments else 0

        # special parsing for SpeciesSearch
        species_search = self.get_argument('speciesSearch', None)
        if species_search is not None:
            search['$text'] = {'$search': species_search}

        # get limit and order
        # limit = self.get_argument('limit', 100) # this fails on int arguments, which I think is a tornado bug
        limit = strings.as_numeric(self.request.arguments['limit'][0]) if 'limit' in self.request.arguments else 100
        order = self.request.arguments['order'][0].lower() if 'order' in self.request.arguments else 'ascending'
        order = ASCENDING if order == "ascending" else DESCENDING

        # get all the rest of the arguments and format as properties    
        try:
            for param, value in self.request.arguments.items():
                for i, item in enumerate(value):
                    item = item.decode('utf-8')
                    item = strings.as_numeric(item)
                    item = True if type(item) == str and item.lower() == "true" else item
                    item = False if type(item) == str and item.lower() == "false" else item
                    item = {'$exists': True} if item == '*' else item
                    value[i] = item
                search[param] = value[0] if len(value) == 1 else value  
            search = {('properties.%s' % (strings.camelcase(param) if param != 't_utc' else 't_utc')
                       if param != 'geometry' and param != '$text' else param): value
                      for (param, value) in search.items()
                      if param not in ['geoBounds', 'startDate', 'endDate', 'expeditionDay',
                                       'limit', 'order', 'resolution', 'speciesSearch']}
        except Exception as e:
            log.error(log.exc(e))
            return self.error("bad parameters")

        # http://localhost:7777/api?geoBounds=20,-17,26,-22&startDate=2014-08-01&endDate=2014-09-01&Member=Jer
        log.info("FILTER %s" % search)

        # pass our search to the view module for execution and formatting
        # try:         
        result = view.assemble(self, search, limit, order, resolution)   
        if result is None:
            return
        if csv:
            return self.csv(format_csv(result), "data.csv")
        results, total, returned = result
        search = {key.replace('properties.', ''): value for (key, value) in search.items()}
        return self.json({'order': order, 'limit': limit, 'total': total, 'returned': len(results) if returned is None else returned, 'filter': search, 'results': results, 'resolution': resolution if resolution != 0 else "full"})
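
For reference, a standalone sketch of the geoBounds handling above, using the sample bounds from the comment (NW corner first, then SE):

geo_bounds = "20,-17,26,-22"  # the "oka" example bounds
lon_1, lat_1, lon_2, lat_2 = [float(coord) for coord in geo_bounds.split(',')]
ring = [[lon_1, lat_1], [lon_2, lat_1], [lon_2, lat_2], [lon_1, lat_2], [lon_1, lat_1]]  # closed polygon
search = {'geometry': {'$geoWithin': {'$geometry': {'type': "Polygon", 'coordinates': [ring]}}}}
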
Example 19
import pymongo, time
from housepy import video, config, log, animation, util
from collections import deque
from mongo import db

PATH = "/Users/house/Projects/rats/bronx_lab/1467738431_.mov"
# PATH = "/Users/house/Projects/rats/bronx_lab/BDMV_5.mov"
# PATH = "/Users/house/Projects/rats/bronx_lab/video-720p-h264.mov"


START = "2016-07-05 12:30:00"
END = "2016-07-06"

log.info("Retrieving data...")
results = db.stream.find({'t_utc': {'$gt': util.timestamp(util.parse_date(START, tz='America/New_York')), '$lt': util.timestamp(util.parse_date(END, tz='America/New_York'))}}).sort([('t_utc', pymongo.ASCENDING)])
log.info("--> done")


ctx = animation.Context(640, 480, background=(1.0, 1.0, 1.0, 1.), fullscreen=False, title="collar stream")    

graph = deque()
start_time = time.time()
current_data = results.next()
data_start_time = current_data['t_utc']

def draw():
    global start_time, data_start_time, current_data
    elapsed_time = time.time() - start_time    
    while True:
        data_elapsed_time = current_data['t_utc'] - data_start_time
Example 20
# center = -73.959486, 40.685193  # brooklyn
# center = -72.723889, 43.173611  # vermont       # no points!
# center = -71.009755, 41.569593  # new bedford
# center = -93.219539, 44.933524  # minneapolis
# center = -77.059081, 38.948266  # dc
# center = -104.890219, 39.698841 # denver
# center = -83.961412, 35.935478  # knoxville
# center = -73.490419, 41.908486  # berkshires
# center = -74.035318, 41.498944  # hudson valley
# center = 127.032687, 37.635063  # seoul
# center = -71.221729, 42.306461  # boston
# center = -68.700278, 45.658056  # millinocket
# center = -118.334105, 34.045948 # LA

# almanac = Almanac.build(data, time.mktime(util.parse_date('2011-05-25').timetuple()), time.mktime(util.parse_date('2012-05-27').timetuple()), center)
almanac = Almanac.build(data, time.mktime(util.parse_date('2011-08-01').timetuple()), time.mktime(util.parse_date('2012-05-27').timetuple()), center)
# almanac = Almanac.build(data, time.mktime(util.parse_date('2012-01-01').timetuple()), time.mktime(util.parse_date('2012-05-27').timetuple()), center)

print()
print("POINTS")
print(np.array(almanac.points))
print()

print("PATHS")
for path in almanac.paths:
    print(path)
print()

print("PLACES")
for place in almanac.places:
    print(place)
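
One caveat with this snippet: unlike util.timestamp(util.parse_date(..., tz=...)) in the earlier examples, time.mktime interprets the parsed datetime's timetuple in the machine's local timezone, so the resulting epoch values depend on where the script runs. The pattern in isolation:

import time
from housepy import util

# mktime converts the timetuple to epoch seconds using the local timezone
t = time.mktime(util.parse_date('2011-08-01').timetuple())
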