def combine_versions(items, a, b, props=('d1',)):
    """Compare two result-set versions ``items[a]`` and ``items[b]`` key by key.

    For every key present in both versions, and for every property in
    *props*, computes the slow-down factor mean(b)/mean(a) over the
    non-None plucked values. A side with no usable values yields a
    neutral factor of 1.0.

    :param items: mapping version -> {key -> records}
    :param a: name/index of the baseline version in *items*
    :param b: name/index of the compared version in *items*
    :param props: property names to compare (tuple default replaces the
        original mutable ``['d1']`` default argument)
    :return: list of dicts ``{'name': key, prop: factor, ...}``
    """
    results = []
    keys_a = set(items[a].keys())
    # BUG FIX: was ``set(items[a].keys())`` — both key sets were taken from
    # the same version, so `diverse` was always empty and the warning
    # below could never fire.
    keys_b = set(items[b].keys())
    intersected = keys_a.intersection(keys_b)
    diverse = keys_a.union(keys_b) - intersected

    if diverse:
        # Python 3 print call (original used a Python 2 print statement,
        # inconsistent with the f-strings used elsewhere in this file)
        print('key sets are not same! They differ at {}'.format(diverse))

    for key in intersected:
        result = {'name': key}
        for p in props:
            val_b = [float(x) for x in pluck(items[b][key], p) if x is not None]
            val_a = [float(x) for x in pluck(items[a][key], p) if x is not None]
            if not val_a or not val_b:
                # missing property on either side -> neutral factor
                result[p] = 1.0
            else:
                # slow-down factor: ratio of the two means;
                # tests MUST be the same, otherwise stretching does not make sense
                result[p] = (sum(val_b) / len(val_b)) / (sum(val_a) / len(val_a))
        results.append(result)
    return results
Exemple #2
0
    def measure(self, cls, name, timeout=None, tries=None, processes=None):
        """Benchmark worker class *cls* under each configured process count.

        For each ``no_cpu`` in *processes*, runs *tries* rounds: start
        ``no_cpu`` instances of *cls*, let them run for *timeout* seconds,
        shut them down and join them, recording the round's duration, the
        summed ``result.value`` and whether every worker exited cleanly.
        The rounds are then averaged into one summary dict per count.

        :param cls: zero-arg callable producing a worker object exposing
            start()/shutdown()/join(), ``result.value`` and ``terminated``
        :param name: label shown in the progress bar
        :param timeout: seconds per round (defaults to ``self.timeout``)
        :param tries: rounds per process count (defaults to ``self.tries``)
        :param processes: iterable of process counts (defaults to
            ``self.processes``)
        :return: list with one summary dict per process count
        """
        # explicit arguments win over instance-level defaults
        timeout = timeout if timeout is not None else self.timeout
        tries = tries if tries is not None else self.tries
        processes = processes if processes is not None else self.processes

        pb = ProgressBar(maximum=tries, width=30, prefix="{self.name:35}",
                         suffix=" {self.last_progress}/{self.maximum}")

        measure_result = list()
        for no_cpu in processes:
            pb.name = "{:s} {:d} {:s}".format(name, no_cpu, 'core' if no_cpu == 1 else 'cores')
            results = list()
            for i in range(0, tries):
                if self.print_output:
                    pb.progress(i)

                # one fresh worker instance per requested process
                targets = [cls() for j in range(0, no_cpu)]

                with timer.measured("{:s} {:d}".format(name, i), False):
                    # start processes
                    for target in targets:
                        target.start()

                    # wait for timeout
                    time.sleep(timeout)

                    # send exit status
                    for target in targets:
                        target.shutdown()

                    # join threads
                    for target in targets:
                        target.join()

                tmp = dict()
                tmp['duration'] = timer.time()
                # total work done across all workers in this round
                tmp['value'] = sum(pluck(targets, 'result.value'))
                # clean exit only if no worker had to be terminated
                tmp['exit'] = not max(pluck(targets, 'terminated'))
                results.append(tmp)

            if self.print_output:
                pb.end()

            result = dict()
            result['processes'] = no_cpu
            # result['exit'] = min(pluck(results, 'exit'))
            # averages across the tries for this process count
            result['duration'] = sum(pluck(results, 'duration')) / float(tries)
            result['value'] = sum(pluck(results, 'value')) / float(tries)
            result['performance'] = result['value'] / result['duration']
            result['effectiveness'] = (result['value'] / result['duration']) / no_cpu

            if self.human_format:
                result['value'] = human_readable(result['value'])
                result['performance'] = human_readable(result['performance'])
                result['effectiveness'] = human_readable(result['effectiveness'])

            measure_result.append(result)

        return measure_result
Exemple #3
0
 def test_getattr_based_plucks(self):
     """pluck falls back to attribute access (item.field)."""
     stamps = [
         datetime(2012, 10, 1, 3),
         datetime(2012, 10, 15, 4),
         datetime(2012, 10, 30, 8),
     ]
     for attr, expected in (('year', [2012, 2012, 2012]),
                            ('day', [1, 15, 30]),
                            ('hour', [3, 4, 8])):
         assert pluck(stamps, attr) == expected
Exemple #4
0
 def test_getitem_based_plucks(self):
     """pluck uses __getitem__ access; defaults fill in missing keys."""
     records = [
         {'id': 282, 'name': 'Alice', 'age': 30},
         {'id': 217, 'name': 'Bob', 'age': 56},
         {'id': 328, 'name': 'Charlie', 'age': 56},
         {'id': '167'},
     ]
     assert pluck(records, 'id') == [282, 217, 328, '167']
     # a key missing from any record raises unless a default is given
     self.assertRaises(ValueError, pluck, records, 'name')
     assert pluck(records, 'name', default=None) == ['Alice', 'Bob', 'Charlie', None]
     assert pluck(records, 'name', default='Mr. X') == ['Alice', 'Bob', 'Charlie', 'Mr. X']
     assert pluck(records, 'age', default=None) == [30, 56, 56, None]
Exemple #5
0
def rpluck(obj, path):
    """Recursive pluck: resolve the dotted *path* prefix with rget,
    then pluck the final segment from the resulting iterable."""
    *prefix, last = path.split(".")
    container = rget(obj, ".".join(prefix), [])
    return pluck(container, last)
Exemple #6
0
def phase_classify_render(request):
    """Classify trip readings into driving phases and summarise them.

    Builds a Mongo query from the POSTed trip/device selection (falling
    back to "all trips not explicitly filtered out" when no trips are
    named), loads matching readings in trip-sequence order and runs the
    phase classifier over them.

    :return: dict with the classified readings, a summary table and the
        name of the speed field used for classification
    """
    # only readings that carry a usable timestamp
    query = {'timestamp': {'$exists': True, '$ne': None}}
    # print(request.POST)
    min_phase_time = float(request.POST.get('min-phase-seconds'))
    cruise_window = float(request.POST.get('cruise-window'))
    if request.POST.getall('trips[]'):
        query['trip_id'] = {'$in': request.POST.getall('trips[]')}
    else:
        # no explicit trips: take everything for the chosen devices
        # except trips the user has filtered out
        query['trip_id'] = {
            '$nin':
            pluck(
                request.db.webcan_trip_filters.find(
                    {'vid': {
                        '$in': request.POST.getall('devices[]')
                    }}), 'trip_id')
        }
    if request.POST.getall('devices[]'):
        query['vid'] = {'$in': request.POST.getall('devices[]')}
    # decode timestamps as tz-aware Adelaide-local datetimes
    readings = request.db.rpi_readings.with_options(codec_options=CodecOptions(
        tz_aware=True, tzinfo=pytz.timezone('Australia/Adelaide')))
    cursor = readings.find(query, {'_id': False}).sort(SORT_TRIP_SEQ)
    print(query)
    # NOTE(review): Cursor.count() is deprecated/removed in newer PyMongo —
    # confirm the installed driver version still supports it
    print("Len readings:", cursor.count())
    readings = list(cursor)
    speed = _classify_readings_with_phases_pas(readings,
                                               min_phase_time,
                                               cruise_avg_window=cruise_window)
    summary = _summarise_readings(readings)
    return {
        'readings': readings,
        # 'readings': [],
        'summary': summary,
        'speed_field': speed
    }
Exemple #7
0
def get_device_trips_for_user(request):
    """Return all distinct trip ids recorded by any of the user's devices."""
    device_names = pluck(request.user['devices'], 'name')
    trip_ids = request.db.rpi_readings.distinct(
        'trip_id', {'vid': {'$in': device_names}})
    return list(trip_ids)
    def __init__(self, request_file):
        """Load a grading request from an already-parsed dict or a JSON file.

        :param request_file: either a request dict or a path to a JSON
            file containing one
        """
        if type(request_file) is dict:
            request = request_file
        else:
            with open(request_file, 'r') as fp:
                request = json.load(fp)

        self.username = request.get('username', None)
        self.nameuser = request.get('nameuser', None)
        self.reference = request.get('reference', None)
        # epoch seconds -> datetime; a missing timestamp maps to the epoch
        self.timestamp = datetime.datetime.fromtimestamp(request.get('timestamp', 0))
        self.root = request.get('root', None)
        self.filename = request.get('filename', None)

        # path of the submitted main file, split into stem and extension
        self.main_file = os.path.join(self.root, self.filename)
        self.main_file_name, self.main_file_ext = os.path.splitext(self.main_file)

        self.lang = Langs.get(request.get('lang_id', None))
        self.problem = Problems.get(request.get('problem_id', None))
        try:
            # default to every input case id of the referenced problem
            self.cases = request.get('cases', pluck(self.problem.input if self.problem else [], 'id'))
        except Exception as e:
            print(e)
            # NOTE(review): this fallback is dead — the bare `raise` below
            # re-raises immediately, so `self.cases = []` never survives;
            # either drop the re-raise or drop the assignment
            self.cases = []
            raise

        self.result_file = os.path.join(self.root, 'result.json')
        self.output_root = os.path.join(self.root, 'output')
        self.delete_me_file = os.path.join(self.root, '.delete-me')
Exemple #9
0
def rpluck(obj, path):
    """Pluck the last segment of dotted *path* from the collection
    addressed by the leading segments (resolved via rget)."""
    segments = path.split('.')
    head, tail = segments[:-1], segments[-1]
    return pluck(rget(obj, '.'.join(head), []), tail)
Exemple #10
0
 def get(self):
     """Return a randomly chosen word from the database (HTTP 200)."""
     serialised = [score.to_json() for score in Word.query.all()]
     payload = {
         "word": random.choice(pluck(serialised, "word")),
     }
     return payload, 200
Exemple #11
0
def main():
    """Generate per-trip fuel-consumption summaries for Adelaide Metro
    vehicles and print a tabulated report of trips >= 10 km.

    Trips in the filter collection are skipped; summaries are computed
    in a multiprocessing pool and upserted into ``trip_summary``.
    """
    conf = configparser.ConfigParser()
    conf.read('../../development.ini')
    uri = conf['app:webcan']['mongo_uri']

    conn = MongoClient(uri)['webcan']
    filtered_trips = pluck(conn.webcan_trip_filters.find(), 'trip_id')
    # vid_re = 'rocco_phev'
    vid_re = '^adl_metro'
    # count distinct trip keys (3rd '_'-separated token of trip_id) that
    # are not filtered out — used to size the progress bar
    num_trips = len(set(x.split('_')[2] for x in
                        conn.rpi_readings.distinct('trip_id', {'vid': {'$regex': vid_re}}))
                    - set(x.split('_')[2] for x in filtered_trips))
    # generate a fuel consumption report
    query = {
        # 'vid': vid_re,
        'vid': {'$regex': vid_re},
    }
    if filtered_trips:
        query['trip_id'] = {'$nin': filtered_trips}
    # timestamps decoded as tz-aware Adelaide-local datetimes
    readings = conn.rpi_readings.with_options(
        codec_options=CodecOptions(tz_aware=True, tzinfo=pytz.timezone('Australia/Adelaide')))
    cursor = readings.find(query).sort([('trip_key', 1)])
    report = []

    prog = tqdm.tqdm(desc='Trip Reports: ', total=num_trips, unit=' trips')

    def on_complete(r):
        # pool callback: persist the summary and collect long trips
        # put this in the db
        # print(r)
        conn.trip_summary.insert_one({k: parse(v) for k, v in r.items()})
        if r['Distance (km)'] >= 10:
            report.append(r)
        prog.update()

    def summary_exists(trip_key):
        # skip trips already summarised on a previous run
        return conn.trip_summary.find_one({'trip_key': trip_key}) is not None

    pool = Pool()
    i = 0

    for trip_id, readings in groupby(cursor, key=lambda x: x['trip_key']):

        if summary_exists(trip_id):
            continue
        readings = list(readings)
        # on_complete(fuel_report_trip(trip_id, readings))
        pool.apply_async(fuel_report_trip, args=(trip_id, readings), callback=on_complete)
        i += 1

    pool.close()
    pool.join()
    prog.close()
    print(tabulate.tabulate(report, headers='keys'))
    # NOTE(review): everything below exit() is dead code — the CSV export
    # appears intentionally disabled; remove it or re-enable deliberately
    exit()
    import csv
    with open('adl_metro_report_phev.csv', 'w') as out:
        writer = csv.DictWriter(out, fieldnames=list(report[0].keys()))
        writer.writeheader()
        writer.writerows(report)
Exemple #12
0
    def get_statuses(self, format):
        """Render the status of all worker threads in the given format.

        :param format: a LogStatusFormat member selecting the layout
            (SIMPLE / COMPLEX multi-line listings, or the compact
            ONELINE / ONELINE_GROUPED single-string forms)
        :return: formatted status string
        """
        # sort/group key: the WorkerStatus element of each plucked tuple
        status_getter = lambda x: x[1]
        statuses = pluck(self.threads, 'pretty_name', 'status', 'cpus',
                         'timer')
        # itertools.groupby below requires input sorted by the same key
        sorted_statuses = sorted(statuses, key=status_getter)

        if format is LogStatusFormat.SIMPLE:
            msg = 'Worker statuses:\n'
            msgs = list()
            for key, group in itertools.groupby(sorted_statuses,
                                                key=status_getter):
                grp = list(group)
                if key is WorkerStatus.RUNNING:
                    # running workers additionally list their names
                    msgs.append('  %2d x %s %s' %
                                (len(grp), key.name, [x[0] for x in grp]))
                else:
                    msgs.append('  %2d x %s' % (len(grp), key.name))
            return msg + '\n'.join(msgs)

        elif format is LogStatusFormat.COMPLEX:
            msg = 'Worker statuses:\n'
            msgs = list()
            for key, group in itertools.groupby(sorted_statuses,
                                                key=status_getter):
                msgs.append('%s:' % key.name)
                for name, status, cpus, timer in group:
                    # show elapsed time only once the worker has run
                    if timer.duration > 0.0:
                        msgs.append(' - %dx %s [%1.3f sec]' %
                                    (cpus, name, timer.duration))
                    else:
                        msgs.append(' - %dx %s' % (cpus, name))
            return msg + '\n'.join(msgs)

        elif format is LogStatusFormat.ONELINE:
            # one character per worker: first letter of its status name
            statuses = sorted(
                [x[0] for x in pluck(self.threads, 'status.name')])
            msg = ''.join(statuses).upper(
            )  # .replace('W', '◦').replace('R', '▸').replace('F', ' ') # ⧖⧗⚡
            return msg

        elif format is LogStatusFormat.ONELINE_GROUPED:
            # e.g. "[3 x RUNNING] > [2 x FINISHED]"
            statuses = sorted([x for x in pluck(self.threads, 'status')])
            msg = list()
            for g, d in itertools.groupby(statuses):
                msg.append('[%d x %s]' % (len(list(d)), g.name))
            return ' > '.join(msg)
Exemple #13
0
def reduce_groups(items, reductions=()):
    """Aggregate grouped records into one summary row per group key.

    :param items: mapping group-key -> list of records
    :param reductions: iterable of ``(source_field, target_field, func)``
        triples; for each group, ``func`` is applied to the plucked
        ``source_field`` values and stored under ``target_field``.
        (Immutable tuple default — the original ``reductions=[]`` was a
        mutable default argument.)
    :return: mapping group-key -> {'_id', '_count', *target_fields}
    """
    result = {}
    for key, grp in items.items():
        item = {'_id': key, '_count': len(grp)}
        for reduce_field, new_field, reduce_func in reductions:
            item[new_field] = reduce_func(pluck(grp, reduce_field))
        result[key] = item
    return result
Exemple #14
0
def reduce_groups(items, reductions=()):
    """Summarise each group of records.

    Every group key maps to a dict holding its '_id', its '_count'
    (group size) and one entry per ``(source_field, target_field, func)``
    triple in *reductions*, computed as ``func(pluck(group, source_field))``.
    The default is an immutable tuple rather than the original mutable
    ``[]`` default argument.
    """
    summary = {}
    for key, grp in items.items():
        row = {'_id': key, '_count': len(grp)}
        for src_field, dst_field, reduce_fn in reductions:
            row[dst_field] = reduce_fn(pluck(grp, src_field))
        summary[key] = row
    return summary
Exemple #15
0
 def test_works_with_iterables(self):
     """pluck consumes arbitrary iterables, not only lists."""
     records = iter([
         {'id': 282, 'name': 'Alice', 'age': 30},
         {'id': 217, 'name': 'Bob', 'age': 56},
         {'id': 328, 'name': 'Charlie', 'age': 56},
         {'id': 167},
     ])
     plucked = pluck(records, 'id', 'name', defaults={'name': 'Mr. X'})
     assert plucked == [(282, 'Alice'), (217, 'Bob'),
                        (328, 'Charlie'), (167, 'Mr. X')]
Exemple #16
0
    def make_stats(_readings):
        """Aggregate per-key reading lists into human-readable statistics.

        For each key: total distance (vincenty between consecutive GPS
        fixes within a trip), total duration, per-phase durations with
        percentages, summed usage/cost fields and start/end timestamps.

        :param _readings: mapping key -> chronologically ordered readings
        :return: mapping key -> stats dict
        """
        out = {}
        for key, val in _readings.items():
            if not val:
                continue
            dist = 0
            duration = 0
            phases = {"Phase {}".format(i): 0.0 for i in range(7)}
            # print(val)
            start = val[0]['timestamp']

            for r1, r2 in zip(val, val[1:]):
                if r1['trip_id'] != r2['trip_id']:
                    # trip boundary: close the current trip's duration,
                    # restart the clock, and skip cross-trip distance
                    duration += (r1['timestamp'] - start).total_seconds()
                    start = r2['timestamp']
                    continue
                # coordinates are reversed before the distance call —
                # presumably stored lon/lat and vincenty wants lat/lon;
                # TODO(review): confirm against the readings schema
                dist += vincenty(r1['pos']['coordinates'][::-1],
                                 r2['pos']['coordinates'][::-1]).kilometers

            # group consecutive readings by (phase, trip) so a phase
            # spanning a trip boundary is counted per trip
            for phase, data in groupby(
                    val, key=lambda x: f"{x['phase']}:{x['trip_key']}"):
                phase = phase.split(':')[0]
                data = list(data)
                phase_duration = (data[-1]['timestamp'] -
                                  data[0]['timestamp']).total_seconds()
                phases[f"Phase {phase}"] += phase_duration
            # close the final (still open) trip segment
            duration += (val[-1]['timestamp'] - start).total_seconds()
            h, m, s = seconds_2_hms(duration)
            usages = [
                'Total CO2e (g)',
                'Total Energy (kWh)',
                'Petrol Used (ml)',
                'Petrol CO2e (g)',
                'Petrol cost (c)',
                'P Used (kWh)',
                'E Used (kWh)',
                'E CO2e (g)',
                'E cost (c)',
            ]
            out[key] = {
                'Duration': "%d:%02d:%02d" % (h, m, s),
                'Distance (km)': round(dist, 4),
            }
            for field in usages:
                out[key][field] = round(sum(pluck(val, field, default=0)), 2)
            total_time = sum(phases.values())
            # phase durations rendered as HH:MM:SS plus share of total
            out[key].update({
                k: "{} ({:.2f}%)".format(
                    "{:02d}:{:02d}:{:02d}".format(
                        *(int(x) for x in seconds_2_hms(v))),
                    100 * v / total_time)
                for k, v in phases.items()
            })
            out[key]['Start'] = val[0]['timestamp'].isoformat(' ')
            out[key]['End'] = val[-1]['timestamp'].isoformat(' ')
        return out
def get_diagrams(intersections, request):
    """Fetch location documents that carry a SCATS diagram for the
    given intersections (matched on 'site_no')."""
    site_numbers = pluck(intersections, 'site_no')
    query = {
        'site_no': {'$in': site_numbers},
        'scats_diagram': {'$exists': True},
    }
    return request.db.locations.find(query)
Exemple #18
0
    def test_get_multiple_fields(self):
        """Multi-field pluck returns tuples; defaults apply per field."""
        records = [
            {'id': 282, 'name': 'Alice', 'age': 30},
            {'id': 217, 'name': 'Bob', 'age': 56},
            {'id': 328, 'name': 'Charlie', 'age': 56},
            {'id': '167'},
        ]
        # without a default the record missing 'name' raises
        self.assertRaises(ValueError, pluck, records, 'id', 'name')
        assert pluck(records, 'id', 'name', defaults={'name': 'Mr. X'}) == \
               [(282, 'Alice'), (217, 'Bob'), (328, 'Charlie'), ('167', 'Mr. X')]

        # Specifying a default works, as long as all other fields are found
        self.assertRaises(ValueError, pluck, records, 'name', 'age')

        # This still fails, because there are missing age fields
        self.assertRaises(ValueError, pluck, records, 'name', 'age',
                          defaults={'name': 'Mr. X'})

        assert pluck(records, 'id', 'age', defaults={'age': 100}) == \
               [(282, 30), (217, 56), (328, 56), ('167', 100)]
Exemple #19
0
def trips_of_device(request):
    """List trips (with owning vehicle id) for the requested devices,
    restricted to devices the current user actually owns."""
    requested = set(request.GET.getall('devices[]'))
    owned = set(pluck(request.user['devices'], 'name'))
    # no explicit selection means "all of the user's devices"
    devices = owned if not requested else requested & owned
    trip_ids = list(request.db.rpi_readings.distinct(
        'trip_id', {'vid': {'$in': list(devices)}}))
    trips_with_vid = [
        request.db.rpi_readings.find_one(
            {'trip_id': t, 'vid': {'$exists': True}},
            {'_id': False, 'trip_id': True, 'vid': True})
        for t in trip_ids
    ]
    return {
        'trips': trips_with_vid
    }
Exemple #20
0
def list_groups(request):
    """List LED groups for the current user.

    Admins see every group; regular users only see groups they belong
    to. Also returns the ids of groups the user administers
    (access_level == 2).
    """
    # a regular user will have their groups listed
    # admin will see all groups
    user = request.user

    query = request.db_session.query(LedGroup)
    users_groups = request.db_session.query(LedGroupUser).filter(LedGroupUser.led_user == user).all()
    if not user.admin:
        # BUG FIX: Query.filter returns a NEW query; the original discarded
        # the result, so non-admin users saw every group. Assign it back.
        query = query.filter(LedGroup.id.in_(pluck(users_groups, 'led_group_id')))
    groups_admin = [group.led_group_id for group in users_groups if group.access_level == 2]
    return {
        'user_admins': groups_admin,
        'groups': query.all()
    }
Exemple #21
0
def get_comments(request):
    """Render the paginated comments of one article with author names.

    :param request: Django request; expects GET 'id' (article) and an
        optional GET 'page'
    """
    comments = Comments.objects.filter(article_id=request.GET['id']).order_by('-id')
    comments = pagination(comments, request.GET.get('page'))
    login_ids = pluck(comments, 'login_id')
    # index users by id once instead of the original O(n*m) nested scan
    users_by_id = {u.id: u for u in Login.objects.filter(id__in=login_ids)}
    all_comments = []
    for comment in comments:
        user_detail = users_by_id.get(comment.login_id)
        # comments whose author record is missing are skipped, as before
        if user_detail is not None:
            all_comments.append({
                'name': user_detail.first_name + ' ' + user_detail.last_name,
                'comment': comment.comment
            })
    return render(request, 'comments.html', {'comments':comments, 'all_comments':all_comments, 'id':request.GET['id']})
Exemple #22
0
def home(request):
    """Render the article listing page.

    Logged-in users with no explicit 'category'/'all' filter see
    articles matching their stored interests (falling back to all
    articles when nothing matches); otherwise the category / all /
    default listing is shown.
    """
    if request.user.is_authenticated and 'category' not in request.GET and 'all' not in request.GET:
        # interests are stored as a stringified list, e.g. "[1, 2, 3]"
        intrests = Login.objects.filter(id=request.user.id).first()
        raw = intrests.intrests.replace('[', '').replace(']', '').replace("'", '')
        intrests_id = [int(d) for d in raw.split(',')]
        articles = Article.objects.filter(category_id__in=intrests_id).order_by('-created_at')
        if len(articles) == 0:
            articles = Article.objects.order_by('-created_at')
    elif 'category' in request.GET:
        articles = Article.objects.filter(category_id=request.GET['category']).order_by('-created_at')
    else:
        # covers both 'all' in request.GET and the anonymous default
        articles = Article.objects.filter().order_by('-created_at')

    total = Article.objects.count()
    articles = pagination(articles, request.GET.get('page'))
    login_ids = pluck(articles, 'login_id')
    # index users and categories once instead of re-scanning per article;
    # the original also issued the Category query twice
    users_by_id = {u.id: u for u in Login.objects.filter(id__in=login_ids)}
    categories = Category.objects.filter()
    category_names = {c.id: c.name for c in categories}

    all_articles = []
    for article in articles:
        user_detail = users_by_id.get(article.login_id)
        # articles whose author record is missing are skipped, as before
        if user_detail is None:
            continue
        data = {
            'id':article.id,
            'author':user_detail.first_name + ' ' + user_detail.last_name,
            'title':article.title,
            'image':article.image,
            'content':article.content,
            'created_at':article.created_at if not article.updated_at else article.updated_at,
            'login_id':article.login_id,
            'category_id':article.category_id,
        }
        if article.category_id in category_names:
            data['category_name'] = category_names[article.category_id]
        all_articles.append(data)
    return render(request, 'home.html', {'articles': articles, 'all_articles':all_articles, 'total':total, 'categories':categories})
Exemple #23
0
        def target():
            """Monitor loop: periodically log worker statuses until every
            thread reports FINISHED."""
            # no work to be done? just finish up here
            # and return the thread
            if not self.threads:
                return

            last_print = 0
            while True:
                # throttle status logging to once per log_period seconds
                if (time.time() - last_print) > log_period:
                    logger.debug(self.get_statuses(format))
                    last_print = time.time()

                # exit once every thread's status is FINISHED
                if set(pluck(self.threads,
                             'status')) == {WorkerStatus.FINISHED}:
                    break
                time.sleep(update_period)
Exemple #24
0
async def get_my_all_jobs(token: str = Header(None)):
    """Return every job the authenticated user has applied to.

    401 when the token is invalid, 404 when the user has no applications
    or none of the applied jobs exist, otherwise 200 with the job list.
    """
    authenticated_user = verify_token(token)
    if not authenticated_user:
        response = {'detail': 'UNAUTHORIZED ACCESS', 'status': 401}
        return JSONResponse(status_code=status.HTTP_401_UNAUTHORIZED,
                            content=response)

    my_jobs = session.query(AppliedJob).filter(
        AppliedJob.c.user_id == authenticated_user['user_id'])
    if my_jobs.count() == 0:
        response = {'detail': 'JOBS NOT FOUND', 'status': 404}
        return JSONResponse(status_code=status.HTTP_404_NOT_FOUND,
                            content=response)

    job_ids = set(pluck(my_jobs, 'job_id'))
    total_jobs = len(job_ids)
    jobs = session.query(Job).filter(Job.c.id.in_(job_ids))
    if jobs.count() == 0:
        response = {'detail': 'JOBS NOT FOUND', 'status': 404}
        return JSONResponse(status_code=status.HTTP_404_NOT_FOUND,
                            content=response)

    # index jobs by id once instead of the original O(n*m) nested scan
    jobs_by_id = {job.id: job for job in jobs}
    my_all_jobs = []
    for my_job in my_jobs:
        job = jobs_by_id.get(my_job.job_id)
        # applications referencing a vanished job are skipped, as before
        if job is None:
            continue
        my_all_jobs.append({
            'my_status':
            my_job.status.value if my_job.status else 'NA',
            'job_details': {
                'id': job.id,
                'company_name': job.company_name,
                'job_title': job.job_title,
                'job_type': job.job_type.value,
                'experiance_min': job.experiance_min,
                'experiance_max': job.experiance_max,
                'job_count': job.job_count,
                'location': job.location,
                'job_status': job.status.value if job.status else 'NA',
            }
        })

    response = {'Total': total_jobs, 'Jobs': my_all_jobs, 'status': 200}
    return JSONResponse(status_code=status.HTTP_200_OK, content=response)
Exemple #25
0
    def test_tile_list(self):
        """The tiles endpoint serialises each tile with its concrete type."""
        game = Game.objects.create()
        board = Board.objects.create(game=game)
        # one tile of each concrete kind, each in its own group,
        # created in the order the endpoint is expected to return them
        for tile_cls in (SimpleTile, ColoredTile, ChanceTile, ChancelleryTile):
            group = TileGroup.objects.create(board=board)
            tile_cls.objects.create(tile_group=group)

        response = self.client.get("/api/tiles/")
        self.assertEqual(
            pluck(response.json(), "resourcetype"),
            ["SimpleTile", "ColoredTile", "ChanceTile", "ChancelleryTile"],
        )
Exemple #26
0
def fuel_report_trip(trip_id, p):
    """Build a summary report dict for one trip's readings.

    Sorts the readings in place by trip_sequence, derives per-reading
    extras via calc_extra, then aggregates speed, fuel, CO2 and energy
    statistics.

    :param trip_id: value stored under 'trip_key' in the report
    :param p: list of reading dicts for the trip (mutated: sorted and
        updated with calc_extra results)
    :return: report dict of aggregate statistics
    """
    report = {'trip_key': trip_id}
    prev = None
    p.sort(key=lambda x: x['trip_sequence'])
    for r in p:
        # derive values that depend on the previous reading
        r.update(calc_extra(r, prev))
        prev = r
    fms_spd = 'FMS_CRUISE_CONTROL_VEHICLE_SPEED (km/h)'
    gps_spd = 'spd_over_grnd'
    duration = (p[-1]['timestamp'] - p[0]['timestamp']).total_seconds()
    speeds_fms = np.array(pluck(p, fms_spd, default=0))
    speeds = np.array(pluck(p, gps_spd, default=0))
    fuel_rates = np.array(pluck(p, 'FMS_FUEL_ECONOMY (L/h)', default=0))
    # durations = np.array(pluck(p, '_duration', default=0)) / 3600
    # fuels = fuel_rates * durations  # should be in ml

    # if not any(fuel_rates):
    #     fuel_rates = np.array(pluck(p, 'Petrol Used (ml)', default=0)) / 1000 / durations
    fuels = pluck(p, 'Petrol Used (ml)', default=0)
    co2s = pluck(p, 'Total CO2e (g)', default=0)
    idle_time = 0
    # idle = total time of intervals spent below 2 km/h (GPS speed)
    for r in p:
        if gps_spd in r and '_duration' in r and r[gps_spd] < 2:
            idle_time += r['_duration']
    energy = np.array(pluck(p, 'Total Energy (kWh)', default=0))
    report.update({
        'vid': p[0]['vid'],
        'Start Time': p[0]['timestamp'],
        'Finish Time': p[-1]['timestamp'],
        'Duration (s)': duration,
        'Idle Duration (s)': idle_time,
        'Distance (km)': sum(
            vincenty(r1['pos']['coordinates'], r2['pos']['coordinates']).kilometers for r2, r1 in zip(p, p[1:])),
        'GPS Min Speed (km/h)': np.min(speeds),
        'GPS Max Speed (km/h)': np.max(speeds),
        'GPS Mean Speed (km/h)': np.mean(speeds),
        'GPS STDEV Speed (km/h)': np.std(speeds),
        'FMS Min Speed (km/h)': np.min(speeds_fms),
        'FMS Max Speed (km/h)': np.max(speeds_fms),
        # NOTE(review): 'FSM' below is likely a typo for 'FMS', but the key
        # is part of the stored report schema — renaming would break readers
        'FSM Mean Speed (km/h)': np.mean(speeds_fms),
        'FMS STDEV Speed (km/h)': np.std(speeds_fms),
        'Total Fuel (ml)': np.sum(fuels),
        'Min Fuel rate (L/h)': np.min(fuel_rates),
        'Max Fuel Rate (L/h)': np.max(fuel_rates),
        'Mean Fuel Rate (L/h)': np.mean(fuel_rates),
        'STDEV Fuel Rate (L/h)': np.std(fuel_rates),
        'Mean CO2 (g)': np.mean(co2s),
        'STDEV CO2 (g)': np.std(co2s),
        'Total CO2e (g)': np.sum(co2s),
        'Total Energy (kWh)': energy.sum(),
        '% Idle': idle_time / duration * 100
    })
    return report
Exemple #27
0
def main():
    """Plot per-trip fuel usage rate against total CO2e.

    Loads trip summaries (>= 5 km, excluding filtered trips) and draws
    one scatter series per vehicle id.
    """
    import matplotlib.pyplot as plt

    # get all the bus data
    conf = configparser.ConfigParser()
    conf.read('../../development.ini')
    uri = conf['app:webcan']['mongo_uri']

    conn = MongoClient(uri)['webcan']
    filtered_trips = pluck(conn.webcan_trip_filters.find(), 'trip_id')
    # vid_re = '^adl_metro_1905'

    query = {
        # 'vid': {'$in': ['adl_metro_1905','adl_metro_1901', 'adl_metro_2451', 'adl_metro_2450']},
        'Distance (km)': {
            '$gte': 5
        }
    }
    if filtered_trips:
        query['trip_id'] = {'$nin': filtered_trips}
    data = list(conn['trip_summary'].find(query))
    # per-vehicle scatter series
    series = defaultdict(lambda: {'x': [], 'y': []})
    xf = 'Total Fuel (ml)'
    yf = 'Total CO2e (g)'
    for trip_summary in data:
        # convert total ml over the trip into litres per 100 km
        fuel_litres = trip_summary[xf] * 0.001
        dist_100km = trip_summary['Distance (km)'] / 100.
        fuel_usage_rate_per_100km = fuel_litres / dist_100km
        series[trip_summary['vid']]['x'].append(fuel_usage_rate_per_100km)
        series[trip_summary['vid']]['y'].append(trip_summary[yf])

    # one distinct colour per vehicle
    colors = cm.rainbow(np.linspace(0, 1, len(series)))
    it = cycle(colors)
    for vid, xy in series.items():
        x, y = np.array(xy['x']), np.array(xy['y'])
        color = next(it)
        plt.scatter(x, y, label="{} ({})".format(vid, len(y)), color=color)

    plt.legend()
    # NOTE(review): the x axis actually shows L/100km, not the raw
    # 'Total Fuel (ml)' field used as its label
    plt.xlabel(xf)
    plt.ylabel(yf)
    allys = np.array(sum((vals['y'] for vals in series.values()), []))
    plt.title("{} vs {} n={}".format(xf, yf, allys.size))
    plt.show()
Exemple #28
0
    def update(self, customer, validated_data):
        """Update a customer and replace its address set.

        Addresses absent from the payload are deleted; the rest are
        upserted. At most one address may be flagged is_primary.
        """
        addresses = validated_data.pop("addresses")
        primaries = [a for a in addresses if a.get("is_primary", False)]
        if len(primaries) > 1:
            raise ValidationError("There are more than one primary address")

        customer = super().update(customer, validated_data)

        # drop addresses that no longer appear in the payload
        keep_ids = pluck(addresses, "id", default=None)
        customer.addresses.exclude(id__in=keep_ids).delete()

        for address in addresses:
            address["customer_id"] = customer.id
            Address.objects.update_or_create(id=address.get("id"),
                                             defaults=address)

        return customer
Exemple #29
0
def crash_in_polygon(request):
    """Find crashes inside a user-drawn polygon, with nearby readings.

    The request body is a list of coordinate pairs; the polygon is
    closed by repeating the first point. For each crash, attaches the
    two nearest intersections and up to six traffic readings within
    +/- 5 minutes of the crash time.
    """
    with _get_mongo_client() as client:
        crashes_collection = client[mongo_database]['crashes']

        points = request.json_body
        # make sure it's a list of lists of floats
        # GeoJSON polygons must be closed: repeat the first point at the end
        points.append(points[0])
        crashes = crashes_collection.find({'loc': {
            '$geoWithin': {
                '$geometry': {
                    'type': 'Polygon',
                    'coordinates': [points]
                }
            }
        }})
        readings_collection = client[mongo_database]['readings']
        crashes = list(crashes)
        td = timedelta(minutes=5)
        for i, crash in enumerate(crashes):
            # find the nearest 2 intersections
            # and get the readings for the downstream one
            sites = client[mongo_database]['locations'].find({
                'loc': {
                    '$geoNear': {
                        '$geometry': crash['loc']
                    }
                }
            }).limit(2)
            sites = pluck(list(sites), 'intersection_number')
            # readings at those sites within +/- 5 minutes of the crash
            readings = readings_collection.find({
                'datetime': {'$gte': crash['datetime'] - td, '$lte': crash['datetime'] + td},
                'site_no': {'$in': sites}
            }).limit(6).sort([['site_no', pymongo.ASCENDING], ['datetime', pymongo.ASCENDING]])
            crashes[i]['readings'] = list(readings)
            crashes[i]['sites'] = sites
    return {
        'crashes': crashes
    }
Exemple #30
0
def show_group(request):
    """Show one LED group: its members, schedule and related pick-lists.

    Accessible to site admins and group members only; raises
    HTTPForbidden otherwise.
    """
    # a regular user will have their groups listed
    # admin will see all groups
    user = request.user
    group = request.db_session.query(LedGroup).filter(LedGroup.id == request.matchdict['group_id']).first()

    users = request.db_session.query(LedGroupUser).filter(LedGroupUser.led_group == group).all()
    if not (user.admin or user.id in pluck(users, 'led_user_id')):
        raise exc.HTTPForbidden("Only site admins or group members can view this")
    schedule = request.db_session.query(LedSchedule).filter(LedSchedule.led_group == group).order_by(LedSchedule.position.asc()).all()
    # users not yet in the group (candidates to add)
    subquery = request.db_session.query(LedGroupUser.led_user_id).filter(LedGroupUser.led_group == group)
    other_users = request.db_session.query(LedUser).filter(~LedUser.id.in_(subquery))
    # NOTE(review): this second subquery is unused — the filtered
    # other_plugins query below is commented out, so all plugins are listed
    subquery = request.db_session.query(LedSchedule.led_plugin_id).filter(LedSchedule.led_group == group)
    # other_plugins = request.db_session.query(LedPlugin).filter(~LedPlugin.id.in_(subquery))
    other_plugins = request.db_session.query(LedPlugin)
    return {
        'group': group,
        'users': users,
        'schedule': schedule,
        'other_users': other_users,
        'other_plugins': other_plugins,
        'group_admin': can_modify_group(request, group.id, False)
    }
def export(results, props):
    """Render *results* as a transposed TSV table.

    The first line lists the result names; each following line holds one
    property with its values formatted to three decimals.

    :param results: dicts carrying a 'name' key plus numeric properties
    :param props: property keys to emit, one row each
    :return: the TSV text, each line newline-terminated
    """
    lines = ['name\t' + '\t'.join(pluck(results, 'name')) + '\n']
    for prop in props:
        cells = '\t'.join('%1.3f' % value for value in pluck(results, prop))
        lines.append(prop + '\t' + cells + '\n')
    return ''.join(lines)
def get_diagrams(intersections):
    """Fetch location documents that carry a SCATS diagram.

    :param intersections: dicts with an 'intersection_number' field
    :return: a Mongo cursor over matching 'locations' documents
    """
    numbers = pluck(intersections, 'intersection_number')
    query = {'intersection_number': {'$in': numbers},
             'scats_diagram': {'$exists': True}}
    with _get_mongo_client() as client:
        return client[mongo_database]['locations'].find(query)
Exemple #33
0
 def avgStdSpeed(l):
     """Return (mean, std) of the 'speed' field plucked from readings *l*.

     :param l: iterable of readings carrying a 'speed' field
     :return: tuple of (np.mean, np.std) over the speeds
     """
     # BUG FIX: `speed` was a bare, undefined name; the field key is the
     # string 'speed', as used elsewhere in this codebase.
     speeds = pluck(l, 'speed')
     return np.mean(speeds), np.std(speeds)
Exemple #34
0
from pluck import pluck, ipluck
from datetime import datetime

# pluck is the simplest way of plucking “fields” from an iterable of values.
# “Fields” are either item.field or item[field]. Pluck tries both,
# in that order. If nothing is found, and no default value is specified,
# it throws an exception.
#pluck(iterable, key)
#pluck(iterable, *keys)

# Demo: pluck reads attributes (item.field) — here datetime's .day / .hour.
dates = [
    datetime(2012, 10, 22, 12, 00),
    datetime(2012, 10, 22, 15, 14),
    datetime(2012, 10, 22, 21, 44),
]
print(pluck(dates, 'day'))
#[22, 22, 22]
print(pluck(dates, 'hour'))
#[12, 15, 21]

#It also works on dictionary-like access (__getitem__):
objects = [
    {
        'id': 282,
        'name': 'Alice',
        'age': 30,
        'sex': 'female'
    },
    {
        'id': 217,
        'name': 'Bob',
Exemple #35
0
def phase_classify_csv_render(request):
    """Classify matching readings into driving phases and return a CSV
    attachment of per-phase statistics.

    POST parameters: min-phase-seconds, cruise-window; optional
    select-trips / select-vids / map-hull narrow the reading query.

    :raises exc.HTTPBadRequest: when the query matches no readings
    """
    query = {
        # 'timestamp': {'$exists': True, '$ne': None},
    }

    min_phase_time = int(request.POST.get('min-phase-seconds'))
    cruise_window = int(request.POST.get('cruise-window'))
    if request.POST.get('select-trips'):
        query['trip_id'] = {'$in': request.POST.get('select-trips').split(',')}
    else:
        # exclude trips that were explicitly filtered out
        # NOTE(review): this re-reads 'select-trips', which is falsy in this
        # branch (and .split() raises if it is None) — looks like it should
        # be 'select-vids'; confirm against the submitting form
        query['trip_id'] = {
            '$nin':
            pluck(
                request.db.webcan_trip_filters.find({
                    'vid': {
                        '$in': request.POST.get('select-trips').split(',')
                    }
                }), 'trip_id')
        }
    if request.POST.get('select-vids'):
        query['vid'] = {'$in': request.POST.get('select-vids').split(',')}
    if any(request.POST.getall('map-hull')):
        # restrict readings to the user-drawn polygon
        query['pos'] = {
            '$geoWithin': {
                '$geometry': {
                    'type': 'Polygon',
                    'coordinates': request.POST.getall('map-hull')
                }
            }
        }
    print(request.POST)
    print(query)
    cursor = request.db.rpi_readings.find(query, {
        '_id': False
    }).sort(SORT_TRIP_SEQ)
    # groupby relies on the sort above keeping each trip contiguous
    trips = groupby(cursor, lambda x: x['trip_key'])
    rows = []
    for trip_id, readings in trips:
        readings = list(readings)
        # compute per-reading derived fields (deltas need the previous row)
        prev = None
        for i in readings:
            i.update(calc_extra(i, prev))
            prev = i
        _classify_readings_with_phases_pas(readings, min_phase_time,
                                           cruise_window)
        phase_report = per_phase_report(readings, min_phase_time)
        rows.extend(phase_report)
    if len(rows) == 0:
        raise exc.HTTPBadRequest('No data returned for query')
    headers = rows[0].keys()
    data = {
        'header': headers,
        'rows': rows,
    }
    # stream the report back as a timestamped CSV attachment
    renderer_obj = get_renderer('csv')
    request.response.content_type = renderer_obj.content_type
    request.response.content_disposition = 'attachment;filename=phase_report_{}.{}'.format(
        datetime.now().strftime('%Y%m%d_%H%M%S'),
        renderer_obj.content_type.split('/')[1])
    return render_to_response(renderer_name='csv',
                              value=data,
                              request=request,
                              response=request.response)
Exemple #36
0
    def gen_summary_report_for_vehicle(vid):
        """Build a summary (trip count, distance, time, date range, vehicle
        doc) for one vehicle from cached trip_summary documents.

        NOTE(review): closure over names defined in the enclosing scope
        (request, min_trip_distance, excluded, Pool) not visible here.
        """
        # get all distinct trip_keys for this vehicle
        # trip_keys = list([x.split('_')[2] for x in request.db.rpi_readings.distinct('trip_id', {'vid': vid})])
        # get all the trip_summary data, if not exists, generate
        report = dict(trips=0,
                      distance=0,
                      time=0,
                      first=datetime(9999, 1, 1),
                      last=datetime(2000, 1, 1))
        # attach the device document minus its secret
        report['vehicle'] = request.db.webcan_devices.find_one({'name': vid}, {
            '_id': 0,
            'secret': 0
        })

        def merge(trip_info):
            # fold one trip's stats into the report, or record it as
            # excluded when it is under the distance threshold
            x, trip_id = trip_info
            if x['distance'] >= min_trip_distance:
                for k, v in x.items():
                    report[k] += v
            else:
                excluded.append(trip_id)

        filtered_trips = pluck(
            request.db.webcan_trip_filters.find({'vid': vid}), 'trip_id')
        # NOTE(review): the pool is vestigial — only close()/join() are called
        pool = Pool()
        query = {
            'vid': vid,
            'trip_key': {
                '$nin': filtered_trips
            },
            'Distance (km)': {
                '$gte': min_trip_distance
            },
            'Fuel Economy (L/100km)': {
                '$gt': 1,
                '$lt': 100
            }
        }
        vehicle_obj = request.db.webcan_devices.find_one({'name': vid})
        if vehicle_obj is not None and vehicle_obj['type'] == 'Electric':
            # electric vehicles report no fuel economy; drop that filter
            del query['Fuel Economy (L/100km)']
        for trip_summary in request.db.trip_summary.find(query, {'phases': 0}):
            # if trip_summary is None:
            #     cursor = request.db.rpi_readings.find({
            #         'vid': vid,
            #         'trip_id': {'$nin': filtered_trips}
            #     }, {'_id': False}).sort('trip_key', 1)
            #     trips = groupby(cursor, lambda x: x['trip_key'])
            #     print("Doing summary for", trip_key)
            #     for trip_id, readings in trips:
            #         # summarise_trip(trip_id, list(readings), report)
            #         lreadings = list(readings)
            #         report['first'] = min(lreadings[0]['timestamp'], report['first'])
            #         report['last'] = max(lreadings[-1]['timestamp'], report['last'])
            #         pool.apply_async(summarise_trip, args=(trip_id, lreadings), callback=merge, error_callback=print)
            # else:
            # widen the observed date range with this trip
            report['first'] = min(trip_summary['Start Time'], report['first'])
            report['last'] = max(trip_summary['Finish Time'], report['last'])
            # print("Used cached", trip_key)
            ts = {
                'trips': 1,
                'distance': trip_summary['Distance (km)'],
                'time': trip_summary['Duration (s)']
            }
            merge((ts, trip_summary['trip_key']))
        pool.close()
        pool.join()
        return report
Exemple #37
0
def summary_report_do(request):
    """Produce per-vehicle and aggregate trip summaries for the vehicles
    selected in the POST, plus the list of excluded (too-short) trips.

    :return: {'summary': {vid: report, ..., 'Aggregate': totals},
              'excluded': [trip_id, ...]}
    """
    min_trip_distance = float(request.POST.get('min-trip-distance', 5))
    excluded = []

    def gen_summary_report_for_vehicle(vid):
        # Build one vehicle's summary from cached trip_summary documents.
        # get all distinct trip_keys for this vehicle
        # trip_keys = list([x.split('_')[2] for x in request.db.rpi_readings.distinct('trip_id', {'vid': vid})])
        # get all the trip_summary data, if not exists, generate
        report = dict(trips=0,
                      distance=0,
                      time=0,
                      first=datetime(9999, 1, 1),
                      last=datetime(2000, 1, 1))
        # attach the device document minus its secret
        report['vehicle'] = request.db.webcan_devices.find_one({'name': vid}, {
            '_id': 0,
            'secret': 0
        })

        def merge(trip_info):
            # fold one trip's stats into the report, or record it as
            # excluded when it is under the distance threshold
            x, trip_id = trip_info
            if x['distance'] >= min_trip_distance:
                for k, v in x.items():
                    report[k] += v
            else:
                excluded.append(trip_id)

        filtered_trips = pluck(
            request.db.webcan_trip_filters.find({'vid': vid}), 'trip_id')
        # NOTE(review): the pool is vestigial — only close()/join() are called
        pool = Pool()
        query = {
            'vid': vid,
            'trip_key': {
                '$nin': filtered_trips
            },
            'Distance (km)': {
                '$gte': min_trip_distance
            },
            'Fuel Economy (L/100km)': {
                '$gt': 1,
                '$lt': 100
            }
        }
        vehicle_obj = request.db.webcan_devices.find_one({'name': vid})
        if vehicle_obj is not None and vehicle_obj['type'] == 'Electric':
            # electric vehicles report no fuel economy; drop that filter
            del query['Fuel Economy (L/100km)']
        for trip_summary in request.db.trip_summary.find(query, {'phases': 0}):
            # if trip_summary is None:
            #     cursor = request.db.rpi_readings.find({
            #         'vid': vid,
            #         'trip_id': {'$nin': filtered_trips}
            #     }, {'_id': False}).sort('trip_key', 1)
            #     trips = groupby(cursor, lambda x: x['trip_key'])
            #     print("Doing summary for", trip_key)
            #     for trip_id, readings in trips:
            #         # summarise_trip(trip_id, list(readings), report)
            #         lreadings = list(readings)
            #         report['first'] = min(lreadings[0]['timestamp'], report['first'])
            #         report['last'] = max(lreadings[-1]['timestamp'], report['last'])
            #         pool.apply_async(summarise_trip, args=(trip_id, lreadings), callback=merge, error_callback=print)
            # else:
            # widen the observed date range with this trip
            report['first'] = min(trip_summary['Start Time'], report['first'])
            report['last'] = max(trip_summary['Finish Time'], report['last'])
            # print("Used cached", trip_key)
            ts = {
                'trips': 1,
                'distance': trip_summary['Distance (km)'],
                'time': trip_summary['Duration (s)']
            }
            merge((ts, trip_summary['trip_key']))
        pool.close()
        pool.join()
        return report

    """
    Summary is a dict of
    vehicle_id: {
        'trips': int,
        'distance': float,
        'time': float,
        'petrol_used': float
    }
    """
    summary = {}

    # group everything by trip_id
    vids = request.POST.getall('devices[]')
    if not vids:
        # NOTE(review): this *returns* the error response rather than
        # raising it — confirm that is what the framework expects here
        return exc.HTTPBadRequest('Please enter at least 1 vehicle')
    for vid in vids:
        summary[vid] = gen_summary_report_for_vehicle(vid)
    # print(summary)
    # roll all per-vehicle reports up into an 'Aggregate' entry
    vals = list(summary.values())
    summary['Aggregate'] = {
        key: sum(pluck(vals, key))
        for key in ['trips', 'distance', 'time']
    }
    summary['Aggregate']['last'] = max(pluck(vals, 'last'))
    summary['Aggregate']['first'] = min(pluck(vals, 'first'))
    return {'summary': summary, 'excluded': excluded}
Exemple #38
0
def process_readings(readings, intersection, write_anomaly, progress=True, multi_model=False, smoothing=0):
    counter = 0
    total = readings.count(True)

    if multi_model:

        loc = locations_collection.find_one({'intersection_number': intersection})
        models = {}
        for sensor in loc['sensors']:
            models[sensor] = Worker(sensor, intersection)
            models[sensor].start()
    else:
        model = createModel(intersection)
        anomaly_likelihood_helper = anomaly_likelihood.AnomalyLikelihood(1000, 200)
        if model is None:
            print "No model could be made for intersection", intersection
            return
        pfield = model.getInferenceArgs()['predictedField']
        encoders = get_encoders(model)
    if progress:
        progBar = pyprind.ProgBar(total, width=50)
    _smoothing = smoothing >= 1
    if _smoothing:
        previous = deque(maxlen=smoothing)
    for i in readings:
        counter += 1
        if progress:
            progBar.update()
        timestamp = i['datetime']
        if multi_model:
            predictions, anomalies = {}, {}
            for sensor, proc in models.iteritems():
                vc = i['readings'][sensor]
                if vc > max_vehicles:
                    vc = None
                elif _smoothing and len(previous):
                    vc = (vc + sum(pluck(sensor, previous)))/float(len(previous) + 1)
                fields = {"timestamp": timestamp, sensor: vc}
                proc.queue_in.put(fields)
            for sensor, proc in models.iteritems():
                result = proc.queue_out.get()
                # (self.sensor, prediction, anomaly_score, likelihood)
                anomalies[result[0]] = {'score': result[2], 'likelihood': result[3]}
                predictions[result[0]] = result[1]
        else:
            fields = {"timestamp": timestamp}
            for p, j in enumerate(i['readings'].items()):
                if j[0] not in encoders:
                    continue
                vc = j[1]
                if vc > max_vehicles:
                    vc = None
                fields[j[0]] = vc
            result = model.run(fields)
            prediction = result.inferences["multiStepBestPredictions"][1]
            anomaly_score = result.inferences["anomalyScore"]
            predictions = {pfield: prediction}
            likelihood = anomaly_likelihood_helper.anomalyProbability(
                i['readings'][pfield], anomaly_score, timestamp)
            anomalies = {pfield: {'score': anomaly_score, 'likelihood': likelihood}}
        if write_anomaly:
            write_anomaly_out(i, anomalies, predictions)
        if _smoothing:
            previous.append(i['readings'])
    locations_collection.find_one_and_update({'intersection_number': intersection}, {'$unset': {'running': ''}})
    if multi_model:
        for proc in models.values():
            proc.terminate()
    else:
        save_model(model, intersection)
    if progress:
        print
    print "Read", counter, "lines"
# Memory-watcher main loop (Python 2 script). Polls `ps` for the top
# memory consumers and keeps a rolling history of RSS per PID.
# NOTE(review): limit, duration, measurements, history_size, points,
# measure_index and get_name_by_pid are defined earlier in the file,
# outside this view.
assert duration >= 0.1, "To ensure machine safety, duration must be greater than 100 ms"

print 'Watching memory usage (max: {limit} MB, check interval: {duration:1.2f} ms)'.format(limit=limit, duration=float(duration*1000))
while True:
    # top 200 processes sorted by resident set size, largest first
    output = subprocess.check_output("ps aux --sort '-rss' --cols 200 | head -n 200", shell=True)
    lines = output.splitlines()
    # first line is the ps header row; split into at most 11 columns so the
    # command column keeps its embedded spaces
    headers = re.split(r'\s+', lines[0], 10)
    del lines[0]

    processes = list()
    for line in lines:
        attrs = re.split(r'\s+', line, 10)

        processes.append(dict(zip(headers, attrs)))

    # RSS is reported in KB; convert to MB
    rsss = [int(x) / 1000 for x in pluck(processes, 'RSS')]
    pids = pluck(processes, 'PID')

    # rolling window of {pid: rss_mb} snapshots
    pairs = dict(zip(pids, rsss))
    measurements.append(pairs)
    if len(measurements) > history_size:
        del measurements[0]

    # first entry is the biggest consumer (ps sorted by -rss)
    top = dict(
        pid=pids[0],
        rss=rsss[0],
        name=get_name_by_pid(pids[0])
    )
    # alternate the spinner character; trailing comma suppresses the newline
    measure_index = (measure_index + 1) % 2
    print '\r ' + points[measure_index] + ' Top process "{name}:{pid}" with {rss} MB'.format(**top),
    sys.stdout.flush()
Exemple #40
0
def refresh_schedule():
    """
    Get the latest scheduling from the database
    The next plugin to run is at the right
    :return:class Runner:

    def __init__(self, board_dimensions):
        self.board_dimensions = board_dimensions
        import numpy as np
        self.np = np
        self.pixels = self.np.random.normal(128, 128, (self.board_dimensions[0], self.board_dimensions[1], 3)).astype(self.np.uint8)

    def run(self):
        # shift everything to the left, put a new random column on the end

        self.pixels = self.np.roll(self.pixels, 1, 0)
        col = self.np.random.normal(128, 128, [17, 3]).astype(dtype=self.np.uint8)
        for idx, i in enumerate(col):
            self.pixels[0][idx] = i
        self.pixels.sort(1)
        return self.pixels
    """
    global current_plugin
    global do_exception
    #print "Updating schedule"

    try:
        connection = get_connection()
    except pymysql.err.OperationalError as e:
        print e
        print "Using default schedule"
        return test_sched()
    cursor = connection.cursor()
    # need to fix this so that it loads the right schedule
    #
    group_id = get_current_schedule(connection)
    if group_id is None:
        return test_sched()
    sql = """SELECT * FROM `led_schedule`
             LEFT OUTER JOIN led_plugin
             ON led_schedule.led_plugin_id = led_plugin.id
             WHERE
              `enabled` = 1 AND
              `led_group_id` = {}
             ORDER BY `position` DESC """.format(group_id['id'])
    cursor.execute(sql)
    schedule.clear()
    cursor.close()

    for row in cursor:
        schedule.append(row)
    cursor.close()
    
    cursor = connection.cursor()
    sql = """SELECT * FROM `led_skip`"""
    cursor.execute(sql)
    if cursor.fetchone():
        do_exception = True
        cursor.execute("TRUNCATE TABLE `led_skip`")
    cursor.close()
    
    connection.close()
    # if there schedule has > 2 elems,
    # roll the schedule until we get old_schedule[0] at the start

    if current_plugin and len(schedule) > 2 and current_plugin['id'] in pluck(schedule, 'id'):
        while schedule[-1]['id'] != current_plugin['id']:
            schedule.rotate(-1)
    else:
        schedule.rotate(-1)
    # show_schedule(schedule)
    return schedule
Exemple #41
0
def per_phase_report(readings, min_duration=5):
    """
    Compute summary statistics for every driving phase in *readings*,
    splitting phases by trip_id as well as phase value.

    :param readings: phase-classified readings, ordered by trip and time
    :param min_duration: phases shorter than this many seconds are dropped
    :return: list of per-phase stat dicts
    """
    report_rows = []
    grouped = [(key, list(grp)) for key, grp in
               groupby(readings, key=lambda r: f"{r['phase']}:{r['trip_id']}")]
    for phase_no, (group_key, p) in enumerate(grouped):
        phase_type = group_key.split(':')[0]
        duration = (p[-1]['timestamp'] - p[0]['timestamp']).total_seconds()
        if duration < min_duration:
            continue
        if len(p) < 2:
            continue

        speeds = pluck(p, 'speed')
        durations = np.array(pluck(p, '_duration', default=1e-9)) / 3600
        fuel_rates = np.array(pluck(p, 'FMS_FUEL_ECONOMY (L/h)',
                                    default=0)) / 1000
        fuels = fuel_rates * durations  # should be in ml
        if not any(fuel_rates):
            # no FMS fuel data: fall back to per-reading petrol usage
            fuel_rates = np.array(pluck(p, 'Petrol Used (ml)',
                                        default=0)) / 1000 / durations
            fuels = pluck(p, 'Petrol Used (ml)', default=0)
        co2s = pluck(p, 'Total CO2e (g)', default=0)
        energy = np.array(pluck(p, 'Total Energy (kWh)', default=0))

        # per-pair acceleration in (km/h)/s
        accels = [(later['speed'] - earlier['speed']) /
                  ((later['timestamp'] - earlier['timestamp']).total_seconds())
                  for earlier, later in zip(p, p[1:])]

        # regress speed against sqrt(elapsed seconds)
        times = pluck(p, 'timestamp')
        sqrt_elapsed = np.sqrt([(t - times[0]).total_seconds() for t in times])
        slope, intercept, r_value, p_value, std_err = stats.linregress(
            sqrt_elapsed, speeds)

        distance_km = sum(
            vincenty(r1['pos']['coordinates'][::-1],
                     r2['pos']['coordinates'][::-1]).kilometers
            for r2, r1 in zip(p, p[1:]))

        report_rows.append({
            'phasetype': int(phase_type),
            'phase_no': phase_no,
            'trip_id': p[0]['trip_id'],
            'Start Time': p[0]['timestamp'],
            'Finish Time': p[-1]['timestamp'],
            'Duration (s)': duration,
            'Avg Temp (°C)': np.mean(pluck(p, 'FMS_ENGINE_TEMP (°C)', default=0)),
            'Distance (km)': distance_km,
            'Start Speed (km/h)': speeds[0],
            'Finish Speed (km/h)': speeds[-1],
            'Min Speed (km/h)': np.min(speeds),
            'Max Speed (km/h)': np.max(speeds),
            'Mean Speed (km/h)': np.mean(speeds),
            'STDEV Speed (km/h)': np.std(speeds),
            'Coeff Beta (km/h)/√(Δt)': slope,
            'Y Intercept (km/h)': intercept,
            'r_squared_value': r_value**2,
            'Min Acc ((Δkm/h)/s)': np.min(accels),
            'Max Acc ((Δkm/h)/s)': np.max(accels),
            'Mean Acc ((Δkm/h)/s)': np.mean(accels),
            # net change in speed (km/h) over the phase, per second
            'Total Acc ((Δkm/h)/s)': (p[-1]['speed'] - p[0]['speed']) / duration,
            'STDEV Acc ((Δkm/h)/s)': np.std(accels),
            'Total Fuel (ml)': np.sum(fuels),
            'Min Fuel rate (L/h)': np.min(fuel_rates),
            'Max Fuel Rate (L/h)': np.max(fuel_rates),
            'Mean Fuel Rate (L/h)': np.mean(fuel_rates),
            'STDEV Fuel Rate (L/h)': np.std(fuel_rates),
            'Mean CO2 (g)': np.mean(co2s),
            'STDEV CO2 (g)': np.std(co2s),
            'Total CO2 (g)': np.sum(co2s),
            'Min Energy (kWh)': energy.min(),
            'Max Energy (kWh)': energy.max(),
            'Mean Energy (kWh)': energy.mean(),
            'STDEV Energy (kWh)': energy.std(),
            'Total Energy (kWh)': energy.sum()
        })
    return report_rows
Exemple #42
0
        row_count += 1
    return step_predictions, data, model, it, row_count, len(data)


if __name__ == "__main__":
    import sys
    for i in sys.argv[1:]:
        print("Running ", i)
        fname = i.split('/')[-1]
        predictions, data, model, it, row_count, data_len = run_data(i, limit=datetime(2013, 4, 23), sensors=[5])

        model.save('/scratch/model_store/3002_model_sensor_5')
        # turn the data into numpy arrays
        split_idx = int(len(data) * 0.4)
        flow_values = np.array(pluck(data[split_idx:], 'downstream'))
        print()
        # print (predictions)

        predictions = {
            k: np.array(v[split_idx:]) for k, v in predictions.items()
        }
        print()

        table = []
        print(' & '.join(['step', 'geh', 'mape', 'rmse'])+' \\\\')
        for step in steps:
            # true values
            stepped_vals = flow_values[step:len(predictions)]
            # predicted  values
            pred_vals = predictions[step][:-step] + eps
Exemple #43
0
def getSecurityHistory(strEspecie):
    print('fetching security data...')
    strEspecie = strEspecie.upper()
    strFilename = 'data/SecurityHistory/' + strEspecie + '.json'
    if (os.path.exists(strFilename)):
        print('from file: ' + strFilename)
        with open(strFilename) as jsonFile:
            jsondata = json.load(jsonFile)
    else:
        print('from bolsar.com: ' + strEspecie)
        body = '{"strEspecie":"' + strEspecie + '","intVto":"4","intPeriodoId":4}'
        headers = {"Content-type": "application/json",
                   "Accept": "application/json"}
        conn = httplib.HTTPSConnection("www.bolsar.com")
        conn.request("POST", "/VistasDL/PaginaIntradiarioEspecies.aspx/GetIntradiarioHistorico", body, headers)
        response = conn.getresponse()
        print response.status, response.reason
        if 200 == response.status:
            jsondata = json.load(response)
            conn.close()
            with open(strFilename, 'w') as outfile:
                json.dump(jsondata, outfile)
                outfile.close()
        else:
            return
    series = jsondata['d'].pop()['EspeciesSeries']

    # all dates represented here are at GMT-3 (Buenos Aires)
    intDates = map(dateToInt, pluck(series, 'Fecha'))
    intTimes = map(timeToInt, pluck(series, 'Hora'))
    intTimestamps = np.sum([np.array(intDates).astype(np.float), np.array(intTimes).astype(np.float)], axis=0)
    datetimeDates = map(timestampToDatetime, intTimestamps)
    strDates = map(timestampToStr, intTimestamps)

    data = np.array([
            intTimestamps,
            pluck(series, 'VolumenNominal'),
            pluck(series, 'PrecioUltimo'),
            pluck(series, 'PrecioCierre'),
            pluck(series, 'PrecioOperacion'),
            pluck(series, 'PrecioApertura'),
            pluck(series, 'PrecioMaximo'),
            pluck(series, 'PrecioMinimo'),
            pluck(series, 'VariacionPrecio'),
            pluck(series, 'Operaciones'),
            pluck(series, 'TotalOperadoVn'),
            pluck(series, 'TotalOperadoMonto')

            #datetimeDates,
            #strDates,
            ])
    data = data.transpose()
    data = data[data[:,0].argsort()] # sort data by column 0 (timepstamps)
    return data
Exemple #44
0
def plot(x_title,
         y_title,
         plt_title,
         # xlim,
         # ylim
         ):
    """Plot speed vs. time for the first long acceleration phase of one
    bus trip, with the linear trend of speed against sqrt(elapsed time).

    :param x_title: x-axis label
    :param y_title: y-axis label
    :param plt_title: figure title prefix; the bus short-name is appended
    """
    # get all the bus data
    conf = configparser.ConfigParser()
    conf.read('../../development.ini')
    uri = conf['app:webcan']['mongo_uri']
    conn = MongoClient(uri)['webcan']

    bus = 'adl_metro_1999'

    readings_col = conn['rpi_readings']
    trip_key = conn['trip_summary'].find_one({'vid': bus, 'Distance (km)': {'$gte': 5}})['trip_key']

    readings = []
    for _r in readings_col.find({
        'trip_key': trip_key,
        'vid': bus,

    }):
        # filter out garbage RPM spikes
        rpm = _r.get('FMS_ELECTRONIC_ENGINE_CONTROLLER_1 (RPM)')
        if rpm is not None and rpm > 8000:
            continue
        else:
            readings.append(_r)

    readings.sort(key=lambda r: r['trip_sequence'])
    prev = None
    for p in readings:
        p.update(calc_extra(p, prev))
        prev = p

    _classify_readings_with_phases_pas(readings, 3, 1)
    accel_phases = []
    for g, vals in groupby(readings, key=lambda x: x['phase']):
        vals = list(vals)
        # keep acceleration phases (phase == 1) longer than 5 seconds
        if g == 1 and (vals[-1]['timestamp'] - vals[0]['timestamp']).total_seconds() > 5:
            accel_phases.append(vals)

    # pick the an accel phase somewhere
    phase = accel_phases[0]
    x, y = [], []
    start_time = phase[0]['timestamp']
    for r in phase:
        x.append((r['timestamp'] - start_time).total_seconds())
        y.append(r['spd_over_grnd'])

    x = np.array(x)
    y = np.array(y)
    times = pluck(phase, 'timestamp')
    # BUG FIX: a stray trailing comma made sqrt_x a 1-tuple rather than
    # the ndarray intended for the scatter call below
    sqrt_x = np.sqrt([(t - times[0]).total_seconds() for t in times])
    bus_short = bus.split('_')[-1]

    plt.figure()
    plt.scatter(x, y, s=4, label='Speed Over time')
    plt.scatter(sqrt_x, y, s=4, label='sqrt speed over time')
    trend = stats.linregress(x, y)
    slope, intercept, r_value, p_value, std_err = trend
    plt.plot(x, intercept + slope * x,
             label=trend_lbl(bus_short, len(x), slope, intercept, r_value),
             color='k',
             path_effects=[pe.Stroke(linewidth=5, foreground='r'), pe.Normal()]
             )

    # plt.ylim(*ylim)
    # plt.xlim(*xlim)
    plt.xlabel(x_title)
    plt.ylabel(y_title)
    plt.legend()
    plt.title(plt_title + bus_short)
    plt.savefig('./out/' + plt_title + bus_short + '.png')

    plt.show()
Exemple #45
0
 def test_empty_iterables(self):
     """pluck over an empty list or set yields an empty result."""
     assert pluck([], 'foo') == []
     assert set(pluck(set(), 'foo')) == set()