Example #1
    def testFeb22MultiSyncEndDetected(self):
        # Re-run, but with multiple calls to sync data
        # This tests the effect of online versus offline analysis and segmentation with potentially partial data

        dataFile = "emission/tests/data/real_examples/iphone_2016-02-22"
        start_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
        end_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
        cacheKey = "diary/trips-2016-02-22"
        with open(dataFile + ".ground_truth") as gtf:
            ground_truth = json.load(gtf, object_hook=bju.object_hook)

        logging.info("Before loading, timeseries db size = %s" %
                     edb.get_timeseries_db().estimated_document_count())
        with open(dataFile) as df:
            all_entries = json.load(df, object_hook=bju.object_hook)
        # The trip end transition was at 2016-02-22T18:00:09.623404-08:00, so we
        # split just after 18:00 (at 18:00:30)
        ts_1800 = arrow.get("2016-02-22T18:00:30-08:00").timestamp
        logging.debug("ts_1800 = %s, converted back = %s" %
                      (ts_1800, arrow.get(ts_1800).to("America/Los_Angeles")))
        before_1800_entries = [
            e for e in all_entries
            if ad.AttrDict(e).metadata.write_ts <= ts_1800
        ]
        after_1800_entries = [
            e for e in all_entries
            if ad.AttrDict(e).metadata.write_ts > ts_1800
        ]

        # Sync at 18:00 to capture all the points on the trip *to* the optometrist
        etc.createAndFillUUID(self)
        self.entries = before_1800_entries
        etc.setupRealExampleWithEntries(self)
        etc.runIntakePipeline(self.testUUID)
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)

        # Then sync after 18:00
        self.entries = after_1800_entries
        etc.setupRealExampleWithEntries(self)
        etc.runIntakePipeline(self.testUUID)
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)
        self.persistGroundTruthIfNeeded(api_result, dataFile, start_ld,
                                        cacheKey)

        # Although we process the day's data in two batches, we should get the same result
        self.compare_approx_result(
            ad.AttrDict({'result': api_result}).result,
            ad.AttrDict(ground_truth).data,
            time_fuzz=60,
            distance_fuzz=100)
    def testJumpSmoothingSectionStart(self):
        dataFile = "emission/tests/data/real_examples/shankari_2016-independence_day_jump_bus_start"
        start_ld = ecwl.LocalDate({'year': 2016, 'month': 8, 'day': 15})
        cacheKey = "diary/trips-2016-08-15"
        with open("emission/tests/data/real_examples/shankari_2016-independence_day.ground_truth") as gfp:
            ground_truth = json.load(gfp, object_hook=bju.object_hook)

        etc.setupRealExample(self, dataFile)
        etc.runIntakePipeline(self.testUUID)

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
        # Compare the pipeline output against the ground truth
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth).data)
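
The multi-sync split in testFeb22MultiSyncEndDetected above is just a partition of the raw entries by metadata.write_ts. A minimal, self-contained sketch of the same pattern (plain dicts stand in for the loaded entries, and cutoff_ts is a made-up value):

import attrdict as ad

entries = [
    {"metadata": {"write_ts": 100}},
    {"metadata": {"write_ts": 200}},
    {"metadata": {"write_ts": 300}},
]
cutoff_ts = 200
# AttrDict gives dotted access to the nested metadata, as in the test above
before = [e for e in entries if ad.AttrDict(e).metadata.write_ts <= cutoff_ts]
after = [e for e in entries if ad.AttrDict(e).metadata.write_ts > cutoff_ts]
assert len(before) == 2 and len(after) == 1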
Example #3
    def testResetToFuture(self):
        """
        - Load data for both days
        - Run pipelines
        - Reset to a date after the two
        - Verify that all is well
        - Re-run pipelines and ensure that there are no errors
        """
        # Load all data
        dataFile_1 = "emission/tests/data/real_examples/shankari_2016-07-22"
        dataFile_2 = "emission/tests/data/real_examples/shankari_2016-07-25"
        start_ld_1 = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 22})
        start_ld_2 = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 25})
        cacheKey_1 = "diary/trips-2016-07-22"
        cacheKey_2 = "diary/trips-2016-07-25"
        with open(dataFile_1 + ".ground_truth") as gtf:
            ground_truth_1 = json.load(gtf, object_hook=bju.object_hook)
        with open(dataFile_2 + ".ground_truth") as gtf:
            ground_truth_2 = json.load(gtf, object_hook=bju.object_hook)

        # Run both pipelines
        etc.setupRealExample(self, dataFile_1)
        etc.runIntakePipeline(self.testUUID)
        with open(dataFile_2) as df:
            self.entries = json.load(df, object_hook=bju.object_hook)
        etc.setupRealExampleWithEntries(self)
        etc.runIntakePipeline(self.testUUID)

        # Reset to a date well after the two days
        reset_ts = arrow.get("2017-07-24").timestamp
        epr.reset_user_to_ts(self.testUUID, reset_ts, is_dry_run=False)

        # Data should be untouched because of early return
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1,
                                            start_ld_1)
        self.compare_result(
            ad.AttrDict({
                'result': api_result
            }).result,
            ad.AttrDict(ground_truth_1).data)

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2,
                                            start_ld_2)
        self.compare_result(
            ad.AttrDict({
                'result': api_result
            }).result,
            ad.AttrDict(ground_truth_2).data)

        # Re-running the pipeline should not affect anything
        etc.runIntakePipeline(self.testUUID)
    def testOct07MultiSyncSpuriousEndDetected(self):
        # Re-run, but with multiple calls to sync data
        # This tests the effect of online versus offline analysis and segmentation with potentially partial data

        dataFile = "emission/tests/data/real_examples/issue_436_assertion_error"
        start_ld = ecwl.LocalDate({'year': 2016, 'month': 10, 'day': 7})
        end_ld = ecwl.LocalDate({'year': 2016, 'month': 10, 'day': 7})
        cacheKey = "diary/trips-2016-10-07"
        with open(dataFile + ".ground_truth") as gtf:
            ground_truth = json.load(gtf, object_hook=bju.object_hook)

        logging.info("Before loading, timeseries db size = %s" %
                     edb.get_timeseries_db().estimated_document_count())
        with open(dataFile) as df:
            all_entries = json.load(df, object_hook=bju.object_hook)
        # For this dataset, the trip end transition occurred just before
        # 2016-10-07T18:33:11-07:00, so split there
        split_ts = arrow.get("2016-10-07T18:33:11-07:00").timestamp
        logging.debug("split_ts = %s, converted back = %s" %
                      (split_ts, arrow.get(split_ts).to("America/Los_Angeles")))
        before_split_entries = [
            e for e in all_entries
            if ad.AttrDict(e).metadata.write_ts <= split_ts
        ]
        after_split_entries = [
            e for e in all_entries
            if ad.AttrDict(e).metadata.write_ts > split_ts
        ]

        # First sync at the split point to capture all the points on the trip so far
        # Skip the last few points to ensure that the trip end is skipped
        import uuid
        self.testUUID = uuid.uuid4()
        self.entries = before_split_entries
        etc.setupRealExampleWithEntries(self)
        etc.runIntakePipeline(self.testUUID)
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)

        # Then sync the entries after the split
        self.entries = after_split_entries
        etc.setupRealExampleWithEntries(self)
        etc.runIntakePipeline(self.testUUID)
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)

        # Although we process the day's data in two batches, we should get the same result
        self.compare_approx_result(
            ad.AttrDict({'result': api_result}).result,
            ad.AttrDict(ground_truth).data,
            time_fuzz=60,
            distance_fuzz=100)
Example #5
    def __init__(self, context: det_torch.PyTorchTrialContext) -> None:
        self.context = context
        # A subclass of BaseTransformerTrial may have already set hparams and data_config
        # attributes so we only reset them if they do not exist.
        if not hasattr(self, "hparams"):
            self.hparams = attrdict.AttrDict(context.get_hparams())
        if not hasattr(self, "data_config"):
            self.data_config = attrdict.AttrDict(context.get_data_config())
        if not hasattr(self, "exp_config"):
            self.exp_config = attrdict.AttrDict(
                context.get_experiment_config())
        # Check to make sure all expected hyperparameters are set.
        self.check_hparams()

        # Parse hparams and data_config.
        (
            self.config_kwargs,
            self.tokenizer_kwargs,
            self.model_kwargs,
        ) = hf_parse.default_parse_config_tokenizer_model_kwargs(self.hparams)
        optimizer_kwargs, scheduler_kwargs = hf_parse.default_parse_optimizer_lr_scheduler_kwargs(
            self.hparams)

        self.config, self.tokenizer, self.model = build_using_auto(
            self.config_kwargs,
            self.tokenizer_kwargs,
            self.hparams.model_mode,
            self.model_kwargs,
            use_pretrained_weights=self.hparams.use_pretrained_weights,
        )
        self.model = self.context.wrap_model(self.model)

        self.optimizer = self.context.wrap_optimizer(
            build_default_optimizer(self.model, optimizer_kwargs))

        if self.hparams.use_apex_amp:
            self.model, self.optimizer = self.context.configure_apex_amp(
                models=self.model,
                optimizers=self.optimizer,
            )

        self.lr_scheduler = self.context.wrap_lr_scheduler(
            build_default_lr_scheduler(self.optimizer, scheduler_kwargs),
            det_torch.LRScheduler.StepMode.STEP_EVERY_BATCH,
        )
        self.grad_clip_fn = (
            lambda x: torch.nn.utils.clip_grad_norm_(
                x, optimizer_kwargs.max_grad_norm)
            if optimizer_kwargs.max_grad_norm > 0  # type: ignore
            else None)
    def testIndexLengthChange(self):
        # Test for 94f67b4848611fa01c4327a0fa0cab97c2247744
        dataFile = "emission/tests/data/real_examples/shankari_2015-08-23"
        start_ld = ecwl.LocalDate({'year': 2015, 'month': 8, 'day': 23})
        cacheKey = "diary/trips-2015-08-23"
        ground_truth = json.load(open("emission/tests/data/real_examples/shankari_2015-08-23.ground_truth"), object_hook=bju.object_hook)

        etc.setupRealExample(self, dataFile)
        etc.runIntakePipeline(self.testUUID)

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
        # Compare the pipeline output against the ground truth
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth).data)
    def testAug27TooMuchExtrapolation(self):
        dataFile = "emission/tests/data/real_examples/shankari_2015-aug-27"
        start_ld = ecwl.LocalDate({'year': 2015, 'month': 8, 'day': 27})
        end_ld = ecwl.LocalDate({'year': 2015, 'month': 8, 'day': 27})
        cacheKey = "diary/trips-2015-08-27"
        with open(dataFile + ".ground_truth") as gtf:
            ground_truth = json.load(gtf, object_hook=bju.object_hook)

        etc.setupRealExample(self, dataFile)
        etc.runIntakePipeline(self.testUUID)
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)

        # Compare the pipeline output against the ground truth
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth).data)
    def testFeb22ShortTripsDistance(self):
        dataFile = "emission/tests/data/real_examples/iphone_3_2016-02-22"
        start_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
        end_ld = ecwl.LocalDate({'year': 2016, 'month': 2, 'day': 22})
        cacheKey = "diary/trips-2016-02-22"
        with open(dataFile + ".ground_truth") as gtf:
            ground_truth = json.load(gtf, object_hook=bju.object_hook)

        etc.setupRealExample(self, dataFile)
        etc.runIntakePipeline(self.testUUID)
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, end_ld)

        # Compare the pipeline output against the ground truth
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth).data)
    def testIosJumpsAndUntrackedSquishing(self):
        # Test for a2c0ee4e3ceafa822425ceef299dcdb01c9b32c9
        dataFile = "emission/tests/data/real_examples/sunil_2016-07-20"
        start_ld = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 20})
        cacheKey = "diary/trips-2016-07-20"
        ground_truth = json.load(open("emission/tests/data/real_examples/sunil_2016-07-20.ground_truth"), object_hook=bju.object_hook)

        etc.setupRealExample(self, dataFile)
        etc.runIntakePipeline(self.testUUID)

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
        # Compare the pipeline output against the ground truth
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth).data)
Example #10
def get_maps_for_usercache(user_id):
    from functional import seq

    data_to_phone = seq(enau.sync_server_to_phone(user_id))
    logging.debug("Before pipeline, trips to phone list has length %d" % len(data_to_phone.to_list()))
    logging.debug("keys are %s" % data_to_phone.map(lambda e: ad.AttrDict(e).metadata.key))
    trips_to_phone = data_to_phone.map(lambda e: ad.AttrDict(e))\
                                    .filter(lambda e: e.metadata.key.startswith("diary/trips")) \
                                    .map(lambda e: e.data)
    logging.debug("After pipeline, trips to phone list has length %d" % len(trips_to_phone.to_list()))
    # logging.debug("trips_to_phone = %s" % trips_to_phone)
    maps_for_day = []
    for day in trips_to_phone:
        maps_for_day.append(get_maps_for_geojson_list(day))
    return maps_for_day
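
The seq/filter chain above can be exercised in isolation. A minimal sketch with stub entries (requires the PyFunctional package, which provides functional.seq; the keys and data are made up):

import attrdict as ad
from functional import seq

stub_entries = [
    {"metadata": {"key": "diary/trips-2016-07-27"}, "data": ["trip1"]},
    {"metadata": {"key": "config/sensor_config"}, "data": {}},
]
trips_to_phone = seq(stub_entries) \
    .map(lambda e: ad.AttrDict(e)) \
    .filter(lambda e: e.metadata.key.startswith("diary/trips")) \
    .map(lambda e: e.data)
print(trips_to_phone.to_list())  # [['trip1']]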
Example #11
    def __init__(self, load_set=None, datas=None):
        assert load_set is None or datas is None
        self.load_set = load_set
        self.datas = list()
        self.globals = attrdict.AttrDict()
        self.param = None

        if load_set is not None:
            for data_dict in load_set.read_seq():
                self.datas.append(attrdict.AttrDict(data_dict))
            for data_dict in load_set.read_global():
                self.globals.update(data_dict)

        if datas is not None:
            self.datas = datas
Example #12
    def testResetToPast(self):
        """
        - Load data for both days
        - Run pipelines
        - Verify that all is well
        - Reset to a date before both
        - Verify that analysis data for the both days is removed
        - Re-run pipelines
        - Verify that all is well
        """
        # Load all data
        dataFile_1 = "emission/tests/data/real_examples/shankari_2016-07-22"
        dataFile_2 = "emission/tests/data/real_examples/shankari_2016-07-25"
        start_ld_1 = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 22})
        start_ld_2 = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 25})
        cacheKey_1 = "diary/trips-2016-07-22"
        cacheKey_2 = "diary/trips-2016-07-25"
        with open(dataFile_1 + ".ground_truth") as gtf:
            ground_truth_1 = json.load(gtf, object_hook=bju.object_hook)
        with open(dataFile_2 + ".ground_truth") as gtf:
            ground_truth_2 = json.load(gtf, object_hook=bju.object_hook)

        # Run both pipelines
        etc.setupRealExample(self, dataFile_1)
        etc.runIntakePipeline(self.testUUID)
        with open(dataFile_2) as df:
            self.entries = json.load(df, object_hook=bju.object_hook)
        etc.setupRealExampleWithEntries(self)
        etc.runIntakePipeline(self.testUUID)

        # Verify that all is well
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1, start_ld_1)
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth_1).data)

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2, start_ld_2)
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth_2).data)

        # Reset to a date well before the two days
        reset_ts = arrow.get("2015-07-24").timestamp
        epr.reset_user_to_ts(self.testUUID, reset_ts, is_dry_run=False)

        # Data should be completely deleted
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1, start_ld_1)
        self.assertEqual(api_result, [])

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2, start_ld_2)
        self.assertEqual(api_result, [])

        # Re-run the pipeline
        etc.runIntakePipeline(self.testUUID)

        # Should reconstruct everything
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1, start_ld_1)
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth_1).data)

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2, start_ld_2)
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth_2).data)
Example #13
    def testResetToStart(self):
        """
        - Load data for both days
        - Run pipelines
        - Verify that all is well
        - Reset to start
        - Verify that there is no analysis data
        - Re-run pipelines
        - Verify that all is well
        """

        # Load all data
        dataFile_1 = "emission/tests/data/real_examples/shankari_2016-07-22"
        dataFile_2 = "emission/tests/data/real_examples/shankari_2016-07-25"
        start_ld_1 = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 22})
        start_ld_2 = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 25})
        cacheKey_1 = "diary/trips-2016-07-22"
        cacheKey_2 = "diary/trips-2016-07-25"
        with open(dataFile_1 + ".ground_truth") as gtf:
            ground_truth_1 = json.load(gtf, object_hook=bju.object_hook)
        with open(dataFile_2 + ".ground_truth") as gtf:
            ground_truth_2 = json.load(gtf, object_hook=bju.object_hook)

        # Run both pipelines
        etc.setupRealExample(self, dataFile_1)
        etc.runIntakePipeline(self.testUUID)
        with open(dataFile_2) as df:
            self.entries = json.load(df, object_hook=bju.object_hook)
        etc.setupRealExampleWithEntries(self)
        etc.runIntakePipeline(self.testUUID)

        # Check results: so far, so good
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1, start_ld_1)
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth_1).data)

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2, start_ld_2)
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth_2).data)

        # Reset pipeline to start
        epr.reset_user_to_start(self.testUUID, is_dry_run=False)

        # Now there are no results
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1, start_ld_1)
        self.assertEqual(api_result, [])

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2, start_ld_2)
        self.assertEqual(api_result, [])

        # Re-run the pipeline again
        etc.runIntakePipeline(self.testUUID)

        # Should be back to ground truth
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_1, start_ld_1)
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth_1).data)

        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld_2, start_ld_2)
        self.compare_result(ad.AttrDict({'result': api_result}).result,
                            ad.AttrDict(ground_truth_2).data)
Example #14
    def get_last_trip_end_point(self, filtered_points_df, last10Points_df,
                                last5MinsPoints_df):
        ended_before_this = last5MinsPoints_df is None or len(
            last5MinsPoints_df) == 0
        if ended_before_this:
            logging.debug("trip end transition, so last 10 points are %s" %
                          last10Points_df.index)
            last10PointsMedian = np.median(last10Points_df.index)
            last_trip_end_index = int(last10PointsMedian)
            logging.debug(
                "last5MinsPoints not found, last_trip_end_index = %s" %
                last_trip_end_index)
        else:
            last10PointsMedian = np.median(last10Points_df.index)
            last5MinsPointsMedian = np.median(last5MinsPoints_df.index)
            last_trip_end_index = int(
                min(last5MinsPointsMedian, last10PointsMedian))
            logging.debug(
                "last5MinsPoints and last10PointsMedian found, last_trip_end_index = %s"
                % last_trip_end_index)
        #                     logging.debug("last5MinPoints.median = %s (%s), last10Points_df = %s (%s), sel index = %s" %
        #                         (np.median(last5MinsPoints_df.index), last5MinsPoints_df.index,
        #                          np.median(last10Points_df.index), last10Points_df.index,
        #                          last_trip_end_index))

        last_trip_end_point_row = filtered_points_df.iloc[last_trip_end_index]
        last_trip_end_point = ad.AttrDict(last_trip_end_point_row)
        logging.debug("Appending last_trip_end_point %s with index %s " %
                      (last_trip_end_point, last_trip_end_point_row.name))
        return (ended_before_this, last_trip_end_point)
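
The median-of-indices selection above is easy to check in isolation; a small numpy sketch with made-up index values:

import numpy as np

last10_index = np.array([90, 91, 92, 93, 94, 95, 96, 97, 98, 99])
last5min_index = np.array([96, 97, 98, 99])
# Take the earlier of the two medians as the trip end, as in the method above
last_trip_end_index = int(min(np.median(last5min_index), np.median(last10_index)))
print(last_trip_end_index)  # 94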
    def testAirTripToHawaii(self):
        dataFile = "emission/tests/data/real_examples/shankari_2016-07-27"
        start_ld = ecwl.LocalDate({'year': 2016, 'month': 7, 'day': 27})
        cacheKey = "diary/trips-2016-07-27"
        with open(dataFile + ".ground_truth") as gfp:
            ground_truth = json.load(gfp, object_hook=bju.object_hook)

        etc.setupRealExample(self, dataFile)
        etc.runIntakePipeline(self.testUUID)
        api_result = gfc.get_geojson_for_dt(self.testUUID, start_ld, start_ld)
        # Compare the pipeline output against the ground truth
        self.compare_result(
            ad.AttrDict({
                'result': api_result
            }).result,
            ad.AttrDict(ground_truth).data)
    def setUp(self):
        #load test user
        self.testUUID = uuid.uuid4()
        autogen_string = randomGen()
        autogen_email = autogen_string + '@test.com'
        self.sampleAuthMessage1 = {
            'username': autogen_string,
            'email': autogen_email,
            'password': "******",
            'our_uuid': self.testUUID
        }
        sampleAuthMessage1Ad = ad.AttrDict(self.sampleAuthMessage1)
        proxy.habiticaRegister(sampleAuthMessage1Ad.username,
                               sampleAuthMessage1Ad.email,
                               sampleAuthMessage1Ad.password,
                               sampleAuthMessage1Ad.our_uuid)

        self.ts = esta.TimeSeries.get_time_series(self.testUUID)
        bike_habit = {
            'type': "habit",
            'text': "Bike",
            'up': True,
            'down': False,
            'priority': 2
        }
        bike_habit_id = proxy.create_habit(self.testUUID, bike_habit)
        walk_habit = {
            'type': "habit",
            'text': "Walk",
            'up': True,
            'down': False,
            'priority': 2
        }
        walk_habit_id = proxy.create_habit(self.testUUID, walk_habit)
        logging.debug("in setUp, result = %s" % self.ts)
Example #17
class BaseObject(object):
    name = ""
    MM = 10.0
    layer2color = {
        "91": "#4BA54B",  # Nets
        "93": "#9B4646",  # Pins
        "94": "#9B4646",  # Symbols
        "95": "#A5A5A5",  # Names
        "96": "#A5A5A5",  # Values
    }
    mirror = False
    spin = False
    angle = 0
    get_bool = {"no": False, "yes": True}
    attr = attrdict.AttrDict({})
    dwg = svgwrite.drawing.Drawing()

    def val2mm(self, value):
        value = float(value)
        ret = "{val:1.5f}".format(val=value * self.MM)
        return float(ret)

    def coord2mm(self, position):
        x, y = position
        return (self.val2mm(x), self.val2mm(y))

    def rot(self, rotate="R0"):
        mirror, spin, angle = re.findall(re.compile(r"([M]*)([S]*)R(\d+)"),
                                         rotate).pop()
        return (bool(mirror), bool(spin), int(angle))
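
A quick check of the rot parser, assuming EAGLE-style rotation strings where an optional 'M' means mirror, an optional 'S' means spin, and the digits after 'R' are the angle (and assuming the class's imports, including re, attrdict, and svgwrite, are in scope):

obj = BaseObject()
print(obj.rot("R0"))      # (False, False, 0)
print(obj.rot("MR90"))    # (True, False, 90)
print(obj.rot("MSR180"))  # (True, True, 180)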
Example #18
    def __init__(self,
                 positions=None,
                 fingers=None,
                 barre=None,
                 title=None,
                 style=None):
        if positions is None:
            positions = []
        elif '-' in positions:
            # use - to separate numbers when frets go above 9, e.g., x-x-0-10-10-10
            positions = positions.split('-')
        else:
            positions = list(positions)
        self.positions = list(
            map(lambda p: int(p) if p.isdigit() else None, positions))

        self.fingers = list(fingers) if fingers else []

        self.barre = barre

        self.style = attrdict.AttrDict(
            dict_merge(copy.deepcopy(self.default_style), style or {}))

        self.title = title

        self.fretboard = None
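
The positions handling above accepts both compact spellings ("xx0232") and dash-separated ones for frets above 9. A standalone sketch of just that step, as a hypothetical helper rather than part of the original class:

def parse_positions(positions):
    if positions is None:
        positions = []
    elif '-' in positions:
        # use - to separate numbers when frets go above 9
        positions = positions.split('-')
    else:
        positions = list(positions)
    return [int(p) if p.isdigit() else None for p in positions]

print(parse_positions("xx0232"))          # [None, None, 0, 2, 3, 2]
print(parse_positions("x-x-0-10-10-10"))  # [None, None, 0, 10, 10, 10]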
Example #19
def create_routes_from_ref(bus_stop_list):
    created_routes = []
    route_ref_bus_stop_list = [s for s in bus_stop_list if "route_ref" in s.tags]
    for s in route_ref_bus_stop_list:
        logging.debug("Splitting route ref %s" % s.tags.route_ref)
        # route_ref is a ; separated list of routes. We want to split them up
        route_list = s.tags.route_ref.split(';')
        for route in route_list:
            # the id of the stop represents the stop, not the route
            # so we create an id from the route
            re = {"id": route,
                   "tags": {"ref": route, "name": route, "route": "bus", "type": "route"}}

            # 'tags': {'bus': 'yes', 'gtfs_id': '0300315', 'gtfs_location_type': '0', 'gtfs_stop_code': '57566', 'highway': 'bus_stop', 'name': 'Addison St:Oxford St', 'network': 'AC Transit', 'public_transport': 'platform', 'ref': '57566', 'route_ref': '65'}
            # #65 bus stop doesn't have an operator tag, only network
            if "operator" in s.tags:
                re["operator"] = s.tags.operator
            elif "network" in s.tags:
                re["operator"] = s.tags.network
            # logging.debug("Converted stop %s + route_ref %s -> route %s" %
            #    (s, route, re))
            created_routes.append(ad.AttrDict(re))
    logging.debug("%d bus stops -> %d bus stops with refs -> %d routes" %
        (len(bus_stop_list), len(route_ref_bus_stop_list), len(created_routes)))
    return created_routes
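
A minimal invocation with one stub stop (tag values are made up) shows the ';' split producing one route per ref, assuming the module's own imports (logging, attrdict as ad) are in scope:

import attrdict as ad

stop = ad.AttrDict({
    "id": "node/12345",
    "tags": {"route_ref": "18;51A", "network": "AC Transit"},
})
routes = create_routes_from_ref([stop])
print([r.id for r in routes])  # ['18', '51A']
print(routes[0].operator)      # AC Transit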
    def testJoinParty(self):
        sampleAuthMessage1Ad = ad.AttrDict(self.sampleAuthMessage1)
        proxy.habiticaRegister(sampleAuthMessage1Ad.username,
                               sampleAuthMessage1Ad.email,
                               sampleAuthMessage1Ad.password,
                               sampleAuthMessage1Ad.our_uuid)
        # Create an inviter
        inviterUUID = uuid.uuid4()
        inviter = randomGen()
        inviter_email = inviter + '@save.world'
        inviter_id = proxy.habiticaRegister(inviter, inviter_email, inviter,
                                            inviterUUID)['data']['id']
        inviter_group_id = json.loads(
            proxy.habiticaProxy(inviterUUID, 'POST', "/api/v3/groups", {
                'type': 'party',
                'privacy': 'private',
                'name': inviter
            }).text)['data']['id']
        # Finally try to make this user (self) join the party
        group_id_returned = proxy.setup_party(self.testUserUUID,
                                              inviter_group_id, inviter_id)
        self.assertEqual(group_id_returned, inviter_group_id)
        # Now try to join again; it should throw an error
        with self.assertRaises(RuntimeError):
            proxy.setup_party(self.testUserUUID, inviter_group_id, inviter_id)
        delete_inviter = proxy.habiticaProxy(inviterUUID, "DELETE",
                                             "/api/v3/user",
                                             {'password': inviter})
        edb.get_habitica_db().delete_many({'user_id': inviterUUID})
Example #21
def make_gif_of_traj(filename, output_filename, raw=True, fps=5):
    file = dd.io.load(filename)
    traj_len = len(file)
    print(f'trajectory length is {traj_len}')
    traj_images = []
    a = attrdict.AttrDict({'mean': 0, 'scale': 1})
    for i in range(1, traj_len):
        data = file[i]
        act = obs_to_action(data, file[i - 1], a)
        sorted_act = sorted(act, key=np.abs)
        if np.abs(sorted_act[0]) == 0 and sorted_act[1] == 0:
            print('alert alert')
        print(act)
        fig, axs = plot_images(data, raw=raw)
        fig.canvas.draw()
        image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
        image = image.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
        traj_images.append(image)
    clip = ImageSequenceClip(traj_images, fps=fps)
    print(f'Writing out to {output_filename}.mp4')
    clip.write_videofile(f'{output_filename}.mp4', fps=fps)
    print(f'Writing out to {output_filename}.gif')
    clip.write_gif(f'{output_filename}.gif',
                   fps=fps,
                   program='imageio',
                   opt='wu')
Example #22
    def filter(self, with_speeds_df):
        self.inlier_mask_ = [True] * with_speeds_df.shape[0]

        prev_pt = None
        for (i, pt) in enumerate(
                with_speeds_df[["mLatitude", "mLongitude", "mTime",
                                "speed"]].to_dict('records')):
            pt = ad.AttrDict(dict(pt))
            if prev_pt is None:
                # Don't have enough data yet, so don't make any decisions
                prev_pt = pt
            else:
                currSpeed = pf.calSpeed(prev_pt, pt)
                logging.debug(
                    "while considering point %s(%s), prev_pt (%s) speed = %s" %
                    (pt, i, prev_pt, currSpeed))
                if currSpeed > self.maxSpeed:
                    logging.debug("currSpeed > %s, removing index %s " %
                                  (self.maxSpeed, i))
                    self.inlier_mask_[i] = False
                else:
                    logging.debug("currSpeed < %s, retaining index %s " %
                                  (self.maxSpeed, i))
                    prev_pt = pt
        logging.info("Filtering complete, removed indices = %s" %
                     np.nonzero(self.inlier_mask_))
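
The same masking idea in isolation, with a stub speed function standing in for pf.calSpeed (points and threshold are made up):

import numpy as np

def stub_speed(prev_pt, pt):
    # distance per unit time between two (position, ts) tuples
    return abs(pt[0] - prev_pt[0]) / (pt[1] - prev_pt[1])

points = [(0.0, 0), (1.0, 10), (500.0, 20), (2.0, 30)]  # (position, ts)
max_speed = 5.0
inlier_mask = [True] * len(points)
prev_pt = None
for i, pt in enumerate(points):
    if prev_pt is None:
        prev_pt = pt
    elif stub_speed(prev_pt, pt) > max_speed:
        inlier_mask[i] = False  # outlier; keep comparing against the last inlier
    else:
        prev_pt = pt
print(np.nonzero(np.logical_not(inlier_mask))[0])  # [2]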
    def create(self, subnet_cidr, server_ip, image, flavor, nova_host):
        try:
            network = self._create_network()
            self._create_subnet(network, subnet_cidr)
            security_group = self._create_security_group()
            server = self._create_server(image=image,
                                         flavor=flavor,
                                         nova_host=nova_host,
                                         network=network,
                                         ip=server_ip,
                                         security_group=security_group)
            port = self.port_steps.get_port(
                device_owner=stepler_config.PORT_DEVICE_OWNER_SERVER,
                device_id=server.id)
            floating_ip = self._add_floating_ip(port)
        except Exception:
            self.stack.close()
            raise
        return attrdict.AttrDict({
            'server_steps': self.get_server_steps(),
            'network': network,
            'server': server,
            'port': port,
            'floating_ip': floating_ip,
            'security_group': security_group,
        })
Example #24
def import_config(fname_config_file):
    """ Loads and validates a JSON configuration file.

    This function uses the `load_config_file` and `validate_config` functions to
    load a JSON configuration file and validate it against
    `config_schema_default`, returning the validated configuration as an
    `attrdict.AttrDict`.

    Args:
        fname_config_file (str, unicode): The path to the JSON configuration
            file.

    Returns:
        attrdict.AttrDict: The imported configuration `AttrDict`.
    """

    # Load the JSON configuration file.
    config = load_config_file(fname_config_file=fname_config_file)

    # Validate the loaded `dict` against the `config_schema_default` JSON
    # schema.
    validate_config(config_instance=config,
                    config_schema=config_schema_default)

    return attrdict.AttrDict(config)
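
Typical use, assuming a config.json that satisfies config_schema_default (the path and the keys printed here are hypothetical):

config = import_config("config.json")
# validated values are then available with dotted access
print(config.output_dir, config.num_workers)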
    def add_feed(self, frame, observation, trajectory_feeds):
        """Updates trajectory feed at frame.
        based on StreamingCARLALoader.populate_phi_feeds

        Parameters
        ----------
        observation : PlayerObservation
        trajectory_feeds : collection.OrderedDict
        """
        player_past = observation.player_positions_local[-self.T_past:, :3]
        agent_pasts = observation.agent_positions_local[:, -self.T_past:, :3]
        player_yaw = observation.player_yaw
        agent_yaws = observation.agent_yaws

        feed_dict = attrdict.AttrDict({
            'player_past': player_past,
            'agent_pasts': agent_pasts,
            'player_yaw': player_yaw,
            'agent_yaws': agent_yaws
        })

        # not sure how agent_presence is being used to train PRECOG
        # agent_presence = np.ones(shape=tensoru.shape(phi.agent_presence), dtype=np.float32)
        # TODO: set yaws
        # yaws = np.tile(np.asarray(observation.yaws_local[:A])[None], (B, 1))
        # TODO: set traffic light
        # light_string_batch = np.tile(np.asarray(light_string), (B,))
        # feed_dict[phi.light_strings] = light_string_batch
        # feed_dict.validate()
        trajectory_feeds[frame] = (observation.player_transform,
                                   observation.other_id_ordering, feed_dict)
Example #26
    def testConvertTransition(self):
        with open("emission/tests/data/netTests/android.transition.txt") as ef:
            entry = json.load(ef)
        formatted_entry = enuf.convert_to_common_format(ad.AttrDict(entry))
        self.assertEqual(formatted_entry.data.curr_state, et.State.WAITING_FOR_TRIP_START.value)
        self.assertEqual(formatted_entry.data.transition, et.TransitionType.INITIALIZE.value)
        self.assertEqual(formatted_entry.metadata.write_ts, 1436821510.445)
        self.assertTrue(formatted_entry.data.fmt_time.startswith("2015-07-13T14:05:10.445"))
Example #27
    def continue_just_ended(self, idx, currPoint, filtered_points_df):
        """
        Normally, since the logic here and the
        logic on the phone are the same, if we have detected a trip
        end, any points after this are part of the new trip.

        However, in some circumstances, notably in my data from 27th
        August, there appears to be a mismatch, and we get a couple of
        points past the end that we detected here. So let's look for
        points that are within the distance filter and within a minute
        of the previous point, and join them to the just-ended trip
        instead of using them to start the new trip.

        :param idx: Index of the current point
        :param currPoint: current point
        :param filtered_points_df: dataframe of filtered points
        :return: True if we should continue the just ended trip, False otherwise
        """
        if idx == 0:
            return False
        else:
            prev_point = ad.AttrDict(filtered_points_df.iloc[idx - 1])
            logging.debug("Comparing with prev_point = %s" % prev_point)
            if pf.calDistance(prev_point, currPoint) < self.distance_threshold and \
                    currPoint.ts - prev_point.ts <= 60:
                logging.info(
                    "Points %s and %s are within the distance filter and only 1 min apart so part of the same trip"
                    % (prev_point, currPoint))
                return True
            else:
                return False
Example #28
    def testConvertMotionActivity_ios(self):
        with open("emission/tests/data/netTests/ios.activity.txt") as ef:
            entry = json.load(ef)
        formatted_entry = enuf.convert_to_common_format(ad.AttrDict(entry))
        self.assertEqual(formatted_entry.data.confidence, 100)
        self.assertEqual(formatted_entry.data.type, ema.MotionTypes.STILL.value)
        self.assertEqual(formatted_entry.data.ts, 1446513827.479381)
        self.assertTrue(formatted_entry.data.fmt_time.startswith("2015-11-02T17:23:47"))
Example #29
def run_eval_loop(sess=None,
                  fetches_to_collect=None,
                  other_ops=(),
                  hooks=(),
                  checkpoint_dir=None,
                  load_path=None,
                  max_steps=None,
                  max_seconds=None,
                  init_fn=None):
    if isinstance(fetches_to_collect, dict):
        keys, values = zip(*fetches_to_collect.items())
        # Forward init_fn as well so the recursive call builds the same session
        results = run_eval_loop(sess, list(values), other_ops, hooks,
                                checkpoint_dir, load_path, max_steps,
                                max_seconds, init_fn)
        return attrdict.AttrDict(dict(zip(keys, results)))

    sess_creator = None if sess else make_session_creator(
        checkpoint_dir, load_path, init_fn)
    collect_hook = session_hooks.collect_hook(fetches_to_collect)
    hooks = [collect_hook, *hooks]
    if max_seconds or max_steps:
        stop_hook = session_hooks.stop_after_steps_or_seconds_hook(
            max_seconds, max_steps)
        hooks.append(stop_hook)

    tfasync.main_loop(sess=sess,
                      sess_creator=sess_creator,
                      ops=other_ops,
                      hooks=hooks)
    return collect_hook.result
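
The dict branch above simply re-keys the collected results so callers get dotted access. The core trick in isolation (the fetch names and values are made up):

import attrdict

fetches = {"loss": "loss_op", "accuracy": "acc_op"}
keys, values = zip(*fetches.items())
collected = [0.42, 0.91]  # stand-in for the per-fetch results the loop collects
results = attrdict.AttrDict(dict(zip(keys, collected)))
print(results.loss, results.accuracy)  # 0.42 0.91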
Example #30
    def testConvertTransition_ios(self):
        with open("emission/tests/data/netTests/ios.transition.txt") as ef:
            entry = json.load(ef)
        formatted_entry = enuf.convert_to_common_format(ad.AttrDict(entry))
        self.assertEqual(formatted_entry.data.curr_state, et.State.WAITING_FOR_TRIP_START.value)
        self.assertEqual(formatted_entry.data.transition, et.TransitionType.STOPPED_MOVING.value)
        self.assertEqual(formatted_entry.metadata.write_ts, 1446577206.122407)
        self.assertTrue(formatted_entry.data.fmt_time.startswith("2015-11-03T11:00:06.122"))