Code example #1
@ndb.tasklet
def _AllSubtestsDeprecated(test):
    descendant_tests = yield list_tests.GetTestDescendantsAsync(
        test.key, has_rows=True, keys_only=False)
    result = all(t.deprecated for t in descendant_tests)
    raise ndb.Return(result)
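
The examples on this page use the App Engine ndb tasklet pattern: a generator decorated with @ndb.tasklet yields futures and returns its result by raising ndb.Return (the Python 2 idiom for returning a value from a generator). A minimal, self-contained sketch of the pattern; the Counter model and get_count_async name are illustrative, not taken from the examples:

from google.appengine.ext import ndb

class Counter(ndb.Model):
    count = ndb.IntegerProperty(default=0)

@ndb.tasklet
def get_count_async(name):
    # yield suspends the tasklet until the datastore future resolves.
    entity = yield Counter.get_by_id_async(name)
    # raise ndb.Return(...) is how a tasklet returns its result.
    raise ndb.Return(entity.count if entity else 0)

# Callers either yield the future from another tasklet, or block on it:
#   count = get_count_async('hits').get_result()
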
Code example #2
@ndb.tasklet
def insert_async(entity, new_key_callback=None, extra=None):
    """Inserts an entity in the DB and guarantees creation.

  Similar in principle to ndb.Model.get_or_insert() except that it only succeeds
  when the entity was not already present. As such, this always requires a
  transaction.

  Optionally retries with a new key if |new_key_callback| is provided.

  Arguments:
    entity: entity to save; it should have its .key already set accordingly. The
        .key property will be mutated, even if the function fails. It is highly
        preferable to have a root entity so the transaction can be done safely.
    new_key_callback: function that generates a new key if the previous key was
        already taken. If this function returns None, the execution is aborted.
        If this parameter is None, insertion is only tried once.
        May return a future.
    extra: additional entities to store simultaneously. For example, a bookkeeping
        entity that must be updated along with |entity|. All the entities must be
        inside the same entity group. This function is not safe w.r.t. `extra`;
        entities in this list will overwrite entities already in the DB.

  Returns:
    ndb.Key of the newly saved entity or None if the entity was already present
    in the db.
  """
    assert not ndb.in_transaction()
    assert entity.key.id(), entity.key
    entities = [entity]
    if extra:
        entities.extend(extra)
        root = entity.key.pairs()[0]
        assert all(i.key and i.key.pairs()[0] == root for i in extra), extra

    def new_key_callback_async():
        key = None
        if new_key_callback:
            key = new_key_callback()
        if isinstance(key, ndb.Future):
            return key
        future = ndb.Future()
        future.set_result(key)
        return future

    @ndb.tasklet
    def run():
        if (yield entities[0].key.get_async()):
            # The entity exists, abort.
            raise ndb.Return(False)
        yield ndb.put_multi_async(entities)
        raise ndb.Return(True)

    # TODO(maruel): Run a severe load test and count the number of retries.
    while True:
        # First iterate outside the transaction in case the first entity key number
        # selected is already used.
        while entity.key and entity.key.id() and (yield
                                                  entity.key.get_async()):
            entity.key = yield new_key_callback_async()

        if not entity.key or not entity.key.id():
            break

        try:
            if (yield txn.transaction_async(run, retries=0)):
                break
        except txn.CommitError:
            # Retry with the same key.
            pass
        else:
            # Entity existed. Get the next key.
            entity.key = yield new_key_callback_async()
    raise ndb.Return(entity.key)
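
A hedged caller sketch for insert_async: the Job model and key generator below are illustrative only, not part of the source. The call retries with fresh keys on collisions and returns the key that was finally used (or None if insertion was aborted).

import random
from google.appengine.ext import ndb

class Job(ndb.Model):  # illustrative model
    payload = ndb.StringProperty()

def _new_job_key():
    # Illustrative key generator; called again whenever a collision is found.
    return ndb.Key(Job, random.getrandbits(16) + 1)

job = Job(key=_new_job_key(), payload='hello')
key = insert_async(job, new_key_callback=_new_job_key).get_result()
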
Code example #3
@ndb.tasklet
def _GetSheriffForTest(test):
    """Gets the Sheriff for a test, or None if no sheriff."""
    if test.sheriff:
        sheriff = yield test.sheriff.get_async()
        raise ndb.Return(sheriff)
    raise ndb.Return(None)
Code example #4
 def get_events_teams_districts():
     events, teams, districts = yield get_events_async(
     ), get_teams_async(), get_districts_async()
     raise ndb.Return((events, teams, districts))
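
Yielding a tuple (or list) of futures inside a tasklet waits on all of them concurrently, which is what lets the three fetches above run in parallel. A minimal sketch with hypothetical tasklets:

from google.appengine.ext import ndb

@ndb.tasklet
def _square_async(x):
    # Stand-in for a datastore or RPC call.
    raise ndb.Return(x * x)

@ndb.tasklet
def fan_out_async():
    # Both futures are created first, then awaited together.
    a, b = yield _square_async(2), _square_async(3)
    raise ndb.Return(a + b)

# fan_out_async().get_result() == 13
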
Code example #5
File: config.py Project: brandonscott1780/luci-py
@ndb.tasklet
def _get_service_config_rev_async(cfg_name):
    """Returns last processed Revision of given config."""
    e = yield _AuthServiceConfig.get_by_id_async(cfg_name)
    raise ndb.Return(Revision(e.revision, e.url) if e else None)
Code example #6
class Descriptor(object):
    """Describe a timeseries by its characteristics.

  Supports partial test paths (e.g. test suite paths) by allowing some
  characteristics to be None.
  """
    def __init__(self,
                 test_suite=None,
                 measurement=None,
                 bot=None,
                 test_case=None,
                 statistic=None,
                 build_type=None):
        self.test_suite = test_suite
        self.measurement = measurement
        self.bot = bot
        self.test_case = test_case
        self.statistic = statistic
        self.build_type = build_type

    def Clone(self):
        return Descriptor(self.test_suite, self.measurement, self.bot,
                          self.test_case, self.statistic, self.build_type)

    def __repr__(self):
        return 'Descriptor(%r, %r, %r, %r, %r, %r)' % (
            self.test_suite, self.measurement, self.bot, self.test_case,
            self.statistic, self.build_type)

    def __eq__(self, other):
        return repr(self) == repr(other)

    def __lt__(self, other):
        return repr(self) < repr(other)

    CONFIGURATION = {}

    @classmethod
    @ndb.tasklet
    def _GetConfiguration(cls, key, default=None):
        if key not in cls.CONFIGURATION:
            cls.CONFIGURATION[key] = (yield
                                      stored_object.GetAsync(key)) or default
        raise ndb.Return(cls.CONFIGURATION[key])

    @classmethod
    def ResetMemoizedConfigurationForTesting(cls):
        cls.CONFIGURATION = {}

    @classmethod
    @ndb.tasklet
    def _MeasurementCase(cls, test_suite, path):
        if len(path) == 1:
            raise ndb.Return((path.pop(0), None))

        if test_suite.startswith('loading.'):
            measurement = path.pop(0)
            parts, path[:] = path[:], []
            if len(parts) > 1 and parts[1].endswith('_' + parts[0]):
                parts[1] = parts[1][:-(len(parts[0]) + 1)]
            raise ndb.Return((measurement, ':'.join(parts)))

        if test_suite.startswith('resource_sizes'):
            parts, path[:] = path[:], []
            raise ndb.Return((':'.join(parts), None))

        if test_suite == 'sizes':
            parts, path[:] = path[:], []
            raise ndb.Return((':'.join(parts[:6]), ':'.join(parts[6:])))

        complex_cases_test_suites = yield cls._GetConfiguration(
            COMPLEX_CASES_TEST_SUITES_KEY, [])
        if (test_suite.startswith('system_health')
                or (test_suite in complex_cases_test_suites)):
            measurement = path.pop(0)
            path.pop(0)
            if len(path) == 0:
                raise ndb.Return((measurement, None))
            raise ndb.Return(
                (measurement,
                 path.pop(0).replace('_', ':').replace('long:running:tools',
                                                       'long_running_tools')))

        if test_suite in (yield cls._GetConfiguration(TWO_TWO_TEST_SUITES_KEY,
                                                      [])):
            parts, path[:] = path[:], []
            raise ndb.Return(':'.join(parts[:2]), ':'.join(parts[2:]))

        if test_suite in [
                'memory.dual_browser_test', 'memory.top_10_mobile',
                'v8:runtime_stats.top_25'
        ]:
            measurement = path.pop(0)
            case = path.pop(0)
            if len(path) == 0:
                raise ndb.Return((measurement, None))
            raise ndb.Return((measurement, case + ':' + path.pop(0)))

        if test_suite in (yield cls._GetConfiguration(
                POLY_MEASUREMENT_TEST_SUITES_KEY, [])):
            parts, path[:] = path[:], []
            case = None
            if parts[-1] == NO_MITIGATIONS_CASE:
                case = parts.pop()
            raise ndb.Return((':'.join(parts), case))

        raise ndb.Return((path.pop(0), path.pop(0)))
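
Because __eq__ and __lt__ are defined in terms of repr(), Descriptor instances can be compared and sorted directly; a short usage sketch with made-up values:

a = Descriptor(test_suite='system_health.memory_mobile',
               bot='ChromiumPerf:android-go',
               measurement='memory:java_heap')
b = a.Clone()
assert a == b                                        # equality is repr()-based
partial = Descriptor(bot='ChromiumPerf:android-go')  # partial path, rest None
ordered = sorted([a, partial])                       # ordering is repr()-based
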
Code example #7
 def get_teams_async():
     team_keys = yield Team.query().order(
         Team.team_number).fetch_async(keys_only=True)
     teams = yield ndb.get_multi_async(team_keys)
     raise ndb.Return(teams)
Code example #8
@ndb.tasklet
def _FindMonitoredStatsForTest(test):
    del test
    # TODO: This will get filled out after refactor.
    raise ndb.Return(['avg'])
Code example #9
@ndb.tasklet
def _MakeAnomalyEntity(change_point, test, stat, rows):
    """Creates an Anomaly entity.

  Args:
    change_point: A find_change_points.ChangePoint object.
    test: The TestMetadata entity that the anomalies were found on.
    stat: The TestMetadata stat that the anomaly was found on.
    rows: List of Row entities that the anomalies were found on.

  Returns:
    An Anomaly entity, which is not yet put in the datastore.
  """
    end_rev = change_point.x_value
    start_rev = _GetImmediatelyPreviousRevisionNumber(end_rev, rows) + 1
    display_start = display_end = None
    if test.master_name == 'ClankInternal':
        display_start, display_end = _GetDisplayRange(change_point.x_value,
                                                      rows)
    median_before = change_point.median_before
    median_after = change_point.median_after

    suite_key = test.key.id().split('/')[:3]
    suite_key = '/'.join(suite_key)
    suite_key = utils.TestKey(suite_key)

    queried_diagnostics = yield (
        histogram.SparseDiagnostic.GetMostRecentDataByNamesAsync(
            suite_key,
            set([
                reserved_infos.BUG_COMPONENTS.name, reserved_infos.OWNERS.name
            ])))

    bug_components = queried_diagnostics.get(
        reserved_infos.BUG_COMPONENTS.name, {}).get('values')
    if bug_components:
        bug_components = bug_components[0]
        # TODO(902796): Remove this typecheck.
        if isinstance(bug_components, list):
            bug_components = bug_components[0]

    ownership_information = {
        'emails':
        queried_diagnostics.get(reserved_infos.OWNERS.name, {}).get('values'),
        'component':
        bug_components
    }

    new_anomaly = anomaly.Anomaly(
        start_revision=start_rev,
        end_revision=end_rev,
        median_before_anomaly=median_before,
        median_after_anomaly=median_after,
        segment_size_before=change_point.size_before,
        segment_size_after=change_point.size_after,
        window_end_revision=change_point.window_end,
        std_dev_before_anomaly=change_point.std_dev_before,
        t_statistic=change_point.t_statistic,
        degrees_of_freedom=change_point.degrees_of_freedom,
        p_value=change_point.p_value,
        is_improvement=_IsImprovement(test, median_before, median_after),
        ref_test=_GetRefBuildKeyForTest(test),
        test=test.key,
        statistic=stat,
        internal_only=test.internal_only,
        units=test.units,
        display_start=display_start,
        display_end=display_end,
        ownership=ownership_information)
    raise ndb.Return(new_anomaly)
Code example #10
@ndb.tasklet
def DeduplicateAndPut(new_entities, test, rev):
    result = yield DeduplicateAndPutAsync(new_entities, test, rev)
    raise ndb.Return(result)
Code example #11
 def get_projects_async(self):
   res = yield self._api_call_async('projects', allow_not_found=False)
   raise ndb.Return(res.get('projects', []))
Code example #12
 def _query_async(self):
     district_key = self._query_args[0]
     events = yield Event.query(Event.district_key == ndb.Key(
         District, district_key)).fetch_async()
     raise ndb.Return(events)
Code example #13
 def _query_async(self):
     year = self._query_args[0]
     events = yield Event.query(Event.year == year).fetch_async()
     raise ndb.Return(events)
Code example #14
 def _query_async(self):
     event_key = self._query_args[0]
     event = yield Event.get_by_id_async(event_key)
     raise ndb.Return(event)
Code example #15
    @classmethod
    @ndb.tasklet
    def FromTestPathAsync(cls, test_path):
        """Parse a test path into a Descriptor.

    Args:
      test_path: A slash-separated test path string.

    Returns:
      Descriptor
    """
        path = test_path.split('/')
        if len(path) < 2:
            raise ndb.Return(cls())

        bot = path.pop(0) + ':' + path.pop(0)
        if len(path) == 0:
            raise ndb.Return(cls(bot=bot))

        test_suite = path.pop(0)

        if test_suite in (yield cls._GetConfiguration(PARTIAL_TEST_SUITES_KEY,
                                                      [])):
            if len(path) == 0:
                raise ndb.Return(cls(bot=bot))
            test_suite += ':' + path.pop(0)

        if test_suite.startswith('resource_sizes '):
            test_suite = 'resource_sizes:' + test_suite[16:-1]
        else:
            for prefix in (yield cls._GetConfiguration(
                    GROUPABLE_TEST_SUITE_PREFIXES_KEY, [])):
                if test_suite.startswith(prefix):
                    test_suite = prefix[:-1] + ':' + test_suite[len(prefix):]
                    break

        if len(path) == 0:
            raise ndb.Return(cls(test_suite=test_suite, bot=bot))

        build_type = TEST_BUILD_TYPE
        if path[-1] == 'ref':
            path.pop()
            build_type = REFERENCE_BUILD_TYPE
        elif path[-1].endswith('_ref'):
            build_type = REFERENCE_BUILD_TYPE
            path[-1] = path[-1][:-4]

        if len(path) == 0:
            raise ndb.Return(
                cls(test_suite=test_suite, bot=bot, build_type=build_type))

        measurement, test_case = yield cls._MeasurementCase(test_suite, path)

        statistic = None
        if measurement != 'jank_count':
            # TODO: Handle other measurements ending with statistics?
            stat_match = re.match(STATISTICS_REGEX, measurement)
            if stat_match:
                measurement, statistic = stat_match.groups()

        if path:
            raise ValueError('Unable to parse %r' % test_path)

        raise ndb.Return(
            cls(test_suite=test_suite,
                bot=bot,
                measurement=measurement,
                statistic=statistic,
                test_case=test_case,
                build_type=build_type))
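
A hedged caller sketch for FromTestPathAsync: the test path is made up, and the exact measurement/test_case split depends on the configuration lists fetched through _GetConfiguration, so the parsed fields are only indicative.

desc = Descriptor.FromTestPathAsync(
    'ChromiumPerf/android-go/system_health.memory_mobile/memory_avg/story'
).get_result()
# desc.bot == 'ChromiumPerf:android-go'; the remaining path components are
# split into test_suite, measurement, statistic and test_case per the rules
# above.
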
Code example #16
@ndb.tasklet
def _ProcessTestStat(config, test, stat, rows, ref_rows):
    test_key = test.key

    # If there were no rows fetched, then there's nothing to analyze.
    if not rows:
        logging.error('No rows fetched for %s', test.test_path)
        raise ndb.Return(None)

    # Get anomalies and check if they happen in ref build also.
    change_points = FindChangePointsForTest(rows, config)

    if ref_rows:
        ref_change_points = FindChangePointsForTest(ref_rows, config)

        # Filter using any jumps in ref
        change_points = _FilterAnomaliesFoundInRef(change_points,
                                                   ref_change_points, test_key)

    anomalies = yield [
        _MakeAnomalyEntity(c, test, stat, rows) for c in change_points
    ]

    # If no new anomalies were found, then we're done.
    if not anomalies:
        raise ndb.Return(None)

    logging.info('Created %d anomalies', len(anomalies))
    logging.info(' Test: %s', test_key.id())
    logging.info(' Stat: %s', stat)

    # Get all the subscriptions from sheriff-config that match the test path.
    client = SheriffConfigClient()
    subscriptions, err_msg = client.Match(test.test_path)
    subscription_names = [s.name for s in subscriptions or []]

    # Break the process when Match fails, so that find_anomaly makes a
    # best-effort attempt to find the subscriber. Leave retrying to upstream.
    if err_msg is not None:
        raise RuntimeError(err_msg)

    if not subscriptions:
        logging.warning('No subscription for %s', test.test_path)

    for a in anomalies:
        a.subscriptions = subscriptions
        a.subscription_names = subscription_names
        a.internal_only = (any([
            s.visibility != subscription.VISIBILITY.PUBLIC
            for s in subscriptions
        ]) or test.internal_only)

    yield ndb.put_multi_async(anomalies)

    # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
    # code will run serially.
    # Email sheriff about any new regressions.
    for anomaly_entity in anomalies:
        if (anomaly_entity.bug_id is None
                and not anomaly_entity.is_improvement):
            deferred.defer(_EmailSheriff, anomaly_entity.subscriptions,
                           test.key, anomaly_entity.key)
Code example #17
 def _BotTestPaths(self):
     master, slave = self.bot.split(':')
     aliases = yield bot_configurations.GetAliasesAsync(slave)
     raise ndb.Return({master + '/' + alias for alias in aliases})
Code example #18
                else:
                    break

            # Fetch response
            if last_file_url:
                try:
                    result = yield context.urlfetch(last_file_url)
                except Exception, e:
                    logging.error(
                        "URLFetch failed for: {}".format(last_file_url))
                    logging.info(e)
                    raise ndb.Return(None)
                content = result.content

            if content is None:
                raise ndb.Return(None)
            result = type('DummyResult', (object, ), {
                "status_code": 200,
                "content": content
            })
        else:
            """
            Make fetch to FRC API
            """
            headers = {
                'Authorization': 'Basic {}'.format(self._fms_api_authtoken),
                'Cache-Control': 'no-cache, max-age=10',
                'Pragma': 'no-cache',
            }
            try:
                result = yield context.urlfetch(url, headers=headers)
Code example #19
 def get_events_async():
     event_keys = yield Event.query().order(-Event.year).order(
         Event.name).fetch_async(keys_only=True)
     events = yield ndb.get_multi_async(event_keys)
     raise ndb.Return(events)
Code example #20
 def _parse(self, url, parser):
     result = yield self._parse_async(url, parser)
     raise ndb.Return(result)
Code example #21
 def get_districts_async():
     district_keys = yield District.query().order(
         -District.year).fetch_async(keys_only=True)
     districts = yield ndb.get_multi_async(district_keys)
     raise ndb.Return(districts)
Code example #22
class DatafeedFMSAPI(object):
    EVENT_SHORT_EXCEPTIONS = {
        'arc': 'archimedes',
        'cars': 'carson',
        'carv': 'carver',
        'cur': 'curie',
        'dal': 'daly',
        'dar': 'darwin',
        'gal': 'galileo',
        'hop': 'hopper',
        'new': 'newton',
        'roe': 'roebling',
        'tes': 'tesla',
        'tur': 'turing',
    }

    SUBDIV_TO_DIV = {  # 2015, 2016
        'arc': 'arte',
        'cars': 'gaca',
        'carv': 'cuca',
        'cur': 'cuca',
        'gal': 'gaca',
        'hop': 'neho',
        'new': 'neho',
        'tes': 'arte',
    }

    SUBDIV_TO_DIV_2017 = {  # 2017+
        'arc': 'arda',
        'cars': 'cate',
        'carv': 'cane',
        'cur': 'cuda',
        'dal': 'arda',
        'dar': 'cuda',
        'gal': 'garo',
        'hop': 'hotu',
        'new': 'cane',
        'roe': 'garo',
        'tes': 'cate',
        'tur': 'hotu',
    }

    SAVED_RESPONSE_DIR_PATTERN = '/tbatv-prod-hrd.appspot.com/frc-api-response/{}/'  # % (url)

    def __init__(self, version, sim_time=None, save_response=False):
        self._sim_time = sim_time
        self._save_response = save_response and sim_time is None
        fms_api_secrets = Sitevar.get_by_id('fmsapi.secrets')
        if fms_api_secrets is None:
            if self._sim_time is None:
                raise Exception(
                    "Missing sitevar: fmsapi.secrets. Can't access FMS API.")
        else:
            fms_api_username = fms_api_secrets.contents['username']
            fms_api_authkey = fms_api_secrets.contents['authkey']
            self._fms_api_authtoken = base64.b64encode('{}:{}'.format(
                fms_api_username, fms_api_authkey))

        self._is_down_sitevar = Sitevar.get_by_id('apistatus.fmsapi_down')
        if not self._is_down_sitevar:
            self._is_down_sitevar = Sitevar(id="apistatus.fmsapi_down",
                                            description="Is FMSAPI down?")

        self.FMS_API_DOMAIN = 'https://frc-api.firstinspires.org/'
        if version == 'v1.0':
            FMS_API_URL_BASE = self.FMS_API_DOMAIN + 'api/v1.0'
            self.FMS_API_AWARDS_URL_PATTERN = FMS_API_URL_BASE + '/awards/%s/%s'  # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_QUAL_URL_PATTERN = FMS_API_URL_BASE + '/schedule/%s/%s/qual/hybrid'  # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_PLAYOFF_URL_PATTERN = FMS_API_URL_BASE + '/schedule/%s/%s/playoff/hybrid'  # (year, event_short)
            self.FMS_API_EVENT_RANKINGS_URL_PATTERN = FMS_API_URL_BASE + '/rankings/%s/%s'  # (year, event_short)
            self.FMS_API_EVENT_ALLIANCES_URL_PATTERN = FMS_API_URL_BASE + '/alliances/%s/%s'  # (year, event_short)
            self.FMS_API_TEAM_DETAILS_URL_PATTERN = FMS_API_URL_BASE + '/teams/%s/?teamNumber=%s'  # (year, teamNumber)
            self.FMS_API_TEAM_AVATAR_URL_PATTERN = FMS_API_URL_BASE + '/%s/avatars/?teamNumber=%s'  # (year, teamNumber)
            self.FMS_API_EVENT_AVATAR_URL_PATTERN = FMS_API_URL_BASE + '/%s/avatars/?eventCode=%s&page=%s'  # (year, eventCode, page)
            self.FMS_API_EVENT_LIST_URL_PATTERN = FMS_API_URL_BASE + '/events/season=%s'
            self.FMS_API_EVENTTEAM_LIST_URL_PATTERN = FMS_API_URL_BASE + '/teams/?season=%s&eventCode=%s&page=%s'  # (year, eventCode, page)
        elif version == 'v2.0':
            FMS_API_URL_BASE = self.FMS_API_DOMAIN + 'v2.0'
            self.FMS_API_AWARDS_URL_PATTERN = FMS_API_URL_BASE + '/%s/awards/%s'  # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_QUAL_URL_PATTERN = FMS_API_URL_BASE + '/%s/schedule/%s/qual/hybrid'  # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_PLAYOFF_URL_PATTERN = FMS_API_URL_BASE + '/%s/schedule/%s/playoff/hybrid'  # (year, event_short)
            self.FMS_API_MATCH_DETAILS_QUAL_URL_PATTERN = FMS_API_URL_BASE + '/%s/scores/%s/qual'  # (year, event_short)
            self.FMS_API_MATCH_DETAILS_PLAYOFF_URL_PATTERN = FMS_API_URL_BASE + '/%s/scores/%s/playoff'  # (year, event_short)
            self.FMS_API_EVENT_RANKINGS_URL_PATTERN = FMS_API_URL_BASE + '/%s/rankings/%s'  # (year, event_short)
            self.FMS_API_EVENT_ALLIANCES_URL_PATTERN = FMS_API_URL_BASE + '/%s/alliances/%s'  # (year, event_short)
            self.FMS_API_TEAM_DETAILS_URL_PATTERN = FMS_API_URL_BASE + '/%s/teams/?teamNumber=%s'  # (year, teamNumber)
            self.FMS_API_TEAM_AVATAR_URL_PATTERN = FMS_API_URL_BASE + '/%s/avatars/?teamNumber=%s'  # (year, teamNumber)
            self.FMS_API_EVENT_AVATAR_URL_PATTERN = FMS_API_URL_BASE + '/%s/avatars/?eventCode=%s&page=%s'  # (year, eventCode, page)
            self.FMS_API_EVENT_LIST_URL_PATTERN = FMS_API_URL_BASE + '/%s/events'  # year
            self.FMS_API_EVENT_DETAILS_URL_PATTERN = FMS_API_URL_BASE + '/%s/events?eventCode=%s'  # (year, event_short)
            self.FMS_API_EVENTTEAM_LIST_URL_PATTERN = FMS_API_URL_BASE + '/%s/teams/?eventCode=%s&page=%s'  # (year, eventCode, page)
            self.FMS_API_DISTRICT_LIST_URL_PATTERN = FMS_API_URL_BASE + '/%s/districts'  # (year)
            self.FMS_API_DISTRICT_RANKINGS_PATTERN = FMS_API_URL_BASE + '/%s/rankings/district?districtCode=%s&page=%s'  # (year, district abbreviation, page)
        else:
            raise Exception("Unknown FMS API version: {}".format(version))

    def _get_event_short(self, event_short, event=None):
        # First, check if we've manually set the FRC API key
        if event and event.first_code:
            return event.first_code

        # Otherwise, check hard-coded exceptions
        return self.EVENT_SHORT_EXCEPTIONS.get(event_short, event_short)

    @ndb.tasklet
    def _parse_async(self, url, parser):
        # For URLFetches
        context = ndb.get_context()

        # Prep for saving/reading raw API response into/from cloudstorage
        gcs_dir_name = self.SAVED_RESPONSE_DIR_PATTERN.format(
            url.replace(self.FMS_API_DOMAIN, ''))
        if self._save_response and tba_config.CONFIG['save-frc-api-response']:
            try:
                gcs_dir_contents = cloudstorage.listbucket(
                    gcs_dir_name)  # This is async
            except Exception, exception:
                logging.error(
                    "Error prepping for saving API response for: {}".format(
                        url))
                logging.error(traceback.format_exc())
                gcs_dir_contents = []

        if self._sim_time:
            """
            Simulate FRC API response at a given time
            """
            content = None

            # Get list of responses
            file_prefix = 'frc-api-response/{}/'.format(
                url.replace(self.FMS_API_DOMAIN, ''))
            bucket_list_url = 'https://www.googleapis.com/storage/v1/b/bucket/o?bucket=tbatv-prod-hrd.appspot.com&prefix={}'.format(
                file_prefix)
            try:
                result = yield context.urlfetch(bucket_list_url)
            except Exception, e:
                logging.error(
                    "URLFetch failed for: {}".format(bucket_list_url))
                logging.info(e)
                raise ndb.Return(None)

            # Find appropriate timed response
            last_file_url = None
            for item in json.loads(result.content)['items']:
                filename = item['name']
                time_str = filename.replace(file_prefix,
                                            '').replace('.json', '').strip()
                file_time = datetime.datetime.strptime(time_str,
                                                       "%Y-%m-%d %H:%M:%S.%f")
                if file_time <= self._sim_time:
                    last_file_url = item['mediaLink']
                else:
                    break

            # Fetch response
            if last_file_url:
                try:
                    result = yield context.urlfetch(last_file_url)
                except Exception, e:
                    logging.error(
                        "URLFetch failed for: {}".format(last_file_url))
                    logging.info(e)
                    raise ndb.Return(None)
                content = result.content
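
The URL attributes built in __init__ are plain %-style templates, and _get_event_short only consults the exception table; an illustrative check with made-up values, where df stands for a hypothetical DatafeedFMSAPI('v2.0') instance:

df._get_event_short('arc')    # -> 'archimedes' (from EVENT_SHORT_EXCEPTIONS)
df._get_event_short('casj')   # -> 'casj' (no exception, passed through)
df.FMS_API_AWARDS_URL_PATTERN % (2019, 'archimedes')
# -> 'https://frc-api.firstinspires.org/v2.0/2019/awards/archimedes'
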
Code example #23
@ndb.tasklet
def _FindOrInsertNamedDiagnosticsOutOfOrder(
    new_diagnostic, old_diagnostics, rev):
  new_guid = new_diagnostic.key.id()
  guid_mapping = {}

  for i in xrange(len(old_diagnostics)):
    cur = old_diagnostics[i]

    suite_key = utils.TestKey('/'.join(cur.test.id().split('/')[:3]))

    next_diagnostic = None if i == 0 else old_diagnostics[i-1]

    # Overall there are 2 major cases to handle. Either you're clobbering an
    # existing diagnostic by uploading right to the start of that diagnostic's
    # range, or you're splitting the range.
    #
    # We treat insertions by assuming that the new diagnostic is valid until the
    # next uploaded commit, since that commit will have had a diagnostic on it
    # which will have been diffed and inserted appropriately at the time.

    # Case 1, clobber the existing diagnostic.
    if rev == cur.start_revision:
      if not cur.IsDifferent(new_diagnostic):
        raise ndb.Return(guid_mapping)

      next_revision = yield HistogramRevisionRecord.FindNextRevision(
          suite_key, rev)

      futures = []

      # There's either a next diagnostic or there isn't, check each separately.
      if not next_diagnostic:
        # If this is the last diagnostic in the range, there are only 2 cases
        # to consider.
        #  1. There are no commits after this diagnostic.
        #  2. There are commits, in which case we need to split the range.

        # 1. There are no commits.
        if next_revision == sys.maxint:
          cur.data = new_diagnostic.data
          cur.data['guid'] = cur.key.id()

          guid_mapping[new_guid] = cur.data
          new_diagnostic = None

        # 2. There are commits, in which case we need to split the range.
        else:
          new_diagnostic.start_revision = cur.start_revision
          new_diagnostic.end_revision = next_revision

          # Nudge the old diagnostic range forward, that way you don't have to
          # resave the histograms.
          cur.start_revision = next_revision + 1

      # There is another diagnostic range after this one.
      else:
        # If there is another diagnostic range after this, we need to check:
        #  1. Are there any commits between this revision and the next
        #     diagnostic
        #   a. If there are, we need to split the range
        #   b. If there aren't, we just overwrite the diagnostic.

        # 1a. There are commits after this revision before the start of the next
        #     diagnostic.
        if next_revision != next_diagnostic.start_revision - 1:
          new_diagnostic.start_revision = cur.start_revision
          new_diagnostic.end_revision = next_revision

          # Nudge the old diagnostic range forward, that way you don't have to
          # resave the histograms.
          cur.start_revision = next_revision + 1

        # 1b. No commits between this revision and the next diagnostic, so just
        #     overwrite it.
        else:
          # A. They're not the same.
          if new_diagnostic.IsDifferent(next_diagnostic):
            cur.data = new_diagnostic.data
            cur.data['guid'] = cur.key.id()

            guid_mapping[new_guid] = cur.data
            new_diagnostic = None

          # B. They're the same, in which case we just want to extend the next
          #    diagnostic's range backwards.
          else:
            guid_mapping[new_guid] = next_diagnostic.data
            next_diagnostic.start_revision = cur.start_revision
            new_diagnostic = None
            futures.append(cur.key.delete_async())
            cur = next_diagnostic

      # Finally, check if there was a diagnostic range before this, and whether
      # it's different from the new one.
      prev_diagnostic = None
      if i + 1 < len(old_diagnostics):
        prev_diagnostic = old_diagnostics[i+1]

      cur_diagnostic = cur
      if new_diagnostic:
        cur_diagnostic = new_diagnostic

      # Previous diagnostic range is different, so just ignore it.
      if not prev_diagnostic or cur_diagnostic.IsDifferent(prev_diagnostic):
        futures.append(cur.put_async())
        if new_diagnostic:
          futures.append(new_diagnostic.put_async())

      # Previous range is the same, so merge.
      else:
        guid_mapping[new_guid] = prev_diagnostic.data
        prev_diagnostic.end_revision = cur_diagnostic.end_revision

        futures.append(prev_diagnostic.put_async())
        if new_diagnostic:
          new_diagnostic = None
          futures.append(cur.put_async())
        else:
          futures.append(cur.key.delete_async())

      yield futures
      raise ndb.Return(guid_mapping)

    # Case 2, split the range.
    elif rev > cur.start_revision and rev <= cur.end_revision:
      if not cur.IsDifferent(new_diagnostic):
        raise ndb.Return(guid_mapping)

      next_revision = yield HistogramRevisionRecord.FindNextRevision(
          suite_key, rev)

      cur.end_revision = rev - 1
      new_diagnostic.start_revision = rev
      new_diagnostic.end_revision = next_revision

      futures = [cur.put_async()]

      # There's either a next diagnostic or there isn't, check each separately.
      if not next_diagnostic:
        # There's no commit after this revision, which means we can extend this
        # diagnostic range to infinity.
        if next_revision == sys.maxint:
          new_diagnostic.end_revision = next_revision
        else:
          new_diagnostic.end_revision = next_revision

          clone_of_cur = SparseDiagnostic(
              data=cur.data, test=cur.test,
              start_revision=next_revision + 1, end_revision=sys.maxint,
              name=cur.name, internal_only=cur.internal_only)
          futures.append(clone_of_cur.put_async())

        futures.append(new_diagnostic.put_async())
      else:
        # If there is another diagnostic range after this, we need to check:
        #  1. Are there any commits between this revision and the next
        #     diagnostic
        #   a. If there are, we need to split the range
        #   b. If there aren't, we need to check if the next diagnostic is
        #      any different than the current one, because we may just merge
        #      them together.

        # 1a. There are commits after this revision before the start of the next
        #     diagnostic.
        if next_revision != next_diagnostic.start_revision - 1:
          new_diagnostic.end_revision = next_revision

          clone_of_cur = SparseDiagnostic(
              data=cur.data, test=cur.test,
              start_revision=next_revision + 1,
              end_revision=next_diagnostic.start_revision - 1,
              name=cur.name, internal_only=cur.internal_only)

          futures.append(clone_of_cur.put_async())
          futures.append(new_diagnostic.put_async())

        # 1b. There aren't commits between this revision and the start of the
        #     next diagnostic range. In this case there are 2 possible outcomes.
        #   A. They're not the same, so just split the range as normal.
        #   B. That the new diagnostic we're inserting and the next one are the
        #      same, in which case they can be merged.
        else:
          # A. They're not the same.
          if new_diagnostic.IsDifferent(next_diagnostic):
            new_diagnostic.end_revision = next_diagnostic.start_revision - 1
            futures.append(new_diagnostic.put_async())

          # B. They're the same, in which case we just want to extend the next
          #    diagnostic's range backwards.
          else:
            guid_mapping[new_guid] = next_diagnostic.data
            next_diagnostic.start_revision = new_diagnostic.start_revision
            new_diagnostic = None
            futures.append(next_diagnostic.put_async())

      yield futures
      raise ndb.Return(guid_mapping)

  # Can't find a spot to put it, which indicates that it should go before any
  # existing diagnostic.
  next_diagnostic = old_diagnostics[-1]

  if not next_diagnostic.IsDifferent(new_diagnostic):
    next_diagnostic.start_revision = rev
    guid_mapping[new_guid] = next_diagnostic.data
    yield next_diagnostic.put_async()
    raise ndb.Return(guid_mapping)

  new_diagnostic.start_revision = rev
  new_diagnostic.end_revision = next_diagnostic.start_revision - 1
  yield new_diagnostic.put_async()
  raise ndb.Return(guid_mapping)
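
A plain-number sketch of the "Case 2" range split described in the comments above (no datastore involved; the revision numbers are made up). An existing diagnostic covering revisions 100..sys.maxint is split when an out-of-order upload arrives at revision 150 and the next known upload is at revision 180:

import sys

cur_start, cur_end = 100, sys.maxint     # existing diagnostic range
rev, next_revision = 150, 180            # upload revision, next known upload

cur_end = rev - 1                        # old diagnostic now covers 100..149
new_start, new_end = rev, next_revision  # new diagnostic covers 150..180
clone_start, clone_end = next_revision + 1, sys.maxint  # clone covers 181..max
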
Code example #24
@ndb.tasklet
def get_buckets_async():
    """Returns a list of project_config_pb2.Bucket objects."""
    buckets = yield Bucket.query().fetch_async()
    raise ndb.Return([parse_bucket_config(b.config_content) for b in buckets])
Code example #25
@ndb.tasklet
def get_versioned_most_recent_async(cls, root_key):
    """Returns the most recent entity of cls child of root_key."""
    _, entity = yield get_versioned_most_recent_with_root_async(cls, root_key)
    raise ndb.Return(entity)
Code example #26
@ndb.tasklet
def get_bucket_async(name):
    """Returns a project_config_pb2.Bucket by name."""
    bucket = yield Bucket.get_by_id_async(name)
    if bucket is None:
        raise ndb.Return(None)
    raise ndb.Return(parse_bucket_config(bucket.config_content))
Code example #27
 def run():
     if (yield entities[0].key.get_async()):
         # The entity exists, abort.
         raise ndb.Return(False)
     yield ndb.put_multi_async(entities)
     raise ndb.Return(True)
Code example #28
 def _GetConfiguration(cls, key, default=None):
     if key not in cls.CONFIGURATION:
         cls.CONFIGURATION[key] = (yield
                                   stored_object.GetAsync(key)) or default
     raise ndb.Return(cls.CONFIGURATION[key])
Code example #29
@ndb.tasklet
def _ProcessTest(test_key):
    """Processes a test to find new anomalies.

  Args:
    test_key: The ndb.Key for a TestMetadata.
  """
    test = yield test_key.get_async()

    sheriff = yield _GetSheriffForTest(test)
    if not sheriff:
        logging.error('No sheriff for %s', test_key)
        raise ndb.Return(None)

    config = yield anomaly_config.GetAnomalyConfigDictAsync(test)
    max_num_rows = config.get('max_window_size', DEFAULT_NUM_POINTS)
    rows = yield GetRowsToAnalyzeAsync(test, max_num_rows)
    # If there were no rows fetched, then there's nothing to analyze.
    if not rows:
        # In some cases (e.g. if some points are deleted) it might be possible
        # that last_alerted_revision is incorrect. In this case, reset it.
        highest_rev = yield _HighestRevision(test_key)
        if test.last_alerted_revision > highest_rev:
            logging.error(
                'last_alerted_revision %d is higher than highest rev %d '
                'for test %s; setting last_alerted_revision to None.',
                test.last_alerted_revision, highest_rev, test.test_path)
            test.last_alerted_revision = None
            yield test.put_async()
        logging.error('No rows fetched for %s', test.test_path)
        raise ndb.Return(None)

    # Get anomalies and check if they happen in ref build also.
    change_points = FindChangePointsForTest(rows, config)
    change_points = yield _FilterAnomaliesFoundInRef(change_points, test_key,
                                                     len(rows))

    anomalies = yield [
        _MakeAnomalyEntity(c, test, rows) for c in change_points
    ]

    # If no new anomalies were found, then we're done.
    if not anomalies:
        return

    logging.info('Created %d anomalies', len(anomalies))
    logging.info(' Test: %s', test_key.id())
    logging.info(' Sheriff: %s', test.sheriff.id())

    # Update the last_alerted_revision property of the test.
    test.last_alerted_revision = anomalies[-1].end_revision
    yield test.put_async()
    yield alert_group.GroupAlertsAsync(anomalies,
                                       utils.TestSuiteName(test.key),
                                       'Anomaly')

    yield ndb.put_multi_async(anomalies)

    # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
    # code will run serially.
    # Email sheriff about any new regressions.
    for anomaly_entity in anomalies:
        if (anomaly_entity.bug_id is None and not anomaly_entity.is_improvement
                and not sheriff.summarize):
            email_sheriff.EmailSheriff(sheriff, test, anomaly_entity)
Code example #30
@ndb.tasklet
def FetchCachedTestSuites2Async():
    results = yield namespaced_stored_object.GetAsync(TEST_SUITES_2_CACHE_KEY)
    raise ndb.Return(results)