def find_test_results(self, configurations, suite, branch=None, begin=None, end=None, recent=True, limit=100):
    """Return uploaded test results for a suite, grouped by concrete configuration.

    Each partial configuration in ``configurations`` is expanded against both
    upload tables; every matching concrete configuration maps to a list of
    unpacked upload rows.

    Args:
        configurations: iterable of (possibly partial) configurations to query.
        suite: test-suite name; must be a str.
        branch: branch to filter on; defaults to the commit context's default branch.
        begin, end: inclusive commit-uuid (or timestamp) bounds for the query window.
        recent: restrict expansion to recently-seen configurations.
        limit: maximum rows fetched per configuration per table.

    Returns:
        dict mapping configuration -> list of unpacked upload dicts.

    Raises:
        TypeError: if suite is not a str.
    """
    if not isinstance(suite, str):
        raise TypeError(f'Expected type {str}, got {type(suite)}')

    with self:
        result = {}
        # FIXME: Remove UploadsByConfigurationLegacy once results in it are
        # sufficiently old in Spring 2021. We don't need to de-duplicate across
        # the two tables because uploads were never reported to both databases.
        # Querying both tables was previously written out twice verbatim; loop
        # over the tables instead so the query logic exists in one place.
        tables = (self.UploadsByConfigurationLegacy, self.UploadsByConfiguration)
        for configuration in configurations:
            for table in tables:
                result.update({
                    config: [value.unpack() for value in values]
                    for config, values in self.configuration_context.select_from_table_with_configurations(
                        table.__table_name__,
                        configurations=[configuration], recent=recent,
                        suite=suite, sdk=configuration.sdk,
                        branch=branch or self.commit_context.DEFAULT_BRANCH_KEY,
                        uuid__gte=CommitContext.convert_to_uuid(begin),
                        uuid__lte=CommitContext.convert_to_uuid(end, CommitContext.timestamp_to_uuid()),
                        limit=limit,
                    ).items()
                })
        return result
def find_test_results(self, configurations, suite, branch=None, begin=None, end=None, recent=True, limit=100):
    """Look up uploaded results for a suite across the provided configurations.

    Expands each partial configuration against the UploadsByConfiguration table
    and maps every concrete configuration found to its list of unpacked uploads.

    Raises:
        TypeError: if suite is not a str.
    """
    if not isinstance(suite, str):
        raise TypeError(f'Expected type {str}, got {type(suite)}')

    with self:
        found = {}
        for configuration in configurations:
            # Query one partial configuration at a time; the context expands it
            # to every matching concrete configuration.
            matches = self.configuration_context.select_from_table_with_configurations(
                self.UploadsByConfiguration.__table_name__,
                configurations=[configuration], recent=recent,
                suite=suite, sdk=configuration.sdk,
                branch=branch or self.commit_context.DEFAULT_BRANCH_KEY,
                uuid__gte=CommitContext.convert_to_uuid(begin),
                uuid__lte=CommitContext.convert_to_uuid(end, CommitContext.timestamp_to_uuid()),
                limit=limit,
            )
            for config, values in matches.items():
                found[config] = [value.unpack() for value in values]
        return found
def _find_results(
    self, table, configurations, suite, recent=True,
    branch=None, begin=None, end=None,
    begin_query_time=None, end_query_time=None,
    limit=DEFAULT_LIMIT,
):
    """Query ``table`` for unpacked results, grouped by concrete configuration.

    Shared query core: expands each partial configuration, filters by suite,
    branch, commit-uuid bounds and wall-clock start-time bounds.

    Raises:
        TypeError: if suite is not a str.
    """
    if not isinstance(suite, str):
        raise TypeError(f'Expected type {str}, got {type(suite)}')

    def coerce_time(candidate):
        # Pass datetimes through untouched; any other truthy value is treated
        # as a unix timestamp. Falsy values mean "no bound".
        if isinstance(candidate, datetime):
            return candidate
        return datetime.utcfromtimestamp(int(candidate)) if candidate else None

    with self:
        results = {}
        for configuration in configurations:
            matches = self.configuration_context.select_from_table_with_configurations(
                table.__table_name__,
                configurations=[configuration], recent=recent,
                suite=suite, sdk=configuration.sdk,
                branch=branch or self.commit_context.DEFAULT_BRANCH_KEY,
                uuid__gte=CommitContext.convert_to_uuid(begin),
                uuid__lte=CommitContext.convert_to_uuid(end, CommitContext.timestamp_to_uuid()),
                start_time__gte=coerce_time(begin_query_time),
                start_time__lte=coerce_time(end_query_time),
                limit=limit,
            )
            for config, values in matches.items():
                results[config] = [value.unpack() for value in values]
        return results
def find_archive(
    self, configurations=None, suite=None, recent=True,
    branch=None, begin=None, end=None,
    begin_query_time=None, end_query_time=None,
    limit=DEFAULT_LIMIT,
):
    """Fetch archives referenced by ArchiveMetaDataByCommit rows.

    First gathers archive metadata per concrete configuration, enforces a soft
    memory cap on the combined archive sizes, then retrieves each archive from
    the archiver (caching by digest so a shared archive is fetched only once).

    Returns:
        dict mapping configuration -> list of dicts with keys
        ``archive``, ``digest``, ``uuid`` and ``start_time``.

    Raises:
        TypeError: if suite is not a str.
        RuntimeError: if the combined archive sizes exceed the memory cap.
    """
    configurations = configurations or []
    if not isinstance(suite, str):
        raise TypeError(f'Expected type {str}, got {type(suite)}')

    with self:
        metadata_by_config = {}
        for configuration in configurations:
            matches = self.configuration_context.select_from_table_with_configurations(
                self.ArchiveMetaDataByCommit.__table_name__,
                configurations=[configuration], recent=recent,
                suite=suite, sdk=configuration.sdk,
                branch=branch or self.commit_context.DEFAULT_BRANCH_KEY,
                uuid__gte=CommitContext.convert_to_uuid(begin),
                uuid__lte=CommitContext.convert_to_uuid(end, CommitContext.timestamp_to_uuid()),
                start_time__gte=_get_time(begin_query_time),
                start_time__lte=_get_time(end_query_time),
                limit=limit,
            )
            for config, values in matches.items():
                metadata_by_config[config] = [value.unpack() for value in values]

        # Soft memory cap: refuse to pull archives whose combined size would
        # blow past the limit; entries without a digest have nothing to fetch.
        total_size = 0
        for values in metadata_by_config.values():
            for value in values:
                if not value.get('digest'):
                    continue
                total_size += value.get('size', 0)
                if total_size > self.MEMORY_LIMIT:
                    raise RuntimeError('Hit soft-memory cap when fetching archives, aborting')

        cached_archives = {}
        result = {}
        for config, values in metadata_by_config.items():
            for value in values:
                digest = value.get('digest')
                if not digest:
                    continue
                if digest not in cached_archives:
                    retrieved = self.archiver.retrieve(digest, value.get('size', None))
                    if not retrieved:
                        # Archive missing from storage; skip this entry (a later
                        # entry with the same digest will retry the retrieval).
                        continue
                    cached_archives[digest] = retrieved
                archive = cached_archives[digest]
                # Rewind so every consumer of a shared archive reads from the start.
                archive.seek(0)
                result.setdefault(config, []).append(dict(
                    archive=archive,
                    digest=digest,
                    uuid=value['uuid'],
                    start_time=value['start_time'],
                ))
        return result
def _failures(
    self, all_table, unexpected_table, configurations, suite, recent=True,
    branch=None, begin=None, end=None, begin_query_time=None, end_query_time=None,
    unexpected=True, collapsed=True, limit=DEFAULT_LIMIT,
):
    """Collect failing tests for a suite across configurations.

    Queries ``unexpected_table`` when ``unexpected`` is True, otherwise
    ``all_table``. When ``collapsed`` is True the result is a flat set of test
    names; otherwise it is a dict mapping configuration -> list of unpacked
    per-run failure dicts. Returns None when the query matched no runs at all
    (distinct from matching runs that simply had no failures).

    Raises:
        TypeError: if suite is not a str.
    """
    table = unexpected_table if unexpected else all_table
    if not isinstance(suite, str):
        raise TypeError(
            f'Expected type {str} for suite, got {type(suite)}')

    def get_time(time):
        # Pass datetimes through; treat any other truthy value as a unix
        # timestamp. Falsy means "no bound".
        if isinstance(time, datetime):
            return time
        elif time:
            return datetime.utcfromtimestamp(int(time))
        return None

    with self:
        # Tracks whether ANY run matched the query, so "no runs" (-> None)
        # can be distinguished from "runs with no failures" (-> empty result).
        has_test_runs = False
        if collapsed:
            result = set()
        else:
            result = {}
        for configuration in configurations:
            for config, values in self.configuration_context.select_from_table_with_configurations(
                table.__table_name__,
                configurations=[configuration], recent=recent,
                suite=suite, sdk=configuration.sdk,
                branch=branch or self.commit_context.DEFAULT_BRANCH_KEY,
                uuid__gte=CommitContext.convert_to_uuid(begin),
                uuid__lte=CommitContext.convert_to_uuid(
                    end, CommitContext.timestamp_to_uuid()),
                start_time__gte=get_time(begin_query_time),
                start_time__lte=get_time(end_query_time),
                limit=limit,
            ).items():
                if collapsed:
                    # Collapsed: accumulate unique test names, skipping the
                    # bookkeeping keys that unpack() carries alongside them.
                    for value in values:
                        has_test_runs = True
                        for test in value.unpack():
                            if test not in ['uuid', 'start_time']:
                                result.add(test)
                else:
                    runs = []
                    for value in values:
                        has_test_runs = True
                        # NOTE(review): unpack() appears to include 'uuid' and
                        # 'start_time' (the collapsed branch above filters them
                        # out), so > 2 keys means real per-test failures are
                        # present — confirm against unpack()'s implementation.
                        unpacked = value.unpack()
                        if len(unpacked) > 2:
                            runs.append(unpacked)
                    if runs:
                        result.update({config: runs})
        return result if has_test_runs else None
def find_archive(
    self, configurations=None, suite=None, recent=True,
    branch=None, begin=None, end=None,
    begin_query_time=None, end_query_time=None,
    limit=DEFAULT_LIMIT,
):
    """Fetch archives and reconstruct each one from its stored chunks.

    Gathers archive metadata per concrete configuration, enforces a soft memory
    cap on the combined archive sizes, then reads each archive's chunk rows and
    stitches them back together, verifying both the total size and the MD5
    digest against the stored metadata.

    Returns:
        dict mapping configuration -> list of dicts with keys
        ``archive``, ``uuid`` and ``start_time``.

    Raises:
        TypeError: if suite is not a str.
        RuntimeError: if the memory cap is exceeded, or if a reconstructed
            archive fails its size/digest verification.
    """
    configurations = configurations or []
    if not isinstance(suite, str):
        raise TypeError(f'Expected type {str}, got {type(suite)}')

    with self:
        metadata_by_config = {}
        for configuration in configurations:
            matches = self.configuration_context.select_from_table_with_configurations(
                self.ArchiveMetaDataByCommit.__table_name__,
                configurations=[configuration], recent=recent,
                suite=suite, sdk=configuration.sdk,
                branch=branch or self.commit_context.DEFAULT_BRANCH_KEY,
                uuid__gte=CommitContext.convert_to_uuid(begin),
                uuid__lte=CommitContext.convert_to_uuid(end, CommitContext.timestamp_to_uuid()),
                start_time__gte=_get_time(begin_query_time),
                start_time__lte=_get_time(end_query_time),
                limit=limit,
            )
            for config, values in matches.items():
                metadata_by_config[config] = [value.unpack() for value in values]

        # Soft memory cap: abort before pulling chunks if the combined archive
        # sizes would exceed the limit; entries without a digest are skipped.
        total_size = 0
        for values in metadata_by_config.values():
            for value in values:
                if not value.get('digest'):
                    continue
                total_size += value.get('size', 0)
                if total_size > self.MEMORY_LIMIT:
                    raise RuntimeError('Hit soft-memory cap when fetching archives, aborting')

        result = {}
        for config, values in metadata_by_config.items():
            for value in values:
                expected_digest = value.get('digest')
                if not expected_digest:
                    continue
                expected_size = value.get('size', 0)
                chunks = self.cassandra.select_from_table(
                    self.ArchiveChunks.__table_name__,
                    digest=expected_digest,
                    # Enough rows to cover the full archive at CHUNK_SIZE bytes
                    # per chunk, plus one for any trailing partial chunk.
                    limit=1 + int(expected_size / self.CHUNK_SIZE),
                )
                if not chunks:
                    continue
                checksum = hashlib.md5()
                archive = io.BytesIO()
                reconstructed_size = 0
                for chunk_row in chunks:
                    reconstructed_size += len(chunk_row.chunk)
                    checksum.update(chunk_row.chunk)
                    archive.write(chunk_row.chunk)
                # Verify both length and digest before handing the archive out.
                if reconstructed_size != expected_size or expected_digest != checksum.hexdigest():
                    raise RuntimeError('Failed to reconstruct archive from chunks')
                archive.seek(0)
                result.setdefault(config, []).append(dict(
                    archive=archive,
                    uuid=value['uuid'],
                    start_time=value['start_time'],
                ))
        return result