Code example #1
File: bpEndpoint.py  Project: vikramkapoor/map
def getDataSource(path):
    # Choose a data source implementation based on the file type
    logger.debug(f"open data source: {path}")
    if path.endswith("hdf5"):
        if BranchTrainingDatasource.can_read(path):
            return BranchTrainingDatasource(path)
        elif PeventDataSource.can_read(path):
            return PeventDataSource(path)
        else:
            raise ValueError(
                "unknown type of file, can't choose data source")
    else:
        return SpartaDataSource(path)
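A minimal usage sketch for the dispatcher above (the bpEndpoint import path and the file names are assumptions for illustration, not taken from the project):

# Hypothetical caller; getDataSource and the data source classes live in bpEndpoint.py
from bpEndpoint import getDataSource  # assumed import path

branch_or_pevent = getDataSource("example.hdf5")  # hdf5 input: BranchTrainingDatasource or PeventDataSource
sparta_stats = getDataSource("run.db")            # anything else falls through to SpartaDataSource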
Code example #2
File: bpEndpoint.py  Project: vikramkapoor/map
    def getSourceInformation(self, path):
        '''
        go look at the file type and get info about it, determine what can read
        it and then go cache the reader for it
        '''
        isHdf5File = path.endswith("hdf5")

        if isHdf5File:
            if BranchTrainingDatasource.can_read(path):
                return BranchTrainingDatasource.get_source_information(
                    path), "branch-predictor-training-trace"
            elif PeventDataSource.can_read(path):
                return PeventDataSource.get_source_information(
                    path), "pevent-trace"
            else:
                raise ValueError(
                    "unknown hdf5 type, pevent and branch training data sources cannot read this"
                )
        else:
            return SpartaDataSource.get_source_information(
                path), "sparta-statistics"
Code example #3
import sys
from os import path

import matplotlib.pyplot as plt

from plato.backend.units import Units
from plato.backend.datasources.branch_training_trace.datasource import BranchTrainingDatasource
from plato.backend.processors.branch_training_heatmap.adapter import BranchTrainingHeatMapAdapter
from plato.backend.processors.branch_training_heatmap.generator import BranchTrainingHeatMapGenerator


if len(sys.argv) == 1:
    filename = path.join(path.dirname(__file__),'test-branch-training-trace.hdf5')
else:
    filename = sys.argv[1]


def plot_heatmap(hm):
    plt.figure()
    plt.imshow(hm, cmap='hot', interpolation='nearest', aspect='auto')

# Load source data
branch_hm_data = BranchTrainingDatasource(filename)
print('\nstats', branch_hm_data.stats)
bphma = BranchTrainingHeatMapAdapter(branch_hm_data)
print('\nnum events', bphma.num_events)

bin_size = 200000


bphmg = BranchTrainingHeatMapGenerator(bphma, ['thrash_1', 'd_weight'], bin_size)

hm, table_means, row_means = bphmg.generate_2d_heatmap_with_profiles(0, len(branch_hm_data.ddf_branch_training_events)-1, Units.BRANCH_TRAINING_INDEX, 'd_weight')

print('max {} at {}'.format(hm.max(), hm.argmax()))
print('min {} at {}'.format(hm.min(), hm.argmin()))

hm, table_means, row_means = bphmg.generate_2d_heatmap_with_profiles(0, len(branch_hm_data.ddf_branch_training_events)-1, Units.BRANCH_TRAINING_INDEX, 'd_weight', allow_bins=False)
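The script defines plot_heatmap but never calls it; assuming hm is the 2-D array returned above, a short follow-up to actually render it could be:

# Render the most recently generated heatmap (assumes matplotlib has a GUI backend available)
plot_heatmap(hm)
plt.colorbar()
plt.title(f'd_weight heatmap, bin size {bin_size}')
plt.show()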
Code example #4
File: bpEndpoint.py  Project: vikramkapoor/map
    def loadDirectory(self, jsonData, returnValue):
        '''
        go scan a directory and return metadata + generated UUIDs
        '''
        returnValue['waitEstimate'] = 12345
        directory = Path(jsonData['directory']).resolve()
        returnValue['directory'] = str(directory)
        returnValue["result"] = "in-progress"
        hdf5GlobPattern = jsonData.get('globPattern', "*hdf5")
        dbGlobPattern = jsonData.get('globPattern', "*db")

        self.sendMessage(returnValue)

        # detect errors
        if not directory.is_dir():
            raise ValueError(f"cannot read directory: {str(directory)}")
        if not access(directory, os.R_OK):
            raise ValueError("cannot access directory")

        returnValue['subDirectories'] = [
            str(x) for x in Path(directory).iterdir() if x.is_dir()
        ]

        dbFiles = list(filter(os.path.isfile, directory.glob(dbGlobPattern)))

        hdf5Files = list(
            filter(os.path.isfile, directory.glob(hdf5GlobPattern)))

        returnValue['result'] = 'partial'
        returnValue['sources'] = []

        for i, currentFile in enumerate(hdf5Files + dbFiles):
            # need the canonical path of the file to generate a UUID
            relativePath = currentFile.relative_to(directory)
            currentFile = realpath(currentFile)

            isHdf5File = currentFile.endswith("hdf5")

            if isHdf5File:
                # two options, branch training or p-events
                if BranchTrainingDatasource.can_read(currentFile):
                    typeId = "branch-predictor-training-trace"
                elif PeventDataSource.can_read(currentFile):
                    typeId = "pevent-trace"
                else:
                    raise ValueError(
                        "unknown hdf5 type, pevent and branch training data sources cannot read this"
                    )
            else:
                typeId = "sparta-statistics"

            dataIdObj, created = DataId.objects.get_or_create(
                path=currentFile,
                defaults={'uuid': str(uuid5(uuid.NAMESPACE_URL, currentFile))})

            logger.debug(f"{currentFile} created? {created}: {dataIdObj}")
            newDict = {
                "name": str(relativePath),
                "typeId": typeId,
                "dataId": dataIdObj.uuid
            }

            returnValue['sources'].append(newDict)

            if i + 1 != len(hdf5Files + dbFiles):
                self.sendMessage(returnValue)

        returnValue['result'] = 'complete'
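loadDirectory keys each file by a deterministic UUID derived from its canonical path; a standalone sketch of that scheme, independent of the Django DataId model (the example path is hypothetical), is:

import uuid
from os.path import realpath

# uuid5 is deterministic: the same canonical path always yields the same UUID,
# so re-scanning a directory does not mint new IDs
canonical = realpath("test-branch-training-trace.hdf5")
data_id = str(uuid.uuid5(uuid.NAMESPACE_URL, canonical))
print(data_id)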
Code example #5
import sys
from os import path

sys.path.append(path.split(path.dirname(__file__))[0])

from plato.backend.units import Units
from plato.backend.datasources.branch_training_trace.datasource import BranchTrainingDatasource
from plato.backend.processors.branch_training_heatmap.adapter import BranchTrainingHeatMapAdapter
from plato.backend.processors.branch_training_heatmap.generator import BranchTrainingHeatMapGenerator

if len(sys.argv) == 1:
    filename = path.join(path.dirname(__file__),
                         'test-branch-training-trace.hdf5')
else:
    filename = sys.argv[1]

# Get extents:
cycles_range = []
for item in BranchTrainingDatasource.get_source_information(
        filename)['timeRange']:
    if item['units'] == Units.CYCLES:
        cycles_range = [item['first'], item['last']]

# Load source data
branch_hm_data = BranchTrainingDatasource(filename)
bphma = BranchTrainingHeatMapAdapter(branch_hm_data)

# Note: for a very large trace, bin size needs to go up. This is just a test size
bin_size = 10000
if bphma.num_events > 1000000:
    bin_size = 200000

bphmg = BranchTrainingHeatMapGenerator(bphma, ['thrash_1', 'd_weight'],
                                       bin_size)
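Example #5 stops after constructing the generator; a follow-up call mirroring example #3 might look like the sketch below (whether the generator also accepts Units.CYCLES with the cycles_range computed above is an assumption left unexercised here):

# Full-trace heatmap in branch-training-index units, as in example #3
hm, table_means, row_means = bphmg.generate_2d_heatmap_with_profiles(
    0, len(branch_hm_data.ddf_branch_training_events) - 1,
    Units.BRANCH_TRAINING_INDEX, 'd_weight')
print('heatmap shape', hm.shape)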
Code example #6
import sys
from os import path

sys.path.append(path.split(path.dirname(__file__))[0])

from plato.backend.units import Units
from plato.backend.datasources.branch_training_trace.datasource import BranchTrainingDatasource
from plato.backend.adapters.branch_training_trace.adapter import BranchTrainingTraceAdapter
from plato.backend.processors.branch_training_line_plot.generator import BranchTrainingLinePlotGenerator

if len(sys.argv) == 1:
    filename = path.join(path.dirname(__file__),
                         'test-branch-training-trace.hdf5')
else:
    filename = sys.argv[1]

# Load source data
branch_hm_data = BranchTrainingDatasource(filename)
print('List of stats are: ', branch_hm_data.stats)
print('List of stats (static) are: ',
      BranchTrainingDatasource.get_source_information(filename)['stats'])

# Constructor adapter + generator
bphma = BranchTrainingTraceAdapter(branch_hm_data)
bplpg = BranchTrainingLinePlotGenerator(bphma)

print('All tables thrashing')
kwargs = {
    "stat_cols": [
        "table[1].thrash_1", "table[2].thrash_1", "table[3].thrash_1",
        "table[4].thrash_1", "table[5].thrash_1", "table[6].thrash_1",
        "table[7].thrash_1", "table[8].thrash_1", "table[9].thrash_1",
        "table[10].thrash_1", "table[11].thrash_1", "table[12].thrash_1",