Example #1
from __future__ import division, print_function

import sys

from configure import configure, mc_event_model, run_model
from plotting import plotEvents

GIGA = 1e9

modelNames = None
if len(sys.argv) > 1:
    modelNames = sys.argv[1].split(',')
model = configure(modelNames)

YEARS = list(range(model['start_year'], model['end_year'] + 1))

# Call the MC event model for an arbitrary year to discover the list of MC kinds
dataKinds = [key + ' MC' for key in mc_event_model(model, 2020).keys()]
dataKinds.append('Data')

eventsByYear = [[0 for _i in range(len(dataKinds))] for _j in YEARS]

for year in YEARS:
    eventsByYear[YEARS.index(year)][dataKinds.index('Data')] = run_model(model, year).events / GIGA
    mcEvents = mc_event_model(model, year)
    for mcKind, count in mcEvents.items():
        eventsByYear[YEARS.index(year)][dataKinds.index(mcKind + ' MC')] = count / GIGA

plotEvents(eventsByYear, name='Produced by Kind.png', title='Events produced by type', columns=dataKinds, index=YEARS)
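
# A minimal sketch (not part of this repository) of what a helper like
# plotEvents could look like, assuming pandas and matplotlib; the real
# plotting module is not shown here and may differ.  It illustrates the
# expected layout: one row per year, one column per data kind, values in
# billions of events.  The name plot_events_sketch is hypothetical.
import pandas as pd
import matplotlib.pyplot as plt

def plot_events_sketch(data, name, title, columns, index):
    frame = pd.DataFrame(data, columns=columns, index=index)
    ax = frame.plot.bar(stacked=True, title=title)
    ax.set_xlabel('Year')
    ax.set_ylabel('Events (billions)')
    ax.figure.savefig(name)
    plt.close(ax.figure)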
Example #2
hllhc_sim_time = {year: performance_by_year(model, year, 'GENSIM',
                                            data_type='mc', kind='2026')[0] +
                        performance_by_year(model, year, 'DIGI',
                                            data_type='mc', kind='2026')[0] +
                        performance_by_year(model, year, 'RECO',
                                            data_type='mc', kind='2026')[0] for year in YEARS}
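
# The code below also uses reco_time and lhc_sim_time, which are not defined
# in this excerpt.  Plausible definitions, assumed to follow the same pattern
# as hllhc_sim_time (per-event HS06*s: RECO for data, and GENSIM + DIGI + RECO
# for the Phase-1 '2017' MC kind used below):
reco_time = {year: performance_by_year(model, year, 'RECO',
                                       data_type='data')[0] for year in YEARS}
lhc_sim_time = {year: performance_by_year(model, year, 'GENSIM',
                                          data_type='mc', kind='2017')[0] +
                      performance_by_year(model, year, 'DIGI',
                                          data_type='mc', kind='2017')[0] +
                      performance_by_year(model, year, 'RECO',
                                          data_type='mc', kind='2017')[0] for year in YEARS}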

# general pattern:
# _required: HS06
# _time: HS06s

# CPU time requirement calculations, in HS06 * s
# Take the running time and event rate from the model

data_events = {i: run_model(model, i, data_type='data').events for i in YEARS}
lhc_mc_events = {i: mc_event_model(model, i)['2017'] for i in YEARS}
hllhc_mc_events = {i: mc_event_model(model, i)['2026'] for i in YEARS}

# Note: the quantity below is for prompt reconstruction only.
data_cpu_time = {i: data_events[i] * reco_time[i] for i in YEARS}
lhc_mc_cpu_time = {i: lhc_mc_events[i] * lhc_sim_time[i] for i in YEARS}
hllhc_mc_cpu_time = {i: hllhc_mc_events[i] * hllhc_sim_time[i] for i in YEARS}

# The data need to be reconstructed about as quickly as we record them.  In
# addition, we need to factor in express, repacking, AlCa, CAF
# functionality and skimming.  Presumably these all scale like the data.
# Per the latest CRSG document, these total to 123 kHS06 compared to 240
# kHS06 for the prompt reconstruction, which we can round to 50%, so
# multiply by 50%.  (Ignoring the 10 kHS06 needed for VO boxes, which
# won't scale up and is also pretty small.)
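
# Illustration only (the variable name is hypothetical): fold in the ~50%
# extra described above on top of the prompt-reconstruction CPU time.
data_cpu_time_with_extras = {i: data_cpu_time[i] * 1.5 for i in YEARS}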
Example #3
            ['disk_replicas'][tier])
    ]
    # Assume we have the highest number of versions in year 1, save n replicas of that
    tapeCopies[tier] = model['storage_model']['versions'][tier][0] * model[
        'storage_model']['tape_replicas'][tier]
    if not tapeCopies[tier]:
        tapeCopies[tier] = [0, 0, 0]

# Loop over years to determine how much is produced without versions or replicas
for year in YEARS:
    for tier in TIERS:
        if tier not in model['mc_only_tiers']:
            dummyCPU, tierSize = performance_by_year(model,
                                                     year,
                                                     tier,
                                                     data_type='data')
            dataProduced[year]['data'][tier] += tierSize * run_model(
                model, year, data_type='data').events
        if tier not in model['data_only_tiers']:
            mcEvents = mc_event_model(model, year)
            for kind, events in mcEvents.items():
                dummyCPU, tierSize = performance_by_year(model,
                                                         year,
                                                         tier,
                                                         data_type='mc',
                                                         kind=kind)
                dataProduced[year]['mc'][tier] += tierSize * events

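# Sum the data and MC contributions into a YEARS x TIERS table, in petabytes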
producedByTier = [[0 for _i in range(len(TIERS))] for _j in YEARS]
for year, dataDict in dataProduced.items():
    for dataType, tierDict in dataDict.items():
        for tier, size in tierDict.items():
            producedByTier[YEARS.index(year)][TIERS.index(tier)] += size / PETA
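
# Plausible next step, mirroring Example #1 (this call is assumed, not part
# of the excerpt): plot the per-tier totals.
plotEvents(producedByTier, name='Produced by Tier.png',
           title='Data produced by tier (PB)', columns=TIERS, index=YEARS)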