Example #1
## read the payload from the previous step and set up the statistics channel
payload_handler.load_payload(options.input)

named_pipe = payload_handler.payload['workflow']['NamedPipe']

stat_client = StatsPipeClient(named_pipe)

initial = payload_handler.payload['workflow']['dataset']

timing = {'stats': {'api': 'listFiles', 'query': initial}}

## last step (list all files in DBS3 below the 'initial' root)
with TimingStat(timing, stat_client) as timer:
  files = api.listFiles(dataset=initial, detail=True)

timer.update_stats({'server_request_timing': float(api.request_processing_time)/1000000.0,
                    'server_request_timestamp': api.request_time,
                    'request_content_length': api.content_length})

timer.stat_to_server()

print("Found %s files" % len(files))

## create one follow-up payload per file for the next step in the lifecycle
for this_file, interval in zip(files, increase_interval(0.0, 0.1)):
  p = payload_handler.clone_payload()
  p['workflow']['logical_file_name'] = this_file['logical_file_name']
  del p['workflow']['dataset']
  #p['workflow']['Intervals']['getFileParents'] += interval
  payload_handler.append_payload(p)
 
payload_handler.save_payload(options.output)
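Both examples pace their follow-up payloads with increase_interval, a helper provided by the surrounding lifecycle script and not shown above. A minimal sketch, assuming it is simply an unbounded arithmetic-progression generator (its name and signature are taken from the calls above, everything else is an assumption); in the snippets the interval is only consumed by the commented-out 'Intervals' adjustments, so the exact semantics do not affect the rest of the flow:

def increase_interval(start=0.0, step=0.1):
    """Yield start, start + step, start + 2*step, ... without end.

    zip() stops at the shorter iterable, so pairing this generator with the
    finite list of files or datasets terminates naturally.
    """
    value = start
    while True:
        yield value
        value += step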
Example #2
## query the matching datasets from DBS, timing the call
with TimingStat(timing, stat_client) as timer:
    if isinstance(initial, str):
        datasets = api.listDatasets(dataset=initial)
    else:
        datasets = api.listDatasets(**initial)
    
request_processing_time, request_time = api.requestTimingInfo
timer.update_stats({'server_request_timing': float(request_processing_time)/1000000.0,
                    'server_request_timestamp': float(request_time)/1000000.0,
                    'request_content_length': api.requestContentLength})

timer.stat_to_server()

# remove T0TEST datasets, since they are not analysed by users
# (list comprehensions instead of map/filter, so the result stays a list under Python 3)
datasets = [entry['dataset'] for entry in datasets]
datasets = [name for name in datasets if 'T0TEST' not in name]

# re-arrange the datasets to get a more realistic, chaotic access pattern
shuffle(datasets)   # shuffle comes from the random module (from random import shuffle)

print("Found %s datasets" % len(datasets))

## create one follow-up payload per dataset for the next step in the lifecycle
for dataset, interval in zip(datasets, increase_interval(start=0.0, step=0.2)):
  p = payload_handler.clone_payload()
  p['workflow']['dataset'] = dataset
  #p['workflow']['Intervals']['getPrimaryDatasetType'] += interval
  #p['workflow']['Intervals']['CrabWorkflow'] += interval
  payload_handler.append_payload(p)

payload_handler.save_payload(options.output)
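Both examples route their measurements through a TimingStat context manager and a StatsPipeClient bound to a named pipe, neither of which is shown above. A minimal, hypothetical sketch of how such a pair could fit together, inferred purely from the calls the examples make (update_stats, stat_to_server); the real classes in the lifecycle framework may look quite different, and every attribute, key, and method name below that does not appear in the examples is an assumption:

import json
import time

class TimingStatSketch(object):
    """Hypothetical stand-in for TimingStat, mirroring only the calls used above."""

    def __init__(self, stats, stat_client):
        self.stats = stats                # e.g. {'stats': {'api': ..., 'query': ...}}
        self.stat_client = stat_client    # e.g. a StatsPipeClient-like object

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # record the wall-clock time spent inside the 'with' block
        # ('client_request_timing' is an assumed key name)
        self.stats['stats']['client_request_timing'] = time.time() - self.start
        return False                      # never swallow exceptions from the block

    def update_stats(self, extra):
        # merge server-side timing details gathered after the call returned
        self.stats['stats'].update(extra)

    def stat_to_server(self):
        # hand the accumulated record to the pipe client for reporting;
        # 'send' is an assumed method name on the client
        self.stat_client.send(json.dumps(self.stats))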