Esempio n. 1
0
    def getDataset(self, **kwargs):
        '''Interactively select an analysis dataset and the number of LFNs.

        Prints the available datasets (optionally filtered through kwargs),
        lets the user pick one and enter how many files are needed, then
        shows the resulting subjob splitting.'''

        manager = SBDatasetManager.SBDatasetManager()

        def validateFilter(key, allowed):
            # Default the kwarg to every allowed value, coerce it to a list
            # and reject anything outside the allowed set.
            values = kwargs.get(key, allowed)
            if not isinstance(values, list):
                values = [values]
            kwargs[key] = values
            if not set(values).issubset(set(allowed)):
                raise GangaException('%s must be %s' % (key, allowed))

        validateFilter('status', ['open', 'closed'])
        validateFilter('session', ['analysis'])
        kwargs['files'] = 0

        dataset = manager.printDatasets(manager.getDataset(**kwargs))
        self.dataset_id = dataset['dataset_id']

        print('\nChosen dataset details:')
        manager.printDatasetDetail(dataset)

        print(
            '\nInsert the minimum number of files that you need for your analysis (zero for all):'
        )
        self.files_total = utils.getIndex(maxInclusive=int(dataset['files']))

        lfns = self.__getLFNs()

        # Aggregate statistics over the selected logical file names.
        tot_files = len(lfns)
        tot_size = sum(int(lfn['size']) for lfn in lfns)

        print('\nTotal job input size: ' +
              str(utils.sizeof_fmt_binary(tot_size)))
        print('Total number of involved lfns: ' + str(tot_files))

        print(
            '\nInsert the maximum number of files for each subjob. Remember:')
        print('- maximum output size is 2GiB.')
        print('- suggested maximum job duration 18h.')
        print('- maximum input size job is 10GiB.')

        self.files_per_subjobs = utils.getIndex(minInclusive=1,
                                                maxInclusive=tot_files)
        job = self.__createInputPath(lfns)

        print('\nSubjobs details:')
        print(utils.format_dict_table(job, ['id', 'list_path', 'size', 'lfns']))
Esempio n. 2
0
 def getDataset(self, **kwargs):
     '''Interactive method. It prints the datasets (the user can apply filters),
     the user chooses one of them and inserts the number of events he wants.

     Keyword arguments act as filters; 'status' and 'session' are validated
     against the allowed values below before querying.'''

     manager = SBDatasetManager.SBDatasetManager()

     def validateFilter(filter, allowed):
         # Default the filter to every allowed value, coerce it to a list
         # and reject anything outside the allowed set.
         kwargs[filter] = kwargs.get(filter, allowed)
         if not isinstance(kwargs[filter], list):
             kwargs[filter] = [kwargs[filter]]
         if not set(kwargs[filter]).issubset(set(allowed)):
             raise GangaException('%s must be %s' % (filter, allowed))

     validateFilter('status', ['open', 'closed'])
     validateFilter('session', ['fastsim', 'fullsim'])

     datasets = manager.getDataset(**kwargs)
     dataset = manager.printDatasets(datasets)

     self.dataset_id = dataset['dataset_id']

     print('\nChosen dataset details:')
     manager.printDatasetDetail(dataset)

     print('\nInsert the minimum number of events that you need for your analysis (zero for all):')
     self.events_total = utils.getIndex(maxInclusive=int(dataset['parameters']['evt_tot']))

     # evt_file: number of events stored in each LFN (dataset metadata)
     lfns = self.__getLFNs(dataset['parameters']['evt_file'])

     tot_size = 0
     tot_files = len(lfns)
     tot_events = int(dataset['parameters']['evt_file']) * tot_files

     for lfn in lfns:
         tot_size += int(lfn['size'])

     print('\nTotal job input size: ' + str(utils.sizeof_fmt_binary(tot_size)))
     print('Total selected number of events: ' + str(utils.sizeof_fmt_decimal(tot_events)))
     print('Total number of involved lfns: ' + str(tot_files))

     print('\nInsert the maximum number of events for each subjob. Remember:')
     print('- maximum output size is 2GiB.')
     print('- suggested maximum job duration 18h.')
     print('- maximum input size job is 10GiB.')
     print('- at least %s (that is the number of events of one file).' % dataset['parameters']['evt_file'])

     self.events_per_subjobs = utils.getIndex(minInclusive=int(dataset['parameters']['evt_file']), maxInclusive=tot_events)
     job = self.__createInputPath(lfns, dataset['parameters']['evt_file'])

     print('\nSubjobs details:')
     column_names = ['id', 'list_path', 'size', 'events', 'lfns']
     print(utils.format_dict_table(job, column_names))
Esempio n. 3
0
 def asksParameter(parameter):
     '''Interactive method requesting user the value of each parameter 
     per session (FastSim, FullSim, Analysis).

     parameter: dict with keys 'name', 'label', 'customValue' (bool) and
     'values' (list of predefined choices).  Returns the chosen value and
     also stores it into new_dataset['parameters'] (enclosing-scope dict).'''
     if parameter['customValue'] and len(parameter['values']) == 0:
         # free-form parameter: no predefined choices, ask directly
         value = raw_input('\nEnter %s: ' % parameter['label'])
     elif not parameter['customValue'] and len(parameter['values']) == 0:
         raise GangaException('Invalid rule (customValue:False and values=0).')
     else:
         table = list()
         
         # build one selectable row per predefined value
         i = 0
         for value in parameter['values']:
             table.append({'id': i, 'value': value})
             i += 1
         
         if parameter['customValue']:
             # extra last row lets the user type a value not in the list
             table.append({'id': i, 'value': 'Enter a custom value'})
         
         print('\nChoose %s:' % parameter['label'])
         column_names = ('id', 'value')
         print(utils.format_dict_table(table, column_names))
         index = utils.getIndex(maxExclusive=len(table))
         
         if parameter['customValue'] and index == len(table)-1:
             value = raw_input('Custom value: ')
         else:
             value = table[index]['value']
     
     # parameter insertion in dictionary. It will be subsequently 
     # inserted into dataset analysis bookkeeping table, hstore field
     new_dataset['parameters'][parameter['name']] = value
     
     return value
Esempio n. 4
0
        def asksParameter(parameter):
            '''Ask the user for the value of one session parameter
            (FastSim, FullSim, Analysis).

            Stores the answer in new_dataset['parameters'] under the
            parameter name and returns it.'''
            values = parameter['values']
            if parameter['customValue'] and len(values) == 0:
                # free-form parameter: no predefined choices, ask directly
                value = raw_input('\nEnter %s: ' % parameter['label'])
            elif not parameter['customValue'] and len(values) == 0:
                raise GangaException(
                    'Invalid rule (customValue:False and values=0).')
            else:
                # one selectable row per predefined value
                table = [{'id': row, 'value': choice}
                         for row, choice in enumerate(values)]

                if parameter['customValue']:
                    # extra last row lets the user type a custom value
                    table.append({'id': len(values),
                                  'value': 'Enter a custom value'})

                print('\nChoose %s:' % parameter['label'])
                print(utils.format_dict_table(table, ('id', 'value')))
                index = utils.getIndex(maxExclusive=len(table))

                if parameter['customValue'] and index == len(table) - 1:
                    value = raw_input('Custom value: ')
                else:
                    value = table[index]['value']

            # Record the answer for the dataset bookkeeping table
            # (hstore field) written later on.
            new_dataset['parameters'][parameter['name']] = value

            return value
Esempio n. 5
0
def baseline(timestamps):
    """
    Implements Baseline algorithm.

    Greedily picks the node with the highest interaction density
    (interactions per unit of time span), fixes its activity interval to
    the full span of its interactions, removes all interactions touching
    it, and repeats until no interactions remain.

    Parameters
    ----------
    timestamps : list of tuples
        Sorted list of interactions [(t1, n1, n2), (t2, n3, n4),...], where t
        is a timestamp and n1, n2 are the interacting nodes (sorted
        lexicographically inside each tuple).

    Returns
    -------
    tuple of dicts
        (starts, ends): starts maps each chosen node to the beginning of its
        activity interval, ends maps it to the end of that interval.
    """
    starts, ends = {}, {}
    while timestamps:
        nodeEdgeIndex = utils.getIndex(timestamps)

        # density of each node: number of interactions over time span
        gain = {}
        for node, tstmps in nodeEdgeIndex.items():
            gain[node] = np.float64(len(tstmps)) / (tstmps[-1][0] - tstmps[0][0])

        # node with the largest gain (last entry after a stable sort)
        best_node, _ = sorted(gain.items(), key=operator.itemgetter(1))[-1]

        times = sorted(entry[0] for entry in nodeEdgeIndex[best_node])
        starts[best_node], ends[best_node] = times[0], times[-1]

        # drop every interaction that involves the chosen node
        timestamps = [x for x in timestamps
                      if best_node not in (x[1], x[2])]
    return starts, ends
Esempio n. 6
0
def getDH():
    # Diffie-Hellman key exchange demo: pick generator g and prime p from a
    # small pool, secret exponents a and b from a larger pool, then derive
    # the public values A, B and the shared keys K1, K2 (mathematically
    # K1 == K2 == g^(a*b) mod p).
    global g, p, a, b, A, B, K1, K2
    simpes = utils.getPrime(0)
    arr = utils.getIndex(simpes, 0)
    # NOTE(review): randint(1, len(arr) - 1) never selects index 0 and
    # raises if arr has fewer than 2 elements -- confirm this is intended.
    i = random.randint(1, len(arr) - 1)
    g = arr[i]
    arr.pop(i)  # remove g so p cannot equal g
    p = arr[random.randint(1, len(arr) - 1)]

    simpes = utils.getPrime(4096)
    arr = utils.getIndex(simpes, 4096)
    index = random.randint(1, len(arr) - 1)
    a = arr[index]
    arr.pop(index)  # remove a so b cannot equal a
    b = arr[random.randint(1, len(arr) - 1)]
    A = pow(g, a, p)   # public value of party A
    B = pow(g, b, p)   # public value of party B
    K1 = pow(B, a, p)  # shared secret as computed by A
    K2 = pow(A, b, p)  # shared secret as computed by B (equals K1)
    return g, p, a, b, A, B, K1, K2
Esempio n. 7
0
def index(request):
    '''Render the home page (books/index.html).'''

    # Build the template context; set_expiry(0) makes the session expire
    # when the user's browser is closed.
    request.session.set_expiry(0)
    context = utils.getIndex()
    # Log the visiting user (or 'anonymous' when not logged in).
    if 'username' in request.session:
        logger.info(request.session["username"])
    else:
        logger.info('anonymous')
    print(context)
    return render(request, 'books/index.html', context)
Esempio n. 8
0
def getRSA():
    """Generate an RSA key pair from two random primes.

    Picks p and q from the prime pool, computes the modulus n = p * q and
    predF = (p - 1) * (q - 1) (Euler's totient for distinct primes), then
    keeps drawing a public exponent e until it has a modular inverse d
    modulo predF (i.e. until gcd(e, predF) == 1).

    Returns
    -------
    tuple
        (d, e, n, predF): private exponent, public exponent, modulus,
        totient value used.
    """
    simples = utils.getPrime(1048576)
    arr = utils.getIndex(simples, 1048576)
    # NOTE(review): p and q may come out equal since both draws use the
    # same pool without removal -- confirm whether that is acceptable.
    p = arr[random.randint(1, len(arr) - 1)]
    q = arr[random.randint(1, len(arr) - 1)]
    n = p * q
    predF = (p - 1) * (q - 1)
    d = 0
    while d == 0:
        e = random.randint(1, predF)
        try:
            d = mod_inverse(e, predF)
        except ValueError:
            # e is not coprime with predF: no inverse exists, retry.
            # (Previously a bare `except:` which also swallowed
            # KeyboardInterrupt and programming errors.)
            d = 0

    d = int(d)
    return d, e, n, predF
def runInner(timestamps, max_iter=10):
    """
    Implements Inner algorithm

    Parameters
    ----------
    timestamps : list of tuples
        Sorted list of interactions [(t1, n1, n2), (t2, n3, n4),...], where t is timestamp, n1 and n2 are interacting nodes.
        Nodes in the interactions are sorted lexicographically.
    max_iter : int
        maximum number of refinement iterations

    Returns
    -------
    tuple of dicts
        (dict1, dict2): dict1 of a shape {n1: t1, n2: t2, ..} contains starting point t1 of activity interval of node n1, for every node,
                        dict2 of a shape {n1: t1, n2: t2, ..} contains ending point t1 of activity interval of node n1, for every node.

    """

    # initial solution
    nodeEdgeIndex = utils.getIndex(timestamps, 'inner')
    m = getInitial(nodeEdgeIndex)
    alpha, alphaNodes = getAlphas(timestamps, m)
    Xstart, Xend = getSolution(alphaNodes, m)

    best = utils.getCost(Xstart, Xend)
    bestSol = (Xstart, Xend)

    # Iterative refinement: feed the previous solution back in to build the
    # next input; stop at the first iteration that does not improve cost.
    # NOTE: xrange is Python 2 only.
    for k in xrange(1, max_iter):
        m = getNextInput(nodeEdgeIndex, Xstart, Xend)
        alpha, alphaNodes = getAlphas(timestamps, m)
        Xstart, Xend = getSolution(alphaNodes, m)

        t = utils.getCost(Xstart, Xend)
        if t < best:
            best = t
            bestSol = (Xstart, Xend)
        else:
            break

    return bestSol[0], bestSol[1]
def runBudget(timestamps, maxiter=10):
    """
    Implements Budget algorithm.

    Binary-searches a per-node budget fraction k in (1/maxb, 1.0], where
    maxb is the total time span of the input, keeping the smallest budget
    whose solution still covers all interactions.

    Parameters
    ----------
    timestamps : list of tuples
        Sorted list of interactions [(t1, n1, n2), (t2, n3, n4),...], where t
        is a timestamp and n1, n2 are the interacting nodes (sorted
        lexicographically inside each tuple).
    maxiter : int
        maximum number of binary-search iterations

    Returns
    -------
    tuple of dicts
        (starts, ends): per-node beginning and end of the activity interval,
        for every node.
    """

    nodeEdgeIndex = utils.getIndex(timestamps, 'budget')
    maxb = timestamps[-1][0] - timestamps[0][0]
    klow = 1. / maxb
    kup = 1.0

    # start from the full budget, which is always feasible
    LXstart, LXend = budgetAlgorithm(
        timestamps, nodeEdgeIndex, {n: maxb for n in nodeEdgeIndex})

    # stop once the search interval is narrower than one time unit fraction
    alpha = 1.0 / maxb
    iteration = 1
    while (kup - klow) > alpha and iteration < maxiter:
        k = (kup + klow) / 2
        budgets = {n: maxb * k for n in nodeEdgeIndex}
        Xstart, Xend = budgetAlgorithm(timestamps, nodeEdgeIndex, budgets)
        iteration += 1
        if utils.checkCoverage(Xstart, Xend, timestamps):
            # some interactions left uncovered: budget too small
            klow = k
        else:
            # feasible: remember it and try a smaller budget
            kup = k
            LXstart, LXend = Xstart, Xend
    return LXstart, LXend
Esempio n. 11
0
    def setSoftwareVersion(self):
        '''Set software version interactively.

        Called when interactive() cannot detect the version automatically,
        or directly by the user to set it manually.  Fills self.session and
        self.soft_version from the chosen database row.'''

        results = db.read('''SELECT session_name, soft_version
            FROM session_site_soft
            GROUP BY session_name, soft_version
            ORDER BY session_name, soft_version''')

        # assign a selectable id to every row
        for position, result in enumerate(results):
            result['id'] = position

        print('Choose simulation type:')
        print(utils.format_dict_table(
            results, ('id', 'session_name', 'soft_version')))

        choice = utils.getIndex(maxExclusive=len(results))
        chosen = results[choice]

        self.session = chosen['session_name']
        self.soft_version = chosen['soft_version']
Esempio n. 12
0
 def setSoftwareVersion(self):
     '''Set software version. This function is called when interactive() 
     cannot find it automatically or is called by user if prefer set it 
     manually.

     Fills self.session and self.soft_version from the chosen row.'''

     results = db.read('''SELECT session_name, soft_version
         FROM session_site_soft
         GROUP BY session_name, soft_version
         ORDER BY session_name, soft_version''')

     # assign a selectable id to every row
     i = 0
     for result in results:
         result['id'] = i
         i += 1

     print('Choose simulation type:')
     column_names = ('id', 'session_name', 'soft_version')
     print(utils.format_dict_table(results, column_names))

     index = utils.getIndex(maxExclusive=len(results))

     self.session = results[index]['session_name']
     self.soft_version = results[index]['soft_version']
        default=0.5,
        type=float)
    parser.add_argument("--nnodes",
                        help="number of nodes in the graph",
                        default=100,
                        type=int)
    args = parser.parse_args()

    event_length = args.intlen
    overlap = args.overlap
    num_nodes = args.nnodes

    G = utils.generateGraph(n=num_nodes)
    timestamps, active_truth = utils.generateIntervals(
        G, event_length=event_length, overlap=overlap)
    nodeEdgeIndex = utils.getIndex(timestamps, 'inner')

    Cost_IP, P_IP, R_IP, F_IP, Costmax_IP = [], [], [], [], []

    iterations = range(1, 11)

    for iter in iterations:
        if iter == 1:
            m = inner.getInitial(nodeEdgeIndex)
            Xstart, Xend = inner.runInner_iteration(timestamps, nodeEdgeIndex,
                                                    m)
        else:
            m = inner.getNextInput(nodeEdgeIndex, Xstart, Xend)
            Xstart, Xend = inner.runInner_iteration(timestamps, nodeEdgeIndex,
                                                    m)
Esempio n. 14
0
    def printDatasets(self, datasets):
        ''' Given the heterogeneous dataset list, the method splits it in 
        categories and build the table per session. A unique id all over 
        the sessions permit the user to select univocally a dataset. All 
        the metadata will be printed with the parent chain: a parent dataset
        is defined as the one used as input to create the child one. The 
        method is public but not exported in GPI.

        datasets: list of dataset dicts (each with at least 'session' and
        'dataset_id' keys).  Returns the dataset dict chosen by the user.'''

        # check the term width: the widest table needs 160 columns
        (width, height) = utils.getTerminalSize()
        if width < 160:
            logger.error(
                "Your terminal's width is %d; must be at least 160\nYou can make your font size smaller"
                % width)
            raise utils.QuitException()

        # better a named tuple but available in Python 2.6 only
        # fullsim: the table elements should be common to all the dataset keys in the list
        # print table rules:
        grouped_datasets = [
            # 0
            {
                'title':
                'Fastsim Official Production',
                'dataset':
                list(),
                'order_by': [
                    'prod_series', 'analysis', 'generator', 'dg', 'bkg_mixing',
                    'analysis_type'
                ],
                'columns': [
                    'id', 'prod_series', 'analysis', 'generator', 'dg',
                    'bkg_mixing', 'analysis_type', 'status'
                ]
            },
            # 1
            {
                'title':
                'Fastsim Personal Production',
                'dataset':
                list(),
                'order_by': [
                    'free_string', 'analysis', 'generator', 'dg', 'bkg_mixing',
                    'analysis_type'
                ],
                'columns': [
                    'id', 'free_string', 'analysis', 'generator', 'dg',
                    'bkg_mixing', 'analysis_type', 'status'
                ],
            },
            # 2
            {
                'title':
                'Fullsim Official Production',
                'dataset':
                list(),
                'order_by': [
                    'prod_series', 'simtype', 'generator', 'dg', 'pl', 'g4ver',
                    'opt_photons'
                ],
                'columns': [
                    'id', 'prod_series', 'simtype', 'generator', 'dg', 'pl',
                    'g4ver', 'opt_photons', 'status'
                ]
            },
            # 3
            {
                'title':
                'Fullsim Personal Production',
                'dataset':
                list(),
                'order_by': [
                    'free_string', 'generator', 'dg', 'pl', 'g4ver',
                    'opt_photons'
                ],
                'columns': [
                    'id', 'free_string', 'generator', 'dg', 'pl', 'g4ver',
                    'opt_photons', 'status'
                ]
            },
            # 4
            {
                'title': 'Analysis',
                'dataset': list(),
                'order_by': ['free_string', 'creation_date'],
                'columns': ['id', 'free_string', 'creation_date', 'status']
            }
        ]

        for dataset in datasets:
            # put sub dictionary elements to level zero dictionary
            # NOTE: types.DictType and iteritems() are Python 2 only
            for key, value in dataset.items():
                if type(dataset[key]) is types.DictType:
                    for key1, value1 in dataset[key].iteritems():
                        dataset[key1] = value1
                    #del dataset[key]

            # dataset selection: route into the proper print group
            if dataset['session'] == 'fastsim':
                if dataset['owner'] == 'Official':
                    grouped_datasets[0]['dataset'].append(dataset)
                else:
                    grouped_datasets[1]['dataset'].append(dataset)
            elif dataset['session'] == 'fullsim':
                if dataset['owner'] == 'Official':
                    grouped_datasets[2]['dataset'].append(dataset)
                else:
                    grouped_datasets[3]['dataset'].append(dataset)
            elif dataset['session'] == 'analysis':
                grouped_datasets[4]['dataset'].append(dataset)
            else:
                raise GangaException('session not recognized: %s' %
                                     dataset['session'])

        # running id, unique across all groups so one number selects a dataset
        i = 0

        # field sort, adding id and print
        for group in grouped_datasets:
            if len(group['dataset']) > 0:
                print('\n%s' % group['title'])

                # dictionary sorting: build a space-joined string of the
                # 'order_by' fields and sort lexicographically on it
                group['dataset'] = sorted(
                    group['dataset'],
                    key=lambda elem: ('%s ' * len(group['order_by'])) % tuple(
                        [elem[d] for d in group['order_by']]))

                # id adding
                for dataset in group['dataset']:
                    dataset['id'] = i
                    i += 1

                print(
                    utils.format_dict_table(group['dataset'],
                                            group['columns']))

        # ask for input and print dataset details
        if i == 1:
            index = 0
            print('\nAutomatically selected the only entry')
        else:
            print('\nChoose the dataset:')
            index = utils.getIndex(maxExclusive=i)

        # Object oriented solution to investigate and/or binary search
        # datasets have been grouped per print rules
        for group in grouped_datasets:
            for d in group['dataset']:
                if d['id'] == index:
                    for dataset in datasets:
                        if dataset['dataset_id'] == d['dataset_id']:
                            return dataset
def runBudget_fixed_budget(timestamps, b=None):
    """Run the budget algorithm once with a fixed per-node budget.

    Parameters
    ----------
    timestamps : list of tuples
        Sorted list of interactions [(t1, n1, n2), ...] as elsewhere in
        this module.
    b : dict, optional
        Per-node budget mapping.  When omitted (or empty) every node gets
        the full time span of the input as budget.

    Returns
    -------
    tuple of dicts
        (starts, ends): per-node beginning and end of the activity interval.
    """
    # b=None instead of the previous mutable default b={}: a shared dict
    # default is evaluated once at definition time and is a classic pitfall.
    # `if not b` keeps the old behavior for callers passing {} explicitly.
    nodeEdgeIndex = utils.getIndex(timestamps, 'budget')
    if not b:
        b = {n: (timestamps[-1][0] - timestamps[0][0]) for n in nodeEdgeIndex}
    Xstart, Xend = budgetAlgorithm(timestamps, nodeEdgeIndex, b)
    return Xstart, Xend
Esempio n. 16
0
 def createDataset(self):
     '''Interactive method to guide the user in dataset creation procedure.
     If the dataset is a 'personal production' type, force user to provide 
     a filter key.

     On confirmation the new dataset and its site relation are written to
     the analysis_dataset and analysis_dataset_site tables.'''
     
     def asksParameter(parameter):
         '''Interactive method requesting user the value of each parameter 
         per session (FastSim, FullSim, Analysis).

         Stores the answer in new_dataset['parameters'] and returns it.'''
         if parameter['customValue'] and len(parameter['values']) == 0:
             # free-form parameter: no predefined choices, ask directly
             value = raw_input('\nEnter %s: ' % parameter['label'])
         elif not parameter['customValue'] and len(parameter['values']) == 0:
             raise GangaException('Invalid rule (customValue:False and values=0).')
         else:
             table = list()
             
             i = 0
             for value in parameter['values']:
                 table.append({'id': i, 'value': value})
                 i += 1
             
             if parameter['customValue']:
                 # extra last row lets the user type a value not in the list
                 table.append({'id': i, 'value': 'Enter a custom value'})
             
             print('\nChoose %s:' % parameter['label'])
             column_names = ('id', 'value')
             print(utils.format_dict_table(table, column_names))
             index = utils.getIndex(maxExclusive=len(table))
             
             if parameter['customValue'] and index == len(table)-1:
                 value = raw_input('Custom value: ')
             else:
                 value = table[index]['value']
         
         # parameter insertion in dictionary. It will be subsequently 
         # inserted into dataset analysis bookkeeping table, hstore field
         new_dataset['parameters'][parameter['name']] = value
         
         return value
     
     
     # NOTE(review): `type` shadows the builtin; left unchanged here.
     type = [
         dict(id = 0, dataset_type = 'FastSim Personal Production'),
         dict(id = 1, dataset_type = 'FullSim Personal Production'),
         dict(id = 2, dataset_type = 'Analysis'),
         ]
     
     column_names = ('id', 'dataset_type')
     print(utils.format_dict_table(type, column_names))
     index = utils.getIndex(maxExclusive=len(type))
     
     new_dataset = dict()
     new_dataset['parameters'] = dict()
     
     ####################
     # FAST Simulation session
     ####################
     # parameter check: mandatory, free string param management
     # TODO: parameter type check, evaluate the config file option to store parameters
     
     if index == 0:
         new_dataset['session'] = 'fastsim'
         
         # NOTE(review): "DstD0ToXLL" appears twice in the analysis values
         # below -- confirm whether the duplicate is intended.
         parameters = [
             {"name": "evt_file", "label": "Events per file", "customValue": True, "values": []},
             {"name": "analysis", "label": "Analysis", "customValue": True, "values": ["BtoKNuNu", "BtoKstarNuNu", "DstD0ToXLL", "DstD0ToXLL", "Generics", "HadRecoilCocktail", "KplusNuNu", "SLRecoilCocktail", "tau->3mu"]},
             {"name": "dg", "label": "Geometry", "customValue": True, "values": ["DG_4", "DG_4a", "DG_BaBar"]},
             {"name": "generator", "label": "Generator", "customValue": True, "values": ["B0B0bar_Btag-HD_Cocktail", "B0B0bar_Btag-SL_e_mu_tau_Bsig-HD_SL_Cocktail", "B0B0bar_generic", "B0B0bar_K0nunu", "B0B0bar_K0nunu_SL_e_mu_tau", "B0B0bar_Kstar0nunu_Kpi", "B0B0bar_Kstar0nunu_Kpi_SL_e_mu_tau", "B+B-_Btag-HD_Cocktail", "B+B-_Btag-SL_e_mu_tau_Bsig-HD_SL_Cocktail", "B+B-_generic", "B+B-_K+nunu", "B+B-_K+nunu_SL_e_mu_tau", "B+B-_Kstar+nunu", "B+B-_Kstar+nunu_SL_e_mu_tau", "B+B-_taunu_SL_e_mu_tau", "bhabha_bhwide", "ccbar", "tau+tau-_kk2f", "uds", "udsc", "Upsilon4S_generic"]},
             {"name": "bkg_mixing", "label": "Background Mixing Type", "customValue": True, "values": ["All", "NoPair", "NoMixing"]},
             {"name": "analysis_type", "label": "Analysis Type", "customValue": True, "values": ["BtoKNuNu", "BtoKstarNuNu", "HadRecoil", "SemiLepKplusNuNu"]}
         ]
         
         for parameter in parameters:
             asksParameter(parameter)
     
     ####################
     # FULL Simulation session
     ####################
     elif index == 1:
         new_dataset['session'] = 'fullsim'
         
         parameters = [
             {"name": "evt_file", "label": "Events per file", "customValue": True, "values": []},
             {"name": "sim_type", "label": "Simulation Type", "customValue": False, "values": ["fullsim", "background_frame"]},
             {"name": "generator", "label": "Generator", "customValue": False, "values": ["RadBhaBha", "singleparticle"]},
             {"name": "dg", "label": "Geometry", "customValue": True, "values": ["Geometry_CIPE", "Geometry_CIPE_BGO", "Geometry_CIPE_CSI", "Geometry_CIPE_V00-00-02"]},
             {"name": "pl", "label": "Physics list", "customValue": True, "values": ["QGSP", "QGSP_BERT", "QGSP_BERT_HP"]},
             {"name": "g4ver", "label": "Geant 4 version", "customValue": True, "values": ["9.2", "9.3"]},
             {"name": "opt_photons", "label": "Optical Photons", "customValue": False, "values": ["OFF", "ON"]}
         ]
         # extra parameter sets appended depending on the chosen generator
         radbhabha = [
             {"name": "brunobbbminde", "label": "Min. Delta E", "customValue": True, "values": []}
         ]
         singleParticle = [
             {"name": "brunopdg", "label": "PDG Code", "customValue": True, "values": []},
             {"name": "brunothetamin", "label": "Theta min.", "customValue": True, "values": []},
             {"name": "brunothetamax", "label": "Theta max.", "customValue": True, "values": []},
             {"name": "brunophimin", "label": "Phi min.", "customValue": True, "values": []},
             {"name": "brunophimax", "label": "Phi max.", "customValue": True, "values": []},
             {"name": "brunoemin", "label": "Energy (GeV) min.", "customValue": True, "values": []},
             {"name": "brunoemax", "label": "Energy (GeV) max.", "customValue": True, "values": []}
         ]
         
         for parameter in parameters:
             value = asksParameter(parameter)
             
             # parameter dependencies management: extending the list while
             # iterating is intentional -- the appended dependent parameters
             # are picked up by this same loop.
             if parameter['name'] == 'generator':
                 if value == 'singleparticle':
                     parameters.extend(singleParticle)
                 elif value == 'RadBhaBha':
                     parameters.extend(radbhabha)
     
     ####################
     # ANALYSIS session
     ####################
     elif index == 2:
         new_dataset['session'] = 'analysis'
     else:
         raise GangaException('Invalid selection.')
     
     
     # free string: user-provided label, limited to 128 characters
     while True:
         free_string = raw_input('\nEnter free string: ')
         max_length = 128
         
         if len(free_string) <= max_length:
             new_dataset['parameters']['free_string'] = free_string
             break
         else:
             print('Free string must be <= %d char long.' % max_length)
     
     # dataset-site relation set
     new_dataset['site'] = getConfig('SuperB')['submission_site']
     new_dataset['owner'] = utils.getOwner()
     new_dataset['dataset_id'] = str(objectid.ObjectId())
     
     print('\nNew dataset details:')
     self.printDatasetDetail(new_dataset)
     
     
     # explicit confirmation before touching the database
     value = ''
     while True:
         value = raw_input('Type \'yes\' to confirm the dataset creation or (q)uit: ')
         if value == 'yes':
             break
         elif value == 'q':
             raise utils.QuitException()
     
     sql = '''INSERT INTO analysis_dataset
         (owner, dataset_id, session, parameters, status)
         VALUES (%s, decode(%s, 'hex'), %s, %s, 'prepared');
         
         INSERT INTO analysis_dataset_site
         (dataset_id, site)
         VALUES (decode(%s, 'hex'), %s);'''
     params = (new_dataset['owner'], 
         new_dataset['dataset_id'], 
         new_dataset['session'], 
         new_dataset['parameters'],
         new_dataset['dataset_id'],
         new_dataset['site'])
     db.write(sql, params)
Esempio n. 17
0
 def printDatasets(self, datasets):
     ''' Given the heterogeneous dataset list, the method splits it in 
     categories and build the table per session. A unique id all over 
     the sessions permit the user to select univocally a dataset. All 
     the metadata will be printed with the parent chain: a parent dataset
     is defined as the one used as input to create the child one. The 
     method is public but not exported in GPI'''
     
     # check the term width
     (width, height) = utils.getTerminalSize()
     if width < 160:
         logger.error("Your terminal's width is %d; must be at least 160\nYou can make your font size smaller" % width)
         raise utils.QuitException()
     
     # better a named tuple but available in Python 2.6 only
     # fullsim: the table elements should be common to all the dataset keys in the list
     # print table rules:
     grouped_datasets = [
         # 0
         {'title': 'Fastsim Official Production',
          'dataset': list(),
          'order_by': ['prod_series', 'analysis', 'generator', 'dg', 'bkg_mixing', 'analysis_type'],
          'columns': ['id', 'prod_series', 'analysis', 'generator', 'dg', 'bkg_mixing', 'analysis_type', 'status']
          },
         # 1
         {'title': 'Fastsim Personal Production',
          'dataset': list(),
          'order_by': ['free_string', 'analysis', 'generator', 'dg', 'bkg_mixing', 'analysis_type'],
          'columns': ['id', 'free_string', 'analysis', 'generator', 'dg', 'bkg_mixing', 'analysis_type', 'status'],
          },
         # 2
         {'title': 'Fullsim Official Production',
          'dataset': list(),
          'order_by': ['prod_series', 'simtype', 'generator', 'dg', 'pl', 'g4ver', 'opt_photons'],
          'columns': ['id', 'prod_series', 'simtype', 'generator', 'dg', 'pl', 'g4ver', 'opt_photons', 'status']
          },
         # 3
         {'title': 'Fullsim Personal Production',
          'dataset': list(),
          'order_by': ['free_string', 'generator', 'dg', 'pl', 'g4ver', 'opt_photons'],
          'columns': ['id', 'free_string', 'generator', 'dg', 'pl', 'g4ver', 'opt_photons', 'status']
          },
         # 4
         {'title': 'Analysis',
          'dataset': list(),
          'order_by': ['free_string', 'creation_date'],
          'columns': ['id', 'free_string', 'creation_date', 'status']
          }
     ]
     
     for dataset in datasets:
         # put sub dictionary elements to level zero dictionary 
         for key, value in dataset.items():
             if type(dataset[key]) is types.DictType:
                 for key1, value1 in dataset[key].iteritems():
                     dataset[key1] = value1
                 #del dataset[key]
         
         # dataset selection
         if dataset['session'] == 'fastsim':
             if dataset['owner'] == 'Official':
                 grouped_datasets[0]['dataset'].append(dataset)
             else:
                 grouped_datasets[1]['dataset'].append(dataset)
         elif dataset['session'] == 'fullsim':
             if dataset['owner'] == 'Official':
                 grouped_datasets[2]['dataset'].append(dataset)
             else:
                 grouped_datasets[3]['dataset'].append(dataset)
         elif dataset['session'] == 'analysis':
             grouped_datasets[4]['dataset'].append(dataset)
         else:
             raise GangaException('session not recognized: %s' % dataset['session'])
     
     i = 0
     
     # field sort, adding id and print
     for group in grouped_datasets:
         if len(group['dataset']) > 0:
             print('\n%s' % group['title'])
             
             # dictionary sorting
             group['dataset'] = sorted(group['dataset'], key=lambda elem: ('%s ' * len(group['order_by'])) % tuple([elem[d] for d in group['order_by']]))
             
             # id adding
             for dataset in group['dataset']:
                 dataset['id'] = i
                 i += 1
             
             print(utils.format_dict_table(group['dataset'], group['columns']))
     
     
     # ask for input and print dataset details 
     if i == 1:
         index = 0
         print('\nAutomatically selected the only entry')
     else:
         print('\nChoose the dataset:')
         index = utils.getIndex(maxExclusive=i)
     
     
     # Object oriented solution to investigate and/or binary search
     # datasets have been grouped per print rules
     for group in grouped_datasets:
         for d in group['dataset']:
             if d['id'] == index:
                  for dataset in datasets:
                      if dataset['dataset_id'] == d['dataset_id']:
                         return dataset
Esempio n. 18
0
    def createDataset(self):
        '''Interactively guide the user through the dataset creation procedure.

        The user first picks a dataset type (FastSim Personal Production,
        FullSim Personal Production or Analysis).  For the simulation types a
        session-specific list of parameters is then requested one by one; a
        mandatory free string (filter key) is always asked.  After an explicit
        'yes' confirmation the new dataset and its site relation are written
        to the bookkeeping database.

        Raises:
            GangaException: on an invalid parameter rule or type selection.
            utils.QuitException: if the user quits at the confirmation prompt.
        '''

        def asksParameter(parameter):
            '''Ask the user for the value of a single parameter.

            Depending on the parameter rule the value is either typed in
            freely (customValue with no predefined values), chosen from a
            printed id/value table, or both (table plus a trailing
            "Enter a custom value" escape entry).  The chosen value is stored
            in new_dataset['parameters'] and also returned.
            '''
            if parameter['customValue'] and len(parameter['values']) == 0:
                # Free-form parameter: no predefined choices at all.
                value = raw_input('\nEnter %s: ' % parameter['label'])
            elif not parameter['customValue'] and len(
                    parameter['values']) == 0:
                raise GangaException(
                    'Invalid rule (customValue:False and values=0).')
            else:
                # Build a printable id/value table of the allowed choices.
                table = [{'id': i, 'value': v}
                         for i, v in enumerate(parameter['values'])]

                if parameter['customValue']:
                    # Last entry lets the user escape the predefined list.
                    table.append({'id': len(table),
                                  'value': 'Enter a custom value'})

                print('\nChoose %s:' % parameter['label'])
                column_names = ('id', 'value')
                print(utils.format_dict_table(table, column_names))
                index = utils.getIndex(maxExclusive=len(table))

                if parameter['customValue'] and index == len(table) - 1:
                    value = raw_input('Custom value: ')
                else:
                    value = table[index]['value']

            # Record the parameter; it is subsequently inserted into the
            # dataset analysis bookkeeping table (hstore field).
            new_dataset['parameters'][parameter['name']] = value

            return value

        # Renamed from `type` to avoid shadowing the builtin.
        dataset_types = [
            dict(id=0, dataset_type='FastSim Personal Production'),
            dict(id=1, dataset_type='FullSim Personal Production'),
            dict(id=2, dataset_type='Analysis'),
        ]

        column_names = ('id', 'dataset_type')
        print(utils.format_dict_table(dataset_types, column_names))
        index = utils.getIndex(maxExclusive=len(dataset_types))

        new_dataset = dict()
        new_dataset['parameters'] = dict()

        ####################
        # FAST Simulation session
        ####################
        # parameter check: mandatory, free string param management
        # TODO: parameter type check, evaluate the config file option to store parameters

        if index == 0:
            new_dataset['session'] = 'fastsim'

            parameters = [{
                "name": "evt_file",
                "label": "Events per file",
                "customValue": True,
                "values": []
            }, {
                "name": "analysis",
                "label": "Analysis",
                "customValue": True,
                # NOTE: the original list contained "DstD0ToXLL" twice;
                # the duplicate menu entry has been removed.
                "values": [
                    "BtoKNuNu", "BtoKstarNuNu", "DstD0ToXLL",
                    "Generics", "HadRecoilCocktail", "KplusNuNu",
                    "SLRecoilCocktail", "tau->3mu"
                ]
            }, {
                "name": "dg",
                "label": "Geometry",
                "customValue": True,
                "values": ["DG_4", "DG_4a", "DG_BaBar"]
            }, {
                "name": "generator",
                "label": "Generator",
                "customValue": True,
                "values": [
                    "B0B0bar_Btag-HD_Cocktail",
                    "B0B0bar_Btag-SL_e_mu_tau_Bsig-HD_SL_Cocktail",
                    "B0B0bar_generic", "B0B0bar_K0nunu",
                    "B0B0bar_K0nunu_SL_e_mu_tau", "B0B0bar_Kstar0nunu_Kpi",
                    "B0B0bar_Kstar0nunu_Kpi_SL_e_mu_tau",
                    "B+B-_Btag-HD_Cocktail",
                    "B+B-_Btag-SL_e_mu_tau_Bsig-HD_SL_Cocktail",
                    "B+B-_generic", "B+B-_K+nunu", "B+B-_K+nunu_SL_e_mu_tau",
                    "B+B-_Kstar+nunu", "B+B-_Kstar+nunu_SL_e_mu_tau",
                    "B+B-_taunu_SL_e_mu_tau", "bhabha_bhwide", "ccbar",
                    "tau+tau-_kk2f", "uds", "udsc", "Upsilon4S_generic"
                ]
            }, {
                "name": "bkg_mixing",
                "label": "Background Mixing Type",
                "customValue": True,
                "values": ["All", "NoPair", "NoMixing"]
            }, {
                "name": "analysis_type",
                "label": "Analysis Type",
                "customValue": True,
                "values":
                ["BtoKNuNu", "BtoKstarNuNu", "HadRecoil", "SemiLepKplusNuNu"]
            }]

            for parameter in parameters:
                asksParameter(parameter)

        ####################
        # FULL Simulation session
        ####################
        elif index == 1:
            new_dataset['session'] = 'fullsim'

            parameters = [{
                "name": "evt_file",
                "label": "Events per file",
                "customValue": True,
                "values": []
            }, {
                "name": "sim_type",
                "label": "Simulation Type",
                "customValue": False,
                "values": ["fullsim", "background_frame"]
            }, {
                "name": "generator",
                "label": "Generator",
                "customValue": False,
                "values": ["RadBhaBha", "singleparticle"]
            }, {
                "name": "dg",
                "label": "Geometry",
                "customValue": True,
                "values": [
                    "Geometry_CIPE", "Geometry_CIPE_BGO", "Geometry_CIPE_CSI",
                    "Geometry_CIPE_V00-00-02"
                ]
            }, {
                "name": "pl",
                "label": "Physics list",
                "customValue": True,
                "values": ["QGSP", "QGSP_BERT", "QGSP_BERT_HP"]
            }, {
                "name": "g4ver",
                "label": "Geant 4 version",
                "customValue": True,
                "values": ["9.2", "9.3"]
            }, {
                "name": "opt_photons",
                "label": "Optical Photons",
                "customValue": False,
                "values": ["OFF", "ON"]
            }]
            # Generator-dependent parameter sets, appended on demand below.
            radbhabha = [{
                "name": "brunobbbminde",
                "label": "Min. Delta E",
                "customValue": True,
                "values": []
            }]
            singleParticle = [{
                "name": "brunopdg",
                "label": "PDG Code",
                "customValue": True,
                "values": []
            }, {
                "name": "brunothetamin",
                "label": "Theta min.",
                "customValue": True,
                "values": []
            }, {
                "name": "brunothetamax",
                "label": "Theta max.",
                "customValue": True,
                "values": []
            }, {
                "name": "brunophimin",
                "label": "Phi min.",
                "customValue": True,
                "values": []
            }, {
                "name": "brunophimax",
                "label": "Phi max.",
                "customValue": True,
                "values": []
            }, {
                "name": "brunoemin",
                "label": "Energy (GeV) min.",
                "customValue": True,
                "values": []
            }, {
                "name": "brunoemax",
                "label": "Energy (GeV) max.",
                "customValue": True,
                "values": []
            }]

            for parameter in parameters:
                value = asksParameter(parameter)

                # Parameter dependencies management: extending `parameters`
                # while it is being iterated is deliberate — the for loop
                # picks up the appended generator-specific entries and asks
                # for them too.
                if parameter['name'] == 'generator':
                    if value == 'singleparticle':
                        parameters.extend(singleParticle)
                    elif value == 'RadBhaBha':
                        parameters.extend(radbhabha)

        ####################
        # ANALYSIS session
        ####################
        elif index == 2:
            new_dataset['session'] = 'analysis'
        else:
            raise GangaException('Invalid selection.')

        # Mandatory free string (filter key), limited to 128 characters.
        while True:
            free_string = raw_input('\nEnter free string: ')
            max_length = 128

            if len(free_string) <= max_length:
                new_dataset['parameters']['free_string'] = free_string
                break
            else:
                print('Free string must be <= %d char long.' % max_length)

        # dataset-site relation set
        new_dataset['site'] = getConfig('SuperB')['submission_site']
        new_dataset['owner'] = utils.getOwner()
        new_dataset['dataset_id'] = str(objectid.ObjectId())

        print('\nNew dataset details:')
        self.printDatasetDetail(new_dataset)

        # Require an explicit confirmation before touching the database.
        value = ''
        while True:
            value = raw_input(
                'Type \'yes\' to confirm the dataset creation or (q)uit: ')
            if value == 'yes':
                break
            elif value == 'q':
                raise utils.QuitException()

        # Two inserts in one statement: the dataset itself and its
        # dataset-site relation (dataset_id is a hex-encoded ObjectId).
        sql = '''INSERT INTO analysis_dataset
            (owner, dataset_id, session, parameters, status)
            VALUES (%s, decode(%s, 'hex'), %s, %s, 'prepared');
            
            INSERT INTO analysis_dataset_site
            (dataset_id, site)
            VALUES (decode(%s, 'hex'), %s);'''
        params = (new_dataset['owner'], new_dataset['dataset_id'],
                  new_dataset['session'], new_dataset['parameters'],
                  new_dataset['dataset_id'], new_dataset['site'])
        db.write(sql, params)