def _edge_type(self):
    '''Return an edge type string such as "ecal_track".

    The order of id1 and id2 does not matter: one track and one ecal
    always produce "ecal_track", never "track_ecal".
    '''
    #consider creating an ENUM instead for the edge_type
    names = {'e': 'ecal', 'h': 'hcal', 't': 'track'}
    code1 = Identifier.type_short_code(self.id1)
    code2 = Identifier.type_short_code(self.id2)
    if code1 in names and code2 in names:
        # alphabetical order of the long names reproduces the canonical
        # naming: ecal_ecal, ecal_hcal, ecal_track, hcal_hcal, hcal_track, ...
        return '_'.join(sorted((names[code1], names[code2])))
    return "unknown"
def short_elements_string(self):
    '''Return a multi-line description of the elements in this block.

    Each element gets a short name E/H/T (ecal/hcal/track) followed by a
    sequential index starting at 0; the same naming indexes the matrix of
    distances.  The full unique id is also shown, for example:

    elements:
       E0 = SmearedCluster: ecal_in ... (1104134446736)
    '''
    lines = [" elements:\n"]
    for index, uid in enumerate(self.element_uniqueids):
        lines.append(
            "{shortname:>7}{count} = {strdescrip:9} value={val:5.1f} ({uid})\n".format(
                shortname=Identifier.type_letter(uid),
                count=index,
                strdescrip=Identifier.pretty(uid),
                val=Identifier.get_value(uid),
                uid=uid))
    return "".join(lines)
def __init__(self, element_ids, edges, subtype):
    '''
    element_ids: list of the uniqueids of the elements to go in this block [id1,id2,...]
    edges: dictionary of edges; it must contain at least all needed edges.
           Extra edges are harmless, only the needed ones are extracted.
    subtype: used when making the unique identifier, normally 'r' for
             reconstructed blocks and 's' for split blocks
    '''
    #make a uniqueid for this block
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.BLOCK, subtype)
    #order elements by type letter (ecal, hcal, track).
    #FIX: the list was previously sorted twice with the identical key;
    #one sort is enough.  Energy sorting stays commented out for now
    #as it is not available in the C++ version.
    self.element_uniqueids = sorted(element_ids,
                                    key=lambda x: Identifier.type_letter(x))
    #sequential numbering of blocks, not essential but helpful for debugging
    self.block_count = PFBlock.temp_block_count
    PFBlock.temp_block_count += 1
    #extract the relevant parts of the complete set of edges and store them
    self.edges = dict()
    for id1, id2 in itertools.combinations(self.element_uniqueids, 2):
        key = Edge.make_key(id1, id2)
        self.edges[key] = edges[key]
def __init__(self, element_ids, edges, pfevent):
    '''
    element_ids: list of the uniqueids of the elements to go in this block
    edges: dictionary of edges; must contain at least all needed edges,
           additional ones are simply ignored
    pfevent: gives access to the underlying elements via a get_object function
    '''
    # unique identifier for the block itself
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.BLOCK)
    # a block is deactivated if it is subsequently split
    self.is_active = True
    # keep a handle on the event so the underlying objects can be reached
    self.pfevent = pfevent
    # order the elements by type short code only
    # (energy sorting is commented out for now as not available in C++)
    self.element_uniqueids = sorted(element_ids,
                                    key=lambda uid: Identifier.type_short_code(uid))
    # sequential numbering of blocks, not essential but helpful for debugging
    self.block_count = PFBlock.temp_block_count
    PFBlock.temp_block_count += 1
    # pull out just the edges that join elements of this block
    self.edges = dict()
    for first, second in itertools.combinations(self.element_uniqueids, 2):
        edge_key = Edge.make_key(first, second)
        self.edges[edge_key] = edges[edge_key]
def test_papasevent(self):
    '''Check PapasEvent collection handling and object lookup.'''
    papasevent = PapasEvent(0)
    ecals = {uid: uid
             for uid in (Identifier.make_id(Identifier.PFOBJECTTYPE.ECALCLUSTER, i, 't', 4.5)
                         for i in range(0, 2))}
    tracks = {uid: uid
              for uid in (Identifier.make_id(Identifier.PFOBJECTTYPE.TRACK, i, 's', 4.5)
                          for i in range(0, 2))}
    lastid = Identifier.make_id(Identifier.PFOBJECTTYPE.ECALCLUSTER, 3, 't', 3)
    ecals[lastid] = lastid
    papasevent.add_collection(ecals)
    papasevent.add_collection(tracks)
    # adding the same collection twice must fail
    self.assertRaises(ValueError, papasevent.add_collection, ecals)
    # adding a collection with mixed types must fail
    mixed = ecals.copy()
    mixed.update(tracks)
    self.assertRaises(ValueError, papasevent.add_collection, mixed)
    # collections can be retrieved again
    self.assertTrue(papasevent.get_collection('zz') is None)
    self.assertTrue(len(papasevent.get_collection('et')) == 3)
    # get_object finds the underlying object
    self.assertTrue(Identifier.pretty(papasevent.get_object(lastid)) == 'et3')
    self.assertTrue(papasevent.get_object(499) is None)
def make_particles_from_block(self, block):
    '''Take a block and use simple rules to construct particles.

    One lone ecal makes a photon; one ecal plus one track makes a hadron;
    ecal + hcal + track makes a hadron plus a photon.  Anything else is
    left untouched.
    '''
    #take a block and find its parents (clusters and tracks)
    parents = block.element_uniqueids
    # FIX: use boolean 'and' instead of bitwise '&' so evaluation
    # short-circuits; previously parents[0] was evaluated even for an
    # empty block, raising IndexError
    if len(parents) == 1 and Identifier.is_ecal(parents[0]):
        self.make_photon(parents)
    elif (len(parents) == 2 and block.count_ecal() == 1
          and block.count_tracks() == 1):
        self.make_hadron(parents)
    elif (len(parents) == 3 and block.count_ecal() == 1
          and block.count_tracks() == 1 and block.count_hcal() == 1):
        #probably not right but illustrates splitting of parents for
        #more than one particle: the hcal is used for the photon and
        #everything else goes to the hadron
        hparents = []
        for elem in parents:
            if Identifier.is_hcal(elem):
                self.make_photon({elem})
            else:
                hparents.append(elem)
        self.make_hadron(hparents)
    else:
        pass
def __init__(self, element_ids, edges, pfevent):
    '''
    element_ids: list of the uniqueids of the elements to go in this block
    edges: dictionary of edges; must contain at least all needed edges,
           additional ones are ignored
    pfevent: gives access to the underlying elements via a get_object function
    '''
    # unique identifier for the block itself
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.BLOCK)
    # a block is deactivated if it is subsequently split
    self.is_active = True
    # keep a handle on the event so the underlying objects can be reached
    self.pfevent = pfevent
    # order elements by type (ecal, hcal, track) then by decreasing energy.
    # A bit yucky, but needed so the returned order is consistent;
    # maybe this should live outside of this class.
    def _order(uid):
        return (Identifier.type_short_code(uid),
                -self.pfevent.get_object(uid).energy)
    self.element_uniqueids = sorted(element_ids, key=_order)
    # sequential numbering of blocks, helpful for debugging
    self.block_count = PFBlock.temp_block_count
    PFBlock.temp_block_count += 1
    # pull out just the edges that join elements of this block
    self.edges = dict()
    for first, second in itertools.combinations(self.element_uniqueids, 2):
        edge_key = Edge.make_key(first, second)
        self.edges[edge_key] = edges[edge_key]
def make_particles_from_block(self, block):
    '''Take a block and use simple rules to construct particles.

    One lone ecal makes a photon; one ecal plus one track makes a hadron;
    ecal + hcal + track makes a hadron plus a photon.  Anything else is
    left untouched.
    '''
    #take a block and find its parents (clusters and tracks)
    parents = block.element_uniqueids
    # FIX: use boolean 'and' instead of bitwise '&' so evaluation
    # short-circuits; previously parents[0] was evaluated even for an
    # empty block, raising IndexError
    if len(parents) == 1 and Identifier.is_ecal(parents[0]):
        self.make_photon(parents)
    elif (len(parents) == 2 and block.count_ecal() == 1
          and block.count_tracks() == 1):
        self.make_hadron(parents)
    elif (len(parents) == 3 and block.count_ecal() == 1
          and block.count_tracks() == 1 and block.count_hcal() == 1):
        #probably not right but illustrates splitting of parents for
        #more than one particle: the hcal is used for the photon and
        #everything else goes to the hadron
        hparents = []
        for elem in parents:
            if Identifier.is_hcal(elem):
                self.make_photon({elem})
            else:
                hparents.append(elem)
        self.make_hadron(hparents)
    else:
        pass
def _graph_add_topological_block(self, graph, graphnodes, pfblock):
    '''Add the block links (distance, is_linked) onto the DAG in red.'''
    for edge in pfblock.edges.itervalues():
        if not edge.linked:
            continue
        # zero distances are shown as a plain "0", others in scientific form
        label = "0" if edge.distance == 0 else "{:.1E}".format(edge.distance)
        graph.add_edge(pydot.Edge(
            graphnodes[Identifier.pretty(edge.id1)],
            graphnodes[Identifier.pretty(edge.id2)],
            label=label,
            style="dashed",
            color="red",
            arrowhead="none",
            arrowtail="none",
            fontsize='7'))
def build_collections_and_history(self, papasevent, sim_particles):
    '''Build the papasevent collections and history graph from sim particles.

    For each simulated particle this registers the particle, its true and
    smeared track, and its true and smeared clusters into per-type dicts,
    creating a history Node for each object and linking child objects to
    their parents.  All dicts are finally added to the papasevent.
    '''
    #todo this should be integrated into the simulator in the future
    simulated_particles = dict()
    tracks = dict()
    smeared_tracks=dict()
    smeared_hcals = dict()
    true_hcals = dict()
    smeared_ecals = dict()
    true_ecals = dict()
    # NOTE(review): smeared_tracks is initialised twice (above and here);
    # 'tracks' is never filled or added to the event — confirm intended
    smeared_tracks = dict()
    true_tracks = dict()
    history = papasevent.history
    for ptc in sim_particles:
        uid = ptc.uniqueid
        simulated_particles[uid] = ptc
        history[uid] = Node(uid)
        if ptc.track:
            # true track is a child of the particle in the history
            track_id = ptc.track.uniqueid
            true_tracks[track_id] = ptc.track
            history[track_id] = Node(track_id)
            history[uid].add_child(history[track_id])
            if ptc.track_smeared:
                # smeared track is a child of the true track
                smtrack_id = ptc.track_smeared.uniqueid
                smeared_tracks[smtrack_id] = ptc.track_smeared
                history[smtrack_id] = Node(smtrack_id)
                history[track_id].add_child(history[smtrack_id])
        if len(ptc.clusters) > 0 :
            for key, clust in ptc.clusters.iteritems():
                # sort true clusters into ecal/hcal dicts by identifier type
                if Identifier.get_type(clust.uniqueid) == Identifier.PFOBJECTTYPE.ECALCLUSTER:
                    true_ecals[clust.uniqueid] = clust
                elif Identifier.get_type(clust.uniqueid) == Identifier.PFOBJECTTYPE.HCALCLUSTER:
                    true_hcals[clust.uniqueid] = clust
                else:
                    assert(False)
                history[clust.uniqueid] = Node(clust.uniqueid)
                history[uid].add_child(history[clust.uniqueid])
                if len(ptc.clusters_smeared) > 0 :
                    #need to put in link between true and smeared cluster
                    for key1, smclust in ptc.clusters_smeared.iteritems():
                        # match the smeared cluster to its true cluster by key
                        if (key == key1):
                            if Identifier.get_type(smclust.uniqueid) == Identifier.PFOBJECTTYPE.ECALCLUSTER:
                                smeared_ecals[smclust.uniqueid]=smclust
                            elif Identifier.get_type(smclust.uniqueid) == Identifier.PFOBJECTTYPE.HCALCLUSTER:
                                smeared_hcals[smclust.uniqueid]=smclust
                            history[smclust.uniqueid] = Node(smclust.uniqueid)
                            history[clust.uniqueid].add_child(history[smclust.uniqueid])
    papasevent.add_collection(simulated_particles)
    papasevent.add_collection(true_tracks)
    papasevent.add_collection(smeared_tracks)
    papasevent.add_collection(smeared_hcals)
    papasevent.add_collection(true_hcals)
    papasevent.add_collection(smeared_ecals)
    papasevent.add_collection(true_ecals)
def __init__(self, id, layer):
    '''
    id: unique integer, 101-199 for an ecal cluster,
        201-299 for an hcal cluster
    layer: 'ecal_in' or 'hcal_in'; anything else raises ValueError
    '''
    if (layer == 'ecal_in'):
        self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.ECALCLUSTER)
    elif (layer == 'hcal_in'):
        self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.HCALCLUSTER)
    else:
        # FIX: was 'assert false', which raised NameError ('false' is not
        # defined in Python); raise an explicit error instead
        raise ValueError('unsupported layer: %r' % (layer,))
    self.layer = layer
    self.id = id
    self.energy = 0
def __init__(self, uid, layer):
    '''
    uid: unique integer, 101-199 for an ecal cluster,
         201-299 for an hcal cluster
    layer: 'ecal_in' or 'hcal_in'; anything else raises ValueError
    '''
    if (layer == 'ecal_in'):
        self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.ECALCLUSTER, 't')
    elif (layer == 'hcal_in'):
        self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.HCALCLUSTER, 't')
    else:
        # FIX: was 'assert false', which raised NameError ('false' is not
        # defined in Python); raise an explicit error instead
        raise ValueError('unsupported layer: %r' % (layer,))
    self.layer = layer
    self.uid = uid
    self.energy = 0
def simplify_blocks(self, block, history_nodes=None):
    '''
    Block: a block which contains a list of element ids and the set of
        edges that connect them
    history_nodes: optional dictionary of Nodes with element identifiers
        in each node

    returns None or a dictionary of new split blocks

    The goal is to remove, if needed, some links from the block so that
    each track links to at most one hcal within a block.  In some cases
    this may separate a block into smaller blocks (splitblocks).  The
    BlockSplitter is used to return the new smaller blocks.  If
    history_nodes are provided then the history will be updated: split
    blocks will have the tracks and cluster elements as parents, and also
    the original block as a parent.
    '''
    ids=block.element_uniqueids
    if len(ids)<=1 :
        #no links to remove
        return None
    # work out any links that need to be removed
    #  - for tracks unlink all hcals except the closest hcal
    #  - for ecals unlink hcals
    to_unlink = []
    for id in ids :
        if Identifier.is_track(id):
            linked = block.linked_edges(id,"hcal_track")
            # NB already sorted from small to large distance
            if linked!=None and len(linked)>1 :
                first_hcal = True
                for elem in linked:
                    if first_hcal:
                        # remember the closest hcal's distance; it is kept
                        first_dist=elem.distance
                        first_hcal = False
                    else:
                        # NOTE(review): this 'if ...: pass' has no effect —
                        # hcals at exactly the first distance are still
                        # unlinked below; confirm whether ties should be kept
                        if (elem.distance==first_dist):
                            pass
                        to_unlink.append(elem)
        elif Identifier.is_ecal(id):
            # this is now handled elsewhere and so could be removed
            # remove all ecal-hcal links. ecal linked to hcal give rise to a photon anyway.
            linked = block.linked_edges(id,"ecal_hcal")
            to_unlink.extend(linked)
    #if there is something to unlink then use the BlockSplitter
    splitblocks=None
    if len(to_unlink):
        splitblocks= BlockSplitter(block, to_unlink, history_nodes).blocks
    return splitblocks
def __init__(self, uid, pdgid):
    '''
    uid: unique integer from 601-699
    pdgid: particle id, eg 22 for a photon
    '''
    self.pdgid = pdgid
    self.uid = uid
    # reconstructed particle identifier ('r' subtype)
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.PARTICLE, uid, 'r')
def __init__(self, id, pdgid):
    '''
    id: unique integer from 601-699
    pdgid: particle id, eg 22 for a photon
    '''
    self.pdgid = pdgid
    self.id = id
    # identifier for a reconstructed particle
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.RECPARTICLE)
def simplify_blocks(self, block, history_nodes=None):
    '''
    Block: a block which contains a list of element ids and the set of
        edges that connect them
    history_nodes: optional dictionary of Nodes with element identifiers
        in each node

    returns a dictionary of new split blocks

    The goal is to remove, if needed, some links from the block so that
    each track links to at most one hcal within a block.  In some cases
    this may separate a block into smaller blocks (splitblocks).  The
    BlockSplitter is used to return the new smaller blocks.  If
    history_nodes are provided then the history will be updated: split
    blocks will have the tracks and cluster elements as parents, and also
    the original block as a parent.
    '''
    ids = block.element_uniqueids
    #create a copy of the edges and unlink some of these edges if needed
    newedges = copy.deepcopy(block.edges)
    if len(ids) > 1 :
        for uid in ids :
            if Identifier.is_track(uid):
                # for tracks unlink all hcals except the closest hcal
                linked_ids = block.linked_ids(uid, "hcal_track")
                # NB already sorted from small to large distance
                if linked_ids != None and len(linked_ids) > 1:
                    first_hcal = True
                    for id2 in linked_ids:
                        newedge = newedges[Edge.make_key(uid, id2)]
                        if first_hcal:
                            # remember the closest hcal's distance; its edge keeps linked=True
                            first_dist = newedge.distance
                            first_hcal = False
                        else:
                            # NOTE(review): this 'if ...: pass' has no effect —
                            # edges tied with the first distance are still
                            # unlinked below; confirm whether ties should be kept
                            if newedge.distance == first_dist:
                                pass
                            newedge.linked = False
    #create new block(s)
    splitblocks = BlockSplitter(block.uniqueid, ids, newedges, len(self.splitblocks), 's', history_nodes).blocks
    return splitblocks
def __init__(self, id):
    '''Track with a unique integer id from 1-99.'''
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.TRACK)
    self.layer = 'tracker'
    self.energy = 0
    self.id = id
def __init__(self, id, pdgid):
    '''
    id: unique integer from 601-699
    pdgid: particle id, eg 22 for a photon
    '''
    # identifier for a reconstructed particle
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.RECPARTICLE)
    self.id = id
    self.pdgid = pdgid
def __init__(self, uid, pdgid):
    '''
    uid: unique integer from 601-699
    pdgid: particle id, eg 22 for a photon
    '''
    self.uid = uid
    self.pdgid = pdgid
    # reconstructed particle identifier ('r' subtype)
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.PARTICLE, 'r')
def __init__(self, id):
    '''Track with a unique integer id from 1-99.'''
    self.id = id
    self.layer = 'tracker'
    self.energy = 0
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.TRACK)
def edge_matrix_string(self): ''' produces a string containing the the lower part of the matrix of distances between elements elements are ordered as ECAL(E), HCAL(H), Track(T) for example:- distances: E0 H1 T2 T3 E0 . H1 0.0267 . T2 0.0000 0.0000 . T3 0.0287 0.0825 --- . ''' # make the header line for the matrix count = 0 matrixstr = "" if len(self.element_uniqueids) > 1: matrixstr = " distances:\n " for e1 in self.element_uniqueids: # will produce short id of form E2 H3, T4 etc in tidy format elemstr = Identifier.type_letter(e1) + str(count) matrixstr += "{:>8}".format(elemstr) count += 1 matrixstr += "\n" #for each element find distances to all other items that are in the lower part of the matrix countrow = 0 for e1 in self.element_uniqueids: # this will be the rows countcol = 0 rowstr = "" #make short name for the row element eg E3, H5 etc rowname = Identifier.type_letter(e1) + str(countrow) for e2 in self.element_uniqueids: # these will be the columns countcol += 1 if e1 == e2: rowstr += " ." break elif self.get_edge(e1, e2).distance is None: rowstr += " ---" elif not self.get_edge(e1, e2).linked: rowstr += " ---" else: rowstr += "{:8.4f}".format( self.get_edge(e1, e2).distance) matrixstr += "{:>8}".format(rowname) + rowstr + "\n" countrow += 1 return matrixstr
def edge_matrix_string(self):
    '''Return the lower triangle of the element-distance matrix as a string.

    Elements are ordered as ECAL(E), HCAL(H), Track(T).  Diagonal entries
    print as ".", edges without a distance as "---", unlinked edges as
    "xxx".  The returned string is terminated with " }\n".
    '''
    # header line: one short column label (eg E0, H1, T2) per element
    count = 0
    matrixstr = " distances:\n "
    for e1 in self.element_uniqueids :
        elemstr = Identifier.type_short_code(e1) + str(count)
        matrixstr += "{:>9}".format(elemstr)
        count += 1
    matrixstr += "\n"
    # one row per element, showing distances to the elements before it
    countrow = 0
    for e1 in self.element_uniqueids :
        rowstr = ""
        # short name for the row element eg E3, H5 etc
        rowname = Identifier.type_short_code(e1) + str(countrow)
        for e2 in self.element_uniqueids:
            # FIX: removed the dead locals 'countcol'/'colname'; colname was
            # never used and was also (wrongly) built from e1 instead of e2
            if (e1 == e2):
                # diagonal reached: print the dot and stop this row
                rowstr += " . "
                break
            elif self.get_edge(e1,e2).distance == None:
                rowstr += " ---" + " "
            elif self.get_edge(e1,e2).linked == False:
                rowstr += " xxx" + " "
            else :
                rowstr += "{:8.4f}".format(self.get_edge(e1,e2).distance) + " "
        matrixstr += "{:>11}".format(rowname) + rowstr + "\n"
        countrow += 1
    return matrixstr + " }\n"
def filter_ids(self, ids, type_and_subtype):
    '''Return the subset of ids matching the given type_and_subtype.

    eg merged_ecal_ids = filter_ids(ids, 'em')
    @param ids: a list of ids
    @param type_and_subtype: two letter type and subtype, eg 'es' for
        a smeared ecal
    '''
    return list(filter(
        lambda uid: Identifier.type_and_subtype(uid) == type_and_subtype,
        ids))
def __init__(self, id, pdgid):
    '''
    id: unique integer from 301-399
    pdgid: particle id, eg 22 for a photon
    '''
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.PARTICLE)
    self.id = id
    self.pdgid = pdgid
def __str__(self):
    '''Base description with the pretty id and uniqueid spliced in
    after the first colon-separated field.'''
    base = super(Particle, self).__str__()
    idstr = '{pretty:6}:{id}'.format(
        pretty=Identifier.pretty(self.uniqueid),
        id=self.uniqueid)
    parts = base.split(':')
    parts.insert(1, idstr)
    return ':'.join(parts)
def __str__(self):
    '''Base description with the pretty id and uniqueid spliced in
    after the first colon-separated field.'''
    pieces = super(Particle, self).__str__().split(':')
    pieces.insert(1, '{pretty:6}:{uid}'.format(
        pretty=Identifier.pretty(self.uniqueid),
        uid=self.uniqueid))
    return ':'.join(pieces)
def __init__(self, pfobjecttype=Identifier.PFOBJECTTYPE.NONE):
    '''Base particle-flow object.

    pfobjecttype: the Identifier object type used to build the uniqueid.
    '''
    super(PFObject, self).__init__()
    self.uniqueid = Identifier.make_id(pfobjecttype)
    # bookkeeping used during block building / reconstruction
    self.linked = []
    self.locked = False
    self.block_label = None
def reconstruct_muons(self, block):
    '''Reconstruct muons in block.

    Any track element identified as coming from a pdgid 13 smeared
    particle is reconstructed as a muon.
    '''
    for uid in block.element_uniqueids:
        if not Identifier.is_track(uid):
            continue
        if not self.is_from_particle(uid, 'ps', 13):
            continue
        self.reconstruct_track(self.papasevent.get_object(uid), 13,
                               [block.uniqueid, uid])
def reconstruct_electrons(self, block):
    '''Reconstruct electrons in block.

    Any track element identified as coming from a pdgid 11 smeared
    particle is reconstructed as an electron.
    '''
    uids = block.element_uniqueids
    for uid in uids:
        if Identifier.is_track(uid) and \
           self.is_from_particle(uid, 'ps', 11):
            parent_ids = [block.uniqueid, uid]
            track = self.papasevent.get_object(uid)
            # FIX: the return value was bound to an unused local 'ptc';
            # the binding has been dropped
            self.reconstruct_track(track, 11, parent_ids)
def get_object(self, uniqueid):
    '''Return the underlying object (track, cluster or particle) for a uniqueid.

    Dispatches on the Identifier type encoded in the uniqueid; particles
    are split further by subtype ('g' simulated, 'r' reconstructed).
    Unknown identifier types trigger the assert.
    '''
    type = Identifier.get_type(uniqueid)  # NOTE: shadows the builtin 'type'
    subtype = Identifier.get_subtype(uniqueid)
    if type == Identifier.PFOBJECTTYPE.TRACK:
        return self.tracks[uniqueid]
    elif type == Identifier.PFOBJECTTYPE.ECALCLUSTER:
        return self.ecal_clusters[uniqueid]
    elif type == Identifier.PFOBJECTTYPE.HCALCLUSTER:
        return self.hcal_clusters[uniqueid]
    elif type == Identifier.PFOBJECTTYPE.PARTICLE:
        # NOTE(review): a PARTICLE whose subtype is neither 'g' nor 'r'
        # falls through and implicitly returns None — confirm intended
        if subtype == 'g':
            return self.sim_particles[uniqueid]
        elif subtype == 'r':
            return self.reconstructed_particles[uniqueid]
    else:
        assert(False)
def filter_ids(self, ids, type_and_subtype):
    '''Return the subset of ids matching the given type_and_subtype.

    eg merged_ecal_ids = filter_ids(ids, 'em')
    @param ids: a list of ids
    @param type_and_subtype: two letter type and subtype, eg 'es' for
        a smeared ecal
    '''
    matching = []
    for uid in ids:
        if Identifier.type_and_subtype(uid) == type_and_subtype:
            matching.append(uid)
    return matching
def info(self):
    '''One-line summary: energy, polar angle, phi and the subcluster ids.'''
    entries = ''.join('{:}, '.format(Identifier.pretty(s.uniqueid))
                      for s in self.subclusters)
    subclusterstr = 'sub(' + entries + ')'
    return '{energy:7.2f} {theta:5.2f} {phi:5.2f} {sub}'.format(
        energy=self.energy,
        theta=math.pi / 2. - self.position.Theta(),
        phi=self.position.Phi(),
        sub=subclusterstr)
def id_from_pretty(self, pretty):
    '''Search the history for the uid whose pretty string matches.

    Not super efficient but OK for occasional use,
    eg uid = self.id_from_pretty('et103')
    @param pretty: the readable name from the Identifier class shown in
        prints and plots, eg 'et103'
    Returns None when no uid matches.
    '''
    for uid in self.history:
        if Identifier.pretty(uid) == pretty:
            return uid
    return None
def get_object(self, uid):
    '''Return the object stored for a unique uid, or None.

    Deliberately returns None (rather than raising) when the collection
    for this uid's type_and_subtype does not exist or the uid is absent.
    '''
    collection = self.get_collection(Identifier.type_and_subtype(uid))
    if not collection:
        return None
    return collection.get(uid, None)
def __init__(self, pfobjecttype, index, subtype='u', identifiervalue = 0.0):
    '''Base particle-flow object.

    @param pfobjecttype: type of the object to be created (used in the
        Identifier class), eg Identifier.PFOBJECTTYPE.ECALCLUSTER
    @param index: index encoded into the Identifier
    @param subtype: Identifier subtype, eg 'm' for merged
    @param identifiervalue: value encoded into the Identifier,
        eg energy or pt
    '''
    super(PFObject, self).__init__()
    self.uniqueid = Identifier.make_id(pfobjecttype, index, subtype, identifiervalue)
    # bookkeeping used during block building / reconstruction
    self.linked = []
    self.locked = False
    self.block_label = None
def reconstruct_block(self, block):
    '''Reconstruct particles from one block.

    See the class description for a summary of the reconstruction
    approach.  Muons and electrons are handled first; whatever elements
    they lock are then excluded from the hadron/photon logic below.
    Elements that remain unlocked at the end are recorded in self.unused.
    '''
    uids = block.element_uniqueids #ids are already stored in sorted order inside block
    self.locked = dict((uid, False) for uid in uids)
    # first reconstruct muons and electrons
    self.reconstruct_muons(block)
    self.reconstruct_electrons(block)
    # keeping only the elements that have not been used so far
    uids = [uid for uid in uids if not self.locked[uid]]
    if len(uids) == 1: #TODO WARNING!!! LOTS OF MISSING CASES
        # single-element block: reconstruct directly from the element type
        uid = uids[0]
        parent_ids = [block.uniqueid, uid]
        if Identifier.is_ecal(uid):
            self.reconstruct_cluster(self.papasevent.get_object(uid),
                                     "ecal_in", parent_ids)
        elif Identifier.is_hcal(uid):
            self.reconstruct_cluster(self.papasevent.get_object(uid),
                                     "hcal_in", parent_ids)
        elif Identifier.is_track(uid):
            self.reconstruct_track(self.papasevent.get_object(uid), 211,
                                   parent_ids)
    else: #TODO
        # hcals first, then any tracks that are still unlocked
        for uid in uids: #already sorted to have higher energy things first (see pfblock)
            if Identifier.is_hcal(uid):
                self.reconstruct_hcal(block, uid)
        for uid in uids: #already sorted to have higher energy things first
            if Identifier.is_track(uid) and not self.locked[uid]:
                # unused tracks, so not linked to HCAL
                # reconstructing charged hadrons.
                # ELECTRONS TO BE DEALT WITH.
                parent_ids = [block.uniqueid, uid]
                self.reconstruct_track(self.papasevent.get_object(uid), 211,
                                       parent_ids)
                # tracks possibly linked to ecal->locking cluster
                for idlink in block.linked_ids(uid, "ecal_track"):
                    #ask colin what happened to possible photons here:
                    self.locked[idlink] = True
                    #TODO add in extra photonsbut decide where they should go?
    self.unused.extend(
        [uid for uid in block.element_uniqueids if not self.locked[uid]])
def reconstruct_block(self, block):
    '''Reconstruct particles from one block.

    See the class description for a summary of the reconstruction
    approach.  Single-element blocks are reconstructed directly from the
    element type; larger blocks are processed hcal-first, then any
    remaining unlocked tracks.
    '''
    # NOTE(review): 'particles' is initialised but never used here — confirm
    particles = dict()
    ids = block.element_uniqueids
    #ids = sorted( ids, key = lambda id: Identifier.type_short_code )
    self.locked = dict()
    for id in ids:
        self.locked[id] = False
    self.debugprint = False
    if (self.debugprint and len(block.element_uniqueids)> 4):
        print block
    if len(ids) == 1: #TODO WARNING!!! LOTS OF MISSING CASES
        id = ids[0]
        if Identifier.is_ecal(id):
            self.insert_particle(block, self.reconstruct_cluster(block.pfevent.ecal_clusters[id],"ecal_in"))
        elif Identifier.is_hcal(id):
            self.insert_particle(block, self.reconstruct_cluster(block.pfevent.hcal_clusters[id],"hcal_in"))
        elif Identifier.is_track(id):
            self.insert_particle(block, self.reconstruct_track(block.pfevent.tracks[id]))
        # ask Colin about energy balance - what happened to the associated clusters that one would expect?
    else: #TODO
        # hcals first, then any tracks that are still unlocked
        for id in sorted(ids) : #newsort
            if Identifier.is_hcal(id):
                self.reconstruct_hcal(block,id)
        for id in sorted(ids) : #newsort
            if Identifier.is_track(id) and not self.locked[id]:
                # unused tracks, so not linked to HCAL
                # reconstructing charged hadrons.
                # ELECTRONS TO BE DEALT WITH.
                self.insert_particle(block, self.reconstruct_track(block.pfevent.tracks[id]))
                # tracks possibly linked to ecal->locking cluster
                for idlink in block.linked_ids(id,"ecal_track"):
                    #ask colin what happened to possible photons here:
                    self.locked[idlink] = True
def reconstruct_block(self, block):
    '''Reconstruct particles from one block.

    See the class description for a summary of the reconstruction
    approach.  Single-element blocks are reconstructed directly from the
    element type; larger blocks are processed hcal-first, then any
    remaining unlocked tracks.
    '''
    # NOTE(review): 'particles' is initialised but never used here — confirm
    particles = dict()
    ids = block.element_uniqueids
    self.locked = dict()
    for id in ids:
        self.locked[id] = False
    self.debugprint = False
    if (self.debugprint and len(block.element_uniqueids)> 4):
        print block
    if len(ids) == 1: #TODO WARNING!!! LOTS OF MISSING CASES
        id = ids[0]
        if Identifier.is_ecal(id):
            self.insert_particle(block, self.reconstruct_cluster(block.pfevent.ecal_clusters[id],"ecal_in"))
        elif Identifier.is_hcal(id):
            self.insert_particle(block, self.reconstruct_cluster(block.pfevent.hcal_clusters[id],"hcal_in"))
        elif Identifier.is_track(id):
            self.insert_particle(block, self.reconstruct_track(block.pfevent.tracks[id]))
        # ask Colin about energy balance - what happened to the associated clusters that one would expect?
    else: #TODO
        # hcals first, then any tracks that are still unlocked
        for id in ids :
            if Identifier.is_hcal(id):
                self.reconstruct_hcal(block,id)
        for id in ids :
            if Identifier.is_track(id) and not self.locked[id]:
                # unused tracks, so not linked to HCAL
                # reconstructing charged hadrons.
                # ELECTRONS TO BE DEALT WITH.
                self.insert_particle(block, self.reconstruct_track(block.pfevent.tracks[id]))
                # tracks possibly linked to ecal->locking cluster
                for idlink in block.linked_ids(id,"ecal_track"):
                    #ask colin what happened to possible photons here:
                    self.locked[idlink] = True
def __init__(self, tlv, vertex, charge, pdgid=None, ParticleType=Identifier.PFOBJECTTYPE.PARTICLE):
    '''Particle built from a four-vector, a vertex and a charge.

    pdgid: optional particle id, eg 22 for a photon
    ParticleType: Identifier object type used to build the uniqueid
    '''
    super(Particle, self).__init__(pdgid, charge, tlv)
    self.uniqueid = Identifier.make_id(ParticleType)
    self.vertex = vertex
    self.path = None
    # Alice: experiment to match cpp debug Track(self.p3(), self.q(), self.path)
    self.track = None
    self.track_smeared = None
    self.clusters = dict()
    self.clusters_smeared = dict()
def __init__(self, tlv, vertex, charge, pdgid=None, subtype='s'):
    '''Particle built from a four-vector, a vertex and a charge.

    pdgid: optional particle id, eg 22 for a photon
    subtype: Identifier subtype encoded into the uniqueid, default 's'
    '''
    self.subtype = subtype
    super(Particle, self).__init__(pdgid, charge, tlv)
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.PARTICLE, subtype)
    self.vertex = vertex
    self.path = None
    # to match cpp
    self.track = None
    self.track_smeared = None
    self.clusters = dict()
    self.clusters_smeared = dict()
def get_object(self, uniqueid):
    '''Return the underlying object (track, cluster, particle or block)
    for a uniqueid.

    Dispatches on the Identifier type encoded in the uniqueid; particles
    are split further by subtype ('g' simulated, 'r' reconstructed).
    Unknown identifier types trigger the assert.
    '''
    type = Identifier.get_type(uniqueid)  # NOTE: shadows the builtin 'type'
    subtype = Identifier.get_subtype(uniqueid)
    if type == Identifier.PFOBJECTTYPE.TRACK:
        return self.tracks[uniqueid]
    elif type == Identifier.PFOBJECTTYPE.ECALCLUSTER:
        return self.ecal_clusters[uniqueid]
    elif type == Identifier.PFOBJECTTYPE.HCALCLUSTER:
        return self.hcal_clusters[uniqueid]
    elif type == Identifier.PFOBJECTTYPE.PARTICLE:
        # NOTE(review): a PARTICLE whose subtype is neither 'g' nor 'r'
        # falls through and implicitly returns None — confirm intended
        if subtype == 'g':
            return self.sim_particles[uniqueid]
        elif subtype == 'r':
            return self.reconstructed_particles[uniqueid]
    elif type == Identifier.PFOBJECTTYPE.BLOCK:
        return self.blocks[uniqueid]
    else:
        assert(False)
def info(self):
    '''One-line summary: energy, polar angle, phi and the subcluster ids.'''
    pieces = ['sub(']
    for sub in self.subclusters:
        pieces.append('{:}, '.format(Identifier.pretty(sub.uniqueid)))
    pieces.append(')')
    return '{energy:7.2f} {theta:5.2f} {phi:5.2f} {sub}'.format(
        energy=self.energy,
        theta=math.pi / 2. - self.position.Theta(),
        phi=self.position.Phi(),
        sub=''.join(pieces))
def reconstruct_block(self, block):
    '''Reconstruct particles from one block.

    See the class description for a summary of the reconstruction
    approach.  Muons and electrons are handled first; whatever elements
    they lock are then excluded from the hadron/photon logic below.
    Elements that remain unlocked at the end are recorded in self.unused.
    '''
    uids = block.element_uniqueids #ids are already stored in sorted order inside block
    self.locked = dict( (uid, False) for uid in uids )
    # first reconstruct muons and electrons
    self.reconstruct_muons(block)
    self.reconstruct_electrons(block)
    # keeping only the elements that have not been used so far
    uids = [uid for uid in uids if not self.locked[uid]]
    if len(uids) == 1: #TODO WARNING!!! LOTS OF MISSING CASES
        # single-element block: reconstruct directly from the element type
        uid = uids[0]
        parent_ids = [block.uniqueid, uid]
        if Identifier.is_ecal(uid):
            self.reconstruct_cluster(self.papasevent.get_object(uid),
                                     "ecal_in", parent_ids)
        elif Identifier.is_hcal(uid):
            self.reconstruct_cluster(self.papasevent.get_object(uid),
                                     "hcal_in", parent_ids)
        elif Identifier.is_track(uid):
            self.reconstruct_track(self.papasevent.get_object(uid), 211,
                                   parent_ids)
    else: #TODO
        # hcals first, then any tracks that are still unlocked
        for uid in uids: #already sorted to have higher energy things first (see pfblock)
            if Identifier.is_hcal(uid):
                self.reconstruct_hcal(block, uid)
        for uid in uids: #already sorted to have higher energy things first
            if Identifier.is_track(uid) and not self.locked[uid]:
                # unused tracks, so not linked to HCAL
                # reconstructing charged hadrons.
                # ELECTRONS TO BE DEALT WITH.
                parent_ids = [block.uniqueid, uid]
                self.reconstruct_track(self.papasevent.get_object(uid), 211,
                                       parent_ids)
                # tracks possibly linked to ecal->locking cluster
                for idlink in block.linked_ids(uid, "ecal_track"):
                    #ask colin what happened to possible photons here:
                    self.locked[idlink] = True
                    #TODO add in extra photonsbut decide where they should go?
    self.unused.extend([uid for uid in block.element_uniqueids if not self.locked[uid]])
def summary_of_linked_elems(self, id):
    '''Print a summary of everything connected to this id in the history.

    A breadth-first search over the (undirected) history graph collects
    every connected id; the corresponding objects are then grouped by
    type and printed.
    '''
    #find everything that is linked to this id
    #and write a summary of what is found
    #the BFS search returns a list of the ids that are connected to the id of interest
    BFS = BreadthFirstSearchIterative(self.history_nodes[id], "undirected")
    #collate the string descriptions
    track_descrips = []
    ecal_descrips = []
    hcal_descrips = []
    #sim_particle_descrips = []
    rec_particle_descrips = []
    block_descrips = []
    for n in BFS.result :
        z = n.get_value()
        obj = self.pfevent.get_object(z)
        descrip = obj.__str__()
        # if (Identifier.is_particle(z)):
        #     sim_particle_descrips.append(descrip)
        # group the description by the identifier type
        if (Identifier.is_block(z)):
            block_descrips.append(descrip)
        elif (Identifier.is_track(z)):
            track_descrips.append(descrip)
        elif (Identifier.is_ecal(z)):
            ecal_descrips.append(descrip)
        elif (Identifier.is_hcal(z)):
            hcal_descrips.append(descrip)
        elif (Identifier.is_rec_particle(z)):
            rec_particle_descrips.append(descrip)
    print "history connected to node:", id
    print "block", block_descrips
    print " tracks", track_descrips
    print " ecals", ecal_descrips
    print " hcals", hcal_descrips
    print "rec particles", rec_particle_descrips
def __repr__(self):
    ''' Short Block description '''
    # assemble the summary line from the block's counts and pretty id
    template = '{shortname:8} :{prettyid:6}: ecals = {count_ecal} hcals = {count_hcal} tracks = {count_tracks}'
    body = template.format(shortname=self.short_info(),
                           prettyid=Identifier.pretty(self.uniqueid),
                           count_ecal=self.count_ecal(),
                           count_hcal=self.count_hcal(),
                           count_tracks=self.count_tracks())
    return "block:" + body
def __repr__(self):
    ''' Short Block description '''
    # build the formatted summary first, then prefix it
    summary = '{shortname:8} :{prettyid:6}: ecals = {count_ecal} hcals = {count_hcal} tracks = {count_tracks}'.format(
        shortname=self.short_info(),
        prettyid=Identifier.pretty(self.uniqueid),
        count_ecal=self.count_ecal(),
        count_hcal=self.count_hcal(),
        count_tracks=self.count_tracks())
    return "block:" + summary
def summary_of_linked_elems(self, id): # find everything that is linked to this id # and write a summary of what is found # the BFS search returns a list of the ids that are connected to the id of interest BFS = BreadthFirstSearchIterative(self.history_nodes[id], "undirected") # collate the string descriptions track_descrips = [] ecal_descrips = [] hcal_descrips = [] # sim_particle_descrips = [] rec_particle_descrips = [] block_descrips = [] for n in BFS.result: z = n.get_value() obj = self.pfevent.get_object(z) descrip = obj.__str__() # if (Identifier.is_particle(z)): # sim_particle_descrips.append(descrip) if Identifier.is_block(z): block_descrips.append(descrip) elif Identifier.is_track(z): track_descrips.append(descrip) elif Identifier.is_ecal(z): ecal_descrips.append(descrip) elif Identifier.is_hcal(z): hcal_descrips.append(descrip) elif Identifier.is_rec_particle(z): rec_particle_descrips.append(descrip) print "history connected to node:", id print "block", block_descrips print " tracks", track_descrips print " ecals", ecal_descrips print " hcals", hcal_descrips print "rec particles", rec_particle_descrips
def test_papasevent(self):
    '''Exercise PapasEvent collection management: adding collections,
    rejecting duplicates and mixed-type collections, and object lookup.'''
    papasevent = PapasEvent(0)
    # two ecal clusters and two tracks, keyed by their own uids
    ecals = {}
    for i in range(2):
        uid = Identifier.make_id(Identifier.PFOBJECTTYPE.ECALCLUSTER, i, 't', 4.5)
        ecals[uid] = uid
    tracks = {}
    for i in range(2):
        uid = Identifier.make_id(Identifier.PFOBJECTTYPE.TRACK, i, 's', 4.5)
        tracks[uid] = uid
    # one extra ecal cluster whose id we keep for lookup checks
    lastid = Identifier.make_id(Identifier.PFOBJECTTYPE.ECALCLUSTER, 3, 't', 3)
    ecals[lastid] = lastid
    papasevent.add_collection(ecals)
    papasevent.add_collection(tracks)
    #check that adding the same collection twice fails
    self.assertRaises(ValueError, papasevent.add_collection, ecals)
    #check that adding a mixed collection fails
    mixed = ecals.copy()
    mixed.update(tracks)
    self.assertRaises(ValueError, papasevent.add_collection, mixed)
    #get we can get back collections OK
    self.assertTrue(len(papasevent.get_collection('zz')) == 0)  # this one does not exist
    self.assertTrue(len(papasevent.get_collection('et')) == 3)
    #check get_object
    self.assertTrue(Identifier.pretty(papasevent.get_object(lastid)) == 'et3')
    self.assertTrue(papasevent.get_object(499) is None)
def _edge_type(self):
    ''' produces an edge_type string eg "ecal_track"
    the order of id1 an id2 does not matter,
    eg for one track and one ecal the type will always be "ecal_track"
    (and never be a "track_ecal")
    '''
    #consider creating an ENUM instead for the edge_type
    # sorting the short codes makes the pair order-independent
    first, second = sorted((Identifier.type_short_code(self.id1),
                            Identifier.type_short_code(self.id2)))
    names = {'ee': 'ecal_ecal',
             'hh': 'hcal_hcal',
             'tt': 'track_track',
             'eh': 'ecal_hcal',
             'et': 'ecal_track',
             'ht': 'hcal_track'}
    return names.get(first + second, 'unknown')
def __init__(self, element_ids, edges, index, subtype):
    '''
    @param element_ids: list of the uniqueids of the elements to go in this block [id1,id2,...]
    @param edges: is a dictionary of edges, it must contain at least all needed edges.
                  It is not a problem if it contains additional edges as only the ones
                  needed will be extracted
    @param index: index into the collection of blocks into which new block will be added
    @param subtype: used when making unique identifier, will normally be 'r' for
                    reconstructed blocks and 's' for split blocks
    '''
    #make a uniqueid for this block
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.BLOCK,
                                       index,
                                       subtype,
                                       len(element_ids))

    #this will sort by type eg ecal, hcal, track and then by energy (biggest first)
    def sort_key(uid):
        return (Identifier.type_letter(uid), -Identifier.get_value(uid))
    self.element_uniqueids = sorted(element_ids, key=sort_key)

    #sequential numbering of blocks, not essential but helpful for debugging
    self.block_count = PFBlock.temp_block_count
    PFBlock.temp_block_count += 1

    #extract the relevant parts of the complete set of edges and store this within the block
    keys = (Edge.make_key(id1, id2)
            for id1, id2 in itertools.combinations(self.element_uniqueids, 2))
    self.edges = dict((key, edges[key]) for key in keys)
def __init__(self, fccobj):
    '''Build a Particle wrapper around an FCC EDM particle object.

    @param fccobj: FCC EDM particle; charge/pid/status/p4 are read from
                   fccobj.core(), and start/end vertices from
                   fccobj.startVertex()/endVertex() when present.
    '''
    super(Particle, self).__init__(fccobj)
    self.uniqueid = Identifier.make_id(Identifier.PFOBJECTTYPE.PARTICLE)
    self._charge = fccobj.core().charge
    self._pid = fccobj.core().pdgId
    self._status = fccobj.core().status
    # BUGFIX: always define the vertex attributes. Previously they were
    # only assigned inside the hasattr branch, so objects without a
    # startVertex left them undefined -> AttributeError on later access.
    self._start_vertex = None
    self._end_vertex = None
    if hasattr(fccobj, 'startVertex'):
        start = fccobj.startVertex()
        if start.isAvailable():
            self._start_vertex = Vertex(start)
        end = fccobj.endVertex()
        if end.isAvailable():
            self._end_vertex = Vertex(end)
    # four-momentum; SetXYZM takes (px, py, pz, mass)
    self._tlv = TLorentzVector()
    p4 = fccobj.core().p4
    self._tlv.SetXYZM(p4.px, p4.py, p4.pz, p4.mass)