-
Notifications
You must be signed in to change notification settings - Fork 0
/
patching.py
executable file
·123 lines (101 loc) · 3.42 KB
/
patching.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
# --- Third-party and project imports --------------------------------------
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
import os
from sklearn.preprocessing import StandardScaler
# NOTE(review): sklearn.cluster.dbscan_ is a PRIVATE module that was removed
# in scikit-learn 0.24; the public replacement is
# `from sklearn.cluster import DBSCAN`. Confirm the pinned scikit-learn
# version before upgrading this environment.
from sklearn.cluster.dbscan_ import dbscan
from scipy.optimize import differential_evolution
# TrackML challenge helpers: event loading, scoring, and plotting utilities.
from trackml.dataset import load_event, load_dataset
from trackml_utils import score_event_fast, extend, create_one_event_submission
from trackml_visual import show_3Dplot, plot_tracks_from_particle_id, plot_tracks_from_submission
import math, time
import multiprocessing as mp
from tqdm import tqdm
import pp  # NOTE(review): 'pp' (Parallel Python) looks unused in this file — verify before removing.
# Event data directories, relative to the working directory.
path_to_test = "data/test"
path_to_train = "data/train_1"
def main():
    """Load one training event, cluster its hits in overlapping shells of
    radial distance ``d``, and plot the resulting track candidates in
    (a, r, z) space.

    Side effects: reads event files from ``path_to_train``, prints progress
    and per-shell scores, and opens an interactive matplotlib window.
    """
    print('\n')
    print('-'*50)
    print('Starting job...')
    print('-'*50)
    t0 = time.time()

    # Load a single event and join the ground truth onto the hit table so
    # downstream scoring can see both hit coordinates and particle ids.
    event_id = '000001000'
    event_path = os.path.join(path_to_train, 'event'+event_id)
    hits, cells, particles, truth = load_event(event_path)
    truth = truth.merge(hits, on=['hit_id'], how='left')

    # Derived cylindrical/spherical coordinates used for clustering:
    # r = transverse radius, d = distance from origin, a = azimuthal angle.
    df = truth.copy()
    df = df.assign(r = np.sqrt( df.x**2 + df.y**2))
    df = df.assign(d = np.sqrt( df.x**2 + df.y**2 + df.z**2 ))
    df = df.assign(a = np.arctan2(df.y, df.x))
    df = df.assign(cosa= np.cos(df.a))
    df = df.assign(sina= np.sin(df.a))
    df = df.assign(phi = np.arctan2(df.z, df.r))

    # BUGFIX: the original `ax = Axes3D(fig)` on top of plt.subplots() left
    # an unused 2D axes behind and is rejected by matplotlib >= 3.5; create
    # the 3D axes through the supported add_subplot(projection=...) API.
    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(projection='3d')

    # Cluster in overlapping shells of d so that a track crossing a shell
    # boundary is still seen whole in at least one pass.
    d_delta = 200
    overlap = 0.3
    its = math.ceil(1050 / d_delta)
    for i in range(its):
        # Clamp the lower edge at 0 for the first (overlap-extended) shell.
        start = max(0, (i * d_delta) - (d_delta * overlap))
        end = (i * d_delta) + d_delta
        print(start, end)
        seed_tracks(event_id, df, start, end, ax)

    ax.set_xlabel('a')
    ax.set_ylabel('r')
    ax.set_zlabel('z (mm)')
    plt.show()

    print('-'*50)
    print('Success!')
    t1 = time.time()
    print('Total time', (t1-t0)/60)
    print('-'*50)
    print('\n'*2)
##### IDEAS
#
# Perform dbscan on different ranges of d that overlap
# Then determine which groups share hits
# Group smaller clusters together into final tracks
#
#
#
def seed_tracks(event_id, df, start_d, end_d, ax):
    """Cluster the hits whose distance ``d`` lies in (start_d, end_d) into
    track candidates with DBSCAN, score them, and plot a sample of tracks.

    Parameters:
        event_id: event identifier string (e.g. '000001000'); stored as an
            int in the submission frame.
        df: truth+hits DataFrame carrying at least columns
            hit_id, particle_id, d, r, a, z (as built in main()).
        start_d, end_d: exclusive bounds of the d-shell to cluster.
        ax: a 3D matplotlib axes to draw the sampled tracks on.

    Side effects: prints the shell's score and plots onto ``ax``.
    """
    # Public DBSCAN API; the module-level import pulls from the private
    # sklearn.cluster.dbscan_ module, which no longer exists in modern
    # scikit-learn. With min_samples=1 every hit gets a (non-noise) label,
    # so labels match the old functional dbscan() output.
    from sklearn.cluster import DBSCAN

    # BUGFIX: .copy() — the original assigned a new column into a .loc
    # slice (a view), triggering SettingWithCopyWarning and risking the
    # 'l' column silently not being written.
    seed = df.loc[(df.d > start_d) & (df.d < end_d)].copy()

    # Cluster in (a, scaled z/r) space; the 0.1 factor down-weights the
    # longitudinal term relative to the azimuthal angle.
    a = seed.a.values.astype(np.float32)
    z = seed.z.values.astype(np.float32)
    r = seed.r.values.astype(np.float32)
    data = np.column_stack([a, z / r * 0.1])
    l = DBSCAN(eps=0.01, min_samples=1).fit_predict(data)

    seed['l'] = pd.Series(l, index=seed.index)

    # Build a per-shell submission and score it against the truth columns
    # already present in `seed`.
    submission = pd.DataFrame(
        columns=['event_id', 'hit_id', 'track_id'],
        data=np.column_stack(([int(event_id)] * len(seed), seed.hit_id.values, l)),
    ).astype(int)
    score = score_event_fast(seed, submission)
    print(score)

    # Plot every 100th multi-hit track candidate to keep the figure legible.
    predicted_tracks, counts = np.unique(l, return_counts=True)
    predicted_tracks = predicted_tracks[counts > 1]
    for predicted_track in predicted_tracks[::100]:
        track_hits = seed[seed.l == predicted_track]
        ax.plot(xs=track_hits.a,
                ys=track_hits.r,
                zs=track_hits.z)
main()