def get_wall(obj):
    """Serialize a wall object and its children into a plain dict.

    For every child the texture, triangle indices, vertices and bulbs are
    extracted via the ``utils`` helpers, and the wall's total vertical
    extent is computed from each child's y-location plus the vertical
    bounding box of its vertices.

    NOTE(review): assumes ``obj.children`` is non-empty — ``min()``/``max()``
    over the collected edges raise ValueError otherwise; confirm callers
    never pass a childless wall.

    Returns a dict with keys ``children`` (list of per-child dicts) and
    ``total_height``.
    """
    children = []
    mins = []  # world-space bottom edge of each child
    maxs = []  # world-space top edge of each child
    for child in obj.children:
        location = {"x": child.location.x, "y": child.location.y}
        texture_path = utils.get_texture(child)
        indices = utils.get_indices(child)
        vertices = utils.get_vertices(child, indices)
        # Vertical bounding box of the child's vertices (local space).
        # (The horizontal edges were previously computed here too but were
        # never used, so those scans have been removed.)
        bottom_edge = min(vertices, key=lambda v: v["pos"]["y"])["pos"]["y"]
        top_edge = max(vertices, key=lambda v: v["pos"]["y"])["pos"]["y"]
        mins.append(child.location.y + bottom_edge)
        maxs.append(child.location.y + top_edge)
        bulbs = utils.get_bulbs(child)
        children.append({
            "type": child['type'],
            "location": location,
            "texture_path": texture_path,
            "indices": indices,
            "vertices": vertices,
            "bulbs": bulbs
        })
    # Overall vertical span across all children.
    total_bottom = min(mins)
    total_top = max(maxs)
    total_height = total_top - total_bottom
    return {
        "children": children,
        "total_height": total_height,
    }
def get_column(obj):
    """Serialize a column object into a dict, normalizing its geometry.

    The vertices are shifted so that the bottom-left corner of their
    bounding box sits at the origin; child locations are shifted by the
    same offset.  ``original_size`` records the bounding box extent
    before normalization.

    Fix: removed a leftover debug ``print`` that dumped every vertex
    position during the normalization loop.

    Returns a dict with keys ``type``, ``original_size``, ``vertices``,
    ``indices``, ``texture_path``, ``bulbs`` and ``children``.
    """
    texture_path = utils.get_texture(obj)
    indices = utils.get_indices(obj)
    vertices = utils.get_vertices(obj, indices)
    bulbs = utils.get_bulbs(obj)

    # Axis-aligned bounding box of the vertices (local space).
    left_edge = min(vertices, key=lambda v: v["pos"]["x"])["pos"]["x"]
    bottom_edge = min(vertices, key=lambda v: v["pos"]["y"])["pos"]["y"]
    right_edge = max(vertices, key=lambda v: v["pos"]["x"])["pos"]["x"]
    top_edge = max(vertices, key=lambda v: v["pos"]["y"])["pos"]["y"]
    original_size = {"x": right_edge - left_edge, "y": top_edge - bottom_edge}

    # Translate all vertices so the bounding box starts at (0, 0).
    for v in vertices:
        v["pos"]["x"] -= left_edge
        v["pos"]["y"] -= bottom_edge

    # Children keep their relative placement under the same translation.
    children = []
    for child in obj.children:
        location = {
            "x": child.location.x - left_edge,
            "y": child.location.y - bottom_edge
        }
        children.append({"type": child['type'], "location": location})

    return {
        "type": obj['type'],
        "original_size": original_size,
        "vertices": vertices,
        "indices": indices,
        "texture_path": texture_path,
        "bulbs": bulbs,
        "children": children
    }
def main(argv):
    """Plot one event from a ROOT data dump (Python 2 script).

    Reads three 2-D histograms per event (raw ADC, PDG truth, true energy
    deposit) written by a LArSoft module, overlays vertex candidates on the
    PDG and ADC panels, and shows all three side by side.
    """
    parser = argparse.ArgumentParser(description='Plot data dumped to ROOT file.')
    parser.add_argument('-i', '--input', help="Input file", default='datadump_hist.root')
    parser.add_argument('-e', '--event', help="Event index", default='0')
    parser.add_argument('-v', '--view', help="view index", default='')
    parser.add_argument('-m', '--module', help="LArSoft module name", default='datadump')
    args = parser.parse_args()

    # Optional view filter: histogram key names embed the view id.
    if args.view != '':
        view_base = 'view_' + args.view
    else:
        view_base = ''

    file0 = TFile(args.input)
    # One key per event for each histogram kind.
    raw_keys = [k.GetName() for k in file0.Get(args.module).GetListOfKeys()
                if view_base + '_raw' in k.GetName()]
    pdg_keys = [k.GetName() for k in file0.Get(args.module).GetListOfKeys()
                if view_base + '_pdg' in k.GetName()]
    dep_keys = [k.GetName() for k in file0.Get(args.module).GetListOfKeys()
                if view_base + '_deposit' in k.GetName()]

    ev_idx = int(args.event)
    if ev_idx >= len(raw_keys):
        print 'event index out of range'
        return

    print len(raw_keys), 'keys in the file, reading event key:', raw_keys[ev_idx]

    dep = hist2array(file0.Get(args.module + '/' + dep_keys[ev_idx]))
    raw = hist2array(file0.Get(args.module + '/' + raw_keys[ev_idx]))
    mc = hist2array(file0.Get(args.module + '/' + pdg_keys[ev_idx]))

    # Low byte of the MC word is the PDG code; higher bits carry vertex flags.
    pdg = mc & 0xFF

    vtx_list = get_vertices(mc)
    # Unpack the per-vertex flag bits.
    vtx_hadr = vtx_list[:,2] & 1
    vtx_pi0 = (vtx_list[:,2] >> 1) & 1
    vtx_decay = (vtx_list[:,2] >> 2) & 1
    vtx_conv = (vtx_list[:,2] >> 3) & 1
    vtx_eend = (vtx_list[:,2] >> 4) & 1
    # Collapse the flags into a single category index used to color markers.
    vtx_list[:,2] = vtx_hadr + 2*vtx_pi0 + 3*vtx_decay + 4*vtx_conv + 5*vtx_eend

    print 'all vtx:', np.count_nonzero(vtx_list[:,2]), \
        'hadr:', np.count_nonzero(vtx_hadr), \
        'pi0:', np.count_nonzero(vtx_pi0), \
        'decay:', np.count_nonzero(vtx_decay), \
        'conv:', np.count_nonzero(vtx_conv), \
        'eend:', np.count_nonzero(vtx_eend)

    fig, ax = plt.subplots(1, 3, figsize=(36, 10))

    # Panel 0: PDG truth with vertex markers (+0.5 centers markers on bins).
    cs = ax[0].pcolor(np.transpose(pdg), cmap='gist_ncar')
    ax[0].scatter(vtx_list[:,0]+0.5, vtx_list[:,1]+0.5, s=50, c=vtx_list[:,2],
                  cmap='rainbow', alpha=0.75)
    ax[0].set_title('PDG')
    fig.colorbar(cs, ax=ax[0])

    # Panel 1: true energy deposit.
    cs = ax[1].pcolor(np.transpose(dep), cmap='jet')
    ax[1].set_title('MC truth deposit')
    fig.colorbar(cs, ax=ax[1])

    # Panel 2: raw ADC with the same vertex markers.
    cs = ax[2].pcolor(np.transpose(raw), cmap='jet')
    ax[2].scatter(vtx_list[:,0]+0.5, vtx_list[:,1]+0.5, s=50, c=vtx_list[:,2],
                  cmap='rainbow', alpha=0.75)
    ax[2].set_title('ADC')
    fig.colorbar(cs, ax=ax[2])

    plt.tight_layout()
    plt.show()
def EM2(model, DATA, epochs, n_iter, update_var=False, pretrain=False):
    """Train `model` with an EM-style loop (or plain SGD for the VAE case).

    Parameters
    ----------
    model : dict of callables/values produced elsewhere in this project
        (keys used here: 'kwargs', 'model', 'train', 'assign', 'S', 'D', 'R',
        'L', 'input2all', 'signs2ineq', 'signs2Ab', 'input2signs', 'varx',
        'varz', 'update_var', 'update_vs', 'update_Ws', 'params').
    DATA : array, first axis indexes samples.
    epochs : number of E/M rounds.
    n_iter : number of M-step sweeps per round.
    update_var : if True, also update the variance parameters in the M step.
    pretrain : if True, initialize the weights from a GLO fit first.

    Returns
    -------
    list of per-epoch NLL means (or per-batch train losses for 'VAE').
    """
    batch_size = model['kwargs']['batch_size']

    # VAE baseline: plain mini-batch training, no EM machinery.
    if model['model'] == 'VAE':
        L = []
        for e in range(epochs):
            for i in range(len(DATA) // batch_size):
                L.append(model['train'](DATA[i * batch_size: (i + 1) * batch_size]))
        return L

    # PRETRAIN WITH GLO: alternately re-estimate latents Z for a random
    # mini-batch (10 steps) and train the generator (4 steps), for a fixed
    # budget of 2000+ iterations.
    if pretrain:
        glo = create_glo(**model['kwargs'])
        error = 1
        cpt = 0
        Z = np.random.randn(DATA.shape[0], model['kwargs']['Ds'][0])
        while 1:
            cpt +=1
            II = np.random.permutation(len(DATA))[:batch_size]
            glo['reset'](Z[II])
            bat = DATA[II]
            for j in range(10):
                z = glo['estimate'](bat)
                Z[II]=z
            for j in range(4):
                glo['train'](bat)
            error = glo['loss'](bat)
            if cpt % 200 == 0:
                print(cpt, error)
            if cpt > 2000:
                break
        # THEN SET IT UP: copy the GLO parameters into the EM model.
        print('Setting pu the weights')  # NOTE(review): typo for 'up' in message
        model['assign'](*glo['params']())

    S, D, R = model['S'], model['D'], model['R']
    z = np.random.randn(S)/10
    # Per-sample, per-region moment accumulators (0th, 1st, 2nd order);
    # sized for at most R regions and reused (zeroed) every epoch.
    m0 = np.zeros((DATA.shape[0], R))
    m1 = np.zeros((DATA.shape[0], R, S))
    m2 = np.zeros((DATA.shape[0], R, S, S))
    m_loss = []
    for e in range(epochs):
        # E step: enumerate the linear regions of the current mapping.
        output, A, b, inequalities, signs = model['input2all'](z)
        regions = utils.search_region(model['signs2ineq'], model['signs2Ab'], signs, model['input2signs'])
        V = []
        for r in regions:
            V.append(utils.get_vertices(regions[r]['ineq'][:, :-1], regions[r]['ineq'][:,-1]))
        print('VERTICES',np.sort(np.unique(V)).round(2))
        if len(regions) > R:
            # More regions found than the model was sized for.
            print('ALARMMM')
            print(model['params']())
        # Pad the region sign patterns up to exactly R rows.
        batch_signs = np.pad(np.array(list(regions.keys())), [[0, R - len(regions)], [0, 0]])
        varx = np.eye(D) * model['varx']()
        varz = np.eye(S) * model['varz']()
        print('varx', np.diag(varx))
        # Zero the accumulators, then fill only the slots for found regions.
        m0 *= 0
        m1 *= 0
        m2 *= 0
        m0[:, :len(regions)], m1[:, :len(regions)], m2[:, :len(regions)] = utils.marginal_moments(DATA, regions,
                                                                                                  varx, varz)[1:]
        # M step: n_iter sweeps of parameter updates given the moments.
        for i in range(n_iter):
            if update_var:
                model['update_var'](batch_signs, DATA, m0, m1, m2)
            # Visit each layer index twice in random order; for each visit,
            # update either the v's or the W's (randn() < 0 is a fair coin).
            for l in np.random.permutation(2 * model['L']) % model['L']:
                if np.random.randn() < 0:
                    model['update_vs'](0.05, l, batch_signs, DATA, m0, m1, m2)
                else:
                    model['update_Ws'](0.05, l, batch_signs, DATA, m0, m1, m2)
        m_loss.append(NLL(model, DATA).mean())
        print('after M step', m_loss[-1])
    return m_loss
# Fragment of a larger diagnostic/plotting scope: names m21, As, m20, Bs, xx,
# p2, m22, Ds, p, N, L, regions, seed, i, ss are defined earlier, outside this
# view.  It appears to cross-check region-wise moments against weighted
# numerical averages over a grid, then plot the density — TODO confirm against
# the enclosing script.
exp = np.einsum('Nns,nds->d', m21, As) + np.einsum('Nn,nd->d', m20, Bs)
mu = m21.sum()
muhat = np.average(xx[:, 0], weights=p2)  # NOTE(review): muhat and exp are computed but unused below
# Sanity prints: 0th, 1st and 2nd moments vs. p2-weighted grid averages.
print(np.average(np.ones(len(p2)), weights=p2), m20.sum())
print(np.average(xx[:, 0], weights=p2), mu)
print(np.average(xx[:, 0]**2, weights=p2), m22.sum())
if Ds[0] == 1:
    # 1-D case: plot the density curve and mark the mean.
    plt.plot(xx, p2, label=r'$p(\boldsymbol{z}|\boldsymbol{x})$')
    plt.axvline(mu, color='g', linestyle='--', label='mean')
else:
    # 2-D case: show the density as an N x N image over [-L, L]^2.
    plt.imshow((np.array(p)).reshape((N, N)), extent=[-L, L, -L, L])
for r in regions:
    # Short vertical ticks at the first two vertex coordinates of each region.
    vertices = utils.get_vertices(regions[r]['ineq'][:, :-1], regions[r]['ineq'][:, -1])
    plt.plot([vertices[0], vertices[0]], [-0.1, 0.1], color='k', linewidth=0.8)
    plt.plot([vertices[1], vertices[1]], [-0.1, 0.1], color='k', linewidth=0.8)
plt.xlim([-4, 4])
ax = plt.gca()
ax.legend()
plt.savefig('images/prior_{}_{}.png'.format(seed, i + int(ss > 0.05)))
plt.close()

# PLOT DATA
plt.figure(figsize=(5, 5))
# Smoke-test script: evaluates utils.phis_w on a few hand-built cdd
# inequality systems and prints the results for visual inspection.
import matplotlib.pyplot as plt
import utils

# Gaussian parameters shared by every test case.
mean_vec = np.zeros(2) + 4.3
cov_mat = np.array([[2.1, 0.3], [0.3, 0.8]])

# Case 1: two half-planes; print alongside the analytic second moment.
mat = cdd.Matrix([[30, 1., 0], [30, 0., 1.1]])
mat.rep_type = cdd.RepType.INEQUALITY
print(utils.phis_w(np.array(mat), mean_vec, cov_mat), np.outer(mean_vec, mean_vec) + cov_mat)
print('\n\n\n')

# Case 2: three half-planes.
mat = cdd.Matrix([[40, 0., 1], [40, -1., 0], [40, 1., -1.]])
mat.rep_type = cdd.RepType.INEQUALITY
print(utils.phis_w(np.array(mat), mean_vec, cov_mat))
print('\n\n\n')

# Case 3: axis-aligned box.
mat = cdd.Matrix([[40, 1., 0], [40, -1., 0], [40, 0., -1.], [40, 0, 1.]])
mat.rep_type = cdd.RepType.INEQUALITY
print(utils.phis_w(np.array(mat), mean_vec, cov_mat))
print('\n\n\n')

# Case 4: box cut by two diagonal half-planes; vertices are printed from the
# raw matrix before the representation type is set (as in the original flow).
mat = cdd.Matrix([[40, 1., 0], [40, -1., 0], [40, 0., -1.], [40, 0, 1.], [40, 1., 1], [40, -1, -1]])
print('v', utils.get_vertices(np.array(mat)))
mat.rep_type = cdd.RepType.INEQUALITY
print('p', utils.phis_w(np.array(mat), mean_vec, cov_mat))
print('\n\n\n')
def main(argv):
    """Build a CNN training set for vertex/decay classification (Python 2).

    Scans dumped events (ROOT files or raw/txt dumps), and for each true
    vertex in the selected view extracts an image patch plus nearby
    "empty" and "on-track" negative patches.  Patches and one-hot labels
    are accumulated into fixed-capacity arrays and saved as .npy files.
    """
    parser = argparse.ArgumentParser(
        description='Makes training data set for various vertex/decay ID')
    parser.add_argument('-c', '--config', help="JSON with script configuration", default='config.json')
    parser.add_argument('-t', '--type', help="Input file format")
    parser.add_argument('-i', '--input', help="Input directory")
    parser.add_argument('-o', '--output', help="Output directory")
    parser.add_argument('-v', '--view', help="view")
    args = parser.parse_args()

    config = read_config(args.config)

    print '#' * 50, '\nPrepare data for CNN'
    INPUT_TYPE = config['prepare_data_vtx_id']['input_type']
    INPUT_DIR = config['prepare_data_vtx_id']['input_dir']
    OUTPUT_DIR = config['prepare_data_vtx_id']['output_dir']
    PATCH_SIZE_W = config['prepare_data_vtx_id']['patch_size_w']
    PATCH_SIZE_D = config['prepare_data_vtx_id']['patch_size_d']
    print 'Using %s as input dir, and %s as output dir' % (INPUT_DIR, OUTPUT_DIR)
    print '#' * 50

    rootModule = config['prepare_data_vtx_id'][
        'module_name']  # larsoft module name used for data dumps in ROOT format
    selected_view_idx = config['prepare_data_vtx_id'][
        'selected_view_idx']  # set the view id
    nearby_empty = config['prepare_data_vtx_id'][
        'nearby_empty']  # number of patches near each vtx, but with empty area in the central pixel
    nearby_on_track = config['prepare_data_vtx_id'][
        'nearby_on_track']  # number of patches on tracks or showers, somewhere close to each vtx
    crop_event = config['prepare_data_vtx_id'][
        'crop_event']  # use true only if no crop on LArSoft level and not a noise dump

    print 'Using', nearby_empty, 'empty and', nearby_on_track, 'on track patches per each verex in view', selected_view_idx

    # Pre-allocated output arrays; filled up to cnt_ind, then truncated on save.
    max_capacity = 300000
    db = np.zeros((max_capacity, PATCH_SIZE_W, PATCH_SIZE_D), dtype=np.float32)
    db_y = np.zeros((max_capacity, 5), dtype=np.int32)

    # Vertex flag bits as encoded in the PDG truth map.
    kHadr = 0x1  # hadronic inelastic scattering
    kPi0 = 0x2  # pi0 produced in this vertex
    kDecay = 0x4  # point of particle decay (except pi0 decays)
    kConv = 0x8  # gamma conversion

    # Counters: total patches and per-category tallies.
    cnt_ind = 0
    cnt_vtx = 0
    cnt_decay = 0
    cnt_gamma = 0
    cnt_nu = 0
    cnt_trk = 0
    cnt_void = 0
    fcount = 0

    # Each entry is (source, list-of-event-keys): ROOT file handles with
    # histogram keys, or a single (directory, filenames) pair for txt dumps.
    event_list = []
    if INPUT_TYPE == "root":
        fnames = [f for f in os.listdir(INPUT_DIR) if '.root' in f]
        for n in fnames:
            rootFile = TFile(INPUT_DIR + '/' + n)
            keys = [
                rootModule + '/' + k.GetName()[:-4]
                for k in rootFile.Get(rootModule).GetListOfKeys()
                if '_raw' in k.GetName()
            ]
            event_list.append((rootFile, keys))
    else:
        keys = [f[:-4] for f in os.listdir(INPUT_DIR) if '.raw' in f
                ]  # only main part of file name, without extension
        event_list.append(
            (INPUT_DIR, keys))  # single entry in the list of txt files

    for entry in event_list:
        folder = entry[0]
        event_names = entry[1]

        for evname in event_names:
            # Event-name convention: underscore-separated fields; indices 2, 8
            # and 10 hold event number, TPC index and view index respectively.
            finfo = evname.split('_')
            evt_no = finfo[2]
            tpc_idx = int(finfo[8])
            view_idx = int(finfo[10])

            if view_idx != selected_view_idx:
                continue

            fcount += 1

            print 'Process event', fcount, evname, 'NO.', evt_no

            # get clipped data, margin depends on patch size in drift direction
            # (Python 2: PATCH_SIZE_D / 2 is integer division).
            raw, deposit, pdg, tracks, showers = get_data(
                folder, evname, PATCH_SIZE_D / 2 + 2, crop_event)
            if raw is None:
                print 'Skip empty event...'
                continue

            vtx = get_vertices(pdg)
            nuvtx = get_nu_vertices(pdg)
            print 'Found', vtx.shape[
                0], 'hadronic vertices/decay', nuvtx.shape[
                    0], 'neutrino vertices'

            for v in range(vtx.shape[0]):
                flags = 0
                if vtx.shape[0] > 0:
                    flags = vtx[v, 2]

                # NOTE(review): nuvtx is indexed with the vtx loop index v and
                # only guarded by nuvtx.shape[0] > 0 — if nuvtx has fewer rows
                # than vtx this raises IndexError; verify the intended pairing.
                nuflags = 0
                if nuvtx.shape[0] > 0:
                    nuflags = nuvtx[v, 2]

                # Keep hadronic vertices, decays, and pi0s with a conversion.
                if (flags & kHadr) > 0 or (flags & kDecay) > 0 or (
                        (flags & kPi0) > 0 and (flags & kConv) > 0):
                    wire = vtx[v, 0]
                    drif = vtx[v, 1]

                    # Clamp the patch window to the event image; skip vertices
                    # whose full-size patch does not fit.
                    x_start = np.max([0, wire - PATCH_SIZE_W / 2])
                    x_stop = np.min([raw.shape[0], x_start + PATCH_SIZE_W])
                    y_start = np.max([0, drif - PATCH_SIZE_D / 2])
                    y_stop = np.min([raw.shape[1], y_start + PATCH_SIZE_D])
                    if x_stop - x_start != PATCH_SIZE_W or y_stop - y_start != PATCH_SIZE_D:
                        continue

                    target = np.zeros(
                        5, dtype=np.int32
                    )  # [decay, hadronic_vtx, g_conversion, nu_primary, not_vtx]
                    if nuflags > 0:
                        target[3] = 1
                        cnt_nu += 1
                    elif (flags & kDecay) > 0:
                        target[0] = 1
                        cnt_decay += 1
                    elif (flags & kHadr) > 0:
                        target[1] = 1
                        cnt_vtx += 1
                    elif (flags & kConv) > 0:
                        target[2] = 1
                        cnt_gamma += 1

                    patch = get_patch(raw, wire, drif, PATCH_SIZE_W, PATCH_SIZE_D)
                    if cnt_ind < max_capacity:
                        db[cnt_ind] = patch
                        db_y[cnt_ind] = target
                        cnt_ind += 1
                    else:
                        break

                    # Negative samples: up to nearby_empty patches centered on
                    # an empty pixel near (but not on) the vertex.
                    n_empty = 0
                    n_trials = 0
                    while n_empty < nearby_empty and n_trials < 500:
                        wi = np.random.randint(x_start + 1, x_stop - 1)
                        di = np.random.randint(y_start + 1, y_stop - 1)
                        if (wi < wire - 1 or wi > wire + 1) and (di < drif - 2 or di > drif + 2):
                            if tracks[wi, di] == 0 and showers[wi, di] == 0:
                                if cnt_ind < max_capacity:
                                    patch = get_patch(raw, wi, di,
                                                      PATCH_SIZE_W, PATCH_SIZE_D)
                                    target = np.zeros(5, dtype=np.int32)
                                    target[4] = 1
                                    db[cnt_ind] = patch
                                    db_y[cnt_ind] = target
                                    cnt_void += 1
                                    cnt_ind += 1
                                    n_empty += 1
                                else:
                                    break
                        n_trials += 1

                    # Negative samples: up to nearby_on_track patches centered
                    # on a track/shower pixel near the vertex.
                    n_track = 0
                    n_trials = 0
                    while n_track < nearby_on_track and n_trials < 500:
                        wi = np.random.randint(x_start + 1, x_stop - 1)
                        di = np.random.randint(y_start + 1, y_stop - 1)
                        if (wi < wire - 1 or wi > wire + 1) and (di < drif - 2 or di > drif + 2):
                            if tracks[wi, di] == 1 or showers[wi, di] == 1:
                                if cnt_ind < max_capacity:
                                    patch = get_patch(raw, wi, di,
                                                      PATCH_SIZE_W, PATCH_SIZE_D)
                                    target = np.zeros(5, dtype=np.int32)
                                    target[4] = 1
                                    db[cnt_ind] = patch
                                    db_y[cnt_ind] = target
                                    cnt_trk += 1
                                    cnt_ind += 1
                                    n_track += 1
                                else:
                                    break
                        n_trials += 1

    print 'Total size', cnt_ind, ':: hadronic:', cnt_vtx, 'decays:', cnt_decay, 'nu-primary:', cnt_nu, 'g-conv:', cnt_gamma, 'empty:', cnt_void, 'on-track:', cnt_trk

    # Save only the filled portion of the pre-allocated arrays.
    np.save(OUTPUT_DIR + '/db_view_' + str(selected_view_idx) + '_x',
            db[:cnt_ind])
    np.save(OUTPUT_DIR + '/db_view_' + str(selected_view_idx) + '_y',
            db_y[:cnt_ind])