def read_data(batch_size, df, train=True):
    transform_train_test = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    # divide the data
    if train:
        train_df = df.iloc[trainIndices.indices]
        train_df = train_df[train_df['comment'].notna()]
        caption_len = captionLength(train_df)
        train_indices, random_indices = randomSelect(caption_len, batch_size)
        dsat = dataset(train_df, random_indices, transform_train_test)
        #check(train_df, train_indices)
        train_sampler = torch.utils.data.SubsetRandomSampler(train_indices)
        batchSampler = torch.utils.data.BatchSampler(train_sampler, batch_size, drop_last=False)
        train_loader = torch.utils.data.DataLoader(dsat, batch_sampler=batchSampler)
        return train_loader, train_df
    else:
        test_df = df.iloc[testIndices.indices]
        dsat_test = dataset(test_df, transforms=transform_train_test)
        caption_len = captionLength(test_df)
        test_indices, _ = randomSelect(caption_len, 1)
        test_sampler = torch.utils.data.SubsetRandomSampler(test_indices)
        batchSampler = torch.utils.data.BatchSampler(test_sampler, batch_size=1, drop_last=False)
        test_loader = torch.utils.data.DataLoader(dsat_test, batch_sampler=batchSampler)
        return test_loader
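# Hypothetical usage sketch: read_data() depends on the module-level
# trainIndices/testIndices splits and on a DataFrame with a 'comment' column,
# so captions_df below is an assumed stand-in for illustration only.
def _example_usage(captions_df):
    train_loader, train_df = read_data(batch_size=32, df=captions_df, train=True)
    test_loader = read_data(batch_size=32, df=captions_df, train=False)
    return train_loader, train_df, test_loader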
def main():
    print('Number of arguments: ', len(sys.argv), 'arguments.')
    print('Argument list:', str(sys.argv))
    filename = sys.argv[1]
    print("Input file: ")
    print(filename)
    MB_FullJets_R04 = dataset("MBFullR04", NFIN=0, filename=filename,
                              directory='AliJJetJtTask/AliJJetJtHistManager',
                              color=1, style=24, rebin=5)
    Triggered_FullJets_R04 = dataset(
        "TriggeredFullR04", NFIN=0, filename=filename,
        directory='AliJJetJtTask_kEMCEJE/AliJJetJtHistManager',
        color=2, style=24, rebin=5)
    compareSetsWithRatio((MB_FullJets_R04, Triggered_FullJets_R04),
                         'JetConeJtWeightBin')
    plt.savefig("PythonFigures/MBvsTriggeredFullJetsR04JetConeJt.pdf",
                format='pdf')  #Save figure
    plt.show()  #Draw figure on screen
def demo(save, depth=100, growth_rate=12, efficient=True, valid_size=5000,
         n_epochs=300, batch_size=64, seed=None):
    """
    A demo to show off training of efficient DenseNets.
    Trains and evaluates a DenseNet-BC (small-input variant) on the
    4-class dataset returned by genconfig().

    Args:
        save (str) - path to save the model to (default /tmp)
        depth (int) - depth of the network (number of convolution layers) (default 100)
        growth_rate (int) - number of features added per DenseNet layer (default 12)
        efficient (bool) - use the memory efficient implementation? (default True)
        valid_size (int) - size of validation set (default 5000)
        n_epochs (int) - number of epochs for training (default 300)
        batch_size (int) - size of minibatch (default 64)
        seed (int) - manually set the random seed (default None)
    """
    # Get densenet configuration
    if (depth - 4) % 3:
        raise Exception('Invalid depth')
    block_config = [(depth - 4) // 6 for _ in range(3)]

    tr_dt, tr_lb, te_dt, te_lb = genconfig()
    tr_set = dataset(tr_dt, tr_lb)
    te_set = dataset(te_dt, te_lb)
    if valid_size:
        indices = torch.randperm(len(tr_set))
        train_indices = indices[:len(indices) - valid_size]
        valid_indices = indices[len(indices) - valid_size:]
        train_set = torch.utils.data.Subset(tr_set, train_indices)
        valid_set = torch.utils.data.Subset(tr_set, valid_indices)
    else:
        train_set = tr_set
        valid_set = None

    # Models
    model = DenseNet(
        growth_rate=growth_rate,
        block_config=block_config,
        num_classes=4,
        small_inputs=True,
        efficient=efficient,
    )
    print(model)

    # Make save directory
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)

    # Train the model
    train(model=model, train_set=train_set, valid_set=valid_set,
          test_set=te_set, save=save, n_epochs=n_epochs,
          batch_size=batch_size, seed=seed)
    print('Done!')
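# Sanity sketch of the depth -> block_config arithmetic above: for the
# default depth=100 (a DenseNet-BC depth), each of the three dense blocks
# gets (100 - 4) // 6 = 16 bottleneck layers.
assert [(100 - 4) // 6 for _ in range(3)] == [16, 16, 16]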
def __init__(self, saver_model_filename, scaler_filename):
    self.saver_model_filename = saver_model_filename
    self.scaler = self.get_scaler(scaler_filename)
    self.data = dataset(mat_files={
        "train": ["data4/train_zoomout_0.mat"],
        "test": ["inference_result/zoomout_test_1_200.mat"]
    }, scaler=self.scaler)

    #tf graph input
    self.input_x = tf.placeholder(shape=[None, 12416], dtype=tf.float32)
    self.output_y = tf.placeholder(shape=[None, 21], dtype=tf.float32)
    self.is_training = tf.placeholder(dtype=tf.bool)
    self.dropout_prob = tf.placeholder(dtype=tf.float32)

    #Network parameters
    self.n_input = 12416
    self.n_hidden_1 = 1024
    self.n_hidden_2 = 1024
    self.num_class = 21

    #Store layer weights and biases
    self.weights = {
        'W1': tf.Variable(tf.random_normal([self.n_input, self.n_hidden_1])),
        'W2': tf.Variable(tf.random_normal([self.n_hidden_1, self.n_hidden_2])),
        'W3': tf.Variable(tf.random_normal([self.n_hidden_2, self.num_class]))
    }
    self.biases = {
        'b1': tf.Variable(tf.random_normal([self.n_hidden_1])),
        'b2': tf.Variable(tf.random_normal([self.n_hidden_2])),
        'b3': tf.Variable(tf.random_normal([self.num_class]))
    }
def __init__(self):
    self.learning_rate = 0.001
    self.learning_rate_decay = 0.1
    self.weight_decay = 0.1
    self.batch_num = 100
    self.batch_num_t = 1000
    self.data = dataset()
    #self.data.decimate_bg_sp(0.97)
    #self.data.decimate_bg_sp(0,category="test")
    self.f_weights_v = []
    self.count = 0
    self.num_class = 21
    self.histogram = self.data.get_histogram()
    print("histogram: %s" % str(self.histogram))
    self.g = {'L2': 0}
    for one in range(self.num_class):
        key = str(one)
        self.f_weights_v.append(self.histogram[key])
        self.count += self.histogram[key]
    # inverse-frequency class weights: total count / per-class count
    self.f_weights_v = list(map(lambda x: self.count / x, self.f_weights_v))
    print("weights: %s" % str(self.f_weights_v))
    self.f_weights_v = np.array(self.f_weights_v)

    #tf graph input
    self.input_x = tf.placeholder(shape=[None, 12416], dtype=tf.float32)
    self.output_y = tf.placeholder(shape=[None, 21], dtype=tf.float32)
    self.is_training = tf.placeholder(dtype=tf.bool)
    self.dropout_prob = tf.placeholder(dtype=tf.float32)
    #self.is_training = True

    #Network parameters
    self.n_input = 12416
    self.n_hidden_1 = 8192
    self.n_hidden_2 = 4096
    self.n_hidden_3 = 2048
    self.n_hidden_4 = 1024
    self.n_hidden_5 = 1024
    self.num_class = 21

    #Store layer weights and biases
    self.weights = {
        'W1': tf.Variable(tf.random_normal([self.n_input, self.n_hidden_1])),
        'W2': tf.Variable(tf.random_normal([self.n_hidden_1, self.n_hidden_2])),
        'W3': tf.Variable(tf.random_normal([self.n_hidden_2, self.n_hidden_3])),
        'W4': tf.Variable(tf.random_normal([self.n_hidden_3, self.n_hidden_4])),
        'W5': tf.Variable(tf.random_normal([self.n_hidden_4, self.n_hidden_5])),
        'W6': tf.Variable(tf.random_normal([self.n_hidden_5, self.num_class]))
    }
    self.biases = {
        'b1': tf.Variable(tf.random_normal([self.n_hidden_1])),
        'b2': tf.Variable(tf.random_normal([self.n_hidden_2])),
        'b3': tf.Variable(tf.random_normal([self.n_hidden_3])),
        'b4': tf.Variable(tf.random_normal([self.n_hidden_4])),
        'b5': tf.Variable(tf.random_normal([self.n_hidden_5])),
        'b6': tf.Variable(tf.random_normal([self.num_class]))
    }
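# Numeric sketch of the inverse-frequency class weights built above (the toy
# histogram is an assumption, not real data): with counts {"0": 80, "1": 20},
# each weight is total_count / class_count, so the rare class is upweighted.
toy_histogram = {"0": 80, "1": 20}
toy_count = sum(toy_histogram.values())
toy_weights = [toy_count / toy_histogram[str(c)] for c in range(2)]
assert toy_weights == [1.25, 5.0]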
def run_s22ab():
    '''Run s22 test case.'''
    import time
    t0 = time.time()
    train_set = dataset('data/s22-ab.xyz')
    model = soap(lmax=4, nmax=6, sigma=0.3, rcut=5.0, zeta=4,
                 sigma_nu_energy=0.003)
    model.fit(train_set)
    test_set = train_set
    true_energies = test_set.total_energy_list
    model_energies = model.total_energies(test_set)
    df = pd.DataFrame(list(zip(true_energies, model_energies)),
                      columns=['True', 'Model'])
    df['Diff'] = df['Model'] - df['True']
    print(df, end='\n')
    print(model)
    from sklearn.metrics import mean_absolute_error
    print('Mean Absolute Error (MAE) = {0.real:.4f}'.format(
        mean_absolute_error(df['True'], df['Model'])))
    if calc_f_from_rep:
        model_forces = model.total_forces_from_e(test_set)
        print(model_forces)
    t1 = time.time()
    print('Elapsed time = {0.real:.0f} seconds'.format(t1 - t0))
def test():
    octree_gt, label = dataset(FLAGS.test_data, 1)
    code = octree_encoder(octree_gt, FLAGS.depth, nout=128, training=False,
                          reuse=False)
    octree = octree_decode_shape(code, FLAGS.depth, training=False, reuse=False)

    ckpt = tf.train.latest_checkpoint(FLAGS.ckpt)
    assert ckpt is not None
    print('testing with ', ckpt)
    tf_saver = tf.train.Saver()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        tf_saver.restore(sess, ckpt)
        tf.summary.FileWriter(FLAGS.logdir, sess.graph)
        for i in range(FLAGS.test_iter):
            print("iter: ", i)
            origin, reconstructed = sess.run([octree_gt, octree])
            with open(FLAGS.logdir + ('/%04d_input.octree' % i), "wb") as f:
                f.write(origin.tobytes())
            with open(FLAGS.logdir + ('/%04d_output.octree' % i), "wb") as f:
                f.write(reconstructed.tobytes())
def main():
    print('Number of arguments: ', len(sys.argv), 'arguments.')
    print('Argument list:', str(sys.argv))
    filename1 = '~/OneDrive/work/032.JTAnalysis/Unfolding/RooUnfold/CF_pPb_legotrain/legotrain_CF_pPb_CF_pPb-1209_20170807_LHC13bcde.root'
    filename2 = '~/OneDrive/work/032.JTAnalysis/Unfolding/RooUnfold/CF_pp_legotrain/legotrain_CF_pp_1405_20170810-7TeV_LHC10_p2_AOD147.root'
    print("Input file: ")
    print(filename1)
    Mixed_FullJets_R04 = datasetMixed(
        "FullR04", NFIN=0, range=5, filename=filename1,
        directory='AliJJetJtTask/AliJJetJtHistManager',
        directory2='AliJJetJtTask_kEMCEJE/AliJJetJtHistManager',
        color=2, style=24, rebin=5)
    pp_FullJets_R04 = dataset("ppFullR04", NFIN=0, filename=filename2,
                              directory='AliJJetJtTask/AliJJetJtHistManager',
                              color=1, style=24, rebin=5)
    fig, axs = defs.makegrid(4, 2, xlog=True, ylog=True, d=d, shareY=True)
    signal, jetPt = Mixed_FullJets_R04.getSubtracted('JetConeJtWeightBin',
                                                     'BgJtWeightBin',
                                                     jetpt=True)
    axs = axs.reshape(8)
    axs[1].text(0.02, 0.0005,
                d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] + '\n' +
                d['trigger'] + '\n' + Mixed_FullJets_R04.name() + '\n Jet Cone',
                fontsize=7)
    for jT, pT, ax, i in zip(signal[1:], jetPt[1:], axs, range(0, 9)):
        rplt.errorbar(jT, xerr=False, emptybins=False, axes=ax,
                      label=Mixed_FullJets_R04.name(), fmt='o')  #Plot jT histogram
        ax.text(0.3, 1e2,
                r'$p_{{T,\mathrm{{jet}}}}$:'
                '\n'
                r' {:02d}-{:02d} GeV'.format(pT[0], pT[1]))
        if (i == 0):
            ax.set_xlim([0.01, 20])  #Set x-axis limits
            ax.set_ylim([5e-4, 2e3])  #Set y-axis limits
    axs[0].legend(loc='lower left')
    #plt.savefig("PythonFigures/MixedFullJetsR04JetConeJtSignal.pdf",format='pdf') #Save figure
    plt.show()  #Draw figure on screen
def read_data():
    transformation = torchvision.transforms.Compose([
        torchvision.transforms.ToPILImage(),
        torchvision.transforms.Resize((224, 224)),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ColorJitter(0.2, 0.75),
        torchvision.transforms.ToTensor(),
    ])
    data = dataset(transformation)
    # target split: roughly 0.7 train / 0.2 validation / 0.1 test
    train_size = 0.7
    val_size = 0.2
    test_size = 0.1
    indices = GroupShuffleSplit(n_splits=1, train_size=0.8, random_state=42)
    indices_train_validation = indices.split(data.imageName,
                                             data.newSteeringAngle,
                                             groups=data.newSteeringAngle)
    train, validation = list(indices_train_validation)[0]
    indices_test = GroupShuffleSplit(n_splits=1, test_size=0.1, train_size=0.9,
                                     random_state=42)
    new_img = list(map(lambda x: data.imageName[x], train))
    new_steer = list(map(lambda x: data.newSteeringAngle[x], train))
    indices_train_test = indices_test.split(new_img, new_steer, groups=new_steer)
    train, test = list(indices_train_test)[0]
    train_indices = torch.utils.data.SubsetRandomSampler(train)
    val_indices = torch.utils.data.SubsetRandomSampler(validation)
    test_indices = torch.utils.data.SubsetRandomSampler(test)
    '''indices = np.arange(0,len(data.steeringAngle)+1)'''
    '''train_indices, test_indices, val_indices = indices[:40210],indices[40211:44677],indices[44677:]'''
    batch_sample_train = torch.utils.data.BatchSampler(train_indices, batch_size=32, drop_last=False)
    batch_sample_val = torch.utils.data.BatchSampler(val_indices, batch_size=32, drop_last=False)
    batch_sample_test = torch.utils.data.BatchSampler(test_indices, batch_size=32, drop_last=False)
    train_data = torch.utils.data.DataLoader(data, batch_sampler=batch_sample_train)
    val_data = torch.utils.data.DataLoader(data, batch_sampler=batch_sample_val)
    test_data = torch.utils.data.DataLoader(data, batch_sampler=batch_sample_test)
    return train_data, val_data, test_data
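# Usage sketch (an assumption for illustration: each sample is taken to yield
# an image tensor and a steering-angle target, as the attributes above suggest):
def _example_epoch():
    train_data, val_data, test_data = read_data()
    for batch in train_data:
        pass  # a forward/backward pass over each 32-sample batch would go here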
def train(sess, dataset, min_opacity=.15, max_opacity=.4):
    global_step = tf.Variable(0, name='global_step', trainable=False)
    with tf.device('/cpu:0'):
        next_image, iterator_init = dataset()
    masks = batch_masks(global_step, next_image.shape.as_list()[1],
                        next_image.shape.as_list()[2], min_opacity, max_opacity)
    image_w = tf.clip_by_value(next_image - masks, 0, 1)
    predictions = model(image_w, True) * selection_margin(masks, 4)
    tf.summary.image('masks', predictions)

    # Define loss
    image_mask = -(image_w - next_image)  # Mask after application on the image
    abs_loss = tf.losses.absolute_difference(predictions, image_mask)**.5
    tf.losses.add_loss(abs_loss)
    loss = tf.losses.get_total_loss(True)
    tf.summary.scalar('loss', loss)

    # Optimizer
    learning_rate = tf.train.polynomial_decay(FLAGS.learning_rate, global_step,
                                              decay_steps=60000,
                                              end_learning_rate=.0005)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(
            loss, global_step=global_step)

    # Training loop
    sess.run(tf.global_variables_initializer())
    iterator_init(sess)
    sess.run(tf.tables_initializer())
    saver = tf.train.Saver()
    summaries = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph)
    for i in range(1, 2 if DEBUG else int(1e6)):
        if not DEBUG:
            _, summaries_, global_step_ = sess.run(
                [train_op, summaries, global_step])
            train_writer.add_summary(summaries_, global_step_)
            # Save model
            if i % 2000 == 0:
                path = saver.save(sess, "/tmp/model.ckpt")
                print(i, 'Saving at', path)
        else:
            start_time = time()
            _, loss_, predictions_ = sess.run([train_op, loss, predictions])
            batch_time = 1000 * (time() - start_time) / FLAGS.batch_size
            print('Time %dms, Loss %f' % (batch_time, loss_))
    return
def __init__(self, sess, dataset_name="cora", purpose="classification",
             linkpred_ratio=0.1, path_length=10, num_paths=100, window_size=4,
             batch_size=100, neg_size=8, learning_rate=0.005, optimizer="Adam",
             embedding_dims=30, save_path="embeddings/GraLSP", num_steps=10000,
             num_skips=5, hidden_dim=100, num_neighbor=40, anonym_walk_len=8,
             walk_loss_lambda=0.1, walk_dim=30, p=0.25, q=1):
    self.sess = sess
    self.batch_size = batch_size
    self.dataset_name = dataset_name
    self.path_length = path_length
    self.num_paths = num_paths
    self.window_size = window_size
    self.neg_size = neg_size
    self.learning_rate = learning_rate
    self.optimizer = optimizer
    self.embedding_dims = embedding_dims
    self.save_path = save_path + "/" + self.dataset_name
    self.num_steps = num_steps
    self.num_skips = num_skips
    self.hidden_dim = hidden_dim
    self.num_neighbor = num_neighbor
    self.anonym_walk_len = anonym_walk_len
    self.walk_loss_lambda = walk_loss_lambda
    self.purpose = purpose
    self.linkpred_ratio = linkpred_ratio
    self.walk_dim = walk_dim
    self.p = p
    self.q = q
    self.start_time = time.time()
    if not os.path.exists(self.save_path + "/" + str(self.embedding_dims)):
        os.makedirs(self.save_path + "/" + str(self.embedding_dims))
    self.Dataset = dataset(self.dataset_name, self.purpose, self.linkpred_ratio,
                           self.path_length, self.num_paths, self.window_size,
                           self.batch_size, self.neg_size, self.num_skips,
                           anonym_walk_len=self.anonym_walk_len, p=self.p,
                           q=self.q)
    self.num_nodes = self.Dataset.num_nodes
    self.feature_dims = len(self.Dataset.node_features[0])
    self.node_features = self.Dataset.node_features
    self.num_anonym_walk_types = len(self.Dataset.node_normalized_walk_distr[0])
    print(self.num_anonym_walk_types)
    print(self.Dataset.node_anonym_walktypes.shape)
    self.build_model()
    print("[%.2fs]Finish sampling, begin training" % (time.time() - self.start_time))
    self.saver = tf.train.Saver()
def __init__(self, params):
    self.dataset_name = params["dataset"]
    self.window_size = params["TopicGCN"]["window_size"]
    self.neg_size = params["TopicGCN"]["neg_size"]
    self.batch_size = params["TopicGCN"]["batch_size"]
    self.learning_rate = params["TopicGCN"]["learning_rate"]
    self.num_steps = params["TopicGCN"]["max_training_steps"]
    self.hidden_dim = params["TopicGCN"]["hidden_dim"]
    self.num_neighbor = params["TopicGCN"]["num_neighbor"]
    self.p = params["TopicGCN"]["p"]
    self.q = params["TopicGCN"]["q"]
    self.flag_input_node_feature = params["input_node_feature"]
    if self.flag_input_node_feature == "True":
        # combined model: the two halves are concatenated at the end
        self.embedding_dims = params["TopicGCN"]["embedding_dims"] // 2
    else:
        self.embedding_dims = params["TopicGCN"]["embedding_dims"]
    self.save_path = os.path.join("../data/output", self.dataset_name, "model")
    if not os.path.exists(self.save_path):
        os.makedirs(self.save_path)
    self.sess = tf.Session()
    self.start_time = time.time()
    self.Dataset = dataset(params)
    self.num_nodes = self.Dataset.num_nodes
    self.feature_dims = len(self.Dataset.node_features[0])
    # if the original features are not input, self.node_features will be topic features
    self.node_features = self.Dataset.node_features
    if self.flag_input_node_feature == "True":
        self.node_topic_features = self.Dataset.node_topic_features
    self.build_model()
    print("[%.2fs] Finish sampling, begin training ..." %
          (time.time() - self.start_time))
def run_s22():
    '''Run s22 test case.'''
    import time
    t0 = time.time()
    train_set = dataset('data/s22.xyz')
    model = soap(lmax=4, nmax=6, sigma=0.3, rcut=5.0, zeta=4,
                 sigma_nu_energy=0.003)
    model.fit(train_set)
    test_set = train_set
    true_energies = test_set.total_energy_list
    model_energies = model.total_energies(test_set)
    df = pd.DataFrame(list(zip(true_energies, model_energies)),
                      columns=['True', 'Model'])
    df['Diff'] = df['Model'] - df['True']
    # rows come in three blocks of 22: subsystem A, subsystem B, complex AB
    df1, df2, df3 = df[:22], df[22:44], df[44:]
    df1.index += 1
    df2.index = df1.index
    df3.index = df1.index
    df4 = df1.join(df2, lsuffix='_A', rsuffix='_B').join(df3)
    binding_energy = df1 + df2 - df3
    df5 = df4.join(binding_energy, lsuffix='_AB', rsuffix='_dE')
    print(df5, end='\n')
    print(model)
    from sklearn.metrics import mean_absolute_error
    print('Mean Absolute Error (MAE) = {0.real:.4f}'.format(
        mean_absolute_error(df5['True_dE'], df5['Model_dE'])))
    if calc_f_from_rep:
        model_forces = model.total_forces_from_e(test_set)
        print(model_forces)
    t1 = time.time()
    print('Elapsed time = {0.real:.0f} seconds'.format(t1 - t0))
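# Toy check of the binding-energy bookkeeping above (numbers illustrative
# only): with monomer energies E_A = -1.0 and E_B = -2.0 and complex energy
# E_AB = -3.5, the binding energy is E_A + E_B - E_AB = 0.5.
assert (-1.0) + (-2.0) - (-3.5) == 0.5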
#!/usr/bin/env python
import cv2  # needed below for cv2.HOGDescriptor (import was missing)
import numpy as np
from sklearn import svm
from sklearn.externals import joblib
from iccv07 import *
from dataset import *

N_pos = 2500
N_neg = 12000
dataset_name = "SVMs/newest/_svm_"

#train_set = iccv07(seq=3)
train_set = dataset("Mixed_data_set")
test_set = iccv07(seq=1)
pos_tr_imgs = train_set.get_pos_images(N_pos)
neg_tr_imgs = train_set.get_neg_images(N_neg)
samples = np.vstack((np.array(neg_tr_imgs), np.array(pos_tr_imgs)))
print('Number of positive training images: ', len(pos_tr_imgs))
print('Number of negative training images: ', len(neg_tr_imgs))


# calculate HOG features
def computeHOG(data):
    hog = cv2.HOGDescriptor()
    X = np.zeros((len(data), 3780))
    i = 0
    # The original snippet ends here; the loop below is an assumed minimal
    # completion that fills one 3780-dim descriptor row per image.
    for img in data:
        X[i] = hog.compute(img).ravel()
        i += 1
    return X
def main():
    global options
    ds = dataset(options.classfile, options.featuredir, options.outputfile,
                 options.verbose)
V = dataset.vocab_size
dim_model = 512
fpred = open('pred.txt', 'w')
fref = open('ref.txt', 'w')
graph_pool = GraphPool()
model = make_model(V, V, N=args.N, dim_model=dim_model)
with open('checkpoints/{}.pkl'.format(exp_setting), 'rb') as f:
    model.load_state_dict(
        th.load(f, map_location=lambda storage, loc: storage))
model = model.to(device)
model.eval()
test_iter = dataset(graph_pool, mode='test', batch_size=args.batch,
                    device=device, k=k)
for i, g in enumerate(test_iter):
    with th.no_grad():
        output = model.infer(g, dataset.MAX_LENGTH, dataset.eos_id, k,
                             alpha=0.6)
    for line in dataset.get_sequence(output):
        if args.print:
            print(line)
        print(line, file=fpred)
for line in dataset.tgt['test']:
    print(line.strip(), file=fref)
def main():
    print('Number of arguments: ', len(sys.argv), 'arguments.')
    print('Argument list:', str(sys.argv))
    filename1 = "Pythia/pythia8226_pp5TeV_Monash2013_clusterCorr100_9000.root"
    filename2 = "Pythia/pythia8226_pp5TeV_Monash2013_clusterCorr98_6800.root"
    filename3 = "Pythia/pythia8226_pp5TeV_Monash2013_clusterCorr102_5000.root"
    filename4 = "Pythia/pythia8226_pp5TeV_Monash2013_JetScaleUp_4000.root"
    filename5 = "Pythia/pythia8226_pp5TeV_Monash2013_JetScaleDown_2000.root"
    filename_default = "JetJtAna_Default_20190121_153453.root"
    filename_hadcorr = "JetJtAna_HadCorr_20190121_152726.root"
    # if(len(sys.argv) > 3):
    #     start = int(sys.argv[3])
    # else:
    #     start = 1
    start = 2
    Default_data = dataset("LHC13b_default", NFIN=0, range=(0, 6),
                           filename=filename_default,
                           directory='AliJJetJtTask/AliJJetJtHistManager',
                           color=2, style=24, rebin=4)
    HadCorr_data = dataset("LHC13b_hadcorr", NFIN=0, range=(0, 6),
                           filename=filename_hadcorr,
                           directory='AliJJetJtTask/AliJJetJtHistManager',
                           color=2, style=24, rebin=4)
    Default = dataset("Default", NFIN=0, range=(0, 8), filename=filename1,
                      directory='JCDijetBaseTask/jcdijet', color=2, style=24,
                      rebin=4)
    HadCorr_high = dataset("-2%", NFIN=0, range=(0, 8), filename=filename2,
                           directory='JCDijetBaseTask/jcdijet', color=2,
                           style=24, rebin=4)
    HadCorr_low = dataset("+2%", NFIN=0, range=(0, 8), filename=filename3,
                          directory='JCDijetBaseTask/jcdijet', color=2,
                          style=24, rebin=4)
    JetScale_up = dataset("-2%", NFIN=0, range=(0, 8), filename=filename4,
                          directory='JCDijetBaseTask/jcdijet', color=2,
                          style=24, rebin=4)
    #JetScale_down = dataset("-2%",NFIN=0,range=(0,8),filename=filename5,directory='JCDijetBaseTask/jcdijet',color=2,style=24,rebin=4)

    LHC13b = [Default_data, HadCorr_data]
    LHC13b_signal = [
        x.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin', jetpt=False)
        for x in LHC13b
    ]
    jetPt = Default_data.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin',
                                       jetpt=True)[1]
    LHC13b_graphs = [x.getGraphs() for x in LHC13b]
    LHC13b_gausRMS = [x[0] for x in LHC13b_graphs]
    LHC13b_gammaRMS = [x[1] for x in LHC13b_graphs]
    LHC13b_gausYield = [x[2] for x in LHC13b_graphs]
    LHC13b_gammaYield = [x[3] for x in LHC13b_graphs]
    LHC13b_systGausRMS_rel = defs.makeSystError(LHC13b_gausRMS[0],
                                                LHC13b_gausRMS[1], rel=True)
    LHC13b_systGausRMS_rel.SetName("GausRMS_hadcorrSystematics_relative")
    LHC13b_systGammaRMS_rel = defs.makeSystError(LHC13b_gammaRMS[0],
                                                 LHC13b_gammaRMS[1], rel=True)
    LHC13b_systGammaRMS_rel.SetName("GammaRMS_hadcorrSystematics_relative")
    defs.drawErrors2(
        LHC13b_systGausRMS_rel, 20, 150, 0, -0.10, 0.10, 0, "Narrow RMS",
        "HadCorr", "_data",
        "SystematicErrors/SystematicErrorsGausRMS_EmcalHadCorr.pdf", 0, 0, 0.02)
    defs.drawErrors2(
        LHC13b_systGammaRMS_rel, 20, 150, 0, -0.10, 0.10, 0, "Wide RMS",
        "HadCorr", "_data",
        "SystematicErrors/SystematicErrorsGammaRMS_EmcalHadCorr.pdf", 0, 0, 0.02)

    colors = [1, 2, 3, 4]
    names = ["Default", "HadCorr"]
    n_figs = 8
    n_rows = 2
    fig, axs = defs.makegrid(4, n_figs // 4, xlog=True, ylog=True, d=d,
                             shareY=False, figsize=(15, 7.5))
    axs = axs.reshape(8)
    axs[1].text(0.12, 0.002,
                d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] +
                '\n Jet Cone' + '\n Inclusive jT' + '\n', fontsize=7)
    for signal, name, color, j in zip(LHC13b_signal, names, colors, range(10)):
        print("Plot {}".format(name))
        for jT, pT, ax, i in zip(signal[start:], jetPt[start:], axs[0:4],
                                 range(0, 9)):
            print(i)
            jT.SetMarkerColor(color)
            jT.SetLineColor(color)
            plot = rplt.errorbar(jT, xerr=False, emptybins=False, axes=ax,
                                 label=name, fmt='o', fillstyle='none',
                                 ecolor='blue')  #Plot jT histogram
            ax.text(0.3, 1e2,
                    r'$p_{{T,\mathrm{{jet}}}}$:'
                    '\n'
                    r' {:02d}-{:02d} GeV'.format(pT[0], pT[1]))
            ax.set_xlim([0.1, 22])  #Set x-axis limits
            ax.set_ylim([5e-4, 2e3])  #Set y-axis limits
            ax.set_xticklabels(ax.get_xticklabels(), horizontalalignment='left')
        ratios = []
        if j > 0:
            for jT, div in zip(signal, LHC13b_signal[0]):
                h = jT.Clone()
                h.Divide(div)
                ratios.append(h)
            axs[4].set_ylabel('Ratio', fontsize=9)  #Add y-axis labels to left- and rightmost subfigures
            axs[-1].set_ylabel('Ratio', fontsize=9)
            for ratio, pT, ax in zip(ratios[start:], jetPt[start:], axs[4:9]):
                rplt.errorbar(ratio, xerr=False, emptybins=False, axes=ax,
                              label='Ratio', fmt='o')  #Plot ratio histogram
                print("Draw {}".format(ratio.GetName()))
                #if(i == 0):
                ax.set_yscale('linear')
                ax.set_xlim([0.1, 20])  #Set x-axis limits
                ax.set_ylim([0.95, 1.05])  #Set y-axis limits
    axs[0].legend(loc='lower left')
    plt.savefig("SystematicErrors/LHC13bHadCorrComparisonJetPt{}To{}.pdf".format(
        start, start + 4), format='pdf')  #Save figure
    plt.show()  #Draw figure on screen
    return  # everything below is unreachable while this early return stays

    start = 4
    datasets = [Default, HadCorr_high, HadCorr_low, JetScale_up]
    signal, jetPt = Default.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin',
                                          jetpt=True)
    signal2 = HadCorr_high.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin',
                                         jetpt=False)
    signal3 = HadCorr_low.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin',
                                        jetpt=False)
    signal4 = JetScale_up.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin',
                                        jetpt=False)
    #signal5 = JetScale_down.getSubtracted('JetConeJtWeightBin','BgJtWeightBin',jetpt = False)
    signals = [signal, signal2, signal3, signal4]
    names = ["Default", "-2%", "+2%", "Jet pT +2%"]
    if (True):
        graphs = [data.getGraphs() for data in datasets]
        gausRMS = [x[0] for x in graphs]
        gammaRMS = [x[1] for x in graphs]
        gausYield = [x[2] for x in graphs]
        gammaYield = [x[3] for x in graphs]
        systGausRMS = defs.makeSystError(gausRMS[0], gausRMS[1])
        systGausRMS.SetName("GausRMS_emcalSystematics")
        systGammaRMS = defs.makeSystError(gammaRMS[0], gammaRMS[1])
        systGammaRMS.SetName("GammaRMS_emcalSystematics")
        systGausRMS_abs = defs.makeSystError(gausRMS[0], gausRMS[1], abs=True)
        systGausRMS_abs.SetName("GausRMS_emcalSystematics_absolute")
        systGammaRMS_abs = defs.makeSystError(gammaRMS[0], gammaRMS[1], abs=True)
        systGammaRMS_abs.SetName("GammaRMS_emcalSystematics_absolute")
        systGausRMS_rel = defs.makeSystError(gausRMS[0], gausRMS[1], rel=True)
        systGausRMS_rel.SetName("GausRMS_emcalSystematics_relative")
        systGausRMS_rel2 = defs.makeSystError(gausRMS[0], gausRMS[2], rel=True)
        systGausRMS_rel2.SetName("GausRMS_emcalSystematics_relative2")
        systGammaRMS_rel = defs.makeSystError(gammaRMS[0], gammaRMS[1], rel=True)
        systGammaRMS_rel.SetName("GammaRMS_emcalSystematics_relative")
        systGammaRMS_rel2 = defs.makeSystError(gammaRMS[0], gammaRMS[2], rel=True)
        systGammaRMS_rel2.SetName("GammaRMS_emcalSystematics_relative2")
        systGausRMS_rel3 = defs.makeSystError(gausRMS[0], gausRMS[3], rel=True)
        systGausRMS_rel3.SetName("GausRMS_emcalSystematics_relative")
        #systGausRMS_rel4 = defs.makeSystError(gausRMS[0],gausRMS[4],rel=True)
        #systGausRMS_rel4.SetName("GausRMS_emcalSystematics_relative2")
        systGammaRMS_rel3 = defs.makeSystError(gammaRMS[0], gammaRMS[3], rel=True)
        systGammaRMS_rel3.SetName("GammaRMS_emcalSystematics_relative")
        #systGammaRMS_rel4 = defs.makeSystError(gammaRMS[0],gammaRMS[4],rel=True)
        #systGammaRMS_rel4.SetName("GammaRMS_emcalSystematics_relative2")
        defs.drawErrors2(systGausRMS_rel, 20, 150, 0, -0.04, 0.04, 0,
                         "Narrow RMS", "-2%", "_data",
                         "SystematicErrors/SystematicErrorsGausRMS_Emcal.pdf",
                         0, 0, 0.01, error2=systGausRMS_rel2, title3="+2%")
        defs.drawErrors2(systGammaRMS_rel, 20, 150, 0, -0.04, 0.04, 0,
                         "Wide RMS", "-2%", "_data",
                         "SystematicErrors/SystematicErrorsGammaRMS_Emcal.pdf",
                         0, 0, 0.01, error2=systGammaRMS_rel2, title3="+2%")
        defs.drawErrors2(systGausRMS_rel3, 20, 150, 0, -0.04, 0.04, 0,
                         "Narrow RMS", "jet pT -2%", "_data",
                         "SystematicErrors/SystematicErrorsGausRMS_jetScale.pdf",
                         0, 0, 0.01)
        defs.drawErrors2(systGammaRMS_rel3, 20, 150, 0, -0.04, 0.04, 0,
                         "Wide RMS", "jet pT -2%", "_data",
                         "SystematicErrors/SystematicErrorsGammaRMS_jetScale.pdf",
                         0, 0, 0.01)
    colors = [1, 2, 3, 4]
    n_figs = 8
    n_rows = 2
    fig, axs = defs.makegrid(4, n_figs // 4, xlog=True, ylog=True, d=d,
                             shareY=False, figsize=(15, 7.5))
    axs = axs.reshape(8)
    axs[1].text(0.12, 0.002,
                d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] +
                '\n Jet Cone' + '\n Inclusive jT' + '\n', fontsize=7)
    for signal, name, color, j in zip(signals, names, colors, range(10)):
        print("Plot {}".format(name))
        for jT, pT, ax, i in zip(signal[start:], jetPt[start:], axs[0:4],
                                 range(0, 9)):
            print(i)
            jT.SetMarkerColor(color)
            jT.SetLineColor(color)
            plot = rplt.errorbar(jT, xerr=False, emptybins=False, axes=ax,
                                 label=name, fmt='o', fillstyle='none',
                                 ecolor='blue')  #Plot jT histogram
            ax.text(0.3, 1e2,
                    r'$p_{{T,\mathrm{{jet}}}}$:'
                    '\n'
                    r' {:02d}-{:02d} GeV'.format(pT[0], pT[1]))
            ax.set_xlim([0.1, 22])  #Set x-axis limits
            ax.set_ylim([5e-4, 2e3])  #Set y-axis limits
            ax.set_xticklabels(ax.get_xticklabels(), horizontalalignment='left')
        ratios = []
        if j > 0:
            for jT, div in zip(signal, signals[0]):
                h = jT.Clone()
                h.Divide(div)
                ratios.append(h)
            axs[4].set_ylabel('Ratio', fontsize=9)  #Add y-axis labels to left- and rightmost subfigures
            axs[-1].set_ylabel('Ratio', fontsize=9)
            for ratio, pT, ax in zip(ratios[start:], jetPt[start:], axs[4:9]):
                rplt.errorbar(ratio, xerr=False, emptybins=False, axes=ax,
                              label='Ratio', fmt='o')  #Plot ratio histogram
                #if(i == 0):
                ax.set_yscale('linear')
                ax.set_xlim([0.1, 20])  #Set x-axis limits
                ax.set_ylim([0.95, 1.05])  #Set y-axis limits
    axs[0].legend(loc='lower left')
    plt.savefig("SystematicErrors/HadCorrComparisonJetPt{}To{}.pdf".format(
        start, start + 4), format='pdf')  #Save figure
    plt.show()  #Draw figure on screen
def main():
    Rebin = 4
    colors = [7, 1, 2, 4, 6]
    Pythia = dataset("Eff 100", NFIN=1, range=(2, 8),
                     filename="Pythia/pythia8226_pp5TeV_Monash2013_Rbinned.root",
                     directory='/JCDijetBaseTask/jcdijet', color=2, style=24,
                     rebin=Rebin)
    Pythia2 = dataset("Eff 97", NFIN=1, range=(2, 8),
                      filename="Pythia/pythia8226_pp5TeV_Monash2013_eff97_14k.root",
                      directory='/JCDijetBaseTask/jcdijet', color=3, style=24,
                      rebin=Rebin)
    inclusive, jetPt = Pythia.getHist('JetConeJtWeightBin', jetpt=True)
    datasets = [Pythia]
    datasets.append(Pythia2)
    incs = [inclusive]
    outputfile = "TrackingSyst.root"
    for data in datasets[1:]:
        incs.append(data.getHist('JetConeJtWeightBin', jetpt=False))
    names = [data.name() for data in datasets]
    signals = [
        data.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin', jetpt=False)
        for data in datasets
    ]
    graphs = [data.getGraphs() for data in datasets]
    gausRMS = [x[0] for x in graphs]
    gammaRMS = [x[1] for x in graphs]
    gausYield = [x[2] for x in graphs]
    gammaYield = [x[3] for x in graphs]
    systGausRMS = defs.makeSystError(gausRMS[0], gausRMS[1])
    systGausRMS.SetName("GausRMS_trackingSystematics")
    systGammaRMS = defs.makeSystError(gammaRMS[0], gammaRMS[1])
    systGammaRMS.SetName("GammaRMS_trackingSystematics")
    systGausRMS_abs = defs.makeSystError(gausRMS[0], gausRMS[1], abs=True)
    systGausRMS_abs.SetName("GausRMS_trackingSystematics_absolute")
    systGammaRMS_abs = defs.makeSystError(gammaRMS[0], gammaRMS[1], abs=True)
    systGammaRMS_abs.SetName("GammaRMS_trackingSystematics_absolute")
    systGausRMS_rel = defs.makeSystError(gausRMS[0], gausRMS[1], rel=True)
    systGausRMS_rel.SetName("GausRMS_trackingSystematics_relative")
    systGammaRMS_rel = defs.makeSystError(gammaRMS[0], gammaRMS[1], rel=True)
    systGammaRMS_rel.SetName("GammaRMS_trackingSystematics_relative")
    defs.drawErrors2(systGausRMS_rel, 40, 150, 0, -0.1, 0.1, 0, "Narrow RMS",
                     "Tracking", "_data",
                     "SystematicErrors/SystematicErrorsGausRMS_Tracking.pdf",
                     0, 0, 0.04)
    defs.drawErrors2(systGammaRMS_rel, 40, 150, 0, -0.15, 0.15, 0, "Wide RMS",
                     "Tracking", "_data",
                     "SystematicErrors/SystematicErrorsGammaRMS_Tracking.pdf",
                     0, 0, 0.05)
    with root_open(outputfile, 'recreate') as f:
        f.cd()
        systGausRMS.Write()
        systGammaRMS.Write()
        systGausRMS_abs.Write()
        systGammaRMS_abs.Write()
        systGausRMS_rel.Write()
        systGammaRMS_rel.Write()

    start = 2
    end = 7
    n_figs = 8
    n_rows = 2
    fig, axs = defs.makegrid(4, n_figs // 4, xlog=True, ylog=True, d=d,
                             shareY=False, figsize=(15, 7.5))
    axs = axs.reshape(n_figs)
    axs[1].text(0.12, 0.002,
                d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] +
                '\n Jet Cone' + '\n Inclusive jT', fontsize=7)
    for inc, name, color, j in zip(incs, names, colors, range(10)):
        print("Plot {}".format(name))
        for jT, pT, ax, i in zip(inc[start:], jetPt[start:], axs[0:4],
                                 range(0, 9)):
            jT.SetMarkerColor(color)
            jT.SetLineColor(color)
            plot = rplt.errorbar(jT, xerr=False, emptybins=False, axes=ax,
                                 label=name, fmt='o', fillstyle='none',
                                 ecolor='blue')  #Plot jT histogram
            ax.text(0.3, 1e2,
                    r'$p_{{T,\mathrm{{jet}}}}$:'
                    '\n'
                    r' {:02d}-{:02d} GeV'.format(pT[0], pT[1]))
            ax.set_xlim([0.1, 22])  #Set x-axis limits
            ax.set_ylim([5e-4, 2e3])  #Set y-axis limits
            ax.set_xticklabels(ax.get_xticklabels(), horizontalalignment='left')
        ratios = []
        if j > 0:
            for jT, div in zip(inc, incs[0]):
                h = jT.Clone()
                h.Divide(div)
                ratios.append(h)
            axs[4].set_ylabel('Ratio', fontsize=9)  #Add y-axis labels to left- and rightmost subfigures
            axs[-1].set_ylabel('Ratio', fontsize=9)
            for ratio, pT, ax in zip(ratios[start:], jetPt[start:], axs[4:9]):
                rplt.errorbar(ratio, xerr=False, emptybins=False, axes=ax,
                              label='Ratio', fmt='o')  #Plot ratio histogram
                #if(i == 0):
                ax.set_yscale('linear')
                ax.set_xlim([0.1, 20])  #Set x-axis limits
                ax.set_ylim([0.8, 1.2])  #Set y-axis limits
    axs[0].legend(loc='lower left')
    plt.savefig("PythonFigures/TrackingR04JetConeJtInclusivePtFrom{}To{}.pdf".format(
        start, end), format='pdf')  #Save figure
    plt.show()  #Draw figure on screen

    fig, axs = defs.makegrid(4, n_figs // 4, xlog=True, ylog=True, d=d,
                             shareY=False, figsize=(15, 7.5))
    axs = axs.reshape(n_figs)
    axs[1].text(0.12, 0.002,
                d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] +
                '\n Jet Cone' + '\n Subtracted jT', fontsize=7)
    for signal, name, color, j in zip(signals, names, colors, range(10)):
        print("Plot {}".format(name))
        for jT, pT, ax, i in zip(signal[start:], jetPt[start:], axs[0:4],
                                 range(0, 9)):
            jT.SetMarkerColor(color)
            jT.SetLineColor(color)
            plot = rplt.errorbar(jT, xerr=False, emptybins=False, axes=ax,
                                 label=name, fmt='o', fillstyle='none',
                                 ecolor='blue')  #Plot jT histogram
            ax.text(0.3, 1e2,
                    r'$p_{{T,\mathrm{{jet}}}}$:'
                    '\n'
                    r' {:02d}-{:02d} GeV'.format(pT[0], pT[1]))
            ax.set_xlim([0.1, 22])  #Set x-axis limits
            ax.set_ylim([5e-4, 2e3])  #Set y-axis limits
            ax.set_xticklabels(ax.get_xticklabels(), horizontalalignment='left')
        ratios = []
        if j > 0:
            for jT, div in zip(signal, signals[0]):
                h = jT.Clone()
                h.Divide(div)
                ratios.append(h)
            axs[4].set_ylabel('Ratio', fontsize=9)  #Add y-axis labels to left- and rightmost subfigures
            axs[-1].set_ylabel('Ratio', fontsize=9)
            for ratio, pT, ax in zip(ratios[start:], jetPt[start:], axs[4:9]):
                rplt.errorbar(ratio, xerr=False, emptybins=False, axes=ax,
                              label='Ratio', fmt='o')  #Plot ratio histogram
                #if(i == 0):
                ax.set_yscale('linear')
                ax.set_xlim([0.1, 20])  #Set x-axis limits
                ax.set_ylim([0.8, 1.2])  #Set y-axis limits
    axs[0].legend(loc='lower left')
    plt.savefig("PythonFigures/TrackingR04JetConeJtSignalPtFrom{}To{}.pdf".format(
        start, end), format='pdf')  #Save figure
    plt.show()  #Draw figure on screen

    drawWithErrors2Combined(gausRMS, gammaRMS, 15, 500, 1, 0, 1.65, 0,
                            r'jet $p_T$', r'$\sqrt{\left<j_T^2\right>}$',
                            'Pythia', 'PythonFigures/TrackingSystematicsRMS',
                            separate=True)
def test(args):
    source_test_data = DataLoader(
        dataset('images/stare/', train=False, img_count=15, height=605,
                width=605, type='stare',
                transforms=transforms.Compose([ToTensor(), normalize()])),
        batch_size=1, shuffle=False)
    target_test_data = DataLoader(
        dataset('images/chasedb1/', train=False, img_count=15, height=605,
                width=605, type='chasedb1',
                transforms=transforms.Compose([ToTensor(), normalize()])),
        batch_size=1, shuffle=False)
    model_g = UNet_G()
    model_mc = Unet_C()
    device = torch.device("cuda:" + args.device if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(args.checkpoint)
    model_g.load_state_dict(checkpoint['g_state_dict'])
    model_mc.load_state_dict(checkpoint['mc_state_dict'])
    model_g.to(device)
    model_mc.to(device)
    model_g.eval()
    model_mc.eval()
    source_loss_list = []
    target_loss_list = []
    with torch.no_grad():
        ## Source
        dsc_src_sum = 0
        for n, sample in enumerate(source_test_data):
            data, mask_cpu, name = sample["img"], sample["label"], sample["name"]
            data, mask = data.to(device), mask_cpu.to(device)
            output = model_g(data)
            output = model_mc(output)
            dsc = dice_loss(output, mask).item()
            dsc_src_sum += dsc
            source_loss_list.append(dsc)
            # print("Source {} - DSC: {}".format(filename, dsc))
            pred = output.squeeze().cpu().numpy()
            pred[pred >= 0.5] = 1.0
            pred[pred < 0.5] = 0.0
            final_pred = np.uint8(pred * 255)
            # cv2.imwrite("./pred/source/{}_source_pred.png".format(filename), final_pred)
            mask_cpu = mask_cpu.squeeze().numpy()
            print(mask_cpu.shape)
            compare_output = np.stack([final_pred, mask_cpu, final_pred], axis=2)
            cv2.imwrite("./pred/source/{}_source_pred.png".format(name), final_pred)
            cv2.imwrite("./pred/source_compare/{}_source_pred.png".format(name),
                        compare_output)
        print("Source Average DSC - {}".format(dsc_src_sum / (n + 1)))
        print()
        ## Target
        dsc_tar_sum = 0
        for m, sample in enumerate(target_test_data):
            data, mask_cpu, name = sample["img"], sample["label"], sample["name"]
            # filename = name[0]
            data, mask = data.to(device), mask_cpu.to(device)
            output = model_g(data)
            output = model_mc(output)
            dsc = dice_loss(output, mask).item()
            # accumulate into the target-domain totals
            dsc_tar_sum += dsc
            target_loss_list.append(dsc)
            # print("Target {} - DSC: {}".format(filename, dsc))
            pred = output.squeeze().cpu().numpy()
            pred[pred >= 0.5] = 1.0
            pred[pred < 0.5] = 0.0
            final_pred = np.uint8(pred * 255)
            mask_cpu = mask_cpu.squeeze().numpy()
            print(mask_cpu.shape)
            compare_output = np.stack([final_pred, mask_cpu, final_pred], axis=2)
            cv2.imwrite("./pred/target/{}_target_pred.png".format(name), final_pred)
            cv2.imwrite("./pred/target_compare/{}_target_pred.png".format(name),
                        compare_output)
            # cv2.imwrite("./pred/target/{}_target_pred.png".format(filename), final_pred)
        print("Target Average DSC - {}".format(dsc_tar_sum / (m + 1)))
import sys
sys.path.append('../')

import numpy as np
from dataset import *
from sklearn import neighbors
from sklearn import svm
from sklearn.gaussian_process.kernels import RBF
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit

ds = dataset()
x, y, x_train, x_test, y_train, y_test = ds.get_data()
print(x_train.shape)
print(y_train.shape)

from deeplearning.train import train
from deeplearning.nn import NeuralNet
from deeplearning.activation import Tanh, Softmax, Sigmoid, ReLU
from deeplearning.layers import Dense, Dropout, BatchNormalization
from deeplearning.loss import MSE, CrossEntropy
from deeplearning.optim import Momentum_SGD, SGD, AdaGrad, RMSProp, Adam
from deeplearning.evaluation import accurarcy
from deeplearning.reg import *
def encode(text, vocab):
    feature_vec = np.zeros(vocab_size)
    valid_id = []
    wordlist = text.split()
    for word in wordlist:
        pos = vocab.get(word, -1)
        if pos == -1:
            continue  # skip out-of-vocabulary words
        feature_vec[pos] = 1
        if pos not in valid_id:
            valid_id.append(pos)
    return feature_vec, valid_id


if __name__ == '__main__':
    train_data = dataset('./data/train.csv')
    test_data = dataset('./data/test.csv')
    texts = train_data.texts + test_data.texts
    targets = train_data.targets + test_data.targets
    X_train, X_test, y_train, y_test = train_data.texts, test_data.texts, train_data.targets, test_data.targets
    # train_test_split(texts, targets, test_size=0.4)
    train_size = len(X_train)
    test_size = len(X_test)
    encoder = CountVectorizer()
    # encoder = TfidfVectorizer(max_features=15000)
    print('encoding...')
    data = encoder.fit_transform(X_train + X_test)
    X_train = data[:train_size]
    X_test = data[train_size:]
    print('naive bayes')
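# A tiny sanity sketch for encode() above (illustrative only: encode() reads
# the module-level vocab_size, so this helper temporarily sets it; the toy
# vocabulary is an assumption, not part of the original script).
def _encode_sanity_check():
    global vocab_size
    vocab_size = 2
    vec, ids = encode('good good movie', {'good': 0, 'movie': 1})
    # repeated words set the same bag-of-words bit only once
    assert vec.tolist() == [1.0, 1.0] and ids == [0, 1]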
def inference(sess, dataset, passes=1, dataset_mask=False,
              dataset_selection=False, min_opacity=.15, max_opacity=.4):
    # Data sources
    next_image, iterator_init = dataset()
    image_shape = next_image.shape.as_list()
    if dataset_mask:
        next_mask, iterator_init = dataset_mask()
        next_mask = next_mask[:, :, :, 0:1]
    else:
        next_mask = batch_masks(None, image_shape[1], image_shape[2],
                                min_opacity, max_opacity)
    if dataset_selection:
        next_selection, iterator_init = dataset_selection()

    # Model
    images_p = tf.placeholder(tf.float32, shape=[None] + image_shape[1:])
    mask_p = tf.placeholder(tf.float32, shape=[None] + image_shape[1:3] + [1])
    selection_p = tf.placeholder(tf.float32, shape=[None] + image_shape[1:3] + [1])
    image_w = tf.clip_by_value(images_p - mask_p, 0, 1)
    selection_conv = selection_margin(mask_p, 4)
    predictions = model(image_w, False) * selection_p
    gen_mask = tf.clip_by_value(tf.abs(predictions), 0, 1)
    reconstruction = tf.clip_by_value(image_w + predictions, 0, 1)
    accuracy = get_accuracy(reconstruction, images_p)

    # Inference
    sess.run(tf.global_variables_initializer())
    sess.run(tf.tables_initializer())
    iterator_init(sess)
    saver = tf.train.Saver()
    saver.restore(sess, "/tmp/model.ckpt")

    # Pass 1
    batch = sess.run([next_image, next_mask])
    if dataset_selection:
        selection = sess.run(tf.expand_dims(next_selection[:, :, :, 0], 3))
    else:
        selection = sess.run(selection_conv, feed_dict={mask_p: batch[1]})
    feed_dict = {images_p: batch[0][:, :, :, 0:3],
                 mask_p: batch[1][:batch[0].shape[0]],
                 selection_p: selection[:batch[0].shape[0]]}
    images = sess.run([image_w, reconstruction, gen_mask, accuracy],
                      feed_dict=feed_dict)
    results = [images[0], images[1]]
    print('Mean accuracy %.3f%%' % (images[3] * 100))

    # Pass 2
    for _ in range(1, passes):
        reconstruction1 = images[1]
        feed_dict = {images_p: reconstruction1,
                     mask_p: np.zeros(list(reconstruction1.shape[:-1]) + [1])}
        images = sess.run([image_w, reconstruction, gen_mask],
                          feed_dict=feed_dict)
        results += [images[1]]

    # Reformat
    results += [images[2]]
    images = [unstandardize(x) for x in results]
    return images
model.src_embed.lut.weight = model.tgt_embed.lut.weight
model.generator.proj.weight = model.tgt_embed.lut.weight

model, criterion = model.to(devices[0]), criterion.to(devices[0])
model_opt = NoamOpt(
    dim_model, 1, 400,
    T.optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.98), eps=1e-9))
if len(devices) > 1:
    model, criterion = map(nn.parallel.replicate, [model, criterion],
                           [devices, devices])
loss_compute = SimpleLossCompute if len(devices) == 1 else MultiGPULossCompute

for epoch in range(100):
    train_iter = dataset(graph_pool, mode='train', batch_size=args.batch,
                         devices=devices)
    valid_iter = dataset(graph_pool, mode='valid', batch_size=args.batch,
                         devices=devices)
    print('Epoch: {} Training...'.format(epoch))
    model.train(True)
    run_epoch(train_iter, model, loss_compute(criterion, model_opt),
              is_train=True)
    print('Epoch: {} Evaluating...'.format(epoch))
    model.att_weight_map = None
    model.eval()
    # the original snippet is truncated mid-call; the arguments below are an
    # assumed completion mirroring the training call, without the optimizer
    run_epoch(valid_iter, model, loss_compute(criterion, None),
              is_train=False)
def train():
    for dataset, dataset_name in zip(datasets, datasets_names):
        j = 0
        texts, labels = dataset()
        texts, labels = clean_text(texts, labels)
        print('RAW DATASET SIZE:', len(labels))
        texts, labels = remove_duplicates(texts, labels)
        print('DATASET SIZE AFTER REMOVING DUPLICATES:', len(labels))
        texts, labels = under_sampling(texts, labels)
        print('FINAL DATASET SIZE:', len(texts))
        tokenizer = create_tokenizer(texts)
        length = max_length(texts)
        vocab_size = len(tokenizer.word_index) + 1
        tweets = encode_text(tokenizer, texts, length)
        for model_, model_name in zip(models, models_names):
            print('\nSEQUENCE LENGTH:', length)
            print('VOCABULARY SIZE:', vocab_size)
            k_fold = 0
            sss = StratifiedShuffleSplit(n_splits=3, random_state=42,
                                         test_size=0.2)
            labels = np.array([int(i) for i in labels])
            for train_index, test_index in sss.split(tweets, labels):
                x_train, x_test = tweets[train_index], tweets[test_index]
                y_train, y_test = labels[train_index], labels[test_index]
                path = './models/' + dataset_name + '/'
                if not os.path.exists(path):
                    os.makedirs(path)
                model = model_(length, vocab_size)
                check = ModelCheckpoint(path + dataset_name + model_name +
                                        str(k_fold) + '_model.h5',
                                        monitor='val_loss', save_best_only=True)
                stop = EarlyStopping(monitor='val_loss', patience=5)
                plot_model(model, to_file=path + model_name + 'model.png',
                           show_shapes=True)
                print(model.summary())
                model.compile(loss='binary_crossentropy', optimizer='adadelta',
                              metrics=['accuracy'])
                class_weight_list = compute_class_weight('balanced',
                                                         np.unique(y_train),
                                                         y_train)
                class_weight = dict(zip(np.unique(y_train), class_weight_list))
                print(class_weight)
                callbacks = [check, stop]
                try:
                    # some architectures take three parallel copies of the
                    # input; fall back to a single-input fit below
                    h = model.fit([x_train, x_train, x_train], y_train,
                                  epochs=epochs, batch_size=batch_size,
                                  validation_data=([x_test, x_test, x_test], y_test),
                                  callbacks=callbacks, class_weight=class_weight,
                                  verbose=1)
                except:
                    h = model.fit(x_train, y_train, epochs=epochs,
                                  batch_size=batch_size,
                                  validation_data=(x_test, y_test),
                                  callbacks=callbacks, class_weight=class_weight,
                                  verbose=1)
                del model
                model = load_model(path + dataset_name + model_name +
                                   str(k_fold) + '_model.h5')
                try:
                    y_pred = model.predict(x_test)
                except:
                    y_pred = model.predict([x_test, x_test, x_test])
                for threshold in [0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7]:
                    y_pred_ = [1 if y > threshold else 0 for y in y_pred]
                    print('thresh', threshold)
                    print(accuracy_score(y_test, y_pred_))
                    log_path = path + 'log.csv'
                    to_save = {
                        'arc': model_name,
                        'fold': k_fold,
                        'acc': accuracy_score(y_test, y_pred_),
                        'prec': precision_score(y_test, y_pred_),
                        'rec': recall_score(y_test, y_pred_),
                        'f1': f1_score(y_test, y_pred_),
                        'dataset': dataset_name,
                        'thresh': threshold
                    }
                    df = pd.DataFrame([to_save])
                    if k_fold == 0 and j == 0:
                        with open(log_path, 'w') as f:
                            df.to_csv(f, header=True)
                    else:
                        with open(log_path, 'a') as f:
                            df.to_csv(f, header=False)
                    j = j + 1
                df_ = pd.read_csv(log_path, index_col=[0])
                print(model_name)
                print(df_[df_['arc'] == model_name].acc.mean())
                print(df_[df_['arc'] == model_name].acc.std())
                k_fold = k_fold + 1
def main():
    Rebin = 4
    #Load data
    if (os.path.exists('RootFiles/Fig4.root')):
        inFile = "RootFiles/Fig4.root"
        inF = root_open(inFile, 'r')
        signals = []
        signals_randomBg = []
        for i in range(3):
            signal = [
                inF.Get("jTSignalJetPt_perp_pT{:02d}_{:02d}".format(ij, i))
                for ij in range(7)
            ]
            signal2 = [
                inF.Get("jTSignalJetPt_random_pT{:02d}_{:02d}".format(ij, i))
                for ij in range(7)
            ]
            signals.append(signal)
            signals_randomBg.append(signal2)
        names = ["Minimum Bias", "V0A, 0 - 10%", "ZDC, 0 - 10%"]
        #Use regular expressions to extract the jet pT range from histogram titles
        jetPt = [(int(re.search(r'p_{T,jet} : ([\d]*)\.[\d] - ([\d]*).[\d]*',
                                h.GetTitle(), re.M | re.I).group(1)),
                  int(re.search(r'p_{T,jet} : ([\d]*)\.[\d] - ([\d]*).[\d]*',
                                h.GetTitle(), re.M | re.I).group(2)))
                 for h in signals[0]]
    else:
        Mixed_FullJets_R04_HM_01 = datasetMixed(
            "V0A, 0 - 1%", NFIN=0, range=(1, 5),
            filename="CF_pPb_legotrain/legotrain_CF_pPb_2274_20181219/legotrain_CF_pPb_2274_20181219_LHC13cde.root",
            directory='AliJJetJtTask_Central01/AliJJetJtHistManager',
            directory2='AliJJetJtTask_kEMCEJE_Central01/AliJJetJtHistManager',
            color=colors[1], style=24, rebin=Rebin)
        Mixed_FullJets_R04_HM_10 = datasetMixed(
            "V0A, 0 - 10%", NFIN=0, range=(1, 5),
            filename="CF_pPb_legotrain/legotrain_CF_pPb_2305_20190109/legotrain_CF_pPb_2305_20190109_LHC13bcde.root",
            directory='AliJJetJtTask_Central10/AliJJetJtHistManager',
            directory2='AliJJetJtTask_kEMCEJE_Central10/AliJJetJtHistManager',
            color=colors[2], style=24, rebin=Rebin)
        FullJets_R04_MB = dataset(
            "Minimum Bias", NFIN=0, range=(1, 8),
            filename="CF_pPb_legotrain/legotrain_CF_pPb_2749_20190822/legotrain_CF_pPb_2749_20190822_LHC13de.root",
            directory="AliJJetJtTask_kEMCEJE/AliJJetJtHistManager",
            color=colors[0], style=24, rebin=Rebin)
        FullJets_R04_HM_01_ZDC = dataset(
            "ZDC, 0 - 1%", NFIN=0, range=(1, 8),
            filename="CF_pPb_legotrain/legotrain_CF_pPb_2749_20190822/legotrain_CF_pPb_2749_20190822_LHC13de.root",
            directory="AliJJetJtTask_kEMCEJE_Central01/AliJJetJtHistManager",
            color=colors[4], style=24, rebin=Rebin)
        FullJets_R04_HM_10_ZDC = dataset(
            "ZDC, 0 - 10%", NFIN=0, range=(1, 8),
            filename="CF_pPb_legotrain/legotrain_CF_pPb_2749_20190822/legotrain_CF_pPb_2768_2019_0825LHC13d.root",
            directory="AliJJetJtTask_kEMCEJE_Central10/AliJJetJtHistManager",
            color=colors[5], style=24, rebin=Rebin)
        inclusive, jetPt = FullJets_R04_MB.getHist('JetConeJtWeightBin',
                                                   jetpt=True)
        incs = [inclusive]
        datasets = [FullJets_R04_MB]
        datasets.append(Mixed_FullJets_R04_HM_10)
        datasets.append(FullJets_R04_HM_10_ZDC)
        for data in datasets[1:]:
            incs.append(data.getHist('JetConeJtWeightBin', jetpt=False))
        names = [data.name() for data in datasets]
        signals = [
            data.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin',
                               jetpt=False) for data in datasets
        ]
        signals_randomBg = [
            data.getSubtracted('JetConeJtWeightBin', 'BgRndmJtWeightBin',
                               jetpt=False, randomBG=True) for data in datasets
        ]
        outFile = "RootFiles/Fig4.root"
        outF = root_open(outFile, "w+")
        for hists, hists2, j in zip(signals, signals_randomBg, range(5)):
            for signal, signal_random, ij in zip(hists, hists2, range(10)):
                signal.SetName("jTSignalJetPt_perp_pT{:02d}_{:02d}".format(ij, j))
                signal.Write()
                signal_random.SetName("jTSignalJetPt_random_pT{:02d}_{:02d}".format(ij, j))
                signal_random.Write()
        outF.Close()
    systematics = [
        getBackgroundSystematic(h1, h2, signals[0])
        for h1, h2 in zip(signals, signals_randomBg)
    ]
    drawSignal(signals, systematics, names, colors, styles, jetPt)
def main():
    Rebin = 4
    #Mixed_FullJets_R04_MB = datasetMixed("Minimum Bias",NFIN=0,range=(1,5),filename="CF_pPb_legotrain/legotrain_CF_pPb_2274_20181219/legotrain_CF_pPb_2274_20181219_LHC13cde.root",directory='AliJJetJtTask/AliJJetJtHistManager',directory2='AliJJetJtTask_kEMCEJE/AliJJetJtHistManager',color=colors[0],style=24,rebin=Rebin)
    Mixed_FullJets_R04_HM_01 = datasetMixed(
        "V0A, 0 - 1%", NFIN=0, range=(1, 5),
        filename="CF_pPb_legotrain/legotrain_CF_pPb_2274_20181219/legotrain_CF_pPb_2274_20181219_LHC13cde.root",
        directory='AliJJetJtTask_Central01/AliJJetJtHistManager',
        directory2='AliJJetJtTask_kEMCEJE_Central01/AliJJetJtHistManager',
        color=colors[1], style=24, rebin=Rebin)
    Mixed_FullJets_R04_HM_10 = datasetMixed(
        "V0A, 0 - 10%", NFIN=0, range=(1, 5),
        filename="CF_pPb_legotrain/legotrain_CF_pPb_2305_20190109/legotrain_CF_pPb_2305_20190109_LHC13bcde.root",
        directory='AliJJetJtTask_Central10/AliJJetJtHistManager',
        directory2='AliJJetJtTask_kEMCEJE_Central10/AliJJetJtHistManager',
        color=colors[2], style=24, rebin=Rebin)
    #Mixed_FullJets_R04_HM_0_1 = datasetMixed("V0A, 0 - 0.1%",NFIN=0, range=(1,5),filename="CF_pPb_legotrain/legotrain_CF_pPb_2305_20190109/legotrain_CF_pPb_2305_20190109_LHC13bcde.root",directory='AliJJetJtTask_Central0_1/AliJJetJtHistManager',directory2='AliJJetJtTask_kEMCEJE_Central0_1/AliJJetJtHistManager',color=colors[3],style=24,rebin=Rebin)
    Mixed_FullJets_R04_MB = dataset(
        "Minimum Bias", NFIN=0, range=(1, 8),
        filename="CF_pPb_legotrain/legotrain_CF_pPb_2749_20190822/legotrain_CF_pPb_2749_20190822_LHC13de.root",
        directory="AliJJetJtTask_kEMCEJE/AliJJetJtHistManager",
        color=colors[0], style=24, rebin=Rebin)
    Mixed_FullJets_R04_HM_01_ZDC = dataset(
        "ZDC, 0 - 1%", NFIN=0, range=(1, 8),
        filename="CF_pPb_legotrain/legotrain_CF_pPb_2749_20190822/legotrain_CF_pPb_2749_20190822_LHC13de.root",
        directory="AliJJetJtTask_kEMCEJE_Central01/AliJJetJtHistManager",
        color=colors[4], style=24, rebin=Rebin)
    Mixed_FullJets_R04_HM_10_ZDC = dataset(
        "ZDC, 0 - 10%", NFIN=0, range=(1, 8),
        filename="CF_pPb_legotrain/legotrain_CF_pPb_2749_20190822/legotrain_CF_pPb_2768_2019_0825LHC13d.root",
        directory="AliJJetJtTask_kEMCEJE_Central10/AliJJetJtHistManager",
        color=colors[5], style=24, rebin=Rebin)
    #Mixed_FullJets_R04_HM_0_1 = dataset("ZDC, 0 - 0.1%",NFIN=0, range=(1,8),filename="CF_pPb_legotrain/legotrain_CF_pPb_2749_20190822/legotrain_CF_pPb_2749_20190822_LHC13de.root",directory="AliJJetJtTask_kEMCEJE_Central0_1/AliJJetJtHistManager",color=colors[0],style=24,rebin=Rebin)

    inclusive, jetPt = Mixed_FullJets_R04_MB.getHist('JetConeJtWeightBin',
                                                     jetpt=True)
    datasets = [Mixed_FullJets_R04_MB]
    incs = [inclusive]
    datasets.append(Mixed_FullJets_R04_HM_10)
    datasets.append(Mixed_FullJets_R04_HM_10_ZDC)
    #datasets.append(Mixed_FullJets_R04_HM_01)
    #datasets.append(Mixed_FullJets_R04_HM_01_ZDC)
    for data in datasets[1:]:
        incs.append(data.getHist('JetConeJtWeightBin', jetpt=False))
    names = [data.name() for data in datasets]
    signals = [
        data.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin', jetpt=False)
        for data in datasets
    ]
    signals_randomBg = [
        data.getSubtracted('JetConeJtWeightBin', 'BgRndmJtWeightBin',
                           jetpt=False, randomBG=True) for data in datasets
    ]
    systematics = [
        getBackgroundSystematic(h1, h2, signals[0])
        for h1, h2 in zip(signals, signals_randomBg)
    ]
    background = [
        data.getHist('BgJtWeightBin', jetpt=False, isBg=True)
        for data in datasets
    ]
    background_random = [
        data.getHist('BgRndmJtWeightBin', jetpt=False, isRndmBg=True)
        for data in datasets
    ]
    graphs = [data.getGraphs() for data in datasets]
    jetpt = [data.getJetPt() for data in datasets]
    gausRMS = [x[0] for x in graphs]
    gammaRMS = [x[1] for x in graphs]
    gausYield = [x[2] for x in graphs]
    gammaYield = [x[3] for x in graphs]

    #drawBackgroundQA1(back, backg_randomBg, names, colors, styles)
    drawSignal(signals, systematics, names, colors, styles, jetPt)
    return
    drawBackgroundQA(signals, signals_randomBg, names, colors, styles)
    drawInclusive(incs, names, colors, styles)
    drawJetPt(jetpt, names, colors, styles)
    drawBackground(background, names, colors, styles)
rotation_degree = 30
perspective_scale = 0.5
aug_p = 0.5

data_dir = "./저장파일/datasets"
ckpt_dir = "./checkpoint"
log_dir = "./log"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

## dataloader
transform_train = transforms.Compose([
    Noise(noise_std, p=aug_p),
    ToPILImage(),
    RandomPerspective(p=aug_p, scale=perspective_scale),
    RandomRotation(rotation_degree, p=aug_p),
    RandomAffine(affine_degree, p=aug_p),
    RandomCrop(p=aug_p),
    ToNumpy(),
    ToTensor()
])

dataset_train = dataset(data_dir=os.path.join(data_dir, "train1.csv"),
                        transform=transform_train)
loader_train = DataLoader(dataset=dataset_train, batch_size=batch_size,
                          shuffle=True)

## for testing
data = dataset_train.__getitem__(0)
img = data['img']
letter = data['letter']

##
img = img.reshape(-1, 1, 28, 28)
letter = letter.reshape(-1, 26)

##
net = Net5()
net.eval()
output = net(img, letter)
##
import os
import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
from torchvision import transforms

from dataset import *

##
data_dir = "./datasets"

transform_raw = transforms.Compose([])
dataset_train_raw = dataset(data_dir=os.path.join(data_dir, "train.csv"))

# transform = transforms.Compose([Noise(), ToPILImage(), RandomAffine(30), RandomCrop(), ToNumpy()])
# dataset_train = dataset(data_dir=os.path.join(data_dir, "train.csv"), transform=transform)

transform = transforms.Compose(
    [ToPILImage(), RandomPerspective(p=1, scale=0.5), ToNumpy()])
dataset_train = dataset(data_dir=os.path.join(data_dir, "train.csv"),
                        transform=transform)

##
data_raw = dataset_train_raw.__getitem__(1)
img_raw = data_raw['img']
img_raw = img_raw.squeeze()
data = dataset_train.__getitem__(1)
def main():
    # Relies on module-level helpers from this analysis package: dataset,
    # compareHistsWithRatio, drawWithErrors2Combined, defs, Graph (rootpy),
    # rplt (rootpy's root2matplotlib), and the globals d and Njets.
    print('Number of arguments: ', len(sys.argv), 'arguments.')
    print('Argument list:', str(sys.argv))
    filename = sys.argv[1]
    separate = int(sys.argv[2])
    if len(sys.argv) > 3:
        start = int(sys.argv[3])
        end = int(sys.argv[4])
    else:
        start = 1
        end = 6
    n_figs = end - start
    print("Number of figs: {}".format(n_figs))
    print("Input file: ")
    print(filename)

    FullJets_R04 = dataset('Full jets R=0.4', NFIN=0, range=(start, end),
                           filename=filename,
                           directory='AliJJetJtTask/AliJJetJtHistManager',
                           color=2, style=24, rebin=2)
    #Mixed_FullJets_R04 = datasetMixed("Full jets R=0.4",NFIN=0,range=5,filename=filename,directory='AliJJetJtTask/AliJJetJtHistManager',directory2='AliJJetJtTask_kEMCEJE/AliJJetJtHistManager',color=2,style=24,rebin=2)
    #signal,jetPt = FullJets_R04.getHist('JtWeightBin',jetpt = True)
    #signal2 = FullJets_R04.getHist('JtWeightLeadingRefBin',jetpt=False)

    compareHistsWithRatio(
        FullJets_R04,
        ['JtWeightBin', 'JtWeightLeadingRefBin', 'JtWeightLeadingRefBin',
         'JtWeightLeadingRefBin', 'JtWeightLeadingRefBin'],
        ['Jet axis ref.', 'leading ref. (xlong 0.0-0.2)',
         'leading ref. (xlong 0.2-0.4)', 'leading ref. (xlong 0.4-0.6)',
         'leading ref. (xlong 0.6-1.0)'],
        step=1, extras=['', 'Xlong00', 'Xlong01', 'Xlong02', 'Xlong03'])
    plt.savefig("PythonFigures/JetVsLeadingRefConst.pdf", format='pdf')  #Save figure
    plt.show()

    sets = compareHistsWithRatio(
        FullJets_R04,
        ['JetConeJtWeightBin', 'JetConeJtWeightLeadingRefBin',
         'JetConeJtWeightLeadingRefBin', 'JetConeJtWeightLeadingRefBin',
         'JetConeJtWeightLeadingRefBin'],
        ['Jet axis ref.', 'leading ref. (xlong 0.0-0.2)',
         'leading ref. (xlong 0.2-0.4)', 'leading ref. (xlong 0.4-0.6)',
         'leading ref. (xlong 0.6-1.0)'],
        step=1, extras=['', 'Xlong00', 'Xlong01', 'Xlong02', 'Xlong03'])
    plt.savefig("PythonFigures/JetVsLeadingRefJetCone.pdf", format='pdf')  #Save figure
    plt.show()

    JtJet = sets[0][0]
    JtLeadingxlong00 = sets[1][0]
    JtLeadingxlong01 = sets[2][0]
    JtLeadingxlong02 = sets[3][0]
    JtLeadingxlong03 = sets[4][0]
    # Sum the four xlong bins to get the full leading-track-reference jT.
    JtLeading = [h.clone() for h in sets[1][0]]
    for h, s, s2, s3 in zip(JtLeading, sets[2][0], sets[3][0], sets[4][0]):
        h.Add(s, 1)
        h.Add(s2, 1)
        h.Add(s3, 1)
    jetPt = sets[0][1]
    jetPtCenter = array('d', [(a + b) / 2.0 for a, b in jetPt])
    jetPtErrors = array('d', [(b - a) / 2.0 for a, b in jetPt])

    FullJets_fit = []
    FullJets_parameters = []
    FullJets_gausRMS = []
    FullJets_gammaRMS = []
    FullJets_gausYield = []
    FullJets_gammaYield = []
    # Fit each jT set per jet pT bin. Note: only five sets are fitted; the
    # xlong 0.6-1.0 bin enters via the JtLeading sum but is not fitted
    # separately, so the title list is trimmed to match (the original had a
    # sixth title that zip() silently dropped).
    for jT, title in zip(
            (JtJet, JtLeading, JtLeadingxlong00, JtLeadingxlong01, JtLeadingxlong02),
            ("Jet ref.", "Leading ref.", "Leading ref. (xlong 0.0-0.2)",
             "Leading ref. (xlong 0.2-0.4)", "Leading ref. (xlong 0.4-0.6)")):
        gausRMS = []
        gammaRMS = []
        gausRMSe = []
        gammaRMSe = []
        gausYield = []
        gammaYield = []
        gausYielde = []
        gammaYielde = []
        fits = []
        parameters = []
        for h, i in zip(jT, range(Njets)):
            fit, d = defs.fitJtHisto(h, '', 1, i, 8, title, draw=False)
            fits.append(fit)
            parameters.append(d)
            gausRMS.append(d['gausRMS'])
            gausRMSe.append(d['gausRMSe'])
            gammaRMS.append(d['gammaRMS'])
            gammaRMSe.append(d['gammaRMSe'])
            gausYield.append(d['gausYield'])
            gausYielde.append(d['gausYielde'])
            gammaYield.append(d['gammaYield'])
            gammaYielde.append(d['gammaYielde'])

        gausRMSg = Graph(len(gausRMS) - 2)
        gammaRMSg = Graph(len(gammaRMS) - 2)
        gausYieldg = Graph(len(gausYield) - 2)
        gammaYieldg = Graph(len(gammaYield) - 2)
        # Fill the yield graphs, skipping the two lowest jet pT bins.
        for h, he, g in zip((gausYield, gammaYield), (gausYielde, gammaYielde),
                            (gausYieldg, gammaYieldg)):
            for x, xe, a, e, i in zip(jetPtCenter[2:], jetPtErrors[2:], h[2:],
                                      he[2:], range(len(gausRMS) - 2)):
                g.SetPoint(i, x, a)
                g.SetPointError(i, xe, xe, e, e)
        # Fill the RMS graphs, again skipping the two lowest jet pT bins.
        # (Loop variables are renamed from the original a..f; in particular
        # the original reused 'd', shadowing the fit-parameter dict above.)
        for rmsGaus, rmsGamma, eGaus, eGamma, x, xe, i in zip(
                gausRMS[2:], gammaRMS[2:], gausRMSe[2:], gammaRMSe[2:],
                jetPtCenter[2:], jetPtErrors[2:], range(len(gausRMS) - 2)):
            gausRMSg.SetPoint(i, x, rmsGaus)
            gausRMSg.SetPointError(i, xe, xe, eGaus, eGaus)
            gammaRMSg.SetPoint(i, x, rmsGamma)
            gammaRMSg.SetPointError(i, xe, xe, eGamma, eGamma)

        FullJets_gausRMS.append(gausRMSg)
        FullJets_gammaRMS.append(gammaRMSg)
        FullJets_gausYield.append(gausYieldg)
        FullJets_gammaYield.append(gammaYieldg)
        FullJets_fit.append(fits)
        FullJets_parameters.append(parameters)
        print(gausRMS[2:])
        print(gammaRMS[2:])
        print(jetPtCenter[2:])

    drawWithErrors2Combined(
        FullJets_gausRMS, FullJets_gammaRMS,
        ["Jet ref.", "Leading ref.", "Leading ref. (xlong 0.0-0.2)",
         "Leading ref. (xlong 0.2-0.4)", "Leading ref. (xlong 0.4-0.6)"],
        15, 500, 1, 0, 1.85, 0, r'jet $p_T$', r'$\sqrt{\left<j_T^2\right>}$',
        'Pythia', 'PythonFigures/JetVsLeadingRefJetConeFits')
    return

    # NOTE: everything below is unreachable because of the return above, and
    # it depends on 'signal' and 'signal2', whose definitions are commented
    # out near the top of main(). Kept as found, reformatted for reference.
    if separate > 0:
        fig = plt.figure(1)
        ax = fig.add_subplot(1, 1, 1)
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.text(0.2, 0.0005,
                d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] + '\n Jet Cone',
                fontsize=7)
        rplt.errorbar(signal[separate], xerr=False, emptybins=False, axes=ax,
                      label="Jet axis reference", fmt='o')  #Plot jT histogram
        rplt.errorbar(signal2[separate], xerr=False, emptybins=False, axes=ax,
                      label="Leading track reference", fmt='o')
        ax.text(0.3, 1e2, r'$p_{{T,\mathrm{{jet}}}}$:''\n'r' {:02d}-{:02d} GeV'.format(
            jetPt[separate][0], jetPt[separate][1]))
        ax.set_xlim([0.1, 12])
        ax.set_ylim([5e-6, 1.5e3])
        ax.legend(loc='lower left')
        plt.savefig("PythonFigures/MixedFullJetsR04JetConeJtLeadingRefJetPt{0}.pdf".format(separate),
                    format='pdf')  #Save figure
        plt.show()  #Draw figure on screen
    else:
        n_rows = n_figs // 4
        print(n_rows)
        fig, axs = defs.makegrid(4, n_figs // 4, xlog=True, ylog=True, d=d,
                                 shareY=True, figsize=(10, 5))
        axs = axs.reshape(n_figs)
        axs[1].text(0.12, 0.002,
                    d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] + '\n Jet Cone',
                    fontsize=7)
        for jT, jT2, pT, ax, i in zip(signal[start:], signal2[start:],
                                      jetPt[start:], axs, range(0, 9)):
            jT.SetMarkerColor(1)
            jT2.SetMarkerColor(2)
            plot = rplt.errorbar(jT, xerr=False, emptybins=False, axes=ax,
                                 label="Jet axis reference", fmt='o',
                                 fillstyle='none', ecolor='black')  #Plot jT histogram
            plot = rplt.errorbar(jT2, xerr=False, emptybins=False, axes=ax,
                                 label="Leading track reference", fmt='o',
                                 fillstyle='none', ecolor='red')  #Plot jT histogram
            ax.text(0.3, 1e2, r'$p_{{T,\mathrm{{jet}}}}$:''\n'r' {:02d}-{:02d} GeV'.format(pT[0], pT[1]))
            ax.set_xlim([0.1, 22])  #Set x-axis limits
            ax.set_ylim([5e-4, 2e3])  #Set y-axis limits
            ax.set_xticklabels(ax.get_xticklabels(), horizontalalignment='left')
        axs[0].legend(loc='lower left')
        plt.savefig("PythonFigures/MixedFullJetsR04JetConeJtLeadingRefPtFrom{}To{}.pdf".format(start, end),
                    format='pdf')  #Save figure
        plt.show()  #Draw figure on screen
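# These scripts read their inputs from sys.argv, so each presumably ends with
# the usual entry-point guard. A minimal sketch; the .root filename below is
# a placeholder, not a file from this repository:
if __name__ == '__main__':
    # e.g. python plotJetVsLeadingRef.py legotrain_output.root 0 1 6
    main()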
def main():
    print('Number of arguments: ', len(sys.argv), 'arguments.')
    print('Argument list:', str(sys.argv))
    filename = sys.argv[1]
    filename2 = sys.argv[2]
    comment = sys.argv[3]
    if len(sys.argv) > 4:
        start = int(sys.argv[4])
    else:
        start = 1
    print("Input file: ")
    print(filename)

    LHC13d_FullJets = dataset(
        "LHC13d", NFIN=0, range=(0, 8), filename=filename,
        directory='AliJJetJtTask_kEMCEJE_{}/AliJJetJtHistManager'.format(comment),
        color=2, style=24, rebin=2)
    LHC13e_FullJets = dataset(
        "LHC13e", NFIN=0, range=(0, 8), filename=filename2,
        directory='AliJJetJtTask_kEMCEJE_{}/AliJJetJtHistManager'.format(comment),
        color=2, style=24, rebin=2)

    signal, jetPt = LHC13d_FullJets.getSubtracted('JetConeJtWeightBin',
                                                  'BgJtWeightBin', jetpt=True)
    signal2 = LHC13e_FullJets.getSubtracted('JetConeJtWeightBin',
                                            'BgJtWeightBin', jetpt=False)

    signals = [signal, signal2]
    names = ["LHC13d", "LHC13e"]
    colors = [1, 2]
    n_figs = 8
    n_rows = 2
    fig, axs = defs.makegrid(4, n_figs // 4, xlog=True, ylog=True, d=d,
                             shareY=False, figsize=(15, 7.5))
    axs = axs.reshape(8)
    axs[1].text(0.12, 0.002,
                d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] +
                '\n Jet Cone' + '\n Inclusive jT' + '\n' + comment,
                fontsize=7)
    for signal, name, color, j in zip(signals, names, colors, range(10)):
        print("Plot {}".format(name))
        for jT, pT, ax, i in zip(signal[start:], jetPt[start:], axs[0:4],
                                 range(0, 9)):
            print(i)
            jT.SetMarkerColor(color)
            jT.SetLineColor(color)
            plot = rplt.errorbar(jT, xerr=False, emptybins=False, axes=ax,
                                 label=name, fmt='o', fillstyle='none',
                                 ecolor='blue')  #Plot jT histogram
            ax.text(0.3, 1e2,
                    r'$p_{{T,\mathrm{{jet}}}}$:' '\n' r' {:02d}-{:02d} GeV'.format(pT[0], pT[1]))
            ax.set_xlim([0.1, 22])  #Set x-axis limits
            ax.set_ylim([5e-4, 2e3])  #Set y-axis limits
            ax.set_xticklabels(ax.get_xticklabels(), horizontalalignment='left')
        ratios = []
        if j > 0:
            for jT, div in zip(signal, signals[0]):
                h = jT.Clone()
                h.Divide(div)
                ratios.append(h)
            axs[4].set_ylabel('Ratio e/d', fontsize=9)  #Add y-axis labels to left- and rightmost subfigures
            axs[-1].set_ylabel('Ratio e/d', fontsize=9)
            for ratio, pT, ax in zip(ratios[start:], jetPt[start:], axs[4:9]):
                rplt.errorbar(ratio, xerr=False, emptybins=False, axes=ax,
                              label='Ratio', fmt='o')  #Plot ratio histogram
                print("Draw {}".format(ratio.GetName()))
                #if(i == 0):
                ax.set_yscale('linear')
                ax.set_xlim([0.1, 20])  #Set x-axis limits
                ax.set_ylim([0.8, 1.2])  #Set y-axis limits
    axs[0].legend(loc='lower left')
    plt.savefig(
        "PythonFigures/ACsideComparison/ACsideJetConeJtInclusivePtFrom{}To{}{}.pdf".format(
            start, start + 4, comment),
        format='pdf')  #Save figure
    plt.show()  #Draw figure on screen
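# Usage sketch for the A/C-side comparison above (the script and .root file
# names are placeholders, not paths from this repository):
#   python compareACside.py legotrain_LHC13d.root legotrain_LHC13e.root MyComment 1
# sys.argv[1]/[2] are the LHC13d and LHC13e input files, sys.argv[3] is the
# comment embedded in the task directory name, and the optional sys.argv[4]
# is the first jet pT bin to draw.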
def main():
    Rebin = 2
    # Measured pPb data plus four Monte Carlo references.
    Mixed_FullJets_R04 = datasetMixed(
        "pPb Full jets R=0.4", NFIN=0, range=(1, 5),
        filename="CF_pPb_legotrain/legotrain_CF_pPb_1839_20180613_LHC13bcde.root",
        directory='AliJJetJtTask/AliJJetJtHistManager',
        directory2='AliJJetJtTask_kEMCEJE/AliJJetJtHistManager',
        color=colors[0], style=24, rebin=Rebin)
    Pythia = dataset("Pythia8 4C", NFIN=0, range=(1, 8),
                     filename="Pythia/Grid_Monash.root",
                     directory='/JCDijetBaseTask/jcdijet',
                     color=colors[1], style=24, rebin=Rebin)
    Pythia2 = dataset("Pythia8 Monash", NFIN=0, range=(1, 8),
                      filename="Pythia/Grid_Tune4c.root",
                      directory='/JCDijetBaseTask/jcdijet',
                      color=colors[2], style=24, rebin=Rebin)
    Herwig = dataset("Herwig 7.0", NFIN=0, range=(1, 8),
                     filename="Herwig/Herwig-LHCtest.root",
                     directory='/JJetJt',
                     color=colors[3], style=24, rebin=Rebin)
    Pythia_ALICE = dataset("ALICE Pythia6 Perugia2011", NFIN=0, range=(1, 8),
                           filename="CF_pPb_MC_legotrain/legotrain_610_20181010-1926_LHCb4_fix_CF_pPb_MC_ptHardMerged.root",
                           directory='AliJJetJtTask/AliJJetJtHistManager',
                           color=colors[4], style=24, rebin=Rebin)

    #datasets = [Pythia]
    #inclusive,jetPt = Pythia.getHist('JetConeJtWeightBin',jetpt = True)
    #datasets.append(Mixed_FullJets_R04)
    inclusive, jetPt = Mixed_FullJets_R04.getHist('JetConeJtWeightBin', jetpt=True)
    datasets = [Mixed_FullJets_R04]
    incs = [inclusive]
    datasets.append(Pythia)
    datasets.append(Pythia2)
    datasets.append(Herwig)
    datasets.append(Pythia_ALICE)
    for data in datasets[1:]:
        incs.append(data.getHist('JetConeJtWeightBin', jetpt=False))

    names = [data.name() for data in datasets]
    signals = [data.getSubtracted('JetConeJtWeightBin', 'BgJtWeightBin', jetpt=False)
               for data in datasets]
    graphs = [data.getGraphs() for data in datasets]
    gausRMS = [x[0] for x in graphs]
    gammaRMS = [x[1] for x in graphs]
    gausYield = [x[2] for x in graphs]
    gammaYield = [x[3] for x in graphs]

    drawWithErrors2Combined(gausRMS, gammaRMS, names, 30, 150, 0, 0, 1.45, 0,
                            r'jet $p_T$', r'$\sqrt{\left<j_T^2\right>}$',
                            'Pythia', 'PythonFigures/RMScomparison',
                            separate=True)

    start = 3
    end = 8
    n_figs = 8
    n_rows = 2

    # First grid: inclusive jT, with ratios to the measured data below.
    fig, axs = defs.makegrid(4, n_figs // 4, xlog=True, ylog=True, d=d,
                             shareY=False, figsize=(15, 7.5))
    axs = axs.reshape(n_figs)
    axs[1].text(0.12, 0.002,
                d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] +
                '\n Jet Cone' + '\n Inclusive jT',
                fontsize=7)
    for inc, name, color, j in zip(incs, names, colors, range(10)):
        print("Plot {}".format(name))
        for jT, pT, ax, i in zip(inc[start:], jetPt[start:], axs[0:4], range(0, 9)):
            jT.SetMarkerColor(color)
            jT.SetMarkerStyle(styles[j])
            jT.SetLineColor(color)
            plot = rplt.errorbar(jT, xerr=False, emptybins=False, axes=ax,
                                 label=name, fmt='o', fillstyle='none',
                                 ecolor='blue')  #Plot jT histogram
            line = plot.get_children()[0]
            line.set_markersize(mSize)
            if styles[j] > 23:
                line.set_markerfacecolor('none')
                #line.set_markeredgecolor(color)
                line.set_color(color)
            ax.text(0.3, 1e2, r'$p_{{T,\mathrm{{jet}}}}$:''\n'r' {:02d}-{:02d} GeV'.format(pT[0], pT[1]))
            ax.set_xlim([0.1, 22])  #Set x-axis limits
            ax.set_ylim([5e-4, 2e3])  #Set y-axis limits
            ax.set_xticklabels(ax.get_xticklabels(), horizontalalignment='left')
        ratios = []
        if j > 0:
            for jT, div in zip(inc, incs[0]):
                h = jT.Clone()
                h.Divide(div)
                ratios.append(h)
            axs[4].set_ylabel('Ratio to {}'.format(names[0]), fontsize=9)  #Add y-axis labels to left- and rightmost subfigures
            axs[-1].set_ylabel('Ratio to {}'.format(names[0]), fontsize=9)
            for ratio, pT, ax in zip(ratios[start:], jetPt[start:], axs[4:9]):
                plot = rplt.errorbar(ratio, xerr=False, emptybins=False,
                                     axes=ax, label='Ratio', fmt='o')  #Plot ratio histogram
                line = plot.get_children()[0]
                line.set_markersize(mSize)
                if styles[j] > 23:
                    line.set_markerfacecolor('none')
                    line.set_color(color)
                #if(i == 0):
                ax.set_yscale('linear')
                ax.set_xlim([0.1, 20])  #Set x-axis limits
                ax.set_ylim([0, 2.2])  #Set y-axis limits
    axs[0].legend(loc='lower left')
    plt.savefig("PythonFigures/PythiaR04JetConeJtInclusivePtFrom{}To{}.pdf".format(start, end),
                format='pdf')  #Save figure
    plt.show()  #Draw figure on screen

    # Second grid: background-subtracted jT with the same ratio layout.
    fig, axs = defs.makegrid(4, n_figs // 4, xlog=True, ylog=True, d=d,
                             shareY=False, figsize=(15, 7.5))
    axs = axs.reshape(n_figs)
    axs[1].text(0.12, 0.002,
                d['system'] + '\n' + d['jettype'] + '\n' + d['jetalg'] +
                '\n Jet Cone' + '\n Subtracted jT',
                fontsize=7)
    for signal, name, color, j in zip(signals, names, colors, range(10)):
        print("Plot {}".format(name))
        for jT, pT, ax, i in zip(signal[start:], jetPt[start:], axs[0:4], range(0, 9)):
            jT.SetMarkerColor(color)
            jT.SetMarkerStyle(styles[j])
            jT.SetLineColor(color)
            plot = rplt.errorbar(jT, xerr=False, emptybins=False, axes=ax,
                                 label=name, fmt='o', fillstyle='none',
                                 ecolor='blue')  #Plot jT histogram
            line = plot.get_children()[0]
            line.set_markersize(mSize)
            if styles[j] > 23:
                line.set_markerfacecolor('none')
                #line.set_markeredgecolor(color)
                line.set_color(color)
            ax.text(0.3, 1e2, r'$p_{{T,\mathrm{{jet}}}}$:''\n'r' {:02d}-{:02d} GeV'.format(pT[0], pT[1]))
            ax.set_xlim([0.1, 22])  #Set x-axis limits
            ax.set_ylim([5e-4, 2e3])  #Set y-axis limits
            ax.set_xticklabels(ax.get_xticklabels(), horizontalalignment='left')
        ratios = []
        if j > 0:
            for jT, div in zip(signal, signals[0]):
                h = jT.Clone()
                h.Divide(div)
                ratios.append(h)
            axs[4].set_ylabel('Ratio to {}'.format(names[0]), fontsize=9)  #Add y-axis labels to left- and rightmost subfigures
            axs[-1].set_ylabel('Ratio to {}'.format(names[0]), fontsize=9)
            for ratio, pT, ax in zip(ratios[start:], jetPt[start:], axs[4:9]):
                plot = rplt.errorbar(ratio, xerr=False, emptybins=False,
                                     axes=ax, label='Ratio', fmt='o')  #Plot ratio histogram
                line = plot.get_children()[0]
                line.set_markersize(mSize)
                if styles[j] > 23:
                    line.set_markerfacecolor('none')
                    line.set_color(color)
                #if(i == 0):
                ax.set_yscale('linear')
                ax.set_xlim([0.1, 20])  #Set x-axis limits
                ax.set_ylim([0, 2.2])  #Set y-axis limits
    axs[0].legend(loc='lower left')
    plt.savefig("PythonFigures/PythiaR04JetConeJtSignalPtFrom{}To{}.pdf".format(start, end),
                format='pdf')  #Save figure
    plt.show()  #Draw figure on screen
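# Both grids above repeat the same Clone/Divide pattern when building the
# ratio panels. A small helper like this (hypothetical, not part of the
# original scripts) would factor it out; it only uses the ROOT TH1 methods
# Clone() and Divide() already relied on above:
def make_ratios(histograms, reference):
    """Return copies of 'histograms', each divided bin-by-bin by the matching reference histogram."""
    ratios = []
    for h, ref in zip(histograms, reference):
        r = h.Clone()
        r.Divide(ref)
        ratios.append(r)
    return ratios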