import torch
import torch.nn as nn

# Project-local helpers (models, ModelSaver, TestLogger, ArgParser,
# get_cifar_loaders) are assumed to be imported elsewhere in this module.


def test(args):
    model_fn = models.__dict__[args.model]
    model = model_fn(args.num_classes)
    model = nn.DataParallel(model, args.gpu_ids)
    ckpt_info = ModelSaver.load_model(args.ckpt_path, model)
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    model.eval()

    _, test_loader, _ = get_cifar_loaders(args.batch_size, args.num_workers)
    logger = TestLogger(args)

    logger.start_epoch()
    for inputs, labels in test_loader:
        logger.start_iter()
        # No gradients are needed at test time.
        with torch.set_grad_enabled(False):
            # Forward
            logits = model(inputs.to(args.device))
        logger.end_iter(inputs, labels, logits)
    logger.end_epoch()


if __name__ == '__main__':
    parser = ArgParser()
    args_ = parser.parse_args()
    test(args_)
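# For reference, a self-contained sketch of the top-1 accuracy a test logger
# like TestLogger would typically track from these logits. The helper name
# `top1_accuracy` is hypothetical, not part of the project.
import torch

def top1_accuracy(logits: torch.Tensor, labels: torch.Tensor) -> float:
    """Fraction of examples whose highest-scoring class matches the label."""
    preds = logits.argmax(dim=1)
    return (preds == labels).float().mean().item()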
        experiment2probabilities, compare_groundtruth)

    # Test differences in AUC between default setting and other experiments.
    default_experiment_curve = experiment_curves["default"]
    for experiment in experiment_curves:
        if experiment != "default":
            p_value = tests.test_auc_difference(default_experiment_curve,
                                                experiment_curves[experiment])
            logger.log(f"p-value for experiment {experiment}: {p_value}.")


if __name__ == "__main__":
    parser = ArgParser()
    args = parser.parse_args()
    logger = Logger(args.log_dir)
    config = load_config(args.config_path, logger)

    # Load R functions.
    load_r()

    compare_groundtruth = load_data(config["compare_groundtruth_path"])
    experiments = config["compare_probabilities_paths"].keys()
    experiment2probabilities = {}
    experiment2metrics = {}
    for experiment in experiments:
        print(experiment)
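# The script above defers the AUC significance test to R (load_r, then
# tests.test_auc_difference, presumably a DeLong-style comparison). As a rough
# pure-Python analogue (an assumption, not the project's implementation), a
# paired bootstrap test of the AUC difference could look like:
import numpy as np
from sklearn.metrics import roc_auc_score

def bootstrap_auc_difference(y_true, probs_a, probs_b, n_boot=2000, seed=0):
    """Two-sided paired-bootstrap p-value for a difference in AUC."""
    y_true, probs_a, probs_b = map(np.asarray, (y_true, probs_a, probs_b))
    rng = np.random.default_rng(seed)
    observed = roc_auc_score(y_true, probs_a) - roc_auc_score(y_true, probs_b)
    diffs = []
    n = len(y_true)
    for _ in range(n_boot):
        idx = rng.integers(0, n, n)          # resample cases with replacement
        if len(np.unique(y_true[idx])) < 2:  # AUC needs both classes present
            continue
        diffs.append(roc_auc_score(y_true[idx], probs_a[idx])
                     - roc_auc_score(y_true[idx], probs_b[idx]))
    diffs = np.asarray(diffs)
    # Center the bootstrap distribution to approximate the null of equal AUCs.
    return float(np.mean(np.abs(diffs - diffs.mean()) >= abs(observed)))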
def main(self):
    p = ArgParser()
    (options, in_path, out_path) = p.parse_args()
    self.options = options
    obj = self.process_file(in_path, out_path)
                out += '# %d "%s"\n' % (current_line + 1, current_file)
            elif is_asm:
                line_mapping += [(current_file, current_line)]
                asm += line
            else:
                out += line
            current_line += 1

        # We have some left-over assembly code: missing an end marker.
        if len(asm) > 0:
            raise Exception("Leftover assembler code. Did you miss a %s?"
                            % self.marker[1])

        # Write result.
        with open(out_path, 'w') as f:
            f.write(out)


if __name__ == '__main__':
    p = ArgParser()
    (options, in_path, out_path) = p.parse_args()

    if options.config == "x86" or options.config == "ia32":
        arch = Arch(X86)
    elif options.config == "arm":
        arch = Arch(ARM)
    else:
        raise Exception("Invalid configuration (-c): should be x86, ia32 or arm.")

    asm_rewriter = AssemblyRewriter(arch)
    asm_rewriter.process_file(in_path, out_path)
    sys.exit(0)
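# For context: the loop excerpted above evidently toggles between source and
# assembly lines on begin/end markers (self.marker). A standalone sketch of
# that state machine, with assumed marker strings, might be:
BEGIN_MARKER, END_MARKER = "#asm", "#endasm"  # hypothetical markers

def split_asm(lines, begin=BEGIN_MARKER, end=END_MARKER):
    """Separate inline assembly blocks from ordinary source lines."""
    out, asm, in_asm = [], [], False
    for line in lines:
        stripped = line.strip()
        if stripped == begin:
            in_asm = True
        elif stripped == end:
            in_asm = False
        elif in_asm:
            asm.append(line)
        else:
            out.append(line)
    if in_asm:
        raise Exception("Leftover assembler code. Did you miss a %s?" % end)
    return out, asm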
def get_labels(report):
    parser = ArgParser()
    args = parser.parse_args()

    loader = SingleLoader(report)
    extractor = Extractor(args.mention_phrases_dir,
                          args.unmention_phrases_dir,
                          verbose=args.verbose)
    classifier = Classifier(args.pre_negation_uncertainty_path,
                            args.negation_path,
                            args.post_negation_uncertainty_path,
                            verbose=args.verbose)
    aggregator = Aggregator(CATEGORIES, verbose=args.verbose)

    # Load reports in place.
    loader.load()
    # Extract observation mentions in place.
    extractor.extract(loader.collection)
    # Classify mentions in place.
    classifier.classify(loader.collection)
    # Aggregate mentions to obtain one set of labels for each report.
    labels = aggregator.aggregate(loader.collection)

    return labels


if __name__ == "__main__":
    parser = ArgParser()
    label(parser.parse_args())
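# Example call site for get_labels (the report text is illustrative; the
# phrase directories and uncertainty/negation paths are still read from the
# command line by ArgParser inside the function):
#
#     sample_report = "No focal consolidation, pleural effusion, or pneumothorax."
#     labels = get_labels(sample_report)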
        # Evaluate on validation set
        val_loss = evaluate(model, test_loader, loss_fn, device=args.device)
        logger.write('[epoch {}]: val_loss: {:.3g}'.format(
            logger.epoch, val_loss))
        logger.write_summaries({'loss': val_loss}, phase='val')

        if logger.epoch in args.save_epochs:
            saver.save(logger.epoch, val_loss)

        logger.end_epoch()
        scheduler.step()


def evaluate(model, data_loader, loss_fn, device='cpu'):
    """Evaluate the model."""
    model.eval()
    losses = []
    print('Evaluating model...')
    for inputs, labels in tqdm(data_loader):
        with torch.no_grad():
            # Forward
            outputs = model(inputs.to(device))
            loss = loss_fn(outputs, labels.to(device))
        losses.append(loss.item())
    return np.mean(losses)


if __name__ == '__main__':
    parser = ArgParser()
    train(parser.parse_args())
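# The saver used above isn't shown in this excerpt. A minimal sketch of the
# usual pattern (hypothetical BestLossSaver class, not the project's
# ModelSaver; it keeps only the checkpoint with the lowest validation loss):
import torch

class BestLossSaver:
    """Hypothetical saver: overwrite the checkpoint only when val loss improves."""

    def __init__(self, ckpt_path, model):
        self.ckpt_path = ckpt_path
        self.model = model
        self.best_loss = float('inf')

    def save(self, epoch, val_loss):
        if val_loss < self.best_loss:
            self.best_loss = val_loss
            torch.save({'epoch': epoch,
                        'model_state': self.model.state_dict()},
                       self.ckpt_path)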