def convert(self, value, param, ctx):
    """Resolve a domain name to its imported ``bundle`` module.

    Aborts CLI parameter parsing via ``self.fail`` (which raises) when
    the bundle for *value* cannot be imported.
    """
    target = f'{value}.bundle'
    try:
        bundle = do_import(target, relative_import='stresstest')
    except AttributeError:
        self.fail(
            f"Domain '{value}' unknown, please choose from {implemented_domains}"
        )
    else:
        return bundle
def convert(self, value, param, ctx):
    """Resolve a dataset-format name to a function in ``stresstest.ds_utils``.

    On an unknown name, lists every function defined in ``ds_utils`` in
    the failure message and aborts via ``self.fail``.
    """
    try:
        return do_import(value, relative_import='stresstest.ds_utils')
    except AttributeError:
        # Imported lazily: only needed to build the error message.
        import inspect

        from stresstest import ds_utils
        all_function_names = [
            name for name, _ in inspect.getmembers(ds_utils, inspect.isfunction)
        ]
        self.fail(
            f"Format '{value}' unknown, please choose from {all_function_names}."
        )
def convert(self, value, param, ctx):
    """Resolve a comma-separated list of metric names to metric classes.

    Each name is imported from ``stresstest.eval_utils``; the first name
    that cannot be resolved aborts parsing via ``self.fail`` (which
    raises), so a partial list is never returned.
    """
    vals = value.split(',')
    metric_classes = []
    for val in vals:
        try:
            metric_classes.append(
                do_import(val, relative_import='stresstest.eval_utils'))
        except AttributeError:
            # Fix: report the specific metric that failed (`val`), not the
            # whole comma-separated argument (`value`).
            self.fail(
                f"Metric '{val}' unknown, please choose from {get_all_subclasses(EvalMetric)}"
            )
    return metric_classes
def predict(in_files, output_folder, models, model_classes, gpu, batch_size):
    """Run each (model class, weights) pair over each input file, saving predictions.

    Args:
        in_files: paths to JSON sample files to evaluate on.
        output_folder: directory the prediction JSON files are written to.
        models: paths to weights files; parallel to ``model_classes``.
        model_classes: dotted names of model classes under ``stresstest.model``.
        gpu: force GPU on/off; autodetected when ``None``.
        batch_size: number of entries fed to ``predict_batch`` at once.

    Exits with status 1 when ``models`` and ``model_classes`` differ in length.
    """
    # There is a chance i'll need to scrap all of this and do convert to features stuff
    if gpu is None:
        gpu = _is_gpu_available()
    logger.debug(fmt_dict(locals()))
    # Fix: idiomatic `!=` instead of `not ... ==`, and spaces around `!=`
    # in the user-facing message (was glued: `)!= num`).
    if len(models) != len(model_classes):
        click.echo(
            f"Num models supplied ({len(models)}) != num model classes supplied ({len(model_classes)})!"
        )
        sys.exit(1)
    for cls, weights_path in zip(model_classes, models):
        model_cls: Model = do_import(cls, relative_import='stresstest.model')
        # TODO: Bidaf should also respect max answer length
        model = model_cls.make(weights_path, gpu=gpu)
        click.echo(
            f"Evaluating model '{click.style(model_cls.__name__, fg='green', bold=True)}' from weights file: "
            f"{click.style(weights_path, fg='blue')}.")
        click.echo(
            f"Running on {click.style('gpu' if gpu else 'cpu', fg='green', bold=True)}."
        )
        for in_file in in_files:
            sample = load_json(in_file)
            num_q = num_questions(sample)
            click.echo(
                f"Evaluating on sample (n={num_q}, |{{C}}|={len(sample)}): {click.style(in_file, fg='blue')}"
            )
            predictions = dict()
            # Batched inference; tqdm tracks progress over individual questions.
            for sample_batch in batch(tqdm(sample_iter(sample), position=1,
                                           total=num_q),
                                      batch_size=batch_size):
                sample_batch: List[Entry]
                batch_predictions = model.predict_batch(sample_batch)
                for entry, answer in zip(sample_batch, batch_predictions):
                    logger.debug(f"Passage: {entry.passage}")
                    logger.debug(f"Question: {entry.question}")
                    logger.debug(f"Prediction: {answer}")
                    # Answers are stringified so the result is JSON-serializable.
                    predictions[entry.qa_id] = str(answer)
            output_file_name = get_output_predictions_file_name(
                in_file, output_folder, weights_path)
            click.echo(
                f"Saving predictions to {click.style(output_file_name, fg='blue')}"
            )
            write_json(predictions, output_file_name, pretty=False)
def _get_templates_dict(module):
    """Collect the standard template attributes of *module* into a dict.

    Maps each known template name to the object imported from
    ``<module name>.<template name>``.
    """
    template_names = (
        'dollar', 'sentences', 'at', 'percent', 'bang', 'question_templates'
    )
    templates = {}
    for name in template_names:
        templates[name] = do_import(name, f"{module.__name__}.{name}")
    return templates