Example #1
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Runs Neural Monkey as a web server.")
    parser.add_argument("--port", type=int, default=5000)
    parser.add_argument("--host", type=str, default="127.0.0.1")
    parser.add_argument("--configuration", type=str, required=True)
    parser.add_argument("--preprocess", type=str,
                        required=False, default=None)
    args = parser.parse_args()

    print("")

    if args.preprocess is not None:
        preprocessing = Configuration()
        preprocessing.add_argument("preprocess")
        preprocessing.load_file(args.preprocess)
        preprocessing.build_model()
        APP.config["preprocess"] = preprocessing.model.preprocess
    else:
        APP.config["preprocess"] = []

    exp = Experiment(config_path=args.configuration)
    exp.build_model()
    APP.config["experiment"] = exp
    APP.run(port=args.port, host=args.host)
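The server examples hand the built Experiment to a module-level Flask app through APP.config. A minimal sketch of that hand-off pattern, assuming a plain Flask app (the route name, payload handling, and helper names below are hypothetical, not the actual neuralmonkey server module):

from flask import Flask, jsonify, request

APP = Flask(__name__)

@APP.route("/run", methods=["POST"])
def handle_run():
    # main() stores the built Experiment here before calling APP.run(...)
    exp = APP.config["experiment"]
    preprocess = APP.config["preprocess"]
    data = request.get_json()
    # a real handler would build a Dataset from `data`, apply `preprocess`,
    # and call exp.run_model(...); here we only echo what was received
    return jsonify({"received": data})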
Example #2
def main() -> None:
    # pylint: disable=no-member,broad-except
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("config",
                        metavar="INI-FILE",
                        help="the configuration file of the experiment")
    parser.add_argument("datasets",
                        metavar="INI-TEST-DATASETS",
                        help="the configuration of the test datasets")
    parser.add_argument("-g",
                        "--grid",
                        dest="grid",
                        action="store_true",
                        help="look at the SGE variables for slicing the data")
    args = parser.parse_args()

    test_datasets = Configuration()
    test_datasets.add_argument("test_datasets")
    test_datasets.add_argument("variables", cond=lambda x: isinstance(x, list))

    test_datasets.load_file(args.datasets)
    test_datasets.build_model()
    datasets_model = test_datasets.model

    exp = Experiment(config_path=args.config)
    exp.build_model()
    exp.load_variables(datasets_model.variables)

    if args.grid and len(datasets_model.test_datasets) > 1:
        raise ValueError("Only one test dataset supported when using --grid")

    for dataset in datasets_model.test_datasets:
        if args.grid:
            if ("SGE_TASK_FIRST" not in os.environ
                    or "SGE_TASK_LAST" not in os.environ
                    or "SGE_TASK_STEPSIZE" not in os.environ
                    or "SGE_TASK_ID" not in os.environ):
                raise EnvironmentError(
                    "Some SGE environment variables are missing")

            length = int(os.environ["SGE_TASK_STEPSIZE"])
            start = int(os.environ["SGE_TASK_ID"]) - 1
            end = int(os.environ["SGE_TASK_LAST"]) - 1

            if start + length > end:
                length = end - start + 1

            log("Running grid task {} starting at {} with step {}".format(
                start // length, start, length))

            dataset = dataset.subset(start, length)

        if exp.config.args.evaluation is None:
            exp.run_model(dataset, write_out=True)
        else:
            exp.evaluate(dataset, write_out=True)

    for session in exp.config.model.tf_manager.sessions:
        session.close()
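The --grid branch above slices the test dataset according to SGE array-job variables. A small self-contained sketch of the index arithmetic, using hypothetical values instead of a real SGE job:

import os

# hypothetical values an SGE array job might export
os.environ.update({"SGE_TASK_FIRST": "1", "SGE_TASK_LAST": "95",
                   "SGE_TASK_STEPSIZE": "10", "SGE_TASK_ID": "91"})

length = int(os.environ["SGE_TASK_STEPSIZE"])  # sentences per task
start = int(os.environ["SGE_TASK_ID"]) - 1     # 0-based offset into the data
end = int(os.environ["SGE_TASK_LAST"]) - 1     # 0-based offset of the last task

if start + length > end:  # the final task may get a shorter slice
    length = end - start + 1

print(start // length, start, length)  # -> 18 90 5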
Example #3
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Runs Neural Monkey as a web server.")
    parser.add_argument("--port", type=int, default=5000)
    parser.add_argument("--host", type=str, default="127.0.0.1")
    parser.add_argument("--configuration", type=str, required=True)
    args = parser.parse_args()

    print("")

    exp = Experiment(config_path=args.configuration)
    exp.build_model()
    APP.config["experiment"] = exp
    APP.run(port=args.port, host=args.host)
Example #4
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Runs Neural Monkey as a web server.")
    parser.add_argument("--port", type=int, default=5000)
    parser.add_argument("--host", type=str, default="127.0.0.1")
    parser.add_argument("--configuration", type=str, required=True)
    parser.add_argument("--preprocess", type=str, required=False, default=None)
    args = parser.parse_args()

    print("")

    if args.preprocess is not None:
        preprocessing = Configuration()
        preprocessing.add_argument("preprocess")
        preprocessing.load_file(args.preprocess)
        preprocessing.build_model()
        APP.config["preprocess"] = preprocessing.model.preprocess
    else:
        APP.config["preprocess"] = []

    exp = Experiment(config_path=args.configuration)
    exp.build_model()
    APP.config["experiment"] = exp
    APP.run(port=args.port, host=args.host)
Example #5
def main() -> None:
    # pylint: disable=no-member,broad-except
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("config",
                        metavar="INI-FILE",
                        help="the configuration file of the experiment")
    parser.add_argument("datasets",
                        metavar="INI-FILE",
                        help="the configuration file of the experiment")
    parser.add_argument("--beam",
                        metavar="BEAM_SIZE",
                        type=int,
                        default=10,
                        help="Beam size.")
    parser.add_argument("--kenlm",
                        type=str,
                        default=None,
                        help="Path to a KenLM model arpa file.")
    parser.add_argument("--lm-weight",
                        type=float,
                        help="Weight of the language model.")
    parser.add_argument("--null-trail-weight",
                        type=float,
                        help="Weight of the null-trailing feature.")
    parser.add_argument("--nt-ratio-weight",
                        type=float,
                        help="Weight of the null-token ratio feature.")
    parser.add_argument("--out", type=str, help="Path to the output file.")
    args = parser.parse_args()

    test_datasets = Configuration()
    test_datasets.add_argument("test_datasets")
    test_datasets.add_argument("batch_size", cond=lambda x: x > 0)
    test_datasets.add_argument("variables", cond=lambda x: isinstance(x, list))

    test_datasets.load_file(args.datasets)
    test_datasets.build_model()
    datasets_model = test_datasets.model

    exp = Experiment(config_path=args.config)
    exp.build_model()
    exp.load_variables(datasets_model.variables)

    ctc_decoder = None
    for runner in exp.model.runners:
        if (isinstance(runner, PlainRunner)
                and isinstance(runner.decoder, CTCDecoder)):
            ctc_decoder = runner.decoder
            break

    if ctc_decoder is None:
        raise ValueError(
            "Was not able to detect CTC decoder in the configuration.")

    logits_runner = RepresentationRunner(output_series="logits",
                                         encoder=ctc_decoder,
                                         attribute="logits")
    exp.model.runners = [logits_runner]

    dataset = datasets_model.test_datasets[0]
    singleton_batches = dataset.batches(BatchingScheme(1))
    print("Loading language model")
    lm = NGramModel(args.kenlm)
    print("LM loaded")

    weights = {}

    if args.lm_weight is not None:
        weights['lm_score'] = args.lm_weight

    if args.null_trail_weight is not None:
        weights['null_trailing'] = args.null_trail_weight

    if args.nt_ratio_weight is not None:
        weights['null_token_ratio'] = args.nt_ratio_weight

    print("Weights:", weights)

    stats = []

    with open(args.out, 'w') as out_file:
        for i, sent_dataset in enumerate(singleton_batches):

            t1 = timeit.default_timer()
            ctc_model_result = exp.run_model(sent_dataset,
                                             write_out=False,
                                             batch_size=1)
            t2 = timeit.default_timer()

            logits = np.squeeze(ctc_model_result[1]['logits'], axis=1)

            t3 = timeit.default_timer()
            best_hyp = decode_beam(logits,
                                   args.beam,
                                   ctc_decoder.vocabulary,
                                   lm=lm,
                                   weights=weights)
            t4 = timeit.default_timer()

            stats.append([len(best_hyp.tokens), t2 - t1, t4 - t3])

            output = " ".join([best_hyp.tokens][0])
            out_file.write(output + "\n")

            if i % 10 == 0:
                print("[{}] {}".format(i, output))

    with open(args.out + ".stats", 'w') as stats_file:
        for line in stats:
            stats_file.write("{} {:.3f} {:.3f}\n".format(*line))

    for session in exp.config.model.tf_manager.sessions:
        session.close()
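decode_beam above presumably combines the CTC model score with the extra features according to the weights dict. A hedged sketch of such a log-linear combination (the feature names mirror the keys used above; this is not the actual decode_beam internals):

def combined_score(model_score, features, weights):
    # log-linear interpolation: model score plus weighted feature scores
    return model_score + sum(weights[name] * features[name]
                             for name in weights)

weights = {"lm_score": 0.5, "null_trailing": -0.1}
features = {"lm_score": -12.7, "null_trailing": 3.0}
print(combined_score(-4.2, features, weights))  # -4.2 - 6.35 - 0.3 = -10.85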
Example #6
def main() -> None:
    # pylint: disable=no-member,broad-except
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("config",
                        metavar="INI-FILE",
                        help="the configuration file of the experiment")
    parser.add_argument("datasets",
                        metavar="INI-FILE",
                        help="the configuration file of the experiment")
    parser.add_argument("--beam",
                        metavar="BEAM_SIZE",
                        type=int,
                        default=10,
                        help="Beam size.")
    parser.add_argument("--kenlm",
                        type=str,
                        help="Path to a KenLM model arpa file.")
    parser.add_argument("--prefix",
                        type=str,
                        help="Path used as a prefix of stored checkpoints.")
    parser.add_argument("--lm-weight",
                        type=float,
                        help="Default weight of the language model.")
    parser.add_argument("--null-trail-weight",
                        type=float,
                        help="Default weight of the null-trailing feature.")
    parser.add_argument("--nt-ratio-weight",
                        type=float,
                        help="Default weight of the null-token ratio feature.")

    args = parser.parse_args()

    test_datasets = Configuration()
    test_datasets.add_argument("test_datasets")
    test_datasets.add_argument("batch_size", cond=lambda x: x > 0)
    test_datasets.add_argument("variables", cond=lambda x: isinstance(x, list))

    test_datasets.load_file(args.datasets)
    test_datasets.build_model()
    datasets_model = test_datasets.model

    exp = Experiment(config_path=args.config)
    exp.build_model()
    exp.load_variables(datasets_model.variables)

    weights = {}

    if args.lm_weight is not None:
        weights['lm_score'] = args.lm_weight

    if args.null_trail_weight is not None:
        weights['null_trailing'] = args.null_trail_weight

    if args.nt_ratio_weight is not None:
        weights['null_token_ratio'] = args.nt_ratio_weight

    if not weights:
        raise ValueError("No default weights specified, nothing to train.")

    ctc_decoder = None
    for runner in exp.model.runners:
        if (isinstance(runner, PlainRunner)
                and isinstance(runner.decoder, CTCDecoder)):
            ctc_decoder = runner.decoder
            break

    if ctc_decoder is None:
        raise ValueError(
            "Was not able to detect CTC decoder in the configuration.")

    print("Loading language model")
    lm = NGramModel(args.kenlm)
    print("LM loaded")

    logits_runner = RepresentationRunner(output_series="logits",
                                         encoder=ctc_decoder,
                                         attribute="logits")
    exp.model.runners = [logits_runner]

    dataset = datasets_model.test_datasets[0]
    singleton_batches = dataset.batches(BatchingScheme(1))

    DATASET_SIZE = dataset.length
    CHECKPOINTS = 5
    CHECKPOINT_ITERS = DATASET_SIZE // CHECKPOINTS

    print("{} sentences in the dataset, checkpoint every {} sentences "
          "({} checkpoints in total).".format(
              DATASET_SIZE, CHECKPOINT_ITERS, CHECKPOINTS))

    for i, sent_dataset in enumerate(singleton_batches):
        ctc_model_result = exp.run_model(sent_dataset,
                                         write_out=False,
                                         batch_size=1)

        logits = np.squeeze(ctc_model_result[1]['logits'], axis=1)
        target = ctc_model_result[2]['target'][0]

        train_weights(logits, args.beam, ctc_decoder.vocabulary, target,
                      weights, lm)

        print(
            "[{}] Weights:".format(i + 1), ", ".join([
                "{}: {:.3f}".format(key, value)
                for key, value in weights.items()
            ]))

        if i != 0 and (i + 1) % CHECKPOINT_ITERS == 0:
            with open("{}.{}".format(args.prefix, i // CHECKPOINT_ITERS),
                      "w") as f:
                for key, value in weights.items():
                    f.write("{}={:.3f}\n".format(key.upper(), value))

            print("\nCheckpoint saved.\n")

    for session in exp.config.model.tf_manager.sessions:
        session.close()
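Each checkpoint written above is a plain text file of KEY=value lines. A small sketch of reading one back into a weights dict (this reader is hypothetical, not part of the source):

def load_weights(path):
    # parses lines like "LM_SCORE=0.500" back into {"lm_score": 0.5}
    weights = {}
    with open(path) as checkpoint:
        for line in checkpoint:
            key, value = line.strip().split("=")
            weights[key.lower()] = float(value)
    return weights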
Example #7
class NeuralMonkeyModelWrapper(ModelWrapper):
    def __init__(self,
                 runs_on_features,
                 config_path,
                 vars_path,
                 data_series="",
                 src_caption_series="",
                 caption_series="",
                 alignments_series="",
                 bs_graph_series="bs_target",
                 name=None):
        """
        caption_series -> GreedyRunner output_series
        alignments_series -> WordAlignmentRunner output_series
        bs_graph_series -> BeamSearchRunner output_series
        """

        super(NeuralMonkeyModelWrapper, self).__init__(name, runs_on_features)

        if not os.path.isfile(config_path):
            raise ValueError("File {} does not exist.".format(config_path))

        self._config_path = config_path
        self._vars_path = vars_path
        self._data_series = data_series
        self._src_caption_series = src_caption_series
        self._caption_series = caption_series
        self._alignments_series = alignments_series
        self._bs_graph_series = bs_graph_series

        self._exp = Experiment(config_path=config_path)
        self._exp.build_model()
        self._exp.load_variables([vars_path])

        self.multimodal = bool(self._src_caption_series)

    def run(self, inputs, src_captions=None):
        """
        Args:
            inputs: A NumPy array of inputs (feature or image arrays).
            src_captions: A list of string source captions.
        Returns:
            A list of dictionaries. Each dictionary contains the keys
            `caption`, `alignments`, `beam_search_output_graph`.
        """

        n_elems = inputs.shape[0]

        if self._src_caption_series:
            # TODO: handle multimodal translation case
            raise NotImplementedError(
                "Multimodal translation is not handled yet.")

        # both the enc-dec model (runs on images) and the dec-only model
        # (runs on feature maps) feed the inputs through a single data series
        ds = Dataset("macaque_data",
                     {self._data_series: lambda: inputs}, {})

        _, output_series = self._exp.run_model(dataset=ds, write_out=False)

        if self._caption_series and self._caption_series in output_series:
            captions = output_series[self._caption_series]
        else:
            captions = [None] * n_elems

        if self._alignments_series and self._alignments_series in output_series:
            alignments = output_series[self._alignments_series]
            # WordAlignmentRunner is incompatible with beam search decoding.
            if self._bs_graph_series:
                alignments = None
        else:
            alignments = [None] * n_elems

        if self._bs_graph_series and self._bs_graph_series in output_series:
            bs_out = output_series[self._bs_graph_series]
            graphs = []
            for b in bs_out:
                attns = [_transform_alignments(a) for a in b['alignments']]
                graphs.append(
                    BeamSearchOutputGraph(scores=b['scores'],
                                          tokens=b['tokens'],
                                          parent_ids=b['parent_ids'],
                                          alignments=attns))

            hyps = [g.collect_hypotheses() for g in graphs]
            bs_caps = [h['tokens'] for h in hyps]
            bs_attns = [h['alignments'] for h in hyps]
        else:
            graphs = [None] * n_elems
            bs_caps = [None] * n_elems
            bs_attns = [None] * n_elems

        results = []
        for c, a, bs_g, bs_c, bs_a in zip(captions, alignments, graphs,
                                          bs_caps, bs_attns):
            r = {}
            for value, key in [(c, 'caption'), (a, 'alignments')]:
                if value is not None:
                    r.setdefault('greedy', {})[key] = value
            for value, key in [(bs_g, 'graph'), (bs_c, 'captions'),
                               (bs_a, 'alignments')]:
                if value is not None:
                    r.setdefault('beam_search', {})[key] = value
            results.append(r)
        return results
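A hedged usage sketch for the wrapper above; the paths, series names, and feature shape are hypothetical:

import numpy as np

wrapper = NeuralMonkeyModelWrapper(
    runs_on_features=True,
    config_path="experiment.ini",     # hypothetical paths
    vars_path="variables.data",
    data_series="images",
    bs_graph_series="bs_target")

features = np.zeros((2, 49, 2048), dtype=np.float32)  # hypothetical shape
for result in wrapper.run(features):
    print(result.get("beam_search", {}).get("captions"))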
Example #8
def main() -> None:
    # pylint: disable=no-member
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("config", metavar="INI-FILE",
                        help="the configuration file of the experiment")
    parser.add_argument("datasets", metavar="INI-TEST-DATASETS",
                        help="the configuration of the test datasets")
    parser.add_argument("--json", type=str, help="write the evaluation "
                        "results to this file in JSON format")
    parser.add_argument("-g", "--grid", dest="grid", action="store_true",
                        help="look at the SGE variables for slicing the data")
    args = parser.parse_args()

    datasets_model = load_runtime_config(args.datasets)

    exp = Experiment(config_path=args.config)
    exp.build_model()
    exp.load_variables(datasets_model.variables)

    if args.grid and len(datasets_model.test_datasets) > 1:
        raise ValueError("Only one test dataset supported when using --grid")

    results = []
    for dataset in datasets_model.test_datasets:
        if args.grid:
            if ("SGE_TASK_FIRST" not in os.environ
                    or "SGE_TASK_LAST" not in os.environ
                    or "SGE_TASK_STEPSIZE" not in os.environ
                    or "SGE_TASK_ID" not in os.environ):
                raise EnvironmentError(
                    "Some SGE environment variables are missing")

            length = int(os.environ["SGE_TASK_STEPSIZE"])
            start = int(os.environ["SGE_TASK_ID"]) - 1
            end = int(os.environ["SGE_TASK_LAST"]) - 1

            if start + length > end:
                length = end - start + 1

            log("Running grid task {} starting at {} with step {}"
                .format(start // length, start, length))

            dataset = dataset.subset(start, length)

        if exp.config.args.evaluation is None:
            exp.run_model(dataset,
                          write_out=True,
                          batch_size=datasets_model.batch_size)
        else:
            eval_result = exp.evaluate(dataset,
                                       write_out=True,
                                       batch_size=datasets_model.batch_size)
            results.append(eval_result)

    if args.json:
        with open(args.json, "w") as f_out:
            json.dump(results, f_out)
            f_out.write("\n")

    for session in exp.config.model.tf_manager.sessions:
        session.close()
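When --json is given, the collected evaluation results are dumped as a single JSON document. A sketch of consuming that file (the path is hypothetical, and each entry is assumed to be a mapping of metric names to scores):

import json

with open("eval_results.json") as f:  # hypothetical path passed via --json
    results = json.load(f)

for dataset_result in results:  # one entry per test dataset
    print(dataset_result)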
Example #9
File: run.py Project: ufal/neuralmonkey
def main() -> None:
    # pylint: disable=no-member
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("config", metavar="INI-FILE",
                        help="the configuration file of the experiment")
    parser.add_argument("datasets", metavar="INI-TEST-DATASETS",
                        help="the configuration of the test datasets")
    parser.add_argument("-s", "--set", type=str, metavar="SETTING",
                        action="append", dest="config_changes", default=[],
                        help="override an option in the configuration; the "
                        "syntax is [section.]option=value")
    parser.add_argument("-v", "--var", type=str, metavar="VAR", default=[],
                        action="append", dest="config_vars",
                        help="set a variable in the configuration; the syntax "
                        "is var=value (shorthand for -s vars.var=value)")
    parser.add_argument("--json", type=str, help="write the evaluation "
                        "results to this file in JSON format")
    parser.add_argument("-g", "--grid", dest="grid", action="store_true",
                        help="look at the SGE variables for slicing the data")
    args = parser.parse_args()

    datasets_model = load_runtime_config(args.datasets)

    args.config_changes.extend("vars.{}".format(s) for s in args.config_vars)
    exp = Experiment(config_path=args.config,
                     config_changes=args.config_changes)

    exp.build_model()
    exp.load_variables(datasets_model.variables)

    if args.grid and len(datasets_model.test_datasets) > 1:
        raise ValueError("Only one test dataset supported when using --grid")

    results = []
    for dataset in datasets_model.test_datasets:
        if args.grid:
            if ("SGE_TASK_FIRST" not in os.environ
                    or "SGE_TASK_LAST" not in os.environ
                    or "SGE_TASK_STEPSIZE" not in os.environ
                    or "SGE_TASK_ID" not in os.environ):
                raise EnvironmentError(
                    "Some SGE environment variables are missing")

            length = int(os.environ["SGE_TASK_STEPSIZE"])
            start = int(os.environ["SGE_TASK_ID"]) - 1
            end = int(os.environ["SGE_TASK_LAST"]) - 1

            if start + length > end:
                length = end - start + 1

            log("Running grid task {} starting at {} with step {}"
                .format(start // length, start, length))

            dataset = dataset.subset(start, length)

        if exp.config.args.evaluation is None:
            exp.run_model(dataset, write_out=True)
        else:
            eval_result = exp.evaluate(dataset, write_out=True)
            results.append(eval_result)

    if args.json:
        with open(args.json, "w") as f_out:
            json.dump(results, f_out)
            f_out.write("\n")

    for session in exp.config.model.tf_manager.sessions:
        session.close()
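The -v flag above is shorthand for -s vars.<var>=<value>; the extend call before building the Experiment performs that rewrite. A tiny self-contained demonstration with hypothetical values:

config_changes = ["decoder.beam_size=5"]   # from -s
config_vars = ["data_prefix=/tmp/test"]    # from -v
config_changes.extend("vars.{}".format(s) for s in config_vars)
print(config_changes)
# -> ['decoder.beam_size=5', 'vars.data_prefix=/tmp/test']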