Example #1
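This snippet demonstrates the configsimple pattern of combining a top-level argument parser (topconfig) with per-component configurations that register themselves under prefixes such as comp1 and comp2. The scraped listing is truncated: it opens in the middle of a class body, so the imports and most of the Component1/Component2 definitions are missing (a hedged reconstruction follows the listing).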
from configsimple import topconfig  # assumed import; the listing starts mid-class


class Component2:
    # The rest of this class (notably the configsimple() factory called below)
    # was cut off in the scraped listing.
    def __init__(self):
        myconf = Component2.configsimple()
        topconfig.add_config(myconf)
        myconf.parse_args()
        print("Component2 foo is {}".format(myconf.get("foo")))


if __name__ == "__main__":
    topconfig.add_argument("--bar", help="The BAR setting")
    topconfig.add_argument("--foo", help="The toplevel FOO setting")
    topconfig.add_argument("--comp", type=int, choices=[1, 2], required=True,  help="Component number")
    topconfig.add_argument("pos1")
    topconfig.add_config(Component1.configsimple())
    topconfig.add_config(Component2.configsimple())
    topconfig.parse_args()
    print("Toplevel foo is {}".format(topconfig.get("foo")))
    compclass = [Component1, Component2][topconfig.get("comp")-1]
    comp = compclass()
    print("Get the global comp1.foo: {}".format(topconfig.get("comp1.foo")))
    print("Get the global comp2.foo: {}".format(topconfig.get("comp2.foo")))
    print("Get the global comp1.bar: {}".format(topconfig.get("comp1.bar")))
    print("Get the global comp1.sub1.sub2.foo: {}".format(topconfig["comp1.sub1.sub2.foo"]))
    print("Top positional parameter pos1: {}".format(topconfig.get("pos1")))
    print("All config keys: {}".format(topconfig.keys()))
    print("All config items: {}".format(topconfig.items()))
    print("The top config as string:", topconfig)
    print("The top config repr:", repr(topconfig))
    # set a config value that should percolate down to a component setting
    topconfig["comp1.sub1.sub2.foo"] = 123456
    print("Top config now:", topconfig)
Example #2
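This longer snippet is a model-application script, apparently from a GATE Learning Framework (LF) PyTorch wrapper: it loads a trained model via ModelWrapperDefault.load, reads one JSON-encoded instance per line from stdin, applies the model, and writes one JSON result map per line to stdout until a STOP line arrives.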
# Assumed imports; the scraped listing omits the module header. topconfig,
# flag (a string-to-bool argument type) and ModelWrapperDefault are assumed
# to come from the surrounding package and are not shown here.
import sys
import json
import logging

import torch

logger = logging.getLogger(__name__)


def main(sysargs):

    logger.debug("PYTHON APPLICATION SCRIPT, args=%s" % (sys.argv, ))

    topconfig.add_argument(
        "--cuda",
        type=flag,
        default=False,
        help="True/False to use CUDA or not, default is False")
    topconfig.add_argument("--metafile",
                           type=str,
                           default=None,
                           help="Meta file, if necessary")
    topconfig.add_argument(
        "--labeled",
        action="store_true",
        help="Pass labeled instances instead of just the feature vectors")
    topconfig.add_argument(
        "--noret",
        action="store_true",
        help="Do not print the return value, only useful with labeled")
    topconfig.add_argument(
        "--logevery",
        type=int,
        default=0,
        help="Log progress every k instances; default=0 disables logging")
    topconfig.add_argument(
        "modelname", help="Prefix of the model files pathnames (REQUIRED)")

    args = topconfig.parse_args(args=sysargs[1:])
    # not needed for now
    # metafile = args.metafile
    modelprefix = args.modelname

    # If we need the datadir
    # datadir = str(Path(modelprefix).parent)

    wrapper = ModelWrapperDefault.load(modelprefix,
                                       cuda=args.cuda,
                                       metafile=args.metafile)
    # make sure we are in eval mode and do not use autograd (so far no model
    # needs gradients at application time!)
    wrapper.module.eval()
    # NOTE: a bare torch.no_grad() call just creates a context manager and
    # immediately discards it, disabling nothing; disable autograd globally
    torch.set_grad_enabled(False)
    logger.info("Model loaded:\n{}".format(wrapper.module))
    # get the target vocab
    vocab_target = wrapper.dataset.vocabs.get_vocab("<<TARGET>>")
    labels = vocab_target.itos
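    # (itos is the index-to-string list, a torchtext convention, so labels
    # maps label indices to label names)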

    ntotal = 0
    ncorrect = 0
    with sys.stdin as infile:
        for line in infile:
            logger.debug("Application input=%s" % (line, ))
            if line == "STOP":
                break
            # NOTE: currently the LF always sends instances individually.
            # This may change in the future, but for now it simplifies the
            # special case where the LF can use the assigned class of the
            # previous instance as a feature. However, the model must always
            # be applied to a batch of instances, so the single instance gets
            # wrapped in a list below.
            instancedata = json.loads(line)
            target = None
            if args.labeled:
                target = instancedata[1]
                instancedata = instancedata[0]

            # NOTE: the LF expects to get a map with the following elements:
            # status: must be "ok", anything else is interpreted as an error
            # output: the actual prediction; extracted from the returned data here
            # confidence: some confidence/probability score for the output, may
            # be null; extracted from the returned data here
            # confidences: a map with confidences for all labels, may be null:
            # this is NOT SUPPORTED in the LF yet!
            try:
                # NOTE: we put this into an extra list because the apply method expects a batch,
                # not a single instance
                # NOTE: the apply method also returns result for a whole batch!
                # print("DEBUG: calling wrapper.apply with instancedata=", instancedata, file=sys.stderr)
                batchof_labels, batchof_probs, batchof_probdists = wrapper.apply(
                    [instancedata])
                # NOTE: batchof_labels contains values for classification, but lists for
                # sequence tagging, so we check this first
                if not isinstance(batchof_labels, list):
                    raise Exception(
                        "Expected a list of predictions from apply but got %s"
                        % (type(batchof_labels)))
                if len(batchof_labels) != 1:
                    raise Exception(
                        "Expected a list of length 1 (batch size) but got length %s"
                        % (len(batchof_labels)))
                if isinstance(batchof_labels[0], list):
                    # we have a sequence tagging result
                    is_sequence = True
                else:
                    # we have a classification result
                    is_sequence = False
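                # NOTE: is_sequence is not used further below; it merely
                # documents the two possible result shapes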
                output = batchof_labels[0]
                # print("DEBUG: output is", output, file=sys.stderr)
                if isinstance(batchof_probs, list) and len(batchof_probs) == 1:
                    prob = batchof_probs[0]
                    # NOTE: we still need to change the LF to handle this correctly!!!
                    # for now, just return prob as dist and prob[0] as prob/conf
                else:
                    prob = None
                if isinstance(batchof_probdists,
                              list) and len(batchof_probdists) == 1:
                    dist = batchof_probdists[0]
                else:
                    dist = None
                ret = {
                    "status": "ok",
                    "output": output,
                    "labels": labels,
                    "conf": prob,
                    "dist": dist
                }
            except Exception as e:
                # use the module logger for consistency with the rest of the script
                logger.exception(
                    "Exception during processing of application result")
                ret = {"status": "error", "error": str(e)}
            logger.debug("Application result=%s" % (ret, ))
            logger.debug("Ret=%r" % (ret, ))
            retjson = json.dumps(ret)
            # print("DEBUG: returned JSON=", retjson, file=sys.stderr)
            if not args.noret:
                print(retjson)
                sys.stdout.flush()
            # if we got labeled data, check whether the prediction is correct
            # (only for single string targets for now)
            if args.labeled and isinstance(target, str):
                if target == output:
                    ncorrect += 1
            ntotal += 1
            if args.logevery > 0 and ntotal % args.logevery == 0:
                logger.info("Processed: {}".format(ntotal))
    logger.debug("Application program terminating")
    if ntotal > 0:
        print("Total {}, correct {}, acc={}".format(ntotal, ncorrect,
                                                    ncorrect / ntotal),
              file=sys.stderr)
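To illustrate the line protocol this loop implements, here is a hedged sketch of a driver that feeds one labeled instance to the script and reads the result map back. The script name, model prefix, and feature layout are placeholders, not taken from the original:

import json
import subprocess

# Hypothetical invocation; "apply.py" and "modelprefix" are placeholders.
proc = subprocess.Popen(
    ["python", "apply.py", "--labeled", "modelprefix"],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)

# With --labeled, an instance is a two-element array: [features, target].
# The feature layout here is an assumption for illustration only.
instance = [[0.1, 0.2, 0.3], "LabelA"]
proc.stdin.write(json.dumps(instance) + "\n")
proc.stdin.flush()

result = json.loads(proc.stdout.readline())
# Expected keys, per the loop above: status, output, labels, conf, dist
if result["status"] == "ok":
    print("Prediction:", result["output"], "confidence:", result["conf"])

proc.stdin.write("STOP\n")   # terminates the application loop
proc.stdin.flush()
proc.wait()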