Example 1
def image_tfodapimodel(dataset,
                       frozen_model_path,
                       label_map_path,
                       source=None,
                       threshold=0.5,
                       api=None,
                       exclude=None,
                       use_display_name=False,
                       label=None):
    log("RECIPE: Starting recipe image.tfodapimodel", locals())
    log("RECIPE: Loading frozen model")
    global detection_graph
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(frozen_model_path, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    global sess
    sess = tf.Session(graph=detection_graph)
    log("RECIPE: Loaded frozen model")
    # Dictionary keyed by class name (values are integer class IDs)
    reverse_class_mapping_dict = label_map_util.get_label_map_dict(
        label_map_path=label_map_path, use_display_name=use_display_name)
    if label is None:
        label = list(reverse_class_mapping_dict.keys())
    # Dictionary keyed by integer class ID (values are class names)
    class_mapping_dict = {v: k for k, v in reverse_class_mapping_dict.items()}
    stream = get_stream(source, api=api, loader="images", input_key="image")
    stream = fetch_images(stream)

    return {
        "view_id": "image_manual",
        "dataset": dataset,
        "stream": get_image_stream(stream, class_mapping_dict,
                                   float(threshold)),
        "exclude": exclude,
        "on_exit": free_graph,
        "config": {
            "label": ", ".join(label) if label is not None else "all",
            "labels": label,  # Selectable label options
        }
    }
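The recipe registers free_graph as its on_exit callback, but the helper itself isn't shown here. A minimal sketch of what it might do, assuming its only job is to release the global session and graph created above (the name comes from the recipe; the body is an assumption):

def free_graph(controller=None):
    # Hypothetical cleanup hook: close the global TF1 session so the frozen
    # graph's resources are released when the Prodigy server shuts down.
    global sess, detection_graph
    if sess is not None:
        sess.close()
        sess = None
    detection_graph = None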
Example 2
def evaluate(dataset,
             spacy_model,
             source,
             label='',
             api=None,
             loader=None,
             exclude=None):
    """
    Evaluate a text classification model and build an evaluation set from a
    stream.
    """
    log("RECIPE: Starting recipe attncat.eval", locals())
    nlp = spacy.load(spacy_model, disable=['tagger', 'parser', 'ner'])
    # Get attention layer weights from textcat
    textcat = nlp.get_pipe('textcat')
    assert textcat is not None
    with get_attention_weights(textcat) as attn_weights:
        stream = get_stream(source, api, loader)
        # Decorate items with attention data
        stream = attach_attention_data(stream, nlp, attn_weights)
        model = TextClassifier(nlp, label)
        log(
            'RECIPE: Initialised TextClassifier with model {}'.format(
                spacy_model), model.nlp.meta)

    def on_exit(ctrl):
        examples = ctrl.db.get_dataset(dataset)
        data = dict(model.evaluate(examples))
        print(printers.tc_result(data))

    return {
        'view_id': 'html',
        'dataset': dataset,
        'stream': stream,
        'exclude': exclude,
        'on_exit': on_exit,
        'config': {
            'lang': nlp.lang,
            'labels': model.labels,
            'html_template': template_text
        }
    }
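For context, attach_attention_data is expected to decorate each stream item with per-token attention weights that the HTML template (template_text) can render. An illustrative, purely hypothetical task shape; the real keys depend on the helper and the template:

# Hypothetical decorated stream item (all values made up)
example_task = {
    "text": "The battery life is fantastic",
    "tokens": ["The", "battery", "life", "is", "fantastic"],
    "attention": [0.02, 0.25, 0.18, 0.05, 0.50],  # one weight per token
}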
Example 3
def image_servingmodel(dataset,
                       ip,
                       port,
                       model_name,
                       label_map_path,
                       source=None,
                       threshold=0.5,
                       api=None,
                       exclude=None,
                       use_display_name=False,
                       label=None):
    log("RECIPE: Starting recipe image.servingmodel", locals())

    # Dictionary keyed by class name (values are integer class IDs)
    reverse_class_mapping_dict = label_map_util.get_label_map_dict(
        label_map_path=label_map_path, use_display_name=use_display_name)
    if label is None:
        label = list(reverse_class_mapping_dict.keys())
    # Dictionary keyed by integer class ID (values are class names)
    class_mapping_dict = {v: k for k, v in reverse_class_mapping_dict.items()}
    stream = get_stream(source, api=api, loader="images", input_key="image")
    stream = fetch_images(stream)

    return {
        "view_id": "image_manual",
        "dataset": dataset,
        "stream": get_image_stream(stream, class_mapping_dict, ip, port,
                                   model_name, float(threshold)),
        "exclude": exclude,
        "config": {
            "label": ", ".join(label) if label is not None else "all",
            "labels": label,  # Selectable label options
        }
    }
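Here get_image_stream receives ip, port and model_name, which suggests scoring happens over TensorFlow Serving's gRPC API. A sketch of how such a connection is typically opened; the helper name is hypothetical, only the grpc and tensorflow_serving calls are standard:

import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc

def build_serving_stub(ip, port):
    # Open an insecure gRPC channel to the TF Serving instance;
    # PredictRequest messages for model_name would be issued via this stub.
    channel = grpc.insecure_channel("{}:{}".format(ip, port))
    return prediction_service_pb2_grpc.PredictionServiceStub(channel)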
Example 4
def pipe(source=None, api=None, loader=None, from_dataset=False, exclude=None):
    """
    Load examples from an input source, and print them as newline-delimited
    JSON. This makes it easy to filter the stream with command-line utilities
    such as `grep`. It's also often useful to inspect the stream, by piping to
    `less`.
    """
    DB = connect()
    if from_dataset:
        stream = DB.get_dataset(source)
    else:
        stream = get_stream(source, api, loader)
        stream = (set_hashes(eg) for eg in stream)
    if exclude:
        log("RECIPE: Excluding tasks from datasets: {}".format(
            ', '.join(exclude)))
        exclude_hashes = DB.get_input_hashes(*exclude)
        stream = filter_inputs(stream, exclude_hashes)
    try:
        for eg in stream:
            print(ujson.dumps(eg, escape_forward_slashes=False))
    except KeyboardInterrupt:
        pass
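Because the recipe prints newline-delimited JSON to stdout, its output composes with shell tools as the docstring notes. An equivalent, illustrative Python consumer for the piped stream (the keyword filter is just an example):

import sys
import ujson

# Keep only tasks whose text mentions a keyword, mirroring a grep filter
for line in sys.stdin:
    eg = ujson.loads(line)
    if "battery" in eg.get("text", ""):
        print(ujson.dumps(eg, escape_forward_slashes=False))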
Example 5
def evaluate(dataset,
             spacy_model,
             source,
             label='',
             api=None,
             loader=None,
             exclude=None):
    """
    Evaluate a text classification model and build an evaluation set from a
    stream.
    """
    log("RECIPE: Starting recipe attncat.eval", locals())
    nlp = spacy.load(spacy_model, disable=['tagger', 'parser', 'ner'])
    stream = get_stream(source, api, loader)
    stream = attach_structural_sensitivity_data(stream, nlp,
                                                label.split(',')[0])
    model = TextClassifier(nlp, label)
    log('RECIPE: Initialised TextClassifier with model {}'.format(spacy_model),
        model.nlp.meta)

    def on_exit(ctrl):
        examples = ctrl.db.get_dataset(dataset)
        data = dict(model.evaluate(examples))
        print(printers.tc_result(data))

    return {
        'view_id': 'html',
        'dataset': dataset,
        'stream': stream,
        'exclude': exclude,
        'on_exit': on_exit,
        'config': {
            'lang': nlp.lang,
            'labels': model.labels,
            'html_template': template_text
        }
    }
Example 6
def textcat_al(dataset,
               spacy_model,
               source=None,
               label='',
               api=None,
               patterns=None,
               loader=None,
               long_text=False,
               exclude=None):
    """
    Collect the best possible training data for a text classification model
    with the model in the loop. Based on your annotations, Prodigy will decide
    which questions to ask next.
    """
    if spacy_model is not None:
        if isinstance(spacy_model, str):
            print("Load model ", spacy_model)
            nlp = spacy.load(spacy_model, disable=['ner', 'parser'])
            model = TextClassifier(nlp, label, long_text=long_text)
        else:
            model = spacy_model
    else:
        print("build your customized model")
        nlp = spacy.load('en_core_web_lg')

        # Build a PyTorch FastText classifier initialised with spaCy's vectors
        pt_model = FastText_test(vocab_size=684831, emb_dim=300)
        pt_model.embeds.weight.data.copy_(
            torch.from_numpy(nlp.vocab.vectors.data))
        model = PyTorchWrapper(pt_model)

        textcat = Loss_TextCategorizer(nlp.vocab, model)
        nlp.add_pipe(textcat)
        model = TextClassifier(nlp, label, long_text=long_text)
    stream = get_stream(source, input_key='text')
    if patterns is None:
        predict = model
        update = model.update
    else:
        matcher = PatternMatcher(model.nlp, prior_correct=5.,
                                 prior_incorrect=5., label_span=False,
                                 label_task=True)
        matcher = matcher.from_disk(patterns)
        #log("RECIPE: Created PatternMatcher and loaded in patterns", patterns)
        # Combine the textcat model with the PatternMatcher to annotate both
        # match results and predictions, and update both models.
        predict, update = combine_models(model, matcher)
    # Rank the stream. Note this is continuous, as model() is a generator.
    # As we call model.update(), the ranking of examples changes.
    stream = test_stream(stream, predict)

    def updateDB(answers):
        model.update(answers)

    def on_exit():
        print("on_exit")
        return model

    return {
        'view_id': 'classification',
        'dataset': dataset,
        'stream': stream,
        'exclude': exclude,
        'update': updateDB,
        'on_exit': on_exit,
        'config': {'labels': model.labels, 'batch_size': 1}
    }
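The updateDB callback receives the batch of annotated tasks from the web app. In Prodigy each answered task carries an "answer" field; an illustrative batch (texts and labels are made up):

# Hypothetical batch passed to updateDB; "answer" is one of
# "accept", "reject" or "ignore" in the classification interface
answers = [
    {"text": "works perfectly", "label": "POSITIVE", "answer": "accept"},
    {"text": "broke after a week", "label": "POSITIVE", "answer": "reject"},
]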
Example 7
def mark_custom(dataset,
                source=None,
                view_id=None,
                label='',
                api=None,
                loader=None,
                memorize=False,
                exclude=None):
    """
    Click through pre-prepared examples, with no model in the loop.
    """
    log('RECIPE: Starting recipe mark', locals())
    stream = list(get_stream(source, api, loader))

    counts = Counter()
    memory = {}

    def fill_memory(ctrl):
        if memorize:
            examples = ctrl.db.get_dataset(dataset)
            log("RECIPE: Add {} examples from dataset '{}' to memory".format(
                len(examples), dataset))
            for eg in examples:
                memory[eg[TASK_HASH_ATTR]] = eg['answer']

    def ask_questions(stream):
        for eg in stream:
            eg['time_loaded'] = datetime.now().isoformat()
            if TASK_HASH_ATTR in eg and eg[TASK_HASH_ATTR] in memory:
                answer = memory[eg[TASK_HASH_ATTR]]
                counts[answer] += 1
            else:
                if label:
                    eg['label'] = label
                yield eg

    def recv_answers(answers):
        for eg in answers:
            counts[eg['answer']] += 1
            memory[eg[TASK_HASH_ATTR]] = eg['answer']
            eg['time_returned'] = datetime.now().isoformat()

    def print_results(ctrl):
        print(printers.answers(counts))

    def get_progress(session=0, total=0, loss=0):
        # Progress is the share of the stream that has been answered;
        # sum the per-answer counts rather than counting distinct answers
        progress = sum(counts.values()) / len(stream)
        return progress

    return {
        'view_id': view_id,
        'dataset': dataset,
        'stream': ask_questions(stream),
        'exclude': exclude,
        'update': recv_answers,
        'on_load': fill_memory,
        'on_exit': print_results,
        'config': {
            'label': label
        }
    }
Example 8
def ner_manual_tokenizers_bert(
    dataset: str,
    source: Union[str, Iterable[dict]],
    loader: Optional[str] = None,
    label: Optional[List[str]] = None,
    tokenizer_vocab: Optional[str] = None,
    lowercase: bool = False,
    hide_special: bool = False,
    hide_wp_prefix: bool = False,
) -> Dict[str, Any]:
    """Example recipe that shows how to use model-specific tokenizers like the
    BERT word piece tokenizer to preprocess your incoming text for fast and
    efficient NER annotation and to make sure that all annotations you collect
    always map to tokens and can be used to train and fine-tune your model
    (even if the tokenization isn't that intuitive, because word pieces). The
    selection automatically snaps to the token boundaries and you can double-click
    single tokens to select them.

    Setting "honor_token_whitespace": true will ensure that whitespace between
    tokens is only shown if whitespace is present in the original text. This
    keeps the text readable.

    Requires Prodigy v1.10+ and usese the HuggingFace tokenizers library."""
    stream = get_stream(source, loader=loader, input_key="text")
    # You can replace this with other tokenizers if needed
    tokenizer = BertWordPieceTokenizer(tokenizer_vocab, lowercase=lowercase)
    sep_token = tokenizer._parameters.get("sep_token")
    cls_token = tokenizer._parameters.get("cls_token")
    special_tokens = (sep_token, cls_token)
    wp_prefix = tokenizer._parameters.get("wordpieces_prefix")

    def add_tokens(stream):
        for eg in stream:
            tokens = tokenizer.encode(eg["text"])
            eg_tokens = []
            idx = 0
            for (text, (start, end), tid) in zip(tokens.tokens, tokens.offsets,
                                                 tokens.ids):
                # If we don't want to see special tokens, don't add them
                if hide_special and text in special_tokens:
                    continue
                # If we want to strip out word piece prefix, remove it from text
                if hide_wp_prefix and wp_prefix is not None:
                    if text.startswith(wp_prefix):
                        text = text[len(wp_prefix):]
                token = {
                    "text": text,
                    "id": idx,
                    "start": start,
                    "end": end,
                    # This is the encoded ID returned by the tokenizer
                    "tokenizer_id": tid,
                    # Don't allow selecting special SEP/CLS tokens
                    "disabled": text in special_tokens,
                }
                eg_tokens.append(token)
                idx += 1
            for i, token in enumerate(eg_tokens):
                # If the next start offset != the current end offset, we
                # assume there's whitespace in between
                if i < len(eg_tokens) - 1 and token[
                        "text"] not in special_tokens:
                    next_token = eg_tokens[i + 1]
                    token["ws"] = (next_token["start"] > token["end"]
                                   or next_token["text"] in special_tokens)
                else:
                    token["ws"] = True
            eg["tokens"] = eg_tokens
            yield eg

    stream = add_tokens(stream)

    return {
        "dataset": dataset,
        "stream": stream,
        "view_id": "ner_manual",
        "config": {
            "honor_token_whitespace": True,
            "labels": label,
            "exclude_by": "input",
            "force_stream_order": True,
        },
    }
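To see the token/offset pairs the recipe relies on, the tokenizer can be run directly. A small sanity check; the vocab path is a placeholder and the exact word pieces depend on the vocabulary used:

from tokenizers import BertWordPieceTokenizer

tok = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
enc = tok.encode("surfboarding")
# e.g. [('[CLS]', (0, 0)), ('surf', (0, 4)), ('##board', (4, 9)),
#       ('##ing', (9, 12)), ('[SEP]', (0, 0))]
print(list(zip(enc.tokens, enc.offsets)))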
Example 9
def teach(dataset,
          spacy_model,
          source=None,
          label='',
          api=None,
          loader=None,
          seeds=None,
          long_text=False,
          exclude=None):
    """
    Collect the best possible training data for a text classification model
    with the model in the loop. Based on your annotations, Prodigy will decide
    which questions to ask next.
    """
    log('RECIPE: Starting recipe attncat.teach', locals())
    DB = connect()
    nlp = spacy.load(spacy_model)
    log('RECIPE: Creating TextClassifier with model {}'.format(spacy_model))
    model = TextClassifier(nlp, label.split(','), long_text=long_text)
    stream = get_stream(source,
                        api,
                        loader,
                        rehash=True,
                        dedup=True,
                        input_key='text')

    # Get attention layer weights from textcat
    textcat = nlp.get_pipe('textcat')
    assert textcat is not None
    with get_attention_weights(textcat) as attn_weights:
        if seeds is not None:
            if isinstance(seeds, str) and seeds in DB:
                seeds = get_seeds_from_set(seeds, DB.get_dataset(seeds))
            else:
                seeds = get_seeds(seeds)
            # Find 'seedy' examples
            examples_with_seeds = list(
                find_with_terms(stream,
                                seeds,
                                at_least=10,
                                at_most=1000,
                                give_up_after=10000))
            for eg in examples_with_seeds:
                eg.setdefault('meta', {})
                eg['meta']['via_seed'] = True
            print("Found {} examples with seeds".format(
                len(examples_with_seeds)))
            examples_with_seeds = [
                task for _, task in model(examples_with_seeds)
            ]
        # Rank the stream. Note this is continuous, as model() is a generator.
        # As we call model.update(), the ranking of examples changes.
        stream = prefer_uncertain(model(stream))
        # Prepend 'seedy' examples, if present
        if seeds:
            log("RECIPE: Prepending examples with seeds to the stream")
            stream = cytoolz.concat((examples_with_seeds, stream))

        # Decorate items with attention data
        stream = attach_attention_data(stream, nlp, attn_weights)
    return {
        'view_id': 'html',
        'dataset': dataset,
        'stream': stream,
        'exclude': exclude,
        'update': model.update,
        'config': {
            'lang': nlp.lang,
            'labels': model.labels,
            'html_template': template_text
        }
    }
Example 10
def textcat_teach(dataset,
                  model,
                  source=None,
                  label='',
                  prefer_method=False,
                  exclude=None,
                  init_path=None,
                  vectorizer_path=None,
                  track_dataset=None,
                  exit_model=0):
    """
    Collect the best possible training data for a text classification model
    with the model in the loop. Based on your annotations, Prodigy will decide
    which questions to ask next.
    """
    # Choose the model: if it's not a string, it's already a Prodigy wrapper
    if isinstance(model, str):
        print("Build your customized model", model)
        model = parse_model(model, label, vectorizer_path, init_path,
                            track_dataset)
    predict = model.predict
    update = model.update

    stream = get_stream(source, input_key='text')
    if prefer_method == "probability":
        # Rank candidates by raw probability
        stream = probability_stream(stream, predict)
    else:
        # Rank candidates by exponential moving average of scores
        stream = test_stream(stream, predict)

    def updateDB(answers):
        model.update(answers)

    def on_exit():
        print("on_exit")
        return model

    components = {
        'view_id': 'classification',
        'dataset': dataset,
        'stream': stream,
        'exclude': exclude,
        'update': updateDB,
        'config': {
            'labels': model.labels,
            'batch_size': 32
        }
    }
    if exit_model:
        # Only attach on_exit (which returns the model) when requested
        components['on_exit'] = on_exit
    return components
Example 11
def textcat_custom(dataset,
                   spacy_model,
                   source=None,
                   label='',
                   api=None,
                   patterns=None,
                   loader=None,
                   long_text=False,
                   exclude=None):
    """
    Collect the best possible training data for a text classification model
    with the model in the loop. Based on your annotations, Prodigy will decide
    which questions to ask next.
    """
    if spacy_model is not None:
        if isinstance(spacy_model, str):
            print("Load model ", spacy_model)
            nlp = spacy.load(spacy_model, disable=['ner', 'parser'])
            model = TextClassifier(nlp, label, long_text=long_text)
        else:
            model = spacy_model
    else:
        print("build your customized model")
        pt_model = FastText(vocab_size=50966, emb_dim=300)
        optimizer = torch.optim.Adam(pt_model.parameters(), lr=0.001)
        criterion = nn.BCELoss()
        example_path = "/liveperson/data/alloy/prodigy/data/newsgroup_example.jsonl"
        vectorizer_path = "/liveperson/data/alloy/prodigy/data/newsgroup_all.jsonl"
        model = Prodigy_model_cpu(pt_model,
                                  vectorizer_path,
                                  None,
                                  label_size=1,
                                  optimizer=optimizer,
                                  loss=criterion)

    stream = get_stream(source, input_key='text')
    # Note: pattern matching isn't wired up in this recipe, so the model is
    # always used for scoring directly (the patterns argument is ignored)
    predict = model.predict
    update = model.update

    stream = test_stream(stream, predict)

    def updateDB(answers):
        model.update(answers)

    def on_exit():
        print("on_exit")
        return model

    return {
        'view_id': 'classification',
        'dataset': dataset,
        'stream': stream,
        'exclude': exclude,
        'update': updateDB,
        'on_exit': on_exit,
        'config': {
            'labels': ['POSITIVE', 'NEGATIVE'],
            'batch_size': 32
        }
    }
Example 12
def textcat_log(dataset,
                spacy_model,
                source=None,
                label='',
                api=None,
                patterns=None,
                loader=None,
                long_text=False,
                exclude=None):
    """
    Collect the best possible training data for a text classification model
    with the model in the loop. Based on your annotations, Prodigy will decide
    which questions to ask next.
    """
    if spacy_model is not None:
        if isinstance(spacy_model, str):
            print("Load model ", spacy_model)
            nlp = spacy.load(spacy_model, disable=['ner', 'parser'])
            model = TextClassifier(nlp, label, long_text=long_text)
        else:
            model = spacy_model
    else:
        print("build your customized model,log")
        pt_model = linear_model.SGDClassifier(loss="log")
        # pt_model = linear_model.SGDClassifier()
        example = ["Could you check my order status"]
        example_label = [1]
        #vectorizer_path = "/liveperson/data/alloy/prodigy/data/db-out/tmo_order_status.jsonl"
        #example_path = "/liveperson/data/alloy/prodigy/data/newsgroup_initial.jsonl"
        example_path = "/liveperson/data/alloy/prodigy/data/newsgroup_example.jsonl"
        vectorizer_path = "/liveperson/data/alloy/prodigy/data/newsgroup_all.jsonl"
        model = Prodigy_log_cpu(pt_model, 1, vectorizer_path, example_path)

    stream = get_stream(source, input_key='text')
    # Note: pattern matching isn't wired up in this recipe, so the model is
    # always used for scoring directly (the patterns argument is ignored)
    predict = model.predict
    update = model.update

    stream = test_stream(stream, predict)

    def updateDB(answers):
        model.update(answers)

    def on_exit():
        print("on_exit")
        return model

    return {
        'view_id': 'classification',
        'dataset': dataset,
        'stream': stream,
        'exclude': exclude,
        'update': updateDB,
        'on_exit': on_exit,
        'config': {
            'labels': ['ORDER_STATUS'],
            'batch_size': 32
        }
    }
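Prodigy_log_cpu presumably adapts the scikit-learn SGD classifier for incremental updates from annotations. The underlying sklearn pattern it would rely on is partial_fit; an illustrative sketch, not the wrapper's actual code (the HashingVectorizer is an assumption):

from sklearn import linear_model
from sklearn.feature_extraction.text import HashingVectorizer

vec = HashingVectorizer(n_features=2 ** 16)
clf = linear_model.SGDClassifier(loss="log")
X = vec.transform(["Could you check my order status"])
# Incrementally fit on one annotated example; classes must be declared
# up front for partial_fit
clf.partial_fit(X, [1], classes=[0, 1])
print(clf.predict_proba(X))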
Example 13
def image_trainmodel(dataset, source, config_path, ip, port, model_name,
                     label_map_path=None, label=None, model_dir="model_dir",
                     export_dir="export_dir", data_dir="data_dir",
                     steps_per_epoch=-1, threshold=0.5, temp_files_num=5,
                     max_checkpoints_num=5, run_eval=False, eval_steps=50,
                     use_display_name=False, tf_logging_level=40, api=None,
                     exclude=None):
    tf.logging.set_verbosity(tf_logging_level)
    _create_dir(model_dir)
    _create_dir(export_dir)
    _create_dir(data_dir)
    log("Building the Tensorflow Object Detection API model")
    run_config = tf.estimator.RunConfig(
        model_dir=model_dir, keep_checkpoint_max=max_checkpoints_num)
    odapi_configs = config_util.get_configs_from_pipeline_file(config_path)
    if label_map_path:
        log("Overriding label_map_path given in the odapi config file")
        odapi_configs["train_input_config"].label_map_path = label_map_path
        odapi_configs["eval_input_config"].label_map_path = label_map_path
    else:
        label_map_path = odapi_configs["train_input_config"].label_map_path

    # Set input reader config low to make sure you don't hit memory errors
    train_input_config = odapi_configs["train_input_config"]
    train_input_config.shuffle = False
    train_input_config.num_readers = 1
    train_input_config.num_parallel_batches = 1
    train_input_config.num_prefetch_batches = -1  # autotune
    train_input_config.queue_capacity = 2
    train_input_config.min_after_dequeue = 1
    train_input_config.read_block_length = 10
    train_input_config.prefetch_size = 2
    train_input_config.num_parallel_map_calls = 2

    # Dictionary keyed by class name (values are integer class IDs)
    reverse_class_mapping_dict = label_map_util.get_label_map_dict(
        label_map_path=label_map_path,
        use_display_name=use_display_name)
    if label is None:
        label = list(reverse_class_mapping_dict.keys())
    # Dictionary keyed by integer class ID (values are class names)
    class_mapping_dict = {v: k for k, v in reverse_class_mapping_dict.items()}

    detection_model_fn = functools.partial(model_builder.build,
                                           model_config=odapi_configs["model"])
    model_func = create_model_fn(detection_model_fn,
                                 hparams=create_hparams(None),
                                 configs=odapi_configs, use_tpu=False,
                                 postprocess_on_cpu=False)
    estimator = tf.estimator.Estimator(model_fn=model_func, config=run_config)
    if estimator.latest_checkpoint() is None:
        log(("Running a single dummy training step! "
             "Else saving SavedModel for Tensorflow Serving does not work"))
        train_input_config = odapi_configs["train_input_config"]
        train_input_fn = create_train_input_fn(
            train_config=odapi_configs["train_config"],
            model_config=odapi_configs["model"],
            train_input_config=train_input_config)
        estimator.train(input_fn=train_input_fn,
                        steps=1)

    _export_saved_model(export_dir, estimator, odapi_configs)
    log("Make sure to start Tensorflow Serving before opening Prodigy")
    log(("Training and evaluation (if enabled) can be monitored by "
         "pointing Tensorboard to {} directory").format(model_dir))

    stream = get_stream(source, api=api, loader="images", input_key="image")
    stream = fetch_images(stream)
    update_fn = functools.partial(
        update_odapi_model, estimator=estimator,
        data_dir=data_dir,
        reverse_class_mapping_dict=reverse_class_mapping_dict,
        odapi_configs=odapi_configs,
        steps_per_epoch=steps_per_epoch,
        export_dir=export_dir, run_eval=run_eval,
        eval_steps=eval_steps,
        temp_files_num=temp_files_num)

    return {
        "view_id": "image_manual",
        "dataset": dataset,
        "stream": get_image_stream(stream, class_mapping_dict,
                                   ip, port, model_name, float(threshold)),
        "exclude": exclude,
        "update": update_fn,
        # "progress": lambda *args, **kwargs: 0,
        'config': {
            'label': ', '.join(label) if label is not None else 'all',
            'labels': label,  # Selectable label options
        }
    }
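The _create_dir helper used at the top of this recipe isn't shown; a minimal sketch under the assumption that it only ensures the directory exists:

import os

def _create_dir(path):
    # Hypothetical implementation: create the directory tree if missing
    os.makedirs(path, exist_ok=True)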