Example #1
 def __init__(self, pc, dim: int, conf: VRecEncoderConf):
     super().__init__(pc, conf, output_dims=(dim, ))
     # -----
     if conf.share_layers:
         node = self.add_sub_node("m", VRecNode(pc, dim, conf.vr_conf))
         self.layers = [node for _ in range(conf.num_layer)]  # use the same node!!
     else:
         self.layers = [
             self.add_sub_node("m", VRecNode(pc, dim, conf.vr_conf))
             for _ in range(conf.num_layer)
         ]
     self.attn_ranges = conf.attn_ranges.copy()
     self.attn_ranges.extend([None] * (conf.num_layer - len(self.attn_ranges)))  # None means no restrictions
     # scheduled values
     self.temperature = ScheduledValue(f"{self.name}:temperature",
                                       conf.temperature)
     self.lambda_noop_prob = ScheduledValue(f"{self.name}:lambda_noop_prob",
                                            conf.lambda_noop_prob)
     self.lambda_entropy = ScheduledValue(f"{self.name}:lambda_entropy",
                                          conf.lambda_entropy)
     self.lambda_attn_l1 = ScheduledValue(f"{self.name}:lambda_attn_l1",
                                          conf.lambda_attn_l1)
Example #2
 def __init__(self, conf: MyIEModelConf, vpack: VocabPackage):
     self.conf = conf
     self.vpack = vpack
     tconf = conf.tconf
     # ===== Vocab =====
     # ===== Model =====
     self.pc = BK.ParamCollection(True)
     # bottom-part: input + encoder
     self.bter: MyIEBT = self.build_encoder()
     self.lexi_output_dim = self.bter.emb_output_dim
     self.enc_ef_output_dim, self.enc_evt_output_dim = self.bter.get_output_dims()[0]
     self.enc_lrf_sv = ScheduledValue("enc_lrf", tconf.enc_lrf)
     self.pc.optimizer_set(tconf.enc_optim.optim, self.enc_lrf_sv, tconf.enc_optim,
                           params=self.bter.get_parameters(), check_repeat=True, check_full=True)
     # upper-parts: the decoders
     self.decoders: List = self.build_decoders()
     self.dec_lrf_sv = ScheduledValue("dec_lrf", tconf.dec_lrf)
     self.pc.optimizer_set(tconf.dec_optim.optim, self.dec_lrf_sv, tconf.dec_optim,
                           params=Helper.join_list(z.get_parameters() for z in self.decoders),
                           check_repeat=True, check_full=True)
     # ===== For training =====
     # schedule values
     self.margin = ScheduledValue("margin", tconf.margin)
     self._scheduled_values = [self.margin, self.enc_lrf_sv, self.dec_lrf_sv]
     # for refreshing dropouts
     self.previous_refresh_training = True
     # =====
     # others
     self.train_constrain_evt_types = {"": None, "kbp17": KBP17_TYPES}[conf.tconf.constrain_evt_types]
     self.test_constrain_evt_types = {"": None, "kbp17": KBP17_TYPES}[conf.iconf.constrain_evt_types]
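Here the encoder and the decoders each get their own optimizer through pc.optimizer_set(optim_name, lrf_sv, optim_conf, params=...), with the ScheduledValue acting as a per-part learning-rate factor. How that factor is consumed is not visible in the excerpt; one plausible reading, sketched against a PyTorch-style optimizer (the `apply_update` helper and the `param_groups` access are assumptions, not the real BK.ParamCollection API):

# Hypothetical sketch: scaling a base learning rate by the scheduled
# factor before each step, PyTorch-style. Not the real BK API.
def apply_update(optimizer, base_lr: float, lrf_sv) -> None:
    effective_lr = base_lr * lrf_sv.value  # e.g. warm-up or decay factor
    for group in optimizer.param_groups:
        group["lr"] = effective_lr
    optimizer.step()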
Example #3
 def __init__(self, conf: MySimpleIEModelConf, vpack: IEVocabPackage):
     super().__init__(conf, vpack)
     # components
     self.ef_extractor: NodeExtractorBase = self.decoders[0]
     self.evt_extractor: NodeExtractorBase = self.decoders[1]
     self.arg_linker: Linker = self.decoders[2]
     #
     self.hl_ef: HLabelVocab = self.vpack.get_voc("hl_ef")
     self.hl_evt: HLabelVocab = self.vpack.get_voc("hl_evt")
     self.hl_arg: HLabelVocab = self.vpack.get_voc("hl_arg")
     # lambdas for training
     self.lambda_ef = ScheduledValue("lambda_ef", conf.tconf.lambda_ef)
     self.lambda_evt = ScheduledValue("lambda_evt", conf.tconf.lambda_evt)
     self.lambda_arg = ScheduledValue("lambda_arg", conf.tconf.lambda_arg)
     self.add_scheduled_values(self.lambda_ef)
     self.add_scheduled_values(self.lambda_evt)
     self.add_scheduled_values(self.lambda_arg)
Example #4
 def __init__(self,
              pc: BK.ParamCollection,
              conf: BaseModuleConf,
              name=None,
              init_rop=None,
              output_dims=None):
     super().__init__(pc, name, init_rop)
     self.conf = conf
     self.output_dims = output_dims
     # scheduled values
     self.lrf = ScheduledValue(f"{self.name}:lrf", conf.lrf)
     self.margin = ScheduledValue(f"{self.name}:margin", conf.margin)
     self.loss_lambda = ScheduledValue(f"{self.name}:lambda",
                                       conf.loss_lambda)
     self.hdrop = conf.hdrop
     if self.hdrop >= 0.:
         zlog(f"Forcing different hdrop at {self.name}: {self.hdrop}")
Example #5
 def __init__(self, conf: M3AIEModelConf, vpack: IEVocabPackage):
     super().__init__(conf, vpack)
     # components
     self.cand_extractor: CandidateExtractor = self.decoders[0]
     self.arg_linker: ArgLinker = self.decoders[1]
     self.span_expander: ArgSpanExpander = self.decoders[2]
     # vocab
     self.hl_arg: HLabelVocab = self.vpack.get_voc("hl_arg")
     # lambdas for training
     self.lambda_cand = ScheduledValue("lambda_cand",
                                       conf.tconf.lambda_cand)
     self.lambda_arg = ScheduledValue("lambda_arg", conf.tconf.lambda_arg)
     self.lambda_span = ScheduledValue("lambda_span",
                                       conf.tconf.lambda_span)
     self.add_scheduled_values(self.lambda_cand)
     self.add_scheduled_values(self.lambda_arg)
     self.add_scheduled_values(self.lambda_span)
     # others
     self.random_sample_stream = Random.stream(Random.random_sample)
Example #6
 def __init__(self, conf: BaseModelConf):
     self.conf = conf
     # ===== Model =====
     self.pc = BK.ParamCollection()
     self.main_lrf = ScheduledValue("main:lrf", conf.main_lrf)
     self._scheduled_values = [self.main_lrf]
     # -----
     self.nodes: Dict[str, BasicNode] = OrderedDict()
     self.components: Dict[str, BaseModule] = OrderedDict()
     # for refreshing dropouts
     self.previous_refresh_training = True
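Example #6 collects its scheduled values into self._scheduled_values, the same list Examples #2, #8, and #9 build and that Examples #3, #5, #7, and #10 extend via add_scheduled_values. The consumer of that list is outside these excerpts; below is a plausible per-checkpoint refresh loop, reusing the hypothetical `refresh`/`value` API from the sketch after Example #1:

# Hypothetical sketch of the per-checkpoint refresh implied by
# `_scheduled_values`; method names follow the sketch after Example #1.
def refresh_scheduled_values(model, step: int) -> None:
    for sv in model._scheduled_values:
        old = sv.value
        sv.refresh(step)
        if sv.value != old:
            print(f"[schedule] {sv.name}: {old} -> {sv.value}")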
Example #7
 def __init__(self, conf: TdParserConf, vpack: VocabPackage):
     super().__init__(conf, vpack)
     # ===== For decoding =====
     self.inferencer = TdInferencer(self.scorer, conf.iconf)
     # ===== For training =====
     sched_depth = ScheduledValue("depth", conf.tconf.sched_depth)
     self.add_scheduled_values(sched_depth)
     self.fber = TdFber(self.scorer, conf.iconf, conf.tconf, self.margin, self.sched_sampling, sched_depth)
     # todo(warn): not elegant, global flag!
     TdState.is_bfs = conf.is_bfs
     # =====
     zcheck(not self.bter.jpos_multitask_enabled(), "Not implemented for joint pos in this mode!!")
     zwarn("WARN: This topdown mode is deprecated!!")
Example #8
 def __init__(self, conf: FpParserConf, vpack: VocabPackage):
     self.conf = conf
     self.vpack = vpack
     tconf = conf.tconf
     # ===== Vocab =====
     self.label_vocab = vpack.get_voc("label")
     # ===== Model =====
     self.pc = BK.ParamCollection(True)
     # bottom-part: input + encoder
     self.enc = FpEncoder(self.pc, conf.encoder_conf, vpack)
     self.enc_output_dim = self.enc.get_output_dims()[0]
     self.enc_lrf_sv = ScheduledValue("enc_lrf", tconf.enc_lrf)
     self.pc.optimizer_set(tconf.enc_optim.optim,
                           self.enc_lrf_sv,
                           tconf.enc_optim,
                           params=self.enc.get_parameters(),
                           check_repeat=True,
                           check_full=True)
     # middle-part: structured layer at the middle (build later for convenient re-loading)
     self.slayer = self.build_slayer()
     self.mid_lrf_sv = ScheduledValue("mid_lrf", tconf.mid_lrf)
     if self.slayer is not None:
         self.pc.optimizer_set(tconf.mid_optim.optim,
                               self.mid_lrf_sv,
                               tconf.mid_optim,
                               params=self.slayer.get_parameters(),
                               check_repeat=True,
                               check_full=True)
     # upper-part: decoder
     self.dec = self.build_decoder()
     self.dec_lrf_sv = ScheduledValue("dec_lrf", tconf.dec_lrf)
     self.pc.optimizer_set(tconf.dec_optim.optim,
                           self.dec_lrf_sv,
                           tconf.dec_optim,
                           params=self.dec.get_parameters(),
                           check_repeat=True,
                           check_full=True)
     # extra aux loss
     conf.masklm_conf._input_dim = self.enc_output_dim
     self.masklm = MaskLMNode(self.pc, conf.masklm_conf, vpack)
     self.pc.optimizer_set(tconf.dec_optim.optim,
                           self.dec_lrf_sv,
                           tconf.dec_optim,
                           params=self.masklm.get_parameters(),
                           check_repeat=True,
                           check_full=True)
     # ===== For training =====
     # schedule values
     self.margin = ScheduledValue("margin", tconf.margin)
     self.lambda_parse = ScheduledValue("lambda_parse", conf.lambda_parse)
     self.lambda_masklm = ScheduledValue("lambda_masklm",
                                         conf.lambda_masklm)
     self._scheduled_values = [
         self.margin, self.enc_lrf_sv, self.mid_lrf_sv, self.dec_lrf_sv,
         self.lambda_parse, self.lambda_masklm
     ]
     # for refreshing dropouts
     self.previous_refresh_training = True
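Example #8 registers lambda_parse and lambda_masklm alongside the learning-rate factors, which suggests the total objective weights the parsing loss against the auxiliary masked-LM loss. A minimal sketch of that combination, assuming the scheduled values expose their current scalar via `.value` (an assumption carried over from the sketch after Example #1; `loss_parse` and `loss_masklm` are illustrative names):

# Hypothetical sketch: weighting the parse loss against the auxiliary
# masked-LM loss with the scheduled lambdas from Example #8.
def total_loss(loss_parse, loss_masklm, lambda_parse, lambda_masklm):
    return lambda_parse.value * loss_parse + lambda_masklm.value * loss_masklm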
Example #9
 def __init__(self, conf: BaseParserConf, vpack: VocabPackage):
     self.conf = conf
     self.vpack = vpack
     tconf = conf.tconf
     # ===== Vocab =====
     self.label_vocab = vpack.get_voc("label")
     # ===== Model =====
     self.pc = BK.ParamCollection(conf.new_name_conv)
     # bottom-part: input + encoder
     self.bter = ParserBT(self.pc, conf.bt_conf, vpack)
     self.enc_output_dim = self.bter.get_output_dims()[0]
     self.enc_lrf_sv = ScheduledValue("enc_lrf", tconf.enc_lrf)
     self.pc.optimizer_set(tconf.enc_optim.optim,
                           self.enc_lrf_sv,
                           tconf.enc_optim,
                           params=self.bter.get_parameters(),
                           check_repeat=True,
                           check_full=True)
     # upper-part: decoder
     # todo(+2): very ugly here!
     self.scorer = self.build_decoder()
     self.dec_lrf_sv = ScheduledValue("dec_lrf", tconf.dec_lrf)
     self.dec2_lrf_sv = ScheduledValue("dec2_lrf", tconf.dec2_lrf)
     try:
         params, params2 = self.scorer.get_split_params()
         self.pc.optimizer_set(tconf.dec_optim.optim,
                               self.dec_lrf_sv,
                               tconf.dec_optim,
                               params=params,
                               check_repeat=True,
                               check_full=False)
         self.pc.optimizer_set(tconf.dec2_optim.optim,
                               self.dec2_lrf_sv,
                               tconf.dec2_optim,
                               params=params2,
                               check_repeat=True,
                               check_full=True)
     except Exception:  # scorer provides no split params; use one optimizer for all
         self.pc.optimizer_set(tconf.dec_optim.optim,
                               self.dec_lrf_sv,
                               tconf.dec_optim,
                               params=self.scorer.get_parameters(),
                               check_repeat=True,
                               check_full=True)
     # middle-part: structured layer at the middle (build later for convenient re-loading)
     self.slayer = None
     self.mid_lrf_sv = ScheduledValue("mid_lrf", tconf.mid_lrf)
     # ===== For training =====
     # schedule values
     self.margin = ScheduledValue("margin", tconf.margin)
     self.sched_sampling = ScheduledValue("ss", tconf.sched_sampling)
     self._scheduled_values = [
         self.margin, self.sched_sampling, self.enc_lrf_sv, self.dec_lrf_sv,
         self.dec2_lrf_sv, self.mid_lrf_sv
     ]
     self.reg_scores_lambda = conf.tconf.reg_scores_lambda
     # for refreshing dropouts
     self.previous_refresh_training = True
Example #10
 def __init__(self, conf: EfParserConf, vpack: VocabPackage):
     super().__init__(conf, vpack)
     # ===== basic G1 Parser's loading (also possibly load g1's params)
     self.g1parser = G1Parser.pre_g1_init(self, conf.pre_g1_conf)
     self.lambda_g1_arc_training = conf.pre_g1_conf.lambda_g1_arc_training
     self.lambda_g1_arc_testing = conf.pre_g1_conf.lambda_g1_arc_testing
     self.lambda_g1_lab_training = conf.pre_g1_conf.lambda_g1_lab_training
     self.lambda_g1_lab_testing = conf.pre_g1_conf.lambda_g1_lab_testing
     #
     self.add_slayer()
     # True if ignored
     ignore_chs_label_mask = DepLabelHelper.select_label_idxes(conf.ef_ignore_chs, self.label_vocab.keys(), True, True)
     # ===== For decoding =====
     self.inferencer = EfInferencer(self.scorer, self.slayer, conf.iconf, ignore_chs_label_mask)
     # ===== For training =====
     self.cost0_weight = ScheduledValue("c0w", conf.tconf.cost0_weight)
     self.add_scheduled_values(self.cost0_weight)
     self.losser = EfLosser(self.scorer, self.slayer, conf.tconf, self.margin, self.cost0_weight, ignore_chs_label_mask)
     # ===== adjustable beam size
     self.arc_abs = ScheduledValue("aabs", conf.aabs)
     self.add_scheduled_values(self.arc_abs)
     #
     self.num_label = self.label_vocab.trg_len(True)  # todo(WARN): use the original idx
Example #11
 def __init__(self, conf: M3IEModelConf, vpack: IEVocabPackage):
     super().__init__(conf, vpack)
     # components
     self.ef_extractor: MentionExtractor = self.decoders[0]
     self.evt_extractor: MentionExtractor = self.decoders[1]
     self.arg_linker: ArgLinker = self.decoders[2]
     self.span_expander: ArgSpanExpander = self.decoders[3]
     # vocab
     self.hl_ef: HLabelVocab = self.vpack.get_voc("hl_ef")
     self.hl_evt: HLabelVocab = self.vpack.get_voc("hl_evt")
     self.hl_arg: HLabelVocab = self.vpack.get_voc("hl_arg")
     # lambdas for training
     self.lambda_mention_ef = ScheduledValue("lambda_mention_ef",
                                             conf.tconf.lambda_mention_ef)
     self.lambda_mention_evt = ScheduledValue("lambda_mention_evt",
                                              conf.tconf.lambda_mention_evt)
     self.lambda_affi_ef = ScheduledValue("lambda_affi_ef",
                                          conf.tconf.lambda_affi_ef)
     self.lambda_affi_evt = ScheduledValue("lambda_affi_evt",
                                           conf.tconf.lambda_affi_evt)
     self.lambda_arg = ScheduledValue("lambda_arg", conf.tconf.lambda_arg)
     self.lambda_span = ScheduledValue("lambda_span",
                                       conf.tconf.lambda_span)
     self.add_scheduled_values(self.lambda_mention_ef)
     self.add_scheduled_values(self.lambda_mention_evt)
     self.add_scheduled_values(self.lambda_affi_ef)
     self.add_scheduled_values(self.lambda_affi_evt)
     self.add_scheduled_values(self.lambda_arg)
     self.add_scheduled_values(self.lambda_span)
     #
     self.random_sample_stream = Random.stream(Random.random_sample)
     self.pos_ef_getter_set = set(conf.pos_ef_getter_list)
     #
     if conf.iconf.expand_span_method == "dep":
         self.static_span_expander = SpanExpanderDep()
     elif conf.iconf.expand_span_method == "ext":
         self.static_span_expander = SpanExpanderExternal(
             conf.iconf.expand_span_ext_file)
     else:
         zlog("No static span expander!")
         self.static_span_expander = None
Example #12
 def __init__(self, conf: MtlMlmModelConf, vpack: VocabPackage):
     super().__init__(conf)
     # for easier checking
     self.word_vocab = vpack.get_voc("word")
     # components
     self.embedder = self.add_node("emb", EmbedderNode(self.pc, conf.emb_conf, vpack))
     self.inputter = Inputter(self.embedder, vpack)  # not a node
     self.emb_out_dim = self.embedder.get_output_dims()[0]
     self.enc_attn_count = conf.default_attn_count
     if conf.enc_choice == "vrec":
         self.encoder = self.add_component("enc", VRecEncoder(self.pc, self.emb_out_dim, conf.venc_conf))
         self.enc_attn_count = self.encoder.attn_count
     elif conf.enc_choice == "original":
         conf.oenc_conf._input_dim = self.emb_out_dim
         self.encoder = self.add_node("enc", MyEncoder(self.pc, conf.oenc_conf))
     else:
         raise NotImplementedError()
     zlog(f"Finished building model's encoder {self.encoder}, all size is {self.encoder.count_allsize_parameters()}")
     self.enc_out_dim = self.encoder.get_output_dims()[0]
     # --
     conf.rprep_conf._rprep_vr_conf.matt_conf.head_count = self.enc_attn_count  # make head-count agree
     self.rpreper = self.add_node("rprep", RPrepNode(self.pc, self.enc_out_dim, conf.rprep_conf))
     # --
     self.lambda_agree = self.add_scheduled_value(ScheduledValue("agr:lambda", conf.lambda_agree))
     self.agree_loss_f = EntropyHelper.get_method(conf.agree_loss_f)
     # --
     self.masklm = self.add_component("mlm", MaskLMNode(self.pc, self.enc_out_dim, conf.mlm_conf, self.inputter))
     self.plainlm = self.add_component("plm", PlainLMNode(self.pc, self.enc_out_dim, conf.plm_conf, self.inputter))
     # todo(note): here we use attn as dim_pair, do not use pair if not using vrec!!
     self.orderpr = self.add_component("orp", OrderPredNode(
         self.pc, self.enc_out_dim, self.enc_attn_count, conf.orp_conf, self.inputter))
     # =====
     # pre-training pre-load point!!
     if conf.load_pretrain_model_name:
         zlog(f"At preload_pretrain point: Loading from {conf.load_pretrain_model_name}")
         self.pc.load(conf.load_pretrain_model_name, strict=False)
     # =====
     self.dpar = self.add_component("dpar", DparG1Decoder(
         self.pc, self.enc_out_dim, self.enc_attn_count, conf.dpar_conf, self.inputter))
     self.upos = self.add_component("upos", SeqLabNode(
         self.pc, "pos", self.enc_out_dim, self.conf.upos_conf, self.inputter))
     if conf.do_ner:
         if conf.ner_use_crf:
             self.ner = self.add_component("ner", SeqCrfNode(
                 self.pc, "ner", self.enc_out_dim, self.conf.ner_conf, self.inputter))
         else:
             self.ner = self.add_component("ner", SeqLabNode(
                 self.pc, "ner", self.enc_out_dim, self.conf.ner_conf, self.inputter))
     else:
         self.ner = None
     # for pairwise reprs (no trainable params here!)
     self.rel_dist_embed = self.add_node("oremb", PosiEmbedding2(self.pc, n_dim=self.enc_attn_count, max_val=100))
     self._prepr_f_attn_sum = lambda cache, rdist: BK.stack(cache.list_attn, 0).sum(0) if len(cache.list_attn) > 0 else None
     self._prepr_f_attn_avg = lambda cache, rdist: BK.stack(cache.list_attn, 0).mean(0) if len(cache.list_attn) > 0 else None
     self._prepr_f_attn_max = lambda cache, rdist: BK.stack(cache.list_attn, 0).max(0)[0] if len(cache.list_attn) > 0 else None
     self._prepr_f_attn_last = lambda cache, rdist: cache.list_attn[-1] if len(cache.list_attn) > 0 else None
     self._prepr_f_rdist = lambda cache, rdist: self._get_rel_dist_embed(rdist, False)
     self._prepr_f_rdist_abs = lambda cache, rdist: self._get_rel_dist_embed(rdist, True)
     self.prepr_f = getattr(self, "_prepr_f_"+conf.prepr_choice)  # shortcut
     # --
     self.testing_rand_gen = Random.create_sep_generator(conf.testing_rand_gen_seed)  # special gen for testing
     # =====
     if conf.orp_loss_special:
         self.orderpr.add_node_special(self.masklm)
     # =====
     # extra one!!
     self.aug_word2 = self.aug_encoder = self.aug_mixturer = None
     if conf.aug_word2:
         self.aug_word2 = self.add_node("aug2", AugWord2Node(self.pc, conf.emb_conf, vpack,
                                                             "word2", conf.aug_word2_dim, self.emb_out_dim))
         if conf.aug_word2_aug_encoder:
             assert conf.enc_choice == "vrec"
             self.aug_detach_drop = self.add_node("dd", Dropout(self.pc, (self.enc_out_dim,), fix_rate=conf.aug_detach_dropout))
             self.aug_encoder = self.add_component("Aenc", VRecEncoder(self.pc, self.emb_out_dim, conf.venc_conf))
             self.aug_mixturer = self.add_node("Amix", BertFeaturesWeightLayer(self.pc, conf.aug_detach_numlayer))
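One reusable pattern in Example #12 is resolving a configured strategy once via getattr(self, "_prepr_f_" + conf.prepr_choice) rather than branching on every call. A self-contained toy demonstration of the same dispatch (the `Demo` class and its methods are illustrative, not part of this codebase):

# Toy demonstration of the getattr-based strategy dispatch used for
# `prepr_f` above; `Demo` and its methods are illustrative only.
class Demo:
    def _prepr_f_sum(self, xs):
        return sum(xs)

    def _prepr_f_max(self, xs):
        return max(xs)

    def __init__(self, choice: str):
        # resolve the configured strategy once; misconfiguration fails fast here
        self.prepr_f = getattr(self, "_prepr_f_" + choice)

d = Demo("sum")
assert d.prepr_f([1, 2, 3]) == 6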
Example #13
def main(args):
    conf: OverallConf = init_everything(args)
    dconf, mconf = conf.dconf, conf.mconf
    tconf = mconf.tconf
    iconf = mconf.iconf
    #
    # dev/test can be non-existing!
    if not dconf.dev and dconf.test:
        utils.zwarn(
            "No dev given but test is; actually using test as dev (for early stopping)!!"
        )
    dt_golds, dt_aux_reprs = [], []
    for file, aux_repr in [(dconf.dev, dconf.aux_repr_dev),
                           (dconf.test, dconf.aux_repr_test)]:
        if len(file) > 0:
            utils.zlog(
                f"Add file `{file}' (aux_repr={aux_repr}) as dt-file #{len(dt_golds)}."
            )
            dt_golds.append(file)
            dt_aux_reprs.append(aux_repr)
    # data
    if len(dconf.ms_train) > 0:
        # do ms train, ignore dconf.train
        train_streamers = [
            get_data_reader(f,
                            dconf.input_format,
                            dconf.use_label0,
                            dconf.noef_link0,
                            dconf.aux_repr_train,
                            max_evt_layers=dconf.max_evt_layers)
            for f in dconf.ms_train
        ]
        train_streamer = MultiCatStreamer(train_streamers)  # simple concat for building vocab
        ms_budgets = [
            ScheduledValue(f"ms_budget{i}", c) for i, c in enumerate(
                dconf.ms_train_budget_list[:len(train_streamers)])
        ]
        assert len(ms_budgets) == len(train_streamers)
        utils.zlog(f"Multi-source training with inputsL {dconf.ms_train}")
    else:
        train_streamers = ms_budgets = None
        train_streamer = get_data_reader(dconf.train,
                                         dconf.input_format,
                                         dconf.use_label0,
                                         dconf.noef_link0,
                                         dconf.aux_repr_train,
                                         max_evt_layers=dconf.max_evt_layers)
    dt_streamers = [
        get_data_reader(f, dconf.input_format, dconf.use_label0,
                        dconf.noef_link0, aux_r)
        for f, aux_r in zip(dt_golds, dt_aux_reprs)
    ]
    # vocab
    if tconf.no_build_dict:
        vpack = IEVocabPackage.build_by_reading(conf)
    else:
        # include dev/test only for convenience of including words hit in pre-trained embeddings
        vpack = IEVocabPackage.build_from_stream(
            conf, train_streamer, MultiCatStreamer(dt_streamers))
        vpack.save(dconf.dict_dir)
    # model
    model = build_model(conf.model_type, conf, vpack)
    # use bert? todo(note): pre-compute here?
    if dconf.use_bert:
        bmodel = get_berter(dconf.bconf)
        train_streamer = BerterDataAuger(train_streamer, bmodel, "aux_repr")
        dt_streamers = [
            BerterDataAuger(z, bmodel, "aux_repr") for z in dt_streamers
        ]
    # index the data
    train_inst_preparer = model.get_inst_preper(True)
    test_inst_preparer = model.get_inst_preper(False)
    to_cache = dconf.cache_data
    to_cache_shuffle = dconf.cache_shuffle
    # -----
    if ms_budgets is None:
        train_iter = batch_stream(
            index_stream(train_streamer, vpack, to_cache, to_cache_shuffle,
                         train_inst_preparer), tconf, True)
    else:
        indexes_streamers = [
            index_stream(s, vpack, to_cache, to_cache_shuffle,
                         train_inst_preparer) for s in train_streamers
        ]
        multi_streamer = MultiSpecialJoinStream(indexes_streamers, ms_budgets,
                                                dconf.ms_stop_idx)
        train_iter = batch_stream(multi_streamer, tconf, True)
    # -----
    dt_iters = [
        batch_stream(
            index_stream(z, vpack, to_cache, to_cache_shuffle,
                         test_inst_preparer), iconf, False)
        for z in dt_streamers
    ]
    # training runner
    tr = MyIETrainingRunner(tconf,
                            model,
                            vpack,
                            dev_outfs=dconf.output_file,
                            dev_goldfs=dt_golds,
                            dev_out_format=dconf.output_format,
                            eval_conf=dconf.eval_conf)
    # -----
    if ms_budgets is not None:
        tr.add_scheduled_values(ms_budgets)  # add s-values
    # -----
    if tconf.load_model:
        tr.load(dconf.model_load_name, tconf.load_process)
    # go
    tr.run(train_iter, dt_iters)
    utils.zlog("The end of Training.")