def __init__(self, ts, data=0, weights=1.0):
    self.ts = ts
    self.weights = weights
    self.data = data
    self.calc = calc  # DAECalc.DAECalc("__PC12_MA__", "__dPC12_MA__", "__d2PC12_MA__")
    BaseModel.__init__(self,
                       len(ts) * 15, 91,
                       "PC12_MA")  # 15 of 54 dVars, 91 parameters
    self.calc.kwargs['max_steps'] = 5000
Example #2
def get_st_embeds(args, dataset, config, lang, base_model=None):
    logger.info("***** Compute sentence embeddings for [%s] plain text dataset using the [%s] base_model *****", lang, "pre-trained" if  base_model is None else "domain")
    if base_model is None:
        base_model = BaseModel.from_pretrained(args.model_name_or_path,
                                               from_tf=bool(".ckpt" in args.model_name_or_path),
                                               config=config)
        base_model.to(args.device)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    eval_sampler = SequentialSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

    logger.info("  Num examples = %d", len(dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)

    st_embeds = None
    base_model.eval()
    for batch in eval_dataloader:
        batch = tuple(t.to(args.device) for t in batch)

        with torch.no_grad():
            inputs = {"input_ids": batch[0],
                      "attention_mask": batch[1],
                      "token_type_ids": batch[2]}
            outputs = base_model(**inputs)
            pooled_outputs = outputs[1]

        st_embeds = (pooled_outputs.detach() if st_embeds is None
                     else torch.cat((st_embeds, pooled_outputs.detach()), dim=0))  # dataset_len x hidden_size

    return st_embeds
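
The following is a minimal, hypothetical call site for get_st_embeds, not taken from the original project. It only assumes that BaseModel is a BERT-style encoder with a pooled output and that the dataset tensors are ordered (input_ids, attention_mask, token_type_ids), which is how the batch is unpacked above; the checkpoint name, sentences, and argument values are placeholders.

import torch
from argparse import Namespace
from torch.utils.data import TensorDataset
from transformers import BertConfig, BertTokenizer

args = Namespace(model_name_or_path="bert-base-multilingual-cased",  # hypothetical checkpoint
                 per_gpu_eval_batch_size=32, n_gpu=1,
                 device="cuda" if torch.cuda.is_available() else "cpu")
config = BertConfig.from_pretrained(args.model_name_or_path)
tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)

# Build a TensorDataset in the (input_ids, attention_mask, token_type_ids)
# order that get_st_embeds unpacks from each batch.
enc = tokenizer(["a plain text sentence", "another plain text sentence"],
                padding="max_length", truncation=True, max_length=64,
                return_tensors="pt")
dataset = TensorDataset(enc["input_ids"], enc["attention_mask"], enc["token_type_ids"])

embeds = get_st_embeds(args, dataset, config, lang="en")
print(embeds.shape)  # (len(dataset), hidden_size)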
Example #3
def get_init_domain_embed(args, dataset, lang):
    config = BertConfig.from_pretrained(args.model_name_or_path)
    base_model = BaseModel.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config)
    base_model.to(args.device)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(dataset)
    eval_dataloader = DataLoader(dataset,
                                 sampler=eval_sampler,
                                 batch_size=args.eval_batch_size)

    # compute pooled sentence embeddings for the dataset using the base model
    logger.info(
        "***** Compute sentence embeddings for [%s] dataset using the base_model *****",
        lang)
    logger.info("  Num examples = %d", len(dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)

    st_embeds = None
    base_model.eval()
    for batch in eval_dataloader:
        batch = tuple(t.to(args.device) for t in batch)

        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2]
            }
            outputs = base_model(**inputs)
            pooled_outputs = outputs[1]

        st_embeds = (pooled_outputs.detach() if st_embeds is None
                     else torch.cat((st_embeds, pooled_outputs.detach()), dim=0))  # dataset_len x hidden_size

    return st_embeds
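
get_init_domain_embed performs the same pooled-embedding extraction as get_st_embeds above, but always loads a fresh pre-trained encoder from args.model_name_or_path instead of accepting one. A hypothetical follow-up, reusing the placeholder args and dataset from the previous sketch; the cosine-similarity comparison is purely illustrative:

import torch.nn.functional as F

init_embeds = get_init_domain_embed(args, dataset, lang="en")  # (len(dataset), hidden_size)
# e.g. compare two sentences by cosine similarity of their pooled embeddings
sim = F.cosine_similarity(init_embeds[0], init_embeds[1], dim=0)
print(float(sim))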
Example #4
def __init__(self, N):
    BaseModel.__init__(self, N, N, "%i Parameter Exponential" % N)

def __init__(self, weights):
    self.weights = weights
    BaseModel.__init__(self, 22, 70, "MMPrior")

def __init__(self, x0=1, weights=25.0):
    self.x0 = x0
    self.weights = weights
    BaseModel.__init__(self, 70, 70, "LinearPrior")

def __init__(self, x0):
    self.x0 = x0  # x0 contains the default values for the experiment
    BaseModel.__init__(self, 91, 70, "Expt")

def __init__(self, ts, weights=1.0):
    self.ts = ts
    self.weights = weights
    self.calc = calc  # DAECalc.DAECalc("__PC12_MA__", "__dPC12_MA__", "__d2PC12_MA__")
    BaseModel.__init__(self, len(ts) * 54, 91,
                       "PC12_MA")  # 54 dVars (not 51), 91 parameters (21 + 70, not 21 + 64 = 85)
    self.calc.kwargs['max_steps'] = 5000
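
Every constructor above follows the same pattern: store the instance data, then call BaseModel.__init__ with the number of data values, the number of parameters, and a display name. A minimal sketch of a new subclass in that style, assuming only the three-argument BaseModel.__init__ seen here; the class, its parameters, and its evaluate method are hypothetical and not part of the API shown above.

import numpy as np

class QuadraticModel(BaseModel):
    """Hypothetical model: y = a*t**2 + b*t + c evaluated at the times in ts."""

    def __init__(self, ts, weights=1.0):
        self.ts = np.asarray(ts)
        self.weights = weights
        # len(ts) data values, 3 parameters (a, b, c), and a model name
        BaseModel.__init__(self, len(ts), 3, "Quadratic")

    def evaluate(self, params):  # illustrative method, not taken from BaseModel
        a, b, c = params
        return a * self.ts ** 2 + b * self.ts + c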