Code Example #1
def generating_poem(app, prefix, model, config, tokenizer, device, quick=False, num=5,
                    batchGenerating=False, gpu='0', onlyMax=False, fast_pattern=False):
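    # Sample candidate poems that continue `prefix` with the GPT-2 model, decode them,
    # run them through poemFilter1 and drop duplicates before returning.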
    torch.cuda.set_device(int(gpu))
    if len(prefix)>10:
        return []
    #print("start:", prefix)
    global a
    a = app
    n_ctx = model.config.n_ctx
    length = config['length']
    nsamples = num
    batch_size = config['batch_size']
    temperature = config['temperature']
    topk = config['topk']
    topp = config['topp']
    quick_pattern = quick
    repetition_penalty = config['repetition_penalty']
    if length == -1:
        length = model.config.n_ctx
    #print('generating-begin for %s'%prefix)
    raw_text = prefix[0] + prefix
    context_tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(raw_text))
    if batchGenerating:
        if onlyMax:
            outs = sample_sequence_batch_max(model, context_tokens, length, n_ctx, tokenizer, nsamples=2,
                                              temperature=temperature, top_k=topk,
                                              top_p=topp, repitition_penalty=repetition_penalty,
                                              device=device)
        else:
            if fast_pattern:
                outs = fast_sample_sequence_batch(model, context_tokens, length, nsamples=nsamples,
                                                  temperature=temperature, top_k=topk,
                                                  repitition_penalty=repetition_penalty, device=device)
            else:
                outs = sample_sequence_batch_opti(model, context_tokens, length, n_ctx, tokenizer, nsamples,
                                                  temperature=temperature, top_k=topk,
                                                  top_p=topp, repitition_penalty=repetition_penalty,
                                                  device=device)
        S = []
        for out in outs:
            tmptext = untokenization_poem(out, tokenizer, config)
            poem = poemFilter1(tmptext[1:])
            if poem:
                S.append(poem)
    else:
        S = []
        for _ in range(nsamples):
            out = generate(
                n_ctx=n_ctx,
                model=model,
                context=context_tokens,
                length=length,
                is_fast_pattern=fast_pattern, tokenizer=tokenizer,
                temperature=temperature, top_k=topk, top_p=topp,
                repitition_penalty=repetition_penalty, device=device
            )
            tmptext = untokenization_poem(out, tokenizer, config)
            poem = poemFilter1(tmptext[1:])
            if poem:
                S.append(poem)
    S = dropDuplicateContent(S)
    return S
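A minimal call sketch for this function (not from the repository; the checkpoint path, config values and surrounding objects are assumptions) could look like the following, assuming the helpers in gpt_gen.py are importable and a transformers GPT-2 checkpoint with a BERT-style Chinese tokenizer:

import torch
from transformers import GPT2LMHeadModel, BertTokenizer

model = GPT2LMHeadModel.from_pretrained('model/poem')      # hypothetical checkpoint path
tokenizer = BertTokenizer.from_pretrained('model/poem')
config = {'length': 32, 'batch_size': 1, 'temperature': 1.0,
          'topk': 8, 'topp': 0.9, 'repetition_penalty': 1.1}  # keys read by the function
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device).eval()

# app is only stored in a module-level global, so None is enough for a smoke test.
poems = generating_poem(None, '春', model, config, tokenizer, device,
                        num=5, batchGenerating=True, fast_pattern=True)

Note that this variant calls torch.cuda.set_device(int(gpu)) unconditionally with gpu='0', so it requires a CUDA device.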
Code Example #2
File: gpt_gen.py Project: baokui/GPT2-Chinese
def nnlm_modelpredict(D_simi,
                      D_next,
                      config_predict,
                      inputStr='怎么了',
                      maxNext=3,
                      maxChoice=10,
                      num=5):
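    # Table-lookup generation without the GPT model: starting from the matched prefix,
    # walk the next-sentence table D_next (falling back through the similarity table
    # D_simi) for up to maxNext hops, sampling each hop from the stored words/probs.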
    prefix, punc = findMaxMatch(inputStr, D_simi, D_next, config_predict)
    if punc == '':
        punc = ','
    if len(prefix) == 0:
        return []
    output = []
    for ii in range(num + 5):
        if len(output) == num:
            break
        s0 = prefix
        S = []
        #S.append(inputStr)
        lastsent = s0
        for i in range(maxNext):
            if s0 in D_next:
                p = [float(tt) for tt in D_next[s0]['probs']]
                w = D_next[s0]['words']
                t = random.choices(w[:maxChoice], p[:maxChoice])[0]
                if t != lastsent:
                    S.append(t)
                    lastsent = t
                    s0 = t
            elif s0 in D_simi:
                p = [float(tt) for tt in D_simi[s0]['probs']]
                w = D_simi[s0]['words']
                t0 = random.choices(w, p)[0]
                p = [float(tt) for tt in D_next[t0]['probs']]
                w = D_next[t0]['words']
                t = random.choices(w[:maxChoice], p[:maxChoice])[0]
                if t != lastsent:
                    S.append(t)
                    lastsent = t
                    s0 = t
            else:
                break
        S = inputStr + punc + ','.join(S)
        if S not in output:
            output.append(S)
        if len(output) >= num:
            break
    output = postprocess(output,
                         inputStr,
                         config_predict,
                         sentEndcontent=False,
                         removeHighFreqWords=False)
    output = dropDuplicateContent(output)
    return output
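For reference, D_next and D_simi are plain dicts keyed by sentence; each entry carries parallel 'words' and 'probs' lists (probabilities stored as strings and converted with float()), and the code assumes the words in a D_simi entry are themselves keys of D_next. A minimal hypothetical example with invented values:

D_next = {
    '怎么了': {'words': ['没事', '别担心'], 'probs': ['0.6', '0.4']},
    '没事': {'words': ['那就好'], 'probs': ['1.0']},
}
D_simi = {
    '还好吗': {'words': ['怎么了'], 'probs': ['1.0']},
}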
Code Example #3
File: gpt_gen.py Project: baokui/GPT2-Chinese
def generating_poem(app,
                    prefix,
                    model,
                    config,
                    tokenizer,
                    device,
                    config_predict,
                    quick=False,
                    num=5,
                    continue_writing=False,
                    removeHighFreqWords=False,
                    batchGenerating=False,
                    gpu='0',
                    onlyMax=False,
                    maxNb=20):
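    # Poem generation entry point: reject prefixes that are empty, too long or less than
    # 75% Chinese characters, pick a CUDA/CPU device, sample candidates (batched or one
    # by one), then decode, filter against config_predict.blackwords and de-duplicate,
    # returning at most `num` poems.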
    if len(prefix) == 0 or len(prefix) > model.config.n_ctx:
        return []
    if sum([_is_chinese_char(c) for c in prefix]) < len(prefix) * 0.75:
        return []
    if gpu:
        torch.cuda.set_device(int(gpu))
        device = "cuda" if torch.cuda.is_available() else "cpu"
    else:
        device = 'cpu'
    punc = '.,?!;\t 。,?!;'
    global a
    a = app
    fast_pattern = config_predict.fast_pattern
    n_ctx = model.config.n_ctx
    length = config['length']
    nsamples = num
    #maxNb = max(nsamples,maxNb)
    maxNb = nsamples
    temperature = config['temperature']
    topk = config['topk']
    topp = config['topp']
    quick_pattern = quick
    repetition_penalty = config['repetition_penalty']
    if length == -1:
        length = model.config.n_ctx
    #raw_text = prefix[0] + prefix
    raw_text = '[MASK]' + prefix
    context_tokens = tokenizer.convert_tokens_to_ids(
        tokenizer.tokenize(raw_text))
    t0 = time.time()
    outs = []
    if batchGenerating:
        S = []
        if onlyMax:
            outs = sample_sequence_batch_max(
                model,
                context_tokens,
                length,
                n_ctx,
                tokenizer,
                nsamples=2,
                temperature=temperature,
                top_k=topk,
                top_p=topp,
                repitition_penalty=repetition_penalty,
                device=device)
        else:
            if fast_pattern:
                outs = fast_sample_sequence_batch(
                    model,
                    context_tokens,
                    length,
                    nsamples=maxNb,
                    temperature=temperature,
                    top_k=topk,
                    repitition_penalty=repetition_penalty,
                    device=device)
            else:
                outs = sample_sequence_batch_opti(
                    model,
                    context_tokens,
                    length,
                    n_ctx,
                    tokenizer,
                    maxNb,
                    temperature=temperature,
                    top_k=topk,
                    top_p=topp,
                    repitition_penalty=repetition_penalty,
                    device=device)
    else:
        # Sequential (non-batch) sampling: collect the raw token sequences in outs so
        # they go through the shared poem decoding and filtering below.
        for _ in range(maxNb):
            out = generate(n_ctx=n_ctx,
                           model=model,
                           context=context_tokens,
                           length=length,
                           is_fast_pattern=fast_pattern,
                           tokenizer=tokenizer,
                           is_quick=quick_pattern,
                           temperature=temperature,
                           top_k=topk,
                           top_p=topp,
                           repitition_penalty=repetition_penalty,
                           device=device)
            outs.append(out)
    S = []
    for out in outs:
        tmptext = untokenization_poem(out, tokenizer, config)
        poem = poemFilter1(tmptext, prefix, config_predict.blackwords)
        if poem:
            S.append(poem)
    S = dropDuplicateContent(S)
    S = S[:nsamples]
    return S
Code Example #4
File: gpt_gen.py Project: baokui/GPT2-Chinese
def generating(app,
               prefix,
               model,
               config,
               tokenizer,
               device,
               config_predict,
               quick=False,
               num=5,
               continue_writing=False,
               removeHighFreqWords=False,
               batchGenerating=False,
               gpu='0',
               onlyMax=False,
               maxNb=20,
               style=''):
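    # General continuation writing: optionally trim the prefix, cap the generation length
    # for short prefixes, sample candidates (batched or sequential), then post-process,
    # de-duplicate and optionally re-rank them, returning at most `num` results.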
    #print("start:",prefix)
    #os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    if len(prefix) == 0 or len(prefix) > model.config.n_ctx:
        return []
    if gpu:
        torch.cuda.set_device(int(gpu))
        device = "cuda" if torch.cuda.is_available() else "cpu"
    else:
        device = 'cpu'
    #if style=='prose':
    #prefix = prefix[0] + prefix
    prefix0 = prefix
    if config_predict.prefixTrim:
        prefix = sentTriming(prefix0)
        if len(prefix) == 0:
            prefix = prefix0
    punc = '.,?!;\t 。,?!;'
    global a
    a = app
    fast_pattern = config_predict.fast_pattern
    n_ctx = model.config.n_ctx
    len_prefix = len(prefix)
    if len_prefix < 5:
        max_genlen = 20
    elif len_prefix < 10:
        max_genlen = 25
    else:
        max_genlen = config['length']
    length = min(max_genlen, n_ctx - len_prefix - 1)
    nsamples = num
    maxNb = max(nsamples, maxNb)
    temperature = config['temperature']
    topk = config['topk']
    topp = config['topp']
    quick_pattern = quick
    repetition_penalty = config['repetition_penalty']
    if length == -1:
        length = model.config.n_ctx
    raw_text = '[MASK]' + prefix
    context_tokens = tokenizer.convert_tokens_to_ids(
        tokenizer.tokenize(raw_text))
    if batchGenerating:
        S = []
        if onlyMax:
            outs = sample_sequence_batch_max(
                model,
                context_tokens,
                length,
                n_ctx,
                tokenizer,
                nsamples=2,
                temperature=temperature,
                top_k=topk,
                top_p=topp,
                repitition_penalty=repetition_penalty,
                device=device)
        else:
            if fast_pattern:
                outs = fast_sample_sequence_batch(
                    model,
                    context_tokens,
                    length,
                    nsamples=maxNb,
                    temperature=temperature,
                    top_k=topk,
                    repitition_penalty=repetition_penalty,
                    device=device)
            else:
                outs = sample_sequence_batch_opti(
                    model,
                    context_tokens,
                    length,
                    n_ctx,
                    tokenizer,
                    maxNb,
                    temperature=temperature,
                    top_k=topk,
                    top_p=topp,
                    repitition_penalty=repetition_penalty,
                    device=device)
        for out in outs:
            tmptext = untokenization(out, config, tokenizer, punc,
                                     continue_writing)
            S.append(tmptext)
    else:
        S = []
        for _ in range(maxNb):
            out = generate(n_ctx=n_ctx,
                           model=model,
                           context=context_tokens,
                           length=length,
                           is_fast_pattern=fast_pattern,
                           tokenizer=tokenizer,
                           is_quick=quick_pattern,
                           temperature=temperature,
                           top_k=topk,
                           top_p=topp,
                           repitition_penalty=repetition_penalty,
                           device=device)
            tmptext = untokenization(out, config, tokenizer, punc,
                                     continue_writing)
            S.append(tmptext)
    if config_predict.prefixTrim:
        S = [prefix0 + s[len(prefix):] for s in S]
    S = postprocess(S,
                    prefix0,
                    config_predict,
                    removeHighFreqWords=removeHighFreqWords)
    S = dropDuplicateContent(S)
    if config_predict.resort:
        if len(S) > 0:
            S = resort(prefix0, S, config_predict)
    S = S[:nsamples]
    #if style == 'prose':
    #S = [r[1:] for r in S]
    return S
Code Example #5
def generating(app,
               prefix,
               model,
               config,
               tokenizer,
               device,
               config_predict,
               quick=False,
               num=5,
               continue_writing=False,
               removeHighFreqWords=False,
               batchGenerating=False,
               gpu='0',
               onlyMax=False,
               fast_pattern=False):
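    # Older variant of `generating`: same sampling flow, but without prefix-length capping,
    # re-ranking or the maxNb over-generation, and it returns all post-processed candidates.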
    #print("start:",prefix)
    #os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    torch.cuda.set_device(int(gpu))
    prefix0 = prefix
    if config_predict.prefixTrim:
        prefix = sentTriming(prefix0)
        if len(prefix) == 0:
            prefix = prefix0
    punc = '.,?!;\t 。,?!;'
    global a
    a = app
    if config_predict.fast_pattern:
        fast_pattern = True
    n_ctx = model.config.n_ctx
    length = config['length']
    nsamples = num
    temperature = config['temperature']
    topk = config['topk']
    topp = config['topp']
    quick_pattern = quick
    repetition_penalty = config['repetition_penalty']
    if length == -1:
        length = model.config.n_ctx
    raw_text = prefix
    context_tokens = tokenizer.convert_tokens_to_ids(
        tokenizer.tokenize(raw_text))
    t0 = time.time()
    if batchGenerating:
        S = []
        if onlyMax:
            outs = sample_sequence_batch_max(
                model,
                context_tokens,
                length,
                n_ctx,
                tokenizer,
                nsamples=2,
                temperature=temperature,
                top_k=topk,
                top_p=topp,
                repitition_penalty=repetition_penalty,
                device=device)
        else:
            if fast_pattern:
                outs = fast_sample_sequence_batch(
                    model,
                    context_tokens,
                    length,
                    nsamples=nsamples,
                    temperature=temperature,
                    top_k=topk,
                    repitition_penalty=repetition_penalty,
                    device=device)
            else:
                outs = sample_sequence_batch_opti(
                    model,
                    context_tokens,
                    length,
                    n_ctx,
                    tokenizer,
                    nsamples,
                    temperature=temperature,
                    top_k=topk,
                    top_p=topp,
                    repitition_penalty=repetition_penalty,
                    device=device)
        #print('model predict all time:%0.4f' % (t1 - t0))
        for out in outs:
            tmptext = untokenization(out, config, tokenizer, punc,
                                     continue_writing)
            S.append(tmptext)
        #print('model untokenization time:%0.4f' % (t2 - t1))
    else:
        S = []
        for _ in range(nsamples):
            out = generate(n_ctx=n_ctx,
                           model=model,
                           context=context_tokens,
                           length=length,
                           is_fast_pattern=fast_pattern,
                           tokenizer=tokenizer,
                           is_quick=quick_pattern,
                           temperature=temperature,
                           top_k=topk,
                           top_p=topp,
                           repitition_penalty=repetition_penalty,
                           device=device)
            tmptext = untokenization(out, config, tokenizer, punc,
                                     continue_writing)
            S.append(tmptext)
    t1 = time.time()
    if config_predict.prefixTrim:
        S = [prefix0 + s[len(prefix):] for s in S]
    S = postprocess(S,
                    prefix0,
                    config_predict,
                    removeHighFreqWords=removeHighFreqWords)
    S = dropDuplicateContent(S)
    t2 = time.time()
    #print('text generating and posprocess time:%0.4f and %0.4f' % (t1 - t0,t2-t1))
    return S
Code Example #6
File: gpt_gen.py Project: baokui/GPT2-Chinese
def testFun(app,
            prefix,
            model,
            config,
            tokenizer,
            device,
            config_predict,
            quick=False,
            num=5,
            continue_writing=False,
            removeHighFreqWords=False,
            batchGenerating=False,
            gpu='0',
            onlyMax=False,
            maxNb=20):
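    # Experimental/test variant: inlines a batched sampling loop with a hand-rolled
    # repetition penalty instead of calling the sample_sequence_* helpers, and returns
    # the decoded texts as soon as they are ready.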
    #print("start:",prefix)
    #os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    if len(prefix) == 0 or len(prefix) > model.config.n_ctx:
        return []
    if gpu:
        torch.cuda.set_device(int(gpu))
        device = "cuda" if torch.cuda.is_available() else "cpu"
    else:
        device = 'cpu'
    #print("use device:%s" % device)
    prefix0 = prefix
    if config_predict.prefixTrim:
        prefix = sentTriming(prefix0)
        if len(prefix) == 0:
            prefix = prefix0
    punc = '.,?!;\t 。,?!;'
    global a
    a = app
    fast_pattern = config_predict.fast_pattern
    n_ctx = model.config.n_ctx
    len_prefix = len(prefix)
    if len_prefix < 5:
        max_genlen = 5 * len_prefix
    elif len_prefix < 10:
        max_genlen = 3 * len_prefix
    else:
        max_genlen = config['length']
    length = min(max_genlen, n_ctx - len_prefix - 1)
    nsamples = num
    maxNb = max(nsamples, maxNb)
    temperature = config['temperature']
    topk = config['topk']
    topp = config['topp']
    quick_pattern = quick
    repetition_penalty = config['repetition_penalty']
    if length == -1:
        length = model.config.n_ctx
    raw_text = prefix
    context_tokens = tokenizer.convert_tokens_to_ids(
        tokenizer.tokenize(raw_text))
    t0 = time.time()
    S = []
    rev_repitition_penalty = 1.0 / repetition_penalty
    inputs = [context_tokens] * nsamples
    inputs = torch.tensor(inputs, dtype=torch.long, device=device)
    _, past = model(inputs[:, :-1], None)[:2]
    prev = inputs[:, -1].view(-1, 1)
    context = context_tokens
    generate = [[t for t in context] for _ in range(nsamples)]
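    # A0/A1 hold (sample index, token id) pairs for every token emitted so far, so the
    # repetition penalty can down-weight all of them with a single fancy-indexed multiply.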
    A0 = []
    A1 = []
    for kk in range(len(generate)):
        for jj in range(len(generate[kk])):
            A0.append(kk)
            A1.append(generate[kk][jj])
    with torch.no_grad():
        for i in range(length):  # one new token per step for every sample
            output = model(prev, past=past)
            output, past = output[:2]
            output = output.squeeze(1)
            output[A0, A1] *= rev_repitition_penalty
            output /= temperature
            filtered_logits = top_k_top_p_filtering(output,
                                                    top_k=topk,
                                                    top_p=0)
            next_token = torch.multinomial(torch.softmax(filtered_logits,
                                                         dim=-1),
                                           num_samples=1)
            prev = next_token
            NT_np = next_token.cpu().numpy()
            for ii in range(nsamples):
                generate[ii].append(NT_np[ii][0])
                A0.append(ii)
                A1.append(NT_np[ii][0])
    outs = generate
    for out in outs:
        tmptext = untokenization(out, config, tokenizer, punc,
                                 continue_writing)
        S.append(tmptext)
    return S
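    # NOTE: testFun returns above; everything below is leftover from earlier experiments
    # and is never executed.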
    outs = fast_sample_sequence_batch(model,
                                      context_tokens,
                                      length,
                                      nsamples=maxNb,
                                      temperature=temperature,
                                      top_k=topk,
                                      repitition_penalty=repetition_penalty,
                                      device=device)
    return outs
    for out in outs:
        tmptext = untokenization(out, config, tokenizer, punc,
                                 continue_writing)
        S.append(tmptext)

    t1 = time.time()
    if config_predict.prefixTrim:
        S = [prefix0 + s[len(prefix):] for s in S]
    S = postprocess(S,
                    prefix0,
                    config_predict,
                    removeHighFreqWords=removeHighFreqWords)
    S = dropDuplicateContent(S)
    if config_predict.resort:
        if len(S) > 0:
            S = resort(prefix0, S, config_predict)
    t2 = time.time()
    #print('text generating and posprocess time:%0.4f and %0.4f' % (t1 - t0,t2-t1))
    S = S[:nsamples]
    return S