Example #1
# fastai v1 API; the imports below are assumed for this and the later fastai examples
from multiprocessing import freeze_support
from pathlib import Path

import matplotlib.pyplot as plt
from fastai.text import *   # Tokenizer, TextLMDataBunch, load_data, language_model_learner, AWD_LSTM


def run():
    freeze_support()
    print('loop')

    PathModel = Path('Path to your pre-trained model')
    PathCsv = Path('Path to your csv files')
    
    tokenizer = Tokenizer(lang='xx')
    data_lm = TextLMDataBunch.from_csv(PathCsv, csv_name='rest_full.csv',text_cols=0, tokenizer=tokenizer)
    data_lm.save('data_lm_rest_fn.pkl')
    
    data_lm = load_data(PathCsv, 'data_lm_rest_fn.pkl', bs=32)
    learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3 )
    learn.load_pretrained(PathModel / 'models/model-full-v2.pth', PathModel / 'full_lm/itos.pkl')
    learn.freeze()
    learn.lr_find()
    learn.recorder.plot(skip_start=15)
    plt.show()

    learn.fit_one_cycle(1, 1e-02)
    learn.save('rest_head_pretrained')

    learn.unfreeze()
    learn.fit_one_cycle(10, 1e-03, moms=(0.8, 0.7))
    learn.save('rest_lm_fine_tuned')
    learn.save_encoder('rest_enc_fine_tuned')
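
Once the head training and full fine-tuning above have run, it is worth sampling a few continuations to sanity-check the language model before moving on to the classifier. A minimal sketch, continuing with the learn object inside run(); the Turkish prompt is illustrative, and LanguageLearner.predict(text, n_words, temperature=...) is the same fastai v1 call used in Example #5:

    sample_prompt = 'Yemekler çok'  # illustrative prompt, not from the original code
    print(learn.predict(sample_prompt, 20, temperature=0.75))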
Example #2
import sys


def freeze_support():
    '''
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.
    '''
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        # multiprocessing.forking exists only on Python 2; on Python 3,
        # call multiprocessing.freeze_support() directly instead.
        from multiprocessing.forking import freeze_support
        freeze_support()
Example #3
def freeze_support():
    """
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.
    """
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        from multiprocessing.forking import freeze_support
        freeze_support()
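
The shim above only matters once the script is bundled into a Windows executable; in a normal interpreter the call is a no-op. A minimal, self-contained usage sketch with the standard library's multiprocessing.freeze_support (square is a placeholder worker):

from multiprocessing import Pool, freeze_support


def square(x):
    # placeholder worker; any picklable function would do
    return x * x


if __name__ == '__main__':
    freeze_support()  # must come first in the __main__ block of a frozen Windows exe
    with Pool(2) as pool:
        print(pool.map(square, range(4)))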
Example #4
# assumes the same fastai v1 imports as Example #1; Fbeta_binary comes from
# fastai's metrics module in older 1.x releases
def run():
    freeze_support()
    print('loop')

    PathCsv = Path('Path to your data')

    data_lm = load_data(PathCsv, 'data_lm_rest_fn.pkl', bs=32)
    print(data_lm)
    tokenizer = Tokenizer(lang='xx')
    data_clas = TextClasDataBunch.from_csv(PathCsv,
                                           vocab=data_lm.vocab,
                                           csv_name='rest_full_clas.csv',
                                           tokenizer=tokenizer)
    data_clas.save('rest_data_clas.pkl')
    data_clas = load_data(PathCsv, 'rest_data_clas.pkl', bs=32)

    learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
    learn.load_encoder('rest_enc_fine_tuned')
    f1_label1 = Fbeta_binary(1, clas=0)
    f1_label0 = Fbeta_binary(1, clas=1)
    learn.metrics = [accuracy, f1_label1, f1_label0]
    learn.freeze()

    learn.lr_find()
    learn.recorder.plot()
    plt.show()
    print(learn.model)
    learn.fit_one_cycle(1, 1.45e-01, moms=(0.8, 0.7))
    learn.save('rest_first')
    learn.load('rest_first')

    learn.freeze_to(-2)
    learn.fit_one_cycle(1, slice(1e-2 / (2.6**4), 1e-2), moms=(0.8, 0.7))
    learn.save('rest_second')
    learn.load('rest_second')

    learn.freeze_to(-3)
    learn.fit_one_cycle(1, slice(5e-3 / (2.6**4), 5e-3), moms=(0.8, 0.7))
    learn.save('rest_third')
    learn.load('rest_third')

    learn.unfreeze()
    learn.fit_one_cycle(2, slice(1e-3 / (2.6**4), 1e-3), moms=(0.8, 0.7))
    print(learn.predict("Güzel ürün tavsiye ederim."))
    print(learn.predict("Kötü"))
    print(learn.predict("Rezalet ötesi"))
Example #5
# assumes the same fastai v1 star import as Example #1
def run():
    freeze_support()
    print('loop')

    # LM training
    PathCsv = Path('Path to your project folder')
    
    tokenizer = Tokenizer(lang='xx', n_cpus=4)
    data_lm_full = (TextList.from_csv(PathCsv, csv_name='fulltrain.csv', cols=0,
                                      processor=[TokenizeProcessor(tokenizer=tokenizer),
                                                 NumericalizeProcessor(max_vocab=60000)])
                    # Inputs: all the text rows in fulltrain.csv
                    .split_from_df(col=1)
                    # The train/validation split is taken from column 1 of the csv
                    .label_for_lm()
                    # We want a language model, so we label accordingly
                    .databunch(bs=32))
    
    data_lm_full.save('full_lm')
    
    
    data_lm_full = TextLMDataBunch.load(PathCsv, 'full_lm', bs=32)
    print(len(data_lm_full.train_ds.vocab.itos))
    data_lm_full.show_batch()

    learn = language_model_learner(data_lm_full, AWD_LSTM, drop_mult=0.3, callback_fns=ShowGraph)
    learn.lr_find()
    learn.recorder.plot(skip_start=0)
    plt.show()
    
    learn.fit_one_cycle(10, 1e-01, moms=(0.8,0.7))
    learn.save('model-full-v2')

    data_lm_full = TextLMDataBunch.load(PathCsv, 'full_lm', bs=32)
    print(len(data_lm_full.train_ds.vocab.itos))
    learn = language_model_learner(data_lm_full, AWD_LSTM)
    learn.load_pretrained(PathCsv / 'models/model-full-v2.pth', PathCsv / 'full_lm/itos.pkl')

    Text = 'Bu köyün özellikleri arasında '
    N_WORDS = 20
    N_SENTENCES = 2

    print("\n".join(learn.predict(Text, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
Example #6
                # snippet begins inside the bot's main loop, in the try-block of
                # the branch that handles the "shell" command
                p = ircMsgClean.split()
                print(p)
                comando = p[4:-1]              # tokens between the keyword and the bot id
                comando = " ".join(comando)
                print(comando)

                id = p[-1]                     # last token names the bot that should act
            except IndexError:
                msgSend(
                    ircChannel,
                    "[+] Sintaxe [+] use: <shell> <comando> <" + botNick + ">")
            else:
                if id == botNick:              # original also tested `comando == comando`, which is always true
                    shell()

        elif ircMsg.find(str.encode("persistence")) != -1:  # persistence command
            try:
                p = ircMsgClean.split()
                id = p[4]
            except IndexError:
                msgSend(ircChannel,
                        "[+] Sintaxe [+] use: <persistence> <" + botNick + ">")
            else:
                if id == botNick:
                    persis()


if __name__ == "__main__":
    freeze_support()
    main()
Example #7
def freeze_support():
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        from multiprocessing.forking import freeze_support
        freeze_support()
Example #8
def freeze_support():
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        from multiprocessing.forking import freeze_support
        freeze_support()