Example #1
    def __init__(self, load_path):
        ctx = "cuda"
        cachedir = "~/kogpt2/"
        org_path = "trained_models/gpt2_j20_1007.pt"

        # download vocab
        vocab_info = tokenizer
        vocab_path = download(
            vocab_info["url"],
            vocab_info["fname"],
            vocab_info["chksum"],
            cachedir=cachedir,
        )
        # Set the device
        device = torch.device(ctx)
        # Load the saved checkpoint
        checkpoint = torch.load(load_path, map_location=device)
        # 1013: the state_dict keys changed after training with special tokens, so this remapping is needed
        checkpoint_org = torch.load(org_path, map_location=device)
        ckpt_final = {
            k: v for k, v in zip(checkpoint_org.keys(), checkpoint.values())
        }  # swap the values of the original state_dict for the newly trained weights

        # Declare a GPT2LMHeadModel for the KoGPT-2 language model
        self.kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))

        self.kogpt2model.load_state_dict(ckpt_final)
        self.kogpt2model.to(device)

        self.kogpt2model.eval()
        self.vocab = gluonnlp.vocab.BERTVocab.from_sentencepiece(
            vocab_path,
            mask_token=None,
            sep_token=None,
            cls_token=None,
            unknown_token="<unk>",
            padding_token="<pad>",
            bos_token="<s>",
            eos_token="</s>",
        )

        tok_path = get_tokenizer()
        self.tok = SentencepieceTokenizer(tok_path)
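A minimal usage sketch (the wrapper class name KoGPT2Generator below is hypothetical; the snippet above only shows its __init__). The attributes it loads can be handed straight to the sample_sequence helper used in the later examples:

generator = KoGPT2Generator(load_path="trained_models/gpt2_genre_pad_50.pt")  # hypothetical class name
text = sample_sequence(generator.kogpt2model, generator.tok, generator.vocab,
                       sent="input text", text_size=100,
                       temperature=0.7, top_p=0.8, top_k=40)
print(text)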
Example #2
def main(temperature=0.7,
         top_p=0.8,
         top_k=40,
         tmp_sent="",
         text_size=100,
         loops=0,
         load_path=""):
    ctx = 'cuda'
    cachedir = '~/kogpt2/'
    save_path = './checkpoint/'
    # download model
    model_info = pytorch_kogpt2
    model_path = download(model_info['url'],
                          model_info['fname'],
                          model_info['chksum'],
                          cachedir=cachedir)
    # download vocab
    vocab_info = tokenizer
    vocab_path = download(vocab_info['url'],
                          vocab_info['fname'],
                          vocab_info['chksum'],
                          cachedir=cachedir)
    # Set the device
    device = torch.device(ctx)
    # Load the saved checkpoint
    checkpoint = torch.load(load_path, map_location=device)

    # Declare a GPT2LMHeadModel for the KoGPT-2 language model
    kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))
    kogpt2model.load_state_dict(checkpoint['model_state_dict'])

    kogpt2model.eval()
    vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(
        vocab_path,
        mask_token=None,
        sep_token=None,
        cls_token=None,
        unknown_token='<unk>',
        padding_token='<pad>',
        bos_token='<s>',
        eos_token='</s>')

    tok_path = get_tokenizer()
    model, vocab = kogpt2model, vocab_b_obj
    tok = SentencepieceTokenizer(tok_path)

    if loops:
        num = 1
    else:
        num = 0

    while 1:
        sent = ''
        if tmp_sent == "":
            tmp_sent = input('input : ')
        sent = sent + tmp_sent

        toked = tok(sent)

        if len(toked) > 1022:
            break

        sent = sample_sequence(model, tok, vocab, sent, text_size, temperature,
                               top_p, top_k)
        sent = sent.replace("<unused0>", "\n")  # 비효율적이지만 엔터를 위해서 등장
        sent = auto_enter(sent)
        print(sent)

        now = [int(n) for n in os.listdir("./samples")]
        if len(now) == 0:
            now = 0
        else:
            now = max(now)
        f = open("samples/" + str(now + 1), 'w', encoding="utf-8")
        head = [load_path, tmp_sent, text_size, temperature, top_p, top_k]
        head = [str(h) for h in head]
        f.write(",".join(head))
        f.write("\n")
        f.write(sent)
        f.close()

        tmp_sent = ""

        if num != 0:
            num += 1
            if num >= loops:
                print("good")
                return
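A hedged invocation sketch for the generation loop above (argument values are illustrative, and the ./samples directory must already exist because the function lists it to pick the next file name):

if __name__ == "__main__":
    main(temperature=0.7, top_p=0.8, top_k=40,
         tmp_sent="사랑", text_size=100, loops=5,
         load_path="./checkpoint/KoGPT2_checkpoint_long.tar")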
Example #3
device = torch.device("cuda:2")
print(device)
org_path = "trained_models/gpt2_j20_1007.pt"
load_path = "trained_models/gpt2_genre_pad_50.pt"

checkpoint = torch.load(load_path, map_location=device)
# 1013: the state_dict keys changed after training with special tokens, so this remapping is needed
checkpoint_org = torch.load(org_path, map_location=device)

ckpt_final = {
    k: v
    for k, v in zip(checkpoint_org.keys(), checkpoint.values())
}  # swap the values of the original state_dict for the newly trained weights

# Declare a GPT2LMHeadModel for the KoGPT-2 language model
model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))
model.load_state_dict(ckpt_final)
model.to(device)

import os

os.environ["CUDA_VISIBLE_DEVICES"] = "1"

batch_size = 16
epochs = 100
learning_rate = 3e-5
warmup_steps = 5000
max_seq_len = 400

print("Dataset Loading... ", end=" ")
dataset = synoDataset("./data/korean_naver_2.csv", vocab, tok)
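The hyperparameters above are declared, but the snippet ends before the training loop. A hedged sketch of how they would typically be wired together, assuming a transformers version that provides get_linear_schedule_with_warmup (older pytorch-transformers releases expose the same idea as WarmupLinearSchedule):

from torch.utils.data import DataLoader
from transformers import get_linear_schedule_with_warmup

data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=warmup_steps,
    num_training_steps=epochs * len(data_loader))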
Example #4
def main(temperature=0.7,
         top_p=0.8,
         top_k=40,
         tmp_sent="",
         text_size=100,
         loops=-1,
         load_path='./checkpoint/KoGPT2_checkpoint_long.tar',
         ctx='cpu',
         cachedir='~/kogpt2/',
         samples="./samples"):

    pytorch_kogpt2 = {
        'url':
        'https://kobert.blob.core.windows.net/models/kogpt2/pytorch/pytorch_kogpt2_676e9bcfa7.params',
        'fname': 'pytorch_kogpt2_676e9bcfa7.params',
        'chksum': '676e9bcfa7'
    }

    kogpt2_config = {
        "initializer_range": 0.02,
        "layer_norm_epsilon": 1e-05,
        "n_ctx": 1024,
        "n_embd": 768,
        "n_head": 12,
        "n_layer": 12,
        "n_positions": 1024,
        "vocab_size": 50000
    }

    model_info = pytorch_kogpt2
    model_path = download(model_info['url'],
                          model_info['fname'],
                          model_info['chksum'],
                          cachedir=cachedir)

    vocab_info = tokenizer
    vocab_path = download(vocab_info['url'],
                          vocab_info['fname'],
                          vocab_info['chksum'],
                          cachedir=cachedir)

    device = torch.device(ctx)

    # Load the saved checkpoint
    checkpoint = torch.load(load_path, map_location=device)

    # Declare a GPT2LMHeadModel for the KoGPT-2 language model
    kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))
    kogpt2model.load_state_dict(checkpoint['model_state_dict'])

    kogpt2model.eval()
    vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(
        vocab_path,
        mask_token=None,
        sep_token=None,
        cls_token=None,
        unknown_token='<unk>',
        padding_token='<pad>',
        bos_token='<s>',
        eos_token='</s>')

    tok_path = get_tokenizer()

    model, vocab = kogpt2model, vocab_b_obj
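    # Remap line breaks: give "\n" the token id that <unused0> had (generated text
    # uses <unused0> as a newline stand-in, see the replace() call below) and drop
    # <unused0> from the lookup table.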
    vocab.token_to_idx["\n"] = vocab.token_to_idx["<unused0>"]
    del vocab.token_to_idx["<unused0>"]

    tok = SentencepieceTokenizer(tok_path)
    num = 0

    sent_dict = {}

    if loops != -1:
        num = 1

    while 1:
        sent = ''
        if tmp_sent == "":
            tmp_sent = input('input : ')
        sent = sent + tmp_sent

        toked = tok(sent)

        if len(toked) > 1022:
            break

        sent = sample_sequence(model, tok, vocab, sent, text_size, temperature,
                               top_p, top_k)
        sent = sent.replace("<unused0>", "\n")  # 비효율적이지만 엔터를 위해서 등장
        sent = auto_enter(sent)
        # print(sent)

        sent_dict[num] = sent
        now = [int(n) for n in os.listdir(samples)]
        now = max(now) if now else 0
        f = open(os.path.join(samples, str(now + 1)), 'w', encoding="utf-8")
        f.write(sent)
        f.close()

        if num:
            num += 1
            if num >= loops:
                print("good")
                return sent_dict
Example #5
def main(epoch, save_path, load_path, samples, data_file_path, batch_size):
    ctx = 'cuda'
    cachedir = '~/kogpt2/'

    summary = SummaryWriter()

    # download model
    model_info = pytorch_kogpt2
    model_path = download(model_info['url'],
                          model_info['fname'],
                          model_info['chksum'],
                          cachedir=cachedir)
    # download vocab
    vocab_info = tokenizer
    vocab_path = download(vocab_info['url'],
                          vocab_info['fname'],
                          vocab_info['chksum'],
                          cachedir=cachedir)

    # Declare a GPT2LMHeadModel for the KoGPT-2 language model
    kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))

    # Load the weights downloaded to model_path via load_state_dict
    kogpt2model.load_state_dict(torch.load(model_path))

    device = torch.device(ctx)
    kogpt2model.to(device)

    # Checkpoint loading section
    try:
        checkpoint = torch.load(load_path, map_location=device)

        # Declare a GPT2LMHeadModel for the KoGPT-2 language model
        kogpt2model = GPT2LMHeadModel(
            config=GPT2Config.from_dict(kogpt2_config))
        kogpt2model.load_state_dict(checkpoint['model_state_dict'])

        kogpt2model.eval()
    except:
        count = 0
    else:
        count = int(re.findall(r"\d+", load_path)[1])

    print(count)
    # use .train() to continue training
    kogpt2model.train()
    vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(
        vocab_path,
        mask_token=None,
        sep_token=None,
        cls_token=None,
        unknown_token='<unk>',
        padding_token='<pad>',
        bos_token='<s>',
        eos_token='</s>')

    tok_path = get_tokenizer()
    model, vocab = kogpt2model, vocab_b_obj
    tok = SentencepieceTokenizer(tok_path)

    dataset = Read_Dataset(data_file_path, vocab, tok)
    data_loader = DataLoader(dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             pin_memory=True)

    learning_rate = 3e-5
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    print('KoGPT-2 Transfer Learning Start')
    avg_loss = (0.0, 0.0)

    for epoch in range(epoch):
        for data in data_loader:
            optimizer.zero_grad()
            data = torch.stack(
                data)  # the batch is a list of Tensors, so stack it into a single Tensor
            data = data.transpose(1, 0)
            data = data.to(ctx)
            model = model.to(ctx)

            outputs = model(data, labels=data)
            loss, logits = outputs[:2]
            loss = loss.to(ctx)
            loss.backward()
            avg_loss = (avg_loss[0] * 0.99 + loss, avg_loss[1] * 0.99 + 1.0)
            optimizer.step()
            if count % 10 == 0:
                print(
                    'epoch no.{0} train no.{1}  loss = {2:.5f} avg_loss = {3:.5f}'
                    .format(epoch, count, loss, avg_loss[0] / avg_loss[1]))
                summary.add_scalar('loss/avg_loss', avg_loss[0] / avg_loss[1],
                                   count)
                summary.add_scalar('loss/loss', loss, count)

            # run the generator
            if (count > 0 and count % 1000 == 0) or (len(data) < batch_size):
                sent = sample_sequence(model.to("cpu"),
                                       tok,
                                       vocab,
                                       sent="사랑",
                                       text_size=100,
                                       temperature=0.7,
                                       top_p=0.8,
                                       top_k=40)
                sent = sent.replace("<unused0>", "\n")
                print(sent)

                summary.add_text('Text', sent, count)

                if count > 500000:
                    now = [int(n) for n in os.listdir(samples)]
                    now = max(now)
                    f = open(samples + str(now + 1), 'w', encoding="utf-8")
                    f.write(sent)
                    f.close()
            #########################################
            count += 1

            if (count > 0 and count % 10000 == 0) or (len(data) < batch_size):
                # save the model
                try:
                    torch.save(
                        {
                            'epoch': epoch,
                            'train_no': count,
                            'model_state_dict': model.state_dict(),
                            'optimizer_state_dict': optimizer.state_dict(),
                            'loss': loss
                        },
                        save_path + 'KoGPT2_checkpoint_' + str(count) + '.tar')
                except:
                    pass
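A hedged invocation sketch for the fine-tuning routine above (paths are illustrative; pytorch_kogpt2, tokenizer and kogpt2_config are assumed to be imported at module level, as in the other examples):

if __name__ == "__main__":
    main(epoch=200,
         save_path='./checkpoint/',
         load_path='./checkpoint/KoGPT2_checkpoint_long.tar',
         samples='./samples/',
         data_file_path='dataset/lyrics_dataset.txt',
         batch_size=8)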
Example #6
def main(epoch = 200, save_path = './checkpoint/', load_path = './checkpoint/KoGPT2_checkpoint_long.tar',
		data_file_path = 'dataset/lyrics_dataset.txt', batch_size = 8, summary_url = 'runs/', new = 0, text_size = 100):
	ctx = 'cuda'
	cachedir = '~/kogpt2/'
	summary = SummaryWriter(summary_url)

	pytorch_kogpt2 = {
		'url': 'https://kobert.blob.core.windows.net/models/kogpt2/pytorch/pytorch_kogpt2_676e9bcfa7.params',
		'fname': 'pytorch_kogpt2_676e9bcfa7.params',
		'chksum': '676e9bcfa7'
	}
	kogpt2_config = {
		"initializer_range": 0.02,
		"layer_norm_epsilon": 1e-05,
		"n_ctx": 1024,
		"n_embd": 768,
		"n_head": 12,
		"n_layer": 12,
		"n_positions": 1024,
		"vocab_size": 50000
	}

	# download model
	model_info = pytorch_kogpt2
	model_path = download(model_info['url'],
						   model_info['fname'],
						   model_info['chksum'],
						   cachedir=cachedir)
	# download vocab
	vocab_info = tokenizer
	vocab_path = download(vocab_info['url'],
						   vocab_info['fname'],
						   vocab_info['chksum'],
						   cachedir=cachedir)

	# Declare a GPT2LMHeadModel for the KoGPT-2 language model
	kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))

	# Load the weights downloaded to model_path via load_state_dict
	kogpt2model.load_state_dict(torch.load(model_path))

	device = torch.device(ctx)
	kogpt2model.to(device)
	count = 0
	# Checkpoint loading section
	try:
		checkpoint = torch.load(load_path, map_location=device)

		# Declare a GPT2LMHeadModel for the KoGPT-2 language model
		kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))
		kogpt2model.load_state_dict(checkpoint['model_state_dict'])

		kogpt2model.eval()
	except:
		print("count 0 : ", load_path)
	else:
		print("count check : ",re.findall("\d+", load_path))
		count = max([int(i) for i in (re.findall("\d+", load_path))])

	if new:
		count = 0
	# use .train() to continue training
	kogpt2model.train()
	vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(vocab_path,
								mask_token=None,
								sep_token=None,
								cls_token=None,
								unknown_token='<unk>',
								padding_token='<pad>',
								bos_token='<s>',
								eos_token='</s>')

	tok_path = get_tokenizer()
	model, vocab = kogpt2model, vocab_b_obj
	sentencepieceTokenizer = SentencepieceTokenizer(tok_path)

	dataset = Read_Dataset(data_file_path, vocab, sentencepieceTokenizer)
	data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)

	learning_rate = 3e-5
	criterion = torch.nn.CrossEntropyLoss()
	optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

	## train
	# vocab.token_to_idx["\n"] = vocab.token_to_idx["<unused0>"]
	# del vocab.token_to_idx["<unused0>"]
	# vocab.token_to_idx["<|endoftext|>"] = vocab.token_to_idx["<unused1>"]
	# del vocab.token_to_idx["<unused1>"]

	model = model.to(ctx)
	tok = SentencepieceTokenizer(tok_path)

	print('KoGPT-2 Transfer Learning Start')
	avg_loss = (0.0, 0.0)
	for epoch in range(epoch):
		for data in data_loader:
			optimizer.zero_grad()
			data = torch.stack(data)  # the batch is a list of Tensors, so stack it into a single Tensor
			data = data.transpose(1,0)
			data = data.to(ctx)
			model = model.to(ctx)

			outputs = model(data, labels=data)
			loss, logits = outputs[:2]
			loss = loss.to(ctx)
			loss.backward()
			avg_loss = (avg_loss[0] * 0.99 + loss, avg_loss[1] * 0.99 + 1.0)
			optimizer.step()

			if count % 10 == 0:
				print('epoch no.{0} train no.{1}  loss = {2:.5f} avg_loss = {3:.5f}' . format(epoch, count, loss, avg_loss[0] / avg_loss[1]))
				summary.add_scalar('loss/avg_loss', avg_loss[0] / avg_loss[1], count)
				summary.add_scalar('loss/loss', loss, count)
				# print("save")
				# torch.save({
				# 	'epoch': epoch,
				# 	'train_no': count,
				# 	'model_state_dict': model.state_dict(),
				# 	'optimizer_state_dict': optimizer.state_dict(),
				# 	'loss': loss
				# }, save_path + 'KoGPT2_checkpoint_' + str(count) + '.tar')

				# run the generator
				if (count > 0 and count % 1000 == 0) or (len(data) < batch_size):
					sent = sample_sequence(model.to("cpu"), tok, vocab, sent="성실", text_size=text_size, temperature=0.7, top_p=0.8, top_k=40)
					sent = sent.replace("<unused0>", "\n") # 비효율적이지만 엔터를 위해서 등장
					sent = auto_enter(sent)
					print(sent)
					summary.add_text('Text', sent, count)
					del sent
					pass

			#########################################
			if (count > 0 and count % 18500 == 0):
				# save the model
				try:
					torch.save({
						'epoch': epoch,
						'train_no': count,
						'model_state_dict': model.state_dict(),
						'optimizer_state_dict': optimizer.state_dict(),
						'loss': loss
					}, save_path + 'KoGPT2_checkpoint_' + str(count) + '.tar')
				except:
					pass
			count += 1
Example #7
def main(temperature=0.7,
         top_p=0.8,
         top_k=40,
         tmp_sent="",
         text_size=100,
         loops=-1,
         load_path='./checkpoint/KoGPT2_checkpoint_long.tar',
         ctx='cuda',
         cachedir='~/kogpt2/',
         samples="./gdrive/My Drive/KoGPT2-FineTuning_pre/samples/"):
    pytorch_kogpt2 = {
        'url':
        'https://kobert.blob.core.windows.net/models/kogpt2/pytorch/pytorch_kogpt2_676e9bcfa7.params',
        'fname': 'pytorch_kogpt2_676e9bcfa7.params',
        'chksum': '676e9bcfa7'
    }

    kogpt2_config = {
        "initializer_range": 0.02,
        "layer_norm_epsilon": 1e-05,
        "n_ctx": 1024,
        "n_embd": 768,
        "n_head": 12,
        "n_layer": 12,
        "n_positions": 1024,
        "vocab_size": 50000
    }

    model_info = pytorch_kogpt2
    model_path = download(model_info['url'],
                          model_info['fname'],
                          model_info['chksum'],
                          cachedir=cachedir)

    vocab_info = tokenizer
    vocab_path = download(vocab_info['url'],
                          vocab_info['fname'],
                          vocab_info['chksum'],
                          cachedir=cachedir)

    device = torch.device(ctx)

    # Load the saved checkpoint
    checkpoint = torch.load(load_path, map_location=device)

    # Declare a GPT2LMHeadModel for the KoGPT-2 language model
    kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))
    kogpt2model.load_state_dict(checkpoint['model_state_dict'])

    kogpt2model.eval()
    vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(
        vocab_path,
        mask_token=None,
        sep_token=None,
        cls_token=None,
        unknown_token='<unk>',
        padding_token='<pad>',
        bos_token='<s>',
        eos_token='</s>')

    tok_path = get_tokenizer()

    model, vocab = kogpt2model, vocab_b_obj
    tok = SentencepieceTokenizer(tok_path)

    try:
        load_path.split("/")[-2]
    except:
        print("path error")
    else:
        load_path = load_path.split("/")[-2]

    print("ok : ", load_path)
    while (True):
        sent = input()
        make_sentence(model, tok, vocab, sent, text_size, temperature, top_p,
                      top_k, loops)
Example #8
def main(temperature=0.7,
         top_p=0.8,
         top_k=40,
         tmp_sent="",
         text_size=100,
         loops=-1,
         load_path='./checkpoint/KoGPT2_checkpoint_long.tar',
         ctx='cuda',
         cachedir='~/kogpt2/',
         samples="./gdrive/My Drive/KoGPT2-FineTuning_pre/samples/"):

    pytorch_kogpt2 = {
        'url':
        'https://kobert.blob.core.windows.net/models/kogpt2/pytorch/pytorch_kogpt2_676e9bcfa7.params',
        'fname': 'pytorch_kogpt2_676e9bcfa7.params',
        'chksum': '676e9bcfa7'
    }

    kogpt2_config = {
        "initializer_range": 0.02,
        "layer_norm_epsilon": 1e-05,
        "n_ctx": 1024,
        "n_embd": 768,
        "n_head": 12,
        "n_layer": 12,
        "n_positions": 1024,
        "vocab_size": 50000
    }

    model_info = pytorch_kogpt2
    model_path = download(model_info['url'],
                          model_info['fname'],
                          model_info['chksum'],
                          cachedir=cachedir)

    vocab_info = tokenizer
    vocab_path = download(vocab_info['url'],
                          vocab_info['fname'],
                          vocab_info['chksum'],
                          cachedir=cachedir)

    device = torch.device(ctx)

    # Load the saved checkpoint
    checkpoint = torch.load(load_path, map_location=device)

    # Declare a GPT2LMHeadModel for the KoGPT-2 language model
    kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))
    kogpt2model.load_state_dict(checkpoint['model_state_dict'])

    kogpt2model.eval()
    vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(
        vocab_path,
        mask_token=None,
        sep_token=None,
        cls_token=None,
        unknown_token='<unk>',
        padding_token='<pad>',
        bos_token='<s>',
        eos_token='</s>')

    tok_path = get_tokenizer()

    model, vocab = kogpt2model, vocab_b_obj
    tok = SentencepieceTokenizer(tok_path)
    num = 0

    if loops:
        num = 1
    else:
        num = 0

    try:
        load_path.split("/")[-2]
    except:
        pass
    else:
        load_path = load_path.split("/")[-2]

    print("weight load - ", load_path)

    while 1:
        sent = ''
        if tmp_sent == "":
            tmp_sent = input('input : ')
        sent = sent + tmp_sent

        toked = tok(sent)

        if len(toked) > 1022:
            break

        # actual generation code: take only the top-x entries from the vocabulary
        sent = sample_sequence(model, tok, vocab, sent, text_size, temperature,
                               top_p, top_k)

        sent = sent.replace("//", "\n")  # 비효율적이지만 엔터를 위해서 등장
        sent = sent.replace("</s>", "")
        sent = auto_enter(sent)
        print(sent)  # output

        now = [int(n) for n in os.listdir(samples + load_path)]

        try:
            now = max(now)
        except:
            now = 1

        # f = open(samples + load_path + "/" + str(now + 1), 'w', encoding="utf-8")

        # head = [load_path, tmp_sent, text_size, temperature, top_p, top_k]
        # head = [str(h) for h in head]
        # f.write(",".join(head))
        # f.write(",")
        # f.write(sent)
        # f.close()

        #tmp_sent = ""

        if num != 0:
            num += 1
            if num >= loops:
                print("good")
                return
Example #9
def main(epoch=200,
         save_path='./checkpoint/',
         load_path='./checkpoint/KoGPT2_checkpoint_long.tar',
         data_file_path='dataset/lyrics_dataset.txt',
         batch_size=8,
         summary_url='runs/',
         new=0,
         text_size=100):
    ctx = 'cuda'
    cachedir = '~/kogpt2/'
    summary = SummaryWriter(summary_url)

    pytorch_kogpt2 = {
        'url':
        'https://kobert.blob.core.windows.net/models/kogpt2/pytorch/pytorch_kogpt2_676e9bcfa7.params',
        'fname': 'pytorch_kogpt2_676e9bcfa7.params',
        'chksum': '676e9bcfa7'
    }
    kogpt2_config = {
        "initializer_range": 0.02,
        "layer_norm_epsilon": 1e-05,
        "n_ctx": 1024,
        "n_embd": 768,
        "n_head": 12,
        "n_layer": 12,
        "n_positions": 1024,
        "vocab_size": 50000
    }

    # download model
    model_info = pytorch_kogpt2
    model_path = download(model_info['url'],
                          model_info['fname'],
                          model_info['chksum'],
                          cachedir=cachedir)
    # download vocab
    vocab_info = tokenizer
    vocab_path = download(vocab_info['url'],
                          vocab_info['fname'],
                          vocab_info['chksum'],
                          cachedir=cachedir)

    # Declare a GPT2LMHeadModel for the KoGPT-2 language model
    kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))

    # Load the weights downloaded to model_path via load_state_dict
    # (updates the parameters taken from the base model)
    kogpt2model.load_state_dict(torch.load(model_path))

    device = torch.device(ctx)  #GPU
    kogpt2model.to(device)
    count = 0

    # Checkpoint loading section
    try:
        checkpoint = torch.load(load_path, map_location=device)

        # Declare a GPT2LMHeadModel for the KoGPT-2 language model
        kogpt2model = GPT2LMHeadModel(
            config=GPT2Config.from_dict(kogpt2_config))
        kogpt2model.load_state_dict(checkpoint['model_state_dict'])

        kogpt2model.eval()
    except:
        print("count 0 : ", load_path)
    else:
        print("count check : ", re.findall("\d+", load_path))
        count = max([int(i) for i in (re.findall("\d+", load_path))])

    if new:
        count = 0

    # use .train() to continue training
    kogpt2model.train()
    vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(
        vocab_path,
        mask_token=None,
        sep_token=None,
        cls_token=None,
        unknown_token='<unk>',
        padding_token='<pad>',
        bos_token='<s>',
        eos_token='</s>')

    tok_path = get_tokenizer()
    model, vocab = kogpt2model, vocab_b_obj
    sentencepieceTokenizer = SentencepieceTokenizer(tok_path)

    # Load our dataset
    dataset = Read_Dataset(data_file_path, vocab, sentencepieceTokenizer)
    data_loader = DataLoader(dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             pin_memory=True)

    # check
    learning_rate = 3e-5
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    model = model.to(ctx)
    # With BPE, splitting and re-joining tokens becomes easier.
    tok = SentencepieceTokenizer(tok_path)

    print('KoGPT-2 Transfer Learning Start')

    # Create a per-genre checkpoint folder if it does not exist
    try:
        if not (os.path.isdir(save_path + data_file_path.split("/")[-1][:-4])):
            os.makedirs(
                os.path.join(save_path + data_file_path.split("/")[-1][:-4]))
    except OSError as e:
        if e.errno != errno.EEXIST:
            print("Failed to create directory!!!!!")
            raise

    avg_loss = (0.0, 0.0)
    for epoch in range(epoch):
        # Fetch the dataset and start training
        for datas in data_loader:
            data = datas[0]

            optimizer.zero_grad()
            data = torch.stack(
                data)  # the batch is a list of Tensors, so stack it into a single Tensor
            data = data.transpose(1, 0)
            data = data.to(ctx)
            model = model.to(ctx)

            # actual training step
            outputs = model(data, labels=data)
            loss, logits = outputs[:2]

            nowloss = copy.copy(loss)
            # build a running average: avg_loss[0] / avg_loss[1] is the normalized loss
            avg_loss = (avg_loss[0] * 0.99 + loss, avg_loss[1] * 0.99 + 1.0)

            loss *= datas[2][0]  # weight the loss by the special score

            loss = loss.to(ctx)
            loss.backward()

            # end of the training step
            optimizer.step()

            if count % 10 == 0:
                print(
                    'epoch no.{0} train no.{1}  loss = {2:.5f} avg_loss = {3:.5f}'
                    .format(epoch, count, loss, avg_loss[0] / avg_loss[1]))
                summary.add_scalar('loss/avg_loss', avg_loss[0] / avg_loss[1],
                                   count)
                summary.add_scalar('loss/loss', loss, count)
                # print("save")
                # torch.save({
                # 	'epoch': epoch,
                # 	'train_no': count,
                # 	'model_state_dict': model.state_dict(),
                # 	'optimizer_state_dict': optimizer.state_dict(),
                # 	'loss': loss
                # }, save_path + 'KoGPT2_checkpoint_' + str(count) + '.tar')

                # run the generator
                if (count > 0 and count % 2500 == 0):
                    sent = sample_sequence(model.to("cpu"),
                                           tok,
                                           vocab,
                                           sent="가",
                                           text_size=text_size,
                                           temperature=0.7,
                                           top_p=0.9,
                                           top_k=100)
                    sent = sent.replace("//", "\n")  # 비효율적이지만 엔터를 위해서 등장
                    sent = auto_enter(sent)
                    print(sent)
                    summary.add_text('Text', sent, count)
                    del sent
                    pass

            #########################################
            if (count > 0 and count % 10000 == 0):
                print("모델을 저장합니다.")
                # save the model
                try:
                    torch.save(
                        {
                            'epoch': epoch,
                            'train_no': count,
                            'model_state_dict': model.state_dict(),
                            'optimizer_state_dict': optimizer.state_dict(),
                            'loss': loss
                        }, save_path + data_file_path.split("/")[-1][:-4] +
                        '/' + 'KoGPT2_checkpoint_' + str(count) + '.tar')

                    #print("문제 시작")

                    # save to Dropbox
                    large_file = open(
                        save_path + data_file_path.split("/")[-1][:-4] + '/' +
                        'KoGPT2_checkpoint_' + str(count) + '.tar', 'rb')

                    names = 'KoGPT2_checkpoint_' + str(count) + '.tar'

                    # save under the genre/checkpoint path
                    large_file_path = '/' + data_file_path.split(
                        "/")[-1][:-4] + '/' + names

                    #print("문제 시작2")

                    CHUNK_SIZE = 1024 * 1024 * 150

                    chunk = large_file.read(CHUNK_SIZE)
                    session_info = dbx.files_upload_session_start(chunk)
                    cursor = dropbox.files.UploadSessionCursor(
                        session_id=session_info.session_id,
                        offset=large_file.tell(),
                    )

                    print("문제 시작3")
                    # loop to upload the remaining chunks
                    while True:
                        chunk = large_file.read(CHUNK_SIZE)

                        if not chunk:
                            dbx.files_upload_session_finish(
                                b'',
                                dropbox.files.UploadSessionCursor(
                                    session_id=session_info.session_id,
                                    offset=large_file.tell(),
                                ),
                                dropbox.files.CommitInfo(
                                    large_file_path,
                                    dropbox.files.WriteMode('add'),
                                ),
                            )
                            break
                        else:
                            # append the remaining data after chunking
                            dbx.files_upload_session_append_v2(chunk, cursor)
                            cursor.offset = large_file.tell()
                    logger.warning('학습한 모델 파일 업로드 완료')

                    #print("문제 시작4")

                    # print the folders/files that exist under the access-token folder
                    logger.warning('대용량 파일 업로드 후 폴더/파일 목록:')
                    for entry in dbx.files_list_folder('').entries:
                        logger.warning("\t" + entry.name)

                    # delete the local file
                    #print("문제 시작5")
                    os.remove(save_path + data_file_path.split("/")[-1][:-4] +
                              '/' + 'KoGPT2_checkpoint_' + str(count) + '.tar')

                    # empty the trash
                    #print("문제 시작6")
                    logging.getLogger('googleapiclient.discovery').setLevel(
                        logging.CRITICAL)

                    for a_file in my_drive.ListFile({
                            'q': "trashed = true"
                    }).GetList():
                        a_file.Delete()

                except:
                    pass

            if avg_loss[0] / avg_loss[1] < 1.0:
                print("학습이 끝났어용!!")
                print("모델을 저장합니다.")
                # save the model
                #try:
                torch.save(
                    {
                        'epoch': epoch,
                        'train_no': count,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'loss': loss
                    }, save_path + data_file_path.split("/")[-1][:-4] + '/' +
                    'KoGPT2_checkpoint_' + str(count) + '.tar')

                #print("문제 시작")

                # save to Dropbox
                large_file = open(
                    save_path + data_file_path.split("/")[-1][:-4] + '/' +
                    'KoGPT2_checkpoint_' + str(count) + '.tar', 'rb')

                names = 'KoGPT2_checkpoint_' + str(count) + '.tar'

                # save under the genre/checkpoint path
                large_file_path = '/' + data_file_path.split(
                    "/")[-1][:-4] + '/' + names

                #print("문제 시작2")

                CHUNK_SIZE = 1024 * 1024 * 150

                chunk = large_file.read(CHUNK_SIZE)
                session_info = dbx.files_upload_session_start(chunk)
                cursor = dropbox.files.UploadSessionCursor(
                    session_id=session_info.session_id,
                    offset=large_file.tell(),
                )

                print("문제 시작3")
                # loop to upload the remaining chunks
                while True:
                    chunk = large_file.read(CHUNK_SIZE)

                    if not chunk:
                        dbx.files_upload_session_finish(
                            b'',
                            dropbox.files.UploadSessionCursor(
                                session_id=session_info.session_id,
                                offset=large_file.tell(),
                            ),
                            dropbox.files.CommitInfo(
                                large_file_path,
                                dropbox.files.WriteMode('add'),
                            ),
                        )
                        break
                    else:
                        # append the remaining data after chunking
                        dbx.files_upload_session_append_v2(chunk, cursor)
                        cursor.offset = large_file.tell()
                logger.warning('학습한 모델 파일 업로드 완료')

                #print("문제 시작4")

                # print the folders/files that exist under the access-token folder
                logger.warning('대용량 파일 업로드 후 폴더/파일 목록:')
                for entry in dbx.files_list_folder('').entries:
                    logger.warning("\t" + entry.name)

                # delete the local file
                #print("문제 시작5")
                os.remove(save_path + data_file_path.split("/")[-1][:-4] +
                          '/' + 'KoGPT2_checkpoint_' + str(count) + '.tar')

                # empty the trash
                #print("문제 시작6")
                logging.getLogger('googleapiclient.discovery').setLevel(
                    logging.CRITICAL)

                for a_file in my_drive.ListFile({
                        'q': "trashed = true"
                }).GetList():
                    a_file.Delete()

                return

            count += 1
Example #10
def main(epoch = 200, save_path = './checkpoint/', load_path = './checkpoint/KoGPT2_checkpoint_long.tar',
		 data_file_path = 'dataset/lyrics_dataset.txt',
		 batch_size = 8, summary_url = 'runs/', new = 0, text_size = 100):
	ctx = 'cuda'
	cachedir = '~/kogpt2/'
	summary = SummaryWriter(summary_url)

	pytorch_kogpt2 = {
		'url': 'https://kobert.blob.core.windows.net/models/kogpt2/pytorch/pytorch_kogpt2_676e9bcfa7.params',
		'fname': 'pytorch_kogpt2_676e9bcfa7.params',
		'chksum': '676e9bcfa7'
	}
	kogpt2_config = {
		"initializer_range": 0.02,
		"layer_norm_epsilon": 1e-05,
		"n_ctx": 1024,
		"n_embd": 768,
		"n_head": 12,
		"n_layer": 12,
		"n_positions": 1024,
		"vocab_size": 50000
	}

	# download model
	model_info = pytorch_kogpt2
	model_path = download(model_info['url'],
						   model_info['fname'],
						   model_info['chksum'],
						   cachedir=cachedir)
	# download vocab
	vocab_info = tokenizer
	vocab_path = download(vocab_info['url'],
						   vocab_info['fname'],
						   vocab_info['chksum'],
						   cachedir=cachedir)

	# Declare a GPT2LMHeadModel for the KoGPT-2 language model
	kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))

	# Load the weights downloaded to model_path via load_state_dict
	# (updates the parameters taken from the base model)
	kogpt2model.load_state_dict(torch.load(model_path))

	device = torch.device(ctx) #GPU
	kogpt2model.to(device)
	count = 0

	# Checkpoint loading section
	try:
		checkpoint = torch.load(load_path, map_location=device)

		# Declare a GPT2LMHeadModel for the KoGPT-2 language model
		kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))
		kogpt2model.load_state_dict(checkpoint['model_state_dict'])

		kogpt2model.eval()
	except:
		print("count 0 : ", load_path)
	else:
		print("count check : ",re.findall("\d+", load_path))
		count = max([int(i) for i in (re.findall("\d+", load_path))])

	if new:
		count = 0

	# use .train() to continue training
	kogpt2model.train()
	vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(vocab_path,
														 mask_token=None,
														 sep_token=None,
														 cls_token=None,
														 unknown_token='<unk>',
														 padding_token='<pad>',
														 bos_token='<s>',
														 eos_token='</s>')

	tok_path = get_tokenizer()
	model, vocab = kogpt2model, vocab_b_obj
	sentencepieceTokenizer = SentencepieceTokenizer(tok_path)

	# Load our dataset
	dataset = Read_Dataset(data_file_path, vocab, sentencepieceTokenizer)
	data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)

	# check
	learning_rate = 3e-5
	criterion = torch.nn.CrossEntropyLoss()
	optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

	model = model.to(ctx)
	# With BPE, splitting and re-joining tokens becomes easier.
	tok = SentencepieceTokenizer(tok_path)

	print('KoGPT-2 Transfer Learning Start')

	# Create a per-genre checkpoint folder if it does not exist
	try:
		if not(os.path.isdir(save_path + data_file_path.split("/")[-1][:-4])):
			os.makedirs(os.path.join(save_path + data_file_path.split("/")[-1][:-4]))
	except OSError as e:
		if e.errno != errno.EEXIST:
			print("Failed to create directory!!!!!")
			raise
	
	avg_loss = (0.0, 0.0)
	for epoch in range(epoch):
		# Fetch the dataset and start training
		for datas in data_loader:
			data = datas[0]

			optimizer.zero_grad()
			data = torch.stack(data)  # the batch is a list of Tensors, so stack it into a single Tensor
			data = data.transpose(1,0)
			data = data.to(ctx)
			model = model.to(ctx)

			# actual training step
			outputs = model(data, labels=data)
			loss, logits = outputs[:2]

			nowloss = copy.copy(loss)
			# build a running average: avg_loss[0] / avg_loss[1] is the normalized loss
			avg_loss = (avg_loss[0] * 0.99 + loss, avg_loss[1] * 0.99 + 1.0)

			loss *= datas[2][0]  # weight the loss by the special score

			loss = loss.to(ctx)
			loss.backward()

			# end of the training step
			optimizer.step()

			if count % 10 == 0:
				print('epoch no.{0} train no.{1}  loss = {2:.5f} avg_loss = {3:.5f}' . format(epoch, count, loss, avg_loss[0] / avg_loss[1]))
				summary.add_scalar('loss/avg_loss', avg_loss[0] / avg_loss[1], count)
				summary.add_scalar('loss/loss', loss, count)
				# print("save")
				# torch.save({
				# 	'epoch': epoch,
				# 	'train_no': count,
				# 	'model_state_dict': model.state_dict(),
				# 	'optimizer_state_dict': optimizer.state_dict(),
				# 	'loss': loss
				# }, save_path + 'KoGPT2_checkpoint_' + str(count) + '.tar')

				# run the generator
				if (count > 0 and count % 2500 == 0):
					sent = sample_sequence(model.to("cpu"), tok, vocab, sent="가", text_size=text_size, temperature=0.7, top_p=0.9, top_k=100)
					sent = sent.replace("//", "\n") # 비효율적이지만 엔터를 위해서 등장
					sent = auto_enter(sent)
					print(sent)
					summary.add_text('Text', sent, count)
					del sent
					pass

			#########################################
			if (count > 0 and count % 10000 == 0):
				print("모델을 저장합니다.")
				# save the model
				try:
					torch.save({
						'epoch': epoch,
						'train_no': count,
						'model_state_dict': model.state_dict(),
						'optimizer_state_dict': optimizer.state_dict(),
						'loss': loss
					}, save_path + data_file_path.split("/")[-1][:-4] + '/' + 'KoGPT2_checkpoint_' + str(count) + '.tar')
				except:
					pass

			if avg_loss[0] / avg_loss[1] < 1.0:
				print("학습완료")
				print("모델저장")
				# save the model
				#try:
				torch.save({
					'epoch': epoch,
					'train_no': count,
					'model_state_dict': model.state_dict(),
					'optimizer_state_dict': optimizer.state_dict(),
					'loss': loss
				}, save_path + data_file_path.split("/")[-1][:-4] + '/' + 'KoGPT2_checkpoint_' + str(count) + '.tar')

				return

			count += 1
def main(temperature=0.7,
         top_p=0.8,
         top_k=40,
         tmp_sent="",
         text_size=100,
         loops=0,
         load_path=""):
    ctx = 'cuda'
    cachedir = '~/kogpt2/'
    save_path = './checkpoint/'
    # download model
    model_info = pytorch_kogpt2
    model_path = download(model_info['url'],
                          model_info['fname'],
                          model_info['chksum'],
                          cachedir=cachedir)
    # download vocab
    vocab_info = tokenizer
    vocab_path = download(vocab_info['url'],
                          vocab_info['fname'],
                          vocab_info['chksum'],
                          cachedir=cachedir)
    # Set the device
    device = torch.device(ctx)
    # Load the saved checkpoint
    checkpoint = torch.load(load_path, map_location=device)

    # Declare a GPT2LMHeadModel for the KoGPT-2 language model
    kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))
    kogpt2model.load_state_dict(checkpoint['model_state_dict'])

    kogpt2model.eval()
    vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(
        vocab_path,
        mask_token=None,
        sep_token=None,
        cls_token=None,
        unknown_token='<unk>',
        padding_token='<pad>',
        bos_token='<s>',
        eos_token='</s>')

    tok_path = get_tokenizer()
    model, vocab = kogpt2model, vocab_b_obj
    tok = SentencepieceTokenizer(tok_path)

    if loops:
        num = 1
    else:
        num = 0

    try:
        load_path.split("/")[-2]
    except:
        pass
    else:
        load_path = load_path.split("/")[-2]

    print("ok : ", load_path)

    if not (os.path.isdir("samples/" + load_path)):
        os.makedirs(os.path.join("samples/" + load_path))

    name_list = []

    while 1:
        sent = ''
        if tmp_sent == "":
            tmp_sent = input('input : ')
        sent = sent + tmp_sent

        toked = tok(sent)

        if len(toked) > 1022:
            break

        sent = sample_sequence(model, tok, vocab, sent, text_size, temperature,
                               top_p, top_k)
        sent = sent.replace("//", "\n")  # 비효율적이지만 엔터를 위해서 등장
        sent = sent.replace("</s>", "")
        sent = auto_enter(sent)

        #print(tmp_sent,len(tmp_sent))

        #-- if the output contains a genre word
        ## list of genres to filter out # only idol songs were collected, so the word '아이돌' is kept
        genre = [
            'pop', '팝', '댄스', 'dance', '클럽', 'club', '외힙', '힙합', 'hiphop',
            'hop', '트로트', '일렉', 'rnb', '알앤비', '알엔비', '락', '록', '밴드', 'rock',
            '피아노', '첼로', '바이올린', '연주곡', '뉴에이지', 'newage', 'new age', 'ccm',
            '송가', '재즈', '째즈', 'jazz', '클래식', '트로피칼', '트로피컬', '레게', '여자', '여왕',
            '여성', '걸그룹', '남자', '남성', '보이그룹', '인디', '발라드', '랩', 'rap', '래퍼',
            'ost', '디스코', '동요', '영화', '드라마', '크리스마스', 'christmas', '어쿠스틱',
            'jpop', '일본', '애니', '재지', '헤비메탈', '라틴', '블루스', '펑크', 'funk', '솔로',
            '그룹', '해외', '국내', '리믹스', 'remix', '기타', '신스'
        ]

        ## exclude outputs where a genre name appears: check from the word after the input to the end
        genre_state = 0  # flag for returning to the loop when a genre word is found

        for i in genre:
            if i in sent[len(tmp_sent):len(sent)]:
                #print('genre inside')  # for debugging
                genre_state = 1
                break  # exit the genre-check for loop

        if genre_state == 1:
            continue  # skip the code below

        #-- some words are followed by an artist name - e.g. "pop의 거장 OOO"
        #-- skip anything containing digits (drops decades like "xx년대", suspected error keywords, "x월의", "x탄", "part x", etc.)
        # ᄂ, ᄋ, ᄏ, ᄒ are not 'ㄴ, ㅇ, ㅋ, ㅎ' (they are old-Hangul jamo)
        # plus other odd word/English combinations
        keyword = [
            '거장', '황제', '작곡가', '의 명곡', '의명곡', '트렌드세터', '아티스트', '의 음악', '음악가',
            ' 신', '흐름 ', '숙적', '형님', '누님', '스타', '주도한', '리릭시스트', '가수', '저격수',
            '작사가', '프로듀서', '예찬', '사수', '제왕', '아이콘', '신에서', 'dj', '보컬리스트',
            '마지막', '예찬론자', '레전드', '대가', '신화', '대명사', '대세', '대부', '선구자', '뮤지션',
            '레이블', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '시밤',
            '음악 신나는', '산뜻한 산뜻한', '하아요', '누굴까', 'ss', 'ᄋ', 'ᄒ', 'ᄂ', 'ᄏ', '월',
            '번째', '번 째', '세대', '에브리데이 에브리데이', '탄', 'part', '잘생기', '잘생긴', '땐네',
            '속반', '브랜드', '료를', '이양', 'oo', '싸월드', 'top', '힙존', '미츠', '자세 가을',
            '아른비', '가을뜻', '마다', '탐미', '카페의상', '오는몰래', '카페 cafe', '클래시아', '의요한',
            '사운드로', '4대', '람덤', '수놓', 'nct', 'exo', '엑소', '4요', 'uture', '쿠방',
            'tkdgy', 'nbn', 'ns', 'am', '쿨다운', '퇴근길 브런치', '포레스트 캠프', '에ck',
            '0분', '할로윈', '우리에', '잘톤', '시간에', '주는돈', '우리꺼', '런치여행', '여친', '남친',
            '한번쯤쿵', '시절이송', 'oul', '죽임', '죽이', '비붐', '이기는', '노바', '슬슬장',
            '고도듯한', '위에서뻥', '모음2', '모음1', '느낌 여유', '안좋은', '도색', '시부야', '림을',
            '리지웃', '합의', 'kg', '노래 신나는', '플레이야', '계의', '세습', '째줄', '후회노',
            '노매장', '렉하', '리듬보컬', '악동', '하루부터곡', '사운드의 하면', '의하면', '준억', '예후',
            '숙명', '꺼내듣기', '보자', '줄거리', '사골국', 'trance', '사이키델릭', '충만한바람',
            '주는옴', '의가의', '동양', '사홍길', '의든', 'luv song', 'new york', '루츠',
            '세터지는', '영국민', 'no', 'le', 'ed', 'es', 'er', 'el', 'ey', 'la',
            '지ist', '가의길', '사의길', '가즈아', '마음 메탈', '르진', '의진', '드를', '이상업',
            '망향', '파이', 'x', '의경', '아이 ', '준넘', '으로우는', '문신', '맨오브', '의에서',
            'aomg', '이자칫', '반렛', '가보쟈', '업타운', ' 외', '에칠', '집성', '콜라보 ',
            '을 미치게', '을싸', '념지', '끌어낸 ', '단이', '처음다는', '태호', '댄서블', '가명', '드는',
            '드루와', '꺼내버림', '잠든', '총포', '내빈다', '씬을', '전그', '그라운드'
        ]

        ## exclude outputs where one of the keywords above appears: check from the word after the input to the end
        keyword_state = 0  # flag for returning to the loop when a keyword is found

        for i in keyword:
            if i in sent[len(tmp_sent):len(sent)]:
                #print('keyword inside', sent)  # for debugging
                keyword_state = 1
                break  # exit the keyword-check for loop

        if keyword_state == 1:
            continue  # skip the code below

        # anything containing a digit is already excluded above, so the check below is disabled
        '''
        #-- occasionally the last character is a digit, e.g. "신나는 음악모음 2"; remove it
        for i in range(0, len(sent)):
            try:
                int(sent[len(sent)-1])  # if the last character converts to int without error, it is a digit
                sent = sent[:len(sent)-1]  # remove the digit
                #print('del num')  # for debugging
            except:
                break  # stop checking
        '''

        #-- replace newlines or <unk> in the playlist name with a space
        sent = sent.replace('\n', ' ').replace('<unk>', ' ')

        #-- collapse double spaces into a single space
        sent = sent.replace('  ', ' ')

        #-- strip a trailing space
        if (sent[len(sent) - 1] == ' '):  # if the last character is a space
            sent = sent[:len(sent) - 1]  # remove it

        #-- do not generate duplicate names
        if sent in name_list:
            continue  # skip the code below

        # not a duplicate -> append to the list
        name_list.append(sent)
        print(sent)

        #
        now = [int(n) for n in os.listdir("./samples/" + load_path)]

        try:
            now = max(now)
        except:
            now = 1

        f = open("samples/" + load_path + "/" + str(now + 1),
                 'w',
                 encoding="utf-8")

        head = [load_path, tmp_sent, text_size, temperature, top_p, top_k]
        head = [str(h) for h in head]
        f.write(",".join(head))
        f.write(",")
        f.write(sent)
        f.close()

        #tmp_sent = ""

        if num != 0:
            num += 1
            if num >= loops:
                print("good")
                return