testSimilarByWord('湖')


def testCi():
    testSimilarByWord("道路")
    testSimilarByWord("道家")
    testSimilarByWord("道行")


def testVocab():
    word = "月"
    print("-5k-")
    print(tools.getSimilarResult(model, word, vocab=5000))
    print("-10k-")
    print(tools.getSimilarResult(model, word, vocab=10000))
    print("-20k-")
    print(tools.getSimilarResult(model, word, vocab=20000))


if __name__ == "__main__":

    model = tools.loadModel('sgns/sgns.literature.bigram-char.txt')
    print("Lit model loaded")
    # testCi()
    # testZi()
    testVocab()

else:
    model = tools.loadModel('sgns/sgns.literature.bigram-char.txt')
    print("LIT[word] Model loaded")
# The imports and parser construction above '--gpu' are truncated in this excerpt;
# they are reconstructed below (the -1 default for CPU is assumed).
import argparse
import numpy as np
from chainer import cuda
import tools

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', type=int, default=-1,
                    help='GPU ID (negative value indicates CPU)')
parser.add_argument('--input_num', '-in', type=int, default=30,
                    help='input num')
parser.add_argument('--next_day', '-nd', type=int, default=5,
                    help='predict next day')
parser.add_argument('--smooth_term', '-st', type=int, default=30,
                    help='time period to smooth accuracy')
parser.add_argument('--experiment_name', '-n', type=str, default='experiment',
                    help='experiment name')

args = parser.parse_args()

input_num = args.input_num

# Load the models
model_1 = tools.loadModel('./train_result/20160523_2_vol2ema30class/final_model')
model_2 = tools.loadModel('./train_result/20160523_3_volrsistoch30class/final_model')

if args.gpu >= 0:
    cuda.check_cuda_available()
    print("use gpu")

    model_1.to_gpu()
    model_2.to_gpu()

# xp is the array module (cupy on GPU, numpy on CPU), so later code is device-agnostic.
xp = cuda.cupy if args.gpu >= 0 else np
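# Back-test configuration: START_TEST_DAY is a YYYYMMDD date (2009-01-05) and
# NEXT_DAY is the prediction horizon in days, taken from --next_day.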
    
START_TEST_DAY = 20090105
#START_TEST_DAY = 20100104
NEXT_DAY = args.next_day
Example #3
    
    batch = np.array(listbatch).astype(np.float32)
    try:
        # Everything before the last (output_num + 2) columns is input; the next
        # output_num columns are the targets; the final 2 columns are dropped.
        x_batch = batch[:, :-output_num-2]
        y_batch = batch[:, -output_num-2:-2]
    except Exception:
        print(batch.shape)
        print("error!")
        input()  # pause so the malformed batch can be inspected
    return x_batch, y_batch


model = tools.loadModel(args.modelpath)
if args.gpu >= 0:
    cuda.check_cuda_available()
    print("use gpu")
    model.to_gpu()

# Define xp outside the GPU branch so the CPU path also has an array module.
xp = cuda.cupy if args.gpu >= 0 else np

# Count the number of samples (lines) in the training and test files.
N = sum(1 for line in open(trainfile))
print('N =', N)
N_test = sum(1 for line in open(testfile))
print('N_test =', N_test)

sum_loss = 0
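# Scan the training data in minibatches of args.batchsize rows; read_batch2
# returns the (x_batch, y_batch) arrays for the given row indices.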
for i in range(0, N, args.batchsize):
    print('checking train data...', i, '/', N)
    batch = read_batch2(trainfile, range(i, i + args.batchsize))
Example #4
            break

        if newText not in history:
            break
    history += newText
    if len(history) % 100 == 0:
        print(len(history))

    # Recurse until the accumulated history reaches 4744 characters, then print the final chain.
    if len(history) < 4744:
        chainVisualSimilarity(newText, history, limit=limit)
    else:
        print(len(history), history)


def testRelational():
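    # Analogy-style queries: assuming a gensim-like most_similar() API, words are
    # ranked by similarity to the sum of the positive vectors minus the negative ones.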
    result = model.most_similar('暖')
    print(result)
    result = model.most_similar(negative=['火'])
    print(result)
    result2 = model.most_similar(positive=['体', '本'], negative=['固'])
    print(result2)


if __name__ == "__main__":
    model = tools.loadModel('../embeddings/VC/v3.2/v3.2_embeddings_ep19.txt')
    print(tools.getAnnoyIndex(model, '情'))

else:
    model = tools.loadModel('../embeddings/VC/v3.2/v3.2_embeddings_ep19.txt')
    print("VISUAL Model loaded")
Example #5
# -*- coding: utf-8 -*-
"""
@author: cqx931
2019
"""
import tools
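# Quick similarity / vocabulary-limit checks against pretrained SGNS embeddings
# (the model files are loaded below via tools.loadModel).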


def testVocab():
    word = "情"
    print(tools.getSimilarResult(model, word, vocab=5000))
    print(tools.getSimilarResult(model, word, vocab=3000))


if __name__ == "__main__":
    model = tools.loadModel('sgns/sgns.sikuquanshu.bigram.txt')
    print(tools.testSimilarByWord(model, '品'))
    # testVocab()

else:
    model = tools.loadModel('zi/zi_embeddingsgns.literature.bigram-char.txt')
    print("LIT Model loaded")