import kenlm

# Path to the language model trained with KenLM.
# Fixed typo: 'lm.apra' -> 'lm.arpa' (ARPA is the standard LM file format/extension).
LM = 'lm.arpa'

# Load the language model.
model = kenlm.LanguageModel(LM)

# Show the order N of the N-gram model.
print('{0}-gram model'.format(model.order))
该 API 可以用于对一整句话进行打分,获取对应的分数。
# Score an entire (space-tokenized) sentence with the language model.
sentence = '今 天 天 气 真 不 错'
print(sentence, model.score(sentence))
输出结果:
用于查看一句话中每个 token 的分数,但这个 API 的使用前提是你已经获取了这一整句话。
# Show per-token log10 scores and the length of the n-gram that matched.
# words is the sentence wrapped with begin/end-of-sentence markers so that
# words[i + 1] is the token scored at step i of full_scores().
words = ['<s>'] + sentence.split() + ['</s>']
for i, (prob, length, oov) in enumerate(model.full_scores(sentence)):
    # Print the score, the matched n-gram order, and the matched context window.
    print('{0} {1}: {2}'.format(prob, length, ' '.join(words[i + 2 - length:i + 2])))
    if oov:
        print('\t"{0}" is an OOV'.format(words[i + 1]))

# Find out-of-vocabulary words.
for w in words:
    # Idiomatic membership test (was: `if not w in model`).
    if w not in model:
        print('"{0}" is an OOV'.format(w))
输出结果:
由于这里的分数都取log了,所以将每个字的分数相加就是整句话的分数,与model.score()得到的结果一致。
$$
\begin{aligned}
\log P(S) &= \log\big(P(S_1 \mid S_0)\, P(S_2 \mid S_0 S_1)\, P(S_3 \mid S_0 S_1 S_2) \cdots P(S_n \mid S_0 \dots S_{n-1})\big) \\
&= \sum_{i=1}^{n} \log P(S_i \mid S_0 \dots S_{i-1})
\end{aligned}
$$
在实际解码过程中,一般使用自回归方法每个token依次出来,使用model.BaseScore()接口可以对预测的下一个token进行打分。
model.BaseScore(pre_state, token, cur_state)
# Autoregressive scoring: feed tokens one at a time with BaseScore,
# threading the KenLM state from one step to the next.
prev_state = kenlm.State()
model.BeginSentenceWrite(prev_state)  # seed the state with the <s> context
for token in sentence.split(' '):
    next_state = kenlm.State()
    print(token, model.BaseScore(prev_state, token, next_state))
    prev_state = next_state

# Finally, score the end-of-sentence marker from the last state.
final_state = kenlm.State()
print('</s>', model.BaseScore(prev_state, '</s>', final_state))
输出结果: