plda source code (12)
LightLDA
The original Gibbs sampling distribution is:
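Writing n_dk for the number of tokens in document d assigned to topic k, n_kw for the number of times word w is assigned to topic k, n_k for the total number of tokens assigned to topic k, and V for the vocabulary size (notation introduced here for exposition), the full conditional for token i of document d is

$$p(z_{di}=k \mid \mathrm{rest}) \propto (n_{dk}^{\neg di} + \alpha)\cdot\frac{n_{kw}^{\neg di} + \beta}{n_{k}^{\neg di} + V\beta}$$

where the superscript ¬di means the counts exclude the current token. ComputeProbForK below evaluates exactly this expression.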
AliasLDA
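AliasLDA expands the (n_dk^¬di + α) factor of the conditional above and splits it into two additive buckets:

$$p(z_{di}=k \mid \mathrm{rest}) \propto \underbrace{n_{dk}^{\neg di}\cdot\frac{n_{kw}^{\neg di}+\beta}{n_{k}^{\neg di}+V\beta}}_{\text{doc-topic}} + \underbrace{\alpha\cdot\frac{n_{kw}^{\neg di}+\beta}{n_{k}^{\neg di}+V\beta}}_{\text{topic-word}}$$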
The second term can be viewed as a "topic-word" bucket that does not depend on the document. This term can be sampled in amortized O(1) time by combining an Alias Table with Metropolis-Hastings (a Monte Carlo sampling method). The Alias Table was introduced in the previous article.
LightLDA
The first factor acts as the doc-proposal and the second as the word-proposal, as shown below.
As in AliasLDA, drawing from either cheap proposal alone must be corrected with a Metropolis-Hastings accept/reject step.
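Concretely, LightLDA factorizes the full conditional as a product rather than a sum:

$$p(z_{di}=k) \propto \underbrace{(n_{dk}+\alpha)}_{q_{doc}(k)} \cdot \underbrace{\frac{n_{kw}+\beta}{n_{k}+V\beta}}_{q_{word}(k)}$$

Each factor on its own can be sampled in O(1): the doc-proposal by reusing the topic of a random token of the document (or a uniform topic, for the α mass), and the word-proposal from an alias table. The mismatch with the true conditional is then corrected by the MH acceptance step.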
doc-proposal
Acceptance probability
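For a doc-proposal move from the current topic s to the proposed topic t, the MH acceptance probability works out to

$$\pi_{doc}(s\to t)=\min\left(1,\ \frac{p(t)\,q_{doc}(s)}{p(s)\,q_{doc}(t)}\right)=\min\left(1,\ \frac{(n_{dt}^{\neg di}+\alpha)(n_{tw}^{\neg di}+\beta)(n_{s}^{\neg di}+V\beta)(n_{ds}+\alpha)}{(n_{ds}^{\neg di}+\alpha)(n_{sw}^{\neg di}+\beta)(n_{t}^{\neg di}+V\beta)(n_{dt}+\alpha)}\right)$$

In the code below, temp_old and temp_new are p(s) and p(t) computed by ComputeProbForK with the ¬di adjustment, while prop_old and prop_new are the proposal weights q_doc(s) and q_doc(t) without the adjustment.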
int K = model_->num_topics();
double Kalpha = K * alpha_;  // assuming Kalpha is K * alpha_, the total smoothing mass
// Normalizer of the doc-proposal: sum_k (n_dk + alpha) = doc_length + K * alpha
double sumPd = document->GetDocumentLength() + Kalpha;
for (...) {
  int w = iterator.Word();
  int topic = iterator.Topic();
  int new_topic;
  int old_topic = topic;
  {
    // 1. Draw a topic from the doc-proposal
    double u = random->RandDouble() * sumPd;
    if (u < document->GetDocumentLength()) {
      // With probability proportional to n_dk: reuse the topic of a uniformly chosen token
      unsigned pos = (unsigned) (u);
      new_topic = document->topics().wordtopics(pos);
    } else {
      // With probability proportional to K * alpha: draw a topic uniformly at random
      u -= document->GetDocumentLength();
      u = u / alpha_;
      new_topic = (unsigned short) (u);
    }
    if (topic != new_topic) {
      // 2. Compute the acceptance probability
      int ajustment_old = topic == old_topic ? -1 : 0;
      int ajustment_new = new_topic == old_topic ? -1 : 0;
      double temp_old = ComputeProbForK(document, w, topic, ajustment_old);      // p(s)
      double temp_new = ComputeProbForK(document, w, new_topic, ajustment_new);  // p(t)
      double prop_old = (N_DK(document, topic) + alpha_);      // q_doc(s)
      double prop_new = (N_DK(document, new_topic) + alpha_);  // q_doc(t)
      double acceptance = (temp_new * prop_old) / (temp_old * prop_new);
      // 3. Compare against uniform[0,1]
      if (random->RandDouble() < acceptance) {
        topic = new_topic;
      }
    }
  }  // the word-proposal step below continues in the same loop body
where ComputeProbForK is:
double ComputeProbForK(LDADocument* document, int w, int topic,
                       int ajustment) {
  // Full conditional p(k); 'ajustment' removes the current token's own
  // assignment from the counts (the n^{-di} terms in the formulas above)
  return (N_DK(document, topic) + alpha_ + ajustment)
      * (N_WK(w, topic) + beta_ + ajustment)
      / (N_K(topic) + Vbeta + ajustment);
}
word-proposal
Acceptance probability
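For a word-proposal move from topic s to topic t, the acceptance probability is

$$\pi_{word}(s\to t)=\min\left(1,\ \frac{p(t)\,q_{word}(s)}{p(s)\,q_{word}(t)}\right)$$

where q_word is the (possibly stale) proposal distribution cached in the alias table; in the code these cached weights appear as q[w].w[topic] and q[w].w[new_topic].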
  {
    // 1. Draw a topic from the word-proposal using the alias table of word w
    q[w].noSamples++;
    if (q[w].noSamples > qtable_construct_frequency) {
      // The table is allowed to go stale; rebuild it every qtable_construct_frequency draws
      GenerateQTable(w);
    }
    new_topic = q[w].sample(random->RandInt(K), random->RandDouble());
    if (topic != new_topic) {
      // 2. Compute the acceptance probability
      int ajustment_old = topic == old_topic ? -1 : 0;
      int ajustment_new = new_topic == old_topic ? -1 : 0;
      double temp_old = ComputeProbForK(document, w, topic, ajustment_old);      // p(s)
      double temp_new = ComputeProbForK(document, w, new_topic, ajustment_new);  // p(t)
      // q[w].w[k] is the cached (possibly stale) word-proposal weight q_word(k)
      double acceptance = (temp_new * q[w].w[topic]) / (temp_old * q[w].w[new_topic]);
      // 3. Compare against uniform[0,1]
      if (random->RandDouble() < acceptance) {
        topic = new_topic;
      }
    }
  }
where GenerateQTable is as follows:
void GenerateQTable(unsigned int w) {
  int num_topics = model_->num_topics();
  q[w].wsum = 0.0;
  const TopicDistribution<int32>& word_distribution = model_->GetWordTopicDistribution(w);
  const TopicDistribution<int32>& n_k = model_->GetGlobalTopicDistribution();
  for (int k = 0; k < num_topics; ++k) {
    // Unnormalized word-proposal weight q_word(k) = (n_kw + beta) / (n_k + V*beta)
    q[w].w[k] = (word_distribution[k] + beta_) / (n_k[k] + Vbeta);
    q[w].wsum += q[w].w[k];
  }
  q[w].constructTable();  // build the O(1) alias table from these weights
}
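constructTable and sample are the alias-table operations from the previous article. For reference, here is a minimal sketch of what such a structure could look like (Walker's alias method); the struct name, fields, and layout are illustrative assumptions rather than the actual plda class:

#include <vector>

// Illustrative alias table (Walker's method), not the real plda implementation.
struct QTableSketch {
  std::vector<double> w;     // unnormalized proposal weights q_word(k)
  double wsum = 0.0;         // sum of the weights
  std::vector<double> prob;  // per-bin probability of keeping the bin's own topic
  std::vector<int> alias;    // per-bin fallback topic
  int noSamples = 0;

  void constructTable() {
    const int K = static_cast<int>(w.size());
    prob.assign(K, 0.0);
    alias.assign(K, 0);
    std::vector<double> scaled(K);
    std::vector<int> small, large;
    for (int k = 0; k < K; ++k) {
      scaled[k] = w[k] * K / wsum;  // rescale so the average weight is 1
      if (scaled[k] < 1.0) small.push_back(k); else large.push_back(k);
    }
    while (!small.empty() && !large.empty()) {
      int s = small.back(); small.pop_back();
      int l = large.back(); large.pop_back();
      prob[s] = scaled[s];             // keep topic s with probability scaled[s]
      alias[s] = l;                    // otherwise fall back to topic l
      scaled[l] -= (1.0 - scaled[s]);  // l donated the mass that fills bin s
      if (scaled[l] < 1.0) small.push_back(l); else large.push_back(l);
    }
    while (!small.empty()) { prob[small.back()] = 1.0; small.pop_back(); }
    while (!large.empty()) { prob[large.back()] = 1.0; large.pop_back(); }
  }

  // bin: uniform integer in [0, K); u: uniform double in [0, 1)
  int sample(int bin, double u) const {
    return u < prob[bin] ? bin : alias[bin];
  }
};

With this layout, a draw costs one random integer and one random double, which matches the call q[w].sample(random->RandInt(K), random->RandDouble()) above.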