server: use pos_next instead of n_tokens for m-rope (#22439)

This commit is contained in:
Aman Gupta
2026-04-28 13:41:00 +08:00
committed by GitHub
parent 434b2a1ff6
commit 516e8d7a8a
+1 -1
View File
@@ -3031,7 +3031,7 @@ private:
slot.sampled = ids.back(); // last accepted token
SLT_DBG(slot, "add accepted tokens: sampled=%d, ids.size=%zu, n_draft=%zu\n", slot.sampled, ids.size(), n_draft);
-        llama_memory_seq_rm(llama_get_memory(slot.ctx), slot.id, slot.prompt.n_tokens(), -1);
+        llama_memory_seq_rm(llama_get_memory(slot.ctx), slot.id, slot.prompt.tokens.pos_next(), -1);
for (size_t i = 0; i < ids.size(); ++i) {
completion_token_output result;