Read less data from the input connection.

Currently, we read 256 (max word size) * 5 (max N-gram size + 1) characters
from the input connection when building our context. This is overkill. We
don't need more than 80 characters, regardless of which decoder we use.

Bug 19987461.

Change-Id: Ie3a321cf2482adbacd8006d9d86e6601097c15ed
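
For reference, a quick sanity check of the old versus new lookback sizes, a sketch using only the figures quoted in the message above (256-character max word, 4 previous words for a 5-gram); the class and variable names here are illustrative, and the actual constant values in the tree may differ:

// Back-of-the-envelope comparison of the old and new lookback sizes.
public final class LookbackSizeCheck {
    public static void main(final String[] args) {
        final int maxWordLength = 256;       // "max word size" per the commit message
        final int maxPrevWordsForNgram = 4;  // a 5-gram is the current word + 4 previous words
        final int oldLookback = maxWordLength * (maxPrevWordsForNgram + 1) // words
                + maxPrevWordsForNgram;                                    // separators
        final int newLookback = 80;
        // Prints: old lookback = 1284 chars, new lookback = 80 chars
        System.out.println("old lookback = " + oldLookback
                + " chars, new lookback = " + newLookback + " chars");
    }
}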
main
Dan Zivkovic 2015-04-07 11:18:58 -07:00
parent 3e66c6357d
commit 6bb7011559
1 changed file with 4 additions and 4 deletions


@@ -62,10 +62,10 @@ public final class RichInputConnection implements PrivateCommandPerformer {
     private static final boolean DBG = false;
     private static final boolean DEBUG_PREVIOUS_TEXT = false;
     private static final boolean DEBUG_BATCH_NESTING = false;
-    // Provision for long words and separators between the words.
-    private static final int LOOKBACK_CHARACTER_NUM = DICTIONARY_MAX_WORD_LENGTH
-            * (DecoderSpecificConstants.MAX_PREV_WORD_COUNT_FOR_N_GRAM + 1) /* words */
-            + DecoderSpecificConstants.MAX_PREV_WORD_COUNT_FOR_N_GRAM /* separators */;
+    // Provision for realistic N-grams like "Hello, how are you?" and "I'm running 5 late".
+    // Technically, this will not handle 5-grams composed of long words, but in practice,
+    // our language models don't include that much data.
+    private static final int LOOKBACK_CHARACTER_NUM = 80;
     private static final int INVALID_CURSOR_POSITION = -1;

     /**
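
To illustrate how a lookback constant like this bounds the text requested from the editor, here is a minimal sketch. InputConnection.getTextBeforeCursor(n, flags) is the real Android API; the class, constructor, and method names below are hypothetical and not the actual RichInputConnection code:

import android.view.inputmethod.InputConnection;

// Illustrative only: shows a lookback cap applied when rebuilding N-gram context.
final class NgramContextSketch {
    // 80 characters covers realistic N-grams such as "Hello, how are you?".
    private static final int LOOKBACK_CHARACTER_NUM = 80;

    private final InputConnection mIC;

    NgramContextSketch(final InputConnection ic) {
        mIC = ic;
    }

    // Ask the editor for at most LOOKBACK_CHARACTER_NUM characters before the cursor.
    // Requesting less text keeps each cross-process call to the editor cheap.
    CharSequence getPriorContext() {
        final CharSequence text = mIC.getTextBeforeCursor(LOOKBACK_CHARACTER_NUM, 0);
        return text == null ? "" : text;
    }
}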