Merge "Fix performance-for-range-copy warnings" am: db2e4888f8
am: 947f8ee063
Change-Id: I153c9732d7fd1733f196b9807d99cfe59fcde012
commit 535cd61e63

3 changed files with 4 additions and 4 deletions
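For context, here is a minimal sketch of what clang-tidy's performance-for-range-copy check flags and how this commit addresses it. The container and element names below are hypothetical illustrations, not taken from this change: a range-for loop that binds its variable by value copies every element, while binding `const auto&` avoids the per-element copy.

    #include <string>
    #include <vector>

    // Hypothetical element type standing in for the entry/PtNode objects in the
    // diff; copying it is not free because it owns heap-allocated data.
    struct Entry {
        std::string word;
        int probability;
    };

    int sumProbabilities(const std::vector<Entry> &entries) {
        int total = 0;
        // Flagged by performance-for-range-copy: each iteration would
        // copy-construct a full Entry, including its std::string.
        //   for (const auto entry : entries) { total += entry.probability; }

        // Fixed form, matching the pattern applied in this commit: bind the
        // loop variable as a const reference so no per-element copy is made.
        for (const auto &entry : entries) {
            total += entry.probability;
        }
        return total;
    }

The four hunks below apply exactly this change, `const auto` to `const auto&`, to range-for loops over the dictionary's PtNode list and its n-gram/probability entry ranges.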
@@ -93,7 +93,7 @@ const WordProperty *OffdeviceIntermediateDict::getWordProperty(
     const OffdeviceIntermediateDictPtNodeArray *ptNodeArray = &mRootPtNodeArray;
     for (size_t i = 0; i < codePoints.size();) {
         bool foundNext = false;
-        for (const auto ptNode : ptNodeArray->getPtNodeList()) {
+        for (const auto& ptNode : ptNodeArray->getPtNodeList()) {
             const CodePointArrayView ptNodeCodePoints = ptNode->getPtNodeCodePoints();
             if (codePoints[i] < ptNodeCodePoints[0]) {
                 continue;
@@ -141,7 +141,7 @@ void Ver4PatriciaTriePolicy::iterateNgramEntries(const WordIdArrayView prevWordI
     }
     const auto languageModelDictContent = mBuffers->getLanguageModelDictContent();
     for (size_t i = 1; i <= prevWordIds.size(); ++i) {
-        for (const auto entry : languageModelDictContent->getProbabilityEntries(
+        for (const auto& entry : languageModelDictContent->getProbabilityEntries(
                 prevWordIds.limit(i))) {
             const ProbabilityEntry &probabilityEntry = entry.getProbabilityEntry();
             if (!probabilityEntry.isValid()) {
@@ -516,7 +516,7 @@ const WordProperty Ver4PatriciaTriePolicy::getWordProperty(
     int ngramPrevWordsCodePoints[MAX_PREV_WORD_COUNT_FOR_N_GRAM][MAX_WORD_LENGTH];
     int ngramPrevWordsCodePointCount[MAX_PREV_WORD_COUNT_FOR_N_GRAM];
     bool ngramPrevWordIsBeginningOfSentense[MAX_PREV_WORD_COUNT_FOR_N_GRAM];
-    for (const auto entry : languageModelDictContent->exportAllNgramEntriesRelatedToWord(
+    for (const auto& entry : languageModelDictContent->exportAllNgramEntriesRelatedToWord(
            mHeaderPolicy, wordId)) {
        const int codePointCount = getCodePointsAndReturnCodePointCount(entry.getTargetWordId(),
                MAX_WORD_LENGTH, ngramTargetCodePoints);
@@ -80,7 +80,7 @@ TEST(LanguageModelDictContentTest, TestIterateProbabilityEntry) {
         languageModelDictContent.setProbabilityEntry(wordId, &originalEntry);
     }
     std::unordered_set<int> wordIdSet(std::begin(wordIds), std::end(wordIds));
-    for (const auto entry : languageModelDictContent.getProbabilityEntries(WordIdArrayView())) {
+    for (const auto& entry : languageModelDictContent.getProbabilityEntries(WordIdArrayView())) {
         EXPECT_EQ(originalEntry.getFlags(), entry.getProbabilityEntry().getFlags());
         EXPECT_EQ(originalEntry.getProbability(), entry.getProbabilityEntry().getProbability());
         wordIdSet.erase(entry.getWordId());