/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.android.inputmethod.latin.makedict;

import com.android.inputmethod.annotations.UsedForTesting;
import com.android.inputmethod.latin.Constants;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;

/**
 * A dictionary that can fuse heads and tails of words for more compression.
 */
@UsedForTesting
public final class FusionDictionary implements Iterable<Word> {
    private static final boolean DBG = MakedictLog.DBG;

    private static final int CHARACTER_NOT_FOUND_INDEX = -1;

    /**
     * A node array of the dictionary, containing several PtNodes.
     *
     * A PtNodeArray is but an ordered array of PtNodes, which essentially contain all the
     * real information.
     * This class also contains fields to cache size and address, to help with binary
     * generation.
     */
    public static final class PtNodeArray {
        ArrayList<PtNode> mData;
        // To help with binary generation
        int mCachedSize = Integer.MIN_VALUE;
        // mCachedAddressBefore/AfterUpdate are helpers for binary dictionary generation. They
        // always hold the same value except between dictionary address compression, during which
        // the update process needs to know about both values at the same time. Updating will
        // update the AfterUpdate value, and the code will move them to BeforeUpdate before
        // the next update pass.
        int mCachedAddressBeforeUpdate = Integer.MIN_VALUE;
        int mCachedAddressAfterUpdate = Integer.MIN_VALUE;
        int mCachedParentAddress = 0;

        public PtNodeArray() {
            mData = new ArrayList<PtNode>();
        }
        public PtNodeArray(ArrayList<PtNode> data) {
            mData = data;
        }
    }

    /**
     * A string with a frequency.
     *
     * This represents an "attribute", that is either a bigram or a shortcut.
     */
    public static final class WeightedString {
        public final String mWord;
        public int mFrequency;
        public WeightedString(String word, int frequency) {
            mWord = word;
            mFrequency = frequency;
        }

        @Override
        public int hashCode() {
            return Arrays.hashCode(new Object[] { mWord, mFrequency });
        }

        @Override
        public boolean equals(Object o) {
            if (o == this) return true;
            if (!(o instanceof WeightedString)) return false;
            WeightedString w = (WeightedString)o;
            return mWord.equals(w.mWord) && mFrequency == w.mFrequency;
        }
    }

    /**
     * PtNode is a group of characters, with a frequency, shortcut targets, bigrams, and children
     * (Pt means Patricia Trie).
     *
     * This is the central class of the in-memory representation. A PtNode is what can
     * be seen as a traditional "trie node", except it can hold several characters at the
     * same time. A PtNode essentially represents one or several characters in the middle
     * of the trie tree; as such, it can be a terminal, and it can have children.
     * In this in-memory representation, whether the PtNode is a terminal or not is represented
     * in the frequency, where NOT_A_TERMINAL (= -1) means this is not a terminal and any other
* value is the frequency of this terminal. A terminal may have non-null shortcuts and/or
|
|
|
|
* bigrams, but a non-terminal may not. Moreover, children, if present, are null.
|
|
|
|
*/
    public static final class PtNode {
        public static final int NOT_A_TERMINAL = -1;
        final int mChars[];
        ArrayList<WeightedString> mShortcutTargets;
        ArrayList<WeightedString> mBigrams;
        int mFrequency; // NOT_A_TERMINAL == mFrequency indicates this is not a terminal.
        PtNodeArray mChildren;
        boolean mIsNotAWord; // Only a shortcut
        boolean mIsBlacklistEntry;
        // mCachedSize and mCachedAddressBefore/AfterUpdate are helpers for binary dictionary
        // generation. Before and After always hold the same value except during dictionary
        // address compression, where the update process needs to know about both values at the
        // same time. Updating will update the AfterUpdate value, and the code will move them
        // to BeforeUpdate before the next update pass.
        // The update process does not need two versions of mCachedSize.
        int mCachedSize; // The size, in bytes, of this PtNode.
        int mCachedAddressBeforeUpdate; // The address of this PtNode (before update)
        int mCachedAddressAfterUpdate; // The address of this PtNode (after update)

        public PtNode(final int[] chars, final ArrayList<WeightedString> shortcutTargets,
                final ArrayList<WeightedString> bigrams, final int frequency,
                final boolean isNotAWord, final boolean isBlacklistEntry) {
            mChars = chars;
            mFrequency = frequency;
            mShortcutTargets = shortcutTargets;
            mBigrams = bigrams;
            mChildren = null;
            mIsNotAWord = isNotAWord;
            mIsBlacklistEntry = isBlacklistEntry;
        }

        public PtNode(final int[] chars, final ArrayList<WeightedString> shortcutTargets,
                final ArrayList<WeightedString> bigrams, final int frequency,
                final boolean isNotAWord, final boolean isBlacklistEntry,
                final PtNodeArray children) {
            mChars = chars;
            mFrequency = frequency;
            mShortcutTargets = shortcutTargets;
            mBigrams = bigrams;
            mChildren = children;
            mIsNotAWord = isNotAWord;
            mIsBlacklistEntry = isBlacklistEntry;
        }

        public void addChild(PtNode n) {
            if (null == mChildren) {
                mChildren = new PtNodeArray();
            }
            mChildren.mData.add(n);
        }

        public boolean isTerminal() {
            return NOT_A_TERMINAL != mFrequency;
        }

        public int getFrequency() {
            return mFrequency;
        }

        public boolean getIsNotAWord() {
            return mIsNotAWord;
        }

        public boolean getIsBlacklistEntry() {
            return mIsBlacklistEntry;
        }

        public ArrayList<WeightedString> getShortcutTargets() {
            // We don't want write permission to escape outside the package, so we return a copy
            if (null == mShortcutTargets) return null;
            final ArrayList<WeightedString> copyOfShortcutTargets =
                    new ArrayList<WeightedString>(mShortcutTargets);
            return copyOfShortcutTargets;
        }

        public ArrayList<WeightedString> getBigrams() {
            // We don't want write permission to escape outside the package, so we return a copy
            if (null == mBigrams) return null;
            final ArrayList<WeightedString> copyOfBigrams = new ArrayList<WeightedString>(mBigrams);
            return copyOfBigrams;
        }

        public boolean hasSeveralChars() {
            assert(mChars.length > 0);
            return 1 < mChars.length;
        }

        /**
         * Adds a word to the bigram list. Updates the frequency if the word already
         * exists.
         */
        public void addBigram(final String word, final int frequency) {
            if (mBigrams == null) {
                mBigrams = new ArrayList<WeightedString>();
            }
            WeightedString bigram = getBigram(word);
            if (bigram != null) {
                bigram.mFrequency = frequency;
            } else {
                bigram = new WeightedString(word, frequency);
                mBigrams.add(bigram);
            }
        }

        /**
         * Gets the shortcut target for the given word. Returns null if the word is not in the
         * shortcut list.
         */
        public WeightedString getShortcut(final String word) {
            // TODO: Don't do a linear search
            if (mShortcutTargets != null) {
                final int size = mShortcutTargets.size();
                for (int i = 0; i < size; ++i) {
                    WeightedString shortcut = mShortcutTargets.get(i);
                    if (shortcut.mWord.equals(word)) {
                        return shortcut;
                    }
                }
            }
            return null;
        }

        /**
         * Gets the bigram for the given word.
         * Returns null if the word is not in the bigrams list.
         */
        public WeightedString getBigram(final String word) {
            // TODO: Don't do a linear search
            if (mBigrams != null) {
                final int size = mBigrams.size();
                for (int i = 0; i < size; ++i) {
                    WeightedString bigram = mBigrams.get(i);
                    if (bigram.mWord.equals(word)) {
                        return bigram;
                    }
                }
            }
            return null;
        }

        /**
         * Updates the PtNode with the given properties. Adds the shortcut and bigram lists to
         * the existing ones if any. Note: unigram, bigram, and shortcut frequencies are only
         * updated if they are higher than the existing ones.
         */
        public void update(final int frequency, final ArrayList<WeightedString> shortcutTargets,
                final ArrayList<WeightedString> bigrams,
                final boolean isNotAWord, final boolean isBlacklistEntry) {
            if (frequency > mFrequency) {
                mFrequency = frequency;
            }
            if (shortcutTargets != null) {
                if (mShortcutTargets == null) {
                    mShortcutTargets = shortcutTargets;
                } else {
                    final int size = shortcutTargets.size();
                    for (int i = 0; i < size; ++i) {
                        final WeightedString shortcut = shortcutTargets.get(i);
                        final WeightedString existingShortcut = getShortcut(shortcut.mWord);
                        if (existingShortcut == null) {
                            mShortcutTargets.add(shortcut);
                        } else if (existingShortcut.mFrequency < shortcut.mFrequency) {
                            existingShortcut.mFrequency = shortcut.mFrequency;
                        }
                    }
                }
            }
            if (bigrams != null) {
                if (mBigrams == null) {
                    mBigrams = bigrams;
                } else {
                    final int size = bigrams.size();
                    for (int i = 0; i < size; ++i) {
                        final WeightedString bigram = bigrams.get(i);
                        final WeightedString existingBigram = getBigram(bigram.mWord);
                        if (existingBigram == null) {
                            mBigrams.add(bigram);
                        } else if (existingBigram.mFrequency < bigram.mFrequency) {
                            existingBigram.mFrequency = bigram.mFrequency;
                        }
                    }
                }
            }
            mIsNotAWord = isNotAWord;
            mIsBlacklistEntry = isBlacklistEntry;
        }
    }

    /**
     * Options global to the dictionary.
     */
    public static final class DictionaryOptions {
        public final boolean mGermanUmlautProcessing;
        public final boolean mFrenchLigatureProcessing;
        public final HashMap<String, String> mAttributes;
        public DictionaryOptions(final HashMap<String, String> attributes,
                final boolean germanUmlautProcessing, final boolean frenchLigatureProcessing) {
            mAttributes = attributes;
            mGermanUmlautProcessing = germanUmlautProcessing;
            mFrenchLigatureProcessing = frenchLigatureProcessing;
        }
        @Override
        public String toString() { // Convenience method
            return toString(0, false);
        }
        public String toString(final int indentCount, final boolean plumbing) {
            final StringBuilder indent = new StringBuilder();
            if (plumbing) {
                indent.append("H:");
            } else {
                for (int i = 0; i < indentCount; ++i) {
                    indent.append(" ");
                }
            }
            final StringBuilder s = new StringBuilder();
            for (final String optionKey : mAttributes.keySet()) {
                s.append(indent);
                s.append(optionKey);
                s.append(" = ");
                if ("date".equals(optionKey) && !plumbing) {
                    // Date needs a number of milliseconds, but the dictionary contains seconds
                    s.append(new Date(
                            1000 * Long.parseLong(mAttributes.get(optionKey))).toString());
                } else {
                    s.append(mAttributes.get(optionKey));
                }
                s.append("\n");
            }
            if (mGermanUmlautProcessing) {
                s.append(indent);
                s.append("Needs German umlaut processing\n");
            }
            if (mFrenchLigatureProcessing) {
                s.append(indent);
                s.append("Needs French ligature processing\n");
            }
            return s.toString();
        }
    }

    public final DictionaryOptions mOptions;
    public final PtNodeArray mRootNodeArray;

    public FusionDictionary(final PtNodeArray rootNodeArray, final DictionaryOptions options) {
        mRootNodeArray = rootNodeArray;
        mOptions = options;
    }
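
    // Illustrative usage sketch (the words and frequency values below are arbitrary sample data):
    // build an empty dictionary, add unigrams, then link two of them with a bigram.
    //
    //   final FusionDictionary dict = new FusionDictionary(new PtNodeArray(),
    //           new DictionaryOptions(new HashMap<String, String>(),
    //                   false /* germanUmlautProcessing */, false /* frenchLigatureProcessing */));
    //   dict.add("swift", 150, null /* shortcutTargets */, false /* isNotAWord */);
    //   dict.add("swiftly", 120, null /* shortcutTargets */, false /* isNotAWord */);
    //   dict.setBigram("swift", "swiftly", 30);
    //   dict.hasWord("swiftly"); // true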

    public void addOptionAttribute(final String key, final String value) {
        mOptions.mAttributes.put(key, value);
    }

    /**
     * Helper method to convert a String to an int array.
     */
    static int[] getCodePoints(final String word) {
        // TODO: this is a copy-paste of the contents of StringUtils.toCodePointArray,
        // which is not visible from the makedict package. Factor this code.
        final char[] characters = word.toCharArray();
        final int length = characters.length;
        final int[] codePoints = new int[Character.codePointCount(characters, 0, length)];
        int codePoint = Character.codePointAt(characters, 0);
        int dsti = 0;
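        // The loop below is staggered: each pass stores the previously read code point and then
        // reads the next one, so the final code point is written after the loop exits.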
        for (int srci = Character.charCount(codePoint);
                srci < length; srci += Character.charCount(codePoint), ++dsti) {
            codePoints[dsti] = codePoint;
            codePoint = Character.codePointAt(characters, srci);
        }
        codePoints[dsti] = codePoint;
        return codePoints;
    }

    /**
     * Helper method to add a word as a string.
     *
     * This method adds a word to the dictionary with the given frequency. Optional
     * lists of bigrams and shortcuts can be passed here. For each word inside,
     * they will be added to the dictionary as necessary.
     *
     * @param word the word to add.
     * @param frequency the frequency of the word, in the range [0..255].
     * @param shortcutTargets a list of shortcut targets for this word, or null.
     * @param isNotAWord true if this should not be considered a word (e.g. shortcut only)
     */
    public void add(final String word, final int frequency,
            final ArrayList<WeightedString> shortcutTargets, final boolean isNotAWord) {
        add(getCodePoints(word), frequency, shortcutTargets, isNotAWord,
                false /* isBlacklistEntry */);
    }

    /**
     * Helper method to add a blacklist entry as a string.
     *
     * @param word the word to add as a blacklist entry.
     * @param shortcutTargets a list of shortcut targets for this word, or null.
     * @param isNotAWord true if this is not a word for spellchecking purposes (shortcut only or so)
     */
    public void addBlacklistEntry(final String word,
            final ArrayList<WeightedString> shortcutTargets, final boolean isNotAWord) {
        add(getCodePoints(word), 0, shortcutTargets, isNotAWord, true /* isBlacklistEntry */);
    }

    /**
     * Sanity check for a PtNode array.
     *
     * This method checks that all PtNodes in a node array are ordered as expected.
     * If they are, nothing happens. If they aren't, an exception is thrown.
     */
    private void checkStack(PtNodeArray ptNodeArray) {
        ArrayList<PtNode> stack = ptNodeArray.mData;
        int lastValue = -1;
        for (int i = 0; i < stack.size(); ++i) {
            int currentValue = stack.get(i).mChars[0];
            if (currentValue <= lastValue)
                throw new RuntimeException("Invalid stack");
            else
                lastValue = currentValue;
        }
    }

    /**
     * Helper method to add a new bigram to the dictionary.
     *
     * @param word1 the previous word of the context
     * @param word2 the next word of the context
     * @param frequency the bigram frequency
     */
    public void setBigram(final String word1, final String word2, final int frequency) {
        PtNode ptNode = findWordInTree(mRootNodeArray, word1);
        if (ptNode != null) {
            final PtNode ptNode2 = findWordInTree(mRootNodeArray, word2);
            if (ptNode2 == null) {
                add(getCodePoints(word2), 0, null, false /* isNotAWord */,
                        false /* isBlacklistEntry */);
                // The PtNode for the first word may have moved by the above insertion,
                // if word1 and word2 share a common stem that happens not to have been
                // a cutting point until now. In this case, we need to refresh ptNode.
                ptNode = findWordInTree(mRootNodeArray, word1);
            }
            ptNode.addBigram(word2, frequency);
        } else {
            throw new RuntimeException("First word of bigram not found");
        }
    }

    /**
     * Add a word to this dictionary.
     *
     * The shortcuts, if any, have to be in the dictionary already. If they aren't,
     * an exception is thrown.
     *
     * @param word the word, as an int array.
     * @param frequency the frequency of the word, in the range [0..255].
     * @param shortcutTargets an optional list of shortcut targets for this word (null if none).
     * @param isNotAWord true if this is not a word for spellchecking purposes (shortcut only or so)
     * @param isBlacklistEntry true if this is a blacklisted word, false otherwise
     */
    private void add(final int[] word, final int frequency,
            final ArrayList<WeightedString> shortcutTargets,
            final boolean isNotAWord, final boolean isBlacklistEntry) {
        assert(frequency >= 0 && frequency <= 255);
        if (word.length >= Constants.DICTIONARY_MAX_WORD_LENGTH) {
            MakedictLog.w("Ignoring a word that is too long: word.length = " + word.length);
            return;
        }

        PtNodeArray currentNodeArray = mRootNodeArray;
        int charIndex = 0;

        PtNode currentPtNode = null;
        int differentCharIndex = 0; // Set by the loop to the index of the char that differs
        int nodeIndex = findIndexOfChar(mRootNodeArray, word[charIndex]);
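        // Walk down the trie, consuming as much of the word as possible. The descent stops when
        // the next character has no matching PtNode in the current array, when a PtNode matches
        // only partially, when the matching PtNode has no children, or when the word is consumed.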
        while (CHARACTER_NOT_FOUND_INDEX != nodeIndex) {
            currentPtNode = currentNodeArray.mData.get(nodeIndex);
            differentCharIndex = compareCharArrays(currentPtNode.mChars, word, charIndex);
            if (ARRAYS_ARE_EQUAL != differentCharIndex
                    && differentCharIndex < currentPtNode.mChars.length) break;
            if (null == currentPtNode.mChildren) break;
            charIndex += currentPtNode.mChars.length;
            if (charIndex >= word.length) break;
            currentNodeArray = currentPtNode.mChildren;
            nodeIndex = findIndexOfChar(currentNodeArray, word[charIndex]);
        }
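
        // At this point, either no existing PtNode matches the next character (a new PtNode is
        // inserted into the current array), or the word shares a prefix with an existing PtNode
        // and the branches below extend or split that PtNode as needed.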
        if (CHARACTER_NOT_FOUND_INDEX == nodeIndex) {
            // No node at this point to accept the word. Create one.
            final int insertionIndex = findInsertionIndex(currentNodeArray, word[charIndex]);
            final PtNode newPtNode = new PtNode(Arrays.copyOfRange(word, charIndex, word.length),
                    shortcutTargets, null /* bigrams */, frequency, isNotAWord, isBlacklistEntry);
            currentNodeArray.mData.add(insertionIndex, newPtNode);
            if (DBG) checkStack(currentNodeArray);
        } else {
            // There is a word with a common prefix.
            if (differentCharIndex == currentPtNode.mChars.length) {
                if (charIndex + differentCharIndex >= word.length) {
                    // The new word is a prefix of an existing word, but the node on which it
                    // should end already exists as is. Since the old PtNode was not a terminal,
                    // make it one by filling in its frequency and other attributes
                    currentPtNode.update(frequency, shortcutTargets, null, isNotAWord,
                            isBlacklistEntry);
                } else {
                    // The new word matches the full old word and extends past it.
                    // We only have to create a new node and add it to the end of this.
                    final PtNode newNode = new PtNode(
                            Arrays.copyOfRange(word, charIndex + differentCharIndex, word.length),
                            shortcutTargets, null /* bigrams */, frequency, isNotAWord,
                            isBlacklistEntry);
                    currentPtNode.mChildren = new PtNodeArray();
                    currentPtNode.mChildren.mData.add(newNode);
                }
            } else {
                if (0 == differentCharIndex) {
                    // Exact same word. Update the frequency if higher. This will also add the
                    // new shortcuts to the existing shortcut list if it already exists.
                    currentPtNode.update(frequency, shortcutTargets, null,
                            currentPtNode.mIsNotAWord && isNotAWord,
                            currentPtNode.mIsBlacklistEntry || isBlacklistEntry);
                } else {
                    // Partial prefix match only. We have to replace the current node with a node
                    // containing the current prefix and create two new ones for the tails.
                    PtNodeArray newChildren = new PtNodeArray();
                    final PtNode newOldWord = new PtNode(
                            Arrays.copyOfRange(currentPtNode.mChars, differentCharIndex,
                                    currentPtNode.mChars.length), currentPtNode.mShortcutTargets,
                            currentPtNode.mBigrams, currentPtNode.mFrequency,
                            currentPtNode.mIsNotAWord, currentPtNode.mIsBlacklistEntry,
                            currentPtNode.mChildren);
                    newChildren.mData.add(newOldWord);

                    final PtNode newParent;
                    if (charIndex + differentCharIndex >= word.length) {
                        newParent = new PtNode(
                                Arrays.copyOfRange(currentPtNode.mChars, 0, differentCharIndex),
                                shortcutTargets, null /* bigrams */, frequency,
                                isNotAWord, isBlacklistEntry, newChildren);
                    } else {
                        newParent = new PtNode(
                                Arrays.copyOfRange(currentPtNode.mChars, 0, differentCharIndex),
                                null /* shortcutTargets */, null /* bigrams */, -1,
                                false /* isNotAWord */, false /* isBlacklistEntry */, newChildren);
                        final PtNode newWord = new PtNode(Arrays.copyOfRange(word,
                                charIndex + differentCharIndex, word.length),
                                shortcutTargets, null /* bigrams */, frequency,
                                isNotAWord, isBlacklistEntry);
                        final int addIndex = word[charIndex + differentCharIndex]
                                > currentPtNode.mChars[differentCharIndex] ? 1 : 0;
                        newChildren.mData.add(addIndex, newWord);
                    }
                    currentNodeArray.mData.set(nodeIndex, newParent);
                }
                if (DBG) checkStack(currentNodeArray);
            }
        }
    }

    private static final int ARRAYS_ARE_EQUAL = 0;

    /**
     * Custom comparison of two int arrays taken to contain character codes.
     *
     * This method compares the two arrays passed as an argument in a lexicographic way,
     * with an offset in the dst string.
     * This method does NOT test for the first character. It is taken to be equal.
     * I repeat: this method starts the comparison at index 1 in src and dstOffset + 1 in dst.
     * The index where the strings differ is returned. ARRAYS_ARE_EQUAL = 0 is returned if the
     * strings are equal. This works BECAUSE we don't look at the first character.
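     * For example, compareCharArrays({'a','n','y'}, {'a','n','d'}, 0) returns 2 (the index where
     * the arrays differ), while compareCharArrays({'a','n','y'}, {'a','n','y'}, 0) returns
     * ARRAYS_ARE_EQUAL.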
     *
     * @param src the left-hand side string of the comparison.
     * @param dst the right-hand side string of the comparison.
     * @param dstOffset the offset in the right-hand side string.
     * @return the index at which the strings differ, or ARRAYS_ARE_EQUAL = 0 if they don't.
     */
    private static int compareCharArrays(final int[] src, final int[] dst, int dstOffset) {
        // We do NOT test the first char, because we come from a method that already
        // tested it.
        for (int i = 1; i < src.length; ++i) {
            if (dstOffset + i >= dst.length) return i;
            if (src[i] != dst[dstOffset + i]) return i;
        }
        if (dst.length > src.length) return src.length;
        return ARRAYS_ARE_EQUAL;
    }

    /**
     * Helper class that compares and sorts two PtNodes according to their
     * first element only. I repeat: ONLY the first element is considered, the rest
     * is ignored.
     * This comparator imposes orderings that are inconsistent with equals.
     */
    static private final class PtNodeComparator implements java.util.Comparator<PtNode> {
        @Override
        public int compare(PtNode p1, PtNode p2) {
            if (p1.mChars[0] == p2.mChars[0]) return 0;
            return p1.mChars[0] < p2.mChars[0] ? -1 : 1;
        }
    }
    final static private PtNodeComparator PTNODE_COMPARATOR = new PtNodeComparator();

    /**
     * Finds the insertion index of a character within a node array.
     */
    private static int findInsertionIndex(final PtNodeArray nodeArray, int character) {
        final ArrayList<PtNode> data = nodeArray.mData;
        final PtNode reference = new PtNode(new int[] { character },
                null /* shortcutTargets */, null /* bigrams */, 0, false /* isNotAWord */,
                false /* isBlacklistEntry */);
        int result = Collections.binarySearch(data, reference, PTNODE_COMPARATOR);
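        // Collections.binarySearch returns (-(insertion point) - 1) when the key is not present,
        // so the return below converts a negative result back into the insertion index.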
        return result >= 0 ? result : -result - 1;
    }

    /**
     * Find the index of a char in a node array, if it exists.
     *
     * @param nodeArray the node array to search in.
     * @param character the character to search for.
     * @return the position of the character if it's there, or CHARACTER_NOT_FOUND_INDEX = -1 else.
     */
    private static int findIndexOfChar(final PtNodeArray nodeArray, int character) {
        final int insertionIndex = findInsertionIndex(nodeArray, character);
        if (nodeArray.mData.size() <= insertionIndex) return CHARACTER_NOT_FOUND_INDEX;
        return character == nodeArray.mData.get(insertionIndex).mChars[0] ? insertionIndex
                : CHARACTER_NOT_FOUND_INDEX;
    }

    /**
     * Helper method to find a word in a given branch.
     */
    @SuppressWarnings("unused")
    public static PtNode findWordInTree(PtNodeArray nodeArray, final String string) {
        int index = 0;
        final StringBuilder checker = DBG ? new StringBuilder() : null;
        final int[] codePoints = getCodePoints(string);

        PtNode currentPtNode;
        do {
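            // Each pass matches one PtNode: locate the PtNode starting with the next code point,
            // check that all of its characters match the word, then descend into its children.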
            int indexOfGroup = findIndexOfChar(nodeArray, codePoints[index]);
            if (CHARACTER_NOT_FOUND_INDEX == indexOfGroup) return null;
            currentPtNode = nodeArray.mData.get(indexOfGroup);

            if (codePoints.length - index < currentPtNode.mChars.length) return null;
            int newIndex = index;
            while (newIndex < codePoints.length && newIndex - index < currentPtNode.mChars.length) {
                if (currentPtNode.mChars[newIndex - index] != codePoints[newIndex]) return null;
                newIndex++;
            }
            index = newIndex;

            if (DBG) {
                checker.append(new String(currentPtNode.mChars, 0, currentPtNode.mChars.length));
            }
            if (index < codePoints.length) {
                nodeArray = currentPtNode.mChildren;
            }
        } while (null != nodeArray && index < codePoints.length);

        if (index < codePoints.length) return null;
        if (!currentPtNode.isTerminal()) return null;
        if (DBG && !string.equals(checker.toString())) return null;
        return currentPtNode;
    }

    /**
     * Helper method to find out whether a word is in the dict or not.
     */
    public boolean hasWord(final String s) {
        if (null == s || "".equals(s)) {
            throw new RuntimeException("Can't search for a null or empty string");
        }
        return null != findWordInTree(mRootNodeArray, s);
    }

    /**
     * Recursively count the number of PtNodes in a given branch of the trie.
     *
     * @param nodeArray the parent node array.
     * @return the number of PtNodes in the whole branch under this node.
     */
    public static int countPtNodes(final PtNodeArray nodeArray) {
        final int nodeSize = nodeArray.mData.size();
        int size = nodeSize;
        for (int i = nodeSize - 1; i >= 0; --i) {
            PtNode ptNode = nodeArray.mData.get(i);
            if (null != ptNode.mChildren)
                size += countPtNodes(ptNode.mChildren);
        }
        return size;
    }

    /**
     * Recursively count the number of node arrays in a given branch of the trie.
     *
     * @param nodeArray the node array to count.
     * @return the number of node arrays in this branch.
     */
    public static int countNodeArrays(final PtNodeArray nodeArray) {
        int size = 1;
        for (int i = nodeArray.mData.size() - 1; i >= 0; --i) {
            PtNode ptNode = nodeArray.mData.get(i);
            if (null != ptNode.mChildren)
                size += countNodeArrays(ptNode.mChildren);
        }
        return size;
    }

    // Recursively find out whether there are any bigrams.
    // This can be pretty expensive especially if there aren't any (we return as soon
    // as we find one, so it's much cheaper if there are bigrams)
    private static boolean hasBigramsInternal(final PtNodeArray nodeArray) {
        if (null == nodeArray) return false;
        for (int i = nodeArray.mData.size() - 1; i >= 0; --i) {
            PtNode ptNode = nodeArray.mData.get(i);
            if (null != ptNode.mBigrams) return true;
            if (hasBigramsInternal(ptNode.mChildren)) return true;
        }
        return false;
    }

    /**
     * Finds out whether there are any bigrams in this dictionary.
     *
     * @return true if there is any bigram, false otherwise.
     */
    // TODO: this is expensive especially for large dictionaries without any bigram.
    // The up side is, this is always accurate and correct and uses no memory. We should
    // find a more efficient way of doing this, without compromising too much on memory
    // and ease of use.
    public boolean hasBigrams() {
        return hasBigramsInternal(mRootNodeArray);
    }

    // Historically, the tails of the words were going to be merged to save space.
    // However, that would prevent the code from searching for a specific address in log(n)
    // time so this was abandoned.
    // The code is still of interest as it does add some compression to any dictionary
    // that has no need for attributes. Implementations that do not read attributes should be
    // able to read a dictionary with merged tails.
    // Also, the following code does support frequencies, as in, it will only merge
    // tails that share the same frequency. Though it would result in the above loss of
    // performance while searching by address, it is still technically possible to merge
    // tails that contain attributes, but this code does not take that into account - it does
    // not compare attributes and will merge terminals with different attributes regardless.
    public void mergeTails() {
        MakedictLog.i("Do not merge tails");
        return;

        // MakedictLog.i("Merging PtNodes. Number of PtNodes : " + countPtNodes(root));
        // MakedictLog.i("Number of PtNodes : " + countPtNodes(root));
        //
        // final HashMap<String, ArrayList<PtNodeArray>> repository =
        //         new HashMap<String, ArrayList<PtNodeArray>>();
        // mergeTailsInner(repository, root);
        //
        // MakedictLog.i("Number of different pseudohashes : " + repository.size());
        // int size = 0;
        // for (ArrayList<PtNodeArray> a : repository.values()) {
        //     size += a.size();
        // }
        // MakedictLog.i("Number of nodes after merge : " + (1 + size));
        // MakedictLog.i("Recursively seen nodes : " + countNodes(root));
    }

    // The following methods are used by the deactivated mergeTails()
    // private static boolean isEqual(PtNodeArray a, PtNodeArray b) {
    //     if (null == a && null == b) return true;
    //     if (null == a || null == b) return false;
    //     if (a.data.size() != b.data.size()) return false;
    //     final int size = a.data.size();
    //     for (int i = size - 1; i >= 0; --i) {
    //         PtNode aPtNode = a.data.get(i);
    //         PtNode bPtNode = b.data.get(i);
    //         if (aPtNode.frequency != bPtNode.frequency) return false;
    //         if (aPtNode.alternates == null && bPtNode.alternates != null) return false;
    //         if (aPtNode.alternates != null && !aPtNode.equals(bPtNode.alternates)) return false;
    //         if (!Arrays.equals(aPtNode.chars, bPtNode.chars)) return false;
    //         if (!isEqual(aPtNode.children, bPtNode.children)) return false;
    //     }
    //     return true;
    // }

    // static private HashMap<String, ArrayList<PtNodeArray>> mergeTailsInner(
    //         final HashMap<String, ArrayList<PtNodeArray>> map, final PtNodeArray nodeArray) {
    //     final ArrayList<PtNode> branches = nodeArray.data;
    //     final int nodeSize = branches.size();
    //     for (int i = 0; i < nodeSize; ++i) {
    //         PtNode ptNode = branches.get(i);
    //         if (null != ptNode.children) {
    //             String pseudoHash = getPseudoHash(ptNode.children);
    //             ArrayList<PtNodeArray> similarList = map.get(pseudoHash);
    //             if (null == similarList) {
    //                 similarList = new ArrayList<PtNodeArray>();
    //                 map.put(pseudoHash, similarList);
    //             }
    //             boolean merged = false;
    //             for (PtNodeArray similar : similarList) {
    //                 if (isEqual(ptNode.children, similar)) {
    //                     ptNode.children = similar;
    //                     merged = true;
    //                     break;
    //                 }
    //             }
    //             if (!merged) {
    //                 similarList.add(ptNode.children);
    //             }
    //             mergeTailsInner(map, ptNode.children);
    //         }
    //     }
    //     return map;
    // }

    // private static String getPseudoHash(final PtNodeArray nodeArray) {
    //     StringBuilder s = new StringBuilder();
    //     for (PtNode ptNode : nodeArray.data) {
    //         s.append(ptNode.frequency);
    //         for (int ch : ptNode.chars) {
    //             s.append(Character.toChars(ch));
    //         }
    //     }
    //     return s.toString();
    // }

    /**
     * Iterator to walk through a dictionary.
     *
     * This is purely for convenience.
     */
    public static final class DictionaryIterator implements Iterator<Word> {
        private static final class Position {
            public Iterator<PtNode> pos;
            public int length;
            public Position(ArrayList<PtNode> ptNodes) {
                pos = ptNodes.iterator();
                length = 0;
            }
        }
        final StringBuilder mCurrentString;
        final LinkedList<Position> mPositions;
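        // mPositions acts as an explicit stack of iterators, one per node array currently being
        // walked, while mCurrentString accumulates the characters along the current path.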

        public DictionaryIterator(ArrayList<PtNode> ptRoot) {
            mCurrentString = new StringBuilder();
            mPositions = new LinkedList<Position>();
            final Position rootPos = new Position(ptRoot);
            mPositions.add(rootPos);
        }

        @Override
        public boolean hasNext() {
            for (Position p : mPositions) {
                if (p.pos.hasNext()) {
                    return true;
                }
            }
            return false;
        }

        @Override
        public Word next() {
            Position currentPos = mPositions.getLast();
            mCurrentString.setLength(currentPos.length);
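
            // Depth-first walk: consume PtNodes with the top iterator, push a new Position when
            // the current PtNode has children, pop when a branch is exhausted, and return a Word
            // as soon as a terminal (frequency >= 0) is reached.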
            do {
                if (currentPos.pos.hasNext()) {
                    final PtNode currentPtNode = currentPos.pos.next();
                    currentPos.length = mCurrentString.length();
                    for (int i : currentPtNode.mChars) {
                        mCurrentString.append(Character.toChars(i));
                    }
                    if (null != currentPtNode.mChildren) {
                        currentPos = new Position(currentPtNode.mChildren.mData);
                        currentPos.length = mCurrentString.length();
                        mPositions.addLast(currentPos);
                    }
                    if (currentPtNode.mFrequency >= 0) {
                        return new Word(mCurrentString.toString(), currentPtNode.mFrequency,
                                currentPtNode.mShortcutTargets, currentPtNode.mBigrams,
                                currentPtNode.mIsNotAWord, currentPtNode.mIsBlacklistEntry);
                    }
                } else {
                    mPositions.removeLast();
                    currentPos = mPositions.getLast();
                    mCurrentString.setLength(mPositions.getLast().length);
                }
            } while (true);
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException("Unsupported yet");
        }
    }

    /**
     * Method to return an iterator.
     *
     * This method enables Java's enhanced for loop. With this you can have a FusionDictionary x
     * and say : for (Word w : x) {}
     */
    @Override
    public Iterator<Word> iterator() {
        return new DictionaryIterator(mRootNodeArray.mData);
    }
}