/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.android.inputmethod.latin.makedict;

import com.android.inputmethod.latin.makedict.BinaryDictDecoder.CharEncoding;
import com.android.inputmethod.latin.makedict.FormatSpec.FormatOptions;
import com.android.inputmethod.latin.makedict.FusionDictionary.CharGroup;
import com.android.inputmethod.latin.makedict.FusionDictionary.DictionaryOptions;
import com.android.inputmethod.latin.makedict.FusionDictionary.Node;
import com.android.inputmethod.latin.makedict.FusionDictionary.WeightedString;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Iterator;

/**
 * Encodes binary files for a FusionDictionary.
 *
 * All the methods in this class are static.
 */
public class BinaryDictEncoder {

    private static final boolean DBG = MakedictLog.DBG;

    private BinaryDictEncoder() {
        // This utility class is not publicly instantiable.
    }

    // Arbitrary limit to how many passes we consider address size compression should
    // terminate in. At the time of this writing, our largest dictionary completes
    // compression in five passes.
    // If the number of passes exceeds this number, makedict bails with an exception on
    // suspicion that a bug might be causing an infinite loop.
    private static final int MAX_PASSES = 24;

    /**
     * Compute the binary size of the character array.
     *
     * If only one character, this is the size of this character. If many, it's the sum of their
     * sizes + 1 byte for the terminator.
     *
     * @param characters the character array
     * @return the size of the char array, including the terminator if any
     */
    static int getGroupCharactersSize(final int[] characters) {
        int size = CharEncoding.getCharArraySize(characters);
        if (characters.length > 1) size += FormatSpec.GROUP_TERMINATOR_SIZE;
        return size;
    }

    /**
     * Compute the binary size of the character array in a group.
     *
     * If only one character, this is the size of this character. If many, it's the sum of their
     * sizes + 1 byte for the terminator.
     *
     * @param group the group
     * @return the size of the char array, including the terminator if any
     */
    private static int getGroupCharactersSize(final CharGroup group) {
        return getGroupCharactersSize(group.mChars);
    }

    /**
     * Compute the binary size of the group count for a node.
     * @param node the node
     * @return the size of the group count, either 1 or 2 bytes.
     */
    private static int getGroupCountSize(final Node node) {
        return BinaryDictIOUtils.getGroupCountSize(node.mData.size());
    }

    /**
     * Compute the size of a shortcut in bytes.
     */
    private static int getShortcutSize(final WeightedString shortcut) {
        int size = FormatSpec.GROUP_ATTRIBUTE_FLAGS_SIZE;
        final String word = shortcut.mWord;
        final int length = word.length();
        for (int i = 0; i < length; i = word.offsetByCodePoints(i, 1)) {
            final int codePoint = word.codePointAt(i);
            size += CharEncoding.getCharSize(codePoint);
        }
        size += FormatSpec.GROUP_TERMINATOR_SIZE;
        return size;
    }

    /**
     * Compute the size of a shortcut list in bytes.
     *
     * This is known in advance and does not change according to position in the file
     * like address lists do.
     */
    static int getShortcutListSize(final ArrayList<WeightedString> shortcutList) {
        if (null == shortcutList) return 0;
        int size = FormatSpec.GROUP_SHORTCUT_LIST_SIZE_SIZE;
        for (final WeightedString shortcut : shortcutList) {
            size += getShortcutSize(shortcut);
        }
        return size;
    }

    /**
     * Compute the maximum size of a CharGroup, assuming 3-byte addresses for everything.
     *
     * @param group the CharGroup to compute the size of.
     * @param options file format options.
     * @return the maximum size of the group.
     */
    private static int getCharGroupMaximumSize(final CharGroup group,
            final FormatOptions options) {
        int size = getGroupHeaderSize(group, options);
        // If terminal, one byte for the frequency
        if (group.isTerminal()) size += FormatSpec.GROUP_FREQUENCY_SIZE;
        size += FormatSpec.GROUP_MAX_ADDRESS_SIZE; // For children address
        size += getShortcutListSize(group.mShortcutTargets);
        if (null != group.mBigrams) {
            size += (FormatSpec.GROUP_ATTRIBUTE_FLAGS_SIZE
                    + FormatSpec.GROUP_ATTRIBUTE_MAX_ADDRESS_SIZE) * group.mBigrams.size();
        }
        return size;
    }

    /**
     * Compute the maximum size of a node, assuming 3-byte addresses for everything, and caches
     * it in the 'mCachedSize' member of the node.
     *
     * @param node the node to compute the maximum size of.
     * @param options file format options.
     */
    private static void calculateNodeMaximumSize(final Node node, final FormatOptions options) {
        int size = getGroupCountSize(node);
        for (CharGroup g : node.mData) {
            final int groupSize = getCharGroupMaximumSize(g, options);
            g.mCachedSize = groupSize;
            size += groupSize;
        }
        if (options.mSupportsDynamicUpdate) {
            size += FormatSpec.FORWARD_LINK_ADDRESS_SIZE;
        }
        node.mCachedSize = size;
    }

    /**
     * Compute the size of the header (flag + [parent address] + characters size) of a CharGroup.
     *
     * @param group the group of which to compute the size of the header
     * @param options file format options.
     */
    private static int getGroupHeaderSize(final CharGroup group, final FormatOptions options) {
        if (BinaryDictIOUtils.supportsDynamicUpdate(options)) {
            return FormatSpec.GROUP_FLAGS_SIZE + FormatSpec.PARENT_ADDRESS_SIZE
                    + getGroupCharactersSize(group);
        } else {
            return FormatSpec.GROUP_FLAGS_SIZE + getGroupCharactersSize(group);
        }
    }

    /**
     * Compute the size, in bytes, that an address will occupy.
     *
     * This can be used either for children addresses (which are always positive) or for
     * attributes, which may be positive or negative but
     * store their sign bit separately.
     *
     * @param address the address
     * @return the byte size.
     */
    static int getByteSize(final int address) {
        assert(address <= FormatSpec.UINT24_MAX);
        if (!BinaryDictIOUtils.hasChildrenAddress(address)) {
            return 0;
        } else if (Math.abs(address) <= FormatSpec.UINT8_MAX) {
            return 1;
        } else if (Math.abs(address) <= FormatSpec.UINT16_MAX) {
            return 2;
        } else {
            return 3;
        }
    }

    // End utility methods

    // This method is responsible for finding a nice ordering of the nodes that favors run-time
    // cache performance and dictionary size.
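    // Note that the implementation below simply performs a pre-order, depth-first traversal of
    // the node tree: each node is appended to the list before its children (see flattenTreeInner).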
    /* package for tests */ static ArrayList<Node> flattenTree(final Node root) {
        final int treeSize = FusionDictionary.countCharGroups(root);
        MakedictLog.i("Counted nodes : " + treeSize);
        final ArrayList<Node> flatTree = new ArrayList<Node>(treeSize);
        return flattenTreeInner(flatTree, root);
    }

    private static ArrayList<Node> flattenTreeInner(final ArrayList<Node> list, final Node node) {
        // Removing the node is necessary if the tails are merged, because we would then
        // add the same node several times when we only want it once. A number of places in
        // the code also depend on any node being only once in the list.
        // Merging tails can only be done if there are no attributes. Searching for attributes
        // in LatinIME code depends on a total breadth-first ordering, which merging tails
        // breaks. If there are no attributes, it should be fine (and reduce the file size)
        // to merge tails, and removing the node from the list would be necessary. However,
        // we don't merge tails because breaking the breadth-first ordering would result in
        // extreme overhead at bigram lookup time (it would make the search function O(n) instead
        // of the current O(log(n)), where n=number of nodes in the dictionary, which is pretty
        // high).
        // If no nodes are ever merged, we can't have the same node twice in the list, hence
        // searching for duplicates is unnecessary. It is also very costly,
        // since `list' is an ArrayList so it's an O(n) operation that runs on all nodes, making
        // this simple list.remove operation O(n*n) overall. On Android this overhead is very
        // high.
        // For future reference, the code to remove duplicates is simply: list.remove(node);
        list.add(node);
        final ArrayList<CharGroup> branches = node.mData;
        final int nodeSize = branches.size();
        for (CharGroup group : branches) {
            if (null != group.mChildren) flattenTreeInner(list, group.mChildren);
        }
        return list;
    }

    /**
     * Get the offset from a position inside a current node to a target node, during update.
     *
     * If the current node is before the target node, the target node has not been updated yet,
     * so we should return the offset from the old position of the current node to the old position
     * of the target node. If on the other hand the target is before the current node, it already
     * has been updated, so we should return the offset from the new position in the current node
     * to the new position in the target node.
     * @param currentNode the node containing the CharGroup where the offset will be written
     * @param offsetFromStartOfCurrentNode the offset, in bytes, from the start of currentNode
     * @param targetNode the target node to get the offset to
     * @return the offset to the target node
     */
    private static int getOffsetToTargetNodeDuringUpdate(final Node currentNode,
            final int offsetFromStartOfCurrentNode, final Node targetNode) {
        final boolean isTargetBeforeCurrent = (targetNode.mCachedAddressBeforeUpdate
                < currentNode.mCachedAddressBeforeUpdate);
        if (isTargetBeforeCurrent) {
            return targetNode.mCachedAddressAfterUpdate
                    - (currentNode.mCachedAddressAfterUpdate + offsetFromStartOfCurrentNode);
        } else {
            return targetNode.mCachedAddressBeforeUpdate
                    - (currentNode.mCachedAddressBeforeUpdate + offsetFromStartOfCurrentNode);
        }
    }

    /**
     * Get the offset from a position inside a current node to a target CharGroup, during update.
     * @param currentNode the node containing the CharGroup where the offset will be written
     * @param offsetFromStartOfCurrentNode the offset, in bytes, from the start of currentNode
     * @param targetCharGroup the target CharGroup to get the offset to
     * @return the offset to the target CharGroup
     */
    // TODO: is there any way to factorize this method with the one above?
    private static int getOffsetToTargetCharGroupDuringUpdate(final Node currentNode,
            final int offsetFromStartOfCurrentNode, final CharGroup targetCharGroup) {
        final int oldOffsetBasePoint = currentNode.mCachedAddressBeforeUpdate
                + offsetFromStartOfCurrentNode;
        final boolean isTargetBeforeCurrent = (targetCharGroup.mCachedAddressBeforeUpdate
                < oldOffsetBasePoint);
        // If the target is before the current node, then its address has already been updated.
        // We can use the AfterUpdate member, and compare it to our own member after update.
        // Otherwise, the AfterUpdate member is not updated yet, so we need to use the BeforeUpdate
        // member, and of course we have to compare this to our own address before update.
        if (isTargetBeforeCurrent) {
            final int newOffsetBasePoint = currentNode.mCachedAddressAfterUpdate
                    + offsetFromStartOfCurrentNode;
            return targetCharGroup.mCachedAddressAfterUpdate - newOffsetBasePoint;
        } else {
            return targetCharGroup.mCachedAddressBeforeUpdate - oldOffsetBasePoint;
        }
    }

    /**
     * Computes the actual node size, based on the cached addresses of the children nodes.
     *
     * Each node stores its tentative address. During dictionary address computation, these
     * are not final, but they can be used to compute the node size (the node size depends
     * on the address of the children because the number of bytes necessary to store an
     * address depends on its numeric value). The return value indicates whether the node
     * contents (as in, any of the addresses stored in the cache fields) have changed with
     * respect to their previous value.
     *
     * @param node the node to compute the size of.
     * @param dict the dictionary in which the word/attributes are to be found.
     * @param formatOptions file format options.
     * @return false if none of the cached addresses inside the node changed, true otherwise.
     */
    private static boolean computeActualNodeSize(final Node node, final FusionDictionary dict,
            final FormatOptions formatOptions) {
        boolean changed = false;
        int size = getGroupCountSize(node);
        for (CharGroup group : node.mData) {
            group.mCachedAddressAfterUpdate = node.mCachedAddressAfterUpdate + size;
            if (group.mCachedAddressAfterUpdate != group.mCachedAddressBeforeUpdate) {
                changed = true;
            }
            int groupSize = getGroupHeaderSize(group, formatOptions);
            if (group.isTerminal()) groupSize += FormatSpec.GROUP_FREQUENCY_SIZE;
            if (null == group.mChildren && formatOptions.mSupportsDynamicUpdate) {
                groupSize += FormatSpec.SIGNED_CHILDREN_ADDRESS_SIZE;
            } else if (null != group.mChildren) {
                if (formatOptions.mSupportsDynamicUpdate) {
                    groupSize += FormatSpec.SIGNED_CHILDREN_ADDRESS_SIZE;
                } else {
                    groupSize += getByteSize(getOffsetToTargetNodeDuringUpdate(node,
                            groupSize + size, group.mChildren));
                }
            }
            groupSize += getShortcutListSize(group.mShortcutTargets);
            if (null != group.mBigrams) {
                for (WeightedString bigram : group.mBigrams) {
                    final int offset = getOffsetToTargetCharGroupDuringUpdate(node,
                            groupSize + size + FormatSpec.GROUP_FLAGS_SIZE,
                            FusionDictionary.findWordInTree(dict.mRoot, bigram.mWord));
                    groupSize += getByteSize(offset) + FormatSpec.GROUP_FLAGS_SIZE;
                }
            }
            group.mCachedSize = groupSize;
            size += groupSize;
        }
        if (formatOptions.mSupportsDynamicUpdate) {
            size += FormatSpec.FORWARD_LINK_ADDRESS_SIZE;
        }
        if (node.mCachedSize != size) {
            node.mCachedSize = size;
            changed = true;
        }
        return changed;
    }

    /**
     * Initializes the cached addresses of nodes from their size.
     *
     * @param flatNodes the array of nodes.
     * @param formatOptions file format options.
     * @return the byte size of the entire array.
     */
    private static int initializeNodesCachedAddresses(final ArrayList<Node> flatNodes,
            final FormatOptions formatOptions) {
        int nodeOffset = 0;
        for (final Node n : flatNodes) {
            n.mCachedAddressBeforeUpdate = nodeOffset;
            int groupCountSize = getGroupCountSize(n);
            int groupOffset = 0;
            for (final CharGroup g : n.mData) {
                g.mCachedAddressBeforeUpdate = g.mCachedAddressAfterUpdate =
                        groupCountSize + nodeOffset + groupOffset;
                groupOffset += g.mCachedSize;
            }
            final int nodeSize = groupCountSize + groupOffset
                    + (formatOptions.mSupportsDynamicUpdate
                            ? FormatSpec.FORWARD_LINK_ADDRESS_SIZE : 0);
            nodeOffset += n.mCachedSize;
        }
        return nodeOffset;
    }

    /**
     * Updates the cached addresses of nodes after recomputing their new positions.
     *
     * @param flatNodes the array of nodes.
     */
    private static void updateNodeCachedAddresses(final ArrayList<Node> flatNodes) {
        for (final Node n : flatNodes) {
            n.mCachedAddressBeforeUpdate = n.mCachedAddressAfterUpdate;
            for (final CharGroup g : n.mData) {
                g.mCachedAddressBeforeUpdate = g.mCachedAddressAfterUpdate;
            }
        }
    }

    /**
     * Compute the cached parent addresses after all has been updated.
     *
     * The parent addresses are used by some binary formats at write-to-disk time. Not all formats
     * need them. In particular, version 2 does not need them, and version 3 does.
     *
     * @param flatNodes the flat array of nodes to fill in
     */
    private static void computeParentAddresses(final ArrayList<Node> flatNodes) {
        for (final Node node : flatNodes) {
            for (final CharGroup group : node.mData) {
                if (null != group.mChildren) {
                    // Assign my address to children's parent address
                    // Here BeforeUpdate and AfterUpdate addresses have the same value, so it
                    // does not matter which we use.
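                    // For instance (hypothetical addresses): a group cached at 0x120 whose
                    // children node sits at 0x1A0 gets a parent address of 0x120 - 0x1A0 = -0x80,
                    // i.e. a backward-pointing offset; writeParentAddress() stores the sign bit
                    // separately when it writes this value out.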
                    group.mChildren.mCachedParentAddress = group.mCachedAddressAfterUpdate
                            - group.mChildren.mCachedAddressAfterUpdate;
                }
            }
        }
    }

    /**
     * Compute the addresses and sizes of an ordered node array.
     *
     * This method takes a node array and will update its cached address and size values
     * so that they can be written into a file. It determines the smallest size each of the
     * nodes can be, given the addresses of its children and attributes, and stores that into
     * each node.
     * The order of the nodes is given by the order of the array. This method makes no effort
     * to find a good order; it only mechanically computes the size this order results in.
     *
     * @param dict the dictionary
     * @param flatNodes the ordered array of nodes
     * @param formatOptions file format options.
     * @return the same array it was passed. The nodes have been updated for address and size.
     */
    private static ArrayList<Node> computeAddresses(final FusionDictionary dict,
            final ArrayList<Node> flatNodes, final FormatOptions formatOptions) {
        // First get the worst possible sizes and offsets
        for (final Node n : flatNodes) calculateNodeMaximumSize(n, formatOptions);
        final int offset = initializeNodesCachedAddresses(flatNodes, formatOptions);

        MakedictLog.i("Compressing the array addresses. Original size : " + offset);
        MakedictLog.i("(Recursively seen size : " + offset + ")");

        int passes = 0;
        boolean changesDone = false;
        do {
            changesDone = false;
            int nodeStartOffset = 0;
            for (final Node n : flatNodes) {
                n.mCachedAddressAfterUpdate = nodeStartOffset;
                final int oldNodeSize = n.mCachedSize;
                final boolean changed = computeActualNodeSize(n, dict, formatOptions);
                final int newNodeSize = n.mCachedSize;
                if (oldNodeSize < newNodeSize) throw new RuntimeException("Increased size ?!");
                nodeStartOffset += newNodeSize;
                changesDone |= changed;
            }
            updateNodeCachedAddresses(flatNodes);
            ++passes;
            if (passes > MAX_PASSES) throw new RuntimeException("Too many passes - probably a bug");
        } while (changesDone);

        if (formatOptions.mSupportsDynamicUpdate) {
            computeParentAddresses(flatNodes);
        }
        final Node lastNode = flatNodes.get(flatNodes.size() - 1);
        MakedictLog.i("Compression complete in " + passes + " passes.");
        MakedictLog.i("After address compression : "
                + (lastNode.mCachedAddressAfterUpdate + lastNode.mCachedSize));
        return flatNodes;
    }

    /**
     * Sanity-checking method.
     *
     * This method checks an array of nodes for juxtaposition, that is, it will do
     * nothing if each node's cached address is actually the previous node's address
     * plus the previous node's size.
     * If this is not the case, it will throw an exception.
     *
     * @param array the node array to check
     */
    private static void checkFlatNodeArray(final ArrayList<Node> array) {
        int offset = 0;
        int index = 0;
        for (final Node n : array) {
            // BeforeUpdate and AfterUpdate addresses are the same here, so it does not matter
            // which we use.
            if (n.mCachedAddressAfterUpdate != offset) {
                throw new RuntimeException("Wrong address for node " + index
                        + " : expected " + offset + ", got " + n.mCachedAddressAfterUpdate);
            }
            ++index;
            offset += n.mCachedSize;
        }
    }

    /**
     * Helper method to write a variable-size address to a file.
     *
     * @param buffer the buffer to write to.
     * @param index the index in the buffer to write the address to.
     * @param address the address to write.
     * @return the size in bytes the address actually took.
     */
    private static int writeVariableAddress(final byte[] buffer, int index, final int address) {
        switch (getByteSize(address)) {
        case 1:
            buffer[index++] = (byte)address;
            return 1;
        case 2:
            buffer[index++] = (byte)(0xFF & (address >> 8));
            buffer[index++] = (byte)(0xFF & address);
            return 2;
        case 3:
            buffer[index++] = (byte)(0xFF & (address >> 16));
            buffer[index++] = (byte)(0xFF & (address >> 8));
            buffer[index++] = (byte)(0xFF & address);
            return 3;
        case 0:
            return 0;
        default:
            throw new RuntimeException("Address " + address + " has a strange size");
        }
    }

    /**
     * Helper method to write a variable-size signed address to a file.
     *
     * @param buffer the buffer to write to.
     * @param index the index in the buffer to write the address to.
     * @param address the address to write.
     * @return the size in bytes the address actually took.
     */
    private static int writeVariableSignedAddress(final byte[] buffer, int index,
            final int address) {
        if (!BinaryDictIOUtils.hasChildrenAddress(address)) {
            buffer[index] = buffer[index + 1] = buffer[index + 2] = 0;
        } else {
            final int absAddress = Math.abs(address);
            buffer[index++] = (byte)((address < 0 ? FormatSpec.MSB8 : 0)
                    | (0xFF & (absAddress >> 16)));
            buffer[index++] = (byte)(0xFF & (absAddress >> 8));
            buffer[index++] = (byte)(0xFF & absAddress);
        }
        return 3;
    }

    /**
     * Makes the flag value for a char group.
     *
     * @param hasMultipleChars whether the group has multiple chars.
     * @param isTerminal whether the group is terminal.
     * @param childrenAddressSize the size of a children address.
     * @param hasShortcuts whether the group has shortcuts.
     * @param hasBigrams whether the group has bigrams.
     * @param isNotAWord whether the group is not a word.
     * @param isBlackListEntry whether the group is a blacklist entry.
     * @param formatOptions file format options.
     * @return the flags
     */
    static int makeCharGroupFlags(final boolean hasMultipleChars, final boolean isTerminal,
            final int childrenAddressSize, final boolean hasShortcuts, final boolean hasBigrams,
            final boolean isNotAWord, final boolean isBlackListEntry,
            final FormatOptions formatOptions) {
        byte flags = 0;
        if (hasMultipleChars) flags |= FormatSpec.FLAG_HAS_MULTIPLE_CHARS;
        if (isTerminal) flags |= FormatSpec.FLAG_IS_TERMINAL;
        if (formatOptions.mSupportsDynamicUpdate) {
            flags |= FormatSpec.FLAG_IS_NOT_MOVED;
        } else {
            switch (childrenAddressSize) {
            case 1:
                flags |= FormatSpec.FLAG_GROUP_ADDRESS_TYPE_ONEBYTE;
                break;
            case 2:
                flags |= FormatSpec.FLAG_GROUP_ADDRESS_TYPE_TWOBYTES;
                break;
            case 3:
                flags |= FormatSpec.FLAG_GROUP_ADDRESS_TYPE_THREEBYTES;
                break;
            case 0:
                flags |= FormatSpec.FLAG_GROUP_ADDRESS_TYPE_NOADDRESS;
                break;
            default:
                throw new RuntimeException("Node with a strange address");
            }
        }
        if (hasShortcuts) flags |= FormatSpec.FLAG_HAS_SHORTCUT_TARGETS;
        if (hasBigrams) flags |= FormatSpec.FLAG_HAS_BIGRAMS;
        if (isNotAWord) flags |= FormatSpec.FLAG_IS_NOT_A_WORD;
        if (isBlackListEntry) flags |= FormatSpec.FLAG_IS_BLACKLISTED;
        return flags;
    }

    private static byte makeCharGroupFlags(final CharGroup group, final int groupAddress,
            final int childrenOffset, final FormatOptions formatOptions) {
        return (byte) makeCharGroupFlags(group.mChars.length > 1, group.mFrequency >= 0,
                getByteSize(childrenOffset), group.mShortcutTargets != null,
                group.mBigrams != null, group.mIsNotAWord, group.mIsBlacklistEntry, formatOptions);
    }

    /**
     * Makes the flag value for a bigram.
     *
     * @param more whether there are more bigrams after this one.
     * @param offset the offset of the bigram.
     * @param bigramFrequency the frequency of the bigram, 0..255.
     * @param unigramFrequency the unigram frequency of the same word, 0..255.
     * @param word the second word of the bigram, for debugging purposes
     * @return the flags
     */
    private static final int makeBigramFlags(final boolean more, final int offset,
            int bigramFrequency, final int unigramFrequency, final String word) {
        int bigramFlags = (more ? FormatSpec.FLAG_ATTRIBUTE_HAS_NEXT : 0)
                + (offset < 0 ? FormatSpec.FLAG_ATTRIBUTE_OFFSET_NEGATIVE : 0);
        switch (getByteSize(offset)) {
        case 1:
            bigramFlags |= FormatSpec.FLAG_ATTRIBUTE_ADDRESS_TYPE_ONEBYTE;
            break;
        case 2:
            bigramFlags |= FormatSpec.FLAG_ATTRIBUTE_ADDRESS_TYPE_TWOBYTES;
            break;
        case 3:
            bigramFlags |= FormatSpec.FLAG_ATTRIBUTE_ADDRESS_TYPE_THREEBYTES;
            break;
        default:
            throw new RuntimeException("Strange offset size");
        }
        if (unigramFrequency > bigramFrequency) {
            MakedictLog.e("Unigram freq is higher than bigram freq for \"" + word
                    + "\". Bigram freq is " + bigramFrequency + ", unigram freq for " + word
                    + " is " + unigramFrequency);
            bigramFrequency = unigramFrequency;
        }
        // We compute the difference between 255 (which means probability = 1) and the
        // unigram score. We split this into a number of discrete steps.
        // Now, the steps are numbered 0~15; 0 represents an increase of 1 step while 15
        // represents an increase of 16 steps: a value of 15 will be interpreted as the median
        // value of the 16th step. In fairness, if the bigram frequency is low enough to be
        // rounded below the first step (which means it is less than half a step higher than the
        // unigram frequency) then the unigram frequency itself is the best approximation of the
        // bigram freq that we could possibly supply, hence we should *not* include this bigram
        // in the file at all.
        // Until this is done, we'll write 0 and slightly overestimate this case.
        // In other words, 0 means "between 0.5 step and 1.5 step", 1 means "between 1.5 step
        // and 2.5 steps", and 15 means "between 15.5 steps and 16.5 steps". So we want to
        // divide our range [unigramFreq..MAX_TERMINAL_FREQUENCY] into 16.5 steps to get the
        // step size. Then we compute the start of the first step (the one where value 0 starts)
        // by adding half-a-step to the unigramFrequency. From there, we compute the integer
        // number of steps to the bigramFrequency. One last thing: we want our steps to include
        // their lower bound and exclude their upper bound, so we need to have the first step
        // start at exactly 1 unit higher than floor(unigramFreq + half a step).
        // Note : to reconstruct the score, the dictionary reader will need to divide
        // MAX_TERMINAL_FREQUENCY - unigramFreq by 16.5 likewise to get the value of the step,
        // and add (discretizedFrequency + 0.5 + 0.5) times this value to get the best
        // approximation. (0.5 to get the first step start, and 0.5 to get the middle of the
        // step pointed to by the discretized frequency.)
        // For example, with unigramFreq = 90 and MAX_TERMINAL_FREQUENCY = 255, the step size is
        // (255 - 90) / 16.5 = 10 and the first step starts at 96; a bigram frequency of 140 is
        // then discretized to (140 - 96) / 10 = 4, which the reader turns back into
        // 90 + (4 + 1) * 10 = 140.
        final float stepSize = (FormatSpec.MAX_TERMINAL_FREQUENCY - unigramFrequency)
                / (1.5f + FormatSpec.MAX_BIGRAM_FREQUENCY);
        final float firstStepStart = 1 + unigramFrequency + (stepSize / 2.0f);
        final int discretizedFrequency = (int)((bigramFrequency - firstStepStart) / stepSize);
        // If the bigram freq is less than half-a-step higher than the unigram freq, we get -1
        // here. The best approximation would be the unigram freq itself, so we should not
        // include this bigram in the dictionary. For now, register as 0, and live with the
        // small over-estimation that we get in this case. TODO: actually remove this bigram
        // if discretizedFrequency < 0.
        final int finalBigramFrequency = discretizedFrequency > 0 ?
                discretizedFrequency : 0;
        bigramFlags += finalBigramFrequency & FormatSpec.FLAG_ATTRIBUTE_FREQUENCY;
        return bigramFlags;
    }

    /**
     * Makes the 2-byte value for options flags.
     */
    private static final int makeOptionsValue(final FusionDictionary dictionary,
            final FormatOptions formatOptions) {
        final DictionaryOptions options = dictionary.mOptions;
        final boolean hasBigrams = dictionary.hasBigrams();
        return (options.mFrenchLigatureProcessing ? FormatSpec.FRENCH_LIGATURE_PROCESSING_FLAG : 0)
                + (options.mGermanUmlautProcessing ? FormatSpec.GERMAN_UMLAUT_PROCESSING_FLAG : 0)
                + (hasBigrams ? FormatSpec.CONTAINS_BIGRAMS_FLAG : 0)
                + (formatOptions.mSupportsDynamicUpdate ? FormatSpec.SUPPORTS_DYNAMIC_UPDATE : 0);
    }

    /**
     * Makes the flag value for a shortcut.
     *
     * @param more whether there are more attributes after this one.
     * @param frequency the frequency of the attribute, 0..15
     * @return the flags
     */
    static final int makeShortcutFlags(final boolean more, final int frequency) {
        return (more ? FormatSpec.FLAG_ATTRIBUTE_HAS_NEXT : 0)
                + (frequency & FormatSpec.FLAG_ATTRIBUTE_FREQUENCY);
    }

    private static final int writeParentAddress(final byte[] buffer, final int index,
            final int address, final FormatOptions formatOptions) {
        if (BinaryDictIOUtils.supportsDynamicUpdate(formatOptions)) {
            if (address == FormatSpec.NO_PARENT_ADDRESS) {
                buffer[index] = buffer[index + 1] = buffer[index + 2] = 0;
            } else {
                final int absAddress = Math.abs(address);
                assert(absAddress <= FormatSpec.SINT24_MAX);
                buffer[index] = (byte)((address < 0 ? FormatSpec.MSB8 : 0)
                        | ((absAddress >> 16) & 0xFF));
                buffer[index + 1] = (byte)((absAddress >> 8) & 0xFF);
                buffer[index + 2] = (byte)(absAddress & 0xFF);
            }
            return index + 3;
        } else {
            return index;
        }
    }

    /**
     * Write a node to memory. The node is expected to have its final position cached.
     *
     * @param dict the dictionary the node is a part of (for relative offsets).
     * @param buffer the memory buffer to write to.
     * @param node the node to write.
     * @param formatOptions file format options.
     * @return the address of the END of the node.
     */
    @SuppressWarnings("unused")
    private static int writePlacedNode(final FusionDictionary dict, byte[] buffer,
            final Node node, final FormatOptions formatOptions) {
        // TODO: Make the code in common with BinaryDictIOUtils#writeCharGroup
        int index = node.mCachedAddressAfterUpdate;

        final int groupCount = node.mData.size();
        final int countSize = getGroupCountSize(node);
        final int parentAddress = node.mCachedParentAddress;
        if (1 == countSize) {
            buffer[index++] = (byte)groupCount;
        } else if (2 == countSize) {
            // We need to signal 2-byte size by setting the top bit of the MSB to 1, so
            // we | 0x80 to do this.
            buffer[index++] = (byte)((groupCount >> 8) | 0x80);
            buffer[index++] = (byte)(groupCount & 0xFF);
        } else {
            throw new RuntimeException("Strange size from getGroupCountSize : " + countSize);
        }
        int groupAddress = index;
        for (int i = 0; i < groupCount; ++i) {
            final CharGroup group = node.mData.get(i);
            if (index != group.mCachedAddressAfterUpdate) {
                throw new RuntimeException("Bug: write index is not the same as the cached address "
                        + "of the group : " + index + " <> " + group.mCachedAddressAfterUpdate);
            }
            groupAddress += getGroupHeaderSize(group, formatOptions);
            // Sanity checks.
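            // (Note: 'groupAddress' runs ahead of 'index' from this point on -- it already
            // counts this group's header and, below, its frequency byte -- so the children and
            // bigram offsets are computed relative to the position of the address field that
            // holds them.)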
            if (DBG && group.mFrequency > FormatSpec.MAX_TERMINAL_FREQUENCY) {
                throw new RuntimeException("A node has a frequency > "
                        + FormatSpec.MAX_TERMINAL_FREQUENCY
                        + " : " + group.mFrequency);
            }
            if (group.mFrequency >= 0) groupAddress += FormatSpec.GROUP_FREQUENCY_SIZE;
            final int childrenOffset = null == group.mChildren
                    ? FormatSpec.NO_CHILDREN_ADDRESS
                    : group.mChildren.mCachedAddressAfterUpdate - groupAddress;
            buffer[index++] = makeCharGroupFlags(group, groupAddress, childrenOffset,
                    formatOptions);

            if (parentAddress == FormatSpec.NO_PARENT_ADDRESS) {
                index = writeParentAddress(buffer, index, parentAddress, formatOptions);
            } else {
                index = writeParentAddress(buffer, index, parentAddress
                        + (node.mCachedAddressAfterUpdate - group.mCachedAddressAfterUpdate),
                        formatOptions);
            }

            index = CharEncoding.writeCharArray(group.mChars, buffer, index);
            if (group.hasSeveralChars()) {
                buffer[index++] = FormatSpec.GROUP_CHARACTERS_TERMINATOR;
            }
            if (group.mFrequency >= 0) {
                buffer[index++] = (byte) group.mFrequency;
            }

            final int shift;
            if (formatOptions.mSupportsDynamicUpdate) {
                shift = writeVariableSignedAddress(buffer, index, childrenOffset);
            } else {
                shift = writeVariableAddress(buffer, index, childrenOffset);
            }
            index += shift;
            groupAddress += shift;

            // Write shortcuts
            if (null != group.mShortcutTargets) {
                final int indexOfShortcutByteSize = index;
                index += FormatSpec.GROUP_SHORTCUT_LIST_SIZE_SIZE;
                groupAddress += FormatSpec.GROUP_SHORTCUT_LIST_SIZE_SIZE;
                final Iterator<WeightedString> shortcutIterator =
                        group.mShortcutTargets.iterator();
                while (shortcutIterator.hasNext()) {
                    final WeightedString target = shortcutIterator.next();
                    ++groupAddress;
                    int shortcutFlags = makeShortcutFlags(shortcutIterator.hasNext(),
                            target.mFrequency);
                    buffer[index++] = (byte)shortcutFlags;
                    final int shortcutShift = CharEncoding.writeString(buffer, index,
                            target.mWord);
                    index += shortcutShift;
                    groupAddress += shortcutShift;
                }
                final int shortcutByteSize = index - indexOfShortcutByteSize;
                if (shortcutByteSize > 0xFFFF) {
                    throw new RuntimeException("Shortcut list too large");
                }
                buffer[indexOfShortcutByteSize] = (byte)(shortcutByteSize >> 8);
                buffer[indexOfShortcutByteSize + 1] = (byte)(shortcutByteSize & 0xFF);
            }
            // Write bigrams
            if (null != group.mBigrams) {
                final Iterator<WeightedString> bigramIterator = group.mBigrams.iterator();
                while (bigramIterator.hasNext()) {
                    final WeightedString bigram = bigramIterator.next();
                    final CharGroup target =
                            FusionDictionary.findWordInTree(dict.mRoot, bigram.mWord);
                    final int addressOfBigram = target.mCachedAddressAfterUpdate;
                    final int unigramFrequencyForThisWord = target.mFrequency;
                    ++groupAddress;
                    final int offset = addressOfBigram - groupAddress;
                    int bigramFlags = makeBigramFlags(bigramIterator.hasNext(), offset,
                            bigram.mFrequency, unigramFrequencyForThisWord, bigram.mWord);
                    buffer[index++] = (byte)bigramFlags;
                    final int bigramShift = writeVariableAddress(buffer, index, Math.abs(offset));
                    index += bigramShift;
                    groupAddress += bigramShift;
                }
            }
        }
        if (formatOptions.mSupportsDynamicUpdate) {
            buffer[index] = buffer[index + 1] = buffer[index + 2]
                    = FormatSpec.NO_FORWARD_LINK_ADDRESS;
            index += FormatSpec.FORWARD_LINK_ADDRESS_SIZE;
        }
        if (index != node.mCachedAddressAfterUpdate + node.mCachedSize) throw new RuntimeException(
                "Not the same size : written "
                + (index - node.mCachedAddressAfterUpdate) + " bytes from a node that should have "
                + node.mCachedSize + " bytes");
        return index;
    }

    /**
     * Dumps a collection of useful statistics about a node array.
     *
     * This prints purely informative stuff, like the total estimated file size, the
     * number of nodes and of character groups, the distribution of address sizes, etc.
     *
     * @param nodes the node array.
     */
    private static void showStatistics(ArrayList<Node> nodes) {
        int firstTerminalAddress = Integer.MAX_VALUE;
        int lastTerminalAddress = Integer.MIN_VALUE;
        int size = 0;
        int charGroups = 0;
        int maxGroups = 0;
        int maxRuns = 0;
        for (final Node n : nodes) {
            if (maxGroups < n.mData.size()) maxGroups = n.mData.size();
            for (final CharGroup cg : n.mData) {
                ++charGroups;
                if (cg.mChars.length > maxRuns) maxRuns = cg.mChars.length;
                if (cg.mFrequency >= 0) {
                    if (n.mCachedAddressAfterUpdate < firstTerminalAddress)
                        firstTerminalAddress = n.mCachedAddressAfterUpdate;
                    if (n.mCachedAddressAfterUpdate > lastTerminalAddress)
                        lastTerminalAddress = n.mCachedAddressAfterUpdate;
                }
            }
            if (n.mCachedAddressAfterUpdate + n.mCachedSize > size) {
                size = n.mCachedAddressAfterUpdate + n.mCachedSize;
            }
        }
        final int[] groupCounts = new int[maxGroups + 1];
        final int[] runCounts = new int[maxRuns + 1];
        for (final Node n : nodes) {
            ++groupCounts[n.mData.size()];
            for (final CharGroup cg : n.mData) {
                ++runCounts[cg.mChars.length];
            }
        }

        MakedictLog.i("Statistics:\n"
                + " total file size " + size + "\n"
                + " " + nodes.size() + " nodes\n"
                + " " + charGroups + " groups (" + ((float)charGroups / nodes.size())
                        + " groups per node)\n"
                + " first terminal at " + firstTerminalAddress + "\n"
                + " last terminal at " + lastTerminalAddress + "\n"
                + " Group stats : max = " + maxGroups);
        for (int i = 0; i < groupCounts.length; ++i) {
            MakedictLog.i(" " + i + " : " + groupCounts[i]);
        }
        MakedictLog.i(" Character run stats : max = " + maxRuns);
        for (int i = 0; i < runCounts.length; ++i) {
            MakedictLog.i(" " + i + " : " + runCounts[i]);
        }
    }

    /**
     * Dumps a FusionDictionary to a file.
     *
     * This is the public entry point to write a dictionary to a file.
     *
     * @param destination the stream to write the binary data to.
     * @param dict the dictionary to write.
     * @param formatOptions file format options.
     */
    public static void writeDictionaryBinary(final OutputStream destination,
            final FusionDictionary dict, final FormatOptions formatOptions)
            throws IOException, UnsupportedFormatException {
        // Addresses are limited to 3 bytes, but since addresses can be relative to each node, the
        // structure itself is not limited to 16MB. However, if it is over 16MB, deciding the order
        // of the nodes becomes a quite complicated problem, because though the dictionary itself
        // does not have a size limit, each node must still be within 16MB of all its children and
        // parents. As long as this is ensured, the dictionary file may grow to any size.

        final int version = formatOptions.mVersion;
        if (version < FormatSpec.MINIMUM_SUPPORTED_VERSION
                || version > FormatSpec.MAXIMUM_SUPPORTED_VERSION) {
            throw new UnsupportedFormatException("Requested file format version " + version
                    + ", but this implementation only supports versions "
                    + FormatSpec.MINIMUM_SUPPORTED_VERSION + " through "
                    + FormatSpec.MAXIMUM_SUPPORTED_VERSION);
        }

        ByteArrayOutputStream headerBuffer = new ByteArrayOutputStream(256);

        // The magic number in big-endian order.
        // Magic number for all versions.
        headerBuffer.write((byte) (0xFF & (FormatSpec.MAGIC_NUMBER >> 24)));
        headerBuffer.write((byte) (0xFF & (FormatSpec.MAGIC_NUMBER >> 16)));
        headerBuffer.write((byte) (0xFF & (FormatSpec.MAGIC_NUMBER >> 8)));
        headerBuffer.write((byte) (0xFF & FormatSpec.MAGIC_NUMBER));
        // Dictionary version.
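        // (Header layout, for reference: the 4-byte magic number above, then the 2-byte format
        // version, a 2-byte options bitmap, a 4-byte total header size that is back-filled once
        // the final size is known, and finally the key/value attribute strings. All multi-byte
        // fields are big-endian.)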
        headerBuffer.write((byte) (0xFF & (version >> 8)));
        headerBuffer.write((byte) (0xFF & version));
        // Options flags
        final int options = makeOptionsValue(dict, formatOptions);
        headerBuffer.write((byte) (0xFF & (options >> 8)));
        headerBuffer.write((byte) (0xFF & options));
        final int headerSizeOffset = headerBuffer.size();
        // Placeholder to be written later with header size.
        for (int i = 0; i < 4; ++i) {
            headerBuffer.write(0);
        }
        // Write out the options.
        for (final String key : dict.mOptions.mAttributes.keySet()) {
            final String value = dict.mOptions.mAttributes.get(key);
            CharEncoding.writeString(headerBuffer, key);
            CharEncoding.writeString(headerBuffer, value);
        }
        final int size = headerBuffer.size();
        final byte[] bytes = headerBuffer.toByteArray();
        // Write out the header size.
        bytes[headerSizeOffset] = (byte) (0xFF & (size >> 24));
        bytes[headerSizeOffset + 1] = (byte) (0xFF & (size >> 16));
        bytes[headerSizeOffset + 2] = (byte) (0xFF & (size >> 8));
        bytes[headerSizeOffset + 3] = (byte) (0xFF & (size >> 0));
        destination.write(bytes);

        headerBuffer.close();

        // Leave the choice of the optimal node order to the flattenTree function.
        MakedictLog.i("Flattening the tree...");
        ArrayList<Node> flatNodes = flattenTree(dict.mRoot);

        MakedictLog.i("Computing addresses...");
        computeAddresses(dict, flatNodes, formatOptions);
        MakedictLog.i("Checking array...");
        if (DBG) checkFlatNodeArray(flatNodes);

        // Create a buffer that matches the final dictionary size.
        final Node lastNode = flatNodes.get(flatNodes.size() - 1);
        final int bufferSize = lastNode.mCachedAddressAfterUpdate + lastNode.mCachedSize;
        final byte[] buffer = new byte[bufferSize];
        int index = 0;

        MakedictLog.i("Writing file...");
        int dataEndOffset = 0;
        for (Node n : flatNodes) {
            dataEndOffset = writePlacedNode(dict, buffer, n, formatOptions);
        }

        if (DBG) showStatistics(flatNodes);

        destination.write(buffer, 0, dataEndOffset);

        destination.close();
        MakedictLog.i("Done");
    }
}
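
// A minimal usage sketch for the public entry point above. The output path and the FormatOptions
// constructor taking just a version number are assumptions here, not something this file defines;
// note that writeDictionaryBinary() closes the destination stream itself.
//
//     final FusionDictionary dict = ...;  // built and populated elsewhere
//     final FormatOptions formatOptions = new FormatOptions(2);  // assumed constructor
//     final OutputStream destination = new FileOutputStream("main.dict");  // hypothetical path
//     BinaryDictEncoder.writeDictionaryBinary(destination, dict, formatOptions);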