diff --git a/src/java/org/apache/poi/hssf/model/Workbook.java b/src/java/org/apache/poi/hssf/model/Workbook.java index 2be0c42d9..b52394a18 100644 --- a/src/java/org/apache/poi/hssf/model/Workbook.java +++ b/src/java/org/apache/poi/hssf/model/Workbook.java @@ -658,11 +658,11 @@ public class Workbook { public int serialize(int offset, byte [] data) { log.log(DEBUG, "Serializing Workbook with offsets"); - + // ArrayList bytes = new ArrayList(records.size()); // int arraysize = getSize(); // 0; int pos = 0; - + // for (int k = 0; k < records.size(); k++) // { // bytes.add((( Record ) records.get(k)).serialize()); diff --git a/src/java/org/apache/poi/hssf/record/ContinueRecord.java b/src/java/org/apache/poi/hssf/record/ContinueRecord.java index 2b67a62d4..5017ade92 100644 --- a/src/java/org/apache/poi/hssf/record/ContinueRecord.java +++ b/src/java/org/apache/poi/hssf/record/ContinueRecord.java @@ -161,9 +161,7 @@ public class ContinueRecord // how many continue records do we need // System.out.println("In ProcessContinue"); - int records = - (data.length - / 8214); // we've a 1 offset but we're also off by one due to rounding...so it balances out + int records = (data.length / 8214); // we've a 1 offset but we're also off by one due to rounding...so it balances out int offset = 8214; // System.out.println("we have "+records+" continue records to process"); @@ -174,8 +172,7 @@ public class ContinueRecord for (int cr = 0; cr < records; cr++) { ContinueRecord contrec = new ContinueRecord(); - int arraysize = Math.min((8214 - 4), - (data.length - offset)); + int arraysize = Math.min((8214 - 4), (data.length - offset)); byte[] crdata = new byte[ arraysize ]; System.arraycopy(data, offset, crdata, 0, arraysize); diff --git a/src/java/org/apache/poi/hssf/record/RecordProcessor.java b/src/java/org/apache/poi/hssf/record/RecordProcessor.java new file mode 100644 index 000000000..13bfc3a18 --- /dev/null +++ b/src/java/org/apache/poi/hssf/record/RecordProcessor.java @@ -0,0 +1,202 @@ +/* ==================================================================== + * The Apache Software License, Version 1.1 + * + * Copyright (c) 2002 The Apache Software Foundation. All rights + * reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The end-user documentation included with the redistribution, + * if any, must include the following acknowledgment: + * "This product includes software developed by the + * Apache Software Foundation (http://www.apache.org/)." + * Alternately, this acknowledgment may appear in the software itself, + * if and wherever such third-party acknowledgments normally appear. + * + * 4. The names "Apache" and "Apache Software Foundation" and + * "Apache POI" must not be used to endorse or promote products + * derived from this software without prior written permission. For + * written permission, please contact apache@apache.org. + * + * 5. 
Products derived from this software may not be called "Apache", + * "Apache POI", nor may "Apache" appear in their name, without + * prior written permission of the Apache Software Foundation. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * . + */ + +package org.apache.poi.hssf.record; + +import org.apache.poi.util.LittleEndianConsts; +import org.apache.poi.util.LittleEndian; + +/** + * Process a single record. That is, an SST record or a continue record. + * Refactored from code originally in SSTRecord. + * + * @author Glen Stampoultzis (glens at apache.org) + */ +class RecordProcessor +{ + private byte[] data; + private int recordOffset; + private int available; + private SSTRecordHeader sstRecordHeader; + + public RecordProcessor( byte[] data, int available, int numStrings, int numUniqueStrings ) + { + this.data = data; + this.available = available; + this.sstRecordHeader = new SSTRecordHeader(numStrings, numUniqueStrings); + } + + public int getAvailable() + { + return available; + } + + public void writeRecordHeader( int offset, int totalWritten, int recordLength, boolean first_record ) + { + if ( first_record ) + { + available -= 8; + recordOffset = sstRecordHeader.writeSSTHeader( data, recordOffset + offset + totalWritten, recordLength ); + } + else + { + recordOffset = writeContinueHeader( data, recordOffset + offset + totalWritten, recordLength ); + } + } + + public byte[] writeStringRemainder( boolean lastStringCompleted, byte[] stringreminant, int offset, int totalWritten ) + { + if ( !lastStringCompleted ) + { + // write reminant -- it'll all fit neatly + System.arraycopy( stringreminant, 0, data, recordOffset + offset + totalWritten, stringreminant.length ); + adjustPointers( stringreminant.length ); + } + else + { + // write as much of the remnant as possible + System.arraycopy( stringreminant, 0, data, recordOffset + offset + totalWritten, available ); + byte[] leftover = new byte[( stringreminant.length - available ) + LittleEndianConsts.BYTE_SIZE]; + + System.arraycopy( stringreminant, available, leftover, LittleEndianConsts.BYTE_SIZE, stringreminant.length - available ); + leftover[0] = stringreminant[0]; + stringreminant = leftover; + adjustPointers( available ); // Consume all available remaining space + } + return stringreminant; + } + + public void writeWholeString( UnicodeString unistr, int offset, int totalWritten ) + { + unistr.serialize( recordOffset + offset + totalWritten, data ); + int rsize = unistr.getRecordSize(); + adjustPointers( rsize ); + } + + public byte[] 
writePartString( UnicodeString unistr, int offset, int totalWritten ) + { + byte[] stringReminant; + byte[] ucs = unistr.serialize(); + + System.arraycopy( ucs, 0, data, recordOffset + offset + totalWritten, available ); + stringReminant = new byte[( ucs.length - available ) + LittleEndianConsts.BYTE_SIZE]; + System.arraycopy( ucs, available, stringReminant, LittleEndianConsts.BYTE_SIZE, ucs.length - available ); + stringReminant[0] = ucs[LittleEndianConsts.SHORT_SIZE]; + available = 0; + return stringReminant; + } + + + private int writeContinueHeader( final byte[] data, final int pos, + final int recsize ) + { + int offset = pos; + + LittleEndian.putShort( data, offset, ContinueRecord.sid ); + offset += LittleEndianConsts.SHORT_SIZE; + LittleEndian.putShort( data, offset, (short) ( recsize ) ); + offset += LittleEndianConsts.SHORT_SIZE; + return offset - pos; + } + + + private void adjustPointers( int amount ) + { + recordOffset += amount; + available -= amount; + } +} + +class SSTRecordHeader +{ + int numStrings; + int numUniqueStrings; + + /** + * + */ + public SSTRecordHeader( int numStrings, int numUniqueStrings ) + { + this.numStrings = numStrings; + this.numUniqueStrings = numUniqueStrings; + } + + /** + * Writes out the SST record. This consists of the sid, the record size, the number of + * strings and the number of unique strings. + * + * @param data The data buffer to write the header to. + * @param bufferIndex The index into the data buffer where the header should be written. + * @param recSize The number of records written. + * + * @return The bufer of bytes modified. + */ + public int writeSSTHeader( byte[] data, int bufferIndex, int recSize ) + { + int offset = bufferIndex; + + LittleEndian.putShort( data, offset, SSTRecord.sid ); + offset += LittleEndianConsts.SHORT_SIZE; + LittleEndian.putShort( data, offset, (short) ( recSize ) ); + offset += LittleEndianConsts.SHORT_SIZE; +// LittleEndian.putInt( data, offset, getNumStrings() ); + LittleEndian.putInt( data, offset, numStrings ); + offset += LittleEndianConsts.INT_SIZE; +// LittleEndian.putInt( data, offset, getNumUniqueStrings() ); + LittleEndian.putInt( data, offset, numUniqueStrings ); + offset += LittleEndianConsts.INT_SIZE; + return offset - bufferIndex; + } + +} \ No newline at end of file diff --git a/src/java/org/apache/poi/hssf/record/SSTDeserializer.java b/src/java/org/apache/poi/hssf/record/SSTDeserializer.java new file mode 100644 index 000000000..58b62c316 --- /dev/null +++ b/src/java/org/apache/poi/hssf/record/SSTDeserializer.java @@ -0,0 +1,357 @@ +package org.apache.poi.hssf.record; + +import org.apache.poi.util.LittleEndian; +import org.apache.poi.util.LittleEndianConsts; +import org.apache.poi.util.BinaryTree; +import org.apache.poi.util.HexDump; + +import java.io.IOException; + +class SSTDeserializer +{ + + private BinaryTree strings; + /** this is the number of characters we expect in the first sub-record in a subsequent continuation record */ + private int continuationExpectedChars; + /** this is the string we were working on before hitting the end of the current record. This string is NOT finished. 
*/ + private String unfinishedString; + /** this is the total length of the current string being handled */ + private int totalLengthBytes; + /** this is the offset into a string field of the actual string data */ + private int stringDataOffset; + /** this is true if the string uses wide characters */ + private boolean wideChar; + + + public SSTDeserializer(BinaryTree strings) + { + this.strings = strings; + setExpectedChars( 0 ); + unfinishedString = ""; + totalLengthBytes = 0; + stringDataOffset = 0; + wideChar = false; + } + + /** + * This is the starting point where strings are constructed. Note that + * strings may span across multiple continuations. Read the SST record + * carefully before beginning to hack. + */ + public void manufactureStrings( final byte[] data, final int index, + short size ) + { + int offset = index; + + while ( offset < size ) + { + int remaining = size - offset; + + if ( ( remaining > 0 ) && ( remaining < LittleEndianConsts.SHORT_SIZE ) ) + { + throw new RecordFormatException( "Cannot get length of the last string in SSTRecord" ); + } + if ( remaining == LittleEndianConsts.SHORT_SIZE ) + { + setExpectedChars( LittleEndian.getUShort( data, offset ) ); + unfinishedString = ""; + break; + } + short charCount = LittleEndian.getShort( data, offset ); + + setupStringParameters( data, offset, charCount ); + if ( remaining < totalLengthBytes ) + { + setExpectedChars( calculateCharCount( totalLengthBytes - remaining ) ); + charCount -= getExpectedChars(); + totalLengthBytes = remaining; + } + else + { + setExpectedChars( 0 ); + } + processString( data, offset, charCount ); + offset += totalLengthBytes; + if ( getExpectedChars() != 0 ) + { + break; + } + } + } + + + /** + * Detemines the option types for the string (ie, compressed or uncompressed unicode, rich text string or + * plain string etc) and calculates the length and offset for the string. 
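
As a reading aid for the sentence above: a minimal, self-contained sketch of the option-flag decoding and length arithmetic that setupStringParameters performs. The bit masks and the 3-byte minimal string overhead come from the patch itself; the class name and the sample values are illustrative only, not part of the patch.

    // Sketch only: decode a BIFF unicode-string option flag the way
    // setupStringParameters does, then estimate the field length.
    public class UnicodeOptionFlagSketch
    {
        public static void main( String[] args )
        {
            byte optionFlag = 0x08;                      // sample: rich text, compressed chars
            int charCount = 11;                          // sample character count
            int formattingRuns = 2;                      // sample rich-text run count

            boolean wideChar = ( optionFlag & 1 ) == 1;  // 16-bit characters
            boolean farEast = ( optionFlag & 4 ) == 4;   // extension block follows the string
            boolean richText = ( optionFlag & 8 ) == 8;  // formatting runs follow the string

            // 2-byte char count + 1-byte option flag, then the character data
            int totalLengthBytes = 3 + charCount * ( wideChar ? 2 : 1 );
            if ( richText )
            {
                totalLengthBytes += 2 + 4 * formattingRuns;  // run-count short + 4 bytes per run
            }
            // (a far-east extension would further add 4 bytes plus the extension length)
            System.out.println( "wide=" + wideChar + ", farEast=" + farEast
                    + ", richText=" + richText + ", total=" + totalLengthBytes + " bytes" );
        }
    }
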
+ * + * @param data + * @param index + * @param char_count + */ + private void setupStringParameters( final byte[] data, final int index, + final int char_count ) + { + byte optionFlag = data[index + LittleEndianConsts.SHORT_SIZE]; + + wideChar = ( optionFlag & 1 ) == 1; + boolean farEast = ( optionFlag & 4 ) == 4; + boolean richText = ( optionFlag & 8 ) == 8; + + totalLengthBytes = SSTRecord.STRING_MINIMAL_OVERHEAD + calculateByteCount( char_count ); + stringDataOffset = SSTRecord.STRING_MINIMAL_OVERHEAD; + if ( richText ) + { + short run_count = LittleEndian.getShort( data, index + stringDataOffset ); + + stringDataOffset += LittleEndianConsts.SHORT_SIZE; + totalLengthBytes += LittleEndianConsts.SHORT_SIZE + ( LittleEndianConsts.INT_SIZE * run_count ); + } + if ( farEast ) + { + int extension_length = LittleEndian.getInt( data, index + stringDataOffset ); + + stringDataOffset += LittleEndianConsts.INT_SIZE; + totalLengthBytes += LittleEndianConsts.INT_SIZE + extension_length; + } + } + + + private void processString( final byte[] data, final int index, + final short char_count ) + { + byte[] stringDataBuffer = new byte[totalLengthBytes]; + int length = SSTRecord.STRING_MINIMAL_OVERHEAD + calculateByteCount( char_count ); + byte[] bstring = new byte[length]; + + System.arraycopy( data, index, stringDataBuffer, 0, stringDataBuffer.length ); + int offset = 0; + + LittleEndian.putShort( bstring, offset, char_count ); + offset += LittleEndianConsts.SHORT_SIZE; + bstring[offset] = stringDataBuffer[offset]; + +// System.out.println( "offset = " + stringDataOffset ); +// System.out.println( "length = " + (bstring.length - STRING_MINIMAL_OVERHEAD) ); +// System.out.println( "src.length = " + str_data.length ); +// try +// { +// System.out.println( "----------------------- DUMP -------------------------" ); +// HexDump.dump( stringDataBuffer, (long)stringDataOffset, System.out, 1); +// } +// catch ( IOException e ) +// { +// } +// catch ( ArrayIndexOutOfBoundsException e ) +// { +// } +// catch ( IllegalArgumentException e ) +// { +// } + System.arraycopy( stringDataBuffer, stringDataOffset, bstring, + SSTRecord.STRING_MINIMAL_OVERHEAD, + bstring.length - SSTRecord.STRING_MINIMAL_OVERHEAD ); + UnicodeString string = new UnicodeString( UnicodeString.sid, + (short) bstring.length, + bstring ); + + if ( getExpectedChars() != 0 ) + { + unfinishedString = string.getString(); + } + else + { + Integer integer = new Integer( strings.size() ); + addToStringTable( strings, integer, string ); + } + } + + /** + * Okay, we are doing some major cheating here. Because we can't handle rich text strings properly + * we end up getting duplicate strings. To get around this I'm doing do things: 1. Converting rich + * text to normal text and 2. If there's a duplicate I'm adding a space onto the end. Sneaky perhaps + * but it gets the job done until we can handle this a little better. + */ + static public void addToStringTable( BinaryTree strings, Integer integer, UnicodeString string ) + { + if (string.isRichText()) + string.setOptionFlags( (byte)(string.getOptionFlags() & (~8) ) ); + + boolean added = false; + while (added == false) + { + try + { + strings.put( integer, string ); + added = true; + } + catch( Exception ignore ) + { + string.setString( string.getString() + " " ); + } + } + } + + + + private int calculateCharCount( final int byte_count ) + { + return byte_count / ( wideChar ? LittleEndianConsts.SHORT_SIZE + : LittleEndianConsts.BYTE_SIZE ); + } + + /** + * Process a Continue record. 
A Continue record for an SST record + * contains the same kind of data that the SST record contains, + * with the following exceptions: + *
+ *   1. The string counts at the beginning of the SST record are
+ *      not in the Continue record
+ *   2. The first string in the Continue record might NOT begin
+ *      with a size. If the last string in the previous record is
+ *      continued in this record, the size is determined by that
+ *      last string in the previous record; the first string will
+ *      begin with a flag byte, followed by the remaining bytes (or
+ *      words) of the last string from the previous
+ *      record. Otherwise, the first string in the record will
+ *      begin with a string length
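
As a reading aid for the list above: a minimal sketch of the arithmetic processContinueRecord applies when a string spills into a Continue record; the fragment re-supplies only a flag byte, while the character count is carried over from the previous record. The class name and sample values are illustrative only, not part of the patch.

    // Sketch only: how many characters of an unfinished string a Continue
    // fragment carries, given its leading option-flag byte.
    public class ContinueFragmentSketch
    {
        public static void main( String[] args )
        {
            byte[] fragment = new byte[ 1 + 6 ];        // sample: flag byte + 6 data bytes
            fragment[ 0 ] = 0x01;                       // flag says 16-bit characters

            int dataLength = fragment.length - 1;       // everything after the flag byte
            boolean wideChar = ( fragment[ 0 ] & 1 ) == 1;
            int charsInFragment = dataLength / ( wideChar ? 2 : 1 );

            int expectedChars = 10;                     // carried over from the previous record
            int stillExpected = expectedChars - charsInFragment;
            System.out.println( charsInFragment + " chars in this fragment, "
                    + stillExpected + " still expected" );
        }
    }
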
+ * + * @param record the Continue record's byte data + */ + + public void processContinueRecord( final byte[] record ) + { + if ( getExpectedChars() == 0 ) + { + unfinishedString = ""; + totalLengthBytes = 0; + stringDataOffset = 0; + wideChar = false; + manufactureStrings( record, 0, (short) record.length ); + } + else + { + int data_length = record.length - LittleEndianConsts.BYTE_SIZE; + + if ( calculateByteCount( getExpectedChars() ) > data_length ) + { + + // create artificial data to create a UnicodeString + byte[] input = + new byte[record.length + LittleEndianConsts.SHORT_SIZE]; + short size = (short) ( ( ( record[0] & 1 ) == 1 ) + ? ( data_length / LittleEndianConsts.SHORT_SIZE ) + : ( data_length / LittleEndianConsts.BYTE_SIZE ) ); + + LittleEndian.putShort( input, (byte) 0, size ); + System.arraycopy( record, 0, input, LittleEndianConsts.SHORT_SIZE, record.length ); + UnicodeString ucs = new UnicodeString( UnicodeString.sid, (short) input.length, input ); + + unfinishedString = unfinishedString + ucs.getString(); + setExpectedChars( getExpectedChars() - size ); + } + else + { + setupStringParameters( record, -LittleEndianConsts.SHORT_SIZE, + getExpectedChars() ); + byte[] str_data = new byte[totalLengthBytes]; + int length = SSTRecord.STRING_MINIMAL_OVERHEAD + + ( calculateByteCount( getExpectedChars() ) ); + byte[] bstring = new byte[length]; + + // Copy data from the record into the string + // buffer. Copy skips the length of a short in the + // string buffer, to leave room for the string length. + System.arraycopy( record, 0, str_data, + LittleEndianConsts.SHORT_SIZE, + str_data.length + - LittleEndianConsts.SHORT_SIZE ); + + // write the string length + LittleEndian.putShort( bstring, 0, + (short) getExpectedChars() ); + + // write the options flag + bstring[LittleEndianConsts.SHORT_SIZE] = + str_data[LittleEndianConsts.SHORT_SIZE]; + + // copy the bytes/words making up the string; skipping + // past all the overhead of the str_data array + System.arraycopy( str_data, stringDataOffset, bstring, + SSTRecord.STRING_MINIMAL_OVERHEAD, + bstring.length - SSTRecord.STRING_MINIMAL_OVERHEAD ); + + // use special constructor to create the final string + UnicodeString string = + new UnicodeString( UnicodeString.sid, + (short) bstring.length, bstring, + unfinishedString ); + Integer integer = new Integer( strings.size() ); + +// field_3_strings.put( integer, string ); + addToStringTable( strings, integer, string ); + manufactureStrings( record, totalLengthBytes - LittleEndianConsts.SHORT_SIZE, (short) record.length ); + } + } + } + + /** + * @return the number of characters we expect in the first + * sub-record in a subsequent continuation record + */ + + int getExpectedChars() + { + return continuationExpectedChars; + } + + private void setExpectedChars( final int count ) + { + continuationExpectedChars = count; + } + + private int calculateByteCount( final int character_count ) + { + return character_count * ( wideChar ? 
LittleEndianConsts.SHORT_SIZE : LittleEndianConsts.BYTE_SIZE ); + } + + + /** + * @return the unfinished string + */ + + String getUnfinishedString() + { + return unfinishedString; + } + + /** + * @return the total length of the current string + */ + + int getTotalLength() + { + return totalLengthBytes; + } + + /** + * @return offset into current string data + */ + + int getStringDataOffset() + { + return stringDataOffset; + } + + /** + * @return true if current string uses wide characters + */ + + boolean isWideChar() + { + return wideChar; + } + + +} diff --git a/src/java/org/apache/poi/hssf/record/SSTRecord.java b/src/java/org/apache/poi/hssf/record/SSTRecord.java index d8428148a..6011c8f5d 100644 --- a/src/java/org/apache/poi/hssf/record/SSTRecord.java +++ b/src/java/org/apache/poi/hssf/record/SSTRecord.java @@ -1,4 +1,3 @@ - /* ==================================================================== * The Apache Software License, Version 1.1 * @@ -59,7 +58,8 @@ import org.apache.poi.util.BinaryTree; import org.apache.poi.util.LittleEndian; import org.apache.poi.util.LittleEndianConsts; -import java.util.*; +import java.util.Iterator; +import java.util.List; /** * Title: Static String Table Record @@ -71,65 +71,45 @@ import java.util.*; *

* @author Andrew C. Oliver (acoliver at apache dot org) * @author Marc Johnson (mjohnson at apache dot org) + * @author Glen Stampoultzis (glens at apache.org) * @version 2.0-pre * @see org.apache.poi.hssf.record.LabelSSTRecord * @see org.apache.poi.hssf.record.ContinueRecord */ public class SSTRecord - extends Record + extends Record { - // how big can an SST record be? As big as any record can be: 8228 - // bytes - private static final int _max = 8228; + /** how big can an SST record be? As big as any record can be: 8228 bytes */ + static final int MAX_RECORD_SIZE = 8228; - // standard record overhead: two shorts (record id plus data space - // size) - private static final int _std_record_overhead = - 2 * LittleEndianConsts.SHORT_SIZE; + /** standard record overhead: two shorts (record id plus data space size)*/ + static final int STD_RECORD_OVERHEAD = + 2 * LittleEndianConsts.SHORT_SIZE; - // SST overhead: the standard record overhead, plus the number of - // strings and the number of unique strings -- two ints - private static final int _sst_record_overhead = - (_std_record_overhead + (2 * LittleEndianConsts.INT_SIZE)); + /** SST overhead: the standard record overhead, plus the number of strings and the number of unique strings -- two ints */ + static final int SST_RECORD_OVERHEAD = + ( STD_RECORD_OVERHEAD + ( 2 * LittleEndianConsts.INT_SIZE ) ); - // how much data can we stuff into an SST record? That would be - // _max minus the standard SST record overhead - private static final int _max_data_space = - _max - _sst_record_overhead; + /** how much data can we stuff into an SST record? That would be _max minus the standard SST record overhead */ + static final int MAX_DATA_SPACE = MAX_RECORD_SIZE - SST_RECORD_OVERHEAD; - // overhead for each string includes the string's character count - // (a short) and the flag describing its characteristics (a byte) - private static final int _string_minimal_overhead = - LittleEndianConsts.SHORT_SIZE + LittleEndianConsts.BYTE_SIZE; - public static final short sid = 0xfc; + /** overhead for each string includes the string's character count (a short) and the flag describing its characteristics (a byte) */ + static final int STRING_MINIMAL_OVERHEAD = LittleEndianConsts.SHORT_SIZE + LittleEndianConsts.BYTE_SIZE; - // union of strings in the SST and EXTSST - private int field_1_num_strings; + public static final short sid = 0xfc; - // according to docs ONLY SST - private int field_2_num_unique_strings; - private BinaryTree field_3_strings; + /** union of strings in the SST and EXTSST */ + private int field_1_num_strings; - // this is the number of characters we expect in the first - // sub-record in a subsequent continuation record - private int __expected_chars; + /** according to docs ONLY SST */ + private int field_2_num_unique_strings; + private BinaryTree field_3_strings; - // this is the string we were working on before hitting the end of - // the current record. This string is NOT finished. 
- private String _unfinished_string; - - // this is the total length of the current string being handled - private int _total_length_bytes; - - // this is the offset into a string field of the actual string - // data - private int _string_data_offset; - - // this is true if the string uses wide characters - private boolean _wide_char; - private List _record_lengths = null; + /** Record lengths for initial SST record and all continue records */ + private List _record_lengths = null; + private SSTDeserializer deserializer; /** * default constructor @@ -137,14 +117,10 @@ public class SSTRecord public SSTRecord() { - field_1_num_strings = 0; + field_1_num_strings = 0; field_2_num_unique_strings = 0; - field_3_strings = new BinaryTree(); - setExpectedChars(0); - _unfinished_string = ""; - _total_length_bytes = 0; - _string_data_offset = 0; - _wide_char = false; + field_3_strings = new BinaryTree(); + deserializer = new SSTDeserializer(field_3_strings); } /** @@ -156,9 +132,9 @@ public class SSTRecord * @param data of the record (should not contain sid/len) */ - public SSTRecord(final short id, final short size, final byte [] data) + public SSTRecord( final short id, final short size, final byte[] data ) { - super(id, size, data); + super( id, size, data ); } /** @@ -171,10 +147,10 @@ public class SSTRecord * @param offset of the record */ - public SSTRecord(final short id, final short size, final byte [] data, - int offset) + public SSTRecord( final short id, final short size, final byte[] data, + int offset ) { - super(id, size, data, offset); + super( id, size, data, offset ); } /** @@ -192,13 +168,13 @@ public class SSTRecord * @return the index of that string in the table */ - public int addString(final String string) + public int addString( final String string ) { int rval; - if (string == null) + if ( string == null ) { - rval = addString("", false); + rval = addString( "", false ); } else { @@ -207,17 +183,17 @@ public class SSTRecord // present, we have to use 16-bit encoding. Otherwise, we // can use 8-bit encoding boolean useUTF16 = false; - int strlen = string.length(); + int strlen = string.length(); - for (int j = 0; j < strlen; j++) + for ( int j = 0; j < strlen; j++ ) { - if (string.charAt(j) > 255) + if ( string.charAt( j ) > 255 ) { useUTF16 = true; break; } } - rval = addString(string, useUTF16); + rval = addString( string, useUTF16 ); } return rval; } @@ -238,21 +214,21 @@ public class SSTRecord * @return the index of that string in the table */ - public int addString(final String string, final boolean useUTF16) + public int addString( final String string, final boolean useUTF16 ) { field_1_num_strings++; - String str = (string == null) ? "" - : string; - int rval = -1; - UnicodeString ucs = new UnicodeString(); + String str = ( string == null ) ? "" + : string; + int rval = -1; + UnicodeString ucs = new UnicodeString(); - ucs.setString(str); - ucs.setCharCount(( short ) str.length()); - ucs.setOptionFlags(( byte ) (useUTF16 ? 1 - : 0)); - Integer integer = ( Integer ) field_3_strings.getKeyForValue(ucs); + ucs.setString( str ); + ucs.setCharCount( (short) str.length() ); + ucs.setOptionFlags( (byte) ( useUTF16 ? 
1 + : 0 ) ); + Integer integer = (Integer) field_3_strings.getKeyForValue( ucs ); - if (integer != null) + if ( integer != null ) { rval = integer.intValue(); } @@ -263,8 +239,9 @@ public class SSTRecord // strings we've already collected rval = field_3_strings.size(); field_2_num_unique_strings++; - integer = new Integer(rval); - field_3_strings.put(integer, ucs); + integer = new Integer( rval ); + SSTDeserializer.addToStringTable( field_3_strings, integer, ucs ); +// field_3_strings.put( integer, ucs ); } return rval; } @@ -298,7 +275,7 @@ public class SSTRecord * */ - public void setNumStrings(final int count) + public void setNumStrings( final int count ) { field_1_num_strings = count; } @@ -314,7 +291,7 @@ public class SSTRecord * @param count number of strings */ - public void getNumUniqueStrings(final int count) + public void getNumUniqueStrings( final int count ) { field_2_num_unique_strings = count; } @@ -327,16 +304,15 @@ public class SSTRecord * @return the desired string */ - public String getString(final int id) + public String getString( final int id ) { - return (( UnicodeString ) field_3_strings.get(new Integer(id))) - .getString(); + return ( (UnicodeString) field_3_strings.get( new Integer( id ) ) ).getString(); } - public boolean getString16bit(final int id) + public boolean isString16bit( final int id ) { - return ((( UnicodeString ) field_3_strings.get(new Integer(id))) - .getOptionFlags() == 1); + UnicodeString unicodeString = ( (UnicodeString) field_3_strings.get( new Integer( id ) ) ); + return ( ( unicodeString.getOptionFlags() & 0x01 ) == 1 ); } /** @@ -349,326 +325,24 @@ public class SSTRecord { StringBuffer buffer = new StringBuffer(); - buffer.append("[SST]\n"); - buffer.append(" .numstrings = ") - .append(Integer.toHexString(getNumStrings())).append("\n"); - buffer.append(" .uniquestrings = ") - .append(Integer.toHexString(getNumUniqueStrings())).append("\n"); - for (int k = 0; k < field_3_strings.size(); k++) + buffer.append( "[SST]\n" ); + buffer.append( " .numstrings = " ) + .append( Integer.toHexString( getNumStrings() ) ).append( "\n" ); + buffer.append( " .uniquestrings = " ) + .append( Integer.toHexString( getNumUniqueStrings() ) ).append( "\n" ); + for ( int k = 0; k < field_3_strings.size(); k++ ) { - buffer.append(" .string_" + k + " = ") - .append((( UnicodeString ) field_3_strings - .get(new Integer(k))).toString()).append("\n"); + buffer.append( " .string_" + k + " = " ) + .append( ( (UnicodeString) field_3_strings + .get( new Integer( k ) ) ).toString() ).append( "\n" ); } - buffer.append("[/SST]\n"); + buffer.append( "[/SST]\n" ); return buffer.toString(); } - /** - * Create a byte array consisting of an SST record and any - * required Continue records, ready to be written out. - *

- * If an SST record and any subsequent Continue records are read - * in to create this instance, this method should produce a byte - * array that is identical to the byte array produced by - * concatenating the input records' data. - * - * @return the byte array - */ - - public int serialize(int offset, byte [] data) - { - int rval = getRecordSize(); - int record_length_index = 0; - - // get the linear size of that array - int unicodesize = calculateUnicodeSize(); - - if (unicodesize > _max_data_space) - { - byte[] stringreminant = null; - int unipos = 0; - boolean lastneedcontinue = false; - int stringbyteswritten = 0; - boolean first_record = true; - int totalWritten = 0; - int size = 0; - - while (totalWritten != rval) - { - int pos = 0; - - // write the appropriate header - int available; - - if (first_record) - { - size = - (( Integer ) _record_lengths - .get(record_length_index++)).intValue(); - available = size - 8; - pos = writeSSTHeader(data, - pos + offset - + totalWritten, size); - size += _std_record_overhead; - first_record = false; - } - else - { - pos = 0; - int to_be_written = (unicodesize - stringbyteswritten) - + (lastneedcontinue ? 1 - : 0); // not used? - - size = - (( Integer ) _record_lengths - .get(record_length_index++)).intValue(); - available = size; - pos = writeContinueHeader(data, - pos + offset - + totalWritten, size); - size = size + _std_record_overhead; - } - - // now, write the rest of the data into the current - // record space - if (lastneedcontinue) - { - - // the last string in the previous record was not - // written out completely - if (stringreminant.length <= available) - { - - // write reminant -- it'll all fit neatly - System.arraycopy(stringreminant, 0, data, - pos + offset + totalWritten, - stringreminant.length); - stringbyteswritten += stringreminant.length - 1; - pos += stringreminant.length; - lastneedcontinue = false; - available -= stringreminant.length; - } - else - { - - // write as much of the remnant as possible - System.arraycopy(stringreminant, 0, data, - pos + offset + totalWritten, - available); - stringbyteswritten += available - 1; - pos += available; - byte[] leftover = - new byte[ (stringreminant.length - available) + LittleEndianConsts.BYTE_SIZE ]; - - System.arraycopy(stringreminant, available, leftover, - LittleEndianConsts.BYTE_SIZE, - stringreminant.length - available); - leftover[ 0 ] = stringreminant[ 0 ]; - stringreminant = leftover; - available = 0; - lastneedcontinue = true; - } - } - - // last string's remnant, if any, is cleaned up as - // best as can be done ... 
now let's try and write - // some more strings - for (; unipos < field_3_strings.size(); unipos++) - { - Integer intunipos = new Integer(unipos); - UnicodeString unistr = - (( UnicodeString ) field_3_strings.get(intunipos)); - - if (unistr.getRecordSize() <= available) - { - unistr.serialize(pos + offset + totalWritten, data); - int rsize = unistr.getRecordSize(); - - stringbyteswritten += rsize; - pos += rsize; - available -= rsize; - } - else - { - - // can't write the entire string out - if (available >= _string_minimal_overhead) - { - - // we can write some of it - byte[] ucs = unistr.serialize(); - - System.arraycopy(ucs, 0, data, - pos + offset + totalWritten, - available); - stringbyteswritten += available; - stringreminant = - new byte[ (ucs.length - available) + LittleEndianConsts.BYTE_SIZE ]; - System.arraycopy(ucs, available, stringreminant, - LittleEndianConsts.BYTE_SIZE, - ucs.length - available); - stringreminant[ 0 ] = - ucs[ LittleEndianConsts.SHORT_SIZE ]; - available = 0; - lastneedcontinue = true; - unipos++; - } - break; - } - } - totalWritten += size; - } - } - else - { - - // short data: write one simple SST record - int datasize = _sst_record_overhead + unicodesize; // not used? - - writeSSTHeader( - data, 0 + offset, - _sst_record_overhead - + (( Integer ) _record_lengths.get( - record_length_index++)).intValue() - _std_record_overhead); - int pos = _sst_record_overhead; - - for (int k = 0; k < field_3_strings.size(); k++) - { - UnicodeString unistr = - (( UnicodeString ) field_3_strings.get(new Integer(k))); - - System.arraycopy(unistr.serialize(), 0, data, pos + offset, - unistr.getRecordSize()); - pos += unistr.getRecordSize(); - } - } - return rval; - } - - // not used: remove? - private int calculateStringsize() - { - int retval = 0; - - for (int k = 0; k < field_3_strings.size(); k++) - { - retval += - (( UnicodeString ) field_3_strings.get(new Integer(k))) - .getRecordSize(); - } - return retval; - } - - /** - * Process a Continue record. A Continue record for an SST record - * contains the same kind of data that the SST record contains, - * with the following exceptions: - *
- *   1. The string counts at the beginning of the SST record are
- *      not in the Continue record
- *   2. The first string in the Continue record might NOT begin
- *      with a size. If the last string in the previous record is
- *      continued in this record, the size is determined by that
- *      last string in the previous record; the first string will
- *      begin with a flag byte, followed by the remaining bytes (or
- *      words) of the last string from the previous
- *      record. Otherwise, the first string in the record will
- *      begin with a string length
- * - * @param record the Continue record's byte data - */ - - public void processContinueRecord(final byte [] record) - { - if (getExpectedChars() == 0) - { - _unfinished_string = ""; - _total_length_bytes = 0; - _string_data_offset = 0; - _wide_char = false; - manufactureStrings(record, 0, ( short ) record.length); - } - else - { - int data_length = record.length - LittleEndianConsts.BYTE_SIZE; - - if (calculateByteCount(getExpectedChars()) > data_length) - { - - // create artificial data to create a UnicodeString - byte[] input = - new byte[ record.length + LittleEndianConsts.SHORT_SIZE ]; - short size = ( short ) (((record[ 0 ] & 1) == 1) - ? (data_length - / LittleEndianConsts.SHORT_SIZE) - : (data_length - / LittleEndianConsts.BYTE_SIZE)); - - LittleEndian.putShort(input, ( byte ) 0, size); - System.arraycopy(record, 0, input, - LittleEndianConsts.SHORT_SIZE, - record.length); - UnicodeString ucs = new UnicodeString(UnicodeString.sid, - ( short ) input.length, - input); - - _unfinished_string = _unfinished_string + ucs.getString(); - setExpectedChars(getExpectedChars() - size); - } - else - { - setupStringParameters(record, -LittleEndianConsts.SHORT_SIZE, - getExpectedChars()); - byte[] str_data = new byte[ _total_length_bytes ]; - int length = _string_minimal_overhead - + (calculateByteCount(getExpectedChars())); - byte[] bstring = new byte[ length ]; - - // Copy data from the record into the string - // buffer. Copy skips the length of a short in the - // string buffer, to leave room for the string length. - System.arraycopy(record, 0, str_data, - LittleEndianConsts.SHORT_SIZE, - str_data.length - - LittleEndianConsts.SHORT_SIZE); - - // write the string length - LittleEndian.putShort(bstring, 0, - ( short ) getExpectedChars()); - - // write the options flag - bstring[ LittleEndianConsts.SHORT_SIZE ] = - str_data[ LittleEndianConsts.SHORT_SIZE ]; - - // copy the bytes/words making up the string; skipping - // past all the overhead of the str_data array - System.arraycopy(str_data, _string_data_offset, bstring, - _string_minimal_overhead, - bstring.length - _string_minimal_overhead); - - // use special constructor to create the final string - UnicodeString string = - new UnicodeString(UnicodeString.sid, - ( short ) bstring.length, bstring, - _unfinished_string); - Integer integer = new Integer(field_3_strings.size()); - - field_3_strings.put(integer, string); - manufactureStrings(record, - _total_length_bytes - - LittleEndianConsts - .SHORT_SIZE, ( short ) record.length); - } - } - } - /** * @return sid */ - public short getSid() { return sid; @@ -677,30 +351,23 @@ public class SSTRecord /** * @return hashcode */ - public int hashCode() { return field_2_num_unique_strings; } - /** - * - * @param o - * @return true if equal - */ - - public boolean equals(Object o) + public boolean equals( Object o ) { - if ((o == null) || (o.getClass() != this.getClass())) + if ( ( o == null ) || ( o.getClass() != this.getClass() ) ) { return false; } - SSTRecord other = ( SSTRecord ) o; + SSTRecord other = (SSTRecord) o; - return ((field_1_num_strings == other - .field_1_num_strings) && (field_2_num_unique_strings == other - .field_2_num_unique_strings) && field_3_strings - .equals(other.field_3_strings)); + return ( ( field_1_num_strings == other + .field_1_num_strings ) && ( field_2_num_unique_strings == other + .field_2_num_unique_strings ) && field_3_strings + .equals( other.field_3_strings ) ); } /** @@ -711,12 +378,12 @@ public class SSTRecord * @exception RecordFormatException if validation 
fails */ - protected void validateSid(final short id) - throws RecordFormatException + protected void validateSid( final short id ) + throws RecordFormatException { - if (id != sid) + if ( id != sid ) { - throw new RecordFormatException("NOT An SST RECORD"); + throw new RecordFormatException( "NOT An SST RECORD" ); } } @@ -800,33 +467,20 @@ public class SSTRecord * @param size size of the raw data */ - protected void fillFields(final byte [] data, final short size, - int offset) + protected void fillFields( final byte[] data, final short size, + int offset ) { // this method is ALWAYS called after construction -- using // the nontrivial constructor, of course -- so this is where // we initialize our fields - field_1_num_strings = LittleEndian.getInt(data, 0 + offset); - field_2_num_unique_strings = LittleEndian.getInt(data, 4 + offset); - field_3_strings = new BinaryTree(); - setExpectedChars(0); - _unfinished_string = ""; - _total_length_bytes = 0; - _string_data_offset = 0; - _wide_char = false; - manufactureStrings(data, 8 + offset, size); + field_1_num_strings = LittleEndian.getInt( data, 0 + offset ); + field_2_num_unique_strings = LittleEndian.getInt( data, 4 + offset ); + field_3_strings = new BinaryTree(); + deserializer = new SSTDeserializer(field_3_strings); + deserializer.manufactureStrings( data, 8 + offset, size ); } - /** - * @return the number of characters we expect in the first - * sub-record in a subsequent continuation record - */ - - int getExpectedChars() - { - return __expected_chars; - } /** * @return an iterator of the strings we hold. All instances are @@ -848,372 +502,43 @@ public class SSTRecord } /** - * @return the unfinished string + * called by the class that is responsible for writing this sucker. + * Subclasses should implement this so that their data is passed back in a + * byte array. 
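
As a usage sketch of the serialize contract described above: the caller sizes a buffer with getRecordSize() and asks the record to write itself at an offset; in this implementation the return value is the total record size written. The SSTRecord methods used are the ones present in this patch; the class name and sample strings are illustrative only.

    import org.apache.poi.hssf.record.SSTRecord;

    // Sketch only: writing a small SST record into a byte buffer.
    public class SstSerializeUsageSketch
    {
        public static void main( String[] args )
        {
            SSTRecord sst = new SSTRecord();
            sst.addString( "hello" );
            sst.addString( "world" );

            byte[] buffer = new byte[ sst.getRecordSize() ];
            int written = sst.serialize( 0, buffer );   // includes the record header bytes
            System.out.println( written + " of " + buffer.length + " bytes written" );
        }
    }
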
+ * + * @return byte array containing instance data */ - String getUnfinishedString() + public int serialize( int offset, byte[] data ) { - return _unfinished_string; + SSTSerializer serializer = new SSTSerializer( + _record_lengths, field_3_strings, getNumStrings(), getNumUniqueStrings() ); + return serializer.serialize( offset, data ); } - /** - * @return the total length of the current string - */ - - int getTotalLength() - { - return _total_length_bytes; - } - - /** - * @return offset into current string data - */ - - int getStringDataOffset() - { - return _string_data_offset; - } - - /** - * @return true if current string uses wide characters - */ - - boolean isWideChar() - { - return _wide_char; - } - - private int writeSSTHeader(final byte [] data, final int pos, - final int recsize) - { - int offset = pos; - - LittleEndian.putShort(data, offset, sid); - offset += LittleEndianConsts.SHORT_SIZE; - LittleEndian.putShort(data, offset, ( short ) (recsize)); - offset += LittleEndianConsts.SHORT_SIZE; - LittleEndian.putInt(data, offset, getNumStrings()); - offset += LittleEndianConsts.INT_SIZE; - LittleEndian.putInt(data, offset, getNumUniqueStrings()); - offset += LittleEndianConsts.INT_SIZE; - return offset - pos; - } - - private int writeContinueHeader(final byte [] data, final int pos, - final int recsize) - { - int offset = pos; - - LittleEndian.putShort(data, offset, ContinueRecord.sid); - offset += LittleEndianConsts.SHORT_SIZE; - LittleEndian.putShort(data, offset, ( short ) (recsize)); - offset += LittleEndianConsts.SHORT_SIZE; - return offset - pos; - } - - private int calculateUCArrayLength(final byte [][] ucarray) - { - int retval = 0; - - for (int k = 0; k < ucarray.length; k++) - { - retval += ucarray[ k ].length; - } - return retval; - } - - private void manufactureStrings(final byte [] data, final int index, - short size) - { - int offset = index; - - while (offset < size) - { - int remaining = size - offset; - - if ((remaining > 0) - && (remaining < LittleEndianConsts.SHORT_SIZE)) - { - throw new RecordFormatException( - "Cannot get length of the last string in SSTRecord"); - } - if (remaining == LittleEndianConsts.SHORT_SIZE) - { - setExpectedChars(LittleEndian.getShort(data, offset)); - _unfinished_string = ""; - break; - } - short char_count = LittleEndian.getShort(data, offset); - - setupStringParameters(data, offset, char_count); - if (remaining < _total_length_bytes) - { - setExpectedChars(calculateCharCount(_total_length_bytes - - remaining)); - char_count -= getExpectedChars(); - _total_length_bytes = remaining; - } - else - { - setExpectedChars(0); - } - processString(data, offset, char_count); - offset += _total_length_bytes; - if (getExpectedChars() != 0) - { - break; - } - } - } - - private void setupStringParameters(final byte [] data, final int index, - final int char_count) - { - byte flag = data[ index + LittleEndianConsts.SHORT_SIZE ]; - - _wide_char = (flag & 1) == 1; - boolean extended = (flag & 4) == 4; - boolean formatted_run = (flag & 8) == 8; - - _total_length_bytes = _string_minimal_overhead - + calculateByteCount(char_count); - _string_data_offset = _string_minimal_overhead; - if (formatted_run) - { - short run_count = LittleEndian.getShort(data, - index - + _string_data_offset); - - _string_data_offset += LittleEndianConsts.SHORT_SIZE; - _total_length_bytes += LittleEndianConsts.SHORT_SIZE - + (LittleEndianConsts.INT_SIZE - * run_count); - } - if (extended) - { - int extension_length = LittleEndian.getInt(data, - index - + _string_data_offset); - 
- _string_data_offset += LittleEndianConsts.INT_SIZE; - _total_length_bytes += LittleEndianConsts.INT_SIZE - + extension_length; - } - } - - private void processString(final byte [] data, final int index, - final short char_count) - { - byte[] str_data = new byte[ _total_length_bytes ]; - int length = _string_minimal_overhead - + calculateByteCount(char_count); - byte[] bstring = new byte[ length ]; - - System.arraycopy(data, index, str_data, 0, str_data.length); - int offset = 0; - - LittleEndian.putShort(bstring, offset, char_count); - offset += LittleEndianConsts.SHORT_SIZE; - bstring[ offset ] = str_data[ offset ]; - System.arraycopy(str_data, _string_data_offset, bstring, - _string_minimal_overhead, - bstring.length - _string_minimal_overhead); - UnicodeString string = new UnicodeString(UnicodeString.sid, - ( short ) bstring.length, - bstring); - - if (getExpectedChars() != 0) - { - _unfinished_string = string.getString(); - } - else - { - Integer integer = new Integer(field_3_strings.size()); - - field_3_strings.put(integer, string); - } - } - - private void setExpectedChars(final int count) - { - __expected_chars = count; - } - - private int calculateByteCount(final int character_count) - { - return character_count * (_wide_char ? LittleEndianConsts.SHORT_SIZE - : LittleEndianConsts.BYTE_SIZE); - } - - private int calculateCharCount(final int byte_count) - { - return byte_count / (_wide_char ? LittleEndianConsts.SHORT_SIZE - : LittleEndianConsts.BYTE_SIZE); - } // we can probably simplify this later...this calculates the size // w/o serializing but still is a bit slow public int getRecordSize() { - _record_lengths = new ArrayList(); - int retval = 0; - int unicodesize = calculateUnicodeSize(); + SSTSerializer serializer = new SSTSerializer( + _record_lengths, field_3_strings, getNumStrings(), getNumUniqueStrings() ); - if (unicodesize > _max_data_space) - { - UnicodeString unistr = null; - int stringreminant = 0; - int unipos = 0; - boolean lastneedcontinue = false; - int stringbyteswritten = 0; - boolean finished = false; - boolean first_record = true; - int totalWritten = 0; - - while (!finished) - { - int record = 0; - int pos = 0; - - if (first_record) - { - - // writing SST record - record = _max; - pos = 12; - first_record = false; - _record_lengths.add(new Integer(record - - _std_record_overhead)); - } - else - { - - // writing continue record - pos = 0; - int to_be_written = (unicodesize - stringbyteswritten) - + (lastneedcontinue ? 
1 - : 0); - int size = Math.min(_max - _std_record_overhead, - to_be_written); - - if (size == to_be_written) - { - finished = true; - } - record = size + _std_record_overhead; - _record_lengths.add(new Integer(size)); - pos = 4; - } - if (lastneedcontinue) - { - int available = _max - pos; - - if (stringreminant <= available) - { - - // write reminant - stringbyteswritten += stringreminant - 1; - pos += stringreminant; - lastneedcontinue = false; - } - else - { - - // write as much of the remnant as possible - int toBeWritten = unistr.maxBrokenLength(available); - - if (available != toBeWritten) - { - int shortrecord = record - - (available - toBeWritten); - - _record_lengths.set( - _record_lengths.size() - 1, - new Integer( - shortrecord - _std_record_overhead)); - record = shortrecord; - } - stringbyteswritten += toBeWritten - 1; - pos += toBeWritten; - stringreminant -= toBeWritten - 1; - lastneedcontinue = true; - } - } - for (; unipos < field_3_strings.size(); unipos++) - { - int available = _max - pos; - Integer intunipos = new Integer(unipos); - - unistr = - (( UnicodeString ) field_3_strings.get(intunipos)); - if (unistr.getRecordSize() <= available) - { - stringbyteswritten += unistr.getRecordSize(); - pos += unistr.getRecordSize(); - } - else - { - if (available >= _string_minimal_overhead) - { - int toBeWritten = - unistr.maxBrokenLength(available); - - stringbyteswritten += toBeWritten; - stringreminant = - (unistr.getRecordSize() - toBeWritten) - + LittleEndianConsts.BYTE_SIZE; - if (available != toBeWritten) - { - int shortrecord = record - - (available - toBeWritten); - - _record_lengths.set( - _record_lengths.size() - 1, - new Integer( - shortrecord - _std_record_overhead)); - record = shortrecord; - } - lastneedcontinue = true; - unipos++; - } - else - { - int shortrecord = record - available; - - _record_lengths.set( - _record_lengths.size() - 1, - new Integer( - shortrecord - _std_record_overhead)); - record = shortrecord; - } - break; - } - } - totalWritten += record; - } - retval = totalWritten; - } - else - { - - // short data: write one simple SST record - retval = _sst_record_overhead + unicodesize; - _record_lengths.add(new Integer(unicodesize)); - } - return retval; + return serializer.getRecordSize(); } - private int calculateUnicodeSize() + SSTDeserializer getDeserializer() { - int retval = 0; + return deserializer; + } - for (int k = 0; k < field_3_strings.size(); k++) - { - UnicodeString string = - ( UnicodeString ) field_3_strings.get(new Integer(k)); - - retval += string.getRecordSize(); - } - return retval; + /** + * Strange to handle continue records this way. Is it a smell? + */ + public void processContinueRecord( byte[] record ) + { + deserializer.processContinueRecord( record ); } } + + diff --git a/src/java/org/apache/poi/hssf/record/SSTSerializer.java b/src/java/org/apache/poi/hssf/record/SSTSerializer.java new file mode 100644 index 000000000..580227914 --- /dev/null +++ b/src/java/org/apache/poi/hssf/record/SSTSerializer.java @@ -0,0 +1,356 @@ +/* ==================================================================== + * The Apache Software License, Version 1.1 + * + * Copyright (c) 2002 The Apache Software Foundation. All rights + * reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The end-user documentation included with the redistribution, + * if any, must include the following acknowledgment: + * "This product includes software developed by the + * Apache Software Foundation (http://www.apache.org/)." + * Alternately, this acknowledgment may appear in the software itself, + * if and wherever such third-party acknowledgments normally appear. + * + * 4. The names "Apache" and "Apache Software Foundation" and + * "Apache POI" must not be used to endorse or promote products + * derived from this software without prior written permission. For + * written permission, please contact apache@apache.org. + * + * 5. Products derived from this software may not be called "Apache", + * "Apache POI", nor may "Apache" appear in their name, without + * prior written permission of the Apache Software Foundation. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * . + */ + +package org.apache.poi.hssf.record; + +import org.apache.poi.util.BinaryTree; +import org.apache.poi.util.LittleEndianConsts; + +import java.util.List; +import java.util.ArrayList; + +/** + * This class handles serialization of SST records. It utilizes the record processor + * class write individual records. This has been refactored from the SSTRecord class. + * + * @author Glen Stampoultzis (glens at apache.org) + */ +class SSTSerializer +{ + + private List recordLengths; + private BinaryTree strings; + private int numStrings; + private int numUniqueStrings; + private SSTRecordHeader sstRecordHeader; + + public SSTSerializer( List recordLengths, BinaryTree strings, int numStrings, int numUniqueStrings ) + { + this.recordLengths = recordLengths; + this.strings = strings; + this.numStrings = numStrings; + this.numUniqueStrings = numUniqueStrings; + this.sstRecordHeader = new SSTRecordHeader(numStrings, numUniqueStrings); + } + + /** + * Create a byte array consisting of an SST record and any + * required Continue records, ready to be written out. + *

+ * If an SST record and any subsequent Continue records are read + * in to create this instance, this method should produce a byte + * array that is identical to the byte array produced by + * concatenating the input records' data. + * + * @return the byte array + */ + public int serialize( int offset, byte[] data ) + { + int record_size = getRecordSize(); + int record_length_index = 0; + + if ( calculateUnicodeSize() > SSTRecord.MAX_DATA_SPACE ) + serializeLargeRecord( record_size, record_length_index, data, offset ); + else + serializeSingleSSTRecord( data, offset, record_length_index ); + return record_size; + } + + private int calculateUnicodeSize() + { + int retval = 0; + + for ( int k = 0; k < strings.size(); k++ ) + { + retval += getUnicodeString(k).getRecordSize(); + } + return retval; + } + + // we can probably simplify this later...this calculates the size + // w/o serializing but still is a bit slow + public int getRecordSize() + { + recordLengths = new ArrayList(); + int retval = 0; + int unicodesize = calculateUnicodeSize(); + + if ( unicodesize > SSTRecord.MAX_DATA_SPACE ) + { + retval = calcRecordSizesForLongStrings( unicodesize ); + } + else + { + // short data: write one simple SST record + retval = SSTRecord.SST_RECORD_OVERHEAD + unicodesize; + recordLengths.add( new Integer( unicodesize ) ); + } + return retval; + } + + private int calcRecordSizesForLongStrings( int unicodesize ) + { + int retval; + UnicodeString unistr = null; + int stringreminant = 0; + int unipos = 0; + boolean lastneedcontinue = false; + int stringbyteswritten = 0; + boolean finished = false; + boolean first_record = true; + int totalWritten = 0; + + while ( !finished ) + { + int record = 0; + int pos = 0; + + if ( first_record ) + { + + // writing SST record + record = SSTRecord.MAX_RECORD_SIZE; + pos = 12; + first_record = false; + recordLengths.add( new Integer( record - SSTRecord.STD_RECORD_OVERHEAD ) ); + } + else + { + + // writing continue record + pos = 0; + int to_be_written = ( unicodesize - stringbyteswritten ) + ( lastneedcontinue ? 
1 : 0 ); + int size = Math.min( SSTRecord.MAX_RECORD_SIZE - SSTRecord.STD_RECORD_OVERHEAD, to_be_written ); + + if ( size == to_be_written ) + { + finished = true; + } + record = size + SSTRecord.STD_RECORD_OVERHEAD; + recordLengths.add( new Integer( size ) ); + pos = 4; + } + if ( lastneedcontinue ) + { + int available = SSTRecord.MAX_RECORD_SIZE - pos; + + if ( stringreminant <= available ) + { + + // write reminant + stringbyteswritten += stringreminant - 1; + pos += stringreminant; + lastneedcontinue = false; + } + else + { + + // write as much of the remnant as possible + int toBeWritten = unistr.maxBrokenLength( available ); + + if ( available != toBeWritten ) + { + int shortrecord = record - ( available - toBeWritten ); + recordLengths.set( recordLengths.size() - 1, + new Integer( shortrecord - SSTRecord.STD_RECORD_OVERHEAD ) ); + record = shortrecord; + } + stringbyteswritten += toBeWritten - 1; + pos += toBeWritten; + stringreminant -= toBeWritten - 1; + lastneedcontinue = true; + } + } + for ( ; unipos < strings.size(); unipos++ ) + { + int available = SSTRecord.MAX_RECORD_SIZE - pos; + Integer intunipos = new Integer( unipos ); + + unistr = ( (UnicodeString) strings.get( intunipos ) ); + if ( unistr.getRecordSize() <= available ) + { + stringbyteswritten += unistr.getRecordSize(); + pos += unistr.getRecordSize(); + } + else + { + if ( available >= SSTRecord.STRING_MINIMAL_OVERHEAD ) + { + int toBeWritten = + unistr.maxBrokenLength( available ); + + stringbyteswritten += toBeWritten; + stringreminant = + ( unistr.getRecordSize() - toBeWritten ) + + LittleEndianConsts.BYTE_SIZE; + if ( available != toBeWritten ) + { + int shortrecord = record + - ( available - toBeWritten ); + + recordLengths.set( + recordLengths.size() - 1, + new Integer( + shortrecord - SSTRecord.STD_RECORD_OVERHEAD ) ); + record = shortrecord; + } + lastneedcontinue = true; + unipos++; + } + else + { + int shortrecord = record - available; + + recordLengths.set( recordLengths.size() - 1, + new Integer( shortrecord - SSTRecord.STD_RECORD_OVERHEAD ) ); + record = shortrecord; + } + break; + } + } + totalWritten += record; + } + retval = totalWritten; + + return retval; + } + + + private void serializeSingleSSTRecord( byte[] data, int offset, int record_length_index ) + { + // short data: write one simple SST record + + int len = ( (Integer) recordLengths.get( record_length_index++ ) ).intValue(); + int recordSize = SSTRecord.SST_RECORD_OVERHEAD + len - SSTRecord.STD_RECORD_OVERHEAD; + sstRecordHeader.writeSSTHeader( data, 0 + offset, recordSize ); + int pos = SSTRecord.SST_RECORD_OVERHEAD; + + for ( int k = 0; k < strings.size(); k++ ) + { +// UnicodeString unistr = ( (UnicodeString) strings.get( new Integer( k ) ) ); + System.arraycopy( getUnicodeString(k).serialize(), 0, data, pos + offset, getUnicodeString(k).getRecordSize() ); + pos += getUnicodeString(k).getRecordSize(); + } + } + + /** + * Large records are serialized to an SST and to one or more CONTINUE records. Joy. They have the special + * characteristic that they can change the option field when a single string is split across to a + * CONTINUE record. 
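
For orientation, a rough sizing sketch using the patch's size constants (8228-byte maximum record, 4 bytes of sid/length overhead, 12 bytes of SST overhead). The real calcRecordSizesForLongStrings is more careful, since a string may only be broken after its 3-byte header; the class name and sample size here are illustrative only, not part of the patch.

    // Sketch only: roughly how many records (SST + continues) a given
    // amount of string data needs.
    public class SstSizingSketch
    {
        public static void main( String[] args )
        {
            int maxRecordSize = 8228;                       // MAX_RECORD_SIZE
            int stdRecordOverhead = 4;                      // STD_RECORD_OVERHEAD: sid + length
            int sstRecordOverhead = stdRecordOverhead + 8;  // SST_RECORD_OVERHEAD: plus the two counts
            int unicodeSize = 30000;                        // sample total size of the string data

            int records = 1;                                // the SST record itself
            int remaining = unicodeSize - ( maxRecordSize - sstRecordOverhead );
            while ( remaining > 0 )
            {
                records++;                                  // one more ContinueRecord
                remaining -= maxRecordSize - stdRecordOverhead;
            }
            System.out.println( unicodeSize + " bytes of string data need roughly "
                    + records + " records" );
        }
    }
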
+     */
+    private void serializeLargeRecord( int record_size, int record_length_index, byte[] buffer, int offset )
+    {
+
+        byte[] stringReminant = null;
+        int stringIndex = 0;
+        boolean lastneedcontinue = false;
+        boolean first_record = true;
+        int totalWritten = 0;
+
+        while ( totalWritten != record_size )
+        {
+            int recordLength = ( (Integer) recordLengths.get( record_length_index++ ) ).intValue();
+            RecordProcessor recordProcessor = new RecordProcessor( buffer,
+                    recordLength, numStrings, numUniqueStrings );
+
+            // write the appropriate header
+            recordProcessor.writeRecordHeader( offset, totalWritten, recordLength, first_record );
+            first_record = false;
+
+            // now, write the rest of the data into the current
+            // record space
+            if ( lastneedcontinue )
+            {
+                lastneedcontinue = stringReminant.length > recordProcessor.getAvailable();
+                // the last string in the previous record was not written out completely
+                stringReminant = recordProcessor.writeStringRemainder( lastneedcontinue,
+                        stringReminant, offset, totalWritten );
+            }
+
+            // last string's remnant, if any, is cleaned up as best as can be done ... now let's try and write
+            // some more strings
+            for ( ; stringIndex < strings.size(); stringIndex++ )
+            {
+                UnicodeString unistr = getUnicodeString( stringIndex );
+
+                if ( unistr.getRecordSize() <= recordProcessor.getAvailable() )
+                {
+                    recordProcessor.writeWholeString( unistr, offset, totalWritten );
+                }
+                else
+                {
+
+                    // can't write the entire string out
+                    if ( recordProcessor.getAvailable() >= SSTRecord.STRING_MINIMAL_OVERHEAD )
+                    {
+
+                        // we can write some of it
+                        stringReminant = recordProcessor.writePartString( unistr, offset, totalWritten );
+                        lastneedcontinue = true;
+                        stringIndex++;
+                    }
+                    break;
+                }
+            }
+            totalWritten += recordLength + SSTRecord.STD_RECORD_OVERHEAD;
+        }
+    }
+
+    private UnicodeString getUnicodeString( int index )
+    {
+        Integer intunipos = new Integer( index );
+        return ( (UnicodeString) strings.get( intunipos ) );
+    }
+
+}
diff --git a/src/java/org/apache/poi/hssf/record/UnicodeString.java b/src/java/org/apache/poi/hssf/record/UnicodeString.java
index 097be19b1..2d6881525 100644
--- a/src/java/org/apache/poi/hssf/record/UnicodeString.java
+++ b/src/java/org/apache/poi/hssf/record/UnicodeString.java
@@ -66,6 +66,7 @@ import org.apache.poi.util.StringUtil;
 * REFERENCE: PG 264 Microsoft Excel 97 Developer's Kit (ISBN: 1-57231-498-2)

 * @author Andrew C. Oliver
 * @author Marc Johnson (mjohnson at apache dot org)
+ * @author Glen Stampoultzis (glens at apache.org)
 * @version 2.0-pre
 */
@@ -77,12 +78,28 @@ public class UnicodeString
     private short field_1_charCount; // = 0;
     private byte field_2_optionflags; // = 0;
     private String field_3_string; // = null;
+    private final int RICH_TEXT_BIT = 8;
+
+    public UnicodeString()
+    {
+    }
+
     public int hashCode()
     {
-        return field_1_charCount;
+        int stringHash = 0;
+        if (field_3_string != null)
+            stringHash = field_3_string.hashCode();
+        return field_1_charCount + stringHash;
     }
 
+    /**
+     * Our handling of equals is inconsistent with compareTo. The trouble is that, because we don't truly
+     * understand rich text fields yet, it's difficult to make a sound comparison.
+     *
+     * @param o The object to compare.
+     * @return true if the object is actually equal.
+     */
     public boolean equals(Object o)
     {
         if ((o == null) || (o.getClass() != this.getClass()))
@@ -96,10 +113,6 @@ public class UnicodeString
                && field_3_string.equals(other.field_3_string));
     }
 
-    public UnicodeString()
-    {
-    }
-
     /**
      * construct a unicode string record and fill its fields, ID is ignored
      * @param id - ignored
@@ -278,19 +291,10 @@ public class UnicodeString
     public int serialize(int offset, byte [] data)
     {
-        int charsize = 1;
-
-        if (getOptionFlags() == 1)
-        {
-            charsize = 2;
-        }
-
-        // byte[] retval = new byte[ 3 + (getString().length() * charsize) ];
         LittleEndian.putShort(data, 0 + offset, getCharCount());
         data[ 2 + offset ] = getOptionFlags();
-// System.out.println("Unicode: We've got "+retval[2]+" for our option flag");
-        if (getOptionFlags() == 0)
+        if (!isUncompressedUnicode())
         {
             StringUtil.putCompressedUnicode(getString(), data, 0x3 + offset);
         }
@@ -302,14 +306,14 @@ public class UnicodeString
         return getRecordSize();
     }
 
+    private boolean isUncompressedUnicode()
+    {
+        return (getOptionFlags() & 0x01) == 1;
+    }
+
     public int getRecordSize()
     {
-        int charsize = 1;
-
-        if (getOptionFlags() == 1)
-        {
-            charsize = 2;
-        }
+        int charsize = isUncompressedUnicode() ?
2 : 1; return 3 + (getString().length() * charsize); } @@ -338,11 +342,16 @@ public class UnicodeString return this.getString().compareTo(str.getString()); } + public boolean isRichText() + { + return (getOptionFlags() & RICH_TEXT_BIT) != 0; + } + int maxBrokenLength(final int proposedBrokenLength) { int rval = proposedBrokenLength; - if ((field_2_optionflags & 1) == 1) + if (isUncompressedUnicode()) { int proposedStringLength = proposedBrokenLength - 3; @@ -355,12 +364,4 @@ public class UnicodeString return rval; } -// public boolean equals(Object obj) { -// if (!(obj instanceof UnicodeString)) return false; -// -// UnicodeString str = (UnicodeString)obj; -// -// -// return this.getString().equals(str.getString()); -// } } diff --git a/src/java/org/apache/poi/util/LittleEndian.java b/src/java/org/apache/poi/util/LittleEndian.java index eb048c8de..7259154cc 100644 --- a/src/java/org/apache/poi/util/LittleEndian.java +++ b/src/java/org/apache/poi/util/LittleEndian.java @@ -617,4 +617,5 @@ public class LittleEndian return copy; } + } diff --git a/src/testcases/org/apache/poi/hssf/data/duprich1.xls b/src/testcases/org/apache/poi/hssf/data/duprich1.xls new file mode 100644 index 000000000..3fddbedd2 Binary files /dev/null and b/src/testcases/org/apache/poi/hssf/data/duprich1.xls differ diff --git a/src/testcases/org/apache/poi/hssf/data/duprich2.xls b/src/testcases/org/apache/poi/hssf/data/duprich2.xls new file mode 100644 index 000000000..57af63b3d Binary files /dev/null and b/src/testcases/org/apache/poi/hssf/data/duprich2.xls differ diff --git a/src/testcases/org/apache/poi/hssf/record/TestSSTRecord.java b/src/testcases/org/apache/poi/hssf/record/TestSSTRecord.java index 56e6fec76..f6963e137 100644 --- a/src/testcases/org/apache/poi/hssf/record/TestSSTRecord.java +++ b/src/testcases/org/apache/poi/hssf/record/TestSSTRecord.java @@ -1,4 +1,3 @@ - /* ==================================================================== * The Apache Software License, Version 1.1 * @@ -55,24 +54,28 @@ package org.apache.poi.hssf.record; -import org.apache.poi.util.*; - -import junit.framework.*; +import junit.framework.TestCase; +import org.apache.poi.util.LittleEndian; +import org.apache.poi.util.LittleEndianConsts; +import org.apache.poi.hssf.usermodel.HSSFWorkbook; +import org.apache.poi.hssf.usermodel.HSSFSheet; import java.io.*; - -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; /** * @author Marc Johnson (mjohnson at apache dot org) */ public class TestSSTRecord - extends TestCase + extends TestCase { - private String _test_file_path; + private String _test_file_path; private static final String _test_file_path_property = - "HSSF.testdata.path"; + "HSSF.testdata.path"; /** * Creates new TestSSTRecord @@ -80,10 +83,10 @@ public class TestSSTRecord * @param name */ - public TestSSTRecord(String name) + public TestSSTRecord( String name ) { - super(name); - _test_file_path = System.getProperty(_test_file_path_property); + super( name ); + _test_file_path = System.getProperty( _test_file_path_property ); } /** @@ -93,118 +96,118 @@ public class TestSSTRecord */ public void testProcessContinueRecord() - throws IOException + throws IOException { - byte[] testdata = readTestData("BigSSTRecord"); - byte[] input = new byte[ testdata.length - 4 ]; + byte[] testdata = readTestData( "BigSSTRecord" ); + byte[] input = new byte[testdata.length - 4]; - System.arraycopy(testdata, 4, input, 0, input.length); - SSTRecord record = - new 
SSTRecord(LittleEndian.getShort(testdata, 0), - LittleEndian.getShort(testdata, 2), input); - byte[] continueRecord = readTestData("BigSSTRecordCR"); + System.arraycopy( testdata, 4, input, 0, input.length ); + SSTRecord record = + new SSTRecord( LittleEndian.getShort( testdata, 0 ), + LittleEndian.getShort( testdata, 2 ), input ); + byte[] continueRecord = readTestData( "BigSSTRecordCR" ); - input = new byte[ continueRecord.length - 4 ]; - System.arraycopy(continueRecord, 4, input, 0, input.length); - record.processContinueRecord(input); - assertEquals(1464, record.getNumStrings()); - assertEquals(688, record.getNumUniqueStrings()); - assertEquals(688, record.countStrings()); + input = new byte[continueRecord.length - 4]; + System.arraycopy( continueRecord, 4, input, 0, input.length ); + record.processContinueRecord( input ); + assertEquals( 1464, record.getNumStrings() ); + assertEquals( 688, record.getNumUniqueStrings() ); + assertEquals( 688, record.countStrings() ); byte[] ser_output = record.serialize(); - int offset = 0; - short type = LittleEndian.getShort(ser_output, offset); + int offset = 0; + short type = LittleEndian.getShort( ser_output, offset ); offset += LittleEndianConsts.SHORT_SIZE; - short length = LittleEndian.getShort(ser_output, offset); + short length = LittleEndian.getShort( ser_output, offset ); offset += LittleEndianConsts.SHORT_SIZE; - byte[] recordData = new byte[ length ]; + byte[] recordData = new byte[length]; - System.arraycopy(ser_output, offset, recordData, 0, length); + System.arraycopy( ser_output, offset, recordData, 0, length ); offset += length; - SSTRecord testRecord = new SSTRecord(type, length, recordData); + SSTRecord testRecord = new SSTRecord( type, length, recordData ); - assertEquals(ContinueRecord.sid, - LittleEndian.getShort(ser_output, offset)); + assertEquals( ContinueRecord.sid, + LittleEndian.getShort( ser_output, offset ) ); offset += LittleEndianConsts.SHORT_SIZE; - length = LittleEndian.getShort(ser_output, offset); + length = LittleEndian.getShort( ser_output, offset ); offset += LittleEndianConsts.SHORT_SIZE; - byte[] cr = new byte[ length ]; + byte[] cr = new byte[length]; - System.arraycopy(ser_output, offset, cr, 0, length); + System.arraycopy( ser_output, offset, cr, 0, length ); offset += length; - assertEquals(offset, ser_output.length); - testRecord.processContinueRecord(cr); - assertEquals(record, testRecord); + assertEquals( offset, ser_output.length ); + testRecord.processContinueRecord( cr ); + assertEquals( record, testRecord ); // testing based on new bug report - testdata = readTestData("BigSSTRecord2"); - input = new byte[ testdata.length - 4 ]; - System.arraycopy(testdata, 4, input, 0, input.length); - record = new SSTRecord(LittleEndian.getShort(testdata, 0), - LittleEndian.getShort(testdata, 2), input); - byte[] continueRecord1 = readTestData("BigSSTRecord2CR1"); + testdata = readTestData( "BigSSTRecord2" ); + input = new byte[testdata.length - 4]; + System.arraycopy( testdata, 4, input, 0, input.length ); + record = new SSTRecord( LittleEndian.getShort( testdata, 0 ), + LittleEndian.getShort( testdata, 2 ), input ); + byte[] continueRecord1 = readTestData( "BigSSTRecord2CR1" ); - input = new byte[ continueRecord1.length - 4 ]; - System.arraycopy(continueRecord1, 4, input, 0, input.length); - record.processContinueRecord(input); - byte[] continueRecord2 = readTestData("BigSSTRecord2CR2"); + input = new byte[continueRecord1.length - 4]; + System.arraycopy( continueRecord1, 4, input, 0, input.length ); + 
record.processContinueRecord( input ); + byte[] continueRecord2 = readTestData( "BigSSTRecord2CR2" ); - input = new byte[ continueRecord2.length - 4 ]; - System.arraycopy(continueRecord2, 4, input, 0, input.length); - record.processContinueRecord(input); - byte[] continueRecord3 = readTestData("BigSSTRecord2CR3"); + input = new byte[continueRecord2.length - 4]; + System.arraycopy( continueRecord2, 4, input, 0, input.length ); + record.processContinueRecord( input ); + byte[] continueRecord3 = readTestData( "BigSSTRecord2CR3" ); - input = new byte[ continueRecord3.length - 4 ]; - System.arraycopy(continueRecord3, 4, input, 0, input.length); - record.processContinueRecord(input); - byte[] continueRecord4 = readTestData("BigSSTRecord2CR4"); + input = new byte[continueRecord3.length - 4]; + System.arraycopy( continueRecord3, 4, input, 0, input.length ); + record.processContinueRecord( input ); + byte[] continueRecord4 = readTestData( "BigSSTRecord2CR4" ); - input = new byte[ continueRecord4.length - 4 ]; - System.arraycopy(continueRecord4, 4, input, 0, input.length); - record.processContinueRecord(input); - byte[] continueRecord5 = readTestData("BigSSTRecord2CR5"); + input = new byte[continueRecord4.length - 4]; + System.arraycopy( continueRecord4, 4, input, 0, input.length ); + record.processContinueRecord( input ); + byte[] continueRecord5 = readTestData( "BigSSTRecord2CR5" ); - input = new byte[ continueRecord5.length - 4 ]; - System.arraycopy(continueRecord5, 4, input, 0, input.length); - record.processContinueRecord(input); - byte[] continueRecord6 = readTestData("BigSSTRecord2CR6"); + input = new byte[continueRecord5.length - 4]; + System.arraycopy( continueRecord5, 4, input, 0, input.length ); + record.processContinueRecord( input ); + byte[] continueRecord6 = readTestData( "BigSSTRecord2CR6" ); - input = new byte[ continueRecord6.length - 4 ]; - System.arraycopy(continueRecord6, 4, input, 0, input.length); - record.processContinueRecord(input); - byte[] continueRecord7 = readTestData("BigSSTRecord2CR7"); + input = new byte[continueRecord6.length - 4]; + System.arraycopy( continueRecord6, 4, input, 0, input.length ); + record.processContinueRecord( input ); + byte[] continueRecord7 = readTestData( "BigSSTRecord2CR7" ); - input = new byte[ continueRecord7.length - 4 ]; - System.arraycopy(continueRecord7, 4, input, 0, input.length); - record.processContinueRecord(input); - assertEquals(158642, record.getNumStrings()); - assertEquals(5249, record.getNumUniqueStrings()); - assertEquals(5249, record.countStrings()); + input = new byte[continueRecord7.length - 4]; + System.arraycopy( continueRecord7, 4, input, 0, input.length ); + record.processContinueRecord( input ); + assertEquals( 158642, record.getNumStrings() ); + assertEquals( 5249, record.getNumUniqueStrings() ); + assertEquals( 5249, record.countStrings() ); ser_output = record.serialize(); - offset = 0; - type = LittleEndian.getShort(ser_output, offset); - offset += LittleEndianConsts.SHORT_SIZE; - length = LittleEndian.getShort(ser_output, offset); - offset += LittleEndianConsts.SHORT_SIZE; - recordData = new byte[ length ]; - System.arraycopy(ser_output, offset, recordData, 0, length); - offset += length; - testRecord = new SSTRecord(type, length, recordData); - for (int count = 0; count < 7; count++) + offset = 0; + type = LittleEndian.getShort( ser_output, offset ); + offset += LittleEndianConsts.SHORT_SIZE; + length = LittleEndian.getShort( ser_output, offset ); + offset += LittleEndianConsts.SHORT_SIZE; + recordData = new 
byte[length]; + System.arraycopy( ser_output, offset, recordData, 0, length ); + offset += length; + testRecord = new SSTRecord( type, length, recordData ); + for ( int count = 0; count < 7; count++ ) { - assertEquals(ContinueRecord.sid, - LittleEndian.getShort(ser_output, offset)); + assertEquals( ContinueRecord.sid, + LittleEndian.getShort( ser_output, offset ) ); offset += LittleEndianConsts.SHORT_SIZE; - length = LittleEndian.getShort(ser_output, offset); + length = LittleEndian.getShort( ser_output, offset ); offset += LittleEndianConsts.SHORT_SIZE; - cr = new byte[ length ]; - System.arraycopy(ser_output, offset, cr, 0, length); - testRecord.processContinueRecord(cr); + cr = new byte[length]; + System.arraycopy( ser_output, offset, cr, 0, length ); + testRecord.processContinueRecord( cr ); offset += length; } - assertEquals(offset, ser_output.length); - assertEquals(record, testRecord); + assertEquals( offset, ser_output.length ); + assertEquals( record, testRecord ); } /** @@ -214,23 +217,23 @@ public class TestSSTRecord */ public void testHugeStrings() - throws IOException + throws IOException { - SSTRecord record = new SSTRecord(); - byte[][] bstrings = - { - new byte[ 9000 ], new byte[ 7433 ], new byte[ 9002 ], - new byte[ 16998 ] - }; - String[] strings = new String[ bstrings.length ]; - int total_length = 0; + SSTRecord record = new SSTRecord(); + byte[][] bstrings = + { + new byte[9000], new byte[7433], new byte[9002], + new byte[16998] + }; + String[] strings = new String[bstrings.length]; + int total_length = 0; - for (int k = 0; k < bstrings.length; k++) + for ( int k = 0; k < bstrings.length; k++ ) { - Arrays.fill(bstrings[ k ], ( byte ) ('a' + k)); - strings[ k ] = new String(bstrings[ k ]); - record.addString(strings[ k ]); - total_length += 3 + bstrings[ k ].length; + Arrays.fill( bstrings[k], (byte) ( 'a' + k ) ); + strings[k] = new String( bstrings[k] ); + record.addString( strings[k] ); + total_length += 3 + bstrings[k].length; } // add overhead of SST record @@ -240,88 +243,88 @@ public class TestSSTRecord total_length += 4; // add overhead of six records - total_length += (6 * 4); - byte[] content = new byte[ record.getRecordSize() ]; + total_length += ( 6 * 4 ); + byte[] content = new byte[record.getRecordSize()]; - record.serialize(0, content); - assertEquals(total_length, content.length); - for (int index = 0; index != content.length; ) + record.serialize( 0, content ); + assertEquals( total_length, content.length ); + for ( int index = 0; index != content.length; ) { - short record_type = LittleEndian.getShort(content, index); + short record_type = LittleEndian.getShort( content, index ); index += LittleEndianConsts.SHORT_SIZE; - short record_length = LittleEndian.getShort(content, index); + short record_length = LittleEndian.getShort( content, index ); index += LittleEndianConsts.SHORT_SIZE; - byte[] data = new byte[ record_length ]; + byte[] data = new byte[record_length]; - System.arraycopy(content, index, data, 0, record_length); + System.arraycopy( content, index, data, 0, record_length ); index += record_length; - if (record_type == SSTRecord.sid) + if ( record_type == SSTRecord.sid ) { - record = new SSTRecord(record_type, record_length, data); + record = new SSTRecord( record_type, record_length, data ); } else { - record.processContinueRecord(data); + record.processContinueRecord( data ); } } - assertEquals(strings.length, record.getNumStrings()); - assertEquals(strings.length, record.getNumUniqueStrings()); - assertEquals(strings.length, 
record.countStrings()); - for (int k = 0; k < strings.length; k++) + assertEquals( strings.length, record.getNumStrings() ); + assertEquals( strings.length, record.getNumUniqueStrings() ); + assertEquals( strings.length, record.countStrings() ); + for ( int k = 0; k < strings.length; k++ ) { - assertEquals(strings[ k ], record.getString(k)); + assertEquals( strings[k], record.getString( k ) ); } - record = new SSTRecord(); - bstrings[ 1 ] = new byte[ bstrings[ 1 ].length - 1 ]; - for (int k = 0; k < bstrings.length; k++) + record = new SSTRecord(); + bstrings[1] = new byte[bstrings[1].length - 1]; + for ( int k = 0; k < bstrings.length; k++ ) { - if ((bstrings[ k ].length % 2) == 1) + if ( ( bstrings[k].length % 2 ) == 1 ) { - Arrays.fill(bstrings[ k ], ( byte ) ('a' + k)); - strings[ k ] = new String(bstrings[ k ]); + Arrays.fill( bstrings[k], (byte) ( 'a' + k ) ); + strings[k] = new String( bstrings[k] ); } else { - char[] data = new char[ bstrings[ k ].length / 2 ]; + char[] data = new char[bstrings[k].length / 2]; - Arrays.fill(data, ( char ) ('\u2122' + k)); - strings[ k ] = new String(data); + Arrays.fill( data, (char) ( '\u2122' + k ) ); + strings[k] = new String( data ); } - record.addString(strings[ k ]); + record.addString( strings[k] ); } - content = new byte[ record.getRecordSize() ]; - record.serialize(0, content); + content = new byte[record.getRecordSize()]; + record.serialize( 0, content ); total_length--; - assertEquals(total_length, content.length); - for (int index = 0; index != content.length; ) + assertEquals( total_length, content.length ); + for ( int index = 0; index != content.length; ) { - short record_type = LittleEndian.getShort(content, index); + short record_type = LittleEndian.getShort( content, index ); index += LittleEndianConsts.SHORT_SIZE; - short record_length = LittleEndian.getShort(content, index); + short record_length = LittleEndian.getShort( content, index ); index += LittleEndianConsts.SHORT_SIZE; - byte[] data = new byte[ record_length ]; + byte[] data = new byte[record_length]; - System.arraycopy(content, index, data, 0, record_length); + System.arraycopy( content, index, data, 0, record_length ); index += record_length; - if (record_type == SSTRecord.sid) + if ( record_type == SSTRecord.sid ) { - record = new SSTRecord(record_type, record_length, data); + record = new SSTRecord( record_type, record_length, data ); } else { - record.processContinueRecord(data); + record.processContinueRecord( data ); } } - assertEquals(strings.length, record.getNumStrings()); - assertEquals(strings.length, record.getNumUniqueStrings()); - assertEquals(strings.length, record.countStrings()); - for (int k = 0; k < strings.length; k++) + assertEquals( strings.length, record.getNumStrings() ); + assertEquals( strings.length, record.getNumUniqueStrings() ); + assertEquals( strings.length, record.countStrings() ); + for ( int k = 0; k < strings.length; k++ ) { - assertEquals(strings[ k ], record.getString(k)); + assertEquals( strings[k], record.getString( k ) ); } } @@ -332,7 +335,7 @@ public class TestSSTRecord */ public void testSSTRecordBug() - throws IOException + throws IOException { // create an SSTRecord and write a certain pattern of strings @@ -342,22 +345,22 @@ public class TestSSTRecord // the record will start with two integers, then this string // ... 
that will eat up 16 of the 8224 bytes that the record // can hold - record.addString("Hello"); + record.addString( "Hello" ); // now we have an additional 8208 bytes, which is an exact // multiple of 16 bytes long testvalue = 1000000000000L; - for (int k = 0; k < 2000; k++) + for ( int k = 0; k < 2000; k++ ) { - record.addString(String.valueOf(testvalue++)); + record.addString( String.valueOf( testvalue++ ) ); } - byte[] content = new byte[ record.getRecordSize() ]; + byte[] content = new byte[record.getRecordSize()]; - record.serialize(0, content); - assertEquals(( byte ) 13, content[ 4 + 8228 ]); - assertEquals(( byte ) 13, content[ 4 + 8228 * 2 ]); - assertEquals(( byte ) 13, content[ 4 + 8228 * 3 ]); + record.serialize( 0, content ); + assertEquals( (byte) 13, content[4 + 8228] ); + assertEquals( (byte) 13, content[4 + 8228 * 2] ); + assertEquals( (byte) 13, content[4 + 8228 * 3] ); } /** @@ -367,43 +370,43 @@ public class TestSSTRecord public void testSimpleAddString() { SSTRecord record = new SSTRecord(); - String s1 = "Hello world"; + String s1 = "Hello world"; // \u2122 is the encoding of the trademark symbol ... - String s2 = "Hello world\u2122"; + String s2 = "Hello world\u2122"; - assertEquals(0, record.addString(s1)); - assertEquals(s1, record.getString(0)); - assertEquals(1, record.countStrings()); - assertEquals(1, record.getNumStrings()); - assertEquals(1, record.getNumUniqueStrings()); - assertEquals(0, record.addString(s1)); - assertEquals(s1, record.getString(0)); - assertEquals(1, record.countStrings()); - assertEquals(2, record.getNumStrings()); - assertEquals(1, record.getNumUniqueStrings()); - assertEquals(1, record.addString(s2)); - assertEquals(s2, record.getString(1)); - assertEquals(2, record.countStrings()); - assertEquals(3, record.getNumStrings()); - assertEquals(2, record.getNumUniqueStrings()); + assertEquals( 0, record.addString( s1 ) ); + assertEquals( s1, record.getString( 0 ) ); + assertEquals( 1, record.countStrings() ); + assertEquals( 1, record.getNumStrings() ); + assertEquals( 1, record.getNumUniqueStrings() ); + assertEquals( 0, record.addString( s1 ) ); + assertEquals( s1, record.getString( 0 ) ); + assertEquals( 1, record.countStrings() ); + assertEquals( 2, record.getNumStrings() ); + assertEquals( 1, record.getNumUniqueStrings() ); + assertEquals( 1, record.addString( s2 ) ); + assertEquals( s2, record.getString( 1 ) ); + assertEquals( 2, record.countStrings() ); + assertEquals( 3, record.getNumStrings() ); + assertEquals( 2, record.getNumUniqueStrings() ); Iterator iter = record.getStrings(); - while (iter.hasNext()) + while ( iter.hasNext() ) { - UnicodeString ucs = ( UnicodeString ) iter.next(); + UnicodeString ucs = (UnicodeString) iter.next(); - if (ucs.getString().equals(s1)) + if ( ucs.getString().equals( s1 ) ) { - assertEquals(( byte ) 0, ucs.getOptionFlags()); + assertEquals( (byte) 0, ucs.getOptionFlags() ); } - else if (ucs.getString().equals(s2)) + else if ( ucs.getString().equals( s2 ) ) { - assertEquals(( byte ) 1, ucs.getOptionFlags()); + assertEquals( (byte) 1, ucs.getOptionFlags() ); } else { - fail("cannot match string: " + ucs.getString()); + fail( "cannot match string: " + ucs.getString() ); } } } @@ -415,25 +418,25 @@ public class TestSSTRecord */ public void testReaderConstructor() - throws IOException + throws IOException { - byte[] testdata = readTestData("BigSSTRecord"); - byte[] input = new byte[ testdata.length - 4 ]; + byte[] testdata = readTestData( "BigSSTRecord" ); + byte[] input = new byte[testdata.length - 4]; - 
System.arraycopy(testdata, 4, input, 0, input.length); - SSTRecord record = new SSTRecord(LittleEndian.getShort(testdata, 0), - LittleEndian.getShort(testdata, 2), - input); + System.arraycopy( testdata, 4, input, 0, input.length ); + SSTRecord record = new SSTRecord( LittleEndian.getShort( testdata, 0 ), + LittleEndian.getShort( testdata, 2 ), + input ); - assertEquals(1464, record.getNumStrings()); - assertEquals(688, record.getNumUniqueStrings()); - assertEquals(492, record.countStrings()); - assertEquals(1, record.getExpectedChars()); - assertEquals("Consolidated B-24J Liberator The Dragon & His Tai", - record.getUnfinishedString()); - assertEquals(52, record.getTotalLength()); - assertEquals(3, record.getStringDataOffset()); - assertTrue(!record.isWideChar()); + assertEquals( 1464, record.getNumStrings() ); + assertEquals( 688, record.getNumUniqueStrings() ); + assertEquals( 492, record.countStrings() ); + assertEquals( 1, record.getDeserializer().getExpectedChars() ); + assertEquals( "Consolidated B-24J Liberator The Dragon & His Tai", + record.getDeserializer().getUnfinishedString() ); + assertEquals( 52, record.getDeserializer().getTotalLength() ); + assertEquals( 3, record.getDeserializer().getStringDataOffset() ); + assertTrue( !record.getDeserializer().isWideChar() ); } /** @@ -444,26 +447,26 @@ public class TestSSTRecord { SSTRecord record = new SSTRecord(); - assertEquals(0, record.getNumStrings()); - assertEquals(0, record.getNumUniqueStrings()); - assertEquals(0, record.countStrings()); - assertEquals(0, record.getExpectedChars()); - assertEquals("", record.getUnfinishedString()); - assertEquals(0, record.getTotalLength()); - assertEquals(0, record.getStringDataOffset()); - assertTrue(!record.isWideChar()); - byte[] output = record.serialize(); + assertEquals( 0, record.getNumStrings() ); + assertEquals( 0, record.getNumUniqueStrings() ); + assertEquals( 0, record.countStrings() ); + assertEquals( 0, record.getDeserializer().getExpectedChars() ); + assertEquals( "", record.getDeserializer().getUnfinishedString() ); + assertEquals( 0, record.getDeserializer().getTotalLength() ); + assertEquals( 0, record.getDeserializer().getStringDataOffset() ); + assertTrue( !record.getDeserializer().isWideChar() ); + byte[] output = record.serialize(); byte[] expected = - { - ( byte ) record.getSid(), ( byte ) (record.getSid() >> 8), - ( byte ) 8, ( byte ) 0, ( byte ) 0, ( byte ) 0, ( byte ) 0, - ( byte ) 0, ( byte ) 0, ( byte ) 0, ( byte ) 0, ( byte ) 0 - }; + { + (byte) record.getSid(), (byte) ( record.getSid() >> 8 ), + (byte) 8, (byte) 0, (byte) 0, (byte) 0, (byte) 0, + (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0 + }; - assertEquals(expected.length, output.length); - for (int k = 0; k < expected.length; k++) + assertEquals( expected.length, output.length ); + for ( int k = 0; k < expected.length; k++ ) { - assertEquals(String.valueOf(k), expected[ k ], output[ k ]); + assertEquals( String.valueOf( k ), expected[k], output[k] ); } } @@ -473,87 +476,87 @@ public class TestSSTRecord * @param ignored_args */ - public static void main(String [] ignored_args) + public static void main( String[] ignored_args ) { - System.out.println("Testing hssf.record.SSTRecord functionality"); - junit.textui.TestRunner.run(TestSSTRecord.class); + System.out.println( "Testing hssf.record.SSTRecord functionality" ); + junit.textui.TestRunner.run( TestSSTRecord.class ); } - private byte [] readTestData(String filename) - throws IOException + private byte[] readTestData( String filename ) + throws 
IOException { - File file = new File(_test_file_path - + File.separator - + filename); - FileInputStream stream = new FileInputStream(file); - int characterCount = 0; - byte b = ( byte ) 0; - List bytes = new ArrayList(); - boolean done = false; + File file = new File( _test_file_path + + File.separator + + filename ); + FileInputStream stream = new FileInputStream( file ); + int characterCount = 0; + byte b = (byte) 0; + List bytes = new ArrayList(); + boolean done = false; - while (!done) + while ( !done ) { int count = stream.read(); - switch (count) + switch ( count ) { - case '0' : - case '1' : - case '2' : - case '3' : - case '4' : - case '5' : - case '6' : - case '7' : - case '8' : - case '9' : + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': b <<= 4; - b += ( byte ) (count - '0'); + b += (byte) ( count - '0' ); characterCount++; - if (characterCount == 2) + if ( characterCount == 2 ) { - bytes.add(new Byte(b)); + bytes.add( new Byte( b ) ); characterCount = 0; - b = ( byte ) 0; + b = (byte) 0; } break; - case 'A' : - case 'B' : - case 'C' : - case 'D' : - case 'E' : - case 'F' : + case 'A': + case 'B': + case 'C': + case 'D': + case 'E': + case 'F': b <<= 4; - b += ( byte ) (count + 10 - 'A'); + b += (byte) ( count + 10 - 'A' ); characterCount++; - if (characterCount == 2) + if ( characterCount == 2 ) { - bytes.add(new Byte(b)); + bytes.add( new Byte( b ) ); characterCount = 0; - b = ( byte ) 0; + b = (byte) 0; } break; - case 'a' : - case 'b' : - case 'c' : - case 'd' : - case 'e' : - case 'f' : + case 'a': + case 'b': + case 'c': + case 'd': + case 'e': + case 'f': b <<= 4; - b += ( byte ) (count + 10 - 'a'); + b += (byte) ( count + 10 - 'a' ); characterCount++; - if (characterCount == 2) + if ( characterCount == 2 ) { - bytes.add(new Byte(b)); + bytes.add( new Byte( b ) ); characterCount = 0; - b = ( byte ) 0; + b = (byte) 0; } break; - case -1 : + case -1: done = true; break; @@ -562,13 +565,55 @@ public class TestSSTRecord } } stream.close(); - Byte[] polished = ( Byte [] ) bytes.toArray(new Byte[ 0 ]); - byte[] rval = new byte[ polished.length ]; + Byte[] polished = (Byte[]) bytes.toArray( new Byte[0] ); + byte[] rval = new byte[polished.length]; - for (int j = 0; j < polished.length; j++) + for ( int j = 0; j < polished.length; j++ ) { - rval[ j ] = polished[ j ].byteValue(); + rval[j] = polished[j].byteValue(); } return rval; } + + /** + * Tests that workbooks with rich text that duplicates a non rich text cell can be read and written. + */ + public void testReadWriteDuplicatedRichText1() + throws Exception + { + File file = new File( _test_file_path + File.separator + "duprich1.xls" ); + InputStream stream = new FileInputStream(file); + HSSFWorkbook wb = new HSSFWorkbook(stream); + stream.close(); + HSSFSheet sheet = wb.getSheetAt(1); + assertEquals("01/05 (Wed) ", sheet.getRow(0).getCell((short)8).getStringCellValue()); + assertEquals("01/05 (Wed)", sheet.getRow(1).getCell((short)8).getStringCellValue()); + + file = File.createTempFile("testout", "xls"); + FileOutputStream outStream = new FileOutputStream(file); + wb.write(outStream); + outStream.close(); + file.delete(); + + // test the second file. 
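+        // duprich2.xls contains rich text strings that duplicate otherwise plain
+        // strings; check the expected cell values, then make sure the workbook
+        // can still be written back out.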
+ file = new File( _test_file_path + File.separator + "duprich2.xls" ); + stream = new FileInputStream(file); + wb = new HSSFWorkbook(stream); + stream.close(); + sheet = wb.getSheetAt(0); + int row = 0; + assertEquals("Testing ", sheet.getRow(row++).getCell((short)0).getStringCellValue()); + assertEquals("rich", sheet.getRow(row++).getCell((short)0).getStringCellValue()); + assertEquals("text", sheet.getRow(row++).getCell((short)0).getStringCellValue()); + assertEquals("strings", sheet.getRow(row++).getCell((short)0).getStringCellValue()); + assertEquals("Testing ", sheet.getRow(row++).getCell((short)0).getStringCellValue()); + assertEquals("Testing", sheet.getRow(row++).getCell((short)0).getStringCellValue()); + +// file = new File("/tryme.xls"); + file = File.createTempFile("testout", ".xls"); + outStream = new FileOutputStream(file); + wb.write(outStream); + outStream.close(); + file.delete(); + } } diff --git a/src/testcases/org/apache/poi/util/TestLittleEndian.java b/src/testcases/org/apache/poi/util/TestLittleEndian.java index 89bc5a3a9..a42686cea 100644 --- a/src/testcases/org/apache/poi/util/TestLittleEndian.java +++ b/src/testcases/org/apache/poi/util/TestLittleEndian.java @@ -479,6 +479,12 @@ public class TestLittleEndian return result; } + public void testUnsignedShort() + throws Exception + { + assertEquals(0xffff, LittleEndian.getUShort(new byte[] { (byte)0xff, (byte)0xff }, 0)); + } + /** * main method to run the unit tests *