Merged from 1.5 branch.

git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/trunk@352660 13f79535-47bb-0310-9956-ffa450edef68

parent 2490325a95
commit 7b23ccd114
@@ -161,9 +161,7 @@ public class ContinueRecord

         // how many continue records do we need
         // System.out.println("In ProcessContinue");
-        int records =
-            (data.length
-             / 8214);   // we've a 1 offset but we're also off by one due to rounding...so it balances out
+        int records = (data.length / 8214);   // we've a 1 offset but we're also off by one due to rounding...so it balances out
         int offset  = 8214;

         // System.out.println("we have "+records+" continue records to process");
@@ -174,8 +172,7 @@ public class ContinueRecord
         for (int cr = 0; cr < records; cr++)
         {
             ContinueRecord contrec = new ContinueRecord();
-            int arraysize = Math.min((8214 - 4),
-                                     (data.length - offset));
+            int arraysize = Math.min((8214 - 4), (data.length - offset));
             byte[] crdata = new byte[ arraysize ];

             System.arraycopy(data, offset, crdata, 0, arraysize);
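For the division in the added line, a quick standalone check of the chunk arithmetic (plain Java, not part of this change; 8214 is the continue-chunk size used above):

// Standalone sketch of the integer-division chunk count used above (not POI code).
public class ContinueChunkMath
{
    public static void main( String[] args )
    {
        int chunk = 8214;
        int[] payloads = { 8213, 8214, 8215, 20000 };

        for ( int len : payloads )
        {
            int records = len / chunk;   // same expression as "int records = (data.length / 8214)"
            System.out.println( "payload " + len + " bytes -> " + records + " continue record(s)" );
        }
    }
}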
src/java/org/apache/poi/hssf/record/RecordProcessor.java  (new file, 202 lines)
@@ -0,0 +1,202 @@
|
||||
/* ====================================================================
|
||||
* The Apache Software License, Version 1.1
|
||||
*
|
||||
* Copyright (c) 2002 The Apache Software Foundation. All rights
|
||||
* reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* 3. The end-user documentation included with the redistribution,
|
||||
* if any, must include the following acknowledgment:
|
||||
* "This product includes software developed by the
|
||||
* Apache Software Foundation (http://www.apache.org/)."
|
||||
* Alternately, this acknowledgment may appear in the software itself,
|
||||
* if and wherever such third-party acknowledgments normally appear.
|
||||
*
|
||||
* 4. The names "Apache" and "Apache Software Foundation" and
|
||||
* "Apache POI" must not be used to endorse or promote products
|
||||
* derived from this software without prior written permission. For
|
||||
* written permission, please contact apache@apache.org.
|
||||
*
|
||||
* 5. Products derived from this software may not be called "Apache",
|
||||
* "Apache POI", nor may "Apache" appear in their name, without
|
||||
* prior written permission of the Apache Software Foundation.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
|
||||
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
|
||||
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
* ====================================================================
|
||||
*
|
||||
* This software consists of voluntary contributions made by many
|
||||
* individuals on behalf of the Apache Software Foundation. For more
|
||||
* information on the Apache Software Foundation, please see
|
||||
* <http://www.apache.org/>.
|
||||
*/
|
||||
|
||||
package org.apache.poi.hssf.record;

import org.apache.poi.util.LittleEndianConsts;
import org.apache.poi.util.LittleEndian;

/**
 * Process a single record.  That is, an SST record or a continue record.
 * Refactored from code originally in SSTRecord.
 *
 * @author Glen Stampoultzis (glens at apache.org)
 */
class RecordProcessor
{
    private byte[] data;
    private int recordOffset;
    private int available;
    private SSTRecordHeader sstRecordHeader;

    public RecordProcessor( byte[] data, int available, int numStrings, int numUniqueStrings )
    {
        this.data = data;
        this.available = available;
        this.sstRecordHeader = new SSTRecordHeader( numStrings, numUniqueStrings );
    }

    public int getAvailable()
    {
        return available;
    }

    public void writeRecordHeader( int offset, int totalWritten, int recordLength, boolean first_record )
    {
        if ( first_record )
        {
            available -= 8;
            recordOffset = sstRecordHeader.writeSSTHeader( data, recordOffset + offset + totalWritten, recordLength );
        }
        else
        {
            recordOffset = writeContinueHeader( data, recordOffset + offset + totalWritten, recordLength );
        }
    }

    public byte[] writeStringRemainder( boolean lastStringCompleted, byte[] stringreminant, int offset, int totalWritten )
    {
        if ( !lastStringCompleted )
        {
            // write the remnant -- it'll all fit neatly
            System.arraycopy( stringreminant, 0, data, recordOffset + offset + totalWritten, stringreminant.length );
            adjustPointers( stringreminant.length );
        }
        else
        {
            // write as much of the remnant as possible
            System.arraycopy( stringreminant, 0, data, recordOffset + offset + totalWritten, available );
            byte[] leftover = new byte[( stringreminant.length - available ) + LittleEndianConsts.BYTE_SIZE];

            System.arraycopy( stringreminant, available, leftover, LittleEndianConsts.BYTE_SIZE, stringreminant.length - available );
            leftover[0] = stringreminant[0];
            stringreminant = leftover;
            adjustPointers( available );    // consume all available remaining space
        }
        return stringreminant;
    }

    public void writeWholeString( UnicodeString unistr, int offset, int totalWritten )
    {
        unistr.serialize( recordOffset + offset + totalWritten, data );
        int rsize = unistr.getRecordSize();
        adjustPointers( rsize );
    }

    public byte[] writePartString( UnicodeString unistr, int offset, int totalWritten )
    {
        byte[] stringReminant;
        byte[] ucs = unistr.serialize();

        System.arraycopy( ucs, 0, data, recordOffset + offset + totalWritten, available );
        stringReminant = new byte[( ucs.length - available ) + LittleEndianConsts.BYTE_SIZE];
        System.arraycopy( ucs, available, stringReminant, LittleEndianConsts.BYTE_SIZE, ucs.length - available );
        stringReminant[0] = ucs[LittleEndianConsts.SHORT_SIZE];
        available = 0;
        return stringReminant;
    }


    private int writeContinueHeader( final byte[] data, final int pos,
                                     final int recsize )
    {
        int offset = pos;

        LittleEndian.putShort( data, offset, ContinueRecord.sid );
        offset += LittleEndianConsts.SHORT_SIZE;
        LittleEndian.putShort( data, offset, (short) ( recsize ) );
        offset += LittleEndianConsts.SHORT_SIZE;
        return offset - pos;
    }


    private void adjustPointers( int amount )
    {
        recordOffset += amount;
        available -= amount;
    }
}

class SSTRecordHeader
{
    int numStrings;
    int numUniqueStrings;

    public SSTRecordHeader( int numStrings, int numUniqueStrings )
    {
        this.numStrings = numStrings;
        this.numUniqueStrings = numUniqueStrings;
    }

    /**
     * Writes out the SST header.  This consists of the sid, the record size, the number of
     * strings and the number of unique strings.
     *
     * @param data The data buffer to write the header to.
     * @param bufferIndex The index into the data buffer where the header should be written.
     * @param recSize The size of the record data, in bytes.
     *
     * @return The number of bytes written to the buffer.
     */
    public int writeSSTHeader( byte[] data, int bufferIndex, int recSize )
    {
        int offset = bufferIndex;

        LittleEndian.putShort( data, offset, SSTRecord.sid );
        offset += LittleEndianConsts.SHORT_SIZE;
        LittleEndian.putShort( data, offset, (short) ( recSize ) );
        offset += LittleEndianConsts.SHORT_SIZE;
        // LittleEndian.putInt( data, offset, getNumStrings() );
        LittleEndian.putInt( data, offset, numStrings );
        offset += LittleEndianConsts.INT_SIZE;
        // LittleEndian.putInt( data, offset, getNumUniqueStrings() );
        LittleEndian.putInt( data, offset, numUniqueStrings );
        offset += LittleEndianConsts.INT_SIZE;
        return offset - bufferIndex;
    }

}
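For reference, writeSSTHeader lays down a 12-byte header: sid, record size, total string count, unique string count, all little-endian. A self-contained sketch of that layout using java.nio in place of POI's LittleEndian (the counts are example values):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class SstHeaderSketch
{
    public static void main( String[] args )
    {
        ByteBuffer buf = ByteBuffer.allocate( 12 ).order( ByteOrder.LITTLE_ENDIAN );
        buf.putShort( (short) 0xfc );   // SSTRecord.sid
        buf.putShort( (short) 8224 );   // record data size (example value)
        buf.putInt( 100 );              // total number of strings (example)
        buf.putInt( 90 );               // number of unique strings (example)
        System.out.println( "header bytes written: " + buf.position() );   // 12
    }
}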
|
src/java/org/apache/poi/hssf/record/SSTDeserializer.java  (new file, 357 lines)
@@ -0,0 +1,357 @@
|
||||
package org.apache.poi.hssf.record;
|
||||
|
||||
import org.apache.poi.util.LittleEndian;
|
||||
import org.apache.poi.util.LittleEndianConsts;
|
||||
import org.apache.poi.util.BinaryTree;
|
||||
import org.apache.poi.util.HexDump;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
class SSTDeserializer
|
||||
{
|
||||
|
||||
private BinaryTree strings;
|
||||
/** this is the number of characters we expect in the first sub-record in a subsequent continuation record */
|
||||
private int continuationExpectedChars;
|
||||
/** this is the string we were working on before hitting the end of the current record. This string is NOT finished. */
|
||||
private String unfinishedString;
|
||||
/** this is the total length of the current string being handled */
|
||||
private int totalLengthBytes;
|
||||
/** this is the offset into a string field of the actual string data */
|
||||
private int stringDataOffset;
|
||||
/** this is true if the string uses wide characters */
|
||||
private boolean wideChar;
|
||||
|
||||
|
||||
public SSTDeserializer(BinaryTree strings)
|
||||
{
|
||||
this.strings = strings;
|
||||
setExpectedChars( 0 );
|
||||
unfinishedString = "";
|
||||
totalLengthBytes = 0;
|
||||
stringDataOffset = 0;
|
||||
wideChar = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* This is the starting point where strings are constructed. Note that
|
||||
* strings may span across multiple continuations. Read the SST record
|
||||
* carefully before beginning to hack.
|
||||
*/
|
||||
public void manufactureStrings( final byte[] data, final int index,
|
||||
short size )
|
||||
{
|
||||
int offset = index;
|
||||
|
||||
while ( offset < size )
|
||||
{
|
||||
int remaining = size - offset;
|
||||
|
||||
if ( ( remaining > 0 ) && ( remaining < LittleEndianConsts.SHORT_SIZE ) )
|
||||
{
|
||||
throw new RecordFormatException( "Cannot get length of the last string in SSTRecord" );
|
||||
}
|
||||
if ( remaining == LittleEndianConsts.SHORT_SIZE )
|
||||
{
|
||||
setExpectedChars( LittleEndian.getUShort( data, offset ) );
|
||||
unfinishedString = "";
|
||||
break;
|
||||
}
|
||||
short charCount = LittleEndian.getShort( data, offset );
|
||||
|
||||
setupStringParameters( data, offset, charCount );
|
||||
if ( remaining < totalLengthBytes )
|
||||
{
|
||||
setExpectedChars( calculateCharCount( totalLengthBytes - remaining ) );
|
||||
charCount -= getExpectedChars();
|
||||
totalLengthBytes = remaining;
|
||||
}
|
||||
else
|
||||
{
|
||||
setExpectedChars( 0 );
|
||||
}
|
||||
processString( data, offset, charCount );
|
||||
offset += totalLengthBytes;
|
||||
if ( getExpectedChars() != 0 )
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
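A standalone illustration of the carry-over that manufactureStrings performs when a string runs past the end of the current record; the constants mirror STRING_MINIMAL_OVERHEAD and calculateCharCount, and the input sizes are made up:

// Sketch (not POI code) of setExpectedChars( calculateCharCount( totalLengthBytes - remaining ) ).
public class CarryOverSketch
{
    public static void main( String[] args )
    {
        boolean wideChar = false;                 // compressed string: 1 byte per character
        int charCount = 5000;
        int overhead = 2 + 1;                     // length short + option-flag byte
        int totalLengthBytes = overhead + charCount * ( wideChar ? 2 : 1 );
        int remaining = 4000;                     // bytes left in the current record

        int expectedChars = ( totalLengthBytes - remaining ) / ( wideChar ? 2 : 1 );
        System.out.println( "characters expected in the next Continue record: " + expectedChars );
    }
}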
|
||||
|
||||
|
||||
/**
|
||||
* Determines the option types for the string (i.e., compressed or uncompressed unicode, rich text string or
|
||||
* plain string etc) and calculates the length and offset for the string.
|
||||
*
|
||||
* @param data
|
||||
* @param index
|
||||
* @param char_count
|
||||
*/
|
||||
private void setupStringParameters( final byte[] data, final int index,
|
||||
final int char_count )
|
||||
{
|
||||
byte optionFlag = data[index + LittleEndianConsts.SHORT_SIZE];
|
||||
|
||||
wideChar = ( optionFlag & 1 ) == 1;
|
||||
boolean farEast = ( optionFlag & 4 ) == 4;
|
||||
boolean richText = ( optionFlag & 8 ) == 8;
|
||||
|
||||
totalLengthBytes = SSTRecord.STRING_MINIMAL_OVERHEAD + calculateByteCount( char_count );
|
||||
stringDataOffset = SSTRecord.STRING_MINIMAL_OVERHEAD;
|
||||
if ( richText )
|
||||
{
|
||||
short run_count = LittleEndian.getShort( data, index + stringDataOffset );
|
||||
|
||||
stringDataOffset += LittleEndianConsts.SHORT_SIZE;
|
||||
totalLengthBytes += LittleEndianConsts.SHORT_SIZE + ( LittleEndianConsts.INT_SIZE * run_count );
|
||||
}
|
||||
if ( farEast )
|
||||
{
|
||||
int extension_length = LittleEndian.getInt( data, index + stringDataOffset );
|
||||
|
||||
stringDataOffset += LittleEndianConsts.INT_SIZE;
|
||||
totalLengthBytes += LittleEndianConsts.INT_SIZE + extension_length;
|
||||
}
|
||||
}
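The length bookkeeping above can be checked by hand; a small sketch for a compressed rich-text string (run count and character count are example values):

// Sketch of the rich-text branch of setupStringParameters: each formatting run adds
// 4 bytes, announced by a 2-byte run count between the header and the character data.
public class RichTextLengthSketch
{
    public static void main( String[] args )
    {
        int charCount = 100;
        int runCount = 3;                       // number of formatting runs (example)
        boolean wideChar = false;

        int totalLengthBytes = ( 2 + 1 ) + charCount * ( wideChar ? 2 : 1 );   // STRING_MINIMAL_OVERHEAD + chars
        int stringDataOffset = 2 + 1;

        stringDataOffset += 2;                  // the run-count short
        totalLengthBytes += 2 + 4 * runCount;   // run count + 4 bytes per run

        System.out.println( "total field length = " + totalLengthBytes
                + ", character data starts at offset " + stringDataOffset );
    }
}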
|
||||
|
||||
|
||||
private void processString( final byte[] data, final int index,
|
||||
final short char_count )
|
||||
{
|
||||
byte[] stringDataBuffer = new byte[totalLengthBytes];
|
||||
int length = SSTRecord.STRING_MINIMAL_OVERHEAD + calculateByteCount( char_count );
|
||||
byte[] bstring = new byte[length];
|
||||
|
||||
System.arraycopy( data, index, stringDataBuffer, 0, stringDataBuffer.length );
|
||||
int offset = 0;
|
||||
|
||||
LittleEndian.putShort( bstring, offset, char_count );
|
||||
offset += LittleEndianConsts.SHORT_SIZE;
|
||||
bstring[offset] = stringDataBuffer[offset];
|
||||
|
||||
// System.out.println( "offset = " + stringDataOffset );
|
||||
// System.out.println( "length = " + (bstring.length - STRING_MINIMAL_OVERHEAD) );
|
||||
// System.out.println( "src.length = " + str_data.length );
|
||||
// try
|
||||
// {
|
||||
// System.out.println( "----------------------- DUMP -------------------------" );
|
||||
// HexDump.dump( stringDataBuffer, (long)stringDataOffset, System.out, 1);
|
||||
// }
|
||||
// catch ( IOException e )
|
||||
// {
|
||||
// }
|
||||
// catch ( ArrayIndexOutOfBoundsException e )
|
||||
// {
|
||||
// }
|
||||
// catch ( IllegalArgumentException e )
|
||||
// {
|
||||
// }
|
||||
System.arraycopy( stringDataBuffer, stringDataOffset, bstring,
|
||||
SSTRecord.STRING_MINIMAL_OVERHEAD,
|
||||
bstring.length - SSTRecord.STRING_MINIMAL_OVERHEAD );
|
||||
UnicodeString string = new UnicodeString( UnicodeString.sid,
|
||||
(short) bstring.length,
|
||||
bstring );
|
||||
|
||||
if ( getExpectedChars() != 0 )
|
||||
{
|
||||
unfinishedString = string.getString();
|
||||
}
|
||||
else
|
||||
{
|
||||
Integer integer = new Integer( strings.size() );
|
||||
addToStringTable( strings, integer, string );
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Okay, we are doing some major cheating here. Because we can't handle rich text strings properly
|
||||
* we end up getting duplicate strings.  To get around this I'm doing two things: 1. Converting rich
|
||||
* text to normal text and 2. If there's a duplicate I'm adding a space onto the end. Sneaky perhaps
|
||||
* but it gets the job done until we can handle this a little better.
|
||||
*/
|
||||
static public void addToStringTable( BinaryTree strings, Integer integer, UnicodeString string )
|
||||
{
|
||||
if (string.isRichText())
|
||||
string.setOptionFlags( (byte)(string.getOptionFlags() & (~8) ) );
|
||||
|
||||
boolean added = false;
|
||||
while (added == false)
|
||||
{
|
||||
try
|
||||
{
|
||||
strings.put( integer, string );
|
||||
added = true;
|
||||
}
|
||||
catch( Exception ignore )
|
||||
{
|
||||
string.setString( string.getString() + " " );
|
||||
}
|
||||
}
|
||||
}
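The same workaround can be shown without BinaryTree; a toy sketch using an ordinary map keyed by value (purely illustrative, not the POI data structure):

import java.util.HashMap;
import java.util.Map;

// Toy illustration of the "append a space until unique" trick in addToStringTable.
public class DedupSketch
{
    public static void main( String[] args )
    {
        Map<String, Integer> byValue = new HashMap<>();
        String[] incoming = { "total", "total", "total" };

        for ( String s : incoming )
        {
            while ( byValue.containsKey( s ) )
            {
                s = s + " ";          // same trick as addToStringTable
            }
            byValue.put( s, byValue.size() );
        }
        System.out.println( byValue.keySet() );   // three distinct keys: "total", "total ", "total  "
    }
}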
|
||||
|
||||
|
||||
|
||||
private int calculateCharCount( final int byte_count )
|
||||
{
|
||||
return byte_count / ( wideChar ? LittleEndianConsts.SHORT_SIZE
|
||||
: LittleEndianConsts.BYTE_SIZE );
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a Continue record. A Continue record for an SST record
|
||||
* contains the same kind of data that the SST record contains,
|
||||
* with the following exceptions:
|
||||
* <P>
|
||||
* <OL>
|
||||
* <LI>The string counts at the beginning of the SST record are
|
||||
* not in the Continue record
|
||||
* <LI>The first string in the Continue record might NOT begin
|
||||
* with a size. If the last string in the previous record is
|
||||
* continued in this record, the size is determined by that
|
||||
* last string in the previous record; the first string will
|
||||
* begin with a flag byte, followed by the remaining bytes (or
|
||||
* words) of the last string from the previous
|
||||
* record. Otherwise, the first string in the record will
|
||||
* begin with a string length
|
||||
* </OL>
|
||||
*
|
||||
* @param record the Continue record's byte data
|
||||
*/
|
||||
|
||||
public void processContinueRecord( final byte[] record )
|
||||
{
|
||||
if ( getExpectedChars() == 0 )
|
||||
{
|
||||
unfinishedString = "";
|
||||
totalLengthBytes = 0;
|
||||
stringDataOffset = 0;
|
||||
wideChar = false;
|
||||
manufactureStrings( record, 0, (short) record.length );
|
||||
}
|
||||
else
|
||||
{
|
||||
int data_length = record.length - LittleEndianConsts.BYTE_SIZE;
|
||||
|
||||
if ( calculateByteCount( getExpectedChars() ) > data_length )
|
||||
{
|
||||
|
||||
// create artificial data to create a UnicodeString
|
||||
byte[] input =
|
||||
new byte[record.length + LittleEndianConsts.SHORT_SIZE];
|
||||
short size = (short) ( ( ( record[0] & 1 ) == 1 )
|
||||
? ( data_length / LittleEndianConsts.SHORT_SIZE )
|
||||
: ( data_length / LittleEndianConsts.BYTE_SIZE ) );
|
||||
|
||||
LittleEndian.putShort( input, (byte) 0, size );
|
||||
System.arraycopy( record, 0, input, LittleEndianConsts.SHORT_SIZE, record.length );
|
||||
UnicodeString ucs = new UnicodeString( UnicodeString.sid, (short) input.length, input );
|
||||
|
||||
unfinishedString = unfinishedString + ucs.getString();
|
||||
setExpectedChars( getExpectedChars() - size );
|
||||
}
|
||||
else
|
||||
{
|
||||
setupStringParameters( record, -LittleEndianConsts.SHORT_SIZE,
|
||||
getExpectedChars() );
|
||||
byte[] str_data = new byte[totalLengthBytes];
|
||||
int length = SSTRecord.STRING_MINIMAL_OVERHEAD
|
||||
+ ( calculateByteCount( getExpectedChars() ) );
|
||||
byte[] bstring = new byte[length];
|
||||
|
||||
// Copy data from the record into the string
|
||||
// buffer. Copy skips the length of a short in the
|
||||
// string buffer, to leave room for the string length.
|
||||
System.arraycopy( record, 0, str_data,
|
||||
LittleEndianConsts.SHORT_SIZE,
|
||||
str_data.length
|
||||
- LittleEndianConsts.SHORT_SIZE );
|
||||
|
||||
// write the string length
|
||||
LittleEndian.putShort( bstring, 0,
|
||||
(short) getExpectedChars() );
|
||||
|
||||
// write the options flag
|
||||
bstring[LittleEndianConsts.SHORT_SIZE] =
|
||||
str_data[LittleEndianConsts.SHORT_SIZE];
|
||||
|
||||
// copy the bytes/words making up the string; skipping
|
||||
// past all the overhead of the str_data array
|
||||
System.arraycopy( str_data, stringDataOffset, bstring,
|
||||
SSTRecord.STRING_MINIMAL_OVERHEAD,
|
||||
bstring.length - SSTRecord.STRING_MINIMAL_OVERHEAD );
|
||||
|
||||
// use special constructor to create the final string
|
||||
UnicodeString string =
|
||||
new UnicodeString( UnicodeString.sid,
|
||||
(short) bstring.length, bstring,
|
||||
unfinishedString );
|
||||
Integer integer = new Integer( strings.size() );
|
||||
|
||||
// field_3_strings.put( integer, string );
|
||||
addToStringTable( strings, integer, string );
|
||||
manufactureStrings( record, totalLengthBytes - LittleEndianConsts.SHORT_SIZE, (short) record.length );
|
||||
}
|
||||
}
|
||||
}
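The "artificial data" branch above prepends a synthetic two-byte character count so the fragment parses as a normal unicode string field; a self-contained sketch of that framing with java.nio (fragment size is an example):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// A Continue record that is entirely the tail of an unfinished string starts with only
// the option-flag byte, so a length prefix is synthesized before parsing.
public class ContinueTailSketch
{
    public static void main( String[] args )
    {
        byte[] record = new byte[101];              // flag byte + 100 compressed chars (example)
        record[0] = 0;                              // option flag: not wide, not rich
        int dataLength = record.length - 1;         // bytes of character data
        short size = (short) dataLength;            // characters in this fragment (compressed)

        ByteBuffer input = ByteBuffer.allocate( record.length + 2 ).order( ByteOrder.LITTLE_ENDIAN );
        input.putShort( size );                     // synthetic length prefix
        input.put( record );                        // flag byte + character bytes follow
        System.out.println( "synthetic string field is " + input.position() + " bytes" );
    }
}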
|
||||
|
||||
/**
|
||||
* @return the number of characters we expect in the first
|
||||
* sub-record in a subsequent continuation record
|
||||
*/
|
||||
|
||||
int getExpectedChars()
|
||||
{
|
||||
return continuationExpectedChars;
|
||||
}
|
||||
|
||||
private void setExpectedChars( final int count )
|
||||
{
|
||||
continuationExpectedChars = count;
|
||||
}
|
||||
|
||||
private int calculateByteCount( final int character_count )
|
||||
{
|
||||
return character_count * ( wideChar ? LittleEndianConsts.SHORT_SIZE : LittleEndianConsts.BYTE_SIZE );
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @return the unfinished string
|
||||
*/
|
||||
|
||||
String getUnfinishedString()
|
||||
{
|
||||
return unfinishedString;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the total length of the current string
|
||||
*/
|
||||
|
||||
int getTotalLength()
|
||||
{
|
||||
return totalLengthBytes;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return offset into current string data
|
||||
*/
|
||||
|
||||
int getStringDataOffset()
|
||||
{
|
||||
return stringDataOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if current string uses wide characters
|
||||
*/
|
||||
|
||||
boolean isWideChar()
|
||||
{
|
||||
return wideChar;
|
||||
}
|
||||
|
||||
|
||||
}
|
@ -1,4 +1,3 @@
|
||||
|
||||
/* ====================================================================
|
||||
* The Apache Software License, Version 1.1
|
||||
*
|
||||
@ -59,7 +58,8 @@ import org.apache.poi.util.BinaryTree;
|
||||
import org.apache.poi.util.LittleEndian;
|
||||
import org.apache.poi.util.LittleEndianConsts;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Title: Static String Table Record
|
||||
@ -71,6 +71,7 @@ import java.util.*;
|
||||
* <P>
|
||||
* @author Andrew C. Oliver (acoliver at apache dot org)
|
||||
* @author Marc Johnson (mjohnson at apache dot org)
|
||||
* @author Glen Stampoultzis (glens at apache.org)
|
||||
* @version 2.0-pre
|
||||
* @see org.apache.poi.hssf.record.LabelSSTRecord
|
||||
* @see org.apache.poi.hssf.record.ContinueRecord
|
||||
@ -80,56 +81,35 @@ public class SSTRecord
|
||||
extends Record
|
||||
{
|
||||
|
||||
// how big can an SST record be? As big as any record can be: 8228
|
||||
// bytes
|
||||
private static final int _max = 8228;
|
||||
/** how big can an SST record be? As big as any record can be: 8228 bytes */
|
||||
static final int MAX_RECORD_SIZE = 8228;
|
||||
|
||||
// standard record overhead: two shorts (record id plus data space
|
||||
// size)
|
||||
private static final int _std_record_overhead =
|
||||
/** standard record overhead: two shorts (record id plus data space size)*/
|
||||
static final int STD_RECORD_OVERHEAD =
|
||||
2 * LittleEndianConsts.SHORT_SIZE;
|
||||
|
||||
// SST overhead: the standard record overhead, plus the number of
|
||||
// strings and the number of unique strings -- two ints
|
||||
private static final int _sst_record_overhead =
|
||||
(_std_record_overhead + (2 * LittleEndianConsts.INT_SIZE));
|
||||
/** SST overhead: the standard record overhead, plus the number of strings and the number of unique strings -- two ints */
|
||||
static final int SST_RECORD_OVERHEAD =
|
||||
( STD_RECORD_OVERHEAD + ( 2 * LittleEndianConsts.INT_SIZE ) );
|
||||
|
||||
// how much data can we stuff into an SST record? That would be
|
||||
// _max minus the standard SST record overhead
|
||||
private static final int _max_data_space =
|
||||
_max - _sst_record_overhead;
|
||||
/** how much data can we stuff into an SST record? That would be _max minus the standard SST record overhead */
|
||||
static final int MAX_DATA_SPACE = MAX_RECORD_SIZE - SST_RECORD_OVERHEAD;
|
||||
|
||||
/** overhead for each string includes the string's character count (a short) and the flag describing its characteristics (a byte) */
|
||||
static final int STRING_MINIMAL_OVERHEAD = LittleEndianConsts.SHORT_SIZE + LittleEndianConsts.BYTE_SIZE;
|
||||
|
||||
// overhead for each string includes the string's character count
|
||||
// (a short) and the flag describing its characteristics (a byte)
|
||||
private static final int _string_minimal_overhead =
|
||||
LittleEndianConsts.SHORT_SIZE + LittleEndianConsts.BYTE_SIZE;
|
||||
public static final short sid = 0xfc;
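The constants work out as follows (values taken from the declarations above):

// Worked numbers for the SST size budget.
public class SstBudget
{
    public static void main( String[] args )
    {
        int maxRecordSize = 8228;                           // MAX_RECORD_SIZE
        int stdRecordOverhead = 2 * 2;                      // sid short + length short
        int sstRecordOverhead = stdRecordOverhead + 2 * 4;  // plus two int string counts
        int maxDataSpace = maxRecordSize - sstRecordOverhead;

        System.out.println( "STD_RECORD_OVERHEAD = " + stdRecordOverhead );   // 4
        System.out.println( "SST_RECORD_OVERHEAD = " + sstRecordOverhead );   // 12
        System.out.println( "MAX_DATA_SPACE      = " + maxDataSpace );        // 8216
    }
}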
|
||||
|
||||
// union of strings in the SST and EXTSST
|
||||
/** union of strings in the SST and EXTSST */
|
||||
private int field_1_num_strings;
|
||||
|
||||
// according to docs ONLY SST
|
||||
/** according to docs ONLY SST */
|
||||
private int field_2_num_unique_strings;
|
||||
private BinaryTree field_3_strings;
|
||||
|
||||
// this is the number of characters we expect in the first
|
||||
// sub-record in a subsequent continuation record
|
||||
private int __expected_chars;
|
||||
|
||||
// this is the string we were working on before hitting the end of
|
||||
// the current record. This string is NOT finished.
|
||||
private String _unfinished_string;
|
||||
|
||||
// this is the total length of the current string being handled
|
||||
private int _total_length_bytes;
|
||||
|
||||
// this is the offset into a string field of the actual string
|
||||
// data
|
||||
private int _string_data_offset;
|
||||
|
||||
// this is true if the string uses wide characters
|
||||
private boolean _wide_char;
|
||||
/** Record lengths for initial SST record and all continue records */
|
||||
private List _record_lengths = null;
|
||||
private SSTDeserializer deserializer;
|
||||
|
||||
/**
|
||||
* default constructor
|
||||
@ -140,11 +120,7 @@ public class SSTRecord
|
||||
field_1_num_strings = 0;
|
||||
field_2_num_unique_strings = 0;
|
||||
field_3_strings = new BinaryTree();
|
||||
setExpectedChars(0);
|
||||
_unfinished_string = "";
|
||||
_total_length_bytes = 0;
|
||||
_string_data_offset = 0;
|
||||
_wide_char = false;
|
||||
deserializer = new SSTDeserializer(field_3_strings);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -264,7 +240,8 @@ public class SSTRecord
|
||||
rval = field_3_strings.size();
|
||||
field_2_num_unique_strings++;
|
||||
integer = new Integer( rval );
|
||||
field_3_strings.put(integer, ucs);
|
||||
SSTDeserializer.addToStringTable( field_3_strings, integer, ucs );
|
||||
// field_3_strings.put( integer, ucs );
|
||||
}
|
||||
return rval;
|
||||
}
|
||||
@ -329,14 +306,13 @@ public class SSTRecord
|
||||
|
||||
public String getString( final int id )
|
||||
{
|
||||
return (( UnicodeString ) field_3_strings.get(new Integer(id)))
|
||||
.getString();
|
||||
return ( (UnicodeString) field_3_strings.get( new Integer( id ) ) ).getString();
|
||||
}
|
||||
|
||||
public boolean getString16bit(final int id)
|
||||
public boolean isString16bit( final int id )
|
||||
{
|
||||
return ((( UnicodeString ) field_3_strings.get(new Integer(id)))
|
||||
.getOptionFlags() == 1);
|
||||
UnicodeString unicodeString = ( (UnicodeString) field_3_strings.get( new Integer( id ) ) );
|
||||
return ( ( unicodeString.getOptionFlags() & 0x01 ) == 1 );
|
||||
}
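The bit tested here is one of the unicode option flags this change relies on throughout (see setupStringParameters in SSTDeserializer); a small check:

// Option-flag bits: 0x01 = 16-bit characters, 0x04 = far-east extension, 0x08 = rich text.
public class OptionFlagSketch
{
    public static void main( String[] args )
    {
        byte optionFlag = 0x09;                               // example: wide + rich text
        boolean wideChar = ( optionFlag & 0x01 ) == 0x01;     // what isString16bit now tests
        boolean farEast = ( optionFlag & 0x04 ) == 0x04;
        boolean richText = ( optionFlag & 0x08 ) == 0x08;
        System.out.println( wideChar + " " + farEast + " " + richText );   // true false true
    }
}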
|
||||
|
||||
/**
|
||||
@ -364,311 +340,9 @@ public class SSTRecord
|
||||
return buffer.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a byte array consisting of an SST record and any
|
||||
* required Continue records, ready to be written out.
|
||||
* <p>
|
||||
* If an SST record and any subsequent Continue records are read
|
||||
* in to create this instance, this method should produce a byte
|
||||
* array that is identical to the byte array produced by
|
||||
* concatenating the input records' data.
|
||||
*
|
||||
* @return the byte array
|
||||
*/
|
||||
|
||||
public int serialize(int offset, byte [] data)
|
||||
{
|
||||
int rval = getRecordSize();
|
||||
int record_length_index = 0;
|
||||
|
||||
// get the linear size of that array
|
||||
int unicodesize = calculateUnicodeSize();
|
||||
|
||||
if (unicodesize > _max_data_space)
|
||||
{
|
||||
byte[] stringreminant = null;
|
||||
int unipos = 0;
|
||||
boolean lastneedcontinue = false;
|
||||
int stringbyteswritten = 0;
|
||||
boolean first_record = true;
|
||||
int totalWritten = 0;
|
||||
int size = 0;
|
||||
|
||||
while (totalWritten != rval)
|
||||
{
|
||||
int pos = 0;
|
||||
|
||||
// write the appropriate header
|
||||
int available;
|
||||
|
||||
if (first_record)
|
||||
{
|
||||
size =
|
||||
(( Integer ) _record_lengths
|
||||
.get(record_length_index++)).intValue();
|
||||
available = size - 8;
|
||||
pos = writeSSTHeader(data,
|
||||
pos + offset
|
||||
+ totalWritten, size);
|
||||
size += _std_record_overhead;
|
||||
first_record = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
pos = 0;
|
||||
int to_be_written = (unicodesize - stringbyteswritten)
|
||||
+ (lastneedcontinue ? 1
|
||||
: 0); // not used?
|
||||
|
||||
size =
|
||||
(( Integer ) _record_lengths
|
||||
.get(record_length_index++)).intValue();
|
||||
available = size;
|
||||
pos = writeContinueHeader(data,
|
||||
pos + offset
|
||||
+ totalWritten, size);
|
||||
size = size + _std_record_overhead;
|
||||
}
|
||||
|
||||
// now, write the rest of the data into the current
|
||||
// record space
|
||||
if (lastneedcontinue)
|
||||
{
|
||||
|
||||
// the last string in the previous record was not
|
||||
// written out completely
|
||||
if (stringreminant.length <= available)
|
||||
{
|
||||
|
||||
// write reminant -- it'll all fit neatly
|
||||
System.arraycopy(stringreminant, 0, data,
|
||||
pos + offset + totalWritten,
|
||||
stringreminant.length);
|
||||
stringbyteswritten += stringreminant.length - 1;
|
||||
pos += stringreminant.length;
|
||||
lastneedcontinue = false;
|
||||
available -= stringreminant.length;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
// write as much of the remnant as possible
|
||||
System.arraycopy(stringreminant, 0, data,
|
||||
pos + offset + totalWritten,
|
||||
available);
|
||||
stringbyteswritten += available - 1;
|
||||
pos += available;
|
||||
byte[] leftover =
|
||||
new byte[ (stringreminant.length - available) + LittleEndianConsts.BYTE_SIZE ];
|
||||
|
||||
System.arraycopy(stringreminant, available, leftover,
|
||||
LittleEndianConsts.BYTE_SIZE,
|
||||
stringreminant.length - available);
|
||||
leftover[ 0 ] = stringreminant[ 0 ];
|
||||
stringreminant = leftover;
|
||||
available = 0;
|
||||
lastneedcontinue = true;
|
||||
}
|
||||
}
|
||||
|
||||
// last string's remnant, if any, is cleaned up as
|
||||
// best as can be done ... now let's try and write
|
||||
// some more strings
|
||||
for (; unipos < field_3_strings.size(); unipos++)
|
||||
{
|
||||
Integer intunipos = new Integer(unipos);
|
||||
UnicodeString unistr =
|
||||
(( UnicodeString ) field_3_strings.get(intunipos));
|
||||
|
||||
if (unistr.getRecordSize() <= available)
|
||||
{
|
||||
unistr.serialize(pos + offset + totalWritten, data);
|
||||
int rsize = unistr.getRecordSize();
|
||||
|
||||
stringbyteswritten += rsize;
|
||||
pos += rsize;
|
||||
available -= rsize;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
// can't write the entire string out
|
||||
if (available >= _string_minimal_overhead)
|
||||
{
|
||||
|
||||
// we can write some of it
|
||||
byte[] ucs = unistr.serialize();
|
||||
|
||||
System.arraycopy(ucs, 0, data,
|
||||
pos + offset + totalWritten,
|
||||
available);
|
||||
stringbyteswritten += available;
|
||||
stringreminant =
|
||||
new byte[ (ucs.length - available) + LittleEndianConsts.BYTE_SIZE ];
|
||||
System.arraycopy(ucs, available, stringreminant,
|
||||
LittleEndianConsts.BYTE_SIZE,
|
||||
ucs.length - available);
|
||||
stringreminant[ 0 ] =
|
||||
ucs[ LittleEndianConsts.SHORT_SIZE ];
|
||||
available = 0;
|
||||
lastneedcontinue = true;
|
||||
unipos++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
totalWritten += size;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
// short data: write one simple SST record
|
||||
int datasize = _sst_record_overhead + unicodesize; // not used?
|
||||
|
||||
writeSSTHeader(
|
||||
data, 0 + offset,
|
||||
_sst_record_overhead
|
||||
+ (( Integer ) _record_lengths.get(
|
||||
record_length_index++)).intValue() - _std_record_overhead);
|
||||
int pos = _sst_record_overhead;
|
||||
|
||||
for (int k = 0; k < field_3_strings.size(); k++)
|
||||
{
|
||||
UnicodeString unistr =
|
||||
(( UnicodeString ) field_3_strings.get(new Integer(k)));
|
||||
|
||||
System.arraycopy(unistr.serialize(), 0, data, pos + offset,
|
||||
unistr.getRecordSize());
|
||||
pos += unistr.getRecordSize();
|
||||
}
|
||||
}
|
||||
return rval;
|
||||
}
|
||||
|
||||
// not used: remove?
|
||||
private int calculateStringsize()
|
||||
{
|
||||
int retval = 0;
|
||||
|
||||
for (int k = 0; k < field_3_strings.size(); k++)
|
||||
{
|
||||
retval +=
|
||||
(( UnicodeString ) field_3_strings.get(new Integer(k)))
|
||||
.getRecordSize();
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a Continue record. A Continue record for an SST record
|
||||
* contains the same kind of data that the SST record contains,
|
||||
* with the following exceptions:
|
||||
* <P>
|
||||
* <OL>
|
||||
* <LI>The string counts at the beginning of the SST record are
|
||||
* not in the Continue record
|
||||
* <LI>The first string in the Continue record might NOT begin
|
||||
* with a size. If the last string in the previous record is
|
||||
* continued in this record, the size is determined by that
|
||||
* last string in the previous record; the first string will
|
||||
* begin with a flag byte, followed by the remaining bytes (or
|
||||
* words) of the last string from the previous
|
||||
* record. Otherwise, the first string in the record will
|
||||
* begin with a string length
|
||||
* </OL>
|
||||
*
|
||||
* @param record the Continue record's byte data
|
||||
*/
|
||||
|
||||
public void processContinueRecord(final byte [] record)
|
||||
{
|
||||
if (getExpectedChars() == 0)
|
||||
{
|
||||
_unfinished_string = "";
|
||||
_total_length_bytes = 0;
|
||||
_string_data_offset = 0;
|
||||
_wide_char = false;
|
||||
manufactureStrings(record, 0, ( short ) record.length);
|
||||
}
|
||||
else
|
||||
{
|
||||
int data_length = record.length - LittleEndianConsts.BYTE_SIZE;
|
||||
|
||||
if (calculateByteCount(getExpectedChars()) > data_length)
|
||||
{
|
||||
|
||||
// create artificial data to create a UnicodeString
|
||||
byte[] input =
|
||||
new byte[ record.length + LittleEndianConsts.SHORT_SIZE ];
|
||||
short size = ( short ) (((record[ 0 ] & 1) == 1)
|
||||
? (data_length
|
||||
/ LittleEndianConsts.SHORT_SIZE)
|
||||
: (data_length
|
||||
/ LittleEndianConsts.BYTE_SIZE));
|
||||
|
||||
LittleEndian.putShort(input, ( byte ) 0, size);
|
||||
System.arraycopy(record, 0, input,
|
||||
LittleEndianConsts.SHORT_SIZE,
|
||||
record.length);
|
||||
UnicodeString ucs = new UnicodeString(UnicodeString.sid,
|
||||
( short ) input.length,
|
||||
input);
|
||||
|
||||
_unfinished_string = _unfinished_string + ucs.getString();
|
||||
setExpectedChars(getExpectedChars() - size);
|
||||
}
|
||||
else
|
||||
{
|
||||
setupStringParameters(record, -LittleEndianConsts.SHORT_SIZE,
|
||||
getExpectedChars());
|
||||
byte[] str_data = new byte[ _total_length_bytes ];
|
||||
int length = _string_minimal_overhead
|
||||
+ (calculateByteCount(getExpectedChars()));
|
||||
byte[] bstring = new byte[ length ];
|
||||
|
||||
// Copy data from the record into the string
|
||||
// buffer. Copy skips the length of a short in the
|
||||
// string buffer, to leave room for the string length.
|
||||
System.arraycopy(record, 0, str_data,
|
||||
LittleEndianConsts.SHORT_SIZE,
|
||||
str_data.length
|
||||
- LittleEndianConsts.SHORT_SIZE);
|
||||
|
||||
// write the string length
|
||||
LittleEndian.putShort(bstring, 0,
|
||||
( short ) getExpectedChars());
|
||||
|
||||
// write the options flag
|
||||
bstring[ LittleEndianConsts.SHORT_SIZE ] =
|
||||
str_data[ LittleEndianConsts.SHORT_SIZE ];
|
||||
|
||||
// copy the bytes/words making up the string; skipping
|
||||
// past all the overhead of the str_data array
|
||||
System.arraycopy(str_data, _string_data_offset, bstring,
|
||||
_string_minimal_overhead,
|
||||
bstring.length - _string_minimal_overhead);
|
||||
|
||||
// use special constructor to create the final string
|
||||
UnicodeString string =
|
||||
new UnicodeString(UnicodeString.sid,
|
||||
( short ) bstring.length, bstring,
|
||||
_unfinished_string);
|
||||
Integer integer = new Integer(field_3_strings.size());
|
||||
|
||||
field_3_strings.put(integer, string);
|
||||
manufactureStrings(record,
|
||||
_total_length_bytes
|
||||
- LittleEndianConsts
|
||||
.SHORT_SIZE, ( short ) record.length);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return sid
|
||||
*/
|
||||
|
||||
public short getSid()
|
||||
{
|
||||
return sid;
|
||||
@ -677,18 +351,11 @@ public class SSTRecord
|
||||
/**
|
||||
* @return hashcode
|
||||
*/
|
||||
|
||||
public int hashCode()
|
||||
{
|
||||
return field_2_num_unique_strings;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param o
|
||||
* @return true if equal
|
||||
*/
|
||||
|
||||
public boolean equals( Object o )
|
||||
{
|
||||
if ( ( o == null ) || ( o.getClass() != this.getClass() ) )
|
||||
@ -810,23 +477,10 @@ public class SSTRecord
|
||||
field_1_num_strings = LittleEndian.getInt( data, 0 + offset );
|
||||
field_2_num_unique_strings = LittleEndian.getInt( data, 4 + offset );
|
||||
field_3_strings = new BinaryTree();
|
||||
setExpectedChars(0);
|
||||
_unfinished_string = "";
|
||||
_total_length_bytes = 0;
|
||||
_string_data_offset = 0;
|
||||
_wide_char = false;
|
||||
manufactureStrings(data, 8 + offset, size);
|
||||
deserializer = new SSTDeserializer(field_3_strings);
|
||||
deserializer.manufactureStrings( data, 8 + offset, size );
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the number of characters we expect in the first
|
||||
* sub-record in a subsequent continuation record
|
||||
*/
|
||||
|
||||
int getExpectedChars()
|
||||
{
|
||||
return __expected_chars;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return an iterator of the strings we hold. All instances are
|
||||
@ -848,372 +502,43 @@ public class SSTRecord
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the unfinished string
|
||||
* called by the class that is responsible for writing this sucker.
|
||||
* Subclasses should implement this so that their data is passed back in a
|
||||
* byte array.
|
||||
*
|
||||
* @return byte array containing instance data
|
||||
*/
|
||||
|
||||
String getUnfinishedString()
|
||||
public int serialize( int offset, byte[] data )
|
||||
{
|
||||
return _unfinished_string;
|
||||
SSTSerializer serializer = new SSTSerializer(
|
||||
_record_lengths, field_3_strings, getNumStrings(), getNumUniqueStrings() );
|
||||
return serializer.serialize( offset, data );
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the total length of the current string
|
||||
*/
|
||||
|
||||
int getTotalLength()
|
||||
{
|
||||
return _total_length_bytes;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return offset into current string data
|
||||
*/
|
||||
|
||||
int getStringDataOffset()
|
||||
{
|
||||
return _string_data_offset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if current string uses wide characters
|
||||
*/
|
||||
|
||||
boolean isWideChar()
|
||||
{
|
||||
return _wide_char;
|
||||
}
|
||||
|
||||
private int writeSSTHeader(final byte [] data, final int pos,
|
||||
final int recsize)
|
||||
{
|
||||
int offset = pos;
|
||||
|
||||
LittleEndian.putShort(data, offset, sid);
|
||||
offset += LittleEndianConsts.SHORT_SIZE;
|
||||
LittleEndian.putShort(data, offset, ( short ) (recsize));
|
||||
offset += LittleEndianConsts.SHORT_SIZE;
|
||||
LittleEndian.putInt(data, offset, getNumStrings());
|
||||
offset += LittleEndianConsts.INT_SIZE;
|
||||
LittleEndian.putInt(data, offset, getNumUniqueStrings());
|
||||
offset += LittleEndianConsts.INT_SIZE;
|
||||
return offset - pos;
|
||||
}
|
||||
|
||||
private int writeContinueHeader(final byte [] data, final int pos,
|
||||
final int recsize)
|
||||
{
|
||||
int offset = pos;
|
||||
|
||||
LittleEndian.putShort(data, offset, ContinueRecord.sid);
|
||||
offset += LittleEndianConsts.SHORT_SIZE;
|
||||
LittleEndian.putShort(data, offset, ( short ) (recsize));
|
||||
offset += LittleEndianConsts.SHORT_SIZE;
|
||||
return offset - pos;
|
||||
}
|
||||
|
||||
private int calculateUCArrayLength(final byte [][] ucarray)
|
||||
{
|
||||
int retval = 0;
|
||||
|
||||
for (int k = 0; k < ucarray.length; k++)
|
||||
{
|
||||
retval += ucarray[ k ].length;
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
private void manufactureStrings(final byte [] data, final int index,
|
||||
short size)
|
||||
{
|
||||
int offset = index;
|
||||
|
||||
while (offset < size)
|
||||
{
|
||||
int remaining = size - offset;
|
||||
|
||||
if ((remaining > 0)
|
||||
&& (remaining < LittleEndianConsts.SHORT_SIZE))
|
||||
{
|
||||
throw new RecordFormatException(
|
||||
"Cannot get length of the last string in SSTRecord");
|
||||
}
|
||||
if (remaining == LittleEndianConsts.SHORT_SIZE)
|
||||
{
|
||||
setExpectedChars(LittleEndian.getShort(data, offset));
|
||||
_unfinished_string = "";
|
||||
break;
|
||||
}
|
||||
short char_count = LittleEndian.getShort(data, offset);
|
||||
|
||||
setupStringParameters(data, offset, char_count);
|
||||
if (remaining < _total_length_bytes)
|
||||
{
|
||||
setExpectedChars(calculateCharCount(_total_length_bytes
|
||||
- remaining));
|
||||
char_count -= getExpectedChars();
|
||||
_total_length_bytes = remaining;
|
||||
}
|
||||
else
|
||||
{
|
||||
setExpectedChars(0);
|
||||
}
|
||||
processString(data, offset, char_count);
|
||||
offset += _total_length_bytes;
|
||||
if (getExpectedChars() != 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void setupStringParameters(final byte [] data, final int index,
|
||||
final int char_count)
|
||||
{
|
||||
byte flag = data[ index + LittleEndianConsts.SHORT_SIZE ];
|
||||
|
||||
_wide_char = (flag & 1) == 1;
|
||||
boolean extended = (flag & 4) == 4;
|
||||
boolean formatted_run = (flag & 8) == 8;
|
||||
|
||||
_total_length_bytes = _string_minimal_overhead
|
||||
+ calculateByteCount(char_count);
|
||||
_string_data_offset = _string_minimal_overhead;
|
||||
if (formatted_run)
|
||||
{
|
||||
short run_count = LittleEndian.getShort(data,
|
||||
index
|
||||
+ _string_data_offset);
|
||||
|
||||
_string_data_offset += LittleEndianConsts.SHORT_SIZE;
|
||||
_total_length_bytes += LittleEndianConsts.SHORT_SIZE
|
||||
+ (LittleEndianConsts.INT_SIZE
|
||||
* run_count);
|
||||
}
|
||||
if (extended)
|
||||
{
|
||||
int extension_length = LittleEndian.getInt(data,
|
||||
index
|
||||
+ _string_data_offset);
|
||||
|
||||
_string_data_offset += LittleEndianConsts.INT_SIZE;
|
||||
_total_length_bytes += LittleEndianConsts.INT_SIZE
|
||||
+ extension_length;
|
||||
}
|
||||
}
|
||||
|
||||
private void processString(final byte [] data, final int index,
|
||||
final short char_count)
|
||||
{
|
||||
byte[] str_data = new byte[ _total_length_bytes ];
|
||||
int length = _string_minimal_overhead
|
||||
+ calculateByteCount(char_count);
|
||||
byte[] bstring = new byte[ length ];
|
||||
|
||||
System.arraycopy(data, index, str_data, 0, str_data.length);
|
||||
int offset = 0;
|
||||
|
||||
LittleEndian.putShort(bstring, offset, char_count);
|
||||
offset += LittleEndianConsts.SHORT_SIZE;
|
||||
bstring[ offset ] = str_data[ offset ];
|
||||
System.arraycopy(str_data, _string_data_offset, bstring,
|
||||
_string_minimal_overhead,
|
||||
bstring.length - _string_minimal_overhead);
|
||||
UnicodeString string = new UnicodeString(UnicodeString.sid,
|
||||
( short ) bstring.length,
|
||||
bstring);
|
||||
|
||||
if (getExpectedChars() != 0)
|
||||
{
|
||||
_unfinished_string = string.getString();
|
||||
}
|
||||
else
|
||||
{
|
||||
Integer integer = new Integer(field_3_strings.size());
|
||||
|
||||
field_3_strings.put(integer, string);
|
||||
}
|
||||
}
|
||||
|
||||
private void setExpectedChars(final int count)
|
||||
{
|
||||
__expected_chars = count;
|
||||
}
|
||||
|
||||
private int calculateByteCount(final int character_count)
|
||||
{
|
||||
return character_count * (_wide_char ? LittleEndianConsts.SHORT_SIZE
|
||||
: LittleEndianConsts.BYTE_SIZE);
|
||||
}
|
||||
|
||||
private int calculateCharCount(final int byte_count)
|
||||
{
|
||||
return byte_count / (_wide_char ? LittleEndianConsts.SHORT_SIZE
|
||||
: LittleEndianConsts.BYTE_SIZE);
|
||||
}
|
||||
|
||||
// we can probably simplify this later...this calculates the size
|
||||
// w/o serializing but still is a bit slow
|
||||
public int getRecordSize()
|
||||
{
|
||||
_record_lengths = new ArrayList();
|
||||
int retval = 0;
|
||||
int unicodesize = calculateUnicodeSize();
|
||||
SSTSerializer serializer = new SSTSerializer(
|
||||
_record_lengths, field_3_strings, getNumStrings(), getNumUniqueStrings() );
|
||||
|
||||
if (unicodesize > _max_data_space)
|
||||
return serializer.getRecordSize();
|
||||
}
|
||||
|
||||
SSTDeserializer getDeserializer()
|
||||
{
|
||||
UnicodeString unistr = null;
|
||||
int stringreminant = 0;
|
||||
int unipos = 0;
|
||||
boolean lastneedcontinue = false;
|
||||
int stringbyteswritten = 0;
|
||||
boolean finished = false;
|
||||
boolean first_record = true;
|
||||
int totalWritten = 0;
|
||||
return deserializer;
|
||||
}
|
||||
|
||||
while (!finished)
|
||||
/**
|
||||
* Strange to handle continue records this way. Is it a smell?
|
||||
*/
|
||||
public void processContinueRecord( byte[] record )
|
||||
{
|
||||
int record = 0;
|
||||
int pos = 0;
|
||||
|
||||
if (first_record)
|
||||
{
|
||||
|
||||
// writing SST record
|
||||
record = _max;
|
||||
pos = 12;
|
||||
first_record = false;
|
||||
_record_lengths.add(new Integer(record
|
||||
- _std_record_overhead));
|
||||
deserializer.processContinueRecord( record );
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
// writing continue record
|
||||
pos = 0;
|
||||
int to_be_written = (unicodesize - stringbyteswritten)
|
||||
+ (lastneedcontinue ? 1
|
||||
: 0);
|
||||
int size = Math.min(_max - _std_record_overhead,
|
||||
to_be_written);
|
||||
|
||||
if (size == to_be_written)
|
||||
{
|
||||
finished = true;
|
||||
}
|
||||
record = size + _std_record_overhead;
|
||||
_record_lengths.add(new Integer(size));
|
||||
pos = 4;
|
||||
}
|
||||
if (lastneedcontinue)
|
||||
{
|
||||
int available = _max - pos;
|
||||
|
||||
if (stringreminant <= available)
|
||||
{
|
||||
|
||||
// write reminant
|
||||
stringbyteswritten += stringreminant - 1;
|
||||
pos += stringreminant;
|
||||
lastneedcontinue = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
// write as much of the remnant as possible
|
||||
int toBeWritten = unistr.maxBrokenLength(available);
|
||||
|
||||
if (available != toBeWritten)
|
||||
{
|
||||
int shortrecord = record
|
||||
- (available - toBeWritten);
|
||||
|
||||
_record_lengths.set(
|
||||
_record_lengths.size() - 1,
|
||||
new Integer(
|
||||
shortrecord - _std_record_overhead));
|
||||
record = shortrecord;
|
||||
}
|
||||
stringbyteswritten += toBeWritten - 1;
|
||||
pos += toBeWritten;
|
||||
stringreminant -= toBeWritten - 1;
|
||||
lastneedcontinue = true;
|
||||
}
|
||||
}
|
||||
for (; unipos < field_3_strings.size(); unipos++)
|
||||
{
|
||||
int available = _max - pos;
|
||||
Integer intunipos = new Integer(unipos);
|
||||
|
||||
unistr =
|
||||
(( UnicodeString ) field_3_strings.get(intunipos));
|
||||
if (unistr.getRecordSize() <= available)
|
||||
{
|
||||
stringbyteswritten += unistr.getRecordSize();
|
||||
pos += unistr.getRecordSize();
|
||||
}
|
||||
else
|
||||
{
|
||||
if (available >= _string_minimal_overhead)
|
||||
{
|
||||
int toBeWritten =
|
||||
unistr.maxBrokenLength(available);
|
||||
|
||||
stringbyteswritten += toBeWritten;
|
||||
stringreminant =
|
||||
(unistr.getRecordSize() - toBeWritten)
|
||||
+ LittleEndianConsts.BYTE_SIZE;
|
||||
if (available != toBeWritten)
|
||||
{
|
||||
int shortrecord = record
|
||||
- (available - toBeWritten);
|
||||
|
||||
_record_lengths.set(
|
||||
_record_lengths.size() - 1,
|
||||
new Integer(
|
||||
shortrecord - _std_record_overhead));
|
||||
record = shortrecord;
|
||||
}
|
||||
lastneedcontinue = true;
|
||||
unipos++;
|
||||
}
|
||||
else
|
||||
{
|
||||
int shortrecord = record - available;
|
||||
|
||||
_record_lengths.set(
|
||||
_record_lengths.size() - 1,
|
||||
new Integer(
|
||||
shortrecord - _std_record_overhead));
|
||||
record = shortrecord;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
totalWritten += record;
|
||||
}
|
||||
retval = totalWritten;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
// short data: write one simple SST record
|
||||
retval = _sst_record_overhead + unicodesize;
|
||||
_record_lengths.add(new Integer(unicodesize));
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
private int calculateUnicodeSize()
|
||||
{
|
||||
int retval = 0;
|
||||
|
||||
for (int k = 0; k < field_3_strings.size(); k++)
|
||||
{
|
||||
UnicodeString string =
|
||||
( UnicodeString ) field_3_strings.get(new Integer(k));
|
||||
|
||||
retval += string.getRecordSize();
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
}
|
||||
|
src/java/org/apache/poi/hssf/record/SSTSerializer.java  (new file, 356 lines)
@@ -0,0 +1,356 @@
|
||||
/* ====================================================================
|
||||
* The Apache Software License, Version 1.1
|
||||
*
|
||||
* Copyright (c) 2002 The Apache Software Foundation. All rights
|
||||
* reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* 3. The end-user documentation included with the redistribution,
|
||||
* if any, must include the following acknowledgment:
|
||||
* "This product includes software developed by the
|
||||
* Apache Software Foundation (http://www.apache.org/)."
|
||||
* Alternately, this acknowledgment may appear in the software itself,
|
||||
* if and wherever such third-party acknowledgments normally appear.
|
||||
*
|
||||
* 4. The names "Apache" and "Apache Software Foundation" and
|
||||
* "Apache POI" must not be used to endorse or promote products
|
||||
* derived from this software without prior written permission. For
|
||||
* written permission, please contact apache@apache.org.
|
||||
*
|
||||
* 5. Products derived from this software may not be called "Apache",
|
||||
* "Apache POI", nor may "Apache" appear in their name, without
|
||||
* prior written permission of the Apache Software Foundation.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
|
||||
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
|
||||
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
* ====================================================================
|
||||
*
|
||||
* This software consists of voluntary contributions made by many
|
||||
* individuals on behalf of the Apache Software Foundation. For more
|
||||
* information on the Apache Software Foundation, please see
|
||||
* <http://www.apache.org/>.
|
||||
*/
|
||||
|
||||
package org.apache.poi.hssf.record;
|
||||
|
||||
import org.apache.poi.util.BinaryTree;
|
||||
import org.apache.poi.util.LittleEndianConsts;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
|
||||
/**
|
||||
* This class handles serialization of SST records. It utilizes the record processor
|
||||
* class to write individual records.  This has been refactored from the SSTRecord class.
|
||||
*
|
||||
* @author Glen Stampoultzis (glens at apache.org)
|
||||
*/
|
||||
class SSTSerializer
|
||||
{
|
||||
|
||||
private List recordLengths;
|
||||
private BinaryTree strings;
|
||||
private int numStrings;
|
||||
private int numUniqueStrings;
|
||||
private SSTRecordHeader sstRecordHeader;
|
||||
|
||||
public SSTSerializer( List recordLengths, BinaryTree strings, int numStrings, int numUniqueStrings )
|
||||
{
|
||||
this.recordLengths = recordLengths;
|
||||
this.strings = strings;
|
||||
this.numStrings = numStrings;
|
||||
this.numUniqueStrings = numUniqueStrings;
|
||||
this.sstRecordHeader = new SSTRecordHeader(numStrings, numUniqueStrings);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a byte array consisting of an SST record and any
|
||||
* required Continue records, ready to be written out.
|
||||
* <p>
|
||||
* If an SST record and any subsequent Continue records are read
|
||||
* in to create this instance, this method should produce a byte
|
||||
* array that is identical to the byte array produced by
|
||||
* concatenating the input records' data.
|
||||
*
|
||||
* @return the byte array
|
||||
*/
|
||||
public int serialize( int offset, byte[] data )
|
||||
{
|
||||
int record_size = getRecordSize();
|
||||
int record_length_index = 0;
|
||||
|
||||
if ( calculateUnicodeSize() > SSTRecord.MAX_DATA_SPACE )
|
||||
serializeLargeRecord( record_size, record_length_index, data, offset );
|
||||
else
|
||||
serializeSingleSSTRecord( data, offset, record_length_index );
|
||||
return record_size;
|
||||
}
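A simplified, standalone sketch of the branch serialize() takes; the real code splits string by string, so the continue count below is only an approximation with example numbers:

// One SST record if the string data fits in MAX_DATA_SPACE, otherwise an SST record
// followed by however many Continue records the remainder needs.
public class SerializePathSketch
{
    public static void main( String[] args )
    {
        int maxDataSpace = 8216;       // SSTRecord.MAX_DATA_SPACE (8228 - 12)
        int maxContinueData = 8224;    // MAX_RECORD_SIZE - STD_RECORD_OVERHEAD
        int unicodeSize = 25000;       // total serialized size of all strings (example)

        if ( unicodeSize <= maxDataSpace )
        {
            System.out.println( "single SST record" );
        }
        else
        {
            int remainder = unicodeSize - maxDataSpace;
            int continues = ( remainder + maxContinueData - 1 ) / maxContinueData;   // ceiling division
            System.out.println( "SST record + " + continues + " Continue record(s)" );
        }
    }
}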
|
||||
|
||||
private int calculateUnicodeSize()
|
||||
{
|
||||
int retval = 0;
|
||||
|
||||
for ( int k = 0; k < strings.size(); k++ )
|
||||
{
|
||||
retval += getUnicodeString(k).getRecordSize();
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
// we can probably simplify this later...this calculates the size
|
||||
// w/o serializing but still is a bit slow
|
||||
public int getRecordSize()
|
||||
{
|
||||
recordLengths = new ArrayList();
|
||||
int retval = 0;
|
||||
int unicodesize = calculateUnicodeSize();
|
||||
|
||||
if ( unicodesize > SSTRecord.MAX_DATA_SPACE )
|
||||
{
|
||||
retval = calcRecordSizesForLongStrings( unicodesize );
|
||||
}
|
||||
else
|
||||
{
|
||||
// short data: write one simple SST record
|
||||
retval = SSTRecord.SST_RECORD_OVERHEAD + unicodesize;
|
||||
recordLengths.add( new Integer( unicodesize ) );
|
||||
}
|
||||
return retval;
|
||||
}

    private int calcRecordSizesForLongStrings( int unicodesize )
    {
        int retval;
        UnicodeString unistr = null;
        int stringreminant = 0;
        int unipos = 0;
        boolean lastneedcontinue = false;
        int stringbyteswritten = 0;
        boolean finished = false;
        boolean first_record = true;
        int totalWritten = 0;

        while ( !finished )
        {
            int record = 0;
            int pos = 0;

            if ( first_record )
            {

                // writing SST record
                record = SSTRecord.MAX_RECORD_SIZE;
                pos = 12;   // 4-byte record header plus the two 4-byte string counts
                first_record = false;
                recordLengths.add( new Integer( record - SSTRecord.STD_RECORD_OVERHEAD ) );
            }
            else
            {

                // writing continue record
                pos = 0;
                int to_be_written = ( unicodesize - stringbyteswritten ) + ( lastneedcontinue ? 1 : 0 );
                int size = Math.min( SSTRecord.MAX_RECORD_SIZE - SSTRecord.STD_RECORD_OVERHEAD, to_be_written );

                if ( size == to_be_written )
                {
                    finished = true;
                }
                record = size + SSTRecord.STD_RECORD_OVERHEAD;
                recordLengths.add( new Integer( size ) );
                pos = 4;    // 4-byte CONTINUE record header
            }
            if ( lastneedcontinue )
            {
                int available = SSTRecord.MAX_RECORD_SIZE - pos;

                if ( stringreminant <= available )
                {

                    // write the remnant of the previous string
                    stringbyteswritten += stringreminant - 1;
                    pos += stringreminant;
                    lastneedcontinue = false;
                }
                else
                {

                    // write as much of the remnant as possible
                    int toBeWritten = unistr.maxBrokenLength( available );

                    if ( available != toBeWritten )
                    {
                        int shortrecord = record - ( available - toBeWritten );
                        recordLengths.set( recordLengths.size() - 1,
                                new Integer( shortrecord - SSTRecord.STD_RECORD_OVERHEAD ) );
                        record = shortrecord;
                    }
                    stringbyteswritten += toBeWritten - 1;
                    pos += toBeWritten;
                    stringreminant -= toBeWritten - 1;
                    lastneedcontinue = true;
                }
            }
            for ( ; unipos < strings.size(); unipos++ )
            {
                int available = SSTRecord.MAX_RECORD_SIZE - pos;
                Integer intunipos = new Integer( unipos );

                unistr = ( (UnicodeString) strings.get( intunipos ) );
                if ( unistr.getRecordSize() <= available )
                {
                    stringbyteswritten += unistr.getRecordSize();
                    pos += unistr.getRecordSize();
                }
                else
                {
                    if ( available >= SSTRecord.STRING_MINIMAL_OVERHEAD )
                    {
                        int toBeWritten = unistr.maxBrokenLength( available );

                        stringbyteswritten += toBeWritten;
                        stringreminant = ( unistr.getRecordSize() - toBeWritten )
                                + LittleEndianConsts.BYTE_SIZE;
                        if ( available != toBeWritten )
                        {
                            int shortrecord = record - ( available - toBeWritten );

                            recordLengths.set( recordLengths.size() - 1,
                                    new Integer( shortrecord - SSTRecord.STD_RECORD_OVERHEAD ) );
                            record = shortrecord;
                        }
                        lastneedcontinue = true;
                        unipos++;
                    }
                    else
                    {
                        int shortrecord = record - available;

                        recordLengths.set( recordLengths.size() - 1,
                                new Integer( shortrecord - SSTRecord.STD_RECORD_OVERHEAD ) );
                        record = shortrecord;
                    }
                    break;
                }
            }
            totalWritten += record;
        }
        retval = totalWritten;

        return retval;
    }


    private void serializeSingleSSTRecord( byte[] data, int offset, int record_length_index )
    {
        // short data: write one simple SST record

        int len = ( (Integer) recordLengths.get( record_length_index++ ) ).intValue();
        int recordSize = SSTRecord.SST_RECORD_OVERHEAD + len - SSTRecord.STD_RECORD_OVERHEAD;
        sstRecordHeader.writeSSTHeader( data, 0 + offset, recordSize );
        int pos = SSTRecord.SST_RECORD_OVERHEAD;

        for ( int k = 0; k < strings.size(); k++ )
        {
            // UnicodeString unistr = ( (UnicodeString) strings.get( new Integer( k ) ) );
            System.arraycopy( getUnicodeString(k).serialize(), 0, data, pos + offset, getUnicodeString(k).getRecordSize() );
            pos += getUnicodeString(k).getRecordSize();
        }
    }

    /**
     * Large records are serialized to an SST record and to one or more CONTINUE records.  Joy.  They have the
     * special characteristic that the option field can change when a single string is split across an
     * SST/CONTINUE boundary.
     */
    private void serializeLargeRecord( int record_size, int record_length_index, byte[] buffer, int offset )
    {

        byte[] stringReminant = null;
        int stringIndex = 0;
        boolean lastneedcontinue = false;
        boolean first_record = true;
        int totalWritten = 0;

        while ( totalWritten != record_size )
        {
            int recordLength = ( (Integer) recordLengths.get( record_length_index++ ) ).intValue();
            RecordProcessor recordProcessor = new RecordProcessor( buffer,
                    recordLength, numStrings, numUniqueStrings );

            // write the appropriate header
            recordProcessor.writeRecordHeader( offset, totalWritten, recordLength, first_record );
            first_record = false;

            // now, write the rest of the data into the current
            // record space
            if ( lastneedcontinue )
            {
                lastneedcontinue = stringReminant.length > recordProcessor.getAvailable();
                // the last string in the previous record was not written out completely
                stringReminant = recordProcessor.writeStringRemainder( lastneedcontinue,
                        stringReminant, offset, totalWritten );
            }

            // last string's remnant, if any, is cleaned up as best as can be done ... now let's try and write
            // some more strings
            for ( ; stringIndex < strings.size(); stringIndex++ )
            {
                UnicodeString unistr = getUnicodeString( stringIndex );

                if ( unistr.getRecordSize() <= recordProcessor.getAvailable() )
                {
                    recordProcessor.writeWholeString( unistr, offset, totalWritten );
                }
                else
                {

                    // can't write the entire string out
                    if ( recordProcessor.getAvailable() >= SSTRecord.STRING_MINIMAL_OVERHEAD )
                    {

                        // we can write some of it
                        stringReminant = recordProcessor.writePartString( unistr, offset, totalWritten );
                        lastneedcontinue = true;
                        stringIndex++;
                    }
                    break;
                }
            }
            totalWritten += recordLength + SSTRecord.STD_RECORD_OVERHEAD;
        }
    }
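
    // A sketch of the continuation hand-off, with assumed numbers: if only 10 bytes are
    // still available in the current record but the next string needs 25, writePartString()
    // emits what fits and hands back the leftover bytes; the size calculation above adds
    // LittleEndianConsts.BYTE_SIZE to that remnant because the following CONTINUE record
    // repeats the string's option flag before the remaining character data.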

    private UnicodeString getUnicodeString( int index )
    {
        Integer intunipos = new Integer( index );
        return ( (UnicodeString) strings.get( intunipos ) );
    }

}
@ -66,6 +66,7 @@ import org.apache.poi.util.StringUtil;
 * REFERENCE: PG 264 Microsoft Excel 97 Developer's Kit (ISBN: 1-57231-498-2)<P>
 * @author Andrew C. Oliver
 * @author Marc Johnson (mjohnson at apache dot org)
 * @author Glen Stampoultzis (glens at apache.org)
 * @version 2.0-pre
 */

@ -77,12 +78,28 @@ public class UnicodeString
    private short field_1_charCount;    // = 0;
    private byte field_2_optionflags;   // = 0;
    private String field_3_string;      // = null;
    private final int RICH_TEXT_BIT = 8;

    public UnicodeString()
    {
    }

    public int hashCode()
    {
        return field_1_charCount;
        int stringHash = 0;
        if (field_3_string != null)
            stringHash = field_3_string.hashCode();
        return field_1_charCount + stringHash;
    }

    /**
     * Our handling of equals is inconsistent with compareTo.  The trouble is that, because we don't truly
     * understand rich text fields yet, it's difficult to make a sound comparison.
     *
     * @param o The object to compare.
     * @return true if the object is actually equal.
     */
    public boolean equals(Object o)
    {
        if ((o == null) || (o.getClass() != this.getClass()))
@ -96,10 +113,6 @@ public class UnicodeString
                && field_3_string.equals(other.field_3_string));
    }

    public UnicodeString()
    {
    }

    /**
     * construct a unicode string record and fill its fields, ID is ignored
     * @param id - ignored
@ -278,19 +291,10 @@ public class UnicodeString

    public int serialize(int offset, byte [] data)
    {
        int charsize = 1;

        if (getOptionFlags() == 1)
        {
            charsize = 2;
        }

        // byte[] retval = new byte[ 3 + (getString().length() * charsize) ];
        LittleEndian.putShort(data, 0 + offset, getCharCount());
        data[ 2 + offset ] = getOptionFlags();

        // System.out.println("Unicode: We've got "+retval[2]+" for our option flag");
        if (getOptionFlags() == 0)
        if (!isUncompressedUnicode())
        {
            StringUtil.putCompressedUnicode(getString(), data, 0x3 + offset);
        }
@ -302,14 +306,14 @@ public class UnicodeString
        return getRecordSize();
    }

    private boolean isUncompressedUnicode()
    {
        return (getOptionFlags() & 0x01) == 1;
    }

    public int getRecordSize()
    {
        int charsize = 1;

        if (getOptionFlags() == 1)
        {
            charsize = 2;
        }
        int charsize = isUncompressedUnicode() ? 2 : 1;
        return 3 + (getString().length() * charsize);
    }

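    // Worked example of the size calculation above, for illustration: the string "ABC"
    // occupies 3 + 3*1 = 6 bytes as compressed (8-bit) characters and 3 + 3*2 = 9 bytes
    // as uncompressed (16-bit) unicode, the 3 being the two-byte character count plus
    // the one-byte option flags written by serialize().
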
@ -338,11 +342,16 @@ public class UnicodeString
        return this.getString().compareTo(str.getString());
    }

    public boolean isRichText()
    {
        return (getOptionFlags() & RICH_TEXT_BIT) != 0;
    }

    int maxBrokenLength(final int proposedBrokenLength)
    {
        int rval = proposedBrokenLength;

        if ((field_2_optionflags & 1) == 1)
        if (isUncompressedUnicode())
        {
            int proposedStringLength = proposedBrokenLength - 3;

@ -355,12 +364,4 @@ public class UnicodeString
        return rval;
    }

    // public boolean equals(Object obj) {
    //     if (!(obj instanceof UnicodeString)) return false;
    //
    //     UnicodeString str = (UnicodeString)obj;
    //
    //
    //     return this.getString().equals(str.getString());
    // }
}

@ -617,4 +617,5 @@ public class LittleEndian
        return copy;
    }


}

BIN
src/testcases/org/apache/poi/hssf/data/duprich1.xls
Normal file
Binary file not shown.
BIN
src/testcases/org/apache/poi/hssf/data/duprich2.xls
Normal file
Binary file not shown.
@ -1,4 +1,3 @@

/* ====================================================================
 * The Apache Software License, Version 1.1
 *
@ -55,13 +54,17 @@

package org.apache.poi.hssf.record;

import org.apache.poi.util.*;

import junit.framework.*;
import junit.framework.TestCase;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.apache.poi.hssf.usermodel.HSSFSheet;

import java.io.*;

import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

/**
 * @author Marc Johnson (mjohnson at apache dot org)
@ -428,12 +431,12 @@ public class TestSSTRecord
        assertEquals( 1464, record.getNumStrings() );
        assertEquals( 688, record.getNumUniqueStrings() );
        assertEquals( 492, record.countStrings() );
        assertEquals(1, record.getExpectedChars());
        assertEquals( 1, record.getDeserializer().getExpectedChars() );
        assertEquals( "Consolidated B-24J Liberator The Dragon & His Tai",
        record.getUnfinishedString());
        assertEquals(52, record.getTotalLength());
        assertEquals(3, record.getStringDataOffset());
        assertTrue(!record.isWideChar());
                record.getDeserializer().getUnfinishedString() );
        assertEquals( 52, record.getDeserializer().getTotalLength() );
        assertEquals( 3, record.getDeserializer().getStringDataOffset() );
        assertTrue( !record.getDeserializer().isWideChar() );
    }

    /**
@ -447,11 +450,11 @@ public class TestSSTRecord
        assertEquals( 0, record.getNumStrings() );
        assertEquals( 0, record.getNumUniqueStrings() );
        assertEquals( 0, record.countStrings() );
        assertEquals(0, record.getExpectedChars());
        assertEquals("", record.getUnfinishedString());
        assertEquals(0, record.getTotalLength());
        assertEquals(0, record.getStringDataOffset());
        assertTrue(!record.isWideChar());
        assertEquals( 0, record.getDeserializer().getExpectedChars() );
        assertEquals( "", record.getDeserializer().getUnfinishedString() );
        assertEquals( 0, record.getDeserializer().getTotalLength() );
        assertEquals( 0, record.getDeserializer().getStringDataOffset() );
        assertTrue( !record.getDeserializer().isWideChar() );
        byte[] output = record.serialize();
        byte[] expected =
        {
@ -571,4 +574,46 @@ public class TestSSTRecord
        }
        return rval;
    }

    /**
     * Tests that workbooks with rich text that duplicates a non rich text cell can be read and written.
     */
    public void testReadWriteDuplicatedRichText1()
            throws Exception
    {
        File file = new File( _test_file_path + File.separator + "duprich1.xls" );
        InputStream stream = new FileInputStream(file);
        HSSFWorkbook wb = new HSSFWorkbook(stream);
        stream.close();
        HSSFSheet sheet = wb.getSheetAt(1);
        assertEquals("01/05 (Wed) ", sheet.getRow(0).getCell((short)8).getStringCellValue());
        assertEquals("01/05 (Wed)", sheet.getRow(1).getCell((short)8).getStringCellValue());

        file = File.createTempFile("testout", "xls");
        FileOutputStream outStream = new FileOutputStream(file);
        wb.write(outStream);
        outStream.close();
        file.delete();

        // test the second file.
        file = new File( _test_file_path + File.separator + "duprich2.xls" );
        stream = new FileInputStream(file);
        wb = new HSSFWorkbook(stream);
        stream.close();
        sheet = wb.getSheetAt(0);
        int row = 0;
        assertEquals("Testing ", sheet.getRow(row++).getCell((short)0).getStringCellValue());
        assertEquals("rich", sheet.getRow(row++).getCell((short)0).getStringCellValue());
        assertEquals("text", sheet.getRow(row++).getCell((short)0).getStringCellValue());
        assertEquals("strings", sheet.getRow(row++).getCell((short)0).getStringCellValue());
        assertEquals("Testing ", sheet.getRow(row++).getCell((short)0).getStringCellValue());
        assertEquals("Testing", sheet.getRow(row++).getCell((short)0).getStringCellValue());

        // file = new File("/tryme.xls");
        file = File.createTempFile("testout", ".xls");
        outStream = new FileOutputStream(file);
        wb.write(outStream);
        outStream.close();
        file.delete();
    }
}

@ -479,6 +479,12 @@ public class TestLittleEndian
        return result;
    }

    public void testUnsignedShort()
            throws Exception
    {
        assertEquals(0xffff, LittleEndian.getUShort(new byte[] { (byte)0xff, (byte)0xff }, 0));
    }
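
    // getUShort() reads two bytes in little-endian order as an unsigned value (for example,
    // new byte[]{ 0x34, 0x12 } at offset 0 would yield 0x1234); the all-0xff case above
    // checks that the result is not sign-extended to -1.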

    /**
     * main method to run the unit tests
     *