Compare commits

...

17 Commits

Author SHA1 Message Date
Joe Schaefer 3d2051ae22 move poi to top level
git-svn-id: https://svn.apache.org/repos/asf/poi/branches/REL_1_5_BRANCH@550311 13f79535-47bb-0310-9956-ffa450edef68
2007-06-25 01:51:34 +00:00
Glen Stampoultzis da6101e570 1.5.1 tweaking
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352698 13f79535-47bb-0310-9956-ffa450edef68
2002-06-15 03:21:07 +00:00
Glen Stampoultzis 9009364fb9 Applied extended text fix to 1.5 branch
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352685 13f79535-47bb-0310-9956-ffa450edef68
2002-06-12 09:13:32 +00:00
Glen Stampoultzis bc4ab79fdd Forgot this one.
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352664 13f79535-47bb-0310-9956-ffa450edef68
2002-06-09 12:42:42 +00:00
Glen Stampoultzis d7672fa259 SST fixed!!! Yay... Will reliably read in spreadsheets that have rich text or extended text. Code is a bit cleaner now but could still use more improvement. If I have the energy I'll look into it.
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352663 13f79535-47bb-0310-9956-ffa450edef68
2002-06-09 12:33:26 +00:00
Glen Stampoultzis 8a4d120c34 New example
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352662 13f79535-47bb-0310-9956-ffa450edef68
2002-06-08 12:19:05 +00:00
Glen Stampoultzis 3cfdcec260 More refactoring
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352658 13f79535-47bb-0310-9956-ffa450edef68
2002-06-01 02:25:53 +00:00
Glen Stampoultzis aac81d881b SST Rich Text Fix.
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352656 13f79535-47bb-0310-9956-ffa450edef68
2002-05-29 14:14:17 +00:00
Glen Stampoultzis 9a47a0c1ea Refactoring and cleanup work in prep for looking more deeply into SST handling.
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352650 13f79535-47bb-0310-9956-ffa450edef68
2002-05-27 11:43:27 +00:00
Glen Stampoultzis 13948e264f Fixed biff viewer.
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352514 13f79535-47bb-0310-9956-ffa450edef68
2002-04-28 05:22:45 +00:00
Glen Stampoultzis c2912a8a68 Fix for reading row with no cells
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352512 13f79535-47bb-0310-9956-ffa450edef68
2002-04-28 04:51:34 +00:00
Glen Stampoultzis e2803297ec New logo's... disturbing.
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352477 13f79535-47bb-0310-9956-ffa450edef68
2002-04-24 14:07:33 +00:00
Glen Stampoultzis d881a60f6f Applied Romans patch. Thanks!
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352476 13f79535-47bb-0310-9956-ffa450edef68
2002-04-24 14:00:52 +00:00
Glen Stampoultzis 86d593e5e8 Test for remove row bug
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352475 13f79535-47bb-0310-9956-ffa450edef68
2002-04-24 14:00:29 +00:00
Glen Stampoultzis 69e22c9145 Documentation updates
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352470 13f79535-47bb-0310-9956-ffa450edef68
2002-04-23 12:22:48 +00:00
Glen Stampoultzis 5e3737ff0b Bug fix for cell delete
git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352469 13f79535-47bb-0310-9956-ffa450edef68
2002-04-23 12:22:24 +00:00
No Author 61f124ebb7 This commit was manufactured by cvs2svn to create branch
'REL_1_5_BRANCH'.

git-svn-id: https://svn.apache.org/repos/asf/jakarta/poi/branches/REL_1_5_BRANCH@352468 13f79535-47bb-0310-9956-ffa450edef68
2002-04-22 11:10:31 +00:00
46 changed files with 2945 additions and 2104 deletions

View File

@@ -68,8 +68,8 @@
<version major="1"
minor="5"
fix ="0"
tag="dev"/>
fix ="1"
tag="final"/>
<package>org.apache.poi</package>
@@ -87,7 +87,7 @@
<depend project="junit"/>
<depend project="IzPress"/>
<!-- needed for POI -->
<depend project="commons-logging"/>
<!-- <depend project="commons-logging"/> -->
<!-- Project jars POI build can use -->
<option project="jakarta-log4j"/>

7 binary image files added (content not shown). Sizes: 5.8 KiB, 5.9 KiB, 3.5 KiB, 2.6 KiB, 7.2 KiB, 6.5 KiB, 9.4 KiB.

View File

@@ -39,6 +39,7 @@
<menu label="Get Involved">
<menu-item label="Contributing" href="contrib.html"/>
<menu-item label="Branching" href="branching.html"/>
<menu-item label="Bug Database" href="http://nagoya.apache.org/bugzilla/buglist.cgi?product=POI"/>
<menu-item label="CVS" href="http://jakarta.apache.org/site/cvsindex.html"/>
<menu-item label="Mail Lists" href="http://jakarta.apache.org/site/mail.html"/>

View File

@@ -0,0 +1,97 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V1.1//EN" "./dtd/document-v11.dtd">
<document>
<header>
<title>Branching</title>
<authors>
<person id="GJS" name="Glen Stampoultzis" email="glens@apache.org"/>
</authors>
</header>
<body>
<section title="Branching Conventions">
<p>
Branches are tagged in the following way:
</p>
<ul>
<li>REL_1_5_BRANCH</li>
<li>REL_2_0_BRANCH</li>
</ul>
<p>
Merge points should be tagged as follows:
</p>
<ul>
<li>REL_1_5_BRANCH_MERGE1</li>
<li>REL_1_5_BRANCH_MERGE2</li>
<li>etc...</li>
</ul>
<p>
Releases should be tagged as:
</p>
<ul>
<li>REL_1_5</li>
<li>REL_1_5_1</li>
<li>REL_1_5_2</li>
<li>etc...</li>
</ul>
</section>
<section title="Branching Advise">
<p>
Don't forget which branch you are currently on. This is critically
important. Committing stuff to the wrong branch causes all sorts of
headaches. Best to name your checkout after the branch you are on.
</p>
</section>
<section title="Who Manages Branching?">
<p>
All branching is currently managed by Glen Stampoultzis. If you wish
to create your own branch please let him know. Merging is also
handled by Glen. Just pop him a mail if you feel it's necessary to
create a branch or perform a merge.
</p>
<p>
The reason to go through a single point for branching is that it can be
an easy thing to get wrong. Having a single person managing branches
means there is less chance of getting our wires crossed with this
difficult area of CVS.
</p>
</section>
<section title="Currently Active Branches">
<p>
The following branches are currently active:
</p>
<table>
<tr>
<th>
<b>Branch</b>
</th>
<th>
<b>Description</b>
</th>
</tr>
<tr>
<td>
HEAD
</td>
<td>
This is the trunk and is always active. Currently it is being used to continue development
of the 2.0 release.
</td>
</tr>
<tr>
<td>
REL_1_5_BRANCH
</td>
<td>
All bug fixes not specifically relevant to the 2.0 work should be placed in this branch.
From here they will be merged back to the trunk and the merge point marked.
</td>
</tr>
</table>
</section>
</body>
</document>

View File

@@ -11,7 +11,11 @@
<person id="NKB" name="Nicola Ken Barozzi" email="barozzi@nicolaken.com"/>
<person id="POI-DEVELOPERS" name="Poi Developers" email="poi-dev@jakarta.apache.org"/>
</devs>
<release version="1.5" date="Coming Soon">
<release version="1.5.1" date="16 June 2002">
<action dev="GJS" type="update">Removed depedency on commons logging. Now define poi.logging system property to enable logging to standard out.</action>
<action dev="GJS" type="fix">Fixed SST string handling so that spreadsheets with rich text or extended text will be read correctly.</action>
</release>
<release version="1.5" date="06 May 2002">
<action dev="NKB" type="update">New project build.</action>
<action dev="NKB" type="update">New project documentation system based on Cocoon.</action>
<action dev="POI-DEVELOPERS" type="update">Package rename</action>

View File

@@ -3,7 +3,7 @@
<document>
<header>
<title></title>
<title>Project History</title>
<authors>
<person id="AO" name="Andrew C. Oliver" email="acoliver@apache.org"/>
</authors>

View File

@@ -397,55 +397,20 @@ export CLASSPATH=$CLASSPATH:$HSSFDIR/hssf.jar:$HSSFDIR/poi-poifs.jar:$HSSFDIR/po
<ul>
<li>Type:
<code>java org.apache.poi.hssf.dev.HSSF ~/input.xls output.xls</code>
<p>
<br/>
<br/>
This is the read/write/modify test. It reads in the spreadsheet, modifies a cell, and writes it back out.
Failing this test is not necessarily a bad thing. If HSSF tries to modify a non-existent sheet then this will
most likely fail. No big deal. </p></li>
most likely fail. No big deal. </li>
</ul>
</section>
<section title="HSSF Logging facility">
<p>HSSF now has a logging facility (using
<link href="http://jakarta.apache.org/commons/logging.html">commons logging</link>)
that will record massive amounts of debugging information. It's mostly
useful to us hssf-developing geeks, but might be useful in tracking
down problems.
</p>
<p>So Why use commons logging rather than log4j? Well the following discussion from
the jakarta-general mailing list sums it up pretty well. (Thanks Morgan)
</p>
<p><em>Here's the problem, as I see it.</em>
</p>
<p><em>Suppose Commons component A decides to adopt Log4J, Commons component B
decides to adopt LogKit, and Commons component C adopts JDK1.4 logging.
They will all minimally function with the right jars in the classpath.
However you (the end-user) are left with maintaining configuration for 3
different logging APIs, which is tedious at best. When you take into
account cool features like variable log levels, Log4J appenders and the
like, you're pretty much guaranteed to swallow up useful configuration
options because sophisticated configurations are too difficult to maintain
over multiple logging implementations.</em>
</p>
<section title="HSSF Logging Facility">
<p>
<em>Contrarily, if all three Commons components use a logging facade, you can
focus all your configuration efforts on one logging implementation. Sure,
there is a trade-off; you don't have access to all the features, and the
interface between the facade and the implementation must be maintained. But
the benefits are not just political; they potentially make the end-users
configuration much easier.</em>
</p>
<p><em>Even if all Commons components used the same logging implementation (Log4J
for example), other projects in Jakarta-land may choose otherwise. If you
add enough Jakarta projects to your environment, you eventually end up with
the scenario described above. It's a worthwhile effort to attempt a logging
solution that plays well with the Jakarta community at large. I think in
many cases the Commons Logging component can fill that role.</em>
</p>
<p>
Refer to the commons logging package level javadoc for more information concerning how to
<link href="http://jakarta.apache.org/commons/logging/api/index.html">configure commons logging.</link>
POI has a small amount of logging code embedded within it. Defining the system property
poi.logging will enable logging to standard out.
</p>
</section>
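Editor's note: the paragraph above only says that defining the poi.logging system property enables the embedded logging. As a minimal, hypothetical sketch of what that looks like in practice (the LoggingDemo class name is invented, the "true" value is an assumption since the docs only require the property to be defined, and only HSSF calls already shown elsewhere in this changeset are reused):

import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import java.io.FileOutputStream;

public class LoggingDemo
{
    public static void main( String[] args ) throws Exception
    {
        // Equivalent to launching the JVM with -Dpoi.logging=true;
        // the documentation only says the property must be defined,
        // so the actual value used here is an assumption.
        System.setProperty( "poi.logging", "true" );

        // Any HSSF work done after this point should log to standard out.
        HSSFWorkbook wb = new HSSFWorkbook();
        wb.createSheet();

        FileOutputStream fileOut = new FileOutputStream( "workbook.xls" );
        wb.write( fileOut );
        fileOut.close();
    }
}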
<section title="HSSF Developer's tools">
<section title="HSSF Developer's Tools">
<p>HSSF has a number of tools useful for developers to debug/develop
stuff using HSSF (and more generally XLS files). We've already

View File

@@ -28,6 +28,7 @@
<li><link href="#MergedCells">Merging cells</link></li>
<li><link href="#WorkingWithFonts">Working with fonts</link></li>
<li><link href="#ReadWriteWorkbook">Reading and writing</link></li>
<li><link href="#NewLinesInCells">Use newlines in cells.</link></li>
</ul>
</section>
<section title="Features">
@@ -294,6 +295,35 @@
fileOut.close();
</source>
</section>
<anchor id="UseNewLinesInCells"/>
<section title="Using newlines in cells">
<source>
HSSFWorkbook wb = new HSSFWorkbook();
HSSFSheet s = wb.createSheet();
HSSFRow r = null;
HSSFCell c = null;
HSSFCellStyle cs = wb.createCellStyle();
HSSFFont f = wb.createFont();
HSSFFont f2 = wb.createFont();
cs = wb.createCellStyle();
cs.setFont( f2 );
//Word Wrap MUST be turned on
cs.setWrapText( true );
r = s.createRow( (short) 2 );
r.setHeight( (short) 0x349 );
c = r.createCell( (short) 2 );
c.setCellType( HSSFCell.CELL_TYPE_STRING );
c.setCellValue( "Use \n with word wrap on to create a new line" );
c.setCellStyle( cs );
s.setColumnWidth( (short) 2, (short) ( ( 50 * 8 ) / ( (double) 1 / 20 ) ) );
FileOutputStream fileOut = new FileOutputStream( "workbook.xls" );
wb.write( fileOut );
fileOut.close();</source>
</section>
</section>
</section>
</body>

View File

@@ -6,6 +6,7 @@
<title></title>
<authors>
<person id="AO" name="Andrew C. Oliver" email="acoliver@apache.org"/>
<person id="GS" name="Glen Stampoultzis" email="glens@apache.org"/>
</authors>
</header>
@@ -103,6 +104,30 @@
<img src="images/logoJanssen2.png"/>
</p>
</section>
<section title="RaPi GmbH">
<p>
Contact Person: Fancy at: fancy at my-feiqi.com
</p>
<p>
<img src="images/logoRaPiGmbH1.png"/>&nbsp;&nbsp;&nbsp;
<img src="images/logoRaPiGmbH2.png"/>
</p>
<p>
<img src="images/logoRaPiGmbH3.png"/>
</p>
<p>
<img src="images/logoRaPiGmbH4.png"/>
</p>
<p>
<img src="images/logoRaPiGmbH5.png"/>
</p>
<p>
<img src="images/logoRaPiGmbH6.png"/>
</p>
<p>
<img src="images/logoRaPiGmbH7.png"/>
</p>
</section>
</section>
</body>

View File

@@ -0,0 +1,100 @@
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2002 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" and
* "Apache POI" must not be used to endorse or promote products
* derived from this software without prior written permission. For
* written permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* "Apache POI", nor may "Apache" appear in their name, without
* prior written permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*/
package org.apache.poi.hssf.usermodel.examples;
import org.apache.poi.hssf.usermodel.*;
import java.io.FileOutputStream;
import java.io.IOException;
/**
* Demonstrates how to use newlines in cells.
*
* @author Glen Stampoultzis (glens at apache.org)
* @author Fauzia Lala <fauzia.lala at wcom.com>
*/
public class NewLinesInCells
{
public static void main( String[] args ) throws IOException
{
HSSFWorkbook wb = new HSSFWorkbook();
HSSFSheet s = wb.createSheet();
HSSFRow r = null;
HSSFCell c = null;
HSSFCellStyle cs = wb.createCellStyle();
HSSFFont f = wb.createFont();
HSSFFont f2 = wb.createFont();
cs = wb.createCellStyle();
cs.setFont( f2 );
//Word Wrap MUST be turned on
cs.setWrapText( true );
r = s.createRow( (short) 2 );
r.setHeight( (short) 0x349 );
c = r.createCell( (short) 2 );
c.setCellType( HSSFCell.CELL_TYPE_STRING );
c.setCellValue( "Use \n with word wrap on to create a new line" );
c.setCellStyle( cs );
s.setColumnWidth( (short) 2, (short) ( ( 50 * 8 ) / ( (double) 1 / 20 ) ) );
FileOutputStream fileOut = new FileOutputStream( "workbook.xls" );
wb.write( fileOut );
fileOut.close();
}
}

View File

@@ -631,12 +631,12 @@ public class BiffViewer
retval = new LinkedDataRecord(rectype, size, data);
break;
case FormulaRecord.sid:
retval = new FormulaRecord(rectype, size, data);
break;
// case FormulaRecord.sid:
// retval = new FormulaRecord(rectype, size, data);
// break;
case SheetPropertiesRecord.sid:
retval = new FormulaRecord(rectype, size, data);
retval = new SheetPropertiesRecord(rectype, size, data);
break;

View File

@@ -643,26 +643,11 @@ public class Workbook
{
log.log(DEBUG, "Serializing Workbook with offsets");
// ArrayList bytes = new ArrayList(records.size());
// int arraysize = getSize(); // 0;
int pos = 0;
// for (int k = 0; k < records.size(); k++)
// {
// bytes.add((( Record ) records.get(k)).serialize());
//
// }
// for (int k = 0; k < bytes.size(); k++)
// {
// arraysize += (( byte [] ) bytes.get(k)).length;
// }
for (int k = 0; k < records.size(); k++)
{
// byte[] rec = (( byte [] ) bytes.get(k));
// System.arraycopy(rec, 0, data, offset + pos, rec.length);
pos += (( Record ) records.get(k)).serialize(pos + offset,
data); // rec.length;
pos += (( Record ) records.get(k)).serialize(pos + offset, data); // rec.length;
}
log.log(DEBUG, "Exiting serialize workbook");
return pos;

View File

@@ -161,9 +161,7 @@ public class ContinueRecord
// how many continue records do we need
// System.out.println("In ProcessContinue");
int records =
(data.length
/ 8214); // we've a 1 offset but we're also off by one due to rounding...so it balances out
int records = (data.length / 8214); // we've a 1 offset but we're also off by one due to rounding...so it balances out
int offset = 8214;
// System.out.println("we have "+records+" continue records to process");
@@ -174,8 +172,7 @@ public class ContinueRecord
for (int cr = 0; cr < records; cr++)
{
ContinueRecord contrec = new ContinueRecord();
int arraysize = Math.min((8214 - 4),
(data.length - offset));
int arraysize = Math.min((8214 - 4), (data.length - offset));
byte[] crdata = new byte[ arraysize ];
System.arraycopy(data, offset, crdata, 0, arraysize);

View File

@@ -0,0 +1,202 @@
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2002 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" and
* "Apache POI" must not be used to endorse or promote products
* derived from this software without prior written permission. For
* written permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* "Apache POI", nor may "Apache" appear in their name, without
* prior written permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*/
package org.apache.poi.hssf.record;
import org.apache.poi.util.LittleEndianConsts;
import org.apache.poi.util.LittleEndian;
/**
* Process a single record. That is, an SST record or a continue record.
* Refactored from code originally in SSTRecord.
*
* @author Glen Stampoultzis (glens at apache.org)
*/
class RecordProcessor
{
private byte[] data;
private int recordOffset;
private int available;
private SSTRecordHeader sstRecordHeader;
public RecordProcessor( byte[] data, int available, int numStrings, int numUniqueStrings )
{
this.data = data;
this.available = available;
this.sstRecordHeader = new SSTRecordHeader(numStrings, numUniqueStrings);
}
public int getAvailable()
{
return available;
}
public void writeRecordHeader( int offset, int totalWritten, int recordLength, boolean first_record )
{
if ( first_record )
{
available -= 8;
recordOffset = sstRecordHeader.writeSSTHeader( data, recordOffset + offset + totalWritten, recordLength );
}
else
{
recordOffset = writeContinueHeader( data, recordOffset + offset + totalWritten, recordLength );
}
}
public byte[] writeStringRemainder( boolean lastStringCompleted, byte[] stringreminant, int offset, int totalWritten )
{
if ( !lastStringCompleted )
{
// write remnant -- it'll all fit neatly
System.arraycopy( stringreminant, 0, data, recordOffset + offset + totalWritten, stringreminant.length );
adjustPointers( stringreminant.length );
}
else
{
// write as much of the remnant as possible
System.arraycopy( stringreminant, 0, data, recordOffset + offset + totalWritten, available );
byte[] leftover = new byte[( stringreminant.length - available ) + LittleEndianConsts.BYTE_SIZE];
System.arraycopy( stringreminant, available, leftover, LittleEndianConsts.BYTE_SIZE, stringreminant.length - available );
leftover[0] = stringreminant[0];
stringreminant = leftover;
adjustPointers( available ); // Consume all available remaining space
}
return stringreminant;
}
public void writeWholeString( UnicodeString unistr, int offset, int totalWritten )
{
unistr.serialize( recordOffset + offset + totalWritten, data );
int rsize = unistr.getRecordSize();
adjustPointers( rsize );
}
public byte[] writePartString( UnicodeString unistr, int offset, int totalWritten )
{
byte[] stringReminant;
byte[] ucs = unistr.serialize();
System.arraycopy( ucs, 0, data, recordOffset + offset + totalWritten, available );
stringReminant = new byte[( ucs.length - available ) + LittleEndianConsts.BYTE_SIZE];
System.arraycopy( ucs, available, stringReminant, LittleEndianConsts.BYTE_SIZE, ucs.length - available );
stringReminant[0] = ucs[LittleEndianConsts.SHORT_SIZE];
available = 0;
return stringReminant;
}
private int writeContinueHeader( final byte[] data, final int pos,
final int recsize )
{
int offset = pos;
LittleEndian.putShort( data, offset, ContinueRecord.sid );
offset += LittleEndianConsts.SHORT_SIZE;
LittleEndian.putShort( data, offset, (short) ( recsize ) );
offset += LittleEndianConsts.SHORT_SIZE;
return offset - pos;
}
private void adjustPointers( int amount )
{
recordOffset += amount;
available -= amount;
}
}
class SSTRecordHeader
{
int numStrings;
int numUniqueStrings;
/**
*
*/
public SSTRecordHeader( int numStrings, int numUniqueStrings )
{
this.numStrings = numStrings;
this.numUniqueStrings = numUniqueStrings;
}
/**
* Writes out the SST record. This consists of the sid, the record size, the number of
* strings and the number of unique strings.
*
* @param data The data buffer to write the header to.
* @param bufferIndex The index into the data buffer where the header should be written.
* @param recSize The size of the record data.
*
* @return The number of bytes written.
*/
public int writeSSTHeader( byte[] data, int bufferIndex, int recSize )
{
int offset = bufferIndex;
LittleEndian.putShort( data, offset, SSTRecord.sid );
offset += LittleEndianConsts.SHORT_SIZE;
LittleEndian.putShort( data, offset, (short) ( recSize ) );
offset += LittleEndianConsts.SHORT_SIZE;
// LittleEndian.putInt( data, offset, getNumStrings() );
LittleEndian.putInt( data, offset, numStrings );
offset += LittleEndianConsts.INT_SIZE;
// LittleEndian.putInt( data, offset, getNumUniqueStrings() );
LittleEndian.putInt( data, offset, numUniqueStrings );
offset += LittleEndianConsts.INT_SIZE;
return offset - bufferIndex;
}
}

View File

@@ -452,8 +452,8 @@ public class RowRecord
LittleEndian.putShort(data, 0 + offset, sid);
LittleEndian.putShort(data, 2 + offset, ( short ) 16);
LittleEndian.putShort(data, 4 + offset, getRowNumber());
LittleEndian.putShort(data, 6 + offset, getFirstCol());
LittleEndian.putShort(data, 8 + offset, getLastCol());
LittleEndian.putShort(data, 6 + offset, getFirstCol() == -1 ? (short)0 : getFirstCol());
LittleEndian.putShort(data, 8 + offset, getLastCol() == -1 ? (short)0 : getLastCol());
LittleEndian.putShort(data, 10 + offset, getHeight());
LittleEndian.putShort(data, 12 + offset, getOptimize());
LittleEndian.putShort(data, 14 + offset, field_6_reserved);

View File

@@ -0,0 +1,562 @@
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2002 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" and
* "Apache POI" must not be used to endorse or promote products
* derived from this software without prior written permission. For
* written permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* "Apache POI", nor may "Apache" appear in their name, without
* prior written permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*/
package org.apache.poi.hssf.record;
import org.apache.poi.util.BinaryTree;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
/**
* Handles the task of deserializing an SST string. The two main entry points are manufactureStrings() and processContinueRecord().
*
* @author Glen Stampoultzis (glens at apache.org)
*/
class SSTDeserializer
{
private BinaryTree strings;
/** this is the number of characters we expect in the first sub-record in a subsequent continuation record */
private int continuationExpectedChars;
/** this is the string we were working on before hitting the end of the current record. This string is NOT finished. */
private String unfinishedString;
/** this is true if the string uses wide characters */
private boolean wideChar;
/** this is true if the string is a rich text string */
private boolean richText;
/** this is true if the string is a far east string or some other weird string */
private boolean extendedText;
/** Number of formatting runs in this rich text field */
private short runCount;
/** Number of characters in current string */
private int charCount;
private int extensionLength;
public SSTDeserializer( BinaryTree strings )
{
this.strings = strings;
initVars();
}
private void initVars()
{
runCount = 0;
continuationExpectedChars = 0;
unfinishedString = "";
// bytesInCurrentSegment = 0;
// stringDataOffset = 0;
wideChar = false;
richText = false;
extendedText = false;
}
/**
* This is the starting point where strings are constructed. Note that
* strings may span across multiple continuations. Read the SST record
* carefully before beginning to hack.
*/
public void manufactureStrings( final byte[] data, final int initialOffset, short dataSize )
{
initVars();
int offset = initialOffset;
while ( ( offset - initialOffset ) < dataSize )
{
int remaining = dataSize - offset + initialOffset;
if ( ( remaining > 0 ) && ( remaining < LittleEndianConsts.SHORT_SIZE ) )
{
throw new RecordFormatException( "Cannot get length of the last string in SSTRecord" );
}
if ( remaining == LittleEndianConsts.SHORT_SIZE )
{
setContinuationExpectedChars( LittleEndian.getUShort( data, offset ) );
unfinishedString = "";
break;
}
charCount = LittleEndian.getUShort( data, offset );
readStringHeader( data, offset );
boolean stringContinuesOverContinuation = remaining < totalStringSize();
if ( stringContinuesOverContinuation )
{
int remainingBytes = ( initialOffset + dataSize ) - offset - stringHeaderOverhead();
setContinuationExpectedChars( charCount - calculateCharCount( remainingBytes ) );
charCount -= getContinuationExpectedChars();
}
else
{
setContinuationExpectedChars( 0 );
}
processString( data, offset, charCount );
offset += totalStringSize();
if ( getContinuationExpectedChars() != 0 )
{
break;
}
}
}
// private void dump( final byte[] data, int offset, int length )
// {
// try
// {
// System.out.println( "------------------- SST DUMP -------------------------" );
// HexDump.dump( (byte[]) data, offset, System.out, offset, length );
// }
// catch ( IOException e )
// {
// }
// catch ( ArrayIndexOutOfBoundsException e )
// {
// }
// catch ( IllegalArgumentException e )
// {
// }
// }
/**
* Determines the option types for the string (i.e., compressed or uncompressed unicode, rich text string or
* plain string etc) and calculates the length and offset for the string.
*
*/
private void readStringHeader( final byte[] data, final int index )
{
byte optionFlag = data[index + LittleEndianConsts.SHORT_SIZE];
wideChar = ( optionFlag & 1 ) == 1;
extendedText = ( optionFlag & 4 ) == 4;
richText = ( optionFlag & 8 ) == 8;
runCount = 0;
if ( richText )
{
runCount = LittleEndian.getShort( data, index + SSTRecord.STRING_MINIMAL_OVERHEAD );
}
extensionLength = 0;
if ( extendedText )
{
extensionLength = LittleEndian.getInt( data, index + SSTRecord.STRING_MINIMAL_OVERHEAD
+ (richText ? LittleEndianConsts.SHORT_SIZE : 0) );
}
}
/**
* Reads a string or the first part of a string.
*
* @param characters the number of characters to write.
*
* @return the number of bytes written.
*/
private int processString( final byte[] data, final int dataIndex, final int characters )
{
// length is the length we store it as. not the length that is read.
int length = SSTRecord.STRING_MINIMAL_OVERHEAD + calculateByteCount( characters );
byte[] unicodeStringBuffer = new byte[length];
int offset = 0;
// Set the length in characters
LittleEndian.putUShort( unicodeStringBuffer, offset, characters );
offset += LittleEndianConsts.SHORT_SIZE;
// Set the option flags
unicodeStringBuffer[offset] = data[dataIndex + offset];
// Copy in the string data
int bytesRead = unicodeStringBuffer.length - SSTRecord.STRING_MINIMAL_OVERHEAD;
arraycopy( data, dataIndex + stringHeaderOverhead(), unicodeStringBuffer, SSTRecord.STRING_MINIMAL_OVERHEAD, bytesRead );
// Create the unicode string
UnicodeString string = new UnicodeString( UnicodeString.sid,
(short) unicodeStringBuffer.length,
unicodeStringBuffer );
if ( isStringFinished() )
{
Integer integer = new Integer( strings.size() );
addToStringTable( strings, integer, string );
}
else
{
unfinishedString = string.getString();
}
return bytesRead;
}
private boolean isStringFinished()
{
return getContinuationExpectedChars() == 0;
}
/**
* Okay, we are doing some major cheating here. Because we can't handle rich text strings properly
* we end up getting duplicate strings. To get around this I'm doing two things: 1. Converting rich
* text to normal text and 2. If there's a duplicate I'm adding a space onto the end. Sneaky perhaps
* but it gets the job done until we can handle this a little better.
*/
static public void addToStringTable( BinaryTree strings, Integer integer, UnicodeString string )
{
if ( string.isRichText() )
string.setOptionFlags( (byte) ( string.getOptionFlags() & ( ~8 ) ) );
if ( string.isExtendedText() )
string.setOptionFlags( (byte) ( string.getOptionFlags() & ( ~4 ) ) );
boolean added = false;
while ( added == false )
{
try
{
strings.put( integer, string );
added = true;
}
catch ( Exception ignore )
{
string.setString( string.getString() + " " );
}
}
}
private int calculateCharCount( final int byte_count )
{
return byte_count / ( wideChar ? LittleEndianConsts.SHORT_SIZE : LittleEndianConsts.BYTE_SIZE );
}
/**
* Process a Continue record. A Continue record for an SST record
* contains the same kind of data that the SST record contains,
* with the following exceptions:
* <P>
* <OL>
* <LI>The string counts at the beginning of the SST record are
* not in the Continue record
* <LI>The first string in the Continue record might NOT begin
* with a size. If the last string in the previous record is
* continued in this record, the size is determined by that
* last string in the previous record; the first string will
* begin with a flag byte, followed by the remaining bytes (or
* words) of the last string from the previous
* record. Otherwise, the first string in the record will
* begin with a string length
* </OL>
*
* @param record the Continue record's byte data
*/
public void processContinueRecord( final byte[] record )
{
if ( isStringFinished() )
{
initVars();
manufactureStrings( record, 0, (short) record.length );
}
else
{
// reset the wide bit because that can change across a continuation. the fact that it's
// actually rich text doesn't change across continuations even though the rich text
// may no longer be set in the "new" option flag. confusing huh?
wideChar = ( record[0] & 1 ) == 1;
if ( stringSpansContinuation( record.length - LittleEndianConsts.BYTE_SIZE ) )
{
processEntireContinuation( record );
}
else
{
readStringRemainder( record );
}
}
}
/**
* Reads the remainder string and any subsequent strings from the continuation record.
*
* @param record The entire continuation record data.
*/
private void readStringRemainder( final byte[] record )
{
int stringRemainderSizeInBytes = calculateByteCount( getContinuationExpectedChars() );
// stringDataOffset = LittleEndianConsts.BYTE_SIZE;
byte[] unicodeStringData = new byte[SSTRecord.STRING_MINIMAL_OVERHEAD
+ calculateByteCount( getContinuationExpectedChars() )];
// write the string length
LittleEndian.putShort( unicodeStringData, 0, (short) getContinuationExpectedChars() );
// write the options flag
unicodeStringData[LittleEndianConsts.SHORT_SIZE] = createOptionByte( wideChar, richText, extendedText );
// copy the bytes/words making up the string; skipping
// past all the overhead of the str_data array
arraycopy( record, LittleEndianConsts.BYTE_SIZE, unicodeStringData,
SSTRecord.STRING_MINIMAL_OVERHEAD,
unicodeStringData.length - SSTRecord.STRING_MINIMAL_OVERHEAD );
// use special constructor to create the final string
UnicodeString string = new UnicodeString( UnicodeString.sid,
(short) unicodeStringData.length, unicodeStringData,
unfinishedString );
Integer integer = new Integer( strings.size() );
addToStringTable( strings, integer, string );
int newOffset = offsetForContinuedRecord( stringRemainderSizeInBytes );
manufactureStrings( record, newOffset, (short) ( record.length - newOffset ) );
}
/**
* Calculates the size of the string in bytes based on the character width
*/
private int stringSizeInBytes()
{
return calculateByteCount( charCount );
}
/**
* Calculates the size of the string in bytes. This figure includes all the
* overheads for the string.
*/
private int totalStringSize()
{
return stringSizeInBytes()
+ stringHeaderOverhead()
+ LittleEndianConsts.INT_SIZE * runCount
+ extensionLength;
}
private int stringHeaderOverhead()
{
return SSTRecord.STRING_MINIMAL_OVERHEAD
+ ( richText ? LittleEndianConsts.SHORT_SIZE : 0 )
+ ( extendedText ? LittleEndianConsts.INT_SIZE : 0 );
}
private int offsetForContinuedRecord( int stringRemainderSizeInBytes )
{
return stringRemainderSizeInBytes + LittleEndianConsts.BYTE_SIZE
+ runCount * LittleEndianConsts.INT_SIZE + extensionLength;
}
private byte createOptionByte( boolean wideChar, boolean richText, boolean farEast )
{
return (byte) ( ( wideChar ? 1 : 0 ) + ( farEast ? 4 : 0 ) + ( richText ? 8 : 0 ) );
}
/**
* If the continued record is so long that it spans into the next continue then
* simply suck the remaining string data into the existing <code>unfinishedString</code>.
*
* @param record The data from the continuation record.
*/
private void processEntireContinuation( final byte[] record )
{
// create artificial data to create a UnicodeString
int dataLengthInBytes = record.length - LittleEndianConsts.BYTE_SIZE;
byte[] unicodeStringData = new byte[record.length + LittleEndianConsts.SHORT_SIZE];
LittleEndian.putShort( unicodeStringData, (byte) 0, (short) calculateCharCount( dataLengthInBytes ) );
arraycopy( record, 0, unicodeStringData, LittleEndianConsts.SHORT_SIZE, record.length );
UnicodeString ucs = new UnicodeString( UnicodeString.sid, (short) unicodeStringData.length, unicodeStringData );
unfinishedString = unfinishedString + ucs.getString();
setContinuationExpectedChars( getContinuationExpectedChars() - calculateCharCount( dataLengthInBytes ) );
}
private boolean stringSpansContinuation( int continuationSizeInBytes )
{
return calculateByteCount( getContinuationExpectedChars() ) > continuationSizeInBytes;
}
/**
* @return the number of characters we expect in the first
* sub-record in a subsequent continuation record
*/
int getContinuationExpectedChars()
{
return continuationExpectedChars;
}
private void setContinuationExpectedChars( final int count )
{
continuationExpectedChars = count;
}
private int calculateByteCount( final int character_count )
{
return character_count * ( wideChar ? LittleEndianConsts.SHORT_SIZE : LittleEndianConsts.BYTE_SIZE );
}
/**
* Copies an array from the specified source array, beginning at the
* specified position, to the specified position of the destination array.
* A subsequence of array components are copied from the source
* array referenced by <code>src</code> to the destination array
* referenced by <code>dst</code>. The number of components copied is
* equal to the <code>length</code> argument. The components at
* positions <code>srcOffset</code> through
* <code>srcOffset+length-1</code> in the source array are copied into
* positions <code>dstOffset</code> through
* <code>dstOffset+length-1</code>, respectively, of the destination
* array.
* <p>
* If the <code>src</code> and <code>dst</code> arguments refer to the
* same array object, then the copying is performed as if the
* components at positions <code>srcOffset</code> through
* <code>srcOffset+length-1</code> were first copied to a temporary
* array with <code>length</code> components and then the contents of
* the temporary array were copied into positions
* <code>dstOffset</code> through <code>dstOffset+length-1</code> of the
* destination array.
* <p>
* If <code>dst</code> is <code>null</code>, then a
* <code>NullPointerException</code> is thrown.
* <p>
* If <code>src</code> is <code>null</code>, then a
* <code>NullPointerException</code> is thrown and the destination
* array is not modified.
* <p>
* Otherwise, if any of the following is true, an
* <code>ArrayStoreException</code> is thrown and the destination is
* not modified:
* <ul>
* <li>The <code>src</code> argument refers to an object that is not an
* array.
* <li>The <code>dst</code> argument refers to an object that is not an
* array.
* <li>The <code>src</code> argument and <code>dst</code> argument refer to
* arrays whose component types are different primitive types.
* <li>The <code>src</code> argument refers to an array with a primitive
* component type and the <code>dst</code> argument refers to an array
* with a reference component type.
* <li>The <code>src</code> argument refers to an array with a reference
* component type and the <code>dst</code> argument refers to an array
* with a primitive component type.
* </ul>
* <p>
* Otherwise, if any of the following is true, an
* <code>IndexOutOfBoundsException</code> is
* thrown and the destination is not modified:
* <ul>
* <li>The <code>srcOffset</code> argument is negative.
* <li>The <code>dstOffset</code> argument is negative.
* <li>The <code>length</code> argument is negative.
* <li><code>srcOffset+length</code> is greater than
* <code>src.length</code>, the length of the source array.
* <li><code>dstOffset+length</code> is greater than
* <code>dst.length</code>, the length of the destination array.
* </ul>
* <p>
* Otherwise, if any actual component of the source array from
* position <code>srcOffset</code> through
* <code>srcOffset+length-1</code> cannot be converted to the component
* type of the destination array by assignment conversion, an
* <code>ArrayStoreException</code> is thrown. In this case, let
* <b><i>k</i></b> be the smallest nonnegative integer less than
* length such that <code>src[srcOffset+</code><i>k</i><code>]</code>
* cannot be converted to the component type of the destination
* array; when the exception is thrown, source array components from
* positions <code>srcOffset</code> through
* <code>srcOffset+</code><i>k</i><code>-1</code>
* will already have been copied to destination array positions
* <code>dstOffset</code> through
* <code>dstOffset+</code><i>k</I><code>-1</code> and no other
* positions of the destination array will have been modified.
* (Because of the restrictions already itemized, this
* paragraph effectively applies only to the situation where both
* arrays have component types that are reference types.)
*
* @param src the source array.
* @param src_position start position in the source array.
* @param dst the destination array.
* @param dst_position start position in the destination data.
* @param length the number of array elements to be copied.
* @exception IndexOutOfBoundsException if copying would cause
* access of data outside array bounds.
* @exception ArrayStoreException if an element in the <code>src</code>
* array could not be stored into the <code>dest</code> array
* because of a type mismatch.
* @exception NullPointerException if either <code>src</code> or
* <code>dst</code> is <code>null</code>.
*/
private void arraycopy( byte[] src, int src_position,
byte[] dst, int dst_position,
int length )
{
System.arraycopy( src, src_position, dst, dst_position, length );
}
/**
* @return the unfinished string
*/
String getUnfinishedString()
{
return unfinishedString;
}
/**
* @return true if current string uses wide characters
*/
boolean isWideChar()
{
return wideChar;
}
}

View File

@@ -1,4 +1,3 @@
/* ====================================================================
* The Apache Software License, Version 1.1
*
@@ -59,7 +58,8 @@ import org.apache.poi.util.BinaryTree;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
import java.util.*;
import java.util.Iterator;
import java.util.List;
/**
* Title: Static String Table Record
@@ -71,6 +71,7 @@ import java.util.*;
* <P>
* @author Andrew C. Oliver (acoliver at apache dot org)
* @author Marc Johnson (mjohnson at apache dot org)
* @author Glen Stampoultzis (glens at apache.org)
* @version 2.0-pre
* @see org.apache.poi.hssf.record.LabelSSTRecord
* @see org.apache.poi.hssf.record.ContinueRecord
@@ -80,56 +81,35 @@ public class SSTRecord
extends Record
{
// how big can an SST record be? As big as any record can be: 8228
// bytes
private static final int _max = 8228;
/** how big can an SST record be? As big as any record can be: 8228 bytes */
static final int MAX_RECORD_SIZE = 8228;
// standard record overhead: two shorts (record id plus data space
// size)
private static final int _std_record_overhead =
/** standard record overhead: two shorts (record id plus data space size)*/
static final int STD_RECORD_OVERHEAD =
2 * LittleEndianConsts.SHORT_SIZE;
// SST overhead: the standard record overhead, plus the number of
// strings and the number of unique strings -- two ints
private static final int _sst_record_overhead =
(_std_record_overhead + (2 * LittleEndianConsts.INT_SIZE));
/** SST overhead: the standard record overhead, plus the number of strings and the number of unique strings -- two ints */
static final int SST_RECORD_OVERHEAD =
( STD_RECORD_OVERHEAD + ( 2 * LittleEndianConsts.INT_SIZE ) );
// how much data can we stuff into an SST record? That would be
// _max minus the standard SST record overhead
private static final int _max_data_space =
_max - _sst_record_overhead;
/** how much data can we stuff into an SST record? That would be _max minus the standard SST record overhead */
static final int MAX_DATA_SPACE = MAX_RECORD_SIZE - SST_RECORD_OVERHEAD;
/** overhead for each string includes the string's character count (a short) and the flag describing its characteristics (a byte) */
static final int STRING_MINIMAL_OVERHEAD = LittleEndianConsts.SHORT_SIZE + LittleEndianConsts.BYTE_SIZE;
// overhead for each string includes the string's character count
// (a short) and the flag describing its characteristics (a byte)
private static final int _string_minimal_overhead =
LittleEndianConsts.SHORT_SIZE + LittleEndianConsts.BYTE_SIZE;
public static final short sid = 0xfc;
// union of strings in the SST and EXTSST
/** union of strings in the SST and EXTSST */
private int field_1_num_strings;
// according to docs ONLY SST
/** according to docs ONLY SST */
private int field_2_num_unique_strings;
private BinaryTree field_3_strings;
// this is the number of characters we expect in the first
// sub-record in a subsequent continuation record
private int __expected_chars;
// this is the string we were working on before hitting the end of
// the current record. This string is NOT finished.
private String _unfinished_string;
// this is the total length of the current string being handled
private int _total_length_bytes;
// this is the offset into a string field of the actual string
// data
private int _string_data_offset;
// this is true if the string uses wide characters
private boolean _wide_char;
/** Record lengths for initial SST record and all continue records */
private List _record_lengths = null;
private SSTDeserializer deserializer;
/**
* default constructor
@@ -140,11 +120,7 @@ public class SSTRecord
field_1_num_strings = 0;
field_2_num_unique_strings = 0;
field_3_strings = new BinaryTree();
setExpectedChars(0);
_unfinished_string = "";
_total_length_bytes = 0;
_string_data_offset = 0;
_wide_char = false;
deserializer = new SSTDeserializer(field_3_strings);
}
/**
@@ -264,7 +240,8 @@ public class SSTRecord
rval = field_3_strings.size();
field_2_num_unique_strings++;
integer = new Integer( rval );
field_3_strings.put(integer, ucs);
SSTDeserializer.addToStringTable( field_3_strings, integer, ucs );
// field_3_strings.put( integer, ucs );
}
return rval;
}
@@ -329,14 +306,13 @@ public class SSTRecord
public String getString( final int id )
{
return (( UnicodeString ) field_3_strings.get(new Integer(id)))
.getString();
return ( (UnicodeString) field_3_strings.get( new Integer( id ) ) ).getString();
}
public boolean getString16bit(final int id)
public boolean isString16bit( final int id )
{
return ((( UnicodeString ) field_3_strings.get(new Integer(id)))
.getOptionFlags() == 1);
UnicodeString unicodeString = ( (UnicodeString) field_3_strings.get( new Integer( id ) ) );
return ( ( unicodeString.getOptionFlags() & 0x01 ) == 1 );
}
/**
@@ -364,311 +340,9 @@ public class SSTRecord
return buffer.toString();
}
/**
* Create a byte array consisting of an SST record and any
* required Continue records, ready to be written out.
* <p>
* If an SST record and any subsequent Continue records are read
* in to create this instance, this method should produce a byte
* array that is identical to the byte array produced by
* concatenating the input records' data.
*
* @return the byte array
*/
public int serialize(int offset, byte [] data)
{
int rval = getRecordSize();
int record_length_index = 0;
// get the linear size of that array
int unicodesize = calculateUnicodeSize();
if (unicodesize > _max_data_space)
{
byte[] stringreminant = null;
int unipos = 0;
boolean lastneedcontinue = false;
int stringbyteswritten = 0;
boolean first_record = true;
int totalWritten = 0;
int size = 0;
while (totalWritten != rval)
{
int pos = 0;
// write the appropriate header
int available;
if (first_record)
{
size =
(( Integer ) _record_lengths
.get(record_length_index++)).intValue();
available = size - 8;
pos = writeSSTHeader(data,
pos + offset
+ totalWritten, size);
size += _std_record_overhead;
first_record = false;
}
else
{
pos = 0;
int to_be_written = (unicodesize - stringbyteswritten)
+ (lastneedcontinue ? 1
: 0); // not used?
size =
(( Integer ) _record_lengths
.get(record_length_index++)).intValue();
available = size;
pos = writeContinueHeader(data,
pos + offset
+ totalWritten, size);
size = size + _std_record_overhead;
}
// now, write the rest of the data into the current
// record space
if (lastneedcontinue)
{
// the last string in the previous record was not
// written out completely
if (stringreminant.length <= available)
{
// write reminant -- it'll all fit neatly
System.arraycopy(stringreminant, 0, data,
pos + offset + totalWritten,
stringreminant.length);
stringbyteswritten += stringreminant.length - 1;
pos += stringreminant.length;
lastneedcontinue = false;
available -= stringreminant.length;
}
else
{
// write as much of the remnant as possible
System.arraycopy(stringreminant, 0, data,
pos + offset + totalWritten,
available);
stringbyteswritten += available - 1;
pos += available;
byte[] leftover =
new byte[ (stringreminant.length - available) + LittleEndianConsts.BYTE_SIZE ];
System.arraycopy(stringreminant, available, leftover,
LittleEndianConsts.BYTE_SIZE,
stringreminant.length - available);
leftover[ 0 ] = stringreminant[ 0 ];
stringreminant = leftover;
available = 0;
lastneedcontinue = true;
}
}
// last string's remnant, if any, is cleaned up as
// best as can be done ... now let's try and write
// some more strings
for (; unipos < field_3_strings.size(); unipos++)
{
Integer intunipos = new Integer(unipos);
UnicodeString unistr =
(( UnicodeString ) field_3_strings.get(intunipos));
if (unistr.getRecordSize() <= available)
{
unistr.serialize(pos + offset + totalWritten, data);
int rsize = unistr.getRecordSize();
stringbyteswritten += rsize;
pos += rsize;
available -= rsize;
}
else
{
// can't write the entire string out
if (available >= _string_minimal_overhead)
{
// we can write some of it
byte[] ucs = unistr.serialize();
System.arraycopy(ucs, 0, data,
pos + offset + totalWritten,
available);
stringbyteswritten += available;
stringreminant =
new byte[ (ucs.length - available) + LittleEndianConsts.BYTE_SIZE ];
System.arraycopy(ucs, available, stringreminant,
LittleEndianConsts.BYTE_SIZE,
ucs.length - available);
stringreminant[ 0 ] =
ucs[ LittleEndianConsts.SHORT_SIZE ];
available = 0;
lastneedcontinue = true;
unipos++;
}
break;
}
}
totalWritten += size;
}
}
else
{
// short data: write one simple SST record
int datasize = _sst_record_overhead + unicodesize; // not used?
writeSSTHeader(
data, 0 + offset,
_sst_record_overhead
+ (( Integer ) _record_lengths.get(
record_length_index++)).intValue() - _std_record_overhead);
int pos = _sst_record_overhead;
for (int k = 0; k < field_3_strings.size(); k++)
{
UnicodeString unistr =
(( UnicodeString ) field_3_strings.get(new Integer(k)));
System.arraycopy(unistr.serialize(), 0, data, pos + offset,
unistr.getRecordSize());
pos += unistr.getRecordSize();
}
}
return rval;
}
// not used: remove?
private int calculateStringsize()
{
int retval = 0;
for (int k = 0; k < field_3_strings.size(); k++)
{
retval +=
(( UnicodeString ) field_3_strings.get(new Integer(k)))
.getRecordSize();
}
return retval;
}
/**
* Process a Continue record. A Continue record for an SST record
* contains the same kind of data that the SST record contains,
* with the following exceptions:
* <P>
* <OL>
* <LI>The string counts at the beginning of the SST record are
* not in the Continue record
* <LI>The first string in the Continue record might NOT begin
* with a size. If the last string in the previous record is
* continued in this record, the size is determined by that
* last string in the previous record; the first string will
* begin with a flag byte, followed by the remaining bytes (or
* words) of the last string from the previous
* record. Otherwise, the first string in the record will
* begin with a string length
* </OL>
*
* @param record the Continue record's byte data
*/
public void processContinueRecord(final byte [] record)
{
if (getExpectedChars() == 0)
{
_unfinished_string = "";
_total_length_bytes = 0;
_string_data_offset = 0;
_wide_char = false;
manufactureStrings(record, 0, ( short ) record.length);
}
else
{
int data_length = record.length - LittleEndianConsts.BYTE_SIZE;
if (calculateByteCount(getExpectedChars()) > data_length)
{
// create artificial data to create a UnicodeString
byte[] input =
new byte[ record.length + LittleEndianConsts.SHORT_SIZE ];
short size = ( short ) (((record[ 0 ] & 1) == 1)
? (data_length
/ LittleEndianConsts.SHORT_SIZE)
: (data_length
/ LittleEndianConsts.BYTE_SIZE));
LittleEndian.putShort(input, ( byte ) 0, size);
System.arraycopy(record, 0, input,
LittleEndianConsts.SHORT_SIZE,
record.length);
UnicodeString ucs = new UnicodeString(UnicodeString.sid,
( short ) input.length,
input);
_unfinished_string = _unfinished_string + ucs.getString();
setExpectedChars(getExpectedChars() - size);
}
else
{
setupStringParameters(record, -LittleEndianConsts.SHORT_SIZE,
getExpectedChars());
byte[] str_data = new byte[ _total_length_bytes ];
int length = _string_minimal_overhead
+ (calculateByteCount(getExpectedChars()));
byte[] bstring = new byte[ length ];
// Copy data from the record into the string
// buffer. Copy skips the length of a short in the
// string buffer, to leave room for the string length.
System.arraycopy(record, 0, str_data,
LittleEndianConsts.SHORT_SIZE,
str_data.length
- LittleEndianConsts.SHORT_SIZE);
// write the string length
LittleEndian.putShort(bstring, 0,
( short ) getExpectedChars());
// write the options flag
bstring[ LittleEndianConsts.SHORT_SIZE ] =
str_data[ LittleEndianConsts.SHORT_SIZE ];
// copy the bytes/words making up the string; skipping
// past all the overhead of the str_data array
System.arraycopy(str_data, _string_data_offset, bstring,
_string_minimal_overhead,
bstring.length - _string_minimal_overhead);
// use special constructor to create the final string
UnicodeString string =
new UnicodeString(UnicodeString.sid,
( short ) bstring.length, bstring,
_unfinished_string);
Integer integer = new Integer(field_3_strings.size());
field_3_strings.put(integer, string);
manufactureStrings(record,
_total_length_bytes
- LittleEndianConsts
.SHORT_SIZE, ( short ) record.length);
}
}
}
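As an illustration, a minimal sketch of feeding an SST record and one Continue record into this class, following the same pattern as TestSSTRecord further down (the HexRead helper and the test-data files are part of this change; _test_file_path comes from that test class):
// sketch: build an SSTRecord from dumped record bytes, then hand it the continuation payload
byte[] testdata = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord" );
byte[] input = new byte[ testdata.length - 4 ];
System.arraycopy( testdata, 4, input, 0, input.length );
// sid and record size come from the 4-byte record header stripped off above
SSTRecord record = new SSTRecord( LittleEndian.getShort( testdata, 0 ),
LittleEndian.getShort( testdata, 2 ), input );
byte[] continueRecord = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecordCR" );
input = new byte[ continueRecord.length - 4 ];
System.arraycopy( continueRecord, 4, input, 0, input.length );
record.processContinueRecord( input );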
/**
* @return sid
*/
public short getSid()
{
return sid;
@ -677,18 +351,11 @@ public class SSTRecord
/**
* @return hashcode
*/
public int hashCode()
{
return field_2_num_unique_strings;
}
/**
*
* @param o
* @return true if equal
*/
public boolean equals( Object o )
{
if ( ( o == null ) || ( o.getClass() != this.getClass() ) )
@ -810,23 +477,10 @@ public class SSTRecord
field_1_num_strings = LittleEndian.getInt( data, 0 + offset );
field_2_num_unique_strings = LittleEndian.getInt( data, 4 + offset );
field_3_strings = new BinaryTree();
setExpectedChars(0);
_unfinished_string = "";
_total_length_bytes = 0;
_string_data_offset = 0;
_wide_char = false;
manufactureStrings(data, 8 + offset, size);
deserializer = new SSTDeserializer(field_3_strings);
deserializer.manufactureStrings( data, 8 + offset, (short)(size - 8) );
}
/**
* @return the number of characters we expect in the first
* sub-record in a subsequent continuation record
*/
int getExpectedChars()
{
return __expected_chars;
}
/**
* @return an iterator of the strings we hold. All instances are
@ -848,372 +502,43 @@ public class SSTRecord
}
/**
* @return the unfinished string
* called by the class that is responsible for writing this sucker.
* Subclasses should implement this so that their data is passed back in a
* byte array.
*
* @return byte array containing instance data
*/
String getUnfinishedString()
public int serialize( int offset, byte[] data )
{
return _unfinished_string;
SSTSerializer serializer = new SSTSerializer(
_record_lengths, field_3_strings, getNumStrings(), getNumUniqueStrings() );
return serializer.serialize( offset, data );
}
/**
* @return the total length of the current string
*/
int getTotalLength()
{
return _total_length_bytes;
}
/**
* @return offset into current string data
*/
int getStringDataOffset()
{
return _string_data_offset;
}
/**
* @return true if current string uses wide characters
*/
boolean isWideChar()
{
return _wide_char;
}
private int writeSSTHeader(final byte [] data, final int pos,
final int recsize)
{
int offset = pos;
LittleEndian.putShort(data, offset, sid);
offset += LittleEndianConsts.SHORT_SIZE;
LittleEndian.putShort(data, offset, ( short ) (recsize));
offset += LittleEndianConsts.SHORT_SIZE;
LittleEndian.putInt(data, offset, getNumStrings());
offset += LittleEndianConsts.INT_SIZE;
LittleEndian.putInt(data, offset, getNumUniqueStrings());
offset += LittleEndianConsts.INT_SIZE;
return offset - pos;
}
private int writeContinueHeader(final byte [] data, final int pos,
final int recsize)
{
int offset = pos;
LittleEndian.putShort(data, offset, ContinueRecord.sid);
offset += LittleEndianConsts.SHORT_SIZE;
LittleEndian.putShort(data, offset, ( short ) (recsize));
offset += LittleEndianConsts.SHORT_SIZE;
return offset - pos;
}
private int calculateUCArrayLength(final byte [][] ucarray)
{
int retval = 0;
for (int k = 0; k < ucarray.length; k++)
{
retval += ucarray[ k ].length;
}
return retval;
}
private void manufactureStrings(final byte [] data, final int index,
short size)
{
int offset = index;
while (offset < size)
{
int remaining = size - offset;
if ((remaining > 0)
&& (remaining < LittleEndianConsts.SHORT_SIZE))
{
throw new RecordFormatException(
"Cannot get length of the last string in SSTRecord");
}
if (remaining == LittleEndianConsts.SHORT_SIZE)
{
setExpectedChars(LittleEndian.getShort(data, offset));
_unfinished_string = "";
break;
}
short char_count = LittleEndian.getShort(data, offset);
setupStringParameters(data, offset, char_count);
if (remaining < _total_length_bytes)
{
setExpectedChars(calculateCharCount(_total_length_bytes
- remaining));
char_count -= getExpectedChars();
_total_length_bytes = remaining;
}
else
{
setExpectedChars(0);
}
processString(data, offset, char_count);
offset += _total_length_bytes;
if (getExpectedChars() != 0)
{
break;
}
}
}
private void setupStringParameters(final byte [] data, final int index,
final int char_count)
{
byte flag = data[ index + LittleEndianConsts.SHORT_SIZE ];
_wide_char = (flag & 1) == 1;
boolean extended = (flag & 4) == 4;
boolean formatted_run = (flag & 8) == 8;
_total_length_bytes = _string_minimal_overhead
+ calculateByteCount(char_count);
_string_data_offset = _string_minimal_overhead;
if (formatted_run)
{
short run_count = LittleEndian.getShort(data,
index
+ _string_data_offset);
_string_data_offset += LittleEndianConsts.SHORT_SIZE;
_total_length_bytes += LittleEndianConsts.SHORT_SIZE
+ (LittleEndianConsts.INT_SIZE
* run_count);
}
if (extended)
{
int extension_length = LittleEndian.getInt(data,
index
+ _string_data_offset);
_string_data_offset += LittleEndianConsts.INT_SIZE;
_total_length_bytes += LittleEndianConsts.INT_SIZE
+ extension_length;
}
}
private void processString(final byte [] data, final int index,
final short char_count)
{
byte[] str_data = new byte[ _total_length_bytes ];
int length = _string_minimal_overhead
+ calculateByteCount(char_count);
byte[] bstring = new byte[ length ];
System.arraycopy(data, index, str_data, 0, str_data.length);
int offset = 0;
LittleEndian.putShort(bstring, offset, char_count);
offset += LittleEndianConsts.SHORT_SIZE;
bstring[ offset ] = str_data[ offset ];
System.arraycopy(str_data, _string_data_offset, bstring,
_string_minimal_overhead,
bstring.length - _string_minimal_overhead);
UnicodeString string = new UnicodeString(UnicodeString.sid,
( short ) bstring.length,
bstring);
if (getExpectedChars() != 0)
{
_unfinished_string = string.getString();
}
else
{
Integer integer = new Integer(field_3_strings.size());
field_3_strings.put(integer, string);
}
}
private void setExpectedChars(final int count)
{
__expected_chars = count;
}
private int calculateByteCount(final int character_count)
{
return character_count * (_wide_char ? LittleEndianConsts.SHORT_SIZE
: LittleEndianConsts.BYTE_SIZE);
}
private int calculateCharCount(final int byte_count)
{
return byte_count / (_wide_char ? LittleEndianConsts.SHORT_SIZE
: LittleEndianConsts.BYTE_SIZE);
}
// we can probably simplify this later...this calculates the size
// w/o serializing but still is a bit slow
public int getRecordSize()
{
_record_lengths = new ArrayList();
int retval = 0;
int unicodesize = calculateUnicodeSize();
SSTSerializer serializer = new SSTSerializer(
_record_lengths, field_3_strings, getNumStrings(), getNumUniqueStrings() );
if (unicodesize > _max_data_space)
return serializer.getRecordSize();
}
SSTDeserializer getDeserializer()
{
UnicodeString unistr = null;
int stringreminant = 0;
int unipos = 0;
boolean lastneedcontinue = false;
int stringbyteswritten = 0;
boolean finished = false;
boolean first_record = true;
int totalWritten = 0;
return deserializer;
}
while (!finished)
/**
* Strange to handle continue records this way. Is it a smell?
*/
public void processContinueRecord( byte[] record )
{
int record = 0;
int pos = 0;
if (first_record)
{
// writing SST record
record = _max;
pos = 12;
first_record = false;
_record_lengths.add(new Integer(record
- _std_record_overhead));
deserializer.processContinueRecord( record );
}
else
{
// writing continue record
pos = 0;
int to_be_written = (unicodesize - stringbyteswritten)
+ (lastneedcontinue ? 1
: 0);
int size = Math.min(_max - _std_record_overhead,
to_be_written);
if (size == to_be_written)
{
finished = true;
}
record = size + _std_record_overhead;
_record_lengths.add(new Integer(size));
pos = 4;
}
if (lastneedcontinue)
{
int available = _max - pos;
if (stringreminant <= available)
{
// write remnant
stringbyteswritten += stringreminant - 1;
pos += stringreminant;
lastneedcontinue = false;
}
else
{
// write as much of the remnant as possible
int toBeWritten = unistr.maxBrokenLength(available);
if (available != toBeWritten)
{
int shortrecord = record
- (available - toBeWritten);
_record_lengths.set(
_record_lengths.size() - 1,
new Integer(
shortrecord - _std_record_overhead));
record = shortrecord;
}
stringbyteswritten += toBeWritten - 1;
pos += toBeWritten;
stringreminant -= toBeWritten - 1;
lastneedcontinue = true;
}
}
for (; unipos < field_3_strings.size(); unipos++)
{
int available = _max - pos;
Integer intunipos = new Integer(unipos);
unistr =
(( UnicodeString ) field_3_strings.get(intunipos));
if (unistr.getRecordSize() <= available)
{
stringbyteswritten += unistr.getRecordSize();
pos += unistr.getRecordSize();
}
else
{
if (available >= _string_minimal_overhead)
{
int toBeWritten =
unistr.maxBrokenLength(available);
stringbyteswritten += toBeWritten;
stringreminant =
(unistr.getRecordSize() - toBeWritten)
+ LittleEndianConsts.BYTE_SIZE;
if (available != toBeWritten)
{
int shortrecord = record
- (available - toBeWritten);
_record_lengths.set(
_record_lengths.size() - 1,
new Integer(
shortrecord - _std_record_overhead));
record = shortrecord;
}
lastneedcontinue = true;
unipos++;
}
else
{
int shortrecord = record - available;
_record_lengths.set(
_record_lengths.size() - 1,
new Integer(
shortrecord - _std_record_overhead));
record = shortrecord;
}
break;
}
}
totalWritten += record;
}
retval = totalWritten;
}
else
{
// short data: write one simple SST record
retval = _sst_record_overhead + unicodesize;
_record_lengths.add(new Integer(unicodesize));
}
return retval;
}
private int calculateUnicodeSize()
{
int retval = 0;
for (int k = 0; k < field_3_strings.size(); k++)
{
UnicodeString string =
( UnicodeString ) field_3_strings.get(new Integer(k));
retval += string.getRecordSize();
}
return retval;
}
}

View File

@ -0,0 +1,356 @@
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2002 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" and
* "Apache POI" must not be used to endorse or promote products
* derived from this software without prior written permission. For
* written permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* "Apache POI", nor may "Apache" appear in their name, without
* prior written permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*/
package org.apache.poi.hssf.record;
import org.apache.poi.util.BinaryTree;
import org.apache.poi.util.LittleEndianConsts;
import java.util.List;
import java.util.ArrayList;
/**
* This class handles serialization of SST records. It utilizes the record processor
* class to write individual records. This has been refactored from the SSTRecord class.
*
* @author Glen Stampoultzis (glens at apache.org)
*/
class SSTSerializer
{
private List recordLengths;
private BinaryTree strings;
private int numStrings;
private int numUniqueStrings;
private SSTRecordHeader sstRecordHeader;
public SSTSerializer( List recordLengths, BinaryTree strings, int numStrings, int numUniqueStrings )
{
this.recordLengths = recordLengths;
this.strings = strings;
this.numStrings = numStrings;
this.numUniqueStrings = numUniqueStrings;
this.sstRecordHeader = new SSTRecordHeader(numStrings, numUniqueStrings);
}
/**
* Write an SST record and any required Continue records into the
* supplied byte array, ready to be written out.
* <p>
* If an SST record and any subsequent Continue records are read
* in to create this instance, this method should produce output
* that is identical to the byte array produced by
* concatenating the input records' data.
*
* @return the total size, in bytes, of the SST record and any Continue records written
*/
public int serialize( int offset, byte[] data )
{
int record_size = getRecordSize();
int record_length_index = 0;
if ( calculateUnicodeSize() > SSTRecord.MAX_DATA_SPACE )
serializeLargeRecord( record_size, record_length_index, data, offset );
else
serializeSingleSSTRecord( data, offset, record_length_index );
return record_size;
}
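For orientation, a sketch of how SSTRecord now drives this serializer (mirroring the delegating serialize() shown earlier in SSTRecord); sizing the buffer from getRecordSize() is the assumed usage here:
// sketch: serialize all strings into a freshly sized buffer
SSTSerializer serializer = new SSTSerializer(
_record_lengths, field_3_strings, getNumStrings(), getNumUniqueStrings() );
byte[] data = new byte[ serializer.getRecordSize() ];   // SST record plus any Continue records
int written = serializer.serialize( 0, data );          // returns the same total size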
private int calculateUnicodeSize()
{
int retval = 0;
for ( int k = 0; k < strings.size(); k++ )
{
retval += getUnicodeString(k).getRecordSize();
}
return retval;
}
// we can probably simplify this later...this calculates the size
// w/o serializing but still is a bit slow
public int getRecordSize()
{
recordLengths = new ArrayList();
int retval = 0;
int unicodesize = calculateUnicodeSize();
if ( unicodesize > SSTRecord.MAX_DATA_SPACE )
{
retval = calcRecordSizesForLongStrings( unicodesize );
}
else
{
// short data: write one simple SST record
retval = SSTRecord.SST_RECORD_OVERHEAD + unicodesize;
recordLengths.add( new Integer( unicodesize ) );
}
return retval;
}
private int calcRecordSizesForLongStrings( int unicodesize )
{
int retval;
UnicodeString unistr = null;
int stringreminant = 0;
int unipos = 0;
boolean lastneedcontinue = false;
int stringbyteswritten = 0;
boolean finished = false;
boolean first_record = true;
int totalWritten = 0;
while ( !finished )
{
int record = 0;
int pos = 0;
if ( first_record )
{
// writing SST record
record = SSTRecord.MAX_RECORD_SIZE;
pos = 12;
first_record = false;
recordLengths.add( new Integer( record - SSTRecord.STD_RECORD_OVERHEAD ) );
}
else
{
// writing continue record
pos = 0;
int to_be_written = ( unicodesize - stringbyteswritten ) + ( lastneedcontinue ? 1 : 0 );
int size = Math.min( SSTRecord.MAX_RECORD_SIZE - SSTRecord.STD_RECORD_OVERHEAD, to_be_written );
if ( size == to_be_written )
{
finished = true;
}
record = size + SSTRecord.STD_RECORD_OVERHEAD;
recordLengths.add( new Integer( size ) );
pos = 4;
}
if ( lastneedcontinue )
{
int available = SSTRecord.MAX_RECORD_SIZE - pos;
if ( stringreminant <= available )
{
// write remnant
stringbyteswritten += stringreminant - 1;
pos += stringreminant;
lastneedcontinue = false;
}
else
{
// write as much of the remnant as possible
int toBeWritten = unistr.maxBrokenLength( available );
if ( available != toBeWritten )
{
int shortrecord = record - ( available - toBeWritten );
recordLengths.set( recordLengths.size() - 1,
new Integer( shortrecord - SSTRecord.STD_RECORD_OVERHEAD ) );
record = shortrecord;
}
stringbyteswritten += toBeWritten - 1;
pos += toBeWritten;
stringreminant -= toBeWritten - 1;
lastneedcontinue = true;
}
}
for ( ; unipos < strings.size(); unipos++ )
{
int available = SSTRecord.MAX_RECORD_SIZE - pos;
Integer intunipos = new Integer( unipos );
unistr = ( (UnicodeString) strings.get( intunipos ) );
if ( unistr.getRecordSize() <= available )
{
stringbyteswritten += unistr.getRecordSize();
pos += unistr.getRecordSize();
}
else
{
if ( available >= SSTRecord.STRING_MINIMAL_OVERHEAD )
{
int toBeWritten =
unistr.maxBrokenLength( available );
stringbyteswritten += toBeWritten;
stringreminant =
( unistr.getRecordSize() - toBeWritten )
+ LittleEndianConsts.BYTE_SIZE;
if ( available != toBeWritten )
{
int shortrecord = record
- ( available - toBeWritten );
recordLengths.set(
recordLengths.size() - 1,
new Integer(
shortrecord - SSTRecord.STD_RECORD_OVERHEAD ) );
record = shortrecord;
}
lastneedcontinue = true;
unipos++;
}
else
{
int shortrecord = record - available;
recordLengths.set( recordLengths.size() - 1,
new Integer( shortrecord - SSTRecord.STD_RECORD_OVERHEAD ) );
record = shortrecord;
}
break;
}
}
totalWritten += record;
}
retval = totalWritten;
return retval;
}
private void serializeSingleSSTRecord( byte[] data, int offset, int record_length_index )
{
// short data: write one simple SST record
int len = ( (Integer) recordLengths.get( record_length_index++ ) ).intValue();
int recordSize = SSTRecord.SST_RECORD_OVERHEAD + len - SSTRecord.STD_RECORD_OVERHEAD;
sstRecordHeader.writeSSTHeader( data, 0 + offset, recordSize );
int pos = SSTRecord.SST_RECORD_OVERHEAD;
for ( int k = 0; k < strings.size(); k++ )
{
// UnicodeString unistr = ( (UnicodeString) strings.get( new Integer( k ) ) );
System.arraycopy( getUnicodeString(k).serialize(), 0, data, pos + offset, getUnicodeString(k).getRecordSize() );
pos += getUnicodeString(k).getRecordSize();
}
}
/**
* Large records are serialized to an SST and to one or more CONTINUE records. Joy. They have the special
* characteristic that they can change the option field when a single string is split across into a
* CONTINUE record.
*/
private void serializeLargeRecord( int record_size, int record_length_index, byte[] buffer, int offset )
{
byte[] stringReminant = null;
int stringIndex = 0;
boolean lastneedcontinue = false;
boolean first_record = true;
int totalWritten = 0;
while ( totalWritten != record_size )
{
int recordLength = ( (Integer) recordLengths.get( record_length_index++ ) ).intValue();
RecordProcessor recordProcessor = new RecordProcessor( buffer,
recordLength, numStrings, numUniqueStrings );
// write the appropriate header
recordProcessor.writeRecordHeader( offset, totalWritten, recordLength, first_record );
first_record = false;
// now, write the rest of the data into the current
// record space
if ( lastneedcontinue )
{
lastneedcontinue = stringReminant.length > recordProcessor.getAvailable();
// the last string in the previous record was not written out completely
stringReminant = recordProcessor.writeStringRemainder( lastneedcontinue,
stringReminant, offset, totalWritten );
}
// last string's remnant, if any, is cleaned up as best as can be done ... now let's try and write
// some more strings
for ( ; stringIndex < strings.size(); stringIndex++ )
{
UnicodeString unistr = getUnicodeString( stringIndex );
if ( unistr.getRecordSize() <= recordProcessor.getAvailable() )
{
recordProcessor.writeWholeString( unistr, offset, totalWritten );
}
else
{
// can't write the entire string out
if ( recordProcessor.getAvailable() >= SSTRecord.STRING_MINIMAL_OVERHEAD )
{
// we can write some of it
stringReminant = recordProcessor.writePartString( unistr, offset, totalWritten );
lastneedcontinue = true;
stringIndex++;
}
break;
}
}
totalWritten += recordLength + SSTRecord.STD_RECORD_OVERHEAD;
}
}
private UnicodeString getUnicodeString( int index )
{
Integer intunipos = new Integer( index );
return ( (UnicodeString) strings.get( intunipos ) );
}
}

View File

@ -66,6 +66,7 @@ import org.apache.poi.util.StringUtil;
* REFERENCE: PG 264 Microsoft Excel 97 Developer's Kit (ISBN: 1-57231-498-2)<P>
* @author Andrew C. Oliver
* @author Marc Johnson (mjohnson at apache dot org)
* @author Glen Stampoultzis (glens at apache.org)
* @version 2.0-pre
*/
@ -77,12 +78,29 @@ public class UnicodeString
private short field_1_charCount; // = 0;
private byte field_2_optionflags; // = 0;
private String field_3_string; // = null;
private final int RICH_TEXT_BIT = 8;
private final int EXT_BIT = 4;
public UnicodeString()
{
}
public int hashCode()
{
return field_1_charCount;
int stringHash = 0;
if (field_3_string != null)
stringHash = field_3_string.hashCode();
return field_1_charCount + stringHash;
}
/**
* Our handling of equals is inconsistent with compareTo. The trouble is that, because we don't truly understand
* rich text fields yet, it's difficult to make a sound comparison.
*
* @param o The object to compare.
* @return true if the object is actually equal.
*/
public boolean equals(Object o)
{
if ((o == null) || (o.getClass() != this.getClass()))
@ -96,10 +114,6 @@ public class UnicodeString
&& field_3_string.equals(other.field_3_string));
}
public UnicodeString()
{
}
/**
* construct a unicode string record and fill its fields, ID is ignored
* @param id - ignored
@ -278,19 +292,10 @@ public class UnicodeString
public int serialize(int offset, byte [] data)
{
int charsize = 1;
if (getOptionFlags() == 1)
{
charsize = 2;
}
// byte[] retval = new byte[ 3 + (getString().length() * charsize) ];
LittleEndian.putShort(data, 0 + offset, getCharCount());
data[ 2 + offset ] = getOptionFlags();
// System.out.println("Unicode: We've got "+retval[2]+" for our option flag");
if (getOptionFlags() == 0)
if (!isUncompressedUnicode())
{
StringUtil.putCompressedUnicode(getString(), data, 0x3 + offset);
}
@ -302,14 +307,14 @@ public class UnicodeString
return getRecordSize();
}
private boolean isUncompressedUnicode()
{
return (getOptionFlags() & 0x01) == 1;
}
public int getRecordSize()
{
int charsize = 1;
if (getOptionFlags() == 1)
{
charsize = 2;
}
int charsize = isUncompressedUnicode() ? 2 : 1;
return 3 + (getString().length() * charsize);
}
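A quick worked example of the size formula above, using the 20-character test string "At a dinner party or" from the test data later in this change:
// compressed (8-bit) characters  : 3 + 20 * 1 = 23 bytes
// uncompressed (16-bit) characters: 3 + 20 * 2 = 43 bytes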
@ -338,11 +343,16 @@ public class UnicodeString
return this.getString().compareTo(str.getString());
}
public boolean isRichText()
{
return (getOptionFlags() & RICH_TEXT_BIT) != 0;
}
int maxBrokenLength(final int proposedBrokenLength)
{
int rval = proposedBrokenLength;
if ((field_2_optionflags & 1) == 1)
if (isUncompressedUnicode())
{
int proposedStringLength = proposedBrokenLength - 3;
@ -355,12 +365,9 @@ public class UnicodeString
return rval;
}
// public boolean equals(Object obj) {
// if (!(obj instanceof UnicodeString)) return false;
//
// UnicodeString str = (UnicodeString)obj;
//
//
// return this.getString().equals(str.getString());
// }
public boolean isExtendedText()
{
return (getOptionFlags() & EXT_BIT) != 0;
}
}

View File

@ -292,8 +292,8 @@ public class HSSFRow
* get the hssfcell representing a given column (logical cell) 0-based. If you
* ask for a cell that is not defined....you get a null.
*
* @param cellnum - 0 based column number
* @returns HSSFCell representing that column or null if undefined.
* @param cellnum 0 based column number
* @return HSSFCell representing that column or null if undefined.
*/
public HSSFCell getCell(short cellnum)
@ -318,6 +318,9 @@ public class HSSFRow
public short getFirstCellNum()
{
if (getPhysicalNumberOfCells() == 0)
return -1;
else
return row.getFirstCol();
}
@ -328,6 +331,9 @@ public class HSSFRow
public short getLastCellNum()
{
if (getPhysicalNumberOfCells() == 0)
return -1;
else
return row.getLastCol();
}
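A short sketch of the new empty-row behaviour (a freshly created row has no physically defined cells), matching the assertions in TestHSSFRow further down:
HSSFWorkbook workbook = new HSSFWorkbook();
HSSFSheet sheet = workbook.createSheet();
HSSFRow row = sheet.createRow( (short) 0 );
// until a cell is defined: row.getFirstCellNum() == -1 and row.getLastCellNum() == -1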
@ -441,7 +447,7 @@ public class HSSFRow
}
/**
* @returns cell iterator of the physically defined cells. Note element 4 may
* @return cell iterator of the physically defined cells. Note element 4 may
* actually be row cell depending on how many are defined!
*/

View File

@ -1,4 +1,3 @@
/* ====================================================================
* The Apache Software License, Version 1.1
*
@ -60,11 +59,14 @@
*/
package org.apache.poi.hssf.usermodel;
import org.apache.poi.util.POILogFactory;
import org.apache.poi.hssf.model.Sheet;
import org.apache.poi.hssf.model.Workbook;
import org.apache.poi.hssf.record.*;
import org.apache.poi.hssf.record.CellValueRecordInterface;
import org.apache.poi.hssf.record.RowRecord;
import org.apache.poi.hssf.record.VCenterRecord;
import org.apache.poi.hssf.record.WSBoolRecord;
import org.apache.poi.hssf.util.Region;
import org.apache.poi.util.POILogFactory;
import org.apache.poi.util.POILogger;
import java.util.Iterator;
@ -74,7 +76,7 @@ import java.util.TreeMap;
* High level representation of a worksheet.
* @author Andrew C. Oliver (acoliver at apache dot org)
* @author Glen Stampoultzis (glens at apache.org)
* @version 1.0-pre
* @author Libin Roman (romal at vistaportal.com)
*/
public class HSSFSheet
@ -132,11 +134,6 @@ public class HSSFSheet
setPropertiesFromSheet(sheet);
}
/** private default constructor prevents bogus initializationless construction */
private HSSFSheet()
{
}
/**
* used internally to set the properties given a Sheet object
@ -254,7 +251,7 @@ public class HSSFSheet
int rownum = lastrow - 1;
HSSFRow r = getRow(rownum);
while (r == null)
while (r == null && rownum >= 0)
{
r = getRow(--rownum);
}
@ -270,10 +267,14 @@ public class HSSFSheet
int rownum = firstrow + 1;
HSSFRow r = getRow(rownum);
while (r == null)
while (r == null && rownum <= getLastRowNum())
{
r = getRow(++rownum);
}
if (rownum > getLastRowNum())
return -1;
return rownum;
}
@ -344,26 +345,6 @@ public class HSSFSheet
return lastrow;
}
/**
* Seems to be unused (gjs)
*
* used internally to add cells from a high level row to the low level model
* @param row the row object to represent in low level RowRecord.
*/
private void addCellsFromRow(HSSFRow row)
{
Iterator iter = row.cellIterator();
// for (int k = 0; k < row.getPhysicalNumberOfCells(); k++)
while (iter.hasNext())
{
HSSFCell cell =
( HSSFCell ) iter.next(); // row.getPhysicalCellAt(k);
sheet.addValueRecord(row.getRowNum(), cell.getCellValueRecord());
}
}
/**
* set the width (in units of 1/256th of a character width)
* @param column - the column to set (0-based)
@ -400,7 +381,7 @@ public class HSSFSheet
/**
* get the default row height for the sheet (if the rows do not define their own height) in
* twips (1/20 of a point)
* @retun default row height
* @return default row height
*/
public short getDefaultRowHeight()
@ -543,7 +524,7 @@ public class HSSFSheet
}
/**
* @returns an iterator of the PHYSICAL rows. Meaning the 3rd element may not
* @return an iterator of the PHYSICAL rows. Meaning the 3rd element may not
* be the third row if say for instance the second row is undefined.
*/

View File

@ -81,6 +81,7 @@ public class HexDump
* @param stream the OutputStream to which the data is to be
* written
* @param index initial index into the byte array
* @param length number of bytes to output
*
* @exception IOException is thrown if anything goes wrong writing
* the data to stream
@ -89,9 +90,8 @@ public class HexDump
* @exception IllegalArgumentException if the output stream is
* null
*/
public synchronized static void dump(final byte [] data, final long offset,
final OutputStream stream, final int index)
final OutputStream stream, final int index, final int length)
throws IOException, ArrayIndexOutOfBoundsException,
IllegalArgumentException
{
@ -108,9 +108,11 @@ public class HexDump
long display_offset = offset + index;
StringBuffer buffer = new StringBuffer(74);
for (int j = index; j < data.length; j += 16)
int data_length = Math.min(data.length,index+length);
for (int j = index; j < data_length; j += 16)
{
int chars_read = data.length - j;
int chars_read = data_length - j;
if (chars_read > 16)
{
@ -146,6 +148,32 @@ public class HexDump
buffer.setLength(0);
display_offset += chars_read;
}
}
/**
* dump an array of bytes to an OutputStream
*
* @param data the byte array to be dumped
* @param offset its offset, whatever that might mean
* @param stream the OutputStream to which the data is to be
* written
* @param index initial index into the byte array
*
* @exception IOException is thrown if anything goes wrong writing
* the data to stream
* @exception ArrayIndexOutOfBoundsException if the index is
* outside the data array's bounds
* @exception IllegalArgumentException if the output stream is
* null
*/
public synchronized static void dump(final byte [] data, final long offset,
final OutputStream stream, final int index)
throws IOException, ArrayIndexOutOfBoundsException,
IllegalArgumentException
{
dump(data, offset, stream, index, data.length-index);
}
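A brief usage sketch of the new length-limited overload (buffer contents and output stream are arbitrary; both overloads throw IOException):
byte[] bytes = new byte[ 100 ];                  // any data
HexDump.dump( bytes, 0L, System.out, 0, 32 );    // dump only 32 bytes, starting at index 0
HexDump.dump( bytes, 0L, System.out, 0 );        // original overload: dumps through to the end of the array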
public static final String EOL =

View File

@ -0,0 +1,116 @@
package org.apache.poi.util;
import java.io.IOException;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.List;
import java.util.ArrayList;
public class HexRead
{
public static byte[] readTestData( String filename )
throws IOException
{
File file = new File( filename );
FileInputStream stream = new FileInputStream( file );
int characterCount = 0;
byte b = (byte) 0;
List bytes = new ArrayList();
boolean done = false;
while ( !done )
{
int count = stream.read();
switch ( count )
{
case '#':
readToEOL(stream);
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
b <<= 4;
b += (byte) ( count - '0' );
characterCount++;
if ( characterCount == 2 )
{
bytes.add( new Byte( b ) );
characterCount = 0;
b = (byte) 0;
}
break;
case 'A':
case 'B':
case 'C':
case 'D':
case 'E':
case 'F':
b <<= 4;
b += (byte) ( count + 10 - 'A' );
characterCount++;
if ( characterCount == 2 )
{
bytes.add( new Byte( b ) );
characterCount = 0;
b = (byte) 0;
}
break;
case 'a':
case 'b':
case 'c':
case 'd':
case 'e':
case 'f':
b <<= 4;
b += (byte) ( count + 10 - 'a' );
characterCount++;
if ( characterCount == 2 )
{
bytes.add( new Byte( b ) );
characterCount = 0;
b = (byte) 0;
}
break;
case -1:
done = true;
break;
default :
break;
}
}
stream.close();
Byte[] polished = (Byte[]) bytes.toArray( new Byte[0] );
byte[] rval = new byte[polished.length];
for ( int j = 0; j < polished.length; j++ )
{
rval[j] = polished[j].byteValue();
}
return rval;
}
static private void readToEOL( InputStream stream ) throws IOException
{
int c = stream.read();
while ( c != -1 && c != '\n' && c != '\r')
{
c = stream.read();
}
}
}
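For reference, a sketch of how the hex test-data files further down are consumed ('#' starts a comment, each pair of hex digits becomes one byte); the path used here is hypothetical:
// sketch: turn a hex dump file into raw record bytes
byte[] bytes = HexRead.readTestData( "testdata" + File.separator + "evencontinuation.txt" ); // hypothetical location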

View File

@ -236,13 +236,27 @@ public class LittleEndian
*
* @exception ArrayIndexOutOfBoundsException may be thrown
*/
public static void putShort(final byte[] data, final int offset,
final short value)
{
putNumber(data, offset, value, SHORT_SIZE);
}
/**
* put an unsigned short value into a byte array
*
* @param data the byte array
* @param offset a starting offset into the byte array
* @param value the short (16-bit) value
*
* @exception ArrayIndexOutOfBoundsException may be thrown
*/
public static void putUShort(final byte[] data, final int offset,
final int value)
{
putNumber(data, offset, value, SHORT_SIZE);
}
/**
* put a array of shorts into a byte array
*
@ -592,4 +606,17 @@ public class LittleEndian
return copy;
}
/**
* Retrieves an unsigned short. This is converted UP to an int
* so it can fit.
*
* @param data The data to read
* @param offset The offset to read the short from
* @return An integer representation of the short.
*/
public static int getUShort( byte[] data, int offset )
{
return (int)getNumber(data, offset, SHORT_SIZE);
}
}
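A round-trip sketch for the new unsigned-short helpers, matching the assertion added to TestLittleEndian further down:
byte[] buf = new byte[ 2 ];
LittleEndian.putUShort( buf, 0, 0xFFFF );       // values 0..65535, no sign trouble
int value = LittleEndian.getUShort( buf, 0 );   // reads back as a positive int, here 0xFFFF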

View File

@ -1,4 +1,3 @@
/*
* ====================================================================
* The Apache Software License, Version 1.1
@ -55,12 +54,8 @@
*/
package org.apache.poi.util;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.*;
import org.apache.commons.logging.*;
import java.util.HashMap;
import java.util.Map;
/**
* Provides logging without clients having to mess with
@ -73,7 +68,7 @@ import org.apache.commons.logging.*;
public class POILogFactory
{
private static LogFactory _creator = LogFactory.getFactory();
// private static LogFactory _creator = LogFactory.getFactory();
// map of POILogger instances, with classes as keys
private static Map _loggers = new HashMap();
@ -118,7 +113,7 @@ public class POILogFactory
}
else
{
logger = new POILogger(_creator.getInstance(cat));
logger = new POILogger( );
_loggers.put( cat, logger );
}
return logger;

View File

@ -55,8 +55,6 @@
*/
package org.apache.poi.util;
import org.apache.commons.logging.Log;
import java.util.*;
/**
@ -72,7 +70,7 @@ import java.util.*;
public class POILogger
{
private Log log = null;
// private Log log = null;
public static final int DEBUG = 1;
public static final int INFO = 3;
public static final int WARN = 5;
@ -82,13 +80,10 @@ public class POILogger
/**
* package scope so it cannot be instantiated outside of the util
* package. You need a POILogger? Go to the POILogFactory for one
*
* @param log the object that does the real work of logging
*/
POILogger(final Log log)
POILogger()
{
this.log = log;
}
/**
@ -100,91 +95,73 @@ public class POILogger
public void log(final int level, final Object obj1)
{
if(level==FATAL)
{
if(log.isFatalEnabled())
{
log.fatal(obj1);
}
}
else if(level==ERROR)
{
if(log.isErrorEnabled())
{
log.error(obj1);
}
}
else if(level==WARN)
{
if(log.isWarnEnabled())
{
log.warn(obj1);
}
}
else if(level==INFO)
{
if(log.isInfoEnabled())
{
log.info(obj1);
}
}
else if(level==DEBUG)
{
if(log.isDebugEnabled())
{
log.debug(obj1);
}
}
else
{
if(log.isTraceEnabled())
{
log.trace(obj1);
}
if (check(level))
System.out.println( obj1 );
}
private boolean isDebugEnabled()
{
return System.getProperty("poi.logging") != null;
}
private boolean isInfoEnabled()
{
return false;
}
private boolean isWarnEnabled()
{
return System.getProperty("poi.logging") != null;
}
private boolean isErrorEnabled()
{
return System.getProperty("poi.logging") != null;
}
private boolean isFatalEnabled()
{
return System.getProperty("poi.logging") != null;
}
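With this change logging is switched on by a system property rather than commons-logging configuration; a sketch of the assumed usage (only the property's presence is checked, its value is ignored, and INFO stays off either way):
System.setProperty( "poi.logging", "true" );            // any non-null value enables DEBUG/WARN/ERROR/FATAL
POILogger log = POILogFactory.getLogger( "foo" );
log.log( POILogger.WARN, "Test = ", new Integer( 1 ) ); // printed to System.out when the property is set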
/**
* Check if a logger is enabled to log at the specified level
*
* @param level One of DEBUG, INFO, WARN, ERROR, FATAL
* @return true if the logger is enabled to log at the specified level
*/
public boolean check(final Log log, final int level)
public boolean check(final int level)
{
if(level==FATAL)
{
if(log.isFatalEnabled())
if(isFatalEnabled())
{
return true;
}
}
else if(level==ERROR)
{
if(log.isErrorEnabled())
if(isErrorEnabled())
{
return true;
}
}
else if(level==WARN)
{
if(log.isWarnEnabled())
if(isWarnEnabled())
{
return true;
}
}
else if(level==INFO)
{
if(log.isInfoEnabled())
if(isInfoEnabled())
{
return true;
}
}
else if(level==DEBUG)
{
if(log.isDebugEnabled())
if(isDebugEnabled())
{
return true;
}
@ -204,7 +181,7 @@ public class POILogger
public void log(final int level, final Object obj1, final Object obj2)
{
if (check(log, level))
if (check( level))
{
log(level, new StringBuffer(32).append(obj1).append(obj2));
}
@ -222,13 +199,9 @@ public class POILogger
public void log(final int level, final Object obj1, final Object obj2,
final Object obj3)
{
if (check(log, level))
if (check( level))
{
log(level,
new StringBuffer(48).append(obj1).append(obj2)
.append(obj3));
log(level, new StringBuffer(48).append(obj1).append(obj2 ).append(obj3));
}
}
@ -247,7 +220,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level,
new StringBuffer(64).append(obj1).append(obj2)
@ -271,7 +244,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level,
new StringBuffer(80).append(obj1).append(obj2)
@ -297,7 +270,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level ,
new StringBuffer(96).append(obj1).append(obj2)
@ -324,7 +297,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level,
new StringBuffer(112).append(obj1).append(obj2)
@ -353,7 +326,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level,
new StringBuffer(128).append(obj1).append(obj2)
@ -390,7 +363,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level, new StringBuffer(32).append(obj1).append(obj2),
exception);
@ -412,7 +385,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level, new StringBuffer(48).append(obj1).append(obj2)
.append(obj3), exception);
@ -436,7 +409,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level, new StringBuffer(64).append(obj1).append(obj2)
.append(obj3).append(obj4), exception);
@ -461,7 +434,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level, new StringBuffer(80).append(obj1).append(obj2)
.append(obj3).append(obj4).append(obj5), exception);
@ -487,7 +460,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level , new StringBuffer(96).append(obj1)
.append(obj2).append(obj3).append(obj4).append(obj5)
@ -516,7 +489,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level, new StringBuffer(112).append(obj1).append(obj2)
.append(obj3).append(obj4).append(obj5).append(obj6)
@ -546,7 +519,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
log(level, new StringBuffer(128).append(obj1).append(obj2)
.append(obj3).append(obj4).append(obj5).append(obj6)
@ -703,7 +676,7 @@ public class POILogger
{
if (check(log, level))
if (check( level))
{
Object[] params = flattenArrays(unflatParams);

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,16 @@
14 00 # String length 0x14=20
01 # Option flag, 16bit
# String: At a dinner party or
41 00 74 00 20 00 61 00 20 00
64 00 69 00 6E 00 6E 00 65 00
72 00 20 00 70 00 61 00 72 00
74 00 79 00 20 00 6F 00 72 00
# Continuation record (new string on the boundary)
11 00 # String length 0x11=17
00 # Option flag, 8bit
# String: At a dinner party
41 74 20 61 20
64 69 6E 6E 65
72 20 70 61 72
74 79

View File

@ -0,0 +1,21 @@
1D 00 # String length 0x1d=29
09 # Option flag, rich text + 16bit
02 00 # Formatting runs
# String: At a dinner party or
41 00 74 00 20 00 61 00 20 00
64 00 69 00 6E 00 6E 00 65 00
72 00 20 00 70 00 61 00 72 00
74 00 79 00 20 00 6F 00 72 00
# Continuation record
00 # option flag
# String: At At At 
41 74 20
41 74 20
41 74 20
00 00 # Formatting run 1, first formatted char at 0
00 00 # Formatting run 1, Index to font record
02 00 # Formatting run 2, first formatted char at 2
00 00 # Formatting run 2, Index to font record

View File

@ -0,0 +1,7 @@
14 00 # String length 0x14=20
01 # Option flag, 16bit
# String: At a dinner party or
41 00 74 00 20 00 61 00 20 00
64 00 69 00 6E 00 6E 00 65 00
72 00 20 00 70 00 61 00 72 00
74 00 79 00 20 00 6F 00 72 00

View File

@ -0,0 +1,9 @@
# Continuation record
22 00 # String length 0x22=34
00 # Option flag, 8bit
# String: At a dinner party
41 74 20 61 20
64 69 6E 6E 65
72 20 70 61 72
74 79

View File

@ -0,0 +1,7 @@
# Continuation record
00 # option flag
# String: At a dinner party
41 74 20 61 20
64 69 6E 6E 65
72 20 70 61 72
74 79

View File

@ -1,4 +1,3 @@
/* ====================================================================
* The Apache Software License, Version 1.1
*
@ -55,16 +54,21 @@
package org.apache.poi.hssf.record;
import org.apache.poi.util.*;
import junit.framework.*;
import junit.framework.TestCase;
import org.apache.poi.hssf.usermodel.HSSFSheet;
import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.apache.poi.util.BinaryTree;
import org.apache.poi.util.HexRead;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
import java.io.*;
import java.util.*;
import java.util.Arrays;
import java.util.Iterator;
/**
* @author Marc Johnson (mjohnson at apache dot org)
* @author Glen Stampoultzis (glens at apache.org)
*/
public class TestSSTRecord
@ -95,14 +99,14 @@ public class TestSSTRecord
public void testProcessContinueRecord()
throws IOException
{
byte[] testdata = readTestData("BigSSTRecord");
byte[] testdata = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord" );
byte[] input = new byte[testdata.length - 4];
System.arraycopy( testdata, 4, input, 0, input.length );
SSTRecord record =
new SSTRecord( LittleEndian.getShort( testdata, 0 ),
LittleEndian.getShort( testdata, 2 ), input );
byte[] continueRecord = readTestData("BigSSTRecordCR");
byte[] continueRecord = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecordCR" );
input = new byte[continueRecord.length - 4];
System.arraycopy( continueRecord, 4, input, 0, input.length );
@ -138,42 +142,42 @@ public class TestSSTRecord
assertEquals( record, testRecord );
// testing based on new bug report
testdata = readTestData("BigSSTRecord2");
testdata = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord2" );
input = new byte[testdata.length - 4];
System.arraycopy( testdata, 4, input, 0, input.length );
record = new SSTRecord( LittleEndian.getShort( testdata, 0 ),
LittleEndian.getShort( testdata, 2 ), input );
byte[] continueRecord1 = readTestData("BigSSTRecord2CR1");
byte[] continueRecord1 = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord2CR1" );
input = new byte[continueRecord1.length - 4];
System.arraycopy( continueRecord1, 4, input, 0, input.length );
record.processContinueRecord( input );
byte[] continueRecord2 = readTestData("BigSSTRecord2CR2");
byte[] continueRecord2 = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord2CR2" );
input = new byte[continueRecord2.length - 4];
System.arraycopy( continueRecord2, 4, input, 0, input.length );
record.processContinueRecord( input );
byte[] continueRecord3 = readTestData("BigSSTRecord2CR3");
byte[] continueRecord3 = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord2CR3" );
input = new byte[continueRecord3.length - 4];
System.arraycopy( continueRecord3, 4, input, 0, input.length );
record.processContinueRecord( input );
byte[] continueRecord4 = readTestData("BigSSTRecord2CR4");
byte[] continueRecord4 = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord2CR4" );
input = new byte[continueRecord4.length - 4];
System.arraycopy( continueRecord4, 4, input, 0, input.length );
record.processContinueRecord( input );
byte[] continueRecord5 = readTestData("BigSSTRecord2CR5");
byte[] continueRecord5 = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord2CR5" );
input = new byte[continueRecord5.length - 4];
System.arraycopy( continueRecord5, 4, input, 0, input.length );
record.processContinueRecord( input );
byte[] continueRecord6 = readTestData("BigSSTRecord2CR6");
byte[] continueRecord6 = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord2CR6" );
input = new byte[continueRecord6.length - 4];
System.arraycopy( continueRecord6, 4, input, 0, input.length );
record.processContinueRecord( input );
byte[] continueRecord7 = readTestData("BigSSTRecord2CR7");
byte[] continueRecord7 = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord2CR7" );
input = new byte[continueRecord7.length - 4];
System.arraycopy( continueRecord7, 4, input, 0, input.length );
@ -205,6 +209,7 @@ public class TestSSTRecord
}
assertEquals( offset, ser_output.length );
assertEquals( record, testRecord );
assertEquals( record.countStrings(), testRecord.countStrings() );
}
/**
@ -330,7 +335,6 @@ public class TestSSTRecord
*
* @exception IOException
*/
public void testSSTRecordBug()
throws IOException
{
@ -363,7 +367,6 @@ public class TestSSTRecord
/**
* test simple addString
*/
public void testSimpleAddString()
{
SSTRecord record = new SSTRecord();
@ -417,7 +420,7 @@ public class TestSSTRecord
public void testReaderConstructor()
throws IOException
{
byte[] testdata = readTestData("BigSSTRecord");
byte[] testdata = HexRead.readTestData( _test_file_path + File.separator + "BigSSTRecord" );
byte[] input = new byte[testdata.length - 4];
System.arraycopy( testdata, 4, input, 0, input.length );
@ -428,12 +431,12 @@ public class TestSSTRecord
assertEquals( 1464, record.getNumStrings() );
assertEquals( 688, record.getNumUniqueStrings() );
assertEquals( 492, record.countStrings() );
assertEquals(1, record.getExpectedChars());
assertEquals( 1, record.getDeserializer().getContinuationExpectedChars() );
assertEquals( "Consolidated B-24J Liberator The Dragon & His Tai",
record.getUnfinishedString());
assertEquals(52, record.getTotalLength());
assertEquals(3, record.getStringDataOffset());
assertTrue(!record.isWideChar());
record.getDeserializer().getUnfinishedString() );
// assertEquals( 52, record.getDeserializer().getTotalLength() );
// assertEquals( 3, record.getDeserializer().getStringDataOffset() );
assertTrue( !record.getDeserializer().isWideChar() );
}
/**
@ -447,11 +450,11 @@ public class TestSSTRecord
assertEquals( 0, record.getNumStrings() );
assertEquals( 0, record.getNumUniqueStrings() );
assertEquals( 0, record.countStrings() );
assertEquals(0, record.getExpectedChars());
assertEquals("", record.getUnfinishedString());
assertEquals(0, record.getTotalLength());
assertEquals(0, record.getStringDataOffset());
assertTrue(!record.isWideChar());
assertEquals( 0, record.getDeserializer().getContinuationExpectedChars() );
assertEquals( "", record.getDeserializer().getUnfinishedString() );
// assertEquals( 0, record.getDeserializer().getTotalLength() );
// assertEquals( 0, record.getDeserializer().getStringDataOffset() );
assertTrue( !record.getDeserializer().isWideChar() );
byte[] output = record.serialize();
byte[] expected =
{
@ -479,96 +482,93 @@ public class TestSSTRecord
junit.textui.TestRunner.run( TestSSTRecord.class );
}
private byte [] readTestData(String filename)
throws IOException
/**
* Tests that workbooks with rich text that duplicates a non-rich-text cell can be read and written.
*/
public void testReadWriteDuplicatedRichText1()
throws Exception
{
File file = new File(_test_file_path
+ File.separator
+ filename);
FileInputStream stream = new FileInputStream(file);
int characterCount = 0;
byte b = ( byte ) 0;
List bytes = new ArrayList();
boolean done = false;
while (!done)
{
int count = stream.read();
switch (count)
{
case '0' :
case '1' :
case '2' :
case '3' :
case '4' :
case '5' :
case '6' :
case '7' :
case '8' :
case '9' :
b <<= 4;
b += ( byte ) (count - '0');
characterCount++;
if (characterCount == 2)
{
bytes.add(new Byte(b));
characterCount = 0;
b = ( byte ) 0;
}
break;
case 'A' :
case 'B' :
case 'C' :
case 'D' :
case 'E' :
case 'F' :
b <<= 4;
b += ( byte ) (count + 10 - 'A');
characterCount++;
if (characterCount == 2)
{
bytes.add(new Byte(b));
characterCount = 0;
b = ( byte ) 0;
}
break;
case 'a' :
case 'b' :
case 'c' :
case 'd' :
case 'e' :
case 'f' :
b <<= 4;
b += ( byte ) (count + 10 - 'a');
characterCount++;
if (characterCount == 2)
{
bytes.add(new Byte(b));
characterCount = 0;
b = ( byte ) 0;
}
break;
case -1 :
done = true;
break;
default :
break;
}
}
File file = new File( _test_file_path + File.separator + "duprich1.xls" );
InputStream stream = new FileInputStream( file );
HSSFWorkbook wb = new HSSFWorkbook( stream );
stream.close();
Byte[] polished = ( Byte [] ) bytes.toArray(new Byte[ 0 ]);
byte[] rval = new byte[ polished.length ];
HSSFSheet sheet = wb.getSheetAt( 1 );
assertEquals( "01/05 (Wed) ", sheet.getRow( 0 ).getCell( (short) 8 ).getStringCellValue() );
assertEquals( "01/05 (Wed)", sheet.getRow( 1 ).getCell( (short) 8 ).getStringCellValue() );
for (int j = 0; j < polished.length; j++)
file = File.createTempFile( "testout", "xls" );
FileOutputStream outStream = new FileOutputStream( file );
wb.write( outStream );
outStream.close();
file.delete();
// test the second file.
file = new File( _test_file_path + File.separator + "duprich2.xls" );
stream = new FileInputStream( file );
wb = new HSSFWorkbook( stream );
stream.close();
sheet = wb.getSheetAt( 0 );
int row = 0;
assertEquals( "Testing ", sheet.getRow( row++ ).getCell( (short) 0 ).getStringCellValue() );
assertEquals( "rich", sheet.getRow( row++ ).getCell( (short) 0 ).getStringCellValue() );
assertEquals( "text", sheet.getRow( row++ ).getCell( (short) 0 ).getStringCellValue() );
assertEquals( "strings", sheet.getRow( row++ ).getCell( (short) 0 ).getStringCellValue() );
assertEquals( "Testing ", sheet.getRow( row++ ).getCell( (short) 0 ).getStringCellValue() );
assertEquals( "Testing", sheet.getRow( row++ ).getCell( (short) 0 ).getStringCellValue() );
// file = new File("/tryme.xls");
file = File.createTempFile( "testout", ".xls" );
outStream = new FileOutputStream( file );
wb.write( outStream );
outStream.close();
file.delete();
}
public void testSpanRichTextToPlainText()
throws Exception
{
rval[ j ] = polished[ j ].byteValue();
byte[] bytes = HexRead.readTestData( _test_file_path + File.separator + "richtextdata.txt" );
BinaryTree strings = new BinaryTree();
SSTDeserializer deserializer = new SSTDeserializer( strings );
deserializer.manufactureStrings( bytes, 0, (short) 45 );
byte[] continueBytes = new byte[bytes.length - 45];
System.arraycopy( bytes, 45, continueBytes, 0, bytes.length - 45 );
deserializer.processContinueRecord( continueBytes );
// System.out.println( "strings.getKeyForValue(new Integer(0)) = " + strings.get( new Integer( 0 ) ) );
assertEquals( "At a dinner party orAt At At ", strings.get( new Integer( 0 ) ) + "" );
}
return rval;
public void testContinuationWithNoOverlap()
throws Exception
{
byte[] bytes = HexRead.readTestData( _test_file_path + File.separator + "evencontinuation.txt" );
BinaryTree strings = new BinaryTree();
SSTDeserializer deserializer = new SSTDeserializer( strings );
deserializer.manufactureStrings( bytes, 0, (short) 43 );
byte[] continueBytes = new byte[bytes.length - 43];
System.arraycopy( bytes, 43, continueBytes, 0, bytes.length - 43 );
deserializer.processContinueRecord( continueBytes );
assertEquals( "At a dinner party or", strings.get( new Integer( 0 ) ) + "" );
assertEquals( "At a dinner party", strings.get( new Integer( 1 ) ) + "" );
}
public void testStringAcross2Continuations()
throws Exception
{
byte[] bytes = HexRead.readTestData( _test_file_path + File.separator + "stringacross2continuations.txt" );
BinaryTree strings = new BinaryTree();
SSTDeserializer deserializer = new SSTDeserializer( strings );
deserializer.manufactureStrings( bytes, 0, (short) 43 );
bytes = HexRead.readTestData( _test_file_path + File.separator + "stringacross2continuationsCR1.txt" );
deserializer.processContinueRecord( bytes );
bytes = HexRead.readTestData( _test_file_path + File.separator + "stringacross2continuationsCR2.txt" );
deserializer.processContinueRecord( bytes );
assertEquals( "At a dinner party or", strings.get( new Integer( 0 ) ) + "" );
assertEquals( "At a dinner partyAt a dinner party", strings.get( new Integer( 1 ) ) + "" );
}
}

View File

@ -55,7 +55,10 @@
package org.apache.poi.hssf.usermodel;
import junit.framework.TestCase;
import org.apache.poi.hssf.record.RowRecord;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
/**
* Test HSSFRow is okay.
@ -87,15 +90,10 @@ public class TestHSSFRow
assertEquals(1, row.getFirstCellNum());
assertEquals(2, row.getLastCellNum());
RowRecord rowRecord = new RowRecord();
rowRecord.setFirstCol((short) 2);
rowRecord.setLastCol((short) 5);
row = new HSSFRow(workbook.getWorkbook(), sheet.getSheet(), rowRecord);
assertEquals(2, row.getFirstCellNum());
assertEquals(5, row.getLastCellNum());
}
public void testRemoveCell()
throws Exception
{
HSSFWorkbook workbook = new HSSFWorkbook();
HSSFSheet sheet = workbook.createSheet();
@ -115,5 +113,24 @@ public class TestHSSFRow
assertEquals(-1, row.getLastCellNum());
assertEquals(-1, row.getFirstCellNum());
// check the row record actually writes it out as 0's
byte[] data = new byte[100];
row.getRowRecord().serialize(0, data);
assertEquals(0, data[6]);
assertEquals(0, data[8]);
File file = File.createTempFile("XXX", "XLS");
FileOutputStream stream = new FileOutputStream(file);
workbook.write(stream);
stream.close();
FileInputStream inputStream = new FileInputStream(file);
workbook = new HSSFWorkbook(inputStream);
sheet = workbook.getSheetAt(0);
stream.close();
file.delete();
assertEquals(-1, sheet.getRow((short) 0).getLastCellNum());
assertEquals(-1, sheet.getRow((short) 0).getFirstCellNum());
}
}

View File

@ -58,7 +58,6 @@ import junit.framework.TestCase;
import org.apache.poi.hssf.model.Sheet;
import org.apache.poi.hssf.record.VCenterRecord;
import org.apache.poi.hssf.record.WSBoolRecord;
import org.apache.poi.hssf.dev.BiffViewer;
import java.io.File;
import java.io.FileInputStream;
@ -190,7 +189,14 @@ public class TestHSSFSheet
tempFile.delete();
assertNotNull(row);
assertEquals(2, row.getPhysicalNumberOfCells());
}
public void testRemoveRow()
{
HSSFWorkbook workbook = new HSSFWorkbook();
HSSFSheet sheet = workbook.createSheet("Test boolean");
HSSFRow row = sheet.createRow((short) 2);
sheet.removeRow(row);
}
}
}

View File

@ -437,6 +437,12 @@ public class TestLittleEndian
return result;
}
public void testUnsignedShort()
throws Exception
{
assertEquals(0xffff, LittleEndian.getUShort(new byte[] { (byte)0xff, (byte)0xff }, 0));
}
/**
* main method to run the unit tests
*

View File

@ -1,4 +1,3 @@
/* ====================================================================
* The Apache Software License, Version 1.1
*
@ -55,11 +54,9 @@
package org.apache.poi.util;
import org.apache.log4j.Category;
import junit.framework.TestCase;
import junit.framework.*;
import java.io.*;
import java.io.IOException;
/**
* @author Marc Johnson (mjohnson at apache dot org)

View File

@ -1,4 +1,3 @@
/* ====================================================================
* The Apache Software License, Version 1.1
*
@ -57,9 +56,6 @@ package org.apache.poi.util;
import junit.framework.TestCase;
import java.io.File;
import java.io.FileInputStream;
/**
* Tests the log class.
*
@ -84,26 +80,11 @@ public class TestPOILogger
super( s );
}
/**
* Method setUp
*
*
* @exception Exception
*
*/
protected void setUp()
throws Exception
{
super.setUp();
}
/**
* Test different types of log output.
*
* @exception Exception
*/
public void testVariousLogTypes()
throws Exception
{
@ -114,18 +95,10 @@ public class TestPOILogger
POILogger log = POILogFactory.getLogger( "foo" );
log.log( POILogger.WARN, "Test = ", new Integer( 1 ) );
log.logFormatted(POILogger.ERROR, "Test param 1 = %, param 2 = %",
"2", new Integer(3));
log.logFormatted(POILogger.ERROR, "Test param 1 = %, param 2 = %",
new int[]
{
4, 5
});
log.logFormatted( POILogger.ERROR, "Test param 1 = %, param 2 = %", "2", new Integer( 3 ) );
log.logFormatted( POILogger.ERROR, "Test param 1 = %, param 2 = %", new int[]{4, 5} );
log.logFormatted( POILogger.ERROR,
"Test param 1 = %1.1, param 2 = %0.1", new double[]
{
4, 5.23
});
"Test param 1 = %1.1, param 2 = %0.1", new double[]{4, 5.23} );
}
}