Compare commits

...

7 Commits

Author SHA1 Message Date
Nick Burch
8571f9e70c Sync with trunk for NPOIFS
git-svn-id: https://svn.apache.org/repos/asf/poi/branches/NIO_32_BRANCH@1055380 13f79535-47bb-0310-9956-ffa450edef68
2011-01-05 09:42:30 +00:00
Nick Burch
c1e2c16412 Port more NIO changes over from trunk
git-svn-id: https://svn.apache.org/repos/asf/poi/branches/NIO_32_BRANCH@1055375 13f79535-47bb-0310-9956-ffa450edef68
2011-01-05 09:38:10 +00:00
Nick Burch
58a6793fdd Backport the latest POIFS/NPOIFS code from trunk, along with Util to power it, and HPSF to use it. Also makes a few tweaks to let existing code compile against these new versions
git-svn-id: https://svn.apache.org/repos/asf/poi/branches/NIO_32_BRANCH@1053791 13f79535-47bb-0310-9956-ffa450edef68
2010-12-30 02:35:06 +00:00
Nick Burch
fc34d3e1b1 I need a jar of 3.2 with the new NIO backported to it, so creating a branch to hold it
git-svn-id: https://svn.apache.org/repos/asf/poi/branches/NIO_32_BRANCH@1053785 13f79535-47bb-0310-9956-ffa450edef68
2010-12-30 02:27:03 +00:00
Yegor Kozlov
19cfe88647 merged with trunk r703645
git-svn-id: https://svn.apache.org/repos/asf/poi/tags/REL_3_2_FINAL@703646 13f79535-47bb-0310-9956-ffa450edef68
2008-10-11 10:34:33 +00:00
Yegor Kozlov
2a2ae2097f set version.id=3.2-FINAL
git-svn-id: https://svn.apache.org/repos/asf/poi/tags/REL_3_2_FINAL@703644 13f79535-47bb-0310-9956-ffa450edef68
2008-10-11 10:19:20 +00:00
Yegor Kozlov
c49ceaba31 tag r703640 as 3.2-final
git-svn-id: https://svn.apache.org/repos/asf/poi/tags/REL_3_2_FINAL@703641 13f79535-47bb-0310-9956-ffa450edef68
2008-10-11 09:56:11 +00:00
196 changed files with 13227 additions and 16614 deletions

View File

@ -119,11 +119,11 @@ under the License.
<property name="mavendist.poi.dir" location="build/maven-dist/poi"/>
<property name="mavendist.oap.dir" location="build/maven-dist/org.apache.poi"/>
<property name="jar.name" value="poi"/>
<property name="version.id" value="3.2-alpha1"/>
<property name="version.id" value="3.2-NIObackport"/>
<property name="halt.on.test.failure" value="true"/>
<property name="jdk.version.source" value="1.3"
<property name="jdk.version.source" value="1.5"
description="JDK version of source code"/>
<property name="jdk.version.class" value="1.3"
<property name="jdk.version.class" value="1.5"
description="JDK version of generated class files"/>

View File

@ -164,7 +164,7 @@ public class TreeReaderListener implements POIFSReaderListener
{
is.close();
}
catch (IOException ex)
catch (Exception ex)
{
System.err.println
("Unexpected exception while closing " +

View File

@ -36,7 +36,7 @@
</devs>
<!-- Don't forget to update status.xml too! -->
<release version="3.2-alpha1" date="2008-??-??">
<release version="3.2-FINAL" date="2008-10-19">
<action dev="POI-DEVELOPERS" type="fix">45866 - allowed for change of unicode compression across Continue records</action>
<action dev="POI-DEVELOPERS" type="fix">45964 - support for link formulas in Text Objects</action>
<action dev="POI-DEVELOPERS" type="fix">43354 - support for evaluating formulas with missing args</action>

View File

@ -207,7 +207,7 @@ if (cell!=null) {
FileInputStream fis = new FileInputStream("/somepath/test.xls");
HSSFWorkbook wb = new HSSFWorkbook(fis);
HSSFFormulaEvaluator evaluator = new HSSFFormulaEvaluator(wb);
for(int sheetNum = 0; sheetNum < wb.getNumberOfSheets(); sheetNum++) {
for(int sheetNum = 0; sheetNum &lt; wb.getNumberOfSheets(); sheetNum++) {
HSSFSheet sheet = wb.getSheetAt(sheetNum);
for(Iterator rit = sheet.rowIterator(); rit.hasNext();) {

View File

@ -33,7 +33,7 @@
<!-- Don't forget to update changes.xml too! -->
<changes>
<release version="3.2-alpha1" date="2008-??-??">
<release version="3.2-FINAL" date="2008-10-19">
<action dev="POI-DEVELOPERS" type="fix">45866 - allowed for change of unicode compression across Continue records</action>
<action dev="POI-DEVELOPERS" type="fix">45964 - support for link formulas in Text Objects</action>
<action dev="POI-DEVELOPERS" type="fix">43354 - support for evaluating formulas with missing args</action>

View File

@ -20,6 +20,7 @@ package org.apache.poi;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Iterator;
import java.util.List;
@ -47,27 +48,28 @@ import org.apache.poi.util.POILogger;
*/
public abstract class POIDocument {
/** Holds metadata on our document */
protected SummaryInformation sInf;
private SummaryInformation sInf;
/** Holds further metadata on our document */
protected DocumentSummaryInformation dsInf;
/** The open POIFS FileSystem that contains our document */
protected POIFSFileSystem filesystem;
private DocumentSummaryInformation dsInf;
/** The directory that our document lives in */
protected DirectoryNode directory;
/** For our own logging use */
protected POILogger logger = POILogFactory.getLogger(this.getClass());
private final static POILogger logger = POILogFactory.getLogger(POIDocument.class);
/* Have the property streams been read yet? (Only done on-demand) */
protected boolean initialized = false;
private boolean initialized = false;
protected POIDocument(DirectoryNode dir, POIFSFileSystem fs) {
this.filesystem = fs;
protected POIDocument(DirectoryNode dir) {
this.directory = dir;
}
@Deprecated
protected POIDocument(DirectoryNode dir, POIFSFileSystem fs) {
this.directory = dir;
}
protected POIDocument(POIFSFileSystem fs) {
this(fs.getRoot(), fs);
this(fs.getRoot());
}
/**
@ -86,6 +88,25 @@ public abstract class POIDocument {
return sInf;
}
/**
* Will create whichever of SummaryInformation
* and DocumentSummaryInformation (HPSF) properties
* are not already part of your document.
* This is normally useful when creating a new
* document from scratch.
* If the information properties are already there,
* then nothing will happen.
*/
public void createInformationProperties() {
    // Lazily load the existing property streams before deciding
    // what is missing.
    if (!initialized) {
        readProperties();
    }
    // Only create the sets that are absent; existing ones are kept.
    if (sInf == null) {
        sInf = PropertySetFactory.newSummaryInformation();
    }
    if (dsInf == null) {
        dsInf = PropertySetFactory.newDocumentSummaryInformation();
    }
}
/**
* Find, and create objects for, the standard
* Document Information Properties (HPSF).
@ -120,28 +141,31 @@ public abstract class POIDocument {
* if it wasn't found
*/
protected PropertySet getPropertySet(String setName) {
DocumentInputStream dis;
try {
// Find the entry, and get an input stream for it
dis = directory.createDocumentInputStream(setName);
} catch(IOException ie) {
// Oh well, doesn't exist
logger.log(POILogger.WARN, "Error getting property set with name " + setName + "\n" + ie);
return null;
}
//directory can be null when creating new documents
if(directory == null) return null;
try {
// Create the Property Set
PropertySet set = PropertySetFactory.create(dis);
return set;
} catch(IOException ie) {
// Must be corrupt or something like that
logger.log(POILogger.WARN, "Error creating property set with name " + setName + "\n" + ie);
} catch(org.apache.poi.hpsf.HPSFException he) {
// Oh well, doesn't exist
logger.log(POILogger.WARN, "Error creating property set with name " + setName + "\n" + he);
}
return null;
DocumentInputStream dis;
try {
// Find the entry, and get an input stream for it
dis = directory.createDocumentInputStream( directory.getEntry(setName) );
} catch(IOException ie) {
// Oh well, doesn't exist
logger.log(POILogger.WARN, "Error getting property set with name " + setName + "\n" + ie);
return null;
}
try {
// Create the Property Set
PropertySet set = PropertySetFactory.create(dis);
return set;
} catch(IOException ie) {
// Must be corrupt or something like that
logger.log(POILogger.WARN, "Error creating property set with name " + setName + "\n" + ie);
} catch(org.apache.poi.hpsf.HPSFException he) {
// Oh well, doesn't exist
logger.log(POILogger.WARN, "Error creating property set with name " + setName + "\n" + he);
}
return null;
}
/**
@ -157,14 +181,16 @@ public abstract class POIDocument {
* @param writtenEntries a list of POIFS entries to add the property names too
*/
protected void writeProperties(POIFSFileSystem outFS, List writtenEntries) throws IOException {
if(sInf != null) {
writePropertySet(SummaryInformation.DEFAULT_STREAM_NAME,sInf,outFS);
SummaryInformation si = getSummaryInformation();
if(si != null) {
writePropertySet(SummaryInformation.DEFAULT_STREAM_NAME, si, outFS);
if(writtenEntries != null) {
writtenEntries.add(SummaryInformation.DEFAULT_STREAM_NAME);
}
}
if(dsInf != null) {
writePropertySet(DocumentSummaryInformation.DEFAULT_STREAM_NAME,dsInf,outFS);
DocumentSummaryInformation dsi = getDocumentSummaryInformation();
if(dsi != null) {
writePropertySet(DocumentSummaryInformation.DEFAULT_STREAM_NAME, dsi, outFS);
if(writtenEntries != null) {
writtenEntries.add(DocumentSummaryInformation.DEFAULT_STREAM_NAME);
}

View File

@ -27,8 +27,6 @@ import org.apache.poi.util.HexDump;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-02-09
*/
public class ClassID
{

View File

@ -22,8 +22,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2004-06-20
* @version $Id$
*/
public class Constants
{

View File

@ -47,25 +47,28 @@ import org.apache.poi.hpsf.wellknown.PropertyIDMap;
* unmodified) or whether one or more properties have been dropped.</p>
*
* <p>This class is not thread-safe; concurrent access to instances of this
* class must be syncronized.</p>
* class must be synchronized.</p>
*
* <p>While this class is roughly HashMap<Long,CustomProperty>, that's the
* internal representation. To external calls, it should appear as
* HashMap<String,Object> mapping between Names and Custom Property Values.</p>
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2006-02-09
* @version $Id$
*/
public class CustomProperties extends HashMap
@SuppressWarnings("serial")
public class CustomProperties extends HashMap<Object,CustomProperty>
{
/**
* <p>Maps property IDs to property names.</p>
*/
private Map dictionaryIDToName = new HashMap();
private Map<Long,String> dictionaryIDToName = new HashMap<Long,String>();
/**
* <p>Maps property names to property IDs.</p>
*/
private Map dictionaryNameToID = new HashMap();
private Map<String,Long> dictionaryNameToID = new HashMap<String,Long>();
/**
* <p>Tells whether this object is pure or not.</p>
@ -73,39 +76,33 @@ public class CustomProperties extends HashMap
private boolean isPure = true;
/**
* <p>Puts a {@link CustomProperty} into this map. It is assumed that the
* {@link CustomProperty} already has a valid ID. Otherwise use
* {@link #put(CustomProperty)}.</p>
*/
public Object put(final Object name, final Object customProperty) throws ClassCastException
public CustomProperty put(final String name, final CustomProperty cp)
{
final CustomProperty cp = (CustomProperty) customProperty;
if (name == null)
{
/* Ignoring a property without a name. */
isPure = false;
return null;
}
if (!(name instanceof String))
throw new ClassCastException("The name of a custom property must " +
"be a java.lang.String, but it is a " +
name.getClass().getName());
if (!(name.equals(cp.getName())))
throw new IllegalArgumentException("Parameter \"name\" (" + name +
") and custom property's name (" + cp.getName() +
") do not match.");
/* Register name and ID in the dictionary. Mapping in both directions is possible. If there is already a */
final Long idKey = new Long(cp.getID());
final Object oldID = dictionaryNameToID.get(name);
final Long idKey = Long.valueOf(cp.getID());
final Long oldID = dictionaryNameToID.get(name);
dictionaryIDToName.remove(oldID);
dictionaryNameToID.put(name, idKey);
dictionaryIDToName.put(idKey, name);
/* Put the custom property into this map. */
final Object oldCp = super.remove(oldID);
final CustomProperty oldCp = super.remove(oldID);
super.put(idKey, cp);
return oldCp;
}
@ -140,9 +137,9 @@ public class CustomProperties extends HashMap
else
{
long max = 1;
for (final Iterator i = dictionaryIDToName.keySet().iterator(); i.hasNext();)
for (final Iterator<Long> i = dictionaryIDToName.keySet().iterator(); i.hasNext();)
{
final long id = ((Long) i.next()).longValue();
final long id = i.next().longValue();
if (id > max)
max = id;
}
@ -297,15 +294,31 @@ public class CustomProperties extends HashMap
/**
* Returns a set of all the names of our
* custom properties
* custom properties. Equivalent to
* {@link #nameSet()}
*/
public Set keySet() {
return dictionaryNameToID.keySet();
}
return dictionaryNameToID.keySet();
}
/**
 * Returns a set of all the names of our
 * custom properties
 */
public Set<String> nameSet() {
    return dictionaryNameToID.keySet();
}

/**
 * Returns a set of all the IDs of our
 * custom properties
 */
public Set<Long> idSet() {
    // FIX: previously returned dictionaryNameToID.keySet() (the property
    // NAMES), contradicting the documented contract. The IDs are the keys
    // of the ID->name dictionary.
    return dictionaryIDToName.keySet();
}
/**
/**
* <p>Sets the codepage.</p>
*
* @param codepage the codepage
@ -315,7 +328,7 @@ public class CustomProperties extends HashMap
final MutableProperty p = new MutableProperty();
p.setID(PropertyIDMap.PID_CODEPAGE);
p.setType(Variant.VT_I2);
p.setValue(new Integer(codepage));
p.setValue(Integer.valueOf(codepage));
put(new CustomProperty(p));
}
@ -327,14 +340,44 @@ public class CustomProperties extends HashMap
*
* @return the dictionary.
*/
Map getDictionary()
Map<Long,String> getDictionary()
{
return dictionaryIDToName;
}
/**
* Checks against both String Name and Long ID
*/
public boolean containsKey(Object key) {
    // Direct ID lookup.
    if (key instanceof Long) {
        return super.containsKey(key);
    }
    // Name lookup: translate the name to its ID first. An unknown name
    // maps to null, which the backing HashMap never contains.
    if (key instanceof String) {
        Long id = dictionaryNameToID.get(key);
        return super.containsKey(id);
    }
    return false;
}
/**
* Checks against both the property, and its values.
*/
/**
 * Checks against both the {@link CustomProperty} wrappers and the
 * values they wrap.
 */
public boolean containsValue(Object value) {
    if (value instanceof CustomProperty) {
        return super.containsValue(value);
    }
    // FIX: compare wrapped values with equals(), not ==. Callers typically
    // pass freshly created Strings/Integers, which never match by
    // reference identity even when logically equal.
    for (CustomProperty cp : super.values()) {
        Object wrapped = cp.getValue();
        if (wrapped == value || (wrapped != null && wrapped.equals(value))) {
            return true;
        }
    }
    return false;
}
/**
* <p>Gets the codepage.</p>
*
* @return the codepage or -1 if the codepage is undefined.
@ -342,9 +385,9 @@ public class CustomProperties extends HashMap
public int getCodepage()
{
int codepage = -1;
for (final Iterator i = this.values().iterator(); codepage == -1 && i.hasNext();)
for (final Iterator<CustomProperty> i = this.values().iterator(); codepage == -1 && i.hasNext();)
{
final CustomProperty cp = (CustomProperty) i.next();
final CustomProperty cp = i.next();
if (cp.getID() == PropertyIDMap.PID_CODEPAGE)
codepage = ((Integer) cp.getValue()).intValue();
}
@ -375,5 +418,4 @@ public class CustomProperties extends HashMap
{
this.isPure = isPure;
}
}

View File

@ -18,15 +18,13 @@
package org.apache.poi.hpsf;
/**
* <p>This class represents custum properties in the document summary
* <p>This class represents custom properties in the document summary
* information stream. The difference to normal properties is that custom
* properties have an optional name. If the name is not <code>null</code> it
* will be maintained in the section's dictionary.</p>
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2006-02-09
* @version $Id$
*/
public class CustomProperty extends MutableProperty
{

View File

@ -32,8 +32,6 @@ import org.apache.poi.hpsf.wellknown.SectionIDMap;
* @author Drew Varner (Drew.Varner closeTo sc.edu)
* @author robert_flaherty@hyperion.com
* @see SummaryInformation
* @version $Id$
* @since 2002-02-09
*/
public class DocumentSummaryInformation extends SpecialPropertySet
{
@ -571,7 +569,6 @@ public class DocumentSummaryInformation extends SpecialPropertySet
* <p>Gets the custom properties.</p>
*
* @return The custom properties.
* @since 2006-02-09
*/
public CustomProperties getCustomProperties()
{
@ -580,7 +577,7 @@ public class DocumentSummaryInformation extends SpecialPropertySet
{
cps = new CustomProperties();
final Section section = (Section) getSections().get(1);
final Map dictionary = section.getDictionary();
final Map<Long,String> dictionary = section.getDictionary();
final Property[] properties = section.getProperties();
int propertyCount = 0;
for (int i = 0; i < properties.length; i++)
@ -591,7 +588,7 @@ public class DocumentSummaryInformation extends SpecialPropertySet
{
propertyCount++;
final CustomProperty cp = new CustomProperty(p,
(String) dictionary.get(new Long(id)));
dictionary.get(Long.valueOf(id)));
cps.put(cp.getName(), cp);
}
}
@ -605,13 +602,12 @@ public class DocumentSummaryInformation extends SpecialPropertySet
* <p>Sets the custom properties.</p>
*
* @param customProperties The custom properties
* @since 2006-02-07
*/
public void setCustomProperties(final CustomProperties customProperties)
{
ensureSection2();
final MutableSection section = (MutableSection) getSections().get(1);
final Map dictionary = customProperties.getDictionary();
final Map<Long,String> dictionary = customProperties.getDictionary();
section.clear();
/* Set the codepage. If both custom properties and section have a
@ -625,9 +621,9 @@ public class DocumentSummaryInformation extends SpecialPropertySet
customProperties.setCodepage(cpCodepage);
section.setCodepage(cpCodepage);
section.setDictionary(dictionary);
for (final Iterator i = customProperties.values().iterator(); i.hasNext();)
for (final Iterator<CustomProperty> i = customProperties.values().iterator(); i.hasNext();)
{
final Property p = (Property) i.next();
final Property p = i.next();
section.setProperty(p);
}
}
@ -652,8 +648,6 @@ public class DocumentSummaryInformation extends SpecialPropertySet
/**
* <p>Removes the custom properties.</p>
*
* @since 2006-02-08
*/
public void removeCustomProperties()
{
@ -676,5 +670,4 @@ public class DocumentSummaryInformation extends SpecialPropertySet
{
throw new UnsupportedOperationException(msg + " is not yet implemented.");
}
}

View File

@ -24,8 +24,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-02-09
*/
public class HPSFException extends Exception
{

View File

@ -27,8 +27,6 @@ import java.io.PrintWriter;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-02-09
*/
public class HPSFRuntimeException extends RuntimeException
{

View File

@ -26,8 +26,6 @@ package org.apache.poi.hpsf;
* thrown.</p>
*
* @author Drew Varner(Drew.Varner atDomain sc.edu)
* @version $Id$
* @since 2002-05-26
*/
public class IllegalPropertySetDataException extends HPSFRuntimeException
{

View File

@ -25,8 +25,6 @@ import org.apache.poi.util.HexDump;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2004-06-21
* @version $Id$
*/
public class IllegalVariantTypeException extends VariantTypeException
{

View File

@ -23,8 +23,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-02-09
*/
public class MarkUnsupportedException extends HPSFException
{

View File

@ -26,8 +26,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id: NoSingleSectionException.java 353545 2004-04-09 13:05:39Z glens $
* @since 2006-02-08
*/
public class MissingSectionException extends HPSFRuntimeException
{

View File

@ -28,8 +28,6 @@ import java.io.OutputStream;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2003-08-03
* @version $Id$
*/
public class MutableProperty extends Property
{

View File

@ -33,8 +33,6 @@ import org.apache.poi.poifs.filesystem.Entry;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
/**
* <p>Adds writing support to the {@link PropertySet} class.</p>
*
@ -43,8 +41,6 @@ import org.apache.poi.util.LittleEndianConsts;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2003-02-19
*/
public class MutablePropertySet extends PropertySet
{
@ -236,10 +232,10 @@ public class MutablePropertySet extends PropertySet
catch (HPSFRuntimeException ex)
{
final Throwable cause = ex.getReason();
if (cause instanceof UnsupportedEncodingException)
if (cause instanceof UnsupportedEncodingException) {
throw new IllegalPropertySetDataException(cause);
else
throw ex;
}
throw ex;
}
}

View File

@ -37,9 +37,6 @@ import org.apache.poi.util.LittleEndian;
*
* <p>Please be aware that this class' functionality will be merged into the
* {@link Section} class at a later time, so the API will change.</p>
*
* @version $Id$
* @since 2002-02-20
*/
public class MutableSection extends Section
{
@ -56,7 +53,7 @@ public class MutableSection extends Section
* decision has been taken when specifying the "properties" field
* as an Property[]. It should have been a {@link java.util.List}.</p>
*/
private List preprops;
private List<Property> preprops;
@ -77,7 +74,7 @@ public class MutableSection extends Section
dirty = true;
formatID = null;
offset = -1;
preprops = new LinkedList();
preprops = new LinkedList<Property>();
}
@ -148,7 +145,7 @@ public class MutableSection extends Section
public void setProperties(final Property[] properties)
{
this.properties = properties;
preprops = new LinkedList();
preprops = new LinkedList<Property>();
for (int i = 0; i < properties.length; i++)
preprops.add(properties[i]);
dirty = true;
@ -185,7 +182,7 @@ public class MutableSection extends Section
*/
public void setProperty(final int id, final int value)
{
setProperty(id, Variant.VT_I4, new Integer(value));
setProperty(id, Variant.VT_I4, Integer.valueOf(value));
dirty = true;
}
@ -202,7 +199,7 @@ public class MutableSection extends Section
*/
public void setProperty(final int id, final long value)
{
setProperty(id, Variant.VT_I8, new Long(value));
setProperty(id, Variant.VT_I8, Long.valueOf(value));
dirty = true;
}
@ -219,7 +216,7 @@ public class MutableSection extends Section
*/
public void setProperty(final int id, final boolean value)
{
setProperty(id, Variant.VT_BOOL, new Boolean(value));
setProperty(id, Variant.VT_BOOL, Boolean.valueOf(value));
dirty = true;
}
@ -279,8 +276,8 @@ public class MutableSection extends Section
*/
public void removeProperty(final long id)
{
for (final Iterator i = preprops.iterator(); i.hasNext();)
if (((Property) i.next()).getID() == id)
for (final Iterator<Property> i = preprops.iterator(); i.hasNext();)
if (i.next().getID() == id)
{
i.remove();
break;
@ -303,7 +300,7 @@ public class MutableSection extends Section
*/
protected void setPropertyBooleanValue(final int id, final boolean value)
{
setProperty(id, Variant.VT_BOOL, new Boolean(value));
setProperty(id, Variant.VT_BOOL, Boolean.valueOf(value));
}
@ -421,17 +418,15 @@ public class MutableSection extends Section
* dictionary is present. In order to cope with this problem we
* add the codepage property and set it to Unicode. */
setProperty(PropertyIDMap.PID_CODEPAGE, Variant.VT_I2,
new Integer(Constants.CP_UNICODE));
Integer.valueOf(Constants.CP_UNICODE));
codepage = getCodepage();
}
/* Sort the property list by their property IDs: */
Collections.sort(preprops, new Comparator()
Collections.sort(preprops, new Comparator<Property>()
{
public int compare(final Object o1, final Object o2)
public int compare(final Property p1, final Property p2)
{
final Property p1 = (Property) o1;
final Property p2 = (Property) o2;
if (p1.getID() < p2.getID())
return -1;
else if (p1.getID() == p2.getID())
@ -443,7 +438,7 @@ public class MutableSection extends Section
/* Write the properties and the property list into their respective
* streams: */
for (final ListIterator i = preprops.listIterator(); i.hasNext();)
for (final ListIterator<Property> i = preprops.listIterator(); i.hasNext();)
{
final MutableProperty p = (MutableProperty) i.next();
final long id = p.getID();
@ -505,14 +500,14 @@ public class MutableSection extends Section
* @exception IOException if an I/O exception occurs.
*/
private static int writeDictionary(final OutputStream out,
final Map dictionary, final int codepage)
final Map<Long,String> dictionary, final int codepage)
throws IOException
{
int length = TypeWriter.writeUIntToStream(out, dictionary.size());
for (final Iterator i = dictionary.keySet().iterator(); i.hasNext();)
for (final Iterator<Long> i = dictionary.keySet().iterator(); i.hasNext();)
{
final Long key = (Long) i.next();
final String value = (String) dictionary.get(key);
final Long key = i.next();
final String value = dictionary.get(key);
if (codepage == Constants.CP_UNICODE)
{
@ -620,21 +615,11 @@ public class MutableSection extends Section
*
* @see Section#getDictionary()
*/
public void setDictionary(final Map dictionary)
public void setDictionary(final Map<Long,String> dictionary)
throws IllegalPropertySetDataException
{
if (dictionary != null)
{
for (final Iterator i = dictionary.keySet().iterator();
i.hasNext();)
if (!(i.next() instanceof Long))
throw new IllegalPropertySetDataException
("Dictionary keys must be of type Long.");
for (final Iterator i = dictionary.values().iterator();
i.hasNext();)
if (!(i.next() instanceof String))
throw new IllegalPropertySetDataException
("Dictionary values must be of type String.");
this.dictionary = dictionary;
/* Set the dictionary property (ID 0). Please note that the second
@ -649,7 +634,7 @@ public class MutableSection extends Section
(Integer) getProperty(PropertyIDMap.PID_CODEPAGE);
if (codepage == null)
setProperty(PropertyIDMap.PID_CODEPAGE, Variant.VT_I2,
new Integer(Constants.CP_UNICODE));
Integer.valueOf(Constants.CP_UNICODE));
}
else
/* Setting the dictionary to null means to remove property 0.
@ -710,7 +695,6 @@ public class MutableSection extends Section
public void setCodepage(final int codepage)
{
setProperty(PropertyIDMap.PID_CODEPAGE, Variant.VT_I2,
new Integer(codepage));
Integer.valueOf(codepage));
}
}

View File

@ -25,8 +25,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-09-03
*/
public class NoFormatIDException extends HPSFRuntimeException
{

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -27,8 +26,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-02-09
*/
public class NoPropertySetStreamException extends HPSFException
{

View File

@ -28,8 +28,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-02-09
*/
public class NoSingleSectionException extends HPSFRuntimeException
{

View File

@ -56,8 +56,6 @@ import org.apache.poi.util.POILogger;
* @author Drew Varner (Drew.Varner InAndAround sc.edu)
* @see Section
* @see Variant
* @version $Id$
* @since 2002-02-09
*/
public class Property
{
@ -219,7 +217,7 @@ public class Property
for (int i = 0; i < nrEntries; i++)
{
/* The key. */
final Long id = new Long(LittleEndian.getUInt(src, o));
final Long id = Long.valueOf(LittleEndian.getUInt(src, o));
o += LittleEndian.INT_SIZE;
/* The value (a string). The length is the either the
@ -344,8 +342,9 @@ public class Property
*/
public boolean equals(final Object o)
{
if (!(o instanceof Property))
if (!(o instanceof Property)) {
return false;
}
final Property p = (Property) o;
final Object pValue = p.getValue();
final long pId = p.getID();
@ -357,8 +356,8 @@ public class Property
return false;
/* It's clear now that both values are non-null. */
final Class valueClass = value.getClass();
final Class pValueClass = pValue.getClass();
final Class<?> valueClass = value.getClass();
final Class<?> pValueClass = pValue.getClass();
if (!(valueClass.isAssignableFrom(pValueClass)) &&
!(pValueClass.isAssignableFrom(valueClass)))
return false;
@ -375,10 +374,10 @@ public class Property
{
if (t1 == t2 ||
(t1 == Variant.VT_LPSTR && t2 == Variant.VT_LPWSTR) ||
(t2 == Variant.VT_LPSTR && t1 == Variant.VT_LPWSTR))
(t2 == Variant.VT_LPSTR && t1 == Variant.VT_LPWSTR)) {
return true;
else
return false;
}
return false;
}

View File

@ -57,8 +57,6 @@ import org.apache.poi.util.LittleEndian;
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @author Drew Varner (Drew.Varner hanginIn sc.edu)
* @version $Id$
* @since 2002-02-09
*/
public class PropertySet
{

View File

@ -30,8 +30,6 @@ import org.apache.poi.hpsf.wellknown.SectionIDMap;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-02-09
*/
public class PropertySetFactory
{

View File

@ -26,8 +26,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2003-08-08
* @version $Id$
*/
public class ReadingNotSupportedException
extends UnsupportedVariantTypeException

View File

@ -34,8 +34,6 @@ import org.apache.poi.util.LittleEndian;
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @author Drew Varner (Drew.Varner allUpIn sc.edu)
* @version $Id$
* @since 2002-02-09
*/
public class Section
{
@ -44,7 +42,7 @@ public class Section
* <p>Maps property IDs to section-private PID strings. These
* strings can be found in the property with ID 0.</p>
*/
protected Map dictionary;
protected Map<Long,String> dictionary;
/**
* <p>The section's format ID, {@link #getFormatID}.</p>
@ -210,7 +208,7 @@ public class Section
/* Pass 1: Read the property list. */
int pass1Offset = o1;
final List propertyList = new ArrayList(propertyCount);
final List<PropertyListEntry> propertyList = new ArrayList<PropertyListEntry>(propertyCount);
PropertyListEntry ple;
for (int i = 0; i < properties.length; i++)
{
@ -234,34 +232,22 @@ public class Section
/* Calculate the properties' lengths. */
for (int i = 0; i < propertyCount - 1; i++)
{
final PropertyListEntry ple1 =
(PropertyListEntry) propertyList.get(i);
final PropertyListEntry ple2 =
(PropertyListEntry) propertyList.get(i + 1);
PropertyListEntry ple1 = propertyList.get(i);
PropertyListEntry ple2 = propertyList.get(i + 1);
ple1.length = ple2.offset - ple1.offset;
}
if (propertyCount > 0)
{
ple = (PropertyListEntry) propertyList.get(propertyCount - 1);
ple = propertyList.get(propertyCount - 1);
ple.length = size - ple.offset;
if (ple.length <= 0)
{
final StringBuffer b = new StringBuffer();
b.append("The property set claims to have a size of ");
b.append(size);
b.append(" bytes. However, it exceeds ");
b.append(ple.offset);
b.append(" bytes.");
throw new IllegalPropertySetDataException(b.toString());
}
}
/* Look for the codepage. */
int codepage = -1;
for (final Iterator i = propertyList.iterator();
for (final Iterator<PropertyListEntry> i = propertyList.iterator();
codepage == -1 && i.hasNext();)
{
ple = (PropertyListEntry) i.next();
ple = i.next();
/* Read the codepage if the property ID is 1. */
if (ple.id == PropertyIDMap.PID_CODEPAGE)
@ -285,14 +271,14 @@ public class Section
/* Pass 2: Read all properties - including the codepage property,
* if available. */
int i1 = 0;
for (final Iterator i = propertyList.iterator(); i.hasNext();)
for (final Iterator<PropertyListEntry> i = propertyList.iterator(); i.hasNext();)
{
ple = (PropertyListEntry) i.next();
ple = i.next();
Property p = new Property(ple.id, src,
this.offset + ple.offset,
ple.length, codepage);
if (p.getID() == PropertyIDMap.PID_CODEPAGE)
p = new Property(p.getID(), p.getType(), new Integer(codepage));
p = new Property(p.getID(), p.getType(), Integer.valueOf(codepage));
properties[i1++] = p;
}
@ -308,7 +294,7 @@ public class Section
* <p>Represents an entry in the property list and holds a property's ID and
* its offset from the section's beginning.</p>
*/
class PropertyListEntry implements Comparable
class PropertyListEntry implements Comparable<PropertyListEntry>
{
int id;
int offset;
@ -321,11 +307,9 @@ public class Section
*
* @see Comparable#compareTo(java.lang.Object)
*/
public int compareTo(final Object o)
public int compareTo(final PropertyListEntry o)
{
if (!(o instanceof PropertyListEntry))
throw new ClassCastException(o.toString());
final int otherOffset = ((PropertyListEntry) o).offset;
final int otherOffset = o.offset;
if (offset < otherOffset)
return -1;
else if (offset == otherOffset)
@ -414,11 +398,11 @@ public class Section
protected boolean getPropertyBooleanValue(final int id)
{
final Boolean b = (Boolean) getProperty(id);
if (b != null)
return b.booleanValue();
else
if (b == null) {
return false;
}
return b.booleanValue();
}
@ -464,7 +448,7 @@ public class Section
{
String s = null;
if (dictionary != null)
s = (String) dictionary.get(new Long(pid));
s = (String) dictionary.get(Long.valueOf(pid));
if (s == null)
s = SectionIDMap.getPIDString(getFormatID().getBytes(), pid);
if (s == null)
@ -561,10 +545,10 @@ public class Section
dictionaryEqual = p10.getValue().equals(p20.getValue());
else if (p10 != null || p20 != null)
dictionaryEqual = false;
if (!dictionaryEqual)
return false;
else
if (dictionaryEqual) {
return Util.equals(pa1, pa2);
}
return false;
}
@ -644,7 +628,7 @@ public class Section
* @return the dictionary or <code>null</code> if the section does not have
* a dictionary.
*/
public Map getDictionary()
public Map<Long,String> getDictionary()
{
return dictionary;
}

View File

@ -53,8 +53,6 @@ import org.apache.poi.poifs.filesystem.DirectoryEntry;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-02-09
*/
public abstract class SpecialPropertySet extends MutablePropertySet
{

View File

@ -28,11 +28,8 @@ import org.apache.poi.hpsf.wellknown.PropertyIDMap;
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @see DocumentSummaryInformation
* @version $Id$
* @since 2002-02-09
*/
public class SummaryInformation extends SpecialPropertySet
{
public final class SummaryInformation extends SpecialPropertySet {
/**
* <p>The document name a summary information stream usually has in a POIFS
@ -363,10 +360,10 @@ public class SummaryInformation extends SpecialPropertySet
public long getEditTime()
{
final Date d = (Date) getProperty(PropertyIDMap.PID_EDITTIME);
if (d == null)
if (d == null) {
return 0;
else
return Util.dateToFileTime(d);
}
return Util.dateToFileTime(d);
}

View File

@ -24,11 +24,8 @@ import org.apache.poi.util.LittleEndian;
*
* @author Drew Varner (Drew.Varner inOrAround sc.edu)
* @see SummaryInformation#getThumbnail()
* @version $Id$
* @since 2002-04-29
*/
public class Thumbnail
{
public final class Thumbnail {
/**
* <p>Offset in bytes where the Clipboard Format Tag starts in the
@ -130,7 +127,7 @@ public class Thumbnail
* <p>A <code>byte[]</code> to hold a thumbnail image in ({@link
* Variant#VT_CF VT_CF}) format.</p>
*/
private byte[] thumbnailData = null;
private byte[] _thumbnailData = null;
@ -156,7 +153,7 @@ public class Thumbnail
*/
public Thumbnail(final byte[] thumbnailData)
{
this.thumbnailData = thumbnailData;
this._thumbnailData = thumbnailData;
}
@ -170,7 +167,7 @@ public class Thumbnail
*/
public byte[] getThumbnail()
{
return thumbnailData;
return _thumbnailData;
}
@ -184,7 +181,7 @@ public class Thumbnail
*/
public void setThumbnail(final byte[] thumbnail)
{
this.thumbnailData = thumbnail;
this._thumbnailData = thumbnail;
}
@ -263,21 +260,18 @@ public class Thumbnail
if (!(getClipboardFormatTag() == CFTAG_WINDOWS))
throw new HPSFException("Clipboard Format Tag of Thumbnail must " +
"be CFTAG_WINDOWS.");
if (!(getClipboardFormat() == CF_METAFILEPICT))
if (!(getClipboardFormat() == CF_METAFILEPICT)) {
throw new HPSFException("Clipboard Format of Thumbnail must " +
"be CF_METAFILEPICT.");
else
{
byte[] thumbnail = getThumbnail();
int wmfImageLength = thumbnail.length - OFFSET_WMFDATA;
byte[] wmfImage = new byte[wmfImageLength];
System.arraycopy(thumbnail,
OFFSET_WMFDATA,
wmfImage,
0,
wmfImageLength);
return wmfImage;
}
byte[] thumbnail = getThumbnail();
int wmfImageLength = thumbnail.length - OFFSET_WMFDATA;
byte[] wmfImage = new byte[wmfImageLength];
System.arraycopy(thumbnail,
OFFSET_WMFDATA,
wmfImage,
0,
wmfImageLength);
return wmfImage;
}
}

View File

@ -27,8 +27,6 @@ import org.apache.poi.util.LittleEndian;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2003-02-20
*/
public class TypeWriter
{

View File

@ -27,8 +27,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @version $Id$
* @since 2002-02-09
*/
public class UnexpectedPropertySetTypeException extends HPSFException
{

View File

@ -28,8 +28,6 @@ import org.apache.poi.util.HexDump;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2003-08-05
* @version $Id$
*/
public abstract class UnsupportedVariantTypeException
extends VariantTypeException

View File

@ -27,8 +27,6 @@ import java.util.Date;
* <p>Provides various static utility methods.</p>
*
* @author Rainer Klute (klute@rainer-klute.de)
* @version $Id$
* @since 2002-02-09
*/
public class Util
{
@ -213,10 +211,10 @@ public class Util
* @return <code>true</code> if the collections are equal, else
* <code>false</code>.
*/
public static boolean equals(final Collection c1, final Collection c2)
public static boolean equals(Collection<?> c1, Collection<?> c2)
{
final Object[] o1 = c1.toArray();
final Object[] o2 = c2.toArray();
Object[] o1 = c1.toArray();
Object[] o2 = c2.toArray();
return internalEquals(o1, o2);
}
@ -231,14 +229,14 @@ public class Util
* @return <code>true</code> if the object arrays are equal,
* <code>false</code> if they are not.
*/
public static boolean equals(final Object[] c1, final Object[] c2)
public static boolean equals(Object[] c1, Object[] c2)
{
final Object[] o1 = (Object[]) c1.clone();
final Object[] o2 = (Object[]) c2.clone();
final Object[] o1 = c1.clone();
final Object[] o2 = c2.clone();
return internalEquals(o1, o2);
}
private static boolean internalEquals(final Object[] o1, final Object[] o2)
private static boolean internalEquals(Object[] o1, Object[] o2)
{
for (int i1 = 0; i1 < o1.length; i1++)
{

View File

@ -34,8 +34,6 @@ import java.util.Map;
* <strong>[S]</strong> - may appear in a Safe Array.</p>
*
* @author Rainer Klute (klute@rainer-klute.de)
* @version $Id$
* @since 2002-02-09
*/
public class Variant
{
@ -353,32 +351,32 @@ public class Variant
/**
* <p>Denotes a variant type with a length that is unknown to HPSF yet.</p>
*/
public static final Integer LENGTH_UNKNOWN = new Integer(-2);
public static final Integer LENGTH_UNKNOWN = Integer.valueOf(-2);
/**
* <p>Denotes a variant type with a variable length.</p>
*/
public static final Integer LENGTH_VARIABLE = new Integer(-1);
public static final Integer LENGTH_VARIABLE = Integer.valueOf(-1);
/**
* <p>Denotes a variant type with a length of 0 bytes.</p>
*/
public static final Integer LENGTH_0 = new Integer(0);
public static final Integer LENGTH_0 = Integer.valueOf(0);
/**
* <p>Denotes a variant type with a length of 2 bytes.</p>
*/
public static final Integer LENGTH_2 = new Integer(2);
public static final Integer LENGTH_2 = Integer.valueOf(2);
/**
* <p>Denotes a variant type with a length of 4 bytes.</p>
*/
public static final Integer LENGTH_4 = new Integer(4);
public static final Integer LENGTH_4 = Integer.valueOf(4);
/**
* <p>Denotes a variant type with a length of 8 bytes.</p>
*/
public static final Integer LENGTH_8 = new Integer(8);
public static final Integer LENGTH_8 = Integer.valueOf(8);
@ -386,92 +384,92 @@ public class Variant
{
/* Initialize the number-to-name map: */
Map tm1 = new HashMap();
tm1.put(new Long(0), "VT_EMPTY");
tm1.put(new Long(1), "VT_NULL");
tm1.put(new Long(2), "VT_I2");
tm1.put(new Long(3), "VT_I4");
tm1.put(new Long(4), "VT_R4");
tm1.put(new Long(5), "VT_R8");
tm1.put(new Long(6), "VT_CY");
tm1.put(new Long(7), "VT_DATE");
tm1.put(new Long(8), "VT_BSTR");
tm1.put(new Long(9), "VT_DISPATCH");
tm1.put(new Long(10), "VT_ERROR");
tm1.put(new Long(11), "VT_BOOL");
tm1.put(new Long(12), "VT_VARIANT");
tm1.put(new Long(13), "VT_UNKNOWN");
tm1.put(new Long(14), "VT_DECIMAL");
tm1.put(new Long(16), "VT_I1");
tm1.put(new Long(17), "VT_UI1");
tm1.put(new Long(18), "VT_UI2");
tm1.put(new Long(19), "VT_UI4");
tm1.put(new Long(20), "VT_I8");
tm1.put(new Long(21), "VT_UI8");
tm1.put(new Long(22), "VT_INT");
tm1.put(new Long(23), "VT_UINT");
tm1.put(new Long(24), "VT_VOID");
tm1.put(new Long(25), "VT_HRESULT");
tm1.put(new Long(26), "VT_PTR");
tm1.put(new Long(27), "VT_SAFEARRAY");
tm1.put(new Long(28), "VT_CARRAY");
tm1.put(new Long(29), "VT_USERDEFINED");
tm1.put(new Long(30), "VT_LPSTR");
tm1.put(new Long(31), "VT_LPWSTR");
tm1.put(new Long(64), "VT_FILETIME");
tm1.put(new Long(65), "VT_BLOB");
tm1.put(new Long(66), "VT_STREAM");
tm1.put(new Long(67), "VT_STORAGE");
tm1.put(new Long(68), "VT_STREAMED_OBJECT");
tm1.put(new Long(69), "VT_STORED_OBJECT");
tm1.put(new Long(70), "VT_BLOB_OBJECT");
tm1.put(new Long(71), "VT_CF");
tm1.put(new Long(72), "VT_CLSID");
tm1.put(Long.valueOf(0), "VT_EMPTY");
tm1.put(Long.valueOf(1), "VT_NULL");
tm1.put(Long.valueOf(2), "VT_I2");
tm1.put(Long.valueOf(3), "VT_I4");
tm1.put(Long.valueOf(4), "VT_R4");
tm1.put(Long.valueOf(5), "VT_R8");
tm1.put(Long.valueOf(6), "VT_CY");
tm1.put(Long.valueOf(7), "VT_DATE");
tm1.put(Long.valueOf(8), "VT_BSTR");
tm1.put(Long.valueOf(9), "VT_DISPATCH");
tm1.put(Long.valueOf(10), "VT_ERROR");
tm1.put(Long.valueOf(11), "VT_BOOL");
tm1.put(Long.valueOf(12), "VT_VARIANT");
tm1.put(Long.valueOf(13), "VT_UNKNOWN");
tm1.put(Long.valueOf(14), "VT_DECIMAL");
tm1.put(Long.valueOf(16), "VT_I1");
tm1.put(Long.valueOf(17), "VT_UI1");
tm1.put(Long.valueOf(18), "VT_UI2");
tm1.put(Long.valueOf(19), "VT_UI4");
tm1.put(Long.valueOf(20), "VT_I8");
tm1.put(Long.valueOf(21), "VT_UI8");
tm1.put(Long.valueOf(22), "VT_INT");
tm1.put(Long.valueOf(23), "VT_UINT");
tm1.put(Long.valueOf(24), "VT_VOID");
tm1.put(Long.valueOf(25), "VT_HRESULT");
tm1.put(Long.valueOf(26), "VT_PTR");
tm1.put(Long.valueOf(27), "VT_SAFEARRAY");
tm1.put(Long.valueOf(28), "VT_CARRAY");
tm1.put(Long.valueOf(29), "VT_USERDEFINED");
tm1.put(Long.valueOf(30), "VT_LPSTR");
tm1.put(Long.valueOf(31), "VT_LPWSTR");
tm1.put(Long.valueOf(64), "VT_FILETIME");
tm1.put(Long.valueOf(65), "VT_BLOB");
tm1.put(Long.valueOf(66), "VT_STREAM");
tm1.put(Long.valueOf(67), "VT_STORAGE");
tm1.put(Long.valueOf(68), "VT_STREAMED_OBJECT");
tm1.put(Long.valueOf(69), "VT_STORED_OBJECT");
tm1.put(Long.valueOf(70), "VT_BLOB_OBJECT");
tm1.put(Long.valueOf(71), "VT_CF");
tm1.put(Long.valueOf(72), "VT_CLSID");
Map tm2 = new HashMap(tm1.size(), 1.0F);
tm2.putAll(tm1);
numberToName = Collections.unmodifiableMap(tm2);
/* Initialize the number-to-length map: */
tm1.clear();
tm1.put(new Long(0), LENGTH_0);
tm1.put(new Long(1), LENGTH_UNKNOWN);
tm1.put(new Long(2), LENGTH_2);
tm1.put(new Long(3), LENGTH_4);
tm1.put(new Long(4), LENGTH_4);
tm1.put(new Long(5), LENGTH_8);
tm1.put(new Long(6), LENGTH_UNKNOWN);
tm1.put(new Long(7), LENGTH_UNKNOWN);
tm1.put(new Long(8), LENGTH_UNKNOWN);
tm1.put(new Long(9), LENGTH_UNKNOWN);
tm1.put(new Long(10), LENGTH_UNKNOWN);
tm1.put(new Long(11), LENGTH_UNKNOWN);
tm1.put(new Long(12), LENGTH_UNKNOWN);
tm1.put(new Long(13), LENGTH_UNKNOWN);
tm1.put(new Long(14), LENGTH_UNKNOWN);
tm1.put(new Long(16), LENGTH_UNKNOWN);
tm1.put(new Long(17), LENGTH_UNKNOWN);
tm1.put(new Long(18), LENGTH_UNKNOWN);
tm1.put(new Long(19), LENGTH_UNKNOWN);
tm1.put(new Long(20), LENGTH_UNKNOWN);
tm1.put(new Long(21), LENGTH_UNKNOWN);
tm1.put(new Long(22), LENGTH_UNKNOWN);
tm1.put(new Long(23), LENGTH_UNKNOWN);
tm1.put(new Long(24), LENGTH_UNKNOWN);
tm1.put(new Long(25), LENGTH_UNKNOWN);
tm1.put(new Long(26), LENGTH_UNKNOWN);
tm1.put(new Long(27), LENGTH_UNKNOWN);
tm1.put(new Long(28), LENGTH_UNKNOWN);
tm1.put(new Long(29), LENGTH_UNKNOWN);
tm1.put(new Long(30), LENGTH_VARIABLE);
tm1.put(new Long(31), LENGTH_UNKNOWN);
tm1.put(new Long(64), LENGTH_8);
tm1.put(new Long(65), LENGTH_UNKNOWN);
tm1.put(new Long(66), LENGTH_UNKNOWN);
tm1.put(new Long(67), LENGTH_UNKNOWN);
tm1.put(new Long(68), LENGTH_UNKNOWN);
tm1.put(new Long(69), LENGTH_UNKNOWN);
tm1.put(new Long(70), LENGTH_UNKNOWN);
tm1.put(new Long(71), LENGTH_UNKNOWN);
tm1.put(new Long(72), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(0), LENGTH_0);
tm1.put(Long.valueOf(1), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(2), LENGTH_2);
tm1.put(Long.valueOf(3), LENGTH_4);
tm1.put(Long.valueOf(4), LENGTH_4);
tm1.put(Long.valueOf(5), LENGTH_8);
tm1.put(Long.valueOf(6), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(7), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(8), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(9), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(10), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(11), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(12), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(13), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(14), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(16), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(17), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(18), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(19), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(20), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(21), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(22), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(23), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(24), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(25), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(26), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(27), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(28), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(29), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(30), LENGTH_VARIABLE);
tm1.put(Long.valueOf(31), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(64), LENGTH_8);
tm1.put(Long.valueOf(65), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(66), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(67), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(68), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(69), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(70), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(71), LENGTH_UNKNOWN);
tm1.put(Long.valueOf(72), LENGTH_UNKNOWN);
tm2 = new HashMap(tm1.size(), 1.0F);
tm2.putAll(tm1);
numberToLength = Collections.unmodifiableMap(tm2);
@ -488,7 +486,7 @@ public class Variant
*/
public static String getVariantName(final long variantType)
{
final String name = (String) numberToName.get(new Long(variantType));
final String name = (String) numberToName.get(Long.valueOf(variantType));
return name != null ? name : "unknown variant type";
}
@ -503,7 +501,7 @@ public class Variant
*/
public static int getVariantLength(final long variantType)
{
final Long key = new Long((int) variantType);
final Long key = Long.valueOf((int) variantType);
final Long length = (Long) numberToLength.get(key);
if (length == null)
return -2;

View File

@ -45,8 +45,6 @@ import org.apache.poi.util.LittleEndianConsts;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2003-08-08
* @version $Id$
*/
public class VariantSupport extends Variant
{
@ -99,7 +97,7 @@ public class VariantSupport extends Variant
{
if (unsupportedMessage == null)
unsupportedMessage = new LinkedList();
Long vt = new Long(ex.getVariantType());
Long vt = Long.valueOf(ex.getVariantType());
if (!unsupportedMessage.contains(vt))
{
System.err.println(ex.getMessage());
@ -184,7 +182,7 @@ public class VariantSupport extends Variant
* Read a short. In Java it is represented as an
* Integer object.
*/
value = new Integer(LittleEndian.getShort(src, o1));
value = Integer.valueOf(LittleEndian.getShort(src, o1));
break;
}
case Variant.VT_I4:
@ -193,7 +191,7 @@ public class VariantSupport extends Variant
* Read a word. In Java it is represented as an
* Integer object.
*/
value = new Integer(LittleEndian.getInt(src, o1));
value = Integer.valueOf(LittleEndian.getInt(src, o1));
break;
}
case Variant.VT_I8:
@ -202,7 +200,7 @@ public class VariantSupport extends Variant
* Read a double word. In Java it is represented as a
* Long object.
*/
value = new Long(LittleEndian.getLong(src, o1));
value = Long.valueOf(LittleEndian.getLong(src, o1));
break;
}
case Variant.VT_R8:
@ -274,9 +272,20 @@ public class VariantSupport extends Variant
}
case Variant.VT_CF:
{
if(l1 < 0) {
/**
* YK: reading the ClipboardData packet (VT_CF) is not quite correct.
* The size of the data is determined by the first four bytes of the packet
* while the current implementation calculates it in the Section constructor.
* Test files in Bugzilla 42726 and 45583 clearly show that this approach does not always work.
* The workaround below attempts to gracefully handle such cases instead of throwing exceptions.
*
* August 20, 2009
*/
l1 = LittleEndian.getInt(src, o1); o1 += LittleEndian.INT_SIZE;
}
final byte[] v = new byte[l1];
for (int i = 0; i < l1; i++)
v[i] = src[(o1 + i)];
System.arraycopy(src, o1, v, 0, v.length);
value = v;
break;
}
@ -584,5 +593,4 @@ public class VariantSupport extends Variant
return length;
}
}

View File

@ -23,8 +23,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2004-06-21
* @version $Id$
*/
public abstract class VariantTypeException extends HPSFException
{

View File

@ -26,8 +26,6 @@ package org.apache.poi.hpsf;
*
* @author Rainer Klute <a
* href="mailto:klute@rainer-klute.de">&lt;klute@rainer-klute.de&gt;</a>
* @since 2003-08-08
* @version $Id$
*/
public class WritingNotSupportedException
extends UnsupportedVariantTypeException

View File

@ -14,8 +14,11 @@
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.hpsf.extractor;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Iterator;
@ -28,6 +31,7 @@ import org.apache.poi.hpsf.Property;
import org.apache.poi.hpsf.SpecialPropertySet;
import org.apache.poi.hpsf.SummaryInformation;
import org.apache.poi.hpsf.wellknown.PropertyIDMap;
import org.apache.poi.poifs.filesystem.NPOIFSFileSystem;
import org.apache.poi.poifs.filesystem.POIFSFileSystem;
import org.apache.poi.util.LittleEndian;
@ -46,6 +50,9 @@ public class HPSFPropertiesExtractor extends POITextExtractor {
public HPSFPropertiesExtractor(POIFSFileSystem fs) {
super(new PropertiesOnlyDocument(fs));
}
public HPSFPropertiesExtractor(NPOIFSFileSystem fs) {
super(new PropertiesOnlyDocument(fs));
}
public String getDocumentSummaryInformationText() {
DocumentSummaryInformation dsi = document.getDocumentSummaryInformation();
@ -55,12 +62,14 @@ public class HPSFPropertiesExtractor extends POITextExtractor {
text.append( getPropertiesText(dsi) );
// Now custom ones
CustomProperties cps = dsi.getCustomProperties();
Iterator keys = cps.keySet().iterator();
while(keys.hasNext()) {
String key = (String)keys.next();
String val = getPropertyValueText( cps.get(key) );
text.append(key + " = " + val + "\n");
CustomProperties cps = dsi == null ? null : dsi.getCustomProperties();
if(cps != null) {
Iterator<String> keys = cps.nameSet().iterator();
while(keys.hasNext()) {
String key = keys.next();
String val = getPropertyValueText( cps.get(key) );
text.append(key + " = " + val + "\n");
}
}
// All done
@ -121,7 +130,7 @@ public class HPSFPropertiesExtractor extends POITextExtractor {
}
/**
* Return the text of all the properties defined in
* @return the text of all the properties defined in
* the document.
*/
public String getText() {
@ -139,13 +148,25 @@ public class HPSFPropertiesExtractor extends POITextExtractor {
* So we can get at the properties of any
* random OLE2 document.
*/
private static class PropertiesOnlyDocument extends POIDocument {
private PropertiesOnlyDocument(POIFSFileSystem fs) {
private static final class PropertiesOnlyDocument extends POIDocument {
public PropertiesOnlyDocument(NPOIFSFileSystem fs) {
super(fs.getRoot());
}
public PropertiesOnlyDocument(POIFSFileSystem fs) {
super(fs);
}
public void write(OutputStream out) throws IOException {
public void write(OutputStream out) {
throw new IllegalStateException("Unable to write, only for properties!");
}
}
public static void main(String[] args) throws IOException {
for(String file : args) {
HPSFPropertiesExtractor ext = new HPSFPropertiesExtractor(
new NPOIFSFileSystem(new File(file))
);
System.out.println(ext.getText());
}
}
}

View File

@ -73,7 +73,7 @@ public final class StyleRecord extends Record {
byte[] string = in.readRemainder();
if (fHighByte.isSet(field_3_string_options)) {
field_4_name= StringUtil.getFromUnicodeBE(string, 0, field_2_name_length);
field_4_name= StringUtil.getFromUnicodeLE(string, 0, field_2_name_length);
} else {
field_4_name=StringUtil.getFromCompressedUnicode(string, 0, field_2_name_length);
}

View File

@ -249,7 +249,25 @@ public class HSSFWorkbook extends POIDocument
public HSSFWorkbook(DirectoryNode directory, POIFSFileSystem fs, boolean preserveNodes)
throws IOException
{
super(directory, fs);
this(directory, preserveNodes);
}
/**
* given a POI POIFSFileSystem object, and a specific directory
* within it, read in its Workbook and populate the high and
* low level models. If you're reading in a workbook...start here.
*
* @param directory the POI filesystem directory to process from
* @param preserveNodes whether to preseve other nodes, such as
* macros. This takes more memory, so only say yes if you
* need to. If set, will store all of the POIFSFileSystem
* in memory
* @see org.apache.poi.poifs.filesystem.POIFSFileSystem
* @exception IOException if the stream cannot be read
*/
public HSSFWorkbook(DirectoryNode directory, boolean preserveNodes)
throws IOException
{
super(directory);
String workbookName = getWorkbookDirEntryName(directory);
this.preserveNodes = preserveNodes;
@ -257,7 +275,6 @@ public class HSSFWorkbook extends POIDocument
// If we're not preserving nodes, don't track the
// POIFS any more
if(! preserveNodes) {
this.filesystem = null;
this.directory = null;
}
@ -1163,7 +1180,7 @@ public class HSSFWorkbook extends POIDocument
excepts.add("WORKBOOK");
// Copy over all the other nodes to our new poifs
copyNodes(this.filesystem,fs,excepts);
copyNodes(this.directory.getFileSystem(),fs,excepts);
}
fs.writeFilesystem(stream);
//poifs.writeFilesystem(stream);
@ -1664,7 +1681,7 @@ public class HSSFWorkbook extends POIDocument
Object sub = subRecordIter.next();
if (sub instanceof EmbeddedObjectRefSubRecord)
{
objects.add(new HSSFObjectData((ObjRecord) obj, filesystem));
objects.add(new HSSFObjectData((ObjRecord) obj, this.directory.getFileSystem()));
}
}
}

View File

@ -0,0 +1,64 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.common;
import org.apache.poi.util.LittleEndianConsts;
/**
 * <p>A class describing attributes of the Big Block Size of a POIFS
 * (OLE2 / Compound Document) file. Files normally use 512 byte blocks,
 * but 4096 byte blocks are also permitted by the format.</p>
 */
public final class POIFSBigBlockSize
{
   /** The size of a single big block, in bytes (normally 512, sometimes 4096). */
   private final int bigBlockSize;
   /** The power-of-two exponent of the block size, as written to the file header. */
   private final short headerValue;

   /**
    * Creates a block-size descriptor.
    *
    * @param bigBlockSize the block size in bytes
    * @param headerValue  the power of two corresponding to the size,
    *                     as stored in the header (eg 512 =&gt; 9)
    */
   protected POIFSBigBlockSize(int bigBlockSize, short headerValue) {
      this.bigBlockSize = bigBlockSize;
      this.headerValue = headerValue;
   }

   /**
    * @return the size of a big block, in bytes
    */
   public int getBigBlockSize() {
      return bigBlockSize;
   }

   /**
    * Returns the value that gets written into the
    *  header.
    * Is the power of two that corresponds to the
    *  size of the block, eg 512 =&gt; 9
    */
   public short getHeaderValue() {
      return headerValue;
   }

   /**
    * @return the number of directory property entries that fit in one block
    */
   public int getPropertiesPerBlock() {
      return bigBlockSize / POIFSConstants.PROPERTY_SIZE;
   }

   /**
    * @return the number of 4-byte BAT (FAT) entries that fit in one block
    */
   public int getBATEntriesPerBlock() {
      return bigBlockSize / LittleEndianConsts.INT_SIZE;
   }

   /**
    * @return the number of XBAT (DIFAT) entries per block; one entry is
    *         reserved for the chain pointer to the next XBAT block
    */
   public int getXBATEntriesPerBlock() {
      return getBATEntriesPerBlock() - 1;
   }

   /**
    * @return the byte offset, within an XBAT block, of the pointer to
    *         the next XBAT block in the chain
    */
   public int getNextXBATChainOffset() {
      return getXBATEntriesPerBlock() * LittleEndianConsts.INT_SIZE;
   }
}

View File

@ -21,21 +21,48 @@ package org.apache.poi.poifs.common;
/**
* <p>A repository for constants shared by POI classes.</p>
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public interface POIFSConstants
{
/** Most files use 512 bytes as their big block size */
public static final int BIG_BLOCK_SIZE = 0x0200;
public static final int SMALLER_BIG_BLOCK_SIZE = 0x0200;
public static final POIFSBigBlockSize SMALLER_BIG_BLOCK_SIZE_DETAILS =
new POIFSBigBlockSize(SMALLER_BIG_BLOCK_SIZE, (short)9);
/** Some use 4096 bytes */
public static final int LARGER_BIG_BLOCK_SIZE = 0x1000;
public static final POIFSBigBlockSize LARGER_BIG_BLOCK_SIZE_DETAILS =
new POIFSBigBlockSize(LARGER_BIG_BLOCK_SIZE, (short)12);
public static final int END_OF_CHAIN = -2;
/** How big a block in the small block stream is. Fixed size */
public static final int SMALL_BLOCK_SIZE = 0x0040;
/** How big a single property is */
public static final int PROPERTY_SIZE = 0x0080;
/**
* The minimum size of a document before it's stored using
* Big Blocks (normal streams). Smaller documents go in the
* Mini Stream (SBAT / Small Blocks)
*/
public static final int BIG_BLOCK_MINIMUM_DOCUMENT_SIZE = 0x1000;
/** The highest sector number you're allowed, 0xFFFFFFFA */
public static final int LARGEST_REGULAR_SECTOR_NUMBER = -5;
/** Indicates the sector holds a DIFAT block (0xFFFFFFFC) */
public static final int DIFAT_SECTOR_BLOCK = -4;
/** Indicates the sector holds a FAT block (0xFFFFFFFD) */
public static final int FAT_SECTOR_BLOCK = -3;
/** Indicates the sector is the end of a chain (0xFFFFFFFE) */
public static final int END_OF_CHAIN = -2;
/** Indicates the sector is not used (0xFFFFFFFF) */
public static final int UNUSED_BLOCK = -1;
/** The first 4 bytes of an OOXML file, used in detection */
public static final byte[] OOXML_FILE_HEADER =
new byte[] { 0x50, 0x4b, 0x03, 0x04 };
/** HACKY: For backwards compatibility on 3.2 */
public static final int BIG_BLOCK_SIZE = SMALLER_BIG_BLOCK_SIZE;
} // end public interface POIFSConstants;

View File

@ -0,0 +1,152 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.dev;
import java.io.FileInputStream;
import java.io.InputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.property.PropertyTable;
import org.apache.poi.poifs.storage.BlockAllocationTableReader;
import org.apache.poi.poifs.storage.BlockList;
import org.apache.poi.poifs.storage.HeaderBlock;
import org.apache.poi.poifs.storage.ListManagedBlock;
import org.apache.poi.poifs.storage.RawDataBlockList;
import org.apache.poi.poifs.storage.SmallBlockTableReader;
import org.apache.poi.util.HexDump;
import org.apache.poi.util.IntList;
/**
 * A very low level debugging tool, for printing out core
 *  information on the headers and FAT blocks.
 * You probably only want to use this if you're trying
 *  to understand POIFS, or if you're trying to track
 *  down the source of corruption in a file.
 */
public class POIFSHeaderDumper {
   /**
    * Display the entries of multiple POIFS files
    *
    * @param args the names of the files to be displayed
    */
   public static void main(final String args[]) throws Exception {
      if (args.length == 0) {
         System.err.println("Must specify at least one file to view");
         System.exit(1);
      }

      for (int j = 0; j < args.length; j++) {
         viewFile(args[j]);
      }
   }

   /**
    * Opens the given file and dumps its header, a summary of its raw
    *  blocks, and its FAT, to System.out.
    *
    * @param filename the file to inspect
    */
   public static void viewFile(final String filename) throws Exception {
      InputStream inp = new FileInputStream(filename);
      try {
         // Header
         HeaderBlock header_block = new HeaderBlock(inp);
         displayHeader(header_block);

         // Raw blocks
         POIFSBigBlockSize bigBlockSize = header_block.getBigBlockSize();
         RawDataBlockList data_blocks = new RawDataBlockList(inp, bigBlockSize);
         displayRawBlocksSummary(data_blocks);

         // Main FAT Table
         BlockAllocationTableReader batReader =
            new BlockAllocationTableReader(
               header_block.getBigBlockSize(),
               header_block.getBATCount(),
               header_block.getBATArray(),
               header_block.getXBATCount(),
               header_block.getXBATIndex(),
               data_blocks);
         displayBATReader(batReader);

         // Properties Table
         PropertyTable properties =
            new PropertyTable(header_block, data_blocks);

         // Mini FAT - read so that a corrupt one is reported, the
         //  resulting block list itself is not displayed here
         SmallBlockTableReader.getSmallDocumentBlocks(
               bigBlockSize, data_blocks, properties.getRoot(),
               header_block.getSBATStart()
         );
      } finally {
         // Was previously leaked - always release the file handle
         inp.close();
      }
   }

   /**
    * Prints the key fields of the file header (block size, BAT/XBAT/SBAT
    *  counts and locations, property table start).
    */
   public static void displayHeader(HeaderBlock header_block) throws Exception {
      System.out.println("Header Details:");
      System.out.println(" Block size: " + header_block.getBigBlockSize().getBigBlockSize());
      System.out.println(" BAT (FAT) header blocks: " + header_block.getBATArray().length);
      System.out.println(" BAT (FAT) block count: " + header_block.getBATCount());
      System.out.println(" XBAT (FAT) block count: " + header_block.getXBATCount());
      System.out.println(" XBAT (FAT) block 1 at: " + header_block.getXBATIndex());
      System.out.println(" SBAT (MiniFAT) block count: " + header_block.getSBATCount());
      System.out.println(" SBAT (MiniFAT) block 1 at: " + header_block.getSBATStart());
      System.out.println(" Property table at: " + header_block.getPropertyStart());
      System.out.println("");
   }

   /**
    * Prints the block count, plus a hex dump of the start of the first
    *  few raw blocks. Uses reflection because the block accessor on
    *  the list is not public.
    */
   public static void displayRawBlocksSummary(RawDataBlockList data_blocks) throws Exception {
      System.out.println("Raw Blocks Details:");
      System.out.println(" Number of blocks: " + data_blocks.blockCount());

      Method gbm = data_blocks.getClass().getSuperclass().getDeclaredMethod("get", int.class);
      gbm.setAccessible(true);

      for(int i=0; i<Math.min(16, data_blocks.blockCount()); i++) {
         ListManagedBlock block = (ListManagedBlock)gbm.invoke(data_blocks, Integer.valueOf(i));
         // Only show the first 48 bytes of each block
         byte[] data = new byte[Math.min(48, block.getData().length)];
         System.arraycopy(block.getData(), 0, data, 0, data.length);

         System.out.println(" Block #" + i + ":");
         System.out.println(HexDump.dump(data, 0, 0));
      }

      System.out.println("");
   }

   /**
    * Prints every FAT entry, translating the special sentinel values
    *  (end of chain, DIFAT, FAT, unused) into readable names. Uses
    *  reflection because the entry list field is private.
    */
   public static void displayBATReader(BlockAllocationTableReader batReader) throws Exception {
      System.out.println("Sectors, as referenced from the FAT:");
      Field entriesF = batReader.getClass().getDeclaredField("_entries");
      entriesF.setAccessible(true);
      IntList entries = (IntList)entriesF.get(batReader);

      for(int i=0; i<entries.size(); i++) {
         int bn = entries.get(i);
         String bnS = Integer.toString(bn);
         if(bn == POIFSConstants.END_OF_CHAIN) {
            bnS = "End Of Chain";
         } else if(bn == POIFSConstants.DIFAT_SECTOR_BLOCK) {
            bnS = "DI Fat Block";
         } else if(bn == POIFSConstants.FAT_SECTOR_BLOCK) {
            bnS = "Normal Fat Block";
         } else if(bn == POIFSConstants.UNUSED_BLOCK) {
            bnS = "Block Not Used (Free)";
         }
         System.out.println(" Block # " + i + " -> " + bnS);
      }

      System.out.println("");
   }
}

View File

@ -17,14 +17,15 @@
package org.apache.poi.poifs.dev;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Iterator;
import org.apache.poi.poifs.filesystem.DirectoryEntry;
import org.apache.poi.poifs.filesystem.DirectoryNode;
import org.apache.poi.poifs.filesystem.DocumentEntry;
import org.apache.poi.poifs.filesystem.DocumentNode;
import org.apache.poi.poifs.filesystem.Entry;
import org.apache.poi.poifs.filesystem.NPOIFSFileSystem;
import org.apache.poi.poifs.filesystem.POIFSFileSystem;
/**
@ -33,66 +34,71 @@ import org.apache.poi.poifs.filesystem.POIFSFileSystem;
* Much simpler than {@link POIFSViewer}
*/
public class POIFSLister {
/**
* Display the entries of multiple POIFS files
*
* @param args the names of the files to be displayed
*/
public static void main(final String args[]) throws IOException {
if (args.length == 0)
{
System.err.println("Must specify at least one file to view");
System.exit(1);
}
/**
* Display the entries of multiple POIFS files
*
* @param args the names of the files to be displayed
*/
public static void main(final String args[]) throws IOException {
if (args.length == 0) {
System.err.println("Must specify at least one file to view");
System.exit(1);
}
boolean withSizes = false;
for (int j = 0; j < args.length; j++) {
if(args[j].equalsIgnoreCase("-size") ||
args[j].equalsIgnoreCase("-sizes")) {
withSizes = true;
} else {
viewFile(args[j], withSizes);
}
}
}
boolean withSizes = false;
boolean newPOIFS = true;
for (int j = 0; j < args.length; j++) {
if (args[j].equalsIgnoreCase("-size") || args[j].equalsIgnoreCase("-sizes")) {
withSizes = true;
} else if (args[j].equalsIgnoreCase("-old") || args[j].equalsIgnoreCase("-old-poifs")) {
newPOIFS = false;
} else {
if(newPOIFS) {
viewFile(args[j], withSizes);
} else {
viewFileOld(args[j], withSizes);
}
}
}
}
public static void viewFile(final String filename, boolean withSizes) throws IOException
{
POIFSFileSystem fs = new POIFSFileSystem(
new FileInputStream(filename)
);
displayDirectory(fs.getRoot(), "", withSizes);
}
/**
 * Lists the entries of one file using the new NIO-backed
 * {@code NPOIFSFileSystem}, starting from the root directory.
 */
public static void viewFile(final String filename, boolean withSizes) throws IOException {
    File file = new File(filename);
    displayDirectory(new NPOIFSFileSystem(file).getRoot(), "", withSizes);
}
public static void displayDirectory(DirectoryNode dir, String indent, boolean withSizes) {
System.out.println(indent + dir.getName() + " -");
String newIndent = indent + " ";
/**
 * Lists the entries of one file using the original stream-based
 * {@code POIFSFileSystem}, starting from the root directory.
 */
public static void viewFileOld(final String filename, boolean withSizes) throws IOException {
    POIFSFileSystem oldFs = new POIFSFileSystem(new FileInputStream(filename));
    displayDirectory(oldFs.getRoot(), "", withSizes);
}
boolean hadChildren = false;
for(Iterator it = dir.getEntries(); it.hasNext(); ) {
hadChildren = true;
Object entry = it.next();
if(entry instanceof DirectoryNode) {
displayDirectory((DirectoryNode)entry, newIndent, withSizes);
} else {
DocumentNode doc = (DocumentNode)entry;
String name = doc.getName();
String size = "";
if(name.charAt(0) < 10) {
String altname = "(0x0" + (int)name.charAt(0) + ")" + name.substring(1);
name = name.substring(1) + " <" + altname + ">";
}
if(withSizes) {
size = " [" +
doc.getSize() + " / 0x" +
Integer.toHexString(doc.getSize()) +
"]";
}
System.out.println(newIndent + name + size);
}
}
if(!hadChildren) {
System.out.println(newIndent + "(no children)");
}
}
/**
 * Recursively prints one directory level: the directory's own name,
 * then each child - directories recurse, documents print their name
 * and, optionally, their size in decimal and hex.
 */
public static void displayDirectory(DirectoryNode dir, String indent, boolean withSizes) {
    System.out.println(indent + dir.getName() + " -");
    String childIndent = indent + "  ";

    boolean seenAny = false;
    Iterator<Entry> entries = dir.getEntries();
    while (entries.hasNext()) {
        seenAny = true;
        Entry entry = entries.next();
        if (entry instanceof DirectoryNode) {
            displayDirectory((DirectoryNode) entry, childIndent, withSizes);
            continue;
        }

        DocumentNode document = (DocumentNode) entry;
        String name = document.getName();
        // Stream names often begin with a control character; show a
        // printable form alongside the raw remainder of the name
        if (name.charAt(0) < 10) {
            String altname = "(0x0" + (int) name.charAt(0) + ")" + name.substring(1);
            name = name.substring(1) + " <" + altname + ">";
        }
        String size = "";
        if (withSizes) {
            size = " [" + document.getSize() + " / 0x" +
                   Integer.toHexString(document.getSize()) + "]";
        }
        System.out.println(childIndent + name + size);
    }
    if (!seenAny) {
        System.out.println(childIndent + "(no children)");
    }
}
}

View File

@ -54,7 +54,7 @@ public interface POIFSViewable
* @return an Iterator; may not be null, but may have an empty
* back end store
*/
@SuppressWarnings("unchecked")
public Iterator getViewableIterator();
/**

View File

@ -31,7 +31,7 @@ import org.apache.poi.poifs.property.Property;
import org.apache.poi.poifs.property.PropertyTable;
import org.apache.poi.poifs.storage.BlockAllocationTableReader;
import org.apache.poi.poifs.storage.BlockList;
import org.apache.poi.poifs.storage.HeaderBlockReader;
import org.apache.poi.poifs.storage.HeaderBlock;
import org.apache.poi.poifs.storage.RawDataBlockList;
import org.apache.poi.poifs.storage.SmallBlockTableReader;
@ -75,29 +75,31 @@ public class POIFSReader
registryClosed = true;
// read the header block from the stream
HeaderBlockReader header_block_reader = new HeaderBlockReader(stream);
HeaderBlock header_block = new HeaderBlock(stream);
// read the rest of the stream into blocks
RawDataBlockList data_blocks = new RawDataBlockList(stream, header_block_reader.getBigBlockSize());
RawDataBlockList data_blocks = new RawDataBlockList(stream, header_block.getBigBlockSize());
// set up the block allocation table (necessary for the
// data_blocks to be manageable
new BlockAllocationTableReader(header_block_reader.getBATCount(),
header_block_reader.getBATArray(),
header_block_reader.getXBATCount(),
header_block_reader.getXBATIndex(),
new BlockAllocationTableReader(header_block.getBigBlockSize(),
header_block.getBATCount(),
header_block.getBATArray(),
header_block.getXBATCount(),
header_block.getXBATIndex(),
data_blocks);
// get property table from the document
PropertyTable properties =
new PropertyTable(header_block_reader.getPropertyStart(),
data_blocks);
new PropertyTable(header_block, data_blocks);
// process documents
processProperties(SmallBlockTableReader
.getSmallDocumentBlocks(data_blocks, properties
.getRoot(), header_block_reader
.getSBATStart()), data_blocks, properties.getRoot()
.getSmallDocumentBlocks(
header_block.getBigBlockSize(),
data_blocks, properties.getRoot(),
header_block.getSBATStart()),
data_blocks, properties.getRoot()
.getChildren(), new POIFSDocumentPath());
}
@ -245,13 +247,13 @@ public class POIFSReader
{
document =
new POIFSDocument(name, small_blocks
.fetchBlocks(startBlock), size);
.fetchBlocks(startBlock, -1), size);
}
else
{
document =
new POIFSDocument(name, big_blocks
.fetchBlocks(startBlock), size);
.fetchBlocks(startBlock, -1), size);
}
while (listeners.hasNext())
{
@ -270,11 +272,11 @@ public class POIFSReader
// consume the document's data and discard it
if (property.shouldUseSmallBlocks())
{
small_blocks.fetchBlocks(startBlock);
small_blocks.fetchBlocks(startBlock, -1);
}
else
{
big_blocks.fetchBlocks(startBlock);
big_blocks.fetchBlocks(startBlock, -1);
}
}
}

View File

@ -0,0 +1,105 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.poi.poifs.storage.BATBlock.BATBlockAndIndex;
/**
* This abstract class describes a way to read, store, chain
* and free a series of blocks (be they Big or Small ones)
*/
/**
 * This abstract class describes a way to read, store, chain
 * and free a series of blocks (be they Big or Small ones)
 */
public abstract class BlockStore {
    /**
     * Returns the size, in bytes, of the blocks managed through the block store.
     */
    protected abstract int getBlockStoreBlockSize();

    /**
     * Load the block at the given offset.
     */
    protected abstract ByteBuffer getBlockAt(final int offset) throws IOException;

    /**
     * Extends the file if required to hold blocks up to
     * the specified offset, and return the block from there.
     */
    protected abstract ByteBuffer createBlockIfNeeded(final int offset) throws IOException;

    /**
     * Returns the BATBlock that handles the specified offset,
     * and the relative index within it
     */
    protected abstract BATBlockAndIndex getBATBlockAndIndex(final int offset);

    /**
     * Works out what block follows the specified one.
     */
    protected abstract int getNextBlock(final int offset);

    /**
     * Changes the record of what block follows the specified one.
     */
    protected abstract void setNextBlock(final int offset, final int nextBlock);

    /**
     * Finds a free block, and returns its offset.
     * This method will extend the file/stream if needed, and if doing
     * so, allocate new FAT blocks to address the extra space.
     */
    protected abstract int getFreeBlock() throws IOException;

    /**
     * Creates a Detector for loops in the chain
     */
    protected abstract ChainLoopDetector getChainLoopDetector() throws IOException;

    /**
     * Used to detect if a chain has a loop in it, so
     * we can bail out with an error rather than
     * spinning away for ever...
     */
    protected class ChainLoopDetector {
        // One flag per block in the file; set once the block has been
        // claimed by the current chain walk
        private boolean[] used_blocks;

        protected ChainLoopDetector(long rawSize) {
            // Divide as doubles: "rawSize / getBlockStoreBlockSize()" in
            // integer arithmetic truncates BEFORE Math.ceil runs, which
            // under-counts by one block whenever the file size is not an
            // exact multiple of the block size (and would make claim()
            // wrongly treat the final partial block as freshly allocated)
            int numBlocks = (int)Math.ceil( ((double)rawSize) / getBlockStoreBlockSize() );
            used_blocks = new boolean[numBlocks];
        }

        protected void claim(int offset) {
            if(offset >= used_blocks.length) {
                // They're writing, and have had new blocks requested
                // for the write to proceed. That means they're into
                // blocks we've allocated for them, so are safe
                return;
            }

            // Claiming an existing block, ensure there's no loop
            if(used_blocks[offset]) {
                throw new IllegalStateException(
                    "Potential loop detected - Block " + offset +
                    " was already claimed but was just requested again"
                );
            }
            used_blocks[offset] = true;
        }
    }
}

View File

@ -33,7 +33,7 @@ import org.apache.poi.hpsf.ClassID;
*/
public interface DirectoryEntry
extends Entry
extends Entry, Iterable<Entry>
{
/**
@ -47,7 +47,7 @@ public interface DirectoryEntry
* implementations of Entry.
*/
public Iterator getEntries();
public Iterator<Entry> getEntries();
/**
* is this DirectoryEntry empty?

View File

@ -19,9 +19,14 @@
package org.apache.poi.poifs.filesystem;
import java.io.*;
import java.util.*;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.poi.hpsf.ClassID;
import org.apache.poi.poifs.dev.POIFSViewable;
@ -34,17 +39,21 @@ import org.apache.poi.poifs.property.Property;
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public class DirectoryNode
extends EntryNode
implements DirectoryEntry, POIFSViewable
implements DirectoryEntry, POIFSViewable, Iterable<Entry>
{
// Map of Entry instances, keyed by their names
private Map _entries;
private Map<String,Entry> _byname;
// Our list of entries, kept sorted to preserve order
private ArrayList<Entry> _entries;
// Only one of these two will exist
// the POIFSFileSystem we belong to
private POIFSFileSystem _filesystem;
private POIFSFileSystem _ofilesystem;
// the NPOIFSFileSytem we belong to
private NPOIFSFileSystem _nfilesystem;
// the path described by this document
private POIFSDocumentPath _path;
@ -57,10 +66,32 @@ public class DirectoryNode
* @param filesystem the POIFSFileSystem we belong to
* @param parent the parent of this entry
*/
DirectoryNode(final DirectoryProperty property,
final POIFSFileSystem filesystem,
final DirectoryNode parent)
{
this(property, parent);
_ofilesystem = filesystem;
}
/**
* create a DirectoryNode. This method is not public by design; it
* is intended strictly for the internal use of this package
*
* @param property the DirectoryProperty for this DirectoryEntry
* @param nfilesystem the NPOIFSFileSystem we belong to
* @param parent the parent of this entry
*/
DirectoryNode(final DirectoryProperty property,
final NPOIFSFileSystem nfilesystem,
final DirectoryNode parent)
{
this(property, parent);
_nfilesystem = nfilesystem;
}
private DirectoryNode(final DirectoryProperty property,
final DirectoryNode parent)
{
super(property, parent);
if (parent == null)
@ -74,26 +105,30 @@ public class DirectoryNode
property.getName()
});
}
_filesystem = filesystem;
_entries = new HashMap();
Iterator iter = property.getChildren();
_byname = new HashMap<String, Entry>();
_entries = new ArrayList<Entry>();
Iterator<Property> iter = property.getChildren();
while (iter.hasNext())
{
Property child = ( Property ) iter.next();
Property child = iter.next();
Entry childNode = null;
if (child.isDirectory())
{
childNode = new DirectoryNode(( DirectoryProperty ) child,
_filesystem, this);
DirectoryProperty childDir = (DirectoryProperty) child;
if(_ofilesystem != null) {
childNode = new DirectoryNode(childDir, _ofilesystem, this);
} else {
childNode = new DirectoryNode(childDir, _nfilesystem, this);
}
}
else
{
childNode = new DocumentNode(( DocumentProperty ) child,
this);
childNode = new DocumentNode((DocumentProperty) child, this);
}
_entries.put(childNode.getName(), childNode);
_entries.add(childNode);
_byname.put(childNode.getName(), childNode);
}
}
@ -106,6 +141,22 @@ public class DirectoryNode
return _path;
}
/**
* @return the filesystem that this belongs to
*/
public POIFSFileSystem getFileSystem()
{
return _ofilesystem;
}
/**
* @return the filesystem that this belongs to
*/
public NPOIFSFileSystem getNFileSystem()
{
return _nfilesystem;
}
/**
* open a document in the directory's entry's list of entries
*
@ -116,19 +167,34 @@ public class DirectoryNode
* @exception IOException if the document does not exist or the
* name is that of a DirectoryEntry
*/
public DocumentInputStream createDocumentInputStream(
final String documentName)
throws IOException
{
Entry document = getEntry(documentName);
return createDocumentInputStream(getEntry(documentName));
}
if (!document.isDocumentEntry())
{
throw new IOException("Entry '" + documentName
/**
* open a document in the directory's entry's list of entries
*
* @param documentEntry the document to be opened
*
* @return a newly opened DocumentInputStream or NDocumentInputStream
*
* @exception IOException if the document does not exist or the
* name is that of a DirectoryEntry
*/
public DocumentInputStream createDocumentInputStream(
final Entry document)
throws IOException
{
if (!document.isDocumentEntry()) {
throw new IOException("Entry '" + document.getName()
+ "' is not a DocumentEntry");
}
return new DocumentInputStream(( DocumentEntry ) document);
DocumentEntry entry = (DocumentEntry)document;
return new DocumentInputStream(entry);
}
/**
@ -140,7 +206,6 @@ public class DirectoryNode
*
* @exception IOException
*/
DocumentEntry createDocument(final POIFSDocument document)
throws IOException
{
@ -148,8 +213,33 @@ public class DirectoryNode
DocumentNode rval = new DocumentNode(property, this);
(( DirectoryProperty ) getProperty()).addChild(property);
_filesystem.addDocument(document);
_entries.put(property.getName(), rval);
_ofilesystem.addDocument(document);
_entries.add(rval);
_byname.put(property.getName(), rval);
return rval;
}
/**
* create a new DocumentEntry
*
* @param document the new document
*
* @return the new DocumentEntry
*
* @exception IOException
*/
DocumentEntry createDocument(final NPOIFSDocument document)
throws IOException
{
DocumentProperty property = document.getDocumentProperty();
DocumentNode rval = new DocumentNode(property, this);
(( DirectoryProperty ) getProperty()).addChild(property);
_nfilesystem.addDocument(document);
_entries.add(rval);
_byname.put(property.getName(), rval);
return rval;
}
@ -161,11 +251,10 @@ public class DirectoryNode
*
* @return true if the operation succeeded, else false
*/
boolean changeName(final String oldName, final String newName)
{
boolean rval = false;
EntryNode child = ( EntryNode ) _entries.get(oldName);
EntryNode child = ( EntryNode ) _byname.get(oldName);
if (child != null)
{
@ -173,8 +262,8 @@ public class DirectoryNode
.changeName(child.getProperty(), newName);
if (rval)
{
_entries.remove(oldName);
_entries.put(child.getProperty().getName(), child);
_byname.remove(oldName);
_byname.put(child.getProperty().getName(), child);
}
}
return rval;
@ -196,8 +285,14 @@ public class DirectoryNode
if (rval)
{
_entries.remove(entry.getName());
_filesystem.remove(entry);
_entries.remove(entry);
_byname.remove(entry.getName());
if(_ofilesystem != null) {
_ofilesystem.remove(entry);
} else {
_nfilesystem.remove(entry);
}
}
return rval;
}
@ -215,9 +310,9 @@ public class DirectoryNode
* implementations of Entry.
*/
public Iterator getEntries()
public Iterator<Entry> getEntries()
{
return _entries.values().iterator();
return _entries.iterator();
}
/**
@ -263,7 +358,7 @@ public class DirectoryNode
if (name != null)
{
rval = ( Entry ) _entries.get(name);
rval = _byname.get(name);
}
if (rval == null)
{
@ -326,13 +421,20 @@ public class DirectoryNode
public DirectoryEntry createDirectory(final String name)
throws IOException
{
DirectoryNode rval;
DirectoryProperty property = new DirectoryProperty(name);
DirectoryNode rval = new DirectoryNode(property, _filesystem,
this);
if(_ofilesystem != null) {
rval = new DirectoryNode(property, _ofilesystem, this);
_ofilesystem.addDirectory(property);
} else {
rval = new DirectoryNode(property, _nfilesystem, this);
_nfilesystem.addDirectory(property);
}
(( DirectoryProperty ) getProperty()).addChild(property);
_filesystem.addDirectory(property);
_entries.put(name, rval);
_entries.add(rval);
_byname.put(name, rval);
return rval;
}
@ -410,15 +512,13 @@ public class DirectoryNode
* @return an Iterator; may not be null, but may have an empty
* back end store
*/
@SuppressWarnings("unchecked")
public Iterator getViewableIterator()
{
List components = new ArrayList();
components.add(getProperty());
SortedMap sortedEntries = new TreeMap(_entries);
Iterator iter = sortedEntries.values().iterator();
Iterator<Entry> iter = _entries.iterator();
while (iter.hasNext())
{
components.add(iter.next());
@ -451,6 +551,13 @@ public class DirectoryNode
return getName();
}
/**
* Returns an Iterator over all the entries
*/
public Iterator<Entry> iterator() {
return getEntries();
}
/* ********** END begin implementation of POIFSViewable ********** */
} // end public class DirectoryNode

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,436 +15,155 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import java.io.*;
import java.io.IOException;
import java.io.InputStream;
import org.apache.poi.util.LittleEndianInput;
/**
* This class provides methods to read a DocumentEntry managed by a
* Filesystem instance.
*
* @author Marc Johnson (mjohnson at apache dot org)
* {@link POIFSFileSystem} or {@link NPOIFSFileSystem} instance.
* It creates the appropriate one, and delegates, allowing us to
* work transparently with the two.
*/
public class DocumentInputStream extends InputStream implements LittleEndianInput {
/** returned by read operations if we're at end of document */
protected static final int EOF = -1;
public class DocumentInputStream
extends InputStream
{
protected static final int SIZE_SHORT = 2;
protected static final int SIZE_INT = 4;
protected static final int SIZE_LONG = 8;
// current offset into the Document
private int _current_offset;
private DocumentInputStream delegate;
// current marked offset into the Document (used by mark and
// reset)
private int _marked_offset;
/** For use by downstream implementations */
protected DocumentInputStream() {}
// the Document's size
private int _document_size;
/**
* Create an InputStream from the specified DocumentEntry
*
* @param document the DocumentEntry to be read
*
* @exception IOException if the DocumentEntry cannot be opened (like, maybe it has
* been deleted?)
*/
public DocumentInputStream(DocumentEntry document) throws IOException {
if (!(document instanceof DocumentNode)) {
throw new IOException("Cannot open internal document storage");
}
DocumentNode documentNode = (DocumentNode)document;
DirectoryNode parentNode = (DirectoryNode)document.getParent();
// have we been closed?
private boolean _closed;
if(documentNode.getDocument() != null) {
delegate = new ODocumentInputStream(document);
} else if(parentNode.getFileSystem() != null) {
delegate = new ODocumentInputStream(document);
} else if(parentNode.getNFileSystem() != null) {
delegate = new NDocumentInputStream(document);
} else {
throw new IOException("No FileSystem bound on the parent, can't read contents");
}
}
// the actual Document
private POIFSDocument _document;
/**
* Create an InputStream from the specified Document
*
* @param document the Document to be read
*/
public DocumentInputStream(POIFSDocument document) {
delegate = new ODocumentInputStream(document);
}
// buffer used to read one byte at a time
private byte[] _tiny_buffer;
/**
* Create an InputStream from the specified Document
*
* @param document the Document to be read
*/
public DocumentInputStream(NPOIFSDocument document) {
delegate = new NDocumentInputStream(document);
}
// returned by read operations if we're at end of document
static private final int EOD = -1;
public int available() {
return delegate.available();
}
/**
* Create an InputStream from the specified DocumentEntry
*
* @param document the DocumentEntry to be read
*
* @exception IOException if the DocumentEntry cannot be opened
* (like, maybe it has been deleted?)
*/
public void close() {
delegate.close();
}
public DocumentInputStream(final DocumentEntry document)
throws IOException
{
_current_offset = 0;
_marked_offset = 0;
_document_size = document.getSize();
_closed = false;
_tiny_buffer = null;
if (document instanceof DocumentNode)
{
_document = (( DocumentNode ) document).getDocument();
}
else
{
throw new IOException("Cannot open internal document storage");
}
}
public void mark(int ignoredReadlimit) {
delegate.mark(ignoredReadlimit);
}
/**
* Create an InputStream from the specified Document
*
* @param document the Document to be read
*
* @exception IOException if the DocumentEntry cannot be opened
* (like, maybe it has been deleted?)
*/
/**
* Tests if this input stream supports the mark and reset methods.
*
* @return <code>true</code> always
*/
public boolean markSupported() {
return true;
}
public DocumentInputStream(final POIFSDocument document)
throws IOException
{
_current_offset = 0;
_marked_offset = 0;
_document_size = document.getSize();
_closed = false;
_tiny_buffer = null;
_document = document;
}
public int read() throws IOException {
return delegate.read();
}
/**
* Returns the number of bytes that can be read (or skipped over)
* from this input stream without blocking by the next caller of a
* method for this input stream. The next caller might be the same
* thread or or another thread.
*
* @return the number of bytes that can be read from this input
* stream without blocking.
*
* @exception IOException on error (such as the stream has been
* closed)
*/
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
public int available()
throws IOException
{
dieIfClosed();
return _document_size - _current_offset;
}
public int read(byte[] b, int off, int len) throws IOException {
return delegate.read(b, off, len);
}
/**
* Closes this input stream and releases any system resources
* associated with the stream.
*
* @exception IOException
*/
/**
* Repositions this stream to the position at the time the mark() method was
* last called on this input stream. If mark() has not been called this
* method repositions the stream to its beginning.
*/
public void reset() {
delegate.reset();
}
public void close()
throws IOException
{
_closed = true;
}
public long skip(long n) throws IOException {
return delegate.skip(n);
}
/**
* Marks the current position in this input stream. A subsequent
* call to the reset method repositions this stream at the last
* marked position so that subsequent reads re-read the same
* bytes.
* <p>
* The readlimit arguments tells this input stream to allow that
* many bytes to be read before the mark position gets
* invalidated. This implementation, however, does not care.
* <p>
* The general contract of mark is that, if the method
* markSupported returns true, the stream somehow remembers all
* the bytes read after the call to mark and stands ready to
* supply those same bytes again if and whenever the method reset
* is called. However, the stream is not required to remember any
* data at all if more than readlimit bytes are read from the
* stream before reset is called. But this stream will.
*
* @param ignoredReadlimit the maximum limit of bytes that can be
* read before the mark position becomes
* invalid. Ignored by this
* implementation.
*/
public byte readByte() {
return delegate.readByte();
}
public void mark(int ignoredReadlimit)
{
_marked_offset = _current_offset;
}
public double readDouble() {
return delegate.readDouble();
}
/**
* Tests if this input stream supports the mark and reset methods.
*
* @return true
*/
public short readShort() {
return (short) readUShort();
}
public boolean markSupported()
{
return true;
}
public void readFully(byte[] buf) {
readFully(buf, 0, buf.length);
}
/**
* Reads the next byte of data from the input stream. The value
* byte is returned as an int in the range 0 to 255. If no byte is
* available because the end of the stream has been reached, the
* value -1 is returned. The definition of this method in
* java.io.InputStream allows this method to block, but it won't.
*
* @return the next byte of data, or -1 if the end of the stream
* is reached.
*
* @exception IOException
*/
public void readFully(byte[] buf, int off, int len) {
delegate.readFully(buf, off, len);
}
public int read()
throws IOException
{
dieIfClosed();
if (atEOD())
{
// At end of document - the InputStream contract says return -1
return EOD;
}
// Lazily allocate the single-byte scratch buffer on first use
if (_tiny_buffer == null)
{
_tiny_buffer = new byte[ 1 ];
}
_document.read(_tiny_buffer, _current_offset++);
// Mask to 0..255 so byte values >= 0x80 aren't sign-extended to negatives
return ((int)_tiny_buffer[ 0 ]) & 0x000000FF;
}
public long readLong() {
return delegate.readLong();
}
/**
* Reads some number of bytes from the input stream and stores
* them into the buffer array b. The number of bytes actually read
* is returned as an integer. The definition of this method in
* java.io.InputStream allows this method to block, but it won't.
* <p>
* If b is null, a NullPointerException is thrown. If the length
* of b is zero, then no bytes are read and 0 is returned;
* otherwise, there is an attempt to read at least one byte. If no
* byte is available because the stream is at end of file, the
* value -1 is returned; otherwise, at least one byte is read and
* stored into b.
* <p>
* The first byte read is stored into element b[0], the next one
* into b[1], and so on. The number of bytes read is, at most,
* equal to the length of b. Let k be the number of bytes actually
* read; these bytes will be stored in elements b[0] through
* b[k-1], leaving elements b[k] through b[b.length-1] unaffected.
* <p>
* If the first byte cannot be read for any reason other than end
* of file, then an IOException is thrown. In particular, an
* IOException is thrown if the input stream has been closed.
* <p>
* The read(b) method for class InputStream has the same effect as:
* <p>
* <code>read(b, 0, b.length)</code>
*
* @param b the buffer into which the data is read.
*
* @return the total number of bytes read into the buffer, or -1
* if there is no more data because the end of the stream
* has been reached.
*
* @exception IOException
* @exception NullPointerException
*/
public int readInt() {
return delegate.readInt();
}
public int read(final byte [] b)
throws IOException, NullPointerException
{
return read(b, 0, b.length);
}
/**
* Reads up to len bytes of data from the input stream into an
* array of bytes. An attempt is made to read as many as len
* bytes, but a smaller number may be read, possibly zero. The
* number of bytes actually read is returned as an integer.
* <p>
* The definition of this method in java.io.InputStream allows it
* to block, but it won't.
* <p>
* If b is null, a NullPointerException is thrown.
* <p>
* If off is negative, or len is negative, or off+len is greater
* than the length of the array b, then an
* IndexOutOfBoundsException is thrown.
* <p>
* If len is zero, then no bytes are read and 0 is returned;
* otherwise, there is an attempt to read at least one byte. If no
* byte is available because the stream is at end of file, the
* value -1 is returned; otherwise, at least one byte is read and
* stored into b.
* <p>
* The first byte read is stored into element b[off], the next one
* into b[off+1], and so on. The number of bytes read is, at most,
* equal to len. Let k be the number of bytes actually read; these
* bytes will be stored in elements b[off] through b[off+k-1],
* leaving elements b[off+k] through b[off+len-1] unaffected.
* <p>
* In every case, elements b[0] through b[off] and elements
* b[off+len] through b[b.length-1] are unaffected.
* <p>
* If the first byte cannot be read for any reason other than end
* of file, then an IOException is thrown. In particular, an
* IOException is thrown if the input stream has been closed.
*
* @param b the buffer into which the data is read.
* @param off the start offset in array b at which the data is
* written.
* @param len the maximum number of bytes to read.
*
* @return the total number of bytes read into the buffer, or -1
* if there is no more data because the end of the stream
* has been reached.
*
* @exception IOException
* @exception NullPointerException
* @exception IndexOutOfBoundsException
*/
public int read(final byte [] b, final int off, final int len)
throws IOException, NullPointerException, IndexOutOfBoundsException
{
dieIfClosed();
if (b == null)
{
throw new NullPointerException("buffer is null");
}
if ((off < 0) || (len < 0) || (b.length < (off + len)))
{
throw new IndexOutOfBoundsException(
"can't read past buffer boundaries");
}
if (len == 0)
{
// Zero-length request: read nothing, per the InputStream contract
return 0;
}
if (atEOD())
{
// Already at end of document - signal end of stream
return EOD;
}
// Never read more than remains in the document
int limit = Math.min(available(), len);
if ((off == 0) && (limit == b.length))
{
// The caller's buffer is an exact fit - read straight into it
_document.read(b, _current_offset);
}
else
{
// Partial fill: stage through a temporary buffer, then copy the
// bytes into the requested region of the caller's array
byte[] buffer = new byte[ limit ];
_document.read(buffer, _current_offset);
System.arraycopy(buffer, 0, b, off, limit);
}
_current_offset += limit;
return limit;
}
/**
 * Repositions this stream to the position recorded by the most
 * recent call to the mark method (or to the start of the document
 * if mark was never called).
 * <p>
 * The general java.io.InputStream contract allows reset to fail if
 * mark was not called, or if more bytes were read than the limit
 * passed to mark. This implementation is just walking an in-memory
 * document, so neither restriction applies: markSupported() is
 * true, and reset() always succeeds regardless of how mark was
 * used. Mark and reset to your heart's content.
 */
public void reset()
{
    _current_offset = _marked_offset;
}
/**
 * Skips over and discards up to n bytes of data from this input
 * stream, clamping at end-of-document. If n is negative, no bytes
 * are skipped.
 *
 * @param n the number of bytes to be skipped.
 *
 * @return the actual number of bytes skipped.
 *
 * @exception IOException if the stream has been closed
 */
public long skip(final long n)
    throws IOException
{
    dieIfClosed();
    if (n < 0)
    {
        return 0;
    }
    // Work in long arithmetic throughout: the previous code narrowed
    // n to an int, which silently truncated very large requests (for
    // example n == 2^32 skipped nothing instead of clamping to the
    // end of the document).
    long remaining = _document_size - _current_offset;
    long rval      = Math.min(n, remaining);

    _current_offset += (int) rval;
    return rval;
}
/**
 * Guard used by the public operations: fails fast once the stream
 * has been closed.
 *
 * @exception IOException if close() has already been called
 */
private void dieIfClosed()
    throws IOException
{
    if (!_closed)
    {
        return;
    }
    throw new IOException(
        "cannot perform requested operation on a closed stream");
}
/**
 * @return true when the read offset has reached the document size,
 *         i.e. there is nothing left to read
 */
private boolean atEOD()
{
    return _document_size == _current_offset;
}
} // end public class DocumentInputStream
// Delegates the unsigned-short read to the wrapped stream
// implementation (NOTE(review): enclosing class header is outside
// this view - presumably a POIFS/NPOIFS switching wrapper; confirm).
public int readUShort() {
    return delegate.readUShort();
}
// Delegates the unsigned-byte read to the wrapped stream
// implementation (see note on readUShort above regarding the
// enclosing class).
public int readUByte() {
    return delegate.readUByte();
}
}

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,7 +15,6 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import java.io.*;
@ -30,12 +28,10 @@ import java.util.*;
* @author Marc Johnson (mjohnson at apache dot org)
*/
public class DocumentOutputStream
extends OutputStream
{
private OutputStream stream;
private int limit;
private int written;
public final class DocumentOutputStream extends OutputStream {
private final OutputStream _stream;
private final int _limit;
private int _written;
/**
* Create a DocumentOutputStream
@ -44,12 +40,10 @@ public class DocumentOutputStream
* read
* @param limit the maximum number of bytes that can be written
*/
DocumentOutputStream(final OutputStream stream, final int limit)
{
this.stream = stream;
this.limit = limit;
this.written = 0;
DocumentOutputStream(OutputStream stream, int limit) {
_stream = stream;
_limit = limit;
_written = 0;
}
/**
@ -64,12 +58,11 @@ public class DocumentOutputStream
* output stream has been closed, or if the
* writer tries to write too much data.
*/
public void write(final int b)
public void write(int b)
throws IOException
{
limitCheck(1);
stream.write(b);
_stream.write(b);
}
/**
@ -79,8 +72,7 @@ public class DocumentOutputStream
* @param b the data.
* @exception IOException if an I/O error occurs.
*/
public void write(final byte b[])
public void write(byte b[])
throws IOException
{
write(b, 0, b.length);
@ -106,12 +98,11 @@ public class DocumentOutputStream
* output stream is closed or if the writer
* tries to write too many bytes.
*/
public void write(final byte b[], final int off, final int len)
public void write(byte b[], int off, int len)
throws IOException
{
limitCheck(len);
stream.write(b, off, len);
_stream.write(b, off, len);
}
/**
@ -120,11 +111,10 @@ public class DocumentOutputStream
*
* @exception IOException if an I/O error occurs.
*/
public void flush()
throws IOException
{
stream.flush();
_stream.flush();
}
/**
@ -135,10 +125,7 @@ public class DocumentOutputStream
*
* @exception IOException if an I/O error occurs.
*/
public void close()
throws IOException
{
public void close() {
// ignore this call
}
@ -152,27 +139,25 @@ public class DocumentOutputStream
*
* @exception IOException on I/O error
*/
void writeFiller(final int totalLimit, final byte fill)
void writeFiller(int totalLimit, byte fill)
throws IOException
{
if (totalLimit > written)
if (totalLimit > _written)
{
byte[] filler = new byte[ totalLimit - written ];
byte[] filler = new byte[ totalLimit - _written ];
Arrays.fill(filler, fill);
stream.write(filler);
_stream.write(filler);
}
}
private void limitCheck(final int toBeWritten)
private void limitCheck(int toBeWritten)
throws IOException
{
if ((written + toBeWritten) > limit)
if ((_written + toBeWritten) > _limit)
{
throw new IOException("tried to write too much data");
}
written += toBeWritten;
_written += toBeWritten;
}
} // end public class DocumentOutputStream
}

View File

@ -0,0 +1,303 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;
import org.apache.poi.poifs.property.DocumentProperty;
import org.apache.poi.util.LittleEndian;
/**
 * This class provides methods to read a DocumentEntry managed by a
 * {@link NPOIFSFileSystem} instance.
 */
public final class NDocumentInputStream extends DocumentInputStream {
    /** current offset into the Document */
    private int _current_offset;
    /** current block count */
    private int _current_block_count;

    /** current marked offset into the Document (used by mark and reset) */
    private int _marked_offset;
    /** and the block count for it */
    private int _marked_offset_count;

    /** the Document's size */
    private int _document_size;

    /** have we been closed? */
    private boolean _closed;

    /** the actual Document */
    private NPOIFSDocument _document;

    /** iterator over the document's backing data blocks */
    private Iterator<ByteBuffer> _data;
    /** the block currently being read from, null before the first read */
    private ByteBuffer _buffer;

    /**
     * Create an InputStream from the specified DocumentEntry
     *
     * @param document the DocumentEntry to be read
     *
     * @exception IOException if the DocumentEntry cannot be opened (like, maybe it has
     *                been deleted?)
     */
    public NDocumentInputStream(DocumentEntry document) throws IOException {
        if (!(document instanceof DocumentNode)) {
            throw new IOException("Cannot open internal document storage");
        }
        _current_offset = 0;
        _current_block_count = 0;
        _marked_offset = 0;
        _marked_offset_count = 0;
        _document_size = document.getSize();
        _closed = false;

        // Build the NPOIFSDocument from the node's property and the
        // filesystem that owns its parent directory
        DocumentNode doc = (DocumentNode) document;
        DocumentProperty property = (DocumentProperty) doc.getProperty();
        _document = new NPOIFSDocument(
                property,
                ((DirectoryNode) doc.getParent()).getNFileSystem()
        );
        _data = _document.getBlockIterator();
    }

    /**
     * Create an InputStream from the specified Document
     *
     * @param document the Document to be read
     */
    public NDocumentInputStream(NPOIFSDocument document) {
        _current_offset = 0;
        _current_block_count = 0;
        _marked_offset = 0;
        _marked_offset_count = 0;
        _document_size = document.getSize();
        _closed = false;
        _document = document;
        _data = _document.getBlockIterator();
    }

    @Override
    public int available() {
        if (_closed) {
            throw new IllegalStateException("cannot perform requested operation on a closed stream");
        }
        return _document_size - _current_offset;
    }

    @Override
    public void close() {
        _closed = true;
    }

    @Override
    public void mark(int ignoredReadlimit) {
        // The read limit is ignored - reset() can always wind back
        _marked_offset = _current_offset;
        _marked_offset_count = _current_block_count;
    }

    @Override
    public int read() throws IOException {
        dieIfClosed();
        if (atEOD()) {
            return EOF;
        }
        byte[] b = new byte[1];
        int result = read(b, 0, 1);
        if (result >= 0) {
            // Return the byte as an unsigned value, per the InputStream contract
            if (b[0] < 0) {
                return b[0] + 256;
            }
            return b[0];
        }
        return result;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        dieIfClosed();
        if (b == null) {
            throw new IllegalArgumentException("buffer must not be null");
        }
        if (off < 0 || len < 0 || b.length < off + len) {
            throw new IndexOutOfBoundsException("can't read past buffer boundaries");
        }
        if (len == 0) {
            return 0;
        }
        if (atEOD()) {
            return EOF;
        }
        int limit = Math.min(available(), len);
        readFully(b, off, limit);
        return limit;
    }

    /**
     * Repositions this stream to the position at the time the mark() method was
     * last called on this input stream. If mark() has not been called this
     * method repositions the stream to its beginning.
     */
    @Override
    public void reset() {
        // Special case for reset to the start
        if (_marked_offset == 0 && _marked_offset_count == 0) {
            _current_block_count = _marked_offset_count;
            _current_offset = _marked_offset;
            _data = _document.getBlockIterator();
            _buffer = null;
            return;
        }

        // Start again, then wind on to the required block
        _data = _document.getBlockIterator();
        _current_offset = 0;
        for (int i = 0; i < _marked_offset_count; i++) {
            _buffer = _data.next();
            _current_offset += _buffer.remaining();
        }
        _current_block_count = _marked_offset_count;

        // Do we need to position within it?
        if (_current_offset != _marked_offset) {
            // Grab the right block
            _buffer = _data.next();
            _current_block_count++;

            // Skip to the right place in it
            _buffer.position(_marked_offset - _current_offset);
        }

        // All done
        _current_offset = _marked_offset;
    }

    @Override
    public long skip(long n) throws IOException {
        dieIfClosed();
        if (n < 0) {
            return 0;
        }
        // Work in long arithmetic: the previous narrowing cast of n to int
        // silently truncated very large skip requests (e.g. n == 2^32 would
        // skip nothing instead of clamping to end-of-document).
        long remaining = _document_size - _current_offset;
        long rval = Math.min(n, remaining);

        // TODO Do this better than reading the bytes into a scratch array
        byte[] skip = new byte[(int) rval];
        readFully(skip);
        return rval;
    }

    private void dieIfClosed() throws IOException {
        if (_closed) {
            throw new IOException("cannot perform requested operation on a closed stream");
        }
    }

    private boolean atEOD() {
        return _current_offset == _document_size;
    }

    /**
     * Checks that the stream is open and that at least requestedSize bytes
     * remain; used by the typed read methods, which must not return short.
     */
    private void checkAvailable(int requestedSize) {
        if (_closed) {
            throw new IllegalStateException("cannot perform requested operation on a closed stream");
        }
        if (requestedSize > _document_size - _current_offset) {
            throw new RuntimeException("Buffer underrun - requested " + requestedSize
                    + " bytes but " + (_document_size - _current_offset) + " was available");
        }
    }

    @Override
    public void readFully(byte[] buf, int off, int len) {
        checkAvailable(len);
        int read = 0;
        while (read < len) {
            // Advance to the next backing block when the current one is
            // exhausted (or before the very first read)
            if (_buffer == null || _buffer.remaining() == 0) {
                _current_block_count++;
                _buffer = _data.next();
            }

            int limit = Math.min(len - read, _buffer.remaining());
            _buffer.get(buf, off + read, limit);
            _current_offset += limit;
            read += limit;
        }
    }

    @Override
    public byte readByte() {
        return (byte) readUByte();
    }

    @Override
    public double readDouble() {
        return Double.longBitsToDouble(readLong());
    }

    @Override
    public long readLong() {
        checkAvailable(SIZE_LONG);
        byte[] data = new byte[SIZE_LONG];
        readFully(data, 0, SIZE_LONG);
        return LittleEndian.getLong(data, 0);
    }

    @Override
    public short readShort() {
        return (short) readUShort();
    }

    @Override
    public int readInt() {
        checkAvailable(SIZE_INT);
        byte[] data = new byte[SIZE_INT];
        readFully(data, 0, SIZE_INT);
        return LittleEndian.getInt(data);
    }

    @Override
    public int readUShort() {
        checkAvailable(SIZE_SHORT);
        byte[] data = new byte[SIZE_SHORT];
        readFully(data, 0, SIZE_SHORT);
        // Must use the unsigned accessor here: getShort() sign-extends
        // values >= 0x8000, which is wrong for an unsigned short read
        return LittleEndian.getUShort(data);
    }

    @Override
    public int readUByte() {
        checkAvailable(1);
        byte[] data = new byte[1];
        readFully(data, 0, 1);
        if (data[0] >= 0) {
            return data[0];
        }
        return data[0] + 256;
    }
}

View File

@ -0,0 +1,193 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Iterator;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.dev.POIFSViewable;
import org.apache.poi.poifs.property.DocumentProperty;
import org.apache.poi.util.HexDump;
import org.apache.poi.util.IOUtils;
/**
 * This class manages a document in the NIO POIFS filesystem.
 * This is the {@link NPOIFSFileSystem} version.
 */
public final class NPOIFSDocument implements POIFSViewable {
   /** the property describing this document (name, size, start block) */
   private DocumentProperty _property;
   /** the filesystem the document lives in */
   private NPOIFSFileSystem _filesystem;
   /** the stream (mini- or big-block backed) holding the data */
   private NPOIFSStream _stream;
   /** the block size of the store backing _stream */
   private int _block_size;

   /**
    * Constructor for an existing Document
    *
    * @param property the DocumentProperty describing the document
    * @param filesystem the filesystem that holds the document's blocks
    */
   public NPOIFSDocument(DocumentProperty property, NPOIFSFileSystem filesystem)
      throws IOException
   {
      this._property = property;
      this._filesystem = filesystem;

      // Documents strictly smaller than the cutoff live in the mini
      // stream; everything else lives in regular big blocks
      if(property.getSize() < POIFSConstants.BIG_BLOCK_MINIMUM_DOCUMENT_SIZE) {
         _stream = new NPOIFSStream(_filesystem.getMiniStore(), property.getStartBlock());
         _block_size = _filesystem.getMiniStore().getBlockStoreBlockSize();
      } else {
         _stream = new NPOIFSStream(_filesystem, property.getStartBlock());
         _block_size = _filesystem.getBlockStoreBlockSize();
      }
   }

   /**
    * Constructor for a new Document
    *
    * @param name the name of the POIFSDocument
    * @param stream the InputStream we read data from
    */
   public NPOIFSDocument(String name, NPOIFSFileSystem filesystem, InputStream stream)
      throws IOException
   {
      this._filesystem = filesystem;

      // Buffer the contents into memory. This is a bit icky...
      // TODO Replace with a buffer up to the mini stream size, then streaming write
      byte[] contents;
      if(stream instanceof ByteArrayInputStream) {
         ByteArrayInputStream bais = (ByteArrayInputStream)stream;
         contents = new byte[bais.available()];
         bais.read(contents);
      } else {
         ByteArrayOutputStream baos = new ByteArrayOutputStream();
         IOUtils.copy(stream, baos);
         contents = baos.toByteArray();
      }

      // Do we need to store as a mini stream or a full one?
      // Use the same strict "< cutoff" rule as the reading constructor
      // above: with the old "<=" a document of exactly the cutoff size
      // was written to the mini stream, but readers (which check the
      // property size with "<") would look for it in the big blocks.
      if(contents.length < POIFSConstants.BIG_BLOCK_MINIMUM_DOCUMENT_SIZE) {
         _stream = new NPOIFSStream(filesystem.getMiniStore());
         _block_size = _filesystem.getMiniStore().getBlockStoreBlockSize();
      } else {
         _stream = new NPOIFSStream(filesystem);
         _block_size = _filesystem.getBlockStoreBlockSize();
      }

      // Store it
      _stream.updateContents(contents);

      // And build the property for it
      this._property = new DocumentProperty(name, contents.length);
      _property.setStartBlock(_stream.getStartBlock());
   }

   /** @return the block size of the store holding this document's stream */
   int getDocumentBlockSize() {
      return _block_size;
   }

   /** @return an iterator over the raw data blocks of this document */
   Iterator<ByteBuffer> getBlockIterator() {
      return _stream.getBlockIterator();
   }

   /**
    * @return size of the document
    */
   public int getSize() {
      return _property.getSize();
   }

   /**
    * @return the instance's DocumentProperty
    */
   DocumentProperty getDocumentProperty() {
      return _property;
   }

   /**
    * Get an array of objects, some of which may implement POIFSViewable
    *
    * @return an array of Object; may not be null, but may be empty
    */
   public Object[] getViewableArray() {
      Object[] results = new Object[1];
      String result;

      try {
         if(getSize() > 0) {
            // Get all the data into a single array
            byte[] data = new byte[getSize()];
            int offset = 0;
            for(ByteBuffer buffer : _stream) {
               // The final block may hold fewer useful bytes than the block size
               int length = Math.min(_block_size, data.length-offset);
               buffer.get(data, offset, length);
               offset += length;
            }

            ByteArrayOutputStream output = new ByteArrayOutputStream();
            HexDump.dump(data, 0, output, 0);
            result = output.toString();
         } else {
            result = "<NO DATA>";
         }
      } catch (IOException e) {
         // Surface the error text in the viewer rather than failing
         result = e.getMessage();
      }
      results[0] = result;
      return results;
   }

   /**
    * Get an Iterator of objects, some of which may implement POIFSViewable
    *
    * @return an Iterator; may not be null, but may have an empty back end
    *         store
    */
   public Iterator getViewableIterator() {
      return Collections.EMPTY_LIST.iterator();
   }

   /**
    * Give viewers a hint as to whether to call getViewableArray or
    * getViewableIterator
    *
    * @return <code>true</code> if a viewer should call getViewableArray,
    *         <code>false</code> if a viewer should call getViewableIterator
    */
   public boolean preferArray() {
      return true;
   }

   /**
    * Provides a short description of the object, to be used when a
    * POIFSViewable object has not provided its contents.
    *
    * @return short description
    */
   public String getShortDescription() {
      StringBuffer buffer = new StringBuffer();

      buffer.append("Document: \"").append(_property.getName()).append("\"");
      buffer.append(" size = ").append(getSize());
      return buffer.toString();
   }
}

View File

@ -0,0 +1,845 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PushbackInputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.dev.POIFSViewable;
import org.apache.poi.poifs.nio.ByteArrayBackedDataSource;
import org.apache.poi.poifs.nio.DataSource;
import org.apache.poi.poifs.nio.FileBackedDataSource;
import org.apache.poi.poifs.property.DirectoryProperty;
import org.apache.poi.poifs.property.NPropertyTable;
import org.apache.poi.poifs.storage.BATBlock;
import org.apache.poi.poifs.storage.BlockAllocationTableReader;
import org.apache.poi.poifs.storage.BlockAllocationTableWriter;
import org.apache.poi.poifs.storage.HeaderBlock;
import org.apache.poi.poifs.storage.HeaderBlockConstants;
import org.apache.poi.poifs.storage.HeaderBlockWriter;
import org.apache.poi.poifs.storage.BATBlock.BATBlockAndIndex;
import org.apache.poi.util.CloseIgnoringInputStream;
import org.apache.poi.util.IOUtils;
import org.apache.poi.util.LongField;
import org.apache.poi.util.POILogFactory;
import org.apache.poi.util.POILogger;
/**
* This is the main class of the POIFS system; it manages the entire
* life cycle of the filesystem.
* This is the new NIO version
*/
public class NPOIFSFileSystem extends BlockStore
implements POIFSViewable
{
private static final POILogger _logger =
POILogFactory.getLogger(NPOIFSFileSystem.class);
/**
 * Convenience method for clients that want to avoid the auto-close behaviour of the constructor.
 *
 * @param is the stream to protect from being closed
 * @return a wrapper whose close() is a no-op
 */
public static InputStream createNonClosingInputStream(InputStream is) {
    return new CloseIgnoringInputStream(is);
}
private NPOIFSMiniStore _mini_store;
private NPropertyTable _property_table;
private List<BATBlock> _xbat_blocks;
private List<BATBlock> _bat_blocks;
private HeaderBlock _header;
private DirectoryNode _root;
private DataSource _data;
/**
* What big block size the file uses. Most files
* use 512 bytes, but a few use 4096
*/
private POIFSBigBlockSize bigBlockSize =
POIFSConstants.SMALLER_BIG_BLOCK_SIZE_DETAILS;
/**
 * Constructor, intended for writing
 */
public NPOIFSFileSystem()
{
    // Start from an empty header, property table and mini store;
    // no data source is attached until something is written/read
    _header = new HeaderBlock(bigBlockSize);
    _property_table = new NPropertyTable(_header);
    _mini_store = new NPOIFSMiniStore(this, _property_table.getRoot(), new ArrayList<BATBlock>(), _header);
    _xbat_blocks = new ArrayList<BATBlock>();
    _bat_blocks = new ArrayList<BATBlock>();
    _root = null;
}
/**
 * Creates a POIFSFileSystem from a <tt>File</tt>. This uses less memory than
 * creating from an <tt>InputStream</tt>. The File will be opened read-only
 *
 * Note that with this constructor, you will need to call {@link #close()}
 * when you're done to have the underlying file closed, as the file is
 * kept open during normal operation to read the data out.
 *
 * @param file the File from which to read the data
 *
 * @exception IOException on errors reading, or on invalid data
 */
public NPOIFSFileSystem(File file)
    throws IOException
{
    // Delegate to the two-argument form with readOnly = true
    this(file, true);
}
/**
 * Creates a POIFSFileSystem from a <tt>File</tt>. This uses less memory than
 * creating from an <tt>InputStream</tt>.
 *
 * Note that with this constructor, you will need to call {@link #close()}
 * when you're done to have the underlying file closed, as the file is
 * kept open during normal operation to read the data out.
 *
 * @param file the File from which to read the data
 * @param readOnly if true, open the file with "r" (read-only) access,
 *        otherwise with "rw" (read-write) access
 *
 * @exception IOException on errors reading, or on invalid data
 */
public NPOIFSFileSystem(File file, boolean readOnly)
    throws IOException
{
    // true -> the channel we created here is closed if reading fails
    this(
        (new RandomAccessFile(file, readOnly? "r" : "rw")).getChannel(),
        true
    );
}
/**
 * Creates a POIFSFileSystem from an open <tt>FileChannel</tt>. This uses
 * less memory than creating from an <tt>InputStream</tt>.
 *
 * Note that with this constructor, you will need to call {@link #close()}
 * when you're done to have the underlying Channel closed, as the channel is
 * kept open during normal operation to read the data out.
 *
 * @param channel the FileChannel from which to read the data
 *
 * @exception IOException on errors reading, or on invalid data
 */
public NPOIFSFileSystem(FileChannel channel)
    throws IOException
{
    // false -> a caller-supplied channel is never closed here, even on error
    this(channel, false);
}
/**
 * Shared worker for the channel-based constructors: reads the header
 * block and then the core filesystem contents from the channel.
 *
 * @param channel the FileChannel from which to read the data
 * @param closeChannelOnError whether the channel should be closed if
 *        reading the header or core contents fails
 */
private NPOIFSFileSystem(FileChannel channel, boolean closeChannelOnError)
    throws IOException
{
    this();

    try {
        // Get the header
        ByteBuffer headerBuffer = ByteBuffer.allocate(POIFSConstants.SMALLER_BIG_BLOCK_SIZE);
        IOUtils.readFully(channel, headerBuffer);

        // Have the header processed
        _header = new HeaderBlock(headerBuffer);

        // Now process the various entries
        _data = new FileBackedDataSource(channel);
        readCoreContents();
    } catch(IOException e) {
        if(closeChannelOnError) {
            channel.close();
        }
        throw e;
    } catch(RuntimeException e) {
        // Comes from Iterators etc.
        // TODO Decide if we can handle these better whilst
        //  still sticking to the iterator contract
        if(closeChannelOnError) {
            channel.close();
        }
        throw e;
    }
}
/**
 * Create a POIFSFileSystem from an <tt>InputStream</tt>. Normally the stream is read until
 * EOF. The stream is always closed.<p/>
 *
 * Some streams are usable after reaching EOF (typically those that return <code>true</code>
 * for <tt>markSupported()</tt>). In the unlikely case that the caller has such a stream
 * <i>and</i> needs to use it after this constructor completes, a work around is to wrap the
 * stream in order to trap the <tt>close()</tt> call. A convenience method (
 * <tt>createNonClosingInputStream()</tt>) has been provided for this purpose:
 * <pre>
 * InputStream wrappedStream = POIFSFileSystem.createNonClosingInputStream(is);
 * HSSFWorkbook wb = new HSSFWorkbook(wrappedStream);
 * is.reset();
 * doSomethingElse(is);
 * </pre>
 * Note also the special case of <tt>ByteArrayInputStream</tt> for which the <tt>close()</tt>
 * method does nothing.
 * <pre>
 * ByteArrayInputStream bais = ...
 * HSSFWorkbook wb = new HSSFWorkbook(bais); // calls bais.close() !
 * bais.reset(); // no problem
 * doSomethingElse(bais);
 * </pre>
 *
 * @param stream the InputStream from which to read the data
 *
 * @exception IOException on errors reading, or on invalid data
 */
public NPOIFSFileSystem(InputStream stream)
    throws IOException
{
    this();

    ReadableByteChannel channel = null;
    boolean success = false;

    try {
        // Turn our InputStream into something NIO based
        channel = Channels.newChannel(stream);

        // Get the header
        ByteBuffer headerBuffer = ByteBuffer.allocate(POIFSConstants.SMALLER_BIG_BLOCK_SIZE);
        IOUtils.readFully(channel, headerBuffer);

        // Have the header processed
        _header = new HeaderBlock(headerBuffer);

        // Sanity check the block count
        BlockAllocationTableReader.sanityCheckBlockCount(_header.getBATCount());

        // We need to buffer the whole file into memory when
        //  working with an InputStream.
        // The max possible size is when each BAT block entry is used
        int maxSize = BATBlock.calculateMaximumSize(_header);
        ByteBuffer data = ByteBuffer.allocate(maxSize);
        // Copy in the header
        headerBuffer.position(0);
        data.put(headerBuffer);
        data.position(headerBuffer.capacity());
        // Now read the rest of the stream
        IOUtils.readFully(channel, data);
        success = true;

        // Turn it into a DataSource
        _data = new ByteArrayBackedDataSource(data.array(), data.position());
    } finally {
        // As per the constructor contract, always close the stream
        if(channel != null)
            channel.close();
        closeInputStream(stream, success);
    }

    // Now process the various entries
    readCoreContents();
}
/**
 * @param stream the stream to be closed
 * @param success <code>false</code> if an exception is currently being thrown in the calling method
 */
private void closeInputStream(InputStream stream, boolean success) {
    try {
        stream.close();
    } catch (IOException e) {
        if(success) {
            throw new RuntimeException(e);
        }
        // else not success? Try block did not complete normally
        // just print stack trace and leave original ex to be thrown
        e.printStackTrace();
    }
}
/**
 * Checks that the supplied InputStream (which MUST
 *  support mark and reset, or be a PushbackInputStream)
 *  has a POIFS (OLE2) header at the start of it.
 * If your InputStream does not support mark / reset,
 *  then wrap it in a PushBackInputStream, then be
 *  sure to always use that, and not the original!
 * @param inp An InputStream which supports either mark/reset, or is a PushbackInputStream
 */
public static boolean hasPOIFSHeader(InputStream inp) throws IOException {
    // We want to peek at the first 8 bytes
    inp.mark(8);

    byte[] header = new byte[8];
    IOUtils.readFully(inp, header);
    LongField signature = new LongField(HeaderBlockConstants._signature_offset, header);

    // Wind back those 8 bytes, so the caller can read the full stream
    if(inp instanceof PushbackInputStream) {
        PushbackInputStream pin = (PushbackInputStream)inp;
        pin.unread(header);
    } else {
        inp.reset();
    }

    // Did it match the signature?
    return (signature.get() == HeaderBlockConstants._signature);
}
/**
 * Read and process the PropertiesTable and the
 *  FAT / XFAT blocks, so that we're ready to
 *  work with the file
 */
private void readCoreContents() throws IOException {
    // Grab the block size
    bigBlockSize = _header.getBigBlockSize();

    // Each block should only ever be used by one of the
    //  FAT, XFAT or Property Table. Ensure it does
    ChainLoopDetector loopDetector = getChainLoopDetector();

    // Read the FAT blocks listed directly in the header
    for(int fatAt : _header.getBATArray()) {
        readBAT(fatAt, loopDetector);
    }

    // Now read the XFAT blocks, and the FATs within them
    BATBlock xfat;
    int nextAt = _header.getXBATIndex();
    for(int i=0; i<_header.getXBATCount(); i++) {
        loopDetector.claim(nextAt);
        ByteBuffer fatData = getBlockAt(nextAt);
        xfat = BATBlock.createBATBlock(bigBlockSize, fatData);
        xfat.setOurBlockIndex(nextAt);
        // The last entry of each XFAT chains to the next XFAT block
        nextAt = xfat.getValueAt(bigBlockSize.getXBATEntriesPerBlock());
        _xbat_blocks.add(xfat);

        for(int j=0; j<bigBlockSize.getXBATEntriesPerBlock(); j++) {
            int fatAt = xfat.getValueAt(j);
            if(fatAt == POIFSConstants.UNUSED_BLOCK) break;
            readBAT(fatAt, loopDetector);
        }
    }

    // We're now able to load streams
    // Use this to read in the properties
    _property_table = new NPropertyTable(_header, this);

    // Finally read the Small Stream FAT (SBAT) blocks
    BATBlock sfat;
    List<BATBlock> sbats = new ArrayList<BATBlock>();
    _mini_store = new NPOIFSMiniStore(this, _property_table.getRoot(), sbats, _header);
    nextAt = _header.getSBATStart();
    for(int i=0; i<_header.getSBATCount(); i++) {
        loopDetector.claim(nextAt);
        ByteBuffer fatData = getBlockAt(nextAt);
        sfat = BATBlock.createBATBlock(bigBlockSize, fatData);
        sfat.setOurBlockIndex(nextAt);
        sbats.add(sfat);
        // SBAT blocks chain through the regular FAT
        nextAt = getNextBlock(nextAt);
    }
}
/**
 * Reads a single FAT block at the given offset, claims it in the
 * loop detector, and records it in the BAT block list.
 */
private void readBAT(int batAt, ChainLoopDetector loopDetector) throws IOException {
    loopDetector.claim(batAt);
    ByteBuffer fatData = getBlockAt(batAt);
    BATBlock bat = BATBlock.createBATBlock(bigBlockSize, fatData);
    bat.setOurBlockIndex(batAt);
    _bat_blocks.add(bat);
}
/**
 * Allocates a new, empty BAT (or XBAT) block at the given offset,
 * writing zeroed bytes into the backing data source so that the
 * file is extended to cover it.
 *
 * @param offset the big-block offset (header excluded) to create at
 * @param isBAT true for a regular BAT block, false for an XBAT block
 */
private BATBlock createBAT(int offset, boolean isBAT) throws IOException {
    // Create a new BATBlock
    BATBlock newBAT = BATBlock.createEmptyBATBlock(bigBlockSize, !isBAT);
    newBAT.setOurBlockIndex(offset);
    // Ensure there's a spot in the file for it
    ByteBuffer buffer = ByteBuffer.allocate(bigBlockSize.getBigBlockSize());
    // Compute the position in long arithmetic: (1+offset)*blockSize
    // overflows an int once the file grows past 2GB.
    // The header isn't in the BATs, so add one to the offset.
    long writeTo = (1+offset) * (long)bigBlockSize.getBigBlockSize();
    _data.write(buffer, writeTo);
    // All done
    return newBAT;
}
/**
 * Load the block at the given offset.
 *
 * @param offset the big-block offset (header excluded) to load
 * @return a buffer over that block's bytes
 */
protected ByteBuffer getBlockAt(final int offset) throws IOException {
    // The header block doesn't count, so add one.
    // Multiply in long arithmetic: the int product (offset+1)*blockSize
    // overflows for blocks past the 2GB boundary.
    long startAt = (offset+1) * (long)bigBlockSize.getBigBlockSize();
    return _data.read(bigBlockSize.getBigBlockSize(), startAt);
}
/**
 * Load the block at the given offset,
 *  extending the file if needed
 *
 * @param offset the sector number, not counting the header sector
 * @return a ByteBuffer covering the (possibly freshly allocated) block
 */
protected ByteBuffer createBlockIfNeeded(final int offset) throws IOException {
   try {
      return getBlockAt(offset);
   } catch(IndexOutOfBoundsException e) {
      // Block doesn't exist yet - extend the file to cover it.
      // The header block doesn't count, so add one. Widen to long
      //  before multiplying, otherwise the int product overflows
      //  for offsets past the 2GB point of the file
      long startAt = (offset+1) * (long)bigBlockSize.getBigBlockSize();
      // Allocate and write a zeroed block to reserve the space
      ByteBuffer buffer = ByteBuffer.allocate(getBigBlockSize());
      _data.write(buffer, startAt);
      // Retrieve the properly backed block
      return getBlockAt(offset);
   }
}
/**
 * Returns the BATBlock that handles the specified offset,
 * and the relative index within it
 *
 * @param offset the sector number whose FAT entry is wanted
 * @return the owning BAT block, plus the entry index inside it
 */
protected BATBlockAndIndex getBATBlockAndIndex(final int offset) {
// The per-block maths lives in BATBlock, shared with the mini store
return BATBlock.getBATBlockAndIndex(
offset, _header, _bat_blocks
);
}
/**
 * Works out what block follows the specified one.
 *
 * @param offset the current block
 * @return the next block in the chain, or a marker value from the FAT
 *         entry (eg END_OF_CHAIN / UNUSED_BLOCK)
 */
protected int getNextBlock(final int offset) {
BATBlockAndIndex bai = getBATBlockAndIndex(offset);
return bai.getBlock().getValueAt( bai.getIndex() );
}
/**
 * Changes the record of what block follows the specified one.
 *
 * @param offset    the block whose FAT entry is updated
 * @param nextBlock the new successor, or a marker value such as
 *                  END_OF_CHAIN / UNUSED_BLOCK
 */
protected void setNextBlock(final int offset, final int nextBlock) {
BATBlockAndIndex bai = getBATBlockAndIndex(offset);
bai.getBlock().setValueAt(
bai.getIndex(), nextBlock
);
}
/**
 * Finds a free block, and returns its offset.
 * This method will extend the file if needed, and if doing
 *  so, allocate new FAT blocks to address the extra space.
 *
 * @return the sector number of a free block, claimed for the caller
 */
protected int getFreeBlock() throws IOException {
   // Entries per FAT block is fixed, so hoist it out of the loop
   int numSectors = bigBlockSize.getBATEntriesPerBlock();

   // First up, do we have any spare ones?
   int offset = 0;
   for(int i=0; i<_bat_blocks.size(); i++) {
      // Check this one
      BATBlock bat = _bat_blocks.get(i);
      if(bat.hasFreeSectors()) {
         // Claim one of them and return it
         for(int j=0; j<numSectors; j++) {
            int batValue = bat.getValueAt(j);
            if(batValue == POIFSConstants.UNUSED_BLOCK) {
               // Bingo
               return offset + j;
            }
         }
      }

      // Move onto the next BAT
      offset += numSectors;
   }

   // If we get here, then there aren't any free sectors
   //  in any of the BATs, so we need another BAT
   BATBlock bat = createBAT(offset, true);
   bat.setValueAt(0, POIFSConstants.FAT_SECTOR_BLOCK);
   _bat_blocks.add(bat);

   // Now store a reference to the BAT in the required place
   if(_header.getBATCount() >= 109) {
      // Needs to come from an XBAT
      BATBlock xbat = null;
      for(BATBlock x : _xbat_blocks) {
         if(x.hasFreeSectors()) {
            xbat = x;
            break;
         }
      }
      if(xbat == null) {
         // Oh joy, we need a new XBAT too...
         xbat = createBAT(offset+1, false);
         // Record our new BAT as the first entry of the new XBAT
         xbat.setValueAt(0, offset);
         bat.setValueAt(1, POIFSConstants.DIFAT_SECTOR_BLOCK);

         // Will go one place higher as XBAT added in
         offset++;

         // Chain it
         if(_xbat_blocks.size() == 0) {
            _header.setXBATStart(offset);
         } else {
            _xbat_blocks.get(_xbat_blocks.size()-1).setValueAt(
                  bigBlockSize.getXBATEntriesPerBlock(), offset
            );
         }
         _xbat_blocks.add(xbat);
         _header.setXBATCount(_xbat_blocks.size());
      } else {
         // Record our new BAT in the first free slot of the existing
         //  XBAT. The break is essential - without it every unused
         //  entry was overwritten with the same offset, corrupting
         //  the XBAT. (A freshly created XBAT already had the BAT
         //  recorded at index 0 above, so this only runs for reuse.)
         for(int i=0; i<bigBlockSize.getXBATEntriesPerBlock(); i++) {
            if(xbat.getValueAt(i) == POIFSConstants.UNUSED_BLOCK) {
               xbat.setValueAt(i, offset);
               break;
            }
         }
      }
   } else {
      // Store us in the header
      int[] newBATs = new int[_header.getBATCount()+1];
      System.arraycopy(_header.getBATArray(), 0, newBATs, 0, newBATs.length-1);
      newBATs[newBATs.length-1] = offset;
      _header.setBATArray(newBATs);
   }
   _header.setBATCount(_bat_blocks.size());

   // The current offset stores us, but the next one is free
   return offset+1;
}
@Override
protected ChainLoopDetector getChainLoopDetector() throws IOException {
// Sized from the file length - no valid chain can hold more
//  blocks than the file physically contains
return new ChainLoopDetector(_data.size());
}
/**
 * For unit testing only! Returns the underlying
 * properties table
 *
 * @return the property table this filesystem reads and writes
 */
NPropertyTable _get_property_table() {
return _property_table;
}
/**
 * Returns the MiniStore, which performs a similar low
 * level function to this, except for the small blocks.
 *
 * @return the small block (mini stream) store for this filesystem
 */
public NPOIFSMiniStore getMiniStore() {
return _mini_store;
}
/**
 * add a new POIFSDocument to the FileSytem
 *
 * @param document the POIFSDocument being added
 */
void addDocument(final NPOIFSDocument document)
{
// Only the property is registered here; the document's data blocks
//  are managed elsewhere
_property_table.addProperty(document.getDocumentProperty());
}
/**
 * add a new DirectoryProperty to the FileSystem
 *
 * @param directory the DirectoryProperty being added
 */
void addDirectory(final DirectoryProperty directory)
{
_property_table.addProperty(directory);
}
/**
 * Create a new document to be added to the root directory
 *
 * @param stream the InputStream from which the document's data
 * will be obtained
 * @param name the name of the new POIFSDocument
 *
 * @return the new DocumentEntry
 *
 * @exception IOException on error creating the new POIFSDocument
 */
public DocumentEntry createDocument(final InputStream stream,
final String name)
throws IOException
{
// Convenience wrapper - the root directory node does the real work
return getRoot().createDocument(name, stream);
}
/**
 * create a new DocumentEntry in the root entry; the data will be
 * provided later
 *
 * @param name the name of the new DocumentEntry
 * @param size the size of the new DocumentEntry
 * @param writer the writer of the new DocumentEntry
 *
 * @return the new DocumentEntry
 *
 * @exception IOException on error creating the new entry
 */
public DocumentEntry createDocument(final String name, final int size,
final POIFSWriterListener writer)
throws IOException
{
// Convenience wrapper - the root directory node does the real work
return getRoot().createDocument(name, size, writer);
}
/**
 * create a new DirectoryEntry in the root directory
 *
 * @param name the name of the new DirectoryEntry
 *
 * @return the new DirectoryEntry
 *
 * @exception IOException on name duplication
 */
public DirectoryEntry createDirectory(final String name)
throws IOException
{
return getRoot().createDirectory(name);
}
/**
 * Write the filesystem out to the open file. Will thrown an
 * {@link IllegalArgumentException} if opened from an
 * {@link InputStream}.
 *
 * @exception IOException thrown on errors writing to the stream
 */
public void writeFilesystem() throws IOException
{
   // In-place saving is only possible when we were opened from a
   //  file backed data source; stream-opened filesystems must go
   //  through writeFilesystem(OutputStream)
   if(!(_data instanceof FileBackedDataSource)) {
      throw new IllegalArgumentException(
            "POIFS opened from an inputstream, so writeFilesystem() may " +
            "not be called. Use writeFilesystem(OutputStream) instead"
      );
   }
   syncWithDataSource();
}
/**
 * Write the filesystem out
 *
 * @param stream the OutputStream to which the filesystem will be
 * written
 *
 * @exception IOException thrown on errors writing to the stream
 */
public void writeFilesystem(final OutputStream stream)
throws IOException
{
// Have the datasource updated
syncWithDataSource();
// Now copy the contents to the stream
_data.copyTo(stream);
}
/**
 * Has our in-memory objects write their state
 *  to their backing blocks
 */
private void syncWithDataSource() throws IOException
{
   // HeaderBlock
   // The header lives at byte 0, BEFORE block 0 - getBlockAt() adds
   //  one for the header, so -1 addresses the header sector itself.
   //  (getBlockAt(0) would have clobbered the first data block and
   //  left the real header stale.)
   HeaderBlockWriter hbw = new HeaderBlockWriter(_header);
   hbw.writeBlock( getBlockAt(-1) );

   // BATs - write each FAT block back over its own backing sector
   for(BATBlock bat : _bat_blocks) {
      ByteBuffer block = getBlockAt(bat.getOurBlockIndex());
      BlockAllocationTableWriter.writeBlock(bat, block);
   }

   // SBATs - the mini store owns and syncs these
   _mini_store.syncWithDataSource();

   // Properties - rewritten into the directory stream
   _property_table.write(
         new NPOIFSStream(this, _header.getPropertyStart())
   );
}
/**
 * Closes the FileSystem, freeing any underlying files, streams
 * and buffers. After this, you will be unable to read or
 * write from the FileSystem.
 */
public void close() throws IOException {
_data.close();
}
/**
 * read in a file and write it back out again
 *
 * @param args names of the files; arg[ 0 ] is the input file,
 * arg[ 1 ] is the output file
 *
 * @exception IOException on a failure to read or write either file
 */
public static void main(String args[])
throws IOException
{
   if (args.length != 2)
   {
      System.err.println(
          "two arguments required: input filename and output filename");
      System.exit(1);
   }
   // Close both streams even if reading or writing fails part way
   FileInputStream istream = new FileInputStream(args[ 0 ]);
   try {
      FileOutputStream ostream = new FileOutputStream(args[ 1 ]);
      try {
         new NPOIFSFileSystem(istream).writeFilesystem(ostream);
      } finally {
         ostream.close();
      }
   } finally {
      istream.close();
   }
}
/**
 * Get the root entry
 *
 * @return the root entry (lazily created on first access)
 */
public DirectoryNode getRoot()
{
// Lazily built so the property table is fully loaded first
if (_root == null) {
_root = new DirectoryNode(_property_table.getRoot(), this, null);
}
return _root;
}
/**
 * open a document in the root entry's list of entries
 *
 * @param documentName the name of the document to be opened
 *
 * @return a newly opened DocumentInputStream
 *
 * @exception IOException if the document does not exist or the
 * name is that of a DirectoryEntry
 */
public DocumentInputStream createDocumentInputStream(
final String documentName)
throws IOException
{
return getRoot().createDocumentInputStream(documentName);
}
/**
 * remove an entry
 *
 * @param entry to be removed
 */
void remove(EntryNode entry)
{
// Only the property record is removed here; freeing the entry's
//  data blocks is handled elsewhere
_property_table.removeProperty(entry.getProperty());
}
/* ********** START begin implementation of POIFSViewable ********** */
/**
 * Get an array of objects, some of which may implement
 * POIFSViewable
 *
 * @return an array of Object; may not be null, but may be empty
 */
public Object [] getViewableArray()
{
   // Hand back an empty array unless the root prefers array viewing,
   //  in which case delegate to it
   if (!preferArray())
   {
      return new Object[0];
   }
   POIFSViewable root = (POIFSViewable) getRoot();
   return root.getViewableArray();
}
/**
 * Get an Iterator of objects, some of which may implement
 * POIFSViewable
 *
 * @return an Iterator; may not be null, but may have an empty
 * back end store
 */
public Iterator getViewableIterator()
{
   // An iterator only makes sense when the root doesn't prefer arrays
   if (preferArray())
   {
      return Collections.EMPTY_LIST.iterator();
   }
   POIFSViewable root = (POIFSViewable) getRoot();
   return root.getViewableIterator();
}
/**
 * Give viewers a hint as to whether to call getViewableArray or
 * getViewableIterator
 *
 * @return true if a viewer should call getViewableArray, false if
 * a viewer should call getViewableIterator
 */
public boolean preferArray()
{
   // Defer the choice to the root directory node
   POIFSViewable root = (POIFSViewable) getRoot();
   return root.preferArray();
}
/**
 * Provides a short description of the object, to be used when a
 * POIFSViewable object has not provided its contents.
 *
 * @return short description (a fixed label for this filesystem type)
 */
public String getShortDescription()
{
return "POIFS FileSystem";
}
/* ********** END begin implementation of POIFSViewable ********** */
/**
 * @return The Big Block size, normally 512 bytes, sometimes 4096 bytes
 */
public int getBigBlockSize() {
return bigBlockSize.getBigBlockSize();
}
/**
 * @return The Big Block size, normally 512 bytes, sometimes 4096 bytes,
 *         as the full details object rather than just the byte count
 */
public POIFSBigBlockSize getBigBlockSizeDetails() {
return bigBlockSize;
}
// BlockStore contract: the addressing unit of this store is the big block
protected int getBlockStoreBlockSize() {
return getBigBlockSize();
}
}

View File

@ -0,0 +1,211 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.List;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.property.RootProperty;
import org.apache.poi.poifs.storage.BATBlock;
import org.apache.poi.poifs.storage.BlockAllocationTableWriter;
import org.apache.poi.poifs.storage.HeaderBlock;
import org.apache.poi.poifs.storage.BATBlock.BATBlockAndIndex;
/**
 * This class handles the MiniStream (small block store)
 * in the NIO case for {@link NPOIFSFileSystem}
 *
 * Small blocks live packed inside a chain of big blocks (the "mini
 * stream") owned by the root property; this class maps small block
 * offsets onto that chain, and maintains the Small Block Allocation
 * Table (SBAT).
 */
public class NPOIFSMiniStore extends BlockStore
{
// The filesystem whose big blocks hold the mini stream
private NPOIFSFileSystem _filesystem;
// The chain of big blocks that contains all the small blocks
private NPOIFSStream _mini_stream;
// The SBAT sectors - the FAT for the small blocks
private List<BATBlock> _sbat_blocks;
private HeaderBlock _header;
private RootProperty _root;
protected NPOIFSMiniStore(NPOIFSFileSystem filesystem, RootProperty root,
List<BATBlock> sbats, HeaderBlock header)
{
this._filesystem = filesystem;
this._sbat_blocks = sbats;
this._header = header;
this._root = root;
// The mini stream's big block chain starts where the root property says
this._mini_stream = new NPOIFSStream(filesystem, root.getStartBlock());
}
/**
 * Load the block at the given offset.
 * NOTE: the returned buffer wraps a COPY of the small block's bytes,
 *  so writes to it do not reach the underlying file.
 */
protected ByteBuffer getBlockAt(final int offset) throws IOException {
// Which big block is this?
int byteOffset = offset * POIFSConstants.SMALL_BLOCK_SIZE;
int bigBlockNumber = byteOffset / _filesystem.getBigBlockSize();
int bigBlockOffset = byteOffset % _filesystem.getBigBlockSize();
// Now locate the data block for it
// (walk the mini stream's big block chain up to the one we need)
Iterator<ByteBuffer> it = _mini_stream.getBlockIterator();
for(int i=0; i<bigBlockNumber; i++) {
it.next();
}
ByteBuffer dataBlock = it.next();
// Our blocks are small, so duplicating it is fine
byte[] data = new byte[POIFSConstants.SMALL_BLOCK_SIZE];
dataBlock.position(
dataBlock.position() + bigBlockOffset
);
dataBlock.get(data, 0, data.length);
// Return a ByteBuffer on this
ByteBuffer miniBuffer = ByteBuffer.wrap(data);
return miniBuffer;
}
/**
 * Load the block, extending the underlying stream if needed
 * NOTE(review): extension is not implemented yet - offsets past the
 *  current mini stream end will fail in getBlockAt
 */
protected ByteBuffer createBlockIfNeeded(final int offset) throws IOException {
// TODO Extend the stream if needed
// TODO Needs append support on the underlying stream
return getBlockAt(offset);
}
/**
 * Returns the BATBlock that handles the specified offset,
 * and the relative index within it
 */
protected BATBlockAndIndex getBATBlockAndIndex(final int offset) {
// Same maths as the main FAT, but over the SBAT sector list
return BATBlock.getSBATBlockAndIndex(
offset, _header, _sbat_blocks
);
}
/**
 * Works out what small block follows the specified one.
 */
protected int getNextBlock(final int offset) {
BATBlockAndIndex bai = getBATBlockAndIndex(offset);
return bai.getBlock().getValueAt( bai.getIndex() );
}
/**
 * Changes the record of what small block follows the specified one.
 */
protected void setNextBlock(final int offset, final int nextBlock) {
BATBlockAndIndex bai = getBATBlockAndIndex(offset);
bai.getBlock().setValueAt(
bai.getIndex(), nextBlock
);
}
/**
 * Finds a free block, and returns its offset.
 * This method will extend the file if needed, and if doing
 * so, allocate new FAT blocks to address the extra space.
 */
protected int getFreeBlock() throws IOException {
int sectorsPerSBAT = _filesystem.getBigBlockSizeDetails().getBATEntriesPerBlock();
// First up, do we have any spare ones?
int offset = 0;
for(int i=0; i<_sbat_blocks.size(); i++) {
// Check this one
BATBlock sbat = _sbat_blocks.get(i);
if(sbat.hasFreeSectors()) {
// Claim one of them and return it
for(int j=0; j<sectorsPerSBAT; j++) {
int sbatValue = sbat.getValueAt(j);
if(sbatValue == POIFSConstants.UNUSED_BLOCK) {
// Bingo
return offset + j;
}
}
}
// Move onto the next SBAT
offset += sectorsPerSBAT;
}
// If we get here, then there aren't any
// free sectors in any of the SBATs
// So, we need to extend the chain and add another
// Create a new BATBlock
BATBlock newSBAT = BATBlock.createEmptyBATBlock(_filesystem.getBigBlockSizeDetails(), false);
// The SBAT itself is stored in a big block, claimed from the main FAT
int batForSBAT = _filesystem.getFreeBlock();
newSBAT.setOurBlockIndex(batForSBAT);
// Are we the first SBAT?
if(_header.getSBATCount() == 0) {
_header.setSBATStart(batForSBAT);
_header.setSBATBlockCount(1);
} else {
// Find the end of the SBAT stream, and add the sbat in there
ChainLoopDetector loopDetector = _filesystem.getChainLoopDetector();
int batOffset = _header.getSBATStart();
while(true) {
loopDetector.claim(batOffset);
int nextBat = _filesystem.getNextBlock(batOffset);
if(nextBat == POIFSConstants.END_OF_CHAIN) {
break;
}
batOffset = nextBat;
}
// Add it in at the end
_filesystem.setNextBlock(batOffset, batForSBAT);
// And update the count
_header.setSBATBlockCount(
_header.getSBATCount() + 1
);
}
// Finish allocating
_filesystem.setNextBlock(batForSBAT, POIFSConstants.END_OF_CHAIN);
_sbat_blocks.add(newSBAT);
// Return our first spot
// (the new SBAT's entries are all unused, so its first sector is free)
return offset;
}
@Override
protected ChainLoopDetector getChainLoopDetector() throws IOException {
// Sized from the root property - presumably the mini stream's
//  total size. TODO confirm units against ChainLoopDetector
return new ChainLoopDetector( _root.getSize() );
}
// BlockStore contract: the addressing unit here is the small block
protected int getBlockStoreBlockSize() {
return POIFSConstants.SMALL_BLOCK_SIZE;
}
/**
 * Writes the SBATs to their backing blocks
 */
protected void syncWithDataSource() throws IOException {
// SBATs live in ordinary big blocks, so write via the filesystem
for(BATBlock sbat : _sbat_blocks) {
ByteBuffer block = _filesystem.getBlockAt(sbat.getOurBlockIndex());
BlockAllocationTableWriter.writeBlock(sbat, block);
}
}
}

View File

@ -0,0 +1,224 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.filesystem.BlockStore.ChainLoopDetector;
import org.apache.poi.poifs.property.Property;
import org.apache.poi.poifs.storage.HeaderBlock;
/**
 * This handles reading and writing a stream within a
 *  {@link NPOIFSFileSystem}. It can supply an iterator
 *  to read blocks, and way to write out to existing and
 *  new blocks.
 * Most users will want a higher level version of this,
 *  which deals with properties to track which stream
 *  this is.
 * This only works on big block streams, it doesn't
 *  handle small block ones.
 * This uses the new NIO code
 *
 * TODO Implement a streaming write method, and append
 */
public class NPOIFSStream implements Iterable<ByteBuffer>
{
   /** The store (main filesystem or mini store) that owns our blocks */
   private BlockStore blockStore;
   /** First block of the chain, or END_OF_CHAIN for a new, unwritten stream */
   private int startBlock;

   /**
    * Constructor for an existing stream. It's up to you
    *  to know how to get the start block (eg from a
    *  {@link HeaderBlock} or a {@link Property})
    */
   public NPOIFSStream(BlockStore blockStore, int startBlock) {
      this.blockStore = blockStore;
      this.startBlock = startBlock;
   }

   /**
    * Constructor for a new stream. A start block won't
    *  be allocated until you begin writing to it.
    */
   public NPOIFSStream(BlockStore blockStore) {
      this.blockStore = blockStore;
      this.startBlock = POIFSConstants.END_OF_CHAIN;
   }

   /**
    * What block does this stream start at?
    * Will be {@link POIFSConstants#END_OF_CHAIN} for a
    *  new stream that hasn't been written to yet.
    */
   public int getStartBlock() {
      return startBlock;
   }

   /**
    * Returns an iterator that'll supply one {@link ByteBuffer}
    *  per block in the stream.
    */
   public Iterator<ByteBuffer> iterator() {
      return getBlockIterator();
   }

   public Iterator<ByteBuffer> getBlockIterator() {
      if(startBlock == POIFSConstants.END_OF_CHAIN) {
         throw new IllegalStateException(
               "Can't read from a new stream before it has been written to"
         );
      }
      return new StreamBlockByteBufferIterator(startBlock);
   }

   /**
    * Updates the contents of the stream to the new
    *  set of bytes.
    * Note - if this is property based, you'll still
    *  need to update the size in the property yourself
    */
   public void updateContents(byte[] contents) throws IOException {
      // How many blocks are we going to need?
      int blockSize = blockStore.getBlockStoreBlockSize();
      // Round up in integer maths. The previous Math.ceil(int/int)
      //  truncated BEFORE the ceil, silently dropping the partial
      //  final block of any contents not a multiple of the block size
      int blocks = (contents.length + blockSize - 1) / blockSize;

      // Make sure we don't encounter a loop whilst overwriting
      //  the existing blocks
      ChainLoopDetector loopDetector = blockStore.getChainLoopDetector();

      // Start writing
      int prevBlock = POIFSConstants.END_OF_CHAIN;
      int nextBlock = startBlock;
      for(int i=0; i<blocks; i++) {
         int thisBlock = nextBlock;

         // Allocate a block if needed, otherwise figure
         //  out what the next block will be
         if(thisBlock == POIFSConstants.END_OF_CHAIN) {
            thisBlock = blockStore.getFreeBlock();
            loopDetector.claim(thisBlock);

            // We're on the end of the chain
            nextBlock = POIFSConstants.END_OF_CHAIN;

            // Mark the previous block as carrying on to us if needed
            if(prevBlock != POIFSConstants.END_OF_CHAIN) {
               blockStore.setNextBlock(prevBlock, thisBlock);
            }
            blockStore.setNextBlock(thisBlock, POIFSConstants.END_OF_CHAIN);

            // If we've just written the first block on a
            //  new stream, save the start block offset
            if(this.startBlock == POIFSConstants.END_OF_CHAIN) {
               this.startBlock = thisBlock;
            }
         } else {
            loopDetector.claim(thisBlock);
            nextBlock = blockStore.getNextBlock(thisBlock);
         }

         // Write it, copying only as many bytes as remain so a
         //  partial final block doesn't read past the end of contents
         ByteBuffer buffer = blockStore.createBlockIfNeeded(thisBlock);
         int startAt = i * blockSize;
         buffer.put(contents, startAt, Math.min(blockSize, contents.length - startAt));

         // Update pointers
         prevBlock = thisBlock;
      }
      int lastBlock = prevBlock;

      // If we're overwriting, free any remaining blocks
      NPOIFSStream toFree = new NPOIFSStream(blockStore, nextBlock);
      toFree.free(loopDetector);

      // Mark the end of the stream, unless nothing at all was written
      //  (zero length contents never allocate a block, so there is no
      //  last block whose FAT entry could be terminated)
      if(lastBlock != POIFSConstants.END_OF_CHAIN) {
         blockStore.setNextBlock(lastBlock, POIFSConstants.END_OF_CHAIN);
      }
   }

   // TODO Streaming write support
   // TODO  then convert fixed sized write to use streaming internally
   // TODO Append write support (probably streaming)

   /**
    * Frees all blocks in the stream
    */
   public void free() throws IOException {
      ChainLoopDetector loopDetector = blockStore.getChainLoopDetector();
      free(loopDetector);
   }
   private void free(ChainLoopDetector loopDetector) {
      // Walk the chain, claiming each block against loops, and
      //  marking each FAT entry as unused as we go
      int nextBlock = startBlock;
      while(nextBlock != POIFSConstants.END_OF_CHAIN) {
         int thisBlock = nextBlock;
         loopDetector.claim(thisBlock);
         nextBlock = blockStore.getNextBlock(thisBlock);
         blockStore.setNextBlock(thisBlock, POIFSConstants.UNUSED_BLOCK);
      }
      this.startBlock = POIFSConstants.END_OF_CHAIN;
   }

   /**
    * Class that handles a streaming read of one stream
    */
   protected class StreamBlockByteBufferIterator implements Iterator<ByteBuffer> {
      private ChainLoopDetector loopDetector;
      private int nextBlock;

      protected StreamBlockByteBufferIterator(int firstBlock) {
         this.nextBlock = firstBlock;
         try {
            this.loopDetector = blockStore.getChainLoopDetector();
         } catch(IOException e) {
            // Iterator can't throw checked exceptions
            throw new RuntimeException(e);
         }
      }

      public boolean hasNext() {
         return nextBlock != POIFSConstants.END_OF_CHAIN;
      }

      public ByteBuffer next() {
         if(nextBlock == POIFSConstants.END_OF_CHAIN) {
            throw new IndexOutOfBoundsException("Can't read past the end of the stream");
         }
         try {
            loopDetector.claim(nextBlock);
            ByteBuffer data = blockStore.getBlockAt(nextBlock);
            nextBlock = blockStore.getNextBlock(nextBlock);
            return data;
         } catch(IOException e) {
            throw new RuntimeException(e);
         }
      }

      public void remove() {
         throw new UnsupportedOperationException();
      }
   }
}

View File

@ -0,0 +1,321 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import java.io.IOException;
import org.apache.poi.poifs.storage.DataInputBlock;
/**
 * This class provides methods to read a DocumentEntry managed by a
 * {@link POIFSFileSystem} instance.
 *
 * Reads are served from DataInputBlocks; multi-byte values that
 * straddle a block boundary are assembled across two blocks.
 *
 * @author Marc Johnson (mjohnson at apache dot org)
 */
public final class ODocumentInputStream extends DocumentInputStream {
/** current offset into the Document */
private int _current_offset;
/** current marked offset into the Document (used by mark and reset) */
private int _marked_offset;
/** the Document's size */
private int _document_size;
/** have we been closed? */
private boolean _closed;
/** the actual Document */
private POIFSDocument _document;
/** the data block containing the current stream pointer */
private DataInputBlock _currentBlock;
/**
 * Create an InputStream from the specified DocumentEntry
 *
 * @param document the DocumentEntry to be read
 *
 * @exception IOException if the DocumentEntry cannot be opened (like, maybe it has
 * been deleted?)
 */
public ODocumentInputStream(DocumentEntry document) throws IOException {
if (!(document instanceof DocumentNode)) {
throw new IOException("Cannot open internal document storage");
}
DocumentNode documentNode = (DocumentNode)document;
if(documentNode.getDocument() == null) {
throw new IOException("Cannot open internal document storage");
}
_current_offset = 0;
_marked_offset = 0;
_document_size = document.getSize();
_closed = false;
_document = documentNode.getDocument();
_currentBlock = getDataInputBlock(0);
}
/**
 * Create an InputStream from the specified Document
 *
 * @param document the Document to be read
 */
public ODocumentInputStream(POIFSDocument document) {
_current_offset = 0;
_marked_offset = 0;
_document_size = document.getSize();
_closed = false;
_document = document;
_currentBlock = getDataInputBlock(0);
}
@Override
public int available() {
// NOTE(review): throws an unchecked exception when closed, where
//  InputStream.available documents IOException - confirm intended
if (_closed) {
throw new IllegalStateException("cannot perform requested operation on a closed stream");
}
return _document_size - _current_offset;
}
@Override
public void close() {
_closed = true;
}
@Override
public void mark(int ignoredReadlimit) {
// The read limit is irrelevant - all data is already block-backed
_marked_offset = _current_offset;
}
// Fetches the block holding the byte at the given document offset
private DataInputBlock getDataInputBlock(int offset) {
return _document.getDataInputBlock(offset);
}
@Override
public int read() throws IOException {
dieIfClosed();
if (atEOD()) {
return EOF;
}
int result = _currentBlock.readUByte();
_current_offset++;
// Advance to the next block once this one is exhausted
if (_currentBlock.available() < 1) {
_currentBlock = getDataInputBlock(_current_offset);
}
return result;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
dieIfClosed();
if (b == null) {
throw new IllegalArgumentException("buffer must not be null");
}
// NOTE(review): off + len may overflow int for extreme values - confirm
if (off < 0 || len < 0 || b.length < off + len) {
throw new IndexOutOfBoundsException("can't read past buffer boundaries");
}
if (len == 0) {
return 0;
}
if (atEOD()) {
return EOF;
}
// Short read at end of document, per the InputStream contract
int limit = Math.min(available(), len);
readFully(b, off, limit);
return limit;
}
/**
 * Repositions this stream to the position at the time the mark() method was
 * last called on this input stream. If mark() has not been called this
 * method repositions the stream to its beginning.
 */
@Override
public void reset() {
_current_offset = _marked_offset;
_currentBlock = getDataInputBlock(_current_offset);
}
@Override
public long skip(long n) throws IOException {
dieIfClosed();
if (n < 0) {
return 0;
}
int new_offset = _current_offset + (int) n;
if (new_offset < _current_offset) {
// wrap around in converting a VERY large long to an int
new_offset = _document_size;
} else if (new_offset > _document_size) {
// Clamp to the end of the document
new_offset = _document_size;
}
long rval = new_offset - _current_offset;
_current_offset = new_offset;
_currentBlock = getDataInputBlock(_current_offset);
return rval;
}
private void dieIfClosed() throws IOException {
if (_closed) {
throw new IOException("cannot perform requested operation on a closed stream");
}
}
// True once the read pointer has reached the end of the document
private boolean atEOD() {
return _current_offset == _document_size;
}
// (sic - "Avaliable") Guards the typed read methods: not closed, and
//  enough bytes remain to satisfy the request
private void checkAvaliable(int requestedSize) {
if (_closed) {
throw new IllegalStateException("cannot perform requested operation on a closed stream");
}
if (requestedSize > _document_size - _current_offset) {
throw new RuntimeException("Buffer underrun - requested " + requestedSize
+ " bytes but " + (_document_size - _current_offset) + " was available");
}
}
@Override
public byte readByte() {
return (byte) readUByte();
}
@Override
public double readDouble() {
return Double.longBitsToDouble(readLong());
}
@Override
public short readShort() {
return (short) readUShort();
}
@Override
public void readFully(byte[] buf, int off, int len) {
checkAvaliable(len);
int blockAvailable = _currentBlock.available();
// Fast path: the whole request fits in the current block
if (blockAvailable > len) {
_currentBlock.readFully(buf, off, len);
_current_offset += len;
return;
}
// else read big amount in chunks, one block at a time
int remaining = len;
int writePos = off;
while (remaining > 0) {
boolean blockIsExpiring = remaining >= blockAvailable;
int reqSize;
if (blockIsExpiring) {
reqSize = blockAvailable;
} else {
reqSize = remaining;
}
_currentBlock.readFully(buf, writePos, reqSize);
remaining -= reqSize;
writePos += reqSize;
_current_offset += reqSize;
if (blockIsExpiring) {
if (_current_offset == _document_size) {
if (remaining > 0) {
throw new IllegalStateException(
"reached end of document stream unexpectedly");
}
// Exactly consumed the document - no next block to fetch
_currentBlock = null;
break;
}
_currentBlock = getDataInputBlock(_current_offset);
blockAvailable = _currentBlock.available();
}
}
}
@Override
public long readLong() {
checkAvaliable(SIZE_LONG);
int blockAvailable = _currentBlock.available();
long result;
if (blockAvailable > SIZE_LONG) {
result = _currentBlock.readLongLE();
} else {
// Value may straddle the block boundary - assemble from two blocks
DataInputBlock nextBlock = getDataInputBlock(_current_offset + blockAvailable);
if (blockAvailable == SIZE_LONG) {
result = _currentBlock.readLongLE();
} else {
result = nextBlock.readLongLE(_currentBlock, blockAvailable);
}
_currentBlock = nextBlock;
}
_current_offset += SIZE_LONG;
return result;
}
@Override
public int readInt() {
checkAvaliable(SIZE_INT);
int blockAvailable = _currentBlock.available();
int result;
if (blockAvailable > SIZE_INT) {
result = _currentBlock.readIntLE();
} else {
// Value may straddle the block boundary - assemble from two blocks
DataInputBlock nextBlock = getDataInputBlock(_current_offset + blockAvailable);
if (blockAvailable == SIZE_INT) {
result = _currentBlock.readIntLE();
} else {
result = nextBlock.readIntLE(_currentBlock, blockAvailable);
}
_currentBlock = nextBlock;
}
_current_offset += SIZE_INT;
return result;
}
@Override
public int readUShort() {
checkAvaliable(SIZE_SHORT);
int blockAvailable = _currentBlock.available();
int result;
if (blockAvailable > SIZE_SHORT) {
result = _currentBlock.readUShortLE();
} else {
// Value may straddle the block boundary - assemble from two blocks
DataInputBlock nextBlock = getDataInputBlock(_current_offset + blockAvailable);
if (blockAvailable == SIZE_SHORT) {
result = _currentBlock.readUShortLE();
} else {
result = nextBlock.readUShortLE(_currentBlock);
}
_currentBlock = nextBlock;
}
_current_offset += SIZE_SHORT;
return result;
}
@Override
public int readUByte() {
checkAvaliable(1);
int result = _currentBlock.readUByte();
_current_offset++;
// Advance to the next block once this one is exhausted
if (_currentBlock.available() < 1) {
_currentBlock = getDataInputBlock(_current_offset);
}
return result;
}
}

View File

@ -0,0 +1,279 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
import org.apache.poi.util.*;
import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
/**
 * Represents an Ole10Native record which is wrapped around certain binary
 * files being embedded in OLE2 documents.
 *
 * @author Rainer Schwarze
 */
public class Ole10Native {
  // (the fields as they appear in the raw record:)
  private final int totalSize;       // 4 bytes, total size of record not including this field
  private short flags1;              // 2 bytes, unknown, mostly [02 00]
  private final String label;        // ASCIIZ, stored in this field without the terminating zero
  private final String fileName;     // ASCIIZ, stored in this field without the terminating zero
  private short flags2;              // 2 bytes, unknown, mostly [00 00]
  // private byte unknown1Length;    // 1 byte, specifying the length of the following byte array (unknown1)
  private byte[] unknown1;           // see below
  private byte[] unknown2;           // 3 bytes, unknown, mostly [00 00 00]
  private final String command;      // ASCIIZ, stored in this field without the terminating zero
  private final int dataSize;        // 4 bytes (if space), size of following buffer
  private final byte[] dataBuffer;   // varying size, the actual native data
  private short flags3;              // some final flags? or zero terminators?, sometimes not there

  /** Name of the OLE2 stream that holds the raw Ole10Native record. */
  public static final String OLE10_NATIVE = "\u0001Ole10Native";

  /**
   * Creates an instance of this class from an embedded OLE Object. The OLE Object is expected
   * to include a stream "{01}Ole10Native" which contains the actual
   * data relevant for this class.
   *
   * @param poifs POI Filesystem object
   * @return Returns an instance of this class
   * @throws IOException on IO error
   * @throws Ole10NativeException on invalid or unexpected data format
   */
  public static Ole10Native createFromEmbeddedOleObject(POIFSFileSystem poifs) throws IOException, Ole10NativeException {
     // The presence of a "\u0001Ole10ItemName" stream marks the "plain"
     // variant of the record (raw payload with no filename/command fields).
     boolean plain = false;
     try {
        poifs.getRoot().getEntry("\u0001Ole10ItemName");
        plain = true;
     } catch (FileNotFoundException ex) {
        plain = false;
     }

     // Slurp the whole record stream into memory before parsing.
     DocumentInputStream dis = poifs.createDocumentInputStream(OLE10_NATIVE);
     ByteArrayOutputStream bos = new ByteArrayOutputStream();
     IOUtils.copy(dis, bos);
     byte[] data = bos.toByteArray();

     return new Ole10Native(data, 0, plain);
  }

  /**
   * Creates an instance and fills the fields based on the data in the given buffer.
   *
   * @param data   The buffer containing the Ole10Native record
   * @param offset The start offset of the record in the buffer
   * @throws Ole10NativeException on invalid or unexpected data format
   */
  public Ole10Native(byte[] data, int offset) throws Ole10NativeException {
     this(data, offset, false);
  }

  /**
   * Creates an instance and fills the fields based on the data in the given buffer.
   *
   * @param data   The buffer containing the Ole10Native record
   * @param offset The start offset of the record in the buffer
   * @param plain  Specified 'plain' format without filename
   * @throws Ole10NativeException on invalid or unexpected data format
   */
  public Ole10Native(byte[] data, int offset, boolean plain) throws Ole10NativeException {
     int ofs = offset;   // current offset, initialized to start

     if (data.length<offset+2) {
        throw new Ole10NativeException("data is too small");
     }

     totalSize = LittleEndian.getInt(data, ofs);
     ofs += LittleEndianConsts.INT_SIZE;

     if (plain) {
        // Plain format: everything after the 4-byte size field is raw payload.
        // NOTE(review): copies from absolute offset 4 rather than offset+4,
        // so this is only correct when offset == 0 - confirm with callers
        // before relying on a non-zero offset here.
        dataBuffer = new byte[totalSize-4];
        System.arraycopy(data, 4, dataBuffer, 0, dataBuffer.length);
        dataSize = totalSize - 4;

        // The plain format carries no name information, so synthesize a
        // label from the hex dump of the first 8 payload bytes.
        byte[] oleLabel = new byte[8];
        System.arraycopy(dataBuffer, 0, oleLabel, 0, Math.min(dataBuffer.length, 8));
        label = "ole-"+ HexDump.toHex(oleLabel);
        fileName = label;
        command = label;
     } else {
        flags1 = LittleEndian.getShort(data, ofs);
        ofs += LittleEndianConsts.SHORT_SIZE;

        // label and fileName are zero-terminated ASCII strings; the length
        // returned by getStringLength() includes the terminating zero.
        int len = getStringLength(data, ofs);
        label = StringUtil.getFromCompressedUnicode(data, ofs, len - 1);
        ofs += len;

        len = getStringLength(data, ofs);
        fileName = StringUtil.getFromCompressedUnicode(data, ofs, len - 1);
        ofs += len;

        flags2 = LittleEndian.getShort(data, ofs);
        ofs += LittleEndianConsts.SHORT_SIZE;

        // NOTE(review): unknown1 is allocated but never populated from
        // data, and ofs is advanced by len without accounting for the
        // length byte itself - this looks like an off-by-one; verify
        // against the [MS-OLEDS] Ole10Native layout before changing.
        len = LittleEndian.getUnsignedByte(data, ofs);
        unknown1 = new byte[len];
        ofs += len;

        // unknown2: a fixed 3 bytes, skipped (never copied out of data).
        len = 3;
        unknown2 = new byte[len];
        ofs += len;

        len = getStringLength(data, ofs);
        command = StringUtil.getFromCompressedUnicode(data, ofs, len - 1);
        ofs += len;

        // There must be strictly more than 4 bytes left in the record for
        // the dataSize field (and any payload) to be present.
        if (totalSize + LittleEndianConsts.INT_SIZE - ofs > LittleEndianConsts.INT_SIZE) {
           dataSize = LittleEndian.getInt(data, ofs);
           ofs += LittleEndianConsts.INT_SIZE;

           // Sanity check: the payload cannot be negative or larger than
           // the whole record.
           if (dataSize > totalSize || dataSize<0) {
              throw new Ole10NativeException("Invalid Ole10Native");
           }

           dataBuffer = new byte[dataSize];
           System.arraycopy(data, ofs, dataBuffer, 0, dataSize);
           ofs += dataSize;

           // Trailing flags only follow in records that also carried the
           // unknown1 bytes.
           if (unknown1.length > 0) {
              flags3 = LittleEndian.getShort(data, ofs);
              ofs += LittleEndianConsts.SHORT_SIZE;
           } else {
              flags3 = 0;
           }
        } else {
           throw new Ole10NativeException("Invalid Ole10Native");
        }
     }
  }

  /*
   * Helper - determine length of zero terminated string (ASCIIZ).
   * The returned length includes the terminating zero byte (or, when no
   * terminator is found, one byte past the end of the buffer).
   */
  private static int getStringLength(byte[] data, int ofs) {
     int len = 0;
     while (len+ofs<data.length && data[ofs + len] != 0) {
        len++;
     }
     len++;
     return len;
  }

  /**
   * Returns the value of the totalSize field - the total length of the structure
   * is totalSize + 4 (value of this field + size of this field).
   *
   * @return the totalSize
   */
  public int getTotalSize() {
     return totalSize;
  }

  /**
   * Returns flags1 - currently unknown - usually 0x0002.
   *
   * @return the flags1
   */
  public short getFlags1() {
     return flags1;
  }

  /**
   * Returns the label field - usually the name of the file (without directory) but
   * probably may be any name specified during packaging/embedding the data.
   *
   * @return the label
   */
  public String getLabel() {
     return label;
  }

  /**
   * Returns the fileName field - usually the name of the file being embedded
   * including the full path.
   *
   * @return the fileName
   */
  public String getFileName() {
     return fileName;
  }

  /**
   * Returns flags2 - currently unknown - mostly 0x0000.
   *
   * @return the flags2
   */
  public short getFlags2() {
     return flags2;
  }

  /**
   * Returns unknown1 field - currently unknown.
   * NOTE(review): with the current parsing code this array is always
   * zero-filled; see the note in the constructor.
   *
   * @return the unknown1
   */
  public byte[] getUnknown1() {
     return unknown1;
  }

  /**
   * Returns the unknown2 field - currently being a byte[3] - mostly {0, 0, 0}.
   *
   * @return the unknown2
   */
  public byte[] getUnknown2() {
     return unknown2;
  }

  /**
   * Returns the command field - usually the name of the file being embedded
   * including the full path, may be a command specified during embedding the file.
   *
   * @return the command
   */
  public String getCommand() {
     return command;
  }

  /**
   * Returns the size of the embedded file. If the size is 0 (zero), no data has been
   * embedded. To be sure, that no data has been embedded, check whether
   * {@link #getDataBuffer()} returns <code>null</code>.
   *
   * @return the dataSize
   */
  public int getDataSize() {
     return dataSize;
  }

  /**
   * Returns the buffer containing the embedded file's data, or <code>null</code>
   * if no data was embedded. Note that an embedding may provide information about
   * the data, but the actual data is not included. (So label, filename etc. are
   * available, but this method returns <code>null</code>.)
   *
   * @return the dataBuffer
   */
  public byte[] getDataBuffer() {
     return dataBuffer;
  }

  /**
   * Returns the flags3 - currently unknown.
   *
   * @return the flags3
   */
  public short getFlags3() {
     return flags3;
  }
}

View File

@ -0,0 +1,24 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.filesystem;
/**
 * Thrown when an Ole10Native record cannot be parsed because the raw
 * data is invalid or in an unexpected format.
 */
public class Ole10NativeException extends Exception {
  /**
   * @param message description of the parsing problem
   */
  public Ole10NativeException(String message) {
    super(message);
  }

  /**
   * Preserves the underlying cause so callers don't lose the original
   * stack trace when a lower-level failure is re-thrown as this type.
   *
   * @param message description of the parsing problem
   * @param cause   the underlying failure
   */
  public Ole10NativeException(String message, Throwable cause) {
    super(message, cause);
  }
}

File diff suppressed because it is too large Load Diff

View File

@ -21,6 +21,9 @@ package org.apache.poi.poifs.filesystem;
import java.io.File;
import org.apache.poi.util.POILogFactory;
import org.apache.poi.util.POILogger;
/**
* Class POIFSDocumentPath
*
@ -30,6 +33,8 @@ import java.io.File;
public class POIFSDocumentPath
{
private static final POILogger log = POILogFactory.getLogger(POIFSDocumentPath.class);
private String[] components;
private int hashcode = 0;
@ -125,12 +130,17 @@ public class POIFSDocumentPath
{
for (int j = 0; j < components.length; j++)
{
if ((components[ j ] == null)
|| (components[ j ].length() == 0))
if (components[ j ] == null)
{
throw new IllegalArgumentException(
"components cannot contain null or empty strings");
"components cannot contain null");
}
if (components[ j ].length() == 0)
{
log.log(POILogger.WARN, "Directory under " + path + " has an empty name, " +
"not all OLE2 readers will handle this file correctly!");
}
this.components[ j + path.components.length ] =
components[ j ];
}

View File

@ -31,6 +31,7 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.dev.POIFSViewable;
import org.apache.poi.poifs.property.DirectoryProperty;
@ -42,11 +43,12 @@ import org.apache.poi.poifs.storage.BlockAllocationTableWriter;
import org.apache.poi.poifs.storage.BlockList;
import org.apache.poi.poifs.storage.BlockWritable;
import org.apache.poi.poifs.storage.HeaderBlockConstants;
import org.apache.poi.poifs.storage.HeaderBlockReader;
import org.apache.poi.poifs.storage.HeaderBlock;
import org.apache.poi.poifs.storage.HeaderBlockWriter;
import org.apache.poi.poifs.storage.RawDataBlockList;
import org.apache.poi.poifs.storage.SmallBlockTableReader;
import org.apache.poi.poifs.storage.SmallBlockTableWriter;
import org.apache.poi.util.CloseIgnoringInputStream;
import org.apache.poi.util.IOUtils;
import org.apache.poi.util.LongField;
import org.apache.poi.util.POILogFactory;
@ -65,23 +67,6 @@ public class POIFSFileSystem
private static final POILogger _logger =
POILogFactory.getLogger(POIFSFileSystem.class);
private static final class CloseIgnoringInputStream extends InputStream {
private final InputStream _is;
public CloseIgnoringInputStream(InputStream is) {
_is = is;
}
public int read() throws IOException {
return _is.read();
}
public int read(byte[] b, int off, int len) throws IOException {
return _is.read(b, off, len);
}
public void close() {
// do nothing
}
}
/**
* Convenience method for clients that want to avoid the auto-close behaviour of the constructor.
*/
@ -97,14 +82,16 @@ public class POIFSFileSystem
* What big block size the file uses. Most files
* use 512 bytes, but a few use 4096
*/
private int bigBlockSize = POIFSConstants.BIG_BLOCK_SIZE;
private POIFSBigBlockSize bigBlockSize =
POIFSConstants.SMALLER_BIG_BLOCK_SIZE_DETAILS;
/**
* Constructor, intended for writing
*/
public POIFSFileSystem()
{
_property_table = new PropertyTable();
HeaderBlock header_block = new HeaderBlock(bigBlockSize);
_property_table = new PropertyTable(header_block);
_documents = new ArrayList();
_root = null;
}
@ -144,12 +131,12 @@ public class POIFSFileSystem
this();
boolean success = false;
HeaderBlockReader header_block_reader;
HeaderBlock header_block;
RawDataBlockList data_blocks;
try {
// read the header block from the stream
header_block_reader = new HeaderBlockReader(stream);
bigBlockSize = header_block_reader.getBigBlockSize();
header_block = new HeaderBlock(stream);
bigBlockSize = header_block.getBigBlockSize();
// read the rest of the stream into blocks
data_blocks = new RawDataBlockList(stream, bigBlockSize);
@ -161,23 +148,31 @@ public class POIFSFileSystem
// set up the block allocation table (necessary for the
// data_blocks to be manageable
new BlockAllocationTableReader(header_block_reader.getBATCount(),
header_block_reader.getBATArray(),
header_block_reader.getXBATCount(),
header_block_reader.getXBATIndex(),
new BlockAllocationTableReader(header_block.getBigBlockSize(),
header_block.getBATCount(),
header_block.getBATArray(),
header_block.getXBATCount(),
header_block.getXBATIndex(),
data_blocks);
// get property table from the document
PropertyTable properties =
new PropertyTable(header_block_reader.getPropertyStart(),
data_blocks);
new PropertyTable(header_block, data_blocks);
// init documents
processProperties(SmallBlockTableReader
.getSmallDocumentBlocks(data_blocks, properties
.getRoot(), header_block_reader
.getSBATStart()), data_blocks, properties.getRoot()
.getChildren(), null);
processProperties(
SmallBlockTableReader.getSmallDocumentBlocks(
bigBlockSize, data_blocks, properties.getRoot(),
header_block.getSBATStart()
),
data_blocks,
properties.getRoot().getChildren(),
null,
header_block.getPropertyStart()
);
// For whatever reason CLSID of root is always 0.
getRoot().setStorageClsid(properties.getRoot().getStorageClsid());
}
/**
* @param stream the stream to be closed
@ -307,11 +302,11 @@ public class POIFSFileSystem
// create the small block store, and the SBAT
SmallBlockTableWriter sbtw =
new SmallBlockTableWriter(_documents, _property_table.getRoot());
new SmallBlockTableWriter(bigBlockSize, _documents, _property_table.getRoot());
// create the block allocation table
BlockAllocationTableWriter bat =
new BlockAllocationTableWriter();
new BlockAllocationTableWriter(bigBlockSize);
// create a list of BATManaged objects: the documents plus the
// property table and the small block table
@ -349,7 +344,7 @@ public class POIFSFileSystem
int batStartBlock = bat.createBlocks();
// get the extended block allocation table blocks
HeaderBlockWriter header_block_writer = new HeaderBlockWriter();
HeaderBlockWriter header_block_writer = new HeaderBlockWriter(bigBlockSize);
BATBlock[] xbat_blocks =
header_block_writer.setBATBlocks(bat.countBlocks(),
batStartBlock);
@ -491,7 +486,8 @@ public class POIFSFileSystem
private void processProperties(final BlockList small_blocks,
final BlockList big_blocks,
final Iterator properties,
final DirectoryNode dir)
final DirectoryNode dir,
final int headerPropertiesStartAt)
throws IOException
{
while (properties.hasNext())
@ -511,7 +507,8 @@ public class POIFSFileSystem
processProperties(
small_blocks, big_blocks,
(( DirectoryProperty ) property).getChildren(), new_dir);
(( DirectoryProperty ) property).getChildren(),
new_dir, headerPropertiesStartAt);
}
else
{
@ -522,14 +519,15 @@ public class POIFSFileSystem
if (property.shouldUseSmallBlocks())
{
document =
new POIFSDocument(name, small_blocks
.fetchBlocks(startBlock), size);
new POIFSDocument(name,
small_blocks.fetchBlocks(startBlock, headerPropertiesStartAt),
size);
}
else
{
document =
new POIFSDocument(name,
big_blocks.fetchBlocks(startBlock),
big_blocks.fetchBlocks(startBlock, headerPropertiesStartAt),
size);
}
parent.createDocument(document);
@ -552,10 +550,7 @@ public class POIFSFileSystem
{
return (( POIFSViewable ) getRoot()).getViewableArray();
}
else
{
return new Object[ 0 ];
}
return new Object[ 0 ];
}
/**
@ -572,10 +567,7 @@ public class POIFSFileSystem
{
return (( POIFSViewable ) getRoot()).getViewableIterator();
}
else
{
return Collections.EMPTY_LIST.iterator();
}
return Collections.EMPTY_LIST.iterator();
}
/**
@ -607,7 +599,13 @@ public class POIFSFileSystem
* @return The Big Block size, normally 512 bytes, sometimes 4096 bytes
*/
public int getBigBlockSize() {
return bigBlockSize;
return bigBlockSize.getBigBlockSize();
}
/**
* @return The Big Block size, normally 512 bytes, sometimes 4096 bytes
*/
public POIFSBigBlockSize getBigBlockSizeDetails() {
return bigBlockSize;
}
/* ********** END begin implementation of POIFSViewable ********** */

View File

@ -0,0 +1,94 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.nio;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
 * A POIFS {@link DataSource} that keeps its contents in an in-memory
 * byte array, growing the array on demand as writes extend past its end.
 */
public class ByteArrayBackedDataSource extends DataSource {
   /** Backing storage; may be larger than the logical data size. */
   private byte[] contents;
   /** Number of valid bytes currently held in {@link #contents}. */
   private long dataLength;

   public ByteArrayBackedDataSource(byte[] data, int size) {
      this.contents = data;
      this.dataLength = size;
   }

   public ByteArrayBackedDataSource(byte[] data) {
      this(data, data.length);
   }

   public ByteBuffer read(int length, long position) {
      if (position >= dataLength) {
         throw new IndexOutOfBoundsException(
               "Unable to read " + length + " bytes from " +
               position + " in stream of length " + dataLength
         );
      }
      // Clamp the request to the bytes actually available past position.
      int available = (int) Math.min(length, dataLength - position);
      return ByteBuffer.wrap(contents, (int) position, available);
   }

   public void write(ByteBuffer src, long position) {
      long writeEnd = position + src.capacity();
      // Grow the backing array if the write runs past its current end.
      if (writeEnd > contents.length) {
         grow(writeEnd);
      }
      // Copy the buffer's contents into place.
      src.get(contents, (int) position, src.capacity());
      // The logical size only increases; it never shrinks on overwrite.
      if (writeEnd > dataLength) {
         dataLength = writeEnd;
      }
   }

   /**
    * Replaces the backing array with one of at least {@code minimumLength}
    * bytes, over-allocating (25% of current capacity, minimum 4k) so that
    * repeated small writes don't reallocate every time.
    */
   private void grow(long minimumLength) {
      long extra = minimumLength - contents.length;
      if (extra < contents.length * 0.25) {
         extra = (long) (contents.length * 0.25);
      }
      if (extra < 4096) {
         extra = 4096;
      }
      byte[] replacement = new byte[(int) (extra + contents.length)];
      System.arraycopy(contents, 0, replacement, 0, (int) dataLength);
      contents = replacement;
   }

   public void copyTo(OutputStream stream) throws IOException {
      stream.write(contents, 0, (int) dataLength);
   }

   public long size() {
      return dataLength;
   }

   public void close() {
      contents = null;
      dataLength = -1;
   }
}

View File

@ -0,0 +1,35 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.nio;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
 * Common definition of how we read and write bytes
 */
public abstract class DataSource {
   /**
    * Returns a buffer containing up to {@code length} bytes read from
    * {@code position} in the underlying storage.
    */
   public abstract ByteBuffer read(int length, long position) throws IOException;

   /**
    * Writes the contents of {@code src} starting at {@code position};
    * implementations may extend the storage as needed.
    */
   public abstract void write(ByteBuffer src, long position) throws IOException;

   /** Returns the current size, in bytes, of the stored data. */
   public abstract long size() throws IOException;

   /** Close the underlying stream */
   public abstract void close() throws IOException;

   /** Copies the contents to the specified OutputStream */
   public abstract void copyTo(OutputStream stream) throws IOException;
}

View File

@ -0,0 +1,88 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.nio;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;
import org.apache.poi.util.IOUtils;
/**
 * A POIFS {@link DataSource} backed by a File
 */
public class FileBackedDataSource extends DataSource {
   private FileChannel channel;

   public FileBackedDataSource(File file) throws FileNotFoundException {
      if (!file.exists()) {
         throw new FileNotFoundException(file.toString());
      }
      // NOTE(review): the channel is opened read-only ("r") even though
      // write() exists - writes on a file-constructed instance will fail;
      // confirm intended usage with callers.
      this.channel = (new RandomAccessFile(file, "r")).getChannel();
   }

   public FileBackedDataSource(FileChannel channel) {
      this.channel = channel;
   }

   public ByteBuffer read(int length, long position) throws IOException {
      if (position >= size()) {
         throw new IllegalArgumentException("Position " + position + " past the end of the file");
      }

      // Seek to the requested spot, then fill a fresh buffer.
      channel.position(position);
      ByteBuffer buffer = ByteBuffer.allocate(length);
      int bytesRead = IOUtils.readFully(channel, buffer);
      if (bytesRead == -1) {
         throw new IllegalArgumentException("Position " + position + " past the end of the file");
      }

      // Rewind so callers can consume from the start of the buffer.
      buffer.position(0);
      return buffer;
   }

   public void write(ByteBuffer src, long position) throws IOException {
      channel.write(src, position);
   }

   public void copyTo(OutputStream stream) throws IOException {
      // Wrap the OutputStream as a channel and let the filesystem do the
      // bulk transfer.
      WritableByteChannel sink = Channels.newChannel(stream);
      channel.transferTo(0, channel.size(), sink);
   }

   public long size() throws IOException {
      return channel.size();
   }

   public void close() throws IOException {
      channel.close();
   }
}

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,43 +15,34 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.property;
import java.util.*;
import java.io.IOException;
import org.apache.poi.poifs.storage.SmallDocumentBlock;
import java.util.*;
/**
* Directory property
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public class DirectoryProperty extends Property implements Parent { // TODO - fix instantiable superclass
public class DirectoryProperty
extends Property
implements Parent
{
/** List of Property instances */
private List<Property> _children;
// List of Property instances
private List _children;
// set of children's names
private Set _children_names;
/** set of children's names */
private Set<String> _children_names;
/**
* Default constructor
*
* @param name the name of the directory
*/
public DirectoryProperty(String name)
{
super();
_children = new ArrayList();
_children_names = new HashSet();
_children = new ArrayList<Property>();
_children_names = new HashSet<String>();
setName(name);
setSize(0);
setPropertyType(PropertyConstants.DIRECTORY_TYPE);
@ -67,13 +57,12 @@ public class DirectoryProperty
* @param array byte data
* @param offset offset into byte data
*/
protected DirectoryProperty(final int index, final byte [] array,
final int offset)
{
super(index, array, offset);
_children = new ArrayList();
_children_names = new HashSet();
_children = new ArrayList<Property>();
_children_names = new HashSet<String>();
}
/**
@ -84,8 +73,7 @@ public class DirectoryProperty
*
* @return true if the name change could be made, else false
*/
public boolean changeName(final Property property, final String newName)
public boolean changeName(Property property, String newName)
{
boolean result;
String oldName = property.getName();
@ -116,8 +104,7 @@ public class DirectoryProperty
*
* @return true if the Property could be deleted, else false
*/
public boolean deleteChild(final Property property)
public boolean deleteChild(Property property)
{
boolean result = _children.remove(property);
@ -128,9 +115,7 @@ public class DirectoryProperty
return result;
}
public static class PropertyComparator
implements Comparator
{
public static class PropertyComparator implements Comparator<Property> {
/**
* Object equality, implemented as object identity
@ -139,7 +124,6 @@ public class DirectoryProperty
*
* @return true if identical, else false
*/
public boolean equals(Object o)
{
return this == o;
@ -160,12 +144,11 @@ public class DirectoryProperty
* zero if o1 == o2,
* positive value if o1 > o2.
*/
public int compare(Object o1, Object o2)
public int compare(Property o1, Property o2)
{
String VBA_PROJECT = "_VBA_PROJECT";
String name1 = (( Property ) o1).getName();
String name2 = (( Property ) o2).getName();
String name1 = o1.getName();
String name2 = o2.getName();
int result = name1.length() - name2.length();
if (result == 0)
@ -200,14 +183,11 @@ public class DirectoryProperty
}
return result;
}
} // end private class PropertyComparator
/* ********** START extension of Property ********** */
}
/**
* @return true if a directory type Property
*/
public boolean isDirectory()
{
return true;
@ -217,13 +197,11 @@ public class DirectoryProperty
* Perform whatever activities need to be performed prior to
* writing
*/
protected void preWrite()
{
if (_children.size() > 0)
{
Property[] children =
( Property [] ) _children.toArray(new Property[ 0 ]);
Property[] children = _children.toArray(new Property[ 0 ]);
Arrays.sort(children, new PropertyComparator());
int midpoint = children.length / 2;
@ -259,17 +237,13 @@ public class DirectoryProperty
}
}
/* ********** END extension of Property ********** */
/* ********** START implementation of Parent ********** */
/**
* Get an iterator over the children of this Parent; all elements
* are instances of Property.
*
* @return Iterator of children; may refer to an empty collection
*/
public Iterator getChildren()
public Iterator<Property> getChildren()
{
return _children.iterator();
}
@ -282,7 +256,6 @@ public class DirectoryProperty
* @exception IOException if we already have a child with the same
* name
*/
public void addChild(final Property property)
throws IOException
{
@ -295,7 +268,4 @@ public class DirectoryProperty
_children_names.add(name);
_children.add(property);
}
/* ********** END implementation of Parent ********** */
} // end public class DirectoryProperty
}

View File

@ -0,0 +1,128 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.property;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.filesystem.NPOIFSFileSystem;
import org.apache.poi.poifs.filesystem.NPOIFSStream;
import org.apache.poi.poifs.storage.HeaderBlock;
/**
 * This class embodies the Property Table for a {@link NPOIFSFileSystem};
 * this is basically the directory for all of the documents in the
 * filesystem.
 */
public final class NPropertyTable extends PropertyTableBase {
   /** The filesystem's big-block size, needed to size reads and writes. */
   private POIFSBigBlockSize _bigBigBlockSize;

   /**
    * Writing constructor - creates an empty property table for a new
    * filesystem.
    *
    * @param headerBlock the header block of the new file
    */
   public NPropertyTable(HeaderBlock headerBlock)
   {
      super(headerBlock);
      _bigBigBlockSize = headerBlock.getBigBlockSize();
   }

   /**
    * reading constructor (used when we've read in a file and we want
    * to extract the property table from it). Populates the
    * properties thoroughly
    *
    * @param headerBlock the header block of the file
    * @param filesystem  the filesystem to read from
    *
    * @exception IOException if anything goes wrong (which should be
    *            a result of the input being NFG)
    */
   public NPropertyTable(final HeaderBlock headerBlock,
                         final NPOIFSFileSystem filesystem)
      throws IOException
   {
      super(
            headerBlock,
            buildProperties(
                  (new NPOIFSStream(filesystem, headerBlock.getPropertyStart())).iterator(),
                  headerBlock.getBigBlockSize()
            )
      );
      _bigBigBlockSize = headerBlock.getBigBlockSize();
   }

   /**
    * Decodes the property entries held in the raw property-stream blocks.
    *
    * @param dataSource   iterator over the raw property-stream blocks
    * @param bigBlockSize the big block size used by the file
    * @return the decoded properties, in stream order
    * @throws IOException if a block cannot be read
    */
   private static List<Property> buildProperties(final Iterator<ByteBuffer> dataSource,
         final POIFSBigBlockSize bigBlockSize) throws IOException
   {
      List<Property> properties = new ArrayList<Property>();
      while(dataSource.hasNext()) {
         ByteBuffer bb = dataSource.next();

         // The property factory needs a full-block byte array; use the
         // buffer's own array when it maps exactly onto one block,
         // otherwise copy the block out.
         byte[] data;
         if(bb.hasArray() && bb.arrayOffset() == 0 &&
               bb.array().length == bigBlockSize.getBigBlockSize()) {
            data = bb.array();
         } else {
            data = new byte[bigBlockSize.getBigBlockSize()];
            bb.get(data, 0, data.length);
         }
         PropertyFactory.convertToProperties(data, properties);
      }
      return properties;
   }

   /**
    * Return the number of BigBlock's this instance uses
    *
    * @return count of BigBlock instances
    */
   public int countBlocks()
   {
      int size = _properties.size() * POIFSConstants.PROPERTY_SIZE;
      int blockSize = _bigBigBlockSize.getBigBlockSize();
      // Round up to whole blocks. The previous Math.ceil(size / blockSize)
      // did integer division first, so the ceiling never took effect and a
      // partly-filled final block was not counted.
      return (size + blockSize - 1) / blockSize;
   }

   /**
    * Writes the properties out into the given low-level stream
    */
   public void write(NPOIFSStream stream) throws IOException {
      // TODO - Use a streaming write
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      for(Property property : _properties) {
         property.writeData(baos);
      }
      stream.updateContents(baos.toByteArray());

      // Update the start position if needed
      if(getStartBlock() != stream.getStartBlock()) {
         setStartBlock(stream.getStartBlock());
      }
   }
}

View File

@ -39,9 +39,7 @@ import org.apache.poi.util.ShortField;
* @author Marc Johnson (mjohnson at apache dot org)
*/
public abstract class Property
implements Child, POIFSViewable
{
public abstract class Property implements Child, POIFSViewable {
static final private byte _default_fill = ( byte ) 0x00;
static final private int _name_size_offset = 0x40;
static final private int _max_name_length =
@ -67,7 +65,7 @@ public abstract class Property
static final protected byte _NODE_RED = 0;
// documents must be at least this size to be stored in big blocks
static final private int _big_block_minimum_bytes = 4096;
static final private int _big_block_minimum_bytes = POIFSConstants.BIG_BLOCK_MINIMUM_DOCUMENT_SIZE;
private String _name;
private ShortField _name_size;
private ByteField _property_type;
@ -88,10 +86,6 @@ public abstract class Property
private Child _next_child;
private Child _previous_child;
/**
* Default constructor
*/
protected Property()
{
_raw_data = new byte[ POIFSConstants.PROPERTY_SIZE ];
@ -129,8 +123,7 @@ public abstract class Property
* @param array byte data
* @param offset offset into byte data
*/
protected Property(final int index, final byte [] array, final int offset)
protected Property(int index, byte [] array, int offset)
{
_raw_data = new byte[ POIFSConstants.PROPERTY_SIZE ];
System.arraycopy(array, offset, _raw_data, 0,
@ -187,8 +180,7 @@ public abstract class Property
* @exception IOException on problems writing to the specified
* stream.
*/
public void writeData(final OutputStream stream)
public void writeData(OutputStream stream)
throws IOException
{
stream.write(_raw_data);
@ -200,8 +192,7 @@ public abstract class Property
*
* @param startBlock the start block index
*/
public void setStartBlock(final int startBlock)
public void setStartBlock(int startBlock)
{
_start_block.set(startBlock, _raw_data);
}
@ -209,7 +200,6 @@ public abstract class Property
/**
* @return the start block
*/
public int getStartBlock()
{
return _start_block.get();
@ -220,7 +210,6 @@ public abstract class Property
*
* @return size in bytes
*/
public int getSize()
{
return _size.get();
@ -232,7 +221,6 @@ public abstract class Property
*
* @return true if the size is less than _big_block_minimum_bytes
*/
public boolean shouldUseSmallBlocks()
{
return Property.isSmall(_size.get());
@ -246,8 +234,7 @@ public abstract class Property
* @return true if the length is less than
* _big_block_minimum_bytes
*/
public static boolean isSmall(final int length)
public static boolean isSmall(int length)
{
return length < _big_block_minimum_bytes;
}
@ -257,7 +244,6 @@ public abstract class Property
*
* @return property name as String
*/
public String getName()
{
return _name;
@ -266,7 +252,6 @@ public abstract class Property
/**
* @return true if a directory type Property
*/
abstract public boolean isDirectory();
/**
@ -284,7 +269,7 @@ public abstract class Property
*
* @param name the new name
*/
protected final void setName(final String name)
protected void setName(String name)
{
char[] char_array = name.toCharArray();
int limit = Math.min(char_array.length, _max_name_length);
@ -329,8 +314,7 @@ public abstract class Property
*
* @param propertyType the property type (root, file, directory)
*/
protected void setPropertyType(final byte propertyType)
protected void setPropertyType(byte propertyType)
{
_property_type.set(propertyType, _raw_data);
}
@ -340,8 +324,7 @@ public abstract class Property
*
* @param nodeColor the node color (red or black)
*/
protected void setNodeColor(final byte nodeColor)
protected void setNodeColor(byte nodeColor)
{
_node_color.set(nodeColor, _raw_data);
}
@ -351,8 +334,7 @@ public abstract class Property
*
* @param child the child property's index in the Property Table
*/
protected void setChildProperty(final int child)
protected void setChildProperty(int child)
{
_child_property.set(child, _raw_data);
}
@ -362,7 +344,6 @@ public abstract class Property
*
* @return child property index
*/
protected int getChildIndex()
{
return _child_property.get();
@ -373,8 +354,7 @@ public abstract class Property
*
* @param size the size of the document, in bytes
*/
protected void setSize(final int size)
protected void setSize(int size)
{
_size.set(size, _raw_data);
}
@ -385,8 +365,7 @@ public abstract class Property
* @param index this Property's index within its containing
* Property Table
*/
protected void setIndex(final int index)
protected void setIndex(int index)
{
_index = index;
}
@ -396,7 +375,6 @@ public abstract class Property
*
* @return the index of this Property within its Property Table
*/
protected int getIndex()
{
return _index;
@ -406,7 +384,6 @@ public abstract class Property
* Perform whatever activities need to be performed prior to
* writing
*/
abstract protected void preWrite();
/**
@ -414,7 +391,6 @@ public abstract class Property
*
* @return index of next sibling
*/
int getNextChildIndex()
{
return _next_property.get();
@ -425,7 +401,6 @@ public abstract class Property
*
* @return index of previous sibling
*/
int getPreviousChildIndex()
{
return _previous_property.get();
@ -438,20 +413,16 @@ public abstract class Property
*
* @return true if the index is valid
*/
static boolean isValidIndex(int index)
{
return index != _NO_INDEX;
}
/* ********** START implementation of Child ********** */
/**
* Get the next Child, if any
*
* @return the next Child; may return null
*/
public Child getNextChild()
{
return _next_child;
@ -462,7 +433,6 @@ public abstract class Property
*
* @return the previous Child; may return null
*/
public Child getPreviousChild()
{
return _previous_child;
@ -474,8 +444,7 @@ public abstract class Property
* @param child the new 'next' child; may be null, which has the
* effect of saying there is no 'next' child
*/
public void setNextChild(final Child child)
public void setNextChild(Child child)
{
_next_child = child;
_next_property.set((child == null) ? _NO_INDEX
@ -489,8 +458,7 @@ public abstract class Property
* @param child the new 'previous' child; may be null, which has
* the effect of saying there is no 'previous' child
*/
public void setPreviousChild(final Child child)
public void setPreviousChild(Child child)
{
_previous_child = child;
_previous_property.set((child == null) ? _NO_INDEX
@ -498,16 +466,12 @@ public abstract class Property
.getIndex(), _raw_data);
}
/* ********** END implementation of Child ********** */
/* ********** START begin implementation of POIFSViewable ********** */
/**
* Get an array of objects, some of which may implement
* POIFSViewable
*
* @return an array of Object; may not be null, but may be empty
*/
public Object [] getViewableArray()
{
Object[] results = new Object[ 5 ];
@ -518,11 +482,11 @@ public abstract class Property
long time = _days_1.get();
time <<= 32;
time += (( long ) _seconds_1.get()) & 0x0000FFFFL;
time += _seconds_1.get() & 0x0000FFFFL;
results[ 3 ] = "Time 1 = " + time;
time = _days_2.get();
time <<= 32;
time += (( long ) _seconds_2.get()) & 0x0000FFFFL;
time += _seconds_2.get() & 0x0000FFFFL;
results[ 4 ] = "Time 2 = " + time;
return results;
}
@ -534,7 +498,6 @@ public abstract class Property
* @return an Iterator; may not be null, but may have an empty
* back end store
*/
public Iterator getViewableIterator()
{
return Collections.EMPTY_LIST.iterator();
@ -547,7 +510,6 @@ public abstract class Property
* @return true if a viewer should call getViewableArray, false if
* a viewer should call getViewableIterator
*/
public boolean preferArray()
{
return true;
@ -559,7 +521,6 @@ public abstract class Property
*
* @return short description
*/
public String getShortDescription()
{
StringBuffer buffer = new StringBuffer();
@ -567,7 +528,4 @@ public abstract class Property
buffer.append("Property: \"").append(getName()).append("\"");
return buffer.toString();
}
/* ********** END begin implementation of POIFSViewable ********** */
} // end public abstract class Property
}

View File

@ -28,7 +28,7 @@ import org.apache.poi.poifs.storage.ListManagedBlock;
/**
* Factory for turning an array of RawDataBlock instances containing
* Proprty data into an array of proper Property objects.
* Property data into an array of proper Property objects.
*
* The array produced may be sparse, in that any portion of data that
* should correspond to a Property, but which does not map to a proper
@ -40,7 +40,6 @@ import org.apache.poi.poifs.storage.ListManagedBlock;
class PropertyFactory
{
// no need for an accessible constructor
private PropertyFactory()
{
@ -56,48 +55,52 @@ class PropertyFactory
*
* @exception IOException if any of the blocks are empty
*/
static List convertToProperties(ListManagedBlock [] blocks)
static List<Property> convertToProperties(ListManagedBlock [] blocks)
throws IOException
{
List properties = new ArrayList();
List<Property> properties = new ArrayList<Property>();
for (int j = 0; j < blocks.length; j++)
{
byte[] data = blocks[ j ].getData();
int property_count = data.length
/ POIFSConstants.PROPERTY_SIZE;
int offset = 0;
for (int k = 0; k < property_count; k++)
{
switch (data[ offset + PropertyConstants.PROPERTY_TYPE_OFFSET ])
{
case PropertyConstants.DIRECTORY_TYPE :
properties
.add(new DirectoryProperty(properties.size(),
data, offset));
break;
case PropertyConstants.DOCUMENT_TYPE :
properties.add(new DocumentProperty(properties.size(),
data, offset));
break;
case PropertyConstants.ROOT_TYPE :
properties.add(new RootProperty(properties.size(),
data, offset));
break;
default :
properties.add(null);
break;
}
offset += POIFSConstants.PROPERTY_SIZE;
}
for (int j = 0; j < blocks.length; j++) {
byte[] data = blocks[ j ].getData();
convertToProperties(data, properties);
}
return properties;
}
static void convertToProperties(byte[] data, List<Property> properties)
throws IOException
{
int property_count = data.length / POIFSConstants.PROPERTY_SIZE;
int offset = 0;
for (int k = 0; k < property_count; k++) {
switch (data[ offset + PropertyConstants.PROPERTY_TYPE_OFFSET ]) {
case PropertyConstants.DIRECTORY_TYPE :
properties.add(
new DirectoryProperty(properties.size(), data, offset)
);
break;
case PropertyConstants.DOCUMENT_TYPE :
properties.add(
new DocumentProperty(properties.size(), data, offset)
);
break;
case PropertyConstants.ROOT_TYPE :
properties.add(
new RootProperty(properties.size(), data, offset)
);
break;
default :
properties.add(null);
break;
}
offset += POIFSConstants.PROPERTY_SIZE;
}
}
} // end package scope class PropertyFactory

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,45 +15,33 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.property;
import java.io.IOException;
import java.io.OutputStream;
import java.util.*;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.filesystem.BATManaged;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.filesystem.POIFSFileSystem;
import org.apache.poi.poifs.storage.BlockWritable;
import org.apache.poi.poifs.storage.HeaderBlock;
import org.apache.poi.poifs.storage.PropertyBlock;
import org.apache.poi.poifs.storage.RawDataBlock;
import org.apache.poi.poifs.storage.RawDataBlockList;
/**
* This class embodies the Property Table for the filesystem; this is
* basically the dsirectory for all of the documents in the
* This class embodies the Property Table for the {@link POIFSFileSystem};
* this is basically the directory for all of the documents in the
* filesystem.
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public final class PropertyTable extends PropertyTableBase implements BlockWritable {
private POIFSBigBlockSize _bigBigBlockSize;
private BlockWritable[] _blocks;
public class PropertyTable
implements BATManaged, BlockWritable
{
private int _start_block;
private List _properties;
private BlockWritable[] _blocks;
/**
* Default constructor
*/
public PropertyTable()
public PropertyTable(HeaderBlock headerBlock)
{
_start_block = POIFSConstants.END_OF_CHAIN;
_properties = new ArrayList();
addProperty(new RootProperty());
super(headerBlock);
_bigBigBlockSize = headerBlock.getBigBlockSize();
_blocks = null;
}
@ -63,68 +50,32 @@ public class PropertyTable
* to extract the property table from it). Populates the
* properties thoroughly
*
* @param startBlock the first block of the property table
* @param headerBlock the header block of the file
* @param blockList the list of blocks
*
* @exception IOException if anything goes wrong (which should be
* a result of the input being NFG)
*/
public PropertyTable(final int startBlock,
public PropertyTable(final HeaderBlock headerBlock,
final RawDataBlockList blockList)
throws IOException
{
_start_block = POIFSConstants.END_OF_CHAIN;
super(
headerBlock,
PropertyFactory.convertToProperties(
blockList.fetchBlocks(headerBlock.getPropertyStart(), -1)
)
);
_bigBigBlockSize = headerBlock.getBigBlockSize();
_blocks = null;
_properties =
PropertyFactory
.convertToProperties(blockList.fetchBlocks(startBlock));
populatePropertyTree(( DirectoryProperty ) _properties.get(0));
}
/**
* Add a property to the list of properties we manage
*
* @param property the new Property to manage
*/
public void addProperty(final Property property)
{
_properties.add(property);
}
/**
* Remove a property from the list of properties we manage
*
* @param property the Property to be removed
*/
public void removeProperty(final Property property)
{
_properties.remove(property);
}
/**
* Get the root property
*
* @return the root property
*/
public RootProperty getRoot()
{
// it's always the first element in the List
return ( RootProperty ) _properties.get(0);
}
/**
* Prepare to be written
*/
public void preWrite()
{
Property[] properties =
( Property [] ) _properties.toArray(new Property[ 0 ]);
Property[] properties = _properties.toArray(new Property[_properties.size()]);
// give each property its index
for (int k = 0; k < properties.length; k++)
@ -133,7 +84,7 @@ public class PropertyTable
}
// allocate the blocks for the property table
_blocks = PropertyBlock.createPropertyBlockArray(_properties);
_blocks = PropertyBlock.createPropertyBlockArray(_bigBigBlockSize, _properties);
// prepare each property for writing
for (int k = 0; k < properties.length; k++)
@ -142,82 +93,17 @@ public class PropertyTable
}
}
/**
* Get the start block for the property table
*
* @return start block index
*/
public int getStartBlock()
{
return _start_block;
}
private void populatePropertyTree(DirectoryProperty root)
throws IOException
{
int index = root.getChildIndex();
if (!Property.isValidIndex(index))
{
// property has no children
return;
}
Stack children = new Stack();
children.push(_properties.get(index));
while (!children.empty())
{
Property property = ( Property ) children.pop();
root.addChild(property);
if (property.isDirectory())
{
populatePropertyTree(( DirectoryProperty ) property);
}
index = property.getPreviousChildIndex();
if (Property.isValidIndex(index))
{
children.push(_properties.get(index));
}
index = property.getNextChildIndex();
if (Property.isValidIndex(index))
{
children.push(_properties.get(index));
}
}
}
/* ********** START implementation of BATManaged ********** */
/**
* Return the number of BigBlock's this instance uses
*
* @return count of BigBlock instances
*/
public int countBlocks()
{
return (_blocks == null) ? 0
: _blocks.length;
}
/**
* Set the start block for this instance
*
* @param index index into the array of BigBlock instances making
* up the the filesystem
*/
public void setStartBlock(final int index)
{
_start_block = index;
}
/* ********** END implementation of BATManaged ********** */
/* ********** START implementation of BlockWritable ********** */
/**
* Write the storage to an OutputStream
*
@ -227,7 +113,6 @@ public class PropertyTable
* @exception IOException on problems writing to the specified
* stream
*/
public void writeBlocks(final OutputStream stream)
throws IOException
{
@ -239,7 +124,4 @@ public class PropertyTable
}
}
}
/* ********** END implementation of BlockWritable ********** */
} // end public class PropertyTable
}

View File

@ -0,0 +1,153 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.property;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Stack;
import org.apache.poi.poifs.filesystem.BATManaged;
import org.apache.poi.poifs.storage.HeaderBlock;
/**
* This class embodies the Property Table for the filesystem,
* which looks up entries in the filesystem to their
* chain of blocks.
* This is the core support, there are implementations
* for the different block schemes as needed.
*/
public abstract class PropertyTableBase implements BATManaged {
private final HeaderBlock _header_block;
protected final List<Property> _properties;
/**
 * Creating constructor: builds an empty property table for a new
 * filesystem, pre-populated with just the mandatory root entry.
 *
 * @param header_block the header block the table's start sector
 *        will be recorded into
 */
public PropertyTableBase(final HeaderBlock header_block)
{
    _header_block = header_block;
    _properties = new ArrayList<Property>();
    addProperty(new RootProperty());
}
/**
 * Reading constructor (used when we've read in a file and we want
 * to extract the property table from it). Populates the
 * properties thoroughly, wiring up the parent/child tree.
 *
 * @param header_block the header block of the file
 * @param properties the properties as read from the file, in table
 *        order (the root entry is assumed to be at index 0)
 *
 * @exception IOException if anything goes wrong (which should be
 *            a result of the input being NFG)
 */
public PropertyTableBase(final HeaderBlock header_block,
                         final List<Property> properties)
    throws IOException
{
    _header_block = header_block;
    _properties = properties;
    // index 0 is always the root directory entry
    populatePropertyTree( (DirectoryProperty)_properties.get(0));
}
/**
* Add a property to the list of properties we manage
*
* @param property the new Property to manage
*/
public void addProperty(Property property)
{
_properties.add(property);
}
/**
* Remove a property from the list of properties we manage
*
* @param property the Property to be removed
*/
public void removeProperty(final Property property)
{
_properties.remove(property);
}
/**
* Get the root property
*
* @return the root property
*/
public RootProperty getRoot()
{
// it's always the first element in the List
return ( RootProperty ) _properties.get(0);
}
/**
 * Walks the sibling/child index chains stored in the raw property
 * entries, attaching each child Property to its parent directory.
 * Siblings are linked as a binary tree (previous/next indexes), so
 * traversal uses an explicit stack; sub-directories are handled by
 * recursing into this method.
 */
private void populatePropertyTree(DirectoryProperty root)
    throws IOException
{
    int index = root.getChildIndex();

    if (!Property.isValidIndex(index))
    {
        // property has no children
        return;
    }
    Stack<Property> children = new Stack<Property>();

    children.push(_properties.get(index));
    while (!children.empty())
    {
        // NOTE(review): a null entry here (unconvertible raw property)
        //  would NPE on addChild — input is assumed well-formed
        Property property = children.pop();
        root.addChild(property);
        if (property.isDirectory())
        {
            // directories carry their own child chains
            populatePropertyTree(( DirectoryProperty ) property);
        }
        // push both sibling branches of the binary tree
        index = property.getPreviousChildIndex();
        if (Property.isValidIndex(index))
        {
            children.push(_properties.get(index));
        }
        index = property.getNextChildIndex();
        if (Property.isValidIndex(index))
        {
            children.push(_properties.get(index));
        }
    }
}
/**
* Get the start block for the property table
*
* @return start block index
*/
public int getStartBlock()
{
return _header_block.getPropertyStart();
}
/**
 * Set the start block for this instance
 *
 * @param index index into the array of BigBlock instances making
 *        up the filesystem
 */
public void setStartBlock(final int index)
{
    _header_block.setPropertyStart(index);
}
}

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,13 +15,8 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.property;
import java.util.*;
import java.io.IOException;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.storage.SmallDocumentBlock;
@ -31,18 +25,12 @@ import org.apache.poi.poifs.storage.SmallDocumentBlock;
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public class RootProperty
extends DirectoryProperty
{
/**
* Default constructor
*/
public final class RootProperty extends DirectoryProperty {
private static final String NAME = "Root Entry";
RootProperty()
{
super("Root Entry");
super(NAME);
// overrides
setNodeColor(_NODE_BLACK);
@ -57,7 +45,6 @@ public class RootProperty
* @param array byte data
* @param offset offset into byte data
*/
protected RootProperty(final int index, final byte [] array,
final int offset)
{
@ -69,10 +56,17 @@ public class RootProperty
*
* @param size size in terms of small blocks
*/
public void setSize(int size)
{
super.setSize(SmallDocumentBlock.calcSize(size));
}
} // end public class RootProperty
/**
* Returns the fixed name "Root Entry", as the
* raw property doesn't have a real name set
*/
@Override
public String getName() {
return NAME;
}
}

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,18 +15,17 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.util.IntegerField;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
/**
* A block of block allocation table entries. BATBlocks are created
@ -35,36 +33,105 @@ import org.apache.poi.util.LittleEndianConsts;
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public final class BATBlock extends BigBlock {
/**
* For a regular fat block, these are 128 / 1024
* next sector values.
* For a XFat (DIFat) block, these are 127 / 1023
* next sector values, then a chaining value.
*/
private int[] _values;
public class BATBlock
extends BigBlock
{
private static final int _entries_per_block =
POIFSConstants.BIG_BLOCK_SIZE / LittleEndianConsts.INT_SIZE;
private static final int _entries_per_xbat_block = _entries_per_block
- 1;
private static final int _xbat_chain_offset =
_entries_per_xbat_block * LittleEndianConsts.INT_SIZE;
private static final byte _default_value = ( byte ) 0xFF;
private IntegerField[] _fields;
private byte[] _data;
/**
* Does this BATBlock have any free sectors in it?
*/
private boolean _has_free_sectors;
/**
* Where in the file are we?
*/
private int ourBlockIndex;
/**
* Create a single instance initialized with default values
*/
private BATBlock()
private BATBlock(POIFSBigBlockSize bigBlockSize)
{
_data = new byte[ POIFSConstants.BIG_BLOCK_SIZE ];
Arrays.fill(_data, _default_value);
_fields = new IntegerField[ _entries_per_block ];
int offset = 0;
super(bigBlockSize);
for (int j = 0; j < _entries_per_block; j++)
{
_fields[ j ] = new IntegerField(offset);
offset += LittleEndianConsts.INT_SIZE;
int _entries_per_block = bigBlockSize.getBATEntriesPerBlock();
_values = new int[_entries_per_block];
_has_free_sectors = true;
Arrays.fill(_values, POIFSConstants.UNUSED_BLOCK);
}
/**
* Create a single instance initialized (perhaps partially) with entries
*
* @param entries the array of block allocation table entries
* @param start_index the index of the first entry to be written
* to the block
* @param end_index the index, plus one, of the last entry to be
* written to the block (writing is for all index
* k, start_index <= k < end_index)
*/
private BATBlock(POIFSBigBlockSize bigBlockSize, final int [] entries,
final int start_index, final int end_index)
{
this(bigBlockSize);
for (int k = start_index; k < end_index; k++) {
_values[k - start_index] = entries[k];
}
// Do we have any free sectors?
if(end_index - start_index == _values.length) {
recomputeFree();
}
}
/**
 * Re-scans the table to work out whether any unused
 * (free) entries remain in this block.
 */
private void recomputeFree() {
    _has_free_sectors = false;
    for (int value : _values) {
        if (value == POIFSConstants.UNUSED_BLOCK) {
            _has_free_sectors = true;
            break;
        }
    }
}
/**
 * Create a single BATBlock from the byte buffer, which must hold at least
 * one big block of data to be read.
 */
public static BATBlock createBATBlock(final POIFSBigBlockSize bigBlockSize, ByteBuffer data)
{
    // Start from an all-unused block, then overwrite every entry
    BATBlock block = new BATBlock(bigBlockSize);

    // Each entry is a 4-byte little-endian int
    byte[] scratch = new byte[LittleEndian.INT_SIZE];
    for (int entry = 0; entry < block._values.length; entry++) {
        data.get(scratch);
        block._values[entry] = LittleEndian.getInt(scratch);
    }
    block.recomputeFree();

    // All done
    return block;
}
/**
 * Creates a single BATBlock, with all the values set to empty.
 *
 * @param isXBAT if true, lay the block out as an XBAT (DIFat) block,
 *        i.e. terminate its reserved chain entry with END_OF_CHAIN
 */
public static BATBlock createEmptyBATBlock(final POIFSBigBlockSize bigBlockSize, boolean isXBAT) {
    BATBlock block = new BATBlock(bigBlockSize);
    if(isXBAT) {
        block.setXBATChain(bigBlockSize, POIFSConstants.END_OF_CHAIN);
    }
    return block;
}
/**
@ -75,17 +142,17 @@ public class BATBlock
*
* @return the newly created array of BATBlocks
*/
public static BATBlock [] createBATBlocks(final int [] entries)
public static BATBlock [] createBATBlocks(final POIFSBigBlockSize bigBlockSize, final int [] entries)
{
int block_count = calculateStorageRequirements(entries.length);
int block_count = calculateStorageRequirements(bigBlockSize, entries.length);
BATBlock[] blocks = new BATBlock[ block_count ];
int index = 0;
int remaining = entries.length;
int _entries_per_block = bigBlockSize.getBATEntriesPerBlock();
for (int j = 0; j < entries.length; j += _entries_per_block)
{
blocks[ index++ ] = new BATBlock(entries, j,
blocks[ index++ ] = new BATBlock(bigBlockSize, entries, j,
(remaining > _entries_per_block)
? j + _entries_per_block
: entries.length);
@ -104,21 +171,23 @@ public class BATBlock
* @return the newly created array of BATBlocks
*/
public static BATBlock [] createXBATBlocks(final int [] entries,
public static BATBlock [] createXBATBlocks(final POIFSBigBlockSize bigBlockSize,
final int [] entries,
final int startBlock)
{
int block_count =
calculateXBATStorageRequirements(entries.length);
calculateXBATStorageRequirements(bigBlockSize, entries.length);
BATBlock[] blocks = new BATBlock[ block_count ];
int index = 0;
int remaining = entries.length;
int _entries_per_xbat_block = bigBlockSize.getXBATEntriesPerBlock();
if (block_count != 0)
{
for (int j = 0; j < entries.length; j += _entries_per_xbat_block)
{
blocks[ index++ ] =
new BATBlock(entries, j,
new BATBlock(bigBlockSize, entries, j,
(remaining > _entries_per_xbat_block)
? j + _entries_per_xbat_block
: entries.length);
@ -126,9 +195,9 @@ public class BATBlock
}
for (index = 0; index < blocks.length - 1; index++)
{
blocks[ index ].setXBATChain(startBlock + index + 1);
blocks[ index ].setXBATChain(bigBlockSize, startBlock + index + 1);
}
blocks[ index ].setXBATChain(POIFSConstants.END_OF_CHAIN);
blocks[ index ].setXBATChain(bigBlockSize, POIFSConstants.END_OF_CHAIN);
}
return blocks;
}
@ -141,9 +210,9 @@ public class BATBlock
*
* @return the number of BATBlocks needed
*/
public static int calculateStorageRequirements(final int entryCount)
public static int calculateStorageRequirements(final POIFSBigBlockSize bigBlockSize, final int entryCount)
{
int _entries_per_block = bigBlockSize.getBATEntriesPerBlock();
return (entryCount + _entries_per_block - 1) / _entries_per_block;
}
@ -155,69 +224,124 @@ public class BATBlock
*
* @return the number of XBATBlocks needed
*/
public static int calculateXBATStorageRequirements(final int entryCount)
public static int calculateXBATStorageRequirements(final POIFSBigBlockSize bigBlockSize, final int entryCount)
{
int _entries_per_xbat_block = bigBlockSize.getXBATEntriesPerBlock();
return (entryCount + _entries_per_xbat_block - 1)
/ _entries_per_xbat_block;
}
/**
* @return number of entries per block
*/
public static final int entriesPerBlock()
{
return _entries_per_block;
}
/**
* @return number of entries per XBAT block
*/
public static final int entriesPerXBATBlock()
{
return _entries_per_xbat_block;
}
/**
* @return offset of chain index of XBAT block
*/
public static final int getXBATChainOffset()
{
return _xbat_chain_offset;
}
private void setXBATChain(int chainIndex)
{
_fields[ _entries_per_xbat_block ].set(chainIndex, _data);
}
/**
* Create a single instance initialized (perhaps partially) with entries
* Calculates the maximum size of a file which is addressable given the
* number of FAT (BAT) sectors specified. (We don't care if those BAT
* blocks come from the 109 in the header, or from header + XBATS, it
* won't affect the calculation)
*
* @param entries the array of block allocation table entries
* @param start_index the index of the first entry to be written
* to the block
* @param end_index the index, plus one, of the last entry to be
* written to the block (writing is for all index
* k, start_index <= k < end_index)
* The actual file size will be between [size of fatCount-1 blocks] and
* [size of fatCount blocks].
* For 512 byte block sizes, this means we may over-estimate by up to 65kb.
* For 4096 byte block sizes, this means we may over-estimate by up to 4mb
*/
public static int calculateMaximumSize(final POIFSBigBlockSize bigBlockSize,
final int numBATs) {
int size = 1; // Header isn't FAT addressed
private BATBlock(final int [] entries, final int start_index,
final int end_index)
{
this();
for (int k = start_index; k < end_index; k++)
{
_fields[ k - start_index ].set(entries[ k ], _data);
}
// The header has up to 109 BATs, and extra ones are referenced
// from XBATs
// However, all BATs can contain 128/1024 blocks
size += (numBATs * bigBlockSize.getBATEntriesPerBlock());
// So far we've been in sector counts, turn into bytes
return size * bigBlockSize.getBigBlockSize();
}
public static int calculateMaximumSize(final HeaderBlock header)
{
return calculateMaximumSize(header.getBigBlockSize(), header.getBATCount());
}
/**
 * Returns the BATBlock that handles the specified offset,
 * and the relative index within it.
 * The List of BATBlocks must be in sequential order
 *
 * @param offset the (non-negative) sector offset to look up
 * @param header the header block, which supplies the block size
 * @param bats all the BAT blocks, in file order
 */
public static BATBlockAndIndex getBATBlockAndIndex(final int offset,
      final HeaderBlock header, final List<BATBlock> bats) {
   POIFSBigBlockSize bigBlockSize = header.getBigBlockSize();
   int entriesPerBlock = bigBlockSize.getBATEntriesPerBlock();

   // Plain integer division already truncates; the previous
   //  (int)Math.floor(int/int) was a misleading no-op. Also avoids
   //  calling getBATEntriesPerBlock() twice.
   int whichBAT = offset / entriesPerBlock;
   int index = offset % entriesPerBlock;
   return new BATBlockAndIndex( index, bats.get(whichBAT) );
}
/**
 * Returns the BATBlock that handles the specified offset,
 * and the relative index within it, for the mini stream.
 * The List of BATBlocks must be in sequential order
 *
 * @param offset the (non-negative) mini-sector offset to look up
 * @param header the header block, which supplies the block size
 * @param sbats all the small-BAT blocks, in chain order
 */
public static BATBlockAndIndex getSBATBlockAndIndex(final int offset,
      final HeaderBlock header, final List<BATBlock> sbats) {
   POIFSBigBlockSize bigBlockSize = header.getBigBlockSize();
   int entriesPerBlock = bigBlockSize.getBATEntriesPerBlock();

   // SBATs are so much easier, as they're chained streams
   // Integer division already truncates; the previous
   //  (int)Math.floor(int/int) was a misleading no-op
   int whichSBAT = offset / entriesPerBlock;
   int index = offset % entriesPerBlock;
   return new BATBlockAndIndex( index, sbats.get(whichSBAT) );
}
private void setXBATChain(final POIFSBigBlockSize bigBlockSize, int chainIndex)
{
int _entries_per_xbat_block = bigBlockSize.getXBATEntriesPerBlock();
_values[ _entries_per_xbat_block ] = chainIndex;
}
/**
* Does this BATBlock have any free sectors in it, or
* is it full?
*/
public boolean hasFreeSectors() {
return _has_free_sectors;
}
/**
 * Retrieve the BAT entry at the given offset within this block,
 * complaining clearly if the offset lies beyond the block's entries.
 */
public int getValueAt(int relativeOffset) {
    if (relativeOffset < _values.length) {
        return _values[relativeOffset];
    }
    throw new ArrayIndexOutOfBoundsException(
        "Unable to fetch offset " + relativeOffset + " as the " +
        "BAT only contains " + _values.length + " entries"
    );
}
/**
 * Update the BAT entry at the given offset, keeping the cached
 * "has free sectors" flag in sync with the change.
 */
public void setValueAt(int relativeOffset, int value) {
    int previous = _values[relativeOffset];
    _values[relativeOffset] = value;

    if (value == POIFSConstants.UNUSED_BLOCK) {
        // Just freed a sector, so spares definitely exist now
        _has_free_sectors = true;
    } else if (previous == POIFSConstants.UNUSED_BLOCK) {
        // May have consumed the last free sector - re-check
        recomputeFree();
    }
}
/**
 * Record where in the file we live
 *
 * @param index the big block index at which this BATBlock is stored
 */
public void setOurBlockIndex(int index) {
    this.ourBlockIndex = index;
}
/**
 * Retrieve where in the file we live
 *
 * @return the big block index previously recorded via setOurBlockIndex
 */
public int getOurBlockIndex() {
    return ourBlockIndex;
}
/* ********** START extension of BigBlock ********** */
/**
/**
* Write the block's data to an OutputStream
*
* @param stream the OutputStream to which the stored data should
@ -226,13 +350,51 @@ public class BATBlock
* @exception IOException on problems writing to the specified
* stream
*/
/**
 * Write the block's data to an OutputStream.
 * The merged text contained both the legacy {@code doWriteData(stream, _data)}
 * call and the newer serialize-based write, which would have emitted the
 * block's contents twice; only the serialize() path is correct and kept.
 *
 * @param stream the OutputStream to which the stored data should
 *        be written
 * @throws IOException on problems writing to the specified stream
 */
void writeData(final OutputStream stream)
    throws IOException
{
    // Serialise our entries and save them out in one go
    stream.write( serialize() );
}
/**
 * Write the block's serialized data into the supplied buffer, at the
 * buffer's current position.
 *
 * @param block buffer to receive one big block's worth of data
 * @throws IOException declared for consistency with the stream variant;
 *         the ByteBuffer put itself does not raise it
 */
void writeData(final ByteBuffer block)
    throws IOException
{
    // Save it out
    block.put( serialize() );
}
/**
 * Renders this BAT's entries as little-endian 4-byte ints packed into a
 * byte array exactly one big block in size.
 */
private byte[] serialize() {
    // Allocate a full, zero-filled big block
    byte[] data = new byte[ bigBlockSize.getBigBlockSize() ];

    // Pack each entry at its computed 4-byte slot
    for (int i = 0; i < _values.length; i++) {
        LittleEndian.putInt(data, i * LittleEndian.INT_SIZE, _values[i]);
    }
    return data;
}
/* ********** END extension of BigBlock ********** */
} // end public class BATBlock
/**
 * Pairs a {@link BATBlock} with the index of a particular entry inside
 * it, as produced by the block-and-index lookup helpers.
 */
public static class BATBlockAndIndex {
    private final BATBlock block;
    private final int index;

    private BATBlockAndIndex(int index, BATBlock block) {
        this.index = index;
        this.block = block;
    }

    /** @return the entry's position within the block */
    public int getIndex() {
        return index;
    }

    /** @return the BATBlock that holds the entry */
    public BATBlock getBlock() {
        return block;
    }
}
}

View File

@ -21,8 +21,8 @@ package org.apache.poi.poifs.storage;
/**
* Abstract base class of all POIFS block storage classes. All
* extensions of BigBlock should write 512 bytes of data when
* requested to write their data.
* extensions of BigBlock should write 512 or 4096 bytes of data when
* requested to write their data (as per their BigBlockSize).
*
* This class has package scope, as there is no reason at this time to
* make the class public.
@ -33,9 +33,21 @@ package org.apache.poi.poifs.storage;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
abstract class BigBlock
implements BlockWritable
{
/**
* Either 512 bytes ({@link POIFSConstants#SMALLER_BIG_BLOCK_SIZE})
* or 4096 bytes ({@link POIFSConstants#LARGER_BIG_BLOCK_SIZE})
*/
protected POIFSBigBlockSize bigBlockSize;
protected BigBlock(POIFSBigBlockSize bigBlockSize) {
this.bigBlockSize = bigBlockSize;
}
/**
* Default implementation of write for extending classes that

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,13 +15,13 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.IOException;
import java.util.*;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.util.IntList;
import org.apache.poi.util.LittleEndian;
@ -43,10 +42,22 @@ import org.apache.poi.util.LittleEndianConsts;
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public final class BlockAllocationTableReader {
public class BlockAllocationTableReader
{
private IntList _entries;
/**
* Maximum size (in blocks) of the allocation table as supported by
* POI.<br/>
*
* This constant has been chosen to help POI identify corrupted data in the
* header block (rather than crash immediately with {@link OutOfMemoryError}
* ). It's not clear if the compound document format actually specifies any
* upper limits. For files with 512 byte blocks, having an allocation table
* of 65,335 blocks would correspond to a total file size of 4GB. Needless
* to say, POI probably cannot handle files anywhere near that size.
*/
private static final int MAX_BLOCK_COUNT = 65535;
private final IntList _entries;
private POIFSBigBlockSize bigBlockSize;
/**
* create a BlockAllocationTableReader for an existing filesystem. Side
@ -66,33 +77,41 @@ public class BlockAllocationTableReader
* @exception IOException if, in trying to create the table, we
* encounter logic errors
*/
public BlockAllocationTableReader(POIFSBigBlockSize bigBlockSize, int block_count, int [] block_array,
int xbat_count, int xbat_index, BlockList raw_block_list) throws IOException {
this(bigBlockSize);
public BlockAllocationTableReader(final int block_count,
final int [] block_array,
final int xbat_count,
final int xbat_index,
final BlockList raw_block_list)
throws IOException
{
this();
if (block_count <= 0)
{
throw new IOException(
"Illegal block count; minimum count is 1, got " + block_count
+ " instead");
}
sanityCheckBlockCount(block_count);
// acquire raw data blocks containing the BAT block data
RawDataBlock blocks[] = new RawDataBlock[ block_count ];
// We want to get the whole of the FAT table
// To do this:
// * Work through raw_block_list, which points to the
// first (up to) 109 BAT blocks
// * Jump to the XBAT offset, and read in XBATs which
// point to more BAT blocks
int limit = Math.min(block_count, block_array.length);
int block_index;
// This will hold all of the BAT blocks in order
RawDataBlock blocks[] = new RawDataBlock[ block_count ];
// Process the first (up to) 109 BAT blocks
for (block_index = 0; block_index < limit; block_index++)
{
// Check that the sector number of the BAT block is a valid one
int nextOffset = block_array[ block_index ];
if(nextOffset > raw_block_list.blockCount()) {
throw new IOException("Your file contains " + raw_block_list.blockCount() +
" sectors, but the initial DIFAT array at index " + block_index +
" referenced block # " + nextOffset + ". This isn't allowed and " +
" your file is corrupt");
}
// Record the sector number of this BAT block
blocks[ block_index ] =
( RawDataBlock ) raw_block_list
.remove(block_array[ block_index ]);
( RawDataBlock ) raw_block_list.remove(nextOffset);
}
// Process additional BAT blocks via the XBATs
if (block_index < block_count)
{
@ -103,9 +122,12 @@ public class BlockAllocationTableReader
"BAT count exceeds limit, yet XBAT index indicates no valid entries");
}
int chain_index = xbat_index;
int max_entries_per_block = BATBlock.entriesPerXBATBlock();
int chain_index_offset = BATBlock.getXBATChainOffset();
int max_entries_per_block = bigBlockSize.getXBATEntriesPerBlock();
int chain_index_offset = bigBlockSize.getNextXBATChainOffset();
// Each XBAT block contains either:
// (maximum number of sector indexes) + index of next XBAT
// some sector indexes + FREE sectors to max # + EndOfChain
for (int j = 0; j < xbat_count; j++)
{
limit = Math.min(block_count - block_index,
@ -132,8 +154,8 @@ public class BlockAllocationTableReader
throw new IOException("Could not find all blocks");
}
// now that we have all of the raw data blocks, go through and
// create the indices
// Now that we have all of the raw data blocks which make
// up the FAT, go through and create the indices
setEntries(blocks, raw_block_list);
}
@ -145,26 +167,32 @@ public class BlockAllocationTableReader
*
* @exception IOException
*/
BlockAllocationTableReader(final ListManagedBlock [] blocks,
final BlockList raw_block_list)
throws IOException
{
this();
BlockAllocationTableReader(POIFSBigBlockSize bigBlockSize, ListManagedBlock[] blocks, BlockList raw_block_list)
throws IOException {
this(bigBlockSize);
setEntries(blocks, raw_block_list);
}
/**
* Constructor BlockAllocationTableReader
*
*
*/
BlockAllocationTableReader()
{
BlockAllocationTableReader(POIFSBigBlockSize bigBlockSize) {
this.bigBlockSize = bigBlockSize;
_entries = new IntList();
}
/**
 * Validates a block count read from a file header.
 *
 * @param block_count the number of blocks claimed by the header
 * @throws IOException if the count is non-positive, or is so large that
 *         it exceeds {@code MAX_BLOCK_COUNT} (a likely sign of a
 *         corrupted header rather than a genuinely huge file)
 */
public static void sanityCheckBlockCount(int block_count) throws IOException {
    if (block_count <= 0) {
        throw new IOException(
            "Illegal block count; minimum count is 1, got " + block_count + " instead");
    }
    if (block_count > MAX_BLOCK_COUNT) {
        throw new IOException(
            "Block count " + block_count + " is too high. POI maximum is " + MAX_BLOCK_COUNT + ".");
    }
}
/**
* walk the entries from a specified point and return the
* associated blocks. The associated blocks are removed from the
@ -177,21 +205,43 @@ public class BlockAllocationTableReader
*
* @exception IOException if there is a problem acquiring the blocks
*/
ListManagedBlock [] fetchBlocks(final int startBlock,
final BlockList blockList)
throws IOException
{
List blocks = new ArrayList();
ListManagedBlock[] fetchBlocks(int startBlock, int headerPropertiesStartBlock,
BlockList blockList) throws IOException {
List<ListManagedBlock> blocks = new ArrayList<ListManagedBlock>();
int currentBlock = startBlock;
boolean firstPass = true;
ListManagedBlock dataBlock = null;
while (currentBlock != POIFSConstants.END_OF_CHAIN)
{
blocks.add(blockList.remove(currentBlock));
currentBlock = _entries.get(currentBlock);
// Process the chain from the start to the end
// Normally we have header, data, end
// Sometimes we have data, header, end
// For those cases, stop at the header, not the end
while (currentBlock != POIFSConstants.END_OF_CHAIN) {
try {
// Grab the data at the current block offset
dataBlock = blockList.remove(currentBlock);
blocks.add(dataBlock);
// Now figure out which block we go to next
currentBlock = _entries.get(currentBlock);
firstPass = false;
} catch(IOException e) {
if(currentBlock == headerPropertiesStartBlock) {
// Special case where things are in the wrong order
System.err.println("Warning, header block comes after data blocks in POIFS block listing");
currentBlock = POIFSConstants.END_OF_CHAIN;
} else if(currentBlock == 0 && firstPass) {
// Special case where the termination isn't done right
// on an empty set
System.err.println("Warning, incorrectly terminated empty data blocks in POIFS block listing (should end at -2, ended at 0)");
currentBlock = POIFSConstants.END_OF_CHAIN;
} else {
// Ripple up
throw e;
}
}
}
return ( ListManagedBlock [] ) blocks
.toArray(new ListManagedBlock[ 0 ]);
return blocks.toArray(new ListManagedBlock[blocks.size()]);
}
// methods for debugging reader
@ -203,19 +253,14 @@ public class BlockAllocationTableReader
*
* @return true if the specific block is used, else false
*/
boolean isUsed(int index) {
boolean isUsed(final int index)
{
boolean rval = false;
try
{
rval = _entries.get(index) != -1;
try {
return _entries.get(index) != -1;
} catch (IndexOutOfBoundsException e) {
// ignored
return false;
}
catch (IndexOutOfBoundsException ignored)
{
}
return rval;
}
/**
@ -229,18 +274,11 @@ public class BlockAllocationTableReader
*
* @exception IOException if the current block is unused
*/
int getNextBlockIndex(final int index)
throws IOException
{
if (isUsed(index))
{
int getNextBlockIndex(int index) throws IOException {
if (isUsed(index)) {
return _entries.get(index);
}
else
{
throw new IOException("index " + index + " is unused");
}
throw new IOException("index " + index + " is unused");
}
/**
@ -249,15 +287,9 @@ public class BlockAllocationTableReader
* @param blocks the array of blocks containing the indices
* @param raw_blocks the list of blocks being managed. Unused
* blocks will be eliminated from the list
*
* @exception IOException
*/
private void setEntries(final ListManagedBlock [] blocks,
final BlockList raw_blocks)
throws IOException
{
int limit = BATBlock.entriesPerBlock();
private void setEntries(ListManagedBlock[] blocks, BlockList raw_blocks) throws IOException {
int limit = bigBlockSize.getBATEntriesPerBlock();
for (int block_index = 0; block_index < blocks.length; block_index++)
{
@ -281,5 +313,4 @@ public class BlockAllocationTableReader
}
raw_blocks.setBAT(this);
}
} // end class BlockAllocationTableReader
}

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,19 +15,16 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.*;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.filesystem.BATManaged;
import org.apache.poi.util.IntList;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
/**
* This class manages and creates the Block Allocation Table, which is
@ -45,23 +41,21 @@ import org.apache.poi.util.LittleEndianConsts;
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public class BlockAllocationTableWriter
implements BlockWritable, BATManaged
{
public final class BlockAllocationTableWriter implements BlockWritable, BATManaged {
private IntList _entries;
private BATBlock[] _blocks;
private int _start_block;
private POIFSBigBlockSize _bigBlockSize;
/**
* create a BlockAllocationTableWriter
*/
public BlockAllocationTableWriter()
public BlockAllocationTableWriter(POIFSBigBlockSize bigBlockSize)
{
_start_block = POIFSConstants.END_OF_CHAIN;
_entries = new IntList();
_blocks = new BATBlock[ 0 ];
_bigBlockSize = bigBlockSize;
_start_block = POIFSConstants.END_OF_CHAIN;
_entries = new IntList();
_blocks = new BATBlock[ 0 ];
}
/**
@ -69,7 +63,6 @@ public class BlockAllocationTableWriter
*
* @return start block index of BAT blocks
*/
public int createBlocks()
{
int xbat_blocks = 0;
@ -78,12 +71,13 @@ public class BlockAllocationTableWriter
while (true)
{
int calculated_bat_blocks =
BATBlock.calculateStorageRequirements(bat_blocks
BATBlock.calculateStorageRequirements(_bigBlockSize,
bat_blocks
+ xbat_blocks
+ _entries.size());
int calculated_xbat_blocks =
HeaderBlockWriter
.calculateXBATStorageRequirements(calculated_bat_blocks);
HeaderBlockWriter.calculateXBATStorageRequirements(
_bigBlockSize, calculated_bat_blocks);
if ((bat_blocks == calculated_bat_blocks)
&& (xbat_blocks == calculated_xbat_blocks))
@ -92,11 +86,8 @@ public class BlockAllocationTableWriter
// stable ... we're OK
break;
}
else
{
bat_blocks = calculated_bat_blocks;
xbat_blocks = calculated_xbat_blocks;
}
bat_blocks = calculated_bat_blocks;
xbat_blocks = calculated_xbat_blocks;
}
int startBlock = allocateSpace(bat_blocks);
@ -112,7 +103,6 @@ public class BlockAllocationTableWriter
*
* @return the starting index of the blocks
*/
public int allocateSpace(final int blockCount)
{
int startBlock = _entries.size();
@ -136,7 +126,6 @@ public class BlockAllocationTableWriter
*
* @return the starting block index
*/
public int getStartBlock()
{
return _start_block;
@ -145,14 +134,11 @@ public class BlockAllocationTableWriter
/**
* create the BATBlocks
*/
void simpleCreateBlocks()
{
_blocks = BATBlock.createBATBlocks(_entries.toArray());
_blocks = BATBlock.createBATBlocks(_bigBlockSize, _entries.toArray());
}
/* ********** START implementation of BlockWritable ********** */
/**
* Write the storage to an OutputStream
*
@ -162,7 +148,6 @@ public class BlockAllocationTableWriter
* @exception IOException on problems writing to the specified
* stream
*/
public void writeBlocks(final OutputStream stream)
throws IOException
{
@ -172,15 +157,20 @@ public class BlockAllocationTableWriter
}
}
/* ********** END implementation of BlockWritable ********** */
/* ********** START implementation of BATManaged ********** */
/**
* Write the BAT into its associated block
*/
public static void writeBlock(final BATBlock bat, final ByteBuffer block)
throws IOException
{
bat.writeData(block);
}
/**
* Return the number of BigBlock's this instance uses
*
* @return count of BigBlock instances
*/
public int countBlocks()
{
return _blocks.length;
@ -188,15 +178,9 @@ public class BlockAllocationTableWriter
/**
* Set the start block for this instance
*
* @param start_block
*/
public void setStartBlock(int start_block)
{
_start_block = start_block;
}
/* ********** END implementation of BATManaged ********** */
} // end class BlockAllocationTableWriter
}

View File

@ -59,13 +59,14 @@ public interface BlockList
* blocks are removed from the list.
*
* @param startBlock the index of the first block in the stream
* @param headerPropertiesStartBlock the index of the first header block in the stream
*
* @return the stream as an array of correctly ordered blocks
*
* @exception IOException if blocks are missing
*/
public ListManagedBlock [] fetchBlocks(final int startBlock)
public ListManagedBlock [] fetchBlocks(final int startBlock, final int headerPropertiesStartBlock)
throws IOException;
/**
@ -78,5 +79,7 @@ public interface BlockList
public void setBAT(final BlockAllocationTableReader bat)
throws IOException;
public int blockCount();
} // end public interface BlockList

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,29 +15,19 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.*;
import java.util.*;
import java.io.IOException;
/**
* A simple implementation of BlockList
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
class BlockListImpl
implements BlockList
{
abstract class BlockListImpl implements BlockList {
private ListManagedBlock[] _blocks;
private BlockAllocationTableReader _bat;
/**
* Constructor BlockListImpl
*/
protected BlockListImpl()
{
_blocks = new ListManagedBlock[ 0 ];
@ -50,21 +39,17 @@ class BlockListImpl
*
* @param blocks blocks to be managed
*/
protected void setBlocks(final ListManagedBlock [] blocks)
{
_blocks = blocks;
}
/* ********** START implementation of BlockList ********** */
/**
* remove the specified block from the list
*
* @param index the index of the specified block; if the index is
* out of range, that's ok
*/
public void zap(final int index)
{
if ((index >= 0) && (index < _blocks.length))
@ -73,6 +58,14 @@ class BlockListImpl
}
}
/**
 * Unit testing method. Gets, without sanity checks or
 * removing.
 *
 * @param index slot to peek at
 * @return the block in that slot; null if it was already removed
 */
protected ListManagedBlock get(final int index) {
    return _blocks[index];
}
/**
* remove and return the specified block from the list
*
@ -83,7 +76,6 @@ class BlockListImpl
* @exception IOException if the index is out of range or has
* already been removed
*/
public ListManagedBlock remove(final int index)
throws IOException
{
@ -94,8 +86,10 @@ class BlockListImpl
result = _blocks[ index ];
if (result == null)
{
throw new IOException("block[ " + index
+ " ] already removed");
throw new IOException(
"block[ " + index + " ] already removed - " +
"does your POIFS have circular or duplicate block references?"
);
}
_blocks[ index ] = null;
}
@ -118,8 +112,7 @@ class BlockListImpl
*
* @exception IOException if blocks are missing
*/
public ListManagedBlock [] fetchBlocks(final int startBlock)
public ListManagedBlock [] fetchBlocks(final int startBlock, final int headerPropertiesStartBlock)
throws IOException
{
if (_bat == null)
@ -127,17 +120,14 @@ class BlockListImpl
throw new IOException(
"Improperly initialized list: no block allocation table provided");
}
return _bat.fetchBlocks(startBlock, this);
return _bat.fetchBlocks(startBlock, headerPropertiesStartBlock, this);
}
/**
* set the associated BlockAllocationTable
*
* @param bat the associated BlockAllocationTable
*
* @exception IOException
*/
public void setBAT(final BlockAllocationTableReader bat)
throws IOException
{
@ -149,6 +139,20 @@ class BlockListImpl
_bat = bat;
}
/* ********** END implementation of BlockList ********** */
} // end package-scope class BlockListImpl
/**
 * Returns the count of the number of blocks
 * (slot count, including entries already removed and nulled out)
 */
public int blockCount() {
    return _blocks.length;
}
/**
 * Returns the number of blocks not yet removed from the list.
 */
protected int remainingBlocks() {
    int remaining = 0;
    for (ListManagedBlock block : _blocks) {
        if (block != null) {
            remaining++;
        }
    }
    return remaining;
}
}

View File

@ -0,0 +1,186 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
/**
* Wraps a <tt>byte</tt> array and provides simple data input access.
* Internally, this class maintains a buffer read index, so that for the most part, primitive
* data can be read in a data-input-stream-like manner.<p/>
*
* Note - the calling class should call the {@link #available()} method to detect end-of-buffer
* and move to the next data block when the current is exhausted.
* For optimisation reasons, no error handling is performed in this class. Thus, mistakes in
calling code may raise ugly exceptions here, like {@link ArrayIndexOutOfBoundsException},
* etc .<p/>
*
* The multi-byte primitive input methods ({@link #readUShortLE()}, {@link #readIntLE()} and
* {@link #readLongLE()}) have corresponding 'spanning read' methods which (when required) perform
* a read across the block boundary. These spanning read methods take the previous
* {@link DataInputBlock} as a parameter.
* Reads of larger amounts of data (into <tt>byte</tt> array buffers) must be managed by the caller
* since these could conceivably involve more than two blocks.
*
* @author Josh Micich
*/
public final class DataInputBlock {

    /**
     * Possibly any size (usually 512K or 64K). Assumed to be at least 8 bytes for all blocks
     * before the end of the stream. The last block in the stream can be any size except zero.
     */
    private final byte[] _buf;
    /** Index within {@link #_buf} of the next byte to be read. */
    private int _readIndex;
    /** One past the last readable index; never changes, so declared final. */
    private final int _maxIndex;

    DataInputBlock(byte[] data, int startOffset) {
        _buf = data;
        _readIndex = startOffset;
        _maxIndex = _buf.length;
    }

    /** @return the number of bytes remaining unread in this block */
    public int available() {
        return _maxIndex - _readIndex;
    }

    /** Reads one unsigned byte (0-255), advancing the read index by one. */
    public int readUByte() {
        return _buf[_readIndex++] & 0xFF;
    }

    /**
     * Reads a <tt>short</tt> which was encoded in <em>little endian</em> format.
     */
    public int readUShortLE() {
        int i = _readIndex;
        int b0 = _buf[i++] & 0xFF;
        int b1 = _buf[i++] & 0xFF;
        _readIndex = i;
        return (b1 << 8) + b0;
    }

    /**
     * Reads a <tt>short</tt> which spans the end of <tt>prevBlock</tt> and the start of this block.
     * Only this block's read index is advanced; the single byte taken from
     * <tt>prevBlock</tt> is its final one, which the caller has already exhausted.
     */
    public int readUShortLE(DataInputBlock prevBlock) {
        // simple case - will always be one byte in each block
        // (the dead post-increment on the previous-block index is gone)
        int b0 = prevBlock._buf[prevBlock._buf.length - 1] & 0xFF;
        int b1 = _buf[_readIndex++] & 0xFF;
        return (b1 << 8) + b0;
    }

    /**
     * Reads an <tt>int</tt> which was encoded in <em>little endian</em> format.
     */
    public int readIntLE() {
        int i = _readIndex;
        int b0 = _buf[i++] & 0xFF;
        int b1 = _buf[i++] & 0xFF;
        int b2 = _buf[i++] & 0xFF;
        int b3 = _buf[i++] & 0xFF;
        _readIndex = i;
        return (b3 << 24) + (b2 << 16) + (b1 << 8) + b0;
    }

    /**
     * Reads an <tt>int</tt> which spans the end of <tt>prevBlock</tt> and the start of this block.
     */
    public int readIntLE(DataInputBlock prevBlock, int prevBlockAvailable) {
        byte[] buf = new byte[4];

        readSpanning(prevBlock, prevBlockAvailable, buf);
        int b0 = buf[0] & 0xFF;
        int b1 = buf[1] & 0xFF;
        int b2 = buf[2] & 0xFF;
        int b3 = buf[3] & 0xFF;
        return (b3 << 24) + (b2 << 16) + (b1 << 8) + b0;
    }

    /**
     * Reads a <tt>long</tt> which was encoded in <em>little endian</em> format.
     */
    public long readLongLE() {
        int i = _readIndex;
        int b0 = _buf[i++] & 0xFF;
        int b1 = _buf[i++] & 0xFF;
        int b2 = _buf[i++] & 0xFF;
        int b3 = _buf[i++] & 0xFF;
        int b4 = _buf[i++] & 0xFF;
        int b5 = _buf[i++] & 0xFF;
        int b6 = _buf[i++] & 0xFF;
        int b7 = _buf[i++] & 0xFF;
        _readIndex = i;
        return (((long)b7 << 56) +
                ((long)b6 << 48) +
                ((long)b5 << 40) +
                ((long)b4 << 32) +
                ((long)b3 << 24) +
                (b2 << 16) +
                (b1 << 8) +
                b0);
    }

    /**
     * Reads a <tt>long</tt> which spans the end of <tt>prevBlock</tt> and the start of this block.
     */
    public long readLongLE(DataInputBlock prevBlock, int prevBlockAvailable) {
        byte[] buf = new byte[8];

        readSpanning(prevBlock, prevBlockAvailable, buf);
        int b0 = buf[0] & 0xFF;
        int b1 = buf[1] & 0xFF;
        int b2 = buf[2] & 0xFF;
        int b3 = buf[3] & 0xFF;
        int b4 = buf[4] & 0xFF;
        int b5 = buf[5] & 0xFF;
        int b6 = buf[6] & 0xFF;
        int b7 = buf[7] & 0xFF;
        return (((long)b7 << 56) +
                ((long)b6 << 48) +
                ((long)b5 << 40) +
                ((long)b4 << 32) +
                ((long)b3 << 24) +
                (b2 << 16) +
                (b1 << 8) +
                b0);
    }

    /**
     * Reads a small amount of data from across the boundary between two blocks.
     * The {@link #_readIndex} of this (the second) block is updated accordingly.
     * Note- this method (and other code) assumes that the second {@link DataInputBlock}
     * always is big enough to complete the read without being exhausted.
     */
    private void readSpanning(DataInputBlock prevBlock, int prevBlockAvailable, byte[] buf) {
        System.arraycopy(prevBlock._buf, prevBlock._readIndex, buf, 0, prevBlockAvailable);
        int secondReadLen = buf.length - prevBlockAvailable;
        System.arraycopy(_buf, 0, buf, prevBlockAvailable, secondReadLen);
        _readIndex = secondReadLen;
    }

    /**
     * Reads <tt>len</tt> bytes from this block into the supplied buffer.
     */
    public void readFully(byte[] buf, int off, int len) {
        System.arraycopy(_buf, _readIndex, buf, off, len);
        _readIndex += len;
    }
}

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,30 +15,23 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.util.IOUtils;
import org.apache.poi.util.IntegerField;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
/**
* A block of document data.
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public class DocumentBlock
extends BigBlock
{
public final class DocumentBlock extends BigBlock {
private static final byte _default_value = ( byte ) 0xFF;
private byte[] _data;
private int _bytes_read;
@ -55,6 +47,11 @@ public class DocumentBlock
public DocumentBlock(final RawDataBlock block)
throws IOException
{
super(
block.getBigBlockSize() == POIFSConstants.SMALLER_BIG_BLOCK_SIZE ?
POIFSConstants.SMALLER_BIG_BLOCK_SIZE_DETAILS :
POIFSConstants.LARGER_BIG_BLOCK_SIZE_DETAILS
);
_data = block.getData();
_bytes_read = _data.length;
}
@ -67,10 +64,10 @@ public class DocumentBlock
* @exception IOException
*/
public DocumentBlock(final InputStream stream)
public DocumentBlock(final InputStream stream, POIFSBigBlockSize bigBlockSize)
throws IOException
{
this();
this(bigBlockSize);
int count = IOUtils.readFully(stream, _data);
_bytes_read = (count == -1) ? 0
@ -81,9 +78,10 @@ public class DocumentBlock
* Create a single instance initialized with default values
*/
private DocumentBlock()
private DocumentBlock(POIFSBigBlockSize bigBlockSize)
{
_data = new byte[ POIFSConstants.BIG_BLOCK_SIZE ];
super(bigBlockSize);
_data = new byte[ bigBlockSize.getBigBlockSize() ];
Arrays.fill(_data, _default_value);
}
@ -106,7 +104,7 @@ public class DocumentBlock
public boolean partiallyRead()
{
return _bytes_read != POIFSConstants.BIG_BLOCK_SIZE;
return _bytes_read != bigBlockSize.getBigBlockSize();
}
/**
@ -129,26 +127,27 @@ public class DocumentBlock
* input array
*/
public static DocumentBlock [] convert(final byte [] array,
public static DocumentBlock [] convert(final POIFSBigBlockSize bigBlockSize,
final byte [] array,
final int size)
{
DocumentBlock[] rval =
new DocumentBlock[ (size + POIFSConstants.BIG_BLOCK_SIZE - 1) / POIFSConstants.BIG_BLOCK_SIZE ];
new DocumentBlock[ (size + bigBlockSize.getBigBlockSize() - 1) / bigBlockSize.getBigBlockSize() ];
int offset = 0;
for (int k = 0; k < rval.length; k++)
{
rval[ k ] = new DocumentBlock();
rval[ k ] = new DocumentBlock(bigBlockSize);
if (offset < array.length)
{
int length = Math.min(POIFSConstants.BIG_BLOCK_SIZE,
int length = Math.min(bigBlockSize.getBigBlockSize(),
array.length - offset);
System.arraycopy(array, offset, rval[ k ]._data, 0, length);
if (length != POIFSConstants.BIG_BLOCK_SIZE)
if (length != bigBlockSize.getBigBlockSize())
{
Arrays.fill(rval[ k ]._data, length,
POIFSConstants.BIG_BLOCK_SIZE,
bigBlockSize.getBigBlockSize(),
_default_value);
}
}
@ -156,50 +155,26 @@ public class DocumentBlock
{
Arrays.fill(rval[ k ]._data, _default_value);
}
offset += POIFSConstants.BIG_BLOCK_SIZE;
offset += bigBlockSize.getBigBlockSize();
}
return rval;
}
/**
* read data from an array of DocumentBlocks
*
* @param blocks the blocks to read from
* @param buffer the buffer to write the data into
* @param offset the offset into the array of blocks to read from
*/
public static void read(final DocumentBlock [] blocks,
final byte [] buffer, final int offset)
{
int firstBlockIndex = offset / POIFSConstants.BIG_BLOCK_SIZE;
int firstBlockOffset = offset % POIFSConstants.BIG_BLOCK_SIZE;
int lastBlockIndex = (offset + buffer.length - 1)
/ POIFSConstants.BIG_BLOCK_SIZE;
if (firstBlockIndex == lastBlockIndex)
{
System.arraycopy(blocks[ firstBlockIndex ]._data,
firstBlockOffset, buffer, 0, buffer.length);
public static DataInputBlock getDataInputBlock(DocumentBlock[] blocks, int offset) {
if(blocks == null || blocks.length == 0) {
return null;
}
else
{
int buffer_offset = 0;
System.arraycopy(blocks[ firstBlockIndex ]._data,
firstBlockOffset, buffer, buffer_offset,
POIFSConstants.BIG_BLOCK_SIZE
- firstBlockOffset);
buffer_offset += POIFSConstants.BIG_BLOCK_SIZE - firstBlockOffset;
for (int j = firstBlockIndex + 1; j < lastBlockIndex; j++)
{
System.arraycopy(blocks[ j ]._data, 0, buffer, buffer_offset,
POIFSConstants.BIG_BLOCK_SIZE);
buffer_offset += POIFSConstants.BIG_BLOCK_SIZE;
}
System.arraycopy(blocks[ lastBlockIndex ]._data, 0, buffer,
buffer_offset, buffer.length - buffer_offset);
}
// Key things about the size of the block
POIFSBigBlockSize bigBlockSize = blocks[0].bigBlockSize;
int BLOCK_SHIFT = bigBlockSize.getHeaderValue();
int BLOCK_SIZE = bigBlockSize.getBigBlockSize();
int BLOCK_MASK = BLOCK_SIZE - 1;
// Now do the offset lookup
int firstBlockIndex = offset >> BLOCK_SHIFT;
int firstBlockOffset= offset & BLOCK_MASK;
return new DataInputBlock(blocks[firstBlockIndex]._data, firstBlockOffset);
}
/* ********** START extension of BigBlock ********** */

View File

@ -0,0 +1,388 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.filesystem.OfficeXmlFileException;
import org.apache.poi.util.HexDump;
import org.apache.poi.util.IOUtils;
import org.apache.poi.util.IntegerField;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
import org.apache.poi.util.LongField;
import org.apache.poi.util.POILogFactory;
import org.apache.poi.util.POILogger;
import org.apache.poi.util.ShortField;
/**
 * The block containing the archive header, i.e. the first 512 bytes of a
 * POIFS (OLE2 / Compound File Binary) file. Holds the locations and counts
 * of the main allocation structures (BAT/FAT, SBAT/MiniFAT, XBAT/DIFAT)
 * and of the property table (directory).
 */
public final class HeaderBlock implements HeaderBlockConstants {
    private static final POILogger _logger =
        POILogFactory.getLogger(HeaderBlock.class);

    /**
     * What big block size the file uses. Most files
     *  use 512 bytes, but a few use 4096
     */
    private final POIFSBigBlockSize bigBlockSize;

    /**
     * Number of big block allocation table blocks (int).
     * (Number of FAT Sectors in Microsoft parlance).
     */
    private int _bat_count;

    /**
     * Start of the property set block (int index of the property set
     * chain's first big block).
     */
    private int _property_start;

    /**
     * start of the small block allocation table (int index of small
     * block allocation table's first big block)
     */
    private int _sbat_start;

    /**
     * Number of small block allocation table blocks (int)
     * (Number of MiniFAT Sectors in Microsoft parlance)
     */
    private int _sbat_count;

    /**
     * Big block index for extension to the big block allocation table
     */
    private int _xbat_start;

    /**
     * Number of big block allocation table blocks (int)
     * (Number of DIFAT Sectors in Microsoft parlance)
     */
    private int _xbat_count;

    /**
     * The data. Only ever 512 bytes, because 4096 byte
     * files use zeros for the extra header space.
     */
    private final byte[] _data;

    /** Fill byte used when creating a brand-new, empty header. */
    private static final byte _default_value = ( byte ) 0xFF;

    /**
     * create a new HeaderBlockReader from an InputStream
     *
     * @param stream the source InputStream
     *
     * @exception IOException on errors or bad data
     */
    public HeaderBlock(InputStream stream) throws IOException {
        // Grab the first 512 bytes
        // (For 4096 sized blocks, the remaining 3584 bytes are zero)
        // Then, process the contents
        this(readFirst512(stream));

        // Fetch the rest of the block if needed
        if(bigBlockSize.getBigBlockSize() != 512) {
            int rest = bigBlockSize.getBigBlockSize() - 512;
            byte[] tmp = new byte[rest];
            // Deliberately read-and-discard: for 4096 byte block files
            //  the remainder of the header block is zero padding
            IOUtils.readFully(stream, tmp);
        }
    }

    /**
     * Create a HeaderBlock from the first
     *  {@link POIFSConstants#SMALLER_BIG_BLOCK_SIZE} bytes of the buffer.
     *
     * @exception IOException on errors or bad data
     */
    public HeaderBlock(ByteBuffer buffer) throws IOException {
        this(IOUtils.toByteArray(buffer, POIFSConstants.SMALLER_BIG_BLOCK_SIZE));
    }

    /**
     * Parse a raw 512 byte header, validating the signature and reading
     *  out the allocation table locations and counts.
     *
     * @exception IOException if the signature is wrong or the block size
     *            is unsupported
     */
    private HeaderBlock(byte[] data) throws IOException {
        this._data = data;

        // verify signature
        long signature = LittleEndian.getLong(_data, _signature_offset);

        if (signature != _signature) {
            // Is it one of the usual suspects?
            byte[] OOXML_FILE_HEADER = POIFSConstants.OOXML_FILE_HEADER;
            if(_data[0] == OOXML_FILE_HEADER[0] &&
               _data[1] == OOXML_FILE_HEADER[1] &&
               _data[2] == OOXML_FILE_HEADER[2] &&
               _data[3] == OOXML_FILE_HEADER[3]) {
                throw new OfficeXmlFileException("The supplied data appears to be in the Office 2007+ XML. You are calling the part of POI that deals with OLE2 Office Documents. You need to call a different part of POI to process this data (eg XSSF instead of HSSF)");
            }

            if ((signature & 0xFF8FFFFFFFFFFFFFL) == 0x0010000200040009L) {
                // BIFF2 raw stream starts with BOF (sid=0x0009, size=0x0004, data=0x00t0)
                throw new IllegalArgumentException("The supplied data appears to be in BIFF2 format. "
                        + "POI only supports BIFF8 format");
            }

            // Give a generic error
            throw new IOException("Invalid header signature; read "
                                  + longToHex(signature) + ", expected "
                                  + longToHex(_signature));
        }

        // Figure out our block size
        //  (byte 30 holds the sector shift: 2^9 = 512, 2^12 = 4096)
        if (_data[30] == 12) {
            this.bigBlockSize = POIFSConstants.LARGER_BIG_BLOCK_SIZE_DETAILS;
        } else if(_data[30] == 9) {
            this.bigBlockSize = POIFSConstants.SMALLER_BIG_BLOCK_SIZE_DETAILS;
        } else {
            throw new IOException("Unsupported blocksize  (2^"+ _data[30] + "). Expected 2^9 or 2^12.");
        }

        // Read out the counts and starts
        //  (consistently from _data - it is the same array as the
        //   "data" parameter, which is only used for the field assignment)
        _bat_count      = new IntegerField(_bat_count_offset, _data).get();
        _property_start = new IntegerField(_property_start_offset,_data).get();
        _sbat_start     = new IntegerField(_sbat_start_offset, _data).get();
        _sbat_count     = new IntegerField(_sbat_block_count_offset, _data).get();
        _xbat_start     = new IntegerField(_xbat_start_offset, _data).get();
        _xbat_count     = new IntegerField(_xbat_count_offset, _data).get();
    }

    /**
     * Create a single instance initialized with default values
     */
    public HeaderBlock(POIFSBigBlockSize bigBlockSize)
    {
        this.bigBlockSize = bigBlockSize;

        // Our data is always 512 big no matter what
        _data = new byte[ POIFSConstants.SMALLER_BIG_BLOCK_SIZE ];
        Arrays.fill(_data, _default_value);

        // Set all the default values
        //  (field meanings per the MS-CFB compound file header layout)
        new LongField(_signature_offset, _signature, _data);
        new IntegerField(0x08, 0, _data);              // header CLSID - always zero
        new IntegerField(0x0c, 0, _data);              // header CLSID (cont.)
        new IntegerField(0x10, 0, _data);              // header CLSID (cont.)
        new IntegerField(0x14, 0, _data);              // header CLSID (cont.)
        new ShortField(0x18, ( short ) 0x3b, _data);   // minor version
        new ShortField(0x1a, ( short ) 0x3, _data);    // major version
        new ShortField(0x1c, ( short ) -2, _data);     // byte order mark 0xFFFE (little endian)
        new ShortField(0x1e, bigBlockSize.getHeaderValue(), _data); // sector shift (9 or 12)
        new IntegerField(0x20, 0x6, _data);            // mini sector shift (2^6 = 64 byte mini blocks)
        new IntegerField(0x24, 0, _data);              // reserved
        new IntegerField(0x28, 0, _data);              // number of directory sectors (0 for 512 byte blocks)
        new IntegerField(0x34, 0, _data);              // transaction signature - unused
        new IntegerField(0x38, 0x1000, _data);         // mini stream cutoff size (4096 bytes)

        // Initialise the variables
        _bat_count  = 0;
        _sbat_count = 0;
        _xbat_count = 0;
        _property_start = POIFSConstants.END_OF_CHAIN;
        _sbat_start = POIFSConstants.END_OF_CHAIN;
        _xbat_start = POIFSConstants.END_OF_CHAIN;
    }

    /**
     * Read exactly the first 512 bytes of the stream.
     *
     * @exception IOException if fewer than 512 bytes could be read
     */
    private static byte[] readFirst512(InputStream stream) throws IOException {
        // Grab the first 512 bytes
        // (For 4096 sized blocks, the remaining 3584 bytes are zero)
        byte[] data = new byte[512];
        int bsCount = IOUtils.readFully(stream, data);
        if(bsCount != 512) {
            throw alertShortRead(bsCount, 512);
        }
        return data;
    }

    /** Render a long as hex, for error messages. */
    private static String longToHex(long value) {
        return new String(HexDump.longToHex(value));
    }

    /**
     * Build (but do not throw) an IOException describing a short read.
     */
    private static IOException alertShortRead(int pRead, int expectedReadSize) {
        int read;
        if (pRead < 0) {
            //Can't have -1 bytes read in the error message!
            read = 0;
        } else {
            read = pRead;
        }
        String type = " byte" + (read == 1 ? (""): ("s"));

        return new IOException("Unable to read entire header; "
                              + read + type + " read; expected "
                              + expectedReadSize + " bytes");
    }

    /**
     * get start of Property Table
     *
     * @return the index of the first block of the Property Table
     */
    public int getPropertyStart() {
        return _property_start;
    }

    /**
     * Set start of Property Table
     *
     * @param startBlock the index of the first block of the Property Table
     */
    public void setPropertyStart(final int startBlock) {
        _property_start = startBlock;
    }

    /**
     * @return start of small block (MiniFAT) allocation table
     */
    public int getSBATStart() {
        return _sbat_start;
    }

    /**
     * @return number of small block (MiniFAT) allocation table blocks
     */
    public int getSBATCount() {
        return _sbat_count;
    }

    /**
     * Set start of small block allocation table
     *
     * @param startBlock the index of the first big block of the small
     *        block allocation table
     */
    public void setSBATStart(final int startBlock) {
        _sbat_start = startBlock;
    }

    /**
     * Set count of SBAT blocks
     *
     * @param count the number of SBAT blocks
     */
    public void setSBATBlockCount(final int count)
    {
        _sbat_count = count;
    }

    /**
     * @return number of BAT blocks
     */
    public int getBATCount() {
        return _bat_count;
    }

    /**
     * Sets the number of BAT blocks that are used.
     * This is the number used in both the BAT and XBAT.
     */
    public void setBATCount(final int count) {
        _bat_count = count;
    }

    /**
     * Returns the offsets to the first (up to) 109
     *  BAT sectors.
     * Any additional BAT sectors are held in the XBAT (DIFAT)
     *  sectors in a chain.
     * @return BAT offset array
     */
    public int[] getBATArray() {
        // Read them in, but only as many as are actually in use
        int[] result = new int[ Math.min(_bat_count,_max_bats_in_header) ];
        int offset = _bat_array_offset;
        for (int j = 0; j < result.length; j++) {
            result[ j ] = LittleEndian.getInt(_data, offset);
            offset += LittleEndianConsts.INT_SIZE;
        }
        return result;
    }

    /**
     * Sets the offsets of the first (up to) 109
     *  BAT sectors. Any unused slots are marked as
     *  {@link POIFSConstants#UNUSED_BLOCK}.
     */
    public void setBATArray(int[] bat_array) {
        int count = Math.min(bat_array.length, _max_bats_in_header);
        int blank = _max_bats_in_header - count;

        int offset = _bat_array_offset;
        for(int i=0; i<count; i++) {
            LittleEndian.putInt(_data, offset, bat_array[i]);
            offset += LittleEndianConsts.INT_SIZE;
        }
        for(int i=0; i<blank; i++) {
            LittleEndian.putInt(_data, offset, POIFSConstants.UNUSED_BLOCK);
            offset += LittleEndianConsts.INT_SIZE;
        }
    }

    /**
     * @return XBAT (DIFAT) count
     */
    public int getXBATCount() {
        return _xbat_count;
    }

    /**
     * Sets the number of XBAT (DIFAT) blocks used
     */
    public void setXBATCount(final int count) {
        _xbat_count = count;
    }

    /**
     * @return XBAT (DIFAT) index
     */
    public int getXBATIndex() {
        return _xbat_start;
    }

    /**
     * Sets the first XBAT (DIFAT) block location
     */
    public void setXBATStart(final int startBlock) {
        _xbat_start = startBlock;
    }

    /**
     * @return The Big Block size, normally 512 bytes, sometimes 4096 bytes
     */
    public POIFSBigBlockSize getBigBlockSize() {
        return bigBlockSize;
    }

    /**
     * Write the block's data to an OutputStream
     *
     * @param stream the OutputStream to which the stored data should
     *               be written
     *
     * @exception IOException on problems writing to the specified
     *            stream
     */
    void writeData(final OutputStream stream)
        throws IOException
    {
        // Update the counts and start positions
        new IntegerField(_bat_count_offset,      _bat_count, _data);
        new IntegerField(_property_start_offset, _property_start, _data);
        new IntegerField(_sbat_start_offset,     _sbat_start, _data);
        new IntegerField(_sbat_block_count_offset, _sbat_count, _data);
        new IntegerField(_xbat_start_offset,     _xbat_start, _data);
        new IntegerField(_xbat_count_offset,     _xbat_count, _data);

        // Write the main data out - always exactly 512 bytes
        stream.write(_data, 0, 512);

        // Now do the padding if needed - 4096 byte block files
        //  pad the rest of the header block with zeros
        for(int i=POIFSConstants.SMALLER_BIG_BLOCK_SIZE; i<bigBlockSize.getBigBlockSize(); i++) {
            stream.write(0);
        }
    }
}

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,29 +15,28 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.util.IntegerField;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
import org.apache.poi.util.LongField;
import org.apache.poi.util.ShortField;
/**
* Constants used in reading/writing the Header block
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public interface HeaderBlockConstants
{
public static final long _signature = 0xE11AB1A1E011CFD0L;
public static final int _bat_array_offset = 0x4c;
public static final int _max_bats_in_header =
(POIFSConstants.BIG_BLOCK_SIZE - _bat_array_offset)
/ LittleEndianConsts.INT_SIZE;
(POIFSConstants.SMALLER_BIG_BLOCK_SIZE - _bat_array_offset)
/ LittleEndianConsts.INT_SIZE; // If 4k blocks, rest is blank
// Note - in Microsoft terms:
// BAT ~= FAT
// SBAT ~= MiniFAT
// XBAT ~= DIFat
// useful offsets
public static final int _signature_offset = 0;

View File

@ -1,205 +0,0 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.*;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.filesystem.OfficeXmlFileException;
import org.apache.poi.util.IOUtils;
import org.apache.poi.util.IntegerField;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
import org.apache.poi.util.LongField;
/**
 * The block containing the archive header: reads the first big block of a
 * POIFS (OLE2) file, validates the signature, and exposes the allocation
 * table locations and counts.
 *
 * @author Marc Johnson (mjohnson at apache dot org)
 */
public class HeaderBlockReader
    implements HeaderBlockConstants
{
    /**
     * What big block size the file uses. Most files
     *  use 512 bytes, but a few use 4096
     */
    private int bigBlockSize = POIFSConstants.BIG_BLOCK_SIZE;

    /** number of big block allocation table blocks (int) */
    private IntegerField _bat_count;

    /**
     * start of the property set block (int index of the property set
     *  chain's first big block)
     */
    private IntegerField _property_start;

    /**
     * start of the small block allocation table (int index of small
     *  block allocation table's first big block)
     */
    private IntegerField _sbat_start;

    /** big block index for extension to the big block allocation table */
    private IntegerField _xbat_start;

    /** number of XBAT (DIFAT) blocks */
    private IntegerField _xbat_count;

    /** the raw header block bytes, one full big block */
    private byte[]       _data;

    /**
     * create a new HeaderBlockReader from an InputStream
     *
     * @param stream the source InputStream
     *
     * @exception IOException on errors or bad data
     */
    public HeaderBlockReader(final InputStream stream)
        throws IOException
    {
        // At this point, we don't know how big our
        //  block sizes are
        // So, read the first 32 bytes to check, then
        //  read the rest of the block
        byte[] blockStart = new byte[32];
        int bsCount = IOUtils.readFully(stream, blockStart);
        if(bsCount != 32) {
            alertShortRead(bsCount);
        }

        // Figure out our block size
        //  (byte 30 holds the sector shift: 12 => 4096 byte blocks,
        //   otherwise the default 512 is assumed)
        if(blockStart[30] == 12) {
            bigBlockSize = POIFSConstants.LARGER_BIG_BLOCK_SIZE;
        }
        _data = new byte[ bigBlockSize ];
        System.arraycopy(blockStart, 0, _data, 0, blockStart.length);

        // Now we can read the rest of our header
        int byte_count = IOUtils.readFully(stream, _data, blockStart.length, _data.length - blockStart.length);
        if (byte_count+bsCount != bigBlockSize) {
            alertShortRead(byte_count);
        }

        // verify signature
        LongField signature = new LongField(_signature_offset, _data);

        if (signature.get() != _signature)
        {
            // Is it one of the usual suspects?
            byte[] OOXML_FILE_HEADER = POIFSConstants.OOXML_FILE_HEADER;
            if(_data[0] == OOXML_FILE_HEADER[0] &&
               _data[1] == OOXML_FILE_HEADER[1] &&
               _data[2] == OOXML_FILE_HEADER[2] &&
               _data[3] == OOXML_FILE_HEADER[3]) {
                throw new OfficeXmlFileException("The supplied data appears to be in the Office 2007+ XML. POI only supports OLE2 Office documents");
            }

            // Give a generic error, rendering the signatures in hex so
            //  they can be compared against the file format documentation
            throw new IOException("Invalid header signature; read 0x"
                                  + Long.toHexString(signature.get())
                                  + ", expected 0x"
                                  + Long.toHexString(_signature));
        }
        _bat_count      = new IntegerField(_bat_count_offset, _data);
        _property_start = new IntegerField(_property_start_offset, _data);
        _sbat_start     = new IntegerField(_sbat_start_offset, _data);
        _xbat_start     = new IntegerField(_xbat_start_offset, _data);
        _xbat_count     = new IntegerField(_xbat_count_offset, _data);
    }

    /**
     * Throw an IOException describing a short read of the header block.
     *
     * @param read the number of bytes actually read (may be negative
     *        for end-of-stream)
     */
    private void alertShortRead(int read) throws IOException {
        if (read < 0) {
            // Can't have -1 bytes read in the error message!
            read = 0;
        }
        String type = " byte" + ((read == 1) ? ("")
                                             : ("s"));

        throw new IOException("Unable to read entire header; "
                              + read + type + " read; expected "
                              + bigBlockSize + " bytes");
    }

    /**
     * get start of Property Table
     *
     * @return the index of the first block of the Property Table
     */
    public int getPropertyStart()
    {
        return _property_start.get();
    }

    /**
     * @return start of small block allocation table
     */
    public int getSBATStart()
    {
        return _sbat_start.get();
    }

    /**
     * @return number of BAT blocks
     */
    public int getBATCount()
    {
        return _bat_count.get();
    }

    /**
     * @return BAT array
     *
     * NOTE(review): this always returns _max_bats_in_header entries;
     *  slots beyond getBATCount() are whatever bytes happen to sit in
     *  the header - callers should only trust the first getBATCount()
     *  values. Confirm against callers before tightening.
     */
    public int [] getBATArray()
    {
        int[] result = new int[ _max_bats_in_header ];
        int   offset = _bat_array_offset;

        for (int j = 0; j < _max_bats_in_header; j++)
        {
            result[ j ] = LittleEndian.getInt(_data, offset);
            offset      += LittleEndianConsts.INT_SIZE;
        }
        return result;
    }

    /**
     * @return XBAT count
     */
    public int getXBATCount()
    {
        return _xbat_count.get();
    }

    /**
     * @return XBAT index
     */
    public int getXBATIndex()
    {
        return _xbat_start.get();
    }

    /**
     * @return The Big Block size, normally 512 bytes, sometimes 4096 bytes
     */
    public int getBigBlockSize() {
        return bigBlockSize;
    }
} // end public class HeaderBlockReader

View File

@ -19,80 +19,38 @@
package org.apache.poi.poifs.storage;
import java.io.*;
import java.util.*;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.util.IntegerField;
import org.apache.poi.util.LittleEndianConsts;
import org.apache.poi.util.LongField;
import org.apache.poi.util.ShortField;
/**
* The block containing the archive header
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public class HeaderBlockWriter
extends BigBlock
implements HeaderBlockConstants
public class HeaderBlockWriter implements HeaderBlockConstants, BlockWritable
{
private static final byte _default_value = ( byte ) 0xFF;
// number of big block allocation table blocks (int)
private IntegerField _bat_count;
// start of the property set block (int index of the property set
// chain's first big block)
private IntegerField _property_start;
// start of the small block allocation table (int index of small
// block allocation table's first big block)
private IntegerField _sbat_start;
// number of big blocks holding the small block allocation table
private IntegerField _sbat_block_count;
// big block index for extension to the big block allocation table
private IntegerField _xbat_start;
private IntegerField _xbat_count;
private byte[] _data;
private final HeaderBlock _header_block;
/**
* Create a single instance initialized with default values
*/
public HeaderBlockWriter()
public HeaderBlockWriter(POIFSBigBlockSize bigBlockSize)
{
_data = new byte[ POIFSConstants.BIG_BLOCK_SIZE ];
Arrays.fill(_data, _default_value);
new LongField(_signature_offset, _signature, _data);
new IntegerField(0x08, 0, _data);
new IntegerField(0x0c, 0, _data);
new IntegerField(0x10, 0, _data);
new IntegerField(0x14, 0, _data);
new ShortField(0x18, ( short ) 0x3b, _data);
new ShortField(0x1a, ( short ) 0x3, _data);
new ShortField(0x1c, ( short ) -2, _data);
new ShortField(0x1e, ( short ) 0x9, _data);
new IntegerField(0x20, 0x6, _data);
new IntegerField(0x24, 0, _data);
new IntegerField(0x28, 0, _data);
_bat_count = new IntegerField(_bat_count_offset, 0, _data);
_property_start = new IntegerField(_property_start_offset,
POIFSConstants.END_OF_CHAIN,
_data);
new IntegerField(0x34, 0, _data);
new IntegerField(0x38, 0x1000, _data);
_sbat_start = new IntegerField(_sbat_start_offset,
POIFSConstants.END_OF_CHAIN, _data);
_sbat_block_count = new IntegerField(_sbat_block_count_offset, 0,
_data);
_xbat_start = new IntegerField(_xbat_start_offset,
POIFSConstants.END_OF_CHAIN, _data);
_xbat_count = new IntegerField(_xbat_count_offset, 0, _data);
_header_block = new HeaderBlock(bigBlockSize);
}
/**
* Create a single instance initialized with the specified
* existing values
*/
public HeaderBlockWriter(HeaderBlock headerBlock)
{
_header_block = headerBlock;
}
/**
@ -111,16 +69,19 @@ public class HeaderBlockWriter
final int startBlock)
{
BATBlock[] rvalue;
POIFSBigBlockSize bigBlockSize = _header_block.getBigBlockSize();
_bat_count.set(blockCount, _data);
_header_block.setBATCount(blockCount);
// Set the BAT locations
int limit = Math.min(blockCount, _max_bats_in_header);
int offset = _bat_array_offset;
for (int j = 0; j < limit; j++)
{
new IntegerField(offset, startBlock + j, _data);
offset += LittleEndianConsts.INT_SIZE;
int[] bat_blocks = new int[limit];
for (int j = 0; j < limit; j++) {
bat_blocks[j] = startBlock + j;
}
_header_block.setBATArray(bat_blocks);
// Now do the XBATs
if (blockCount > _max_bats_in_header)
{
int excess_blocks = blockCount - _max_bats_in_header;
@ -131,16 +92,16 @@ public class HeaderBlockWriter
excess_block_array[ j ] = startBlock + j
+ _max_bats_in_header;
}
rvalue = BATBlock.createXBATBlocks(excess_block_array,
rvalue = BATBlock.createXBATBlocks(bigBlockSize, excess_block_array,
startBlock + blockCount);
_xbat_start.set(startBlock + blockCount, _data);
_header_block.setXBATStart(startBlock + blockCount);
}
else
{
rvalue = BATBlock.createXBATBlocks(new int[ 0 ], 0);
_xbat_start.set(POIFSConstants.END_OF_CHAIN, _data);
rvalue = BATBlock.createXBATBlocks(bigBlockSize, new int[ 0 ], 0);
_header_block.setXBATStart(POIFSConstants.END_OF_CHAIN);
}
_xbat_count.set(rvalue.length, _data);
_header_block.setXBATCount(rvalue.length);
return rvalue;
}
@ -150,10 +111,9 @@ public class HeaderBlockWriter
* @param startBlock the index of the first block of the Property
* Table
*/
public void setPropertyStart(final int startBlock)
{
_property_start.set(startBlock, _data);
_header_block.setPropertyStart(startBlock);
}
/**
@ -162,10 +122,9 @@ public class HeaderBlockWriter
* @param startBlock the index of the first big block of the small
* block allocation table
*/
public void setSBATStart(final int startBlock)
{
_sbat_start.set(startBlock, _data);
_header_block.setSBATStart(startBlock);
}
/**
@ -173,10 +132,9 @@ public class HeaderBlockWriter
*
* @param count the number of SBAT blocks
*/
public void setSBATBlockCount(final int count)
{
_sbat_block_count.set(count, _data);
_header_block.setSBATBlockCount(count);
}
/**
@ -188,11 +146,11 @@ public class HeaderBlockWriter
* @return number of XBAT blocks needed
*/
static int calculateXBATStorageRequirements(final int blockCount)
static int calculateXBATStorageRequirements(POIFSBigBlockSize bigBlockSize, final int blockCount)
{
return (blockCount > _max_bats_in_header)
? BATBlock.calculateXBATStorageRequirements(blockCount
- _max_bats_in_header)
? BATBlock.calculateXBATStorageRequirements(
bigBlockSize, blockCount - _max_bats_in_header)
: 0;
}
@ -207,11 +165,29 @@ public class HeaderBlockWriter
* @exception IOException on problems writing to the specified
* stream
*/
void writeData(final OutputStream stream)
public void writeBlocks(final OutputStream stream)
throws IOException
{
doWriteData(stream, _data);
_header_block.writeData(stream);
}
/**
* Write the block's data to an existing block
*
* @param block the ByteBuffer of the block to which the
* stored data should be written
*
* @exception IOException on problems writing to the block
*/
public void writeBlock(ByteBuffer block)
throws IOException
{
ByteArrayOutputStream baos = new ByteArrayOutputStream(
_header_block.getBigBlockSize().getBigBlockSize()
);
_header_block.writeData(baos);
block.put(baos.toByteArray());
}
/* ********** END extension of BigBlock ********** */

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,30 +15,21 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.*;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import java.util.*;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.property.Property;
import org.apache.poi.util.IntegerField;
import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.LittleEndianConsts;
/**
* A block of Property instances
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public class PropertyBlock
extends BigBlock
{
private static final int _properties_per_block =
POIFSConstants.BIG_BLOCK_SIZE / POIFSConstants.PROPERTY_SIZE;
public final class PropertyBlock extends BigBlock {
private Property[] _properties;
/**
@ -49,10 +39,12 @@ public class PropertyBlock
* @param offset the offset into the properties array
*/
private PropertyBlock(final Property [] properties, final int offset)
private PropertyBlock(final POIFSBigBlockSize bigBlockSize, final Property [] properties, final int offset)
{
_properties = new Property[ _properties_per_block ];
for (int j = 0; j < _properties_per_block; j++)
super(bigBlockSize);
_properties = new Property[ bigBlockSize.getPropertiesPerBlock() ];
for (int j = 0; j < _properties.length; j++)
{
_properties[ j ] = properties[ j + offset ];
}
@ -70,8 +62,9 @@ public class PropertyBlock
*/
public static BlockWritable [] createPropertyBlockArray(
final List properties)
final POIFSBigBlockSize bigBlockSize, final List<Property> properties)
{
int _properties_per_block = bigBlockSize.getPropertiesPerBlock();
int block_count =
(properties.size() + _properties_per_block - 1)
/ _properties_per_block;
@ -101,7 +94,7 @@ public class PropertyBlock
for (int j = 0; j < block_count; j++)
{
rvalue[ j ] = new PropertyBlock(to_be_written,
rvalue[ j ] = new PropertyBlock(bigBlockSize, to_be_written,
j * _properties_per_block);
}
return rvalue;
@ -122,6 +115,7 @@ public class PropertyBlock
void writeData(final OutputStream stream)
throws IOException
{
int _properties_per_block = bigBlockSize.getPropertiesPerBlock();
for (int j = 0; j < _properties_per_block; j++)
{
_properties[ j ].writeData(stream);

View File

@ -51,13 +51,14 @@ public class RawDataBlock
*/
public RawDataBlock(final InputStream stream)
throws IOException {
this(stream, POIFSConstants.BIG_BLOCK_SIZE);
this(stream, POIFSConstants.SMALLER_BIG_BLOCK_SIZE);
}
/**
* Constructor RawDataBlock
*
* @param stream the InputStream from which the data will be read
* @param blockSize the size of the POIFS blocks, normally 512 bytes {@link POIFSConstants#BIG_BLOCK_SIZE}
* @param blockSize the size of the POIFS blocks, normally 512 bytes
* {@link org.apache.poi.poifs.common.POIFSConstants#SMALLER_BIG_BLOCK_SIZE}
*
* @exception IOException on I/O errors, and if an insufficient
* amount of data is read (the InputStream must
@ -112,6 +113,10 @@ public class RawDataBlock
return _hasData;
}
public String toString() {
return "RawDataBlock of size " + _data.length;
}
/* ********** START implementation of ListManagedBlock ********** */
/**
@ -131,6 +136,13 @@ public class RawDataBlock
return _data;
}
/**
* What's the big block size?
*/
public int getBigBlockSize() {
return _data.length;
}
/* ********** END implementation of ListManagedBlock ********** */
} // end public class RawDataBlock

View File

@ -23,6 +23,8 @@ import java.io.*;
import java.util.*;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
/**
* A list of RawDataBlocks instances, and methods to manage the list
*
@ -43,14 +45,14 @@ public class RawDataBlockList
* block is read
*/
public RawDataBlockList(final InputStream stream, int bigBlockSize)
public RawDataBlockList(final InputStream stream, POIFSBigBlockSize bigBlockSize)
throws IOException
{
List blocks = new ArrayList();
List<RawDataBlock> blocks = new ArrayList<RawDataBlock>();
while (true)
{
RawDataBlock block = new RawDataBlock(stream, bigBlockSize);
RawDataBlock block = new RawDataBlock(stream, bigBlockSize.getBigBlockSize());
// If there was data, add the block to the list
if(block.hasData()) {
@ -62,7 +64,7 @@ public class RawDataBlockList
break;
}
}
setBlocks(( RawDataBlock [] ) blocks.toArray(new RawDataBlock[ 0 ]));
setBlocks( blocks.toArray(new RawDataBlock[ blocks.size() ]) );
}
} // end public class RawDataBlockList

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,24 +15,20 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.IOException;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.property.RootProperty;
import java.util.*;
import java.io.*;
/**
* This class implements reading the small document block list from an
* existing file
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public class SmallBlockTableReader
{
public final class SmallBlockTableReader {
/**
* fetch the small document block list from an existing file
@ -48,17 +43,23 @@ public class SmallBlockTableReader
*
* @exception IOException
*/
public static BlockList getSmallDocumentBlocks(
final POIFSBigBlockSize bigBlockSize,
final RawDataBlockList blockList, final RootProperty root,
final int sbatStart)
throws IOException
{
BlockList list =
new SmallDocumentBlockList(SmallDocumentBlock
.extract(blockList.fetchBlocks(root.getStartBlock())));
// Fetch the blocks which hold the Small Blocks stream
ListManagedBlock [] smallBlockBlocks =
blockList.fetchBlocks(root.getStartBlock(), -1);
new BlockAllocationTableReader(blockList.fetchBlocks(sbatStart),
// Turn that into a list
BlockList list =new SmallDocumentBlockList(
SmallDocumentBlock.extract(bigBlockSize, smallBlockBlocks));
// Process
new BlockAllocationTableReader(bigBlockSize,
blockList.fetchBlocks(sbatStart, -1),
list);
return list;
}

View File

@ -19,6 +19,7 @@
package org.apache.poi.poifs.storage;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
import org.apache.poi.poifs.filesystem.BATManaged;
import org.apache.poi.poifs.filesystem.POIFSDocument;
@ -50,10 +51,11 @@ public class SmallBlockTableWriter
* @param root the Filesystem's root property
*/
public SmallBlockTableWriter(final List documents,
public SmallBlockTableWriter(final POIFSBigBlockSize bigBlockSize,
final List documents,
final RootProperty root)
{
_sbat = new BlockAllocationTableWriter();
_sbat = new BlockAllocationTableWriter(bigBlockSize);
_small_blocks = new ArrayList();
_root = root;
Iterator iter = documents.iterator();
@ -76,7 +78,7 @@ public class SmallBlockTableWriter
}
_sbat.simpleCreateBlocks();
_root.setSize(_small_blocks.size());
_big_block_count = SmallDocumentBlock.fill(_small_blocks);
_big_block_count = SmallDocumentBlock.fill(bigBlockSize,_small_blocks);
}
/**

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,13 +15,16 @@
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.storage;
import java.io.*;
import java.util.*;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.poi.poifs.common.POIFSBigBlockSize;
import org.apache.poi.poifs.common.POIFSConstants;
/**
@ -31,27 +33,35 @@ import org.apache.poi.poifs.common.POIFSConstants;
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public final class SmallDocumentBlock implements BlockWritable, ListManagedBlock {
private static final int BLOCK_SHIFT = 6;
public class SmallDocumentBlock
implements BlockWritable, ListManagedBlock
{
private byte[] _data;
private static final byte _default_fill = ( byte ) 0xff;
private static final int _block_size = 64;
private static final int _blocks_per_big_block =
POIFSConstants.BIG_BLOCK_SIZE / _block_size;
private static final int _block_size = 1 << BLOCK_SHIFT;
private static final int BLOCK_MASK = _block_size-1;
private SmallDocumentBlock(final byte [] data, final int index)
private final int _blocks_per_big_block;
private final POIFSBigBlockSize _bigBlockSize;
private SmallDocumentBlock(final POIFSBigBlockSize bigBlockSize, final byte [] data, final int index)
{
this();
this(bigBlockSize);
System.arraycopy(data, index * _block_size, _data, 0, _block_size);
}
private SmallDocumentBlock()
private SmallDocumentBlock(final POIFSBigBlockSize bigBlockSize)
{
_bigBlockSize = bigBlockSize;
_blocks_per_big_block = getBlocksPerBigBlock(bigBlockSize);
_data = new byte[ _block_size ];
}
    /**
     * Computes how many 64-byte (1 &lt;&lt; BLOCK_SHIFT) small blocks fit into one
     * big block of the given size.
     */
    private static int getBlocksPerBigBlock(final POIFSBigBlockSize bigBlockSize)
    {
        return bigBlockSize.getBigBlockSize() / _block_size;
    }
/**
* convert a single long array into an array of SmallDocumentBlock
* instances
@ -62,9 +72,9 @@ public class SmallDocumentBlock
* @return an array of SmallDocumentBlock instances, filled from
* the array
*/
public static SmallDocumentBlock [] convert(final byte [] array,
final int size)
public static SmallDocumentBlock [] convert(POIFSBigBlockSize bigBlockSize,
byte [] array,
int size)
{
SmallDocumentBlock[] rval =
new SmallDocumentBlock[ (size + _block_size - 1) / _block_size ];
@ -72,7 +82,7 @@ public class SmallDocumentBlock
for (int k = 0; k < rval.length; k++)
{
rval[ k ] = new SmallDocumentBlock();
rval[ k ] = new SmallDocumentBlock(bigBlockSize);
if (offset < array.length)
{
int length = Math.min(_block_size, array.length - offset);
@ -101,9 +111,10 @@ public class SmallDocumentBlock
*
* @return number of big blocks the list encompasses
*/
public static int fill(final List blocks)
public static int fill(POIFSBigBlockSize bigBlockSize, List blocks)
{
int _blocks_per_big_block = getBlocksPerBigBlock(bigBlockSize);
int count = blocks.size();
int big_block_count = (count + _blocks_per_big_block - 1)
/ _blocks_per_big_block;
@ -111,7 +122,7 @@ public class SmallDocumentBlock
for (; count < full_count; count++)
{
blocks.add(makeEmptySmallDocumentBlock());
blocks.add(makeEmptySmallDocumentBlock(bigBlockSize));
}
return big_block_count;
}
@ -128,9 +139,9 @@ public class SmallDocumentBlock
* @exception ArrayIndexOutOfBoundsException if, somehow, the store
* contains less data than size indicates
*/
public static SmallDocumentBlock [] convert(final BlockWritable [] store,
final int size)
public static SmallDocumentBlock [] convert(POIFSBigBlockSize bigBlockSize,
BlockWritable [] store,
int size)
throws IOException, ArrayIndexOutOfBoundsException
{
ByteArrayOutputStream stream = new ByteArrayOutputStream();
@ -145,7 +156,7 @@ public class SmallDocumentBlock
for (int index = 0; index < rval.length; index++)
{
rval[ index ] = new SmallDocumentBlock(data, index);
rval[ index ] = new SmallDocumentBlock(bigBlockSize, data, index);
}
return rval;
}
@ -157,13 +168,12 @@ public class SmallDocumentBlock
* data
*
* @return a List of SmallDocumentBlock's extracted from the input
*
* @exception IOException
*/
public static List extract(ListManagedBlock [] blocks)
public static List extract(POIFSBigBlockSize bigBlockSize, ListManagedBlock [] blocks)
throws IOException
{
int _blocks_per_big_block = getBlocksPerBigBlock(bigBlockSize);
List sdbs = new ArrayList();
for (int j = 0; j < blocks.length; j++)
@ -172,52 +182,16 @@ public class SmallDocumentBlock
for (int k = 0; k < _blocks_per_big_block; k++)
{
sdbs.add(new SmallDocumentBlock(data, k));
sdbs.add(new SmallDocumentBlock(bigBlockSize, data, k));
}
}
return sdbs;
}
/**
* read data from an array of SmallDocumentBlocks
*
* @param blocks the blocks to read from
* @param buffer the buffer to write the data into
* @param offset the offset into the array of blocks to read from
*/
public static void read(final BlockWritable [] blocks,
final byte [] buffer, final int offset)
{
int firstBlockIndex = offset / _block_size;
int firstBlockOffset = offset % _block_size;
int lastBlockIndex = (offset + buffer.length - 1) / _block_size;
if (firstBlockIndex == lastBlockIndex)
{
System.arraycopy(
(( SmallDocumentBlock ) blocks[ firstBlockIndex ])._data,
firstBlockOffset, buffer, 0, buffer.length);
}
else
{
int buffer_offset = 0;
System.arraycopy(
(( SmallDocumentBlock ) blocks[ firstBlockIndex ])._data,
firstBlockOffset, buffer, buffer_offset,
_block_size - firstBlockOffset);
buffer_offset += _block_size - firstBlockOffset;
for (int j = firstBlockIndex + 1; j < lastBlockIndex; j++)
{
System.arraycopy((( SmallDocumentBlock ) blocks[ j ])._data,
0, buffer, buffer_offset, _block_size);
buffer_offset += _block_size;
}
System.arraycopy(
(( SmallDocumentBlock ) blocks[ lastBlockIndex ])._data, 0,
buffer, buffer_offset, buffer.length - buffer_offset);
}
    /**
     * Creates a {@link DataInputBlock} positioned at the given overall byte
     * offset within the chain of small blocks.
     *
     * @param blocks the small blocks making up the document's data
     * @param offset byte offset into the concatenated small-block data
     * @return a reader over the block containing that offset, positioned at it
     */
    public static DataInputBlock getDataInputBlock(SmallDocumentBlock[] blocks, int offset) {
        // Small blocks are 1 << BLOCK_SHIFT (64) bytes each, so the high bits
        // of the offset select the block and the low bits (BLOCK_MASK) select
        // the position within that block.
        int firstBlockIndex = offset >> BLOCK_SHIFT;
        int firstBlockOffset= offset & BLOCK_MASK;
        return new DataInputBlock(blocks[firstBlockIndex]._data, firstBlockOffset);
    }
/**
@ -227,27 +201,24 @@ public class SmallDocumentBlock
*
* @return total size
*/
    public static int calcSize(int size)
    {
        // total bytes = block count * 64 bytes per small block
        return size * _block_size;
    }
private static SmallDocumentBlock makeEmptySmallDocumentBlock()
private static SmallDocumentBlock makeEmptySmallDocumentBlock(POIFSBigBlockSize bigBlockSize)
{
SmallDocumentBlock block = new SmallDocumentBlock();
SmallDocumentBlock block = new SmallDocumentBlock(bigBlockSize);
Arrays.fill(block._data, _default_fill);
return block;
}
private static int convertToBlockCount(final int size)
private static int convertToBlockCount(int size)
{
return (size + _block_size - 1) / _block_size;
}
/* ********** START implementation of BlockWritable ********** */
/**
* Write the storage to an OutputStream
*
@ -257,16 +228,12 @@ public class SmallDocumentBlock
* @exception IOException on problems writing to the specified
* stream
*/
public void writeBlocks(final OutputStream stream)
public void writeBlocks(OutputStream stream)
throws IOException
{
stream.write(_data);
}
/* ********** END implementation of BlockWritable ********** */
/* ********** START implementation of ListManagedBlock ********** */
/**
* Get the data from the block
*
@ -274,13 +241,11 @@ public class SmallDocumentBlock
*
* @exception IOException if there is no data
*/
public byte [] getData()
throws IOException
{
public byte [] getData() {
return _data;
}
/* ********** END implementation of ListManagedBlock ********** */
} // end public class SmallDocumentBlock
    /**
     * @return the big block size this small block was constructed with
     */
    public POIFSBigBlockSize getBigBlockSize() {
        return _bigBlockSize;
    }
}

View File

@ -40,7 +40,7 @@ public class SmallDocumentBlockList
public SmallDocumentBlockList(final List blocks)
{
setBlocks(( SmallDocumentBlock [] ) blocks
.toArray(new SmallDocumentBlock[ 0 ]));
.toArray(new SmallDocumentBlock[ blocks.size() ]));
}
} // end public class SmallDocumentBlockList

View File

@ -21,7 +21,6 @@ package org.apache.poi.util;
* Utility classes for dealing with arrays.
*
* @author Glen Stampoultzis
* @version $Id$
*/
public class ArrayUtil
{

View File

@ -1,4 +1,3 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@ -16,7 +15,6 @@
limitations under the License.
==================================================================== */
package org.apache.poi.util;
import java.util.*;
@ -89,34 +87,20 @@ import java.util.*;
*
* @author Marc Johnson (mjohnson at apache dot org)
*/
public final class BinaryTree // final for performance
extends AbstractMap
{
private Node[] _root = new Node[]
{
null, null
};
private int _size = 0;
private int _modifications = 0;
private Set[] _key_set = new Set[]
{
null, null
};
private Set[] _entry_set = new Set[]
{
null, null
};
private Collection[] _value_collection = new Collection[]
{
null, null
};
private static final int _KEY = 0;
private static final int _VALUE = 1;
private static final int _INDEX_SUM = _KEY + _VALUE;
private static final int _MINIMUM_INDEX = 0;
private static final int _INDEX_COUNT = 2;
private static final String[] _data_name = new String[]
//for performance
public class BinaryTree extends AbstractMap {
final Node[] _root;
int _size = 0;
int _modifications = 0;
private final Set[] _key_set = new Set[] { null, null };
private final Set[] _entry_set = new Set[] { null, null };
private final Collection[] _value_collection = new Collection[] { null, null };
static int _KEY = 0;
static int _VALUE = 1;
private static int _INDEX_SUM = _KEY + _VALUE;
private static int _MINIMUM_INDEX = 0;
private static int _INDEX_COUNT = 2;
private static String[] _data_name = new String[]
{
"key", "value"
};
@ -124,9 +108,8 @@ public final class BinaryTree // final for performance
/**
* Construct a new BinaryTree
*/
public BinaryTree()
{
public BinaryTree() {
_root = new Node[]{ null, null, };
}
/**
@ -146,11 +129,11 @@ public final class BinaryTree // final for performance
* or duplicate values in the
* map
*/
public BinaryTree(final Map map)
public BinaryTree(Map map)
throws ClassCastException, NullPointerException,
IllegalArgumentException
{
this();
putAll(map);
}
@ -167,8 +150,7 @@ public final class BinaryTree // final for performance
* inappropriate type for this map.
* @exception NullPointerException if the value is null
*/
public Object getKeyForValue(final Object value)
public Object getKeyForValue(Object value)
throws ClassCastException, NullPointerException
{
return doGet(( Comparable ) value, _VALUE);
@ -182,8 +164,7 @@ public final class BinaryTree // final for performance
* @return previous key associated with specified value, or null
* if there was no mapping for value.
*/
public Object removeValue(final Object value)
public Object removeValue(Object value)
{
return doRemove(( Comparable ) value, _VALUE);
}
@ -207,7 +188,6 @@ public final class BinaryTree // final for performance
*
* @return a set view of the mappings contained in this map.
*/
public Set entrySetByValue()
{
if (_entry_set[ _VALUE ] == null)
@ -423,8 +403,7 @@ public final class BinaryTree // final for performance
* key. null if the specified key or value could not be
* found
*/
private Object doRemove(final Comparable o, final int index)
private Object doRemove(Comparable o, int index)
{
Node node = lookup(o, index);
Object rval = null;
@ -447,8 +426,7 @@ public final class BinaryTree // final for performance
* key was mapped); null if we couldn't find the specified
* object
*/
private Object doGet(final Comparable o, final int index)
private Object doGet(Comparable o, int index)
{
checkNonNullComparable(o, index);
Node node = lookup(o, index);
@ -464,8 +442,7 @@ public final class BinaryTree // final for performance
*
* @return _VALUE (if _KEY was specified), else _KEY
*/
private int oppositeIndex(final int index)
private int oppositeIndex(int index)
{
// old trick ... to find the opposite of a value, m or n,
@ -483,8 +460,7 @@ public final class BinaryTree // final for performance
* @return the desired Node, or null if there is no mapping of the
* specified data
*/
private Node lookup(final Comparable data, final int index)
public Node lookup(Comparable data, int index)
{
Node rval = null;
Node node = _root[ index ];
@ -498,11 +474,8 @@ public final class BinaryTree // final for performance
rval = node;
break;
}
else
{
node = (cmp < 0) ? node.getLeft(index)
: node.getRight(index);
}
node = (cmp < 0) ? node.getLeft(index)
: node.getRight(index);
}
return rval;
}
@ -516,10 +489,9 @@ public final class BinaryTree // final for performance
* @return negative value if o1 < o2; 0 if o1 == o2; positive
* value if o1 > o2
*/
private static int compare(final Comparable o1, final Comparable o2)
private static int compare(Comparable o1, Comparable o2)
{
return (( Comparable ) o1).compareTo(o2);
return o1.compareTo(o2);
}
/**
@ -532,8 +504,7 @@ public final class BinaryTree // final for performance
* @return the smallest node, from the specified node, in the
* specified mapping
*/
private static Node leastNode(final Node node, final int index)
static Node leastNode(Node node, int index)
{
Node rval = node;
@ -555,8 +526,7 @@ public final class BinaryTree // final for performance
*
* @return the specified node
*/
private Node nextGreater(final Node node, final int index)
static Node nextGreater(Node node, int index)
{
Node rval = null;
@ -601,9 +571,7 @@ public final class BinaryTree // final for performance
* @param to the node whose color we're changing; may be null
* @param index _KEY or _VALUE
*/
private static void copyColor(final Node from, final Node to,
final int index)
private static void copyColor(Node from, Node to, int index)
{
if (to != null)
{
@ -627,11 +595,9 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static boolean isRed(final Node node, final int index)
private static boolean isRed(Node node, int index)
{
return ((node == null) ? false
: node.isRed(index));
return node == null ? false : node.isRed(index);
}
/**
@ -641,11 +607,9 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static boolean isBlack(final Node node, final int index)
private static boolean isBlack(Node node, int index)
{
return ((node == null) ? true
: node.isBlack(index));
return node == null ? true : node.isBlack(index);
}
/**
@ -654,8 +618,7 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static void makeRed(final Node node, final int index)
private static void makeRed(Node node, int index)
{
if (node != null)
{
@ -669,8 +632,7 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static void makeBlack(final Node node, final int index)
private static void makeBlack(Node node, int index)
{
if (node != null)
{
@ -685,8 +647,7 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static Node getGrandParent(final Node node, final int index)
private static Node getGrandParent(Node node, int index)
{
return getParent(getParent(node, index), index);
}
@ -698,8 +659,7 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static Node getParent(final Node node, final int index)
private static Node getParent(Node node, int index)
{
return ((node == null) ? null
: node.getParent(index));
@ -712,8 +672,7 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static Node getRightChild(final Node node, final int index)
private static Node getRightChild(Node node, int index)
{
return (node == null) ? null
: node.getRight(index);
@ -726,8 +685,7 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static Node getLeftChild(final Node node, final int index)
private static Node getLeftChild(Node node, int index)
{
return (node == null) ? null
: node.getLeft(index);
@ -744,15 +702,14 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static boolean isLeftChild(final Node node, final int index)
{
return (node == null) ? true
: ((node.getParent(index) == null) ? false
: (node
== node.getParent(
index).getLeft(
index)));
private static boolean isLeftChild(Node node, int index) {
if (node == null) {
return true;
}
if (node.getParent(index) == null) {
return false;
}
return node == node.getParent(index).getLeft(index);
}
/**
@ -766,15 +723,15 @@ public final class BinaryTree // final for performance
* @param node the node (may be null) in question
* @param index _KEY or _VALUE
*/
private static boolean isRightChild(final Node node, final int index)
private static boolean isRightChild(Node node, int index)
{
return (node == null) ? true
: ((node.getParent(index) == null) ? false
: (node
== node.getParent(
index).getRight(
index)));
if (node == null) {
return true;
}
if (node.getParent(index) == null) {
return false;
}
return node == node.getParent(index).getRight(index);
}
/**
@ -783,8 +740,7 @@ public final class BinaryTree // final for performance
* @param node the node to be rotated
* @param index _KEY or _VALUE
*/
private void rotateLeft(final Node node, final int index)
private void rotateLeft(Node node, int index)
{
Node right_child = node.getRight(index);
@ -818,8 +774,7 @@ public final class BinaryTree // final for performance
* @param node the node to be rotated
* @param index _KEY or _VALUE
*/
private void rotateRight(final Node node, final int index)
private void rotateRight(Node node, int index)
{
Node left_child = node.getLeft(index);
@ -854,8 +809,7 @@ public final class BinaryTree // final for performance
* @param inserted_node the node to be inserted
* @param index _KEY or _VALUE
*/
private void doRedBlackInsert(final Node inserted_node, final int index)
private void doRedBlackInsert(Node inserted_node, int index)
{
Node current_node = inserted_node;
@ -931,8 +885,7 @@ public final class BinaryTree // final for performance
*
* @param deleted_node the node to be deleted
*/
private void doRedBlackDelete(final Node deleted_node)
void doRedBlackDelete(Node deleted_node)
{
for (int index = _MINIMUM_INDEX; index < _INDEX_COUNT; index++)
{
@ -1023,9 +976,8 @@ public final class BinaryTree // final for performance
* @param replacement_node the node being replaced
* @param index _KEY or _VALUE
*/
private void doRedBlackDeleteFixup(final Node replacement_node,
final int index)
private void doRedBlackDeleteFixup(Node replacement_node,
int index)
{
Node current_node = replacement_node;
@ -1121,8 +1073,7 @@ public final class BinaryTree // final for performance
* @param y another node
* @param index _KEY or _VALUE
*/
private void swapPosition(final Node x, final Node y, final int index)
private void swapPosition(Node x, Node y, int index)
{
// Save initial values.
@ -1244,9 +1195,8 @@ public final class BinaryTree // final for performance
* @exception NullPointerException if o is null
* @exception ClassCastException if o is not Comparable
*/
private static void checkNonNullComparable(final Object o,
final int index)
private static void checkNonNullComparable(Object o,
int index)
{
if (o == null)
{
@ -1268,8 +1218,7 @@ public final class BinaryTree // final for performance
* @exception NullPointerException if key is null
* @exception ClassCastException if key is not Comparable
*/
private static void checkKey(final Object key)
private static void checkKey(Object key)
{
checkNonNullComparable(key, _KEY);
}
@ -1282,8 +1231,7 @@ public final class BinaryTree // final for performance
* @exception NullPointerException if value is null
* @exception ClassCastException if value is not Comparable
*/
private static void checkValue(final Object value)
private static void checkValue(Object value)
{
checkNonNullComparable(value, _VALUE);
}
@ -1298,8 +1246,7 @@ public final class BinaryTree // final for performance
* @exception NullPointerException if key or value is null
* @exception ClassCastException if key or value is not Comparable
*/
private static void checkKeyAndValue(final Object key, final Object value)
private static void checkKeyAndValue(Object key, Object value)
{
checkKey(key);
checkValue(value);
@ -1310,7 +1257,6 @@ public final class BinaryTree // final for performance
* concurrent modification of the map through the map and through
* an Iterator from one of its Set or Collection views
*/
private void modify()
{
_modifications++;
@ -1319,7 +1265,6 @@ public final class BinaryTree // final for performance
/**
* bump up the size and note that the map has changed
*/
private void grow()
{
modify();
@ -1329,7 +1274,6 @@ public final class BinaryTree // final for performance
/**
* decrement the size and note that the map has changed
*/
private void shrink()
{
modify();
@ -1344,8 +1288,7 @@ public final class BinaryTree // final for performance
* @exception IllegalArgumentException if the node already exists
* in the value mapping
*/
private void insertValue(final Node newNode)
private void insertValue(Node newNode)
throws IllegalArgumentException
{
Node node = _root[ _VALUE ];
@ -1400,7 +1343,6 @@ public final class BinaryTree // final for performance
*
* @return the number of key-value mappings in this map.
*/
public int size()
{
return _size;
@ -1419,8 +1361,7 @@ public final class BinaryTree // final for performance
* type for this map.
* @exception NullPointerException if the key is null
*/
public boolean containsKey(final Object key)
public boolean containsKey(Object key)
throws ClassCastException, NullPointerException
{
checkKey(key);
@ -1436,8 +1377,7 @@ public final class BinaryTree // final for performance
* @return true if this map maps one or more keys to the specified
* value.
*/
public boolean containsValue(final Object value)
public boolean containsValue(Object value)
{
checkValue(value);
return lookup(( Comparable ) value, _VALUE) != null;
@ -1456,8 +1396,7 @@ public final class BinaryTree // final for performance
* type for this map.
* @exception NullPointerException if the key is null
*/
public Object get(final Object key)
public Object get(Object key)
throws ClassCastException, NullPointerException
{
return doGet(( Comparable ) key, _KEY);
@ -1483,8 +1422,7 @@ public final class BinaryTree // final for performance
* value duplicates an
* existing value
*/
public Object put(final Object key, final Object value)
public Object put(Object key, Object value)
throws ClassCastException, NullPointerException,
IllegalArgumentException
{
@ -1562,8 +1500,7 @@ public final class BinaryTree // final for performance
* @return previous value associated with specified key, or null
* if there was no mapping for key.
*/
public Object remove(final Object key)
public Object remove(Object key)
{
return doRemove(( Comparable ) key, _KEY);
}
@ -1571,7 +1508,6 @@ public final class BinaryTree // final for performance
/**
* Removes all mappings from this map
*/
public void clear()
{
modify();
@ -1592,7 +1528,6 @@ public final class BinaryTree // final for performance
*
* @return a set view of the keys contained in this map.
*/
public Set keySet()
{
if (_key_set[ _KEY ] == null)
@ -1650,7 +1585,6 @@ public final class BinaryTree // final for performance
*
* @return a collection view of the values contained in this map.
*/
public Collection values()
{
if (_value_collection[ _KEY ] == null)
@ -1723,7 +1657,6 @@ public final class BinaryTree // final for performance
*
* @return a set view of the mappings contained in this map.
*/
public Set entrySet()
{
if (_entry_set[ _KEY ] == null)
@ -1803,8 +1736,7 @@ public final class BinaryTree // final for performance
*
* @param type
*/
BinaryTreeIterator(final int type)
BinaryTreeIterator(int type)
{
_type = type;
_expected_modifications = BinaryTree.this._modifications;
@ -1825,7 +1757,7 @@ public final class BinaryTree // final for performance
* @return true if the iterator has more elements.
*/
public final boolean hasNext()
public boolean hasNext()
{
return _next_node != null;
}
@ -1842,7 +1774,7 @@ public final class BinaryTree // final for performance
* back
*/
public final Object next()
public Object next()
throws NoSuchElementException, ConcurrentModificationException
{
if (_next_node == null)
@ -1878,7 +1810,7 @@ public final class BinaryTree // final for performance
* back
*/
public final void remove()
public void remove()
throws IllegalStateException, ConcurrentModificationException
{
if (_last_returned_node == null)
@ -1897,7 +1829,7 @@ public final class BinaryTree // final for performance
/* ********** END implementation of Iterator ********** */
} // end private abstract class BinaryTreeIterator
// final for performance
// for performance
private static final class Node
implements Map.Entry
{
@ -1917,7 +1849,7 @@ public final class BinaryTree // final for performance
* @param value
*/
Node(final Comparable key, final Comparable value)
Node(Comparable key, Comparable value)
{
_data = new Comparable[]
{
@ -1949,8 +1881,7 @@ public final class BinaryTree // final for performance
*
* @return the key or value
*/
private Comparable getData(final int index)
public Comparable getData(int index)
{
return _data[ index ];
}
@ -1961,8 +1892,7 @@ public final class BinaryTree // final for performance
* @param node the new left node
* @param index _KEY or _VALUE
*/
private void setLeft(final Node node, final int index)
public void setLeft(Node node, int index)
{
_left[ index ] = node;
}
@ -1975,7 +1905,7 @@ public final class BinaryTree // final for performance
* @return the left node -- may be null
*/
private Node getLeft(final int index)
public Node getLeft(int index)
{
return _left[ index ];
}
@ -1986,8 +1916,7 @@ public final class BinaryTree // final for performance
* @param node the new right node
* @param index _KEY or _VALUE
*/
private void setRight(final Node node, final int index)
public void setRight(Node node, int index)
{
_right[ index ] = node;
}
@ -2000,7 +1929,7 @@ public final class BinaryTree // final for performance
* @return the right node -- may be null
*/
private Node getRight(final int index)
public Node getRight(int index)
{
return _right[ index ];
}
@ -2011,8 +1940,7 @@ public final class BinaryTree // final for performance
* @param node the new parent node
* @param index _KEY or _VALUE
*/
private void setParent(final Node node, final int index)
public void setParent(Node node, int index)
{
_parent[ index ] = node;
}
@ -2024,8 +1952,7 @@ public final class BinaryTree // final for performance
*
* @return the parent node -- may be null
*/
private Node getParent(final int index)
public Node getParent(int index)
{
return _parent[ index ];
}
@ -2036,8 +1963,7 @@ public final class BinaryTree // final for performance
* @param node the node to swap with
* @param index _KEY or _VALUE
*/
private void swapColors(final Node node, final int index)
public void swapColors(Node node, int index)
{
// Swap colors -- old hacker's trick
@ -2053,8 +1979,7 @@ public final class BinaryTree // final for performance
*
* @return true if black (which is represented as a true boolean)
*/
private boolean isBlack(final int index)
public boolean isBlack(int index)
{
return _black[ index ];
}
@ -2066,8 +1991,7 @@ public final class BinaryTree // final for performance
*
* @return true if non-black
*/
private boolean isRed(final int index)
public boolean isRed(int index)
{
return !_black[ index ];
}
@ -2077,8 +2001,7 @@ public final class BinaryTree // final for performance
*
* @param index _KEY or _VALUE
*/
private void setBlack(final int index)
public void setBlack(int index)
{
_black[ index ] = true;
}
@ -2088,8 +2011,7 @@ public final class BinaryTree // final for performance
*
* @param index _KEY or _VALUE
*/
private void setRed(final int index)
public void setRed(int index)
{
_black[ index ] = false;
}
@ -2100,8 +2022,7 @@ public final class BinaryTree // final for performance
* @param node the node whose color we're adopting
* @param index _KEY or _VALUE
*/
private void copyColor(final Node node, final int index)
public void copyColor(Node node, int index)
{
_black[ index ] = node._black[ index ];
}
@ -2111,7 +2032,6 @@ public final class BinaryTree // final for performance
/**
* @return the key corresponding to this entry.
*/
public Object getKey()
{
return _data[ _KEY ];
@ -2120,7 +2040,6 @@ public final class BinaryTree // final for performance
/**
* @return the value corresponding to this entry.
*/
public Object getValue()
{
return _data[ _VALUE ];
@ -2133,10 +2052,7 @@ public final class BinaryTree // final for performance
* @param ignored
*
* @return does not return
*
* @exception UnsupportedOperationException
*/
public Object setValue(Object ignored)
throws UnsupportedOperationException
{
@ -2154,7 +2070,6 @@ public final class BinaryTree // final for performance
* @return true if the specified object is equal to this map
* entry.
*/
public boolean equals(Object o)
{
if (this == o)
@ -2188,5 +2103,4 @@ public final class BinaryTree // final for performance
/* ********** END implementation of Map.Entry ********** */
}
} // end public class BinaryTree
}

View File

@ -16,7 +16,6 @@
limitations under the License.
==================================================================== */
package org.apache.poi.util;
import java.util.*;
@ -27,18 +26,15 @@ import java.util.*;
* @author Jason Height (jheight at apache dot org)
*/
public class BitFieldFactory
{
public class BitFieldFactory {
private static Map instances = new HashMap();
public static BitField getInstance(final int mask) {
BitField f = (BitField)instances.get(new Integer(mask));
public static BitField getInstance(int mask) {
BitField f = (BitField)instances.get(Integer.valueOf(mask));
if (f == null) {
f = new BitField(mask);
instances.put(new Integer(mask), f);
instances.put(Integer.valueOf(mask), f);
}
return f;
}
} // end public class BitFieldFactory
}

View File

@ -0,0 +1,41 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.util;
import java.io.FilterInputStream;
import java.io.InputStream;
import org.apache.poi.poifs.filesystem.POIFSFileSystem;
/**
* A wrapper around an {@link InputStream}, which
* ignores close requests made to it.
*
* Useful with {@link POIFSFileSystem}, where you want
* to control the close yourself.
*/
public class CloseIgnoringInputStream extends FilterInputStream {
    /**
     * Wraps the given stream; all reads are delegated to it unchanged.
     *
     * @param in the stream to wrap (never closed by this wrapper)
     */
    public CloseIgnoringInputStream(InputStream in) {
        super(in);
    }

    /**
     * Deliberately a no-op: the wrapped stream is left open, and the caller
     * retains responsibility for closing it.
     */
    @Override
    public void close() {
        // Does nothing and ignores you
    }
}

View File

@ -0,0 +1,34 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.util;
/**
 * Implementors of this interface allow client code to 'delay' writing to a certain section of a
 * data output stream.
 * <p>
 * A typical application is writing BIFF records when the size is not known until well after
 * the header has been written.  The client code can call {@link #createDelayedOutput(int)}
 * to reserve (for example) two bytes of the output for the 'ushort size' header field; the
 * reserved region can then be written at any later stage.
 *
 * @author Josh Micich
 */
public interface DelayableLittleEndianOutput extends LittleEndianOutput {
/**
 * Creates an output stream intended for outputting a sequence of exactly <tt>size</tt> bytes.
 *
 * @param size the number of bytes to reserve in the underlying output
 * @return the output through which the reserved bytes may be written later
 */
LittleEndianOutput createDelayedOutput(int size);
}

View File

@ -0,0 +1,84 @@
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.util;
import java.awt.*;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Properties;
@SuppressWarnings("deprecation")
public class FontMetricsDumper
{
    /** Character ranges sampled for each font: a-z, A-Z, 0-9 (inclusive pairs). */
    private static final char[][] SAMPLE_RANGES = { { 'a', 'z' }, { 'A', 'Z' }, { '0', '9' } };

    /**
     * Dumps the height, sampled characters and per-character advance widths of every
     * font installed in the local graphics environment (bold, 10pt variants) into a
     * {@code font_metrics.properties} file in the current working directory.
     *
     * @param args ignored
     * @throws IOException if the properties file cannot be written
     */
    public static void main( String[] args ) throws IOException
    {
        Properties props = new Properties();
        Font[] allFonts = GraphicsEnvironment.getLocalGraphicsEnvironment().getAllFonts();
        // The sampled character list is the same for every font; build it once.
        String characters = sampleCharacters();
        for ( int i = 0; i < allFonts.length; i++ )
        {
            String fontName = allFonts[i].getFontName();
            // Metrics are taken from the bold 10pt variant of each installed font.
            Font font = new Font(fontName, Font.BOLD, 10);
            FontMetrics fontMetrics = Toolkit.getDefaultToolkit().getFontMetrics(font);
            props.setProperty("font." + fontName + ".height", fontMetrics.getHeight() + "");
            props.setProperty("font." + fontName + ".characters", characters);
            props.setProperty("font." + fontName + ".widths", sampleWidths(fontMetrics));
        }
        FileOutputStream fileOut = new FileOutputStream("font_metrics.properties");
        try
        {
            props.store(fileOut, "Font Metrics");
        }
        finally
        {
            fileOut.close();
        }
    }

    /** Builds the comma-separated sample character list ("a, b, ..., 9, "). */
    private static String sampleCharacters()
    {
        StringBuffer characters = new StringBuffer();
        for ( char[] range : SAMPLE_RANGES )
        {
            for ( char c = range[0]; c <= range[1]; c++ )
            {
                characters.append( c + ", " );
            }
        }
        return characters.toString();
    }

    /** Builds the comma-separated advance-width list for the sampled characters. */
    private static String sampleWidths( FontMetrics fontMetrics )
    {
        // getWidths() copies a 256-entry array each call, so fetch it once per font
        // instead of once per sampled character as the old code did.
        int[] charWidths = fontMetrics.getWidths();
        StringBuffer widths = new StringBuffer();
        for ( char[] range : SAMPLE_RANGES )
        {
            for ( char c = range[0]; c <= range[1]; c++ )
            {
                widths.append( charWidths[c] + ", " );
            }
        }
        return widths.toString();
    }
}

View File

@ -399,10 +399,10 @@ public class HexDump {
while (bytesRemaining-- > 0)
{
int c = in.read();
if (c == -1)
if (c == -1) {
break;
else
buf.write(c);
}
buf.write(c);
}
}

Some files were not shown because too many files have changed in this diff Show More