/*
 * [ProcessZips.java]
 *
 * Summary: create any new zip files that need creating, and fill them with the element files.
 *
 * Copyright: (c) 2003-2017 Roedy Green, Canadian Mind Products, http://mindprod.com
 *
 * Licence: This software may be copied and used freely for any purpose but military.
 *          http://mindprod.com/contact/nonmil.html
 *
 * Requires: JDK 1.8+
 *
 * Created with: JetBrains IntelliJ IDEA IDE http://www.jetbrains.com/idea/
 *
 * Version History:
 *  10.2 2009-04-03 tidy up code to check presence of necessary files to make it more WORA.
 */
package com.mindprod.replicatorsender;

import com.mindprod.common18.Localise;
import com.mindprod.common18.ST;
import com.mindprod.compactor.Compactor;
import com.mindprod.filetransfer.FileTransfer;
import com.mindprod.hunkio.HunkIO;
import com.mindprod.replicatorcommon.Config;
import com.mindprod.replicatorcommon.FilenameContext;
import com.mindprod.replicatorcommon.ReplicatorCommon;
import com.mindprod.replicatorcommon.ZipnameContext;
import com.mindprod.sorted.Merge;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

import static java.lang.System.*;

/**
 * create any new zip files that need creating, and fill them with the element files.
 *
 * @author Roedy Green, Canadian Mind Products
 * @version 10.2 2009-04-03 tidy up code to check presence of necessary files to make it more WORA.
 * @since 2003-09-14
 */
final class ProcessZips
    {
    /**
     * Used for copying files. Use default buffersize for all file transfers. Buffers themselves allocated as needed.
     * No web traffic involved, so timeouts irrelevant.
     */
    private static final FileTransfer ft = new FileTransfer();

    /**
     * Add one deleted-file marker element to a newly created zip file.
     *
     * @param zip ZipOutputStream to tack this element on the end.
     * @param fd  descriptor of file to be added.
     *
     * @throws IOException if file trouble.
     */
    private static void addOneDeletedElement( ZipOutputStream zip, MaxiFD fd ) throws IOException
        {
        final String elementName = fd.getFilename( FilenameContext.INSIDE_ZIP );
        // deleting.
        if ( Config.DEBUGGING )
            {
            out.println( getFileActionVerb( fd ) + " " + elementName );
            }
        else
            {
            // side effect is to calculate stats
            getFileActionVerb( fd );
            }
        final ZipEntry entry = new ZipEntry( elementName );
        entry.setTime( fd.getTimestamp() );  // clump date not encoded.
        entry.setComment( "deleted" );
        // no need to setCRC, setSize, computed automatically.
        zip.putNextEntry( entry );
        zip.closeEntry();
        }

    /**
     * Add one element to a newly created zip file.
     *
     * @param zip ZipOutputStream to tack this element on the end.
     * @param zd  descriptor of the zip file
     * @param fd  descriptor of file to be added.
     *
     * @throws IOException if can't add element.
     */
    private static void addOneElement( ZipOutputStream zip, MaxiZD zd, MaxiFD fd ) throws IOException
        {
        final String elementName = fd.getFilename( FilenameContext.INSIDE_ZIP );
        if ( Config.DEBUGGING )
            {
            out.println( getFileActionVerb( fd ) + " " + elementName );
            }
        else
            {
            // side effect is to calculate stats
            getFileActionVerb( fd );
            }
        final ZipEntry entry = new ZipEntry( elementName );
        entry.setTime( fd.getTimestamp() );
        final int oldFileLength = ( int ) fd.getFileLength();
        int fileLength;
        int shrinkage;
        final File fileToAdd = new File( fd.getFilename( FilenameContext.ON_SOURCE ) );
        // strings all interned so == compare is safe,
        // but use equals for safety. Interning speeds things up.
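        // Summary of the compaction choices implemented below (inferred from the branches
        // that follow; the set of legal COMPACT_HTML values is defined in ConfigForSender):
        //   "original"    - compact the source .html/.htm file in place, then zip the compacted copy.
        //   "distributed" - leave the source file untouched; compact only the bytes written into the zip.
        //   anything else - copy the file into the zip unchanged.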
        final String extension = fd.getExtension();
        final boolean isHtml = extension.equals( "html" ) || extension.equals( "htm" );
        // but not htmlfrag, xml, xmlfrag.
        if ( isHtml && ConfigForSender.COMPACT_HTML.equals( "original" ) )
            {
            if ( !fileToAdd.canWrite() )
                {
                // canWrite true implies file exists.
                ReplicatorCommon.fatal( "Read-only original files cannot be compacted: "
                                        + fd.getFilename( FilenameContext.ON_SOURCE ) );
                }
            // compact the original file in place. (The "distributed" mode instead uses
            // compactStringKeepingMacrosAndComments to compact just the distributed copy.)
            Compactor.compactFile( true, fileToAdd );
            // no need to refresh fileToAdd even though it is now a different
            // directory entry.
            fileLength = ( int ) fileToAdd.length();
            shrinkage = oldFileLength - fileLength;
            if ( shrinkage != 0 )
                {
                // set file back to its original date so it will be in the
                // proper zip.
                //noinspection ResultOfMethodCallIgnored
                fileToAdd.setLastModified( fd.getTimestamp() );
                if ( Config.DEBUGGING )
                    {
                    out.println( "original file compacted from " + oldFileLength + " to " + fileLength + " bytes." );
                    }
                // record the new size so we will recognize it.
                zd.livewood -= shrinkage;
                fd.setFileLength( fileLength );
                }
            }
        else
            {
            // did not compact the original. It should not have changed.
            fileLength = ( int ) fileToAdd.length();
            if ( oldFileLength != fileLength )
                {
                err.println( Localise.localise( fd.dump() ) );
                ReplicatorCommon.fatal( "file " + elementName
                                        + " has unexpectedly changed length from "
                                        + oldFileLength + " to " + fileLength + " bytes."
                                        + "\n"
                                        + "Please don't update distribution files while the Replicator is running." );
                }
            }
        // no need to setCRC, setSize, computed automatically.
        // hook entry into zip
        zip.putNextEntry( entry );
        if ( isHtml && ConfigForSender.COMPACT_HTML.equals( "distributed" ) )
            {
            // compact just the distribution, leaving original intact.
            // read entire file, compact and append as element.
            final String wholeFile = HunkIO.readEntireFile( fileToAdd );
            // compact the distributed version. (The "original" mode instead uses compactFile
            // to compact the original.)
            final String compacted = Compactor.compactStringKeepingMacrosAndComments( wholeFile, fileToAdd.getPath() );
            final byte[] octets = compacted.getBytes( /* default encoding */ );
            fileLength = octets.length;
            shrinkage = oldFileLength - fileLength;
            if ( shrinkage != 0 )
                {
                if ( Config.DEBUGGING )
                    {
                    out.println( "distributed file compacted from " + oldFileLength + " to " + fileLength + " bytes." );
                    }
                // We don't adjust livewood or fd.
                // We need to recognise the file in future in its fluffy form
                // with its fluffy length. Its old date is just fine.
                }
            zip.write( octets );
            }
        else
            {
            // no COMPACTING
            ft.copy( fileToAdd, zip, false /* don't close target */ );
            }
        zip.closeEntry();
        }

    /**
     * Physically creates zip file, adding elements to it. Maintains livewood, firstElement and lastElement.
     *
     * @param zd zip file to be created.
     *
     * @throws IOException if cannot create file.
     */
    private static void createOneZipFile( MaxiZD zd ) throws IOException
        {
        // all files have already been assigned to their zips.
        if ( Config.DEBUGGING )
            {
            out.println( "creating " + zd.getZipFilename( ZipnameContext.RELATIVE ) + "..." );
            }
        final FileOutputStream fos = new FileOutputStream( zd.getZipFilename( ZipnameContext.ON_SOURCE ) );
        final ZipOutputStream zip = new ZipOutputStream( fos );
        zip.setLevel( ConfigForSender.COMPRESSION_LEVEL );
        zip.setMethod( ZipOutputStream.DEFLATED );
        final int zipNumber = zd.getZipNumber();
        // sorted by clumping, but would not matter if it were not.
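        // Walk the complete combined list of files and deletions, picking out just the
        // entries assigned to this zip number: a bare marker entry for a deletion,
        // the (possibly compacted) file contents otherwise.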
        for ( MaxiFD fd : ReplicatorSender.combinedFilesAndDeletions )
            {
            if ( fd.getZipNumber() == zipNumber )
                {
                if ( fd.isDeleted() )
                    {
                    addOneDeletedElement( zip, fd );
                    }
                else
                    {
                    addOneElement( zip, zd, fd );
                    }
                }
            } // end for
        zip.close();
        } // end createOneZipFile

    /**
     * Delete an old zip file completely. If already deleted, won't hurt.
     *
     * @param zd zip descriptor
     *
     * @return true if file successfully deleted, or was already deleted.
     */
    private static boolean deleteOneZipFile( MaxiZD zd )
        {
        final File fileToDelete = new File( zd.getZipFilename( ZipnameContext.ON_SOURCE ) );
        return fileToDelete.delete() || !fileToDelete.exists();
        }

    /**
     * Emaciate the zip file down to zero bytes.
     *
     * @param zd zip descriptor
     *
     * @throws IOException if cannot emaciate file.
     */
    private static void emaciateOneZipFile( MaxiZD zd ) throws IOException
        {
        final FileOutputStream fos = new FileOutputStream( zd.getZipFilename( ZipnameContext.ON_SOURCE ) );
        fos.close();
        }

    /**
     * come up with a verb to describe what we just did to this file.
     *
     * @param fd file just processed.
     *
     * @return verb describing our action.
     */
    private static String getFileActionVerb( final MaxiFD fd )
        {
        final String verb;
        if ( fd.getClumping() <= ReplicatorSender.newestRepackagedTimestamp )
            {
            // repacking files from a retired zip, that have not changed.
            if ( fd.isDeleted() )
                {
                verb = "redeleting";
                if ( StatsForSender.oldestRedeletedFile == null )
                    {
                    StatsForSender.oldestRedeletedFile = fd;
                    }
                else
                    {
                    StatsForSender.newestRedeletedFile = fd;
                    }
                }
            else
                {
                verb = "repacking";
                StatsForSender.recentRepackedFilesCount++;
                if ( StatsForSender.oldestRepackagedFile == null )
                    {
                    StatsForSender.oldestRepackagedFile = fd;
                    }
                else
                    {
                    StatsForSender.newestRepackagedFile = fd;
                    }
                }
            }
        else
            {
            // packing file for the first time, or packing a newly changed file.
            if ( fd.isDeleted() )
                {
                verb = "deleting";
                if ( StatsForSender.oldestRecentlyDeletedFile == null )
                    {
                    StatsForSender.oldestRecentlyDeletedFile = fd;
                    }
                else
                    {
                    StatsForSender.newestRecentlyDeletedFile = fd;
                    }
                }
            else
                {
                verb = "packing";
                if ( StatsForSender.oldestRecentlyPackagedFile == null )
                    {
                    StatsForSender.oldestRecentlyPackagedFile = fd;
                    }
                StatsForSender.newestRecentlyPackagedFile = fd;
                }
            // TODO: ideally we should split packing into
            //    verb = "new";
            // and
            //    verb = "updating";
            // There is currently no field in MaxiFD to track the difference.
            }
        return ST.leftPad( verb, 10, false );
        }

    /**
     * Create all the newly planned zip files, and move existing ones along their retirement lifecycle as appropriate.
     */
    public static void processZipsPass1()
        {
        boolean hasNulls = false;
        final ArrayList<MaxiZD> allZips = ReplicatorSender.allZips;
        try
            {
            // don't replace with for:each. we need zipIndex.
            for ( int zipIndex = 0; zipIndex < allZips.size(); zipIndex++ )
                {
                final MaxiZD zd = allZips.get( zipIndex );
                switch ( zd.status )
                    {
                    case A_UNCREATED:
                        createOneZipFile( zd );
                        zd.status = ZDStatus.B_CREATED;
                        StatsForSender.recentZipsCreated++;
                        break;
                    case B_CREATED:
                        // leave alone
                        break;
                    case C_DECOMMISSIONED:
                        zd.status = ZDStatus.D_RETIRED;
                        zd.retiredOnTimestamp = System.currentTimeMillis();
                        StatsForSender.recentZipsRetired++;
                        break;
                    case D_RETIRED:
                        // we hold off emaciating the file to allow any
                        // outstanding downloads to complete.
                        if ( zd.retiredOnTimestamp < ConfigForSender.RETIREMENT_AXE_TIMESTAMP )
                            {
                            // retirement period is up. Get on with final burial.
                            emaciateOneZipFile( zd );
                            zd.status = ZDStatus.E_EMACIATED;
                            StatsForSender.recentZipsEmaciated++;
                            }
                        // otherwise we do nothing; the zip stays retired
                        // until RETIREMENT hours have elapsed.
                        break;
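                    // Zip lifecycle, at most one step per zip per run (a sketch inferred from the
                    // transitions handled in this switch and in processZipsPass2):
                    //   A_UNCREATED      -> B_CREATED    zip physically written to disk.
                    //   C_DECOMMISSIONED -> D_RETIRED    superseded zip kept around for outstanding downloads.
                    //   D_RETIRED        -> E_EMACIATED  truncated to zero bytes once the retirement period is up.
                    //   E_EMACIATED      -> F_DELETED    file removed from disk, retried later if the delete fails.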
                    case E_EMACIATED:
                        if ( deleteOneZipFile( zd ) )
                            {
                            zd.status = ZDStatus.F_DELETED;
                            StatsForSender.recentZipsDeleted++;
                            // we forget all about this zip from now on.
                            allZips.set( zipIndex, null );
                            // will prune out later.
                            hasNulls = true;
                            }
                        else
                            {
                            err.println( "Unable to delete zip " + zd + ". Will try again later." );
                            }
                        break;
                    default:
                    case F_DELETED:
                        assert false : "processZips: unexpected zip status.";
                    } // end switch
                } // end for
            }
        catch ( IOException e )
            {
            ReplicatorCommon.fatal( "Problems creating zip files." + "\n" + e.getMessage() );
            }
        if ( hasNulls )
            {
            // prune out deleted zips
            ReplicatorSender.allZips = Merge.pruneNulls( allZips );
            }
        StatsForSender.highestZip = ReplicatorSender.nextZipNumber - 1;
        } // end processZipsPass1

    /**
     * Delete recently emaciated Zip files, just prior to second upload.
     */
    public static void processZipsPass2()
        {
        boolean hasNulls = false;
        final ArrayList<MaxiZD> allZips = ReplicatorSender.allZips;
        // don't replace with for:each. we need zipIndex.
        for ( int zipIndex = 0; zipIndex < allZips.size(); zipIndex++ )
            {
            final MaxiZD zd = allZips.get( zipIndex );
            switch ( zd.status )
                {
                case E_EMACIATED:
                    if ( deleteOneZipFile( zd ) )
                        {
                        zd.status = ZDStatus.F_DELETED;
                        StatsForSender.recentZipsDeleted++;
                        // we forget all about this zip from now on.
                        allZips.set( zipIndex, null );
                        // will prune out later.
                        hasNulls = true;
                        }
                    else
                        {
                        err.println( "Unable to delete zip " + zd + ". Will try again later." );
                        }
                    break;
                default:
                    // ignore everything but EMACIATED
                } // end switch
            } // end for
        if ( hasNulls )
            {
            // prune out deleted zips
            ReplicatorSender.allZips = Merge.pruneNulls( allZips );
            }
        StatsForSender.highestZip = ReplicatorSender.nextZipNumber - 1;
        } // end processZipsPass2
    } // end ProcessZips