/*
 * [FetchZips.java]
 *
 * Summary: Receiver side, fetches zips and opens them.
 *
 * Copyright: (c) 2003-2017 Roedy Green, Canadian Mind Products, http://mindprod.com
 *
 * Licence: This software may be copied and used freely for any purpose but military.
 *          http://mindprod.com/contact/nonmil.html
 *
 * Requires: JDK 1.8+
 *
 * Created with: JetBrains IntelliJ IDEA IDE http://www.jetbrains.com/idea/
 *
 * Version History:
 *  10.2 2009-04-03 tidy up code to check presence of necessary files to make it more WORA.
 */
package com.mindprod.replicator;

import com.mindprod.common18.EIO;
import com.mindprod.filetransfer.FileTransfer;
import com.mindprod.filter.ClamFilter;
import com.mindprod.replicatorcommon.Config;
import com.mindprod.replicatorcommon.FilenameContext;
import com.mindprod.replicatorcommon.IO;
import com.mindprod.replicatorcommon.MiniFD;
import com.mindprod.replicatorcommon.MiniZD;
import com.mindprod.replicatorcommon.ZipnameContext;

import javax.swing.SwingWorker;
import java.io.File;
import java.io.IOException;
import java.net.Authenticator;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Enumeration;
import java.util.concurrent.TimeUnit;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;

import static com.mindprod.replicator.ConfigForReceiver.RECEIVER_LOG;
import static com.mindprod.replicator.ConfigForReceiver.RECEIVER_ZIP_STAGING_DIR;
import static java.lang.System.*;

/**
 * Receiver side, fetches zips and opens them.
 * <p>
 * Runs as a SwingWorker background task: downloads the zip manifest, fetches
 * any zips newer than the last processed clumping number, unpacks them into
 * the receiver base directory, and optionally verifies the whole distribution.
 *
 * @author Roedy Green, Canadian Mind Products
 * @version 10.2 2009-04-03 tidy up code to check presence of necessary files to make it more WORA.
 * @since 2003-09-13
 */
final class FetchZips extends SwingWorker<Void, Void>
    {
    /**
     * Use default buffersize for all file transfers. Buffers themselves allocated as needed.
     */
    static final FileTransfer ft = new FileTransfer();

    /**
     * All the active zip files, as described by the downloaded zip manifest.
     */
    static MiniZD[] allDownloadableZips;

    /**
     * all files in current distribution. No deadwood. Only used if verify selected.
     */
    static MiniFD[] allFilesAndDeletions;

    static
        {
        // might as well wait, just in case net congestion clears
        ft.setConnectTimeout( ( int ) TimeUnit.MINUTES.toMillis( 2 ) );
        ft.setReadTimeout( ( int ) TimeUnit.MINUTES.toMillis( 2 ) );
        }

    /**
     * how complete we are 0.. 100.0
     */
    private double progress;

    /**
     * Constructor
     */
    public FetchZips()
        {
        }

    /**
     * delete all zips in the staging directory.
     */
    private static void deleteAllZips()
        {
        final String[] zips = new File( ConfigForReceiver.RECEIVER_ZIP_STAGING_DIR )
                .list( new ClamFilter( "z", ".zip" ) );
        if ( zips == null )
            {
            // staging directory missing or unreadable: nothing to delete.
            return;
            }
        for ( String zip : zips )
            {
            Replicator.doing( "deleting debris from crash: " + zip );
            //noinspection ResultOfMethodCallIgnored
            new File( ConfigForReceiver.RECEIVER_ZIP_STAGING_DIR, zip ).delete();
            }
        }

    /**
     * get rid of files no longer used, e.g. from previous versions.
     */
    private static void deleteJunk()
        {
        // will not throw exception if file is already gone.
        //noinspection ResultOfMethodCallIgnored
        new File( ConfigForReceiver.RECEIVER_ZIP_STAGING_DIR, "filemanifest.ser" ).delete();
        //noinspection ResultOfMethodCallIgnored
        new File( ConfigForReceiver.RECEIVER_ZIP_STAGING_DIR, "manifest.ser" ).delete();
        }

    /**
     * Delete a zip file after it has been unpacked.
     *
     * @param zd zip descriptor
     */
    private static void deleteOneZip( MiniZD zd )
        {
        // it must exist already or something is very haywire.
        //noinspection ResultOfMethodCallIgnored
        new File( zd.getZipFilename( ZipnameContext.ON_TARGET ) ).delete();
        }

    /**
     * delete any retired zips, i.e. zips in the staging directory that no
     * longer appear in the list of active downloadable zips.
     */
    private static void deleteRetiredZips()
        {
        // if the zip is not in the list, it goes.
        // gather list of zips in the staging directory.
        final String[] zips = new File( ConfigForReceiver.RECEIVER_ZIP_STAGING_DIR )
                .list( new ClamFilter( "z", ".zip" ) );
        if ( zips == null )
            {
            // staging directory missing or unreadable: nothing to retire.
            return;
            }
        // process each old zip in the staging directory. Usually there are none.
        for ( String zip : zips )
            {
            final String zipToRetire = zip.toLowerCase();
            boolean keep = false;
            // compare against list of active zips.
            for ( MiniZD downloadableZip : allDownloadableZips )
                {
                if ( downloadableZip.getRelativeZipFilename().equals( zipToRetire ) )
                    {
                    keep = true;
                    break;
                    }
                }
            if ( !keep )
                {
                Replicator.doing( "deleting: " + zipToRetire );
                //noinspection ResultOfMethodCallIgnored
                new File( ConfigForReceiver.RECEIVER_ZIP_STAGING_DIR, zipToRetire ).delete();
                }
            }
        }

    /**
     * Download one zip file from the website. URL may point to something other than a website, e.g. dir on a lan.
     *
     * @param zd Zip to download.
     *
     * @throws MalformedURLException if URL is bad.
     */
    private static void downloadOneZip( MiniZD zd ) throws MalformedURLException
        {
        final String filename = zd.getZipFilename( ZipnameContext.ON_TARGET );
        final String url = zd.getZipFilename( ZipnameContext.ON_RECEIVER_ZIP_URL );
        final StringBuilder sb = new StringBuilder( 200 );
        sb.append( "up to: " );
        sb.append( Config.TIMESTAMP_SECOND_FORMAT.format( zd.getHighestClumpingInZip() ) );
        sb.append( " downloading: " );
        sb.append( url );
        Replicator.doing( sb.toString() );
        if ( !ft.download( new URL( url ), new File( filename ), false ) )
            {
            Replicator.fatal( "Unable to download zip archive file " + url + " to " + filename );
            }
        }

    /**
     * Download and unpack the z999.zip files.
     *
     * @throws IOException if trouble fetching zips
     */
    private void downloadAndUnpackZips() throws IOException
        {
        // count how many zips we will download
        int zipsToDownload = 0;
        for ( MiniZD zd : allDownloadableZips )
            {
            // No need for slop. These are precise numbers that came
            // originally from the sender. We won't therefore reload a zip
            // we have previously processed, even if a new jar has the same timestamp,
            // just a different filename as the last element of the last jar.
            // Clumping numbers are unique, so we won't get in trouble if a
            // series of files have the same date.
            if ( zd.getHighestClumpingInZip() > PersistReceiverState.clumpingProcessedSoFar )
                {
                zipsToDownload++;
                }
            }
        final double progressPointsPerZip =
                ( zipsToDownload == 0 ) ? 0 : ( 100. - progress ) / zipsToDownload;
        // process just new zips.
        // We need not process new consolidations of old files.
        for ( MiniZD zd : allDownloadableZips )
            {
            if ( zd.getHighestClumpingInZip() > PersistReceiverState.clumpingProcessedSoFar )
                {
                // we need this zip, it contains stuff more recent than we
                // have processed so far.
                downloadOneZip( zd );
                if ( stopProgress( progressPointsPerZip * .75 ) )
                    {
                    return;
                    }
                if ( ConfigForReceiver.UNPACK_ZIPS )
                    {
                    // decompress each element in the zip
                    unpackOneZip( zd, progressPointsPerZip * .25 );
                    }
                if ( !ConfigForReceiver.KEEP_ZIPS )
                    {
                    // delete the zip file.
                    deleteOneZip( zd );
                    }
                PersistReceiverState.clumpingProcessedSoFar = zd.getHighestClumpingInZip();
                // save state after each zip unpacked so that
                // in case we crash, we can pick up where we left off.
                Log.println( "milestone: "
                        + Config.TIMESTAMP_MILLISECOND_FORMAT
                        .format( PersistReceiverState.clumpingProcessedSoFar ) );
                PersistReceiverState.saveStateToReceiverSer();
                } // end if want that zip.
            } // end for each zip
        }

    /**
     * record progress.
     *
     * @param increment how much progress have we made, in points out of 100.
     *
     * @return true if user has cancelled us.
     */
    @SuppressWarnings( { "BooleanMethodNameMustStartWithQuestion" } )
    private boolean stopProgress( double increment )
        {
        progress += increment;
        if ( progress > 100 )
            {
            progress = 100;
            }
        setProgress( ( int ) progress );
        return isCancelled();
        }

    /**
     * Unpack one zip file, and put all its contents into the target directory. It may contain some deadwood, but so
     * long as we process zips in the proper order the deadwood will be overwritten.
     *
     * @param zd                  Which zip file to unpack.
     * @param totalProgressPoints how many progress points available for this zip's progress.
     *
     * @throws IOException if can't access zip.
     */
    private void unpackOneZip( MiniZD zd, double totalProgressPoints ) throws IOException
        {
        // can't use ZipInputStream, since getSize would fail
        final File zf = new File( zd.getZipFilename( ZipnameContext.ON_TARGET ) );
        // try-with-resources guarantees the zip is closed even when we
        // return early on cancellation or an IOException is thrown.
        try ( ZipFile zip = new ZipFile( zf ) )
            {
            // we get to split totalProgressPoints over all file unpackings.
            final int fileCount = zip.size();
            final double progressPointsPerFile =
                    ( fileCount == 0 ) ? 0 : totalProgressPoints / fileCount;
            // for each element in the zip
            // can't use for:each, only works with Iterator not Enumeration.
            for ( Enumeration<? extends ZipEntry> e = zip.entries(); e.hasMoreElements(); )
                {
                final ZipEntry entry = e.nextElement();
                // We must process the entire zip. It contains only
                // timestamps, not clumpings. Our progress so far is based
                // on clump, not timestamps.
                // Unfortunately this means we can't easily bypass entries
                // we have previously done. We do it using the actual timestamps comparing
                // on a file by file basis.
                final String elementName = entry.getName();
                // inside zip, uses / names; convert to platform separator.
                final File elementFile = new File( ConfigForReceiver.RECEIVER_BASE_DIR,
                        elementName.replace( '/', File.separatorChar ) );
                IO.ensureDirectoryExists( elementFile.getParent() );
                // Test for deleted marker, possibly null
                if ( "deleted".equals( entry.getComment() ) )
                    {
                    final boolean existed = elementFile.exists();
                    final boolean success = elementFile.delete();
                    // may have been already deleted, since we sometimes
                    // process an entry more than once when it gets
                    // repacked.
                    if ( existed && success )
                        {
                        // indent to make zip entries stand out in log.
                        Replicator.doing( "    deleting: " + elementName );
                        StatsForReceiver.deletedFilesCount++;
                        }
                    // If that makes the directory empty,
                    // delete the parent directory,
                    // and on up toward the root, but don't delete
                    // RECEIVER_BASE_DIR
                    File parentDir = elementFile.getParentFile();
                    while ( parentDir.isDirectory()
                            && EIO.getCanOrAbsPath( parentDir ).length()
                               > ConfigForReceiver.RECEIVER_BASE_DIR.length()
                            && parentDir.list().length == 0 )
                        {
                        //noinspection ResultOfMethodCallIgnored
                        parentDir.delete();
                        parentDir = parentDir.getParentFile();
                        }
                    }
                else
                    {
                    // already unpacked if timestamp and length both match exactly.
                    final boolean doneBefore = elementFile.lastModified() == entry.getTime()
                            && elementFile.length() == entry.getSize();
                    if ( !doneBefore )
                        {
                        Replicator.doing( "    unpacking: " + elementName );
                        ft.copy( zip.getInputStream( entry ), elementFile );
                        //noinspection ResultOfMethodCallIgnored
                        elementFile.setLastModified( entry.getTime() );
                        StatsForReceiver.receivedFilesCount++;
                        }
                    }
                if ( stopProgress( progressPointsPerFile ) )
                    {
                    return;
                    }
                } // end for each element in the zip
            }
        } // end unpackOneZip

    /**
     * verify that all files that should be distributed are distributed and are up to date.
     *
     * @throws java.net.MalformedURLException if there is a malformed filename in the distribution.
     */
    private void verifyDistribution() throws MalformedURLException
        {
        // we don't need the zips any more, free up the space.
        allDownloadableZips = null;
        progress = 0;
        if ( stopProgress( 0 ) )
            {
            return;
            }
        ReadManifests.downloadFileManifestSer();
        if ( stopProgress( 10 ) )
            {
            return;
            }
        ReadManifests.unpackFileManifestSer();
        if ( stopProgress( 5 ) )
            {
            return;
            }
        final int fileCount = allFilesAndDeletions.length;
        final double progressPointsPerFile =
                ( fileCount == 0 ) ? 0 : ( 100 - progress ) / fileCount;
        // prev tracks the previous file so we can back up to just after it
        // when we find a missing/stale file.
        MiniFD prev = new MiniFD( "", "" );
        prev.setClumping( Config.CUTOFF_TIMESTAMP );
        Log.divider();
        for ( MiniFD fileInDistribution : allFilesAndDeletions )
            {
            final String nameOfFileInDistribution =
                    fileInDistribution.getFilename( FilenameContext.ON_TARGET );
            Replicator.doing( "verifying: " + nameOfFileInDistribution );
            final File candidate = new File( nameOfFileInDistribution );
            if ( fileInDistribution.isDeleted() )
                {
                if ( candidate.isFile() )
                    {
                    // should have been deleted earlier.
                    // we can just delete it now.
                    Log.println( "Re-deleting "
                            + nameOfFileInDistribution
                            + " which should have already been deleted." );
                    //noinspection ResultOfMethodCallIgnored
                    candidate.delete();
                    }
                // otherwise ok, deleted as it should be.
                }
            else
                {
                // ordinary file should exist
                if ( !candidate.isFile() )
                    {
                    // file does not exist at all.
                    // back up and start over just after prev.
                    // this will be the oldest missing file, even if there
                    // are more missing files.
                    PersistReceiverState.clumpingProcessedSoFar = prev.getClumping();
                    PersistReceiverState.saveStateToReceiverSer();
                    Replicator.missing( nameOfFileInDistribution );
                    }
                // allow slop since files and elements were
                // created with crude ZIP timestamp.
                else if ( candidate.lastModified() + Config.ALLOWED_SLOP
                          < fileInDistribution.getTimestamp() )
                    {
                    // old date on file.
                    // back up and start over just after prev.
                    PersistReceiverState.clumpingProcessedSoFar = prev.getClumping();
                    PersistReceiverState.saveStateToReceiverSer();
                    Replicator.outDated( nameOfFileInDistribution );
                    }
                else if ( candidate.length() != fileInDistribution.getFileLength() )
                    {
                    // wrong file size.
                    // back up and start over just after prev.
                    PersistReceiverState.clumpingProcessedSoFar = prev.getClumping();
                    PersistReceiverState.saveStateToReceiverSer();
                    Replicator.wrongSize( nameOfFileInDistribution );
                    }
                // otherwise ok, exists as it should.
                }
            prev = fileInDistribution;
            if ( stopProgress( progressPointsPerFile ) )
                {
                return;
                }
            }
        setProgress( 100 );
        Log.divider();
        }

    /**
     * Main task. Executed in background thread. Controls one download/unpack session. At the very least it downloads a
     * small zip manifest file, and then decides there are no zips to process.
     *
     * @return null; this worker produces no result value.
     */
    @Override
    public Void doInBackground()
        {
        progress = 0; // Initialize progress property.
        if ( stopProgress( 0 ) )
            {
            return null;
            }
        try
            {
            // open log now, if not open already.
            Log.open( RECEIVER_ZIP_STAGING_DIR + File.separatorChar + RECEIVER_LOG );
            final String totalConfigDescription = ConfigForReceiver.dumpConfig();
            Log.println( totalConfigDescription );
            // display net result of all configuring.
            if ( ConfigForReceiver.DEBUGGING )
                {
                out.println( totalConfigDescription );
                }
            // find out how far we have got so far in downloading.
            PersistReceiverState.restoreStateFromReceiverSer();
            if ( stopProgress( 2 ) )
                {
                return null;
                }
            // arrange for userid and password
            if ( ConfigForReceiver.AUTHENTICATION.equals( "basic" ) )
                {
                Authenticator.setDefault( new ReceiverAuthenticator() );
                }
            // refresh stale aux files
            RefreshAux.refreshAuxFiles();
            if ( stopProgress( 2 ) )
                {
                return null;
                }
            // find out what's new
            ReadManifests.downloadZipManifestSer();
            if ( stopProgress( 2 ) )
                {
                return null;
                }
            // read and dump list of zips
            ReadManifests.unpackZipManifestSer();
            if ( stopProgress( 2 ) )
                {
                return null;
                }
            // Delete retired zips from our staging directory.
            // Only Relays will have any. Normally zips are deleted
            // immediately after unpacking.
            if ( ConfigForReceiver.KEEP_ZIPS )
                {
                deleteRetiredZips();
                }
            else
                {
                deleteAllZips();
                }
            deleteJunk();
            if ( stopProgress( 2 ) )
                {
                return null;
                }
            // return early on abort.
            downloadAndUnpackZips();
            setProgress( 100 );
            if ( Replicator.verify )
                {
                Replicator.freezeVerifyBox();
                verifyDistribution();
                }
            }
        catch ( IOException e )
            {
            Replicator.fatal( "IO Exception processing zips."
                    + "\n"
                    + e.getMessage() );
            }
        return null;
        }

    /**
     * Executed in event dispatching thread when background finishes.
     */
    @Override
    public void done()
        {
        if ( this.isCancelled() )
            {
            Replicator.cancelled();
            }
        else
            {
            Replicator.done();
            }
        }
    } // end FetchZips