TestDFSUpgrade.java
上传用户 (uploader): quxuerui
上传日期 (upload date): 2018-01-08
资源大小 (archive size): 41811k
文件大小 (file size): 11k
源码类别 (category): 网格计算 (grid computing)
开发平台 (platform): Java

  1. /**
  2. * Licensed to the Apache Software Foundation (ASF) under one
  3. * or more contributor license agreements.  See the NOTICE file
  4. * distributed with this work for additional information
  5. * regarding copyright ownership.  The ASF licenses this file
  6. * to you under the Apache License, Version 2.0 (the
  7. * "License"); you may not use this file except in compliance
  8. * with the License.  You may obtain a copy of the License at
  9. *
  10. *     http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. package org.apache.hadoop.hdfs;
  19. import java.io.File;
  20. import java.io.IOException;
  21. import junit.framework.TestCase;
  22. import org.apache.commons.logging.Log;
  23. import org.apache.commons.logging.LogFactory;
  24. import org.apache.hadoop.conf.Configuration;
  25. import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
  26. import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
  27. import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
  28. import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
  29. import org.apache.hadoop.hdfs.server.common.HdfsConstants;
  30. import org.apache.hadoop.hdfs.server.common.Storage;
  31. import org.apache.hadoop.hdfs.server.common.StorageInfo;
  32. import org.apache.hadoop.fs.FileUtil;
  33. /**
  34. * This test ensures the appropriate response (successful or failure) from
  35. * the system when the system is upgraded under various storage state and
  36. * version conditions.
  37. */
  38. public class TestDFSUpgrade extends TestCase {
  39.  
  40.   private static final Log LOG = LogFactory.getLog(
  41.                                                    "org.apache.hadoop.hdfs.TestDFSUpgrade");
  42.   private Configuration conf;
  43.   private int testCounter = 0;
  44.   private MiniDFSCluster cluster = null;
  45.     
  46.   /**
  47.    * Writes an INFO log message containing the parameters.
  48.    */
  49.   void log(String label, int numDirs) {
  50.     LOG.info("============================================================");
  51.     LOG.info("***TEST " + (testCounter++) + "*** " 
  52.              + label + ":"
  53.              + " numDirs="+numDirs);
  54.   }
  55.   
  56.   /**
  57.    * Verify that the current and previous directories exist.  Verify that 
  58.    * previous hasn't been modified by comparing the checksum of all it's
  59.    * containing files with their original checksum.  It is assumed that
  60.    * the server has recovered and upgraded.
  61.    */
  62.   void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
  63.     switch (nodeType) {
  64.     case NAME_NODE:
  65.       for (int i = 0; i < baseDirs.length; i++) {
  66.         assertTrue(new File(baseDirs[i],"current").isDirectory());
  67.         assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
  68.         assertTrue(new File(baseDirs[i],"current/edits").isFile());
  69.         assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
  70.         assertTrue(new File(baseDirs[i],"current/fstime").isFile());
  71.       }
  72.       break;
  73.     case DATA_NODE:
  74.       for (int i = 0; i < baseDirs.length; i++) {
  75.         assertEquals(
  76.                      UpgradeUtilities.checksumContents(
  77.                                                        nodeType, new File(baseDirs[i],"current")),
  78.                      UpgradeUtilities.checksumMasterContents(nodeType));
  79.       }
  80.       break;
  81.     }
  82.     for (int i = 0; i < baseDirs.length; i++) {
  83.       assertTrue(new File(baseDirs[i],"previous").isDirectory());
  84.       assertEquals(
  85.                    UpgradeUtilities.checksumContents(
  86.                                                      nodeType, new File(baseDirs[i],"previous")),
  87.                    UpgradeUtilities.checksumMasterContents(nodeType));
  88.     }
  89.   }
  90.  
  91.   /**
  92.    * Attempts to start a NameNode with the given operation.  Starting
  93.    * the NameNode should throw an exception.
  94.    */
  95.   void startNameNodeShouldFail(StartupOption operation) {
  96.     try {
  97.       cluster = new MiniDFSCluster(conf, 0, operation); // should fail
  98.       throw new AssertionError("NameNode should have failed to start");
  99.     } catch (Exception expected) {
  100.       // expected
  101.     }
  102.   }
  103.   
  104.   /**
  105.    * Attempts to start a DataNode with the given operation.  Starting
  106.    * the DataNode should throw an exception.
  107.    */
  108.   void startDataNodeShouldFail(StartupOption operation) {
  109.     try {
  110.       cluster.startDataNodes(conf, 1, false, operation, null); // should fail
  111.       throw new AssertionError("DataNode should have failed to start");
  112.     } catch (Exception expected) {
  113.       // expected
  114.       assertFalse(cluster.isDataNodeUp());
  115.     }
  116.   }
  117.  
  118.   /**
  119.    * This test attempts to upgrade the NameNode and DataNode under
  120.    * a number of valid and invalid conditions.
  121.    */
  122.   public void testUpgrade() throws Exception {
  123.     File[] baseDirs;
  124.     UpgradeUtilities.initialize();
  125.     
  126.     for (int numDirs = 1; numDirs <= 2; numDirs++) {
  127.       conf = new Configuration();
  128.       conf.setInt("dfs.datanode.scan.period.hours", -1);      
  129.       conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
  130.       String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
  131.       String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
  132.       
  133.       log("Normal NameNode upgrade", numDirs);
  134.       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  135.       cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
  136.       checkResult(NAME_NODE, nameNodeDirs);
  137.       cluster.shutdown();
  138.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  139.       
  140.       log("Normal DataNode upgrade", numDirs);
  141.       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  142.       cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
  143.       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
  144.       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
  145.       checkResult(DATA_NODE, dataNodeDirs);
  146.       cluster.shutdown();
  147.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  148.       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
  149.       
  150.       log("NameNode upgrade with existing previous dir", numDirs);
  151.       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  152.       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
  153.       startNameNodeShouldFail(StartupOption.UPGRADE);
  154.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  155.       
  156.       log("DataNode upgrade with existing previous dir", numDirs);
  157.       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  158.       cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
  159.       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
  160.       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
  161.       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
  162.       checkResult(DATA_NODE, dataNodeDirs);
  163.       cluster.shutdown();
  164.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  165.       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
  166.       log("DataNode upgrade with future stored layout version in current", numDirs);
  167.       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  168.       cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
  169.       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
  170.       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
  171.                                          new StorageInfo(Integer.MIN_VALUE,
  172.                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
  173.                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
  174.       startDataNodeShouldFail(StartupOption.REGULAR);
  175.       cluster.shutdown();
  176.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  177.       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
  178.       
  179.       log("DataNode upgrade with newer fsscTime in current", numDirs);
  180.       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  181.       cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
  182.       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
  183.       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
  184.                                          new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
  185.                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
  186.                                                          Long.MAX_VALUE));
  187.       startDataNodeShouldFail(StartupOption.REGULAR);
  188.       cluster.shutdown();
  189.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  190.       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
  191.       log("NameNode upgrade with no edits file", numDirs);
  192.       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  193.       for (File f : baseDirs) { 
  194.         FileUtil.fullyDelete(new File(f,"edits"));
  195.       }
  196.       startNameNodeShouldFail(StartupOption.UPGRADE);
  197.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  198.       
  199.       log("NameNode upgrade with no image file", numDirs);
  200.       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  201.       for (File f : baseDirs) { 
  202.         FileUtil.fullyDelete(new File(f,"fsimage")); 
  203.       }
  204.       startNameNodeShouldFail(StartupOption.UPGRADE);
  205.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  206.       
  207.       log("NameNode upgrade with corrupt version file", numDirs);
  208.       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  209.       for (File f : baseDirs) { 
  210.         UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
  211.       }
  212.       startNameNodeShouldFail(StartupOption.UPGRADE);
  213.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  214.       
  215.       log("NameNode upgrade with old layout version in current", numDirs);
  216.       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  217.       UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
  218.                                          new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
  219.                                                          UpgradeUtilities.getCurrentNamespaceID(null),
  220.                                                          UpgradeUtilities.getCurrentFsscTime(null)));
  221.       startNameNodeShouldFail(StartupOption.UPGRADE);
  222.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  223.       
  224.       log("NameNode upgrade with future layout version in current", numDirs);
  225.       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
  226.       UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
  227.                                          new StorageInfo(Integer.MIN_VALUE,
  228.                                                          UpgradeUtilities.getCurrentNamespaceID(null),
  229.                                                          UpgradeUtilities.getCurrentFsscTime(null)));
  230.       startNameNodeShouldFail(StartupOption.UPGRADE);
  231.       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  232.     } // end numDir loop
  233.   }
  234.  
  235.   protected void tearDown() throws Exception {
  236.     LOG.info("Shutting down MiniDFSCluster");
  237.     if (cluster != null) cluster.shutdown();
  238.   }
  239.     
  240.   public static void main(String[] args) throws Exception {
  241.     new TestDFSUpgrade().testUpgrade();
  242.   }
  243.   
  244. }