TestFileCreationDelete.java
Uploaded by: quxuerui
Upload date: 2018-01-08
Archive size: 41811k
File size: 4k
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
- package org.apache.hadoop.hdfs;
- import java.io.IOException;
- import org.apache.commons.logging.impl.Log4JLogger;
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.fs.FSDataOutputStream;
- import org.apache.hadoop.fs.FileSystem;
- import org.apache.hadoop.fs.Path;
- import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
- import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
- import org.apache.hadoop.hdfs.server.namenode.NameNode;
- import org.apache.log4j.Level;
- public class TestFileCreationDelete extends junit.framework.TestCase {
- {
- ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
- ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
- ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
- }
- public void testFileCreationDeleteParent() throws IOException {
- Configuration conf = new Configuration();
- final int MAX_IDLE_TIME = 2000; // 2s
- conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
- conf.setInt("heartbeat.recheck.interval", 1000);
- conf.setInt("dfs.heartbeat.interval", 1);
- conf.setBoolean("dfs.support.append", true);
- // create cluster
- MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
- FileSystem fs = null;
- try {
- cluster.waitActive();
- fs = cluster.getFileSystem();
- final int nnport = cluster.getNameNodePort();
- // create file1.
- Path dir = new Path("/foo");
- Path file1 = new Path(dir, "file1");
- FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
- System.out.println("testFileCreationDeleteParent: "
- + "Created file " + file1);
- TestFileCreation.writeFile(stm1, 1000);
- stm1.sync();
- // create file2.
- Path file2 = new Path("/file2");
- FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
- System.out.println("testFileCreationDeleteParent: "
- + "Created file " + file2);
- TestFileCreation.writeFile(stm2, 1000);
- stm2.sync();
- // rm dir
- fs.delete(dir, true);
- // restart cluster with the same namenode port as before.
- // This ensures that leases are persisted in fsimage.
- cluster.shutdown();
- try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
- cluster = new MiniDFSCluster(nnport, conf, 1, false, true,
- null, null, null);
- cluster.waitActive();
- // restart cluster yet again. This triggers the code to read in
- // persistent leases from fsimage.
- cluster.shutdown();
- try {Thread.sleep(5000);} catch (InterruptedException e) {}
- cluster = new MiniDFSCluster(nnport, conf, 1, false, true,
- null, null, null);
- cluster.waitActive();
- fs = cluster.getFileSystem();
- assertTrue(!fs.exists(file1));
- assertTrue(fs.exists(file2));
- } finally {
- fs.close();
- cluster.shutdown();
- }
- }
- }