/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.commons.vfs2.provider.hdfs;

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;
import org.apache.commons.vfs2.AbstractProviderTestConfig;
import org.apache.commons.vfs2.AbstractVfsTestCase;
import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemManager;
import org.apache.commons.vfs2.ProviderTestConfig;
import org.apache.commons.vfs2.ProviderTestSuite;
import org.apache.commons.vfs2.impl.DefaultFileSystemManager;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

import junit.framework.Test;

/**
 * This test class uses the Hadoop MiniDFSCluster class to create an embedded Hadoop cluster.
 * <p>
 * This will only work on systems that Hadoop supports.
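 * <p>
 * For reference, resolving a path through the provider registered in {@link #prepare(DefaultFileSystemManager)}
 * looks roughly like the sketch below. It is illustrative only (the test suite performs the equivalent setup
 * itself) and assumes the embedded cluster is already listening on the test port:
 * <pre>{@code
 * DefaultFileSystemManager manager = new DefaultFileSystemManager();
 * manager.addProvider("hdfs", new HdfsFileProvider());
 * manager.init();
 * FileObject dir = manager.resolveFile("hdfs://localhost:8720/test-data");
 * }</pre>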
 */
public class HdfsFileProviderTestCase extends AbstractProviderTestConfig {
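    /**
     * Test suite that starts a {@link MiniDFSCluster}, copies the shared VFS test resources into it, and then runs
     * the standard provider tests against that cluster.
     */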
    public static class HdfsProviderTestSuite extends ProviderTestSuite {

        // Turn off the MiniDFSCluster logging
        static {
            System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.NoOpLog");
        }

        public HdfsProviderTestSuite(final ProviderTestConfig providerConfig, final boolean addEmptyDir)
                throws Exception {
            super(providerConfig, addEmptyDir);
        }

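        /**
         * Recursively copies the contents of a local directory into the given HDFS parent path, failing the test if
         * a target directory cannot be created.
         */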
        @SuppressWarnings("deprecation")
        private void copyTestResources(final File directory, final Path parent) throws Exception {
            for (final File file : directory.listFiles()) {
                if (file.isFile()) {
                    final Path src = new Path(file.getAbsolutePath());
                    final Path dst = new Path(parent, file.getName());
                    hdfs.copyFromLocalFile(src, dst);
                } else if (file.isDirectory()) {
                    final Path dir = new Path(parent, file.getName());
                    if (hdfs.mkdirs(dir)) {
                        copyTestResources(file, dir);
                    } else {
                        fail("Unable to make directory: " + dir);
                    }
                }
            }
        }

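        /**
         * Starts a single-node {@link MiniDFSCluster} under the Maven {@code target} directory, copies the shared
         * test resources into it under {@code /test-data}, and then delegates to the standard suite setup.
         */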
        @SuppressWarnings("deprecation")
        @Override
        protected void setUp() throws Exception {
            Logger.getRootLogger().setLevel(Level.OFF);

            // Put the MiniDFSCluster data directory under the Maven target directory
            final File data = new File("target/test/hdfstestcasedata").getAbsoluteFile();
            data.mkdirs();
            System.setProperty("test.build.data", data.toString());
            FileUtils.cleanDirectory(data);

            // Set up HDFS
            conf = new Configuration();
            conf.set(FileSystem.FS_DEFAULT_NAME_KEY, HDFS_URI);
            conf.set("hadoop.security.token.service.use_ip", "true");
            conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024); // 1 MiB block size

            HdfsFileProviderTest.setUmask(conf);

            try {
                cluster = new MiniDFSCluster(PORT, conf, 1, true, true, true, null, null, null, null);
                cluster.waitActive();
            } catch (final IOException e) {
                throw new RuntimeException("Error setting up mini cluster", e);
            }
            // File system handle used to populate the cluster with the test resources
            hdfs = cluster.getFileSystem();

            // Copy the test directory into HDFS
            final Path base = new Path("/test-data");
            assertTrue("Unable to create base directory", hdfs.mkdirs(base));
            final File testDir = AbstractVfsTestCase.getTestDirectoryFile();
            copyTestResources(testDir, base);

            super.setUp();
        }

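        /**
         * Runs the standard suite teardown and then releases the embedded cluster resources.
         */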
        @Override
        protected void tearDown() throws Exception {
            super.tearDown();
            if (null != hdfs) {
                hdfs.close();
            }
            // Shut down the embedded cluster so its threads and storage directories are released
            if (null != cluster) {
                cluster.shutdown();
            }
        }
    }

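    // Connection details for the embedded cluster and the handles shared between setUp() and tearDown()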
    private static final int PORT = 8720;
    private static final String HDFS_URI = "hdfs://localhost:" + PORT;
    private static FileSystem hdfs;
    private static Configuration conf;

    private static MiniDFSCluster cluster;

    /**
     * Creates the test suite for the HDFS file system.
     */
    public static Test suite() throws Exception {
        return new HdfsProviderTestSuite(new HdfsFileProviderTestCase(), false);
    }

    /**
     * Returns the base folder for read tests.
     */
    @Override
    public FileObject getBaseTestFolder(final FileSystemManager manager) throws Exception {
        final String uri = HDFS_URI + "/test-data";
        return manager.resolveFile(uri);
    }

    /**
     * Prepares the file system manager by registering the HDFS provider for the {@code hdfs} scheme.
     */
    @Override
    public void prepare(final DefaultFileSystemManager manager) throws Exception {
        manager.addProvider("hdfs", new HdfsFileProvider());
    }

}