/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.cleaner;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

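/**
 * Test the HFileCleaner chore: TTL-based expiry of archived HFiles, retention of
 * files still within their TTL, and removal of directories left empty once their
 * files have been deleted.
 */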
@Category(MediumTests.class)
public class TestHFileCleaner {
  private static final Log LOG = LogFactory.getLog(TestHFileCleaner.class);

  private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setupCluster() throws Exception {
    // have to use a minidfs cluster because the localfs doesn't modify file times correctly
    UTIL.startMiniDFSCluster(1);
  }

  @AfterClass
  public static void shutdownCluster() throws Exception {
    UTIL.shutdownMiniDFSCluster();
  }

  @Test
  public void testTTLCleaner() throws IOException, InterruptedException {
    FileSystem fs = UTIL.getDFSCluster().getFileSystem();
    Path root = UTIL.getDataTestDir();
    Path file = new Path(root, "file");
    fs.createNewFile(file);
    long createTime = System.currentTimeMillis();
    assertTrue("Test file not created!", fs.exists(file));
    TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
    // backdate the file's modification time so it is already as old as the TTL
    fs.setTimes(file, createTime - 100, -1);
    Configuration conf = UTIL.getConfiguration();
    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
    cleaner.setConf(conf);
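    // the file's mtime is 100ms in the past and the TTL is 100ms, so by the time
    // this check runs the file's age has passed the TTL and it should be deletable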
    assertTrue("File not set deletable - check mod time: " + getFileStats(file, fs)
        + " with create time: " + createTime, cleaner.isFileDeletable(file));
  }

  /**
   * @param file file to check
   * @param fs {@link FileSystem} where the file lives
   * @return loggable information about the file
   */
  private String getFileStats(Path file, FileSystem fs) throws IOException {
    FileStatus status = fs.getFileStatus(file);
    return "File: " + file + ", mtime: " + status.getModificationTime() + ", atime: "
        + status.getAccessTime();
  }

  @Test(timeout = 60 * 1000)
  public void testHFileCleaning() throws Exception {
    final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
    String prefix = "someHFileThatWouldBeAUUID";
    Configuration conf = UTIL.getConfiguration();
    // configure the TTL cleaner as the only delegate in the chain, with a 2s TTL
    long ttl = 2000;
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
    Server server = new DummyServer();
    Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
    FileSystem fs = FileSystem.get(conf);
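    // the first argument is the chore period (ms between scans of the archive
    // directory); the DummyServer supplies the Stoppable hook the chore needs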
    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);

    // Create 1 invalid file, 31 old files, and 1 very new file
    final long createTime = System.currentTimeMillis();
    fs.delete(archivedHfileDir, true);
    fs.mkdirs(archivedHfileDir);
    // Case 1: 1 invalid file, which should be deleted directly
    fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
    LOG.debug("Now is: " + createTime);
    for (int i = 1; i < 32; i++) {
      // Case 2: 31 old files, all past the TTL and therefore deletable by the
      // first cleaner in the chain (TimeToLiveHFileCleaner)
      Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
      fs.createNewFile(fileName);
      // push the modification time past the TTL to ensure the file gets removed
      // (an atime of -1 leaves the access time unchanged)
      fs.setTimes(fileName, createTime - ttl - 1, -1);
      LOG.debug("Creating " + getFileStats(fileName, fs));
    }

    // Case 3: 1 newer file, not deletable even for the first cleaner in the chain
    // (TimeToLiveHFileCleaner), so we never go further down the chain and it is kept
    Path saved = new Path(archivedHfileDir, "thisFileShouldBeSaved.00000000000");
    fs.createNewFile(saved);
    // set the modification time in the future, so the file is definitely within its TTL
    fs.setTimes(saved, createTime + (ttl * 2), -1);
    LOG.debug("Creating " + getFileStats(saved, fs));

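    // sanity check before the chore runs: 1 invalid + 31 old + 1 saved = 33 files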
    assertEquals(33, fs.listStatus(archivedHfileDir).length);

    // inject a fixed-time EnvironmentEdge so "now" stays at createTime and the
    // TTL comparisons are deterministic no matter how long the test takes
    EnvironmentEdge setTime = new EnvironmentEdge() {
      @Override
      public long currentTimeMillis() {
        return createTime;
      }
    };
    EnvironmentEdgeManager.injectEdge(setTime);

    // run the chore
    cleaner.chore();

    for (FileStatus file : fs.listStatus(archivedHfileDir)) {
      LOG.debug("Kept hfile: " + file.getPath());
    }

    // ensure we only end up with the saved file
    assertEquals("Didn't delete the expected number of files in the archive!", 1,
      fs.listStatus(archivedHfileDir).length);

    cleaner.interrupt();
    // restore the original time source
    EnvironmentEdgeManager.injectEdge(originalEdge);
  }

  @Test
  public void testRemovesEmptyDirectories() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // no cleaner policies = delete all files
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
    Server server = new DummyServer();
    Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

    // setup the cleaner
    FileSystem fs = UTIL.getDFSCluster().getFileSystem();
    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);

    // make all the directories for archiving files
    Path table = new Path(archivedHfileDir, "table");
    Path region = new Path(table, "regionsomething");
    Path family = new Path(region, "fam");
    Path file = new Path(family, "file12345");
    fs.mkdirs(family);
    assertTrue("Couldn't create test family: " + family, fs.exists(family));
    fs.create(file).close();
    assertTrue("Test file didn't get created: " + file, fs.exists(file));

    // run the chore to clean up the files (and the now-empty directories above them)
    cleaner.chore();

    // make sure all the parent directories get removed
    assertFalse("family directory not removed for empty directory", fs.exists(family));
    assertFalse("region directory not removed for empty directory", fs.exists(region));
    assertFalse("table directory not removed for empty directory", fs.exists(table));
    assertTrue("archive directory itself should be kept", fs.exists(archivedHfileDir));
  }

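  /**
   * Minimal {@link Server} implementation: the cleaner chore only needs a
   * configuration and a Stoppable owner, so the remaining methods are stubs.
   */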
  static class DummyServer implements Server {

    @Override
    public Configuration getConfiguration() {
      return UTIL.getConfiguration();
    }

    @Override
    public ZooKeeperWatcher getZooKeeper() {
      try {
        return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
      } catch (IOException e) {
        LOG.error("Couldn't create ZooKeeperWatcher for the dummy server", e);
      }
      return null;
    }

    @Override
    public CatalogTracker getCatalogTracker() {
      return null;
    }

    @Override
    public ServerName getServerName() {
      return new ServerName("regionserver,60020,000000");
    }

    @Override
    public void abort(String why, Throwable e) {
    }

    @Override
    public boolean isAborted() {
      return false;
    }

    @Override
    public void stop(String why) {}

    @Override
    public boolean isStopped() {
      return false;
    }
  }

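  // JUnit rule used across HBase tests of this era; it is intended to flag
  // resources (such as threads) leaked by the test run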
  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}