|
||||||||||
| PREV CLASS NEXT CLASS | FRAMES NO FRAMES | |||||||||
| SUMMARY: NESTED | FIELD | CONSTR | METHOD | DETAIL: FIELD | CONSTR | METHOD | |||||||||
java.lang.Objectorg.apache.hadoop.hbase.util.FSUtils
@InterfaceAudience.Public @InterfaceStability.Evolving public abstract class FSUtils
Utility methods for interacting with the underlying file system.
| Nested Class Summary | |
|---|---|
static class |
FSUtils.BlackListDirFilter
Directory filter that doesn't include any of the directories in the specified blacklist |
static class |
FSUtils.DirFilter
A PathFilter that only allows directories. |
static class |
FSUtils.FamilyDirFilter
Filter for all dirs that are legal column family names. |
static class |
FSUtils.HFileFilter
Filter for HFiles that excludes reference files. |
static class |
FSUtils.RegionDirFilter
Filter for all dirs that don't start with '.' |
static class |
FSUtils.UserTableDirFilter
A PathFilter that returns usertable directories. |
| Field Summary | |
|---|---|
static boolean |
WINDOWS
Set to true on Windows platforms |
| Constructor Summary | |
|---|---|
protected |
FSUtils()
|
| Method Summary | |
|---|---|
static void |
checkAccess(org.apache.hadoop.security.UserGroupInformation ugi,
org.apache.hadoop.fs.FileStatus file,
org.apache.hadoop.fs.permission.FsAction action)
Throw an exception if an action is not permitted by a user on a file. |
static boolean |
checkClusterIdExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
int wait)
Checks that a cluster ID file exists in the HBase root directory |
static void |
checkDfsSafeMode(org.apache.hadoop.conf.Configuration conf)
Check whether dfs is in safemode. |
static void |
checkFileSystemAvailable(org.apache.hadoop.fs.FileSystem fs)
Checks to see if the specified file system is available |
static void |
checkVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
boolean message)
Verifies current version of file system |
static void |
checkVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
boolean message,
int wait,
int retries)
Verifies current version of file system |
static HDFSBlocksDistribution |
computeHDFSBlocksDistribution(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus status,
long start,
long length)
Compute HDFS blocks distribution of a given file, or a portion of the file |
static org.apache.hadoop.fs.FSDataOutputStream |
create(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission perm,
boolean overwrite)
Create the specified file on the filesystem. |
static org.apache.hadoop.fs.FSDataOutputStream |
create(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission perm,
InetSocketAddress[] favoredNodes)
Create the specified file on the filesystem. |
static boolean |
delete(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
boolean recursive)
Calls fs.delete() and returns the value returned by the fs.delete() |
static boolean |
deleteDirectory(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir)
Delete if exists. |
static ClusterId |
getClusterId(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
Returns the value of the unique cluster ID stored for this HBase instance. |
static org.apache.hadoop.fs.FileSystem |
getCurrentFileSystem(org.apache.hadoop.conf.Configuration conf)
|
static long |
getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
Return the number of bytes that large input files should optimally be split into to minimize i/o time. |
static int |
getDefaultBufferSize(org.apache.hadoop.fs.FileSystem fs)
Returns the default buffer size to use during writes. |
static short |
getDefaultReplication(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
|
static List<org.apache.hadoop.fs.Path> |
getFamilyDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path regionDir)
Given a particular region dir, return all the familydirs inside it |
static org.apache.hadoop.fs.permission.FsPermission |
getFilePermissions(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf,
String permssionConfKey)
Get the file permissions specified in the configuration, if they are enabled. |
static FSUtils |
getInstance(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf)
|
static List<org.apache.hadoop.fs.Path> |
getLocalTableDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
|
static org.apache.hadoop.fs.Path |
getNamespaceDir(org.apache.hadoop.fs.Path rootdir,
String namespace)
Returns the Path object representing
the namespace directory under path rootdir |
static String |
getPath(org.apache.hadoop.fs.Path p)
Return the 'path' component of a Path. |
static List<org.apache.hadoop.fs.Path> |
getRegionDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path tableDir)
Given a particular table dir, return all the regiondirs inside it, excluding files such as .tableinfo |
static org.apache.hadoop.fs.Path |
getRootDir(org.apache.hadoop.conf.Configuration c)
|
static org.apache.hadoop.fs.Path |
getTableDir(org.apache.hadoop.fs.Path rootdir,
TableName tableName)
Returns the Path object representing the table directory under
path rootdir |
static List<org.apache.hadoop.fs.Path> |
getTableDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
|
static Map<String,Integer> |
getTableFragmentation(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
Runs through the HBase rootdir and checks how many stores for each table have more than one file in them. |
static Map<String,Integer> |
getTableFragmentation(HMaster master)
Runs through the HBase rootdir and checks how many stores for each table have more than one file in them. |
static TableName |
getTableName(org.apache.hadoop.fs.Path tablePath)
Returns the TableName object representing
the table directory under
path rootdir |
static Map<String,org.apache.hadoop.fs.Path> |
getTableStoreFilePathMap(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to the full Path. |
static Map<String,org.apache.hadoop.fs.Path> |
getTableStoreFilePathMap(Map<String,org.apache.hadoop.fs.Path> map,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir,
TableName tableName)
Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile names to the full Path. |
static int |
getTotalTableFragmentation(HMaster master)
Returns the total overall fragmentation percentage. |
static String |
getVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
Verifies current version of file system |
static boolean |
isAppendSupported(org.apache.hadoop.conf.Configuration conf)
Heuristic to determine whether it is safe or not to open a file for append. Looks both for dfs.support.append and uses reflection to search for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush() |
static boolean |
isExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
Calls fs.exists(). |
static boolean |
isHDFS(org.apache.hadoop.conf.Configuration conf)
|
static boolean |
isMajorCompacted(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
Runs through the hbase rootdir and checks all stores have only one file in them -- that is, they've been major compacted. |
static boolean |
isMajorCompactedPre020(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
Runs through the hbase rootdir and checks all stores have only one file in them -- that is, they've been major compacted. |
static boolean |
isMatchingTail(org.apache.hadoop.fs.Path pathToSearch,
org.apache.hadoop.fs.Path pathTail)
Compare path component of the Path URI; e.g. |
static boolean |
isMatchingTail(org.apache.hadoop.fs.Path pathToSearch,
String pathTail)
Compare path component of the Path URI; e.g. |
static boolean |
isPre020FileLayout(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
Expects to find -ROOT- directory. |
static boolean |
isRecoveredEdits(org.apache.hadoop.fs.Path path)
Checks if the given path is the one with 'recovered.edits' dir. |
static boolean |
isStartingWithPath(org.apache.hadoop.fs.Path rootPath,
String path)
Compare of path component. |
static org.apache.hadoop.fs.FileStatus[] |
listStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir)
Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates differences between hadoop versions |
static org.apache.hadoop.fs.FileStatus[] |
listStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir,
org.apache.hadoop.fs.PathFilter filter)
Calls fs.listStatus() and treats FileNotFoundException as non-fatal. This accommodates differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException and returns an empty FileStatus[], while Hadoop 2 throws FileNotFoundException. |
static void |
logFileSystemState(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path root,
org.apache.commons.logging.Log LOG)
Log the current state of the filesystem from a certain root directory |
static boolean |
metaRegionExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
Checks if meta region exists |
abstract void |
recoverFileLease(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path p,
org.apache.hadoop.conf.Configuration conf,
CancelableProgressable reporter)
Recover file lease. |
static String |
removeRootPath(org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf)
Checks for the presence of the root path (using the provided conf object) in the given path. |
static boolean |
renameAndSetModifyTime(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dest)
|
static void |
setClusterId(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
ClusterId clusterId,
int wait)
Writes a new unique identifier for this cluster to the "hbase.id" file in the HBase root directory |
static void |
setFsDefault(org.apache.hadoop.conf.Configuration c,
org.apache.hadoop.fs.Path root)
|
static void |
setRootDir(org.apache.hadoop.conf.Configuration c,
org.apache.hadoop.fs.Path root)
|
static void |
setVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
Sets version of file system |
static void |
setVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
int wait,
int retries)
Sets version of file system |
static void |
setVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
String version,
int wait,
int retries)
Sets version of file system |
static org.apache.hadoop.fs.Path |
validateRootPath(org.apache.hadoop.fs.Path root)
Verifies root directory path is a valid URI with a scheme |
static void |
waitOnSafeMode(org.apache.hadoop.conf.Configuration conf,
long wait)
If DFS, check safe mode and if so, wait until we clear it. |
| Methods inherited from class java.lang.Object |
|---|
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait |
| Field Detail |
|---|
public static final boolean WINDOWS
| Constructor Detail |
|---|
protected FSUtils()
| Method Detail |
|---|
public static boolean isStartingWithPath(org.apache.hadoop.fs.Path rootPath,
String path)
path
starts with rootPath, then the function returns true
- Parameters:
rootPath - path -
- Returns:
- True if
path starts with rootPath
public static boolean isMatchingTail(org.apache.hadoop.fs.Path pathToSearch,
String pathTail)
pathToSearch - Path we will be trying to match.pathTail -
pathTail is tail on the path of pathToSearch
public static boolean isMatchingTail(org.apache.hadoop.fs.Path pathToSearch,
org.apache.hadoop.fs.Path pathTail)
pathToSearch - Path we will be trying to match.pathTail -
pathTail is tail on the path of pathToSearch
public static FSUtils getInstance(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf)
public static boolean deleteDirectory(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir)
throws IOException
fs - filesystem objectdir - directory to delete
dir
IOException - e
public static long getDefaultBlockSize(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
throws IOException
fs - filesystem object
IOException - e
public static short getDefaultReplication(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
throws IOException
IOException
public static int getDefaultBufferSize(org.apache.hadoop.fs.FileSystem fs)
fs - filesystem object
public static org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission perm,
InetSocketAddress[] favoredNodes)
throws IOException
fs - FileSystem on which to write the filepath - Path to the file to writeperm - permissionsfavoredNodes -
IOException - if the file cannot be created
public static org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
org.apache.hadoop.fs.permission.FsPermission perm,
boolean overwrite)
throws IOException
fs - FileSystem on which to write the filepath - Path to the file to writeperm - overwrite - Whether or not the created file should be overwritten.
IOException - if the file cannot be created
public static org.apache.hadoop.fs.permission.FsPermission getFilePermissions(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.conf.Configuration conf,
String permssionConfKey)
fs - filesystem that the file will be created on.conf - configuration to read for determining if permissions are
enabled and which to usepermssionConfKey - property key in the configuration to use when
finding the permission
public static void checkFileSystemAvailable(org.apache.hadoop.fs.FileSystem fs)
throws IOException
fs - filesystem
IOException - e
public static void checkDfsSafeMode(org.apache.hadoop.conf.Configuration conf)
throws IOException
conf -
IOException
public static String getVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
throws IOException,
DeserializationException
fs - filesystem objectrootdir - root hbase directory
IOException - e
DeserializationException
public static void checkVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
boolean message)
throws IOException,
DeserializationException
fs - file systemrootdir - root directory of HBase installationmessage - if true, issues a message on System.out
IOException - e
DeserializationException
public static void checkVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
boolean message,
int wait,
int retries)
throws IOException,
DeserializationException
fs - file systemrootdir - root directory of HBase installationmessage - if true, issues a message on System.outwait - wait intervalretries - number of times to retry
IOException - e
DeserializationException
public static void setVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
throws IOException
fs - filesystem objectrootdir - hbase root
IOException - e
public static void setVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
int wait,
int retries)
throws IOException
fs - filesystem objectrootdir - hbase rootwait - time to wait for retryretries - number of times to retry before failing
IOException - e
public static void setVersion(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
String version,
int wait,
int retries)
throws IOException
fs - filesystem objectrootdir - hbase root directoryversion - version to setwait - time to wait for retryretries - number of times to retry before throwing an IOException
IOException - e
public static boolean checkClusterIdExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
int wait)
throws IOException
fs - the root directory FileSystemrootdir - the HBase root directory in HDFSwait - how long to wait between retries
true if the file exists, otherwise false
IOException - if checking the FileSystem fails
public static ClusterId getClusterId(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
throws IOException
fs - the root directory FileSystemrootdir - the path to the HBase root directory
IOException - if reading the cluster ID file fails
public static void setClusterId(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir,
ClusterId clusterId,
int wait)
throws IOException
fs - the root directory FileSystemrootdir - the path to the HBase root directoryclusterId - the unique identifier to storewait - how long (in milliseconds) to wait between retries
IOException - if writing to the FileSystem fails and no wait value
public static org.apache.hadoop.fs.Path validateRootPath(org.apache.hadoop.fs.Path root)
throws IOException
root - root directory path
root argument.
IOException - if not a valid URI with a scheme
public static String removeRootPath(org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf)
throws IOException
path - conf -
IOException
public static void waitOnSafeMode(org.apache.hadoop.conf.Configuration conf,
long wait)
throws IOException
conf - configurationwait - Sleep between retries
IOException - e
public static String getPath(org.apache.hadoop.fs.Path p)
hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir,
this method returns /hbase_trunk/TestTable/compaction.dir.
This method is useful if you want to print out a Path without qualifying
Filesystem instance.
p - Filesystem Path whose 'path' component we are to return.
public static org.apache.hadoop.fs.Path getRootDir(org.apache.hadoop.conf.Configuration c)
throws IOException
c - configuration
hbase.rootdir from
configuration as a qualified Path.
IOException - e
public static void setRootDir(org.apache.hadoop.conf.Configuration c,
org.apache.hadoop.fs.Path root)
throws IOException
IOException
public static void setFsDefault(org.apache.hadoop.conf.Configuration c,
org.apache.hadoop.fs.Path root)
throws IOException
IOException
public static boolean metaRegionExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
throws IOException
fs - file systemrootdir - root directory of HBase installation
IOException - e
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.FileStatus status,
long start,
long length)
throws IOException
fs - file systemstatus - file status of the filestart - start position of the portionlength - length of the portion
IOException
public static boolean isMajorCompacted(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
throws IOException
fs - filesystemhbaseRootDir - hbase root directory
IOException - e
public static int getTotalTableFragmentation(HMaster master)
throws IOException
master - The master defining the HBase root and file system.
IOException - When scanning the directory fails.
public static Map<String,Integer> getTableFragmentation(HMaster master)
throws IOException
master - The master defining the HBase root and file system.
IOException - When scanning the directory fails.
public static Map<String,Integer> getTableFragmentation(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
throws IOException
fs - The file system to use.hbaseRootDir - The root directory to scan.
IOException - When scanning the directory fails.
public static boolean isPre020FileLayout(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
throws IOException
fs - filesystemhbaseRootDir - hbase root directory
IOException - e
public static boolean isMajorCompactedPre020(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
throws IOException
isMajorCompacted(FileSystem, Path) in that it expects a
pre-0.20.0 hbase layout on the filesystem. Used migrating.
fs - filesystemhbaseRootDir - hbase root directory
IOException - e
public static org.apache.hadoop.fs.Path getTableDir(org.apache.hadoop.fs.Path rootdir,
TableName tableName)
Path object representing the table directory under
path rootdir
rootdir - qualified path of HBase root directorytableName - name of table
Path for table
public static TableName getTableName(org.apache.hadoop.fs.Path tablePath)
TableName object representing
the table directory under
path rootdir
tablePath - path of table
Path for table
public static org.apache.hadoop.fs.Path getNamespaceDir(org.apache.hadoop.fs.Path rootdir,
String namespace)
Path object representing
the namespace directory under path rootdir
rootdir - qualified path of HBase root directorynamespace - namespace name
Path for table
public static boolean isAppendSupported(org.apache.hadoop.conf.Configuration conf)
conf -
public static boolean isHDFS(org.apache.hadoop.conf.Configuration conf)
throws IOException
conf -
IOException
public abstract void recoverFileLease(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path p,
org.apache.hadoop.conf.Configuration conf,
CancelableProgressable reporter)
throws IOException
fs - FileSystem handlep - Path of file to recover leaseconf - Configuration handle
IOException
public static List<org.apache.hadoop.fs.Path> getTableDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
throws IOException
IOException
public static List<org.apache.hadoop.fs.Path> getLocalTableDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootdir)
throws IOException
fs - rootdir -
rootdir. Ignore non table hbase folders such as
.logs, .oldlogs, .corrupt folders.
IOException
public static boolean isRecoveredEdits(org.apache.hadoop.fs.Path path)
path -
public static List<org.apache.hadoop.fs.Path> getRegionDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path tableDir)
throws IOException
fs - A file system for the PathtableDir - Path to a specific table directory
IOException
public static List<org.apache.hadoop.fs.Path> getFamilyDirs(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path regionDir)
throws IOException
fs - A file system for the PathregionDir - Path to a specific region directory
IOException
public static org.apache.hadoop.fs.FileSystem getCurrentFileSystem(org.apache.hadoop.conf.Configuration conf)
throws IOException
conf -
IOException
public static Map<String,org.apache.hadoop.fs.Path> getTableStoreFilePathMap(Map<String,org.apache.hadoop.fs.Path> map,
org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir,
TableName tableName)
throws IOException
map - map to add values. If null, this method will create and populate one to returnfs - The file system to use.hbaseRootDir - The root directory to scan.tableName - name of the table to scan.
IOException - When scanning the directory fails.
public static Map<String,org.apache.hadoop.fs.Path> getTableStoreFilePathMap(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path hbaseRootDir)
throws IOException
fs - The file system to use.hbaseRootDir - The root directory to scan.
IOException - When scanning the directory fails.
public static org.apache.hadoop.fs.FileStatus[] listStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir,
org.apache.hadoop.fs.PathFilter filter)
throws IOException
fs - file systemdir - directoryfilter - path filter
IOException
public static org.apache.hadoop.fs.FileStatus[] listStatus(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path dir)
throws IOException
fs - file systemdir - directory
IOException
public static boolean delete(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path,
boolean recursive)
throws IOException
fs - path - recursive -
IOException
public static boolean isExists(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path path)
throws IOException
fs - path -
IOException
public static void checkAccess(org.apache.hadoop.security.UserGroupInformation ugi,
org.apache.hadoop.fs.FileStatus file,
org.apache.hadoop.fs.permission.FsAction action)
throws org.apache.hadoop.security.AccessControlException
ugi - the userfile - the fileaction - the action
org.apache.hadoop.security.AccessControlException
public static void logFileSystemState(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path root,
org.apache.commons.logging.Log LOG)
throws IOException
fs - filesystem to investigateroot - root file/directory to start logging fromLOG - log to output information
IOException - if an unexpected exception occurs
public static boolean renameAndSetModifyTime(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.Path dest)
throws IOException
IOException
|
||||||||||
| PREV CLASS NEXT CLASS | FRAMES NO FRAMES | |||||||||
| SUMMARY: NESTED | FIELD | CONSTR | METHOD | DETAIL: FIELD | CONSTR | METHOD | |||||||||