public class MLPDataFileInputFormat
extends org.apache.hadoop.mapreduce.lib.input.FileInputFormat<java.lang.String[],org.apache.spark.sql.Row>
| Constructor and Description |
| --- |
| `MLPDataFileInputFormat()` |
| Modifier and Type | Method and Description |
| --- | --- |
| `org.apache.hadoop.mapreduce.RecordReader<java.lang.String[],org.apache.spark.sql.Row>` | `createRecordReader(org.apache.hadoop.mapreduce.InputSplit split, org.apache.hadoop.mapreduce.TaskAttemptContext context)` |
| `java.util.List<org.apache.hadoop.mapreduce.InputSplit>` | `getSplits(org.apache.hadoop.mapreduce.JobContext job)` |
| `protected boolean` | `isSplitable(org.apache.hadoop.mapreduce.JobContext context, org.apache.hadoop.fs.Path filename)` |
Methods inherited from class org.apache.hadoop.mapreduce.lib.input.FileInputFormat: `addInputPath, addInputPathRecursively, addInputPaths, computeSplitSize, getBlockIndex, getFormatMinSplitSize, getInputDirRecursive, getInputPathFilter, getInputPaths, getMaxSplitSize, getMinSplitSize, listStatus, makeSplit, makeSplit, setInputDirRecursive, setInputPathFilter, setInputPaths, setInputPaths, setMaxInputSplitSize, setMinInputSplitSize`
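The page gives no usage example. Below is a minimal sketch, assuming a hypothetical input path, of reading files through this format from Spark's Java API; `newAPIHadoopFile` takes the input format class plus the key and value classes matching the `FileInputFormat<java.lang.String[],org.apache.spark.sql.Row>` type parameters above.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Row;

public class MLPDataReadExample {
  public static void main(String[] args) {
    JavaSparkContext jsc =
        new JavaSparkContext(new SparkConf().setAppName("mlp-data-read"));

    // Keys are String[] and values are Rows, matching this class's
    // type parameters.
    JavaPairRDD<String[], Row> records = jsc.newAPIHadoopFile(
        "hdfs:///data/mlp/input",      // hypothetical input path
        MLPDataFileInputFormat.class,
        String[].class,
        Row.class,
        new Configuration());

    System.out.println("records: " + records.count());
    jsc.stop();
  }
}
```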
`public org.apache.hadoop.mapreduce.RecordReader<java.lang.String[],org.apache.spark.sql.Row> createRecordReader(org.apache.hadoop.mapreduce.InputSplit split, org.apache.hadoop.mapreduce.TaskAttemptContext context) throws java.io.IOException, java.lang.InterruptedException`

Specified by: `createRecordReader` in class `org.apache.hadoop.mapreduce.InputFormat<java.lang.String[],org.apache.spark.sql.Row>`

Throws: `java.io.IOException`, `java.lang.InterruptedException`
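The concrete RecordReader this method returns is not named on this page. The sketch below shows the usual shape of such an override in a `FileInputFormat` subclass; the class name, the line-based parsing, the one-element key array, and the one-column Row are all assumptions for illustration, not the actual implementation.

```java
import java.io.IOException;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;

/** Illustrative shape only; not the real MLPDataFileInputFormat. */
public class SketchLineInputFormat extends FileInputFormat<String[], Row> {
  @Override
  public RecordReader<String[], Row> createRecordReader(
      InputSplit split, TaskAttemptContext context) {
    // The framework calls initialize(split, context) on the returned
    // reader before the first nextKeyValue(), so construction is enough.
    return new RecordReader<String[], Row>() {
      private final LineRecordReader lines = new LineRecordReader();

      @Override
      public void initialize(InputSplit s, TaskAttemptContext c) throws IOException {
        lines.initialize(s, c);
      }

      @Override
      public boolean nextKeyValue() throws IOException {
        return lines.nextKeyValue();
      }

      @Override
      public String[] getCurrentKey() {
        // Assumed keying scheme: byte offset as a one-element array.
        return new String[] { String.valueOf(lines.getCurrentKey().get()) };
      }

      @Override
      public Row getCurrentValue() {
        // Assumed value: one-column Row holding the raw line.
        return RowFactory.create(lines.getCurrentValue().toString());
      }

      @Override
      public float getProgress() throws IOException {
        return lines.getProgress();
      }

      @Override
      public void close() throws IOException {
        lines.close();
      }
    };
  }
}
```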
`protected boolean isSplitable(org.apache.hadoop.mapreduce.JobContext context, org.apache.hadoop.fs.Path filename)`

Overrides: `isSplitable` in class `org.apache.hadoop.mapreduce.lib.input.FileInputFormat<java.lang.String[],org.apache.spark.sql.Row>`
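The page does not say what this override returns. A common reason to override `isSplitable` is to return `false` so each file becomes a single split, which matters when records cannot be cut at arbitrary byte offsets; a minimal sketch with a hypothetical class name:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.spark.sql.Row;

/** Illustrative shape only; not the real override. */
public abstract class NonSplittableSketch extends FileInputFormat<String[], Row> {
  @Override
  protected boolean isSplitable(JobContext context, Path filename) {
    // Returning false forces one split per file. Whether
    // MLPDataFileInputFormat does this is not stated on this page.
    return false;
  }
}
```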
`public java.util.List<org.apache.hadoop.mapreduce.InputSplit> getSplits(org.apache.hadoop.mapreduce.JobContext job) throws java.io.IOException`

Overrides: `getSplits` in class `org.apache.hadoop.mapreduce.lib.input.FileInputFormat<java.lang.String[],org.apache.spark.sql.Row>`

Throws: `java.io.IOException`
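Likewise, the body of this `getSplits` override is not shown. A typical pattern is to start from `super.getSplits(job)` and post-process the result; a hedged sketch with hypothetical names:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.spark.sql.Row;

/** Illustrative shape only; not the real override. */
public abstract class SplitAdjustingSketch extends FileInputFormat<String[], Row> {
  @Override
  public List<InputSplit> getSplits(JobContext job) throws IOException {
    // Start from the block-aligned splits FileInputFormat computes,
    // then post-process; logging stands in for a real adjustment.
    List<InputSplit> splits = new ArrayList<>(super.getSplits(job));
    for (InputSplit split : splits) {
      try {
        System.out.println("split length: " + split.getLength());
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException(e);
      }
    }
    return splits;
  }
}
```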