Description: This program uses Hadoop MapReduce to find the maximum and minimum values in a dataset. The Mapper reads each comma-separated record and emits every value keyed by its column number; the Reducer then computes the minimum and maximum of all values it receives for each column.
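For example, given the small hypothetical two-column input below, the job emits one minimum line and one maximum line per column index (with the default TextOutputFormat, key and value are tab-separated):

Input records:
10,25
5,40
8,30

Output (column index, value; minimum first, then maximum):
1	5.0
1	10.0
2	25.0
2	40.0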
Source Code
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

public class maxmin {

    public static class maxminmapper
            extends Mapper<LongWritable, Text, Text, DoubleWritable> {
        private final Text t1 = new Text();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split the CSV line and emit each value keyed by its column number (1-based).
            String[] colvalue = value.toString().split(",");
            for (int i = 0; i < colvalue.length; i++) {
                t1.set(String.valueOf(i + 1));
                context.write(t1, new DoubleWritable(Double.parseDouble(colvalue[i])));
            }
        }
    }
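    // Between the map and reduce phases, Hadoop's shuffle groups the emitted
    // (column, value) pairs by key, so each reduce() call below receives
    // every value observed for one column.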
    public static class maxminReducer
            extends Reducer<Text, DoubleWritable, Text, DoubleWritable> {
        @Override
        public void reduce(Text key, Iterable<DoubleWritable> values, Context context)
                throws IOException, InterruptedException {
            // Initialize both extremes so that any real value replaces them,
            // even when the column contains only negative numbers.
            double min = Double.MAX_VALUE, max = -Double.MAX_VALUE;
            Iterator<DoubleWritable> iterator = values.iterator();
            while (iterator.hasNext()) {
                double value = iterator.next().get();
                if (value < min) {
                    min = value;
                }
                if (value > max) {
                    max = value;
                }
            }
            // Emit two records per column: its minimum, then its maximum.
            context.write(key, new DoubleWritable(min));
            context.write(key, new DoubleWritable(max));
        }
    }
    public static void main(String[] args) throws Exception {
        Path inputPath = new Path("hdfs://localhost:54310/home/sortinput");
        Path outputDir = new Path("hdfs://localhost:54310/home/MaxMinOutput1");
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "Find Minimum and Maximum");
        job.setJarByClass(maxmin.class);
        FileSystem fs = FileSystem.get(conf);