hadoop 执行MapReducer报错
0
在 Linux 上执行 MapReduce 时报这种错,有朋友指导一下如何解决吗?
[hdfs@hadoop javawar]$ hadoop jar test.jar test /tmp/logs/hello.txt /tmp/logs/
Exception in thread "main" java.lang.ClassNotFoundException: test
at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:270)
at org.apache.hadoop.util.RunJar.run(RunJar.java:214)
at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
源码如下
package java_app2;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * WordCount driver. args[0] = input file/directory, args[1] = output directory.
 *
 * NOTE: this class is declared in package {@code java_app2}, so it must be
 * launched with its fully-qualified name:
 *   hadoop jar test.jar java_app2.test &lt;in&gt; &lt;out&gt;
 * Launching it as bare "test" produces exactly the
 * ClassNotFoundException shown above.
 */
public class test {
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: java_app2.test <input path> <output path>");
            System.exit(2);
        }
        Path inFile = new Path(args[0]);   // input path
        Path outFile = new Path(args[1]);  // output path (must not already exist)
        Job job = Job.getInstance();
        job.setJarByClass(test.class);     // lets Hadoop locate the jar containing this class
        FileInputFormat.addInputPath(job, inFile);
        FileOutputFormat.setOutputPath(job, outFile);
        job.setMapperClass(WcMap.class);
        job.setReducerClass(WcReduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);        // final key:   Text
        job.setOutputValueClass(IntWritable.class); // final value: IntWritable
        // Propagate job success/failure to the shell (original discarded the result,
        // so a failed job still exited with status 0).
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
/**
 * Mapper: splits each input line on single spaces and emits (word, 1).
 * The line whose byte offset is 0 (first line of the file/split) is
 * skipped, preserving the original header-skipping behavior.
 */
class WcMap extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value,
            Mapper<LongWritable, Text, Text, IntWritable>.Context context)
            throws IOException, InterruptedException {
        // Guard clause: ignore the line at offset 0.
        if ("0".equals(key.toString())) {
            return;
        }
        for (String token : value.toString().split(" ")) {
            context.write(new Text(token), new IntWritable(1));
        }
    }
}
class WcReduce extends Reducer<Text, IntWritable, Text, IntWritable>
{
@Override
//may {1,1,1,1}
protected void reduce(Text arg0, Iterable<IntWritable> arg1,
Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable i : arg1)
{
sum += i.get();
}
context.write(arg0, new IntWritable(sum));
}
[hdfs@hadoop javawar]$ hadoop jar test.jar test /tmp/logs/hello.txt /tmp/logs/
Exception in thread "main" java.lang.ClassNotFoundException: test
at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:270)
at org.apache.hadoop.util.RunJar.run(RunJar.java:214)
at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
源码如下
package java_app2;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * WordCount driver. args[0] = input file/directory, args[1] = output directory.
 *
 * NOTE: this class is declared in package {@code java_app2}, so it must be
 * launched with its fully-qualified name:
 *   hadoop jar test.jar java_app2.test &lt;in&gt; &lt;out&gt;
 * Launching it as bare "test" produces exactly the
 * ClassNotFoundException shown above.
 */
public class test {
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: java_app2.test <input path> <output path>");
            System.exit(2);
        }
        Path inFile = new Path(args[0]);   // input path
        Path outFile = new Path(args[1]);  // output path (must not already exist)
        Job job = Job.getInstance();
        job.setJarByClass(test.class);     // lets Hadoop locate the jar containing this class
        FileInputFormat.addInputPath(job, inFile);
        FileOutputFormat.setOutputPath(job, outFile);
        job.setMapperClass(WcMap.class);
        job.setReducerClass(WcReduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);        // final key:   Text
        job.setOutputValueClass(IntWritable.class); // final value: IntWritable
        // Propagate job success/failure to the shell (original discarded the result,
        // so a failed job still exited with status 0).
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
/**
 * Mapper: splits each input line on single spaces and emits (word, 1).
 * The line whose byte offset is 0 (first line of the file/split) is
 * skipped, preserving the original header-skipping behavior.
 */
class WcMap extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value,
            Mapper<LongWritable, Text, Text, IntWritable>.Context context)
            throws IOException, InterruptedException {
        // Guard clause: ignore the line at offset 0.
        if ("0".equals(key.toString())) {
            return;
        }
        for (String token : value.toString().split(" ")) {
            context.write(new Text(token), new IntWritable(1));
        }
    }
}
class WcReduce extends Reducer<Text, IntWritable, Text, IntWritable>
{
@Override
//may {1,1,1,1}
protected void reduce(Text arg0, Iterable<IntWritable> arg1,
Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable i : arg1)
{
sum += i.get();
}
context.write(arg0, new IntWritable(sum));
}
没有找到相关结果
重要提示:提问者不能发表回复,可以通过评论与回答者沟通,沟通后可以通过编辑功能完善问题描述,以便后续其他人能够更容易理解问题.
1 个回复
MarsJ - 大数据玩家~DS 2017-01-09 回答
赞同来自:

类 test 定义在 package java_app2 中,编译后类的全限定名是 java_app2.test,因此运行时必须写完整类名:hadoop jar test.jar java_app2.test /tmp/logs/hello.txt /tmp/logs/out(另外注意输出目录必须是一个不存在的目录)。