Stdout log capture

This topic contains 1 reply, has 1 voice, and was last updated by Dominic Fox 5 months, 4 weeks ago.

  • Creator
    Topic
  • #52352

    Dominic Fox
    Participant

    I have a WordCount MapReduce job that tries to write log messages to stdout during map and reduce execution, using both log4j’s Logger and System.out.println() statements. Neither output is captured in the job history when I run the job against an unaltered HDP 2.1 Sandbox VM instance – in fact, the logs (when viewed through the job history browser) always look like this:

    Log Type: stderr
    Log Length: 222
    log4j:WARN No appenders could be found for logger (org.apache.hadoop.ipc.Server).
    log4j:WARN Please initialize the log4j system properly.
    log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.

    Log Type: stdout
    Log Length: 0

    Log Type: syslog
    Log Length: 48335
    [...lots of syslog stuff, but none of my log messages]

    Is there some piece of configuration I am missing?
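
    (The log4j:WARN lines above suggest that the task JVM is not finding any log4j configuration at all. For reference, a minimal log4j.properties of the kind the task would need to pick up – a sketch only, assuming it can be made visible on the task classpath, e.g. by bundling it into the job jar – might look like this:)

    # Minimal log4j 1.2 configuration - a sketch, assuming it ends up on the task classpath
    log4j.rootLogger=INFO, console

    # Send everything to the container's stdout
    log4j.appender.console=org.apache.log4j.ConsoleAppender
    log4j.appender.console.Target=System.out
    log4j.appender.console.layout=org.apache.log4j.PatternLayout
    log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n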


  • Author
    Replies
  • #52355

    Dominic Fox
    Participant

    (Here, for reference, is the WordCount class I’m using.)

    package com.opencredo.hadoop.logging;

    import org.apache.log4j.Logger;
    import org.apache.log4j.LogManager;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.*;

    import java.io.IOException;
    import java.util.Iterator;
    import java.util.StringTokenizer;

    public class WordCount {

        private static final Logger logger = LogManager.getLogger(WordCount.class);

        public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
            private final static IntWritable one = new IntWritable(1);
            private Text word = new Text();

            public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
                String line = value.toString();

                // Log via every channel available, to see which (if any) is captured.
                logger.warn("Mapping line " + line);
                System.out.println("Mapping line " + line);
                System.err.println("Mapping line " + line);

                StringTokenizer tokenizer = new StringTokenizer(line);
                while (tokenizer.hasMoreTokens()) {
                    String nextToken = tokenizer.nextToken();

                    logger.warn("Outputting word " + nextToken);
                    word.set(nextToken);

                    // Emit (word, 1) for each token in the line.
                    output.collect(word, one);
                }
            }
        }

        public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
            public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
                logger.error("Reducing values for " + key);

                // Sum the counts emitted for this word.
                int sum = 0;
                while (values.hasNext()) {
                    sum += values.next().get();
                }
                logger.error("Outputting sum " + sum + " for key " + key);
                output.collect(key, new IntWritable(sum));
            }
        }

        public static void main(String[] args) throws Exception {
            JobConf conf = new JobConf(WordCount.class);
            conf.setJobName("wordcount");

            conf.setOutputKeyClass(Text.class);
            conf.setOutputValueClass(IntWritable.class);

            conf.setMapperClass(Map.class);
            // Reduce doubles as the combiner, since summing counts is associative.
            conf.setCombinerClass(Reduce.class);
            conf.setReducerClass(Reduce.class);

            conf.setInputFormat(TextInputFormat.class);
            conf.setOutputFormat(TextOutputFormat.class);

            // args[0] = input path, args[1] = output path (must not already exist).
            FileInputFormat.setInputPaths(conf, new Path(args[0]));
            FileOutputFormat.setOutputPath(conf, new Path(args[1]));

            JobClient.runJob(conf);
        }
    }
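
    (For anyone trying to reproduce this: once the job completes, the aggregated container logs – which is where any captured stdout should end up – can be fetched with the YARN CLI, assuming log aggregation (yarn.log-aggregation-enable) is switched on in yarn-site.xml:)

    yarn logs -applicationId <application id>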
