# The job name.
camus.job.name=CamusJob

# Kafka brokers to connect to, format: kafka.brokers=host1:port,host2:port,host3:port
kafka.brokers=

# Top-level data output directory, sub-directories are dynamically created for each topic pulled
etl.destination.path=/user/username/topics

# HDFS location to keep execution files: requests, offsets, error logs and count files
etl.execution.base.path=/user/username/exec

# HDFS location to keep historical execution files, usually a sub-directory of the base.path
etl.execution.history.path=/user/username/camus/exec/history

# Concrete implementation of the decoder class to use.
camus.message.decoder.class=io.confluent.camus.etl.kafka.coders.AvroMessageDecoder

# Max number of MapReduce tasks to use, each task can pull multiple topic partitions
mapred.map.tasks=30

# Max historical time that will be pulled from each partition based on event timestamp
kafka.max.pull.hrs=1

# Events with a timestamp older than this will be discarded
kafka.max.historical.days=3

# Max minutes for each mapper to pull messages from Kafka (-1 means no limit)
kafka.max.pull.minutes.per.task=-1

# Only topics in the whitelist are pulled from Kafka, and no topics from the blacklist are pulled
kafka.blacklist.topics=
kafka.whitelist.topics=

log4j.configuration=true

# Name of the client as seen by kafka
kafka.client.name=camus

# Stops the mapper from getting inundated with decoder exceptions from the same topic
max.decoder.exceptions.to.print=5