Commit 4989de28 authored by pjvan_thof

Merge remote-tracking branch 'remotes/origin/develop' into fix-BIOPET-718

parents d057e8d5 6c42eee3
......@@ -21,10 +21,6 @@ node('local') {
}
}
stage('Report Tests') {
junit '*/target/surefire-reports/*.xml'
}
stage('Check git on changes') {
sh 'if [ $(git diff | wc -l) -eq 0 ]; then true; else echo "[ERROR] Git is not clean anymore after build"; git diff; echo "[ERROR] This might be caused by reformatted code, if so run maven locally"; false; fi'
}
......
......@@ -16,14 +16,12 @@ package nl.lumc.sasc.biopet.extensions.tools
import java.io.File
import nl.lumc.sasc.biopet.core.summary.{Summarizable}
import nl.lumc.sasc.biopet.core.summary.Summarizable
import nl.lumc.sasc.biopet.core.{Reference, ToolCommandFunction}
import nl.lumc.sasc.biopet.utils.config.Configurable
import nl.lumc.sasc.biopet.utils.{ConfigUtils}
import nl.lumc.sasc.biopet.utils.ConfigUtils
import org.broadinstitute.gatk.utils.commandline.{Input, Output}
import scala.io.Source
/**
* This tool will generate statistics from a vcf file
*
......@@ -41,10 +39,10 @@ class VcfStats(val parent: Configurable)
var input: File = _
@Input
protected var index: File = null
protected var index: File = _
@Output
protected var statsFile: File = null
protected var statsFile: File = _
override def defaultCoreMemory = 3.0
override def defaultThreads = 3
......@@ -59,6 +57,8 @@ class VcfStats(val parent: Configurable)
var intervals: Option[File] = None
override def beforeGraph(): Unit = {
super.beforeGraph()
if (intervals.isEmpty) intervals = config("intervals")
reference = referenceFasta()
index = new File(input.getAbsolutePath + ".tbi")
}
......@@ -71,7 +71,7 @@ class VcfStats(val parent: Configurable)
}
/** Creates command to execute extension */
override def cmdLine =
override def cmdLine: String =
super.cmdLine +
required("-I", input) +
required("-o", outputDir) +
......
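For orientation, a minimal command-line sketch of the underlying VcfStats tool based on the flags visible above (`-I` input VCF, `-o` output directory), assuming invocation through the `biopet tool` wrapper; file names are hypothetical and further options such as intervals exist:

~~~
$ biopet tool VcfStats -I variants.vcf.gz -o vcfstats_output
~~~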
......@@ -54,10 +54,12 @@ object BiopetToolsExecutable extends BiopetExecutable {
nl.lumc.sasc.biopet.tools.vcfstats.VcfStats,
nl.lumc.sasc.biopet.tools.VcfToTsv,
nl.lumc.sasc.biopet.tools.ReplaceContigsGtfFile,
nl.lumc.sasc.biopet.tools.ExtractTagsFromGtf,
nl.lumc.sasc.biopet.tools.ReplaceContigsVcfFile,
nl.lumc.sasc.biopet.tools.VcfWithVcf,
nl.lumc.sasc.biopet.tools.VepNormalizer,
nl.lumc.sasc.biopet.tools.WipeReads,
nl.lumc.sasc.biopet.tools.MultiCoverage,
nl.lumc.sasc.biopet.tools.NcbiReportToContigMap,
nl.lumc.sasc.biopet.tools.DownloadNcbiAssembly
)
......
package nl.lumc.sasc.biopet.tools
import java.io.{File, PrintWriter}
import nl.lumc.sasc.biopet.utils.ToolCommand
import nl.lumc.sasc.biopet.utils.annotation.Feature
import scala.io.Source
/**
* Created by pjvan_thof on 8-6-17.
*/
object ExtractTagsFromGtf extends ToolCommand {
case class Args(outputFile: File = null,
gtfFile: File = null,
tags: List[String] = Nil,
feature: Option[String] = None)
extends AbstractArgs
class OptParser extends AbstractOptParser {
opt[File]('o', "output") required () unbounded () valueName "<file>" action { (x, c) =>
c.copy(outputFile = x)
} text "Input refFlat file. Mandatory"
opt[File]('g', "gtfFile") required () unbounded () valueName "<file>" action { (x, c) =>
c.copy(gtfFile = x)
} text "Output gtf file. Mandatory"
opt[String]('t', "tag") required () unbounded () valueName "<string>" action { (x, c) =>
c.copy(tags = c.tags ::: x :: Nil)
} text "Tags to extract"
opt[String]('f', "feature") unbounded () valueName "<string>" action { (x, c) =>
c.copy(feature = Some(x))
} text "Filter for only this feature type"
}
def main(args: Array[String]): Unit = {
val argsParser = new OptParser
val cmdArgs
: Args = argsParser.parse(args, Args()) getOrElse (throw new IllegalArgumentException)
logger.info("Start")
val reader = Source.fromFile(cmdArgs.gtfFile)
val writer = new PrintWriter(cmdArgs.outputFile)
writer.println(cmdArgs.tags.mkString("#", "\t", ""))
reader
.getLines()
.filter(!_.startsWith("#"))
.map(Feature.fromLine)
.filter(f => cmdArgs.feature.forall(_ == f.feature))
.foreach { f =>
writer.println(cmdArgs.tags.map(f.attributes.get).map(_.getOrElse(".")).mkString("\t"))
}
reader.close()
writer.close()
logger.info("Done")
}
}
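A minimal usage sketch for this new tool, assuming it is invoked through the `biopet tool` wrapper (file and tag names are hypothetical); `-t` may be given multiple times and `-f` restricts extraction to a single feature type:

~~~
$ biopet tool ExtractTagsFromGtf -g annotation.gtf -o gene_tags.tsv -t gene_id -t gene_name -f gene
~~~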
......@@ -17,13 +17,13 @@ object GtfToRefflat extends ToolCommand {
extends AbstractArgs
class OptParser extends AbstractOptParser {
opt[File]('r', "refFlat") required () valueName "<file>" action { (x, c) =>
opt[File]('r', "refFlat") required () unbounded () valueName "<file>" action { (x, c) =>
c.copy(refFlat = x)
} text "Input refFlat file. Mandatory"
opt[File]('g', "gtfFile") required () valueName "<file>" action { (x, c) =>
opt[File]('g', "gtfFile") required () unbounded () valueName "<file>" action { (x, c) =>
c.copy(gtfFile = x)
} text "Output gtf file. Mandatory"
opt[File]('R', "referenceFasta") valueName "<file>" action { (x, c) =>
opt[File]('R', "referenceFasta") unbounded () valueName "<file>" action { (x, c) =>
c.copy(referenceFasta = Some(x))
} text "Reference file"
}
......
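For reference, a usage sketch of GtfToRefflat with the options shown in this hunk, assuming invocation via the `biopet tool` wrapper (hypothetical file names; `-g` is the input GTF, `-r` the output refFlat, and `-R` an optional reference fasta):

~~~
$ biopet tool GtfToRefflat -g annotation.gtf -r annotation.refFlat -R reference.fa
~~~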
package nl.lumc.sasc.biopet.tools
import java.io.{File, PrintWriter}
import htsjdk.samtools.SamReaderFactory
import nl.lumc.sasc.biopet.utils.{BamUtils, ToolCommand}
import nl.lumc.sasc.biopet.utils.intervals.BedRecordList
import scala.collection.JavaConversions._
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by pjvanthof on 17/06/2017.
*/
object MultiCoverage extends ToolCommand {
case class Args(bedFile: File = null,
bamFiles: List[File] = Nil,
outputFile: File = null,
mean: Boolean = false)
extends AbstractArgs
class OptParser extends AbstractOptParser {
opt[File]('L', "bedFile") required () maxOccurs 1 unbounded () valueName "<file>" action {
(x, c) =>
c.copy(bedFile = x)
} text "input bedfile"
opt[File]('b', "bamFile") required () unbounded () valueName "<file>" action { (x, c) =>
c.copy(bamFiles = x :: c.bamFiles)
} text "input bam files"
opt[File]('o', "output") required () maxOccurs 1 unbounded () valueName "<file>" action {
(x, c) =>
c.copy(outputFile = x)
} text "output file"
opt[Unit]("mean") unbounded () valueName "<file>" action { (x, c) =>
c.copy(mean = true)
} text "By default total bases is outputed, enable this option make the output relative to region length"
}
/**
* @param args the command line arguments
*/
def main(args: Array[String]): Unit = {
val argsParser = new OptParser
val cmdargs
: Args = argsParser.parse(args, Args()) getOrElse (throw new IllegalArgumentException)
logger.info("Start")
val bamFiles = BamUtils.sampleBamMap(cmdargs.bamFiles)
val futures = for (region <- BedRecordList.fromFile(cmdargs.bedFile).allRecords)
yield
Future {
val samInterval = region.toSamInterval
val counts = bamFiles.map {
case (sampleName, bamFile) =>
val samReader = SamReaderFactory.makeDefault.open(bamFile)
val count = samReader
.queryOverlapping(samInterval.getContig, samInterval.getStart, samInterval.getEnd)
.foldLeft(0L) {
case (bases, samRecord) =>
val start = (samInterval.getStart :: samRecord.getAlignmentStart :: Nil).max
val end = (samInterval.getEnd :: samRecord.getAlignmentEnd + 1 :: Nil).min
val length = end - start
bases + (if (length < 0) 0 else length)
}
samReader.close()
if (cmdargs.mean && region.length > 0) sampleName -> (count.toDouble / region.length)
else if (cmdargs.mean) sampleName -> 0.0
else sampleName -> count
}
region -> counts
}
logger.info("Reading bam files")
var count = 0
val writer = new PrintWriter(cmdargs.outputFile)
val samples = bamFiles.keys.toList
writer.println(s"#contig\tstart\tend\t${samples.mkString("\t")}")
for (future <- futures) {
val (region, counts) = Await.result(future, Duration.Inf)
writer.println(
s"${region.chr}\t${region.start}\t${region.end}\t${samples.map(counts).mkString("\t")}")
count += 1
if (count % 1000 == 0) logger.info(s"$count regions done")
}
logger.info(s"$count regions done")
writer.close()
logger.info("Done")
}
}
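A minimal usage sketch, assuming invocation through the `biopet tool` wrapper (file names are hypothetical); `-b` may be repeated for multiple BAM files and `--mean` switches the output from total bases to coverage relative to region length:

~~~
$ biopet tool MultiCoverage -L regions.bed -b sample1.bam -b sample2.bam -o coverage.tsv --mean
~~~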
......@@ -18,7 +18,7 @@ object ReplaceContigsGtfFile extends ToolCommand {
extends AbstractArgs
class OptParser extends AbstractOptParser {
opt[File]('I', "input") required () valueName "<file>" action { (x, c) =>
opt[File]('I', "input") required () unbounded () valueName "<file>" action { (x, c) =>
c.copy(input = x)
} text "Input gtf file"
opt[File]('o', "output") required () unbounded () valueName "<file>" action { (x, c) =>
......
......@@ -30,7 +30,7 @@ import scala.collection.JavaConversions._
/**
* Created by ahbbollen on 27-8-15.
*/
class MpileupToVcfTest extends TestNGSuite with MockitoSugar with Matchers {
class MpileupToVcfTest extends TestNGSuite with Matchers {
import MpileupToVcf._
private def resourcePath(p: String): String = {
......
package nl.lumc.sasc.biopet.tools
import java.io.File
import java.nio.file.Paths
import org.scalatest.Matchers
import org.scalatest.testng.TestNGSuite
import org.testng.annotations.Test
import scala.io.Source
/**
* Created by pjvanthof on 17/06/2017.
*/
class MultiCoverageTest extends TestNGSuite with Matchers {
private def resourcePath(p: String): String = {
Paths.get(getClass.getResource(p).toURI).toString
}
@Test
def testDefault(): Unit = {
val outputFile = File.createTempFile("output.", ".txt")
outputFile.deleteOnExit()
MultiCoverage.main(
Array("-L",
resourcePath("/rrna02.bed"),
"-b",
resourcePath("/paired01.bam"),
"-o",
outputFile.getAbsolutePath))
Source.fromFile(outputFile).getLines().toList shouldBe List(
"#contig\tstart\tend\tWipeReadsTestCase",
"chrQ\t300\t350\t0",
"chrQ\t350\t400\t0",
"chrQ\t450\t480\t9",
"chrQ\t470\t475\t0",
"chrQ\t1\t200\t40",
"chrQ\t150\t250\t19"
)
}
@Test
def testMean(): Unit = {
val outputFile = File.createTempFile("output.", ".txt")
outputFile.deleteOnExit()
MultiCoverage.main(
Array("-L",
resourcePath("/rrna02.bed"),
"-b",
resourcePath("/paired01.bam"),
"-o",
outputFile.getAbsolutePath,
"--mean"))
Source.fromFile(outputFile).getLines().toList shouldBe List(
"#contig\tstart\tend\tWipeReadsTestCase",
"chrQ\t300\t350\t0.0",
"chrQ\t350\t400\t0.0",
"chrQ\t450\t480\t0.3",
"chrQ\t470\t475\t0.0",
"chrQ\t1\t200\t0.20100502512562815",
"chrQ\t150\t250\t0.19"
)
}
}
......@@ -151,7 +151,7 @@ case class BedRecord(chr: String,
this
}
def toSamInterval = (name, strand) match {
def toSamInterval: Interval = (name, strand) match {
case (Some(name), Some(strand)) => new Interval(chr, start + 1, end, !strand, name)
case (Some(name), _) => new Interval(chr, start + 1, end, false, name)
case _ => new Interval(chr, start + 1, end)
......
......@@ -3,20 +3,25 @@
## Introduction
Biopet (Bio Pipeline Execution Toolkit) is the main pipeline development framework of the LUMC Sequencing Analysis Support Core team. It contains our main pipelines and some of the command line tools we develop in-house. It is meant to be used in the main [SHARK](https://humgenprojects.lumc.nl/trac/shark) computing cluster. While usage outside of SHARK is technically possible, some adjustments may need to be made in order to do so.
Biopet (Bio Pipeline Execution Toolkit) is the main pipeline development framework of the LUMC Sequencing Analysis Support Core team.
It contains our main pipelines and some of the command line tools we develop in-house.
It is meant to be used in the main [SHARK](https://humgenprojects.lumc.nl/trac/shark) computing cluster.
While usage outside of SHARK is technically possible, some adjustments may need to be made in order to do so.
## Quick Start
### Running Biopet in the SHARK cluster
Biopet is available as a JAR package in SHARK. The easiest way to start using it is to activate the `biopet` environment module, which sets useful aliases and environment variables:
Biopet is available as a JAR package in SHARK.
The easiest way to start using it is to activate the `biopet` environment module, which sets useful aliases and environment variables:
~~~
$ module load biopet/v0.8.0
$ module load biopet/v0.9.0
~~~
With each Biopet release, an accompanying environment module is also released. The latest release is version 0.6.0, thus `biopet/v0.6.0` is the module you would want to load.
With each Biopet release, an accompanying environment module is also released. The latest release is version 0.9.0,
thus `biopet/v0.9.0` is the module you would want to load.
After loading the module, you can access the biopet package by simply typing `biopet`:
......@@ -24,7 +29,9 @@ After loading the module, you can access the biopet package by simply typing `bi
$ biopet
~~~
This will show you a list of tools and pipelines that you can use straight away. You can also execute `biopet pipeline` to show only available pipelines or `biopet tool` to show only the tools. What you should be aware of, is that this is actually a shell function that calls `java` on the system-wide available Biopet JAR file.
This will show you a list of tools and pipelines that you can use straight away. You can also execute `biopet pipeline`
to show only available pipelines or `biopet tool` to show only the tools.
What you should be aware of is that this is actually a shell function that calls `java` on the system-wide available Biopet JAR file.
~~~
$ java -jar <path/to/current/biopet/release.jar>
......@@ -38,7 +45,11 @@ Almost all of the pipelines have a common usage pattern with a similar set of fl
$ biopet pipeline <pipeline_name> -config <path/to/config.json> -qsub -jobParaEnv BWA -jobQueue all.q -retry 2
~~~
The command above will do a *dry* run of a pipeline using a config file as if the command would be submitted to the SHARK cluster (the `-qsub` flag) to the `BWA` parallel environment (the `-jobParaEnv BWA` flag). The `-jobQueue all.q` flag ensures that the proper Queue is used. We also set the maximum retry of failing jobs to two times (via the `-retry 2` flag). Doing a good run is a good idea to ensure that your real run proceeds smoothly. It may not catch all the errors, but if the dry run fails you can be sure that the real run will never succeed.
The command above will do a *dry* run of a pipeline using a config file as if the command would be submitted to the SHARK cluster
(the `-qsub` flag) to the `BWA` parallel environment (the `-jobParaEnv BWA` flag). The `-jobQueue all.q` flag ensures that the proper Queue
is used. We also set the maximum retry of failing jobs to two times (via the `-retry 2` flag).
Doing a dry run is a good idea to ensure that your real run proceeds smoothly. It may not catch all the errors, but if the dry run fails
you can be sure that the real run will never succeed.
If the dry run proceeds without problems, you can then do the real run by using the `-run` flag:
......@@ -46,7 +57,10 @@ If the dry run proceeds without problems, you can then do the real run by using
$ biopet pipeline <pipeline_name> -config <path/to/config.json> -qsub -jobParaEnv BWA -jobQueue all.q -retry 2 -run
~~~
It is usually a good idea to do the real run using `screen` or `nohup` to prevent the job from terminating when you log out of SHARK. In practice, using `biopet` as it is is also fine. What you need to keep in mind, is that each pipeline has their own expected config layout. You can check out more about the general structure of our config files [here](general/config.md). For the specific structure that each pipeline accepts, please consult the respective pipeline page.
It is usually a good idea to do the real run using `screen` or `nohup` to prevent the job from terminating when you log out of SHARK.
In practice, using `biopet` as-is is also fine. What you need to keep in mind is that each pipeline has its own expected config layout.
You can check out more about the general structure of our config files [here](general/config.md). For the specific structure that each
pipeline accepts, please consult the respective pipeline page.
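For example, a real run kept alive with `nohup` might look like this (a sketch; the same flags as above apply):

~~~
$ nohup biopet pipeline <pipeline_name> -config <path/to/config.json> -qsub -jobParaEnv BWA -jobQueue all.q -retry 2 -run &
~~~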
### Convention in this documentation
......@@ -68,14 +82,18 @@ The `biopet` shortcut is only available on the SHARK cluster with the `module` e
### Running Biopet in your own computer
At the moment, we do not provide links to download the Biopet package. If you are interested in trying out Biopet locally, please contact us as [sasc@lumc.nl](mailto:sasc@lumc.nl).
At the moment, we do not provide links to download the Biopet package. If you are interested in trying out Biopet locally,
please contact us at [sasc@lumc.nl](mailto:sasc@lumc.nl).
## Contributing to Biopet
Biopet is based on the Queue framework developed by the Broad Institute as part of their Genome Analysis Toolkit (GATK) framework. The current Biopet release is based on the GATK 3.4 release.
Biopet is based on the Queue framework developed by the Broad Institute as part of their Genome Analysis Toolkit (GATK) framework.
The current Biopet release is based on the GATK 3.7 release.
We welcome any kind of contribution, be it merge requests on the code base, documentation updates, or any kinds of other fixes! The main language we use is Scala, though the repository also contains a small bit of Python and R. Our main code repository is located at [https://github.com/biopet/biopet](https://github.com/biopet/biopet/issues), along with our issue tracker.
We welcome any kind of contribution, be it merge requests on the code base, documentation updates, or any kinds of other fixes!
The main language we use is Scala, though the repository also contains a small bit of Python and R. Our main code repository is located at [https://github.com/biopet/biopet](https://github.com/biopet/biopet),
along with our [issue tracker](https://github.com/biopet/biopet/issues).
## About
......
......@@ -120,7 +120,8 @@ OutDir
| +-- <sample_name>.krkn.json
~~~
The `Gears`-specific results are contained in a folder named after each tool that was used (by default `Gears` uses centrifuge). They are stored in the following files:
The `Gears`-specific results are contained in a folder named after each tool that was used (by default `Gears` uses centrifuge).
They are stored in the following files:
| File suffix | Application | Content | Description |
......@@ -133,12 +134,16 @@ The `Gears`-specific results are contained in a folder named after each tool tha
Kraken specific output
| File suffix | Application | Content | Description |
| ----------- | ----------- | ------- | ----------- |
| *.krkn.raw | kraken | tsv | Annotation per sequence |
| *.krkn.full | kraken-report | tsv | List of all possible annotations with counts filled in for this specific sample |
| *.krkn.json | krakenReportToJson | json | JSON representation of the taxonomy report, for postprocessing |
QIIME specific output
| File suffix | Application | Content | Description |
| ----------- | ----------- | ------- | ----------- |
| *.otu_table.biom | qiime | biom | Biom file containing counts for OTUs identified in the input |
| *.otu_map.txt | qiime | tsv | Tab-separated file containing information about which samples a taxon has been identified in |
......
......@@ -8,28 +8,22 @@ Basty will output phylogenetic trees, which makes it very easy to look at the va
### Tools for this pipeline
* [Shiva](shiva.md)
* [BastyGenerateFasta](../tools/BastyGenerateFasta.md)
* [BastyGenerateFasta](../../tools/BastyGenerateFasta.md)
* <a href="http://sco.h-its.org/exelixis/software.html" target="_blank">RAxml</a>
* <a href="https://github.com/sanger-pathogens/Gubbins" target="_blank">Gubbins</a>
### Requirements
To run with a specific species, please do not forget to create the proper index files.
The index files are created from the supplied reference:
* ```.dict``` (can be produced with <a href="http://broadinstitute.github.io/picard/" target="_blank">Picard tool suite</a>)
* ```.fai``` (can be produced with <a href="http://samtools.sourceforge.net/samtools.shtml" target="_blank">Samtools faidx</a>
* ```.idxSpecificForAligner``` (depending on which aligner is used one should create a suitable index specific for that aligner.
Each aligner has his own way of creating index files. Therefore the options for creating the index files can be found inside the aligner itself)
To run with a specific species, please do not forget to [create the proper index files](multisamplemapping.md#Setting-up).
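As a reminder, a minimal sketch of creating the `.fai` and `.dict` index files with samtools and Picard (paths and file names are hypothetical; aligner-specific indexes are created with the aligner itself):

~~~
$ samtools faidx reference.fa
$ java -jar picard.jar CreateSequenceDictionary R=reference.fa O=reference.dict
~~~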
### Configuration
To run Basty, please create the proper [Config](../general/config.md) files.
To run Basty, please create the proper [Config](../../general/config.md) files.
Basty uses the [Shiva](shiva.md) pipeline internally. Please check the documentation of that pipeline for its options.
#### Sample input extensions
Please refer [to our mapping pipeline](mapping.md) for information about how the input samples should be handled.
Please refer [to our mapping pipeline](../mapping.md) for information about how the input samples should be handled.
#### Required configuration values
......@@ -76,7 +70,7 @@ biopet pipeline basty -h
~~~
#### Run the pipeline:
Note that one should first create the appropriate [configs](../general/config.md).
Note that one should first create the appropriate [configs](../../general/config.md).
~~~
biopet pipeline basty -run -config MySamples.json -config MySettings.json
......@@ -85,13 +79,13 @@ biopet pipeline basty -run -config MySamples.json -config MySettings.json
### Result files
The output files this pipeline produces are:
* A complete output from [Flexiprep](flexiprep.md)
* A complete output from [Flexiprep](../flexiprep.md)
* BAM files, produced with the mapping pipeline (BWA, Bowtie, Stampy, STAR or STAR 2-pass; default: BWA)
* VCF file from all samples together
* The output from the tool [BastyGenerateFasta](../tools/BastyGenerateFasta.md)
* The output from the tool [BastyGenerateFasta](../../tools/BastyGenerateFasta.md)
* FASTA containing variants only
* FASTA containing all the consensus sequences based on minimum coverage (default: 8), which can be modified in the config
* A phylogenetic tree based on the variants called with the Shiva pipeline generated with the tool [BastyGenerateFasta](../tools/BastyGenerateFasta.md)
* A phylogenetic tree based on the variants called with the Shiva pipeline generated with the tool [BastyGenerateFasta](../../tools/BastyGenerateFasta.md)
~~~
......
......@@ -2,18 +2,21 @@
## Introduction
Carp is a pipeline for analyzing ChIP-seq NGS data. It uses the BWA MEM aligner and the MACS2 peak caller by default to align ChIP-seq data and call the peaks and allows you to run all your samples (control or otherwise) in one go.
Carp is a pipeline for analyzing ChIP-seq NGS data. It uses the `bwa mem` aligner and the [MACS2](https://github.com/taoliu/MACS/wiki) peak caller
by default to align the ChIP-seq data and call peaks, and it allows you to run all your samples (control or otherwise) in one go.
### Sample input extensions
Please refer [to our mapping pipeline](mapping.md) for information about how the input samples should be handled.
Please refer to our [config documentation page](../../general/config.md) for information about how the input samples should be handled.
## Configuration File
### Sample Configuration
The layout of the sample configuration for Carp is basically the same as with our other multi sample pipelines it may be either ```json``` or ```yaml``` formatted.
Below we show two examples for ```json``` and ```yaml```. One should appreciate that multiple libraries can be used if a sample is sequenced on multiple lanes. This is noted with library id in the config file.
The layout of the sample configuration for Carp is basically the same as with our other multisample pipelines.
It may be either `json` or `yaml` formatted.
Below we show two examples for `json` and `yaml`. One should appreciate that multiple libraries can be used if a sample is sequenced on multiple lanes.
This is noted with library id in the config file.
~~~ json
......@@ -64,9 +67,12 @@ samples:
~~~
What's important here is that you can specify the control ChIP-seq experiment(s) for a given sample. These controls are usually
ChIP-seq runs from input DNA and/or from treatment with nonspecific binding proteins such as IgG. In the example above, we are specifying `sample_Y` as the control for `sample_X`.
**Please notice** that the control is given in the form of a ```list```. This is because sometimes one wants to use multiple control samples, this can be achieved to pass the sampleNames of the control samples in a list to the field **control** in the config file.
In ```json``` this will become:
ChIP-seq runs from input DNA and/or from treatment with nonspecific binding proteins such as IgG.
In the example above, we are specifying `sample_Y` as the control for `sample_X`.
**Please notice** that the control is given in the form of a ```list```. This is because sometimes one wants to use multiple control samples;
this can be achieved by passing the sample names of the control samples as a list to the **control** field in the config file.
In `json` this will become:
~~~ json
{
......@@ -93,39 +99,50 @@ samples:
For the pipeline settings, there are some values that you need to specify while others are optional. Required settings are:
1. `output_dir`: path to output directory (if it does not exist, Carp will create it for you).
2. `reference`: this must point to a reference FASTA file and in the same directory, there must be a `.dict` file of the FASTA file.
| ConfigNamespace | Name | Type | Default | Function |
| --------- | ---- | ---- | ------- | -------- |
| - | output_dir | String | - | Path to the output directory (if it does not exist, Carp will create it for you) |
| mapping | reference_fasta | String | - | This must point to a reference FASTA file; in the same directory there must be a `.dict` file of the FASTA file. |
While optional settings are:
1. `aligner`: which aligner to use (`bwa` or `bowtie`)
2. `macs2`: Here only the callpeak modus is implemented. But one can set all the options from [macs2 callpeak](https://github.com/taoliu/MACS/#call-peaks) in this settings config. Note that the config value is: `macs2_callpeak`
| ConfigNamespace | Name | Type | Default | Function |
| --------- | ---- | ---- | ------- | -------- |
| mapping | aligner | String | bwa-mem | Aligner of choice. Options: `bowtie` |
Here only the `callpeak` function of macs2 is implemented.
In order to pass parameters specific to `macs2 callpeak`, the `macs2callpeak` namespace should be used.
For example, including the following in your config file will set the effective genome size:
```yaml
macs2callpeak:
gsize: 2.7e9
```
[Gears](gears) is run automatically for the data analysed with `Carp`. There are two levels on which this can be done and this should be specified in the [config](../general/config) file:
A comprehensive list of all available options for `macs2 callpeak` can be found [here](https://github.com/taoliu/MACS/#call-peaks).
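Putting it together, a Carp run combining a settings file and a sample config could then be started in the usual way (a sketch, assuming the pipeline is invoked as `carp`; `settings.yaml` and `samples.json` stand in for your own config files):

~~~
$ biopet pipeline carp -config settings.yaml -config samples.json -qsub -jobParaEnv BWA -jobQueue all.q -retry 2 -run
~~~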