diff --git a/docs/config.md b/docs/config.md
index c10419859bed6d3f510c80afe0b27fea913d774f..de3342b195b1dc7acb338729792d9dabc59c5228 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -72,16 +72,16 @@ Global setting examples are:
 #### Example settings config
 ~~~
 {
-        "reference": "/data/LGTC/projects/vandoorn-melanoma/data/references/hg19_nohap/ucsc.hg19_nohap.fasta",
-        "dbsnp": "/data/LGTC/projects/vandoorn-melanoma/data/references/hg19_nohap/dbsnp_137.hg19_nohap.vcf",
+        "reference": "/references/hg19_nohap/ucsc.hg19_nohap.fasta",
+        "dbsnp": "/references/hg19_nohap/dbsnp_137.hg19_nohap.vcf",
         "joint_variantcalling": false,
         "haplotypecaller": { "scattercount": 100 },
         "multisample": { "haplotypecaller": { "scattercount": 1000 } },
         "picard": { "validationstringency": "LENIENT" },
         "library_variantcalling_temp": true,
-        "target_bed_temp": "/data/LGTC/projects/vandoorn-melanoma/analysis/target.bed",
+        "target_bed_temp": "analysis/target.bed",
         "min_dp": 5,
-        "bedtools": {"exe":"/share/isilon/system/local/BEDtools/bedtools-2.17.0/bin/bedtools"},
+        "bedtools": {"exe":"/BEDtools/bedtools-2.17.0/bin/bedtools"},
         "bam_to_fastq": true,
         "baserecalibrator": { "memory_limit": 8, "vmem":"16G" },
         "samtofastq": {"memory_limit": 8, "vmem": "16G"},
@@ -95,4 +95,4 @@ Global setting examples are:
 ### JSON validation
 
-To check if the JSON file created is correct we can use multiple options the simplest way is using [this](http://jsonformatter.curiousconcept.com/)
-website. It is also possible to use Python or Scala for validating but this requires some more knowledge.
\ No newline at end of file
+To check whether the JSON file is valid there are multiple options; the simplest is using [this](http://jsonformatter.curiousconcept.com/)
+website. It is also possible to validate the file with Python or Scala, but this requires some more knowledge.
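+
+For example, a minimal validation sketch in Scala (the `ValidateConfig` object
+below is only an illustration, not part of Biopet; `JSON.parseFull` comes from
+the Scala 2.10 standard library and lives in the scala-parser-combinators
+module in later Scala versions):
+~~~
+import scala.io.Source
+import scala.util.parsing.json.JSON
+
+object ValidateConfig {
+  def main(args: Array[String]): Unit = {
+    val text = Source.fromFile(args(0)).mkString
+    // parseFull returns None when the input is not valid JSON
+    JSON.parseFull(text) match {
+      case Some(_) => println("Valid JSON: " + args(0))
+      case None    => println("Invalid JSON: " + args(0)); sys.exit(1)
+    }
+  }
+}
+~~~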
diff --git a/protected/basty/src/main/scala/nl/lumc/sasc/biopet/pipelines/basty/Basty.scala b/protected/basty/src/main/scala/nl/lumc/sasc/biopet/pipelines/basty/Basty.scala
index 0f2eed5d1b30e96cb36d9cc6f3936e93a87364f2..5088e44c2eca6f54b23104c7695c3a03d5f762c8 100644
--- a/protected/basty/src/main/scala/nl/lumc/sasc/biopet/pipelines/basty/Basty.scala
+++ b/protected/basty/src/main/scala/nl/lumc/sasc/biopet/pipelines/basty/Basty.scala
@@ -33,7 +33,7 @@ class Basty(val root: Configurable) extends QScript with MultiSampleQScript {
   def makeSample(id: String) = new Sample(id)
   class Sample(sampleId: String) extends AbstractSample(sampleId) {
     def makeLibrary(id: String) = new Library(id)
-    class Library(libraryId: String) extends AbstractLibrary(libraryId) {
+    class Library(libId: String) extends AbstractLibrary(libId) {
       protected def addJobs(): Unit = {}
     }
 
@@ -41,7 +41,7 @@ class Basty(val root: Configurable) extends QScript with MultiSampleQScript {
     var outputSnps: FastaOutput = _
 
     protected def addJobs(): Unit = {
-      addLibsJobs()
+      addPerLibJobs()
       output = addGenerateFasta(sampleId, sampleDir)
       outputSnps = addGenerateFasta(sampleId, sampleDir, snpsOnly = true)
     }
@@ -56,11 +56,13 @@ class Basty(val root: Configurable) extends QScript with MultiSampleQScript {
     gatkPipeline.biopetScript
     addAll(gatkPipeline.functions)
 
+    addSamplesJobs()
+  }
+
+  def addMultiSampleJobs(): Unit = {
     val refVariants = addGenerateFasta(null, outputDir + "reference/", outputName = "reference")
     val refVariantSnps = addGenerateFasta(null, outputDir + "reference/", outputName = "reference", snpsOnly = true)
 
-    addSamplesJobs()
-
     val catVariants = Cat(this, refVariants.variants :: samples.map(_._2.output.variants).toList, outputDir + "fastas/variant.fasta")
     add(catVariants)
     val catVariantsSnps = Cat(this, refVariantSnps.variants :: samples.map(_._2.outputSnps.variants).toList, outputDir + "fastas/variant.snps_only.fasta")
@@ -122,13 +124,14 @@ class Basty(val root: Configurable) extends QScript with MultiSampleQScript {
 
       val gubbins = new RunGubbins(this)
       gubbins.fastafile = concensusVariants
-      gubbins.startingTree = raxmlBi.getBipartitionsFile
+      gubbins.startingTree = Some(raxmlBi.getBipartitionsFile)
       gubbins.outputDirectory = outputDir + dirSufixGubbins
       add(gubbins)
     }
 
     addTreeJobs(catVariantsSnps.output, catConsensusVariantsSnps.output, outputDir + "trees" + File.separator + "snps_only", "snps_only")
     addTreeJobs(catVariants.output, catConsensusVariants.output, outputDir + "trees" + File.separator + "snps_indels", "snps_indels")
+
   }
 
   def addGenerateFasta(sampleName: String, outputDir: String, outputName: String = null,
diff --git a/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/BaseRecalibrator.scala b/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/BaseRecalibrator.scala
index 3912958a59d8d58a67b4cd615409e4a5c03ec990..c07a2a66c363fe9623ad35225e9f76eb45aa67a4 100644
--- a/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/BaseRecalibrator.scala
+++ b/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/BaseRecalibrator.scala
@@ -12,7 +12,7 @@ class BaseRecalibrator(val root: Configurable) extends org.broadinstitute.gatk.q
   memoryLimit = Option(4)
   override val defaultVmem = "8G"
 
-  if (config.contains("scattercount")) scatterCount = config("scattercount")
+  if (config.contains("scattercount")) scatterCount = config("scattercount", default = 1)
   if (config.contains("dbsnp")) knownSites :+= new File(config("dbsnp").asString)
   if (config.contains("known_sites")) knownSites :+= new File(config("known_sites").asString)
 }
diff --git a/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/GatkGeneral.scala b/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/GatkGeneral.scala
index dc7cc5e4035b9cbffdeb6db732c06cabd4d52c74..147398ac798c077da0722b2078f7ea21c666b7d1 100644
--- a/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/GatkGeneral.scala
+++ b/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/GatkGeneral.scala
@@ -19,7 +19,7 @@ trait GatkGeneral extends CommandLineGATK with BiopetJavaCommandLineFunction {
 
   if (config.contains("intervals")) intervals = config("intervals").asFileList
   if (config.contains("exclude_intervals")) excludeIntervals = config("exclude_intervals").asFileList
-  reference_sequence = config("reference")
-  gatk_key = config("gatk_key")
+  reference_sequence = config("reference", required = true)
+  if (config.contains("gatk_key")) gatk_key = config("gatk_key")
   if (config.contains("pedigree")) pedigree = config("pedigree").asFileList
 }
diff --git a/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/HaplotypeCaller.scala b/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/HaplotypeCaller.scala
index 001fb9d17b858320d22549bae321ca0b820eac12..b8f4ea4efa3ea8cfed563c2b82651c0001b794f7 100644
--- a/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/HaplotypeCaller.scala
+++ b/protected/biopet-gatk-extensions/src/main/scala/nl/lumc/sasc/biopet/extensions/gatk/HaplotypeCaller.scala
@@ -9,40 +9,40 @@ import nl.lumc.sasc.biopet.core.config.Configurable
 import org.broadinstitute.gatk.utils.variant.GATKVCFIndexType
 
 class HaplotypeCaller(val root: Configurable) extends org.broadinstitute.gatk.queue.extensions.gatk.HaplotypeCaller with GatkGeneral {
-  override def afterGraph {
-    super.afterGraph
-
-    min_mapping_quality_score = config("minMappingQualityScore", default = 20)
-    if (config.contains("scattercount")) scatterCount = config("scattercount")
-    if (config.contains("dbsnp")) this.dbsnp = config("dbsnp")
-    this.sample_ploidy = config("ploidy")
-    nct = config("threads", default = 1)
-    bamOutput = config("bamOutput")
-    memoryLimit = Option(nct.getOrElse(1) * 2)
-    if (config.contains("allSitePLs")) this.allSitePLs = config("allSitePLs")
-    if (config.contains("output_mode")) {
-      import org.broadinstitute.gatk.tools.walkers.genotyper.OutputMode._
-      config("output_mode").asString match {
-        case "EMIT_ALL_CONFIDENT_SITES" => output_mode = EMIT_ALL_CONFIDENT_SITES
-        case "EMIT_ALL_SITES"           => output_mode = EMIT_ALL_SITES
-        case "EMIT_VARIANTS_ONLY"       => output_mode = EMIT_VARIANTS_ONLY
-        case e                          => logger.warn("output mode '" + e + "' does not exist")
-      }
+  min_mapping_quality_score = config("minMappingQualityScore", default = 20)
+  scatterCount = config("scattercount", default = 1)
+  if (config.contains("dbsnp")) this.dbsnp = config("dbsnp")
+  this.sample_ploidy = config("ploidy")
+  if (config.contains("bamOutput")) bamOutput = config("bamOutput")
+  if (config.contains("allSitePLs")) allSitePLs = config("allSitePLs")
+  if (config.contains("output_mode")) {
+    import org.broadinstitute.gatk.tools.walkers.genotyper.OutputMode._
+    config("output_mode").asString match {
+      case "EMIT_ALL_CONFIDENT_SITES" => output_mode = EMIT_ALL_CONFIDENT_SITES
+      case "EMIT_ALL_SITES"           => output_mode = EMIT_ALL_SITES
+      case "EMIT_VARIANTS_ONLY"       => output_mode = EMIT_VARIANTS_ONLY
+      case e                          => logger.warn("output mode '" + e + "' does not exist")
     }
+  }
 
-    if (config("inputtype", default = "dna").asString == "rna") {
-      dontUseSoftClippedBases = config("dontusesoftclippedbases", default = true)
-      stand_call_conf = config("stand_call_conf", default = 5)
-      stand_emit_conf = config("stand_emit_conf", default = 0)
-    } else {
-      dontUseSoftClippedBases = config("dontusesoftclippedbases", default = false)
-      stand_call_conf = config("stand_call_conf", default = 5)
-      stand_emit_conf = config("stand_emit_conf", default = 0)
-    }
+  if (config("inputtype", default = "dna").asString == "rna") {
+    dontUseSoftClippedBases = config("dontusesoftclippedbases", default = true)
+    stand_call_conf = config("stand_call_conf", default = 5)
+    stand_emit_conf = config("stand_emit_conf", default = 0)
+  } else {
+    dontUseSoftClippedBases = config("dontusesoftclippedbases", default = false)
+    stand_call_conf = config("stand_call_conf", default = 5)
+    stand_emit_conf = config("stand_emit_conf", default = 0)
+  }
+
+  override def afterGraph {
+    super.afterGraph
-    if (bamOutput != null && nct.getOrElse(1) > 1) {
-      nct = Option(1)
-      logger.warn("BamOutput is on, nct/threads is forced to set on 1, this option is only for debug")
+    if (bamOutput != null && threads > 1) {
+      threads = 1
+      logger.warn("bamOutput is set, nct/threads is forced to 1; this option is only for debugging")
     }
+    nct = Some(threads)
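+    // memoryLimit is treated as a per-thread limit (2G when unset) and scaled by nct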
+    memoryLimit = Option(memoryLimit.getOrElse(2.0) * nct.getOrElse(1))
   }
 
   def useGvcf() {
diff --git a/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkPipeline.scala b/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkPipeline.scala
index 8ea6f9706b0e675b3f4fa1271c72038b1b2c3d6f..c76139133f23cf434884ad69ccc9dafc69b2cac9 100644
--- a/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkPipeline.scala
+++ b/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkPipeline.scala
@@ -36,17 +36,16 @@ class GatkPipeline(val root: Configurable) extends QScript with MultiSampleQScri
 
   var singleSampleCalling = config("single_sample_calling", default = true)
   var reference: File = config("reference", required = true)
-  var dbsnp: File = config("dbsnp")
   var useAllelesOption: Boolean = config("use_alleles_option", default = false)
   val externalGvcfs = config("external_gvcfs_files", default = Nil).asFileList
 
   def makeSample(id: String) = new Sample(id)
   class Sample(sampleId: String) extends AbstractSample(sampleId) {
     def makeLibrary(id: String) = new Library(id)
-    class Library(libraryId: String) extends AbstractLibrary(libraryId) {
+    class Library(libId: String) extends AbstractLibrary(libId) {
       val mapping = new Mapping(qscript)
       mapping.sampleId = sampleId
-      mapping.libraryId = libraryId
+      mapping.libId = libId
       mapping.outputDir = libDir + "/variantcalling/"
 
       /** Library variantcalling */
@@ -67,12 +66,12 @@ class GatkPipeline(val root: Configurable) extends QScript with MultiSampleQScri
           if (!bamFile.exists) throw new IllegalStateException("Bam in config does not exist, file: " + bamFile)
 
           if (config("bam_to_fastq", default = false).asBoolean) {
-            val samToFastq = SamToFastq(qscript, bamFile, libDir + sampleId + "-" + libraryId + ".R1.fastq",
-              libDir + sampleId + "-" + libraryId + ".R2.fastq")
+            val samToFastq = SamToFastq(qscript, bamFile, libDir + sampleId + "-" + libId + ".R1.fastq",
+              libDir + sampleId + "-" + libId + ".R2.fastq")
             samToFastq.isIntermediate = true
             qscript.add(samToFastq)
             mapping.input_R1 = samToFastq.fastqR1
-            mapping.input_R2 = samToFastq.fastqR2
+            mapping.input_R2 = Some(samToFastq.fastqR2)
             mapping.init
             mapping.biopetScript
-            addAll(mapping.functions) // Add functions of mapping to curent function pool
+            addAll(mapping.functions) // Add functions of mapping to current function pool
@@ -83,17 +82,17 @@ class GatkPipeline(val root: Configurable) extends QScript with MultiSampleQScri
             val header = inputSam.getFileHeader.getReadGroups
             for (readGroup <- inputSam.getFileHeader.getReadGroups) {
               if (readGroup.getSample != sampleId) logger.warn("Sample ID readgroup in bam file is not the same")
-              if (readGroup.getLibrary != libraryId) logger.warn("Library ID readgroup in bam file is not the same")
-              if (readGroup.getSample != sampleId || readGroup.getLibrary != libraryId) readGroupOke = false
+              if (readGroup.getLibrary != libId) logger.warn("Library ID readgroup in bam file is not the same")
+              if (readGroup.getSample != sampleId || readGroup.getLibrary != libId) readGroupOke = false
             }
             inputSam.close
 
             if (!readGroupOke) {
               if (config("correct_readgroups", default = false)) {
                 logger.info("Correcting readgroups, file:" + bamFile)
-                val aorrg = AddOrReplaceReadGroups(qscript, bamFile, new File(libDir + sampleId + "-" + libraryId + ".bam"))
-                aorrg.RGID = sampleId + "-" + libraryId
-                aorrg.RGLB = libraryId
+                val aorrg = AddOrReplaceReadGroups(qscript, bamFile, new File(libDir + sampleId + "-" + libId + ".bam"))
+                aorrg.RGID = sampleId + "-" + libId
+                aorrg.RGLB = libId
                 aorrg.RGSM = sampleId
                 aorrg.isIntermediate = true
                 qscript.add(aorrg)
@@ -106,7 +105,7 @@ class GatkPipeline(val root: Configurable) extends QScript with MultiSampleQScri
             Some(bamFile)
           }
         } else {
-          logger.error("Sample: " + sampleId + ": No R1 found for run: " + libraryId)
+          logger.error("Sample: " + sampleId + ": No R1 found for run: " + libId)
           None
         }
 
@@ -127,7 +126,7 @@ class GatkPipeline(val root: Configurable) extends QScript with MultiSampleQScri
     gatkVariantcalling.outputDir = sampleDir + "/variantcalling/"
 
     protected def addJobs(): Unit = {
-      addLibsJobs()
+      addPerLibJobs()
       gatkVariantcalling.inputBams = libraries.map(_._2.mapping.finalBamFile).toList
       gatkVariantcalling.preProcesBams = false
       if (!singleSampleCalling) {
@@ -150,10 +149,11 @@ class GatkPipeline(val root: Configurable) extends QScript with MultiSampleQScri
     override def configPath: List[String] = super.configPath ::: "multisample" :: Nil
   }
 
-  def biopetScript() {
-    addSamplesJobs
+  def biopetScript(): Unit = {
+    addSamplesJobs()
+  }
 
-    //SampleWide jobs
+  def addMultiSampleJobs(): Unit = {
     val gvcfFiles: List[File] = if (mergeGvcfs && externalGvcfs.size + samples.size > 1) {
       val newFile = outputDir + "merged.gvcf.vcf.gz"
       add(CombineGVCFs(this, externalGvcfs ++ samples.map(_._2.gatkVariantcalling.scriptOutput.gvcfFile), newFile))
diff --git a/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkVariantcalling.scala b/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkVariantcalling.scala
index 759181117116f0df027a2c98243cb5123777fa16..8bac4aaf68c33a245da877d460bc26abb9ebe564 100644
--- a/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkVariantcalling.scala
+++ b/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkVariantcalling.scala
@@ -7,7 +7,7 @@ package nl.lumc.sasc.biopet.pipelines.gatk
 
 import nl.lumc.sasc.biopet.core.{ BiopetQScript, PipelineCommand }
 import java.io.File
-import nl.lumc.sasc.biopet.tools.{ MpileupToVcf, VcfFilter, MergeAlleles }
+import nl.lumc.sasc.biopet.tools.{ VcfStats, MpileupToVcf, VcfFilter, MergeAlleles }
 import nl.lumc.sasc.biopet.core.config.Configurable
 import nl.lumc.sasc.biopet.extensions.gatk.{ AnalyzeCovariates, BaseRecalibrator, GenotypeGVCFs, HaplotypeCaller, IndelRealigner, PrintReads, RealignerTargetCreator, SelectVariants, CombineVariants, UnifiedGenotyper }
 import nl.lumc.sasc.biopet.extensions.picard.MarkDuplicates
@@ -32,9 +32,6 @@ class GatkVariantcalling(val root: Configurable) extends QScript with BiopetQScr
   @Argument(doc = "Reference", shortName = "R", required = false)
   var reference: File = config("reference", required = true)
 
-  @Argument(doc = "Dbsnp", shortName = "dbsnp", required = false)
-  var dbsnp: File = config("dbsnp")
-
   @Argument(doc = "OutputName", required = false)
   var outputName: String = _
 
@@ -53,7 +50,7 @@ class GatkVariantcalling(val root: Configurable) extends QScript with BiopetQScr
 
   def init() {
     if (outputName == null && sampleID != null) outputName = sampleID
-    else if (outputName == null) outputName = "noname"
+    else if (outputName == null) outputName = config("output_name", default = "noname")
     if (outputDir == null) throw new IllegalStateException("Missing Output directory on gatk module")
     else if (!outputDir.endsWith("/")) outputDir += "/"
 
@@ -200,6 +197,12 @@ class GatkVariantcalling(val root: Configurable) extends QScript with BiopetQScr
       val cvFinal = CombineVariants(this, mergeList.toList, outputDir + outputName + ".final.vcf.gz")
       cvFinal.genotypemergeoption = org.broadinstitute.gatk.utils.variant.GATKVariantContextUtils.GenotypeMergeType.UNSORTED
       add(cvFinal)
+
+      val vcfStats = new VcfStats(this)
+      vcfStats.input = cvFinal.out
+      vcfStats.setOutputDir(outputDir + File.separator + "vcfstats")
+      add(vcfStats)
+
       scriptOutput.finalVcfFile = cvFinal.out
     }
   }
diff --git a/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkVcfSampleCompare.scala b/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkVcfSampleCompare.scala
deleted file mode 100644
index c3f5add4b52092e607e1c4975745e7f58793dd08..0000000000000000000000000000000000000000
--- a/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/GatkVcfSampleCompare.scala
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Due to the license issue with GATK, this part of Biopet can only be used inside the
- * LUMC. Please refer to https://git.lumc.nl/biopet/biopet/wikis/home for instructions
- * on how to use this protected part of biopet or contact us at sasc@lumc.nl
- */
-package nl.lumc.sasc.biopet.pipelines.gatk
-
-import nl.lumc.sasc.biopet.core.{ BiopetQScript, PipelineCommand }
-import java.io.File
-import nl.lumc.sasc.biopet.core.config.Configurable
-import nl.lumc.sasc.biopet.extensions.gatk.CombineVariants
-import nl.lumc.sasc.biopet.extensions.gatk.SelectVariants
-import nl.lumc.sasc.biopet.extensions.gatk.VariantEval
-import org.broadinstitute.gatk.queue.QScript
-import org.broadinstitute.gatk.utils.commandline.{ Input, Argument }
-
-class GatkVcfSampleCompare(val root: Configurable) extends QScript with BiopetQScript {
-  def this() = this(null)
-
-  @Input(doc = "Sample vcf file(s)", shortName = "V")
-  var vcfFiles: List[File] = _
-
-  @Argument(doc = "Reference", shortName = "R", required = false)
-  var reference: File = config("reference")
-
-  @Argument(doc = "Target bed", shortName = "targetBed", required = false)
-  var targetBed: List[File] = Nil
-
-  @Argument(doc = "Samples", shortName = "sample", required = false)
-  var samples: List[String] = Nil
-
-  var vcfFile: File = _
-  var sampleVcfs: Map[String, File] = Map()
-  def generalSampleDir = outputDir + "samples/"
-
-  def init() {
-    if (config.contains("target_bed"))
-      for (bed <- config("target_bed").asList)
-        targetBed :+= bed.toString
-    if (outputDir == null) throw new IllegalStateException("Missing Output directory on gatk module")
-    else if (!outputDir.endsWith("/")) outputDir += "/"
-  }
-
-  def biopetScript() {
-    vcfFile = if (vcfFiles.size > 1) {
-      val combineVariants = CombineVariants(this, vcfFiles, outputDir + "merge.vcf")
-      add(combineVariants)
-      combineVariants.out
-    } else vcfFiles.head
-
-    for (sample <- samples) {
-      sampleVcfs += (sample -> new File(generalSampleDir + sample + File.separator + sample + ".vcf"))
-      val selectVariants = SelectVariants(this, vcfFile, sampleVcfs(sample))
-      selectVariants.sample_name = Seq(sample)
-      selectVariants.excludeNonVariants = true
-      add(selectVariants)
-    }
-
-    val sampleCompareMetrics = new SampleCompareMetrics(this)
-    sampleCompareMetrics.samples = samples
-    sampleCompareMetrics.sampleDir = generalSampleDir
-    sampleCompareMetrics.snpRelFile = outputDir + "compare.snp.rel.tsv"
-    sampleCompareMetrics.snpAbsFile = outputDir + "compare.snp.abs.tsv"
-    sampleCompareMetrics.indelRelFile = outputDir + "compare.indel.rel.tsv"
-    sampleCompareMetrics.indelAbsFile = outputDir + "compare.indel.abs.tsv"
-    sampleCompareMetrics.totalFile = outputDir + "total.tsv"
-
-    for ((sample, sampleVcf) <- sampleVcfs) {
-      val sampleDir = generalSampleDir + sample + File.separator
-      for ((compareSample, compareSampleVcf) <- sampleVcfs) {
-        val variantEval = VariantEval(this,
-          sampleVcf,
-          compareSampleVcf,
-          new File(sampleDir + sample + "-" + compareSample + ".eval.txt"),
-          Seq("VariantType", "CompRod"),
-          Seq("CompOverlap")
-        )
-        if (targetBed != null) variantEval.L = targetBed
-        add(variantEval)
-        sampleCompareMetrics.deps ::= variantEval.out
-      }
-    }
-    add(sampleCompareMetrics)
-  }
-}
-
-object GatkVcfSampleCompare extends PipelineCommand
diff --git a/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/SampleCompareMetrics.scala b/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/SampleCompareMetrics.scala
deleted file mode 100644
index 861455fe4d886219813409023d022ea096f413c9..0000000000000000000000000000000000000000
--- a/protected/biopet-gatk-pipelines/src/main/scala/nl/lumc/sasc/biopet/pipelines/gatk/SampleCompareMetrics.scala
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Due to the license issue with GATK, this part of Biopet can only be used inside the
- * LUMC. Please refer to https://git.lumc.nl/biopet/biopet/wikis/home for instructions
- * on how to use this protected part of biopet or contact us at sasc@lumc.nl
- */
-package nl.lumc.sasc.biopet.pipelines.gatk
-
-import java.io.File
-import java.io.PrintWriter
-import nl.lumc.sasc.biopet.core.BiopetJavaCommandLineFunction
-import nl.lumc.sasc.biopet.core.config.Configurable
-import org.broadinstitute.gatk.utils.R.RScriptExecutor
-import org.broadinstitute.gatk.utils.commandline.{ Output, Argument }
-import scala.io.Source
-import org.broadinstitute.gatk.utils.R.{ RScriptLibrary, RScriptExecutor }
-import org.broadinstitute.gatk.utils.io.Resource
-import scala.collection.mutable.Map
-import scala.math._
-
-class SampleCompareMetrics(val root: Configurable) extends BiopetJavaCommandLineFunction {
-  javaMainClass = getClass.getName
-
-  @Argument(doc = "Sample Dir", shortName = "sampleDir", required = true)
-  var sampleDir: String = _
-
-  @Argument(doc = "Samples", shortName = "sample", required = true)
-  var samples: List[String] = Nil
-
-  @Argument(doc = "File sufix", shortName = "sufix", required = false)
-  var fileSufix: String = _
-
-  @Output(doc = "snpRelFile", shortName = "snpRelFile", required = true)
-  var snpRelFile: File = _
-
-  @Output(doc = "snpAbsFile", shortName = "snpAbsFile", required = true)
-  var snpAbsFile: File = _
-
-  @Output(doc = "indelRelFile", shortName = "indelRelFile", required = true)
-  var indelRelFile: File = _
-
-  @Output(doc = "indelAbsFile", shortName = "indelAbsFile", required = true)
-  var indelAbsFile: File = _
-
-  @Output(doc = "totalFile", shortName = "totalFile", required = true)
-  var totalFile: File = _
-
-  override val defaultVmem = "8G"
-  memoryLimit = Option(4.0)
-
-  override def commandLine = super.commandLine +
-    required("-sampleDir", sampleDir) +
-    repeat("-sample", samples) +
-    optional("-fileSufix", fileSufix) +
-    required("-snpRelFile", snpRelFile) +
-    required("-snpAbsFile", snpAbsFile) +
-    required("-indelRelFile", indelRelFile) +
-    required("-indelAbsFile", indelAbsFile) +
-    required("-totalFile", totalFile)
-}
-
-object SampleCompareMetrics {
-  var sampleDir: String = _
-  var samples: List[String] = Nil
-  var fileSufix: String = ".eval.txt"
-  var snpRelFile: File = _
-  var snpAbsFile: File = _
-  var indelRelFile: File = _
-  var indelAbsFile: File = _
-  var totalFile: File = _
-  /**
-   * @param args the command line arguments
-   */
-  def main(args: Array[String]): Unit = {
-
-    for (t <- 0 until args.size) {
-      args(t) match {
-        case "-sample"       => samples +:= args(t + 1)
-        case "-sampleDir"    => sampleDir = args(t + 1)
-        case "-fileSufix"    => fileSufix = args(t + 1)
-        case "-snpRelFile"   => snpRelFile = new File(args(t + 1))
-        case "-snpAbsFile"   => snpAbsFile = new File(args(t + 1))
-        case "-indelRelFile" => indelRelFile = new File(args(t + 1))
-        case "-indelAbsFile" => indelAbsFile = new File(args(t + 1))
-        case "-totalFile"    => totalFile = new File(args(t + 1))
-        case _               =>
-      }
-    }
-    if (sampleDir == null) throw new IllegalStateException("No sampleDir, use -sampleDir")
-    else if (!sampleDir.endsWith("/")) sampleDir += "/"
-
-    val regex = """\W+""".r
-    val snpsOverlap: Map[(String, String), Int] = Map()
-    val indelsOverlap: Map[(String, String), Int] = Map()
-    val snpsTotal: Map[String, Int] = Map()
-    val indelsTotal: Map[String, Int] = Map()
-    for (sample1 <- samples; sample2 <- samples) {
-      val reader = Source.fromFile(new File(sampleDir + sample1 + "/" + sample1 + "-" + sample2 + fileSufix))
-      for (line <- reader.getLines) {
-        regex.split(line) match {
-          case Array(_, _, _, varType, all, novel, overlap, rate, _*) => {
-            varType match {
-              case "SNP" => {
-                snpsOverlap += (sample1, sample2) -> overlap.toInt
-                snpsTotal += sample1 -> all.toInt
-              }
-              case "INDEL" => {
-                indelsOverlap += (sample1, sample2) -> overlap.toInt
-                indelsTotal += sample1 -> all.toInt
-              }
-              case _ =>
-            }
-          }
-          case _ =>
-        }
-      }
-      reader.close()
-    }
-
-    val snpRelWritter = new PrintWriter(snpRelFile)
-    val snpAbsWritter = new PrintWriter(snpAbsFile)
-    val indelRelWritter = new PrintWriter(indelRelFile)
-    val indelAbsWritter = new PrintWriter(indelAbsFile)
-
-    val allWritters = List(snpRelWritter, snpAbsWritter, indelRelWritter, indelAbsWritter)
-    for (writter <- allWritters) writter.println(samples.mkString("\t", "\t", ""))
-    for (sample1 <- samples) {
-      for (writter <- allWritters) writter.print(sample1)
-      for (sample2 <- samples) {
-        snpRelWritter.print("\t" + (round((snpsOverlap(sample1, sample2).toDouble / snpsTotal(sample1) * 10000.0)) / 10000.0))
-        snpAbsWritter.print("\t" + snpsOverlap(sample1, sample2))
-        indelRelWritter.print("\t" + (round((indelsOverlap(sample1, sample2).toDouble / indelsTotal(sample1) * 10000.0)) / 10000.0))
-        indelAbsWritter.print("\t" + indelsOverlap(sample1, sample2))
-      }
-      for (writter <- allWritters) writter.println()
-    }
-    for (writter <- allWritters) writter.close()
-
-    val totalWritter = new PrintWriter(totalFile)
-    totalWritter.println("Sample\tSNPs\tIndels")
-    for (sample <- samples)
-      totalWritter.println(sample + "\t" + snpsTotal(sample) + "\t" + indelsTotal(sample))
-    totalWritter.close()
-
-    def plot(file: File) {
-      val executor = new RScriptExecutor
-      executor.addScript(new Resource("plotHeatmap.R", getClass))
-      executor.addArgs(file, file.getAbsolutePath.stripSuffix(".tsv") + ".png", file.getAbsolutePath.stripSuffix(".tsv") + ".clustering.png")
-      executor.exec()
-    }
-    plot(snpRelFile)
-    plot(indelRelFile)
-  }
-}
\ No newline at end of file
diff --git a/protected/biopet-protected-package/src/main/scala/nl/lumc/sasc/biopet/core/BiopetExecutableProtected.scala b/protected/biopet-protected-package/src/main/scala/nl/lumc/sasc/biopet/core/BiopetExecutableProtected.scala
index 9457fd36cfab986c5fdc9d5cbc29836ced4e0962..902e292fca017947affb287249ccff764e943100 100644
--- a/protected/biopet-protected-package/src/main/scala/nl/lumc/sasc/biopet/core/BiopetExecutableProtected.scala
+++ b/protected/biopet-protected-package/src/main/scala/nl/lumc/sasc/biopet/core/BiopetExecutableProtected.scala
@@ -12,7 +12,6 @@ object BiopetExecutableProtected extends BiopetExecutable {
     nl.lumc.sasc.biopet.pipelines.gatk.GatkVariantcalling,
     nl.lumc.sasc.biopet.pipelines.gatk.GatkPipeline,
     nl.lumc.sasc.biopet.pipelines.gatk.GatkVariantRecalibration,
-    nl.lumc.sasc.biopet.pipelines.gatk.GatkVcfSampleCompare,
     nl.lumc.sasc.biopet.pipelines.basty.Basty)
 
   def tools = BiopetExecutablePublic.tools
diff --git a/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/pipelines/gatk/plotHeatmap.R b/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/pipelines/gatk/plotHeatmap.R
deleted file mode 100644
index 4158db708d58c8cc19b535dcfe871c626fa51ad6..0000000000000000000000000000000000000000
--- a/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/pipelines/gatk/plotHeatmap.R
+++ /dev/null
@@ -1,24 +0,0 @@
-library('gplots')
-
-args <- commandArgs(TRUE)
-inputArg <- args[1]
-outputArg <- args[2]
-outputArgClustering <- args[3]
-
-col <- heat.colors(250)
-col[250] <- "#00FF00"
-
-heat<-read.table(inputArg, header = 1, sep= '\t')
-rownames(heat) <- heat[,1]
-heat<- heat[,-1]
-
-heat<- as.matrix(heat)
-
-png(file = outputArg, width = 1000, height = 1000)
-heatmap.2(heat, trace = 'none', col = col, Colv=NA, Rowv=NA, dendrogram="none")
-dev.off()
-
-
-png(file = outputArgClustering, width = 1000, height = 1000)
-heatmap.2(heat, trace = 'none', col = col, Colv="Rowv", dendrogram="row")
-dev.off()
diff --git a/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/scripts/sync_paired_end_reads.py b/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/scripts/sync_paired_end_reads.py
deleted file mode 100644
index be4ab0035b7254cfbfd70b18c272cca1fd8c47dd..0000000000000000000000000000000000000000
--- a/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/scripts/sync_paired_end_reads.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-#
-# Biopet is built on top of GATK Queue for building bioinformatic
-# pipelines. It is mainly intended to support LUMC SHARK cluster which is running
-# SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
-# should also be able to execute Biopet tools and pipelines.
-#
-# Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
-#
-# Contact us at: sasc@lumc.nl
-#
-# A dual licensing mode is applied. The source code within this project that are
-# not part of GATK Queue is freely available for non-commercial use under an AGPL
-# license; For commercial users or users who do not want to follow the AGPL
-# license, please contact us to obtain a separate license.
-#
-
-"""
-(Re-)sync two filtered paired end FASTQ files.
-
-Given two filtered paired end read files and one of the original read files,
-re-sync the filtered reads by filtering out anything that is only present in
-one of the two files.
-
-Usage:
-  {command} <orig.fq> <reads_1.fq> <reads_2.fq> \\
-      <reads_1.synced.fq> <reads_2.synced.fq>
-
-The synced reads are written to disk as <reads_1.synced.fq> and
-<reads_2.synced.fq>. Afterwards some counts are printed.
-
-
-Both Illumina old-style and new-style paired-end header lines are supported.
-
-The original read file is used to speed up processing: it contains all
-possible reads from both edited reads (in all files in the same order) so it
-can process all files line by line, not having to read a single file in
-memory. Some ideas were taken from [1].
-
-[1] https://gist.github.com/588841/
-
-2011-11-03, Martijn Vermaat <m.vermaat.hg@lumc.nl>
-"""
-
-
-import sys
-import re
-
-
-def sync_paired_end_reads(original, reads_a, reads_b, synced_a, synced_b):
-    """
-    Filter out reads from two paired end read files that are not present in
-    both of them. Do this in a reasonable amount of time by using a file
-    containing all of the reads for one of the paired ends.
-
-    All arguments are open file handles.
-
-    @arg original: File containing all original reads for one of the paired
-                   ends.
-    @arg reads_a:  First from paired end read files.
-    @arg reads_b:  Second from paired end read files.
-    @arg synced_a: Filtered reads from first paired end read file.
-    @arg synced_b: Filtered reads from second paired end read file.
-
-    @return:       Triple (filtered_a, filtered_b, kept) containing counts
-                   of the number of reads filtered from both input files and
-                   the total number of reads kept in the synced results.
-
-    @todo: Print warnings if obvious things are not right (a or b still has
-           lines after original is processed).
-    """
-    # This matches 1, 2, or 3 preceded by / _ or whitespace. Its rightmost
-    # match in a header line is used to identify the read pair.
-    sep = re.compile('[\s_/][123]')
-
-    def next_record(fh):
-        return [fh.readline().strip() for i in range(4)]
-
-    def head(record):
-        return sep.split(record[0])[:-1]
-
-    headers = (sep.split(x.strip())[:-1] for i, x in enumerate(original)
-               if not (i % 4))
-
-    filtered_a = filtered_b = kept = 0
-
-    a, b = next_record(reads_a), next_record(reads_b)
-
-    for header in headers:
-        if header == head(a) and head(b) != header:
-            a = next_record(reads_a)
-            filtered_a += 1
-
-        if header == head(b) and head(a) != header:
-            b = next_record(reads_b)
-            filtered_b += 1
-
-        if header == head(a) == head(b):
-            print >>synced_a, '\n'.join(a)
-            print >>synced_b, '\n'.join(b)
-            a, b = next_record(reads_a), next_record(reads_b)
-            kept += 1
-
-    return filtered_a, filtered_b, kept
-
-
-if __name__ == '__main__':
-    if len(sys.argv) < 6:
-        sys.stderr.write(__doc__.split('\n\n\n')[0].strip().format(
-            command=sys.argv[0]) + '\n')
-        sys.exit(1)
-    try:
-        original = open(sys.argv[1], 'r')
-        reads_a = open(sys.argv[2], 'r')
-        reads_b = open(sys.argv[3], 'r')
-        synced_a = open(sys.argv[4], 'w')
-        synced_b = open(sys.argv[5], 'w')
-        filtered_a, filtered_b, kept = \
-                    sync_paired_end_reads(original, reads_a, reads_b,
-                                          synced_a, synced_b)
-        print 'Filtered %i reads from first read file.' % filtered_a
-        print 'Filtered %i reads from second read file.' % filtered_b
-        print 'Synced read files contain %i reads.' % kept
-    except IOError as (_, message):
-        sys.stderr.write('Error: %s\n' % message)
-        sys.exit(1)
diff --git a/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/tools/plotHeatmap.R b/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/tools/plotHeatmap.R
new file mode 100644
index 0000000000000000000000000000000000000000..7f7237e90f6593e3d6cf110da005cd89c154d466
--- /dev/null
+++ b/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/tools/plotHeatmap.R
@@ -0,0 +1,35 @@
+library('gplots')
+library('RColorBrewer')
+
+args <- commandArgs(TRUE)
+inputArg <- args[1]
+outputArg <- args[2]
+outputArgClustering <- args[3]
+outputArgDendrogram <- args[4]
+
+
+heat<-read.table(inputArg, header = 1, sep= '\t', stringsAsFactors = F)
+#heat[heat==1] <- NA
+rownames(heat) <- heat[,1]
+heat<- heat[,-1]
+heat<- as.matrix(heat)
+
+colNumber <- 50
+col <- rev(colorRampPalette(brewer.pal(11, "Spectral"))(colNumber))
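+# pad the palette so that values above the sub-1 range all take the top ramp
+# colour; the final entry (the identity diagonal at 1.0) is recoloured green below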
+for (i in (colNumber+1):(colNumber+round((dist(range(heat)) - dist(range(heat[heat < 1]))) / dist(range(heat[heat < 1])) * colNumber))) {
+    col[i] <- col[colNumber]
+}
+col[length(col)] <- "#00FF00"
+
+png(file = outputArg, width = 1200, height = 1200)
+heatmap.2(heat, trace = 'none', col = col, Colv=NA, Rowv=NA, dendrogram="none", margins = c(12, 12), na.color="#00FF00")
+dev.off()
+
+hc <- hclust(d = dist(heat))
+png(file = outputArgDendrogram, width = 1200, height = 1200)
+plot(as.dendrogram(hc), horiz=TRUE, asp=0.02)
+dev.off()
+
+png(file = outputArgClustering, width = 1200, height = 1200)
+heatmap.2(heat, trace = 'none', col = col, Colv="Rowv", dendrogram="row",margins = c(12, 12), na.color="#00FF00")
+dev.off()
diff --git a/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/tools/plotXY.R b/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/tools/plotXY.R
new file mode 100644
index 0000000000000000000000000000000000000000..63fd7b03262d94094a9f151b22cca812f10cee1f
--- /dev/null
+++ b/public/biopet-framework/src/main/resources/nl/lumc/sasc/biopet/tools/plotXY.R
@@ -0,0 +1,20 @@
+library('ggplot2')
+library('reshape2')
+
+args <- commandArgs(TRUE)
+inputArg <- args[1]
+outputArg <- args[2]
+
+tsv<-read.table(inputArg, header = 1, sep= '\t', stringsAsFactors = F)
+
+data <- melt(tsv)
+
+data$X <- as.numeric(data$X)
+data <- na.omit(data)
+data <- data[data$value > 0,]
+
+print("Starting to plot")
+png(file = outputArg, width = 1500, height = 1500)
+ggplot(data, aes(x=X, y=value, color=variable, group=variable)) + geom_line()
+dev.off()
+print("plot done")
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/BiopetCommandLineFunctionTrait.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/BiopetCommandLineFunctionTrait.scala
index 57c63593526f20ba6d94cd169d93af15b54a6328..4a5b25f7a5b816fe32ad39897a334a6e13fe1eda 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/BiopetCommandLineFunctionTrait.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/BiopetCommandLineFunctionTrait.scala
@@ -15,13 +15,11 @@
  */
 package nl.lumc.sasc.biopet.core
 
-//import java.io.BufferedInputStream
 import java.io.File
 import nl.lumc.sasc.biopet.core.config.Configurable
 import org.broadinstitute.gatk.queue.QException
 import org.broadinstitute.gatk.queue.function.CommandLineFunction
 import org.broadinstitute.gatk.utils.commandline.{ Input, Argument }
-//import scala.io.Source
 import scala.sys.process.{ Process, ProcessLogger }
 import scala.util.matching.Regex
 import java.io.FileInputStream
@@ -38,7 +36,7 @@ trait BiopetCommandLineFunctionTrait extends CommandLineFunction with Configurab
   val defaultThreads = 1
 
   @Argument(doc = "Vmem", required = false)
-  var vmem: String = _
+  var vmem: Option[String] = None
   val defaultVmem: String = ""
 
   @Argument(doc = "Executable", required = false)
@@ -53,17 +51,17 @@ trait BiopetCommandLineFunctionTrait extends CommandLineFunction with Configurab
   override def freezeFieldValues() {
     checkExecutable
     afterGraph
-    jobOutputFile = new File(firstOutput.getParent + "/." + firstOutput.getName + "." + configName + ".out")
+    if (jobOutputFile == null) jobOutputFile = new File(firstOutput.getParent + "/." + firstOutput.getName + "." + configName + ".out")
 
     if (threads == 0) threads = getThreads(defaultThreads)
     if (threads > 1) nCoresRequest = Option(threads)
 
-    if (vmem == null) {
+    if (vmem.isEmpty) {
       vmem = config("vmem")
-      if (vmem == null && !defaultVmem.isEmpty) vmem = defaultVmem
+      if (vmem.isEmpty && defaultVmem.nonEmpty) vmem = Some(defaultVmem)
     }
-    if (vmem != null) jobResourceRequests :+= "h_vmem=" + vmem
-    jobName = configName + ":" + firstOutput.getName
+    if (vmem.isDefined) jobResourceRequests :+= "h_vmem=" + vmem.get
+    jobName = configName + ":" + (if (firstOutput != null) firstOutput.getName else jobOutputFile)
 
     super.freezeFieldValues()
   }
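Several extensions below make the same move as `vmem` here: nullable `String` config fields become `Option[String]` (`sam_RG` in Bowtie, `qualityType` in Sickle, several single-letter BwaMem flags). A minimal standalone sketch of the idea, with hypothetical names, assuming only that Queue's `optional(...)` skips undefined values:
~~~
// Hypothetical sketch (not Biopet code): a missing config key becomes None
// instead of null, so rendering a flag needs no null checks and absent
// options simply disappear from the command line.
case class Flag(name: String, value: Option[String]) {
  def render: String = value.map(v => name + " " + v).getOrElse("")
}

object FlagDemo extends App {
  println(Flag("--sam-RG", Some("ID:run1")).render) // --sam-RG ID:run1
  println(Flag("--sam-RG", None).render)            // empty: option omitted
}
~~~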
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/BiopetQScript.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/BiopetQScript.scala
index 374ba068fb576e26b7f1fa33eaf1896c3ff36f63..1a3f565880dc3a2bd5450efd86cfa188e647eef4 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/BiopetQScript.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/BiopetQScript.scala
@@ -17,7 +17,7 @@ package nl.lumc.sasc.biopet.core
 
 import java.io.File
 import java.io.PrintWriter
-import nl.lumc.sasc.biopet.core.config.{ Config, Configurable }
+import nl.lumc.sasc.biopet.core.config.{ ConfigValueIndex, Config, Configurable }
 import org.broadinstitute.gatk.utils.commandline.Argument
 import org.broadinstitute.gatk.queue.QSettings
 import org.broadinstitute.gatk.queue.function.QFunction
@@ -29,7 +29,14 @@ trait BiopetQScript extends Configurable with GatkLogging {
   @Argument(doc = "JSON config file(s)", fullName = "config_file", shortName = "config", required = false)
   val configfiles: List[File] = Nil
 
-  var outputDir: String = _
+  var outputDir: String = {
+    val temp = Config.getValueFromMap(Config.global.map, ConfigValueIndex(this.configName, configPath, "output_dir"))
+    if (temp.isEmpty) throw new IllegalArgumentException("No output_dir defined in config")
+    else {
+      val t = temp.get.value.toString
+      if (!t.endsWith("/")) t + "/" else t
+    }
+  }
 
   @Argument(doc = "Disable all scatters", shortName = "DSC", required = false)
   var disableScatter: Boolean = false
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/MultiSampleQScript.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/MultiSampleQScript.scala
index ad6dd527ca3306a52c5708567af6bba82396e85c..eb76502c8593a9e970ea2c95b9152569dfffb389 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/MultiSampleQScript.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/core/MultiSampleQScript.scala
@@ -26,7 +26,7 @@ import org.broadinstitute.gatk.utils.commandline.{ Argument }
  */
 trait MultiSampleQScript extends BiopetQScript {
   @Argument(doc = "Only Sample", shortName = "sample", required = false)
-  val onlySample: List[String] = Nil
+  private val onlySamples: List[String] = Nil
 
   require(Config.global.map.contains("samples"), "No Samples found in config")
 
@@ -40,26 +40,26 @@ trait MultiSampleQScript extends BiopetQScript {
 
     /**
      * Library class with basic functions build in
-     * @param libraryId
+     * @param libId
      */
-    abstract class AbstractLibrary(val libraryId: String) {
+    abstract class AbstractLibrary(val libId: String) {
       /** Overrules config of qscript with default sample and default library */
-      val config = new ConfigFunctions(defaultSample = sampleId, defaultLibrary = libraryId)
+      val config = new ConfigFunctions(defaultSample = sampleId, defaultLibrary = libId)
 
       /** Adds the library jobs */
       final def addAndTrackJobs(): Unit = {
         currentSample = Some(sampleId)
-        currentLib = Some(libraryId)
+        currentLib = Some(libId)
         addJobs()
         currentLib = None
         currentSample = None
       }
 
       /** Creates a library file with given suffix */
-      def createFile(suffix: String): File = new File(libDir, sampleId + "-" + libraryId + suffix)
+      def createFile(suffix: String): File = new File(libDir, sampleId + "-" + libId + suffix)
 
       /** Returns library directory */
-      def libDir = sampleDir + "lib_" + libraryId + File.separator
+      def libDir = sampleDir + "lib_" + libId + File.separator
 
       /** Function that add library jobs */
       protected def addJobs()
@@ -94,8 +94,8 @@ trait MultiSampleQScript extends BiopetQScript {
     protected def addJobs()
 
     /** function add all libraries in one call */
-    protected final def addLibsJobs(): Unit = {
-      for ((libraryId, library) <- libraries) {
+    protected final def addPerLibJobs(): Unit = {
+      for ((libId, library) <- libraries) {
         library.addAndTrackJobs()
       }
     }
@@ -108,7 +108,7 @@ trait MultiSampleQScript extends BiopetQScript {
     def createFile(suffix: String) = new File(sampleDir, sampleId + suffix)
 
     /** Returns sample directory */
-    def sampleDir = outputDir + "samples" + File.pathSeparator + sampleId + File.pathSeparator
+    def sampleDir = outputDir + "samples" + File.separator + sampleId + File.separator
   }
 
   /** Sample type, need implementation in pipeline */
@@ -125,17 +125,24 @@ trait MultiSampleQScript extends BiopetQScript {
   val samples: Map[String, Sample] = sampleIds.map(id => id -> makeSample(id)).toMap
 
   /** Returns a list of all sampleIDs */
-  protected def sampleIds: Set[String] = if (onlySample != Nil) onlySample.toSet else {
-    ConfigUtils.any2map(Config.global.map("samples")).keySet
-  }
+  protected def sampleIds: Set[String] = ConfigUtils.any2map(Config.global.map("samples")).keySet
 
   /** Runs addAndTrackJobs method for each sample */
   final def addSamplesJobs() {
-    for ((sampleId, sample) <- samples) {
-      sample.addAndTrackJobs()
-    }
+    if (onlySamples.isEmpty) {
+      samples.foreach { case (sampleId, sample) => sample.addAndTrackJobs() }
+      addMultiSampleJobs()
+    } else onlySamples.foreach(sampleId => samples.get(sampleId) match {
+      case Some(sample) => sample.addAndTrackJobs()
+      case None         => logger.warn("sampleId '" + sampleId + "' not found")
+    })
   }
 
+  /**
+   * Method where the multisample jobs should be added; this will only be executed when the -sample argument is not given
+   */
+  def addMultiSampleJobs()
+
   /** Stores sample state */
   private var currentSample: Option[String] = None
 
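The heart of this refactor is the new `addMultiSampleJobs` hook: `addSamplesJobs()` is now a template method that first adds the per-sample jobs (or only the samples named with `-sample`) and then the cross-sample jobs. A simplified standalone sketch of the control flow (hypothetical names; the real trait also tracks state and wires config):
~~~
// Standalone sketch of the template-method split introduced above.
trait MultiSampleLike {
  def sampleIds: Set[String]
  def addSampleJobs(sampleId: String): Unit // per-sample work
  def addMultiSampleJobs(): Unit            // cross-sample work, e.g. joint calling

  final def addSamplesJobs(onlySamples: List[String] = Nil): Unit = {
    if (onlySamples.isEmpty) {
      sampleIds.foreach(addSampleJobs)
      addMultiSampleJobs() // skipped when a -sample filter is given
    } else onlySamples.foreach(addSampleJobs)
  }
}

object Demo extends MultiSampleLike with App {
  def sampleIds = Set("sample1", "sample2")
  def addSampleJobs(id: String) = println("per-sample jobs for " + id)
  def addMultiSampleJobs() = println("multisample jobs")
  addSamplesJobs()
}
~~~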
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Bowtie.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Bowtie.scala
index 7f670a44468ee63230f6df282b8027057ae9a08a..e192a845be6db96d2a754d2d2ff6a5e393d136ca 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Bowtie.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Bowtie.scala
@@ -43,7 +43,7 @@ class Bowtie(val root: Configurable) extends BiopetCommandLineFunction {
   override val defaultThreads = 8
 
   var sam: Boolean = config("sam", default = true)
-  var sam_RG: String = config("sam-RG")
+  var sam_RG: Option[String] = config("sam-RG")
   var seedlen: Option[Int] = config("seedlen")
   var seedmms: Option[Int] = config("seedmms")
   var k: Option[Int] = config("k")
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Cutadapt.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Cutadapt.scala
index ba145e2aa8fad1c942c0104a494cf5b43ec1b45c..e7a5a319ead2bc797713479de80ff949373bc8ff 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Cutadapt.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Cutadapt.scala
@@ -43,8 +43,8 @@ class Cutadapt(val root: Configurable) extends BiopetCommandLineFunction {
   if (config.contains("front")) for (adapter <- config("front").asList) opt_front += adapter.toString
 
   var opt_discard: Boolean = config("discard", default = false)
-  var opt_minimum_length: String = config("minimum_length", 1)
-  var opt_maximum_length: String = config("maximum_length")
+  var opt_minimum_length: Option[Int] = config("minimum_length", 1)
+  var opt_maximum_length: Option[Int] = config("maximum_length")
 
   def cmdLine = required(executable) +
     // options
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/PythonCommandLineFunction.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/PythonCommandLineFunction.scala
index 68219fd22bbf0a481ba9fa7fe552bc254bf71ef4..ebfaac812bbedec10a14d0daad609d54469f9c03 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/PythonCommandLineFunction.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/PythonCommandLineFunction.scala
@@ -28,7 +28,14 @@ trait PythonCommandLineFunction extends BiopetCommandLineFunction {
   executable = config("exe", default = "python", submodule = "python")
 
   protected var python_script_name: String = _
-  def setPythonScript(script: String) { setPythonScript(script, "") }
+  def setPythonScript(script: String) {
+    python_script = new File(script)
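+    // fall back to the two-argument overload (which points at the copy of the
+    // script shipped under .queue/tmp) when the given path does not exist on disk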
+    if (!python_script.exists()) {
+      setPythonScript(script, "")
+    } else {
+      python_script_name = script
+    }
+  }
   def setPythonScript(script: String, subpackage: String) {
     python_script_name = script
     python_script = new File(".queue/tmp/" + subpackage + python_script_name)
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Raxml.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Raxml.scala
index f735823718bd87ca5765c925d167143c08494c72..4f6236179755659b3b42d74cef6c722713fbf4c9 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Raxml.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Raxml.scala
@@ -60,11 +60,11 @@ class Raxml(val root: Configurable) extends BiopetCommandLineFunction {
   private var out: List[File] = Nil
 
   var executableNonThreads: String = config("exe", default = "raxmlHPC")
-  var executableThreads: String = config("exe_pthreads")
+  var executableThreads: Option[String] = config("exe_pthreads")
 
   override def afterGraph {
     if (threads == 0) threads = getThreads(defaultThreads)
-    executable = if (threads > 1 && executableThreads != null) executableThreads else executableNonThreads
+    executable = if (threads > 1 && executableThreads.isDefined) executableThreads.get else executableNonThreads
     super.afterGraph
     out +:= getInfoFile
     f match {
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/RunGubbins.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/RunGubbins.scala
index 6c8892c9bc66f3ce3cc6997032cd11700d7535d0..e732c1023de5f713f4c21ed366da68eddc825285 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/RunGubbins.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/RunGubbins.scala
@@ -24,7 +24,7 @@ import org.broadinstitute.gatk.utils.commandline.{ Argument, Input, Output }
 class RunGubbins(val root: Configurable) extends BiopetCommandLineFunction {
 
   @Input(doc = "Contaminants", required = false)
-  var startingTree: File = config("starting_tree")
+  var startingTree: Option[File] = config("starting_tree")
 
   @Input(doc = "Fasta file", shortName = "FQ")
   var fastafile: File = _
@@ -36,21 +36,21 @@ class RunGubbins(val root: Configurable) extends BiopetCommandLineFunction {
   var outputDirectory: String = _
 
   executable = config("exe", default = "run_gubbins.py")
-  var outgroup: String = config("outgroup")
-  var filterPercentage: String = config("filter_percentage")
-  var treeBuilder: String = config("tree_builder")
+  var outgroup: Option[String] = config("outgroup")
+  var filterPercentage: Option[String] = config("filter_percentage")
+  var treeBuilder: Option[String] = config("tree_builder")
   var iterations: Option[Int] = config("iterations")
   var minSnps: Option[Int] = config("min_snps")
-  var convergeMethod: String = config("converge_method")
+  var convergeMethod: Option[String] = config("converge_method")
   var useTimeStamp: Boolean = config("use_time_stamp", default = false)
-  var prefix: String = config("prefix")
+  var prefix: Option[String] = config("prefix")
   var verbose: Boolean = config("verbose", default = false)
   var noCleanup: Boolean = config("no_cleanup", default = false)
 
   override def afterGraph: Unit = {
     super.afterGraph
     jobLocalDir = new File(outputDirectory)
-    if (prefix == null) prefix = fastafile.getName
+    if (prefix.isEmpty) prefix = Some(fastafile.getName)
     val out: List[String] = List(".recombination_predictions.embl",
       ".recombination_predictions.gff",
       ".branch_base_reconstruction.embl",
@@ -59,7 +59,7 @@ class RunGubbins(val root: Configurable) extends BiopetCommandLineFunction {
       ".filtered_polymorphic_sites.fasta",
       ".filtered_polymorphic_sites.phylip",
       ".final_tree.tre")
-    for (t <- out) outputFiles ::= new File(outputDirectory + File.separator + prefix + t)
+    for (t <- out) outputFiles ::= new File(outputDirectory + File.separator + prefix.getOrElse("gubbins") + t)
   }
 
   def cmdLine = required("cd", outputDirectory) + " && " + required(executable) +
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Sickle.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Sickle.scala
index 5c068b9eed4a9d9bed5438c47aeb88da93efa63e..5056c70abc0e122fac0b95862f3443faaa48b5c7 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Sickle.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/Sickle.scala
@@ -43,7 +43,7 @@ class Sickle(val root: Configurable) extends BiopetCommandLineFunction {
   var fastqc: Fastqc = _
 
   executable = config("exe", default = "sickle", freeVar = false)
-  var qualityType: String = config("qualitytype")
+  var qualityType: Option[String] = config("qualitytype")
   var qualityThreshold: Option[Int] = config("qualityThreshold")
   var lengthThreshold: Option[Int] = config("lengthThreshold")
   var noFiveprime: Boolean = config("noFiveprime", default = false)
@@ -54,7 +54,7 @@ class Sickle(val root: Configurable) extends BiopetCommandLineFunction {
   override def versionCommand = executable + " --version"
 
   override def afterGraph {
-    if (qualityType == null && defaultQualityType != null) qualityType = defaultQualityType
+    if (qualityType.isEmpty && defaultQualityType != null) qualityType = Some(defaultQualityType)
   }
 
   def cmdLine = {
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/bwa/BwaMem.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/bwa/BwaMem.scala
index 09401898a9ee9ac8b6fb3df2be45c7b7c66ffa3b..fc790b183b5bae2922a1b5f89ec66fcf4f5b85b2 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/bwa/BwaMem.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/bwa/BwaMem.scala
@@ -34,7 +34,7 @@ class BwaMem(val root: Configurable) extends Bwa {
   @Output(doc = "Output file SAM", shortName = "output")
   var output: File = _
 
-  var R: String = config("R")
+  var R: Option[String] = config("R")
   var k: Option[Int] = config("k")
   var r: Option[Float] = config("r")
   var S: Boolean = config("S", default = false)
@@ -49,11 +49,11 @@ class BwaMem(val root: Configurable) extends Bwa {
   var e: Boolean = config("e", default = false)
   var A: Option[Int] = config("A")
   var B: Option[Int] = config("B")
-  var O: String = config("O")
-  var E: String = config("E")
-  var L: String = config("L")
+  var O: Option[String] = config("O")
+  var E: Option[String] = config("E")
+  var L: Option[String] = config("L")
   var U: Option[Int] = config("U")
-  var x: String = config("x")
+  var x: Option[String] = config("x")
   var p: Boolean = config("p", default = false)
   var v: Option[Int] = config("v")
   var T: Option[Int] = config("T")
@@ -61,7 +61,7 @@ class BwaMem(val root: Configurable) extends Bwa {
   var a: Boolean = config("a", default = false)
   var C: Boolean = config("C", default = false)
   var Y: Boolean = config("Y", default = false)
-  var I: String = config("I")
+  var I: Option[String] = config("I")
 
   override val defaultVmem = "6G"
   override val defaultThreads = 8
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/bwa/BwaSampe.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/bwa/BwaSampe.scala
index bdd12b3e07272d805dcaf18449e043284c043860..b857eea014ac52acb9608debb94db9cd75cef929 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/bwa/BwaSampe.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/bwa/BwaSampe.scala
@@ -6,7 +6,11 @@ import nl.lumc.sasc.biopet.core.config.Configurable
 import org.broadinstitute.gatk.utils.commandline.{ Output, Input }
 
 /**
- * Created by pjvan_thof on 1/16/15.
+ * BWA sampe wrapper
+ *
+ * based on executable version 0.7.10-r789
+ *
+ * @param root Configurable
  */
 class BwaSampe(val root: Configurable) extends Bwa {
   @Input(doc = "Fastq file R1", required = true)
@@ -39,11 +43,14 @@ class BwaSampe(val root: Configurable) extends Bwa {
   var r: String = _
 
   def cmdLine = required(executable) +
-    required("samse") +
+    required("sampe") +
+    optional("-a", a) +
+    optional("-o", o) +
     optional("-n", n) +
+    optional("-N", N) +
+    optional("-c", c) +
     optional("-f", output) +
     optional("-r", r) +
-    optional("-c", c) +
     conditional(P, "-P") +
     conditional(s, "-s") +
     conditional(A, "-A") +
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/Conifer.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/Conifer.scala
new file mode 100644
index 0000000000000000000000000000000000000000..79fadce176938d293b378599f9f52553636f7aea
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/Conifer.scala
@@ -0,0 +1,34 @@
+/**
+ * Biopet is built on top of GATK Queue for building bioinformatic
+ * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+ * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+ * should also be able to execute Biopet tools and pipelines.
+ *
+ * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+ *
+ * Contact us at: sasc@lumc.nl
+ *
+ * A dual licensing mode is applied. The source code within this project that are
+ * not part of GATK Queue is freely available for non-commercial use under an AGPL
+ * license; For commercial users or users who do not want to follow the AGPL
+ * license, please contact us to obtain a separate license.
+ */
+package nl.lumc.sasc.biopet.extensions.conifer
+
+import nl.lumc.sasc.biopet.core.BiopetCommandLineFunction
+import nl.lumc.sasc.biopet.extensions.PythonCommandLineFunction
+
+abstract class Conifer extends PythonCommandLineFunction {
+  override def subPath = "conifer" :: super.subPath
+  //  executable = config("exe", default = "conifer")
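+  // CoNIFER is run as a python script rather than a direct executable; the
+  // script name is resolved from config with "conifer" as fallback
+  // (assumption: resolvable via the framework's python script lookup).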
+  setPythonScript(config("script", default = "conifer"))
+  override val versionRegex = """(.*)""".r
+  override val versionExitcode = List(0)
+  override def versionCommand = executable + " " + python_script + " --version"
+
+  override val defaultVmem = "8G"
+  override val defaultThreads = 1
+
+  def cmdLine = getPythonCommand
+
+}
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferAnalyze.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferAnalyze.scala
new file mode 100644
index 0000000000000000000000000000000000000000..7ce27881dec042d466b4b2d0e7bdd3a1d2025704
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferAnalyze.scala
@@ -0,0 +1,49 @@
+/**
+ * Biopet is built on top of GATK Queue for building bioinformatic
+ * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+ * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+ * should also be able to execute Biopet tools and pipelines.
+ *
+ * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+ *
+ * Contact us at: sasc@lumc.nl
+ *
+ * A dual licensing mode is applied. The source code within this project that are
+ * not part of GATK Queue is freely available for non-commercial use under an AGPL
+ * license; For commercial users or users who do not want to follow the AGPL
+ * license, please contact us to obtain a separate license.
+ */
+package nl.lumc.sasc.biopet.extensions.conifer
+
+import java.io.File
+
+import nl.lumc.sasc.biopet.core.config.Configurable
+import nl.lumc.sasc.biopet.extensions.Ln
+import org.broadinstitute.gatk.utils.commandline.{ Argument, Input, Output }
+
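+/*
+ * Sketch of the CoNIFER workflow these extensions are assumed to take part in:
+ * one ConiferRPKM job per sample, all writing into a shared rpkmDir, then a
+ * single ConiferAnalyze over that directory, then ConiferCall / ConiferExport
+ * on the resulting analysis .hdf5.
+ */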
+class ConiferAnalyze(val root: Configurable) extends Conifer {
+
+  @Input(doc = "Probes / capture kit definition as bed file: chr,start,stop,gene-annot", required = true)
+  var probes: File = _
+
+  //  @Input(doc = "Path to Conifer RPKM files", required = true)
+  var rpkmDir: File = _
+
+  @Output(doc = "Output analyse.hdf5", shortName = "out")
+  var output: File = _
+
+  @Argument(doc = "SVD, number of components to remove", minRecommendedValue = 2, maxRecommendedValue = 5,
+    minValue = 2, maxValue = 20, required = false)
+  var svd: Option[Int] = config("svd", default = 1)
+
+  @Argument(doc = "Minimum population median RPKM per probe", required = false)
+  var min_rpkm: Option[Double] = config("min_rpkm")
+
+  override def cmdLine = super.cmdLine +
+    " analyze " +
+    " --probes" + required(probes) +
+    " --rpkm_dir" + required(rpkmDir) +
+    " --output" + required(output) +
+    optional("--svd", svd) +
+    optional("--min_rpkm", min_rpkm)
+}
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferCall.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferCall.scala
new file mode 100644
index 0000000000000000000000000000000000000000..6583ed64ebef5636e1afcd51b2432f5fc99c9765
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferCall.scala
@@ -0,0 +1,35 @@
+/**
+ * Biopet is built on top of GATK Queue for building bioinformatic
+ * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+ * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+ * should also be able to execute Biopet tools and pipelines.
+ *
+ * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+ *
+ * Contact us at: sasc@lumc.nl
+ *
+ * A dual licensing mode is applied. The source code within this project that are
+ * not part of GATK Queue is freely available for non-commercial use under an AGPL
+ * license; For commercial users or users who do not want to follow the AGPL
+ * license, please contact us to obtain a separate license.
+ */
+package nl.lumc.sasc.biopet.extensions.conifer
+
+import java.io.File
+
+import nl.lumc.sasc.biopet.core.config.Configurable
+import org.broadinstitute.gatk.utils.commandline.{ Argument, Input, Output }
+
+class ConiferCall(val root: Configurable) extends Conifer {
+
+  @Input(doc = "Input analysis.hdf5", required = true)
+  var input: File = _
+
+  @Output(doc = "Output calls.txt", shortName = "out")
+  var output: File = _
+
+  override def cmdLine = super.cmdLine +
+    " call " +
+    " --input" + required(input) +
+    " --output" + required(output)
+}
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferExport.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferExport.scala
new file mode 100644
index 0000000000000000000000000000000000000000..02c0f09584206770965e67e6afc32765f9997a3f
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferExport.scala
@@ -0,0 +1,39 @@
+/**
+ * Biopet is built on top of GATK Queue for building bioinformatic
+ * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+ * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+ * should also be able to execute Biopet tools and pipelines.
+ *
+ * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+ *
+ * Contact us at: sasc@lumc.nl
+ *
+ * A dual licensing mode is applied. The source code within this project that are
+ * not part of GATK Queue is freely available for non-commercial use under an AGPL
+ * license; For commercial users or users who do not want to follow the AGPL
+ * license, please contact us to obtain a separate license.
+ */
+package nl.lumc.sasc.biopet.extensions.conifer
+
+import java.io.File
+
+import nl.lumc.sasc.biopet.core.config.Configurable
+import org.broadinstitute.gatk.utils.commandline.{ Input, Output }
+
+class ConiferExport(val root: Configurable) extends Conifer {
+
+  @Input(doc = "Input analysis.hdf5", required = true)
+  var input: File = _
+
+  @Output(doc = "Output <sample>.svdzrpkm.bed", shortName = "out", required = true)
+  var output: File = _
+
+  override def afterGraph {
+    this.checkExecutable
+  }
+
+  override def cmdLine = super.cmdLine +
+    " export " +
+    " --input" + required(input) +
+    " --output" + required(output)
+}
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferRPKM.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferRPKM.scala
new file mode 100644
index 0000000000000000000000000000000000000000..52c0e48113715133092144a0def6347ec1e6617a
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/conifer/ConiferRPKM.scala
@@ -0,0 +1,40 @@
+/**
+ * Biopet is built on top of GATK Queue for building bioinformatic
+ * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+ * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+ * should also be able to execute Biopet tools and pipelines.
+ *
+ * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+ *
+ * Contact us at: sasc@lumc.nl
+ *
+ * A dual licensing mode is applied. The source code within this project that are
+ * not part of GATK Queue is freely available for non-commercial use under an AGPL
+ * license; For commercial users or users who do not want to follow the AGPL
+ * license, please contact us to obtain a separate license.
+ */
+package nl.lumc.sasc.biopet.extensions.conifer
+
+import java.io.File
+
+import nl.lumc.sasc.biopet.core.config.Configurable
+import org.broadinstitute.gatk.utils.commandline.{ Output, Input }
+
+class ConiferRPKM(val root: Configurable) extends Conifer {
+
+  @Input(doc = "Bam file", required = true)
+  var bamFile: File = _
+
+  @Input(doc = "Probes / capture kit definition as bed file: chr,start,stop,gene-annot", required = true)
+  var probes: File = _
+
+  /** The output RPKM should be written to a directory that contains all the RPKM files from previous experiments */
+  @Output(doc = "Output RPKM.txt", shortName = "out")
+  var output: File = _
+
+  override def cmdLine = super.cmdLine +
+    " rpkm " +
+    " --probes" + required(probes) +
+    " --input" + required(bamFile) +
+    " --output" + required(output)
+}
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/igvtools/IGVTools.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/igvtools/IGVTools.scala
new file mode 100644
index 0000000000000000000000000000000000000000..d017864f6988828100f6bbda421fbf734a9a5878
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/igvtools/IGVTools.scala
@@ -0,0 +1,14 @@
+/**
+ * Created by wyleung on 5-1-15.
+ */
+
+package nl.lumc.sasc.biopet.extensions.igvtools
+
+import nl.lumc.sasc.biopet.core.BiopetCommandLineFunction
+
+abstract class IGVTools extends BiopetCommandLineFunction {
+  executable = config("exe", default = "igvtools", submodule = "igvtools", freeVar = false)
+  override def versionCommand = executable + " version"
+  override val versionRegex = """IGV Version: ([\d\.]+) .*""".r
+  override val versionExitcode = List(0)
+}
\ No newline at end of file
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/igvtools/IGVToolsCount.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/igvtools/IGVToolsCount.scala
new file mode 100644
index 0000000000000000000000000000000000000000..8037616834ecd4de02e9949883b75d20b45c7347
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/igvtools/IGVToolsCount.scala
@@ -0,0 +1,105 @@
+
+package nl.lumc.sasc.biopet.extensions.igvtools
+
+import java.nio.file.InvalidPathException
+
+import nl.lumc.sasc.biopet.core.config.Configurable
+import org.broadinstitute.gatk.utils.commandline.{ Input, Output, Argument }
+import java.io.{ FileNotFoundException, File }
+
+/**
+ * IGVTools `count` wrapper
+ *
+ * @constructor create a new IGVTools instance from a `.bam` file
+ *
+ */
+
+class IGVToolsCount(val root: Configurable) extends IGVTools {
+  @Input(doc = "Bam File")
+  var input: File = _
+
+  @Input(doc = "<genome>.chrom.sizes File")
+  var genomeChromSizes: File = _
+
+  @Output
+  var tdf: Option[File] = None
+
+  @Output
+  var wig: Option[File] = None
+
+  var maxZoom: Option[Int] = config("maxZoom")
+  var windowSize: Option[Int] = config("windowSize")
+  var extFactor: Option[Int] = config("extFactor")
+
+  var preExtFactor: Option[Int] = config("preExtFactor")
+  var postExtFactor: Option[Int] = config("postExtFactor")
+
+  var windowFunctions: Option[String] = config("windowFunctions")
+  var strands: Option[String] = config("strands")
+  var bases: Boolean = config("bases", default = false)
+
+  var query: Option[String] = config("query")
+  var minMapQuality: Option[Int] = config("minMapQuality")
+  var includeDuplicates: Boolean = config("includeDuplicates", default = false)
+
+  var pairs: Boolean = config("pairs", default = false)
+
+  override def afterGraph {
+    super.afterGraph
+    if (!input.exists()) throw new FileNotFoundException("Input BAM file for IGVToolsCount not found: " + input)
+
+    if (wig.isDefined && !wig.get.getAbsolutePath.endsWith(".wig")) throw new IllegalArgumentException("Wiggle file should have a .wig file-extension")
+    if (tdf.isDefined && !tdf.get.getAbsolutePath.endsWith(".tdf")) throw new IllegalArgumentException("TDF file should have a .tdf file-extension")
+  }
+
+  def cmdLine = {
+    required(executable) +
+      required("count") +
+      optional("--maxZoom", maxZoom) +
+      optional("--windowSize", windowSize) +
+      optional("--extFactor", extFactor) +
+      optional("--preExtFactor", preExtFactor) +
+      optional("--postExtFactor", postExtFactor) +
+      optional("--windowFunctions", windowFunctions) +
+      optional("--strands", strands) +
+      conditional(bases, "--bases") +
+      optional("--query", query) +
+      optional("--minMapQuality", minMapQuality) +
+      conditional(includeDuplicates, "--includeDuplicates") +
+      conditional(pairs, "--pairs") +
+      required(input) +
+      required(outputArg) +
+      required(genomeChromSizes)
+  }
+
+  /**
+   * Resolves the positional output argument; at least one of `tdf` or `wig`
+   * must be set on this wrapper before the command line is built
+   */
+  private def outputArg: String = {
+    (tdf, wig) match {
+      case (None, None)       => throw new IllegalArgumentException("Either TDF or WIG should be supplied")
+      case (Some(a), None)    => a.getAbsolutePath
+      case (None, Some(b))    => b.getAbsolutePath
+      case (Some(a), Some(b)) => a.getAbsolutePath + "," + b.getAbsolutePath
+    }
+  }
+}
+
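+/* Usage sketch (hypothetical QScript context and file names):
+ *   val count = IGVToolsCount(this, bamFile, chromSizes)
+ *   count.tdf = Some(new File(outputDir, "sample.tdf")) // and/or count.wig
+ *   add(count)
+ */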
+object IGVToolsCount {
+  /**
+   * Create an instance from an `input` (.bam) and the matching
+   * `genomeChromSizes` file (e.g. for hg18, hg19 or mm10)
+   *
+   * @param input BAM file to count reads from
+   * @param genomeChromSizes <genome>.chrom.sizes file for the BAM's reference
+   * @return a new IGVToolsCount instance; afterGraph later throws
+   *         FileNotFoundException if the BAM is missing and
+   *         IllegalArgumentException if neither tdf nor wig is supplied
+   */
+  def apply(root: Configurable, input: File, genomeChromSizes: File): IGVToolsCount = {
+    val counting = new IGVToolsCount(root)
+    counting.input = input
+    counting.genomeChromSizes = genomeChromSizes
+    counting
+  }
+}
\ No newline at end of file
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/macs2/Macs2.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/macs2/Macs2.scala
new file mode 100644
index 0000000000000000000000000000000000000000..5c52970ed8cc0c049ef236f571acedbb8a4610dd
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/macs2/Macs2.scala
@@ -0,0 +1,13 @@
+package nl.lumc.sasc.biopet.extensions.macs2
+
+import nl.lumc.sasc.biopet.core.BiopetCommandLineFunction
+
+/**
+ * Created by sajvanderzeeuw on 12/19/14.
+ */
+abstract class Macs2 extends BiopetCommandLineFunction {
+  executable = config("exe", default = "macs2", submodule = "macs2", freeVar = false)
+  override def versionCommand = executable + " --version"
+  override val versionRegex = """macs2 (.*)""".r
+  override val versionExitcode = List(0, 1)
+}
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/macs2/Macs2CallPeak.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/macs2/Macs2CallPeak.scala
new file mode 100644
index 0000000000000000000000000000000000000000..e6a9c48e925949bdd413a9aca79afbb7e28b91d7
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/macs2/Macs2CallPeak.scala
@@ -0,0 +1,99 @@
+package nl.lumc.sasc.biopet.extensions.macs2
+
+import java.io.File
+
+import nl.lumc.sasc.biopet.core.config.Configurable
+import org.broadinstitute.gatk.utils.commandline.{ Output, Input }
+
+class Macs2CallPeak(val root: Configurable) extends Macs2 {
+  @Input(doc = "Treatment input", required = true)
+  var treatment: File = _
+
+  @Input(doc = "Control input", required = false)
+  var control: File = _
+
+  @Output(doc = "Output file NARROWPEAKS")
+  private var output_narrow: File = _
+
+  @Output(doc = "Output file BROADPEAKS")
+  private var output_broad: File = _
+
+  @Output(doc = "Output in Excel format")
+  private var output_xls: File = _
+
+  @Output(doc = "R script with Bimodal model")
+  private var output_r: File = _
+
+  @Output(doc = "Output file Bedgraph")
+  private var output_bdg: File = _
+
+  @Output(doc = "Output file gappedPeak")
+  private var output_gapped: File = _
+
+  var fileformat: Option[String] = config("fileformat")
+  var gsize: Option[Float] = config("gsize")
+  var keepdup: Boolean = config("keep-dup", default = false)
+  var buffersize: Option[Int] = config("buffer-size")
+  var outputdir: String = _
+  var name: Option[String] = config("name")
+  var bdg: Boolean = config("bdg", default = false)
+  var verbose: Boolean = config("verbose", default = false)
+  var tsize: Option[Int] = config("tsize")
+  var bandwith: Option[Int] = config("bandwith")
+  var mfold: Option[Float] = config("mfold")
+  var fixbimodel: Boolean = config("fixbimodel", default = false)
+  var nomodel: Boolean = config("nomodel", default = false)
+  var shift: Option[Int] = config("shift")
+  var extsize: Option[Int] = config("extsize") // separate from shift; config key name assumed
+  var qvalue: Option[Float] = config("qvalue")
+  var pvalue: Option[Float] = config("pvalue")
+  var tolarge: Boolean = config("tolarge", default = false)
+  var downsample: Boolean = config("downsample", default = false)
+  var nolambda: Boolean = config("nolambda", default = false)
+  var slocal: Option[Int] = config("slocal")
+  var llocal: Option[Int] = config("llocal")
+  var broad: Boolean = config("broad", default = false)
+  var broadcutoff: Option[Int] = config("broadcutoff")
+  var callsummits: Boolean = config("callsummits", default = false)
+
+  override def afterGraph: Unit = {
+    if (name.isEmpty) throw new IllegalArgumentException("Name is not defined")
+    if (outputdir == null) throw new IllegalArgumentException("Outputdir is not defined")
+    output_narrow = new File(outputdir + name.get + ".narrowPeak")
+    output_broad = new File(outputdir + name.get + ".broadPeak")
+    output_xls = new File(outputdir + name.get + ".xls")
+    output_bdg = new File(outputdir + name.get + ".bdg")
+    output_r = new File(outputdir + name.get + ".r")
+    output_gapped = new File(outputdir + name.get + ".gappedPeak")
+  }
+
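+  /* Usage sketch (hypothetical values): the calling pipeline must set
+   * `treatment` and `outputdir`, and provide `name` via config, before
+   * afterGraph runs:
+   *   val macs = new Macs2CallPeak(this)           // `this` = a Configurable
+   *   macs.treatment = treatmentBam                // hypothetical BAM file
+   *   macs.outputdir = sampleDir + File.separator  // hypothetical directory
+   *   add(macs)
+   */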
+  def cmdLine = {
+    required(executable) + required("callpeak") +
+      required("--treatment", treatment) + /* Treatment sample */
+      optional("--control", control) + /* Control sample */
+      optional("-f", fileformat) + /* Input file format */
+      required("-g", gsize) + /* Estimated genome size.(format: 2.7e9) (presets: hs, mm, ce, dm) */
+      conditional(keepdup, "--keep-dup") + /* Whether to keep duplicates */
+      optional("--buffer-size", buffersize) + /* Buffer size */
+      required("--outdir", outputdir) + /* The output directory */
+      optional("--name", name) + /* prefix name of the output files. (note that also the peak names inside the files will have this name */
+      conditional(bdg, "-B") + /* Whether to output in BDG format */
+      conditional(verbose, "--verbose") + /* Whether to output verbosely */
+      optional("--tsize", tsize) + /* Sets custom tag length, if not specified macs will use first 10 sequences to estimate the size */
+      optional("--bw", bandwith) + /* The bandwith to use for model building. Set this parameter as the sonication fragment size estimated in the wetlab */
+      optional("--mfold", mfold) + /* The parameter to select regions within the model fold. Must be a upper and lower limit. */
+      conditional(fixbimodel, "--fix-bimodal") + /* Whether turn on the auto paired-peak model process. If it's set, when MACS failed to build paired model, it will use the nomodel settings, the '--extsize' parameter to extend each tags. If set, MACS will be terminated if paried-peak model is failed. */
+      conditional(nomodel, "--nomodel") + /* While on, MACS will bypass building the shifting model */
+      optional("--shift", shift) + /* You can set an arbitrary shift in basepairs here */
+      optional("--extsize", shift) + /* While '--nomodel' is set, MACS uses this parameter to extend reads in 5'->3' direction to fix-sized fragments. For example, if the size of binding region for your transcription factor is 200 bp, and you want to bypass the model building by MACS, this parameter can be set as 200. This option is only valid when --nomodel is set or when MACS fails to build model and --fix-bimodal is on. */
+      optional("--qvalue", qvalue) + /* the Q-value(FDR) cutoff */
+      optional("--pvalue", pvalue) + /* The P-value cutoff, if --pvalue is set no Qvalue is calculated */
+      conditional(tolarge, "--to-large") + /* Whether to scale up the smaller input file to match the larger one */
+      conditional(downsample, "--down-sample") + /* The reverse of --to-large: down-samples the larger sample instead */
+      conditional(nolambda, "--nolambda") + /* With this flag on, MACS will use the background lambda as local lambda. This means MACS will not consider the local bias at peak candidate regions.*/
+      optional("--slocal", slocal) + /* These two parameters control which two levels of regions will be checked around the peak regions to calculate the maximum lambda as local lambda */
+      optional("--llocal", llocal) +
+      conditional(broad, "--broad") + /* whether to enable broad peak calling */
+      optional("--broad-cutoff", broadcutoff) + /* Cutoff for broad region. This option is not available unless --broad is set. If -p is set, this is a pvalue cutoff, otherwise, it's a qvalue cutoff. */
+      conditional(callsummits, "--call-summits") /* MACS will now reanalyze the shape of signal profile (p or q-score depending on cutoff setting) to deconvolve subpeaks within each peak called from general procedure. */
+  }
+}
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/MarkDuplicates.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/MarkDuplicates.scala
index f88304a0aa3fe25ad601fd5ad9f75672e03a1965..6df04c12ccb0ad1ca1c4853dac940ec3a304d043 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/MarkDuplicates.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/MarkDuplicates.scala
@@ -32,19 +32,19 @@ class MarkDuplicates(val root: Configurable) extends Picard {
   var outputMetrics: File = _
 
   @Argument(doc = "PROGRAM_RECORD_ID", required = false)
-  var programRecordId: String = config("programrecordid")
+  var programRecordId: Option[String] = config("programrecordid")
 
   @Argument(doc = "PROGRAM_GROUP_VERSION", required = false)
-  var programGroupVersion: String = config("programgroupversion")
+  var programGroupVersion: Option[String] = config("programgroupversion")
 
   @Argument(doc = "PROGRAM_GROUP_COMMAND_LINE", required = false)
-  var programGroupCommandLine: String = config("programgroupcommandline")
+  var programGroupCommandLine: Option[String] = config("programgroupcommandline")
 
   @Argument(doc = "PROGRAM_GROUP_NAME", required = false)
-  var programGroupName: String = config("programgroupname")
+  var programGroupName: Option[String] = config("programgroupname")
 
   @Argument(doc = "COMMENT", required = false)
-  var comment: String = config("comment")
+  var comment: Option[String] = config("comment")
 
   @Argument(doc = "REMOVE_DUPLICATES", required = false)
   var removeDuplicates: Boolean = config("removeduplicates", default = false)
@@ -62,7 +62,7 @@ class MarkDuplicates(val root: Configurable) extends Picard {
   var sortingCollectionSizeRatio: Option[Double] = config("sortingCollectionSizeRatio")
 
   @Argument(doc = "READ_NAME_REGEX", required = false)
-  var readNameRegex: String = config("readNameRegex")
+  var readNameRegex: Option[String] = config("readNameRegex")
 
   @Argument(doc = "OPTICAL_DUPLICATE_PIXEL_DISTANCE", required = false)
   var opticalDuplicatePixelDistance: Option[Int] = config("opticalDuplicatePixelDistance")
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/MergeSamFiles.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/MergeSamFiles.scala
index b1327bc3aa5d00d8608515897a5fd81ab2d3fbe3..4de143498d9812502a2141facfacc0bbf2b046d5 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/MergeSamFiles.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/MergeSamFiles.scala
@@ -41,7 +41,7 @@ class MergeSamFiles(val root: Configurable) extends Picard {
   var useThreading: Boolean = config("use_threading", default = false)
 
   @Argument(doc = "COMMENT", required = false)
-  var comment: String = config("comment")
+  var comment: Option[String] = config("comment")
 
   override def commandLine = super.commandLine +
     repeat("INPUT=", input, spaceSeparated = false) +
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/Picard.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/Picard.scala
index cc571ca7d6dad5be5c8afccd5c4839a4c167991a..e417a85fd65c0d096961a910d20a8c890de28e91 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/Picard.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/Picard.scala
@@ -22,13 +22,13 @@ abstract class Picard extends BiopetJavaCommandLineFunction {
   override def subPath = "picard" :: super.subPath
 
   @Argument(doc = "VERBOSITY", required = false)
-  var verbosity: String = config("verbosity")
+  var verbosity: Option[String] = config("verbosity")
 
   @Argument(doc = "QUIET", required = false)
   var quiet: Boolean = config("quiet", default = false)
 
   @Argument(doc = "VALIDATION_STRINGENCY", required = false)
-  var stringency: String = config("validationstringency")
+  var stringency: Option[String] = config("validationstringency")
 
   @Argument(doc = "COMPRESSION_LEVEL", required = false)
   var compression: Option[Int] = config("compressionlevel")
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/SamToFastq.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/SamToFastq.scala
index cb61cc4a8088da7dcbf9b87e5395722dd51f5f01..db784ee2b11767654e8fb2eb2174f9c0d63a11fa 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/SamToFastq.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/picard/SamToFastq.scala
@@ -38,7 +38,7 @@ class SamToFastq(val root: Configurable) extends Picard {
   var outputPerRg: Boolean = config("outputPerRg", default = false)
 
   @Argument(doc = "Output dir", required = false)
-  var outputDir: String = config("outputDir")
+  var outputDir: String = _
 
   @Argument(doc = "re reverse", required = false)
   var reReverse: Boolean = config("reReverse", default = false)
@@ -53,7 +53,7 @@ class SamToFastq(val root: Configurable) extends Picard {
   var clippingAtribute: String = config("clippingAtribute")
 
   @Argument(doc = "clippingAction", required = false)
-  var clippingAction: String = config("clippingAction")
+  var clippingAction: Option[String] = config("clippingAction")
 
   @Argument(doc = "read1Trim", required = false)
   var read1Trim: Option[Int] = config("read1Trim")
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/samtools/SamtoolsMpileup.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/samtools/SamtoolsMpileup.scala
index f0e6457a491a09e10580ad100dcb1c0622974b4c..b8a3a2d0e6ec913611abe21b00c1dfe200842418 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/samtools/SamtoolsMpileup.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/samtools/SamtoolsMpileup.scala
@@ -30,7 +30,7 @@ class SamtoolsMpileup(val root: Configurable) extends Samtools {
   var reference: File = config("reference")
 
   @Input(doc = "Interval bed")
-  var intervalBed: File = config("interval_bed")
+  var intervalBed: Option[File] = config("interval_bed")
 
   var disableBaq: Boolean = config("disable_baq")
   var minMapQuality: Option[Int] = config("min_map_quality")
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/seqtk/SeqtkSeq.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/seqtk/SeqtkSeq.scala
index be42104c0562707ea5afca657d6a9b9d04315e8a..9838040cc74a5da4cff3073ac0b2123b6de9e954 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/seqtk/SeqtkSeq.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/extensions/seqtk/SeqtkSeq.scala
@@ -37,7 +37,7 @@ class SeqtkSeq(val root: Configurable) extends Seqtk {
   var q: Option[Int] = config("q")
 
   /** masked bases converted to CHAR; 0 for lowercase [0] */
-  var n: String = config("n")
+  var n: Option[String] = config("n")
 
   /** number of residues per line; 0 for 2^32-1 [0] */
   var l: Option[Int] = config("l")
@@ -52,7 +52,7 @@ class SeqtkSeq(val root: Configurable) extends Seqtk {
   var f: Option[Int] = config("f")
 
   /** mask regions in BED or name list FILE [null] */
-  var M: File = config("M")
+  var M: Option[File] = config("M")
 
   /** drop sequences with length shorter than INT [0] */
   var L: Option[Int] = config("L")
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/pipelines/MultisamplePipelineTemplate.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/pipelines/MultisamplePipelineTemplate.scala
index 1ccf16f428e6cd2482648f4e9f2cc030696c9799..7d46c4b8fb910398f61e9fb3b873f682efaf023f 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/pipelines/MultisamplePipelineTemplate.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/pipelines/MultisamplePipelineTemplate.scala
@@ -26,7 +26,7 @@ class MultisamplePipelineTemplate(val root: Configurable) extends QScript with M
   class Sample(sampleId: String) extends AbstractSample(sampleId) {
 
     def makeLibrary(id: String) = new Library(id)
-    class Library(libraryId: String) extends AbstractLibrary(libraryId) {
+    class Library(libId: String) extends AbstractLibrary(libId) {
       protected def addJobs(): Unit = {
         // Library jobs
       }
@@ -37,6 +37,9 @@ class MultisamplePipelineTemplate(val root: Configurable) extends QScript with M
     }
   }
 
+  def addMultiSampleJobs(): Unit = {
+  }
+
   def init(): Unit = {
   }
 
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/scripts/FastqSync.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/scripts/FastqSync.scala
deleted file mode 100644
index 6cada2301456817e3e3c191fbd2b5461788fa47d..0000000000000000000000000000000000000000
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/scripts/FastqSync.scala
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Biopet is built on top of GATK Queue for building bioinformatic
- * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
- * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
- * should also be able to execute Biopet tools and pipelines.
- *
- * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
- *
- * Contact us at: sasc@lumc.nl
- *
- * A dual licensing mode is applied. The source code within this project that are
- * not part of GATK Queue is freely available for non-commercial use under an AGPL
- * license; For commercial users or users who do not want to follow the AGPL
- * license, please contact us to obtain a separate license.
- */
-package nl.lumc.sasc.biopet.scripts
-
-import java.io.File
-
-import org.broadinstitute.gatk.utils.commandline.{ Input, Output }
-
-import argonaut._, Argonaut._
-import scalaz._, Scalaz._
-
-import nl.lumc.sasc.biopet.core.config.Configurable
-import nl.lumc.sasc.biopet.extensions.PythonCommandLineFunction
-
-import scala.io.Source
-
-class FastqSync(val root: Configurable) extends PythonCommandLineFunction {
-  setPythonScript("sync_paired_end_reads.py")
-
-  @Input(doc = "Start fastq")
-  var input_start_fastq: File = _
-
-  @Input(doc = "R1 input")
-  var input_R1: File = _
-
-  @Input(doc = "R2 input")
-  var input_R2: File = _
-
-  @Output(doc = "R1 output")
-  var output_R1: File = _
-
-  @Output(doc = "R2 output")
-  var output_R2: File = _
-
-  //No output Annotation so file 
-  var output_stats: File = _
-
-  def cmdLine = {
-    getPythonCommand +
-      required(input_start_fastq) +
-      required(input_R1) +
-      required(input_R2) +
-      required(output_R1) +
-      required(output_R2) +
-      " > " +
-      required(output_stats)
-  }
-
-  def getSummary: Json = {
-    val R1_filteredR = """Filtered (\d*) reads from first read file.""".r
-    val R2_filteredR = """Filtered (\d*) reads from second read file.""".r
-    val readsLeftR = """Synced read files contain (\d*) reads.""".r
-
-    var R1_filtered = 0
-    var R2_filtered = 0
-    var readsLeft = 0
-
-    if (output_stats.exists) for (line <- Source.fromFile(output_stats).getLines) {
-      line match {
-        case R1_filteredR(m) => R1_filtered = m.toInt
-        case R2_filteredR(m) => R2_filtered = m.toInt
-        case readsLeftR(m)   => readsLeft = m.toInt
-        case _               =>
-      }
-    }
-
-    return ("num_reads_discarded_R1" := R1_filtered) ->:
-      ("num_reads_discarded_R2" := R2_filtered) ->:
-      ("num_reads_kept" := readsLeft) ->:
-      jEmptyObject
-  }
-}
-
-object FastqSync {
-  def apply(root: Configurable, input_start_fastq: File, input_R1: File, input_R2: File,
-            output_R1: File, output_R2: File, output_stats: File): FastqSync = {
-    val fastqSync = new FastqSync(root)
-    fastqSync.input_start_fastq = input_start_fastq
-    fastqSync.input_R1 = input_R1
-    fastqSync.input_R2 = input_R2
-    fastqSync.output_R1 = output_R1
-    fastqSync.output_R2 = output_R2
-    fastqSync.output_stats = output_stats
-    return fastqSync
-  }
-
-  def mergeSummaries(jsons: List[Json]): Json = {
-    var R1_filtered = 0
-    var R2_filtered = 0
-    var readsLeft = 0
-
-    for (json <- jsons) {
-      R1_filtered += json.field("num_reads_discarded_R1").get.numberOrZero.toInt
-      R2_filtered += json.field("num_reads_discarded_R2").get.numberOrZero.toInt
-      readsLeft += json.field("num_reads_kept").get.numberOrZero.toInt
-    }
-
-    return ("num_reads_discarded_R1" := R1_filtered) ->:
-      ("num_reads_discarded_R2" := R2_filtered) ->:
-      ("num_reads_kept" := readsLeft) ->:
-      jEmptyObject
-  }
-}
\ No newline at end of file
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/tools/FastqSync.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/tools/FastqSync.scala
new file mode 100644
index 0000000000000000000000000000000000000000..aaa2797b65930d197089aaa72d90dd5e4d9382e4
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/tools/FastqSync.scala
@@ -0,0 +1,293 @@
+/**
+ * Copyright (c) 2014 Leiden University Medical Center - Sequencing Analysis Support Core <sasc@lumc.nl>
+ * @author Wibowo Arindrarto <w.arindrarto@lumc.nl>
+ *
+ * This tool is a port of a Python implementation written by Martijn Vermaat[1]
+ *
+ * [1] https://github.com/martijnvermaat/bio-playground/blob/master/sync-paired-end-reads/sync_paired_end_reads.py
+ */
+package nl.lumc.sasc.biopet.tools
+
+import java.io.File
+import scala.io.Source
+import scala.util.matching.Regex
+
+import scala.annotation.tailrec
+import scala.collection.JavaConverters._
+
+import argonaut._, Argonaut._
+import scalaz._, Scalaz._
+import htsjdk.samtools.fastq.{ AsyncFastqWriter, BasicFastqWriter, FastqReader, FastqRecord }
+import org.broadinstitute.gatk.utils.commandline.{ Input, Output }
+
+import nl.lumc.sasc.biopet.core.BiopetJavaCommandLineFunction
+import nl.lumc.sasc.biopet.core.ToolCommand
+import nl.lumc.sasc.biopet.core.config.Configurable
+
+/**
+ * FastqSync function class for usage in Biopet pipelines
+ *
+ * @param root Configuration object for the pipeline
+ */
+class FastqSync(val root: Configurable) extends BiopetJavaCommandLineFunction {
+
+  javaMainClass = getClass.getName
+
+  @Input(doc = "Original FASTQ file (read 1 or 2)", shortName = "r", required = true)
+  var refFastq: File = _
+
+  @Input(doc = "Input read 1 FASTQ file", shortName = "i", required = true)
+  var inputFastq1: File = _
+
+  @Input(doc = "Input read 2 FASTQ file", shortName = "j", required = true)
+  var inputFastq2: File = _
+
+  @Output(doc = "Output read 1 FASTQ file", shortName = "o", required = true)
+  var outputFastq1: File = _
+
+  @Output(doc = "Output read 2 FASTQ file", shortName = "p", required = true)
+  var outputFastq2: File = _
+
+  @Output(doc = "Sync statistics", required = true)
+  var outputStats: File = _
+
+  // executed command line
+  override def commandLine =
+    super.commandLine +
+      required("-r", refFastq) +
+      required("-i", inputFastq1) +
+      required("-j", inputFastq2) +
+      required("-o", outputFastq1) +
+      required("-p", outputFastq2) + " > " +
+      required(outputStats)
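+  // The tool prints its filter counts to stdout; the redirection above stores
+  // them in `outputStats`, which `summary` below parses back into JSON.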
+
+  // summary statistics
+  def summary: Json = {
+
+    val regex = new Regex("""Filtered (\d*) reads from first read file.
+                            |Filtered (\d*) reads from second read file.
+                            |Synced read files contain (\d*) reads.""".stripMargin,
+      "R1", "R2", "RL")
+
+    val (countFilteredR1, countFilteredR2, countRLeft) =
+      if (outputStats.exists) {
+        val text = Source
+          .fromFile(outputStats)
+          .getLines()
+          .mkString("\n")
+        regex.findFirstMatchIn(text) match {
+          case None         => (0, 0, 0)
+          case Some(rmatch) => (rmatch.group("R1").toInt, rmatch.group("R2").toInt, rmatch.group("RL").toInt)
+        }
+      } else (0, 0, 0)
+
+    ("num_reads_discarded_R1" := countFilteredR1) ->:
+      ("num_reads_discarded_R2" := countFilteredR2) ->:
+      ("num_reads_kept" := countRLeft) ->:
+      jEmptyObject
+  }
+}
+
+object FastqSync extends ToolCommand {
+
+  /**
+   * Implicit class to allow for lazy retrieval of FastqRecord ID
+   * without any read pair mark
+   *
+   * @param fq FastqRecord
+   */
+  private implicit class FastqPair(fq: FastqRecord) {
+    lazy val fragId = fq.getReadHeader.split("[_/][12]\\s??|\\s")(0)
+  }
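+  /* Illustration: headers "@read1/1" and "@read1 2:N:0:1" both yield
+     fragId "@read1" under the split pattern above. */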
+
+  /**
+   * Counts from syncing FastqRecords
+   *
+   * @param numDiscard1 Number of reads discarded from the initial read 1
+   * @param numDiscard2 Number of reads discarded from the initial read 2
+   * @param numKept Number of items in result
+   */
+  case class SyncCounts(numDiscard1: Int, numDiscard2: Int, numKept: Int)
+
+  /**
+   * Filters out FastqRecord that are not present in the input iterators, using
+   * a reference sequence object
+   *
+   * @param pre FastqReader over reference FASTQ file
+   * @param seqA FastqReader over read 1
+   * @param seqB FastqReader over read 2
+   * @return
+   */
+  def syncFastq(pre: FastqReader, seqA: FastqReader, seqB: FastqReader): (Stream[(FastqRecord, FastqRecord)], SyncCounts) = {
+    // counters for discarded A and B sequences + total kept
+    // NOTE: we are reassigning values to these variables in the recursion below
+    var (numDiscA, numDiscB, numKept) = (0, 0, 0)
+
+    /**
+     * Syncs read pairs recursively
+     *
+     * @param pre Reference sequence, assumed to be a superset of both seqA and seqB
+     * @param seqA Sequence over read 1
+     * @param seqB Sequence over read 2
+     * @param acc Stream containing pairs which are present in read 1 and read 2
+     * @return
+     */
+    @tailrec def syncIter(pre: Stream[FastqRecord],
+                          seqA: Stream[FastqRecord], seqB: Stream[FastqRecord],
+                          acc: Stream[(FastqRecord, FastqRecord)]): Stream[(FastqRecord, FastqRecord)] =
+      (pre.headOption, seqA.headOption, seqB.headOption) match {
+        // recursion base case: both iterators have been exhausted
+        case (_, None, None) => acc
+        // illegal state: reference sequence exhausted but not seqA or seqB
+        case (None, Some(_), _) | (None, _, Some(_)) =>
+          throw new NoSuchElementException("Reference record stream shorter than expected")
+        // keep recursion going if A still has items (we want to count how many)
+        case (_, _, None) =>
+          numDiscA += 1
+          syncIter(pre.tail, seqA.tail, Stream(), acc)
+        // like above but for B
+        case (_, None, _) =>
+          numDiscB += 1
+          syncIter(pre.tail, Stream(), seqB.tail, acc)
+        // where the magic happens!
+        case (Some(r), Some(a), Some(b)) =>
+          // value of A iterator in the next recursion
+          val nextA =
+            // hold A if its head is not equal to reference
+            if (a.fragId != r.fragId) {
+              if (b.fragId == r.fragId) numDiscB += 1
+              seqA
+              // otherwise, go to next item
+            } else seqA.tail
+          // like A above
+          val nextB =
+            if (b.fragId != r.fragId) {
+              if (a.fragId == r.fragId) numDiscA += 1
+              seqB
+            } else seqB.tail
+          // value of accumulator in the next recursion
+          val nextAcc =
+            // keep accumulator unchanged if any of the two post streams
+            // have different elements compared to the reference stream
+            if (a.fragId != r.fragId || b.fragId != r.fragId) acc
+            // otherwise, grow accumulator
+            else {
+              numKept += 1
+              acc ++ Stream((a, b))
+            }
+          syncIter(pre.tail, nextA, nextB, nextAcc)
+      }
+
+    (syncIter(pre.iterator.asScala.toStream, seqA.iterator.asScala.toStream, seqB.iterator.asScala.toStream,
+      Stream.empty[(FastqRecord, FastqRecord)]),
+      SyncCounts(numDiscA, numDiscB, numKept))
+  }
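+  /* Standalone usage sketch (hypothetical FastqReader arguments):
+   *   val (pairs, counts) = syncFastq(new FastqReader(ref), new FastqReader(r1), new FastqReader(r2))
+   * syncIter recurses to completion before returning, so `counts` is final here.
+   */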
+
+  def writeSyncedFastq(sync: Stream[(FastqRecord, FastqRecord)],
+                       counts: SyncCounts,
+                       outputFastq1: AsyncFastqWriter,
+                       outputFastq2: AsyncFastqWriter): Unit = {
+    sync.foreach {
+      case (rec1, rec2) =>
+        outputFastq1.write(rec1)
+        outputFastq2.write(rec2)
+    }
+    println("Filtered %d reads from first read file.".format(counts.numDiscard1))
+    println("Filtered %d reads from second read file.".format(counts.numDiscard2))
+    println("Synced read files contain %d reads.".format(counts.numKept))
+  }
+
+  /** Function to merge this tool's summary with summaries from other objects */
+  // TODO: refactor this into the object? At least make it work on the summary object
+  def mergeSummaries(jsons: List[Json]): Json = {
+
+    val (read1FilteredCount, read2FilteredCount, readsLeftCount) = jsons
+      // extract the values we require from each JSON object into tuples
+      .map {
+        case json =>
+          (json.field("num_reads_discarded_R1").get.numberOrZero.toInt,
+            json.field("num_reads_discarded_R2").get.numberOrZero.toInt,
+            json.field("num_reads_kept").get.numberOrZero.toInt)
+      }
+      // reduce the tuples
+      .reduceLeft {
+        (x: (Int, Int, Int), y: (Int, Int, Int)) =>
+          (x._1 + y._1, x._2 + y._2, x._3 + y._3)
+      }
+
+    ("num_reads_discarded_R1" := read1FilteredCount) ->:
+      ("num_reads_discarded_R2" := read2FilteredCount) ->:
+      ("num_reads_kept" := readsLeftCount) ->:
+      jEmptyObject
+  }
+
+  case class Args(refFastq: File = new File(""),
+                  inputFastq1: File = new File(""),
+                  inputFastq2: File = new File(""),
+                  outputFastq1: File = new File(""),
+                  outputFastq2: File = new File("")) extends AbstractArgs
+
+  class OptParser extends AbstractOptParser {
+
+    // TODO: make output format independent from input format?
+    head(
+      s"""
+        |$commandName - Sync paired-end FASTQ files.
+        |
+        |This tool works with gzipped or non-gzipped FASTQ files. The output
+        |file will be gzipped when the input is also gzipped.
+      """.stripMargin)
+
+    opt[File]('r', "ref") required () valueName "<fastq>" action { (x, c) =>
+      c.copy(refFastq = x)
+    } validate {
+      x => if (x.exists) success else failure("Reference FASTQ file not found")
+    } text "Reference FASTQ file"
+
+    opt[File]('i', "in1") required () valueName "<fastq>" action { (x, c) =>
+      c.copy(inputFastq1 = x)
+    } validate {
+      x => if (x.exists) success else failure("Input FASTQ file 1 not found")
+    } text "Input FASTQ file 1"
+
+    opt[File]('j', "in2") required () valueName "<fastq[.gz]>" action { (x, c) =>
+      c.copy(inputFastq2 = x)
+    } validate {
+      x => if (x.exists) success else failure("Input FASTQ file 2 not found")
+    } text "Input FASTQ file 2"
+
+    opt[File]('o', "out1") required () valueName "<fastq[.gz]>" action { (x, c) =>
+      c.copy(outputFastq1 = x)
+    } text "Output FASTQ file 1"
+
+    opt[File]('p', "out2") required () valueName "<fastq>" action { (x, c) =>
+      c.copy(outputFastq2 = x)
+    } text "Output FASTQ file 2"
+  }
+
+  /**
+   * Parses the command line argument
+   *
+   * @param args Array of arguments
+   * @return
+   */
+  def parseArgs(args: Array[String]): Args = new OptParser()
+    .parse(args, Args())
+    .getOrElse(sys.exit(1))
+
+  def main(args: Array[String]): Unit = {
+
+    val commandArgs: Args = parseArgs(args)
+
+    val (synced, counts) = syncFastq(
+      new FastqReader(commandArgs.refFastq),
+      new FastqReader(commandArgs.inputFastq1),
+      new FastqReader(commandArgs.inputFastq2))
+
+    writeSyncedFastq(synced, counts,
+      // using 3000 for queue size to approximate NFS buffer
+      new AsyncFastqWriter(new BasicFastqWriter(commandArgs.outputFastq1), 3000),
+      new AsyncFastqWriter(new BasicFastqWriter(commandArgs.outputFastq2), 3000)
+    )
+  }
+}
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/tools/VcfStats.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/tools/VcfStats.scala
new file mode 100644
index 0000000000000000000000000000000000000000..11d930d5972897824e341d22d46808d849d7767b
--- /dev/null
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/tools/VcfStats.scala
@@ -0,0 +1,472 @@
+package nl.lumc.sasc.biopet.tools
+
+import java.io.{ FileOutputStream, PrintWriter, File }
+
+import htsjdk.variant.variantcontext.{ VariantContext, Genotype }
+import htsjdk.variant.vcf.VCFFileReader
+import nl.lumc.sasc.biopet.core.{ BiopetJavaCommandLineFunction, ToolCommand }
+import nl.lumc.sasc.biopet.core.config.Configurable
+import org.broadinstitute.gatk.utils.commandline.{ Output, Input }
+import scala.collection.JavaConversions._
+import scala.collection.mutable
+import scala.sys.process.{ Process, ProcessLogger }
+import htsjdk.samtools.util.Interval
+
+/**
+ * Created by pjvan_thof on 1/10/15.
+ */
+class VcfStats(val root: Configurable) extends BiopetJavaCommandLineFunction {
+  javaMainClass = getClass.getName
+
+  @Input(doc = "Input fastq", shortName = "I", required = true)
+  var input: File = _
+
+  protected var outputDir: String = _
+
+  /**
+   * Sets the output dir and the job output file
+   * @param dir output directory
+   */
+  def setOutputDir(dir: String): Unit = {
+    outputDir = dir
+    this.jobOutputFile = new File(dir + File.separator + ".vcfstats.out")
+  }
+
+  /**
+   * Creates command to execute extension
+   * @return
+   */
+  override def commandLine = super.commandLine +
+    required("-I", input) +
+    required("-o", outputDir)
+}
+
+object VcfStats extends ToolCommand {
+  /** Commandline argument */
+  case class Args(inputFile: File = null, outputDir: String = null, intervals: Option[File] = None) extends AbstractArgs
+
+  /** Parsing commandline arguments */
+  class OptParser extends AbstractOptParser {
+    opt[File]('I', "inputFile") required () unbounded () valueName ("<file>") action { (x, c) =>
+      c.copy(inputFile = x)
+    }
+    opt[String]('o', "outputDir") required () unbounded () valueName ("<file>") action { (x, c) =>
+      c.copy(outputDir = x)
+    }
+    //TODO: add interval argument
+    /*
+    opt[File]('i', "intervals") unbounded () valueName ("<file>") action { (x, c) =>
+      c.copy(intervals = Some(x))
+    }
+    */
+  }
+
+  /**
+   * Class to store sample to sample compare stats
+   * @param genotypeOverlap Number of genotypes that match the other sample
+   * @param alleleOverlap Number of alleles also found in other sample
+   */
+  case class SampleToSampleStats(var genotypeOverlap: Int = 0,
+                                 var alleleOverlap: Int = 0) {
+    /** Adds the counts from another instance */
+    def +=(other: SampleToSampleStats) {
+      this.genotypeOverlap += other.genotypeOverlap
+      this.alleleOverlap += other.alleleOverlap
+    }
+  }
+
+  /**
+   * Class to store all sample-level stats
+   * @param genotypeStats Stores all genotype-level stats
+   * @param sampleToSample Stores sample to sample compare stats
+   */
+  case class SampleStats(val genotypeStats: mutable.Map[String, mutable.Map[Any, Int]] = mutable.Map(),
+                         val sampleToSample: mutable.Map[String, SampleToSampleStats] = mutable.Map()) {
+    /** Adds the counts from another instance */
+    def +=(other: SampleStats): Unit = {
+      for ((key, value) <- other.sampleToSample) {
+        if (this.sampleToSample.contains(key)) this.sampleToSample(key) += value
+        else this.sampleToSample(key) = value
+      }
+      for ((field, fieldMap) <- other.genotypeStats) {
+        val thisField = this.genotypeStats.get(field)
+        if (thisField.isDefined) mergeStatsMap(thisField.get, fieldMap)
+        else this.genotypeStats += field -> fieldMap
+      }
+    }
+  }
+
+  /**
+   * General stats class to store vcf stats
+   * @param generalStats Stores all general stats
+   * @param samplesStats Stores all sample/genotype specific stats
+   */
+  case class Stats(val generalStats: mutable.Map[String, mutable.Map[Any, Int]] = mutable.Map(),
+                   val samplesStats: mutable.Map[String, SampleStats] = mutable.Map()) {
+    /** Adds the counts from another instance */
+    def +=(other: Stats): Stats = {
+      for ((key, value) <- other.samplesStats) {
+        if (this.samplesStats.contains(key)) this.samplesStats(key) += value
+        else this.samplesStats(key) = value
+      }
+      for ((field, fieldMap) <- other.generalStats) {
+        val thisField = this.generalStats.get(field)
+        if (thisField.isDefined) mergeStatsMap(thisField.get, fieldMap)
+        else this.generalStats += field -> fieldMap
+      }
+      this
+    }
+  }
+
+  /**
+   * Merge m2 into m1
+   * @param m1
+   * @param m2
+   */
+  def mergeStatsMap(m1: mutable.Map[Any, Int], m2: mutable.Map[Any, Int]): Unit = {
+    for (key <- m2.keySet)
+      m1(key) = m1.getOrElse(key, 0) + m2(key)
+  }
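+  // e.g. m1 = Map("A" -> 1), m2 = Map("A" -> 2, "B" -> 3) leaves m1 as Map("A" -> 3, "B" -> 3)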
+
+  /**
+   * Merge m2 into m1
+   * @param m1
+   * @param m2
+   */
+  def mergeNestedStatsMap(m1: mutable.Map[String, mutable.Map[Any, Int]], m2: Map[String, Map[Any, Int]]): Unit = {
+    for ((field, fieldMap) <- m2) {
+      if (m1.contains(field)) {
+        for ((key, value) <- fieldMap) {
+          if (m1(field).contains(key)) m1(field)(key) += value
+          else m1(field)(key) = value
+        }
+      } else m1(field) = mutable.Map(fieldMap.toList: _*)
+    }
+  }
+
+  protected var commandArgs: Args = _
+
+  /**
+   * @param args the command line arguments
+   */
+  def main(args: Array[String]): Unit = {
+    logger.info("Started")
+    val argsParser = new OptParser
+    commandArgs = argsParser.parse(args, Args()) getOrElse sys.exit(1)
+
+    val reader = new VCFFileReader(commandArgs.inputFile, true)
+    val header = reader.getFileHeader
+    val samples = header.getSampleNamesInOrder.toList
+
+    val intervals: List[Interval] = (
+      for (
+        seq <- header.getSequenceDictionary.getSequences;
+        chunks = (seq.getSequenceLength / 10000000).max(1);
+        i <- 1 to chunks
+      ) yield {
+        val size = seq.getSequenceLength / chunks
+        val begin = size * (i - 1) + 1
+        val end = if (i >= chunks) seq.getSequenceLength else size * i
+        new Interval(seq.getSequenceName, begin, end)
+      }
+    ).toList
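+    // Sequences are cut into ~10 Mb chunks so the per-interval loop below
+    // (intervals.par) can process the VCF in parallel.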
+
+    val totalBases = intervals.foldRight(0L)(_.length() + _)
+
+    // Reading vcf records
+    logger.info("Start reading vcf records")
+
+    def createStats: Stats = {
+      val stats = new Stats
+      //init stats
+      for (sample1 <- samples) {
+        stats.samplesStats += sample1 -> new SampleStats
+        for (sample2 <- samples) {
+          stats.samplesStats(sample1).sampleToSample += sample2 -> new SampleToSampleStats
+        }
+      }
+      stats
+    }
+
+    var variantCounter = 0L
+    var baseCounter = 0L
+
+    def status(count: Int, interval: Interval): Unit = {
+      variantCounter += count
+      baseCounter += interval.length()
+      val fraction = baseCounter.toFloat / totalBases * 100
+      logger.info(interval + " done, " + count + " rows processed")
+      logger.info("total: " + variantCounter + " rows processed, " + fraction + "% done")
+    }
+
+    val statsChunks = for (interval <- intervals.par) yield {
+      val reader = new VCFFileReader(commandArgs.inputFile, true)
+      var chunkCounter = 0
+      val stats = createStats
+      logger.info("Starting on: " + interval)
+
+      for (
+        record <- reader.query(interval.getSequence, interval.getStart, interval.getEnd) if record.getStart <= interval.getEnd
+      ) {
+        mergeNestedStatsMap(stats.generalStats, checkGeneral(record))
+        for (sample1 <- samples) {
+          val genotype = record.getGenotype(sample1)
+          mergeNestedStatsMap(stats.samplesStats(sample1).genotypeStats, checkGenotype(record, genotype))
+          for (sample2 <- samples) {
+            val genotype2 = record.getGenotype(sample2)
+            if (genotype.getAlleles == genotype2.getAlleles)
+              stats.samplesStats(sample1).sampleToSample(sample2).genotypeOverlap += 1
+            stats.samplesStats(sample1).sampleToSample(sample2).alleleOverlap += genotype.getAlleles.count(allele => genotype2.getAlleles.exists(_.basesMatch(allele)))
+          }
+        }
+        chunkCounter += 1
+      }
+      reader.close()
+      status(chunkCounter, interval)
+      stats
+    }
+
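+    // Merge the per-chunk Stats objects into a single Stats via `+=`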
+    val stats = statsChunks.toList.fold(createStats)(_ += _)
+
+    logger.info("Done reading vcf records")
+
+    writeField("QUAL", stats.generalStats.getOrElse("QUAL", mutable.Map()))
+    writeField("general", stats.generalStats.getOrElse("general", mutable.Map()))
+    writeGenotypeFields(stats, commandArgs.outputDir + "/genotype_", samples)
+    writeOverlap(stats, _.genotypeOverlap, commandArgs.outputDir + "/sample_compare/genotype_overlap", samples)
+    writeOverlap(stats, _.alleleOverlap, commandArgs.outputDir + "/sample_compare/allele_overlap", samples)
+
+    logger.info("Done")
+  }
+
+  /**
+   * Function to check all general stats: all record-level info except sample/genotype specific stats
+   * @param record VCF record to summarize
+   * @return Nested map of field -> value -> count
+   */
+  protected def checkGeneral(record: VariantContext): Map[String, Map[Any, Int]] = {
+    val buffer = mutable.Map[String, Map[Any, Int]]()
+
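+    // Increments the counter for `value` under field `key`, creating the
+    // nested entry on demand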
+    def addToBuffer(key: String, value: Any): Unit = {
+      val map = buffer.getOrElse(key, Map())
+      buffer += key -> (map + (value -> (map.getOrElse(value, 0) + 1)))
+    }
+
+    buffer += "QUAL" -> Map(record.getPhredScaledQual -> 1)
+
+    addToBuffer("general", "Total")
+    if (record.isBiallelic) addToBuffer("general", "Biallelic")
+    if (record.isComplexIndel) addToBuffer("general", "ComplexIndel")
+    if (record.isFiltered) addToBuffer("general", "Filtered")
+    if (record.isFullyDecoded) addToBuffer("general", "FullyDecoded")
+    if (record.isIndel) addToBuffer("general", "Indel")
+    if (record.isMixed) addToBuffer("general", "Mixed")
+    if (record.isMNP) addToBuffer("general", "MNP")
+    if (record.isMonomorphicInSamples) addToBuffer("general", "MonomorphicInSamples")
+    if (record.isNotFiltered) addToBuffer("general", "NotFiltered")
+    if (record.isPointEvent) addToBuffer("general", "PointEvent")
+    if (record.isPolymorphicInSamples) addToBuffer("general", "PolymorphicInSamples")
+    if (record.isSimpleDeletion) addToBuffer("general", "SimpleDeletion")
+    if (record.isSimpleInsertion) addToBuffer("general", "SimpleInsertion")
+    if (record.isSNP) addToBuffer("general", "SNP")
+    if (record.isStructuralIndel) addToBuffer("general", "StructuralIndel")
+    if (record.isSymbolic) addToBuffer("general", "Symbolic")
+    if (record.isSymbolicOrSV) addToBuffer("general", "SymbolicOrSV")
+    if (record.isVariant) addToBuffer("general", "Variant")
+
+    buffer.toMap
+  }
+
+  /**
+   * Function to check sample/genotype specific stats
+   * @param record VCF record the genotype belongs to
+   * @param genotype Genotype to summarize
+   * @return Nested map of field -> value -> count
+   */
+  protected def checkGenotype(record: VariantContext, genotype: Genotype): Map[String, Map[Any, Int]] = {
+    val buffer = mutable.Map[String, Map[Any, Int]]()
+
+    def addToBuffer(key: String, value: Any): Unit = {
+      val map = buffer.getOrElse(key, Map())
+      buffer += key -> (map + (value -> (map.getOrElse(value, 0) + 1)))
+    }
+
+    buffer += "DP" -> Map((if (genotype.hasDP) genotype.getDP else "not set") -> 1)
+    buffer += "GQ" -> Map((if (genotype.hasGQ) genotype.getGQ else "not set") -> 1)
+
+    val usedAlleles = (for (allele <- genotype.getAlleles) yield record.getAlleleIndex(allele)).toList
+
+    addToBuffer("general", "Total")
+    if (genotype.isHet) addToBuffer("general", "Het")
+    if (genotype.isHetNonRef) addToBuffer("general", "HetNonRef")
+    if (genotype.isHom) addToBuffer("general", "Hom")
+    if (genotype.isHomRef) addToBuffer("general", "HomRef")
+    if (genotype.isHomVar) addToBuffer("general", "HomVar")
+    if (genotype.isMixed) addToBuffer("general", "Mixed")
+    if (genotype.isNoCall) addToBuffer("general", "NoCall")
+    if (genotype.isNonInformative) addToBuffer("general", "NonInformative")
+    if (genotype.isAvailable) addToBuffer("general", "Available")
+    if (genotype.isCalled) addToBuffer("general", "Called")
+    if (genotype.isFiltered) addToBuffer("general", "Filtered")
+
+    if (genotype.hasAD) {
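+      // Split the allelic depths (AD) into ref/alt and into alleles that were
+      // called in this genotype ("AD-used") versus those that were not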
+      val ad = genotype.getAD
+      for (i <- 0 until ad.size if ad(i) > 0) {
+        addToBuffer("AD", ad(i))
+        if (i == 0) addToBuffer("AD-ref", ad(i))
+        if (i > 0) addToBuffer("AD-alt", ad(i))
+        if (usedAlleles.contains(i)) addToBuffer("AD-used", ad(i))
+        else addToBuffer("AD-not_used", ad(i))
+      }
+    }
+
+    buffer.toMap
+  }
+
+  /**
+   * Function to write stats to tsv files
+   * @param stats Stats to write
+   * @param prefix Path prefix for the output files
+   * @param samples Samples to include as columns
+   */
+  protected def writeGenotypeFields(stats: Stats, prefix: String, samples: List[String]) {
+    val fields = List("DP", "GQ", "AD", "AD-ref", "AD-alt", "AD-used", "AD-not_used", "general")
+    for (field <- fields) {
+      writeGenotypeField(stats, prefix, samples, field)
+    }
+  }
+
+  /**
+   * Function to write 1 specific genotype field
+   * @param stats Stats to write
+   * @param prefix Path prefix for the output file
+   * @param samples Samples to include as columns
+   * @param field Genotype field to write
+   */
+  protected def writeGenotypeField(stats: Stats, prefix: String, samples: List[String], field: String): Unit = {
+    val file = new File(prefix + field + ".tsv")
+    file.getParentFile.mkdirs()
+    val writer = new PrintWriter(file)
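+    // Layout: one column per sample, one row per observed value of `field`;
+    // each cell holds the number of genotypes with that value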
+    writer.println(samples.mkString(field + "\t", "\t", ""))
+    val keySet = (for (sample <- samples) yield stats.samplesStats(sample).genotypeStats.getOrElse(field, Map[Any, Int]()).keySet).fold(Set[Any]())(_ ++ _)
+    for (key <- keySet.toList.sortWith(sortAnyAny(_, _))) {
+      val values = for (sample <- samples) yield stats.samplesStats(sample).genotypeStats.getOrElse(field, Map[Any, Int]()).getOrElse(key, 0)
+      writer.println(values.mkString(key + "\t", "\t", ""))
+    }
+    writer.close()
+    plotLine(file)
+  }
+
+  /**
+   * Function to write 1 specific general field
+   * @param prefix Field name, also used as the output file name
+   * @param data Value counts to write
+   * @return The written tsv file
+   */
+  protected def writeField(prefix: String, data: mutable.Map[Any, Int]): File = {
+    val file = new File(commandArgs.outputDir + "/" + prefix + ".tsv")
+    logger.debug(file)
+    file.getParentFile.mkdirs()
+    val writer = new PrintWriter(file)
+    writer.println("\t" + prefix)
+    for (key <- data.keySet.toList.sortWith(sortAnyAny(_, _))) {
+      writer.println(key + "\t" + data(key))
+    }
+    writer.close()
+    file
+  }
+
+  /**
+   * Function to sort Any values: numerically when both are numbers, otherwise by string value
+   * @param a Left value
+   * @param b Right value
+   * @return true if a sorts before b
+   */
+  def sortAnyAny(a: Any, b: Any): Boolean = {
+    a match {
+      case ai: Int => {
+        b match {
+          case bi: Int    => ai < bi
+          case bd: Double => ai < bd
+          case _          => a.toString < b.toString
+        }
+      }
+      case ad: Double => {
+        b match {
+          case bi: Int    => ad < bi
+          case bd: Double => ad < bd
+          case _          => a.toString < b.toString
+        }
+      }
+      case _ => a.toString < b.toString
+    }
+  }
+
+  /**
+   * Function to write sample to sample compare tsv's / heatmaps
+   * @param stats Stats to write
+   * @param function Function to extract the targeted value from a SampleToSampleStats
+   * @param prefix Path prefix for the output files
+   * @param samples Samples to include
+   */
+  def writeOverlap(stats: Stats, function: SampleToSampleStats => Int,
+                   prefix: String, samples: List[String]): Unit = {
+    val absFile = new File(prefix + ".abs.tsv")
+    val relFile = new File(prefix + ".rel.tsv")
+
+    absFile.getParentFile.mkdirs()
+
+    val absWriter = new PrintWriter(absFile)
+    val relWriter = new PrintWriter(relFile)
+
+    absWriter.println(samples.mkString("\t", "\t", ""))
+    relWriter.println(samples.mkString("\t", "\t", ""))
+    for (sample1 <- samples) {
+      val values = for (sample2 <- samples) yield function(stats.samplesStats(sample1).sampleToSample(sample2))
+
+      absWriter.println(values.mkString(sample1 + "\t", "\t", ""))
+
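+      // Normalize against the self-comparison, so each row's relative overlap
+      // is a fraction of that sample's own total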
+      val total = function(stats.samplesStats(sample1).sampleToSample(sample1))
+      relWriter.println(values.map(_.toFloat / total).mkString(sample1 + "\t", "\t", ""))
+    }
+    absWriter.close()
+    relWriter.close()
+
+    plotHeatmap(relFile)
+  }
+
+  /**
+   * Plots heatmaps of the target tsv file
+   * @param file Tsv file to plot
+   */
+  def plotHeatmap(file: File) {
+    executeRscript("plotHeatmap.R", Array(file.getAbsolutePath,
+      file.getAbsolutePath.stripSuffix(".tsv") + ".heatmap.png",
+      file.getAbsolutePath.stripSuffix(".tsv") + ".heatmap.clustering.png",
+      file.getAbsolutePath.stripSuffix(".tsv") + ".heatmap.dendrogram.png"))
+  }
+
+  /**
+   * Plots a line graph of the target tsv file
+   * @param file Tsv file to plot
+   */
+  def plotLine(file: File) {
+    executeRscript("plotXY.R", Array(file.getAbsolutePath,
+      file.getAbsolutePath.stripSuffix(".tsv") + ".xy.png"))
+  }
+
+  /**
+   * Function to execute Rscript as a subprocess
+   * @param resource Name of the R script on the classpath
+   * @param args Arguments passed to the script
+   */
+  def executeRscript(resource: String, args: Array[String]): Unit = {
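+    // The R script ships as a classpath resource, so copy it to a temporary
+    // file first to make it runnable with Rscript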
+    val is = getClass.getResourceAsStream(resource)
+    val file = File.createTempFile("script.", "." + resource)
+    val os = new FileOutputStream(file)
+    org.apache.commons.io.IOUtils.copy(is, os)
+    os.close()
+
+    val command: String = "Rscript " + file + " " + args.mkString(" ")
+
+    logger.info("Starting: " + command)
+    val process = Process(command).run(ProcessLogger(x => logger.debug(x), x => logger.debug(x)))
+    if (process.exitValue() == 0) logger.info("Done: " + command)
+    else {
+      logger.warn("Failed: " + command)
+      if (!logger.isDebugEnabled) logger.warn("Use -l debug for more info")
+    }
+  }
+}
diff --git a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/utils/ConfigUtils.scala b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/utils/ConfigUtils.scala
index 16a00b55157918b6ce92a49ff29d1a22a06110d5..9f1cddb7913cf7846daa72c7b9e695988b3a14c6 100644
--- a/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/utils/ConfigUtils.scala
+++ b/public/biopet-framework/src/main/scala/nl/lumc/sasc/biopet/utils/ConfigUtils.scala
@@ -336,8 +336,8 @@ object ConfigUtils extends Logging {
      * @return
      */
     implicit def configValue2file(value: ConfigValue): File = {
-      //TODO: throw IllegalStateException
-      if (value != null && value.value != null && value.value != None) new File(any2string(value.value)) else null
+      if (value != null && value.value != null && value.value != None) new File(any2string(value.value))
+      else throw new IllegalStateException("Value does not exist")
     }
 
     /**
@@ -346,7 +346,8 @@ object ConfigUtils extends Logging {
      * @return
      */
     implicit def configValue2optionFile(value: ConfigValue): Option[File] = {
-      if (value != null && value.value != null && value.value != None) Some(new File(any2string(value.value))) else None
+      if (value != null && value.value != null && value.value != None) Some(new File(any2string(value.value)))
+      else None
     }
 
     /**
@@ -355,8 +356,8 @@ object ConfigUtils extends Logging {
      * @return
      */
     implicit def configValue2string(value: ConfigValue): String = {
-      //TODO: throw IllegalStateException
-      if (value != null && value.value != null && value.value != None) any2string(value.value) else null
+      if (value != null && value.value != null && value.value != None) any2string(value.value)
+      else throw new IllegalStateException("Value does not exist")
     }
 
     /**
@@ -365,7 +366,8 @@ object ConfigUtils extends Logging {
      * @return
      */
     implicit def configValue2optionString(value: ConfigValue): Option[String] = {
-      if (value != null && value.value != null && value.value != None) Some(any2string(value.value)) else None
+      if (value != null && value.value != null && value.value != None) Some(any2string(value.value))
+      else None
     }
 
     /**
@@ -384,7 +386,8 @@ object ConfigUtils extends Logging {
      * @return
      */
     implicit def configValue2optionLong(value: ConfigValue): Option[Long] = {
-      if (value != null && value.value != null && value.value != None) Option(any2long(value.value)) else None
+      if (value != null && value.value != null && value.value != None) Option(any2long(value.value))
+      else None
     }
 
     /**
@@ -403,7 +406,8 @@ object ConfigUtils extends Logging {
      * @return
      */
     implicit def configValue2optionInt(value: ConfigValue): Option[Int] = {
-      if (value != null && value.value != null && value.value != None) Option(any2int(value.value)) else None
+      if (value != null && value.value != null && value.value != None) Option(any2int(value.value))
+      else None
     }
 
     /**
@@ -422,7 +426,8 @@ object ConfigUtils extends Logging {
      * @return
      */
     implicit def configValue2optionDouble(value: ConfigValue): Option[Double] = {
-      if (value != null && value.value != null && value.value != None) Option(any2double(value.value)) else None
+      if (value != null && value.value != null && value.value != None) Option(any2double(value.value))
+      else None
     }
 
     /**
@@ -441,7 +446,8 @@ object ConfigUtils extends Logging {
      * @return
      */
     implicit def configValue2optionFloat(value: ConfigValue): Option[Float] = {
-      if (value != null && value.value != null && value.value != None) Option(any2float(value.value)) else None
+      if (value != null && value.value != null && value.value != None) Option(any2float(value.value))
+      else None
     }
 
     /**
@@ -460,7 +466,8 @@ object ConfigUtils extends Logging {
      * @return
      */
     implicit def configValue2optionBoolean(value: ConfigValue): Option[Boolean] = {
-      if (value != null && value.value != null && value.value != None) Option(any2boolean(value.value)) else None
+      if (value != null && value.value != null && value.value != None) Option(any2boolean(value.value))
+      else None
     }
 
     /**
diff --git a/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/tools/FastqSyncTest.scala b/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/tools/FastqSyncTest.scala
new file mode 100644
index 0000000000000000000000000000000000000000..1c98a36f2698d23806aa48de151b4cffa20eb056
--- /dev/null
+++ b/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/tools/FastqSyncTest.scala
@@ -0,0 +1,230 @@
+/**
+ * Copyright (c) 2014 Leiden University Medical Center - Sequencing Analysis Support Core <sasc@lumc.nl>
+ * @author Wibowo Arindrarto <w.arindrarto@lumc.nl>
+ */
+package nl.lumc.sasc.biopet.tools
+
+import java.io.File
+import java.nio.file.Paths
+import scala.collection.JavaConverters._
+
+import htsjdk.samtools.fastq.{ AsyncFastqWriter, FastqReader, FastqRecord }
+import org.mockito.Mockito.{ inOrder => inOrd, when }
+import org.scalatest.Matchers
+import org.scalatest.mock.MockitoSugar
+import org.scalatest.testng.TestNGSuite
+import org.testng.annotations.{ DataProvider, Test }
+
+class FastqSyncTest extends TestNGSuite with MockitoSugar with Matchers {
+
+  import FastqSync._
+
+  private def resourceFile(p: String): File =
+    new File(resourcePath(p))
+
+  private def resourcePath(p: String): String =
+    Paths.get(getClass.getResource(p).toURI).toString
+
+  // Helper function to create an iterator over FastqRecords given their IDs
+  private def recordsOver(ids: String*): java.util.Iterator[FastqRecord] = ids
+    .map(x => new FastqRecord(x, "A", "", "H"))
+    .toIterator.asJava
+
+  @DataProvider(name = "mockReaderProvider")
+  def mockReaderProvider() =
+    Array(
+      Array(mock[FastqReader], mock[FastqReader], mock[FastqReader]))
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testDefault(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(aMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(bMock.iterator) thenReturn recordsOver("1", "2", "3")
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 3
+    sync(0) shouldBe (new FastqRecord("1", "A", "", "H"), new FastqRecord("1", "A", "", "H"))
+    sync(1) shouldBe (new FastqRecord("2", "A", "", "H"), new FastqRecord("2", "A", "", "H"))
+    sync(2) shouldBe (new FastqRecord("3", "A", "", "H"), new FastqRecord("3", "A", "", "H"))
+    counts.numDiscard1 shouldBe 0
+    counts.numDiscard2 shouldBe 0
+    counts.numKept shouldBe 3
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testRefTooShort(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1", "2")
+    when(aMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(bMock.iterator) thenReturn recordsOver("1", "2", "3")
+
+    val thrown = intercept[NoSuchElementException] {
+      syncFastq(refMock, aMock, bMock)
+    }
+    thrown.getMessage should ===("Reference record stream shorter than expected")
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testSeqAEmpty(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(aMock.iterator) thenReturn recordsOver()
+    when(bMock.iterator) thenReturn recordsOver("1", "2", "3")
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 0
+    counts.numDiscard1 shouldBe 0
+    counts.numDiscard2 shouldBe 3
+    counts.numKept shouldBe 0
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testSeqBEmpty(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(aMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(bMock.iterator) thenReturn recordsOver()
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 0
+    counts.numDiscard1 shouldBe 3
+    counts.numDiscard2 shouldBe 0
+    counts.numKept shouldBe 0
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testSeqAShorter(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(aMock.iterator) thenReturn recordsOver("2", "3")
+    when(bMock.iterator) thenReturn recordsOver("1", "2", "3")
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 2
+    sync(0) shouldBe (new FastqRecord("2", "A", "", "H"), new FastqRecord("2", "A", "", "H"))
+    sync(1) shouldBe (new FastqRecord("3", "A", "", "H"), new FastqRecord("3", "A", "", "H"))
+    counts.numDiscard1 shouldBe 0
+    counts.numDiscard2 shouldBe 1
+    counts.numKept shouldBe 2
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testSeqBShorter(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(aMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(bMock.iterator) thenReturn recordsOver("2", "3")
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 2
+    sync(0) shouldBe (new FastqRecord("2", "A", "", "H"), new FastqRecord("2", "A", "", "H"))
+    sync(1) shouldBe (new FastqRecord("3", "A", "", "H"), new FastqRecord("3", "A", "", "H"))
+    counts.numDiscard1 shouldBe 1
+    counts.numDiscard2 shouldBe 0
+    counts.numKept shouldBe 2
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testSeqABShorter(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1", "2", "3")
+    when(aMock.iterator) thenReturn recordsOver("2", "3")
+    when(bMock.iterator) thenReturn recordsOver("1", "2")
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 1
+    sync(0) shouldBe (new FastqRecord("2", "A", "", "H"), new FastqRecord("2", "A", "", "H"))
+    counts.numDiscard1 shouldBe 1
+    counts.numDiscard2 shouldBe 1
+    counts.numKept shouldBe 1
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testSeqABShorterPairMarkSlash(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1/1", "2/1", "3/1")
+    when(aMock.iterator) thenReturn recordsOver("2/1", "3/1")
+    when(bMock.iterator) thenReturn recordsOver("1/2", "2/2")
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 1
+    sync(0) shouldBe (new FastqRecord("2/1", "A", "", "H"), new FastqRecord("2/2", "A", "", "H"))
+    counts.numDiscard1 shouldBe 1
+    counts.numDiscard2 shouldBe 1
+    counts.numKept shouldBe 1
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testSeqABShorterPairMarkUnderscore(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1_1", "2_1", "3_1")
+    when(aMock.iterator) thenReturn recordsOver("2_1", "3_1")
+    when(bMock.iterator) thenReturn recordsOver("1_2", "2_2")
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 1
+    sync(0) shouldBe (new FastqRecord("2_1", "A", "", "H"), new FastqRecord("2_2", "A", "", "H"))
+    counts.numDiscard1 shouldBe 1
+    counts.numDiscard2 shouldBe 1
+    counts.numKept shouldBe 1
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testSeqABShorterWithDesc(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1 desc1b", "2 desc2b", "3 desc3b")
+    when(aMock.iterator) thenReturn recordsOver("2 desc2a", "3 desc3a")
+    when(bMock.iterator) thenReturn recordsOver("1 desc1b", "2 desc2b")
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 1
+    sync(0) shouldBe (new FastqRecord("2 desc2a", "A", "", "H"), new FastqRecord("2 desc2b", "A", "", "H"))
+    counts.numDiscard1 shouldBe 1
+    counts.numDiscard2 shouldBe 1
+    counts.numKept shouldBe 1
+  }
+
+  @Test(dataProvider = "mockReaderProvider")
+  def testComplex(refMock: FastqReader, aMock: FastqReader, bMock: FastqReader) = {
+    when(refMock.iterator) thenReturn recordsOver("1/2 yep", "2/2 yep", "3/2 yep", "4/2 yep", "5/2 yep")
+    when(aMock.iterator) thenReturn recordsOver("1/1 yep", "2/1 yep", "4/1 yep")
+    when(bMock.iterator) thenReturn recordsOver("1/2 yep", "3/2 yep", "4/2 yep")
+
+    val (sync, counts) = syncFastq(refMock, aMock, bMock)
+    sync.length shouldBe 2
+    sync(0) shouldBe (new FastqRecord("1/1 yep", "A", "", "H"), new FastqRecord("1/2 yep", "A", "", "H"))
+    sync(1) shouldBe (new FastqRecord("4/1 yep", "A", "", "H"), new FastqRecord("4/2 yep", "A", "", "H"))
+    counts.numDiscard1 shouldBe 1
+    counts.numDiscard2 shouldBe 1
+    counts.numKept shouldBe 2
+  }
+
+  @Test def testWriteSynced() = {
+    val aMock = mock[AsyncFastqWriter]
+    val bMock = mock[AsyncFastqWriter]
+    val sync = Stream(
+      (new FastqRecord("1", "A", "", "H"), new FastqRecord("1", "T", "", "E")),
+      (new FastqRecord("2", "A", "", "H"), new FastqRecord("2", "T", "", "E")))
+    val counts = SyncCounts(4, 3, 2)
+    val obs = inOrd(aMock, bMock)
+    val stdout = new java.io.ByteArrayOutputStream
+    Console.withOut(stdout) {
+      writeSyncedFastq(sync, counts, aMock, bMock)
+    }
+    stdout.toString should ===(List(
+      "Filtered 4 reads from first read file.",
+      "Filtered 3 reads from second read file.",
+      "Synced read files contain 2 reads.\n"
+    ).mkString("\n"))
+    obs.verify(aMock).write(new FastqRecord("1", "A", "", "H"))
+    obs.verify(bMock).write(new FastqRecord("1", "T", "", "E"))
+    obs.verify(aMock).write(new FastqRecord("2", "A", "", "H"))
+    obs.verify(bMock).write(new FastqRecord("2", "T", "", "E"))
+  }
+
+  @Test def testArgsMinimum() = {
+    val args = Array(
+      "-r", resourcePath("/paired01a.fq"),
+      "-i", resourcePath("/paired01a.fq"),
+      "-j", resourcePath("/paired01b.fq"),
+      "-o", "/tmp/mockout1.fq",
+      "-p", "/tmp/mockout2.fq")
+    val parsed = parseArgs(args)
+    parsed.refFastq shouldBe resourceFile("/paired01a.fq")
+    parsed.inputFastq1 shouldBe resourceFile("/paired01a.fq")
+    parsed.inputFastq2 shouldBe resourceFile("/paired01b.fq")
+    parsed.outputFastq1 shouldBe new File("/tmp/mockout1.fq")
+    parsed.outputFastq2 shouldBe new File("/tmp/mockout2.fq")
+  }
+}
\ No newline at end of file
diff --git a/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/tools/VcfStatsTest.scala b/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/tools/VcfStatsTest.scala
new file mode 100644
index 0000000000000000000000000000000000000000..9ac90deecad83fab23d95bfafaf89442899b798e
--- /dev/null
+++ b/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/tools/VcfStatsTest.scala
@@ -0,0 +1,77 @@
+package nl.lumc.sasc.biopet.tools
+
+import org.scalatest.Matchers
+import org.scalatest.testng.TestNGSuite
+import org.testng.annotations.Test
+import scala.collection.mutable
+import VcfStats._
+
+/**
+ * Created by pjvan_thof on 2/5/15.
+ */
+class VcfStatsTest extends TestNGSuite with Matchers {
+
+  @Test
+  def testSampleToSampleStats: Unit = {
+    val s1 = SampleToSampleStats()
+    val s2 = SampleToSampleStats()
+    s1.alleleOverlap shouldBe 0
+    s1.genotypeOverlap shouldBe 0
+    s2.alleleOverlap shouldBe 0
+    s2.genotypeOverlap shouldBe 0
+
+    s1 += s2
+    s1.alleleOverlap shouldBe 0
+    s1.genotypeOverlap shouldBe 0
+    s2.alleleOverlap shouldBe 0
+    s2.genotypeOverlap shouldBe 0
+
+    s2.alleleOverlap = 2
+    s2.genotypeOverlap = 3
+
+    s1 += s2
+    s1.alleleOverlap shouldBe 2
+    s1.genotypeOverlap shouldBe 3
+    s2.alleleOverlap shouldBe 2
+    s2.genotypeOverlap shouldBe 3
+
+    s1 += s2
+    s1.alleleOverlap shouldBe 4
+    s1.genotypeOverlap shouldBe 6
+    s2.alleleOverlap shouldBe 2
+    s2.genotypeOverlap shouldBe 3
+  }
+
+  @Test
+  def testSampleStats: Unit = {
+    val s1 = SampleStats()
+    val s2 = SampleStats()
+
+    s1.sampleToSample += "s1" -> SampleToSampleStats()
+    s1.sampleToSample += "s2" -> SampleToSampleStats()
+    s2.sampleToSample += "s1" -> SampleToSampleStats()
+    s2.sampleToSample += "s2" -> SampleToSampleStats()
+
+    s1.sampleToSample("s1").alleleOverlap = 1
+    s2.sampleToSample("s2").alleleOverlap = 2
+
+    s1.genotypeStats += "1" -> mutable.Map(1 -> 1)
+    s2.genotypeStats += "2" -> mutable.Map(2 -> 2)
+
+    val ss1 = SampleToSampleStats()
+    val ss2 = SampleToSampleStats()
+
+    s1 += s2
+    s1.genotypeStats shouldBe mutable.Map("1" -> mutable.Map(1 -> 1), "2" -> mutable.Map(2 -> 2))
+    ss1.alleleOverlap = 1
+    ss2.alleleOverlap = 2
+    s1.sampleToSample shouldBe mutable.Map("s1" -> ss1, "s2" -> ss2)
+
+    s1 += s2
+    s1.genotypeStats shouldBe mutable.Map("1" -> mutable.Map(1 -> 1), "2" -> mutable.Map(2 -> 4))
+
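+    // Adding s1 to itself doubles every count in place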
+    s1 += s1
+    s1.genotypeStats shouldBe mutable.Map("1" -> mutable.Map(1 -> 2), "2" -> mutable.Map(2 -> 8))
+  }
+
+}
diff --git a/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/utils/ConfigUtilsTest.scala b/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/utils/ConfigUtilsTest.scala
index 6b6f990bf5938b4adb073812de835c13ea830236..26f989c5369c4e138ded82620d1d03f0c076ce7c 100644
--- a/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/utils/ConfigUtilsTest.scala
+++ b/public/biopet-framework/src/test/scala/nl/lumc/sasc/biopet/utils/ConfigUtilsTest.scala
@@ -235,10 +235,14 @@ class ConfigUtilsTest extends TestNGSuite with Matchers {
       }
 
       var string: String = ConfigValue(index, index, "test")
-      string = ConfigValue(index, index, null)
+      intercept[IllegalStateException] {
+        string = ConfigValue(index, index, null)
+      }
 
       var file: File = ConfigValue(index, index, "test")
-      file = ConfigValue(index, index, null)
+      intercept[IllegalStateException] {
+        file = ConfigValue(index, index, null)
+      }
     }
   }
 }
diff --git a/public/biopet-public-package/pom.xml b/public/biopet-public-package/pom.xml
index 4d303b2e7e913a2f584e47a0031da7fb9b75f029..0ebd7278059952eca93e42862ba22be632706c6b 100644
--- a/public/biopet-public-package/pom.xml
+++ b/public/biopet-public-package/pom.xml
@@ -75,11 +75,21 @@
             <artifactId>Yamsvp</artifactId>
             <version>${project.version}</version>
         </dependency>
+        <dependency>
+            <groupId>nl.lumc.sasc</groupId>
+            <artifactId>Kopisu</artifactId>
+            <version>${project.version}</version>
+        </dependency>
         <dependency>
             <groupId>nl.lumc.sasc</groupId>
             <artifactId>Bam2Wig</artifactId>
             <version>${project.version}</version>
         </dependency>
+        <dependency>
+            <groupId>nl.lumc.sasc</groupId>
+            <artifactId>Carp</artifactId>
+            <version>${project.version}</version>
+        </dependency>
     </dependencies>
     <build>
         <plugins>
diff --git a/public/biopet-public-package/src/main/scala/nl/lumc/sasc/biopet/core/BiopetExecutablePublic.scala b/public/biopet-public-package/src/main/scala/nl/lumc/sasc/biopet/core/BiopetExecutablePublic.scala
index febb389833a584312e72c83aca9ad57b49306f10..279e33e4ddac28f317b9da98bcda0b9b2b647877 100644
--- a/public/biopet-public-package/src/main/scala/nl/lumc/sasc/biopet/core/BiopetExecutablePublic.scala
+++ b/public/biopet-public-package/src/main/scala/nl/lumc/sasc/biopet/core/BiopetExecutablePublic.scala
@@ -23,16 +23,20 @@ object BiopetExecutablePublic extends BiopetExecutable {
     nl.lumc.sasc.biopet.pipelines.bammetrics.BamMetrics,
     nl.lumc.sasc.biopet.pipelines.yamsvp.Yamsvp,
     nl.lumc.sasc.biopet.pipelines.sage.Sage,
-    nl.lumc.sasc.biopet.pipelines.bamtobigwig.Bam2Wig
+    nl.lumc.sasc.biopet.pipelines.bamtobigwig.Bam2Wig,
+    nl.lumc.sasc.biopet.pipelines.kopisu.ConiferPipeline,
+    nl.lumc.sasc.biopet.pipelines.carp.Carp
   )
 
   def tools: List[MainCommand] = List(
     nl.lumc.sasc.biopet.tools.WipeReads,
     nl.lumc.sasc.biopet.tools.ExtractAlignedFastq,
+    nl.lumc.sasc.biopet.tools.FastqSync,
     nl.lumc.sasc.biopet.tools.BiopetFlagstat,
     nl.lumc.sasc.biopet.tools.CheckAllelesVcfInBam,
     nl.lumc.sasc.biopet.tools.VcfToTsv,
     nl.lumc.sasc.biopet.tools.VcfFilter,
+    nl.lumc.sasc.biopet.tools.VcfStats,
     nl.lumc.sasc.biopet.tools.FindRepeatsPacBio,
     nl.lumc.sasc.biopet.tools.BedToInterval,
     nl.lumc.sasc.biopet.tools.MpileupToVcf,
diff --git a/public/carp/.gitignore b/public/carp/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a6f89c2da7a029afa02b6e7a2bf80ad34958a311
--- /dev/null
+++ b/public/carp/.gitignore
@@ -0,0 +1 @@
+/target/
\ No newline at end of file
diff --git a/public/carp/pom.xml b/public/carp/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..482b83147312cf332cc7325c6a9196c092dcd91a
--- /dev/null
+++ b/public/carp/pom.xml
@@ -0,0 +1,48 @@
+<!--
+
+    Biopet is built on top of GATK Queue for building bioinformatic
+    pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+    SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+    should also be able to execute Biopet tools and pipelines.
+
+    Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+
+    Contact us at: sasc@lumc.nl
+
+    A dual licensing mode is applied. The source code within this project that are
+    not part of GATK Queue is freely available for non-commercial use under an AGPL
+    license; For commercial users or users who do not want to follow the AGPL
+    license, please contact us to obtain a separate license.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>nl.lumc.sasc</groupId>
+    <artifactId>Carp</artifactId>
+    <packaging>jar</packaging>
+    
+    <parent>
+        <groupId>nl.lumc.sasc</groupId>
+        <artifactId>Biopet</artifactId>
+        <version>0.3.0-DEV</version>
+        <relativePath>../</relativePath>
+    </parent>
+    
+    <inceptionYear>2014</inceptionYear>
+    <name>Carp</name>
+
+    <dependencies>
+        <dependency>
+            <groupId>nl.lumc.sasc</groupId>
+            <artifactId>BiopetFramework</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>nl.lumc.sasc</groupId>
+            <artifactId>Mapping</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/public/carp/src/main/scala/nl/lumc/sasc/biopet/pipelines/carp/Carp.scala b/public/carp/src/main/scala/nl/lumc/sasc/biopet/pipelines/carp/Carp.scala
new file mode 100644
index 0000000000000000000000000000000000000000..578f3afc79f6cac8d2621399bb577222383ee2bb
--- /dev/null
+++ b/public/carp/src/main/scala/nl/lumc/sasc/biopet/pipelines/carp/Carp.scala
@@ -0,0 +1,124 @@
+/**
+ * Biopet is built on top of GATK Queue for building bioinformatic
+ * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+ * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+ * should also be able to execute Biopet tools and pipelines.
+ *
+ * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+ *
+ * Contact us at: sasc@lumc.nl
+ *
+ * A dual licensing mode is applied. The source code within this project that are
+ * not part of GATK Queue is freely available for non-commercial use under an AGPL
+ * license; For commercial users or users who do not want to follow the AGPL
+ * license, please contact us to obtain a separate license.
+ */
+package nl.lumc.sasc.biopet.pipelines.carp
+
+import java.io.File
+
+import nl.lumc.sasc.biopet.extensions.Ln
+import nl.lumc.sasc.biopet.extensions.macs2.Macs2CallPeak
+import nl.lumc.sasc.biopet.extensions.picard.MergeSamFiles
+import nl.lumc.sasc.biopet.utils.ConfigUtils
+import org.broadinstitute.gatk.queue.QScript
+import org.broadinstitute.gatk.utils.commandline.{ Argument, Input }
+import nl.lumc.sasc.biopet.core._
+import nl.lumc.sasc.biopet.core.config._
+import nl.lumc.sasc.biopet.pipelines.mapping.Mapping
+
+/**
+ * Carp pipeline
+ * ChIP-seq analysis pipeline
+ * This pipeline performs QC, mapping and peak calling
+ */
+class Carp(val root: Configurable) extends QScript with MultiSampleQScript {
+  qscript =>
+  def this() = this(null)
+
+  override def defaults = ConfigUtils.mergeMaps(Map(
+    "mapping" -> Map("skip_markduplicates" -> true)
+  ), super.defaults)
+
+  def makeSample(id: String) = new Sample(id)
+  class Sample(sampleId: String) extends AbstractSample(sampleId) {
+    def makeLibrary(id: String) = new Library(id)
+    class Library(libId: String) extends AbstractLibrary(libId) {
+      val mapping = new Mapping(qscript)
+
+      def addJobs(): Unit = {
+        if (config.contains("R1")) {
+          mapping.input_R1 = config("R1")
+          if (config.contains("R2")) mapping.input_R2 = config("R2")
+          mapping.libId = libId
+          mapping.sampleId = sampleId
+          mapping.outputDir = libDir
+
+          mapping.init
+          mapping.biopetScript
+          addAll(mapping.functions)
+
+        } else logger.error("Sample: " + sampleId + ": No R1 found for library: " + libId)
+      }
+    }
+
+    val bamFile = createFile(".bam")
+    val controls: List[String] = config("control", default = Nil)
+
+    def addJobs(): Unit = {
+      addPerLibJobs()
+      val bamFiles = libraries.map(_._2.mapping.finalBamFile).toList
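+      // A single library's BAM (plus index) is symlinked into place; multiple
+      // libraries are merged into one coordinate-sorted BAM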
+      if (bamFiles.length == 1) {
+        add(Ln(qscript, bamFiles.head, bamFile))
+        val oldIndex = new File(bamFiles.head.getAbsolutePath.stripSuffix(".bam") + ".bai")
+        val newIndex = new File(bamFile.getAbsolutePath.stripSuffix(".bam") + ".bai")
+        add(Ln(qscript, oldIndex, newIndex))
+      } else if (bamFiles.length > 1) {
+        val merge = new MergeSamFiles(qscript)
+        merge.input = bamFiles
+        merge.sortOrder = "coordinate"
+        merge.output = bamFile
+        add(merge)
+
+        //TODO: Add BigWIg track
+      }
+
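+      // Call peaks on the per-sample BAM with MACS2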
+      val macs2 = new Macs2CallPeak(qscript)
+      macs2.treatment = bamFile
+      macs2.name = Some(sampleId)
+      macs2.outputdir = sampleDir + "macs2/" + sampleId + "/"
+      add(macs2)
+    }
+  }
+
+  def init() {
+  }
+
+  def biopetScript() {
+    // Define what the pipeline should do
+    // First step is QC, this will be done with Flexiprep
+    // Second step is mapping, this will be done with the Mapping pipeline
+    // Third step is calling peaks on the bam files produced with the mapping pipeline, this will be done with MACS2
+    logger.info("Starting CArP pipeline")
+
+    addSamplesJobs()
+  }
+
+  def addMultiSampleJobs(): Unit = {
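+    // For every sample that lists controls, run a second MACS2 round with the
+    // control sample's BAM as background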
+    for ((sampleId, sample) <- samples) {
+      for (controlId <- sample.controls) {
+        if (!samples.contains(controlId))
+          throw new IllegalStateException("For sample: " + sampleId + " this control: " + controlId + " does not exist")
+        val macs2 = new Macs2CallPeak(this)
+        macs2.treatment = sample.bamFile
+        macs2.control = samples(controlId).bamFile
+        macs2.name = Some(sampleId + "_VS_" + controlId)
+        macs2.outputdir = sample.sampleDir + "/" + "macs2/" + macs2.name.get + "/"
+        add(macs2)
+      }
+    }
+  }
+}
+
+object Carp extends PipelineCommand
diff --git a/public/flexiprep/src/main/scala/nl/lumc/sasc/biopet/pipelines/flexiprep/Flexiprep.scala b/public/flexiprep/src/main/scala/nl/lumc/sasc/biopet/pipelines/flexiprep/Flexiprep.scala
index e44260588ee1037911be41438c38973d3e81fff8..98daba52d0960fa63b4f33b43117c3fb456bddc4 100644
--- a/public/flexiprep/src/main/scala/nl/lumc/sasc/biopet/pipelines/flexiprep/Flexiprep.scala
+++ b/public/flexiprep/src/main/scala/nl/lumc/sasc/biopet/pipelines/flexiprep/Flexiprep.scala
@@ -21,7 +21,7 @@ import org.broadinstitute.gatk.utils.commandline.{ Input, Argument }
 import nl.lumc.sasc.biopet.core.{ BiopetQScript, PipelineCommand }
 import nl.lumc.sasc.biopet.core.config.Configurable
 import nl.lumc.sasc.biopet.extensions.{ Gzip, Pbzip2, Md5sum, Zcat, Seqstat }
-import nl.lumc.sasc.biopet.scripts.{ FastqSync }
+import nl.lumc.sasc.biopet.tools.FastqSync
 
 class Flexiprep(val root: Configurable) extends QScript with BiopetQScript {
   def this() = this(null)
@@ -30,7 +30,7 @@ class Flexiprep(val root: Configurable) extends QScript with BiopetQScript {
   var input_R1: File = _
 
   @Input(doc = "R2 fastq file (gzipped allowed)", shortName = "R2", required = false)
-  var input_R2: File = _
+  var input_R2: Option[File] = None
 
   /** Skip Trim fastq files */
   var skipTrim: Boolean = config("skip_trim", default = false)
@@ -38,13 +38,17 @@ class Flexiprep(val root: Configurable) extends QScript with BiopetQScript {
   /** Skip Clip fastq files */
   var skipClip: Boolean = config("skip_clip", default = false)
 
+  // TODO: hide sampleId and libId from the command line so they do not interfere with our config values
+
   /** Sample name */
+  @Argument(doc = "Sample ID", shortName = "sample", required = true)
   var sampleId: String = _
 
   /** Library name */
-  var libraryId: String = _
+  @Argument(doc = "Library ID", shortName = "library", required = true)
+  var libId: String = _
 
-  var paired: Boolean = (input_R2 != null)
+  var paired: Boolean = input_R2.isDefined
   var R1_ext: String = _
   var R2_ext: String = _
   var R1_name: String = _
@@ -58,12 +62,12 @@ class Flexiprep(val root: Configurable) extends QScript with BiopetQScript {
   val summary = new FlexiprepSummary(this)
 
   def init() {
-    if (input_R1 == null) throw new IllegalStateException("Missing R1 on flexiprep module")
-    if (outputDir == null) throw new IllegalStateException("Missing Output directory on flexiprep module")
-    if (sampleId == null) throw new IllegalStateException("Missing Sample name on flexiprep module")
-    if (libraryId == null) throw new IllegalStateException("Missing Library name on flexiprep module")
-    else if (!outputDir.endsWith("/")) outputDir += "/"
-    paired = (input_R2 != null)
+    require(outputDir != null, "Missing output directory on flexiprep module")
+    require(input_R1 != null, "Missing input R1 on flexiprep module")
+    require(sampleId != null, "Missing sample ID on flexiprep module")
+    require(libId != null, "Missing library ID on flexiprep module")
+
+    paired = input_R2.isDefined
 
     if (input_R1.endsWith(".gz")) R1_name = input_R1.getName.substring(0, input_R1.getName.lastIndexOf(".gz"))
     else if (input_R1.endsWith(".gzip")) R1_name = input_R1.getName.substring(0, input_R1.getName.lastIndexOf(".gzip"))
@@ -71,15 +75,19 @@ class Flexiprep(val root: Configurable) extends QScript with BiopetQScript {
     R1_ext = R1_name.substring(R1_name.lastIndexOf("."), R1_name.size)
     R1_name = R1_name.substring(0, R1_name.lastIndexOf(R1_ext))
 
-    if (paired) {
-      if (input_R2.endsWith(".gz")) R2_name = input_R2.getName.substring(0, input_R2.getName.lastIndexOf(".gz"))
-      else if (input_R2.endsWith(".gzip")) R2_name = input_R2.getName.substring(0, input_R2.getName.lastIndexOf(".gzip"))
-      else R2_name = input_R2.getName
-      R2_ext = R2_name.substring(R2_name.lastIndexOf("."), R2_name.size)
-      R2_name = R2_name.substring(0, R2_name.lastIndexOf(R2_ext))
+    input_R2 match {
+      case Some(fileR2) => {
+        paired = true
+        if (fileR2.endsWith(".gz")) R2_name = fileR2.getName.substring(0, fileR2.getName.lastIndexOf(".gz"))
+        else if (fileR2.endsWith(".gzip")) R2_name = fileR2.getName.substring(0, fileR2.getName.lastIndexOf(".gzip"))
+        else R2_name = fileR2.getName
+        R2_ext = R2_name.substring(R2_name.lastIndexOf("."), R2_name.size)
+        R2_name = R2_name.substring(0, R2_name.lastIndexOf(R2_ext))
+      }
+      case _ =>
     }
 
-    summary.out = outputDir + sampleId + "-" + libraryId + ".qc.summary.json"
+    summary.out = outputDir + sampleId + "-" + libId + ".qc.summary.json"
   }
 
   def biopetScript() {
@@ -95,7 +103,7 @@ class Flexiprep(val root: Configurable) extends QScript with BiopetQScript {
 
   def runInitialJobs() {
     outputFiles += ("fastq_input_R1" -> extractIfNeeded(input_R1, outputDir))
-    if (paired) outputFiles += ("fastq_input_R2" -> extractIfNeeded(input_R2, outputDir))
+    if (paired) outputFiles += ("fastq_input_R2" -> extractIfNeeded(input_R2.get, outputDir))
 
     fastqc_R1 = Fastqc(this, input_R1, outputDir + "/" + R1_name + ".fastqc/")
     add(fastqc_R1)
@@ -107,12 +115,12 @@ class Flexiprep(val root: Configurable) extends QScript with BiopetQScript {
     summary.addMd5sum(md5sum_R1, R2 = false, after = false)
 
     if (paired) {
-      fastqc_R2 = Fastqc(this, input_R2, outputDir + "/" + R2_name + ".fastqc/")
+      fastqc_R2 = Fastqc(this, input_R2.get, outputDir + "/" + R2_name + ".fastqc/")
       add(fastqc_R2)
       summary.addFastqc(fastqc_R2, R2 = true)
       outputFiles += ("fastqc_R2" -> fastqc_R2.output)
 
-      val md5sum_R2 = Md5sum(this, input_R2, outputDir)
+      val md5sum_R2 = Md5sum(this, input_R2.get, outputDir)
       add(md5sum_R2)
       summary.addMd5sum(md5sum_R2, R2 = true, after = false)
     }
@@ -182,15 +190,20 @@ class Flexiprep(val root: Configurable) extends QScript with BiopetQScript {
         R2 = cutadapt_R2.fastq_output
         deps ::= R2
 
-        val fastqSync = FastqSync(this, cutadapt_R1.fastq_input, cutadapt_R1.fastq_output, cutadapt_R2.fastq_output,
-          swapExt(outDir, R1, R1_ext, ".sync" + R1_ext), swapExt(outDir, R2, R2_ext, ".sync" + R2_ext), swapExt(outDir, R1, R1_ext, ".sync.stats"))
-        fastqSync.deps :::= deps
-        fastqSync.isIntermediate = true
-        add(fastqSync)
-        summary.addFastqcSync(fastqSync, chunk)
-        outputFiles += ("syncStats" -> fastqSync.output_stats)
-        R1 = fastqSync.output_R1
-        R2 = fastqSync.output_R2
+        val fqSync = new FastqSync(this)
+        fqSync.refFastq = cutadapt_R1.fastq_input
+        fqSync.inputFastq1 = cutadapt_R1.fastq_output
+        fqSync.inputFastq2 = cutadapt_R2.fastq_output
+        fqSync.outputFastq1 = swapExt(outDir, R1, R1_ext, ".sync" + R1_ext)
+        fqSync.outputFastq2 = swapExt(outDir, R2, R2_ext, ".sync" + R2_ext)
+        fqSync.outputStats = swapExt(outDir, R1, R1_ext, ".sync.stats")
+        fqSync.deps :::= deps
+        add(fqSync)
+
+        summary.addFastqcSync(fqSync, chunk)
+        outputFiles += ("syncStats" -> fqSync.outputStats)
+        R1 = fqSync.outputFastq1
+        R2 = fqSync.outputFastq2
         deps :::= R1 :: R2 :: Nil
       }
     }
@@ -273,7 +286,7 @@ class Flexiprep(val root: Configurable) extends QScript with BiopetQScript {
       add(zcatCommand)
       return newFile
     } else if (file.getName().endsWith(".bz2")) {
-      var newFile = swapExt(runDir, file, ".bz2", "")
+      val newFile = swapExt(runDir, file, ".bz2", "")
       val pbzip2 = Pbzip2(this, file, newFile)
       pbzip2.isIntermediate = true
       add(pbzip2)
diff --git a/public/flexiprep/src/main/scala/nl/lumc/sasc/biopet/pipelines/flexiprep/FlexiprepSummary.scala b/public/flexiprep/src/main/scala/nl/lumc/sasc/biopet/pipelines/flexiprep/FlexiprepSummary.scala
index 855e7288c51d697389adb0e60000e11730f1f4e0..353fb1acb83c75fe121d064c1ab6b0149f87565d 100644
--- a/public/flexiprep/src/main/scala/nl/lumc/sasc/biopet/pipelines/flexiprep/FlexiprepSummary.scala
+++ b/public/flexiprep/src/main/scala/nl/lumc/sasc/biopet/pipelines/flexiprep/FlexiprepSummary.scala
@@ -18,7 +18,7 @@ package nl.lumc.sasc.biopet.pipelines.flexiprep
 import java.io.PrintWriter
 import nl.lumc.sasc.biopet.core.config.Configurable
 import nl.lumc.sasc.biopet.extensions.{ Md5sum, Seqstat }
-import nl.lumc.sasc.biopet.scripts.{ FastqSync }
+import nl.lumc.sasc.biopet.tools.FastqSync
 import org.broadinstitute.gatk.queue.function.InProcessFunction
 import org.broadinstitute.gatk.utils.commandline.{ Input, Output }
 import java.io.File
@@ -112,8 +112,8 @@ class FlexiprepSummary(val root: Configurable) extends InProcessFunction with Co
   def addFastqcSync(fastqSync: FastqSync, chunk: String = ""): FastqSync = {
     if (!chunks.contains(chunk)) chunks += (chunk -> new Chunk)
     chunks(chunk).fastqSync = fastqSync
-    deps ::= fastqSync.output_stats
-    return fastqSync
+    deps ::= fastqSync.outputStats
+    fastqSync
   }
   // format: OFF
   override def run {
@@ -121,7 +121,7 @@ class FlexiprepSummary(val root: Configurable) extends InProcessFunction with Co
     md5Summary()
     val summary = 
       ("samples" := ( flexiprep.sampleId :=
-        ("libraries" := ( flexiprep.libraryId := (
+        ("libraries" := ( flexiprep.libId := (
           ("flexiprep" := (
             ("clipping" := !flexiprep.skipClip) ->:
             ("trimming" := !flexiprep.skipTrim) ->:
@@ -223,11 +223,13 @@ class FlexiprepSummary(val root: Configurable) extends InProcessFunction with Co
       jEmptyObject)
   }
 
-  def syncstatSummary(): Option[Json] = {
-    if (flexiprep.skipClip || !flexiprep.paired) return None
-    val s = for ((key, value) <- chunks) yield value.fastqSync.getSummary
-    return Option(FastqSync.mergeSummaries(s.toList))
-  }
+  def syncstatSummary(): Option[Json] =
+    if (flexiprep.skipClip || !flexiprep.paired)
+      None
+    else {
+      val s = for ((key, value) <- chunks) yield value.fastqSync.summary
+      Option(FastqSync.mergeSummaries(s.toList))
+    }
 
   def trimstatSummary(): Option[Json] = {
     if (flexiprep.skipTrim) return None
diff --git a/public/kopisu/.gitignore b/public/kopisu/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a6f89c2da7a029afa02b6e7a2bf80ad34958a311
--- /dev/null
+++ b/public/kopisu/.gitignore
@@ -0,0 +1 @@
+/target/
\ No newline at end of file
diff --git a/public/kopisu/pom.xml b/public/kopisu/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..3eae712aa40f517b182d345f7c7187eb846995f6
--- /dev/null
+++ b/public/kopisu/pom.xml
@@ -0,0 +1,43 @@
+<!--
+
+    Biopet is built on top of GATK Queue for building bioinformatic
+    pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+    SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+    should also be able to execute Biopet tools and pipelines.
+
+    Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+
+    Contact us at: sasc@lumc.nl
+
+    A dual licensing mode is applied. The source code within this project that are
+    not part of GATK Queue is freely available for non-commercial use under an AGPL
+    license; For commercial users or users who do not want to follow the AGPL
+    license, please contact us to obtain a separate license.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>nl.lumc.sasc</groupId>
+    <artifactId>Kopisu</artifactId>
+    <packaging>jar</packaging>
+    
+    <parent>
+        <groupId>nl.lumc.sasc</groupId>
+        <artifactId>Biopet</artifactId>
+        <version>0.3.0-DEV</version>
+        <relativePath>../</relativePath>
+    </parent>
+    
+    <inceptionYear>2015</inceptionYear>
+    <name>Kopisu</name>
+
+    <dependencies>
+        <dependency>
+            <groupId>nl.lumc.sasc</groupId>
+            <artifactId>BiopetFramework</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/public/kopisu/src/main/scala/nl/lumc/sasc/biopet/pipelines/kopisu/ConiferPipeline.scala b/public/kopisu/src/main/scala/nl/lumc/sasc/biopet/pipelines/kopisu/ConiferPipeline.scala
new file mode 100644
index 0000000000000000000000000000000000000000..c45de4eac904af6b110c7448416d0ce0a90fe8e3
--- /dev/null
+++ b/public/kopisu/src/main/scala/nl/lumc/sasc/biopet/pipelines/kopisu/ConiferPipeline.scala
@@ -0,0 +1,121 @@
+/**
+ * Biopet is built on top of GATK Queue for building bioinformatic
+ * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+ * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+ * should also be able to execute Biopet tools and pipelines.
+ *
+ * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+ *
+ * Contact us at: sasc@lumc.nl
+ *
+ * A dual licensing mode is applied. The source code within this project that are
+ * not part of GATK Queue is freely available for non-commercial use under an AGPL
+ * license; For commercial users or users who do not want to follow the AGPL
+ * license, please contact us to obtain a separate license.
+ */
+package nl.lumc.sasc.biopet.pipelines.kopisu
+
+import java.io.{ BufferedWriter, FileWriter, File }
+
+import nl.lumc.sasc.biopet.core.{ PipelineCommand, _ }
+import nl.lumc.sasc.biopet.core.config._
+import nl.lumc.sasc.biopet.extensions.Ln
+import nl.lumc.sasc.biopet.extensions.conifer.{ ConiferAnalyze, ConiferCall, ConiferRPKM }
+import org.broadinstitute.gatk.queue.QScript
+
+import scala.io.Source
+
+class ConiferPipeline(val root: Configurable) extends QScript with BiopetQScript {
+  /*
+   * Kopisu - ConiferPipeline is a pipeline that can run standalone
+   */
+  def this() = this(null)
+
+  /** Input bamfile  */
+  @Input(doc = "Bamfile to start from", fullName = "bam", shortName = "bam", required = true)
+  var inputBam: File = _
+
+  @Argument(doc = "Label this sample with a name/ID [0-9a-zA-Z] and [-_]",
+    fullName = "label",
+    shortName = "label", required = false)
+  var sampleLabel: String = _
+
+  /** Exon definitions in bed format */
+  @Input(doc = "Exon definition file in bed format", fullName = "exon_bed", shortName = "bed", required = false)
+  var probeFile: File = config("probeFile")
+
+  @Input(doc = "Previous RPKM files (controls)", fullName = "rpkm_controls", shortName = "rc", required = false)
+  var controlsDir: File = config("controlsDir")
+
+  @Argument(doc = "Enable RPKM only mode, generate files for reference db", shortName = "rpkmonly", required = false)
+  var RPKMonly: Boolean = false
+
+  val summary = new ConiferSummary(this)
+
+  def init() {
+
+  }
+
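+  // Output file naming: use the sample label when one is given, otherwise
+  // derive the name from the input BAM file name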
+  def input2RPKM(inputBam: File): String = {
+    if (sampleLabel != null && !sampleLabel.isEmpty) sampleLabel ++ ".txt"
+    else swapExt(inputBam.getName, ".bam", ".txt")
+  }
+
+  def input2HDF5(inputBam: File): String = {
+    if (sampleLabel != null && !sampleLabel.isEmpty) sampleLabel ++ ".hdf5"
+    else swapExt(inputBam.getName, ".bam", ".hdf5")
+  }
+
+  def input2Calls(inputBam: File): String = {
+    if (sampleLabel != null && !sampleLabel.isEmpty) sampleLabel ++ ".calls.txt"
+    else swapExt(inputBam.getName, ".bam", ".calls.txt")
+  }
+
+  def biopetScript(): Unit = {
+
+    /** Setup RPKM directory */
+    val sampleDir: String = outputDir
+    val RPKMdir: File = new File(sampleDir + File.separator + "RPKM" + File.separator)
+    RPKMdir.mkdirs()
+
+    val coniferRPKM = new ConiferRPKM(this)
+    coniferRPKM.bamFile = this.inputBam.getAbsoluteFile
+    coniferRPKM.probes = this.probeFile
+    coniferRPKM.output = new File(RPKMdir + File.separator + input2RPKM(inputBam))
+    add(coniferRPKM)
+
+    if (!RPKMonly) {
+      /** Collect the rpkm_output to a temp directory, where we merge with the control files */
+      var refRPKMlist: List[File] = Nil
+      for (f <- controlsDir.listFiles()) {
+        val target = new File(RPKMdir + File.separator + f.getName)
+        if (!target.exists()) {
+          logger.info("Creating " + target.getAbsolutePath)
+          add(Ln(this, f, target, true))
+          refRPKMlist :+= target
+        }
+      }
+
+      val coniferAnalyze = new ConiferAnalyze(this)
+      coniferAnalyze.deps = List(coniferRPKM.output) ++ refRPKMlist
+      coniferAnalyze.probes = this.probeFile
+      coniferAnalyze.rpkmDir = RPKMdir
+      coniferAnalyze.output = new File(sampleDir + File.separator + input2HDF5(inputBam))
+      add(coniferAnalyze)
+
+      val coniferCall = new ConiferCall(this)
+      coniferCall.input = coniferAnalyze.output
+      coniferCall.output = new File(sampleDir + File.separator + "calls.txt")
+      add(coniferCall)
+
+      summary.deps = List(coniferCall.output)
+      summary.label = sampleLabel
+      summary.calls = coniferCall.output
+      summary.out = new File(sampleDir + File.separator + input2Calls(inputBam))
+
+      add(summary)
+    }
+
+  }
+}
+
+object ConiferPipeline extends PipelineCommand
diff --git a/public/kopisu/src/main/scala/nl/lumc/sasc/biopet/pipelines/kopisu/ConiferSummary.scala b/public/kopisu/src/main/scala/nl/lumc/sasc/biopet/pipelines/kopisu/ConiferSummary.scala
new file mode 100644
index 0000000000000000000000000000000000000000..78ffcbb29540bbfd801c04b4b5d709b7eaf69191
--- /dev/null
+++ b/public/kopisu/src/main/scala/nl/lumc/sasc/biopet/pipelines/kopisu/ConiferSummary.scala
@@ -0,0 +1,65 @@
+/**
+ * Biopet is built on top of GATK Queue for building bioinformatic
+ * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+ * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+ * should also be able to execute Biopet tools and pipelines.
+ *
+ * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+ *
+ * Contact us at: sasc@lumc.nl
+ *
+ * A dual licensing mode is applied. The source code within this project that are
+ * not part of GATK Queue is freely available for non-commercial use under an AGPL
+ * license; For commercial users or users who do not want to follow the AGPL
+ * license, please contact us to obtain a separate license.
+ */
+package nl.lumc.sasc.biopet.pipelines.kopisu
+
+import java.io.{ BufferedWriter, File, FileWriter }
+
+import argonaut._
+import nl.lumc.sasc.biopet.core.config.Configurable
+import org.broadinstitute.gatk.queue.function.InProcessFunction
+import org.broadinstitute.gatk.utils.commandline.{ Input, Output }
+
+import scala.io.Source
+
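+/** In-process job that filters a CoNIFER calls file down to the entries of a single sample (plus the header line) */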
+class ConiferSummary(val root: Configurable) extends InProcessFunction with Configurable {
+  def filterCalls(callFile: File, outFile: File, sampleName: String): Unit = {
+    val writer = new BufferedWriter(new FileWriter(outFile))
+    val source = Source.fromFile(callFile)
+    try {
+      // keep the header line plus the calls made for this sample
+      for (line <- source.getLines() if line.startsWith(sampleName) || line.startsWith("sampleID"))
+        writer.write(line + "\n")
+    } finally {
+      source.close()
+      writer.close()
+    }
+  }
+
+  this.analysisName = getClass.getSimpleName
+
+  @Input(doc = "deps")
+  var deps: List[File] = Nil
+
+  @Output(doc = "Summary output", required = true)
+  var out: File = _
+
+  @Input(doc = "calls")
+  var calls: File = _
+
+  var label: String = _
+
+  val coniferPipeline: ConiferPipeline = root match {
+    case p: ConiferPipeline => p
+    case _                  => throw new IllegalStateException("Root is not an instance of ConiferPipeline")
+  }
+
+  var resources: Map[String, Json] = Map()
+
+  override def run(): Unit = {
+    logger.debug("Start")
+    filterCalls(calls, out, label)
+    logger.debug("Stop")
+  }
+}
diff --git a/public/kopisu/src/main/scala/nl/lumc/sasc/biopet/pipelines/kopisu/Kopisu.scala b/public/kopisu/src/main/scala/nl/lumc/sasc/biopet/pipelines/kopisu/Kopisu.scala
new file mode 100644
index 0000000000000000000000000000000000000000..94b3bcbe619ee5b01bd29bea6aa9b9e707934a6d
--- /dev/null
+++ b/public/kopisu/src/main/scala/nl/lumc/sasc/biopet/pipelines/kopisu/Kopisu.scala
@@ -0,0 +1,54 @@
+/**
+ * Biopet is built on top of GATK Queue for building bioinformatic
+ * pipelines. It is mainly intended to support LUMC SHARK cluster which is running
+ * SGE. But other types of HPC that are supported by GATK Queue (such as PBS)
+ * should also be able to execute Biopet tools and pipelines.
+ *
+ * Copyright 2014 Sequencing Analysis Support Core - Leiden University Medical Center
+ *
+ * Contact us at: sasc@lumc.nl
+ *
+ * A dual licensing mode is applied. The source code within this project that are
+ * not part of GATK Queue is freely available for non-commercial use under an AGPL
+ * license; For commercial users or users who do not want to follow the AGPL
+ * license, please contact us to obtain a separate license.
+ */
+package nl.lumc.sasc.biopet.pipelines.kopisu
+
+import nl.lumc.sasc.biopet.core.config.Configurable
+import nl.lumc.sasc.biopet.core.{ MultiSampleQScript, PipelineCommand }
+import org.broadinstitute.gatk.queue.QScript
+
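+// Skeleton for the CNV-calling pipeline: per-sample and per-library jobs are still empty placeholders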
+class Kopisu(val root: Configurable) extends QScript with MultiSampleQScript {
+  def this() = this(null)
+
+  @Input(doc = "Input bamfile", required = true)
+  var bamFile: File = config("bam")
+
+  def init() {
+    if (!outputDir.endsWith("/")) outputDir += "/"
+  }
+
+  def biopetScript() {
+    addSamplesJobs()
+  }
+
+  def makeSample(id: String) = new Sample(id)
+  class Sample(sampleId: String) extends AbstractSample(sampleId) {
+    def makeLibrary(id: String) = new Library(id)
+    class Library(libId: String) extends AbstractLibrary(libId) {
+      protected def addJobs(): Unit = {}
+    }
+
+    protected def addJobs(): Unit = {}
+  }
+
+  def addMultiSampleJobs(): Unit = {
+  }
+}
+
+object Kopisu extends PipelineCommand
diff --git a/public/mapping/src/main/scala/nl/lumc/sasc/biopet/pipelines/mapping/Mapping.scala b/public/mapping/src/main/scala/nl/lumc/sasc/biopet/pipelines/mapping/Mapping.scala
index 87a435d5429efb3ee6dc95e9aeab9c4bb8e7d72c..9094d38cddbe70d8c7b6787200d2afe94e19d486 100644
--- a/public/mapping/src/main/scala/nl/lumc/sasc/biopet/pipelines/mapping/Mapping.scala
+++ b/public/mapping/src/main/scala/nl/lumc/sasc/biopet/pipelines/mapping/Mapping.scala
@@ -36,7 +36,7 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
   var input_R1: File = _
 
   @Input(doc = "R2 fastq file", shortName = "R2", required = false)
-  var input_R2: File = _
+  var input_R2: Option[File] = None
 
   /** Output name */
   var outputName: String = _
@@ -66,8 +66,15 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
   /** Readgroup ID */
   protected var readgroupId: String = _
 
+  // TODO: hide sampleId and libId from the command line so they do not interfere with our config values
+
   /** Readgroup Library */
-  var libraryId: String = _
+  @Argument(doc = "Library ID", shortName = "library", required = true)
+  var libId: String = _
+
+  /**Readgroup sample */
+  @Argument(doc = "Sample ID", shortName = "sample", required = true)
+  var sampleId: String = _
 
   /** Readgroup Platform */
   protected var platform: String = config("platform", default = "illumina")
@@ -75,9 +82,6 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
   /** Readgroup platform unit */
   protected var platformUnit: String = config("platform_unit", default = "na")
 
-  /**Readgroup sample */
-  var sampleId: String = _
-
   /** Readgroup sequencing center */
   protected var readgroupSequencingCenter: Option[String] = config("readgroup_sequencing_center")
 
@@ -95,14 +99,14 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
   def finalBamFile: File = outputDir + outputName + ".final.bam"
 
   def init() {
-    if (outputDir == null) throw new IllegalStateException("Missing Output directory on mapping module")
-    else if (!outputDir.endsWith("/")) outputDir += "/"
-    if (input_R1 == null) throw new IllegalStateException("Missing FastQ R1 on mapping module")
-    paired = (input_R2 != null)
-
-    if (libraryId == null) libraryId = config("library_id")
-    if (sampleId == null) sampleId = config("sample_id")
-    if (readgroupId == null && sampleId != null && libraryId != null) readgroupId = sampleId + "-" + libraryId
+    require(outputDir != null, "Missing output directory on mapping module")
+    require(input_R1 != null, "Missing FastQ R1 on mapping module")
+    require(sampleId != null, "Missing sample ID on mapping module")
+    require(libId != null, "Missing library ID on mapping module")
+
+    paired = input_R2.isDefined
+
+    if (readgroupId == null && sampleId != null && libId != null) readgroupId = sampleId + "-" + libId
     else if (readgroupId == null) readgroupId = config("readgroup_id")
 
     if (outputName == null) outputName = readgroupId
@@ -123,11 +127,11 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
 
   def biopetScript() {
     if (!skipFlexiprep) {
-      flexiprep.outputDir = outputDir + "flexiprep/"
+      flexiprep.outputDir = outputDir + "flexiprep" + File.separator
       flexiprep.input_R1 = input_R1
-      if (paired) flexiprep.input_R2 = input_R2
+      flexiprep.input_R2 = input_R2
       flexiprep.sampleId = this.sampleId
-      flexiprep.libraryId = this.libraryId
+      flexiprep.libId = this.libId
       flexiprep.init
       flexiprep.runInitialJobs
     }
@@ -144,10 +148,12 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
     if (chunking) for (t <- 1 to numberChunks.getOrElse(1)) {
       val chunkDir = outputDir + "chunks/" + t + "/"
       chunks += (chunkDir -> (removeGz(chunkDir + input_R1.getName),
-        if (paired) removeGz(chunkDir + input_R2.getName) else ""))
+        if (paired) removeGz(chunkDir + input_R2.get.getName) else ""))
     }
-    else chunks += (outputDir -> (flexiprep.extractIfNeeded(input_R1, flexiprep.outputDir),
-      flexiprep.extractIfNeeded(input_R2, flexiprep.outputDir)))
+    else chunks += (outputDir -> (
+      flexiprep.extractIfNeeded(input_R1, flexiprep.outputDir),
+      if (paired) flexiprep.extractIfNeeded(input_R2.get, flexiprep.outputDir) else "")
+    )
 
     if (chunking) {
       val fastSplitter_R1 = new FastqSplitter(this)
@@ -158,7 +164,7 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
 
       if (paired) {
         val fastSplitter_R2 = new FastqSplitter(this)
-        fastSplitter_R2.input = input_R2
+        fastSplitter_R2.input = input_R2.get
         for ((chunkDir, fastqfile) <- chunks) fastSplitter_R2.output :+= fastqfile._2
         fastSplitter_R2.isIntermediate = true
         add(fastSplitter_R2)
@@ -208,7 +214,7 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
       bamFile = mergeSamFile.output
     }
 
-    if (!skipMetrics) addAll(BamMetrics(this, bamFile, outputDir + "metrics/").functions)
+    if (!skipMetrics) addAll(BamMetrics(this, bamFile, outputDir + "metrics" + File.separator).functions)
 
     add(Ln(this, swapExt(outputDir, bamFile, ".bam", ".bai"), swapExt(outputDir, finalBamFile, ".bam", ".bai")))
     add(Ln(this, bamFile, finalBamFile))
@@ -265,7 +271,7 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
     bwaCommand.R1 = R1
     if (paired) bwaCommand.R2 = R2
     bwaCommand.deps = deps
-    bwaCommand.R = getReadGroup
+    bwaCommand.R = Some(getReadGroup)
     bwaCommand.output = swapExt(output.getParent, output, ".bam", ".sam")
     bwaCommand.isIntermediate = true
     add(bwaCommand)
@@ -279,7 +285,7 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
 
     var RG: String = "ID:" + readgroupId + ","
     RG += "SM:" + sampleId + ","
-    RG += "LB:" + libraryId + ","
+    RG += "LB:" + libId + ","
-    if (readgroupDescription != null) RG += "DS" + readgroupDescription + ","
+    if (readgroupDescription != null) RG += "DS:" + readgroupDescription + ","
     RG += "PU:" + platformUnit + ","
     if (predictedInsertsize.getOrElse(0) > 0) RG += "PI:" + predictedInsertsize.get + ","
@@ -330,7 +336,7 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
     addOrReplaceReadGroups.createIndex = true
 
     addOrReplaceReadGroups.RGID = readgroupId
-    addOrReplaceReadGroups.RGLB = libraryId
+    addOrReplaceReadGroups.RGLB = libId
     addOrReplaceReadGroups.RGPL = platform
     addOrReplaceReadGroups.RGPU = platformUnit
     addOrReplaceReadGroups.RGSM = sampleId
@@ -344,7 +350,7 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
 
   def getReadGroup(): String = {
     var RG: String = "@RG\\t" + "ID:" + readgroupId + "\\t"
-    RG += "LB:" + libraryId + "\\t"
+    RG += "LB:" + libId + "\\t"
     RG += "PL:" + platform + "\\t"
     RG += "PU:" + platformUnit + "\\t"
     RG += "SM:" + sampleId + "\\t"
@@ -357,6 +363,4 @@ class Mapping(val root: Configurable) extends QScript with BiopetQScript {
   }
 }
 
-object Mapping extends PipelineCommand {
-
-}
+object Mapping extends PipelineCommand
\ No newline at end of file
diff --git a/public/pom.xml b/public/pom.xml
index 91ede18aa729e90d7475a620f8bf1d531572b290..b9043502c766d917ac5bf6fbe665dba46b3df7fb 100644
--- a/public/pom.xml
+++ b/public/pom.xml
@@ -32,8 +32,10 @@
         <module>gentrap</module>
         <module>mapping</module>
         <module>sage</module>
+        <module>kopisu</module>
         <module>yamsvp</module>
         <module>bam2wig</module>
+        <module>carp</module>
     </modules>
     
     <properties>
@@ -183,4 +185,4 @@
           </plugin>
       </plugins>
     </build>
-</project>
\ No newline at end of file
+</project>
diff --git a/public/sage/src/main/scala/nl/lumc/sasc/biopet/pipelines/sage/Sage.scala b/public/sage/src/main/scala/nl/lumc/sasc/biopet/pipelines/sage/Sage.scala
index 851e8481c5da2eff863760e1a9dfcaef180cc369..ed55ca382d13346b3cbb7bb4862941ef4da591f5 100644
--- a/public/sage/src/main/scala/nl/lumc/sasc/biopet/pipelines/sage/Sage.scala
+++ b/public/sage/src/main/scala/nl/lumc/sasc/biopet/pipelines/sage/Sage.scala
@@ -35,13 +35,10 @@ class Sage(val root: Configurable) extends QScript with MultiSampleQScript {
   qscript =>
   def this() = this(null)
 
-  var countBed: File = config("count_bed")
-
-  var squishedCountBed: File = config("squished_count_bed")
-
-  var transcriptome: File = config("transcriptome")
-
-  var tagsLibrary: File = config("tags_library")
+  var countBed: Option[File] = config("count_bed")
+  var squishedCountBed: File = _
+  var transcriptome: Option[File] = config("transcriptome")
+  var tagsLibrary: Option[File] = config("tags_library")
 
   override def defaults = ConfigUtils.mergeMaps(Map("bowtie" -> Map(
     "m" -> 1,
@@ -62,16 +59,16 @@ class Sage(val root: Configurable) extends QScript with MultiSampleQScript {
   def makeSample(id: String) = new Sample(id)
   class Sample(sampleId: String) extends AbstractSample(sampleId) {
     def makeLibrary(id: String) = new Library(id)
-    class Library(libraryId: String) extends AbstractLibrary(libraryId) {
+    class Library(libId: String) extends AbstractLibrary(libId) {
       val inputFastq: File = config("R1", required = true)
       val prefixFastq: File = createFile(".prefix.fastq")
 
       val flexiprep = new Flexiprep(qscript)
       flexiprep.sampleId = sampleId
-      flexiprep.libraryId = libraryId
+      flexiprep.libId = libId
 
       val mapping = new Mapping(qscript)
-      mapping.libraryId = libraryId
+      mapping.libId = libId
       mapping.sampleId = sampleId
 
       protected def addJobs(): Unit = {
@@ -96,14 +93,14 @@ class Sage(val root: Configurable) extends QScript with MultiSampleQScript {
         qscript.addAll(mapping.functions)
 
         if (config("library_counts", default = false).asBoolean) {
-          addBedtoolsCounts(mapping.finalBamFile, sampleId + "-" + libraryId, libDir)
-          addTablibCounts(pf.outputFastq, sampleId + "-" + libraryId, libDir)
+          addBedtoolsCounts(mapping.finalBamFile, sampleId + "-" + libId, libDir)
+          addTablibCounts(pf.outputFastq, sampleId + "-" + libId, libDir)
         }
       }
     }
 
     protected def addJobs(): Unit = {
-      addLibsJobs()
+      addPerLibJobs()
       val libraryBamfiles = libraries.map(_._2.mapping.finalBamFile).toList
       val libraryFastqFiles = libraries.map(_._2.prefixFastq).toList
 
@@ -127,31 +124,32 @@ class Sage(val root: Configurable) extends QScript with MultiSampleQScript {
 
   def init() {
     if (!outputDir.endsWith("/")) outputDir += "/"
-    if (transcriptome == null && tagsLibrary == null)
+    if (transcriptome.isEmpty && tagsLibrary.isEmpty)
       throw new IllegalStateException("No transcriptome or taglib found")
-    if (countBed == null && squishedCountBed == null)
-      throw new IllegalStateException("No bedfile supplied, please add a countBed or squishedCountBed")
+    if (countBed.isEmpty)
+      throw new IllegalStateException("No bedfile supplied, please add a countBed")
   }
 
   def biopetScript() {
-    if (squishedCountBed == null) {
-      val squishBed = SquishBed(this, countBed, outputDir)
-      add(squishBed)
-      squishedCountBed = squishBed.output
-    }
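+    // the count BED is now always squished; squishedCountBed is derived instead of read from the config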
+    val squishBed = SquishBed(this, countBed.get, outputDir)
+    add(squishBed)
+    squishedCountBed = squishBed.output
 
-    if (tagsLibrary == null) {
+    if (tagsLibrary.isEmpty) {
       val cdl = new SageCreateLibrary(this)
-      cdl.input = transcriptome
+      cdl.input = transcriptome.get
       cdl.output = outputDir + "taglib/tag.lib"
       cdl.noAntiTagsOutput = outputDir + "taglib/no_antisense_genes.txt"
       cdl.noTagsOutput = outputDir + "taglib/no_sense_genes.txt"
       cdl.allGenesOutput = outputDir + "taglib/all_genes.txt"
       add(cdl)
-      tagsLibrary = cdl.output
+      tagsLibrary = Some(cdl.output)
     }
 
-    addSamplesJobs
+    addSamplesJobs()
+  }
+
+  def addMultiSampleJobs(): Unit = {
   }
 
   def addBedtoolsCounts(bamFile: File, outputPrefix: String, outputDir: String) {
@@ -184,7 +182,7 @@ class Sage(val root: Configurable) extends QScript with MultiSampleQScript {
 
     val createTagCounts = new SageCreateTagCounts(this)
     createTagCounts.input = countFastq.output
-    createTagCounts.tagLib = tagsLibrary
+    createTagCounts.tagLib = tagsLibrary.get
     createTagCounts.countSense = outputDir + outputPrefix + ".tagcount.sense.counts"
     createTagCounts.countAllSense = outputDir + outputPrefix + ".tagcount.all.sense.counts"
     createTagCounts.countAntiSense = outputDir + outputPrefix + ".tagcount.antisense.counts"
diff --git a/public/yamsvp/src/main/scala/nl/lumc/sasc/biopet/pipelines/yamsvp/Yamsvp.scala b/public/yamsvp/src/main/scala/nl/lumc/sasc/biopet/pipelines/yamsvp/Yamsvp.scala
index 8e3ef0af511f28b19dda8403258b024ab8936b42..206c92a69453105fb2034d6cd72cce00f5ab75cd 100644
--- a/public/yamsvp/src/main/scala/nl/lumc/sasc/biopet/pipelines/yamsvp/Yamsvp.scala
+++ b/public/yamsvp/src/main/scala/nl/lumc/sasc/biopet/pipelines/yamsvp/Yamsvp.scala
@@ -124,11 +124,11 @@ class Yamsvp(val root: Configurable) extends QScript with BiopetQScript { //with
 
   // Called for each run from a sample
 
-  def runSingleLibraryJobs(libraryId: String, sampleID: String): LibraryOutput = {
+  def runSingleLibraryJobs(libId: String, sampleID: String): LibraryOutput = {
     val libraryOutput = new LibraryOutput
 
     val alignmentDir: String = outputDir + sampleID + "/alignment/"
-    val runDir: String = alignmentDir + "run_" + libraryId + "/"
+    val runDir: String = alignmentDir + "run_" + libId + "/"
 
     if (config.contains("R1")) {
       val mapping = new Mapping(this)
@@ -140,7 +140,7 @@ class Yamsvp(val root: Configurable) extends QScript with BiopetQScript { //with
       mapping.input_R1 = config("R1")
       mapping.input_R2 = config("R2")
-      mapping.paired = (mapping.input_R2 != null)
+      mapping.paired = mapping.input_R2.isDefined
-      mapping.RGLB = libraryId
+      mapping.RGLB = libId
       mapping.RGSM = sampleID
       mapping.RGPL = config("PL")
       mapping.RGPU = config("PU")
@@ -154,7 +154,7 @@ class Yamsvp(val root: Configurable) extends QScript with BiopetQScript { //with
       // start sambamba dedup
 
       libraryOutput.mappedBamFile = mapping.outputFiles("finalBamFile")
-    } else this.logger.error("Sample: " + sampleID + ": No R1 found for library: " + libraryId)
+    } else this.logger.error("Sample: " + sampleID + ": No R1 found for library: " + libId)
     return libraryOutput
     //    logger.debug(outputFiles)
     //    return outputFiles