diff --git a/CHANGELOG.md b/CHANGELOG.md
index 390faf25a738576b9c977a82036d633edcbb5775..13beb086a9d432936236eb0374a3520e478544d6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,8 @@ that users understand how the changes affect the new version.
 
 version 4.0.0-develop
 ---------------------------
++ Added bcftools stats task.
++ Added GATK VariantEval task.
 + Added a log output for STAR.
 + Added report output to Hisat2.
 + Added output with all reports to gffcompare.
diff --git a/bcftools.wdl b/bcftools.wdl
index 53165c6be2118579bc1705a11577b52e80c8591e..bd79c2c64f7c0ca6d61a9aa24e1407be83b2efe5 100644
--- a/bcftools.wdl
+++ b/bcftools.wdl
@@ -55,3 +55,108 @@ task Bcf2Vcf {
         dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
+
+task Stats {
+    input {
+        File inputVcf
+        File inputVcfIndex
+        File? compareVcf
+        File? compareVcfIndex
+        String outputPath = basename(inputVcf) + ".stats"
+        String? afBins
+        String? afTag
+        Boolean firstAlleleOnly = false
+        String? collapse
+        String? depth
+        String? exclude
+        File? exons
+        String? applyFilters
+        File? fastaRef
+        File? fastaRefIndex
+        String? include
+        Boolean splitByID = false
+        String? regions
+        File? regionsFile
+        Array[String] samples = []
+        File? samplesFile
+        String? targets
+        File? targetsFile
+        String? userTsTv
+        Boolean verbose = false
+
+        Int threads = 0
+        Int timeMinutes = 1 + 2 * ceil(size(select_all([inputVcf, compareVcf]), "G"))  # TODO: Estimate, 2 minutes per GB, refine later.
+        String memory = "2G"  # TODO: Safe estimate, refine later.
+        String dockerImage = "quay.io/biocontainers/bcftools:1.9--ha228f0b_3"
+    }
+
+    command {
+        set -e
+        mkdir -p "$(dirname ~{outputPath})"
+        bcftools stats \
+        ~{"--af-bins " + afBins} \
+        ~{"--af-tag " + afTag} \
+        ~{true="--1st-allele-only" false="" firstAlleleOnly} \
+        ~{"--collapse " + collapse} \
+        ~{"--depth " + depth} \
+        ~{"--exclude " + exclude} \
+        ~{"--exons " + exons} \
+        ~{"--apply-filters " + applyFilters} \
+        ~{"--fasta-ref " + fastaRef} \
+        ~{"--include " + include} \
+        ~{true="--split-by-ID" false="" splitByID} \
+        ~{"--regions " + regions} \
+        ~{"--regions-file " + regionsFile} \
+        ~{true="--samples" false="" length(samples) > 0} ~{sep="," samples} \
+        ~{"--samples-file " + samplesFile} \
+        ~{"--targets " + targets} \
+        ~{"--targets-file " + targetsFile} \
+        ~{"--user-tstv " + userTsTv} \
+        --threads ~{threads} \
+        ~{true="--verbose" false="" verbose} \
+        ~{inputVcf} ~{compareVcf} > ~{outputPath}
+    }
+
+    output {
+        File stats = outputPath
+    }
+
+    runtime {
+        cpu: threads + 1
+        time_minutes: timeMinutes
+        memory: memory
+        docker: dockerImage
+    }
+
+    parameter_meta {
+        inputVcf: {description: "The VCF to be analysed.", category: "required"}
+        inputVcfIndex: {description: "The index for the input VCF.", category: "required"}
+        compareVcf: {description: "When both inputVcf and compareVcf are given, the program generates separate stats for the intersection and the complements. By default only sites are compared; samples must be given to also include sample columns.", category: "common"}
+        compareVcfIndex: {description: "Index for the compareVcf.", category: "common"}
+        afBins: {description: "Allele frequency bins, a list (0.1,0.5,1) or a file (0.1\n0.5\n1).", category: "advanced"}
+        afTag: {description: "Allele frequency tag to use, by default estimated from AN,AC or GT.", category: "advanced"}
+        firstAlleleOnly: {description: "Include only 1st allele at multiallelic sites.", category: "advanced"}
+        collapse: {description: "Treat as identical records with <snps|indels|both|all|some|none>, see man page for details.", category: "advanced"}
+        depth: {description: "Depth distribution: min,max,bin size [0,500,1].", category: "advanced"}
+        exclude: {description: "Exclude sites for which the expression is true (see man page for details).", category: "advanced"}
+        exons: {description: "Tab-delimited file with exons for indel frameshifts (chr,from,to; 1-based, inclusive, bgzip compressed).", category: "advanced"}
+        applyFilters: {description: "Require at least one of the listed FILTER strings (e.g. \"PASS,.\").", category: "advanced"}
+        fastaRef: {description: "Faidx indexed reference sequence file to determine INDEL context.", category: "advanced"}
+        fastaRefIndex: {description: "Index file (.fai) for fastaRef. Must be supplied if fastaRef is supplied.", category: "advanced"}
+        include: {description: "Select sites for which the expression is true (see man page for details).", category: "advanced"}
+        splitByID: {description: "Collect stats for sites with ID separately (known vs novel).", category: "advanced"}
+        regions: {description: "Restrict to comma-separated list of regions.", category: "advanced"}
+        regionsFile: {description: "Restrict to regions listed in a file.", category: "advanced"}
+        samples: {description: "List of samples for sample stats, \"-\" to include all samples.", category: "advanced"}
+        samplesFile: {description: "File of samples to include.", category: "advanced"}
+        targets: {description: "Similar to regions but streams rather than index-jumps.", category: "advanced"}
+        targetsFile: {description: "Similar to regionsFile but streams rather than index-jumps.", category: "advanced"}
+        userTsTv: {description: "<TAG[:min:max:n]>. Collect Ts/Tv stats for any tag using the given binning [0:1:100].", category: "advanced"}
+        threads: {description: "Number of extra decompression threads [0].", category: "advanced"}
+        verbose: {description: "Produce verbose per-site and per-sample output.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
+        outputPath: {description: "The location the output stats file should be written to.", category: "common"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
+    }
+}
diff --git a/gatk.wdl b/gatk.wdl
index edafc4d4cc9a4acb24bbcd5b385ab7e628cf7966..09de0488fd44542fc91a3546387d0a207cf90858 100644
--- a/gatk.wdl
+++ b/gatk.wdl
@@ -1551,6 +1551,89 @@ task SplitNCigarReads {
     }
 }
 
+task VariantEval {
+    input {
+        Array[File] evalVcfs
+        Array[File] evalVcfsIndex
+        Array[File] comparisonVcfs = []
+        Array[File] comparisonVcfsIndex = []
+        File? referenceFasta
+        File? referenceFastaDict
+        File? referenceFastaFai
+        File? dbsnpVCF
+        File? dbsnpVCFIndex
+        Array[File] intervals = []
+        String outputPath = "eval.table"
+        Boolean doNotUseAllStandardModules = false
+        Boolean doNotUseAllStandardStratifications = false
+        Array[String] evalModules = []
+        Array[String] stratificationModules = []
+        Array[String] samples = []
+        Boolean mergeEvals = false
+
+        String memory = "5G"
+        String javaXmx = "4G"
+        # TODO: Refine estimate. For now 4 minutes per GB of input.
+        Int timeMinutes = ceil(size(flatten([evalVcfs, comparisonVcfs]), "G") * 4)
+        String dockerImage = "quay.io/biocontainers/gatk4:4.1.7.0--py38_0"
+    }
+
+    command {
+        set -e
+        mkdir -p "$(dirname ~{outputPath})"
+        gatk --java-options '-Xmx~{javaXmx} -XX:ParallelGCThreads=1' \
+        VariantEval \
+        --output ~{outputPath} \
+        ~{true="--eval" false="" length(evalVcfs) > 0} ~{sep=" --eval " evalVcfs} \
+        ~{true="--comparison" false="" length(comparisonVcfs) > 0} ~{sep=" --comparison " comparisonVcfs} \
+        ~{"-R " + referenceFasta} \
+        ~{"--dbsnp " + dbsnpVCF} \
+        ~{true="-L" false="" length(intervals) > 0} ~{sep=' -L ' intervals} \
+        ~{true="--sample" false="" length(samples) > 0} ~{sep=' --sample ' samples} \
+        ~{true="--do-not-use-all-standard-modules" false="" doNotUseAllStandardModules} \
+        ~{true="--do-not-use-all-standard-stratifications" false="" doNotUseAllStandardStratifications} \
+        ~{true="-EV" false="" length(evalModules) > 0} ~{sep=" -EV " evalModules} \
+        ~{true="-ST" false="" length(stratificationModules) > 0} ~{sep=" -ST " stratificationModules} \
+        ~{true="--merge-evals" false="" mergeEvals}
+    }
+
+    output {
+        File table = outputPath
+    }
+
+    runtime {
+        cpu: 1
+        docker: dockerImage
+        memory: memory
+        time_minutes: timeMinutes
+    }
+    parameter_meta {
+        evalVcfs: {description: "Variant sets to evaluate.", category: "required"}
+        evalVcfsIndex: {description: "Indexes for the variant sets.", category: "required"}
+        comparisonVcfs: {description: "The VCF files to compare against.", category: "advanced"}
+        comparisonVcfsIndex: {description: "Indexes for the comparison VCF files.", category: "advanced"}
+        evalModules: {description: "One or more specific eval modules to apply to the eval track(s) (in addition to the standard modules, unless doNotUseAllStandardModules=true).", category: "common"}
+        stratificationModules: {description: "One or more specific stratification modules to apply to the eval track(s) (in addition to the standard stratifications, unless doNotUseAllStandardStratifications=true).", category: "common"}
+        samples: {description: "Derive eval and comp contexts using only these sample genotypes, when genotypes are available in the original context.", category: "advanced"}  # Advanced because this description is impossible to understand...
+        mergeEvals: {description: "If provided, all evalVcf tracks will be merged into a single eval track.", category: "common"}
+        doNotUseAllStandardModules: {description: "Do not use the standard modules by default (instead, only those that are specified with the evalModules option).", category: "common"}
+        doNotUseAllStandardStratifications: {description: "Do not use the standard stratification modules by default (instead, only those that are specified with the stratificationModules option).", category: "common"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.", category: "common"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.", category: "common"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "common"}
+        dbsnpVCF: {description: "A dbSNP VCF.", category: "common"}
+        dbsnpVCFIndex: {description: "The index for the dbSNP VCF.", category: "common"}
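+        intervals: {description: "One or more genomic intervals over which to operate (passed to GATK with -L).", category: "advanced"}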
+        outputPath: {description: "The location the output table should be written.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
+}
+
 task VariantFiltration {
     input {
         File inputVcf
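
For reference, a minimal example of calling the two new tasks from a workflow might look like the sketch below. The workflow name, input names, and the idea of evaluating a single VCF are illustrative only and not part of this patch; both calls pass only the required inputs, so every optional flag keeps the defaults defined in the tasks above.

    version 1.0

    import "bcftools.wdl" as bcftools
    import "gatk.wdl" as gatk

    workflow StatsExample {
        input {
            File vcf
            File vcfIndex
        }

        # bcftools stats on a single VCF; compareVcf is left unset.
        call bcftools.Stats as bcftoolsStats {
            input:
                inputVcf = vcf,
                inputVcfIndex = vcfIndex
        }

        # GATK VariantEval with the same VCF as the only eval track.
        call gatk.VariantEval as variantEval {
            input:
                evalVcfs = [vcf],
                evalVcfsIndex = [vcfIndex]
        }

        output {
            File statsFile = bcftoolsStats.stats
            File evalTable = variantEval.table
        }
    }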