diff --git a/CHANGELOG.md b/CHANGELOG.md
index e2068f49662a192479a19bbf873c014a521fa35c..2c04b58263ae0ddd29bd84d59d41c2378a8e5a34 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ that users understand how the changes affect the new version.
 
 version 5.0.0-dev
 ---------------------------
++ CCS: `cores` input has been renamed to `threads` to match tool naming.
 + CCS: Update CCS to version 5.
 + deepvariant: Add task for DeepVariant.
 + gatk: Make intervals optional for GenotypeGVCFs.
@@ -19,7 +20,8 @@ version 5.0.0-dev
 + picard: Add CollectHsMetrics and CollectVariantCallingMetrics.
 + Samtools: Add `threads` to parameter meta for Merge task.
 + bcftools: add tmpDir input to specify temporary directory when sorting.
-+ bcftools: remove outputType and implement indexing based on output file extension. 
++ bcftools: remove outputType and implement indexing based on output
+  file extension.
 + NanoPack: Add parameter_meta to NanoPlot task.
 + Centrifuge: Remove metrics file from classification (which causes the
   summary report to be empty).
@@ -111,8 +113,8 @@ version 4.0.0
 + Change MultiQC inputs. It now accepts an array of reports files. It does not
   need access to a folder with the reports anymore. MultiQC can now be used
   as a normal WDL task without hacks.
-+ Picard: Make all outputs in `CollectMultipleMetrics` optional. This will make sure the
-  task will not fail if one of the metrics is set to false.
++ Picard: Make all outputs in `CollectMultipleMetrics` optional. This will
+  make sure the task will not fail if one of the metrics is set to false.
 + The struct `BowtieIndex` was removed, as it has become obsolete.
 + The task `ReorderGlobbedScatters` was removed, as it has become obsolete.
 + Adjusted the memory settings of many tools, especially java tools.
@@ -134,7 +136,8 @@ version 4.0.0
 + Add faidx task to samtools.
 + Isoseq3: Remove dirname command from output folder creation step.
 + Isoseq3: Requires more memory by default, is now 2G.
-+ Isoseq3: Remove cp commands and other bash magic, file naming is now solved by pipeline.
++ Isoseq3: Remove cp commands and other bash magic, file naming is now
+  solved by pipeline.
 + Lima: Replace mv command with cp.
 + Add WDL task for smoove (lumpy) sv-caller.
 
@@ -145,7 +148,8 @@ version 3.1.0
 + Lima: Add missing output to parameter_meta.
 + Lima: Remove outputPrefix variable from output section.
 + Isoseq3: Make sure stderr log file from Refine is unique and not overwritten.
-+ Isoseq3: Add workaround in Refine for glob command not locating files in output directory.
++ Isoseq3: Add workaround in Refine for glob command not locating files
+  in output directory.
 + Isoseq3: Fix --min-polya-length argument syntax.
 + Lima: Add workaround for glob command not locating files in output directory.
 + CCS: Add missing backslash.
@@ -189,10 +193,13 @@ version 3.0.0
 + Rename HaplotypeCallerGVCF to HaplotypeCaller. Add `gvcf` option to set
   whether output should be a GVCF.
 + Centrifuge: Add Krona task specific to Centrifuge.
-+ Centrifuge: Fix Centrifuge tests, where sometimes the index files could still not be located.
++ Centrifuge: Fix Centrifuge tests, where sometimes the index files could
+  still not be located.
 + Update parameter_meta for TALON, Centrifuge and Minimap2.
-+ Centrifuge: Fix issue where Centrifuge Inspect did not get the correct index files location.
-+ Add `minimumContigLength` input to PlotDenoisedCopyRatios and PlotModeledSegments.
++ Centrifuge: Fix issue where Centrifuge Inspect did not get the correct
+  index files location.
++ Add `minimumContigLength` input to PlotDenoisedCopyRatios
+  and PlotModeledSegments.
 + Add `commonVariantSitesIndex` input to CollectAllelicCounts.
 + Centrifuge: Fix issue where Centrifuge could not locate index files.
 + Increase default memory of BWA mem to 32G (was 16G).
@@ -228,11 +235,13 @@ version 3.0.0
 + Removed the "extraArgs" input from FilterMutectCalls.
 + Removed unused "verbose" and "quiet" inputs from multiqc.
 + Added parameter_meta sections to a variety of tasks.
-+ Picard's BedToIntervalList outputPath input is now optional (with a default of "regions.interval_list").
++ Picard's BedToIntervalList outputPath input is now
+  optional (with a default of "regions.interval_list").
 + TALON: Fix SQLite error concerning database/disk space being full.
 + Update htseq to default image version 0.11.2.
 + Update biowdl-input-converter in common.wdl to version 0.2.1.
-+ Update TALON section to now include the new annotation file output, and add config file creation to the TALON task.
++ Update TALON section to now include the new annotation file output, and
+  add config file creation to the TALON task.
 + Removed unused inputs (trimPrimer and format) for cutadapt.
 + Various minor command tweaks to increase stability.
 + Fixed unused inputs in bedtools sort (inputs are now used).
@@ -245,7 +254,8 @@ version 2.1.0
 + Updated biowdl-input-converter version.
 + GATK CombineGVCFs memory was tripled to prevent it from using a lot of CPU in
   Garbage Collection mode.
-+ Updated parameter_meta sections for Minimap2 and TranscriptClean to wdl-aid format.
++ Updated parameter_meta sections for Minimap2 and TranscriptClean to
+  wdl-aid format.
 + Updated cores variable for TALON, the default is now 4.
 + Updated TALON to version 4.4.
 + Added parameter_meta sections to the following tools:
@@ -262,10 +272,14 @@ version 2.1.0
 version 2.0.0
 ---------------------------
 + TranscriptClean: Update TranscriptClean to version 2.0.2.
-+ Memory runtime attributes are now Strings indicating total memory, as opposed to Ints indicating memory per core.
-+ Memory inputs for most tasks are now Strings, remaining Int memory inputs are renamed to "memoryGb".
-+ Use the biowdl-input-converter container for JsonToYaml, to reduce the amount of containers needed.
-+ Add biowdl-input-converter and remove SampleConfigToSampleReadgroupLists which it replaces.
++ Memory runtime attributes are now Strings indicating total memory, as
+  opposed to Ints indicating memory per core.
++ Memory inputs for most tasks are now Strings, remaining Int memory inputs
+  are renamed to "memoryGb".
++ Use the biowdl-input-converter container for JsonToYaml, to reduce the
+  amount of containers needed.
++ Add biowdl-input-converter and remove SampleConfigToSampleReadgroupLists
+  which it replaces.
 + GATK.GenotypeGVCFs: Increased memoryMultiplier from 2.0 to 3.0 .
 + Minimap2: Add -k option to minimap2 mapping.
 + Added bwakit task.
@@ -279,7 +293,9 @@ version 1.0.0
 + Removed deprecated tasks:
   + bioconda.installPrefix
   + mergecounts.MergeCounts
-+ GATK.BaseRecalibrator: "knownIndelsSitesVCFs" and "knownIndelsSitesVCFIndexes" are no longer optional, but now have a default of "[]".
++ GATK.BaseRecalibrator: "knownIndelsSitesVCFs"
+  and "knownIndelsSitesVCFIndexes" are no longer optional, but
+  now have a default of "[]".
 + Removed BWA index task.
 + Removed unused "picardJar" input from bwa.wdl.
 + All inputs to bedtools Sort are now reflected in the generated command.
@@ -295,17 +311,25 @@ version 1.0.0
 + Fastqsplitter: use version 1.1.
 + Picard: Use version 2.20.5 of the biocontainer as this includes the R dependency.
 + Common: Update dockerTag to dockerImage.
-+ GATK: Add CombineVariants task that allows, e.g., to merge VCFs from different callers.
-+ Mutect2: Add GATK tasks related to variant filtering (LearnReadOrientationModel, MergeStats, GetPileupSummaries, CalculateContamination and FilterMutectCalls).
-+ Mutect2: Add "--germline-resource" and "--f1r2-tar-gz" inputs, requiring an update to GATK 4.1.2.0.
++ GATK: Add CombineVariants task that allows, e.g., to merge VCFs
+  from different callers.
++ Mutect2: Add GATK tasks related to variant
+  filtering (LearnReadOrientationModel, MergeStats, GetPileupSummaries,
+  CalculateContamination and FilterMutectCalls).
++ Mutect2: Add "--germline-resource" and "--f1r2-tar-gz" inputs, requiring
+  an update to GATK 4.1.2.0.
 + Mutect2: Add necessary missing index attribute for panel of normals.
 + MultiQC: Add memory variable to multiqc task.
-+ GATK: SplitNCigarReads, BaseRecalibration and ApplyBQSR do no longer need regions files as required inputs.
-+ VarDict: Add user definable flags (-M, -A, -Q, -d, -v, -f) to the paired VCF filtering script.
-+ Cutadapt: If the output is a gzipped file, compress with level 1 (instead of default 6).
++ GATK: SplitNCigarReads, BaseRecalibration and ApplyBQSR do no longer need
+  regions files as required inputs.
++ VarDict: Add user definable flags (-M, -A, -Q, -d, -v, -f) to the paired
+  VCF filtering script.
++ Cutadapt: If the output is a gzipped file, compress with
+  level 1 (instead of default 6).
 + Cutadapt: Fix issues with read2output when using single-end reads.
 + Add feature type, idattr and additional attributes to htseq-count.
 + Added allow-contain option to bowtie.
 + Added a changelog to keep track of changes.
-+ Added sortByName task in samtools to support more memory efficient execution of HTSeqCount.
++ Added sortByName task in samtools to support more memory efficient
+  execution of HTSeqCount.
 + Removed the bam index from HTSeqCount's inputs.
diff --git a/CPAT.wdl b/CPAT.wdl
index 3b542e4fcb2e88859d4723f25c4659f92eb277cb..d97031dc83ec1fda5e426a8cef878e57566d6bd7 100644
--- a/CPAT.wdl
+++ b/CPAT.wdl
@@ -26,17 +26,22 @@ task CPAT {
         String outFilePath
         File hex
         File logitModel
+
         File? referenceGenome
-        File? referenceGenomeIndex  # Should be added as input if
-        # CPAT should not index the reference genome.
+        # Should be added as input if CPAT should not index the
+        # reference genome.
+        File? referenceGenomeIndex
         Array[String]? startCodons
         Array[String]? stopCodons
+
         Int timeMinutes = 10 + ceil(size(gene, "G") * 30)
         String dockerImage = "biocontainers/cpat:v1.2.4_cv1"
     }
 
-    # Some WDL magic in the command section to properly output the start and stopcodons to the command.
-    # select_first is needed in order to convert the optional arrays to non-optionals.
+    # Some WDL magic in the command section to properly output the start and
+    # stopcodons to the command.
+    # select_first is needed in order to convert the optional arrays
+    # to non-optionals.
     command {
         set -e
         mkdir -p "$(dirname ~{outFilePath})"
@@ -60,18 +65,17 @@ task CPAT {
     }
 
     parameter_meta {
+        # inputs
         gene: {description: "Equivalent to CPAT's `--gene` option.", category: "required"}
         outFilePath: {description: "Equivalent to CPAT's `--outfile` option.", category: "required"}
         hex: {description: "Equivalent to CPAT's `--hex` option.", category: "required"}
         logitModel: {description: "Equivalent to CPAT's `--logitModel` option.", category: "required"}
         referenceGenome: {description: "Equivalent to CPAT's `--ref` option.", category: "advanced"}
-        referenceGenomeIndex: {description: "The index of the reference. Should be added as input if CPAT should not index the reference genome.",
-                               category: "advanced"}
+        referenceGenomeIndex: {description: "The index of the reference. Should be added as input if CPAT should not index the reference genome.", category: "advanced"}
         startCodons: {description: "Equivalent to CPAT's `--start` option.", category: "advanced"}
         stopCodons: {description: "Equivalent to CPAT's `--stop` option.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
 
diff --git a/bam2fastx.wdl b/bam2fastx.wdl
index 18434755f251ee91df8a10e36eac3990fbd51217..e8884ab01a0b82f1829dd4223fe30a60f0c6129d 100644
--- a/bam2fastx.wdl
+++ b/bam2fastx.wdl
@@ -1,6 +1,6 @@
 version 1.0
 
-# Copyright (c) 2020 Sequencing Analysis Support Core - Leiden University Medical Center
+# Copyright (c) 2020 Leiden University Medical Center
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
 # of this software and associated documentation files (the "Software"), to deal
@@ -8,10 +8,10 @@ version 1.0
 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 # copies of the Software, and to permit persons to whom the Software is
 # furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
@@ -37,7 +37,22 @@ task Bam2Fasta {
 
     command {
         set -e
-        mkdir -p "$(dirname ~{outputPrefix})"
+        mkdir -p "$(dirname ~{outputPrefix})"
+
+        # Localise the bam and pbi files so they are next to each other in the
+        # current folder.
+        bamFiles=""
+        for bamFile in ~{sep=" " bam};
+        do
+            ln $bamFile .
+            bamFiles=$bamFiles" $(basename $bamFile)"
+        done
+
+        for index in ~{sep=" " bamIndex};
+        do
+            ln $index .
+        done
+
         bam2fasta \
         --output ~{outputPrefix} \
         -c ~{compressionLevel} \
@@ -93,15 +108,17 @@ task Bam2Fastq {
         mkdir -p "$(dirname ~{outputPrefix})"
 
         # Localise the bam and pbi files so they are next to each other in the
-        # current folder
-        bamfiles=""
-        for bamfile in ~{sep=" " bam};do
-            ln $bamfile .
-            bamfiles=$bamfiles" $(basename $bamfile)"
+        # current folder.
+        bamFiles=""
+        for bamFile in ~{sep=" " bam};
+        do
+            ln $bamFile .
+            bamFiles=$bamFiles" $(basename $bamFile)"
         done
 
-        for bamindex in ~{sep=" " bamIndex}; do
-            ln $bamindex .
+        for index in ~{sep=" " bamIndex};
+        do
+            ln $index .
         done
 
         bam2fastq \
@@ -109,7 +126,7 @@ task Bam2Fastq {
         -c ~{compressionLevel} \
         ~{true="--split-barcodes" false="" splitByBarcode} \
         ~{"--seqid-prefix " + seqIdPrefix} \
-        $bamfiles
+        $bamFiles
     }
 
     output {
diff --git a/bcftools.wdl b/bcftools.wdl
index a0aeb44258e6d133fb7b97355287921574bbeceb..4182574767ab5aadf6229b952ded806398220a98 100644
--- a/bcftools.wdl
+++ b/bcftools.wdl
@@ -1,7 +1,5 @@
 version 1.0
 
-# MIT License
-#
 # Copyright (c) 2018 Leiden University Medical Center
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -24,26 +22,27 @@ version 1.0
 
 task Annotate {
     input {
+        Array[String] columns = []
+        Boolean force = false
+        Boolean keepSites = false
+        Boolean noVersion = false
+        Array[String] samples = []
+        Boolean singleOverlaps = false
+        Array[String] removeAnns = []
+        File inputFile
+        String outputPath = "output.vcf.gz"
+
         File? annsFile
         String? collapse
-        Array[String] columns = []
         String? exclude
-        Boolean force = false
         File? headerLines
         String? newId
         String? include
-        Boolean keepSites = false
         String? markSites
-        Boolean noVersion = false
         String? regions
         File? regionsFile
         File? renameChrs
-        Array[String] samples = []
         File? samplesFile
-        Boolean singleOverlaps = false
-        Array[String] removeAnns = []
-        File inputFile
-        String outputPath = "output.vcf.gz"
         
         Int threads = 0
         String memory = "256M"
@@ -80,9 +79,8 @@ task Annotate {
         ~{inputFile}
 
         ~{if compressed then 'bcftools index --tbi ~{outputPath}' else ''}
-
     }
-    
+
     output {
         File outputVcf = outputPath
         File? outputVcfIndex = outputPath + ".tbi"
@@ -95,31 +93,31 @@ task Annotate {
     }
 
     parameter_meta {
+        # inputs
+        columns: {description: "Comma-separated list of columns or tags to carry over from the annotation file (see man page for details).", category: "advanced"}
+        force: {description: "Continue even when parsing errors, such as undefined tags, are encountered.", category: "advanced"}
+        keepSites: {description: "Keep sites which do not pass -i and -e expressions instead of discarding them.", category: "advanced"}
+        noVersion: {description: "Do not append version and command line information to the output VCF header.", category: "advanced"}
+        samples: {description: "List of samples for sample stats, \"-\" to include all samples.", category: "advanced"}
+        singleOverlaps: {description: "keep memory requirements low with very large annotation files.", category: "advanced"}
+        removeAnns: {description: "List of annotations to remove (see man page for details).", category: "advanced"}
+        inputFile: {description: "A vcf or bcf file.", category: "required"}
         outputPath: {description: "The location the output VCF file should be written.", category: "common"}
         annsFile: {description: "Bgzip-compressed and tabix-indexed file with annotations (see man page for details).", category: "advanced"}
         collapse: {description: "Treat as identical records with <snps|indels|both|all|some|none>, see man page for details.", category: "advanced"}
-        columns: {description: "Comma-separated list of columns or tags to carry over from the annotation file (see man page for details).", category: "advanced"}
         exclude: {description: "Exclude sites for which the expression is true (see man page for details).", category: "advanced"}
-        force: {description: "Continue even when parsing errors, such as undefined tags, are encountered.", category: "advanced"}
         headerLines: {description: "Lines to append to the VCF header (see man page for details).", category: "advanced"}
         newId: {description: "Assign ID on the fly (e.g. --set-id +'%CHROM\_%POS').", category: "advanced"}
         include: {description: "Select sites for which the expression is true (see man page for details).", category: "advanced"}
-        keepSites: {description: "Keep sites which do not pass -i and -e expressions instead of discarding them.", category: "advanced"}
         markSites: {description: "Annotate sites which are present ('+') or absent ('-') in the -a file with a new INFO/TAG flag.", category: "advanced"}
-        noVersion: {description: "Do not append version and command line information to the output VCF header.", category: "advanced"}
         regions: {description: "Restrict to comma-separated list of regions.", category: "advanced"}
         regionsFile: {description: "Restrict to regions listed in a file.", category: "advanced"}
         renameChrs: {description: "rename chromosomes according to the map in file (see man page for details).", category: "advanced"}
-        samples: {description: "List of samples for sample stats, \"-\" to include all samples.", category: "advanced"}
         samplesFile: {description: "File of samples to include.", category: "advanced"}
-        singleOverlaps: {description: "keep memory requirements low with very large annotation files.", category: "advanced"}
-        removeAnns: {description: "List of annotations to remove (see man page for details).", category: "advanced"}
-        inputFile: {description: "A vcf or bcf file.", category: "required"}
-
         threads: {description: "Number of extra decompression threads [0].", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
         memory: {description: "The amount of memory this job will use.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
 
@@ -128,6 +126,7 @@ task Sort {
         File inputFile
         String outputPath = "output.vcf.gz"
         String tmpDir = "./sorting-tmp"
+
         String memory = "256M"
         Int timeMinutes = 1 + ceil(size(inputFile, "G"))
         String dockerImage = "quay.io/biocontainers/bcftools:1.10.2--h4f4756c_2"
@@ -159,6 +158,7 @@ task Sort {
     }
 
     parameter_meta {
+        # inputs
         inputFile: {description: "A vcf or bcf file.", category: "required"}
         outputPath: {description: "The location the output VCF file should be written.", category: "common"}
         tmpDir: {description: "The location of the temporary files during the bcftools sorting.", category: "advanced"}
@@ -166,46 +166,45 @@ task Sort {
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
         dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
-
-    
 }
 
 task Stats {
     input {
         File inputVcf
         File inputVcfIndex
+        String outputPath = basename(inputVcf) + ".stats"
+        Boolean firstAlleleOnly = false
+        Boolean splitByID = false
+        Array[String] samples = []
+        Boolean verbose = false
+
         File? compareVcf
         File? compareVcfIndex
-        String outputPath = basename(inputVcf) + ".stats"
         String? afBins
         String? afTag
-        Boolean firstAlleleOnly = false 
         String? collapse
         String? depth
         String? exclude
-        File? exons 
+        File? exons
         String? applyFilters
         File? fastaRef
         File? fastaRefIndex
-        String? include 
-        Boolean splitByID = false 
+        String? include
         String? regions
         File? regionsFile
-        Array[String] samples = []
-        File? samplesFile 
-        String? targets 
+        File? samplesFile
+        String? targets
         File? targetsFile
         String? userTsTv
-        Boolean verbose = false
 
         Int threads = 0
-        Int timeMinutes = 1 + 2* ceil(size(select_all([inputVcf, compareVcf]), "G"))  # TODO: Estimate, 2 minutes per GB, refine later.
-        String memory = "256M" 
+        String memory = "256M"
+        Int timeMinutes = 1 + 2* ceil(size(select_all([inputVcf, compareVcf]), "G")) # TODO: Estimate, 2 minutes per GB, refine later.
         String dockerImage = "quay.io/biocontainers/bcftools:1.10.2--h4f4756c_2"
     }
-    
+
     command {
-        set -e 
+        set -e
         mkdir -p $(dirname ~{outputPath})
         bcftools stats \
         ~{"--af-bins " + afBins} \
@@ -237,19 +236,24 @@ task Stats {
 
     runtime {
         cpu: threads + 1
-        time_minutes: timeMinutes
         memory: memory
+        time_minutes: timeMinutes
         docker: dockerImage
     }
 
     parameter_meta {
+        # inputs
         inputVcf: {description: "The VCF to be analysed.", category: "required"}
         inputVcfIndex: {description: "The index for the input VCF.", category: "required"}
+        outputPath: {description: "The location the output VCF file should be written.", category: "common"}
+        firstAlleleOnly: {description: "Include only 1st allele at multiallelic sites.", category: "advanced"}
+        splitByID: {description: "Collect stats for sites with ID separately (known vs novel).", category: "advanced"}
+        samples: {description: "List of samples for sample stats, \"-\" to include all samples.", category: "advanced"}
+        verbose: {description: "Produce verbose per-site and per-sample output.", category: "advanced"}
         compareVcf: {description: "When inputVcf and compareVCF are given, the program generates separate stats for intersection and the complements. By default only sites are compared, samples must be given to include also sample columns.", category: "common"}
         compareVcfIndex: {description: "Index for the compareVcf.", category: "common"}
         afBins: {description: "Allele frequency bins, a list (0.1,0.5,1) or a file (0.1\n0.5\n1).", category: "advanced"}
         afTag: {description: "Allele frequency tag to use, by default estimated from AN,AC or GT.", category: "advanded"}
-        firstAlleleOnly: {description: "Include only 1st allele at multiallelic sites.", category: "advanced"}
         collapse: {description: "Treat as identical records with <snps|indels|both|all|some|none>, see man page for details.", category: "advanced"}
         depth: {description: "Depth distribution: min,max,bin size [0,500,1].", category: "advanced"}
         exclude: {description: "Exclude sites for which the expression is true (see man page for details).", category: "advanced"}
@@ -258,20 +262,16 @@ task Stats {
         fastaRef: {description: "Faidx indexed reference sequence file to determine INDEL context.", category: "advanced"}
         fastaRefIndex: {description: "Index file (.fai) for fastaRef. Must be supplied if fastaRef is supplied.", category: "advanced"}
         include: {description: "Select sites for which the expression is true (see man page for details).", category: "advanced"}
-        splitByID: {description: "Collect stats for sites with ID separately (known vs novel).", category: "advanced"}
         regions: {description: "Restrict to comma-separated list of regions.", category: "advanced"}
         regionsFile: {description: "Restrict to regions listed in a file.", category: "advanced"}
-        samples: {description: "List of samples for sample stats, \"-\" to include all samples.", category: "advanced"}
         samplesFile: {description: "File of samples to include.", category: "advanced"}
         targets: {description: "Similar to regions but streams rather than index-jumps.", category: "advanced"}
         targetsFile: {description: "Similar to regionsFile but streams rather than index-jumps.", category: "advanced"}
         userTsTv: {description: "<TAG[:min:max:n]>. Collect Ts/Tv stats for any tag using the given binning [0:1:100].", category: "advanced"}
         threads: {description: "Number of extra decompression threads [0].", category: "advanced"}
-        verbose: {description: "Produce verbose per-site and per-sample output.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
-        outputPath: {description: "The location the output VCF file should be written.", category: "common"}
         memory: {description: "The amount of memory this job will use.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
 
@@ -279,6 +279,7 @@ task View {
     input {
         File inputFile
         String outputPath = "output.vcf"
+
         String memory = "256M"
         Int timeMinutes = 1 + ceil(size(inputFile, "G"))
         String dockerImage = "quay.io/biocontainers/bcftools:1.10.2--h4f4756c_2"
@@ -296,6 +297,7 @@ task View {
 
         ~{if compressed then 'bcftools index --tbi ~{outputPath}' else ''}
     }
+
     output {
         File outputVcf = outputPath
         File? outputVcfIndex = outputPath + ".tbi"
@@ -308,6 +310,7 @@ task View {
     }
 
     parameter_meta {
+        # inputs
         inputFile: {description: "A vcf or bcf file.", category: "required"}
         outputPath: {description: "The location the output VCF file should be written.", category: "common"}
         memory: {description: "The amount of memory this job will use.", category: "advanced"}
diff --git a/bedtools.wdl b/bedtools.wdl
index c228d6c6fb838f04f03a55f6729a8b8e08ceca9e..b7a03c173d2c29127c625a726107210b93c57c62 100644
--- a/bedtools.wdl
+++ b/bedtools.wdl
@@ -25,6 +25,7 @@ task Complement {
         File faidx
         File inputBed
         String outputBed = basename(inputBed, "\.bed") + ".complement.bed"
+
         String memory = "~{512 + ceil(size([inputBed, faidx], "M"))}M"
         Int timeMinutes = 1 + ceil(size([inputBed, faidx], "G"))
         String dockerImage = "quay.io/biocontainers/bedtools:2.23.0--hdbcaa40_3"
@@ -52,13 +53,13 @@ task Complement {
     }
 
     parameter_meta {
+        # inputs
         faidx: {description: "The fasta index (.fai) file from which to extract the genome sizes.", category: "required"}
         inputBed: {description: "The inputBed to complement.", category: "required"}
         outputBed: {description: "The path to write the output to.", category: "advanced"}
         memory: {description: "The amount of memory needed for the job.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
 
@@ -66,12 +67,14 @@ task Merge {
     input {
         File inputBed
         String outputBed = "merged.bed"
+
         String memory = "~{512 + ceil(size(inputBed, "M"))}M"
         Int timeMinutes = 1 + ceil(size(inputBed, "G"))
         String dockerImage = "quay.io/biocontainers/bedtools:2.23.0--hdbcaa40_3"
     }
 
     command {
+        set -e
         bedtools merge -i ~{inputBed} > ~{outputBed}
     }
 
@@ -86,12 +89,12 @@ task Merge {
     }
 
     parameter_meta {
+        # inputs
         inputBed: {description: "The bed to merge.", category: "required"}
         outputBed: {description: "The path to write the output to.", category: "advanced"}
         memory: {description: "The amount of memory needed for the job.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
 
@@ -100,6 +103,7 @@ task MergeBedFiles {
     input {
         Array[File]+ bedFiles
         String outputBed = "merged.bed"
+
         String memory = "~{512 + ceil(size(bedFiles, "M"))}M"
         Int timeMinutes = 1 + ceil(size(bedFiles, "G"))
         String dockerImage = "quay.io/biocontainers/bedtools:2.23.0--hdbcaa40_3"
@@ -120,13 +124,14 @@ task MergeBedFiles {
         time_minutes: timeMinutes
         docker: dockerImage
     }
+
     parameter_meta {
+        # inputs
         bedFiles: {description: "The bed files to merge.", category: "required"}
         outputBed: {description: "The path to write the output to.", category: "advanced"}
         memory: {description: "The amount of memory needed for the job.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
 
@@ -139,9 +144,13 @@ task Sort {
         Boolean chrThenSizeD = false
         Boolean chrThenScoreA = false
         Boolean chrThenScoreD = false
+        String outputBed = "output.sorted.bed"
+
         File? genome
         File? faidx
-        String outputBed = "output.sorted.bed"
+
+        String memory = "~{512 + ceil(size(inputBed, "M"))}M"
+        Int timeMinutes = 1 + ceil(size(inputBed, "G"))
         String dockerImage = "quay.io/biocontainers/bedtools:2.23.0--hdbcaa40_3"
     }
 
@@ -166,6 +175,8 @@ task Sort {
     }
 
     runtime {
+        memory: memory
+        time_minutes: timeMinutes
         docker: dockerImage
     }
 }
@@ -174,13 +185,15 @@ task Intersect {
     input {
         File regionsA
         File regionsB
-        # Giving a faidx file will set the sorted option.
-        File? faidx
         String outputBed = "intersect.bed"
+
+        File? faidx # Giving a faidx file will set the sorted option.
+
         String memory = "~{512 + ceil(size([regionsA, regionsB], "M"))}M"
         Int timeMinutes = 1 + ceil(size([regionsA, regionsB], "G"))
         String dockerImage = "quay.io/biocontainers/bedtools:2.23.0--hdbcaa40_3"
     }
+
     Boolean sorted = defined(faidx)
 
     command {
@@ -205,14 +218,13 @@ task Intersect {
     }
 
     parameter_meta {
-        faidx: {description: "The fasta index (.fai) file that is used to create the genome file required for sorted output. Implies sorted option.",
-                category: "common"}
-        regionsA: {description: "Region file a to intersect", category: "required"}
-        regionsB: {description: "Region file b to intersect", category: "required"}
-        outputBed: {description: "The path to write the output to", category: "advanced"}
+        # inputs
+        regionsA: {description: "Region file a to intersect.", category: "required"}
+        regionsB: {description: "Region file b to intersect.", category: "required"}
+        outputBed: {description: "The path to write the output to.", category: "advanced"}
+        faidx: {description: "The fasta index (.fai) file that is used to create the genome file required for sorted output. Implies sorted option.", category: "common"}
         memory: {description: "The amount of memory needed for the job.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
diff --git a/biowdl.wdl b/biowdl.wdl
index 838755d911b42a729774c227d8521634c37c7b86..8a1f9dfd4bb38900f434b058ab93ec5893747f28 100644
--- a/biowdl.wdl
+++ b/biowdl.wdl
@@ -32,6 +32,7 @@ task InputConverter {
         Boolean checkFileMd5sums=false
         Boolean old=false
 
+        String memory = "128M"
         Int timeMinutes = 1
         String dockerImage = "quay.io/biocontainers/biowdl-input-converter:0.2.1--py_0"
     }
@@ -52,22 +53,20 @@ task InputConverter {
     }
 
     runtime {
-        memory: "128M"
+        memory: memory
         time_minutes: timeMinutes
         docker: dockerImage
     }
 
     parameter_meta {
+        # inputs
         samplesheet: {description: "The samplesheet to be processed.", category: "required"}
-        outputFile: {description: "The location the JSON representation of the samplesheet should be written to.",
-                     category: "advanced"}
-        skipFileCheck: {description: "Whether or not the existance of the files mentioned in the samplesheet should be checked.",
-                        category: "advanced"}
-        checkFileMd5sums: {description: "Whether or not the MD5 sums of the files mentioned in the samplesheet should be checked.",
-                           category: "advanced"}
+        outputFile: {description: "The location the JSON representation of the samplesheet should be written to.", category: "advanced"}
+        skipFileCheck: {description: "Whether or not the existence of the files mentioned in the samplesheet should be checked.", category: "advanced"}
+        checkFileMd5sums: {description: "Whether or not the MD5 sums of the files mentioned in the samplesheet should be checked.", category: "advanced"}
         old: {description: "Whether or not the old samplesheet format should be used.", category: "advanced"}
+        memory: {description: "The amount of memory needed for the job.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
diff --git a/bowtie.wdl b/bowtie.wdl
index b3f3ceaeadcc165cb2feec9d4e734f31e32c437c..7fb1b614c3cb3cdeeb7ab87c815f0524315d3b30 100644
--- a/bowtie.wdl
+++ b/bowtie.wdl
@@ -1,7 +1,5 @@
 version 1.0
 
-# MIT License
-#
 # Copyright (c) 2018 Leiden University Medical Center
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -28,30 +26,31 @@ task Bowtie {
         Array[File] readsDownstream = []
         String outputPath = "mapped.bam"
         Array[File]+ indexFiles
-        Int? seedmms
-        Int? seedlen
-        Int? k
         Boolean best = false
         Boolean strata = false
         Boolean allowContain = false
+
+        Int? seedmms
+        Int? seedlen
+        Int? k
         String? samRG
 
+        String picardXmx = "4G"
         Int threads = 1
-        Int timeMinutes = 1 + ceil(size(flatten([readsUpstream, readsDownstream]), "G") * 300 / threads)
         String memory = "~{5 + ceil(size(indexFiles, "G"))}G"
-        String picardXmx = "4G"
+        Int timeMinutes = 1 + ceil(size(flatten([readsUpstream, readsDownstream]), "G") * 300 / threads)
         # Image contains bowtie=1.2.2 and picard=2.9.2
         String dockerImage = "quay.io/biocontainers/mulled-v2-bfe71839265127576d3cd749c056e7b168308d56:1d8bec77b352cdcf3e9ff3d20af238b33ed96eae-0"
     }
 
     # Assume fastq input with -q flag.
-    # The output always needs to be SAM as it is piped into Picard SortSam
+    # The output always needs to be SAM as it is piped into Picard SortSam.
     # Hence, the --sam flag is used.
-
     command {
         set -e -o pipefail
         mkdir -p "$(dirname ~{outputPath})"
-        bowtie -q \
+        bowtie \
+        -q \
         --sam \
         ~{"--seedmms " +  seedmms} \
         ~{"--seedlen " + seedlen} \
@@ -84,24 +83,22 @@ task Bowtie {
     }
 
     parameter_meta {
+        # inputs
         readsUpstream: {description: "The first-/single-end fastq files.", category: "required"}
         readsDownstream: {description: "The second-end fastq files.", category: "common"}
         outputPath: {description: "The location the output BAM file should be written to.", category: "common"}
         indexFiles: {description: "The index files for bowtie.", category: "required"}
-        seedmms: {description: "Equivalent to bowtie's `--seedmms` option.", category: "advanced"}
-        seedlen: {description: "Equivalent to bowtie's `--seedlen` option.", category: "advanced"}
-        k: {description: "Equivalent to bowtie's `-k` option.", category: "advanced"}
         best: {description: "Equivalent to bowtie's `--best` flag.", category: "advanced"}
         strata: {description: "Equivalent to bowtie's `--strata` flag.", category: "advanced"}
         allowContain: {description: "Equivalent to bowtie's `--allow-contain` flag.", category: "advanced"}
+        seedmms: {description: "Equivalent to bowtie's `--seedmms` option.", category: "advanced"}
+        seedlen: {description: "Equivalent to bowtie's `--seedlen` option.", category: "advanced"}
+        k: {description: "Equivalent to bowtie's `-k` option.", category: "advanced"}
         samRG: {description: "Equivalent to bowtie's `--sam-RG` option.", category: "advanced"}
-
-        picardXmx: {description: "The maximum memory available to the picard (used for sorting the output). Should be lower than `memory` to accommodate JVM overhead and bowtie's memory usage.",
-                  category: "advanced"}
+        picardXmx: {description: "The maximum memory available to the picard (used for sorting the output). Should be lower than `memory` to accommodate JVM overhead and bowtie's memory usage.", category: "advanced"}
         threads: {description: "The number of threads to use.", category: "advanced"}
         memory: {description: "The amount of memory this job will use.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
diff --git a/bwa-mem2.wdl b/bwa-mem2.wdl
index 6ea4578dfa21b1dab14e5424e249364ebb80fb2a..34cd38a6a1b1c6c811bfb96d2d1ac9c71fc8e000 100644
--- a/bwa-mem2.wdl
+++ b/bwa-mem2.wdl
@@ -26,33 +26,35 @@ task Mem {
         File? read2
         BwaIndex bwaIndex
         String outputPrefix
-        String? readgroup
         Boolean sixtyFour = false
         Boolean usePostalt = false
-        Int threads = 4
-        Int? sortThreads
         Int sortMemoryPerThreadGb = 2
         Int compressionLevel = 1
-        Int? memoryGb 
+
+        String? readgroup
+        Int? sortThreads
+        Int? memoryGb
+
+        Int threads = 4
         Int timeMinutes = 1 + ceil(size([read1, read2], "G") * 220 / threads)
         # Contains bwa-mem2 2.0 bwakit 0.7.17.dev1 and samtools 1.10
         String dockerImage = "quay.io/biocontainers/mulled-v2-6a15c99309c82b345497d24489bee67bbb76c2f6:1c9c3227b9bf825a8dc9726a25701aa23c0b1f12-0"
     }
 
-    # Samtools sort may block the pipe while it is writing data to disk. 
+    # Samtools sort may block the pipe while it is writing data to disk.
     # This can lead to cpu underutilization.
-    # 1 thread if threads is 1. For 2-4 threads 2 sort threads. 3 sort threads for 5-8 threads. 
+    # 1 thread if threads is 1. For 2-4 threads 2 sort threads. 3 sort threads for 5-8 threads.
     Int estimatedSortThreads = if threads == 1 then 1 else 1 + ceil(threads / 4.0)
     Int totalSortThreads = select_first([sortThreads, estimatedSortThreads])
-    # BWA-mem2's index files contain 2 BWT indexes of which only one is used. .2bit64 is used by default and 
+    # BWA-mem2's index files contain 2 BWT indexes of which only one is used. .2bit64 is used by default and
     # .8bit32 is used for avx2.
     # The larger one of these is the 8bit32 index. Since we do not know beforehand which one is used we need to accomodate for that.
-    # Using only the 8bit32 index uses 57,5% of the index files. Since bwa-mem2 uses slightly more memory than the index
+    # Using only the 8bit32 index uses 57.5% of the index files. Since bwa-mem2 uses slightly more memory than the index,
     # We put it at 62% as a safety factor. That means the memory usage for bwa-mem will be 53G for a human genome. Resulting in 60G total
     # on 8 cores with samtools with 3 sort threads.
     Int estimatedMemoryGb = 1 + ceil(size(bwaIndex.indexFiles, "G") * 0.62) + sortMemoryPerThreadGb * totalSortThreads
     
-    # The bwa postalt script is out commented as soon as usePostalt = false. 
+    # The bwa postalt script is out commented as soon as usePostalt = false.
     # This hack was tested with bash, dash and ash. It seems that comments in between pipes work for all of them.
     command {
         set -e
@@ -81,7 +83,7 @@ task Mem {
     runtime {
         # One extra thread for bwa-postalt + samtools is not needed.
         # These only use 5-10% of compute power and not always simultaneously.
-        cpu: threads  
+        cpu: threads
         memory: "~{select_first([memoryGb, estimatedMemoryGb])}G"
         time_minutes: timeMinutes
         docker: dockerImage
@@ -92,21 +94,21 @@ task Mem {
         read1: {description: "The first-end fastq file.", category: "required"}
         read2: {description: "The second-end fastq file.", category: "common"}
         bwaIndex: {description: "The BWA index, including (optionally) a .alt file.", category: "required"}
-        usePostalt: {description: "Whether to use the postalt script from bwa kit."}
         outputPrefix: {description: "The prefix of the output files, including any parent directories.", category: "required"}
-        readgroup: {description: "A readgroup identifier.", category: "common"}
         sixtyFour: {description: "Whether or not the index uses the '.64' suffixes.", category: "common"}
-        threads: {description: "The number of threads to use for alignment.", category: "advanced"}
-        memoryGb: {description: "The amount of memory this job will use in gigabytes.", category: "advanced"}
-        sortThreads: {description: "The number of threads to use for sorting.", category: "advanced"}
+        usePostalt: {description: "Whether to use the postalt script from bwa kit."}
         sortMemoryPerThreadGb: {description: "The amount of memory for each sorting thread in gigabytes.", category: "advanced"}
         compressionLevel: {description: "The compression level of the output BAM.", category: "advanced"}
+        readgroup: {description: "A readgroup identifier.", category: "common"}
+        sortThreads: {description: "The number of threads to use for sorting.", category: "advanced"}
+        memoryGb: {description: "The amount of memory this job will use in gigabytes.", category: "advanced"}
+        threads: {description: "The number of threads to use for alignment.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
 
         # outputs
-        outputBam: "The produced BAM file."
+        outputBam: {description: "The produced BAM file."}
+        outputHla: {description: "The produced HLA file."}
     }
 }
 
diff --git a/bwa.wdl b/bwa.wdl
index fdeb870f19a67375ab8d4ff8e11221a47d761aae..0f09f7a99f4a58e2f8e9a97fa6e4fdec7353a9bc 100644
--- a/bwa.wdl
+++ b/bwa.wdl
@@ -26,28 +26,30 @@ task Mem {
         File? read2
         BwaIndex bwaIndex
         String outputPrefix
-        String? readgroup
         Boolean sixtyFour = false
         Boolean usePostalt = false
-        Int threads = 4
-        Int? sortThreads
         Int sortMemoryPerThreadGb = 2
         Int compressionLevel = 1
-        Int? memoryGb 
+
+        String? readgroup
+        Int? sortThreads
+        Int? memoryGb
+
+        Int threads = 4
         Int timeMinutes = 1 + ceil(size([read1, read2], "G") * 220 / threads)
         # Contains bwa 0.7.17 bwakit 0.7.17.dev1 and samtools 1.10
         String dockerImage = "quay.io/biocontainers/mulled-v2-ad317f19f5881324e963f6a6d464d696a2825ab6:c59b7a73c87a9fe81737d5d628e10a3b5807f453-0"
     }
 
-    # Samtools sort may block the pipe while it is writing data to disk. 
+    # Samtools sort may block the pipe while it is writing data to disk.
     # This can lead to cpu underutilization.
-    # 1 thread if threads is 1. For 2-4 threads 2 sort threads. 3 sort threads for 5-8 threads. 
+    # 1 thread if threads is 1. For 2-4 threads 2 sort threads. 3 sort threads for 5-8 threads.
     Int estimatedSortThreads = if threads == 1 then 1 else 1 + ceil(threads / 4.0)
     Int totalSortThreads = select_first([sortThreads, estimatedSortThreads])
-    # BWA needs slightly more memory than the size of the index files (~10%). Add a margin for safety here.  
+    # BWA needs slightly more memory than the size of the index files (~10%). Add a margin for safety here.
     Int estimatedMemoryGb = 1 + ceil(size(bwaIndex.indexFiles, "G") * 1.2) + sortMemoryPerThreadGb * totalSortThreads
     
-    # The bwa postalt script is out commented as soon as usePostalt = false. 
+    # The bwa postalt script is out commented as soon as usePostalt = false.
     # This hack was tested with bash, dash and ash. It seems that comments in between pipes work for all of them.
     command {
         set -e
@@ -76,7 +78,7 @@ task Mem {
     runtime {
         # One extra thread for bwa-postalt + samtools is not needed.
         # These only use 5-10% of compute power and not always simultaneously.
-        cpu: threads  
+        cpu: threads
         memory: "~{select_first([memoryGb, estimatedMemoryGb])}G"
         time_minutes: timeMinutes
         docker: dockerImage
@@ -87,21 +89,21 @@ task Mem {
         read1: {description: "The first-end fastq file.", category: "required"}
         read2: {description: "The second-end fastq file.", category: "common"}
         bwaIndex: {description: "The BWA index, including (optionally) a .alt file.", category: "required"}
-        usePostalt: {description: "Whether to use the postalt script from bwa kit."}
         outputPrefix: {description: "The prefix of the output files, including any parent directories.", category: "required"}
-        readgroup: {description: "A readgroup identifier.", category: "common"}
         sixtyFour: {description: "Whether or not the index uses the '.64' suffixes.", category: "common"}
-        threads: {description: "The number of threads to use for alignment.", category: "advanced"}
-        memoryGb: {description: "The amount of memory this job will use in gigabytes.", category: "advanced"}
-        sortThreads: {description: "The number of threads to use for sorting.", category: "advanced"}
+        usePostalt: {description: "Whether to use the postalt script from bwa kit."}
         sortMemoryPerThreadGb: {description: "The amount of memory for each sorting thread in gigabytes.", category: "advanced"}
         compressionLevel: {description: "The compression level of the output BAM.", category: "advanced"}
+        readgroup: {description: "A readgroup identifier.", category: "common"}
+        sortThreads: {description: "The number of threads to use for sorting.", category: "advanced"}
+        memoryGb: {description: "The amount of memory this job will use in gigabytes.", category: "advanced"}
+        threads: {description: "The number of threads to use for alignment.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
 
         # outputs
-        outputBam: "The produced BAM file."
+        outputBam: {description: "The produced BAM file."}
+        outputHla: {description: "The produced HLA file."}
     }
 }
 
diff --git a/ccs.wdl b/ccs.wdl
index cab15feafc8cdadd12c309b6cdd75f16a3e026b3..4446937b45271a4f90906e4caa6d0d6ffad7ad90 100644
--- a/ccs.wdl
+++ b/ccs.wdl
@@ -1,6 +1,6 @@
 version 1.0
 
-# Copyright (c) 2020 Sequencing Analysis Support Core - Leiden University Medical Center
+# Copyright (c) 2020 Leiden University Medical Center
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
 # of this software and associated documentation files (the "Software"), to deal
@@ -8,10 +8,10 @@ version 1.0
 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 # copies of the Software, and to permit persons to whom the Software is
 # furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
@@ -22,19 +22,20 @@ version 1.0
 
 task CCS {
     input {
+        File subreadsFile
+        String outputPrefix
         Int minPasses = 3
         Int minLength = 10
         Int maxLength = 50000
         Boolean byStrand = false
         Float minReadQuality = 0.99
         String logLevel = "WARN"
-        File subreadsFile
+
         File? subreadsIndexFile
         String? chunkString
-        String outputPrefix
-        
-        Int cores = 2
-        String memory = "2G"
+
+        Int threads = 2
+        String memory = "4G"
         Int timeMinutes = 1440
         String dockerImage = "quay.io/biocontainers/pbccs:5.0.0--0"
     }
@@ -49,7 +50,7 @@ task CCS {
         ~{true="--by-strand" false="" byStrand} \
         --min-rq ~{minReadQuality} \
         --log-level ~{logLevel} \
-        --num-threads ~{cores} \
+        --num-threads ~{threads} \
         ~{"--chunk " + chunkString} \
         ~{"--report-json " + outputPrefix + ".ccs.report.json"} \
         ~{"--log-file " + outputPrefix + ".ccs.stderr.log"} \
@@ -65,7 +66,7 @@ task CCS {
     }
 
     runtime {
-        cpu: cores
+        cpu: threads
         memory: memory
         time_minutes: timeMinutes
         docker: dockerImage
@@ -73,17 +74,17 @@ task CCS {
 
     parameter_meta {
         # inputs
+        subreadsFile: {description: "Subreads input file.", category: "required"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
         minPasses: {description: "Minimum number of full-length subreads required to generate ccs for a ZMW.", category: "advanced"}
         minLength: {description: "Minimum draft length before polishing.", category: "advanced"}
         maxLength: {description: "Maximum draft length before polishing.", category: "advanced"}
         byStrand: {description: "Generate a consensus for each strand.", category: "advanced"}
         minReadQuality: {description: "Minimum predicted accuracy in [0, 1].", category: "common"}
         logLevel: {description: "Set log level. Valid choices: (TRACE, DEBUG, INFO, WARN, FATAL).", category: "advanced"}
-        subreadsFile: {description: "Subreads input file.", category: "required"}
         subreadsIndexFile: {description: "Index for the subreads input file, required when using chunkString.", category: "advanced"}
         chunkString: {descpription: "Chunk string (e.g. 1/4, 5/5) for CCS.", category: "advanced"}
-        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
-        cores: {description: "The number of cores to be used.", category: "advanced"}
+        threads: {description: "The number of threads to be used.", category: "advanced"}
         memory: {description: "The amount of memory available to the job.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
         dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
diff --git a/centrifuge.wdl b/centrifuge.wdl
index 1e7a0b4566c1b782877ee37689a7a49045ea7fad..1637abdd911f083c2fb27ebc5d82dbe29fc5957b 100644
--- a/centrifuge.wdl
+++ b/centrifuge.wdl
@@ -94,13 +94,13 @@ task Build {
 
 task Classify {
     input {
+        Array[File]+ read1
+        Array[File] read2 = []
         String inputFormat = "fastq"
         Boolean phred64 = false
         Int minHitLength = 22
         Array[File]+ indexFiles
-        Array[File]+ read1
         String outputPrefix
-        Array[File] read2 = []
 
         Int? trim5
         Int? trim3
@@ -155,13 +155,13 @@ task Classify {
 
     parameter_meta {
         # inputs
+        read1: {description: "List of files containing mate 1s, or unpaired reads.", category: "required"}
+        read2: {description: "List of files containing mate 2s.", category: "common"}
         inputFormat: {description: "The format of the read file(s).", category: "required"}
         phred64: {description: "If set to true, phred+64 encoding is used.", category: "required"}
         minHitLength: {description: "Minimum length of partial hits.", category: "required"}
         indexFiles: {description: "The files of the index for the reference genomes.", category: "required"}
-        read1: {description: "List of files containing mate 1s, or unpaired reads.", category: "required"}
         outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
-        read2: {description: "List of files containing mate 2s.", category: "common"}
         trim5: {description: "Trim <int> bases from 5' (left) end of each read before alignment.", category: "common"}
         trim3: {description: "Trim <int> bases from 3' (right) end of each read before alignment.", category: "common"}
         reportMaxDistinct: {description: "It searches for at most <int> distinct, primary assignments for each read or pair.", category: "common"}
diff --git a/chunked-scatter.wdl b/chunked-scatter.wdl
index 115c5ca4eb8fb8407ef0f2d7c36c61c39c669d11..844d69903fd9228220e98bfcb3e56e89d40f82fe 100644
--- a/chunked-scatter.wdl
+++ b/chunked-scatter.wdl
@@ -25,6 +25,7 @@ task ChunkedScatter {
         File inputFile
         String prefix = "./scatter"
         Boolean splitContigs = false
+
         Int? chunkSize
         Int? overlap
         Int? minimumBasesPerFile
@@ -57,15 +58,16 @@ task ChunkedScatter {
     }
 
     parameter_meta {
+        # inputs
         inputFile: {description: "Either a bed file describing regiosn of intrest or a sequence dictionary.", category: "required"}
         prefix: {description: "The prefix for the output files.", category: "advanced"}
+        splitContigs: {description: "If set, contigs are allowed to be split up over multiple files.", category: "advanced"}
         chunkSize: {description: "Equivalent to chunked-scatter's `-c` option.", category: "advanced"}
         overlap: {description: "Equivalent to chunked-scatter's `-o` option.", category: "advanced"}
         minimumBasesPerFile: {description: "Equivalent to chunked-scatter's `-m` option.", category: "advanced"}
-        timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
         memory: {description: "The amount of memory this job will use.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
 
@@ -76,9 +78,11 @@ task ScatterRegions {
         String prefix = "scatters/scatter-" 
         Boolean splitContigs = false
         Int scatterSizeMillions = 1000
+
         Int? scatterSize
-        Int timeMinutes = 2
+
         String memory = "256M"
+        Int timeMinutes = 2
         String dockerImage = "quay.io/biocontainers/chunked-scatter:0.2.0--py_0"
     }
 
@@ -105,15 +109,14 @@ task ScatterRegions {
     }
 
     parameter_meta {
+        # inputs
         inputFile: {description: "The input file, either a bed file or a sequence dict. Which format is used is detected by the extension: '.bed', '.fai' or '.dict'.", category: "required"}
         prefix: {description: "The prefix of the ouput files. Output will be named like: <PREFIX><N>.bed, in which N is an incrementing number. Default 'scatter-'.", category: "advanced"}
         splitContigs: {description: "If set, contigs are allowed to be split up over multiple files.", category: "advanced"}
         scatterSizeMillions: {description: "Over how many million base pairs should be scattered.", category: "common"}
         scatterSize: {description: "Overrides scatterSizeMillions with a smaller value if set.", category: "advanced"}
-
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
         memory: {description: "The amount of memory this job will use.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
diff --git a/clever.wdl b/clever.wdl
index 3a6515f721ff642ce3d91f7f8189db0b4542e2d7..75e889b3de0cad621b78f8e0f3e229216f89fd6a 100644
--- a/clever.wdl
+++ b/clever.wdl
@@ -1,7 +1,5 @@
 version 1.0
 
-# MIT License
-#
 # Copyright (c) 2018 Leiden University Medical Center
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -74,12 +72,12 @@ task Mateclever {
         indexedFiteredBam: {description: "The index of the filtered bam file.", category: "required"}
         bwaIndex: {description: "The BWA index files.", category: "required"}
         predictions: {description: "The predicted deletions (VCF) from clever.", category: "required"}
-        maxOffset: {description: "The maximum center distance between split-read and read-pair deletion to be considered identical.", category: "advanced"}
-        maxLengthDiff: {description: "The maximum length difference between split-read and read-pair deletion to be considered identical.", category: "advanced"}
-        cleverMaxDelLength: {description: "The maximum deletion length to look for in Clever predictions.", category: "advanced"}
         outputPath: {description: "The location the output VCF file should be written.", category: "common"}
-        threads: {description: "The the number of threads required to run a program", category: "advanced"}
-        memory: {description: "The memory required to run the programs", category: "advanced"}
+        cleverMaxDelLength: {description: "The maximum deletion length to look for in Clever predictions.", category: "advanced"}
+        maxLengthDiff: {description: "The maximum length difference between split-read and read-pair deletion to be considered identical.", category: "advanced"}
+        maxOffset: {description: "The maximum center distance between split-read and read-pair deletion to be considered identical.", category: "advanced"}
+        threads: {description: "The number of threads required to run a program.", category: "advanced"}
+        memory: {description: "The memory required to run the programs.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
         dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
@@ -128,8 +126,8 @@ task Prediction {
         bamIndex: {description: "The index bam file.", category: "required"}
         bwaIndex: {description: "The BWA index files.", category: "required"}
         outputPath: {description: "The location the output VCF file should be written.", category: "common"}
-        threads: {description: "The the number of threads required to run a program", category: "advanced"}
-        memory: {description: "The memory required to run the programs", category: "advanced"}
+        threads: {description: "The number of threads required to run a program.", category: "advanced"}
+        memory: {description: "The memory required to run the programs.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
         dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
 
diff --git a/collect-columns.wdl b/collect-columns.wdl
index fe41c5e897c0095792f94f27cfcf1716ca7874b9..67db617915fe9dfdec4f6ac703143f6834b1d62d 100644
--- a/collect-columns.wdl
+++ b/collect-columns.wdl
@@ -24,12 +24,13 @@ task CollectColumns {
     input {
         Array[File]+ inputTables
         String outputPath
+        Boolean header = false
+        Boolean sumOnDuplicateId = false
+
         Int? featureColumn
         Int? valueColumn
         Int? separator
         Array[String]? sampleNames
-        Boolean header = false
-        Boolean sumOnDuplicateId = false
         Array[String]? additionalAttributes
         File? referenceGtf
         String? featureAttribute
@@ -67,20 +68,20 @@ task CollectColumns {
     }
 
     parameter_meta {
+        # inputs
         inputTables: {description: "The tables from which columns should be taken.", category: "required"}
         outputPath: {description: "The path to which the output should be written.", category: "required"}
+        header: {description: "Equivalent to the -H flag of collect-columns.", category: "advanced"}
+        sumOnDuplicateId: {description: "Equivalent to the -S flag of collect-columns.", category: "advanced"}
         featureColumn: {description: "Equivalent to the -f option of collect-columns.", category: "advanced"}
         valueColumn: {description: "Equivalent to the -c option of collect-columns.", category: "advanced"}
         separator: {description: "Equivalent to the -s option of collect-columns.", category: "advanced"}
         sampleNames: {description: "Equivalent to the -n option of collect-columns.", category: "advanced"}
-        header: {description: "Equivalent to the -H flag of collect-columns.", category: "advanced"}
-        sumOnDuplicateId: {description: "Equivalent to the -S flag of collect-columns.", category: "advanced"}
         additionalAttributes: {description: "Equivalent to the -a option of collect-columns.", category: "advanced"}
         referenceGtf: {description: "Equivalent to the -g option of collect-columns.", category: "advanced"}
         featureAttribute: {description: "Equivalent to the -F option of collect-columns.", category: "advanced"}
-        memoryGb: {description: "The maximum amount of memory the job will need in GB", category: "advanced"}
+        memoryGb: {description: "The maximum amount of memory the job will need in GB.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
-}
\ No newline at end of file
+}
diff --git a/common.wdl b/common.wdl
index e96cc1c814fc7294a2b5c0da7befa5adcedd974c..b3878bb6bc55db8b9d04f800be492ff4c6b80e52 100644
--- a/common.wdl
+++ b/common.wdl
@@ -45,7 +45,7 @@ task CheckFileMD5 {
     input {
         File file
         String md5
-        # By default cromwell expects /bin/bash to be present in the container
+        # By default cromwell expects /bin/bash to be present in the container.
         # The 'bash' container does not fill this requirement. (It is in /usr/local/bin/bash)
         # Use a stable version of debian:stretch-slim for this. (Smaller than ubuntu)
         String dockerImage = "debian@sha256:f05c05a218b7a4a5fe979045b1c8e2a9ec3524e5611ebfdd0ef5b8040f9008fa"
@@ -71,7 +71,7 @@ task ConcatenateTextFiles {
         Boolean zip = false
     }
 
-    # When input and output is both compressed decompression is not needed
+    # When input and output is both compressed decompression is not needed.
     String cmdPrefix = if (unzip && !zip) then "zcat " else "cat "
     String cmdSuffix = if (!unzip && zip) then " | gzip -c " else ""
 
@@ -116,8 +116,8 @@ task Copy {
 }
 
 task CreateLink {
-    # Making this of type File will create a link to the copy of the file in the execution
-    # folder, instead of the actual file.
+    # Making this of type File will create a link to the copy of the file in
+    # the execution folder, instead of the actual file.
     # This cannot be propperly call-cached or used within a container.
     input {
         String inputFile
@@ -182,6 +182,7 @@ task TextToFile {
     input {
         String text
         String outputFile = "out.txt"
+
         Int timeMinutes = 1
         String dockerImage = "debian@sha256:f05c05a218b7a4a5fe979045b1c8e2a9ec3524e5611ebfdd0ef5b8040f9008fa"
     }
@@ -194,18 +195,19 @@ task TextToFile {
         File out = outputFile
     }
 
-    parameter_meta {
-        text: {description: "The text to print", category: "required"}
-        outputFile: {description: "The name of the output file", category: "common"}
-        timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
-    }
     runtime {
         memory: "1G"
         time_minutes: timeMinutes
         docker: dockerImage
     }
+
+    parameter_meta {
+        # inputs
+        text: {description: "The text to print.", category: "required"}
+        outputFile: {description: "The name of the output file.", category: "common"}
+        timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
+    }
 }
 
 task YamlToJson {
@@ -213,11 +215,12 @@ task YamlToJson {
         File yaml
         String outputJson = basename(yaml, "\.ya?ml$") + ".json"
 
-        Int timeMinutes = 1
         String  memory = "128M"
+        Int timeMinutes = 1
         # biowdl-input-converter has python and pyyaml.
         String dockerImage = "quay.io/biocontainers/biowdl-input-converter:0.2.1--py_0"
     }
+
     command {
         set -e
         mkdir -p "$(dirname ~{outputJson})"
@@ -230,6 +233,7 @@ task YamlToJson {
             json.dump(content, output_json)
         CODE
     }
+
     output {
         File json = outputJson
     }
@@ -241,12 +245,12 @@ task YamlToJson {
     }
 
     parameter_meta {
+        # inputs
         yaml: {description: "The YAML file to convert.", category: "required"}
         outputJson: {description: "The location the output JSON file should be written to.", category: "advanced"}
         memory: {description: "The maximum amount of memory the job will need.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
 
diff --git a/cutadapt.wdl b/cutadapt.wdl
index 7faeaff1e69891a9ff296d820f6222e49863de0a..74f57912aa8b65b2a709d66516771994353f80a0 100644
--- a/cutadapt.wdl
+++ b/cutadapt.wdl
@@ -32,6 +32,14 @@ task Cutadapt {
         Array[String] adapterRead2 = []
         Array[String] frontRead2 = []
         Array[String] anywhereRead2 = []
+        String reportPath = "cutadapt_report.txt"
+        # Cutadapt compresses the zipped output files with a ridiculously
+        # high compression level (5 or 6).
+        # This is not the fast compression preset. It takes up to 400% more
+        # CPU time for a 20% reduction in file size.
+        # Hence we use compression level 1 here.
+        Int compressionLevel = 1 # This only affects outputs with the .gz suffix.
+
         Boolean? interleaved
         String? pairFilter
         Float? errorRate
@@ -52,7 +60,7 @@ task Cutadapt {
         String? stripSuffix
         String? prefix
         String? suffix
-        Int? minimumLength = 2  # Necessary to prevent creation of empty reads or 1 base reads.
+        Int? minimumLength = 2 # Necessary to prevent creation of empty reads or 1 base reads.
         Int? maximumLength
         Int? maxN
         Boolean? discardTrimmed
@@ -73,11 +81,7 @@ task Cutadapt {
         Boolean? bwa
         Boolean? zeroCap
         Boolean? noZeroCap
-        String reportPath = "cutadapt_report.txt"
-        # Cutadapt compresses the zipped output files with a ridiculously high compression level (5 or 6).
-        # This is not the fast compression preset. It takes up to 400% more CPU time for a 20% reduction in file size.
-        # Hence we use compression level 1 here.
-        Int compressionLevel = 1  # This only affects outputs with the .gz suffix.
+
         Int cores = 4
         String memory = "~{300 + 100 * cores}M"
         Int timeMinutes = 1 + ceil(size([read1, read2], "G")  * 12.0 / cores)
@@ -152,8 +156,8 @@ task Cutadapt {
 
     output{
         File cutRead1 = read1output
-        File? cutRead2 = read2output
         File report = reportPath
+        File? cutRead2 = read2output
         File? tooLongOutput=tooLongOutputPath
         File? tooShortOutput=tooShortOutputPath
         File? untrimmedOutput=untrimmedOutputPath
@@ -173,22 +177,19 @@ task Cutadapt {
     }
 
     parameter_meta {
+        # inputs
         read1: {description: "The first or single end fastq file to be run through cutadapt.", category: "required"}
         read2: {description: "An optional second end fastq file to be run through cutadapt.", category: "common"}
         read1output: {description: "The name of the resulting first or single end fastq file.", category: "common"}
         read2output: {description: "The name of the resulting second end fastq file.", category: "common"}
-        adapter: {description: "A list of 3' ligated adapter sequences to be cut from the given first or single end fastq file.",
-                  category: "common"}
-        front: {description: "A list of 5' ligated adapter sequences to be cut from the given first or single end fastq file.",
-                category: "advanced"}
-        anywhere: {description: "A list of 3' or 5' ligated adapter sequences to be cut from the given first or single end fastq file.",
-                   category: "advanced"}
-        adapterRead2: {description: "A list of 3' ligated adapter sequences to be cut from the given second end fastq file.",
-                       category: "common"}
-        frontRead2: {description: "A list of 5' ligated adapter sequences to be cut from the given second end fastq file.",
-                     category: "advanced"}
-        anywhereRead2: {description: "A list of 3' or 5' ligated adapter sequences to be cut from the given second end fastq file.",
-                        category: "advanced"}
+        adapter: {description: "A list of 3' ligated adapter sequences to be cut from the given first or single end fastq file.", category: "common"}
+        front: {description: "A list of 5' ligated adapter sequences to be cut from the given first or single end fastq file.", category: "advanced"}
+        anywhere: {description: "A list of 3' or 5' ligated adapter sequences to be cut from the given first or single end fastq file.", category: "advanced"}
+        adapterRead2: {description: "A list of 3' ligated adapter sequences to be cut from the given second end fastq file.", category: "common"}
+        frontRead2: {description: "A list of 5' ligated adapter sequences to be cut from the given second end fastq file.", category: "advanced"}
+        anywhereRead2: {description: "A list of 3' or 5' ligated adapter sequences to be cut from the given second end fastq file.", category: "advanced"}
+        reportPath: {description: "The name of the file to write cutadapt's stdout to, this contains some metrics.", category: "common"}
+        compressionLevel: {description: "The compression level if gzipped output is used.", category: "advanced"}
         interleaved: {description: "Equivalent to cutadapt's --interleaved flag.", category: "advanced"}
         pairFilter: {description: "Equivalent to cutadapt's --pair-filter option.", category: "advanced"}
         errorRate: {description: "Equivalent to cutadapt's --error-rate option.", category: "advanced"}
@@ -230,13 +231,9 @@ task Cutadapt {
         bwa: {description: "Equivalent to cutadapt's --bwa flag.", category: "advanced"}
         zeroCap: {description: "Equivalent to cutadapt's --zero-cap flag.", category: "advanced"}
         noZeroCap: {description: "Equivalent to cutadapt's --no-zero-cap flag.", category: "advanced"}
-        reportPath: {description: "The name of the file to write cutadapts's stdout to, this contains some metrics.",
-                     category: "common"}
-        compressionLevel: {description: "The compression level if gzipped output is used.", category: "advanced"}
         cores: {description: "The number of cores to use.", category: "advanced"}
         memory: {description: "The amount of memory this job will use.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }
diff --git a/deepvariant.wdl b/deepvariant.wdl
index f56618868de33bd64de5fc7856c28fffb000b6bd..20bf8e274c70c58188a2c993fd2789bd36f8d097 100644
--- a/deepvariant.wdl
+++ b/deepvariant.wdl
@@ -28,6 +28,7 @@ task RunDeepVariant {
         File inputBamIndex
         String modelType
         String outputVcf
+
         String? postprocessVariantsExtraArgs
         File? customizedModel
         Int? numShards
@@ -43,7 +44,6 @@ task RunDeepVariant {
 
     command {
         set -e
-
         /opt/deepvariant/bin/run_deepvariant \
         --ref ~{referenceFasta} \
         --reads ~{inputBam} \
@@ -59,36 +59,36 @@ task RunDeepVariant {
     }
 
     runtime {
-        docker: dockerImage
-        time_minutes: timeMinutes
         memory: memory
+        time_minutes: timeMinutes
+        docker: dockerImage
     }
 
     output {
         File outputVCF = outputVcf
         File outputVCFIndex = outputVCF + ".tbi"
+        Array[File] outputVCFStatsReport = glob("*.visual_report.html")
         File? outputGVCF = outputGVcf
         File? outputGVCFIndex = outputGVcf + ".tbi"
-        Array[File] outputVCFStatsReport = glob("*.visual_report.html")
     }
-    
+
     parameter_meta {
-        referenceFasta: {description: "Genome reference to use", category: "required"}
+        # inputs
+        referenceFasta: {description: "Genome reference to use.", category: "required"}
         referenceFastaIndex: {description: "Index for the genome reference file.", category: "required"}
         inputBam: {description: "Aligned, sorted, indexed BAM file containing the reads we want to call.", category: "required"}
         inputBamIndex: {description: "Index for the input bam file.", category: "required"}
-        modelType: {description: "<WGS|WES|PACBIO>. Type of model to use for variant calling. Each model_type has an associated default model, which can be overridden by the --customized_model flag", category: "required"}
+        modelType: {description: "<WGS|WES|PACBIO>. Type of model to use for variant calling. Each model_type has an associated default model, which can be overridden by the --customized_model flag.", category: "required"}
         outputVcf: {description: "Path where we should write VCF file.", category: "required"}
-        customizedModel: {description: "A path to a model checkpoint to load for the `call_variants` step. If not set, the default for each --model_type will be used", category: "advanced"}
+        postprocessVariantsExtraArgs: {description: "A comma-separated list of flag_name=flag_value. 'flag_name' has to be valid flags for calpostprocess_variants.py.", category: "advanced"}
+        customizedModel: {description: "A path to a model checkpoint to load for the `call_variants` step. If not set, the default for each --model_type will be used.", category: "advanced"}
         numShards: {description: "Number of shards for make_examples step.", category: "common"}
         outputGVcf: {description: "Path where we should write gVCF file.", category: "common"}
         regions: {description: "List of regions we want to process, in BED/BEDPE format.", category: "advanced"}
         sampleName: {description: "Sample name to use instead of the sample name from the input reads BAM (SM tag in the header).", category: "common"}
         VCFStatsReport: {description: "Output a visual report (HTML) of statistics about the output VCF.", category: "common"}
-        postprocessVariantsExtraArgs: {description: "A comma-separated list of flag_name=flag_value. 'flag_name' has to be valid flags for calpostprocess_variants.py.", category: "advanced"}
         memory: {description: "The amount of memory this job will use.", category: "advanced"}
         timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
-        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-                      category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
     }
 }