diff --git a/CHANGELOG.md b/CHANGELOG.md
index 38c95774a8ed59517b39ca343af57ab7f3d8333d..1becd5aeeef53fc73e475326c2264366f779128f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,15 @@ that users understand how the changes affect the new version.
 
 version 2.2.0-dev
 ---------------------------
++ Removed unused "cores" inputs from transcriptclean tasks.
++ Removed unused "cores" inputs from talon tasks.
++ Removed unused "threads" input from ModifyStrelka.
++ Removed the "installDir" inputs from the somaticseq tasks.
++ Removed the "installDir" input from CombineVariants.
++ Removed the "extraArgs" input from FilterMutectCalls.
++ Removed unused "verbose" and "quiet" inputs from multiqc.
++ Added parameter_meta sections to a variety of tasks.
++ Picard's BedToIntervalList outputPath input is now optional (with a default of "regions.interval_list").
 + TALON: Fix SQLite error concerning database/disk space being full.
 + Update htseq to default image version 0.11.2
 + Update biowdl-input-converter in common.wdl to version 0.2.1.
diff --git a/CPAT.wdl b/CPAT.wdl
index f9a77bed2c452ecd5aab2a680e8264699c140955..73c9d13c71e9089b2d8a94834a90c25af980e3ee 100644
--- a/CPAT.wdl
+++ b/CPAT.wdl
@@ -36,6 +36,20 @@ task CPAT {
     runtime {
         docker: dockerImage
     }
+
+    parameter_meta {
+        gene: {description: "Equivalent to CPAT's `--gene` option.", category: "required"}
+        outFilePath: {description: "Equivalent to CPAT's `--outfile` option.", category: "required"}
+        hex: {description: "Equivalent to CPAT's `--hex` option.", category: "required"}
+        logitModel: {description: "Equivalent to CPAT's `--logitModel` option.", category: "required"}
+        referenceGenome: {description: "Equivalent to CPAT's `--ref` option.", category: "advanced"}
+        referenceGenomeIndex: {description: "The index of the reference genome. Should be provided if CPAT should not index the reference genome itself.",
+                               category: "advanced"}
+        startCodons: {description: "Equivalent to CPAT's `--start` option.", category: "advanced"}
+        stopCodons: {description: "Equivalent to CPAT's `--stop` option.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 # There is also make_hexamer_tab.py and make_logitModel.py
diff --git a/biopet/biopet.wdl b/biopet/biopet.wdl
index 8cf23813bd0880c726010316c02d802b8eef922a..f91f93eab4d26502318ca0fe5d9b59b1989d5ef1 100644
--- a/biopet/biopet.wdl
+++ b/biopet/biopet.wdl
@@ -226,6 +226,12 @@ task ReorderGlobbedScatters {
         # 4 gigs of memory to be able to build the docker image in singularity
         memory: "4G"
     }
+
+    parameter_meta {
+        scatters: {description: "The files which should be ordered.", category: "required"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task ScatterRegions {
@@ -268,6 +274,25 @@ task ScatterRegions {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        referenceFasta: {description: "The reference fasta file.", category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
+                             category: "required"}
+        scatterSize: {description: "Equivalent to biopet scatterregions' `-s` option.", category: "common"}
+        regions: {description: "The regions to be scattered.", category: "advanced"}
+        notSplitContigs: {description: "Equivalent to biopet scatterregions' `--notSplitContigs` flag.",
+                          category: "advanced"}
+        bamFile: {description: "Equivalent to biopet scatterregions' `--bamfile` option.",
+                  category: "advanced"}
+        bamIndex: {description: "The index for the bamfile given through bamFile.", category: "advanced"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task ValidateAnnotation {
diff --git a/biowdl.wdl b/biowdl.wdl
index 32fd5a735d254f4f1d7bd27b5d676429976a465b..7aa68b271b73ac6c098fe1b0deffa6a06178b4a3 100644
--- a/biowdl.wdl
+++ b/biowdl.wdl
@@ -52,4 +52,17 @@ task InputConverter {
     runtime {
         docker: dockerImage
     }
+
+    parameter_meta {
+        samplesheet: {description: "The samplesheet to be processed.", category: "required"}
+        outputFile: {description: "The location the JSON representation of the samplesheet should be written to.",
+                     category: "advanced"}
+        skipFileCheck: {description: "Whether or not the existence of the files mentioned in the samplesheet should be checked.",
+                        category: "advanced"}
+        checkFileMd5sums: {description: "Whether or not the MD5 sums of the files mentioned in the samplesheet should be checked.",
+                           category: "advanced"}
+        old: {description: "Whether or not the old samplesheet format should be used.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
diff --git a/bowtie.wdl b/bowtie.wdl
index 72a396412d5f6edb9546adf1b946067be5031667..18fd6146adb6ea1b606f9d7b3bec6753d2c8fca4 100644
--- a/bowtie.wdl
+++ b/bowtie.wdl
@@ -80,6 +80,27 @@ task Bowtie {
         memory: memory
         docker: dockerImage
     }
+
+    parameter_meta {
+        readsUpstream: {description: "The first-/single-end fastq files.", category: "required"}
+        readsDownstream: {description: "The second-end fastq files.", category: "common"}
+        outputPath: {description: "The location the output BAM file should be written to.", category: "common"}
+        indexFiles: {description: "The index files for bowtie.", category: "required"}
+        seedmms: {description: "Equivalent to bowtie's `--seedmms` option.", category: "advanced"}
+        seedlen: {description: "Equivalent to bowtie's `--seedlen` option.", category: "advanced"}
+        k: {description: "Equivalent to bowtie's `-k` option.", category: "advanced"}
+        best: {description: "Equivalent to bowtie's `--best` flag.", category: "advanced"}
+        strata: {description: "Equivalent to bowtie's `--strata` flag.", category: "advanced"}
+        allowContain: {description: "Equivalent to bowtie's `--allow-contain` flag.", category: "advanced"}
+        samRG: {description: "Equivalent to bowtie's `--sam-RG` option.", category: "advanced"}
+
+        picardXmx: {description: "The maximum memory available to picard (used for sorting the output). Should be lower than `memory` to accommodate JVM overhead and bowtie's memory usage.",
+                  category: "advanced"}
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 struct BowtieIndex {
diff --git a/bwa.wdl b/bwa.wdl
index 05c8716af2ea03b1ca1e1127f1df3ed48104e02a..b0b1daf753377237c4d7f333ab6feee57b79b034 100644
--- a/bwa.wdl
+++ b/bwa.wdl
@@ -42,6 +42,21 @@ task Mem {
         memory: memory
         docker: dockerImage
     }
+
+    parameter_meta {
+        read1: {description: "The first-/single-end fastq file.", category: "required"}
+        read2: {description: "The second end fastq file.", category: "common"}
+        bwaIndex: {description: "The BWA index files.", category: "required"}
+        outputPath: {description: "The location the output BAM file should be written to.", category: "required"}
+        readgroup: {description: "The readgroup to be assigned to the reads. See BWA mem's `-R` option.", category: "common"}
+
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        picardXmx: {description: "The maximum memory available to picard SortSam. Should be lower than `memory` to accommodate JVM overhead and BWA mem's memory usage.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task Kit {
@@ -91,47 +106,20 @@ task Kit {
     }
 
     parameter_meta {
-        read1: {
-            description: "The first-end fastq file.",
-            category: "required"
-        }
-        read2: {
-            description: "The second-end fastq file.",
-            category: "common"
-        }
-        bwaIndex: {
-            description: "The BWA index, including a .alt file.",
-            category: "required"
-        }
-        outputPrefix: {
-            description: "The prefix of the output files, including any parent directories.",
-            category: "required"
-        }
-        readgroup: {
-            description: "A readgroup identifier.",
-            category: "common"
-        }
-        sixtyFour: {
-            description: "Whether or not the index uses the '.64' suffixes.",
-            category: "common"
-        }
-        threads: {
-            description: "The number of threads to use for alignment.",
-            category: "advanced"
-        }
-        sortThreads: {
-            description: "The number of threads to use for sorting.",
-            category: "advanced"
-        }
-        memory: {
-            description: "The amount of memory this job will use.",
-            category: "advanced"
-        }
-        dockerImage: {
-            description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-            category: "advanced"
-        }
+        # inputs
+        read1: {description: "The first-end fastq file.", category: "required"}
+        read2: {description: "The second-end fastq file.", category: "common"}
+        bwaIndex: {description: "The BWA index, including a .alt file.", category: "required"}
+        outputPrefix: {description: "The prefix of the output files, including any parent directories.", category: "required"}
+        readgroup: {description: "A readgroup identifier.", category: "common"}
+        sixtyFour: {description: "Whether or not the index uses the '.64' suffixes.", category: "common"}
+        threads: {description: "The number of threads to use for alignment.", category: "advanced"}
+        sortThreads: {description: "The number of threads to use for sorting.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
 
+        # outputs
         outputBam: "The produced BAM file."
         outputBamIndex: "The index of the produced BAM file."
     }
diff --git a/chunked-scatter.wdl b/chunked-scatter.wdl
index 06f62baf54446bb080a9062a082b88af179d5a3b..6b320368a421f1b19728525ea0344295bc86f08b 100644
--- a/chunked-scatter.wdl
+++ b/chunked-scatter.wdl
@@ -30,4 +30,14 @@ task ChunkedScatter {
         memory: "4G"
         docker: dockerImage
     }
+
+    parameter_meta {
+        inputFile: {description: "Either a bed file describing regions of interest or a sequence dictionary.", category: "required"}
+        prefix: {description: "The prefix for the output files.", category: "advanced"}
+        chunkSize: {description: "Equivalent to chunked-scatter's `-c` option.", category: "advanced"}
+        overlap: {description: "Equivalent to chunked-scatter's `-o` option.", category: "advanced"}
+        minimumBasesPerFile: {description: "Equivalent to chunked-scatter's `-m` option.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
\ No newline at end of file
diff --git a/collect-columns.wdl b/collect-columns.wdl
index d453e5bed4ae2a9389875f31c0dba1468d64c432..09788e20b39b0b326d7797c560f2b1e2f0009e87 100644
--- a/collect-columns.wdl
+++ b/collect-columns.wdl
@@ -44,49 +44,27 @@ task CollectColumns {
     }
 
     parameter_meta {
-        inputTables: {
-            description: "The tables from which columns should be taken.",
-            category: "required"
-        }
-        outputPath: {
-            description: "The path to which the output should be written.",
-            category: "required"
-        }
-        featureColumn: {
-            description: "Equivalent to the -f option of collect-columns.",
-            category: "common" # Should likely be controlled by the calling workflow
-        }
-        valueColumn: {
-            description: "Equivalent to the -c option of collect-columns.",
-            category: "common" # Should likely be controlled by the calling workflow
-        }
-        separator: {
-            description: "Equivalent to the -s option of collect-columns.",
-            category: "common" # Should likely be controlled by the calling workflow
-        }
-        sampleNames: {
-            description: "Equivalent to the -n option of collect-columns.",
-            category: "common" # Should likely be controlled by the calling workflow
-        }
-        header: {
-            description: "Equivalent to the -H flag of collect-columns.",
-            category: "common"
-        }
-        additionalAttributes: {
-            description: "Equivalent to the -a option of collect-columns.",
-            category: "advanced"
-        }
-        referenceGtf: {
-            description: "Equivalent to the -g option of collect-columns.",
-            category: "advanced"
-        }
-        featureAttribute: {
-            description: "Equivalent to the -F option of collect-columns.",
-            category: "advanced"
-        }
-        dockerImage: {
-            description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
-            category: "advanced"
-        }
+        inputTables: {description: "The tables from which columns should be taken.",
+                      category: "required"}
+        outputPath: {description: "The path to which the output should be written.",
+                     category: "required"}
+        featureColumn: {description: "Equivalent to the -f option of collect-columns.",
+                        category: "advanced"}
+        valueColumn: {description: "Equivalent to the -c option of collect-columns.",
+                      category: "advanced"}
+        separator: {description: "Equivalent to the -s option of collect-columns.",
+                    category: "advanced"}
+        sampleNames: {description: "Equivalent to the -n option of collect-columns.",
+                      category: "advanced"}
+        header: {description: "Equivalent to the -H flag of collect-columns.",
+                 category: "advanced"}
+        additionalAttributes: {description: "Equivalent to the -a option of collect-columns.",
+                               category: "advanced"}
+        referenceGtf: {description: "Equivalent to the -g option of collect-columns.",
+                       category: "advanced"}
+        featureAttribute: {description: "Equivalent to the -F option of collect-columns.",
+                           category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
     }
 }
\ No newline at end of file
diff --git a/common.wdl b/common.wdl
index f8b2cd8b9dc6aa180c4624927c651fbfea74c851..73325bf4c726f0716b067e6ddc3f7f96b3cb5587 100644
--- a/common.wdl
+++ b/common.wdl
@@ -184,6 +184,13 @@ task YamlToJson {
     runtime {
         docker: dockerImage
     }
+
+    parameter_meta {
+        yaml: {description: "The YAML file to convert.", category: "required"}
+        outputJson: {description: "The location the output JSON file should be written to.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 struct Reference {
diff --git a/gatk.wdl b/gatk.wdl
index a48cb8b05e3184b376e6b81e20f40658fd2ab15f..5ca149c335ece5cc6edcfb45ecde6def36cacdf3 100644
--- a/gatk.wdl
+++ b/gatk.wdl
@@ -45,6 +45,25 @@ task ApplyBQSR {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBam: {description: "The BAM file which should be recalibrated.", category: "required"}
+        inputBamIndex: {description: "The input BAM file's index.", category: "required"}
+        outputBamPath: {description: "The location the resulting BAM file should be written.", category: "required"}
+        recalibrationReport: {description: "The BQSR report to be used for recalibration.", category: "required"}
+        sequenceGroupInterval: {description: "Bed files describing the regions to operate on.", category: "advanced"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.",
+                         category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
+                             category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 # Generate Base Quality Score Recalibration (BQSR) model
@@ -89,6 +108,28 @@ task BaseRecalibrator {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBam: {description: "The BAM file to generate a BQSR report for.", category: "required"}
+        inputBamIndex: {description: "The index of the input BAM file.", category: "required"}
+        recalibrationReportPath: {description: "The location to write the BQSR report to.", category: "required"}
+        sequenceGroupInterval: {description: "Bed files describing the regions to operate on.", category: "advanced"}
+        knownIndelsSitesVCFs: {description: "VCF files with known indels.", category: "advanced"}
+        knownIndelsSitesVCFIndexes: {description: "The indexes for the known variant VCFs.", category: "advanced"}
+        dbsnpVCF: {description: "A dbSNP VCF.", category: "common"}
+        dbsnpVCFIndex: {description: "The index for the dbSNP VCF.", category: "common"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.",
+                         category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
+                             category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task CombineGVCFs {
@@ -126,6 +167,24 @@ task CombineGVCFs {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        gvcfFiles: {description: "The GVCF files to be combined.", category: "required"}
+        gvcfFilesIndex: {description: "The indexes for the GVCF files.", category: "required"}
+        intervals: {description: "Bed files or interval lists describing the regions to operate on.", category: "advanced"}
+        outputPath: {description: "The location the combined GVCF should be written to.", category: "required"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.",
+                         category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
+                             category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 # Combine multiple recalibration tables from scattered BaseRecalibrator runs
@@ -156,6 +215,17 @@ task GatherBqsrReports {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBQSRreports: {description: "The BQSR reports to be merged.", category: "required"}
+        outputReportPath: {description: "The location of the combined BQSR report.", category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task GenotypeGVCFs {
@@ -200,6 +270,26 @@ task GenotypeGVCFs {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        gvcfFiles: {description: "The GVCF files to be genotyped.", category: "required"}
+        gvcfFilesIndex: {description: "The index of the input GVCF files.", category: "required"}
+        intervals: {description: "Bed files or interval lists describing the regions to operate on.", category: "required"}
+        outputPath: {description: "The location to write the output VCF file to.", category: "required"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.",
+                         category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
+                             category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        dbsnpVCF: {description: "A dbSNP VCF.", category: "common"}
+        dbsnpVCFIndex: {description: "The index for the dbSNP VCF.", category: "common"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 # Call variants on a single sample with HaplotypeCaller to produce a GVCF
@@ -244,6 +334,27 @@ task HaplotypeCallerGvcf {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBams: {description: "The BAM files on which to perform variant calling.", category: "required"}
+        inputBamsIndex: {description: "The indexes for the input BAM files.", category: "required"}
+        intervalList: {description: "Bed files or interval lists describing the regions to operate on.", category: "required"}
+        gvcfPath: {description: "The location to write the output GVCF to.", category: "required"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.",
+                         category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
+                             category: "required"}
+        referenceFastaIndex: {description: "The index for the reference fasta file.", category: "required"}
+        contamination: {description: "Equivalent to HaplotypeCaller's `-contamination` option.", category: "advanced"}
+        dbsnpVCF: {description: "A dbSNP VCF.", category: "common"}
+        dbsnpVCFIndex: {description: "The index for the dbSNP VCF.", category: "common"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task MuTect2 {
@@ -296,6 +407,29 @@ task MuTect2 {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBams: {description: "The BAM files on which to perform variant calling.", category: "required"}
+        inputBamsIndex: {description: "The indexes for the input BAM files.", category: "required"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.", category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        outputVcf: {description: "The location to write the output VCF file to.", category: "required"}
+        tumorSample: {description: "The name of the tumor/case sample.", category: "required"}
+        normalSample: {description: "The name of the normal/control sample.", category: "common"}
+        germlineResource: {description: "Equivalent to Mutect2's `--germline-resource` option.", category: "advanced"}
+        germlineResourceIndex: {description: "The index for the germline resource.", category: "advanced"}
+        panelOfNormals: {description: "Equivalent to Mutect2's `--panel-of-normals` option.", category: "advanced"}
+        panelOfNormalsIndex: {description: "The index for the panel of normals.", category: "advanced"}
+        f1r2TarGz: {description: "Equivalent to Mutect2's `--f1r2-tar-gz` option.", category: "advanced"}
+        intervals: {description: "Bed files describing the regions to operate on.", category: "required"}
+        outputStats: {description: "The location the output statistics should be written to.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task LearnReadOrientationModel {
@@ -323,6 +457,15 @@ task LearnReadOrientationModel {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        f1r2TarGz: {description: "An f1r2TarGz file, as output by Mutect2.", category: "required"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task MergeStats {
@@ -350,6 +493,15 @@ task MergeStats {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        stats: {description: "Statistics files to be merged.", category: "required"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task GetPileupSummaries {
@@ -385,6 +537,22 @@ task GetPileupSummaries {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        sampleBam: {description: "A BAM file for which a pileup should be created.", category: "required"}
+        sampleBamIndex: {description: "The index of the input BAM file.", category: "required"}
+        variantsForContamination: {description: "A VCF file with common variants.", category: "required"}
+        variantsForContaminationIndex: {description: "The index for the common variants VCF file.", category: "required"}
+        sitesForContamination: {description: "A bed file describing regions to operate on.", category: "required"}
+        sitesForContaminationIndex: {description: "The index for the bed file.", category: "required"}
+        outputPrefix: {description: "The prefix for the output.", category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task CalculateContamination {
@@ -416,6 +584,16 @@ task CalculateContamination {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        tumorPileups: {description: "The pileup summary of a tumor/case sample.", category: "required"}
+        normalPileups: {description: "The pileup summary of the normal/control sample.", category: "common"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task FilterMutectCalls {
@@ -431,7 +609,6 @@ task FilterMutectCalls {
         File? artifactPriors
         Int uniqueAltReadCount = 4
         File mutect2Stats
-        String? extraArgs
 
         String memory = "24G"
         String javaXmx = "12G"
@@ -452,8 +629,7 @@ task FilterMutectCalls {
         ~{"--unique-alt-read-count " + uniqueAltReadCount} \
         ~{"-stats " + mutect2Stats} \
         --filtering-stats "filtering.stats" \
-        --showHidden \
-        ~{extraArgs}
+        --showHidden
     }
 
     output {
@@ -466,6 +642,26 @@ task FilterMutectCalls {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.", category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        unfilteredVcf: {description: "An unfiltered VCF file as produced by Mutect2.", category: "required"}
+        unfilteredVcfIndex: {description: "The index of the unfiltered VCF file.", category: "required"}
+        outputVcf: {description: "The location the filtered VCF file should be written.", category: "required"}
+        contaminationTable: {description: "Equivalent to FilterMutectCalls' `--contamination-table` option.", category: "advanced"}
+        mafTumorSegments: {description: "Equivalent to FilterMutectCalls' `--tumor-segmentation` option.", category: "advanced"}
+        artifactPriors: {description: "Equivalent to FilterMutectCalls' `--ob-priors` option.", category: "advanced"}
+        uniqueAltReadCount: {description: "Equivalent to FilterMutectCalls' `--unique-alt-read-count` option.", category: "advanced"}
+        mutect2Stats: {description: "Equivalent to FilterMutectCalls' `-stats` option.", category: "advanced"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task SplitNCigarReads {
@@ -503,12 +699,28 @@ task SplitNCigarReads {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBam: {description: "The BAM file for which spliced reads should be split.", category: "required"}
+        inputBamIndex: {description: "The input BAM file's index.", category: "required"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.",
+                         category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
+                             category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        outputBam: {description: "The location the output BAM file should be written.", category: "required"}
+        intervals: {description: "Bed files or interval lists describing the regions to operate on.", category: "advanced"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task CombineVariants {
     input {
-        String installDir = "/usr"  # .jar location in the docker image
-
         File referenceFasta
         File referenceFastaFai
         File referenceFastaDict
@@ -539,7 +751,7 @@ task CombineVariants {
             printf -- "-V:%s %s " "${ids[i]}" "${vars[i]}"
           done
         ')
-        java -Xmx~{javaXmx} -jar ~{installDir}/GenomeAnalysisTK.jar \
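+        # The GATK jar is located at /usr in the docker image.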
+        java -Xmx~{javaXmx} -jar /usr/GenomeAnalysisTK.jar \
         -T CombineVariants \
         -R ~{referenceFasta} \
         --genotypemergeoption ~{genotypeMergeOption} \
@@ -557,4 +769,22 @@ task CombineVariants {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.", category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        genotypeMergeOption: {description: "Equivalent to CombineVariants' `--genotypemergeoption` option.", category: "advanced"}
+        filteredRecordsMergeType: {description: "Equivalent to CombineVariants' `--filteredrecordsmergetype` option.", category: "advanced"}
+        identifiers: {description: "The sample identifiers in the same order as variantVcfs.", category: "required"}
+        variantVcfs: {description: "The input VCF files in the same order as identifiers.", category: "required"}
+        variantIndexes: {description: "The indexes of the input VCF files.", category: "required"}
+        outputPath: {description: "The location the output should be written to.", category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
diff --git a/gffcompare.wdl b/gffcompare.wdl
index b60881fafa084028b2963b4b210a896e105f2517..60d19d5f475d87d459497353fdca67f54a38a261 100644
--- a/gffcompare.wdl
+++ b/gffcompare.wdl
@@ -30,6 +30,7 @@ task GffCompare {
         # Issue addressed at https://github.com/openwdl/wdl/pull/263
         File? noneFile # This is a wdl workaround. Please do not assign!
     }
+
     # This allows for the creation of output directories
     String dirPrefix = if defined(outputDir)
         then select_first([outputDir]) + "/"
@@ -91,4 +92,35 @@ task GffCompare {
     runtime {
        docker: dockerImage
     }
+
+    parameter_meta {
+        inputGtfList: {description: "Equivalent to gffcompare's `-i` option.", category: "advanced"}
+        inputGtfFiles: {description: "The input GTF files.", category: "required"}
+        referenceAnnotation: {description: "The GTF file to compare with.", category: "required"}
+        outputDir: {description: "The location the output should be written.", category: "common"}
+        outPrefix: {description: "The prefix for the output.", category: "advanced"}
+        genomeSequences: {description: "Equivalent to gffcompare's `-s` option.", category: "advanced"}
+        maxDistanceFreeEndsTerminalExons: {description: "Equivalent to gffcompare's `-e` option.", category: "advanced"}
+        maxDistanceGroupingTranscriptStartSites: {description: "Equivalent to gffcompare's `-d` option.", category: "advanced"}
+        namePrefix: {description: "Equivalent to gffcompare's `-p` option.", category: "advanced"}
+        C: {description: "Equivalent to gffcompare's `-C` flag.", category: "advanced"}
+        A: {description: "Equivalent to gffcompare's `-A` flag.", category: "advanced"}
+        X: {description: "Equivalent to gffcompare's `-X` flag.", category: "advanced"}
+        K: {description: "Equivalent to gffcompare's `-K` flag.", category: "advanced"}
+        snCorrection: {description: "Equivalent to gffcompare's `-R` flag.", category: "advanced"}
+        precisionCorrection: {description: "Equivalent to gffcompare's `-Q` flag.", category: "advanced"}
+        discardSingleExonTransfragsAndReferenceTranscripts: {description: "Equivalent to gffcompare's `-M` flag.", category: "advanced"}
+        discardSingleExonReferenceTranscripts: {description: "Equivalent to gffcompare's `-N` flag.", category: "advanced"}
+        noTmap: {description: "Equivalent to gffcompare's `-T` flag.", category: "advanced"}
+        verbose: {description: "Equivalent to gffcompare's `-V` flag.", category: "advanced"}
+        debugMode: {description: "Equivalent to gffcompare's `-D` flag.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
+
+    meta {
+        WDL_AID: {
+            exclude: ["noneFile"]
+        }
+    }
 }
\ No newline at end of file
diff --git a/gffread.wdl b/gffread.wdl
index da99781b99529b4e452ae301a6eb2e67c6e2ccaf..43682fbca25f060d938fa324151d0b644d4fcc4f 100644
--- a/gffread.wdl
+++ b/gffread.wdl
@@ -43,4 +43,17 @@ task GffRead {
     runtime {
         docker: dockerImage
     }
+
+    parameter_meta {
+        inputGff: {description: "The input GFF file.", category: "required"}
+        genomicSequence: {description: "The genome.", category: "required"}
+        genomicIndex: {description: "The genome's index.", category: "advanced"}
+        exonsFastaPath: {description: "The location the exons fasta should be written to.", category: "advanced"}
+        CDSFastaPath: {description: "The location the CDS fasta should be written to.", category: "advanced"}
+        proteinFastaPath: {description: "The location the protein fasta should be written to.", category: "advanced"}
+        filteredGffPath: {description: "The location the filtered GFF should be written to.", category: "advanced"}
+        outputGtfFormat: {description: "Equivalent to gffread's `-T` flag.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
\ No newline at end of file
diff --git a/hisat2.wdl b/hisat2.wdl
index 3423e56b02f6ba4f475dc7c02262b7654d7ce55a..1575f7e360c0525d3466959007c8ee8a3246cfb4 100644
--- a/hisat2.wdl
+++ b/hisat2.wdl
@@ -49,4 +49,20 @@ task Hisat2 {
         cpu: threads + 1
         docker: dockerImage
     }
+
+    parameter_meta {
+        indexFiles: {description: "The hisat2 index files.", category: "required"}
+        inputR1: {description: "The first-/single-end FastQ file.", category: "required"}
+        inputR2: {description: "The second-end FastQ file.", category: "common"}
+        outputBam: {description: "The location the output BAM file should be written to.", category: "required"}
+        sample: {description: "The sample id.", category: "required"}
+        library: {description: "The library id.", category: "required"}
+        readgroup: {description: "The readgroup id.", category: "required"}
+        platform: {description: "The platform used for sequencing.", category: "advanced"}
+        downstreamTranscriptomeAssembly: {description: "Equivalent to hisat2's `--dta` flag.", category: "advanced"}
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
\ No newline at end of file
diff --git a/manta.wdl b/manta.wdl
index 21dd21c3fcb60abb343a66bd6f7a74be37cd1cf6..d0ca75e014165fc049071178bb0fadcc3258202e 100644
--- a/manta.wdl
+++ b/manta.wdl
@@ -18,7 +18,6 @@ task Somatic {
         Int cores = 1
         Int memoryGb = 4
         String dockerImage = "quay.io/biocontainers/manta:1.4.0--py27_1"
-
     }
 
     command {
@@ -56,4 +55,22 @@ task Somatic {
         memory: "~{memoryGb}G"
         docker: dockerImage
     }
+
+    parameter_meta {
+        tumorBam: {description: "The tumor/case sample's BAM file.", category: "required"}
+        tumorBamIndex: {description: "The index for the tumor/case sample's BAM file.", category: "required"}
+        normalBam: {description: "The normal/control sample's BAM file.", category: "common"}
+        normalBamIndex: {description: "The index for the normal/control sample's BAM file.", category: "common"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        runDir: {description: "The directory to use as run/output directory.", category: "common"}
+        callRegions: {description: "The bed file which indicates the regions to operate on.", category: "common"}
+        callRegionsIndex: {description: "The index of the bed file which indicates the regions to operate on.", category: "common"}
+        exome: {description: "Whether or not the data is from exome sequencing.", category: "common"}
+
+        cores: {description: "The number of cores to use.", category: "advanced"}
+        memoryGb: {description: "The amount of memory this job will use in Gigabytes.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
diff --git a/minimap2.wdl b/minimap2.wdl
index d8a454daab3a5f879378bedf6c2c9e896cd75468..aff51dccf760e4241af907a85b42a7308f66759d 100644
--- a/minimap2.wdl
+++ b/minimap2.wdl
@@ -59,34 +59,20 @@ task Indexing {
     }
 
     parameter_meta {
-        useHomopolymerCompressedKmer: {
-            description: "Use homopolymer-compressed k-mer (preferrable for PacBio).",
-            category: "advanced"
-        }
-        kmerSize: {
-            description: "K-mer size (no larger than 28).",
-            category: "advanced"
-        }
-        minimizerWindowSize: {
-            description: "Minimizer window size.",
-            category: "advanced"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        referenceFile: {
-            description: "Reference fasta file.",
-            category: "required"
-        }
-        splitIndex: {
-            description: "Split index for every ~NUM input bases.",
-            category: "advanced"
-        }
-        outputIndexFile: {
-            description: "Indexed reference file.",
-            category: "required"
-        }
+        # input
+        useHomopolymerCompressedKmer: {description: "Use homopolymer-compressed k-mer (preferable for PacBio).", category: "advanced"}
+        kmerSize: {description: "K-mer size (no larger than 28).", category: "advanced"}
+        minimizerWindowSize: {description: "Minimizer window size.", category: "advanced"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        referenceFile: {description: "Reference fasta file.", category: "required"}
+        splitIndex: {description: "Split index for every ~NUM input bases.", category: "advanced"}
+        cores: {description: "The number of cores to be used.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # output
+        outputIndexFile: {description: "Indexed reference file."}
     }
 }
 
@@ -147,69 +133,27 @@ task Mapping {
     }
 
     parameter_meta {
-        presetOption: {
-            description: "This option applies multiple options at the same time.",
-            category: "common"
-        }
-        kmerSize: {
-            description: "K-mer size (no larger than 28).",
-            category: "advanced"
-        }
-        outputSAM: {
-            description: "Output in the SAM format.",
-            category: "common"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        maxIntronLength: {
-            description: "Max intron length (effective with -xsplice; changing -r).",
-            category: "advanced"
-        }
-        maxFragmentLength: {
-            description: "Max fragment length (effective with -xsr or in the fragment mode).",
-            category: "advanced"
-        }
-        skipSelfAndDualMappings: {
-            description: "Skip self and dual mappings (for the all-vs-all mode).",
-            category: "advanced"
-        }
-        retainMaxSecondaryAlignments: {
-            description: "Retain at most INT secondary alignments.",
-            category: "advanced"
-        }
-        matchingScore: {
-            description: "Matching score.",
-            category: "advanced"
-        }
-        mismatchPenalty: {
-            description: "Mismatch penalty.",
-            category: "advanced"
-        }
-        howToFindGTAG: {
-            description: "How to find GT-AG. f:transcript strand, b:both strands, n:don't match GT-AG.",
-            category: "common"
-        }
-        addMDtagToSAM: {
-            description: "Adds a MD tag to the SAM output file.",
-            category: "common"
-        }
-        secondaryAlignment: {
-            description: "Whether to output secondary alignments.",
-            category: "advanced"
-        }
-        referenceFile: {
-            description: "Reference fasta file.",
-            category: "required"
-        }
-        queryFile: {
-            description: "Input fasta file.",
-            category: "required"
-        }
-        outputAlignmentFile: {
-            description: "Mapping and alignment between collections of DNA sequences file.",
-            category: "required"
-        }
+        presetOption: {description: "This option applies multiple options at the same time.", category: "common"}
+        kmerSize: {description: "K-mer size (no larger than 28).", category: "advanced"}
+        outputSAM: {description: "Output in the SAM format.", category: "common"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        maxIntronLength: {description: "Max intron length (effective with -xsplice; changing -r).", category: "advanced"}
+        maxFragmentLength: {description: "Max fragment length (effective with -xsr or in the fragment mode).", category: "advanced"}
+        skipSelfAndDualMappings: {description: "Skip self and dual mappings (for the all-vs-all mode).", category: "advanced"}
+        retainMaxSecondaryAlignments: {description: "Retain at most INT secondary alignments.", category: "advanced"}
+        matchingScore: {description: "Matching score.", category: "advanced"}
+        mismatchPenalty: {description: "Mismatch penalty.", category: "advanced"}
+        howToFindGTAG: {description: "How to find GT-AG. f:transcript strand, b:both strands, n:don't match GT-AG.", category: "common"}
+        addMDtagToSAM: {description: "Adds a MD tag to the SAM output file.", category: "common"}
+        secondaryAlignment: {description: "Whether to output secondary alignments.", category: "advanced"}
+        referenceFile: {description: "Reference fasta file.", category: "required"}
+        queryFile: {description: "Input fasta file.", category: "required"}
+        cores: {description: "The number of cores to be used.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # output
+        outputAlignmentFile: {description: "Mapping and alignment between collections of DNA sequences file."}
     }
 }
diff --git a/multiqc.wdl b/multiqc.wdl
index 1c5ecfd49a56ca5efd35ff64f6dfaeae33ca76b1..f30cadeaec3556a6572ef41ddfa9be8b5b05a703 100644
--- a/multiqc.wdl
+++ b/multiqc.wdl
@@ -34,8 +34,6 @@ task MultiQC {
         Boolean megaQCUpload = false # This must be actively enabled in my opinion. The tools default is to upload.
         File? config  # A directory
         String? clConfig
-        Boolean verbose  = false
-        Boolean quiet = false
         Array[Boolean] finished = []  # An array of booleans that can be used to let multiqc wait on stuff.
 
         String memory = "4G"
@@ -92,4 +90,49 @@ task MultiQC {
         memory: memory
         docker: dockerImage
     }
+
+    parameter_meta {
+        analysisDirectory: {description: "The directory to run MultiQC on.", category: "required"}
+        dependencies: {description: "This input must be used in order to run MultiQC after the tasks it depends on.", category: "internal_use_only"}
+        force: {description: "Equivalent to MultiQC's `--force` flag.", category: "advanced"}
+        dirs: {description: "Equivalent to MultiQC's `--dirs` flag.", category: "advanced"}
+        dirsDepth: {description: "Equivalent to MultiQC's `--dirs-depth` option.", category: "advanced"}
+        fullNames: {description: "Equivalent to MultiQC's `--fullnames` flag.", category: "advanced"}
+        title: {description: "Equivalent to MultiQC's `--title` option.", category: "advanced"}
+        comment: {description: "Equivalent to MultiQC's `--comment` option.", category: "advanced"}
+        fileName: {description: "Equivalent to MultiQC's `--filename` option.", category: "advanced"}
+        outDir: {description: "Directory in which the output should be written.", category: "common"}
+        template: {description: "Equivalent to MultiQC's `--template` option.", category: "advanced"}
+        tag: {description: "Equivalent to MultiQC's `--tag` option.", category: "advanced"}
+        ignore: {description: "Equivalent to MultiQC's `--ignore` option.", category: "advanced"}
+        ignoreSamples: {description: "Equivalent to MultiQC's `--ignore-samples` option.", category: "advanced"}
+        ignoreSymlinks: {description: "Equivalent to MultiQC's `--ignore-symlinks` flag.", category: "advanced"}
+        sampleNames: {description: "Equivalent to MultiQC's `--sample-names` option.", category: "advanced"}
+        fileList: {description: "Equivalent to MultiQC's `--file-list` option.", category: "advanced"}
+        exclude: {description: "Equivalent to MultiQC's `--exclude` option.", category: "advanced"}
+        module: {description: "Equivalent to MultiQC's `--module` option.", category: "advanced"}
+        dataDir: {description: "Equivalent to MultiQC's `--data-dir` flag.", category: "advanced"}
+        noDataDir: {description: "Equivalent to MultiQC's `--no-data-dir` flag.", category: "advanced"}
+        dataFormat: {description: "Equivalent to MultiQC's `--data-format` option.", category: "advanced"}
+        zipDataDir: {description: "Equivalent to MultiQC's `--zip-data-dir` flag.", category: "advanced"}
+        export: {description: "Equivalent to MultiQC's `--export` flag.", category: "advanced"}
+        flat: {description: "Equivalent to MultiQC's `--flat` flag.", category: "advanced"}
+        interactive: {description: "Equivalent to MultiQC's `--interactive` flag.", category: "advanced"}
+        lint: {description: "Equivalent to MultiQC's `--lint` flag.", category: "advanced"}
+        pdf: {description: "Equivalent to MultiQC's `--pdf` flag.", category: "advanced"}
+        megaQCUpload: {description: "Whether or not to upload the report to MegaQC; the inverse of MultiQC's `--no-megaqc-upload` flag.", category: "advanced"}
+        config: {description: "Equivalent to MultiQC's `--config` option.", category: "advanced"}
+        clConfig: {description: "Equivalent to MultiQC's `--cl-config` option.", category: "advanced"}
+        finished: {description: "An array of booleans that can be used to make MultiQC wait for other tasks to finish.", category: "internal_use_only"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
+
+    meta {
+        WDL_AID: {
+            exclude: ["finished", "dependencies"]
+        }
+    }
 }
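
The `finished` and `dependencies` inputs exist only to sequence MultiQC after upstream tasks, which is why the new `WDL_AID` meta block excludes them from generated documentation. A minimal sketch of the pattern; the workflow, its inputs, and the `multiqcReport` output name are assumptions for illustration, not part of this change:

```wdl
version 1.0

import "multiqc.wdl" as multiqc

workflow QcReport {
    input {
        File reportsDir             # directory holding earlier QC output
        Array[Boolean] qcTasksDone  # e.g. booleans derived from upstream outputs
    }

    call multiqc.MultiQC {
        input:
            analysisDirectory = reportsDir,
            # `finished` never reaches the multiqc command line; it only
            # forces this call to wait until the upstream tasks are done.
            finished = qcTasksDone
    }

    output {
        File report = MultiQC.multiqcReport  # output name assumed
    }
}
```
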
diff --git a/picard.wdl b/picard.wdl
index caee009b044783696b3d20a62bc6f0562cd3d8c3..48ebf2d5c0e5d0dbf4e7d60d3be563498161e328 100644
--- a/picard.wdl
+++ b/picard.wdl
@@ -4,7 +4,7 @@ task BedToIntervalList {
     input {
         File bedFile
         File dict
-        String outputPath
+        String outputPath = "regions.interval_list"
 
         String memory = "12G"
         String javaXmx = "4G"
@@ -29,6 +29,18 @@ task BedToIntervalList {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        bedFile: {description: "A bed file.", category: "required"}
+        dict: {description: "A sequence dict file.", category: "required"}
+        outputPath: {description: "The location the output interval list should be written to.",
+                     category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task CollectMultipleMetrics {
@@ -119,10 +131,44 @@ task CollectMultipleMetrics {
     }
 
     runtime {
-
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBam: {description: "The input BAM file for which metrics will be collected.",
+                   category: "required"}
+        inputBamIndex: {description: "The index of the input BAM file.", category: "required"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.",
+                         category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
+                             category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        basename: {description: "The basename/prefix of the output files (may include directories).",
+                   category: "required"}
+        collectAlignmentSummaryMetrics: {description: "Equivalent to the `PROGRAM=CollectAlignmentSummaryMetrics` argument.",
+                                         category: "advanced"}
+        collectInsertSizeMetrics: {description: "Equivalent to the `PROGRAM=CollectInsertSizeMetrics` argument.",
+                                   category: "advanced"}
+        qualityScoreDistribution: {description: "Equivalent to the `PROGRAM=QualityScoreDistribution` argument.",
+                                   category: "advanced"}
+        meanQualityByCycle: {description: "Equivalent to the `PROGRAM=MeanQualityByCycle` argument.",
+                             category: "advanced"}
+        collectBaseDistributionByCycle: {description: "Equivalent to the `PROGRAM=CollectBaseDistributionByCycle` argument.",
+                                         category: "advanced"}
+        collectGcBiasMetrics: {description: "Equivalent to the `PROGRAM=CollectGcBiasMetrics` argument.",
+                               category: "advanced"}
+        collectSequencingArtifactMetrics: {description: "Equivalent to the `PROGRAM=CollectSequencingArtifactMetrics` argument.",
+                                           category: "advanced"}
+        collectQualityYieldMetrics: {description: "Equivalent to the `PROGRAM=CollectQualityYieldMetrics` argument.",
+                                     category: "advanced"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task CollectRnaSeqMetrics {
@@ -159,6 +205,23 @@ task CollectRnaSeqMetrics {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBam: {description: "The input BAM file for which metrics will be collected.",
+                   category: "required"}
+        inputBamIndex: {description: "The index of the input BAM file.", category: "required"}
+        refRefflat: {description: "A refflat file containing gene annotations.", category: "required"}
+        basename: {description: "The basename/prefix of the output files (may include directories).",
+                   category: "required"}
+        strandSpecificity: {description: "Equivalent to the `STRAND_SPECIFICITY` option of picard's CollectRnaSeqMetrics.",
+                            category: "common"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task CollectTargetedPcrMetrics {
@@ -201,6 +264,29 @@ task CollectTargetedPcrMetrics {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBam: {description: "The input BAM file for which metrics will be collected.",
+                   category: "required"}
+        inputBamIndex: {description: "The index of the input BAM file.", category: "required"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.",
+                         category: "required"}
+        referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
+                             category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        ampliconIntervals: {description: "An interval list describing the coordinates of the amplicons sequenced.",
+                           category: "required"}
+        targetIntervals: {description: "An interval list describing the coordinates of the targets sequenced.",
+                          category: "required"}
+        basename: {description: "The basename/prefix of the output files (may include directories).",
+                   category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 # Combine multiple recalibrated BAM files from scattered ApplyRecalibration runs
@@ -236,6 +322,18 @@ task GatherBamFiles {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBams: {description: "The BAM files to be merged together.", category: "required"}
+        inputBamsIndex: {description: "The indexes of the input BAM files.", category: "required"}
+        outputBamPath: {description: "The path where the merged BAM file will be written.", category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task GatherVcfs {
@@ -266,6 +364,18 @@ task GatherVcfs {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputVcfs: {description: "The VCF files to be merged together.", category: "required"}
+        inputVcfIndexes: {description: "The indexes of the input VCF files.", category: "required"}
+        outputVcfPath: {description: "The path where the merged VCF file will be written.", category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 # Mark duplicate reads to avoid counting non-independent observations
@@ -320,6 +430,20 @@ task MarkDuplicates {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputBams: {description: "The BAM files for which the duplicate reads should be marked.", category: "required"}
+        inputBamIndexes: {description: "The indexes for the input BAM files.", category: "required"}
+        outputBamPath: {description: "The location where the output BAM file should be written.", category: "required"}
+        metricsPath: {description: "The location where the output metrics file should be written.", category: "required"}
+        read_name_regex: {description: "Equivalent to the `READ_NAME_REGEX` option of MarkDuplicates.", category: "advanced"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 # Combine multiple VCFs or GVCFs from scattered HaplotypeCaller runs
@@ -355,6 +479,18 @@ task MergeVCFs {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        inputVCFs: {description: "The VCF files to be merged.", category: "required"}
+        inputVCFsIndexes: {description: "The indexes of the VCF files.", category: "required"}
+        outputVcfPath: {description: "The location the output VCF file should be written to.", category: "required"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task SamToFastq {
@@ -438,7 +574,7 @@ task SortVcf {
         String memory = "24G"
         String javaXmx = "8G"
         String dockerImage = "quay.io/biocontainers/picard:2.20.5--0"
-        }
+    }
 
 
     command {
@@ -460,4 +596,16 @@ task SortVcf {
         docker: dockerImage
         memory: memory
     }
+
+    parameter_meta {
+        vcfFiles: {description: "The VCF files to merge and sort.", category: "required"}
+        outputVcfPath: {description: "The location the sorted VCF files should be written to.", category: "required"}
+        dict: {description: "A sequence dictionary matching the VCF files.", category: "advanced"}
+
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
\ No newline at end of file
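
With `outputPath` now defaulting to `"regions.interval_list"`, BedToIntervalList can be called without specifying an output location. A minimal sketch, assuming the task's output is named `intervalList`:

```wdl
version 1.0

import "picard.wdl" as picard

workflow MakeIntervalList {
    input {
        File regionsBed     # a BED file
        File referenceDict  # the sequence dictionary for the reference
    }

    call picard.BedToIntervalList {
        input:
            bedFile = regionsBed,
            dict = referenceDict
            # outputPath is omitted; it now defaults to "regions.interval_list"
    }

    output {
        File intervalList = BedToIntervalList.intervalList  # output name assumed
    }
}
```
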
diff --git a/samtools.wdl b/samtools.wdl
index 492cfaf432075af69f7e53340d8333c19273ebb3..73aa9525ec29df01d9deb5cd34093eefc190d9cc 100644
--- a/samtools.wdl
+++ b/samtools.wdl
@@ -26,6 +26,14 @@ task BgzipAndIndex {
     runtime {
        docker: dockerImage
     }
+
+    parameter_meta {
+        inputFile: {description: "The file to be compressed and indexed.", category: "required"}
+        outputDir: {description: "The directory in which the output will be placed.", category: "required"}
+        type: {description: "The type of file (e.g. vcf or bed) to be compressed and indexed.", category: "common"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task Index {
@@ -60,6 +68,14 @@ task Index {
     runtime {
         docker: dockerImage
     }
+
+    parameter_meta {
+        bamFile: {description: "The BAM file for which an index should be made.", category: "required"}
+        outputBamPath: {description: "The location the BAM file should be written to. The index will be created alongside this path.",
+                        category: "common"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task Merge {
@@ -87,6 +103,14 @@ task Merge {
     runtime {
         docker: dockerImage
     }
+
+    parameter_meta {
+        bamFiles: {description: "The BAM files to merge.", category: "required"}
+        outputBamPath: {description: "The location the merged BAM file should be written to.", category: "common"}
+        force: {description: "Equivalent to samtools merge's `-f` flag.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task SortByName {
@@ -110,6 +134,13 @@ task SortByName {
     runtime {
         docker: dockerImage
     }
+
+    parameter_meta {
+        bamFile: {description: "The BAM file to be sorted.", category: "required"}
+        outputBamPath: {description: "The location the sorted BAM file should be written to.", category: "common"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task Markdup {
@@ -133,6 +164,13 @@ task Markdup {
     runtime {
         docker: dockerImage
     }
+
+    parameter_meta {
+        inputBam: {description: "The BAM file to be processed.", category: "required"}
+        outputBamPath: {description: "The location of the output BAM file.", category: "required"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task Flagstat {
@@ -156,6 +194,13 @@ task Flagstat {
     runtime {
         docker: dockerImage
     }
+
+    parameter_meta {
+        inputBam: {description: "The BAM file for which statistics should be retrieved.", category: "required"}
+        outputPath: {description: "The location the output should be written to.", category: "required"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task Fastq {
@@ -204,13 +249,19 @@ task Fastq {
     }
 
     parameter_meta {
-        inputBam: "The bam file to process."
-        outputRead1: "If only outputRead1 is given '-s' flag is assumed. Else '-1'."
-        includeFilter: "Include reads with ALL of these flags. Corresponds to '-f'"
-        excludeFilter: "Exclude reads with ONE OR MORE of these flags. Corresponds to '-F'"
-        excludeSpecificFilter: "Exclude reads with ALL of these flags. Corresponds to '-G'"
-        appendReadNumber: "Append /1 and /2 to the read name, or don't. Corresponds to '-n/N"
-
+        inputBam: {description: "The bam file to process.", category: "required"}
+        outputRead1: {description: "The location the reads (first reads for pairs, in case of paired-end sequencing) should be written to.", category: "required"}
+        outputRead2: {description: "The location the second reads from pairs should be written to.", category: "common"}
+        outputRead0: {description: "The location the unpaired reads should be written to (in case of paired-end sequencing).", category: "advanced"}
+        includeFilter: {description: "Include reads with ALL of these flags. Corresponds to `-f`.", category: "advanced"}
+        excludeFilter: {description: "Exclude reads with ONE OR MORE of these flags. Corresponds to `-F`.", category: "advanced"}
+        excludeSpecificFilter: {description: "Exclude reads with ALL of these flags. Corresponds to `-G`.", category: "advanced"}
+        appendReadNumber: {description: "Append /1 and /2 to the read name, or don't. Corresponds to `-n/N`.", category: "advanced"}
+        outputQuality: {description: "Equivalent to samtools fastq's `-O` flag.", category: "advanced"}
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
     }
 }
 
@@ -240,6 +291,15 @@ task Tabix {
     runtime {
        docker: dockerImage
     }
+
+    parameter_meta {
+        inputFile: {description: "The file to be indexed.", category: "required"}
+        outputFilePath: {description: "The location the file should be written to. The index will be created alongside this path.",
+                        category: "common"}
+        type: {description: "The type of file (e.g. vcf or bed) to be indexed.", category: "common"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task View {
@@ -286,4 +346,20 @@ task View {
         memory: memory
         docker: dockerImage
     }
+
+    parameter_meta {
+        inFile: {description: "A BAM, SAM or CRAM file.", category: "required"}
+        referenceFasta: {description: "The reference fasta file also used for mapping.", category: "advanced"}
+        outputFileName: {description: "The location the output BAM file should be written to.", category: "common"}
+        uncompressedBamOutput: {description: "Equivalent to samtools view's `-u` flag.", category: "advanced"}
+        includeFilter: {description: "Equivalent to samtools view's `-f` option.", category: "advanced"}
+        excludeFilter: {description: "Equivalent to samtools view's `-F` option.", category: "advanced"}
+        excludeSpecificFilter: {description: "Equivalent to samtools view's `-G` option.", category: "advanced"}
+        MAPQthreshold: {description: "Equivalent to samtools view's `-q` option.", category: "advanced"}
+
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
diff --git a/scripts b/scripts
index fc603e5d408b89b99297fb5737586c059c5f9df6..a1783b5c789ebef601a8ec5849c4bbfe7dd3f87d 160000
--- a/scripts
+++ b/scripts
@@ -1 +1 @@
-Subproject commit fc603e5d408b89b99297fb5737586c059c5f9df6
+Subproject commit a1783b5c789ebef601a8ec5849c4bbfe7dd3f87d
diff --git a/somaticseq.wdl b/somaticseq.wdl
index 450152554c244306819917310f54ba891667c6df..55dd4b94681645b0b39e210154dd6ea9b6bf18e9 100644
--- a/somaticseq.wdl
+++ b/somaticseq.wdl
@@ -2,8 +2,6 @@ version 1.0
 
 task ParallelPaired {
     input {
-        String installDir = "/opt/somaticseq" #the location in the docker image
-
         File? classifierSNV
         File? classifierIndel
         String outputDir
@@ -33,7 +31,7 @@ task ParallelPaired {
     }
 
     command {
-        ~{installDir}/somaticseq_parallel.py \
+        /opt/somaticseq/somaticseq_parallel.py \
         ~{"--classifier-snv " + classifierSNV} \
         ~{"--classifier-indel " + classifierIndel} \
         --output-directory ~{outputDir} \
@@ -73,12 +71,40 @@ task ParallelPaired {
         cpu: threads
         docker: dockerImage
     }
+
+    parameter_meta {
+        classifierSNV: {description: "A somaticseq SNV classifier.", category: "common"}
+        classifierIndel: {description: "A somaticseq Indel classifier.", category: "common"}
+        outputDir: {description: "The directory to write the output to.", category: "common"}
+        referenceFasta: {description: "The reference fasta file.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        inclusionRegion: {description: "A bed file describing regions to include.", category: "common"}
+        exclusionRegion: {description: "A bed file describing regions to exclude.", category: "common"}
+        normalBam: {description: "The normal/control sample's BAM file.", category: "required"}
+        normalBamIndex: {description: "The index for the normal/control sample's BAM file.", category: "required"}
+        tumorBam: {description: "The tumor/case sample's BAM file.", category: "required"}
+        tumorBamIndex: {description: "The index for the tumor/case sample's BAM file.", category: "required"}
+        mutect2VCF: {description: "A VCF as produced by mutect2.", category: "advanced"}
+        varscanSNV: {description: "An SNV VCF as produced by varscan.", category: "advanced"}
+        varscanIndel: {description: "An indel VCF as produced by varscan.", category: "advanced"}
+        jsmVCF: {description: "A VCF as produced by jsm.", category: "advanced"}
+        somaticsniperVCF: {description: "A VCF as produced by somaticsniper.", category: "advanced"}
+        vardictVCF: {description: "A VCF as produced by vardict.", category: "advanced"}
+        museVCF: {description: "A VCF as produced by muse.", category: "advanced"}
+        lofreqSNV: {description: "An SNV VCF as produced by lofreq.", category: "advanced"}
+        lofreqIndel: {description: "An indel VCF as produced by lofreq.", category: "advanced"}
+        scalpelVCF: {description: "A VCF as produced by scalpel.", category: "advanced"}
+        strelkaSNV: {description: "An SNV VCF as produced by strelka.", category: "advanced"}
+        strelkaIndel: {description: "An indel VCF as produced by strelka.", category: "advanced"}
+
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task ParallelPairedTrain {
     input {
-        String installDir = "/opt/somaticseq" #the location in the docker image
-
         File truthSNV
         File truthIndel
         String outputDir
@@ -108,7 +134,7 @@ task ParallelPairedTrain {
     }
 
     command {
-        ~{installDir}/somaticseq_parallel.py \
+        /opt/somaticseq/somaticseq_parallel.py \
         --somaticseq-train \
         --truth-snv ~{truthSNV} \
         --truth-indel ~{truthIndel} \
@@ -147,12 +173,40 @@ task ParallelPairedTrain {
         cpu: threads
         docker: dockerImage
     }
+
+    parameter_meta {
+        truthSNV: {description: "A VCF of true SNVs.", category: "required"}
+        truthIndel: {description: "A VCF of true indels.", category: "required"}
+        outputDir: {description: "The directory to write the output to.", category: "common"}
+        referenceFasta: {description: "The reference fasta file.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        inclusionRegion: {description: "A bed file describing regions to include.", category: "common"}
+        exclusionRegion: {description: "A bed file describing regions to exclude.", category: "common"}
+        normalBam: {description: "The normal/control sample's BAM file.", category: "required"}
+        normalBamIndex: {description: "The index for the normal/control sample's BAM file.", category: "required"}
+        tumorBam: {description: "The tumor/case sample's BAM file.", category: "required"}
+        tumorBamIndex: {description: "The index for the tumor/case sample's BAM file.", category: "required"}
+        mutect2VCF: {description: "A VCF as produced by mutect2.", category: "advanced"}
+        varscanSNV: {description: "An SNV VCF as produced by varscan.", category: "advanced"}
+        varscanIndel: {description: "An indel VCF as produced by varscan.", category: "advanced"}
+        jsmVCF: {description: "A VCF as produced by jsm.", category: "advanced"}
+        somaticsniperVCF: {description: "A VCF as produced by somaticsniper.", category: "advanced"}
+        vardictVCF: {description: "A VCF as produced by vardict.", category: "advanced"}
+        museVCF: {description: "A VCF as produced by muse.", category: "advanced"}
+        lofreqSNV: {description: "An SNV VCF as produced by lofreq.", category: "advanced"}
+        lofreqIndel: {description: "An indel VCF as produced by lofreq.", category: "advanced"}
+        scalpelVCF: {description: "A VCF as produced by scalpel.", category: "advanced"}
+        strelkaSNV: {description: "An SNV VCF as produced by strelka.", category: "advanced"}
+        strelkaIndel: {description: "An indel VCF as produced by strelka.", category: "advanced"}
+
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task ParallelSingle {
     input {
-        String installDir = "/opt/somaticseq" #the location in the docker image
-
         File? classifierSNV
         File? classifierIndel
         String outputDir
@@ -174,7 +228,7 @@ task ParallelSingle {
     }
 
     command {
-        ~{installDir}/somaticseq_parallel.py \
+        /opt/somaticseq/somaticseq_parallel.py \
         ~{"--classifier-snv " + classifierSNV} \
         ~{"--classifier-indel " + classifierIndel} \
         --output-directory ~{outputDir} \
@@ -207,12 +261,32 @@ task ParallelSingle {
         cpu: threads
         docker: dockerImage
     }
+
+    parameter_meta {
+        classifierSNV: {description: "A somaticseq SNV classifier.", category: "common"}
+        classifierIndel: {description: "A somaticseq Indel classifier.", category: "common"}
+        outputDir: {description: "The directory to write the output to.", category: "common"}
+        referenceFasta: {description: "The reference fasta file.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        inclusionRegion: {description: "A bed file describing regions to include.", category: "common"}
+        exclusionRegion: {description: "A bed file describing regions to exclude.", category: "common"}
+        bam: {description: "The input BAM file.", category: "required"}
+        bamIndex: {description: "The index for the input BAM file.", category: "required"}
+        mutect2VCF: {description: "A VCF as produced by mutect2.", category: "advanced"}
+        varscanVCF: {description: "A VCF as produced by varscan.", category: "advanced"}
+        vardictVCF: {description: "A VCF as produced by vardict.", category: "advanced"}
+        lofreqVCF: {description: "A VCF as produced by lofreq.", category: "advanced"}
+        scalpelVCF: {description: "A VCF as produced by scalpel.", category: "advanced"}
+        strelkaVCF: {description: "A VCF as produced by strelka.", category: "advanced"}
+
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task ParallelSingleTrain {
     input {
-        String installDir = "/opt/somaticseq" #the location in the docker image
-
         File truthSNV
         File truthIndel
         String outputDir
@@ -234,7 +308,7 @@ task ParallelSingleTrain {
     }
 
     command {
-        ~{installDir}/somaticseq_parallel.py \
+        /opt/somaticseq/somaticseq_parallel.py \
         --somaticseq-train \
         --truth-snv ~{truthSNV} \
         --truth-indel ~{truthIndel} \
@@ -266,23 +340,41 @@ task ParallelSingleTrain {
         cpu: threads
         docker: dockerImage
     }
+
+    parameter_meta {
+        truthSNV: {description: "A VCF of true SNVs.", category: "required"}
+        truthIndel: {description: "A VCF of true indels.", category: "required"}
+        outputDir: {description: "The directory to write the output to.", category: "common"}
+        referenceFasta: {description: "The reference fasta file.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        inclusionRegion: {description: "A bed file describing regions to include.", category: "common"}
+        exclusionRegion: {description: "A bed file describing regions to exclude.", category: "common"}
+        bam: {description: "The input BAM file.", category: "required"}
+        bamIndex: {description: "The index for the input BAM file.", category: "required"}
+        mutect2VCF: {description: "A VCF as produced by mutect2.", category: "advanced"}
+        varscanVCF: {description: "A VCF as produced by varscan.", category: "advanced"}
+        vardictVCF: {description: "A VCF as produced by vardict.", category: "advanced"}
+        lofreqVCF: {description: "A VCF as produced by lofreq.", category: "advanced"}
+        scalpelVCF: {description: "A VCF as produced by scalpel.", category: "advanced"}
+        strelkaVCF: {description: "A VCF as produced by strelka.", category: "advanced"}
+
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task ModifyStrelka {
     input {
-        String installDir = "/opt/somaticseq/vcfModifier" #the location in the docker image
-
         File strelkaVCF
         String outputVCFName = basename(strelkaVCF, ".gz")
-
-        Int threads = 1
         String dockerImage = "lethalfang/somaticseq:3.1.0"
     }
 
     command {
         set -e
 
-        ~{installDir}/modify_Strelka.py \
+        /opt/somaticseq/vcfModifier/modify_Strelka.py \
         -infile ~{strelkaVCF} \
         -outfile "modified_strelka.vcf"
 
@@ -295,7 +387,13 @@ task ModifyStrelka {
     }
 
     runtime {
-        cpu: threads
         docker: dockerImage
     }
+
+    parameter_meta {
+        strelkaVCF: {description: "A vcf file as produced by strelka.", category: "required"}
+        outputVCFName: {description: "The location the output VCF file should be written to.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
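
With the script locations hardcoded to `/opt/somaticseq` (the install path inside the default image), the somaticseq tasks no longer accept an `installDir` input; anyone overriding `dockerImage` must supply an image with the same layout. A sketch of the simplified call; the workflow and the `outputVcf` output name are assumptions:

```wdl
version 1.0

import "somaticseq.wdl" as somaticseq

workflow FixStrelkaVcf {
    input {
        File strelkaVcf
    }

    call somaticseq.ModifyStrelka {
        input:
            strelkaVCF = strelkaVcf
            # no installDir or threads inputs anymore; the script path is
            # fixed to /opt/somaticseq/vcfModifier inside the image.
    }

    output {
        File modified = ModifyStrelka.outputVcf  # output name assumed
    }
}
```
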
diff --git a/star.wdl b/star.wdl
index fb788175af154fdc0af84018b9e3de840e2a622c..bc6ae5d943e602ea699ff1b44850577c07691370 100644
--- a/star.wdl
+++ b/star.wdl
@@ -19,7 +19,7 @@ task Star {
         String dockerImage = "quay.io/biocontainers/star:2.7.3a--0"
     }
 
-    #TODO Needs to be extended for all possible output extensions
+    #TODO Could be extended for all possible output extensions
     Map[String, String] samOutputNames = {"BAM SortedByCoordinate": "sortedByCoord.out.bam"}
 
     command {
@@ -48,6 +48,24 @@ task Star {
         memory: memory
         docker: dockerImage
     }
+
+    parameter_meta {
+        inputR1: {description: "The first-/single-end FastQ files.", category: "required"}
+        inputR2: {description: "The second-end FastQ files (in the same order as the first-end files).", category: "common"}
+        indexFiles: {description: "The star index files.", category: "required"}
+        outFileNamePrefix: {description: "The prefix for the output files. May include directories.", category: "required"}
+        outSAMtype: {description: "The type of alignment file to be produced. Currently only `BAM SortedByCoordinate` is supported.", category: "advanced"}
+        readFilesCommand: {description: "Equivalent to star's `--readFilesCommand` option.", category: "advanced"}
+        outStd: {description: "Equivalent to star's `--outStd` option.", category: "advanced"}
+        twopassMode: {description: "Equivalent to star's `--twopassMode` option.", category: "advanced"}
+        outSAMattrRGline: {description: "The readgroup lines for the fastq pairs given (in the same order as the fastq files).", category: "common"}
+        outSAMunmapped: {description: "Equivalent to star's `--outSAMunmapped` option.", category: "advanced"}
+        limitBAMsortRAM: {description: "Equivalent to star's `--limitBAMsortRAM` option.", category: "advanced"}
+        runThreadN: {description: "The number of threads to use.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task MakeStarRGline {
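
Since only `BAM SortedByCoordinate` is currently mapped to an output extension, the default `outSAMtype` is effectively the only supported value. A sketch of a paired-end call; the workflow and the `bamFile` output name are assumptions:

```wdl
version 1.0

import "star.wdl" as star

workflow AlignSample {
    input {
        Array[File]+ reads1
        Array[File] reads2 = []
        Array[File] starIndex
    }

    call star.Star {
        input:
            inputR1 = reads1,
            inputR2 = reads2,
            indexFiles = starIndex,
            outFileNamePrefix = "sample."
            # outSAMtype keeps its "BAM SortedByCoordinate" default,
            # the only value currently mapped to an output extension.
    }

    output {
        File bam = Star.bamFile  # output name assumed
    }
}
```
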
diff --git a/strelka.wdl b/strelka.wdl
index 2c8b5a06728580f0bd199cf369a95b802d78e6ce..212863ce62070df97b63bf58266ac1526fd82dc3 100644
--- a/strelka.wdl
+++ b/strelka.wdl
@@ -44,6 +44,23 @@ task Germline {
         cpu: cores
         memory: "~{memoryGb}G"
     }
+
+    parameter_meta {
+        runDir: {description: "The directory to use as run/output directory.", category: "common"}
+        bams: {description: "The input BAM files.", category: "required"}
+        indexes: {description: "The indexes for the input BAM files.", category: "required"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        callRegions: {description: "The bed file which indicates the regions to operate on.", category: "common"}
+        callRegionsIndex: {description: "The index of the bed file which indicates the regions to operate on.", category: "common"}
+        exome: {description: "Whether or not the data is from exome sequencing.", category: "common"}
+        rna: {description: "Whether or not the data is from RNA sequencing.", category: "common"}
+
+        cores: {description: "The number of cores to use.", category: "advanced"}
+        memoryGb: {description: "The amount of memory this job will use in Gigabytes.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
 
 task Somatic {
@@ -96,4 +113,30 @@ task Somatic {
         cpu: cores
         memory: "~{memoryGb}G"
     }
+
+    parameter_meta {
+        runDir: {description: "The directory to use as run/output directory.", category: "common"}
+        normalBam: {description: "The normal/control sample's BAM file.", category: "required"}
+        normalBamIndex: {description: "The index for the normal/control sample's BAM file.", category: "required"}
+        tumorBam: {description: "The tumor/case sample's BAM file.", category: "required"}
+        tumorBamIndex: {description: "The index for the tumor/case sample's BAM file.", category: "required"}
+        referenceFasta: {description: "The reference fasta file which was also used for mapping.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        callRegions: {description: "The bed file which indicates the regions to operate on.", category: "common"}
+        callRegionsIndex: {description: "The index of the bed file which indicates the regions to operate on.", category: "common"}
+        indelCandidatesVcf: {description: "An indel candidates VCF file from manta.", category: "advanced"}
+        indelCandidatesVcfIndex: {description: "The index for the indel candidates VCF file.", category: "advanced"}
+        exome: {description: "Whether or not the data is from exome sequencing.", category: "common"}
+
+        cores: {description: "The number of cores to use.", category: "advanced"}
+        memoryGb: {description: "The amount of memory this job will use in Gigabytes.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
+
+    meta {
+        WDL_AID: {
+            exclude: ["doNotDefineThis"]
+        }
+    }
 }
\ No newline at end of file
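
The `indelCandidatesVcf` inputs let Somatic consume manta's candidate indels, as the parameter_meta above notes. A sketch of such a call; the workflow and the output names are assumptions:

```wdl
version 1.0

import "strelka.wdl" as strelka

workflow SomaticCalling {
    input {
        File normalBam
        File normalBai
        File tumorBam
        File tumorBai
        File refFasta
        File refFai
        File? mantaIndels       # candidate indels from manta, if available
        File? mantaIndelsIndex
    }

    call strelka.Somatic {
        input:
            normalBam = normalBam,
            normalBamIndex = normalBai,
            tumorBam = tumorBam,
            tumorBamIndex = tumorBai,
            referenceFasta = refFasta,
            referenceFastaFai = refFai,
            indelCandidatesVcf = mantaIndels,
            indelCandidatesVcfIndex = mantaIndelsIndex,
            exome = false
    }

    output {
        File snvVcf = Somatic.variantsVcf  # output names assumed
        File indelVcf = Somatic.indelsVcf
    }
}
```
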
diff --git a/talon.wdl b/talon.wdl
index d63cada7b4f16365c9bb1cb38236956507c960b3..c861e56d2e6e923810506f3f62dc94a8cdfece55 100644
--- a/talon.wdl
+++ b/talon.wdl
@@ -30,7 +30,6 @@ task CreateAbundanceFileFromDatabase {
         File? whitelistFile
         File? datasetsFile
 
-        Int cores = 1
         String memory = "4G"
         String dockerImage = "biocontainers/talon:v4.4.1_cv1"
     }
@@ -52,40 +51,25 @@ task CreateAbundanceFileFromDatabase {
     }
 
     runtime {
-        cpu: cores
         memory: memory
         docker: dockerImage
     }
 
     parameter_meta {
-        databaseFile: {
-            description: "TALON database.",
-            category: "required"
-        }
-        annotationVersion: {
-            description: "Which annotation version to use.",
-            category: "required"
-        }
-        genomeBuild: {
-            description: "Genome build to use.",
-            category: "required"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        whitelistFile: {
-            description: "Whitelist file of transcripts to include in the output.",
-            category: "advanced"
-        }
-        datasetsFile: {
-            description: "A file indicating which datasets should be included.",
-            category: "advanced"
-        }
-        outputAbundanceFile: {
-            description: "Abundance for each transcript in the TALON database across datasets.",
-            category: "required"
-        }
+        # inputs
+        databaseFile: {description: "TALON database.", category: "required"}
+        annotationVersion: {description: "Which annotation version to use.", category: "required"}
+        genomeBuild: {description: "Genome build to use.", category: "required"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        whitelistFile: {description: "Whitelist file of transcripts to include in the output.", category: "advanced"}
+        datasetsFile: {description: "A file indicating which datasets should be included.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # outputs
+        outputAbundanceFile: {description: "Abundance for each transcript in the TALON database across datasets."}
+
     }
 }
 
@@ -100,7 +84,6 @@ task CreateGtfFromDatabase {
         File? whitelistFile
         File? datasetFile
 
-        Int cores = 1
         String memory = "4G"
         String dockerImage = "biocontainers/talon:v4.4.1_cv1"
     }
@@ -123,44 +106,25 @@ task CreateGtfFromDatabase {
     }
 
     runtime {
-        cpu: cores
         memory: memory
         docker: dockerImage
     }
 
     parameter_meta {
-        databaseFile: {
-            description: "TALON database.",
-            category: "required"
-        }
-        genomeBuild: {
-            description: "Genome build to use.",
-            category: "required"
-        }
-        annotationVersion: {
-            description: "Which annotation version to use.",
-            category: "required"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        observedInDataset: {
-            description: "The output will only include transcripts that were observed at least once.",
-            category: "advanced"
-        }
-        whitelistFile: {
-            description: "Whitelist file of transcripts to include in the output.",
-            category: "advanced"
-        }
-        datasetFile: {
-            description: "A file indicating which datasets should be included.",
-            category: "advanced"
-        }
-        outputGTFfile: {
-            description: "The genes, transcripts, and exons stored a TALON database in GTF format.",
-            category: "required"
-        }
+        # inputs
+        databaseFile: {description: "TALON database.", category: "required"}
+        genomeBuild: {description: "Genome build to use.", category: "required"}
+        annotationVersion: {description: "Which annotation version to use.", category: "required"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        observedInDataset: {description: "The output will only include transcripts that were observed at least once.", category: "advanced"}
+        whitelistFile: {description: "Whitelist file of transcripts to include in the output.", category: "advanced"}
+        datasetFile: {description: "A file indicating which datasets should be included.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # outputs
+        outputGTFfile: {description: "The genes, transcripts, and exons stored in a TALON database, in GTF format."}
     }
 }
 
@@ -172,7 +136,6 @@ task FilterTalonTranscripts {
 
         File? pairingsFile
 
-        Int cores = 1
         String memory = "4G"
         String dockerImage = "biocontainers/talon:v4.4.1_cv1"
     }
@@ -192,28 +155,18 @@ task FilterTalonTranscripts {
     }
 
     runtime {
-        cpu: cores
         memory: memory
         docker: dockerImage
     }
 
     parameter_meta {
-        databaseFile: {
-            description: "TALON database.",
-            category: "required"
-        }
-        annotationVersion: {
-            description: "Which annotation version to use.",
-            category: "required"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        pairingsFile: {
-            description: "A file indicating which datasets should be considered together.",
-            category: "advanced"
-        }
+        databaseFile: {description: "TALON database.", category: "required"}
+        annotationVersion: {description: "Which annotation version to use.", category: "required"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        pairingsFile: {description: "A file indicating which datasets should be considered together.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
     }
 }
 
@@ -225,7 +178,6 @@ task GetReadAnnotations {
 
         File? datasetFile
 
-        Int cores = 1
         String memory = "4G"
         String dockerImage = "biocontainers/talon:v4.4.1_cv1"
     }
@@ -245,32 +197,22 @@ task GetReadAnnotations {
     }
 
     runtime {
-        cpu: cores
         memory: memory
         docker: dockerImage
     }
 
     parameter_meta {
-        databaseFile: {
-            description: "TALON database.",
-            category: "required"
-        }
-        genomeBuild: {
-            description: "Genome build to use.",
-            category: "required"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        datasetFile: {
-            description: "A file indicating which datasets should be included.",
-            category: "advanced"
-        }
-        outputAnnotation: {
-            description: "Read-specific annotation information from a TALON database.",
-            category: "required"
-        }
+        # inputs
+        databaseFile: {description: "TALON database.", category: "required"}
+        genomeBuild: {description: "Genome build to use.", category: "required"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        datasetFile: {description: "A file indicating which datasets should be included.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # outputs
+        outputAnnotation: {description: "Read-specific annotation information from a TALON database."}
     }
 }
 
@@ -285,7 +227,6 @@ task InitializeTalonDatabase {
         Int cutoff3p = 300
         String outputPrefix
 
-        Int cores = 1
         String memory = "10G"
         String dockerImage = "biocontainers/talon:v4.4.1_cv1"
     }
@@ -309,48 +250,26 @@ task InitializeTalonDatabase {
     }
 
     runtime {
-        cpu: cores
         memory: memory
         docker: dockerImage
     }
 
     parameter_meta {
-        GTFfile: {
-            description: "GTF annotation containing genes, transcripts, and edges.",
-            category: "required"
-        }
-        genomeBuild: {
-            description: "Name of genome build that the GTF file is based on (ie hg38).",
-            category: "required"
-        }
-        annotationVersion: {
-            description: "Name of supplied annotation (will be used to label data).",
-            category: "required"
-        }
-        minimumLength: { 
-            description: "Minimum required transcript length.",
-            category: "common"
-        }
-        novelIDprefix: {
-            description: "Prefix for naming novel discoveries in eventual TALON runs.",
-            category: "common"
-        }
-        cutoff5p: { 
-            description: "Maximum allowable distance (bp) at the 5' end during annotation.",
-            category: "advanced"
-        }
-        cutoff3p: {
-            description: "Maximum allowable distance (bp) at the 3' end during annotation.",
-            category: "advanced"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        outputDatabase: {
-            description: "TALON database.",
-            category: "required"
-        }
+        # inputs
+        GTFfile: {description: "GTF annotation containing genes, transcripts, and edges.", category: "required"}
+        genomeBuild: {description: "Name of genome build that the GTF file is based on (i.e. hg38).", category: "required"}
+        annotationVersion: {description: "Name of supplied annotation (will be used to label data).", category: "required"}
+        minimumLength: {description: "Minimum required transcript length.", category: "common"}
+        novelIDprefix: {description: "Prefix for naming novel discoveries in eventual TALON runs.", category: "common"}
+        cutoff5p: {description: "Maximum allowable distance (bp) at the 5' end during annotation.", category: "advanced"}
+        cutoff3p: {description: "Maximum allowable distance (bp) at the 3' end during annotation.", category: "advanced"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # outputs
+        outputDatabase: {description: "TALON database."}
     }
 }
 
@@ -358,7 +277,6 @@ task ReformatGtf {
     input {
         File GTFfile
 
-        Int cores = 1
         String memory = "4G"
         String dockerImage = "biocontainers/talon:v4.4.1_cv1"
     }
@@ -374,16 +292,15 @@ task ReformatGtf {
     }
 
     runtime {
-        cpu: cores
         memory: memory
         docker: dockerImage
     }
 
     parameter_meta {
-        GTFfile: {
-            description: "GTF annotation containing genes, transcripts, and edges.",
-            category: "required"
-        }
+        GTFfile: {description: "GTF annotation containing genes, transcripts, and edges.", category: "required"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
     }
 }
 
@@ -395,7 +312,6 @@ task SummarizeDatasets {
 
         File? datasetGroupsCSV
 
-        Int cores = 1
         String memory = "4G"
         String dockerImage = "biocontainers/talon:v4.4.1_cv1"
     }
@@ -415,32 +331,22 @@ task SummarizeDatasets {
     }
 
     runtime {
-        cpu: cores
         memory: memory
         docker: dockerImage
     }
 
     parameter_meta {
-        databaseFile: {
-            description: "TALON database.",
-            category: "required"
-        }
-        setVerbose: {
-            description: "Print out the counts in terminal.",
-            category: "advanced"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        datasetGroupsCSV: {
-            description: "File of comma-delimited dataset groups to process together.",
-            category: "advanced"
-        }
-        outputSummaryFile: {
-            description: "Tab-delimited file of gene and transcript counts for each dataset.",
-            category: "required"
-        }
+        # inputs
+        databaseFile: {description: "TALON database.", category: "required"}
+        setVerbose: {description: "Print out the counts in terminal.", category: "advanced"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        datasetGroupsCSV: {description: "File of comma-delimited dataset groups to process together.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # outputs
+        outputSummaryFile: {description: "Tab-delimited file of gene and transcript counts for each dataset."}
     }
 }
 
@@ -496,53 +402,24 @@ task Talon {
     }
 
     parameter_meta {
-        SAMfiles: {
-            description: "Input SAM files.",
-            category: "required"
-        }
-        organism: {
-            description: "The name of the organism from which the samples originated.",
-            category: "required"
-        }
-        sequencingPlatform: {
-            description: "The sequencing platform used to generate long reads.",
-            category: "required"
-        }
-        databaseFile: {
-            description: "TALON database. Created using initialize_talon_database.py.",
-            category: "required"
-        }
-        genomeBuild: {
-            description: "Genome build (i.e. hg38) to use.",
-            category: "required"
-        }
-        minimumCoverage: {
-            description: "Minimum alignment coverage in order to use a SAM entry.",
-            category: "common"
-        }
-        minimumIdentity: {
-            description: "Minimum alignment identity in order to use a SAM entry.",
-            category: "common" 
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        outputUpdatedDatabase: {
-            description: "Updated TALON database.",
-            category: "required"
-        }
-        outputLog: {
-            description: "Log file from TALON run.",
-            category: "required"
-        }
-        outputAnnot: {
-            description: "Read annotation file from TALON run.",
-            category: "required"
-        }
-        outputConfigFile: {
-            description: "The TALON configuration file.",
-            category: "required"
-        }
+        # inputs
+        SAMfiles: {description: "Input SAM files.", category: "required"}
+        organism: {description: "The name of the organism from which the samples originated.", category: "required"}
+        sequencingPlatform: {description: "The sequencing platform used to generate long reads.", category: "required"}
+        databaseFile: {description: "TALON database. Created using initialize_talon_database.py.", category: "required"}
+        genomeBuild: {description: "Genome build (e.g. hg38) to use.", category: "required"}
+        minimumCoverage: {description: "Minimum alignment coverage in order to use a SAM entry.", category: "common"}
+        minimumIdentity: {description: "Minimum alignment identity in order to use a SAM entry.", category: "common"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        cores: {description: "The number of cores to be used.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # outputs
+        outputUpdatedDatabase: {description: "Updated TALON database."}
+        outputLog: {description: "Log file from TALON run."}
+        outputAnnot: {description: "Read annotation file from TALON run."}
+        outputConfigFile: {description: "The TALON configuration file."}
     }
 }
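
A minimal sketch of how the Talon task reads after this patch — the workflow wrapper, sample values, and file paths below are illustrative only, not taken from the repository:

```wdl
version 1.0

import "talon.wdl" as talon

workflow TalonExample {
    call talon.Talon {
        input:
            # Required inputs, per the parameter_meta above.
            SAMfiles = ["sample1.sam"],
            organism = "Homo sapiens",
            sequencingPlatform = "PacBio-RS-II",  # hypothetical platform label
            databaseFile = "talon.db",
            genomeBuild = "hg38",
            outputPrefix = "talon/sample1"
            # Advanced inputs (cores, memory, dockerImage) fall back to task defaults.
    }
}
```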
diff --git a/transcriptclean.wdl b/transcriptclean.wdl
index b7b913dcf310de9980e61fb445b41d1ec9987b3e..e288e316736c5a10cd7b01407d53124785b21357 100644
--- a/transcriptclean.wdl
+++ b/transcriptclean.wdl
@@ -27,7 +27,6 @@ task GetSJsFromGtf {
         String outputPrefix
         Int minIntronSize = 21
 
-        Int cores = 1
         String memory = "8G"
         String dockerImage = "biocontainers/transcriptclean:v2.0.2_cv1"
     }
@@ -47,32 +46,21 @@ task GetSJsFromGtf {
     }
 
     runtime {
-        cpu: cores
         memory: memory
         docker: dockerImage
     }
 
     parameter_meta {
-        GTFfile: {
-            description: "Input GTF file",
-            category: "required"
-        }
-        genomeFile: {
-            description: "Reference genome",
-            category: "required"
-        }
-        minIntronSize: {
-            description: "Minimum size of intron to consider a junction.",
-            category: "advanced"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        outputSJsFile: {
-            description: "Extracted splice junctions.",
-            category: "required"
-        }
+        # inputs
+        GTFfile: {description: "Input GTF file.", category: "required"}
+        genomeFile: {description: "Reference genome.", category: "required"}
+        minIntronSize: {description: "Minimum size of intron to consider a junction.", category: "advanced"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+        # outputs
+        outputSJsFile: {description: "Extracted splice junctions."}
     }
 }
 
@@ -81,7 +69,6 @@ task GetTranscriptCleanStats {
         File transcriptCleanSAMfile
         String outputPrefix
 
-        Int cores = 1
         String memory = "4G"
         String dockerImage = "biocontainers/transcriptclean:v2.0.2_cv1"
     }
@@ -99,24 +86,20 @@ task GetTranscriptCleanStats {
     }
 
     runtime {
-        cpu: cores
         memory: memory
         docker: dockerImage
     }
 
     parameter_meta {
-        transcriptCleanSAMfile: {
-            description: "Output SAM file from TranscriptClean",
-            category: "required"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        outputStatsFile: {
-            description: "Summary stats from TranscriptClean run.",
-            category: "required"
-        }
+        # inputs
+        transcriptCleanSAMfile: {description: "The SAM file output by TranscriptClean.", category: "required"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # outputs
+        outputStatsFile: {description: "Summary stats from TranscriptClean run."}
     }
 }
 
@@ -180,81 +163,31 @@ task TranscriptClean {
     }
 
     parameter_meta {
-        SAMfile: {
-            description: "Input SAM file containing transcripts to correct.",
-            category: "required"
-        }
-        referenceGenome: {
-            description: "Reference genome fasta file.",
-            category: "required"
-        }
-        maxLenIndel: {
-            description: "Maximum size indel to correct.",
-            category: "advanced"
-        }
-        maxSJoffset: {
-            description: "Maximum distance from annotated splice junction to correct.",
-            category: "advanced"
-        }
-        outputPrefix: {
-            description: "Output directory path + output file prefix.",
-            category: "required"
-        }
-        correctMismatches: {
-            description: "Set this to make TranscriptClean correct mismatches.",
-            category: "common"
-        }
-        correctIndels: {
-            description: "Set this to make TranscriptClean correct indels.",
-            category: "common"
-        }
-        correctSJs: {
-            description: "Set this to make TranscriptClean correct splice junctions.",
-            category: "common"
-        }
-        dryRun: {
-            description: "TranscriptClean will read in the data but don't do any correction.",
-            category: "advanced"
-        }
-        primaryOnly: {
-            description: "Only output primary mappings of transcripts.",
-            category: "advanced"
-        }
-        canonOnly: {
-            description: "Only output canonical transcripts and transcript containing annotated noncanonical junctions.",
-            category: "advanced"
-        }
-        bufferSize: {
-            description: "Number of lines to output to file at once by each thread during run.",
-            category: "common"
-        }
-        deleteTmp: {
-            description: "The temporary directory generated by TranscriptClean will be removed.",
-            category: "common"
-        }
-        spliceJunctionAnnotation: {
-            description: "Splice junction file.",
-            category: "common"
-        }
-        variantFile: {
-            description: "VCF formatted file of variants.",
-            category: "common"
-        }
-        outputTranscriptCleanFasta: {
-            description: "Fasta file containing corrected reads.",
-            category: "required"
-        }
-        outputTranscriptCleanLog: {
-            description: "Log file of TranscriptClean run.",
-            category: "required"
-        }
-        outputTranscriptCleanSAM: {
-            description: "SAM file containing corrected aligned reads.",
-            category: "required"
-        }
-        outputTranscriptCleanTElog: {
-            description: "TE log file of TranscriptClean run.",
-            category: "required"
-        }
+        # inputs
+        SAMfile: {description: "Input SAM file containing transcripts to correct.", category: "required"}
+        referenceGenome: {description: "Reference genome fasta file.", category: "required"}
+        maxLenIndel: {description: "Maximum size indel to correct.", category: "advanced"}
+        maxSJoffset: {description: "Maximum distance from annotated splice junction to correct.", category: "advanced"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        correctMismatches: {description: "Set this to make TranscriptClean correct mismatches.", category: "common"}
+        correctIndels: {description: "Set this to make TranscriptClean correct indels.", category: "common"}
+        correctSJs: {description: "Set this to make TranscriptClean correct splice junctions.", category: "common"}
+        dryRun: {description: "TranscriptClean will read in the data but will not perform any correction.", category: "advanced"}
+        primaryOnly: {description: "Only output primary mappings of transcripts.", category: "advanced"}
+        canonOnly: {description: "Only output canonical transcripts and transcripts containing annotated noncanonical junctions.", category: "advanced"}
+        bufferSize: {description: "Number of lines to output to file at once by each thread during run.", category: "common"}
+        deleteTmp: {description: "Set this to remove the temporary directory generated by TranscriptClean.", category: "common"}
+        spliceJunctionAnnotation: {description: "Splice junction file.", category: "common"}
+        variantFile: {description: "VCF formatted file of variants.", category: "common"}
+        cores: {description: "The number of cores to be used.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+
+        # outputs
+        outputTranscriptCleanFasta: {description: "Fasta file containing corrected reads."}
+        outputTranscriptCleanLog: {description: "Log file of TranscriptClean run."}
+        outputTranscriptCleanSAM: {description: "SAM file containing corrected aligned reads."}
+        outputTranscriptCleanTElog: {description: "TE log file of TranscriptClean run."}
     }
 }
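
For the helper tasks the "cores" input is gone entirely, while TranscriptClean itself still accepts it. A minimal sketch, with hypothetical paths, of a call that overrides the advanced resource inputs:

```wdl
version 1.0

import "transcriptclean.wdl" as transcriptclean

workflow TranscriptCleanExample {
    call transcriptclean.TranscriptClean {
        input:
            # Required inputs, per the parameter_meta above.
            SAMfile = "aligned.sam",
            referenceGenome = "reference.fasta",
            outputPrefix = "tc/sample1",
            # Advanced resource inputs; TranscriptClean still takes cores.
            cores = 4,
            memory = "32G"
    }
}
```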
diff --git a/vardict.wdl b/vardict.wdl
index 85e5fd2baf38c29a48e16cc06aa5b4c5ae1d495f..ed9ee22ddbc6595d7ff4dcf3d16c23e0cb0308e0 100644
--- a/vardict.wdl
+++ b/vardict.wdl
@@ -69,4 +69,34 @@ task VarDict {
         memory: memory
         docker: dockerImage
     }
+
+    parameter_meta {
+        tumorSampleName: {description: "The name of the tumor/case sample.", category: "required"}
+        tumorBam: {description: "The tumor/case sample's BAM file.", category: "required"}
+        tumorBamIndex: {description: "The index for the tumor/case sample's BAM file.", category: "required"}
+        normalSampleName: {description: "The name of the normal/control sample.", category: "common"}
+        normalBam: {description: "The normal/control sample's BAM file.", category: "common"}
+        normalBamIndex: {description: "The index for the normal/control sample's BAM file.", category: "common"}
+        referenceFasta: {description: "The reference fasta file.", category: "required"}
+        referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
+        bedFile: {description: "A bed file describing the regions to operate on. These regions must be below 1e6 bases in size.", category: "required"}
+        outputVcf: {description: "The location to write the output VCF file to.", category: "required"}
+        chromosomeColumn: {description: "Equivalent to vardict-java's `-c` option.", category: "advanced"}
+        startColumn: {description: "Equivalent to vardict-java's `-S` option.", category: "advanced"}
+        endColumn: {description: "Equivalent to vardict-java's `-E` option.", category: "advanced"}
+        geneColumn: {description: "Equivalent to vardict-java's `-g` option.", category: "advanced"}
+        outputCandidateSomaticOnly: {description: "Equivalent to var2vcf_paired.pl or var2vcf_valid.pl's `-M` flag.", category: "advanced"}
+        outputAllVariantsAtSamePosition: {description: "Equivalent to var2vcf_paired.pl or var2vcf_valid.pl's `-A` flag.", category: "advanced"}
+        mappingQuality: {description: "Equivalent to var2vcf_paired.pl or var2vcf_valid.pl's `-Q` option.", category: "advanced"}
+        minimumTotalDepth: {description: "Equivalent to var2vcf_paired.pl or var2vcf_valid.pl's `-d` option.", category: "advanced"}
+        minimumVariantDepth: {description: "Equivalent to var2vcf_paired.pl or var2vcf_valid.pl's `-v` option.", category: "advanced"}
+        minimumAlleleFrequency: {description: "Equivalent to var2vcf_paired.pl or var2vcf_valid.pl's `-f` option.", category: "advanced"}
+
+        threads: {description: "The number of threads to use.", category: "advanced"}
+        memory: {description: "The amount of memory this job will use.", category: "advanced"}
+        javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
+                  category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
+                      category: "advanced"}
+    }
 }
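
The required inputs above are enough for a tumor-only run; the normal/control inputs are "common" and enable paired calling. A minimal sketch with hypothetical paths:

```wdl
version 1.0

import "vardict.wdl" as vardict

workflow VarDictExample {
    call vardict.VarDict {
        input:
            # Required inputs, per the parameter_meta above.
            tumorSampleName = "tumor1",
            tumorBam = "tumor1.bam",
            tumorBamIndex = "tumor1.bam.bai",
            referenceFasta = "reference.fasta",
            referenceFastaFai = "reference.fasta.fai",
            bedFile = "regions.bed",  # each region must be below 1e6 bases
            outputVcf = "vardict/tumor1.vcf"
    }
}
```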