diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1d156f54010be0960b81d9325973f3b5ecbb7c67..95e479c8b9c5410eb21a6f3d14ef3372362d03b8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,9 @@ that users understand how the changes affect the new version.
 
 version 2.2.0-dev
 ---------------------------
++ Update the command section syntax for Minimap2, Talon, TranscriptClean and Centrifuge.
++ Add CCS workflow WDL files (ccs.wdl, lima.wdl, isoseq3.wdl).
++ Update TALON version to 4.4.2.
 + The statsPrefix input for umitools dedup is now optional.
 + Allow setting the `--emit-ref-confidence` flag for HaplotypeCaller.
 + Add `--output-mode` flag to HaplotypeCaller.
@@ -18,13 +21,13 @@ version 2.2.0-dev
 + Added gatk.SelectVariants and gatk.VariantFiltration tasks. 
 + Fixed a bug where the output directory was not created for bwa.Kit.
 + Add vt task for variants normalization and decomposition.
-+ Update WDL task Picard (Add task RenameSample)
-+ Update WDL task Samtools (Add task FilterShortReadsBam)
-+ Add WDL task for BCFtools (bcf to vcf)
-+ Add WDL task for SURVIVOR (merge)
-+ Update WDL task Manta (Add germline SV calling)
-+ Add WDL task for Delly
-+ Add WDL task for Clever (and Mate-Clever)
++ Update WDL task Picard (Add task RenameSample).
++ Update WDL task Samtools (Add task FilterShortReadsBam).
++ Add WDL task for BCFtools (bcf to vcf).
++ Add WDL task for SURVIVOR (merge).
++ Update WDL task Manta (Add germline SV calling).
++ Add WDL task for Delly.
++ Add WDL task for Clever (and Mate-Clever).
 + Add proper copyright headers to all WDL files. So the free software license
   is clear to end users who wish to adapt and modify.
 + Add pedigree input for HaplotypeCaller and GenotypeGVCFs.
@@ -43,7 +46,7 @@ version 2.2.0-dev
 + Update parameter_meta for TALON, Centrifuge and Minimap2.
 + Centrifuge: Fix issue where Centrifuge Inspect did not get the correct index files location.
 + Add `minimumContigLength` input to PlotDenoisedCopyRatios and PlotModeledSegments.
-+ Add `commonVariantSitesIndex` input to CollectAllelicCounts
++ Add `commonVariantSitesIndex` input to CollectAllelicCounts.
 + Centrifuge: Fix issue where Centrifuge could not locate index files.
 + Increase default memory of BWA mem to 32G (was 16G).
 + Add `memory` input to fastqc task.
@@ -78,9 +81,9 @@ version 2.2.0-dev
 + Removed the "extraArgs" input from FilterMutectCalls.
 + Removed unused "verbose" and "quiet" inputs from multiqc.
 + Added parameter_meta sections to a variety of tasks.
-+ Picard's BedToIntervalList outputPath input is now optional (with a default of "regions.interval_list")
++ Picard's BedToIntervalList outputPath input is now optional (with a default of "regions.interval_list").
 + TALON: Fix SQLite error concerning database/disk space being full.
-+ Update htseq to default image version 0.11.2
++ Update htseq to default image version 0.11.2.
 + Update biowdl-input-converter in common.wdl to version 0.2.1.
 + Update TALON section to now include the new annotation file output, and add config file creation to the TALON task.
 + Removed unused inputs (trimPrimer and format) for cutadapt.
diff --git a/ccs.wdl b/ccs.wdl
new file mode 100644
index 0000000000000000000000000000000000000000..2ae54cb74de81d412b9649646b39ce557069b0b0
--- /dev/null
+++ b/ccs.wdl
@@ -0,0 +1,89 @@
+version 1.0
+
+# Copyright (c) 2020 Sequencing Analysis Support Core - Leiden University Medical Center
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+task CCS {
+    input {
+        Int minPasses = 3
+        Int minLength = 10
+        Int maxLength = 50000
+        Boolean byStrand = false
+        Float minReadQuality = 0.99
+        String logLevel = "WARN"
+        File subreadsFile
+        String outputPrefix
+        
+        Int cores = 4
+        String memory = "10G"
+        String dockerImage = "quay.io/biocontainers/pbccs:4.2.0--0"
+    }
+
+    command {
+        set -e
+        mkdir -p "$(dirname ~{outputPrefix})"
+        ccs \
+        --min-passes ~{minPasses} \
+        --min-length ~{minLength} \
+        --max-length ~{maxLength} \
+        ~{true="--by-strand" false="" byStrand} \
+        --min-rq ~{minReadQuality} \
+        --log-level ~{logLevel} \
+        --num-threads ~{cores} \
+        ~{"--report-file " + outputPrefix + ".ccs.report.txt"} \
+        ~{"--log-file " + outputPrefix + ".ccs.stderr.log"} \
+        ~{subreadsFile} \
+        ~{outputPrefix + ".ccs.bam"}
+    }
+
+    output {
+        File outputCCSfile = outputPrefix + ".ccs.bam"
+        File outputCCSindexFile = outputPrefix + ".ccs.bam.pbi"
+        File outputReportFile = outputPrefix + ".ccs.report.txt"
+        File outputSTDERRfile = outputPrefix + ".ccs.stderr.log"
+    }
+
+    runtime {
+        cpu: cores
+        memory: memory
+        docker: dockerImage
+    }
+
+    parameter_meta {
+        # inputs
+        minPasses: {description: "Minimum number of full-length subreads required to generate CCS for a ZMW.", category: "advanced"}
+        minLength: {description: "Minimum draft length before polishing.", category: "advanced"}
+        maxLength: {description: "Maximum draft length before polishing.", category: "advanced"}
+        byStrand: {description: "Generate a consensus for each strand.", category: "advanced"}
+        minReadQuality: {description: "Minimum predicted accuracy in [0, 1].", category: "common"}
+        logLevel: {description: "Set log level. Valid choices: (TRACE, DEBUG, INFO, WARN, FATAL).", category: "advanced"}
+        subreadsFile: {description: "Subreads input file.", category: "required"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        cores: {description: "The number of cores to be used.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
+
+        # outputs
+        outputCCSfile: {description: "Consensus reads output file."}
+        outputCCSindexFile: {description: "Index of consensus reads output file."}
+        outputReportFile: {description: "CCS results report file."}
+        outputSTDERRfile: {description: "CCS STDERR log file."}
+    }
+}
diff --git a/centrifuge.wdl b/centrifuge.wdl
index c5fd66f55e5216e5210ab5e0fad2664c096c878c..1fbc7be11726db716b2b9edd6b9b7b5528c74ac3 100644
--- a/centrifuge.wdl
+++ b/centrifuge.wdl
@@ -44,15 +44,15 @@ task Build {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         centrifuge-build \
-        ~{"--threads " + threads} \
+        --threads ~{threads} \
         ~{true="--nodc" false="" disableDifferenceCover} \
         ~{"--offrate " + offrate} \
         ~{"--ftabchars " + ftabChars} \
         ~{"--kmer-count " + kmerCount} \
         ~{"--size-table " + sizeTable} \
-        ~{"--conversion-table " + conversionTable} \
-        ~{"--taxonomy-tree " + taxonomyTree} \
-        ~{"--name-table " + nameTable} \
+        --conversion-table ~{conversionTable} \
+        --taxonomy-tree ~{taxonomyTree} \
+        --name-table ~{nameTable} \
         ~{referenceFile} \
         ~{outputPrefix + "/" + indexBasename}
     }
@@ -123,9 +123,9 @@ task Classify {
         centrifuge \
         ~{inputFormatOptions[inputFormat]} \
         ~{true="--phred64" false="--phred33" phred64} \
-        ~{"--min-hitlen " + minHitLength} \
+        --min-hitlen ~{minHitLength} \
         ~{"--met-file " + outputPrefix + "_alignment_metrics.tsv"} \
-        ~{"--threads " + threads} \
+        --threads ~{threads} \
         ~{"--trim5 " + trim5} \
         ~{"--trim3 " + trim3} \
         ~{"-k " + reportMaxDistinct} \
diff --git a/isoseq3.wdl b/isoseq3.wdl
new file mode 100644
index 0000000000000000000000000000000000000000..d7111c9aa5e0cd509c1cdf80719ebc5d4fd94fca
--- /dev/null
+++ b/isoseq3.wdl
@@ -0,0 +1,86 @@
+version 1.0
+
+# Copyright (c) 2020 Sequencing Analysis Support Core - Leiden University Medical Center
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+task Refine {
+    input {
+        Int minPolyAlength = 20
+        Boolean requirePolyA = false
+        String logLevel = "WARN"
+        File inputBamFile
+        File primerFile
+        String outputPrefix
+
+        Int cores = 4
+        String memory = "10G"
+        String dockerImage = "quay.io/biocontainers/isoseq3:3.3.0--0"
+    }
+
+    command {
+        set -e
+        mkdir -p "$(dirname ~{outputPrefix})"
+        isoseq3 refine \
+        --min-polya-length ~{minPolyAlength} \
+        ~{true="--require-polya" false="" requirePolyA} \
+        --log-level ~{logLevel} \
+        --num-threads ~{cores} \
+        ~{"--log-file " + outputPrefix + ".flnc.stderr.log"} \
+        ~{inputBamFile} \
+        ~{primerFile} \
+        ~{outputPrefix + ".flnc.bam"}
+    }
+
+    output {
+        File outputFLfile = outputPrefix + ".flnc.bam"
+        File outputFLindexFile = outputPrefix + ".flnc.bam.pbi"
+        File outputSTDERRfile = outputPrefix + ".flnc.stderr.log"
+        File outputConsensusReadsetFile = outputPrefix + ".consensusreadset.xml"
+        File outputFilterSummaryFile = outputPrefix + ".filter_summary.json"
+        File outputReportFile = outputPrefix + ".report.csv"
+    }
+
+    runtime {
+        cpu: cores
+        memory: memory
+        docker: dockerImage
+    }
+
+    parameter_meta {
+        # inputs
+        minPolyAlength: {description: "Minimum poly(A) tail length.", category: "advanced"}
+        requirePolyA: {description: "Require FL reads to have a poly(A) tail and remove it.", category: "common"}
+        logLevel: {description: "Set log level. Valid choices: (TRACE, DEBUG, INFO, WARN, FATAL).", category: "advanced"}
+        inputBamFile: {description: "BAM input file.", category: "required"}
+        primerFile: {description: "Barcode/primer fasta file.", category: "required"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        cores: {description: "The number of cores to be used.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
+
+        # outputs
+        outputFLfile: {description: "Filtered reads output file."}
+        outputFLindexFile: {description: "Index of filtered reads output file."}
+        outputSTDERRfile: {description: "Refine STDERR log file."}
+        outputConsensusReadsetFile: {description: "Refine consensus readset XML file."}
+        outputFilterSummaryFile: {description: "Refine summary file."}
+        outputReportFile: {description: "Refine report file."}
+    }
+}
diff --git a/lima.wdl b/lima.wdl
new file mode 100644
index 0000000000000000000000000000000000000000..52f169706b155daad6fcfb89925ffa7f745afe53
--- /dev/null
+++ b/lima.wdl
@@ -0,0 +1,147 @@
+version 1.0
+
+# Copyright (c) 2020 Sequencing Analysis Support Core - Leiden University Medical Center
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+task Lima {
+    input {
+        String libraryDesign = "same"
+        Boolean scoreFullPass = false
+        Int maxScoredBarcodePairs = 0
+        Int maxScoredBarcodes = 0
+        Int maxScoredAdapters = 0
+        Int minPasses = 0
+        Int minLength = 50
+        Int maxInputLength = 0
+        Float minRefSpan = 0.5
+        Int minScoringRegion = 1
+        Int minScore = 0
+        Int minEndScore = 0
+        Int minSignalIncrease = 10
+        Int minScoreLead = 10
+        Boolean ccsMode = false
+        Boolean splitBamNamed = false
+        Float scoredAdapterRatio = 0.25
+        Int peek = 0
+        Int guess = 0
+        Int guessMinCount = 0
+        Boolean peekGuess = false
+        String logLevel = "WARN"
+        File inputBamFile
+        File barcodeFile
+        String outputPrefix
+        
+        Int cores = 4
+        String memory = "10G"
+        String dockerImage = "quay.io/biocontainers/lima:1.11.0--0"
+    }
+
+    Map[String, String] libraryDesignOptions = {"same": "--same", "different": "--different", "neighbors": "--neighbors"}
+
+    command {
+        set -e
+        mkdir -p "$(dirname ~{outputPrefix})"
+        lima \
+        ~{libraryDesignOptions[libraryDesign]} \
+        ~{true="--score-full-pass" false="" scoreFullPass} \
+        --max-scored-barcode-pairs ~{maxScoredBarcodePairs} \
+        --max-scored-barcodes ~{maxScoredBarcodes} \
+        --max-scored-adapters ~{maxScoredAdapters} \
+        --min-passes ~{minPasses} \
+        --min-length ~{minLength} \
+        --max-input-length ~{maxInputLength} \
+        --min-ref-span ~{minRefSpan} \
+        --min-scoring-regions ~{minScoringRegion} \
+        --min-score ~{minScore} \
+        --min-end-score ~{minEndScore} \
+        --min-signal-increase ~{minSignalIncrease} \
+        --min-score-lead ~{minScoreLead} \
+        ~{true="--ccs" false="" ccsMode} \
+        ~{true="--split-bam-named" false="" splitBamNamed} \
+        --scored-adapter-ratio ~{scoredAdapterRatio} \
+        --peek ~{peek} \
+        --guess ~{guess} \
+        --guess-min-count ~{guessMinCount} \
+        ~{true="--peek-guess" false="" peekGuess} \
+        --log-level ~{logLevel} \
+        --num-threads ~{cores} \
+        ~{"--log-file " + outputPrefix + ".fl.stderr.log"} \
+        ~{inputBamFile} \
+        ~{barcodeFile} \
+        ~{outputPrefix + ".fl.bam"}
+    }
+
+    output {
+        File outputFLfile = outputPrefix + ".fl.bam"
+        File outputFLindexFile = outputPrefix + ".fl.bam.pbi"
+        File outputSTDERRfile = outputPrefix + ".fl.stderr.log"
+        File outputJSONfile = outputPrefix + ".fl.json"
+        File outputCountsFile = outputPrefix + ".fl.lima.counts"
+        File outputReportFile = outputPrefix + ".fl.lima.report"
+        File outputSummaryFile = outputPrefix + ".fl.lima.summary"
+    }
+
+    runtime {
+        cpu: cores
+        memory: memory
+        docker: dockerImage
+    }
+
+    parameter_meta {
+        # inputs
+        libraryDesign: {description: "Barcode structure of the library design.", category: "common"}
+        scoreFullPass: {description: "Only use subreads flanked by adapters for barcode identification.", category: "advanced"}
+        maxScoredBarcodePairs: {description: "Only use up to N barcode pair regions to find the barcode, 0 means use all.", category: "advanced"}
+        maxScoredBarcodes: {description: "Analyze at maximum the provided number of barcodes per ZMW, 0 means deactivated.", category: "advanced"}
+        maxScoredAdapters: {description: "Analyze at maximum the provided number of adapters per ZMW, 0 means deactivated.", category: "advanced"}
+        minPasses: {description: "Minimal number of full passes.", category: "common"}
+        minLength: {description: "Minimum sequence length after clipping.", category: "common"}
+        maxInputLength: {description: "Maximum input sequence length, 0 means deactivated.", category: "advanced"}
+        minRefSpan: {description: "Minimum reference span relative to the barcode length.", category: "advanced"}
+        minScoringRegion: {description: "Minimum number of barcode regions with sufficient relative span to the barcode length.", category: "advanced"}
+        minScore: {description: "Reads below the minimum barcode score are removed from downstream analysis.", category: "common"}
+        minEndScore: {description: "Minimum end barcode score threshold is applied to the individual leading and trailing ends.", category: "advanced"}
+        minSignalIncrease: {description: "The minimal score difference, between first and combined, required to call a barcode pair different.", category: "advanced"}
+        minScoreLead: {description: "The minimal score lead required to call a barcode pair significant.", category: "common"}
+        ccsMode: {description: "CCS mode, use optimal alignment options.", category: "common"}
+        splitBamNamed: {description: "Split BAM output by resolved barcode pair name.", category: "common"}
+        scoredAdapterRatio: {description: "Minimum ratio of scored vs sequenced adapters.", category: "advanced"}
+        peek: {description: "Demux the first N ZMWs and return the mean score, 0 means peeking deactivated.", category: "advanced"}
+        guess: {description: "Try to guess the used barcodes, using the provided mean score threshold, 0 means guessing deactivated.", category: "advanced"}
+        guessMinCount: {description: "Minimum number of ZMWs observed to whitelist barcodes.", category: "advanced"}
+        peekGuess: {description: "Try to infer the used barcodes subset, by peeking at the first 50,000 ZMWs.", category: "advanced"}
+        logLevel: {description: "Set log level. Valid choices: (TRACE, DEBUG, INFO, WARN, FATAL).", category: "advanced"}
+        inputBamFile: {description: "BAM input file.", category: "required"}
+        barcodeFile: {description: "Barcode/primer fasta file.", category: "required"}
+        outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
+        cores: {description: "The number of cores to be used.", category: "advanced"}
+        memory: {description: "The amount of memory available to the job.", category: "advanced"}
+        dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
+
+        # outputs
+        outputFLfile: {description: "Demultiplexed reads output file."}
+        outputFLindexFile: {description: "Index of demultiplexed reads output file."}
+        outputSTDERRfile: {description: "Lima STDERR log file."}
+        outputJSONfile: {description: "Lima JSON file."}
+        outputCountsFile: {description: "Lima counts file."}
+        outputReportFile: {description: "Lima report file."}
+        outputSummaryFile: {description: "Lima summary file."}
+    }
+}
diff --git a/minimap2.wdl b/minimap2.wdl
index 32c0666af206f36f2d4326ba4acdc6aa183af468..fd28d4a9b0eed78dd8c52db211f1eea81246f1da 100644
--- a/minimap2.wdl
+++ b/minimap2.wdl
@@ -40,10 +40,10 @@ task Indexing {
         mkdir -p "$(dirname ~{outputPrefix})"
         minimap2 \
         ~{true="-H" false="" useHomopolymerCompressedKmer} \
-        ~{"-k " + kmerSize} \
-        ~{"-w " + minimizerWindowSize} \
+        -k ~{kmerSize} \
+        -w ~{minimizerWindowSize} \
         ~{"-d " + outputPrefix + ".mmi"} \
-        ~{"-t " + cores} \
+        -t ~{cores} \
         ~{"-I " + splitIndex} \
         ~{referenceFile}
     }
@@ -103,14 +103,14 @@ task Mapping {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         minimap2 \
-        ~{"-x " + presetOption} \
-        ~{"-k " + kmerSize} \
+        -x ~{presetOption} \
+        -k ~{kmerSize} \
         ~{true="-X" false="" skipSelfAndDualMappings} \
         ~{true="-a" false="" outputSAM} \
-        ~{"-o " + outputPrefix} \
+        -o ~{outputPrefix} \
         ~{true="--MD" false="" addMDtagToSAM} \
         --secondary=~{true="yes" false="no" secondaryAlignment} \
-        ~{"-t " + cores} \
+        -t ~{cores} \
         ~{"-G " + maxIntronLength} \
         ~{"-F " + maxFragmentLength} \
         ~{"-N " + retainMaxSecondaryAlignments} \
diff --git a/talon.wdl b/talon.wdl
index 050189629a8af5d6e34b630da8d3ccf321ed01bb..6ddb841e12a76a99ba2b94f776f91c3315f8de38 100644
--- a/talon.wdl
+++ b/talon.wdl
@@ -31,17 +31,17 @@ task CreateAbundanceFileFromDatabase {
         File? datasetsFile
 
         String memory = "4G"
-        String dockerImage = "biocontainers/talon:v4.4.1_cv1"
+        String dockerImage = "biocontainers/talon:v4.4.2_cv1"
     }
 
     command {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         talon_abundance \
-        ~{"--db=" + databaseFile} \
-        ~{"-a " + annotationVersion} \
-        ~{"-b " + genomeBuild} \
-        ~{"--o=" + outputPrefix} \
+        --db=~{databaseFile} \
+        -a ~{annotationVersion} \
+        -b ~{genomeBuild} \
+        --o=~{outputPrefix} \
         ~{"--whitelist=" + whitelistFile} \
         ~{"-d " + datasetsFile}
     }
@@ -84,19 +84,19 @@ task CreateGtfFromDatabase {
         File? datasetFile
 
         String memory = "4G"
-        String dockerImage = "biocontainers/talon:v4.4.1_cv1"
+        String dockerImage = "biocontainers/talon:v4.4.2_cv1"
     }
 
     command {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         talon_create_GTF \
-        ~{"--db=" + databaseFile} \
-        ~{"-b " + genomeBuild} \
-        ~{"-a " + annotationVersion} \
-        ~{"--o=" + outputPrefix} \
-        ~{"--whitelist=" + whitelistFile} \
+        --db=~{databaseFile} \
+        -b ~{genomeBuild} \
+        -a ~{annotationVersion} \
+        --o=~{outputPrefix} \
         ~{true="--observed" false="" observedInDataset} \
+        ~{"--whitelist=" + whitelistFile} \
         ~{"-d " + datasetFile}
     }
 
@@ -135,15 +135,15 @@ task FilterTalonTranscripts {
         File? pairingsFile
 
         String memory = "4G"
-        String dockerImage = "biocontainers/talon:v4.4.1_cv1"
+        String dockerImage = "biocontainers/talon:v4.4.2_cv1"
     }
 
     command {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         talon_filter_transcripts \
-        ~{"--db=" + databaseFile} \
-        ~{"-a " + annotationVersion} \
+        --db=~{databaseFile} \
+        -a ~{annotationVersion} \
         ~{"--o=" + outputPrefix + "_whitelist.csv"} \
         ~{"-p " + pairingsFile}
     }
@@ -180,16 +180,16 @@ task GetReadAnnotations {
         File? datasetFile
 
         String memory = "4G"
-        String dockerImage = "biocontainers/talon:v4.4.1_cv1"
+        String dockerImage = "biocontainers/talon:v4.4.2_cv1"
     }
 
     command {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         talon_fetch_reads \
-        ~{"--db " + databaseFile} \
-        ~{"--build " + genomeBuild} \
-        ~{"--o " + outputPrefix} \
+        --db ~{databaseFile} \
+        --build ~{genomeBuild} \
+        --o ~{outputPrefix} \
         ~{"--datasets " + datasetFile}
     }
 
@@ -228,21 +228,21 @@ task InitializeTalonDatabase {
         String outputPrefix
 
         String memory = "10G"
-        String dockerImage = "biocontainers/talon:v4.4.1_cv1"
+        String dockerImage = "biocontainers/talon:v4.4.2_cv1"
     }
 
     command {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         talon_initialize_database \
-        ~{"--f=" + GTFfile} \
-        ~{"--g=" + genomeBuild} \
-        ~{"--a=" + annotationVersion} \
-        ~{"--l=" +  minimumLength} \
-        ~{"--idprefix=" + novelIDprefix} \
-        ~{"--5p=" + cutoff5p} \
-        ~{"--3p=" + cutoff3p} \
-        ~{"--o=" + outputPrefix}
+        --f=~{GTFfile} \
+        --g=~{genomeBuild} \
+        --a=~{annotationVersion} \
+        --l=~{minimumLength} \
+        --idprefix=~{novelIDprefix} \
+        --5p=~{cutoff5p} \
+        --3p=~{cutoff3p} \
+        --o=~{outputPrefix}
     }
 
     output {
@@ -277,13 +277,13 @@ task ReformatGtf {
         File GTFfile
 
         String memory = "4G"
-        String dockerImage = "biocontainers/talon:v4.4.1_cv1"
+        String dockerImage = "biocontainers/talon:v4.4.2_cv1"
     }
 
     command {
         set -e
         talon_reformat_gtf \
-        ~{"-gtf " + GTFfile}
+        -gtf ~{GTFfile}
     }
 
     output {
@@ -315,16 +315,16 @@ task SummarizeDatasets {
         File? datasetGroupsCSV
 
         String memory = "4G"
-        String dockerImage = "biocontainers/talon:v4.4.1_cv1"
+        String dockerImage = "biocontainers/talon:v4.4.2_cv1"
     }
 
     command {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         talon_summarize \
-        ~{"--db " + databaseFile} \
+        --db ~{databaseFile} \
         ~{true="--verbose" false="" setVerbose} \
-        ~{"--o " + outputPrefix} \
+        --o ~{outputPrefix} \
         ~{"--groups " + datasetGroupsCSV}
     }
 
@@ -364,7 +364,7 @@ task Talon {
 
         Int cores = 4
         String memory = "25G"
-        String dockerImage = "biocontainers/talon:v4.4.1_cv1"
+        String dockerImage = "biocontainers/talon:v4.4.2_cv1"
     }
 
     command <<<
@@ -381,11 +381,11 @@ task Talon {
         done
         talon \
         ~{"--f " + outputPrefix + "/talonConfigFile.csv"} \
-        ~{"--db " + databaseFile} \
-        ~{"--build " + genomeBuild} \
-        ~{"--threads " + cores} \
-        ~{"--cov " + minimumCoverage} \
-        ~{"--identity " + minimumIdentity} \
+        --db ~{databaseFile} \
+        --build ~{genomeBuild} \
+        --threads ~{cores} \
+        --cov ~{minimumCoverage} \
+        --identity ~{minimumIdentity} \
         ~{"--o " + outputPrefix + "/run"}
     >>>
 
diff --git a/transcriptclean.wdl b/transcriptclean.wdl
index e288e316736c5a10cd7b01407d53124785b21357..68bcbf240ca1f5845c0848ca07b477986794b611 100644
--- a/transcriptclean.wdl
+++ b/transcriptclean.wdl
@@ -35,9 +35,9 @@ task GetSJsFromGtf {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         get_SJs_from_gtf \
-        ~{"--f=" + GTFfile} \
-        ~{"--g=" + genomeFile} \
-        ~{"--minIntronSize=" + minIntronSize} \
+        --f=~{GTFfile} \
+        --g=~{genomeFile} \
+        --minIntronSize=~{minIntronSize} \
         ~{"--o=" + outputPrefix + ".tsv"}
     }
 
@@ -131,19 +131,19 @@ task TranscriptClean {
         set -e
         mkdir -p "$(dirname ~{outputPrefix})"
         TranscriptClean \
-        ~{"-s " + SAMfile} \
-        ~{"-g " + referenceGenome} \
-        ~{"-t " + cores} \
-        ~{"--maxLenIndel=" + maxLenIndel} \
-        ~{"--maxSJOffset=" + maxSJoffset} \
-        ~{"-o " + outputPrefix} \
+        -s ~{SAMfile} \
+        -g ~{referenceGenome} \
+        -t ~{cores} \
+        --maxLenIndel=~{maxLenIndel} \
+        --maxSJOffset=~{maxSJoffset} \
+        -o ~{outputPrefix} \
         ~{true="-m true" false="-m false" correctMismatches} \
         ~{true="-i true" false="-i false" correctIndels} \
         ~{true="--correctSJs=true" false="--correctSJs=false" correctSJs} \
         ~{true="--dryRun" false="" dryRun} \
         ~{true="--primaryOnly" false="" primaryOnly} \
         ~{true="--canonOnly" false="" canonOnly} \
-        ~{"--bufferSize=" + bufferSize} \
+        --bufferSize=~{bufferSize} \
         ~{true="--deleteTmp" false="" deleteTmp} \
         ~{"-j " + spliceJunctionAnnotation} \
         ~{"-v " + variantFile}