Skip to content
Snippets Groups Projects
Commit 589712ab authored by JasperBoom's avatar JasperBoom
Browse files

Merge branch 'develop' of https://github.com/biowdl/tasks into BIOWDL-380

parents 4255eca7 7f1545f4
Branches KG-295
No related tags found
No related merge requests found
......@@ -11,6 +11,15 @@ that users understand how the changes affect the new version.
version 2.2.0-dev
---------------------------
+ Removed unused "cores" inputs from transcriptclean tasks.
+ Removed unused "cores" inputs from talon tasks.
+ Removed unused "threads" input from ModifyStrelka.
+ Removed the "installDir" inputs from the somaticseq tasks.
+ Removed the "installDir" input from CombineVariants.
+ Removed the "extraArgs" input from FilterMutectCalls.
+ Removed unused "verbose" and "quiet" inputs from multiqc.
+ Added parameter_meta sections to a variety of tasks.
+ Picard's BedToIntervalList outputPath input is now optional (with a default of "regions.interval_list")
+ TALON: Fix SQLite error concerning database/disk space being full.
+ Update htseq to default image version 0.11.2
+ Update biowdl-input-converter in common.wdl to version 0.2.1.
......
......@@ -36,6 +36,20 @@ task CPAT {
runtime {
docker: dockerImage
}
parameter_meta {
gene: {description: "Equivalent to CPAT's `--gene` option.", category: "required"}
outFilePath: {description: "Equivalent to CPAT's `--outfile` option.", category: "required"}
hex: {description: "Equivalent to CPAT's `--hex` option.", category: "required"}
logitModel: {description: "Equivalent to CPAT's `--logitModel` option.", category: "required"}
referenceGenome: {description: "Equivalent to CPAT's `--ref` option.", category: "advanced"}
referenceGenomeIndex: {description: "The index of the reference. Should be added as input if CPAT should not index the reference genome.",
category: "advanced"}
startCodons: {description: "Equivalent to CPAT's `--start` option.", category: "advanced"}
stopCodons: {description: "Equivalent to CPAT's `--stop` option.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
# There is also make_hexamer_tab.py and make_logitModel.py
......
......@@ -226,6 +226,12 @@ task ReorderGlobbedScatters {
# 4 gigs of memory to be able to build the docker image in singularity
memory: "4G"
}
parameter_meta {
scatters: {description: "The files which should be ordered.", category: "required"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task ScatterRegions {
......@@ -268,6 +274,25 @@ task ScatterRegions {
docker: dockerImage
memory: memory
}
parameter_meta {
referenceFasta: {description: "The reference fasta file.", category: "required"}
referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
category: "required"}
scatterSize: {description: "Equivalent to biopet scatterregions' `-s` option.", category: "common"}
regions: {description: "The regions to be scattered.", category: "advanced"}
notSplitContigs: {description: "Equivalent to biopet scatterregions' `--notSplitContigs` flag.",
category: "advanced"}
bamFile: {description: "Equivalent to biopet scatterregions' `--bamfile` option.",
category: "advanced"}
bamIndex: {description: "The index for the bamfile given through bamFile.", category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task ValidateAnnotation {
......
......@@ -52,4 +52,17 @@ task InputConverter {
runtime {
docker: dockerImage
}
parameter_meta {
samplesheet: {description: "The samplesheet to be processed.", category: "required"}
outputFile: {description: "The location the JSON representation of the samplesheet should be written to.",
category: "advanced"}
skipFileCheck: {description: "Whether or not the existence of the files mentioned in the samplesheet should be checked.",
category: "advanced"}
checkFileMd5sums: {description: "Whether or not the MD5 sums of the files mentioned in the samplesheet should be checked.",
category: "advanced"}
old: {description: "Whether or not the old samplesheet format should be used.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
......@@ -80,6 +80,27 @@ task Bowtie {
memory: memory
docker: dockerImage
}
parameter_meta {
readsUpstream: {description: "The first-/single-end fastq files.", category: "required"}
readsDownstream: {description: "The second-end fastq files.", category: "common"}
outputPath: {description: "The location the output BAM file should be written to.", category: "common"}
indexFiles: {description: "The index files for bowtie.", category: "required"}
seedmms: {description: "Equivalent to bowtie's `--seedmms` option.", category: "advanced"}
seedlen: {description: "Equivalent to bowtie's `--seedlen` option.", category: "advanced"}
k: {description: "Equivalent to bowtie's `-k` option.", category: "advanced"}
best: {description: "Equivalent to bowtie's `--best` flag.", category: "advanced"}
strata: {description: "Equivalent to bowtie's `--strata` flag.", category: "advanced"}
allowContain: {description: "Equivalent to bowtie's `--allow-contain` flag.", category: "advanced"}
samRG: {description: "Equivalent to bowtie's `--sam-RG` option.", category: "advanced"}
picardXmx: {description: "The maximum memory available to the picard (used for sorting the output). Should be lower than `memory` to accommodate JVM overhead and bowtie's memory usage.",
category: "advanced"}
threads: {description: "The number of threads to use.", category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
struct BowtieIndex {
......
......@@ -42,6 +42,21 @@ task Mem {
memory: memory
docker: dockerImage
}
parameter_meta {
read1: {description: "The first or single end fastq file.", category: "required"}
read2: {description: "The second end fastq file.", category: "common"}
bwaIndex: {description: "The BWA index files.", category: "required"}
outputPath: {description: "The location the output BAM file should be written to.", category: "required"}
readgroup: {description: "The readgroup to be assigned to the reads. See BWA mem's `-R` option.", category: "common"}
threads: {description: "The number of threads to use.", category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
picardXmx: {description: "The maximum memory available to picard SortSam. Should be lower than `memory` to accommodate JVM overhead and BWA mem's memory usage.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task Kit {
......@@ -91,47 +106,20 @@ task Kit {
}
parameter_meta {
read1: {
description: "The first-end fastq file.",
category: "required"
}
read2: {
description: "The second-end fastq file.",
category: "common"
}
bwaIndex: {
description: "The BWA index, including a .alt file.",
category: "required"
}
outputPrefix: {
description: "The prefix of the output files, including any parent directories.",
category: "required"
}
readgroup: {
description: "A readgroup identifier.",
category: "common"
}
sixtyFour: {
description: "Whether or not the index uses the '.64' suffixes.",
category: "common"
}
threads: {
description: "The number of threads to use for alignment.",
category: "advanced"
}
sortThreads: {
description: "The number of threads to use for sorting.",
category: "advanced"
}
memory: {
description: "The amount of memory this job will use.",
category: "advanced"
}
dockerImage: {
description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"
}
# inputs
read1: {description: "The first-end fastq file.", category: "required"}
read2: {description: "The second-end fastq file.", category: "common"}
bwaIndex: {description: "The BWA index, including a .alt file.", category: "required"}
outputPrefix: {description: "The prefix of the output files, including any parent directories.", category: "required"}
readgroup: {description: "A readgroup identifier.", category: "common"}
sixtyFour: {description: "Whether or not the index uses the '.64' suffixes.", category: "common"}
threads: {description: "The number of threads to use for alignment.", category: "advanced"}
sortThreads: {description: "The number of threads to use for sorting.", category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
# outputs
outputBam: "The produced BAM file."
outputBamIndex: "The index of the produced BAM file."
}
......
......@@ -30,4 +30,14 @@ task ChunkedScatter {
memory: "4G"
docker: dockerImage
}
parameter_meta {
inputFile: {description: "Either a bed file describing regions of interest or a sequence dictionary.", category: "required"}
prefix: {description: "The prefix for the output files.", category: "advanced"}
chunkSize: {description: "Equivalent to chunked-scatter's `-c` option.", category: "advanced"}
overlap: {description: "Equivalent to chunked-scatter's `-o` option.", category: "advanced"}
minimumBasesPerFile: {description: "Equivalent to chunked-scatter's `-m` option.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
\ No newline at end of file
......@@ -44,49 +44,27 @@ task CollectColumns {
}
parameter_meta {
inputTables: {
description: "The tables from which columns should be taken.",
category: "required"
}
outputPath: {
description: "The path to which the output should be written.",
category: "required"
}
featureColumn: {
description: "Equivalent to the -f option of collect-columns.",
category: "common" # Should likely be controlled by the calling workflow
}
valueColumn: {
description: "Equivalent to the -c option of collect-columns.",
category: "common" # Should likely be controlled by the calling workflow
}
separator: {
description: "Equivalent to the -s option of collect-columns.",
category: "common" # Should likely be controlled by the calling workflow
}
sampleNames: {
description: "Equivalent to the -n option of collect-columns.",
category: "common" # Should likely be controlled by the calling workflow
}
header: {
description: "Equivalent to the -H flag of collect-columns.",
category: "common"
}
additionalAttributes: {
description: "Equivalent to the -a option of collect-columns.",
category: "advanced"
}
referenceGtf: {
description: "Equivalent to the -g option of collect-columns.",
category: "advanced"
}
featureAttribute: {
description: "Equivalent to the -F option of collect-columns.",
category: "advanced"
}
dockerImage: {
description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"
}
inputTables: {description: "The tables from which columns should be taken.",
category: "required"}
outputPath: {description: "The path to which the output should be written.",
category: "required"}
featureColumn: {description: "Equivalent to the -f option of collect-columns.",
category: "advanced"}
valueColumn: {description: "Equivalent to the -c option of collect-columns.",
category: "advanced"}
separator: {description: "Equivalent to the -s option of collect-columns.",
category: "advanced"}
sampleNames: {description: "Equivalent to the -n option of collect-columns.",
category: "advanced"}
header: {description: "Equivalent to the -H flag of collect-columns.",
category: "advanced"}
additionalAttributes: {description: "Equivalent to the -a option of collect-columns.",
category: "advanced"}
referenceGtf: {description: "Equivalent to the -g option of collect-columns.",
category: "advanced"}
featureAttribute: {description: "Equivalent to the -F option of collect-columns.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
\ No newline at end of file
......@@ -184,6 +184,13 @@ task YamlToJson {
runtime {
docker: dockerImage
}
parameter_meta {
yaml: {description: "The YAML file to convert.", category: "required"}
outputJson: {description: "The location the output JSON file should be written to.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
struct Reference {
......
This diff is collapsed.
......@@ -30,6 +30,7 @@ task GffCompare {
# Issue addressed at https://github.com/openwdl/wdl/pull/263
File? noneFile # This is a wdl workaround. Please do not assign!
}
# This allows for the creation of output directories
String dirPrefix = if defined(outputDir)
then select_first([outputDir]) + "/"
......@@ -91,4 +92,35 @@ task GffCompare {
runtime {
docker: dockerImage
}
parameter_meta {
inputGtfList: {description: "Equivalent to gffcompare's `-i` option.", category: "advanced"}
inputGtfFiles: {description: "The input GTF files.", category: "required"}
referenceAnnotation: {description: "The GTF file to compare with.", category: "required"}
outputDir: {description: "The location the output should be written.", category: "common"}
outPrefix: {description: "The prefix for the output.", category: "advanced"}
genomeSequences: {description: "Equivalent to gffcompare's `-s` option.", category: "advanced"}
maxDistanceFreeEndsTerminalExons: {description: "Equivalent to gffcompare's `-e` option.", category: "advanced"}
maxDistanceGroupingTranscriptStartSites: {description: "Equivalent to gffcompare's `-d` option.", category: "advanced"}
namePrefix: {description: "Equivalent to gffcompare's `-p` option.", category: "advanced"}
C: {description: "Equivalent to gffcompare's `-C` flag.", category: "advanced"}
A: {description: "Equivalent to gffcompare's `-A` flag.", category: "advanced"}
X: {description: "Equivalent to gffcompare's `-X` flag.", category: "advanced"}
K: {description: "Equivalent to gffcompare's `-K` flag.", category: "advanced"}
snCorrection: {description: "Equivalent to gffcompare's `-R` flag.", category: "advanced"}
precisionCorrection: {description: "Equivalent to gffcompare's `-Q` flag.", category: "advanced"}
discardSingleExonTransfragsAndReferenceTranscripts: {description: "Equivalent to gffcompare's `-M` flag.", category: "advanced"}
discardSingleExonReferenceTranscripts: {description: "Equivalent to gffcompare's `-N` flag.", category: "advanced"}
noTmap: {description: "Equivalent to gffcompare's `-T` flag.", category: "advanced"}
verbose: {description: "Equivalent to gffcompare's `-V` flag.", category: "advanced"}
debugMode: {description: "Equivalent to gffcompare's `-D` flag.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
meta {
WDL_AID: {
exclude: ["noneFile"]
}
}
}
\ No newline at end of file
......@@ -43,4 +43,17 @@ task GffRead {
runtime {
docker: dockerImage
}
parameter_meta {
inputGff: {description: "The input GFF file.", category: "required"}
genomicSequence: {description: "The genome.", category: "required"}
genomicIndex: {description: "The genome's index.", category: "advanced"}
exonsFastaPath: {description: "The location the exons fasta should be written to.", category: "advanced"}
CDSFastaPath: {description: "The location the CDS fasta should be written to.", category: "advanced"}
proteinFastaPath: {description: "The location the protein fasta should be written to.", category: "advanced"}
filteredGffPath: {description: "The location the filtered GFF should be written to.", category: "advanced"}
outputGtfFormat: {description: "Equivalent to gffread's `-T` flag.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
\ No newline at end of file
......@@ -49,4 +49,20 @@ task Hisat2 {
cpu: threads + 1
docker: dockerImage
}
parameter_meta {
indexFiles: {description: "The hisat2 index files.", category: "required"}
inputR1: {description: "The first-/single-end FastQ file.", category: "required"}
inputR2: {description: "The second-end FastQ file.", category: "common"}
outputBam: {description: "The location the output BAM file should be written to.", category: "required"}
sample: {description: "The sample id.", category: "required"}
library: {description: "The library id.", category: "required"}
readgroup: {description: "The readgroup id.", category: "required"}
platform: {description: "The platform used for sequencing.", category: "advanced"}
downstreamTranscriptomeAssembly: {description: "Equivalent to hisat2's `--dta` flag.", category: "advanced"}
threads: {description: "The number of threads to use.", category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
\ No newline at end of file
......@@ -18,7 +18,6 @@ task Somatic {
Int cores = 1
Int memoryGb = 4
String dockerImage = "quay.io/biocontainers/manta:1.4.0--py27_1"
}
command {
......@@ -56,4 +55,22 @@ task Somatic {
memory: "~{memoryGb}G"
docker: dockerImage
}
parameter_meta {
tumorBam: {description: "The tumor/case sample's BAM file.", category: "required"}
tumorBamIndex: {description: "The index for the tumor/case sample's BAM file.", category: "required"}
normalBam: {description: "The normal/control sample's BAM file.", category: "common"}
normalBamIndex: {description: "The index for the normal/control sample's BAM file.", category: "common"}
referenceFasta: {description: "The reference fasta file which was also used for mapping.", category: "required"}
referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
runDir: {description: "The directory to use as run/output directory.", category: "common"}
callRegions: {description: "The bed file which indicates the regions to operate on.", category: "common"}
callRegionsIndex: {description: "The index of the bed file which indicates the regions to operate on.", category: "common"}
exome: {description: "Whether or not the data is from exome sequencing.", category: "common"}
cores: {description: "The number of cores to use.", category: "advanced"}
memoryGb: {description: "The amount of memory this job will use in Gigabytes.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
......@@ -59,34 +59,20 @@ task Indexing {
}
parameter_meta {
useHomopolymerCompressedKmer: {
description: "Use homopolymer-compressed k-mer (preferable for PacBio).",
category: "advanced"
}
kmerSize: {
description: "K-mer size (no larger than 28).",
category: "advanced"
}
minimizerWindowSize: {
description: "Minimizer window size.",
category: "advanced"
}
outputPrefix: {
description: "Output directory path + output file prefix.",
category: "required"
}
referenceFile: {
description: "Reference fasta file.",
category: "required"
}
splitIndex: {
description: "Split index for every ~NUM input bases.",
category: "advanced"
}
outputIndexFile: {
description: "Indexed reference file.",
category: "required"
}
# input
useHomopolymerCompressedKmer: {description: "Use homopolymer-compressed k-mer (preferable for PacBio).", category: "advanced"}
kmerSize: {description: "K-mer size (no larger than 28).", category: "advanced"}
minimizerWindowSize: {description: "Minimizer window size.", category: "advanced"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
referenceFile: {description: "Reference fasta file.", category: "required"}
splitIndex: {description: "Split index for every ~NUM input bases.", category: "advanced"}
cores: {description: "The number of cores to be used.", category: "advanced"}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
# output
outputIndexFile: {description: "Indexed reference file."}
}
}
......@@ -147,69 +133,27 @@ task Mapping {
}
parameter_meta {
presetOption: {
description: "This option applies multiple options at the same time.",
category: "common"
}
kmerSize: {
description: "K-mer size (no larger than 28).",
category: "advanced"
}
outputSAM: {
description: "Output in the SAM format.",
category: "common"
}
outputPrefix: {
description: "Output directory path + output file prefix.",
category: "required"
}
maxIntronLength: {
description: "Max intron length (effective with -xsplice; changing -r).",
category: "advanced"
}
maxFragmentLength: {
description: "Max fragment length (effective with -xsr or in the fragment mode).",
category: "advanced"
}
skipSelfAndDualMappings: {
description: "Skip self and dual mappings (for the all-vs-all mode).",
category: "advanced"
}
retainMaxSecondaryAlignments: {
description: "Retain at most INT secondary alignments.",
category: "advanced"
}
matchingScore: {
description: "Matching score.",
category: "advanced"
}
mismatchPenalty: {
description: "Mismatch penalty.",
category: "advanced"
}
howToFindGTAG: {
description: "How to find GT-AG. f:transcript strand, b:both strands, n:don't match GT-AG.",
category: "common"
}
addMDtagToSAM: {
description: "Adds a MD tag to the SAM output file.",
category: "common"
}
secondaryAlignment: {
description: "Whether to output secondary alignments.",
category: "advanced"
}
referenceFile: {
description: "Reference fasta file.",
category: "required"
}
queryFile: {
description: "Input fasta file.",
category: "required"
}
outputAlignmentFile: {
description: "Mapping and alignment between collections of DNA sequences file.",
category: "required"
}
presetOption: {description: "This option applies multiple options at the same time.", category: "common"}
kmerSize: {description: "K-mer size (no larger than 28).", category: "advanced"}
outputSAM: {description: "Output in the SAM format.", category: "common"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
maxIntronLength: {description: "Max intron length (effective with -xsplice; changing -r).", category: "advanced"}
maxFragmentLength: {description: "Max fragment length (effective with -xsr or in the fragment mode).", category: "advanced"}
skipSelfAndDualMappings: {description: "Skip self and dual mappings (for the all-vs-all mode).", category: "advanced"}
retainMaxSecondaryAlignments: {description: "Retain at most INT secondary alignments.", category: "advanced"}
matchingScore: {description: "Matching score.", category: "advanced"}
mismatchPenalty: {description: "Mismatch penalty.", category: "advanced"}
howToFindGTAG: {description: "How to find GT-AG. f:transcript strand, b:both strands, n:don't match GT-AG.", category: "common"}
addMDtagToSAM: {description: "Adds a MD tag to the SAM output file.", category: "common"}
secondaryAlignment: {description: "Whether to output secondary alignments.", category: "advanced"}
referenceFile: {description: "Reference fasta file.", category: "required"}
queryFile: {description: "Input fasta file.", category: "required"}
cores: {description: "The number of cores to be used.", category: "advanced"}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
# output
outputAlignmentFile: {description: "Mapping and alignment between collections of DNA sequences file."}
}
}
......@@ -34,8 +34,6 @@ task MultiQC {
Boolean megaQCUpload = false # This must be actively enabled in my opinion. The tools default is to upload.
File? config # A directory
String? clConfig
Boolean verbose = false
Boolean quiet = false
Array[Boolean] finished = [] # An array of booleans that can be used to let multiqc wait on stuff.
String memory = "4G"
......@@ -92,4 +90,49 @@ task MultiQC {
memory: memory
docker: dockerImage
}
parameter_meta {
analysisDirectory: {description: "The directory to run MultiQC on.", category: "required"}
dependencies: {description: "This must be used in order to run multiqc after these tasks.", category: "internal_use_only"}
force: {description: "Equivalent to MultiQC's `--force` flag.", category: "advanced"}
dirs: {description: "Equivalent to MultiQC's `--dirs` flag.", category: "advanced"}
dirsDepth: {description: "Equivalent to MultiQC's `--dirs-depth` option.", category: "advanced"}
fullNames: {description: "Equivalent to MultiQC's `--fullnames` flag.", category: "advanced"}
title: {description: "Equivalent to MultiQC's `--title` option.", category: "advanced"}
comment: {description: "Equivalent to MultiQC's `--comment` option.", category: "advanced"}
fileName: {description: "Equivalent to MultiQC's `--filename` option.", category: "advanced"}
outDir: {description: "Directory in which the output should be written.", category: "common"}
template: {description: "Equivalent to MultiQC's `--template` option.", category: "advanced"}
tag: {description: "Equivalent to MultiQC's `--tag` option.", category: "advanced"}
ignore: {description: "Equivalent to MultiQC's `--ignore` option.", category: "advanced"}
ignoreSamples: {description: "Equivalent to MultiQC's `--ignore-samples` option.", category: "advanced"}
ignoreSymlinks: {description: "Equivalent to MultiQC's `--ignore-symlinks` flag.", category: "advanced"}
sampleNames: {description: "Equivalent to MultiQC's `--sample-names` option.", category: "advanced"}
fileList: {description: "Equivalent to MultiQC's `--file-list` option.", category: "advanced"}
exclude: {description: "Equivalent to MultiQC's `--exclude` option.", category: "advanced"}
module: {description: "Equivalent to MultiQC's `--module` option.", category: "advanced"}
dataDir: {description: "Equivalent to MultiQC's `--data-dir` flag.", category: "advanced"}
noDataDir: {description: "Equivalent to MultiQC's `--no-data-dir` flag.", category: "advanced"}
dataFormat: {description: "Equivalent to MultiQC's `--data-format` option.", category: "advanced"}
zipDataDir: {description: "Equivalent to MultiQC's `--zip-data-dir` flag.", category: "advanced"}
export: {description: "Equivalent to MultiQC's `--export` flag.", category: "advanced"}
flat: {description: "Equivalent to MultiQC's `--flat` flag.", category: "advanced"}
interactive: {description: "Equivalent to MultiQC's `--interactive` flag.", category: "advanced"}
lint: {description: "Equivalent to MultiQC's `--lint` flag.", category: "advanced"}
pdf: {description: "Equivalent to MultiQC's `--pdf` flag.", category: "advanced"}
megaQCUpload: {description: "Opposite to MultiQC's `--no-megaqc-upload` flag.", category: "advanced"}
config: {description: "Equivalent to MultiQC's `--config` option.", category: "advanced"}
clConfig: {description: "Equivalent to MultiQC's `--cl-config` option.", category: "advanced"}
finished: {description: "An array of booleans that can be used to let multiqc wait on stuff.", category: "internal_use_only"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
meta {
WDL_AID: {
exclude: ["finished", "dependencies"]
}
}
}
......@@ -4,7 +4,7 @@ task BedToIntervalList {
input {
File bedFile
File dict
String outputPath
String outputPath = "regions.interval_list"
String memory = "12G"
String javaXmx = "4G"
......@@ -29,6 +29,18 @@ task BedToIntervalList {
docker: dockerImage
memory: memory
}
parameter_meta {
bedFile: {description: "A bed file.", category: "required"}
dict: {description: "A sequence dict file.", category: "required"}
outputPath: {description: "The location the output interval list should be written to.",
category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task CollectMultipleMetrics {
......@@ -119,10 +131,44 @@ task CollectMultipleMetrics {
}
runtime {
docker: dockerImage
memory: memory
}
parameter_meta {
inputBam: {description: "The input BAM file for which metrics will be collected.",
category: "required"}
inputBamIndex: {description: "The index of the input BAM file.", category: "required"}
referenceFasta: {description: "The reference fasta file which was also used for mapping.",
category: "required"}
referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
category: "required"}
referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
basename: {description: "The basename/prefix of the output files (may include directories).",
category: "required"}
collectAlignmentSummaryMetrics: {description: "Equivalent to the `PROGRAM=CollectAlignmentSummaryMetrics` argument.",
category: "advanced"}
collectInsertSizeMetrics: {description: "Equivalent to the `PROGRAM=CollectInsertSizeMetrics` argument.",
category: "advanced"}
qualityScoreDistribution: {description: "Equivalent to the `PROGRAM=QualityScoreDistribution` argument.",
category: "advanced"}
meanQualityByCycle: {description: "Equivalent to the `PROGRAM=MeanQualityByCycle` argument.",
category: "advanced"}
collectBaseDistributionByCycle: {description: "Equivalent to the `PROGRAM=CollectBaseDistributionByCycle` argument.",
category: "advanced"}
collectGcBiasMetrics: {description: "Equivalent to the `PROGRAM=CollectGcBiasMetrics` argument.",
category: "advanced"}
collectSequencingArtifactMetrics: {description: "Equivalent to the `PROGRAM=CollectSequencingArtifactMetrics` argument.",
category: "advanced"}
collectQualityYieldMetrics: {description: "Equivalent to the `PROGRAM=CollectQualityYieldMetrics` argument.",
category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task CollectRnaSeqMetrics {
......@@ -159,6 +205,23 @@ task CollectRnaSeqMetrics {
docker: dockerImage
memory: memory
}
parameter_meta {
inputBam: {description: "The input BAM file for which metrics will be collected.",
category: "required"}
inputBamIndex: {description: "The index of the input BAM file.", category: "required"}
refRefflat: {description: "A refflat file containing gene annotations.", category: "required"}
basename: {description: "The basename/prefix of the output files (may include directories).",
category: "required"}
strandSpecificity: {description: "Equivalent to the `STRAND_SPECIFICITY` option of picard's CollectRnaSeqMetrics.",
category: "common"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task CollectTargetedPcrMetrics {
......@@ -201,6 +264,29 @@ task CollectTargetedPcrMetrics {
docker: dockerImage
memory: memory
}
parameter_meta {
inputBam: {description: "The input BAM file for which metrics will be collected.",
category: "required"}
inputBamIndex: {description: "The index of the input BAM file.", category: "required"}
referenceFasta: {description: "The reference fasta file which was also used for mapping.",
category: "required"}
referenceFastaDict: {description: "The sequence dictionary associated with the reference fasta file.",
category: "required"}
referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
ampliconIntervals: {description: "An interval list describing the coordinates of the amplicons sequenced.",
category: "required"}
targetIntervals: {description: "An interval list describing the coordinates of the targets sequenced.",
category: "required"}
basename: {description: "The basename/prefix of the output files (may include directories).",
category: "required"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
# Combine multiple recalibrated BAM files from scattered ApplyRecalibration runs
......@@ -236,6 +322,18 @@ task GatherBamFiles {
docker: dockerImage
memory: memory
}
parameter_meta {
inputBams: {description: "The BAM files to be merged together.", category: "required"}
inputBamsIndex: {description: "The indexes of the input BAM files.", category: "required"}
outputBamPath: {description: "The path where the merged BAM file will be written.", category: "required"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task GatherVcfs {
......@@ -266,6 +364,18 @@ task GatherVcfs {
docker: dockerImage
memory: memory
}
parameter_meta {
inputVcfs: {description: "The VCF files to be merged together.", category: "required"}
inputVcfIndexes: {description: "The indexes of the input VCF files.", category: "required"}
outputVcfPath: {description: "The path where the merged VCF file will be written.", category: "required"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
# Mark duplicate reads to avoid counting non-independent observations
......@@ -320,6 +430,20 @@ task MarkDuplicates {
docker: dockerImage
memory: memory
}
parameter_meta {
inputBams: {description: "The BAM files for which the duplicate reads should be marked.", category: "required"}
inputBamIndexes: {description: "The indexes for the input BAM files.", category: "required"}
outputBamPath: {description: "The location where the output BAM file should be written.", category: "required"}
metricsPath: {description: "The location where the output metrics file should be written.", category: "required"}
read_name_regex: {description: "Equivalent to the `READ_NAME_REGEX` option of MarkDuplicates.", category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
# Combine multiple VCFs or GVCFs from scattered HaplotypeCaller runs
......@@ -355,6 +479,18 @@ task MergeVCFs {
docker: dockerImage
memory: memory
}
parameter_meta {
inputVCFs: {description: "The VCF files to be merged.", category: "required"}
inputVCFsIndexes: {description: "The indexes of the VCF files.", category: "required"}
outputVcfPath: {description: "The location the output VCF file should be written to.", category: "required"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task SamToFastq {
......@@ -438,7 +574,7 @@ task SortVcf {
String memory = "24G"
String javaXmx = "8G"
String dockerImage = "quay.io/biocontainers/picard:2.20.5--0"
}
}
command {
......@@ -460,4 +596,16 @@ task SortVcf {
docker: dockerImage
memory: memory
}
parameter_meta {
vcfFiles: {description: "The VCF files to merge and sort.", category: "required"}
outputVcfPath: {description: "The location the sorted VCF files should be written to.", category: "required"}
dict: {description: "A sequence dictionary matching the VCF files.", category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
javaXmx: {description: "The maximum memory available to the program. Should be lower than `memory` to accommodate JVM overhead.",
category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
\ No newline at end of file
......@@ -26,6 +26,14 @@ task BgzipAndIndex {
runtime {
docker: dockerImage
}
parameter_meta {
inputFile: {description: "The file to be compressed and indexed.", category: "required"}
outputDir: {description: "The directory in which the output will be placed.", category: "required"}
type: {description: "The type of file (eg. vcf or bed) to be compressed and indexed.", category: "common"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task Index {
......@@ -60,6 +68,14 @@ task Index {
runtime {
docker: dockerImage
}
parameter_meta {
bamFile: {description: "The BAM file for which an index should be made.", category: "required"}
outputBamPath: {description: "The location where the BAM file should be written to. The index will appear alongside this link to the BAM file.",
category: "common"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task Merge {
......@@ -87,6 +103,14 @@ task Merge {
runtime {
docker: dockerImage
}
parameter_meta {
bamFiles: {description: "The BAM files to merge.", category: "required"}
outputBamPath: {description: "The location the merged BAM file should be written to.", category: "common"}
force: {description: "Equivalent to samtools merge's `-f` flag.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task SortByName {
......@@ -110,6 +134,13 @@ task SortByName {
runtime {
docker: dockerImage
}
parameter_meta {
bamFile: {description: "The BAM file to get sorted.", category: "required"}
outputBamPath: {description: "The location the sorted BAM file should be written to.", category: "common"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task Markdup {
......@@ -133,6 +164,13 @@ task Markdup {
runtime {
docker: dockerImage
}
parameter_meta {
inputBam: {description: "The BAM file to be processed.", category: "required"}
outputBamPath: {description: "The location of the output BAM file.", category: "required"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task Flagstat {
......@@ -156,6 +194,13 @@ task Flagstat {
runtime {
docker: dockerImage
}
parameter_meta {
inputBam: {description: "The BAM file for which statistics should be retrieved.", category: "required"}
outputPath: {description: "The location the output should be written to.", category: "required"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task Fastq {
......@@ -204,13 +249,19 @@ task Fastq {
}
parameter_meta {
inputBam: "The bam file to process."
outputRead1: "If only outputRead1 is given '-s' flag is assumed. Else '-1'."
includeFilter: "Include reads with ALL of these flags. Corresponds to '-f'"
excludeFilter: "Exclude reads with ONE OR MORE of these flags. Corresponds to '-F'"
excludeSpecificFilter: "Exclude reads with ALL of these flags. Corresponds to '-G'"
appendReadNumber: "Append /1 and /2 to the read name, or don't. Corresponds to '-n/N"
inputBam: {description: "The bam file to process.", category: "required"}
outputRead1: {description: "The location the reads (first reads for pairs, in case of paired-end sequencing) should be written to.", category: "required"}
outputRead2: {description: "The location the second reads from pairs should be written to.", category: "common"}
outputRead0: {description: "The location the unpaired reads should be written to (in case of paired-end sequencing).", category: "advanced"}
includeFilter: {description: "Include reads with ALL of these flags. Corresponds to `-f`", category: "advanced"}
excludeFilter: {description: "Exclude reads with ONE OR MORE of these flags. Corresponds to `-F`", category: "advanced"}
excludeSpecificFilter: {description: "Exclude reads with ALL of these flags. Corresponds to `-G`", category: "advanced"}
appendReadNumber: {description: "Append /1 and /2 to the read name, or don't. Corresponds to `-n/N`", category: "advanced"}
outputQuality: {description: "Equivalent to samtools fastq's `-O` flag.", category: "advanced"}
threads: {description: "The number of threads to use.", category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
......@@ -240,6 +291,15 @@ task Tabix {
runtime {
docker: dockerImage
}
parameter_meta {
inputFile: {description: "The file to be indexed.", category: "required"}
outputFilePath: {description: "The location where the file should be written to. The index will appear alongside this link to the file.",
category: "common"}
type: {description: "The type of file (eg. vcf or bed) to be indexed.", category: "common"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task View {
......@@ -286,4 +346,20 @@ task View {
memory: memory
docker: dockerImage
}
parameter_meta {
inFile: {description: "A BAM, SAM or CRAM file.", category: "required"}
referenceFasta: {description: "The reference fasta file also used for mapping.", category: "advanced"}
outputFileName: {description: "The location the output BAM file should be written.", category: "common"}
uncompressedBamOutput: {description: "Equivalent to samtools view's `-u` flag.", category: "advanced"}
includeFilter: {description: "Equivalent to samtools view's `-f` option.", category: "advanced"}
excludeFilter: {description: "Equivalent to samtools view's `-F` option.", category: "advanced"}
excludeSpecificFilter: {description: "Equivalent to samtools view's `-G` option.", category: "advanced"}
MAPQthreshold: {description: "Equivalent to samtools view's `-q` option.", category: "advanced"}
threads: {description: "The number of threads to use.", category: "advanced"}
memory: {description: "The amount of memory this job will use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
Subproject commit fc603e5d408b89b99297fb5737586c059c5f9df6
Subproject commit a1783b5c789ebef601a8ec5849c4bbfe7dd3f87d
......@@ -2,8 +2,6 @@ version 1.0
task ParallelPaired {
input {
String installDir = "/opt/somaticseq" #the location in the docker image
File? classifierSNV
File? classifierIndel
String outputDir
......@@ -33,7 +31,7 @@ task ParallelPaired {
}
command {
~{installDir}/somaticseq_parallel.py \
/opt/somaticseq/somaticseq_parallel.py \
~{"--classifier-snv " + classifierSNV} \
~{"--classifier-indel " + classifierIndel} \
--output-directory ~{outputDir} \
......@@ -73,12 +71,40 @@ task ParallelPaired {
cpu: threads
docker: dockerImage
}
parameter_meta {
classifierSNV: {description: "A somaticseq SNV classifier.", category: "common"}
classifierIndel: {description: "A somaticseq Indel classifier.", category: "common"}
outputDir: {description: "The directory to write the output to.", category: "common"}
referenceFasta: {description: "The reference fasta file.", category: "required"}
referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
inclusionRegion: {description: "A bed file describing regions to include.", category: "common"}
exclusionRegion: {description: "A bed file describing regions to exclude.", category: "common"}
normalBam: {description: "The normal/control sample's BAM file.", category: "required"}
normalBamIndex: {description: "The index for the normal/control sample's BAM file.", category: "required"}
tumorBam: {description: "The tumor/case sample's BAM file.", category: "required"}
tumorBamIndex: {description: "The index for the tumor/case sample's BAM file.", category: "required"}
mutect2VCF: {description: "A VCF as produced by mutect2.", category: "advanced"}
varscanSNV: {description: "An SNV VCF as produced by varscan.", category: "advanced"}
varscanIndel: {description: "An indel VCF as produced by varscan.", category: "advanced"}
jsmVCF: {description: "A VCF as produced by jsm.", category: "advanced"}
somaticsniperVCF: {description: "A VCF as produced by somaticsniper.", category: "advanced"}
vardictVCF: {description: "A VCF as produced by vardict.", category: "advanced"}
museVCF: {description: "A VCF as produced by muse.", category: "advanced"}
lofreqSNV: {description: "An SNV VCF as produced by lofreq.", category: "advanced"}
lofreqIndel: {description: "An indel VCF as produced by lofreq.", category: "advanced"}
scalpelVCF: {description: "A VCF as produced by scalpel.", category: "advanced"}
strelkaSNV: {description: "An SNV VCF as produced by strelka.", category: "advanced"}
strelkaIndel: {description: "An indel VCF as produced by strelka.", category: "advanced"}
threads: {description: "The number of threads to use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task ParallelPairedTrain {
input {
String installDir = "/opt/somaticseq" #the location in the docker image
File truthSNV
File truthIndel
String outputDir
......@@ -108,7 +134,7 @@ task ParallelPairedTrain {
}
command {
~{installDir}/somaticseq_parallel.py \
/opt/somaticseq/somaticseq_parallel.py \
--somaticseq-train \
--truth-snv ~{truthSNV} \
--truth-indel ~{truthIndel} \
......@@ -147,12 +173,40 @@ task ParallelPairedTrain {
cpu: threads
docker: dockerImage
}
parameter_meta {
truthSNV: {description: "A VCF of true SNVs.", category: "required"}
truthIndel: {description: "A VCF of true indels.", category: "required"}
outputDir: {description: "The directory to write the output to.", category: "common"}
referenceFasta: {description: "The reference fasta file.", category: "required"}
referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
inclusionRegion: {description: "A bed file describing regions to include.", category: "common"}
exclusionRegion: {description: "A bed file describing regions to exclude.", category: "common"}
normalBam: {description: "The normal/control sample's BAM file.", category: "required"}
normalBamIndex: {description: "The index for the normal/control sample's BAM file.", category: "required"}
tumorBam: {description: "The tumor/case sample's BAM file.", category: "required"}
tumorBamIndex: {description: "The index for the tumor/case sample's BAM file.", category: "required"}
mutect2VCF: {description: "A VCF as produced by mutect2.", category: "advanced"}
varscanSNV: {description: "An SNV VCF as produced by varscan.", category: "advanced"}
varscanIndel: {description: "An indel VCF as produced by varscan.", category: "advanced"}
jsmVCF: {description: "A VCF as produced by jsm.", category: "advanced"}
somaticsniperVCF: {description: "A VCF as produced by somaticsniper.", category: "advanced"}
vardictVCF: {description: "A VCF as produced by vardict.", category: "advanced"}
museVCF: {description: "A VCF as produced by muse.", category: "advanced"}
lofreqSNV: {description: "An SNV VCF as produced by lofreq.", category: "advanced"}
lofreqIndel: {description: "An indel VCF as produced by lofreq.", category: "advanced"}
scalpelVCF: {description: "A VCF as produced by scalpel.", category: "advanced"}
strelkaSNV: {description: "An SNV VCF as produced by strelka.", category: "advanced"}
strelkaIndel: {description: "An indel VCF as produced by strelka.", category: "advanced"}
threads: {description: "The number of threads to use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task ParallelSingle {
input {
String installDir = "/opt/somaticseq" #the location in the docker image
File? classifierSNV
File? classifierIndel
String outputDir
......@@ -174,7 +228,7 @@ task ParallelSingle {
}
command {
~{installDir}/somaticseq_parallel.py \
/opt/somaticseq/somaticseq_parallel.py \
~{"--classifier-snv " + classifierSNV} \
~{"--classifier-indel " + classifierIndel} \
--output-directory ~{outputDir} \
......@@ -207,12 +261,32 @@ task ParallelSingle {
cpu: threads
docker: dockerImage
}
parameter_meta {
classifierSNV: {description: "A somaticseq SNV classifier.", category: "common"}
classifierIndel: {description: "A somaticseq Indel classifier.", category: "common"}
outputDir: {description: "The directory to write the output to.", category: "common"}
referenceFasta: {description: "The reference fasta file.", category: "required"}
referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
inclusionRegion: {description: "A bed file describing regions to include.", category: "common"}
exclusionRegion: {description: "A bed file describing regions to exclude.", category: "common"}
bam: {description: "The input BAM file.", category: "required"}
bamIndex: {description: "The index for the input BAM file.", category: "required"}
mutect2VCF: {description: "A VCF as produced by mutect2.", category: "advanced"}
varscanVCF: {description: "A VCF as produced by varscan.", category: "advanced"}
vardictVCF: {description: "A VCF as produced by vardict.", category: "advanced"}
lofreqVCF: {description: "A VCF as produced by lofreq.", category: "advanced"}
scalpelVCF: {description: "A VCF as produced by scalpel.", category: "advanced"}
strelkaVCF: {description: "A VCF as produced by strelka.", category: "advanced"}
threads: {description: "The number of threads to use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task ParallelSingleTrain {
input {
String installDir = "/opt/somaticseq" #the location in the docker image
File truthSNV
File truthIndel
String outputDir
......@@ -234,7 +308,7 @@ task ParallelSingleTrain {
}
command {
~{installDir}/somaticseq_parallel.py \
/opt/somaticseq/somaticseq_parallel.py \
--somaticseq-train \
--truth-snv ~{truthSNV} \
--truth-indel ~{truthIndel} \
......@@ -266,23 +340,41 @@ task ParallelSingleTrain {
cpu: threads
docker: dockerImage
}
parameter_meta {
truthSNV: {description: "A VCF of true SNVs.", category: "required"}
truthIndel: {description: "A VCF of true indels.", category: "required"}
outputDir: {description: "The directory to write the output to.", category: "common"}
referenceFasta: {description: "The reference fasta file.", category: "required"}
referenceFastaFai: {description: "The index for the reference fasta file.", category: "required"}
inclusionRegion: {description: "A bed file describing regions to include.", category: "common"}
exclusionRegion: {description: "A bed file describing regions to exclude.", category: "common"}
bam: {description: "The input BAM file.", category: "required"}
bamIndex: {description: "The index for the input BAM file.", category: "required"}
mutect2VCF: {description: "A VCF as produced by mutect2.", category: "advanced"}
varscanVCF: {description: "A VCF as produced by varscan.", category: "advanced"}
vardictVCF: {description: "A VCF as produced by vardict.", category: "advanced"}
lofreqVCF: {description: "A VCF as produced by lofreq.", category: "advanced"}
scalpelVCF: {description: "A VCF as produced by scalpel.", category: "advanced"}
strelkaVCF: {description: "A VCF as produced by strelka.", category: "advanced"}
threads: {description: "The number of threads to use.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
task ModifyStrelka {
input {
String installDir = "/opt/somaticseq/vcfModifier" #the location in the docker image
File strelkaVCF
String outputVCFName = basename(strelkaVCF, ".gz")
Int threads = 1
String dockerImage = "lethalfang/somaticseq:3.1.0"
}
command {
set -e
~{installDir}/modify_Strelka.py \
/opt/somaticseq/vcfModifier/modify_Strelka.py \
-infile ~{strelkaVCF} \
-outfile "modified_strelka.vcf"
......@@ -295,7 +387,13 @@ task ModifyStrelka {
}
runtime {
cpu: threads
docker: dockerImage
}
parameter_meta {
strelkaVCF: {description: "A vcf file as produced by strelka.", category: "required"}
outputVCFName: {description: "The location the output VCF file should be written to.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.",
category: "advanced"}
}
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment