Commit c248c5bb authored by JasperBoom

Update output names for the first set of tasks.

parent 795a285e
@@ -11,6 +11,8 @@ that users understand how the changes affect the new version.
version 4.0.0-develop
---------------------------
+ Renamed outputs of tasks used in the TALON-WDL, PacBio-subreads-processing &
sequence-classification pipelines.
+ Reworked bcf2vcf task into bcftools view task.
+ Removed the redundant format flag from the htseq interface. This is
autodetected in newer versions of htseq.
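For workflows that import these tasks, the rename only changes how call outputs are referenced. A minimal sketch against the first renamed task below (the workflow name, import path and call alias are illustrative assumptions, and the task inputs are elided):

version 1.0

import "ccs.wdl" as pacbio  # import path is an assumption

workflow RenamedOutputsExample {
    # Task inputs elided; only the output references change with this commit.
    call pacbio.CCS as ccs

    output {
        # Before: ccs.outputCCSfile / ccs.outputCCSindexFile / ccs.outputReportFile
        File consensusBam = ccs.ccsBam
        File consensusBamIndex = ccs.ccsBamIndex
        File consensusReport = ccs.ccsReport
    }
}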
@@ -55,10 +55,10 @@ task CCS {
}
output {
File outputCCSfile = outputPrefix + ".ccs.bam"
File outputCCSindexFile = outputPrefix + ".ccs.bam.pbi"
File outputReportFile = outputPrefix + ".ccs.report.txt"
File outputSTDERRfile = outputPrefix + ".ccs.stderr.log"
File ccsBam = outputPrefix + ".ccs.bam"
File ccsBamIndex = outputPrefix + ".ccs.bam.pbi"
File ccsReport = outputPrefix + ".ccs.report.txt"
File ccsStderr = outputPrefix + ".ccs.stderr.log"
}
runtime {
@@ -70,7 +70,7 @@ task CCS {
parameter_meta {
# inputs
minPasses: {description: "Minimum number of full-length subreads required to generate CCS for a ZMW.", category: "advanced"}
minPasses: {description: "Minimum number of full-length subreads required to generate ccs for a ZMW.", category: "advanced"}
minLength: {description: "Minimum draft length before polishing.", category: "advanced"}
maxLength: {description: "Maximum draft length before polishing.", category: "advanced"}
byStrand: {description: "Generate a consensus for each strand.", category: "advanced"}
@@ -84,9 +84,9 @@ task CCS {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputCCSfile: {description: "Consensus reads output file."}
outputCCSindexFile: {description: "Index of consensus reads output file."}
outputReportFile: {description: "CCS results report file."}
outputSTDERRfile: {description: "CCS STDERR log file."}
ccsBam: {description: "Consensus reads output file."}
ccsBamIndex: {description: "Index of consensus reads output file."}
ccsReport: {description: "Ccs results report file."}
ccsStderr: {description: "Ccs STDERR log file."}
}
}
@@ -59,7 +59,7 @@ task Build {
}
output {
Array[File] outputIndex = glob(outputPrefix + "/" + indexBasename + "*.cf")
Array[File] index = glob(outputPrefix + "/" + indexBasename + "*.cf")
}
runtime {
@@ -75,7 +75,7 @@ task Build {
conversionTable: {description: "List of UIDs (unique ID) and corresponding taxonomic IDs.", category: "required"}
taxonomyTree: {description: "Taxonomic tree (e.g. nodes.dmp).", category: "required"}
nameTable: {description: "Name table (e.g. names.dmp).", category: "required"}
referenceFile: {description: "A comma-separated list of FASTA files containing the reference sequences to be aligned to.", category: "required"}
referenceFile: {description: "A comma-separated list of fasta files containing the reference sequences to be aligned to.", category: "required"}
indexBasename: {description: "The basename of the index files to write.", category: "required"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
offrate: {description: "The number of rows marked by the indexer.", category: "common"}
@@ -88,7 +88,7 @@ task Build {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputIndex: {description: "Generated Centrifuge index."}
index: {description: "Generated centrifuge index."}
}
}
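Callers that previously read Build.outputIndex now read Build.index. A minimal sketch (import path, input types and literal values are assumptions; the parameter and output names come from the task definition above):

version 1.0

import "centrifuge.wdl" as centrifuge  # import path is an assumption

workflow BuildCentrifugeIndex {
    input {
        File conversionTable
        File taxonomyTree
        File nameTable
        File referenceFile
    }

    call centrifuge.Build {
        input:
            conversionTable = conversionTable,
            taxonomyTree = taxonomyTree,
            nameTable = nameTable,
            referenceFile = referenceFile,
            indexBasename = "reference",
            outputPrefix = "centrifuge_index"
    }

    output {
        # Before: Build.outputIndex
        Array[File] centrifugeIndex = Build.index
    }
}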
@@ -142,9 +142,9 @@ task Classify {
>>>
output {
File outputMetrics = outputPrefix + "_alignment_metrics.tsv"
File outputClassification = outputPrefix + "_classification.tsv"
File outputReport = outputPrefix + "_output_report.tsv"
File metrics = outputPrefix + "_alignment_metrics.tsv"
File classification = outputPrefix + "_classification.tsv"
File report = outputPrefix + "_output_report.tsv"
}
runtime {
@@ -156,7 +156,7 @@ task Classify {
parameter_meta {
# inputs
inputFormat: {description: "The format of the read file(s).", category: "required"}
phred64: {description: "If set to true, Phred+64 encoding is used.", category: "required"}
phred64: {description: "If set to true, phred+64 encoding is used.", category: "required"}
minHitLength: {description: "Minimum length of partial hits.", category: "required"}
indexFiles: {description: "The files of the index for the reference genomes.", category: "required"}
read1: {description: "List of files containing mate 1s, or unpaired reads.", category: "required"}
@@ -172,9 +172,9 @@ task Classify {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputMetrics: {description: "File with Centrifuge metrics."}
outputClassification: {description: "File with the classification results."}
outputReport: {description: "File with a classification summary."}
metrics: {description: "File with centrifuge metrics."}
classification: {description: "File with the classification results."}
report: {description: "File with a classification summary."}
}
}
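The Classify outputs follow the same pattern: metrics, classification and report replace the output-prefixed names. A minimal sketch (literal values and omitted inputs are assumptions; parameter names come from the task's parameter_meta):

version 1.0

import "centrifuge.wdl" as centrifuge  # import path is an assumption

workflow ClassifyReads {
    input {
        Array[File] indexFiles
        Array[File] read1
    }

    call centrifuge.Classify {
        input:
            inputFormat = "fastq",
            phred64 = false,
            minHitLength = 22,
            indexFiles = indexFiles,
            read1 = read1,
            outputPrefix = "sample1"
    }

    output {
        # Before: Classify.outputMetrics / outputClassification / outputReport
        File metrics = Classify.metrics
        File classification = Classify.classification
        File report = Classify.report
    }
}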
@@ -209,7 +209,7 @@ task Inspect {
>>>
output {
File outputInspect = outputPrefix + "/" + printOption
File inspectResult = outputPrefix + "/" + printOption
}
runtime {
@@ -223,13 +223,13 @@ task Inspect {
printOption: {description: "The output option for inspect (fasta, summary, conversionTable, taxonomyTree, nameTable, sizeTable)", category: "required"}
indexFiles: {description: "The files of the index for the reference genomes.", category: "required"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
across: {description: "When printing FASTA output, output a newline character every <int> bases.", category: "common"}
across: {description: "When printing fasta output, output a newline character every <int> bases.", category: "common"}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputInspect: {description: "Output file according to output option."}
inspectResult: {description: "Output file according to output option."}
}
}
@@ -300,7 +300,7 @@ task DownloadTaxonomy {
}
}
task Kreport {
task KReport {
input {
File centrifugeClassification
String outputPrefix
@@ -337,7 +337,7 @@ task Kreport {
>>>
output {
File outputKreport = outputPrefix + "_kreport.tsv"
File KReport = outputPrefix + "_kreport.tsv"
}
runtime {
@@ -348,10 +348,10 @@ task Kreport {
parameter_meta {
# inputs
centrifugeClassification: {description: "File with Centrifuge classification results.", category: "required"}
centrifugeClassification: {description: "File with centrifuge classification results.", category: "required"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
indexFiles: {description: "The files of the index for the reference genomes.", category: "required"}
noLCA: {description: "Do not report the LCA of multiple assignments, but report count fractions at the taxa.", category: "advanced"}
noLCA: {description: "Do not report the lca of multiple assignments, but report count fractions at the taxa.", category: "advanced"}
showZeros: {description: "Show clades that have zero reads.", category: "advanced"}
isCountTable: {description: "The format of the file is taxID<tab>COUNT.", category: "advanced"}
minimumScore: {description: "Require a minimum score for reads to be counted.", category: "advanced"}
@@ -361,7 +361,7 @@ task Kreport {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputKreport: {description: "File with kraken style report."}
KReport: {description: "File with kraken style report."}
}
}
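Because the task itself is renamed from Kreport to KReport, call statements change along with the output reference. A minimal sketch (import path and literal values are assumptions):

version 1.0

import "centrifuge.wdl" as centrifuge  # import path is an assumption

workflow KrakenStyleReport {
    input {
        File centrifugeClassification
        Array[File] indexFiles
    }

    # Before: call centrifuge.Kreport
    call centrifuge.KReport {
        input:
            centrifugeClassification = centrifugeClassification,
            indexFiles = indexFiles,
            outputPrefix = "sample1"
    }

    output {
        # Before: Kreport.outputKreport
        File krakenStyleReport = KReport.KReport
    }
}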
@@ -384,7 +384,7 @@ task KTimportTaxonomy {
}
output {
File outputKronaPlot = outputPrefix + "_krona.html"
File kronaPlot = outputPrefix + "_krona.html"
}
runtime {
@@ -395,13 +395,13 @@ task KTimportTaxonomy {
parameter_meta {
# inputs
inputFile: {description: "File with Centrifuge classification results.", category: "required"}
inputFile: {description: "File with centrifuge classification results.", category: "required"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputKronaPlot: {description: "Krona taxonomy plot html file."}
kronaPlot: {description: "Krona taxonomy plot html file."}
}
}
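The Krona plot output follows the same convention. A minimal sketch (the import path, the assumption that KTimportTaxonomy lives in the same file as the centrifuge tasks, and the literal values are illustrative):

version 1.0

import "centrifuge.wdl" as centrifuge  # import path is an assumption

workflow KronaVisualisation {
    input {
        File centrifugeClassification
    }

    call centrifuge.KTimportTaxonomy {
        input:
            inputFile = centrifugeClassification,
            outputPrefix = "sample1"
    }

    output {
        # Before: KTimportTaxonomy.outputKronaPlot
        File kronaPlot = KTimportTaxonomy.kronaPlot
    }
}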
@@ -51,12 +51,12 @@ task Refine {
}
output {
File outputFLNCfile = outputDir + "/" + outputNamePrefix + ".bam"
File outputFLNCindexFile = outputDir + "/" + outputNamePrefix + ".bam.pbi"
File outputConsensusReadsetFile = outputDir + "/" + outputNamePrefix + ".consensusreadset.xml"
File outputFilterSummaryFile = outputDir + "/" + outputNamePrefix + ".filter_summary.json"
File outputReportFile = outputDir + "/" + outputNamePrefix + ".report.csv"
File outputSTDERRfile = outputDir + "/" + outputNamePrefix + ".stderr.log"
File refineBam = outputDir + "/" + outputNamePrefix + ".bam"
File refineBamIndex = outputDir + "/" + outputNamePrefix + ".bam.pbi"
File refineConsensusReadset = outputDir + "/" + outputNamePrefix + ".consensusreadset.xml"
File refineFilterSummary = outputDir + "/" + outputNamePrefix + ".filter_summary.json"
File refineReport = outputDir + "/" + outputNamePrefix + ".report.csv"
File refineStderr = outputDir + "/" + outputNamePrefix + ".stderr.log"
}
runtime {
@@ -69,9 +69,9 @@ task Refine {
parameter_meta {
# inputs
minPolyAlength: {description: "Minimum poly(A) tail length.", category: "advanced"}
requirePolyA: {description: "Require FL reads to have a poly(A) tail and remove it.", category: "common"}
requirePolyA: {description: "Require fl reads to have a poly(A) tail and remove it.", category: "common"}
logLevel: {description: "Set log level. Valid choices: (TRACE, DEBUG, INFO, WARN, FATAL).", category: "advanced"}
inputBamFile: {description: "BAM input file.", category: "required"}
inputBamFile: {description: "Bam input file.", category: "required"}
primerFile: {description: "Barcode/primer fasta file.", category: "required"}
outputDir: {description: "Output directory path.", category: "required"}
outputNamePrefix: {description: "Basename of the output files.", category: "required"}
@@ -81,11 +81,11 @@ task Refine {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputFLNCfile: {description: "Filtered reads output file."}
outputFLNCindexFile: {description: "Index of filtered reads output file."}
outputSTDERRfile: {description: "Refine STDERR log file."}
outputConsensusReadsetFile: {description: "Refine consensus readset XML file."}
outputFilterSummaryFile: {description: "Refine summary file."}
outputReportFile: {description: "Refine report file."}
refineBam: {description: "Filtered reads output file."}
refineBamIndex: {description: "Index of filtered reads output file."}
refineConsensusReadset: {description: "Refine consensus readset xml file."}
refineFilterSummary: {description: "Refine summary file."}
refineReport: {description: "Refine report file."}
refineStderr: {description: "Refine stderr log file."}
}
}
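Callers of Refine switch from the output-prefixed names to the refine-prefixed ones. A minimal sketch (import path and literal values are assumptions; parameter names come from the task's parameter_meta):

version 1.0

import "isoseq3.wdl" as isoseq3  # import path is an assumption

workflow RefineReads {
    input {
        File flBam
        File primers
    }

    call isoseq3.Refine {
        input:
            inputBamFile = flBam,
            primerFile = primers,
            outputDir = "refine",
            outputNamePrefix = "sample1"
    }

    output {
        # Before: Refine.outputFLNCfile / outputFLNCindexFile / outputReportFile
        File flncBam = Refine.refineBam
        File flncBamIndex = Refine.refineBamIndex
        File refineReport = Refine.refineReport
    }
}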
@@ -98,14 +98,14 @@ task Lima {
}
output {
Array[File] outputFLfile = glob("*.bam")
Array[File] outputFLindexFile = glob("*.bam.pbi")
Array[File] outputFLxmlFile = glob("*.subreadset.xml")
File outputSTDERRfile = outputPrefix + ".fl.stderr.log"
File outputJSONfile = outputPrefix + ".fl.json"
File outputCountsFile = outputPrefix + ".fl.lima.counts"
File outputReportFile = outputPrefix + ".fl.lima.report"
File outputSummaryFile = outputPrefix + ".fl.lima.summary"
Array[File] limaBam = glob("*.bam")
Array[File] limaBamIndex = glob("*.bam.pbi")
Array[File] limaXml = glob("*.subreadset.xml")
File limaStderr = outputPrefix + ".fl.stderr.log"
File limaJson = outputPrefix + ".fl.json"
File limaCounts = outputPrefix + ".fl.lima.counts"
File limaReport = outputPrefix + ".fl.lima.report"
File limaSummary = outputPrefix + ".fl.lima.summary"
}
runtime {
@@ -131,15 +131,15 @@ task Lima {
minEndScore: {description: "Minimum end barcode score threshold is applied to the individual leading and trailing ends.", category: "advanced"}
minSignalIncrease: {description: "The minimal score difference, between first and combined, required to call a barcode pair different.", category: "advanced"}
minScoreLead: {description: "The minimal score lead required to call a barcode pair significant.", category: "common"}
ccsMode: {description: "CCS mode, use optimal alignment options.", category: "common"}
splitBamNamed: {description: "Split BAM output by resolved barcode pair name.", category: "common"}
ccsMode: {description: "Ccs mode, use optimal alignment options.", category: "common"}
splitBamNamed: {description: "Split bam output by resolved barcode pair name.", category: "common"}
scoredAdapterRatio: {description: "Minimum ratio of scored vs sequenced adapters.", category: "advanced"}
peek: {description: "Demux the first N ZMWs and return the mean score, 0 means peeking deactivated.", category: "advanced"}
guess: {description: "Try to guess the used barcodes, using the provided mean score threshold, 0 means guessing deactivated.", category: "advanced"}
guessMinCount: {description: "Minimum number of ZMWs observed to whitelist barcodes.", category: "advanced"}
peekGuess: {description: "Try to infer the used barcodes subset, by peeking at the first 50,000 ZMWs.", category: "advanced"}
logLevel: {description: "Set log level. Valid choices: (TRACE, DEBUG, INFO, WARN, FATAL).", category: "advanced"}
inputBamFile: {description: "BAM input file.", category: "required"}
inputBamFile: {description: "Bam input file.", category: "required"}
barcodeFile: {description: "Barcode/primer fasta file.", category: "required"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
cores: {description: "The number of cores to be used.", category: "advanced"}
@@ -148,13 +148,13 @@ task Lima {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputFLfile: {description: "Demultiplexed reads output file(s)."}
outputFLindexFile: {description: "Index of demultiplexed reads output file(s)."}
outputFLxmlFile: {description: "XML file of the subreadset(s)."}
outputSTDERRfile: {description: "Lima STDERR log file."}
outputJSONfile: {description: "Lima JSON file."}
outputCountsFile: {description: "Lima counts file."}
outputReportFile: {description: "Lima report file."}
outputSummaryFile: {description: "Lima summary file."}
limaBam: {description: "Demultiplexed reads output file(s)."}
limaBamIndex: {description: "Index of demultiplexed reads output file(s)."}
limaXml: {description: "Xml file of the subreadset(s)."}
limaStderr: {description: "Lima stderr log file."}
limaJson: {description: "Lima json file."}
limaCounts: {description: "Lima counts file."}
limaReport: {description: "Lima report file."}
limaSummary: {description: "Lima summary file."}
}
}
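The Lima outputs keep their array types; only the names shorten. A minimal sketch (import path and literal values are assumptions):

version 1.0

import "lima.wdl" as lima  # import path is an assumption

workflow Demultiplex {
    input {
        File subreadsBam
        File barcodes
    }

    call lima.Lima {
        input:
            inputBamFile = subreadsBam,
            barcodeFile = barcodes,
            outputPrefix = "demux"
    }

    output {
        # Before: Lima.outputFLfile / outputFLindexFile / outputSummaryFile
        Array[File] demuxBams = Lima.limaBam
        Array[File] demuxBamIndexes = Lima.limaBamIndex
        File demuxSummary = Lima.limaSummary
    }
}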
@@ -48,7 +48,7 @@ task CreateAbundanceFileFromDatabase {
}
output {
File outputAbundanceFile = outputPrefix + "_talon_abundance.tsv"
File abundanceFile = outputPrefix + "_talon_abundance.tsv"
}
runtime {
@@ -59,7 +59,7 @@ task CreateAbundanceFileFromDatabase {
parameter_meta {
# inputs
databaseFile: {description: "TALON database.", category: "required"}
databaseFile: {description: "Talon database.", category: "required"}
annotationVersion: {description: "Which annotation version to use.", category: "required"}
genomeBuild: {description: "Genome build to use.", category: "required"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
@@ -70,7 +70,7 @@ task CreateAbundanceFileFromDatabase {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputAbundanceFile: {description: "Abundance for each transcript in the TALON database across datasets."}
abundanceFile: {description: "Abundance for each transcript in the talon database across datasets."}
}
}
@@ -105,7 +105,7 @@ task CreateGtfFromDatabase {
}
output {
File outputGTFfile = outputPrefix + "_talon.gtf"
File gtfFile = outputPrefix + "_talon.gtf"
}
runtime {
@@ -116,7 +116,7 @@ task CreateGtfFromDatabase {
parameter_meta {
# inputs
databaseFile: {description: "TALON database.", category: "required"}
databaseFile: {description: "Talon database.", category: "required"}
genomeBuild: {description: "Genome build to use.", category: "required"}
annotationVersion: {description: "Which annotation version to use.", category: "required"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
@@ -128,7 +128,7 @@ task CreateGtfFromDatabase {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputGTFfile: {description: "The genes, transcripts, and exons stored a TALON database in GTF format."}
gtfFile: {description: "The genes, transcripts, and exons stored a talon database in gtf format."}
}
}
@@ -164,7 +164,7 @@ task FilterTalonTranscripts {
}
output {
File outputTranscriptWhitelist = outputPrefix + "_whitelist.csv"
File transcriptWhitelist = outputPrefix + "_whitelist.csv"
}
runtime {
@@ -175,11 +175,11 @@ task FilterTalonTranscripts {
parameter_meta {
# inputs
databaseFile: {description: "TALON database.", category: "required"}
databaseFile: {description: "Talon database.", category: "required"}
annotationVersion: {description: "Which annotation version to use.", category: "required"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
maxFracA: {description: "Maximum fraction of As to allow in the window located immediately after any read assigned to a novel transcript.", category: "advanced"}
minCount: {description: "Number of minimum occurrences required for a novel transcript PER dataset.", category: "advanced"}
minCount: {description: "Number of minimum occurrences required for a novel transcript per dataset.", category: "advanced"}
allowGenomic: {description: "If this option is set, transcripts from the Genomic novelty category will be permitted in the output.", category: "advanced"}
datasetsFile: {description: "Datasets to include.", category: "advanced"}
minDatasets: {description: "Minimum number of datasets novel transcripts must be found in.", category: "advanced"}
@@ -188,7 +188,7 @@ task FilterTalonTranscripts {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputTranscriptWhitelist: {description: "A transcript whitelist produced from the TALON database."}
transcriptWhitelist: {description: "Transcript whitelist produced from the talon database."}
}
}
@@ -216,7 +216,7 @@ task GetReadAnnotations {
}
output {
File outputAnnotation = outputPrefix + "_talon_read_annot.tsv"
File readAnnotations = outputPrefix + "_talon_read_annot.tsv"
}
runtime {
@@ -227,7 +227,7 @@ task GetReadAnnotations {
parameter_meta {
# inputs
databaseFile: { description: "TALON database.", category: "required"}
databaseFile: { description: "Talon database.", category: "required"}
genomeBuild: {description: "Genome build to use.", category: "required"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
datasetFile: {description: "A file indicating which datasets should be included.", category: "advanced"}
@@ -236,7 +236,7 @@ task GetReadAnnotations {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputAnnotation: {description: "Read-specific annotation information from a TALON database."}
readAnnotations: {description: "Read-specific annotation information from a talon database."}
}
}
@@ -266,7 +266,7 @@ task GetSpliceJunctions {
}
output {
File outputSJfile = outputPrefix + "_" + runMode + "s.tsv"
File spliceJunctions = outputPrefix + "_" + runMode + "s.tsv"
}
runtime {
@@ -277,9 +277,9 @@ task GetSpliceJunctions {
parameter_meta {
# inputs
SJinformationFile: {description: "TALON GTF file or database from which to extract exons/introns.", category: "required"}
SJinformationFile: {description: "Talon gtf file or database from which to extract exons/introns.", category: "required"}
inputFileType: {description: "The file type of SJinformationFile.", category: "common"}
referenceGTF: {description: "GTF reference file (ie GENCODE).", category: "required"}
referenceGTF: {description: "Gtf reference file (ie gencode).", category: "required"}
runMode: {description: "Determines whether to include introns or exons in the output.", category: "common"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
@@ -287,7 +287,7 @@ task GetSpliceJunctions {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputSJfile: {description: "File containing locations, novelty and transcript assignments of exons/introns."}
spliceJunctions: {description: "File containing locations, novelty and transcript assignments of exons/introns."}
}
}
@@ -322,7 +322,7 @@ task InitializeTalonDatabase {
}
output {
File outputDatabase = outputPrefix + ".db"
File database = outputPrefix + ".db"
}
runtime {
@@ -333,11 +333,11 @@ task InitializeTalonDatabase {
parameter_meta {
# inputs
GTFfile: {description: "GTF annotation containing genes, transcripts, and edges.", category: "required"}
genomeBuild: {description: "Name of genome build that the GTF file is based on (ie hg38).", category: "required"}
GTFfile: {description: "Gtf annotation containing genes, transcripts, and edges.", category: "required"}
genomeBuild: {description: "Name of genome build that the gtf file is based on (ie hg38).", category: "required"}
annotationVersion: {description: "Name of supplied annotation (will be used to label data).", category: "required"}
minimumLength: { description: "Minimum required transcript length.", category: "common"}
novelIDprefix: {description: "Prefix for naming novel discoveries in eventual TALON runs.", category: "common"}
novelIDprefix: {description: "Prefix for naming novel discoveries in eventual talon runs.", category: "common"}
cutoff5p: { description: "Maximum allowable distance (bp) at the 5' end during annotation.", category: "advanced"}
cutoff3p: {description: "Maximum allowable distance (bp) at the 3' end during annotation.", category: "advanced"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
@@ -346,7 +346,7 @@ task InitializeTalonDatabase {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputDatabase: {description: "TALON database."}
database: {description: "Talon database."}
}
}
@@ -379,8 +379,8 @@ task LabelReads {
}
output {
File outputLabeledSAM = outputPrefix + "_labeled.sam"
File outputReadLabels = outputPrefix + "_read_labels.tsv"
File labeledSam = outputPrefix + "_labeled.sam"
File readLabels = outputPrefix + "_read_labels.tsv"
}
runtime {
@@ -392,7 +392,7 @@ task LabelReads {
parameter_meta {
# inputs
SAMfile: {description: "SAM file of transcripts.", category: "required"}
SAMfile: {description: "Sam file of transcripts.", category: "required"}
referenceGenome: {description: "Reference genome fasta file.", category: "required"}
fracaRangeSize: {description: "Size of post-transcript interval to compute fraction.", category: "common"}
tmpDir: {description: "Path to directory for tmp files.", category: "advanced"}
@@ -404,8 +404,8 @@ task LabelReads {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputLabeledSAM: {description: "SAM file with labeled transcripts."}
outputReadLabels: {description: "Tabular file with fraction description per read."}
labeledSam: {description: "Sam file with labeled transcripts."}
readLabels: {description: "Tabular file with fraction description per read."}
}
}
@@ -425,7 +425,7 @@ task ReformatGtf {
}
output {
File outputReformattedGTF = GTFfile
File reformattedGtf = GTFfile
}
runtime {
@@ -436,13 +436,13 @@ task ReformatGtf {
parameter_meta {
# inputs
GTFfile: {description: "GTF annotation containing genes, transcripts, and edges.", category: "required"}
GTFfile: {description: "Gtf annotation containing genes, transcripts, and edges.", category: "required"}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputReformattedGTF: {description: "Reformatted GTF file."}
reformattedGtf: {description: "Reformatted gtf file."}
}
}
@@ -470,7 +470,7 @@ task SummarizeDatasets {
}
output {
File outputSummaryFile = outputPrefix + "_talon_summary.tsv"
File summaryFile = outputPrefix + "_talon_summary.tsv"
}
runtime {
@@ -481,7 +481,7 @@ task SummarizeDatasets {
parameter_meta {
# inputs
databaseFile: {description: "TALON database.", category: "required"}
databaseFile: {description: "Talon database.", category: "required"}
setVerbose: {description: "Print out the counts in terminal.", category: "advanced"}
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
datasetGroupsCSV: {description: "File of comma-delimited dataset groups to process together.", category: "advanced"}
@@ -490,7 +490,7 @@ task SummarizeDatasets {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputSummaryFile: {description: "Tab-delimited file of gene and transcript counts for each dataset."}
summaryFile: {description: "Tab-delimited file of gene and transcript counts for each dataset."}
}
}
@@ -534,10 +534,10 @@ task Talon {
>>>
output {
File outputUpdatedDatabase = databaseFile
File outputLog = outputPrefix + "/run_QC.log"
File outputAnnot = outputPrefix + "/run_talon_read_annot.tsv"
File outputConfigFile = outputPrefix + "/talonConfigFile.csv"
File updatedDatabase = databaseFile
File talonLog = outputPrefix + "/run_QC.log"
File talonAnnotation = outputPrefix + "/run_talon_read_annot.tsv"
File talonConfigFile = outputPrefix + "/talonConfigFile.csv"
}
runtime {
@@ -549,13 +549,13 @@ task Talon {
parameter_meta {
# inputs
SAMfiles: {description: "Input SAM files.", category: "required"}
SAMfiles: {description: "Input sam files.", category: "required"}
organism: {description: "The name of the organism from which the samples originated.", category: "required"}
sequencingPlatform: {description: "The sequencing platform used to generate long reads.", category: "required"}
databaseFile: {description: "TALON database. Created using initialize_talon_database.py.", category: "required"}
databaseFile: {description: "Talon database. Created using initialize_talon_database.py.", category: "required"}
genomeBuild: {description: "Genome build (i.e. hg38) to use.", category: "required"}
minimumCoverage: {description: "Minimum alignment coverage in order to use a SAM entry.", category: "common"}
minimumIdentity: {description: "Minimum alignment identity in order to use a SAM entry.", category: "common" }
minimumCoverage: {description: "Minimum alignment coverage in order to use a sam entry.", category: "common"}
minimumIdentity: {description: "Minimum alignment identity in order to use a sam entry.", category: "common" }
outputPrefix: {description: "Output directory path + output file prefix.", category: "required"}
threads: {description: "The number of threads to be used.", category: "advanced"}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
@@ -563,9 +563,9 @@ task Talon {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputUpdatedDatabase: {description: "Updated TALON database."}
outputLog: {description: "Log file from TALON run."}
outputAnnot: {description: "Read annotation file from TALON run."}
outputConfigFile: {description: "The TALON configuration file."}
updatedDatabase: {description: "Updated talon database."}
talonLog: {description: "Log file from talon run."}
talonAnnotation: {description: "Read annotation file from talon run."}
talonConfigFile: {description: "The talon configuration file."}
}
}
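For the TALON tasks the renames propagate through a typical initialize-annotate-summarise chain. A minimal sketch (import path, literal values and the exact chaining are assumptions; parameter and output names come from the task definitions above):

version 1.0

import "talon.wdl" as talon  # import path is an assumption

workflow TalonChain {
    input {
        File annotationGtf
        Array[File] labeledSams
    }

    call talon.InitializeTalonDatabase {
        input:
            GTFfile = annotationGtf,
            genomeBuild = "hg38",
            annotationVersion = "gencode_v36",
            outputPrefix = "talon_db"
    }

    call talon.Talon {
        input:
            SAMfiles = labeledSams,
            organism = "Human",
            sequencingPlatform = "PacBio-Sequel-II",
            # Before: InitializeTalonDatabase.outputDatabase
            databaseFile = InitializeTalonDatabase.database,
            genomeBuild = "hg38",
            outputPrefix = "talon"
    }

    call talon.CreateAbundanceFileFromDatabase {
        input:
            # Before: Talon.outputUpdatedDatabase
            databaseFile = Talon.updatedDatabase,
            annotationVersion = "gencode_v36",
            genomeBuild = "hg38",
            outputPrefix = "talon"
    }

    output {
        # Before: Talon.outputAnnot / CreateAbundanceFileFromDatabase.outputAbundanceFile
        File readAnnotation = Talon.talonAnnotation
        File abundance = CreateAbundanceFileFromDatabase.abundanceFile
    }
}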