minPasses: {description: "Minimum number of full-length subreads required to generate CCS for a ZMW.", category: "advanced"}
minPasses: {description: "Minimum number of full-length subreads required to generate CCS for a ZMW.", category: "advanced"}
minLength: {description: "Minimum draft length before polishing.", category: "advanced"}
maxLength: {description: "Maximum draft length before polishing.", category: "advanced"}
byStrand: {description: "Generate a consensus for each strand.", category: "advanced"}
...
...
@@ -84,9 +84,9 @@ task CCS {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
offrate: {description: "The number of rows marked by the indexer.", category: "common"}
...
...
@@ -88,7 +88,7 @@ task Build {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
inputFormat: {description: "The format of the read file(s).", category: "required"}
phred64: {description: "If set to true, Phred+64 encoding is used.", category: "required"}
phred64: {description: "If set to true, Phred+64 encoding is used.", category: "required"}
minHitLength: {description: "Minimum length of partial hits.", category: "required"}
indexFiles: {description: "The files of the index for the reference genomes.", category: "required"}
read1: {description: "List of files containing mate 1s, or unpaired reads.", category: "required"}
...
...
@@ -172,9 +172,9 @@ task Classify {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputMetrics: {description: "File with Centrifuge metrics."}
outputClassification: {description: "File with the classification results."}
outputReport: {description: "File with a classification summary."}
metrics: {description: "File with Centrifuge metrics."}
classification: {description: "File with the classification results."}
report: {description: "File with a classification summary."}
across: {description: "When printing FASTA output, output a newline character every <int> bases.", category: "common"}
across: {description: "When printing FASTA output, output a newline character every <int> bases.", category: "common"}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputInspect: {description: "Output file according to output option."}
inspectResult: {description: "Output file according to output option."}
indexFiles: {description: "The files of the index for the reference genomes.", category: "required"}
noLCA: {description: "Do not report the LCA of multiple assignments, but report count fractions at the taxa.", category: "advanced"}
noLCA: {description: "Do not report the LCA of multiple assignments, but report count fractions at the taxa.", category: "advanced"}
showZeros: {description: "Show clades that have zero reads.", category: "advanced"}
isCountTable: {description: "The format of the file is taxID<tab>COUNT.", category: "advanced"}
minimumScore: {description: "Require a minimum score for reads to be counted.", category: "advanced"}
...
...
@@ -361,7 +361,7 @@ task Kreport {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputKreport: {description: "File with kraken style report."}
KReport: {description: "File with kraken style report."}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputKronaPlot: {description: "Krona taxonomy plot html file."}
kronaPlot: {description: "Krona taxonomy plot HTML file."}
outputNamePrefix: {description: "Basename of the output files.", category: "required"}
...
...
@@ -81,11 +81,11 @@ task Refine {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
minEndScore: {description: "Minimum end barcode score threshold is applied to the individual leading and trailing ends.", category: "advanced"}
minSignalIncrease: {description: "The minimal score difference, between first and combined, required to call a barcode pair different.", category: "advanced"}
minScoreLead: {description: "The minimal score lead required to call a barcode pair significant.", category: "common"}
ccsMode: {description: "CCS mode, use optimal alignment options.", category: "common"}
cores: {description: "The number of cores to be used.", category: "advanced"}
...
...
@@ -148,13 +148,13 @@ task Lima {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputAbundanceFile: {description: "Abundance for each transcript in the TALON database across datasets."}
abundanceFile: {description: "Abundance for each transcript in the TALON database across datasets."}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputGTFfile: {description: "The genes, transcripts, and exons stored a TALON database in GTF format."}
gtfFile: {description: "The genes, transcripts, and exons stored in a TALON database in GTF format."}
maxFracA: {description: "Maximum fraction of As to allow in the window located immediately after any read assigned to a novel transcript.", category: "advanced"}
minCount: {description: "Number of minimum occurrences required for a novel transcript PER dataset.", category: "advanced"}
minCount: {description: "Number of minimum occurrences required for a novel transcript per dataset.", category: "advanced"}
allowGenomic: {description: "If this option is set, transcripts from the Genomic novelty category will be permitted in the output.", category: "advanced"}
datasetsFile: {description: "Datasets to include.", category: "advanced"}
minDatasets: {description: "Minimum number of datasets novel transcripts must be found in.", category: "advanced"}
...
...
@@ -188,7 +188,7 @@ task FilterTalonTranscripts {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputTranscriptWhitelist: {description: "A transcript whitelist produced from the TALON database."}
transcriptWhitelist: {description: "Transcript whitelist produced from the TALON database."}
datasetFile: {description: "A file indicating which datasets should be included.", category: "advanced"}
...
...
@@ -236,7 +236,7 @@ task GetReadAnnotations {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputAnnotation: {description: "Read-specific annotation information from a TALON database."}
readAnnotations: {description: "Read-specific annotation information from a TALON database."}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
...
...
@@ -287,7 +287,7 @@ task GetSpliceJunctions {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputSJfile: {description: "File containing locations, novelty and transcript assignments of exons/introns."}
spliceJunctions: {description: "File containing locations, novelty and transcript assignments of exons/introns."}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
SAMfile: {description: "SAM file of transcripts.", category: "required"}
SAMfile: {description: "SAM file of transcripts.", category: "required"}
referenceGenome: {description: "Reference genome fasta file.", category: "required"}
fracaRangeSize: {description: "Size of post-transcript interval to compute fraction.", category: "common"}
tmpDir: {description: "Path to directory for tmp files.", category: "advanced"}
...
...
@@ -404,8 +404,8 @@ task LabelReads {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputLabeledSAM: {description: "SAM file with labeled transcripts."}
outputReadLabels: {description: "Tabular file with fraction description per read."}
labeledSam: {description: "SAM file with labeled transcripts."}
readLabels: {description: "Tabular file with fraction description per read."}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
timeMinutes: {description: "The maximum amount of time the job will run in minutes.", category: "advanced"}
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
datasetGroupsCSV: {description: "File of comma-delimited dataset groups to process together.", category: "advanced"}
...
...
@@ -490,7 +490,7 @@ task SummarizeDatasets {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputSummaryFile: {description: "Tab-delimited file of gene and transcript counts for each dataset."}
summaryFile: {description: "Tab-delimited file of gene and transcript counts for each dataset."}
threads: {description: "The number of threads to be used.", category: "advanced"}
memory: {description: "The amount of memory available to the job.", category: "advanced"}
...
...
@@ -563,9 +563,9 @@ task Talon {
dockerImage: {description: "The docker image used for this task. Changing this may result in errors which the developers may choose not to address.", category: "advanced"}
# outputs
outputUpdatedDatabase: {description: "Updated TALON database."}
outputLog: {description: "Log file from TALON run."}
outputAnnot: {description: "Read annotation file from TALON run."}
outputConfigFile: {description: "The TALON configuration file."}
updatedDatabase: {description: "Updated TALON database."}
talonLog: {description: "Log file from TALON run."}
talonAnnotation: {description: "Read annotation file from TALON run."}
talonConfigFile: {description: "The TALON configuration file."}