diff --git a/.dockstore.yml b/.dockstore.yml
index 7c238594c..76e333a4e 100644
--- a/.dockstore.yml
+++ b/.dockstore.yml
@@ -349,3 +349,13 @@ workflows:
     primaryDescriptorPath: /pipes/WDL/workflows/trimal.wdl
     testParameterFiles:
       - empty.json
+  - name: amplicon16S_analysis
+    subclass: WDL
+    primaryDescriptorPath: /pipes/WDL/workflows/amplicon16S_analysis.wdl
+    testParameterFiles:
+      - empty.json
+  - name: qiime_import_bam
+    subclass: WDL
+    primaryDescriptorPath: /pipes/WDL/workflows/qiime_import_bam.wdl
+    testParameterFiles:
+      - empty.json
\ No newline at end of file
diff --git a/pipes/WDL/tasks/tasks_16S_amplicon.wdl b/pipes/WDL/tasks/tasks_16S_amplicon.wdl
new file mode 100755
index 000000000..07c8d59cd
--- /dev/null
+++ b/pipes/WDL/tasks/tasks_16S_amplicon.wdl
@@ -0,0 +1,295 @@
+version 1.0
+
+task qiime_import_from_bam {
+    meta {
+        description: "Parse a demultiplexed BAM file into a QIIME-readable artifact (.qza)."
+    }
+    input {
+        File   reads_bam
+        String sample_name
+        Int    memory_mb = 2000
+        Int    cpu = 1
+        Int    disk_size_gb = ceil(2*size(reads_bam, "GiB")) + 5
+        String docker = "quay.io/broadinstitute/qiime2:conda"
+    }
+    parameter_meta {
+        reads_bam: {description: "Input BAM file"}
+    }
+
+    command <<<
+        set -ex -o pipefail
+        # Part 1A | BAM -> FASTQ (samtools)
+        samtools fastq -1 $(pwd)/R1.fastq.gz -2 $(pwd)/R2.fastq.gz -0 /dev/null ~{reads_bam}
+        # sanitize the sample name: replace underscores with dashes
+        NEWSAMPLENAME=$(echo "~{sample_name}" | perl -lape 's/[_]/-/g')
+        # record the sanitized name for the master sheet output
+        echo ${NEWSAMPLENAME} > NEWSAMPLENAME.txt
+        # write manifest.tsv with columns: sample-id, forward-absolute-filepath,
+        # reverse-absolute-filepath ('>' creates/overwrites, '>>' appends, '\t' = tab)
+        echo -e "sample-id\tforward-absolute-filepath\treverse-absolute-filepath" > manifest.tsv
+        echo -e "$NEWSAMPLENAME\t$(pwd)/R1.fastq.gz\t$(pwd)/R2.fastq.gz" >> manifest.tsv
+
+        # FASTQ -> QIIME artifact (qiime tools import)
+        qiime tools import \
+            --type 'SampleData[PairedEndSequencesWithQuality]' \
+            --input-path manifest.tsv \
+            --input-format PairedEndFastqManifestPhred33V2 \
+            --output-path "~{sample_name}.qza"
+    >>>
+
+    output {
+        File   reads_qza               = "~{sample_name}.qza"
+        String samplename_master_sheet = read_string("NEWSAMPLENAME.txt")
+    }
+    runtime {
+        docker: docker
+        memory: "${memory_mb} MiB"
+        cpu: cpu
+        disk: disk_size_gb + " GB"
+        disks: "local-disk " + disk_size_gb + " HDD"
+    }
+}
+
+# Part 1 | Step 2: cutadapt: trim sequences (default primers unless overridden)
+task trim_reads {
+
+    meta {
+        description: "Remove adapter sequences, primers, and other unwanted sequence from sequence data (cutadapt)."
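+        # The incoming reads_qza is the SampleData[PairedEndSequencesWithQuality]
+        # artifact built by qiime_import_from_bam above from a manifest of the
+        # following form (paths are illustrative, not literal):
+        #   sample-id   forward-absolute-filepath    reverse-absolute-filepath
+        #   sample-1    /cromwell_root/R1.fastq.gz   /cromwell_root/R2.fastq.gz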
+    }
+
+    input {
+        File reads_qza
+
+        String  qza_basename = basename(reads_qza, '.qza')
+        String  forward_adapter = "CTGCTGCCTCCCGTAGGAGT"
+        String  reverse_adapter = "AGAGTTTGATCCTGGCTCAG"
+        Int     min_length = 1
+        Boolean keep_untrimmed_reads = false
+        Int     memory_mb = 2000
+        Int     cpu = 1
+        Int     disk_size_gb = ceil(2*size(reads_qza, "GiB")) + 5
+        String  docker = "quay.io/broadinstitute/qiime2:conda"
+    }
+
+    command <<<
+        set -ex -o pipefail
+        qiime cutadapt trim-paired \
+            --i-demultiplexed-sequences "~{reads_qza}" \
+            --p-front-f "~{forward_adapter}" \
+            --p-front-r "~{reverse_adapter}" \
+            ~{"--p-minimum-length " + min_length} \
+            ~{true='--p-no-discard-untrimmed' false='--p-discard-untrimmed' keep_untrimmed_reads} \
+            --o-trimmed-sequences "~{qza_basename}_trimmed.qza"
+
+        # visualize the trimming results
+        qiime demux summarize \
+            --i-data "~{qza_basename}_trimmed.qza" \
+            --o-visualization "~{qza_basename}_trim_summary.qzv"
+    >>>
+
+    output {
+        # trimmed paired-end sequences, input for vsearch joining
+        File trimmed_reads_qza     = "~{qza_basename}_trimmed.qza"
+        File trimmed_visualization = "~{qza_basename}_trim_summary.qzv"
+    }
+
+    runtime {
+        docker: docker
+        memory: "${memory_mb} MiB"
+        cpu: cpu
+        disk: disk_size_gb + " GB"
+        disks: "local-disk " + disk_size_gb + " HDD"
+    }
+}
+
+# Part 1 | Step 3: vsearch: merge paired-end reads
+task join_paired_ends {
+    meta {
+        description: "Join paired-end sequence reads using vsearch's join-pairs function."
+    }
+    input {
+        # trimmed paired-end reads to merge
+        File   trimmed_reads_qza
+        String reads_basename = basename(trimmed_reads_qza, '.qza')
+        Int    memory_mb = 2000
+        Int    cpu = 1
+        Int    disk_size_gb = ceil(2*size(trimmed_reads_qza, "GiB")) + 5
+        String docker = "quay.io/broadinstitute/qiime2:conda"
+    }
+
+    command <<<
+        set -ex -o pipefail
+        qiime vsearch join-pairs \
+            --i-demultiplexed-seqs ~{trimmed_reads_qza} \
+            --o-joined-sequences "~{reads_basename}_joined.qza"
+
+        qiime demux summarize \
+            --i-data "~{reads_basename}_joined.qza" \
+            --o-visualization "~{reads_basename}_visualization.qzv"
+    >>>
+    output {
+        File joined_end_reads_qza     = "~{reads_basename}_joined.qza"
+        File joined_end_visualization = "~{reads_basename}_visualization.qzv"
+    }
+    runtime {
+        docker: docker
+        memory: "${memory_mb} MiB"
+        cpu: cpu
+        disk: disk_size_gb + " GB"
+        disks: "local-disk " + disk_size_gb + " HDD"
+    }
+}
+
+task deblur {
+
+    meta {
+        description: "Perform sequence quality control for Illumina data using the Deblur workflow with a 16S reference as a positive filter."
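+        # Note (per deblur's documented behavior): denoise-16S truncates each
+        # joined read to --p-trim-length (300 by default here) and drops reads
+        # shorter than that, so the trim length should be chosen from the
+        # joined-read length distribution in join_paired_ends' demux summary.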
+    }
+    input {
+        File   joined_end_reads_qza
+        String joined_end_basename = basename(joined_end_reads_qza, '.qza')
+        Int    trim_length_var = 300
+        Int    memory_mb = 2000
+        Int    cpu = 1
+        Int    disk_size_gb = ceil(2*size(joined_end_reads_qza, "GiB")) + 5
+        String docker = "quay.io/broadinstitute/qiime2:conda"
+    }
+    command <<<
+        set -ex -o pipefail
+
+        qiime deblur denoise-16S \
+            --i-demultiplexed-seqs ~{joined_end_reads_qza} \
+            ~{"--p-trim-length " + trim_length_var} \
+            --p-sample-stats \
+            --o-representative-sequences "~{joined_end_basename}_rep_seqs.qza" \
+            --o-table "~{joined_end_basename}_table.qza" \
+            --o-stats "~{joined_end_basename}_stats.qza"
+
+        # generate the feature table: number of features per sample
+        qiime feature-table summarize \
+            --i-table "~{joined_end_basename}_table.qza" \
+            --o-visualization "~{joined_end_basename}_table.qzv"
+        # generate a visualization of the deblur stats
+        qiime deblur visualize-stats \
+            --i-deblur-stats "~{joined_end_basename}_stats.qza" \
+            --o-visualization "~{joined_end_basename}_stats.qzv"
+    >>>
+    output {
+        File representative_seqs_qza  = "~{joined_end_basename}_rep_seqs.qza"
+        File representative_table_qza = "~{joined_end_basename}_table.qza"
+        File feature_table            = "~{joined_end_basename}_table.qzv"
+        File visualize_stats          = "~{joined_end_basename}_stats.qzv"
+    }
+    runtime {
+        docker: docker
+        memory: "${memory_mb} MiB"
+        cpu: cpu
+        disk: disk_size_gb + " GB"
+        disks: "local-disk " + disk_size_gb + " HDD"
+    }
+}
+
+task train_classifier {
+    meta {
+        description: "Train a naive Bayes classifier to classify V1-2 amplicon sequences."
+    }
+    input {
+        File   otu_ref
+        File   taxonomy_ref
+        String forward_adapter
+        String reverse_adapter
+        Int    min_length = 100
+        Int    max_length = 500
+        String otu_basename = basename(otu_ref, '.qza')
+        Int    memory_mb = 2000
+        Int    cpu = 1
+        Int    disk_size_gb = ceil(2*size(otu_ref, "GiB")) + 5
+        String docker = "quay.io/broadinstitute/qiime2:conda"
+    }
+    command <<<
+        set -ex -o pipefail
+        CONDA_ENV_NAME=$(conda info --envs -q | awk -F" " '/qiime.*/{ print $1 }')
+        conda activate ${CONDA_ENV_NAME}
+
+        qiime tools import \
+            --type 'FeatureData[Sequence]' \
+            --input-path ~{otu_ref} \
+            --output-path "~{otu_basename}_seqs.qza"
+
+        qiime tools import \
+            --type 'FeatureData[Taxonomy]' \
+            --input-format HeaderlessTSVTaxonomyFormat \
+            --input-path ~{taxonomy_ref} \
+            --output-path "~{otu_basename}_tax.qza"
+
+        qiime feature-classifier extract-reads \
+            --i-sequences "~{otu_basename}_seqs.qza" \
+            --p-f-primer "~{forward_adapter}" \
+            --p-r-primer "~{reverse_adapter}" \
+            ~{"--p-min-length " + min_length} \
+            ~{"--p-max-length " + max_length} \
+            --o-reads "~{otu_basename}_v1-2-ref-seqs.qza"
+
+        qiime feature-classifier fit-classifier-naive-bayes \
+            --i-reference-reads "~{otu_basename}_v1-2-ref-seqs.qza" \
+            --i-reference-taxonomy "~{otu_basename}_tax.qza" \
+            --o-classifier "~{otu_basename}_v1-2-classifier.qza"
+    >>>
+    output {
+        File trained_classifier = "~{otu_basename}_v1-2-classifier.qza"
+    }
+    runtime {
+        docker: docker
+        memory: "${memory_mb} MiB"
+        cpu: cpu
+        disk: disk_size_gb + " GB"
+        disks: "local-disk " + disk_size_gb + " HDD"
+    }
+}
+
+task tax_analysis {
+    meta {
+        description: "Perform taxonomic classification with a naive Bayes classifier trained on the V1-2 regions amplified by our primers."
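+        # classify-sklearn applies the trained naive Bayes model to deblur's
+        # representative sequences; taxa barplot then joins the resulting
+        # FeatureData[Taxonomy] with the feature table for visualization.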
+    }
+    input {
+        File   trained_classifier
+        File   representative_seqs_qza
+        File   representative_table_qza
+        String basename = basename(trained_classifier, '.qza')
+        Int    memory_mb = 7000
+        Int    cpu = 1
+        Int    disk_size_gb = 375
+        String docker = "quay.io/broadinstitute/qiime2:conda"
+    }
+    command <<<
+        set -ex -o pipefail
+        qiime feature-classifier classify-sklearn \
+            --i-classifier ~{trained_classifier} \
+            --i-reads ~{representative_seqs_qza} \
+            --o-classification "~{basename}_tax.qza"
+
+        qiime feature-table tabulate-seqs \
+            --i-data ~{representative_seqs_qza} \
+            --o-visualization "~{basename}_rep_seqs.qzv"
+
+        qiime taxa barplot \
+            --i-table ~{representative_table_qza} \
+            --i-taxonomy "~{basename}_tax.qza" \
+            --o-visualization "~{basename}_bar_plots.qzv"
+    >>>
+    output {
+        File rep_seq_list             = "~{basename}_rep_seqs.qzv"
+        File tax_classification_graph = "~{basename}_bar_plots.qzv"
+    }
+    runtime {
+        docker: docker
+        memory: "${memory_mb} MiB"
+        cpu: cpu
+        disk: disk_size_gb + " GB"
+        disks: "local-disk " + disk_size_gb + " HDD"
+    }
+}
\ No newline at end of file
diff --git a/pipes/WDL/tasks/tasks_metagenomics.wdl b/pipes/WDL/tasks/tasks_metagenomics.wdl
index d9b806193..127d354b4 100644
--- a/pipes/WDL/tasks/tasks_metagenomics.wdl
+++ b/pipes/WDL/tasks/tasks_metagenomics.wdl
@@ -209,7 +209,7 @@ task kraken2 {
     Float?  confidence_threshold
     Int?    min_base_qual

-    Int?    machine_mem_gb
+    Int     machine_mem_gb = 72
     String  docker = "quay.io/broadinstitute/viral-classify:2.1.33.0"
   }
@@ -237,7 +237,7 @@ task kraken2 {
   String out_basename = basename(basename(reads_bam, '.bam'), '.fasta')
   Int    disk_size = 750

-  command {
+  command <<<
     set -ex -o pipefail

     if [ -z "$TMPDIR" ]; then
@@ -248,72 +248,72 @@ task kraken2 {
     # decompress DB to $DB_DIR
     read_utils.py extract_tarball \
-      ${kraken2_db_tgz} $DB_DIR/kraken2 \
+      "~{kraken2_db_tgz}" $DB_DIR/kraken2 \
       --loglevel=DEBUG
     du -hs $DB_DIR/kraken2

     # unpack krona taxonomy.tab
-    if [[ ${krona_taxonomy_db_tgz} == *.tar.* ]]; then
+    if [[ "~{krona_taxonomy_db_tgz}" == *.tar.* ]]; then
       read_utils.py extract_tarball \
-        ${krona_taxonomy_db_tgz} $DB_DIR/krona \
+        "~{krona_taxonomy_db_tgz}" $DB_DIR/krona \
         --loglevel=DEBUG &  # we don't need this until later
     else
-      if [[ "${krona_taxonomy_db_tgz}" == *.zst ]]; then
-        cat "${krona_taxonomy_db_tgz}" | zstd -d > $DB_DIR/krona/taxonomy.tab &
-      elif [[ "${krona_taxonomy_db_tgz}" == *.gz ]]; then
-        cat "${krona_taxonomy_db_tgz}" | pigz -dc > $DB_DIR/krona/taxonomy.tab &
-      elif [[ "${krona_taxonomy_db_tgz}" == *.bz2 ]]; then
-        cat "${krona_taxonomy_db_tgz}" | bzip -dc > $DB_DIR/krona/taxonomy.tab &
+      if [[ "~{krona_taxonomy_db_tgz}" == *.zst ]]; then
+        cat "~{krona_taxonomy_db_tgz}" | zstd -d > $DB_DIR/krona/taxonomy.tab &
+      elif [[ "~{krona_taxonomy_db_tgz}" == *.gz ]]; then
+        cat "~{krona_taxonomy_db_tgz}" | pigz -dc > $DB_DIR/krona/taxonomy.tab &
+      elif [[ "~{krona_taxonomy_db_tgz}" == *.bz2 ]]; then
+        cat "~{krona_taxonomy_db_tgz}" | bzip2 -dc > $DB_DIR/krona/taxonomy.tab &
       else
-        cp "${krona_taxonomy_db_tgz}" $DB_DIR/krona/taxonomy.tab &
+        cp "~{krona_taxonomy_db_tgz}" $DB_DIR/krona/taxonomy.tab &
       fi
     fi

     metagenomics.py --version | tee VERSION

-    if [[ ${reads_bam} == *.bam ]]; then
+    if [[ "~{reads_bam}" == *.bam ]]; then
       metagenomics.py kraken2 \
         $DB_DIR/kraken2 \
-        ${reads_bam} \
-        --outReads "${out_basename}".kraken2.reads.txt \
-        --outReports "${out_basename}".kraken2.report.txt \
-        ${"--confidence " + confidence_threshold} \
-        ${"--min_base_qual " + min_base_qual} \
+        "~{reads_bam}" \
+        --outReads "~{out_basename}".kraken2.reads.txt \
+        --outReports "~{out_basename}".kraken2.report.txt \
+        ~{"--confidence " + confidence_threshold} \
+        ~{"--min_base_qual " + min_base_qual} \
         --loglevel=DEBUG
     else
       # fasta input file: call kraken2 directly
       kraken2 \
         --db $DB_DIR/kraken2 \
-        ${reads_bam} \
-        --output "${out_basename}".kraken2.reads.txt \
-        --report "${out_basename}".kraken2.report.txt \
-        ${"--confidence " + confidence_threshold} \
-        ${"--min_base_qual " + min_base_qual}
+        "~{reads_bam}" \
+        --output "~{out_basename}".kraken2.reads.txt \
+        --report "~{out_basename}".kraken2.report.txt \
+        ~{"--confidence " + confidence_threshold} \
+        ~{"--min_base_qual " + min_base_qual}
     fi

     wait # for krona_taxonomy_db_tgz to download and extract

-    pigz "${out_basename}".kraken2.reads.txt &
+    pigz "~{out_basename}".kraken2.reads.txt &

     metagenomics.py krona \
-      "${out_basename}".kraken2.report.txt \
+      "~{out_basename}".kraken2.report.txt \
       $DB_DIR/krona \
-      "${out_basename}".kraken2.krona.html \
-      --sample_name "${out_basename}" \
+      "~{out_basename}".kraken2.krona.html \
+      --sample_name "~{out_basename}" \
       --noRank --noHits --inputType kraken2 \
       --loglevel=DEBUG

     wait # pigz reads.txt
-  }
+  >>>

   output {
-    File kraken2_reads_report   = "${out_basename}.kraken2.reads.txt.gz"
-    File kraken2_summary_report = "${out_basename}.kraken2.report.txt"
-    File krona_report_html      = "${out_basename}.kraken2.krona.html"
+    File kraken2_reads_report   = "~{out_basename}.kraken2.reads.txt.gz"
+    File kraken2_summary_report = "~{out_basename}.kraken2.report.txt"
+    File krona_report_html      = "~{out_basename}.kraken2.krona.html"
     String viralngs_version = read_string("VERSION")
   }
   runtime {
-    docker: "${docker}"
-    memory: select_first([machine_mem_gb, 52]) + " GB"
+    docker: docker
+    memory: machine_mem_gb + " GB"
     cpu: 8
     disks: "local-disk " + disk_size + " LOCAL"
     disk: disk_size + " GB" # TESs
diff --git a/pipes/WDL/tasks/tasks_read_utils.wdl b/pipes/WDL/tasks/tasks_read_utils.wdl
index 46dac190e..dd1db3a69 100644
--- a/pipes/WDL/tasks/tasks_read_utils.wdl
+++ b/pipes/WDL/tasks/tasks_read_utils.wdl
@@ -144,7 +144,7 @@ task merge_and_reheader_bams {
     Array[File]+ in_bams
     String?      sample_name
     File?        reheader_table
-    String       out_basename
+    String       out_basename = basename(in_bams[0], ".bam")
     String       docker = "quay.io/broadinstitute/viral-core:2.1.33"
   }
diff --git a/pipes/WDL/tasks/tasks_sarscov2.wdl b/pipes/WDL/tasks/tasks_sarscov2.wdl
index ed975f987..6891fe07c 100644
--- a/pipes/WDL/tasks/tasks_sarscov2.wdl
+++ b/pipes/WDL/tasks/tasks_sarscov2.wdl
@@ -10,7 +10,7 @@ task pangolin_one_sample {
     Float?  max_ambig
     String? analysis_mode
     Boolean update_dbs_now=false
-    String  docker = "quay.io/staphb/pangolin:4.1.2-pdata-1.14"
+    String  docker = "quay.io/staphb/pangolin:4.1.3-pdata-1.17"
   }
   String basename = basename(genome_fasta, ".fasta")
   Int disk_size = 50
@@ -93,7 +93,7 @@ task pangolin_many_samples {
     String? analysis_mode
     Boolean update_dbs_now=false
     String  basename
-    String  docker = "quay.io/staphb/pangolin:4.1.2-pdata-1.14"
+    String  docker = "quay.io/staphb/pangolin:4.1.3-pdata-1.17"
   }
   Int disk_size = 100
   command <<<
diff --git a/pipes/WDL/workflows/amplicon16S_analysis.wdl b/pipes/WDL/workflows/amplicon16S_analysis.wdl
new file mode 100755
index 000000000..b0635eafb
--- /dev/null
+++ b/pipes/WDL/workflows/amplicon16S_analysis.wdl
@@ -0,0 +1,48 @@
+version 1.0
+
+import "../tasks/tasks_16S_amplicon.wdl" as qiime
+
+workflow amplicon16S_analysis {
+
+    meta {
+        description: "Run 16S amplicon sequencing analysis (from BAM input) with QIIME."
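+        # Steps: qiime_import_from_bam -> trim_reads (cutadapt) ->
+        #        join_paired_ends (vsearch) -> deblur -> tax_analysis.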
+        author: "fnegrete"
+        email:  "viral_ngs@broadinstitute.org"
+        allowNestedInputs: true
+    }
+    input {
+        File    reads_bam
+        File    trained_classifier
+        String  sample_name
+        Boolean keep_untrimmed_reads
+    }
+
+    call qiime.qiime_import_from_bam {
+        input:
+            reads_bam   = reads_bam,
+            sample_name = sample_name
+    }
+    #__________________________________________
+    call qiime.trim_reads {
+        input:
+            reads_qza            = qiime_import_from_bam.reads_qza,
+            keep_untrimmed_reads = keep_untrimmed_reads
+    }
+    #__________________________________________
+    call qiime.join_paired_ends {
+        input:
+            trimmed_reads_qza = trim_reads.trimmed_reads_qza
+    }
+    #_________________________________________
+    call qiime.deblur {
+        input:
+            joined_end_reads_qza = join_paired_ends.joined_end_reads_qza
+    }
+    #_________________________________________
+    call qiime.tax_analysis {
+        input:
+            trained_classifier       = trained_classifier,
+            representative_seqs_qza  = deblur.representative_seqs_qza,
+            representative_table_qza = deblur.representative_table_qza
+    }
+}
\ No newline at end of file
diff --git a/pipes/WDL/workflows/classify_single.wdl b/pipes/WDL/workflows/classify_single.wdl
index e26c05b34..3d7d6f9c2 100644
--- a/pipes/WDL/workflows/classify_single.wdl
+++ b/pipes/WDL/workflows/classify_single.wdl
@@ -14,7 +14,7 @@ workflow classify_single {
     }

     input {
-        File reads_bam
+        Array[File]+ reads_bams

         File ncbi_taxdump_tgz

@@ -26,8 +26,8 @@ workflow classify_single {
     }

     parameter_meta {
-        reads_bam: {
-            description: "Reads to classify. May be unmapped or mapped or both, paired-end or single-end.",
+        reads_bams: {
+            description: "Reads to classify. May be unmapped or mapped or both, paired-end or single-end. Multiple input files will be merged first.",
             patterns: ["*.bam"]
         }
         spikein_db: {
@@ -52,9 +52,12 @@ workflow classify_single {
         }
     }

-    call reports.fastqc as fastqc_raw {
-        input: reads_bam = reads_bam
+    call read_utils.merge_and_reheader_bams as merge_raw_reads {
+        input:
+            in_bams = reads_bams
     }
+    File reads_bam = merge_raw_reads.out_bam
+
     call reports.align_and_count as spikein {
         input:
             reads_bam = reads_bam,
@@ -110,6 +113,10 @@ workflow classify_single {
         File kraken2_summary_report = kraken2.kraken2_summary_report
         File kraken2_krona_plot     = kraken2.krona_report_html

+        File   raw_fastqc     = merge_raw_reads.fastqc
+        File   cleaned_fastqc = fastqc_cleaned.fastqc_html
+        File   spikein_report = spikein.report
+        String spikein_tophit = spikein.top_hit_id
         String kraken2_viral_classify_version = kraken2.viralngs_version
         String deplete_viral_classify_version = deplete.viralngs_version
diff --git a/pipes/WDL/workflows/qiime_import_bam.wdl b/pipes/WDL/workflows/qiime_import_bam.wdl
new file mode 100644
index 000000000..caaee2cdb
--- /dev/null
+++ b/pipes/WDL/workflows/qiime_import_bam.wdl
@@ -0,0 +1,23 @@
+version 1.0
+
+import "../tasks/tasks_16S_amplicon.wdl" as qiime
+
+workflow qiime_import_bam {
+
+    meta {
+        description: "Import a demultiplexed BAM file into QIIME."
+        author: "fnegrete"
+        email:  "viral_ngs@broadinstitute.org"
+        allowNestedInputs: true
+    }
+    input {
+        File   reads_bam
+        String sample_name
+    }
+
+    call qiime.qiime_import_from_bam {
+        input:
+            reads_bam   = reads_bam,
+            sample_name = sample_name
+    }
+}
\ No newline at end of file
diff --git a/requirements-modules.txt b/requirements-modules.txt
index cda9fbce8..88a82212c 100644
--- a/requirements-modules.txt
+++ b/requirements-modules.txt
@@ -7,5 +7,5 @@ broadinstitute/beast-beagle-cuda=1.10.5pre
 broadinstitute/ncbi-tools=2.10.7.10
 nextstrain/base=build-20211012T204409Z
 andersenlabapps/ivar=1.3.1
-quay.io/staphb/pangolin=4.1.2-pdata-1.14
+quay.io/staphb/pangolin=4.1.3-pdata-1.17
 nextstrain/nextclade=2.9.1
diff --git a/test/input/WDL/cromwell-local/test_inputs-qiime_import_bam-local.json b/test/input/WDL/cromwell-local/test_inputs-qiime_import_bam-local.json
new file mode 120000
index 000000000..15bb6b643
--- /dev/null
+++ b/test/input/WDL/cromwell-local/test_inputs-qiime_import_bam-local.json
@@ -0,0 +1 @@
+../test_inputs-qiime_import_bam-local.json
\ No newline at end of file
diff --git a/test/input/WDL/miniwdl-local/test_inputs-qiime_import_bam-local.json b/test/input/WDL/miniwdl-local/test_inputs-qiime_import_bam-local.json
new file mode 120000
index 000000000..15bb6b643
--- /dev/null
+++ b/test/input/WDL/miniwdl-local/test_inputs-qiime_import_bam-local.json
@@ -0,0 +1 @@
+../test_inputs-qiime_import_bam-local.json
\ No newline at end of file
diff --git a/test/input/WDL/test_inputs-qiime_import_bam-local.json b/test/input/WDL/test_inputs-qiime_import_bam-local.json
new file mode 100644
index 000000000..000c9f95a
--- /dev/null
+++ b/test/input/WDL/test_inputs-qiime_import_bam-local.json
@@ -0,0 +1,4 @@
+{
+    "qiime_import_bam.reads_bam": "test/input/G5012.3.subset.bam",
+    "qiime_import_bam.sample_name": "G5012.3.subset.bam"
+}
diff --git a/test/input/WDL/test_outputs-sarscov2_lineages-local.json b/test/input/WDL/test_outputs-sarscov2_lineages-local.json
index bf4191625..804a776ee 100644
--- a/test/input/WDL/test_outputs-sarscov2_lineages-local.json
+++ b/test/input/WDL/test_outputs-sarscov2_lineages-local.json
@@ -1,5 +1,5 @@
 {
-    "sarscov2_lineages.nextclade_clade": "20C",
+    "sarscov2_lineages.nextclade_clade": "20A",
     "sarscov2_lineages.nextclade_aa_subs": "ORF1b:P314L,ORF3a:Q57H,S:D614G",
     "sarscov2_lineages.nextclade_aa_dels": "",
     "sarscov2_lineages.pango_lineage": "B.1"