Metagenomics
1. Database download
## nt and nr databases
https://ftp.ncbi.nlm.nih.gov/blast/db/
https://ftp.ncbi.nlm.nih.gov/refseq/release/
https://nmdc.cn/datadownload
2. Metagenome analysis
########################################################
## Nanopore (third-generation) metagenome analysis
# 1 Nanopore sequencing data processing
cp -r /TestDatas/nanopore8/data/fast5_files/ ./
guppy_basecaller -i fast5_files/ -s output --config dna_r9.4.1_450bps_fast.cfg -r -x cuda:all
cat output/*.fastq >mocklog.fastq
gzip mocklog.fastq
filtlong --min_length 500 --min_mean_q 90 mocklog.fastq.gz | gzip >mocklog.filtlong.fq.gz
NanoPlot --fastq mocklog.filtlong.fq.gz -o clean -t 12
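# Optional sanity check: compare read counts and lengths before and after filtering
# (a quick sketch with seqkit, which is also used later in these notes)
seqkit stats mocklog.fastq.gz mocklog.filtlong.fq.gz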
# 2 Build the index
cp -R /TestDatas/nanopore8/index/ ./
centrifuge-build --conversion-table gi_to_tid.dmp --taxonomy-tree nodes.dmp --name-table names.dmp test.fa test
zcat nucl_gb.accession2taxid.gz | awk 'NR>1 {print $2"\t"$3}' >acc_to_tid.dmp   # skip the header line
python2 /Software/biosoft/centrifuge/centrifuge-build --conversion-table acc_to_tid.dmp --taxonomy-tree nodes.dmp --name-table names.dmp nt nt
# 3 Microbial identification with centrifuge
time centrifuge -x /MetaDatabase/centrifuge_h+p+v_20200318/hpv -U mocklog.filtlong.fq.gz --report-file report.tsv -S result.tsv -p 12 2>centrifuge.log
# 4 Filter the results: keep species-level taxa with more than 5 unique reads (column 6 = numUniqueReads)
awk -F "\t" '{if ($3=="species" && $6 >5) print $1"\t"$6}' report.tsv >report-filter.txt
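# Optional: rank the surviving taxa by unique-read count to see the dominant species first
# (a minimal sketch over the two-column report-filter.txt produced above)
sort -t$'\t' -k2,2nr report-filter.txt | head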
# 5 Extract reads for a given taxon
TAXNAME="Pseudomonas fluorescens"
TAXID=$(grep "$TAXNAME" report.tsv | cut -f 2)
awk -F '\t' -v var="$TAXID" '{if ($3==var) print $1}' result.tsv |sort |uniq >id.list
# Method 1: seqtk
seqtk subseq mocklog.filtlong.fq.gz id.list
# Method 2: seqkit
seqkit grep -f id.list mocklog.filtlong.fq.gz
# Convert the per-read classification output to a Kraken-style report
centrifuge-kreport -x centrifuge_h+p+v_20200318/hpv result.tsv >report_kraken.tsv
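# Optional: interactive pie-chart view with Krona (a sketch, assuming KronaTools is installed
# and its taxonomy database has been set up with ktUpdateTaxonomy.sh)
tail -n +2 result.tsv | cut -f 1,3 >krona.in
ktImportTaxonomy krona.in -o krona.html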
# Clinical sample testing
# 1 Remove host reads
mkdir clinical; cd clinical
# Map against the human reference and keep only unmapped reads
REF=/MetaDatabase/human/GCF_000001405.39_GRCh38.p13_genomic.fna
READ=/data/PRJEB30781/P10.fastq.gz
minimap2 -ax map-ont $REF $READ -Y -N 20 -t 12 >minimap2.sam
samtools fastq -f 4 minimap2.sam | gzip >P10.filter.fq.gz
# Compare read counts before and after host filtering
seqkit stat /data/PRJEB30781/P10.fastq.gz P10.filter.fq.gz
# 2 Clinical microbial detection with centrifuge
time centrifuge -x centrifuge_h+p+v_20200318/hpv -U P10.filter.fq.gz --report-file report.tsv -S result.tsv -p 12 2>centrifuge.log
# 3 Batch processing
cd PRJEB30781
# Build one sample-sheet line per fastq.gz file and save it to temp.list
ls -1 *.gz | while read i;do echo "1 $PWD/${i} ${i%.*.gz}.result ${i%.*.gz}.report";done >temp.list
# Insert an empty mate2 column to match centrifuge's five-column sample-sheet format
awk '{print $1"\t"$2"\t\t"$3"\t"$4}' temp.list >sample_sheet.list
cat -T sample_sheet.list   # -T prints tabs as ^I so the columns can be verified
# Run the whole batch
centrifuge -x /MetaDatabase/centrifuge_h+p+v_20200318/hpv --sample-sheet sample_sheet.list --min-hitlen 100 -p 12
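# Optional follow-up: convert every per-sample result to a Kraken-style report in one loop
# (a sketch reusing the centrifuge-kreport call shown earlier)
for r in *.result; do
centrifuge-kreport -x /MetaDatabase/centrifuge_h+p+v_20200318/hpv $r >${r%.result}.kreport
done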
# Environment setup for short-read (Illumina) metagenomics
# Build a custom bowtie2 index
# Download the Arabidopsis genome
wget -c http://ftp.ensemblgenomes.org/pub/plants/release-53/fasta/arabidopsis_thaliana/dna/Arabidopsis_thaliana.TAIR10.dna.toplevel.fa.gz
gunzip Arabidopsis_thaliana.TAIR10.dna.toplevel.fa.gz
mv Arabidopsis_thaliana.TAIR10.dna.toplevel.fa tair10.fa
# Build the bowtie2 index
bowtie2-build -f tair10.fa tair10 --threads 12
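# Usage sketch: with the tair10 index, host (Arabidopsis) reads can be dropped by keeping
# only pairs that fail to align concordantly; the sample file names here are placeholders
bowtie2 -x tair10 -1 sample.1.fq.gz -2 sample.2.fq.gz -p 12 \
--un-conc-gz sample_nonhost_%.fq.gz -S /dev/null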
# Configure the metaphlan database
cd ~/miniconda3/envs/biobakery/lib/python3.7/site-packages/metaphlan/
cp -R /MetaDatabase/metaphlan_databases/ ./
# 4 humann databases
# Inspect the current database configuration
humann_config --print
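# If the databases are missing, they are normally fetched with humann_databases
# (network-heavy downloads; the target directory /MetaDatabase/humann is an assumption)
humann_databases --download chocophlan full /MetaDatabase/humann
humann_databases --download uniref uniref90_diamond /MetaDatabase/humann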
# Download HMP demo samples (create the target directory first)
mkdir -p input
buccal_mucosa_samples="SRS013506 SRS015374 SRS015646 SRS017687 SRS019221 SRS019329 SRS020336 SRS022145 SRS022532 SRS045049"
for s in ${buccal_mucosa_samples}
do
wget http://downloads.hmpdacc.org/data/Illumina/buccal_mucosa/${s}.tar.bz2 -O input/${s}.tar.bz2
done
tongue_dorsum_samples="SRS011243 SRS013234 SRS014888 SRS015941 SRS016086 SRS016342 SRS017713 SRS019219 SRS019327 SRS043663"
for s in ${tongue_dorsum_samples}
do
wget http://downloads.hmpdacc.org/data/Illumina/tongue_dorsum/${s}.tar.bz2 -O input/${s}.tar.bz2
done
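# Unpack the downloaded archives (a small sketch; each tarball expands to fastq files)
for f in input/*.tar.bz2; do
tar -xjf $f -C input/
done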
# Short-read metagenome analysis
# 1 QC and filtering with kneaddata
kneaddata -i ERR4007992_1.fastq.gz \
-i ERR4007992_2.fastq.gz \
-db Homo_sapiens \
-o kneaddata_output --remove-intermediate-output -v -t 12 \
--trimmomatic ~/miniconda3/envs/biobakery/share/trimmomatic/ --trimmomatic-options \
"ILLUMINACLIP:$HOME/miniconda3/envs/biobakery/share/trimmomatic/adapters/TruSeq3-PE.fa:2:40:15 SLIDINGWINDOW:4:20 MINLEN:50" \
--reorder --bowtie2-options '--very-sensitive --dovetail' --run-fastqc-start --run-fastqc-end
# 1 QC and filtering (alternative route: fastqc + fastp)
mkdir qc
fastqc -f fastq -o qc SRS011243/*.fastq.gz -t 12
fastp -i SRS011243.1.fastq.gz -o SRS011243_clean.1.fq.gz \
-I SRS011243.2.fastq.gz -O SRS011243_clean.2.fq.gz \
-D -z 4 -q 20 -u 30 -n 10 -h SRS011243.html
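# Optional: aggregate the FastQC and fastp reports into a single page
# (a sketch, assuming MultiQC is installed)
multiqc qc -o qc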
# 3 Remove host reads with bwa + samtools
REF=human_g1k_v37.fasta
READ1=SRS011243.1.fastq.gz
READ2=SRS011243.2.fastq.gz
bwa-mem2 index $REF   # build the index first (needed only once)
bwa-mem2 mem $REF $READ1 $READ2 -o SRS011243.sam -t 12
samtools flagstat -@ 12 SRS011243.sam
# Keep pairs that did not align as a proper pair (-G 2 excludes properly-paired, i.e. host, reads)
samtools fastq -@ 12 -G 2 -1 SRS011243_filter.1.fastq -2 SRS011243_filter.2.fastq SRS011243.sam
# Compress the filtered reads
pigz -p 12 SRS011243_filter.1.fastq SRS011243_filter.2.fastq
# 2 Taxonomic profiling with metaphlan
metaphlan --input_type fastq --nproc 12 --bowtie2out metagenome.bowtie2.bz2 \
SRS011243_filter.1.fastq.gz,SRS011243_filter.2.fastq.gz -o SRS011243.txt
# Loop over the samples in metadata.txt (prints one metaphlan command per sample)
cat metadata.txt | awk '{if (NR > 1) print $1}' | while read i;do echo "metaphlan --input_type fastq --nproc 12 \
./data/hmp/${i}/${i}.1.fastq.gz,./data/hmp/${i}/${i}.2.fastq.gz \
--bowtie2out ${i}.bowtie2.bz2 -o ${i}.txt" ;done;
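# The loop above only prints the commands; dropping the echo runs them directly (a sketch):
cat metadata.txt | awk '{if (NR > 1) print $1}' | while read i;do metaphlan --input_type fastq --nproc 12 \
./data/hmp/${i}/${i}.1.fastq.gz,./data/hmp/${i}/${i}.2.fastq.gz \
--bowtie2out ${i}.bowtie2.bz2 -o ${i}.txt ;done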
# 3 Taxonomic and functional profiling with humann
# humann takes a single input file, so concatenate the mates first
cat SRS011243_filter.1.fastq.gz SRS011243_filter.2.fastq.gz >SRS011243_filter.fastq.gz
humann --input-format fastq.gz --input SRS011243_filter.fastq.gz \
--output SRS011243 --threads 12 --search-mode uniref90
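# Follow-up sketch: humann writes <basename>_genefamilies.tsv, _pathabundance.tsv and
# _pathcoverage.tsv into the output directory; gene families are usually renormalized to
# relative abundance before comparing samples (the input file name below assumes that convention)
humann_renorm_table --input SRS011243/SRS011243_filter_genefamilies.tsv \
--output SRS011243_genefamilies_relab.tsv --units relab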
# 4 Visualize the results
# Merge the per-sample metaphlan profiles
merge_metaphlan_tables.py profiled_samples/*.txt >merged_abundance_table.txt
# Extract species-level abundances
grep -E "s__|clade" merged_abundance_table.txt | sed 's/^.*s__//g'| cut -f1,3-8 | sed -e 's/clade_name/body_site/g' > merged_abundance_table_species.txt
hclust2.py -i merged_abundance_table_species.txt -o abundance_heatmap_species.png --f_dist_f braycurtis \
--s_dist_f braycurtis --cell_aspect_ratio 0.5 -l --flabel_size 10 --slabel_size 10 --max_flabel_len 100 \
--max_slabel_len 100 --minv 0.1 --dpi 300 --ftop 25
# In R: heatmap of the 10 most abundant species
library(pheatmap)
x <- read.table("merged_abundance_table_species.txt", sep = "\t", header = T, row.names = 1)
head(x)
# Order species by total abundance and plot the top 10 rows
y <- x[order(rowSums(x), decreasing = TRUE), ]
pheatmap(y[1:10, ])
# Drop the comment line and the NCBI_tax_id column, then export annotations for GraPhlAn
tail -n +2 merged_abundance_table.txt | cut -f1,3- > merged_abundance_table_reformatted.txt
export2graphlan.py --skip_rows 1 -i merged_abundance_table_reformatted.txt \
--tree merged_abundance.tree.txt --annotation merged_abundance.annot.txt \
--most_abundant 100 --abundance_threshold 1 --least_biomarkers 10 \
--annotations 5,6 --external_annotations 7 --min_clade_size 1
# Draw the cladogram
graphlan_annotate.py --annot merged_abundance.annot.txt merged_abundance.tree.txt merged_abundance.xml
graphlan.py --dpi 300 merged_abundance.xml merged_abundance.png --external_legends