Mercurial > repos > drosofff > fetch_fasta_from_ncbi
annotate retrieve_fasta_from_NCBI.py @ 12:0bec3cba5c56 draft
planemo upload for repository https://github.com/ARTbio/tools-artbio/tree/master/tools/fetch_fasta_from_ncbi commit b6de14061c479f0418cd89e26d6f5ac26e565a07
author | drosofff |
---|---|
date | Wed, 09 Nov 2016 11:27:31 -0500 |
parents | 2c5375809c03 |
children | 639daa4c3c1a |
rev | line source |
---|---|
0 | 1 #!/usr/bin/env python |
2 # -*- coding: utf-8 -*- | |
3 """ | |
4 From a taxonomy ID retrieves all the nucleotide sequences | |
5 It returns a multiFASTA nuc/prot file | |
6 | |
7 Entrez Database UID common name E-utility Database Name | |
8 Nucleotide GI number nuccore | |
9 Protein GI number protein | |
10 | |
11 Retrieve strategy: | |
12 | |
13 esearch to get total number of UIDs (count) | |
14 esearch to get UIDs in batches | |
15 loop until end of UIDs list: | |
16 epost to put a batch of UIDs in the history server | |
17 efetch to retrieve info from previous post | |
18 | |
19 retmax of efetch is 1/10 of declared value from NCBI | |
20 | |
21 queries are 1 sec delayed, to satisfy NCBI guidelines (more than what they request) | |
22 | |
23 | |
24 python get_fasta_from_taxon.py -i 1638 -o test.out -d protein | |
25 python get_fasta_from_taxon.py -i 327045 -o test.out -d nuccore # 556468 UIDs | |
26 """ | |
2 | 27 import sys |
0 | 28 import logging |
29 import optparse | |
30 import time | |
31 import urllib | |
32 import urllib2 | |
7
cd7de2d6c716
planemo upload for repository https://bitbucket.org/drosofff/gedtools/
drosofff
parents:
5
diff
changeset
|
33 import httplib |
0 | 34 import re |
12
0bec3cba5c56
planemo upload for repository https://github.com/ARTbio/tools-artbio/tree/master/tools/fetch_fasta_from_ncbi commit b6de14061c479f0418cd89e26d6f5ac26e565a07
drosofff
parents:
10
diff
changeset
|
35 |
0bec3cba5c56
planemo upload for repository https://github.com/ARTbio/tools-artbio/tree/master/tools/fetch_fasta_from_ncbi commit b6de14061c479f0418cd89e26d6f5ac26e565a07
drosofff
parents:
10
diff
changeset
|
36 |
class Eutils:
    """Bulk-download sequences matching an NCBI Entrez query as multi-FASTA.

    Retrieval strategy (see http://www.ncbi.nlm.nih.gov/books/NBK25499/):
      1. esearch with rettype=count to get the total number of UIDs
      2. esearch in batches (retmax capped at 100,000) to collect all UIDs
      3. for each batch of UIDs: epost them to the History server, then
         efetch the corresponding FASTA records via WebEnv/query_key
    Every E-utility request is followed by a 1 s pause to respect NCBI
    usage guidelines.
    """

    def __init__(self, options, logger):
        """Store query parameters from parsed command-line `options`.

        :param options: optparse Values with query_string, dbname, outname
        :param logger:  logging.Logger used for all progress/debug output
        """
        self.logger = logger
        self.base = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
        self.query_string = options.query_string
        self.dbname = options.dbname
        if options.outname:
            self.outname = options.outname
        else:
            self.outname = 'NCBI_download' + '.' + self.dbname + '.fasta'
        self.ids = []                   # collected UIDs
        self.retmax_esearch = 100000    # NCBI hard limit for esearch retmax
        self.retmax_efetch = 1000       # conservative efetch batch size
        self.count = 0                  # total UIDs reported by esearch
        self.webenv = ""                # History server session, set by epost
        self.query_key = ""             # History server key, set by epost

    def retrieve(self):
        """Run the full pipeline: count UIDs, list them, fetch sequences."""
        self.get_count_value()
        self.get_uids_list()
        self.get_sequences()

    def get_count_value(self):
        """Retrieve Count (total number of UIDs) for the query.

        Only the <Count> element of the esearch XML response is used;
        the UIDs themselves are collected later in get_uids_list().
        """
        self.logger.info("retrieving data from %s" % self.base)
        self.logger.info("for Query: %s and database: %s" %
                         (self.query_string, self.dbname))
        querylog = self.esearch(self.dbname, self.query_string, '', '',
                                "count")
        self.logger.debug("Query response:")
        for line in querylog:
            self.logger.debug(line.rstrip())
            if '</Count>' in line:
                self.count = int(line[line.find('<Count>') +
                                      len('<Count>'):line.find('</Count>')])
        self.logger.info("Found %d UIDs" % self.count)

    def get_uids_list(self):
        """Collect all UIDs for the query into self.ids.

        esearch returns at most 100,000 UIDs per call, so the retrieval is
        split into ceil(count / retmax) batches driven by retstart.
        """
        retmax = self.retmax_esearch
        if (self.count > retmax):
            # floor division: correct batch count whether or not true
            # division semantics are in effect
            num_batches = (self.count // retmax) + 1
        else:
            num_batches = 1
        self.logger.info("Batch size for esearch action: %d UIDs" % retmax)
        self.logger.info("Number of batches for esearch action: %d " %
                         num_batches)
        for n in range(num_batches):
            querylog = self.esearch(self.dbname, self.query_string,
                                    n * retmax, retmax, '')
            for line in querylog:
                if '<Id>' in line and '</Id>' in line:
                    uid = (line[line.find('<Id>') +
                                len('<Id>'):line.find('</Id>')])
                    self.ids.append(uid)
            self.logger.info("Retrieved %d UIDs" % len(self.ids))

    def esearch(self, db, term, retstart, retmax, rettype):
        """POST one esearch request; return the response as a list of lines."""
        url = self.base + "esearch.fcgi"
        self.logger.debug("url: %s" % url)
        values = {'db': db,
                  'term': term,
                  'rettype': rettype,
                  'retstart': retstart,
                  'retmax': retmax}
        data = urllib.urlencode(values)
        self.logger.debug("data: %s" % str(data))
        req = urllib2.Request(url, data)
        response = urllib2.urlopen(req)
        querylog = response.readlines()
        time.sleep(1)  # NCBI guideline: no more than ~3 requests/sec
        return querylog

    def epost(self, db, ids):
        """POST a comma-separated batch of UIDs to the History server.

        Sets self.query_key and self.webenv from the response; retries
        forever (10 s back-off) on any request failure.
        """
        url = self.base + "epost.fcgi"
        self.logger.debug("url_epost: %s" % url)
        values = {'db': db,
                  'id': ids}
        data = urllib.urlencode(values)
        req = urllib2.Request(url, data)
        serverResponse = False
        while not serverResponse:
            try:
                response = urllib2.urlopen(req)
                serverResponse = True
            except Exception as e:
                # deliberately broad: any transient network/server error
                # is logged and retried (but KeyboardInterrupt still works)
                self.logger.info("Caught Error: %s" % e)
                self.logger.info("Retrying in 10 sec")
                time.sleep(10)
        querylog = response.readlines()
        self.logger.debug("query response:")
        for line in querylog:
            self.logger.debug(line.rstrip())
            if '</QueryKey>' in line:
                self.query_key = str(line[line.find('<QueryKey>') +
                                          len('<QueryKey>'):
                                          line.find('</QueryKey>')])
            if '</WebEnv>' in line:
                self.webenv = str(line[line.find('<WebEnv>') +
                                       len('<WebEnv>'):
                                       line.find('</WebEnv>')])
        self.logger.debug("*** epost action ***")
        self.logger.debug("query_key: %s" % self.query_key)
        self.logger.debug("webenv: %s" % self.webenv)
        time.sleep(1)

    def efetch(self, db, query_key, webenv):
        """Fetch FASTA text for the UIDs previously epost-ed.

        Retries until the response looks like genuine FASTA (starts with
        '>' and does not contain the NCBI 'Resource temporarily
        unavailable' message). Returns the sanitised FASTA string.
        """
        url = self.base + "efetch.fcgi"
        self.logger.debug("url_efetch: %s" % url)
        values = {'db': db,
                  'query_key': query_key,
                  'webenv': webenv,
                  'rettype': "fasta",
                  'retmode': "text"}
        data = urllib.urlencode(values)
        self.logger.debug("data: %s" % str(data))
        req = urllib2.Request(url, data)
        serverTransaction = False
        counter = 0
        while not serverTransaction:
            counter += 1
            self.logger.info("Server Transaction Trial: %s" % (counter))
            try:
                response = urllib2.urlopen(req)
                fasta = response.read()
                if ("Resource temporarily unavailable" in fasta) or \
                        (not fasta.startswith(">")):
                    serverTransaction = False
                else:
                    serverTransaction = True
            except urllib2.HTTPError as e:
                serverTransaction = False
                self.logger.info("urlopen error:%s, %s" % (e.code, e.read()))
            except httplib.IncompleteRead as e:
                serverTransaction = False
                self.logger.info("IncompleteRead error: %s" % (e.partial))
        fasta = self.sanitiser(self.dbname, fasta)
        time.sleep(1)
        return fasta

    def sanitiser(self, db, fastaseq):
        """Clean a raw multi-FASTA string for downstream use (e.g. makeblastdb).

        Records are separated by blank lines. Empty records are skipped;
        nuccore records with > 40% ambiguous nucleotides on the first
        sequence line are dropped as probable truncated downloads; headers
        are trimmed to 100 chars and characters that break BLAST database
        building (spaces, brackets, '=') are replaced by underscores.
        Databases other than nuccore/protein are returned untouched.
        """
        # exact-match check: the old substring test ("core" in
        # "nuccore protein") let unknown db names fall through and
        # silently produced an empty result
        if db not in ("nuccore", "protein"):
            return fastaseq
        # runs of >= 49 residue letters in a protein header are collapsed
        regex = re.compile(r"[ACDEFGHIKLMNPQRSTVWYBZ]{49,}")
        sane_seqlist = []
        seqlist = fastaseq.split("\n\n")
        for seq in seqlist[:-1]:
            fastalines = seq.split("\n")
            if len(fastalines) < 2:
                self.logger.info("Empty sequence for %s" %
                                 ("|".join(fastalines[0].split("|")[:4])))
                self.logger.info("%s download is skipped" %
                                 ("|".join(fastalines[0].split("|")[:4])))
                continue
            if db == "nuccore":
                badnuc = 0
                for nucleotide in fastalines[1]:
                    if nucleotide not in "ATGC":
                        badnuc += 1
                if float(badnuc) / len(fastalines[1]) > 0.4:
                    self.logger.info(
                        "%s ambiguous nucleotides in %s or download "
                        "interrupted at this offset | %s" %
                        (float(badnuc) / len(fastalines[1]),
                         "|".join(fastalines[0].split("|")[:4]),
                         fastalines[1]))
                    self.logger.info("%s download is skipped" %
                                     ("|".join(fastalines[0].split("|")[:4])))
                    continue
                # remove spaces and trim the header to 100 chars
                fastalines[0] = fastalines[0].replace(" ", "_")[:100]
                cleanseq = "\n".join(fastalines)
                sane_seqlist.append(cleanseq)
            elif db == "protein":
                fastalines[0] = fastalines[0][0:100]
                fastalines[0] = fastalines[0].replace(" ", "_")
                fastalines[0] = fastalines[0].replace("[", "_")
                fastalines[0] = fastalines[0].replace("]", "_")
                fastalines[0] = fastalines[0].replace("=", "_")
                # because blast makedb doesn't like a trailing underscore
                fastalines[0] = fastalines[0].rstrip("_")
                fastalines[0] = re.sub(regex, "_", fastalines[0])
                cleanseq = "\n".join(fastalines)
                sane_seqlist.append(cleanseq)
        self.logger.info("clean sequences appended: %d" % (len(sane_seqlist)))
        return "\n".join(sane_seqlist)

    def get_sequences(self):
        """Download all sequences to self.outname in efetch-sized batches.

        For each batch of retmax_efetch UIDs: epost the batch, then efetch
        (retrying while the result is empty) and append to the output file.
        """
        batch_size = self.retmax_efetch
        count = self.count
        uids_list = self.ids
        self.logger.info("Batch size for efetch action: %d" % batch_size)
        self.logger.info("Number of batches for efetch action: %d" %
                         ((count // batch_size) + 1))
        with open(self.outname, 'w') as out:
            for start in range(0, count, batch_size):
                end = min(count, start + batch_size)
                batch = uids_list[start:end]
                self.epost(self.dbname, ",".join(batch))
                mfasta = ''
                while not mfasta:
                    self.logger.info("retrieving batch %d" %
                                     ((start // batch_size) + 1))
                    mfasta = self.efetch(self.dbname, self.query_key,
                                         self.webenv)
                out.write(mfasta + '\n')
246 | |
247 | |
LOG_FORMAT = '%(asctime)s|%(levelname)-8s|%(message)s'
LOG_DATEFMT = '%Y-%m-%d %H:%M:%S'
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']


def __main__():
    """Parse command-line options, configure logging, run the retrieval."""
    parser = optparse.OptionParser(description='Retrieve data from NCBI')
    parser.add_option('-i', dest='query_string', help='NCBI Query String')
    parser.add_option('-o', dest='outname', help='output file name')
    parser.add_option('-l', '--logfile', help='log file (default=stderr)')
    parser.add_option('--loglevel', choices=LOG_LEVELS, default='INFO',
                      help='logging level (default: INFO)')
    parser.add_option('-d', dest='dbname', help='database type')
    (options, args) = parser.parse_args()
    if len(args) > 0:
        parser.error('Wrong number of arguments')
    # fail early with a usage message instead of an obscure crash later
    # inside Eutils when query_string/dbname are None
    if not options.query_string:
        parser.error('Query string (-i) is required')
    if not options.dbname:
        parser.error('Database name (-d) is required')

    log_level = getattr(logging, options.loglevel)
    kwargs = {'format': LOG_FORMAT,
              'datefmt': LOG_DATEFMT,
              'level': log_level}
    if options.logfile:
        # log to file when requested, otherwise stderr (basicConfig default)
        kwargs['filename'] = options.logfile
    logging.basicConfig(**kwargs)
    logger = logging.getLogger('data_from_NCBI')

    E = Eutils(options, logger)
    E.retrieve()


if __name__ == "__main__":
    __main__()