#!/usr/bin/env python
# Processes uploads from the user.

# WARNING: Changes in this tool (particularly as related to parsing) may need
# to be reflected in galaxy.web.controllers.tool_runner and galaxy.tools

import urllib, sys, os, gzip, tempfile, shutil, re, zipfile, codecs, binascii
from galaxy import eggs
# need to import model before sniff to resolve a circular import dependency
import galaxy.model
from galaxy.datatypes.checkers import *
from galaxy.datatypes import sniff
from galaxy.datatypes.binary import *
from galaxy.datatypes.images import Pdf
from galaxy.datatypes.registry import Registry
from galaxy import util
from galaxy.datatypes.util.image_util import *
from galaxy.util.json import *

try:
    import Image as PIL
except ImportError:
    try:
        from PIL import Image as PIL
    except:
        PIL = None

try:
    import bz2
except:
    bz2 = None

assert sys.version_info[:2] >= ( 2, 4 )

def stop_err( msg, ret=1 ):
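    """Write msg to stderr and exit with return code ret."""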
    sys.stderr.write( msg )
    sys.exit( ret )

def file_err( msg, dataset, json_file ):
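    """Record an upload error: write a JSON 'dataset' record carrying the error
    message to json_file, then remove the temporary upload file unless it was
    supplied server-side ('server_dir' or 'path_paste').
    """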
    json_file.write( dumps( dict( type = 'dataset',
                                  ext = 'data',
                                  dataset_id = dataset.dataset_id,
                                  stderr = msg ) ) + "\n" )
    # never remove a server-side upload
    if dataset.type in ( 'server_dir', 'path_paste' ):
        return
    try:
        os.remove( dataset.path )
    except:
        pass

def safe_dict(d):
    """
    Recursively clone json structure with UTF-8 dictionary keys
    http://mellowmachines.com/blog/2009/06/exploding-dictionary-with-unicode-keys-as-python-arguments/
    """
    if isinstance(d, dict):
        return dict([(k.encode('utf-8'), safe_dict(v)) for k,v in d.iteritems()])
    elif isinstance(d, list):
        return [safe_dict(x) for x in d]
    else:
        return d

def parse_outputs( args ):
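    """Parse 'dataset_id:files_path:path' arguments from the command line into a
    dict mapping dataset_id to ( path, files_path ).
    """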
    rval = {}
    for arg in args:
        id, files_path, path = arg.split( ':', 2 )
        rval[int( id )] = ( path, files_path )
    return rval

def add_file( dataset, registry, json_file, output_path ):
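    """Process a single (non-composite) upload: detect the data type, validate
    or decompress compressed content, optionally convert line endings, move the
    result to output_path and append a JSON status record to json_file.
    """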
    data_type = None
    line_count = None
    converted_path = None
    stdout = None
    link_data_only = dataset.get( 'link_data_only', 'copy_files' )
    in_place = dataset.get( 'in_place', True )

    try:
        ext = dataset.file_type
    except AttributeError:
        file_err( 'Unable to process uploaded file, missing file_type parameter.', dataset, json_file )
        return

    if dataset.type == 'url':
        try:
            page = urllib.urlopen( dataset.path ) # page will be .close()ed by sniff methods
            temp_name, dataset.is_multi_byte = sniff.stream_to_file( page, prefix='url_paste', source_encoding=util.get_charset_from_http_headers( page.headers ) )
        except Exception, e:
            file_err( 'Unable to fetch %s\n%s' % ( dataset.path, str( e ) ), dataset, json_file )
            return
        dataset.path = temp_name
    # See if we have an empty file
    if not os.path.exists( dataset.path ):
        file_err( 'Uploaded temporary file (%s) does not exist.' % dataset.path, dataset, json_file )
        return
    if not os.path.getsize( dataset.path ) > 0:
        file_err( 'The uploaded file is empty', dataset, json_file )
        return
    if not dataset.type == 'url':
        # Already set is_multi_byte above if type == 'url'
        try:
            dataset.is_multi_byte = util.is_multi_byte( codecs.open( dataset.path, 'r', 'utf-8' ).read( 100 ) )
        except UnicodeDecodeError, e:
            dataset.is_multi_byte = False
    # Is dataset an image?
    image = check_image( dataset.path )
    if image:
        if not PIL:
            image = None
        # get_image_ext() returns None if not a supported Image type
        ext = get_image_ext( dataset.path, image )
        data_type = ext
    # Is dataset content multi-byte?
    elif dataset.is_multi_byte:
        data_type = 'multi-byte char'
        ext = sniff.guess_ext( dataset.path, is_multi_byte=True )
    # Is dataset content supported sniffable binary?
    else:
        # FIXME: This ignores the declared sniff order in datatype_conf.xml
        # resulting in improper behavior
        type_info = Binary.is_sniffable_binary( dataset.path )
        if type_info:
            data_type = type_info[0]
            ext = type_info[1]
            data_type = 'compressed archive' # upload zip file modification
    if not data_type:
        root_datatype = registry.get_datatype_by_extension( dataset.file_type )
        if getattr( root_datatype, 'compressed', False ):
            data_type = 'compressed archive'
            ext = dataset.file_type
        else:
            # See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress
            is_gzipped, is_valid = check_gzip( dataset.path )
            if is_gzipped and not is_valid:
                file_err( 'The gzipped uploaded file contains inappropriate content', dataset, json_file )
                return
            elif is_gzipped and is_valid:
                if link_data_only == 'copy_files':
                    # We need to uncompress the temp_name file, but BAM files must remain compressed in the BGZF format
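                    # Decompress in fixed-size chunks so large archives are never read fully into memory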
                    CHUNK_SIZE = 2**20 # 1Mb
                    fd, uncompressed = tempfile.mkstemp( prefix='data_id_%s_upload_gunzip_' % dataset.dataset_id, dir=os.path.dirname( output_path ), text=False )
                    gzipped_file = gzip.GzipFile( dataset.path, 'rb' )
                    while 1:
                        try:
                            chunk = gzipped_file.read( CHUNK_SIZE )
                        except IOError:
                            os.close( fd )
                            os.remove( uncompressed )
                            file_err( 'Problem decompressing gzipped data', dataset, json_file )
                            return
                        if not chunk:
                            break
                        os.write( fd, chunk )
                    os.close( fd )
                    gzipped_file.close()
                    # Replace the gzipped file with the decompressed file if it's safe to do so
                    if dataset.type in ( 'server_dir', 'path_paste' ) or not in_place:
                        dataset.path = uncompressed
                    else:
                        shutil.move( uncompressed, dataset.path )
                    os.chmod(dataset.path, 0644)
                dataset.name = dataset.name.rstrip( '.gz' )
                data_type = 'gzip'
            if not data_type and bz2 is not None:
                # See if we have a bz2 file, much like gzip
                is_bzipped, is_valid = check_bz2( dataset.path )
                if is_bzipped and not is_valid:
                    file_err( 'The bz2 compressed uploaded file contains inappropriate content', dataset, json_file )
                    return
                elif is_bzipped and is_valid:
                    if link_data_only == 'copy_files':
                        # We need to uncompress the temp_name file
                        CHUNK_SIZE = 2**20 # 1Mb
                        fd, uncompressed = tempfile.mkstemp( prefix='data_id_%s_upload_bunzip2_' % dataset.dataset_id, dir=os.path.dirname( output_path ), text=False )
                        bzipped_file = bz2.BZ2File( dataset.path, 'rb' )
                        while 1:
                            try:
                                chunk = bzipped_file.read( CHUNK_SIZE )
                            except IOError:
                                os.close( fd )
                                os.remove( uncompressed )
                                file_err( 'Problem decompressing bz2 compressed data', dataset, json_file )
                                return
                            if not chunk:
                                break
                            os.write( fd, chunk )
                        os.close( fd )
                        bzipped_file.close()
                        # Replace the bzipped file with the decompressed file if it's safe to do so
                        if dataset.type in ( 'server_dir', 'path_paste' ) or not in_place:
                            dataset.path = uncompressed
                        else:
                            shutil.move( uncompressed, dataset.path )
                        os.chmod(dataset.path, 0644)
                    dataset.name = dataset.name.rstrip( '.bz2' )
                    data_type = 'bz2'
            if not data_type:
                # See if we have a zip archive
                is_zipped = check_zip( dataset.path )
                if is_zipped:
                    if link_data_only == 'copy_files':
                        CHUNK_SIZE = 2**20 # 1Mb
                        uncompressed = None
                        uncompressed_name = None
                        unzipped = False
                        z = zipfile.ZipFile( dataset.path )
                        for name in z.namelist():
                            if name.endswith('/'):
                                continue
                            if unzipped:
                                stdout = 'ZIP file contained more than one file, only the first file was added to Galaxy.'
                                break
                            fd, uncompressed = tempfile.mkstemp( prefix='data_id_%s_upload_zip_' % dataset.dataset_id, dir=os.path.dirname( output_path ), text=False )
                            if sys.version_info[:2] >= ( 2, 6 ):
                                zipped_file = z.open( name )
                                while 1:
                                    try:
                                        chunk = zipped_file.read( CHUNK_SIZE )
                                    except IOError:
                                        os.close( fd )
                                        os.remove( uncompressed )
                                        file_err( 'Problem decompressing zipped data', dataset, json_file )
                                        return
                                    if not chunk:
                                        break
                                    os.write( fd, chunk )
                                os.close( fd )
                                zipped_file.close()
                                uncompressed_name = name
                                unzipped = True
                            else:
                                # python < 2.6 doesn't have a way to read members in chunks(!)
                                try:
                                    outfile = open( uncompressed, 'wb' )
                                    outfile.write( z.read( name ) )
                                    outfile.close()
                                    uncompressed_name = name
                                    unzipped = True
                                except IOError:
                                    os.close( fd )
                                    os.remove( uncompressed )
                                    file_err( 'Problem decompressing zipped data', dataset, json_file )
                                    return
                        z.close()
                        # Replace the zipped file with the decompressed file if it's safe to do so
                        if uncompressed is not None:
                            if dataset.type in ( 'server_dir', 'path_paste' ) or not in_place:
                                dataset.path = uncompressed
                            else:
                                shutil.move( uncompressed, dataset.path )
                            os.chmod(dataset.path, 0644)
                            dataset.name = uncompressed_name
                    data_type = 'zip'
            if not data_type:
                # TODO refactor this logic. check_binary isn't guaranteed to be
                # correct since it only looks at whether the first 100 chars are
                # printable or not. If someone specifies a known unsniffable
                # binary datatype and check_binary fails, the file gets mangled.
                if check_binary( dataset.path ) or Binary.is_ext_unsniffable(dataset.file_type):
                    # We have a binary dataset, but it is not Bam, Sff or Pdf
                    data_type = 'binary'
                    #binary_ok = False
                    parts = dataset.name.split( "." )
                    if len( parts ) > 1:
                        ext = parts[-1].strip().lower()
                        if not Binary.is_ext_unsniffable(ext):
                            file_err( 'The uploaded binary file contains inappropriate content', dataset, json_file )
                            return
                        elif Binary.is_ext_unsniffable(ext) and dataset.file_type != ext:
                            err_msg = "You must manually set the 'File Format' to '%s' when uploading %s files." % ( ext.capitalize(), ext )
                            file_err( err_msg, dataset, json_file )
                            return
            if not data_type:
                # We must have a text file
                if check_html( dataset.path ):
                    file_err( 'The uploaded file contains inappropriate HTML content', dataset, json_file )
                    return
            if data_type != 'binary':
                if link_data_only == 'copy_files':
                    if dataset.type in ( 'server_dir', 'path_paste' ) and data_type not in [ 'gzip', 'bz2', 'zip' ]:
                        in_place = False
                    # Convert universal line endings to Posix line endings, but allow the user to turn it off,
                    # so that it becomes possible to upload gzip, bz2 or zip files with binary data without
                    # corrupting the content of those files.
                    if dataset.to_posix_lines:
                        tmpdir = output_adjacent_tmpdir( output_path )
                        tmp_prefix = 'data_id_%s_convert_' % dataset.dataset_id
                        if dataset.space_to_tab:
                            line_count, converted_path = sniff.convert_newlines_sep2tabs( dataset.path, in_place=in_place, tmp_dir=tmpdir, tmp_prefix=tmp_prefix )
                        else:
                            line_count, converted_path = sniff.convert_newlines( dataset.path, in_place=in_place, tmp_dir=tmpdir, tmp_prefix=tmp_prefix )
                if dataset.file_type == 'auto':
                    ext = sniff.guess_ext( dataset.path, registry.sniff_order )
                else:
                    ext = dataset.file_type
                data_type = ext
    # Save job info for the framework
    if ext == 'auto' and dataset.ext:
        ext = dataset.ext
    if ext == 'auto':
        ext = 'data'
    datatype = registry.get_datatype_by_extension( ext )
    if dataset.type in ( 'server_dir', 'path_paste' ) and link_data_only == 'link_to_files':
        # Never alter a file that will not be copied to Galaxy's local file store.
        if datatype.dataset_content_needs_grooming( dataset.path ):
            err_msg = 'The uploaded files need grooming, so change your <b>Copy data into Galaxy?</b> selection to be ' + \
                      '<b>Copy files into Galaxy</b> instead of <b>Link to files without copying into Galaxy</b> so grooming can be performed.'
            file_err( err_msg, dataset, json_file )
            return
    if link_data_only == 'copy_files' and dataset.type in ( 'server_dir', 'path_paste' ) and data_type not in [ 'gzip', 'bz2', 'zip' ]:
        # Move the dataset to its "real" path
        if converted_path is not None:
            shutil.copy( converted_path, output_path )
            try:
                os.remove( converted_path )
            except:
                pass
        else:
            # This should not happen, but it's here just in case
            shutil.copy( dataset.path, output_path )
    elif link_data_only == 'copy_files':
        shutil.move( dataset.path, output_path )
    # Write the job info
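    # One JSON record per dataset: type, dataset_id, ext, stdout, name, line_count (plus uuid when provided)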
    stdout = stdout or 'uploaded %s file' % data_type
    info = dict( type = 'dataset',
                 dataset_id = dataset.dataset_id,
                 ext = ext,
                 stdout = stdout,
                 name = dataset.name,
                 line_count = line_count )
    if dataset.get('uuid', None) is not None:
        info['uuid'] = dataset.get('uuid')
    json_file.write( dumps( info ) + "\n" )

    if link_data_only == 'copy_files' and datatype.dataset_content_needs_grooming( output_path ):
        # Groom the dataset content if necessary
        datatype.groom_dataset_content( output_path )

def add_composite_file( dataset, registry, json_file, output_path, files_path ):
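    """Process a composite upload: stage each declared composite file into
    files_path (converting line endings for non-binary members), move the
    primary file to output_path and append a JSON status record to json_file.
    """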
    if dataset.composite_files:
        os.mkdir( files_path )
        for name, value in dataset.composite_files.iteritems():
            value = util.bunch.Bunch( **value )
            if dataset.composite_file_paths[ value.name ] is None and not value.optional:
                file_err( 'A required composite data file was not provided (%s)' % name, dataset, json_file )
                break
            elif dataset.composite_file_paths[value.name] is not None:
                dp = dataset.composite_file_paths[value.name][ 'path' ]
                isurl = dp.find('://') != -1 # todo fixme
                if isurl:
                    try:
                        temp_name, dataset.is_multi_byte = sniff.stream_to_file( urllib.urlopen( dp ), prefix='url_paste' )
                    except Exception, e:
                        file_err( 'Unable to fetch %s\n%s' % ( dp, str( e ) ), dataset, json_file )
                        return
                    dataset.path = temp_name
                    dp = temp_name
                if not value.is_binary:
                    tmpdir = output_adjacent_tmpdir( output_path )
                    tmp_prefix = 'data_id_%s_convert_' % dataset.dataset_id
                    if dataset.composite_file_paths[ value.name ].get( 'space_to_tab', value.space_to_tab ):
                        sniff.convert_newlines_sep2tabs( dp, tmp_dir=tmpdir, tmp_prefix=tmp_prefix )
                    else:
                        sniff.convert_newlines( dp, tmp_dir=tmpdir, tmp_prefix=tmp_prefix )
                shutil.move( dp, os.path.join( files_path, name ) )
    # Move the dataset to its "real" path
    shutil.move( dataset.primary_file, output_path )
    # Write the job info
    info = dict( type = 'dataset',
                 dataset_id = dataset.dataset_id,
                 stdout = 'uploaded %s file' % dataset.file_type )
    json_file.write( dumps( info ) + "\n" )


def output_adjacent_tmpdir( output_path ):
    """ For temp files that will ultimately be moved to output_path anyway
    just create the file directly in output_path's directory so shutil.move
    will work optimally.
    """
    return os.path.dirname( output_path )


def __main__():
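    """Entry point.

    Usage: upload.py <root> <datatypes_conf> <json paramfile> <output spec> ...

    Reads one JSON-encoded dataset description per line from the paramfile,
    processes each upload and appends per-dataset results to 'galaxy.json'.
    """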

    if len( sys.argv ) < 4:
        print >>sys.stderr, 'usage: upload.py <root> <datatypes_conf> <json paramfile> <output spec> ...'
        sys.exit( 1 )

    output_paths = parse_outputs( sys.argv[4:] )
    json_file = open( 'galaxy.json', 'w' )

    registry = Registry()
    registry.load_datatypes( root_dir=sys.argv[1], config=sys.argv[2] )

    for line in open( sys.argv[3], 'r' ):
        dataset = loads( line )
        dataset = util.bunch.Bunch( **safe_dict( dataset ) )
        try:
            output_path = output_paths[int( dataset.dataset_id )][0]
        except:
            print >>sys.stderr, 'Output path for dataset %s not found on command line' % dataset.dataset_id
            sys.exit( 1 )
        if dataset.type == 'composite':
            files_path = output_paths[int( dataset.dataset_id )][1]
            add_composite_file( dataset, registry, json_file, output_path, files_path )
        else:
            add_file( dataset, registry, json_file, output_path )

    # clean up paramfile
    # TODO: this will not work when running as the actual user unless the
    # parent directory is writable by the user.
    try:
        os.remove( sys.argv[3] )
    except:
        pass

if __name__ == '__main__':
    __main__()