def _get_filelist_remote(remote_uri, recursive = True):
    ## If remote_uri ends with '/' then all remote files will have
    ## the remote_uri prefix removed in the relative path.
    ## If, on the other hand, the remote_uri ends with something else
    ## (probably alphanumeric symbol) we'll use the last path part
    ## in the relative path.
    ##
    ## Complicated, eh? See an example:
    ## _get_filelist_remote("s3://bckt/abc/def") may yield:
    ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
    ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
    ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
    ## Furthermore a prefix-magic can restrict the return list:
    ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
    ## { 'xyz/blah.txt' : {} }

    info(u"Retrieving list of remote files for %s ..." % remote_uri)

    s3 = S3(Config())
    response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), recursive = recursive)

    rem_base_original = rem_base = remote_uri.object()
    remote_uri_original = remote_uri
    if rem_base != '' and rem_base[-1] != '/':
        rem_base = rem_base[:rem_base.rfind('/')+1]
        remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
    rem_base_len = len(rem_base)
    rem_list = FileDict(ignore_case = False)
    break_now = False
    for object in response['list']:
        if object['Key'] == rem_base_original and object['Key'][-1] != "/":
            ## We asked for one file and we got that file :-)
            key = os.path.basename(object['Key'])
            object_uri_str = remote_uri_original.uri()
            break_now = True
            rem_list = FileDict(ignore_case = False)    ## Remove whatever has already been put to rem_list
        else:
            key = object['Key'][rem_base_len:]      ## Beware - this may be '' if object['Key']==rem_base !!
            object_uri_str = remote_uri.uri() + key
        rem_list[key] = {
            'size' : int(object['Size']),
            'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
            'md5' : object['ETag'][1:-1],
            'object_key' : object['Key'],
            'object_uri_str' : object_uri_str,
            'base_uri' : remote_uri,
            'dev' : None,
            'inode' : None,
        }
        if rem_list[key]['md5'].find("-") != -1: # always get it for multipart uploads
            _get_remote_attribs(S3Uri(object_uri_str), rem_list[key])
        md5 = rem_list[key]['md5']
        rem_list.record_md5(key, md5)
        if break_now:
            break
    return rem_list
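# To make the trailing-slash behaviour above concrete, here is a minimal,
# self-contained sketch of the rem_base trimming. The bucket listing is
# faked with a plain prefix filter and the key names are hypothetical.
def relative_keys(prefix, bucket_keys):
    listed = [k for k in bucket_keys if k.startswith(prefix)]  # what bucket_list would return
    rem_base = prefix
    if rem_base != '' and rem_base[-1] != '/':
        # keep everything up to and including the last '/'
        rem_base = rem_base[:rem_base.rfind('/') + 1]
    return [k[len(rem_base):] for k in listed]

bucket_keys = ['abc/def/file1.jpg', 'abc/def/xyz/blah.txt']
print(relative_keys('abc/def', bucket_keys))    # ['def/file1.jpg', 'def/xyz/blah.txt']
print(relative_keys('abc/def/', bucket_keys))   # ['file1.jpg', 'xyz/blah.txt']
print(relative_keys('abc/def/x', bucket_keys))  # ['xyz/blah.txt']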
def filter_exclude_include(src_list):
    info(u"Applying --exclude/--include")
    cfg = Config()
    exclude_list = FileDict(ignore_case = False)
    for file in src_list.keys():
        debug(u"CHECK: %s" % file)
        excluded = False
        for r in cfg.exclude:
            if r.search(file):
                excluded = True
                debug(u"EXCL-MATCH: '%s'" % (cfg.debug_exclude[r]))
                break
        if excluded:
            ## No need to check for --include if not excluded
            for r in cfg.include:
                if r.search(file):
                    excluded = False
                    debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[r]))
                    break
        if excluded:
            ## Still excluded - ok, action it
            debug(u"EXCLUDE: %s" % file)
            exclude_list[file] = src_list[file]
            del(src_list[file])
            continue
        else:
            debug(u"PASS: %r" % (file))
    return src_list, exclude_list
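# Hedged sketch of the --exclude/--include precedence implemented above:
# a file is dropped only if some exclude pattern matches and no include
# pattern rescues it. Patterns and names here are made up.
import re

excludes = [re.compile(r'\.log$')]
includes = [re.compile(r'^keep/')]

def is_excluded(name):
    if not any(r.search(name) for r in excludes):
        return False
    return not any(r.search(name) for r in includes)

print(is_excluded('a.log'))        # True  - excluded, not rescued
print(is_excluded('keep/a.log'))   # False - exclude matched, include rescued it
print(is_excluded('b.txt'))        # False - no exclude matched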
def __init__(self, homeDir, name, f_hasFailed, interactive=False, newjob=True, quiet=False):
    """
    homeDir is where experiments are stored
    name is the name of this experiment
        if there is another job with the same name, they are assumed
        to be the same. delete old jobs that you do not want to be
        confused with
    f_hasFailed is a function that takes a log file and returns a boolean
        for whether or not the job has failed
    if in interactive mode, will prompt user for input
        this should not be used for scripts, which would hang forever in these cases
    """
    self.name = name
    self.home = os.path.join(homeDir, name)
    self.logDir = os.path.join(self.home, 'log')
    self.f_hasFailed = f_hasFailed
    if os.path.isfile(self.home):
        raise Exception('the home directory you gave must not contain a file that matches the name you gave')
    if newjob:
        print 'creating new job:', self.home
        if os.path.isdir(self.home):
            if interactive:
                print "the directory %s already exists" % (self.home)
                r = raw_input('would you like me to delete it for you? [y|n] ')
                if str2bool(r):
                    os.system('rm -r ' + self.home)
                    os.system('mkdir ' + self.home)
                else:
                    raise Exception('cannot proceed')
            else:
                raise Exception('please give a unique name or delete old jobs: ' + self.home)
        elif 0 != os.system('mkdir -p ' + self.home):
            raise Exception('cannot make home directory!')
        if not os.path.isdir(self.logDir) and 0 != os.system('mkdir -p ' + self.logDir):
            raise Exception('cannot make log directory!')
    else:
        assert os.path.isdir(self.home)
        assert os.path.isdir(self.logDir)
        if not quiet:
            print 'loading existing job:', self.home
    self.javaOpt = FileDict(os.path.join(self.home, 'java.settings'), exists=not newjob)  # start with "-D"
    self.metaOpt = FileDict(os.path.join(self.home, 'meta.settings'), exists=not newjob)  # xmx, jar, profile, etc
    self.qsubOpt = FileDict(os.path.join(self.home, 'qsub.settings'), exists=not newjob)  # mem_free, h_rt
    self.prepared = False
def makeRun(self, baseConfig, runData):
    """Make a new run of config files from the base config and runData.

    runData is a list of tuples which contain a label and a dict.
    Labels are used to name generated configs and their specified
    output files. The dicts are key-value pairs for data to modify
    in the base config.

    Return a list of the names of config files generated.
    """
    configNames = []
    baseConfigFullPath = os.path.join(self.path, baseConfig)
    for label, labelData in runData:
        newConfig = FileDict(baseConfigFullPath)
        newConfigFullPath = os.path.join(self.path, label + "_config")
        labelData.update({"outputLogName" : label + "_out.fd",
                          "errorLogName" : label + "_error",
                          "debugLogName" : label + "_debug"})
        for key, value in labelData.items():
            newConfig.setGlobal(str(key), str(value))
        configNames.append(newConfigFullPath)
        newConfig.writeToFile(newConfigFullPath)
    return configNames
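# Hypothetical call, assuming `runner` is an instance of the class this
# method belongs to (with a `path` attribute) and the FileDict config API
# used above; the labels and keys are made up.
runData = [
    ("fast", {"timeout": 10, "threads": 4}),
    ("slow", {"timeout": 600, "threads": 1}),
]
configs = runner.makeRun("base_config", runData)
# -> [".../fast_config", ".../slow_config"], each config also carrying
#    fast_out.fd / fast_error / fast_debug etc. as its log names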
def compare_filelists(src_list, dst_list, src_remote, dst_remote):
    def __direction_str(is_remote):
        return is_remote and "remote" or "local"

    def _compare(src_list, dst_lst, src_remote, dst_remote, file):
        """Return True if src_list[file] matches dst_list[file], else False"""
        attribs_match = True
        if not (src_list.has_key(file) and dst_list.has_key(file)):
            info(u"%s: does not exist in one side or the other: src_list=%s, dst_list=%s" % (file, src_list.has_key(file), dst_list.has_key(file)))
            return False

        ## check size first
        if 'size' in cfg.sync_checks:
            if 'size' in dst_list[file] and 'size' in src_list[file]:
                if dst_list[file]['size'] != src_list[file]['size']:
                    debug(u"xfer: %s (size mismatch: src=%s dst=%s)" % (file, src_list[file]['size'], dst_list[file]['size']))
                    attribs_match = False

        ## check md5
        compare_md5 = 'md5' in cfg.sync_checks
        # Multipart-uploaded files don't have a valid md5 sum - it ends with "...-nn"
        if compare_md5:
            if (src_remote == True and '-' in src_list[file]['md5']) or (dst_remote == True and '-' in dst_list[file]['md5']):
                compare_md5 = False
                info(u"disabled md5 check for %s" % file)
        if attribs_match and compare_md5:
            try:
                src_md5 = src_list.get_md5(file)
                dst_md5 = dst_list.get_md5(file)
            except (IOError, OSError):
                # md5 sum verification failed - ignore that file altogether
                debug(u"IGNR: %s (disappeared)" % (file))
                warning(u"%s: file disappeared, ignoring." % (file))
                raise

            if src_md5 != dst_md5:
                ## checksums are different.
                attribs_match = False
                debug(u"XFER: %s (md5 mismatch: src=%s dst=%s)" % (file, src_md5, dst_md5))

        return attribs_match

    # we don't support local->local sync, use 'rsync' or something like that instead ;-)
    assert(not(src_remote == False and dst_remote == False))

    info(u"Verifying attributes...")
    cfg = Config()
    ## Items left on src_list will be transferred
    ## Items left on update_list will be transferred after src_list
    ## Items left on copy_pairs will be copied from dst1 to dst2
    update_list = FileDict(ignore_case = False)
    ## Items left on dst_list will be deleted
    copy_pairs = []

    debug("Comparing filelists (direction: %s -> %s)" % (__direction_str(src_remote), __direction_str(dst_remote)))

    for relative_file in src_list.keys():
        debug(u"CHECK: %s" % (relative_file))

        if dst_list.has_key(relative_file):
            ## Was --skip-existing requested?
            if cfg.skip_existing:
                debug(u"IGNR: %s (used --skip-existing)" % (relative_file))
                del(src_list[relative_file])
                del(dst_list[relative_file])
                continue

            try:
                same_file = _compare(src_list, dst_list, src_remote, dst_remote, relative_file)
            except (IOError, OSError):
                debug(u"IGNR: %s (disappeared)" % (relative_file))
                warning(u"%s: file disappeared, ignoring." % (relative_file))
                del(src_list[relative_file])
                del(dst_list[relative_file])
                continue

            if same_file:
                debug(u"IGNR: %s (transfer not needed)" % relative_file)
                del(src_list[relative_file])
                del(dst_list[relative_file])
            else:
                # look for matching file in src
                try:
                    md5 = src_list.get_md5(relative_file)
                except IOError:
                    md5 = None
                if md5 is not None and dst_list.by_md5.has_key(md5):
                    # Found one, we want to copy
                    dst1 = list(dst_list.by_md5[md5])[0]
                    debug(u"DST COPY src: %s -> %s" % (dst1, relative_file))
                    copy_pairs.append((src_list[relative_file], dst1, relative_file))
                    del(src_list[relative_file])
                    del(dst_list[relative_file])
                else:
                    # record that we will get this file transferred to us (before all the copies), so if we come across it later again,
                    # we can copy from _this_ copy (e.g. we only upload it once, and copy thereafter).
                    dst_list.record_md5(relative_file, md5)
                    update_list[relative_file] = src_list[relative_file]
                    del src_list[relative_file]
                    del dst_list[relative_file]
        else:
            # dst doesn't have this file
            # look for matching file elsewhere in dst
            try:
                md5 = src_list.get_md5(relative_file)
            except IOError:
                md5 = None
            dst1 = dst_list.find_md5_one(md5)
            if dst1 is not None:
                # Found one, we want to copy
                debug(u"DST COPY dst: %s -> %s" % (dst1, relative_file))
                copy_pairs.append((src_list[relative_file], dst1, relative_file))
                del(src_list[relative_file])
            else:
                # we don't have this file, and we don't have a copy of this file elsewhere. Get it.
                # record that we will get this file transferred to us (before all the copies), so if we come across it later again,
                # we can copy from _this_ copy (e.g. we only upload it once, and copy thereafter).
                dst_list.record_md5(relative_file, md5)

    for f in dst_list.keys():
        if src_list.has_key(f) or update_list.has_key(f):
            # leave only those not on src_list + update_list
            del dst_list[f]

    return src_list, dst_list, update_list, copy_pairs
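# Hedged sketch of how a sync driver might consume the four lists returned
# above; upload/remote_copy/remote_delete are illustrative stand-ins, not
# functions from this module.
src, dst, update, copy_pairs = compare_filelists(local_list, remote_list,
                                                 src_remote=False, dst_remote=True)
for f in src:                  # new files: transferred first
    upload(src[f])
for f in update:               # changed files: transferred after src
    upload(update[f])
for src_item, dst1, dst2 in copy_pairs:
    remote_copy(dst1, dst2)    # duplicate content: server-side copy, no re-upload
for f in dst:                  # only left on dst: candidates for deletion
    remote_delete(dst[f])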
def fetch_remote_list(args, require_attribs=False, recursive=None, uri_params={}):
    def _get_remote_attribs(uri, remote_item):
        response = S3(cfg).object_info(uri)
        if not response.get('headers'):
            return

        remote_item.update({
            'size': int(response['headers']['content-length']),
            'md5': response['headers']['etag'].strip('"\''),
            'timestamp': dateRFC822toUnix(response['headers']['last-modified'])
        })
        try:
            md5 = response['s3cmd-attrs']['md5']
            remote_item.update({'md5': md5})
            debug(u"retrieved md5=%s from headers" % md5)
        except KeyError:
            pass

    def _get_filelist_remote(remote_uri, recursive=True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }
        info(u"Retrieving list of remote files for %s ..." % remote_uri)
        empty_fname_re = re.compile(r'\A\s*\Z')

        total_size = 0
        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(),
                                  prefix=remote_uri.object(),
                                  recursive=recursive,
                                  uri_params=uri_params)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/') + 1]
            remote_uri = S3Uri(u"s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = FileDict(ignore_case=False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][-1] != "/":
                ## We asked for one file and we got that file :-)
                key = unicodise(os.path.basename(deunicodise(object['Key'])))
                object_uri_str = remote_uri_original.uri()
                break_now = True
                ## Remove whatever has already been put to rem_list
                rem_list = FileDict(ignore_case=False)
            else:
                key = object['Key'][rem_base_len:]  ## Beware - this may be '' if object['Key']==rem_base !!
                object_uri_str = remote_uri.uri() + key
                if empty_fname_re.match(key):
                    # Objects may exist on S3 with empty names (''), which don't map so well to common filesystems.
                    warning(u"Empty object name on S3 found, ignoring.")
                    continue
            rem_list[key] = {
                'size': int(object['Size']),
                'timestamp': dateS3toUnix(object['LastModified']),  ## Sadly it's upload time, not our lastmod time :-(
                'md5': object['ETag'].strip('"\''),
                'object_key': object['Key'],
                'object_uri_str': object_uri_str,
                'base_uri': remote_uri,
                'dev': None,
                'inode': None,
            }
            if '-' in rem_list[key]['md5']:  # always get it for multipart uploads
                _get_remote_attribs(S3Uri(object_uri_str), rem_list[key])
            md5 = rem_list[key]['md5']
            rem_list.record_md5(key, md5)
            total_size += int(object['Size'])
            if break_now:
                break
        return rem_list, total_size

    cfg = Config()
    remote_uris = []
    remote_list = FileDict(ignore_case=False)

    if type(args) not in (list, tuple, set):
        args = [args]

    if recursive == None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 's3':
            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
        remote_uris.append(uri)

    total_size = 0

    if recursive:
        for uri in remote_uris:
            objectlist, tmp_total_size = _get_filelist_remote(uri, recursive=True)
            total_size += tmp_total_size
            for key in objectlist:
                remote_list[key] = objectlist[key]
                remote_list.record_md5(key, objectlist.get_md5(key))
    else:
        for uri in remote_uris:
            uri_str = uri.uri()
            ## Wildcards used in remote URI?
            ## If yes we'll need a bucket listing...
            wildcard_split_result = re.split("\*|\?", uri_str, maxsplit=1)
            if len(wildcard_split_result) == 2:
                # wildcards found
                prefix, rest = wildcard_split_result
                ## Only request recursive listing if the 'rest' of the URI,
                ## i.e. the part after first wildcard, contains '/'
                need_recursion = '/' in rest
                objectlist, tmp_total_size = _get_filelist_remote(S3Uri(prefix), recursive=need_recursion)
                total_size += tmp_total_size
                for key in objectlist:
                    ## Check whether the 'key' matches the requested wildcards
                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
                        remote_list[key] = objectlist[key]
            else:
                ## No wildcards - simply append the given URI to the list
                key = unicodise(os.path.basename(deunicodise(uri.object())))
                if not key:
                    raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
                remote_item = {
                    'base_uri': uri,
                    'object_uri_str': uri.uri(),
                    'object_key': uri.object()
                }
                if require_attribs:
                    _get_remote_attribs(uri, remote_item)

                remote_list[key] = remote_item
                md5 = remote_item.get('md5')
                if md5:
                    remote_list.record_md5(key, md5)
                total_size += remote_item.get('size', 0)

    remote_list, exclude_list = filter_exclude_include(remote_list)
    return remote_list, exclude_list, total_size
def fetch_remote_list(args, require_attribs = False, recursive = None):
    def _get_filelist_remote(remote_uri, recursive = True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }
        info(u"Retrieving list of remote files for %s ..." % remote_uri)

        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), recursive = recursive)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/')+1]
            remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = FileDict(ignore_case = False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][-1] != "/":
                ## We asked for one file and we got that file :-)
                key = os.path.basename(object['Key'])
                object_uri_str = remote_uri_original.uri()
                break_now = True
                rem_list = FileDict(ignore_case = False)    ## Remove whatever has already been put to rem_list
            else:
                key = object['Key'][rem_base_len:]      ## Beware - this may be '' if object['Key']==rem_base !!
                object_uri_str = remote_uri.uri() + key
            rem_list[key] = {
                'size' : int(object['Size']),
                'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
                'md5' : object['ETag'][1:-1],
                'object_key' : object['Key'],
                'object_uri_str' : object_uri_str,
                'base_uri' : remote_uri,
                'dev' : None,
                'inode' : None,
            }
            md5 = object['ETag'][1:-1]
            rem_list.record_md5(key, md5)
            if break_now:
                break
        return rem_list

    cfg = Config()
    remote_uris = []
    remote_list = FileDict(ignore_case = False)

    if type(args) not in (list, tuple):
        args = [args]

    if recursive == None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 's3':
            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
        remote_uris.append(uri)

    if recursive:
        for uri in remote_uris:
            objectlist = _get_filelist_remote(uri)
            for key in objectlist:
                remote_list[key] = objectlist[key]
                remote_list.record_md5(key, objectlist.get_md5(key))
    else:
        for uri in remote_uris:
            uri_str = str(uri)
            ## Wildcards used in remote URI?
            ## If yes we'll need a bucket listing...
            if uri_str.find('*') > -1 or uri_str.find('?') > -1:
                first_wildcard = uri_str.find('*')
                first_questionmark = uri_str.find('?')
                # a '?' counts as the first wildcard if there is no '*' at all,
                # or if it appears before the first '*'
                if first_wildcard < 0 or (first_questionmark > -1 and first_questionmark < first_wildcard):
                    first_wildcard = first_questionmark
                prefix = uri_str[:first_wildcard]
                rest = uri_str[first_wildcard+1:]
                ## Only request recursive listing if the 'rest' of the URI,
                ## i.e. the part after first wildcard, contains '/'
                need_recursion = rest.find('/') > -1
                objectlist = _get_filelist_remote(S3Uri(prefix), recursive = need_recursion)
                for key in objectlist:
                    ## Check whether the 'key' matches the requested wildcards
                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
                        remote_list[key] = objectlist[key]
            else:
                ## No wildcards - simply append the given URI to the list
                key = os.path.basename(uri.object())
                if not key:
                    raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
                remote_item = {
                    'base_uri': uri,
                    'object_uri_str': unicode(uri),
                    'object_key': uri.object()
                }
                if require_attribs:
                    response = S3(cfg).object_info(uri)
                    remote_item.update({
                        'size': int(response['headers']['content-length']),
                        'md5': response['headers']['etag'].strip('"\''),
                        'timestamp' : dateRFC822toUnix(response['headers']['date'])
                    })
                    # get md5 from header if it's present. We would have set that during upload
                    if response['headers'].has_key('x-amz-meta-s3cmd-attrs'):
                        attrs = parse_attrs_header(response['headers']['x-amz-meta-s3cmd-attrs'])
                        if attrs.has_key('md5'):
                            remote_item.update({'md5': attrs['md5']})

                remote_list[key] = remote_item
    return remote_list
supergraph.update(G)
# nodes with diff expr genes in this graph
diff_expressed_nodes = G.nodes & diff_expressed
# for each diff-expressed node, build the set of other diff-expressed gene nodes reachable from it
for node in diff_expressed_nodes:
    diff_expr_descendants = nx.descendants(G, node) & diff_expressed_nodes
    diff_ancestors[node].update(diff_expr_descendants)
all_mega_ancestorz += optimal_greedy(G, diff_expressed)

freq = {k: len(v) for k, v in diff_ancestors.items()}
freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)

hsa_names = FileDict(file='data/hsa_names.tsv', pattern='hsa:(?P<key>\d+)\t(?P<value>.*)')

n_top_genes = 10
print('Top {} genes:'.format(n_top_genes))
hsa_names.preload(*[gene for gene, fr in freq[:n_top_genes]])
for gene, fr in freq[:n_top_genes]:
    print('{:>4} : {:>7} : {}'.format(fr, gene, hsa_names[gene]))
print()

print('Top {} genes in optimal-greedy algorithm:'.format(n_top_genes))
for gene, fr in all_mega_ancestorz.most_common(n_top_genes):
    print('{:>4} : {:>7} : {}'.format(fr, gene, hsa_names[gene]))
print()

print('Looking in the supergraph')
supergraph.graph['name'] = 'SuperGraph'
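# The FileDict used here is a different beast from the s3cmd one: it is
# assumed to resolve keys lazily from a file via the named groups of
# `pattern`. A rough dict-based stand-in (the exact key form FileDict
# expects is an assumption):
import re

pattern = re.compile(r'hsa:(?P<key>\d+)\t(?P<value>.*)')
names = {}
with open('data/hsa_names.tsv') as fh:
    for line in fh:
        m = pattern.match(line)
        if m:
            names[m.group('key')] = m.group('value')
# names['10458'] would then give that gene's display name, mirroring hsa_names[gene]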
class Job:
    @staticmethod
    def fromDir(jobDir, f_hasFailed, interactive=False, quiet=False):
        if not os.path.isdir(jobDir):
            raise Exception('you must give a directory: ' + jobDir)
        home, name = os.path.split(jobDir)
        return Job(home, name, f_hasFailed, interactive, newjob=False, quiet=quiet)

    @staticmethod
    def jobsRunning():
        jobs = set()
        lines = subprocess.check_output('qstat').strip()
        if len(lines) == 0:
            return jobs
        ar = lines.split('\n')
        assert len(ar) >= 3  # first two lines are formatting
        for jstr in ar[2:]:
            jobs.add(int(jstr.split()[0]))
        return jobs

    @staticmethod
    def killEverything():
        for jid in Job.jobsRunning():
            os.system('qdel %d' % (jid))

    def __init__(self, homeDir, name, f_hasFailed, interactive=False, newjob=True, quiet=False):
        """
        homeDir is where experiments are stored
        name is the name of this experiment
            if there is another job with the same name, they are assumed
            to be the same. delete old jobs that you do not want to be
            confused with
        f_hasFailed is a function that takes a log file and returns a boolean
            for whether or not the job has failed
        if in interactive mode, will prompt user for input
            this should not be used for scripts, which would hang forever in these cases
        """
        self.name = name
        self.home = os.path.join(homeDir, name)
        self.logDir = os.path.join(self.home, 'log')
        self.f_hasFailed = f_hasFailed
        if os.path.isfile(self.home):
            raise Exception('the home directory you gave must not contain a file that matches the name you gave')
        if newjob:
            print 'creating new job:', self.home
            if os.path.isdir(self.home):
                if interactive:
                    print "the directory %s already exists" % (self.home)
                    r = raw_input('would you like me to delete it for you? [y|n] ')
                    if str2bool(r):
                        os.system('rm -r ' + self.home)
                        os.system('mkdir ' + self.home)
                    else:
                        raise Exception('cannot proceed')
                else:
                    raise Exception('please give a unique name or delete old jobs: ' + self.home)
            elif 0 != os.system('mkdir -p ' + self.home):
                raise Exception('cannot make home directory!')
            if not os.path.isdir(self.logDir) and 0 != os.system('mkdir -p ' + self.logDir):
                raise Exception('cannot make log directory!')
        else:
            assert os.path.isdir(self.home)
            assert os.path.isdir(self.logDir)
            if not quiet:
                print 'loading existing job:', self.home
        self.javaOpt = FileDict(os.path.join(self.home, 'java.settings'), exists=not newjob)  # start with "-D"
        self.metaOpt = FileDict(os.path.join(self.home, 'meta.settings'), exists=not newjob)  # xmx, jar, profile, etc
        self.qsubOpt = FileDict(os.path.join(self.home, 'qsub.settings'), exists=not newjob)  # mem_free, h_rt
        self.prepared = False

    def getResourceDirectory(self, parentFolderName, childFolderName, overwrite=True):
        '''see getResourceFile for details'''
        folder = os.path.join(self.home, parentFolderName, childFolderName)
        if os.path.isdir(folder):
            if not overwrite:
                raise Exception('this directory already exists! ' + folder)
        else:
            os.system('mkdir -p ' + folder)
        return folder

    def getResourceFile(self, folderName, fileName, overwrite=True):
        '''
        example usage: folderName='diagnostics', fileName='parameters.txt',
        this just returns a path to home/diagnostics/parameters.txt
        resources are useful for job-specific output (avoid job output collision)
        '''
        folder = os.path.join(self.home, folderName)
        if not os.path.isdir(folder):
            os.system('mkdir ' + folder)
        f = os.path.join(folder, fileName)
        if not overwrite and os.path.isfile(f):
            raise Exception('this file already exists! ' + f)
        return f

    def addLib(self, dirOrFile):
        if os.path.isfile(dirOrFile):
            cp = self.class_path() + ':' + dirOrFile
            self.metaOpt.setValue('class_path', cp)
        else:
            l = all_jars_in(dirOrFile)
            l.append(self.class_path())
            self.metaOpt.setValue('class_path', ':'.join(l))

    def jar(self):
        return self.metaOpt.getValue('jar')

    def main_class(self):
        return self.metaOpt.getValue('main_class')

    def class_path(self):
        return self.metaOpt.getValue('class_path')

    def xmx(self):
        return self.metaOpt.getValue('xmx')

    def profile(self):
        return self.metaOpt.getValue('profile', 'n') == 'y'

    def mem_free(self):
        return self.qsubOpt.getValue('mem_free')

    def use_asserts(self):
        return str2bool(self.metaOpt.getValue('asserts'))

    def command_line_args(self):
        f = codecs.open(os.path.join(self.home, 'command_line_args.txt'), 'r', 'utf-8')
        args = [x.strip() for x in f.readlines()]
        f.close()
        return args

    def setJavaOption(self, key, value):
        self.javaOpt.setValue(key, value)

    def qsubScript(self):
        return os.path.join(self.home, 'job.sh')

    def writeQsubScript(self, cmd):
        f = self.qsubScript()
        ff = codecs.open(f, 'w', 'utf-8')
        ff.write("#$ -cwd\n")              # run from current directory
        ff.write("#$ -j y\n")              # join stderr to stdout
        ff.write("#$ -V\n")
        ff.write("#$ -l h_rt=72:00:00\n")  # timeout
        ff.write("#$ -l mem_free=%s\n" % (self.mem_free()))
        ff.write("#$ -M [email protected]\n")
        ff.write("#$ -m as\n")             # a=aborted b=beginning e=end s=suspended
        ff.write("#$ -o %s\n" % self.logDir)
        ff.write(cmd + " && echo -e \"finished\\t`date +\"%%Y-%%m-%%d %%H:%%M:%%S\"`\" >> %s\n" % (self.metaOpt.filename))
        ff.write('\n')
        ff.close()
        return f

    def prepare(self):
        # java and jar
        cmd = 'java'
        if self.jar() != 'None':
            if not os.path.isfile(self.jar()):
                raise Exception('JAR file provided is not a file! ' + self.jar())
            cmd += " -jar %s \\\n\t" % (self.jar())
            jarSha1 = subprocess.check_output("sha1sum %s" % (self.jar()), shell=True).strip()
            self.metaOpt.setValue('jar-sha1', jarSha1)

        # class path, assert
        cmd += ' -cp ' + self.class_path() + ' \\\n\t'
        #cmd += ' -cp ' + self.class_path() + ':' + self.home + ' \\\n\t'
        if self.use_asserts():
            cmd += ' -ea \\\n\t'

        # profiling
        if self.profile():
            cmd += ' -agentlib:hprof=cpu=samples,depth=20,heap=sites \\\n\t'

        # java options
        for k, v in self.javaOpt.iteritems():
            if not k.startswith('-D'):
                k = '-D' + k
            cmd += " %s=\"%s\" \\\n\t" % (k, v)

        # main class
        cmd += ' ' + self.main_class() + ' \\\n\t'

        # command line arguments
        args = self.command_line_args()
        for a in args:
            cmd += " %s \\\n\t\t" % (a)

        # generate a shell script for SGE
        self.writeQsubScript(cmd)
        self.prepared = True

    def setSubmission(self, class_path, main_class, args, jar=None, xmx='2G', mem_free='3G', \
            profile=False, asserts=True, actuallySubmit=False):
        f = codecs.open(os.path.join(self.home, 'command_line_args.txt'), 'w', 'utf-8')
        for a in args:
            f.write(a + '\n')
        f.close()
        xmx = canonicalMemoryDescription(xmx)
        mem_free = canonicalMemoryDescription(mem_free)
        if jar is None:
            jar = 'None'
        self.metaOpt.setValue('jar', jar)
        self.metaOpt.setValue('main_class', main_class)
        self.metaOpt.setValue('class_path', class_path)
        self.metaOpt.setValue('xmx', xmx)
        self.qsubOpt.setValue('mem_free', mem_free)
        self.metaOpt.setValue('profile', bool2str(profile))
        self.metaOpt.setValue('asserts', bool2str(asserts))
        self.prepare()
        if actuallySubmit:
            self.submit()

    def submit(self):
        # don't submit the same job twice
        assert self.submittedAt() is None
        assert self.jid() is None
        assert self.prepared
        qsubScript = self.qsubScript()
        self.metaOpt.setValue('submitted', timestamp())
        self.metaOpt.flush()
        self.javaOpt.flush()
        self.qsubOpt.flush()
        r = subprocess.check_output("qsub -N %s %s" % (self.name, qsubScript), shell=True)
        jid = int(r.split()[2])
        self.metaOpt.setValue('jid', jid, flush=True)
        print "submitted job \"%s\" (%d)" % (self.name, jid)

    def submittedAt(self):
        s = self.metaOpt.getValue('submitted')
        if s:
            return parsetime(s)
        else:
            return None

    def jid(self):
        s = self.metaOpt.getValue('jid')
        if s:
            return int(s)
        else:
            return None

    def failed(self):
        if self.recentlySubmitted():
            return False
        f = self.newestLog()
        if not f:
            print 'there is no log file, assuming it failed!'
            return True
        return self.f_hasFailed(f)

    def recentlySubmitted(self):
        '''
        give qsub some time to get its stuff together...
        for the first 10 seconds after submitting, just
        assume the job is running normally
        '''
        assert self.submittedAt() is not None
        return time.time() - self.submittedAt() < 10.0

    def isFinished(self):
        if self.recentlySubmitted() or self.isRunning():
            return False
        self.metaOpt.load()
        return self.metaOpt.hasKey('finished')

    def isRunning(self):
        if self.recentlySubmitted():
            return True
        assert self.jid() is not None
        assert type(self.jid()) is int
        running = Job.jobsRunning()
        return self.jid() in running

    def wait(self, timeout=48*60*60, secsBetweenPolls=30, exceptionOnTimeout=False):
        assert self.jid() is not None
        assert type(self.jid()) is int
        total = 0
        while total < timeout and self.isRunning():
            total += secsBetweenPolls
            time.sleep(secsBetweenPolls)
        if exceptionOnTimeout and total >= timeout:
            raise Exception("waited for %d seconds and job is not done!" % (total))
        else:
            print "waited %d seconds, and %d is done!" % (total, self.jid())

    def kill(self):
        if not self.isRunning():
            raise Exception('jid is none, job is not live')
        assert self.jid() is not None
        assert type(self.jid()) is int
        os.system("qdel %d" % (self.jid()))

    def pause(self):
        if not self.isRunning():
            raise Exception('jid is none, job is not live')
        assert self.jid() is not None
        assert type(self.jid()) is int
        os.system("qalter -u %d" % (self.jid()))

    def unpause(self):
        if not self.isRunning():
            raise Exception('jid is none, job is not live')
        assert self.jid() is not None
        assert type(self.jid()) is int
        os.system("qalter -U %d" % (self.jid()))

    def logs(self):
        assert self.logDir is not None and os.path.isdir(self.logDir)
        return [os.path.join(self.logDir, x) for x in os.listdir(self.logDir)]

    def newestLog(self):
        l = self.logs()
        if len(l) == 0:
            raise Exception('there are no logs!')
        return l[-1]
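# Hedged usage sketch for the Job class above. `has_failed` and all paths
# and arguments are made up; str2bool/timestamp/parsetime come from this
# module's helpers.
def has_failed(logfile):
    with open(logfile) as fh:
        return 'Exception' in fh.read()

job = Job('/exp', 'parse-v1', has_failed, interactive=False, newjob=True)
job.setSubmission(class_path='lib/parser.jar', main_class='edu.Main',
                  args=['--train', 'data.txt'], xmx='4G', mem_free='5G',
                  actuallySubmit=True)
job.wait(timeout=3600, secsBetweenPolls=60)
if job.failed():
    print 'see log:', job.newestLog()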
def fetch_local_list(args, is_src=False, recursive=None):
    def _fetch_local_list_info(loc_list):
        len_loc_list = len(loc_list)
        total_size = 0
        info(u"Running stat() and reading/calculating MD5 values on %d files, this may take some time..." % len_loc_list)
        counter = 0
        for relative_file in loc_list:
            counter += 1
            if counter % 1000 == 0:
                info(u"[%d/%d]" % (counter, len_loc_list))

            if relative_file == '-':
                continue

            full_name = loc_list[relative_file]['full_name']
            try:
                sr = os.stat_result(os.stat(deunicodise(full_name)))
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # file was removed async to us getting the list
                    continue
                else:
                    raise
            loc_list[relative_file].update({
                'size': sr.st_size,
                'mtime': sr.st_mtime,
                'dev': sr.st_dev,
                'inode': sr.st_ino,
                'uid': sr.st_uid,
                'gid': sr.st_gid,
                'sr': sr  # save it all, may need it in preserve_attrs_list
                ## TODO: Possibly more to save here...
            })
            total_size += sr.st_size
            if 'md5' in cfg.sync_checks:
                md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
                if md5 is None:
                    try:
                        md5 = loc_list.get_md5(relative_file)  # this does the file I/O
                    except IOError:
                        continue
                    cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
                loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5, sr.st_size)
        return total_size

    def _get_filelist_local(loc_list, local_uri, cache):
        info(u"Compiling list of local files...")

        if local_uri.basename() == "-":
            try:
                uid = os.geteuid()
                gid = os.getegid()
            except:
                uid = 0
                gid = 0
            loc_list["-"] = {
                'full_name': '-',
                'size': -1,
                'mtime': -1,
                'uid': uid,
                'gid': gid,
                'dev': 0,
                'inode': 0,
            }
            return loc_list, True
        if local_uri.isdir():
            local_base = local_uri.basename()
            local_path = local_uri.path()
            if is_src and len(cfg.files_from):
                filelist = _get_filelist_from_file(cfg, local_path)
                single_file = False
            else:
                if cfg.follow_symlinks:
                    filelist = _fswalk_follow_symlinks(local_path)
                else:
                    filelist = _fswalk_no_symlinks(local_path)
                single_file = False
        else:
            local_base = ""
            local_path = local_uri.dirname()
            filelist = [(local_path, [], [local_uri.basename()])]
            single_file = True
        for root, dirs, files in filelist:
            rel_root = root.replace(local_path, local_base, 1)
            for f in files:
                full_name = os.path.join(root, f)
                if not os.path.isfile(deunicodise(full_name)):
                    if os.path.exists(deunicodise(full_name)):
                        warning(u"Skipping over non regular file: %s" % full_name)
                    continue
                if os.path.islink(deunicodise(full_name)):
                    if not cfg.follow_symlinks:
                        warning(u"Skipping over symbolic link: %s" % full_name)
                        continue
                relative_file = os.path.join(rel_root, f)
                if os.path.sep != "/":
                    # Convert non-unix dir separators to '/'
                    relative_file = "/".join(relative_file.split(os.path.sep))
                if cfg.urlencoding_mode == "normal":
                    relative_file = replace_nonprintables(relative_file)
                if relative_file.startswith('./'):
                    relative_file = relative_file[2:]
                loc_list[relative_file] = {
                    'full_name': full_name,
                }
        return loc_list, single_file

    def _maintain_cache(cache, local_list):
        # if getting the file list from files_from, it is going to be
        # a subset of the actual tree. We should not purge content
        # outside of that subset as we don't know if it's valid or
        # not. Leave it to a non-files_from run to purge.
        if cfg.cache_file and len(cfg.files_from) == 0:
            cache.mark_all_for_purge()
            for i in local_list.keys():
                cache.unmark_for_purge(local_list[i]['dev'],
                                       local_list[i]['inode'],
                                       local_list[i]['mtime'],
                                       local_list[i]['size'])
            cache.purge()
            cache.save(cfg.cache_file)

    cfg = Config()

    cache = HashCache()
    if cfg.cache_file:
        try:
            cache.load(cfg.cache_file)
        except IOError:
            info(u"No cache file found, creating it.")

    local_uris = []
    local_list = FileDict(ignore_case=False)
    single_file = False

    if type(args) not in (list, tuple, set):
        args = [args]

    if recursive == None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 'file':
            raise ParameterError("Expecting filename or directory instead of: %s" % arg)
        if uri.isdir() and not recursive:
            raise ParameterError("Use --recursive to upload a directory: %s" % arg)
        local_uris.append(uri)

    for uri in local_uris:
        list_for_uri, single_file = _get_filelist_local(local_list, uri, cache)

    ## Single file is True if and only if the user
    ## specified one local URI and that URI represents
    ## a FILE. Ie it is False if the URI was of a DIR
    ## and that dir contained only one FILE. That's not
    ## a case of single_file==True.
    if len(local_list) > 1:
        single_file = False

    local_list, exclude_list = filter_exclude_include(local_list)
    total_size = _fetch_local_list_info(local_list)
    _maintain_cache(cache, local_list)
    return local_list, single_file, exclude_list, total_size
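# Sketch of the HashCache idea used above: MD5 results are keyed by
# (dev, inode, mtime, size), so an unchanged file never gets re-hashed
# across runs. This stand-in dict mimics cache.md5()/cache.add(); the
# names are illustrative.
import hashlib, os

_cache = {}

def cached_md5(path):
    st = os.stat(path)
    key = (st.st_dev, st.st_ino, st.st_mtime, st.st_size)
    if key not in _cache:
        with open(path, 'rb') as fh:
            _cache[key] = hashlib.md5(fh.read()).hexdigest()
    return _cache[key]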
def fetch_local_list(args, recursive = None):
    def _get_filelist_local(loc_list, local_uri, cache):
        info(u"Compiling list of local files...")

        if deunicodise(local_uri.basename()) == "-":
            loc_list["-"] = {
                'full_name_unicode' : '-',
                'full_name' : '-',
                'size' : -1,
                'mtime' : -1,
            }
            return loc_list, True
        if local_uri.isdir():
            local_base = deunicodise(local_uri.basename())
            local_path = deunicodise(local_uri.path())
            if cfg.follow_symlinks:
                filelist = _fswalk_follow_symlinks(local_path)
            else:
                filelist = _fswalk_no_symlinks(local_path)
            single_file = False
        else:
            local_base = ""
            local_path = deunicodise(local_uri.dirname())
            filelist = [( local_path, [], [deunicodise(local_uri.basename())] )]
            single_file = True
        for root, dirs, files in filelist:
            rel_root = root.replace(local_path, local_base, 1)
            for f in files:
                full_name = os.path.join(root, f)
                if not os.path.isfile(full_name):
                    continue
                if os.path.islink(full_name):
                    if not cfg.follow_symlinks:
                        continue
                relative_file = unicodise(os.path.join(rel_root, f))
                if os.path.sep != "/":
                    # Convert non-unix dir separators to '/'
                    relative_file = "/".join(relative_file.split(os.path.sep))
                if cfg.urlencoding_mode == "normal":
                    relative_file = replace_nonprintables(relative_file)
                if relative_file.startswith('./'):
                    relative_file = relative_file[2:]
                sr = os.stat_result(os.lstat(full_name))
                loc_list[relative_file] = {
                    'full_name_unicode' : unicodise(full_name),
                    'full_name' : full_name,
                    'size' : sr.st_size,
                    'mtime' : sr.st_mtime,
                    'dev'   : sr.st_dev,
                    'inode' : sr.st_ino,
                    'uid' : sr.st_uid,
                    'gid' : sr.st_gid,
                    'sr': sr # save it all, may need it in preserve_attrs_list
                    ## TODO: Possibly more to save here...
                }
                if 'md5' in cfg.sync_checks:
                    md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
                    if md5 is None:
                        try:
                            md5 = loc_list.get_md5(relative_file) # this does the file I/O
                        except IOError:
                            continue
                        cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
                    loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5)
        return loc_list, single_file

    def _maintain_cache(cache, local_list):
        if cfg.cache_file:
            cache.mark_all_for_purge()
            for i in local_list.keys():
                cache.unmark_for_purge(local_list[i]['dev'], local_list[i]['inode'], local_list[i]['mtime'], local_list[i]['size'])
            cache.purge()
            cache.save(cfg.cache_file)

    cfg = Config()

    cache = HashCache()
    if cfg.cache_file:
        try:
            cache.load(cfg.cache_file)
        except IOError:
            info(u"No cache file found, creating it.")

    local_uris = []
    local_list = FileDict(ignore_case = False)
    single_file = False

    if type(args) not in (list, tuple):
        args = [args]

    if recursive == None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 'file':
            raise ParameterError("Expecting filename or directory instead of: %s" % arg)
        if uri.isdir() and not recursive:
            raise ParameterError("Use --recursive to upload a directory: %s" % arg)
        local_uris.append(uri)

    for uri in local_uris:
        list_for_uri, single_file = _get_filelist_local(local_list, uri, cache)

    ## Single file is True if and only if the user
    ## specified one local URI and that URI represents
    ## a FILE. Ie it is False if the URI was of a DIR
    ## and that dir contained only one FILE. That's not
    ## a case of single_file==True.
    if len(local_list) > 1:
        single_file = False

    _maintain_cache(cache, local_list)
    return local_list, single_file
def fetch_remote_list(args, require_attribs = False, recursive = None, uri_params = {}):
    def _get_remote_attribs(uri, remote_item):
        response = S3(cfg).object_info(uri)
        remote_item.update({
            'size': int(response['headers']['content-length']),
            'md5': response['headers']['etag'].strip('"\''),
            'timestamp' : dateRFC822toUnix(response['headers']['date'])
        })
        try:
            md5 = response['s3cmd-attrs']['md5']
            remote_item.update({'md5': md5})
            debug(u"retrieved md5=%s from headers" % md5)
        except KeyError:
            pass

    def _get_filelist_remote(remote_uri, recursive = True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }
        info(u"Retrieving list of remote files for %s ..." % remote_uri)
        empty_fname_re = re.compile(r'\A\s*\Z')

        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), recursive = recursive, uri_params = uri_params)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/')+1]
            remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = FileDict(ignore_case = False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][-1] != "/":
                ## We asked for one file and we got that file :-)
                key = os.path.basename(object['Key'])
                object_uri_str = remote_uri_original.uri()
                break_now = True
                rem_list = FileDict(ignore_case = False)    ## Remove whatever has already been put to rem_list
            else:
                key = object['Key'][rem_base_len:]      ## Beware - this may be '' if object['Key']==rem_base !!
                object_uri_str = remote_uri.uri() + key
                if empty_fname_re.match(key):
                    # Objects may exist on S3 with empty names (''), which don't map so well to common filesystems.
                    warning(u"Empty object name on S3 found, ignoring.")
                    continue
            rem_list[key] = {
                'size' : int(object['Size']),
                'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
                'md5' : object['ETag'][1:-1],
                'object_key' : object['Key'],
                'object_uri_str' : object_uri_str,
                'base_uri' : remote_uri,
                'dev' : None,
                'inode' : None,
            }
            if rem_list[key]['md5'].find("-") > 0: # always get it for multipart uploads
                _get_remote_attribs(S3Uri(object_uri_str), rem_list[key])
            md5 = rem_list[key]['md5']
            rem_list.record_md5(key, md5)
            if break_now:
                break
        return rem_list

    cfg = Config()
    remote_uris = []
    remote_list = FileDict(ignore_case = False)

    if type(args) not in (list, tuple):
        args = [args]

    if recursive == None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 's3':
            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
        remote_uris.append(uri)

    if recursive:
        for uri in remote_uris:
            objectlist = _get_filelist_remote(uri, recursive = True)
            for key in objectlist:
                remote_list[key] = objectlist[key]
                remote_list.record_md5(key, objectlist.get_md5(key))
    else:
        for uri in remote_uris:
            uri_str = unicode(uri)
            ## Wildcards used in remote URI?
            ## If yes we'll need a bucket listing...
            wildcard_split_result = re.split("\*|\?", uri_str, maxsplit=1)
            if len(wildcard_split_result) == 2:
                # wildcards found
                prefix, rest = wildcard_split_result
                ## Only request recursive listing if the 'rest' of the URI,
                ## i.e. the part after first wildcard, contains '/'
                need_recursion = '/' in rest
                objectlist = _get_filelist_remote(S3Uri(prefix), recursive = need_recursion)
                for key in objectlist:
                    ## Check whether the 'key' matches the requested wildcards
                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
                        remote_list[key] = objectlist[key]
            else:
                ## No wildcards - simply append the given URI to the list
                key = os.path.basename(uri.object())
                if not key:
                    raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
                remote_item = {
                    'base_uri': uri,
                    'object_uri_str': unicode(uri),
                    'object_key': uri.object()
                }
                if require_attribs:
                    _get_remote_attribs(uri, remote_item)

                remote_list[key] = remote_item
                md5 = remote_item.get('md5')
                if md5:
                    remote_list.record_md5(key, md5)

    remote_list, exclude_list = filter_exclude_include(remote_list)
    return remote_list, exclude_list