def log(code,message,level,stdout=False,stderr=False,logfile=True,logger_name=None):
    # check code length
    if len(code)!=12:
        raise SDException("SYNDALOG-002","%s has an incorrect length"%code)

    # retrieve modulecode part of code
    modulecode=code[0:8]

    if level>=get_verbosity_level():
        if stdout:
            sdtools.print_stdout(message)

        if stderr:
            # add msg prefix
            label=get_verbosity_label(level)
            formatted_msg='%s: %s'%(label.upper(),message)

            sdtools.print_stderr(formatted_msg)

    if logfile:
        if logger_name is None:
            # default logger
            default_logger.log(level,message,extra={'code' : code})
        else:
            if logger_name==sdconst.LOGGER_DOMAIN:
                domain_logger.log(level,message,extra={'code' : code})
            else:
                assert False
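# Illustrative usage sketch (not from the original module): log() expects a
# 12-character code whose first 8 characters identify the calling module, as
# enforced by the length check above. The code value and message below are
# hypothetical.
def _example_log_usage():
    import logging
    log("SYNDALOG-042", "cache refresh started", logging.INFO, stderr=True)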
def log(code,message,level,stdout=False,stderr=False,logfile=True,logger_name=None):
    if level>=get_verbosity_level():
        if stdout:
            sdtools.print_stdout(message)

        if stderr:
            # add msg prefix
            label=get_verbosity_label(level)
            formatted_msg='%s: %s'%(label.upper(),message)

            sdtools.print_stderr(formatted_msg)

    if logfile:
        if logger_name is None:
            # default logger
            default_logger.log(level,message,extra={'code' : code})
        else:
            if logger_name==sdconst.LOGGER_DOMAIN:
                domain_logger.log(level,message,extra={'code' : code})
            else:
                assert False
def upgrade(args):
    """
    Note
        inter-selection func
    """
    import sdselectionsgroup, sdparameter, sdsearch

    # BEWARE: tricky statement
    #
    # 'upgrade' is a multi-selection 'subcommand' which does the same as the
    # mono-selection 'install' subcommand, but for many selections. What we do
    # here is replace the 'upgrade' subcommand with the 'install' subcommand, so that,
    # now that we are in the 'upgrade' func/context,
    # we can come back to the existing mono-selection func,
    # for each selection, with the 'install' subcommand.
    #
    args.subcommand='install'

    project=sdparameter.extract_values_from_parameter(args.parameter,'project') # retrieve project(s) from parameter

    for selection in sdselectionsgroup.get_selection_list(project=project):
        print_stderr("Process %s.."%selection.filename)

        if not args.dry_run:
            # TODO: maybe force type=file here, in case the selection file has 'type=dataset'
            files=sdsearch.run(selection=selection)

            args.yes=True
            install(args,files=files)
def do_search(self,arg):
    self.parameter=arg.split()
    self.complete_parameter()

    localsearch=sdsessionparam.get_value('localsearch')
    dry_run=sdsessionparam.get_value('dry_run')
    type_=sdsessionparam.get_value('type')

    kw={'parameter':self.parameter,'dry_run':dry_run}

    if localsearch:
        if type_=='Dataset':
            datasets=sdldataset.get_datasets(**kw)
            if not dry_run:
                if len(datasets)==0:
                    sdtools.print_stderr('Dataset not found')
                else:
                    sdldataset.print_list(datasets)
        elif type_=='File':
            files=sdlfile.get_files(**kw)
            if not dry_run:
                sdlfile.print_(files)
    else:
        if type_=='Dataset':
            datasets=sdrdataset.get_datasets(**kw)
            if not dry_run:
                if len(datasets)==0:
                    sdtools.print_stderr('Dataset not found')
                else:
                    sdrdataset.print_list(datasets)
        elif type_=='File':
            files=sdrfile.get_files(**kw)
            if not dry_run:
                sdrfile.print_list(files)
def run(args, metadata=None):
    import syndautils

    syndautils.check_daemon()

    if metadata is None:
        # retrieve metadata

        if args.incremental and not args.selection_file:
            print_stderr("ERROR: 'selection_file' option is not set (a selection file must be used when 'incremental' option is set)")
            return (1, 0)

        if args.selection_file is not None:
            sdlog.info("SYNDINST-006", "Process '%s'" % args.selection_file)

        try:
            metadata = syndautils.file_full_search(args)
        except sdexception.EmptySelectionException, e:
            print_stderr('No dataset will be installed, upgraded, or removed.')
            return (0, 0)
        except sdexception.SDException, e:
            sdlog.info("SYNDINST-006", "Exception occurred during installation ('%s')" % str(e))
            raise
def run(args):
    try:
        metadata = syndautils.file_full_search(args)
    except sdexception.EmptySelectionException, e:
        print_stderr("You must specify at least one facet to perform this action.")
        return 1
def dump(args):
    if args.type_==sdconst.SA_TYPE_FILE:
        file_dump(args)
    elif args.type_==sdconst.SA_TYPE_AGGREGATION:
        print_stderr('%s operation is not available for variable/aggregation type'%args.action)
    elif args.type_==sdconst.SA_TYPE_DATASET:
        dataset_dump(args)
def do_search(self, arg):
    self.parameter = arg.split()
    self.complete_parameter()

    localsearch = sdsessionparam.get_value('localsearch')
    dry_run = sdsessionparam.get_value('dry_run')
    type_ = sdsessionparam.get_value('type')

    kw = {'parameter': self.parameter, 'dry_run': dry_run}

    if localsearch:
        if type_ == 'Dataset':
            datasets = sdldataset.get_datasets(**kw)
            if not dry_run:
                if len(datasets) == 0:
                    sdtools.print_stderr('Dataset not found')
                else:
                    sdldataset.print_list(datasets)
        elif type_ == 'File':
            files = sdlfile.get_files(**kw)
            if not dry_run:
                sdlfile.print_(files)
    else:
        if type_ == 'Dataset':
            datasets = sdrdataset.get_datasets(**kw)
            if not dry_run:
                if len(datasets) == 0:
                    sdtools.print_stderr('Dataset not found')
                else:
                    sdrdataset.print_list(datasets)
        elif type_ == 'File':
            files = sdrfile.get_files(**kw)
            if not dry_run:
                sdrfile.print_list(files)
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('pattern1',nargs='?',default=None)
    parser.add_argument('pattern2',nargs='?',default=None)
    parser.add_argument('-c','--columns',type=int,default=1)
    args = parser.parse_args(args=argv)

    p1=args.pattern1
    p2=args.pattern2

    if (p1 is None and p2 is None):
        filter_and_print_name(params.keys())
    elif (p1 is not None and p2 is None):
        if p1 in params:
            filter_and_print_value(params[p1],columns=args.columns)
        else:
            if p1 in mapping_keywords:
                print_models_mapping(models)
            else:
                filter_and_print_name(params.keys(),pattern=p1)
    elif (p1 is not None and p2 is not None):
        if p1 in params:
            filter_and_print_value(params[p1],columns=args.columns,pattern=p2)
        else:
            if p1 in mapping_keywords:
                print_models_mapping(models,p2)
            else:
                sdtools.print_stderr("Parameter not found")
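# Illustrative invocations (hypothetical facet names/patterns): with no argument
# every known parameter name is listed, one argument prints that parameter's
# values (or filters names when the argument is not a parameter), and a second
# argument filters the values of the first; '-c' controls the column count.
def _example_main_usage():
    main([])                         # list every parameter name
    main(['project'])                # list values of the 'project' facet
    main(['project', 'CMIP'])        # list 'project' values matching 'CMIP'
    main(['-c', '3', 'experiment'])  # print 'experiment' values in 3 columns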
def print_(args):
    p1 = args.pattern1
    p2 = args.pattern2

    if (p1 is None and p2 is None):
        filter_and_print_name(params.keys())
    elif (p1 is not None and p2 is None):
        if p1 in params:
            filter_and_print_value(params[p1], columns=args.columns)
        else:
            if p1 in mapping_keywords:
                print_models_mapping(models)
            else:
                filter_and_print_name(params.keys(), pattern=p1)
    elif (p1 is not None and p2 is not None):
        if p1 in params:
            filter_and_print_value(params[p1], columns=args.columns, pattern=p2)
        else:
            if p1 in mapping_keywords:
                print_models_mapping(models, p2)
            else:
                sdtools.print_stderr("Parameter not found")
def dataset_version(args):
    import sdremoteparam, syndautils, sdearlystreamutils

    # Don't be misled by the identifiers here: sdinference always produces the
    # Search-API key name, i.e. instance_id, while the Synda-style variable name,
    # i.e. dataset_functional_id, is used below for better readability.

    li = sdearlystreamutils.get_facet_values_early(args.stream, 'instance_id')
    if len(li) == 0:
        print_stderr('Please specify a dataset name.')
        return 1
    elif len(li) > 1:
        print_stderr('Too many arguments.')
        return 1
    else:
        dataset_functional_id = li[0]

    dataset_functional_id_without_version = syndautils.strip_dataset_version(dataset_functional_id)

    params = sdremoteparam.run(pname='version',
                               facets_group={'type': [sdconst.SA_TYPE_DATASET],
                                             'master_id': [dataset_functional_id_without_version]},
                               dry_run=args.dry_run)

    # TODO: func for code below
    items = params.get('version', [])
    for item in items:
        print item.name
def file_dump(args):
    import sdrfile, sddeferredafter, sdcolumnfilter, sdreducecol

    sddeferredafter.add_default_parameter(args.stream,'limit',100)

    if args.raw_mode:
        post_pipeline_mode=None
        args.all=True # we force '--all' option when '--raw_mode' option is set
    else:
        post_pipeline_mode='file'

    files=sdrfile.get_files(stream=args.stream,post_pipeline_mode=post_pipeline_mode,dry_run=args.dry_run)

    if args.all:
        # do not hide any attribute
        pass
    else:
        # hide non essential attributes
        files=sdreducecol.run(files)

    if not args.dry_run:
        if len(files)>0:
            files=sdcolumnfilter.run(files,args.column)
            sdprint.print_format(files,args.format)
        else:
            print_stderr("File not found")
def show(args):
    if args.type_==sdconst.SA_TYPE_FILE:
        file_show(args)
    elif args.type_==sdconst.SA_TYPE_AGGREGATION:
        print_stderr('Not implemented yet.')
    elif args.type_==sdconst.SA_TYPE_DATASET:
        dataset_show(args)
def get_metrics(group_,metric,project_,dry_run=False):
    li=[]

    c = sddb.conn.cursor()

    # check
    assert group_ in ['data_node','project','model']
    assert metric in ('rate','size')
    # WARNING: we don't check project_ for sql injection here. This MUST be done in the calling func. TODO: check for sql injection here

    # prepare metric calculation
    if metric=='rate':
        metric_calculation='avg(rate)'
    elif metric=='size':
        metric_calculation='sum(size)'

    # prepare where clause
    where_clause="status='done' and rate is not NULL and size is not NULL"
    if group_=='model':
        where_clause+=" and project='%s'"%project_

    # execute
    q='select %s, %s as metric from file where %s group by %s order by metric desc'%(group_,metric_calculation,where_clause,group_)

    if dry_run:
        print_stderr('%s'%q)
        return []

    c.execute(q)

    rs=c.fetchone()
    while rs!=None:
        group_column_value=rs[0]
        metric_column_value=rs[1]

        li.append((group_column_value,metric_column_value))

        rs=c.fetchone()

    c.close()

    return li
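# Illustrative calls (hypothetical project value): with dry_run=True the
# generated SQL is only printed to stderr and an empty list is returned, which
# is handy for checking the grouping/aggregation built above.
def _example_get_metrics_usage():
    get_metrics('data_node', 'rate', None, dry_run=True)   # avg rate per data node
    get_metrics('model', 'size', 'CMIP5', dry_run=True)    # total size per model for one project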
def watch(args):
    import sdreport, sddaemon

    if sddaemon.is_running():
        sdreport.print_running_transfers()
    else:
        print_stderr('Daemon not running')
def show(args):
    if args.type_ == sdconst.SA_TYPE_FILE:
        file_show(args)
    elif args.type_ == sdconst.SA_TYPE_AGGREGATION:
        print_stderr('Not implemented yet.')
    elif args.type_ == sdconst.SA_TYPE_DATASET:
        dataset_show(args)
def get_name_from_value(value):
    """This method is used by sdinference module."""
    names=search_match_fast(value)

    if len(names)==0:
        raise SDException("SYDPARAM-002","Parameter name cannot be inferred from '%s' value (value not found). %s"%(value,sdi18n.m0020))
    elif len(names)==1:
        return names[0]
    elif len(names)>1:
        # If we are here, it's because some parameter values are used by several parameter names.
        # To solve that, we select which name we want in priority.

        # item below is not used very often, so let's try without it
        if 'cmor_table' in names:
            names.remove('cmor_table')
            if len(names)==1:
                return names[0]

        # item below is not used very often, so let's try without it
        if 'source_id' in names:
            names.remove('source_id')
            if len(names)==1:
                return names[0]

        # if there are still too many matches, print a warning and return the first one
        sdtools.print_stderr("WARNING: '%s' value has been associated with '%s' facet."%(value,names[0]))
        return names[0]
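# Worked illustration of the disambiguation above (hypothetical value): when a
# value matches several facet names, 'cmor_table' is discarded first, then
# 'source_id'; only if more than one candidate is still left is a warning
# printed and the first remaining name returned.
def _example_name_inference():
    return get_name_from_value('tas')  # e.g. may resolve to 'variable'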
def remove(metadata, remove_all=True):
    # First step, change the files status from 'done' to 'delete' (METADATA).
    #
    # Note
    #     This is a deferred delete.
    #
    nbr = sddelete.run(metadata)
    print_stderr("%i file(s) removed" % nbr)

    # Second step, do the deletion (DATA and METADATA) (to do a deferred
    # deletion (i.e. by the daemon), comment line below and enable
    # corresponding line in sdtask. Note that a code review is needed if both
    # are enabled simultaneously (e.g. see TAGKRE45343J54K5JK))
    #
    sddeletefile.delete_transfers_lowmem(remove_all)

    if remove_all:
        print_stderr("Remove empty folders and files.. (may take some time)")

    # Third step is to remove orphan dataset (METADATA)
    sddeletedataset.purge_orphan_datasets()

    # Fourth step is to remove orphan folder (DATA)
    if remove_all:
        # part

        paths = sdmdcommon.get_attributes(metadata, 'local_path') # retrieve paths
        paths = [os.path.dirname(p) for p in paths] # remove filenames
        paths = [sdtypes.build_full_local_path(p) for p in paths] # switch to full path
        paths = [os.path.realpath(p) for p in paths] # follow symlink

        sdcleanup.part_cleanup(paths) # remove paths
def variable_list(args):
    import sddeferredafter

    sddeferredafter.add_default_parameter(args.stream,'limit',15) # note: in variable mode, total number of row is given by: "total+=#variable for each ds"

    print_stderr('Not implemented yet.')

    """
def is_one_variable_per_dataset_project(args):
    """This func is a HACK.

    HACK description
        For some projects, one dataset = one variable.
        For such cases, there is no point in displaying a variable list,
        so we change the route to display the dataset list.
    """
    import sys

    # retrieve project from input
    project=get_facet_early(args.stream,'project')

    # check
    if len(project)==0:
        print_stderr("The project name must be specified in the search (mandatory when using 'variable/aggregation' type)")
        sys.exit(1)
    elif len(project)>1:
        print_stderr("Only one project name must be specified in the search (mandatory when using 'variable/aggregation' type)")
        sys.exit(1)

    if sdtools.intersect(project,sdconst.PROJECT_WITH_ONE_VARIABLE_PER_DATASET):
        return True
    else:
        return False
def retry(args):
    import sdmodify

    nbr = sdmodify.retry_all()

    if nbr > 0:
        print_stderr("%i file(s) marked for retry." % nbr)
    else:
        print_stderr("No transfer in error")
def file_dump(args):
    import sdrfile, sddeferredafter, sdcolumnfilter, sdreducecol

    sddeferredafter.add_default_parameter(args.stream, 'limit', sdconfig.get_default_limit('dump'))

    if args.raw_mode:
        post_pipeline_mode = None
        args.all = True # we force '--all' option when '--raw_mode' option is set
    else:
        post_pipeline_mode = 'file'

    files = sdrfile.get_files(stream=args.stream, post_pipeline_mode=post_pipeline_mode, dry_run=args.dry_run)

    if args.all:
        # do not hide any attribute
        pass
    else:
        # hide non essential attributes
        files = sdreducecol.run(files)

    if not args.dry_run:
        if len(files) > 0:
            files = sdcolumnfilter.run(files, args.column)
            sdprint.print_format(files, args.format)
        else:
            print_stderr("File not found")
def facet(args):
    import sdparam,sdremoteparam,syndautils,sdinference,sdignorecase

    facets_groups=syndautils.get_stream(subcommand=args.subcommand,parameter=args.parameter,selection_file=args.selection_file,no_default=True)
    facets_groups=sdignorecase.run(facets_groups)
    facets_groups=sdinference.run(facets_groups)

    if sdparam.exists_parameter_name(args.facet_name): # first, check the cache so we can return quickly if the facet is unknown
        if len(facets_groups)==1: # facet selected: retrieve parameters from ESGF
            facets_group=facets_groups[0]

            params=sdremoteparam.run(pname=args.facet_name,facets_group=facets_group,dry_run=args.dry_run)

            # TODO: func for code below
            items=params.get(args.facet_name,[])
            for item in items:
                print item.name
        elif len(facets_groups)>1:
            print_stderr('Multi-queries not supported')
        else:
            # Parameter not set. In this case, we retrieve the facet values list from cache.
            sdparam.main([args.facet_name]) # trick to re-use the sdparam CLI parser
    else:
        print_stderr('Unknown facet')
def remove(metadata,remove_all=True):
    # First step, change the files status from 'done' to 'delete' (METADATA).
    #
    # Note
    #     This is a deferred delete.
    #
    nbr=sddelete.run(metadata)
    print_stderr("%i file(s) removed"%nbr)

    # Second step, do the deletion (DATA and METADATA) (to do a deferred
    # deletion (i.e. by the daemon), comment line below and enable
    # corresponding line in sdtask. Note that a code review is needed if both
    # are enabled simultaneously (e.g. see TAGKRE45343J54K5JK))
    #
    sddeletefile.delete_transfers_lowmem(remove_all)

    if remove_all:
        print_stderr("Remove empty folders and files.. (may take some time)")

    # Third step is to remove orphan dataset (METADATA)
    sddeletedataset.purge_orphan_datasets()

    # Fourth step is to remove orphan folder (DATA)
    if remove_all:
        # part

        paths=sdmdcommon.get_attributes(metadata,'local_path') # retrieve paths
        paths=[os.path.dirname(p) for p in paths] # remove filenames
        paths=[sdtypes.build_full_local_path(p) for p in paths] # switch to full path
        paths=[os.path.realpath(p) for p in paths] # follow symlink

        sdcleanup.part_cleanup(paths) # remove paths
def retry(args):
    import sdmodify

    nbr=sdmodify.retry_all()

    if nbr>0:
        print_stderr("%i file(s) marked for retry."%nbr)
    else:
        print_stderr("No transfer in error")
def watch(args):
    import sdreport, sddaemon

    if sddaemon.is_running():
        sdreport.print_running_transfers()
    else:
        print_stderr('Daemon not running')
def print_certificate():
    import os, sdutils

    if os.path.isfile(sdconfig.esgf_x509_proxy):
        (sdget_status,stdout,stderr)=sdutils.get_status_output(['/usr/bin/openssl','x509','-in',sdconfig.esgf_x509_proxy,'-text'],shell=False)
        print stdout
    else:
        print_stderr("Certificate not found (use 'renew' command to retrieve a new certificate).")
def print_tokens():
    try:
        tokens = load_tokens_from_file(sdconfig.globus_tokens)
        print(json.dumps(tokens, indent=4))
    except:
        print_stderr("Globus tokens not found (use 'renew' command to retrieve new tokens).")
def run(stream=None, path=None, parameter=None, index_host=None, post_pipeline_mode='file', dry_run=False):
    if parameter is None:
        parameter = []

    queries = sdpipeline.build_queries(stream=stream, path=path, parameter=parameter, index_host=index_host, parallel=False, load_default=False)

    if len(queries) < 1:
        raise SDException("SDQSEARC-001", "No query to process")

    progress = sdsqueries.get_scalar(queries, 'progress', False, type_=bool)  # we cast here as progress can be str (set from parameter) or bool (set programmatically)
    searchapi_host = sdsqueries.get_scalar(queries, 'searchapi_host')

    if dry_run:
        for query in queries:
            request = sdtypes.Request(url=query['url'], pagination=False)

            print '%s' % request.get_url()

            # debug
            #print 'Url: %s'%request.get_url()
            #print 'Attached parameters: %s'%query.get('attached_parameters')

        return sdtypes.Response()
    else:
        try:
            if progress:
                sdtools.print_stderr(sdi18n.m0003(searchapi_host))  # waiting message => TODO: move into ProgressThread class
                ProgressThread.start(sleep=0.1, running_message='', end_message='Search completed.')  # spinner start

            mqr = process_queries(queries)
            metadata = mqr.to_metadata()

            sdlog.debug("SDQSEARC-002", "files-count=%d" % metadata.count())
            metadata = sdpipeline.post_pipeline(metadata, post_pipeline_mode)
            sdlog.debug("SDQSEARC-004", "files-count=%d" % metadata.count())

            return metadata
        finally:
            if progress:
                ProgressThread.stop()  # spinner stop
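# Illustrative call (hypothetical facet values; the 'facet=value' parameter form
# is an assumption): with dry_run=True only the Search-API URL(s) are printed
# and an empty Response object is returned.
def _example_search_usage():
    return run(parameter=['project=CMIP5', 'variable=tas'], dry_run=True)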
def run_remote(args, stream):
    syndautils.check_daemon()

    try:
        metadata = syndautils.file_full_search(args, stream)
    except sdexception.EmptySelectionException, e:
        print_stderr('No packages will be installed, upgraded, or removed.')
        return 0
def dump(args):
    if args.type_ == sdconst.SA_TYPE_FILE:
        file_dump(args)
    elif args.type_ == sdconst.SA_TYPE_AGGREGATION:
        print_stderr("'%s' operation is not available for variable/aggregation type" % args.subcommand)
    elif args.type_ == sdconst.SA_TYPE_DATASET:
        dataset_dump(args)
def run_remote(args,stream):
    syndautils.check_daemon()

    try:
        metadata=syndautils.file_full_search(args,stream)
    except sdexception.EmptySelectionException, e:
        print_stderr('No packages will be installed, upgraded, or removed.')
        return 0
def variable_list(args):
    import sddeferredafter

    sddeferredafter.add_default_parameter(args.stream, 'limit', sdconfig.get_default_limit('list')) # note: in variable mode, total number of row is given by: "total+=#variable for each ds"

    print_stderr('Not implemented yet.')

    """
def print_selection(filename):
    title='Filename: %s'%filename

    sdtools.print_stderr()
    sdtools.print_stderr(title)
    sdtools.print_stderr('='*len(title))
    sdtools.print_stderr()
    cat_selection(filename)
    sdtools.print_stderr()
def version(args):
    if args.type_ == sdconst.SA_TYPE_FILE:
        file_version(args)
    elif args.type_ == sdconst.SA_TYPE_AGGREGATION:
        print_stderr('%s operation is not available for variable/aggregation type' % args.action)
    elif args.type_ == sdconst.SA_TYPE_DATASET:
        dataset_version(args)
def cache(args):
    if len(args.parameter)==0:
        pass
    else:
        action=args.parameter[0] # it's a naming mess: rename top level action as subcommand

        if action=="init":
            print_stderr("Retrieving parameters from ESGF...")

            import sdcache
            sdcache.run(reload=True)

            print_stderr("Parameters are up-to-date.")
def do_lock(self,arg):
    if arg=='':
        return

    model=arg

    try:
        sdlock.lock(model)
    except SDException,e:
        sdtools.print_stderr("Lock error: '%s' model not found on %s"%(model,sdindex.get_default_index()))
def file_list(args):
    import sddeferredafter, sdlfile

    sddeferredafter.add_default_parameter(args.stream,'limit',20)

    files=sdlfile.get_files(stream=args.stream,dry_run=args.dry_run)

    if len(files)==0:
        print_stderr("File not found")
    else:
        sdlfile.print_list(files)
def do_lock(self, arg):
    if arg == '':
        return

    model = arg

    try:
        sdlock.lock(model)
    except SDException, e:
        sdtools.print_stderr("Lock error: '%s' model not found on %s" % (model, sdindex.get_default_index()))
def save_tokens_to_file(filepath, tokens):
    """Save a set of tokens for later use."""
    directory = os.path.dirname(filepath)
    if not os.path.isdir(directory):
        try:
            os.makedirs(directory)
        except OSError as e:
            print_stderr("Could not create {} directory for a Globus OAuth2 token.\n{}".format(directory, e))
            sys.exit(1)

    with open(filepath, 'w') as f:
        json.dump(tokens, f)
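# Illustrative round-trip sketch (hypothetical path and token payload): tokens
# are stored as plain JSON, so any json-serializable mapping can be saved here
# and later read back by load_tokens_from_file().
def _example_token_storage():
    save_tokens_to_file('/tmp/synda_example/globus_tokens.json',
                        {'access_token': 'dummy', 'expires_at': 0})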
def dataset_list(args):
    import sddeferredafter

    sddeferredafter.add_default_parameter(args.stream,'limit',20)

    import sdldataset
    datasets=sdldataset.get_datasets(stream=args.stream,dry_run=args.dry_run)

    if len(datasets)==0:
        print_stderr('Dataset not found')
    else:
        sdldataset.print_list(datasets)
def replica(args):
    if args.action == "next":
        if args.file_id is None:
            import sdfiledao, sdconst
            files = sdfiledao.get_files(status=sdconst.TRANSFER_STATUS_ERROR)
            for file_ in files:
                replica_next(file_.file_functional_id, args)
        else:
            replica_next(args.file_id, args)
    else:
        print_stderr('Incorrect argument')
def filter_and_print_name(li,pattern=None):
    li=sdtools.grep_light(li,pattern)

    if len(li)==1:
        v=li[0]
        print v
    elif len(li)>1:
        for v in li:
            print v
    else:
        sdtools.print_stderr("Parameter name not found")
def file_list(args):
    import sddeferredafter, sdlfile

    sddeferredafter.add_default_parameter(args.stream, 'limit', sdconfig.get_default_limit('list'))

    files = sdlfile.get_files(stream=args.stream, dry_run=args.dry_run)

    if len(files) == 0:
        print_stderr("File not found")
    else:
        sdlfile.print_list(files)
def filter_and_print_name(li, pattern=None):
    li = sdtools.grep_light(li, pattern)

    if len(li) == 1:
        v = li[0]
        print v
    elif len(li) > 1:
        for v in li:
            print v
    else:
        sdtools.print_stderr("Parameter name not found")
def replica(args):
    if args.action=="next":
        if args.file_id is None:
            import sdfiledao,sdconst
            files=sdfiledao.get_files(status=sdconst.TRANSFER_STATUS_ERROR)
            for file_ in files:
                replica_next(file_.file_functional_id,args)
        else:
            replica_next(args.file_id,args)
    else:
        print_stderr('Incorrect argument')
def print_certificate():
    import os, sdutils

    certdirprefix=sdconfig.tmp_folder if sdconfig.multiuser else os.environ.get('HOME')
    certificate_file='%s/.esg/credentials.pem'%certdirprefix

    if os.path.isfile(certificate_file):
        (sdget_status,stdout,stderr)=sdutils.get_status_output(['/usr/bin/openssl','x509','-in',certificate_file,'-text'],shell=False)
        print stdout
    else:
        print_stderr("Certificate not found (use 'renew' command to retrieve a new certificate).")
def replica_next(file_,replicas):
    if file_.status in [sdconst.TRANSFER_STATUS_ERROR,sdconst.TRANSFER_STATUS_WAITING]: # replica can only be changed for those file statuses
        new_replica=sdreplica.replica_next(file_.url,replicas) # TODO: maybe use replica object instead of tuple here
        if new_replica is None:
            print_stderr("No other replica found (file_functional_id=%s)"%file_.file_functional_id)
        else:
            sdmodifyquery.change_replica(file_.file_functional_id,new_replica)
            sdlog.info("SDMODIFY-100","File replica set to %s (previous_replica=%s,file_functional_id=%s)"%(new_replica[1],file_.url,file_.file_functional_id))
    else:
        print_stderr("Replica cannot be changed (local file incorrect status).")
def metric(args):
    import sdmetric, sdparam

    # check
    if args.groupby == 'model':
        if args.project not in sdparam.params['project']:
            print_stderr("Unknown project (%s)" % args.project)
            return 1

    if args.metric == 'size':
        sdmetric.print_size(args.groupby, args.project, dry_run=args.dry_run)
    elif args.metric == 'rate':
        sdmetric.print_rate(args.groupby, args.project, dry_run=args.dry_run)
def token(args):
    import sdtoken

    if args.action is None:
        sdtoken.print_tokens()
        return 0

    if args.action == 'renew':
        sdtoken.renew_tokens()
        return 0

    if args.action == 'print':
        sdtoken.print_tokens()
        return 0

    print_stderr("Not implemented")
    return 1
def dataset_list(args):
    import sddeferredafter

    sddeferredafter.add_default_parameter(args.stream, 'limit', sdconfig.get_default_limit('list'))

    import sdldataset
    datasets = sdldataset.get_datasets(stream=args.stream, dry_run=args.dry_run)

    if len(datasets) == 0:
        print_stderr('Dataset not found')
    else:
        sdldataset.print_list(datasets)
def metric(args):
    import sdmetric,sdparam

    # check
    if args.groupby=='model':
        if args.project not in sdparam.params['project']:
            print_stderr("Unknown project (%s)"%args.project)
            return 1

    if args.metric=='size':
        sdmetric.print_size(args.groupby,args.project,dry_run=args.dry_run)
    elif args.metric=='rate':
        sdmetric.print_rate(args.groupby,args.project,dry_run=args.dry_run)
def dataset_dump(args):
    import sdrdataset, sddeferredafter, sdcolumnfilter

    sddeferredafter.add_default_parameter(args.stream,'limit',100)

    post_pipeline_mode=None if args.raw_mode else 'dataset'

    files=sdrdataset.get_datasets(stream=args.stream,post_pipeline_mode=post_pipeline_mode,dry_run=args.dry_run)

    if not args.dry_run:
        if len(files)>0:
            files=sdcolumnfilter.run(files,args.column)
            sdprint.print_format(files,args.format)
        else:
            print_stderr('Dataset not found')
def dataset_show(args):
    import sdearlystreamutils

    # check
    li = sdearlystreamutils.get_facet_values_early(args.stream, 'instance_id')
    if len(li) == 0:
        print_stderr('Please specify a dataset name.')
        return 1
    elif len(li) > 1:
        print_stderr('Too many arguments.')
        return 1

    if args.localsearch:
        import sdldataset
        dataset = sdldataset.get_dataset(stream=args.stream, dry_run=args.dry_run)
        if not args.dry_run:
            if dataset is None:
                print_stderr("Dataset not found")
            else:
                sdldataset.print_details(dataset)
    else:
        import sdrdataset
        dataset = sdrdataset.get_dataset(stream=args.stream, dry_run=args.dry_run)
        if not args.dry_run:
            if dataset is None:
                print_stderr("Dataset not found")
            else:
                sdrdataset.print_details(dataset, verbose=args.verbose)
def replica(args):
    if len(args.parameter)<1:
        print_stderr('Incorrect argument')
    else:
        action=args.parameter[0] # it's a naming mess: rename top level action as subcommand

        if action=="next":
            if len(args.parameter)==1:
                import sdfiledao,sdconst
                files=sdfiledao.get_files(status=sdconst.TRANSFER_STATUS_ERROR)
                for file_ in files:
                    replica_next(file_.file_functional_id,args)
            elif len(args.parameter)==2:
                file_functional_id=args.parameter[1]
                replica_next(file_functional_id,args)
def variable_show(args):
    if args.localsearch:
        print_stderr('Not implemented yet.')
        """
        import sdldataset
        dataset=sdldataset.get_dataset(stream=args.stream,dry_run=args.dry_run)
        if dataset is None:
            print "Variable not found"
        else:
            sdldataset.print_details(dataset)
        """
    else:
        print_stderr('Not implemented yet.')
        """
def dataset_show(args):
    # check
    li=syndautils.get_facet_values_early(args.stream,'instance_id')
    if len(li)==0:
        print_stderr('Please specify a dataset name.')
        return
    elif len(li)>1:
        print_stderr('Too many arguments.')
        return

    if args.localsearch:
        import sdldataset
        dataset=sdldataset.get_dataset(stream=args.stream,dry_run=args.dry_run)
        if not args.dry_run:
            if dataset is None:
                print_stderr("Dataset not found")
            else:
                sdldataset.print_details(dataset)
    else:
        import sdrdataset
        dataset=sdrdataset.get_dataset(stream=args.stream,dry_run=args.dry_run)
        if not args.dry_run:
            if dataset is None:
                print_stderr("Dataset not found")
            else:
                sdrdataset.print_details(dataset,verbose=args.verbose)
def variable_show(args):
    if args.localsearch:
        print_stderr('Not implemented yet.')
        """
        import sdldataset
        dataset=sdldataset.get_dataset(stream=args.stream,dry_run=args.dry_run)
        if dataset is None:
            print "Variable not found"
        else:
            sdldataset.print_details(dataset)
        """
    else:
        print_stderr('Not implemented yet.')
        """