Example #1
def facet(args):
    import sdparam,sdremoteparam,syndautils,sdinference,sdignorecase
    from sdtools import print_stderr # assumption: print_stderr is defined in sdtools, as elsewhere in the Synda codebase

    facets_groups=syndautils.get_stream(subcommand=args.subcommand,parameter=args.parameter,selection_file=args.selection_file,no_default=True)
    facets_groups=sdignorecase.run(facets_groups)
    facets_groups=sdinference.run(facets_groups)


    if sdparam.exists_parameter_name(args.facet_name): # check the cache first so we can return quickly if the facet is unknown

        if len(facets_groups)==1:
            # facet selected: retrieve parameters from ESGF

            facets_group=facets_groups[0]

            params=sdremoteparam.run(pname=args.facet_name,facets_group=facets_group,dry_run=args.dry_run)

            # TODO: func for code below
            items=params.get(args.facet_name,[])
            for item in items:
                print(item.name)
        elif len(facets_groups)>1:
            print_stderr('Multi-queries not supported')

        else:
            # Parameter not set. In this case, we retrieve facet values list from cache.

            sdparam.main([args.facet_name]) # trick to re-use the sdparam CLI parser

    else:
        print_stderr('Unknown facet')   
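
The sketch below is not from the Synda sources; it illustrates, under stated assumptions, the shapes facet() appears to consume: a facets group as a dict mapping facet names to requested values, and the argparse attributes the function reads. All names and values are illustrative.

from argparse import Namespace

# Hypothetical facets group, shaped like the items get_stream() appears to
# yield: facet name mapped to the list of requested values.
facets_group = {'project': ['CMIP5'], 'model': ['HadGEM2-ES'], 'variable': ['tas']}

# Hypothetical CLI namespace carrying the attributes facet() reads.
args = Namespace(subcommand='facet', facet_name='model', parameter=[],
                 selection_file=None, dry_run=False)
# facet(args) would then ask ESGF for the known values of the 'model' facet.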
Example #2
def remove(args):
    import sdremove, syndautils

    stream = syndautils.get_stream(subcommand=args.subcommand,
                                   parameter=args.parameter,
                                   selection_file=args.selection_file,
                                   no_default=args.no_default,
                                   raise_exception_if_empty=True)
    return sdremove.run(args, stream)
Example #3
def pexec(args):
    import sdsearch, sdpporder, sddb, syndautils, sdconst, sdpostpipelineutils, sdhistorydao, sddeferredbefore, sddomainutils
    from sdtools import print_stderr # assumption: print_stderr is defined in sdtools, as elsewhere in the Synda codebase

    if args.order_name=='cdf':
        selection_filename=None

        # use search-api operator to build datasets list
        stream=syndautils.get_stream(subcommand=args.subcommand,selection_file=args.selection_file,no_default=args.no_default)
        sddeferredbefore.add_forced_parameter(stream,'type','Dataset')

        dataset_found_count=0
        order_variable_count=0
        order_dataset_count=0
        for facets_group in stream: # we need to process each facets_group one by one because of TAG45345JK3J53K
            
            metadata=sdsearch.run(stream=[facets_group],post_pipeline_mode='dataset') # TAGJ43KJ234JK

            dataset_found_count+=metadata.count()

            if metadata.count() > 0:

                # WART
                # (gets overwritten at each iteration, but not a big deal as always the same value)
                if selection_filename is None: # keep the first value found (i.e. if the last facets_group is empty but previous ones are not, do not keep the last value, which would be None)

                    dataset=metadata.get_one_file()
                    selection_filename=sdpostpipelineutils.get_attached_parameter__global([dataset],'selection_filename') # note that if no files are found at all for this selection (no matter the status), then the filename will be blank

                for d in metadata.get_files(): # warning: load list in memory
                    if d['status']==sdconst.DATASET_STATUS_COMPLETE:

                        # TAG45J4K45JK

                        # first, send cdf variable order
                        # (note: total number of variable event is given by: "total+=#variable for each ds")
                        for v in d['variable']:
                            if v in facets_group['variable']: # TAG45345JK3J53K (we check here that the variable has been asked for in the first place)
                                order_variable_count+=1

                                # hack
                                if sddomainutils.is_one_var_per_ds(d['project']): # maybe move this test to the TAG45J4K45JK line, and replace 'EVENT_CDF_VARIABLE_O' with a dataset-level event (note however that the choice about passing the 'EVENT_CDF_VARIABLE_O' event as variable or dataset is arbitrary; both work. But passing it as a variable is a bit strange, as the variable appears in both the dataset_pattern and variable columns)
                                    e_names=[sdconst.EVENT_CDF_INT_VARIABLE_O, sdconst.EVENT_CDF_COR_VARIABLE_O]

                                    # this case is a bit awkward as we have 'variable' in both dataset_pattern and variable columns..

                                else:
                                    e_names=[sdconst.EVENT_CDF_INT_VARIABLE_N, sdconst.EVENT_CDF_COR_VARIABLE_N]

                                for e_name in e_names:
                                    sdpporder.submit(e_name,d['project'],d['model'],d['local_path'],variable=v,commit=False)

                        # second, send cdf dataset order
                        if d['project'] in sdconst.PROJECT_WITH_ONE_VARIABLE_PER_DATASET:

                            # we do not trigger 'dataset' level event in this case
                            pass
                        else:

                            order_dataset_count+=1

                            e_names=[sdconst.EVENT_CDF_INT_DATASET, sdconst.EVENT_CDF_COR_DATASET]
                            for e_name in e_names:
                                sdpporder.submit(e_name,d['project'],d['model'],d['local_path'],commit=False)

        sddb.conn.commit()

        if dataset_found_count>0:
            if order_dataset_count==0 and order_variable_count==0:
                print_stderr("Data not ready (data must be already downloaded before performing pexec task): operation cancelled")   
            else:
                sdhistorydao.add_history_line(sdconst.ACTION_PEXEC,selection_filename)

                print_stderr("Post-processing task successfully submitted (order_dataset_count=%d,order_variable_count=%d)"%(order_dataset_count,order_variable_count))
        else:
            print_stderr('Data not found')

    elif args.order_name=='cds':
        selection_filename = None

        # use search-api operator to build datasets list
        stream = syndautils.get_stream(subcommand=args.subcommand, selection_file=args.selection_file, no_default=args.no_default)
        sddeferredbefore.add_forced_parameter(stream, 'type', 'Dataset')

        dataset_found_count = 0
        order_variable_count = 0
        for facets_group in stream:  # we need to process each facets_group one by one because of TAG45345JK3J53K

            metadata = sdsearch.run(stream=[facets_group], post_pipeline_mode='dataset')  # TAGJ43KJ234JK

            dataset_found_count += metadata.count()

            if metadata.count() > 0:

                # WART
                # (gets overwritten at each iteration, but not a big deal as always the same value)
                if selection_filename is None:  # keep the first value found (i.e. if the last facets_group is empty but previous ones are not, do not keep the last value, which would be None)

                    dataset = metadata.get_one_file()
                    selection_filename = sdpostpipelineutils.get_attached_parameter__global([dataset], 'selection_filename')  # note that if no files are found at all for this selection (no matter the status), then the filename will be blank

                for d in metadata.get_files():  # warning: load list in memory
                    if d['status'] == sdconst.DATASET_STATUS_COMPLETE:

                        # TAG45J4K45JK

                        # send cds variable order
                        # (note: total number of variable event is given by: "total+=#variable for each ds")
                        for v in d['variable']:
                            if v in facets_group['variable']:  # TAG45345JK3J53K (we check here that the variable has been asked for in the first place)
                                order_variable_count += 1
                                sdpporder.submit(sdconst.EVENT_CDS_VARIABLE, d['project'], d['model'], d['local_path'], variable=v, commit=False)

        sddb.conn.commit()

        if dataset_found_count > 0:
            if order_variable_count == 0:
                print_stderr("Data not ready (data must be already downloaded before performing pexec task): operation cancelled")
            else:
                sdhistorydao.add_history_line(sdconst.ACTION_PEXEC, selection_filename)

                print_stderr(
                    "Post-processing task successfully submitted (order_variable_count=%d)" % (order_variable_count))
        else:
            print_stderr('Data not found')

    else:
        print_stderr("Invalid order name ('%s')"%args.order_name)
        return 1

    return 0
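
As a reading aid, here is a standalone sketch of the variable-event selection performed above; the synda event constants are replaced by hypothetical string placeholders, so this is a shape illustration rather than the real constants.

def pick_variable_events(one_var_per_ds):
    """Return the CDF variable event names to submit for one variable."""
    if one_var_per_ds:
        # project stores one variable per dataset: use the '_O' events
        return ['EVENT_CDF_INT_VARIABLE_O', 'EVENT_CDF_COR_VARIABLE_O']
    # normal case: several variables per dataset, use the '_N' events
    return ['EVENT_CDF_INT_VARIABLE_N', 'EVENT_CDF_COR_VARIABLE_N']

assert pick_variable_events(True)[0].endswith('_O')
assert pick_variable_events(False)[0].endswith('_N')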
Example #4
def open_(args):
    import sdview,syndautils,sdsandbox,sdtypes,sdconst,sdearlystreamutils
    import sddirectdownload # was missing from the original import list although it is used below
    from sdtools import print_stderr # assumption: print_stderr is defined in sdtools, as elsewhere in the Synda codebase


    stream=syndautils.get_stream(subcommand=args.subcommand,parameter=args.parameter,selection_file=args.selection_file)


    # check

    li=sdearlystreamutils.get_facet_values_early(stream,'instance_id') # check if 'instance_id' exists
    if len(li)==0:
        # 'instance_id' is not found on cli

        li=sdearlystreamutils.get_facet_values_early(stream,'title') # check if 'title' exists
        if len(li)==0:
            # 'title' is not found on cli

            # no identifier found, we stop the processing
            print_stderr('Please specify a file identifier (id or filename).')
            return 1

        elif len(li)>1:
            print_stderr('Too many arguments.')
            return 1
    elif len(li)>1:
        print_stderr('Too many arguments.')
        return 1


    # discovery

    import sdlfile
    file_=sdlfile.get_file(stream=stream)

    if file_ is None:

        import sdrfile
        file_=sdrfile.get_file(stream=stream)

        if file_ is None:
            print_stderr("File not found")

            return 2


    # cast

    f=sdtypes.File(**file_)


    # check if file exists locally

    if f.status==sdconst.TRANSFER_STATUS_DONE:
        file_local_path=f.get_full_local_path()
    elif sdsandbox.file_exists(f.filename):
        file_local_path=sdsandbox.get_file_path(f.filename)
    else:
        file_local_path=None


    # download (if not done already)

    if file_local_path is None:
        status=sddirectdownload.run([file_], verbosity=1)

        if status!=0:
            return 1


    # open file in external viewer

    sdview.open_(file_local_path,f.variable,args.geometry)


    return 0
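
The local-path resolution above is a three-way fallback: completed transfer, sandbox copy, or nothing on disk. Below is a dependency-free sketch of the same chain, with the synda predicates passed in as callables (the parameter names are ours, not synda's).

def resolve_local_path(f, is_done, in_sandbox, sandbox_path):
    if is_done(f):                      # transfer already completed
        return f.get_full_local_path()
    if in_sandbox(f.filename):          # file staged in the sandbox folder
        return sandbox_path(f.filename)
    return None                         # not on disk: caller must download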
Example #5
def get(args):
    import sdlogon, sdrfile, sddeferredafter, sddirectdownload, syndautils, humanize, sdconfig, os, sdconst, sdearlystreamutils
    from sdtools import print_stderr # assumption: print_stderr is defined in sdtools, as elsewhere in the Synda codebase

    # hack
    # see TAG43534FSFS
    if args.quiet:
        args.verbosity=0

    if args.verify_checksum and args.network_bandwidth_test:
        print_stderr("'verify_checksum' option cannot be set when 'network_bandwidth_test' option is set.")
        return 1

    stream=syndautils.get_stream(subcommand=args.subcommand,parameter=args.parameter,selection_file=args.selection_file)


    if args.openid and args.password:
        # use credential from CLI

        oid=args.openid
        pwd=args.password
    else:
        # use credential from file

        if sdconfig.is_openid_set():
            oid=sdconfig.openid
            pwd=sdconfig.password
        else:
            print_stderr('Error: OpenID not set in configuration file (%s).'%sdconfig.credential_file)   

            return 1

    # retrieve certificate
    sdlogon.renew_certificate(oid,pwd,force_renew_certificate=False)


    http_client=sdconst.HTTP_CLIENT_URLLIB if args.urllib2 else sdconst.HTTP_CLIENT_WGET

    # local_path
    #
    # The 'synda get' subcommand currently forces local_path to the following construct:
    # '<dest_folder>/<filename>' (i.e. you can't use a DRS tree in-between). This may
    # change in the future.
    #
    if args.dest_folder is None:
        local_path_prefix=os.getcwd() # current working directory
    else:
        local_path_prefix=args.dest_folder

    # BEWARE
    #
    # when set as a CLI parameter, url is usually an ESGF facet, and as such should
    # be sent to the search-api like other facets,
    # BUT
    # we want a special behaviour here (i.e. with the 'synda get' command) for url:
    # if url is set by the user, we DON'T call the search-api operator. Instead, we
    # download the url directly.

    urls=sdearlystreamutils.get_facet_values_early(stream,'url')
    if len(urls)==0:
        # no url in stream: switch to search-api operator mode

        sddeferredafter.add_default_parameter(stream,'limit',5)
        sddeferredafter.add_forced_parameter(stream,'local_path_format','notree')

        files=sdrfile.get_files(stream=stream,post_pipeline_mode='file',dry_run=args.dry_run) # yes: this is the second time we run sdinference filter, but it doesn't hurt as sdinference is idempotent

        if not args.dry_run:
            if len(files)>0:

                # compute metric
                total_size=sum(int(f['size']) for f in files)
                total_size=humanize.naturalsize(total_size,gnu=False)

                print_stderr('%i file(s) will be downloaded for a total size of %s.'%(len(files),total_size))

                status=sddirectdownload.run(files,
                                            args.timeout,
                                            args.force,
                                            http_client,
                                            local_path_prefix,
                                            verify_checksum=args.verify_checksum,
                                            network_bandwidth_test=args.network_bandwidth_test,
                                            debug=True,
                                            verbosity=args.verbosity,
                                            buffered=False,
                                            hpss=args.hpss)

                if status!=0:
                    return 1

            else:
                print_stderr("File not found")
                return 1
        else:
            for f in files:
                size=humanize.naturalsize(f['size'],gnu=False)
                print('%-12s %s' % (size, f['filename']))

    elif len(urls)>0:
        # url(s) found in stream: search-api operator not needed (download url directly)

        # TAGDSFDF432F
        if args.verify_checksum:
            print_stderr("To perform checksum verification, ESGF file identifier (e.g. title, id, tracking id..)  must be used instead of file url.")
            return 1

        # TODO: to improve genericity, maybe merge this block into the previous one (i.e. url CAN be used as a search key in the search-api (but not irods url))

        files=[]
        for url in urls:

            filename=os.path.basename(url)
            local_path=filename

            f=dict(local_path=local_path,url=url)

            files.append(f)
            
        status=sddirectdownload.run(files,
                                    args.timeout,
                                    args.force,
                                    http_client,
                                    local_path_prefix,
                                    verify_checksum=args.verify_checksum, # see above at TAGDSFDF432F
                                    network_bandwidth_test=args.network_bandwidth_test,
                                    debug=True,
                                    verbosity=args.verbosity,
                                    buffered=False,
                                    hpss=args.hpss)

        if status!=0:
            return 1

    else:
        assert False

    return 0
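
The BEWARE comment above boils down to a dispatch rule: url facets present means direct download, otherwise the search-api operator is used. A minimal, self-contained sketch of that rule follows (the url is illustrative; os.path.basename mirrors the local_path construction above).

import os

def plan_download(urls):
    if len(urls) == 0:
        return 'search-api', []
    # direct mode: one file dict per url, saved under its basename
    return 'direct', [dict(local_path=os.path.basename(u), url=u) for u in urls]

mode, files = plan_download(['http://example.invalid/data/tas_day.nc'])
assert mode == 'direct' and files[0]['local_path'] == 'tas_day.nc'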
Example #6
    import sdtsaction
    if args.subcommand in sdtsaction.actions.keys():
        import syndautils

        # hack to explode id in individual facets (experimental)
        if args.subcommand=='search':
            if args.explode:
                if len(args.parameter)>0:
                    id_=args.parameter[0]
                    id_=id_.split('=')[1] if '=' in id_ else id_ # if id_ is in the form 'k=v', strip 'k='. We assume here that the '=' character appears in neither the key nor the value.
                    delim='/' if '/' in id_ else '.'
                    li=id_.split(delim)+args.parameter[1:] # this allows adding other parameters after the id, e.g. 'synda search <master_id> <version>'
                    args.parameter=li

        stream=syndautils.get_stream(subcommand=args.subcommand,parameter=args.parameter,selection_file=args.selection_file,no_default=args.no_default)


        # hack for 'show' and 'version' subcommands.
        #
        # description
        #     this hack normalizes the 'show' and 'version' subcommands' 'type_'
        #     attribute with other type_ sensitive subcommands. Without this
        #     hack, the next statement (i.e. "if args.type_ is None:") fails with
        #     "AttributeError: 'Namespace' object has no attribute 'type_'".
        #
        # notes
        #     - the show and version subcommands' type_ attribute is already strictly
        #       defined by the parameter argument (e.g. dataset identifier, file
        #       identifier, etc.), so we don't want the user to also be able to set
        #       the type_ attribute using options. This is why the type_ group is not
        #       present for the show and version subcommands (see the subparser
        #       module for details).
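
To make the explode hack concrete, here it is applied to a made-up CMIP5-style dataset identifier (the identifier itself is illustrative):

id_ = 'cmip5.output1.MOHC.HadGEM2-ES.rcp85.mon.atmos.Amon.r1i1p1'
delim = '/' if '/' in id_ else '.'
print(id_.split(delim))
# -> ['cmip5', 'output1', 'MOHC', 'HadGEM2-ES', 'rcp85', 'mon', 'atmos', 'Amon', 'r1i1p1']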
Example #7
        if args.topic is None:
            parser.print_help()
        else:
            if args.topic in subparsers.choices:
                subparsers.choices[args.topic].print_help()
            else:
                sdtools.print_stderr('Help topic not found (%s)'%args.topic)

        sys.exit(0)


    import sdtsaction
    if args.subcommand in sdtsaction.actions.keys():
        import syndautils

        stream=syndautils.get_stream(args)


        # hack for 'show' and 'version' subcommands.
        #
        # description
        #     this hack normalizes the 'show' and 'version' subcommands' 'type_'
        #     attribute with other type_ sensitive subcommands. Without this
        #     hack, the next statement (i.e. "if args.type_ is None:") fails with
        #     "AttributeError: 'Namespace' object has no attribute 'type_'".
        #
        # notes
        #     - the show and version subcommands' type_ attribute is already strictly
        #       defined by the parameter argument (e.g. dataset identifier, file
        #       identifier, etc.), so we don't want the user to also be able to set
        #       the type_ attribute using options. This is why the type_ group is not
        #       present for the show and version subcommands (see the subparser
        #       module for details).
Example #8
def run():
    import argparse, sys
    import sdconst, sdi18n, sdpermission, sdsubparser, sdtools # module-level imports in the original source, added here so the excerpt is closer to self-contained

    # create the top-level parser
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    #parser = sdtools.DefaultHelpParser(formatter_class=argparse.RawDescriptionHelpFormatter,description=sdi18n.m0016)

    subparsers = parser.add_subparsers(dest='subcommand',metavar='subcommand') # ,help=sdi18n.m0015

    parser.add_argument('-V','--version',action='version',version=sdconst.SYNDA_VERSION) # beware: version exists both as an option and as a subcommand

    # create parser for sub-commands
    sdsubparser.run(subparsers)

    args = parser.parse_args()


    # check type mutex
    #
    # There is no way to check the mutex as the argparse 'dest' feature is used.
    # Maybe use add_mutually_exclusive_group(), but currently doing so makes the
    # help look ugly, so better to leave it as is until argparse handles this
    # case smoothly.

    if args.subcommand=='setup':
        print('Setting up environment...')

    # -- permission check -- #

    if args.subcommand in (sdconst.ADMIN_SUBCOMMANDS):
        if not sdpermission.is_admin():
            sdtools.print_stderr(sdi18n.m0028)
            sys.exit(1)

    # -- subcommand routing -- #

    if args.subcommand=='help':

        if args.topic is None:
            parser.print_help()
        else:
            if args.topic in subparsers.choices:
                subparsers.choices[args.topic].print_help()
            else:
                sdtools.print_stderr('Help topic not found (%s)'%args.topic)

        sys.exit(0)


    import sdtsaction
    if args.subcommand in sdtsaction.actions.keys():
        import syndautils

        # hack to explode id in individual facets (experimental)
        if args.subcommand=='search':
            if args.explode:
                if len(args.parameter)>0:
                    id_=args.parameter[0]
                    id_=id_.split('=')[1] if '=' in id_ else id_ # if id_ is in the form 'k=v', strip 'k='. We assume here that the '=' character appears in neither the key nor the value.
                    delim='/' if '/' in id_ else '.'
                    li=id_.split(delim)+args.parameter[1:] # this allows adding other parameters after the id, e.g. 'synda search <master_id> <version>'
                    args.parameter=li

        stream=syndautils.get_stream(subcommand=args.subcommand,parameter=args.parameter,selection_file=args.selection_file,no_default=args.no_default)


        # hack for 'show' and 'version' subcommands.
        #
        # description
        #     this hack normalizes the 'show' and 'version' subcommands' 'type_'
        #     attribute with other type_ sensitive subcommands. Without this
        #     hack, the next statement (i.e. "if args.type_ is None:") fails with
        #     "AttributeError: 'Namespace' object has no attribute 'type_'".
        #
        # notes
        #     - the show and version subcommands' type_ attribute is already strictly
        #       defined by the parameter argument (e.g. dataset identifier, file
        #       identifier, etc.), so we don't want the user to also be able to set
        #       the type_ attribute using options. This is why the type_ group is not
        #       present for the show and version subcommands (see the subparser
        #       module for details).
        #     - another way to normalize is to use "parser.set_defaults(type_=None)"
        #
        if args.subcommand in ('show','version'):
            args.type_=None


        # infer type if not set by user
        if args.type_ is None:
            import sdtype
            args.type_=sdtype.infer_display_type(stream)

        args.stream=stream # TODO: pass 'stream' object downstream as a standalone argument (not inside args)

        set_stream_type(args)

        status=sdtsaction.actions[args.subcommand](args)

        # hack
        # TODO: review all return code in sdtsaction module
        if not isinstance(status,int):
            status=0 # arbitrary

        sys.exit(status)

    import sdtiaction
    if args.subcommand in sdtiaction.actions.keys():
        status=sdtiaction.actions[args.subcommand](args)

        # hack
        # TODO: review all return code in sdtiaction module
        if not isinstance(status,int):
            status=0 # arbitrary

        sys.exit(status)

    sdtools.print_stderr('Invalid operation %s'%args.subcommand)   
    sdtools.print_stderr("Use '--help' option for more info")
    #parser.print_help()
    sys.exit(2)
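
run() routes subcommands through dispatch tables (sdtsaction.actions, sdtiaction.actions) and normalizes non-int return codes to 0. Below is a minimal sketch of that pattern with dummy handlers; the table contents are hypothetical.

# Each subcommand name maps to a handler taking the parsed args.
actions = {'get': lambda args: 0, 'remove': lambda args: None}

def route(subcommand, args):
    if subcommand in actions:
        status = actions[subcommand](args)
        # mirror the "hack" above: non-int statuses become 0
        return status if isinstance(status, int) else 0
    return 2  # unknown subcommand

assert route('remove', None) == 0
assert route('bogus', None) == 2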