Example #1
def checkAndSetIO(io):
    """ Check the passed io path/filenames and set appropriately. """

    # Check we have a tuple.
    io = checkAndSetInstance(tuple, io)
    if len(io) != 2:
        raise exceptions.RuntimeError(
            "The parameter 'io' can only be a tuple of two strings.")

    # Check the input path is a valid string; if not, raise.
    i = checkAndSetInstance(str, io[0])
    if i is None:
        raise exceptions.IOError(
            "The parameter 'input_path' must be a valid filename.")
    i = os.path.abspath(i)
    # Check the output path is a valid string as well.
    o = checkAndSetInstance(str, io[1])
    if o is None:
        raise exceptions.IOError(
            "The parameter 'output_path' must be a valid filename.")
    o = os.path.abspath(o)

    return (i, o)
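
A minimal usage sketch (hedged: assumes checkAndSetIO and its helper checkAndSetInstance are importable from the surrounding utilities module; the file names are placeholders):

# Both entries are strings, so the call succeeds and returns absolute paths.
input_path, output_path = checkAndSetIO(("in.h5", "out.h5"))

# Anything other than a 2-tuple raises RuntimeError; a tuple whose
# entries are not strings raises IOError.
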
Example #2
    def verify_section(self, section):
        stuff = self.cfg.items(section[0])
        for i in range(len(section[1])):
            try:
                val = eval(self.key_val_sub(i, stuff, section))
                if val == False:
                    raise exceptions.ValueError
            except ValueError:
                raise exceptions.ValueError(
                    'Verification function returns False... key:%s, val:%s' %
                    (stuff[i][0], stuff[i][1]))
            except:
                raise exceptions.IOError(
                    'bad configuration... key:%s, val:%s' %
                    (stuff[i][0], stuff[i][1]))
Example #3
    def make_module_skeleton(self):
        dest = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'])
        if os.path.exists(dest):
            raise exceptions.IOError("Destination %s already exits!" % dest)

        if not os.path.exists(os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'kernels/volk_' + self.my_dict['name'])):
            os.makedirs(os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'kernels/volk_' + self.my_dict['name']))

        current_kernel_names = self.get_current_kernels()
        need_ifdef_updates = ["constant.h", "volk_complex.h", "volk_malloc.h", "volk_prefs.h",
                              "volk_common.h", "volk_cpu.tmpl.h", "volk_config_fixed.tmpl.h",
                              "volk_typedefs.h", "volk.tmpl.h"]
        for root, dirnames, filenames in os.walk(self.my_dict['base']):
            for name in filenames:
                t_table = map(lambda a: re.search(a, name), current_kernel_names)
                t_table = set(t_table)
                if (t_table == set([None])) or (name == "volk_32f_null_32f.h"):
                    infile = os.path.join(root, name)
                    instring = open(infile, 'r').read()
                    outstring = re.sub(self.volk, 'volk_' + self.my_dict['name'], instring)
                    # Update the header ifdef guards only where needed
                    if name in need_ifdef_updates:
                        outstring = re.sub(self.volk_included, 'INCLUDED_VOLK_' + self.my_dict['name'].upper(), outstring)
                    newname = re.sub(self.volk, 'volk_' + self.my_dict['name'], name)
                    if name == 'VolkConfig.cmake.in':
                        outstring = re.sub("VOLK", 'VOLK_' + self.my_dict['name'].upper(), outstring)
                        newname = "Volk%sConfig.cmake.in" % self.my_dict['name']

                    relpath = os.path.relpath(infile, self.my_dict['base'])
                    newrelpath = re.sub(self.volk, 'volk_' + self.my_dict['name'], relpath)
                    dest = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], os.path.dirname(newrelpath), newname)

                    if not os.path.exists(os.path.dirname(dest)):
                        os.makedirs(os.path.dirname(dest))
                    open(dest, 'w+').write(outstring)

        infile = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'lib/kernel_tests.h')
        instring = open(infile, 'r').read()
        outstring = re.sub(self.volk_kernel_tests, '', instring)
        outstring = re.sub(self.volk_null_kernel, '        (VOLK_INIT_TEST(volk_' + self.my_dict['name'] + '_32f_null_32f, test_params))\n        ;', outstring)
        open(infile, 'w+').write(outstring)

        infile = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'lib/qa_utils.cc')
        instring = open(infile, 'r').read()
        outstring = re.sub(self.badassert, self.goodassert, instring)
        outstring = re.sub(self.baderase, self.gooderase, outstring)
        open(infile, 'w+').write(outstring)
Example #4
def load_using_bioformats(path,
                          c=None,
                          z=0,
                          t=0,
                          series=None,
                          index=None,
                          rescale=True,
                          wants_max_intensity=False,
                          channel_names=None):
    '''Load the given image file using the Bioformats library

    path: path to the file
    c: the channel index, or None to load all channels
    z: the frame index in the z (depth) dimension.
    t: the frame index in the time dimension.
    series, index: the series and image index within the file, if any
    rescale: if True, rescale pixel intensities to the range 0..1
    wants_max_intensity: if True, also return the maximum possible pixel intensity
    channel_names: None if you don't want them, a list which will be filled if you do

    Returns either a 2-d (grayscale) or 3-d (2-d + 3 RGB planes) image
    '''
    #
    # We loop as long as the user is willing to try logging
    # in after timeout.
    #
    while True:
        try:
            return __load_using_bioformats(path, c, z, t, series, index,
                                           rescale, wants_max_intensity,
                                           channel_names)
        except jutil.JavaException, e:
            je = e.throwable
            if jutil.is_instance_of(je, "loci/formats/FormatException"):
                je = jutil.call(je, "getCause", "()Ljava/lang/Throwable;")
            if jutil.is_instance_of(je, "Glacier2/PermissionDeniedException"):
                omero_logout()
                omero_login()
            else:
                import errno
                import exceptions
                import traceback
                logger.warn(e.message)
                for line in traceback.format_exc().split("\n"):
                    logger.warn(line)
                e2 = exceptions.IOError(
                    errno.EINVAL,
                    "Could not load the file as an image (see log for details)",
                    path.encode('utf-8'))
                raise e2
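
A hedged usage sketch (the file name is a placeholder, and the Java VM with the Bioformats bindings is assumed to be running already, as this function requires):

channel_names = []
image = load_using_bioformats("example.tif", z=0, t=0,
                              channel_names=channel_names)
# image is a 2-d grayscale or 3-d RGB array; channel_names has been
# filled in-place if the reader knows them.
print image.shape, channel_names
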
Example #5
def make_dirs(dirname, mode=0o777):
    """
    An idempotent version of os.makedirs().  If the dir already exists, do
    nothing and return without raising an exception.  If this call creates the
    dir, return without raising an exception.  If there is an error that
    prevents creation or if the directory gets deleted after make_dirs() creates
    it and before make_dirs() checks that it exists, raise an exception.
    """
    tx = None
    try:
        os.makedirs(dirname, mode)
    except OSError as x:
        tx = x

    if not os.path.isdir(dirname):
        if tx:
            raise tx
        raise exceptions.IOError("unknown error prevented creation of directory, or deleted the directory immediately after creation: %s" % dirname) # careful not to construct an IOError with a 2-tuple, as that has a special meaning...
Example #6
    def make_module_skeleton(self):

        dest = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'])
        if os.path.exists(dest):
            raise exceptions.IOError("Destination %s already exists!" % dest)

        if not os.path.exists(os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'kernels/volk_' + self.my_dict['name'])):
            os.makedirs(os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'kernels/volk_' + self.my_dict['name']))

        current_kernel_names = self.get_current_kernels()

        for root, dirnames, filenames in os.walk(self.my_dict['base']):
            for name in filenames:
                t_table = map(lambda a: re.search(a, name), current_kernel_names)
                t_table = set(t_table)
                if t_table == set([None]):
                    infile = os.path.join(root, name)
                    instring = open(infile, 'r').read()
                    outstring = re.sub(self.volk, 'volk_' + self.my_dict['name'], instring)
                    newname = re.sub(self.volk, 'volk_' + self.my_dict['name'], name)
                    relpath = os.path.relpath(infile, self.my_dict['base'])
                    newrelpath = re.sub(self.volk, 'volk_' + self.my_dict['name'], relpath)
                    dest = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], os.path.dirname(newrelpath), newname)

                    if not os.path.exists(os.path.dirname(dest)):
                        os.makedirs(os.path.dirname(dest))
                    open(dest, 'w+').write(outstring)

        infile = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'lib/testqa.cc')
        instring = open(infile, 'r').read()
        outstring = re.sub(self.volk_run_tests, '', instring)
        open(infile, 'w+').write(outstring)

        infile = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'apps/volk_' + self.my_dict['name'] + '_profile.cc')
        instring = open(infile, 'r').read()
        outstring = re.sub(self.volk_profile, '', instring)
        open(infile, 'w+').write(outstring)

        infile = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'lib/qa_utils.cc')
        instring = open(infile, 'r').read()
        outstring = re.sub(self.badassert, self.goodassert, instring)
        outstring = re.sub(self.baderase, self.gooderase, outstring)
        open(infile, 'w+').write(outstring)
Example #7
def main():

    if 'CMSSW_BASE' not in os.environ:
        raise exceptions.RuntimeError('CMSSW_BASE is not setup.')

    usage = 'usage: %prog [options]\n'
    usage = usage + 'Create initial production table.'

    parser = OptionParser(usage=usage)

    parser.add_option('--filelist',
                      type='string',
                      help='List of dataset to be deleted.')

    parser.add_option(
        '--type',
        type='string',
        default='mc',
        help='Type of production table (data or mc, default mc).')

    (options, args) = parser.parse_args()

    if os.path.isfile(options.filelist):
        filelist = open(options.filelist)
        datasets = {}
        for dataset in sorted(filelist.readlines()):
            if SelectedSites(dataset.rstrip()):
                print 'Dataset in selected sites (INSITE).\n'
                datasets[dataset.rstrip()] = True
            else:
                print
                datasets[dataset.rstrip()] = False
        if options.type.lower() == 'mc':
            MCProductionTable(datasets)
        elif options.type.lower() == 'data':
            DataProductionTable(datasets)
        else:
            raise exceptions.ValueError(
                'Error: type %s undefined (options are data or mc).' %
                options.type.lower())
    else:
        raise exceptions.IOError('Filelist %s does not exist.' %
                                 options.filelist)
Example #8
    def dumpLoader(cls, fname):
        """
        Creates calculator object from a dump file

        :param fname: path to the dump file.

        :return: Created calculator object.

        :raises RuntimeError: if cannot create object.

        """

        try:
            calculator = dill.load(open(fname))
        except:
            raise exceptions.IOError("Cannot read  from file " + fname)
        if not issubclass(type(calculator), AbstractBaseCalculator):
            raise TypeError(
                "The argument to the script should be a path to a file "
                "with object of subclass of AbstractBaseCalculator")
        return calculator
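
A hedged usage sketch (CalculatorSubclass stands for any concrete subclass of AbstractBaseCalculator; the dump file name is a placeholder for a file written earlier with dill):

calculator = CalculatorSubclass.dumpLoader("calculator.dump")
# raises IOError if the file cannot be read, TypeError if the unpickled
# object is not an AbstractBaseCalculator subclass instance
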
Example #9
def rdClassSim(simroot, verbose=False):
    """ 
    Check for existing simulation results, 
    then read in classification simulation products
    and return a list containing three SimTable
    objects: [Ia,II,Ibc]
    """
    from __init__ import SimTable

    simnameIa = simroot + '_Ia'
    simnameIbc = simroot + '_Ibc'
    simnameII = simroot + '_II'

    # check first for existence in SNDATA_ROOT
    photfileIa = os.path.join(SNDATA_ROOT,
                              'SIM/' + simnameIa + '/%s_PHOT.FITS' % simnameIa)
    photfileIbc = os.path.join(
        SNDATA_ROOT, 'SIM/' + simnameIbc + '/%s_PHOT.FITS' % simnameIbc)
    photfileII = os.path.join(SNDATA_ROOT,
                              'SIM/' + simnameII + '/%s_PHOT.FITS' % simnameII)

    gridfileIa = os.path.join(SNDATA_ROOT,
                              'SIM/' + simnameIa + '/%s.GRID' % simnameIa)
    gridfileIbc = os.path.join(SNDATA_ROOT,
                               'SIM/' + simnameIbc + '/%s.GRID' % simnameIbc)
    gridfileII = os.path.join(SNDATA_ROOT,
                              'SIM/' + simnameII + '/%s.GRID' % simnameII)

    if (not (os.path.isfile(photfileII) and os.path.isfile(photfileIbc)
             and os.path.isfile(photfileIa))
            and not (os.path.isfile(gridfileII) and os.path.isfile(gridfileIbc)
                     and os.path.isfile(gridfileIa))):
        raise exceptions.IOError("No classification sim products for %s" %
                                 simroot)

    # read in the sim products
    simdatIa = SimTable(simnameIa, verbose=verbose)
    simdatIbc = SimTable(simnameIbc, verbose=verbose)
    simdatII = SimTable(simnameII, verbose=verbose)
    return ClassSim(simdatIa, simdatIbc, simdatII)
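
A hedged usage sketch (the simroot prefix is a placeholder; the _Ia/_Ibc/_II PHOT.FITS or GRID products must already exist under SNDATA_ROOT):

sims = rdClassSim("SDSS_classify", verbose=True)
# sims wraps three SimTable objects, one per class: Ia, Ibc and II
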
Example #10
def copyFiles(outfilesPickle, destDir='./output'):
    """
    Copy output files to destination directory. This is done using rsync.
    
    :param string outfilesPickle: name of outfiles pickle file
    :param string destDir: directory to which files should be copied
    
    :raises subprocess.CalledProcessError: if rsync returns an error value
    :raises exceptions.IOError: if outfilesPickle does not exist
    """
    # Get a list of all output files
    if not os.path.exists(outfilesPickle):
        raise exceptions.IOError(errno.ENOENT, "File not found",
                                 outfilesPickle)
    outfiles = PipeUtil.FetchObject(outfilesPickle)
    outfilesList = VLBACal.VLBAMakeOutfilesList(outfiles)
    logger.info("Copying (rsync-ing) output files to " + destDir)
    # Prepare destination directory
    if not os.path.isdir(destDir):  # not dir
        if os.path.exists(destDir):  # not dir & exists
            logger.error(
                "Copy failed: destination exists and is not a directory.")
            raise OSError(errno.ENOTDIR,
                          "File exists and is not a directory", destDir)
        else:  # not dir & not exists
            os.makedirs(destDir)
    # Copy files using rsync
    cmd = ["rsync", "--verbose", "--times"]
    cmd.extend(outfilesList)
    cmd.append(destDir)
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as e:
        logger.error(
            "Error occurred while rsyncing to destination directory.\n" +
            "rsync return value: " + str(e.returncode))
        raise
    logger.info("All files copied to " + destDir)
Example #11
    def change_directory(self, directory):
        if directory is None:
            return

        directory = os.path.abspath(directory)

        if not os.path.isdir(directory):
            raise exceptions.IOError(directory)

        if len(self.__history) > 0:
            if directory == self.__history[self.__current_history_index]:
                return

        if self.__current_history_index < len(self.__history) - 1:
            self.__history = self.__history[:self.__current_history_index + 1]
            self.__current_history_index = len(self.__history) - 1

        self.__history.append(directory)
        self.__current_history_index += 1

        self.directory_changed.emit(directory)
        self.__on_history_changed()
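
A sketch of the browser-style history this method maintains (navigator is a hypothetical instance of the surrounding class):

navigator.change_directory("/home/user")      # history: [/home/user]
navigator.change_directory("/home/user/src")  # history: [/home/user, /home/user/src]
# If the user has stepped back in the history, the next navigation
# discards the forward entries before appending the new directory.
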
Example #12
    def make_module_skeleton(self):
        dest = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'])
        if os.path.exists(dest):
            raise exceptions.IOError("Destination %s already exits!" % dest)

        if not os.path.exists(os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'kernels/volk_' + self.my_dict['name'])):
            os.makedirs(os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'kernels/volk_' + self.my_dict['name']))

        current_kernel_names = self.get_current_kernels()
        for root, dirnames, filenames in os.walk(self.my_dict['base']):
            for name in filenames:
                t_table = map(lambda a: re.search(a, name), current_kernel_names)
                t_table = set(t_table)
                if (t_table == set([None])) or (name == "volk_32f_null_32f.h"):
                    infile = os.path.join(root, name)
                    instring = open(infile, 'r').read()
                    outstring = re.sub(self.volk, 'volk_' + self.my_dict['name'], instring)
                    newname = re.sub(self.volk, 'volk_' + self.my_dict['name'], name)
                    relpath = os.path.relpath(infile, self.my_dict['base'])
                    newrelpath = re.sub(self.volk, 'volk_' + self.my_dict['name'], relpath)
                    dest = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], os.path.dirname(newrelpath), newname)

                    if not os.path.exists(os.path.dirname(dest)):
                        os.makedirs(os.path.dirname(dest))
                    open(dest, 'w+').write(outstring)

        infile = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'lib/kernel_tests.h')
        instring = open(infile, 'r').read()
        outstring = re.sub(self.volk_kernel_tests, '', instring)
        outstring = re.sub(self.volk_null_kernel, '        (VOLK_INIT_TEST(volk_' + self.my_dict['name'] + '_32f_null_32f, test_params))\n        ;', outstring)
        open(infile, 'w+').write(outstring)

        infile = os.path.join(self.my_dict['destination'], 'volk_' + self.my_dict['name'], 'lib/qa_utils.cc')
        instring = open(infile, 'r').read()
        outstring = re.sub(self.badassert, self.goodassert, instring)
        outstring = re.sub(self.baderase, self.gooderase, outstring)
        open(infile, 'w+').write(outstring)
Example #13
def get_privkey_file(self):
    """
    Locate file containing private key

    """

    #privkeys = ['../auth/certs/priv.pem',
    #            '../../build/auth/certs/priv.pem'
    #            ]

    privkey_fname = os.path.join(DIRNAME,
                                 const_dp.RELATIVE_PATH_BUILD_PRIV_KEY,
                                 const_dp.DP_PRIV_KEY_NAME)

    if os.path.exists(privkey_fname):
        return privkey_fname

    privkey_fname = os.path.join(DIRNAME, const_dp.RELATIVE_PATH_PRIV_KEY,
                                 const_dp.DP_PRIV_KEY_NAME)

    if os.path.exists(privkey_fname):
        return privkey_fname

    raise exceptions.IOError("Can't find private keyfile")
Example #14
def cmor_reformat(currProject, project_info, variable, model):
    model_name = currProject.get_model_name(model)
    project_name = currProject.get_project_name(model)
    project_basename = currProject.get_project_basename()
    project_info['RUNTIME']['model'] = model_name
    project_info['RUNTIME']['project'] = project_name
    project_info['RUNTIME']['project_basename'] = project_basename
    verbosity = project_info["GLOBAL"]["verbosity"]
    exit_on_warning = project_info['GLOBAL'].get('exit_on_warning', False)

    # Variable put in environment to be used for the (optional)
    # wildcard syntax in the model path, ".../${VARIABLE}/..."
    # in the namelist
    os.environ['__ESMValTool_base_var'] = variable.var

    # Build input and output file names
    indir, infile = currProject.get_cf_infile(project_info, model,
                                              variable.fld, variable.var,
                                              variable.mip, variable.exp)

    fullpath = currProject.get_cf_fullpath(project_info, model, variable.fld,
                                           variable.var, variable.mip,
                                           variable.exp)
    #    print "indir = %s" % indir
    #    print "infile = %s" % infile
    #    print "fullpath = %s" % fullpath

    if (not os.path.isdir(os.path.dirname(fullpath))):
        os.makedirs(os.path.dirname(fullpath))

    # Area file name for ocean grids
    areafile_path = currProject.get_cf_areafile(project_info, model)

    # Land-mask file name for land variables
    lmaskfile_path = currProject.get_cf_lmaskfile(project_info, model)
    omaskfile_path = currProject.get_cf_omaskfile(project_info, model)

    # Porosity file name for land variables
    porofile_path = currProject.get_cf_porofile(project_info, model)

    # Additional grid file names for ocean grids, if available (ECEARTH)
    hgridfile_path = False
    zgridfile_path = False
    lsmfile_path = False
    if hasattr(currProject, "get_cf_hgridfile"):
        hgridfile_path = currProject.get_cf_hgridfile(project_info, model)
    if hasattr(currProject, "get_cf_zgridfile"):
        zgridfile_path = currProject.get_cf_zgridfile(project_info, model)
    if hasattr(currProject, "get_cf_lsmfile"):
        lsmfile_path = \
            currProject.get_cf_lsmfile(project_info, model, variable.fld)

    # General fx file name entry
    fx_file_path = False
    if hasattr(currProject, "get_cf_fx_file"):
        fx_file_path = currProject.get_cf_fx_file(project_info, model)

    project, name, ensemble, start_year, end_year, dir\
        = currProject.get_cf_sections(model)
    info("project is " + project, verbosity, required_verbosity=4)
    info("ensemble is " + ensemble, verbosity, required_verbosity=4)
    info("dir is " + dir, verbosity, required_verbosity=4)

    # Check if the current project has a specific reformat routine,
    # otherwise use default
    if (os.path.isdir("reformat_scripts/" + project)):
        which_reformat = project
    else:
        which_reformat = 'default'

    reformat_script = os.path.join("reformat_scripts", which_reformat,
                                   "reformat_" + which_reformat + "_main.ncl")

    # Set environment variables
    project_info['TEMPORARY'] = {}
    project_info['TEMPORARY']['indir_path'] = indir
    project_info['TEMPORARY']['outfile_fullpath'] = fullpath
    project_info['TEMPORARY']['infile_path'] = os.path.join(indir, infile)
    project_info['TEMPORARY']['areafile_path'] = areafile_path
    project_info['TEMPORARY']['lmaskfile_path'] = lmaskfile_path
    project_info['TEMPORARY']['omaskfile_path'] = omaskfile_path
    project_info['TEMPORARY']['porofile_path'] = porofile_path
    project_info['TEMPORARY']['start_year'] = start_year
    project_info['TEMPORARY']['end_year'] = end_year
    project_info['TEMPORARY']['ensemble'] = ensemble
    project_info['TEMPORARY']['variable'] = variable.var
    project_info['TEMPORARY']['field'] = variable.fld

    # FX file path
    if fx_file_path:
        project_info['TEMPORARY']['fx_file_path'] = fx_file_path

    # Special cases
    if 'realm' in currProject.get_model_sections(model):
        project_info['TEMPORARY']['realm'] = \
            currProject.get_model_sections(model)["realm"]
    if 'shift_year' in currProject.get_model_sections(model):
        project_info['TEMPORARY']['shift_year'] = \
            currProject.get_model_sections(model)["shift_year"]
    if 'case_name' in currProject.get_model_sections(model):
        project_info['TEMPORARY']['case_name'] = \
            currProject.get_model_sections(model)["case_name"]

    if hgridfile_path and zgridfile_path:
        project_info['TEMPORARY']['hgridfile_path'] = hgridfile_path
        project_info['TEMPORARY']['zgridfile_path'] = zgridfile_path
    if lsmfile_path:
        project_info['TEMPORARY']['lsmfile_path'] = lsmfile_path

    # Execute the ncl reformat script
    if ((not os.path.isfile(project_info['TEMPORARY']['outfile_fullpath']))
            or project_info['GLOBAL']['force_processing']):

        info("  Calling " + reformat_script + " to check/reformat model data",
             verbosity,
             required_verbosity=1)

        projects.run_executable(reformat_script, project_info, verbosity,
                                exit_on_warning)
    if 'NO_REFORMAT' not in reformat_script:
        if (not os.path.isfile(project_info['TEMPORARY']['outfile_fullpath'])):
            raise exceptions.IOError(
                2, "Expected reformatted file isn't available: ",
                project_info['TEMPORARY']['outfile_fullpath'])
    del project_info['TEMPORARY']
Example #15
def CreateCrabConfig(options, isdata):
    """
    Creates a crab config file for processing data
    """
    if isdata:
        if not options.lumimask:
            raise exceptions.ValueError('Missing lumimask for a data job.')
        elif not os.path.isfile(options.lumimask):
            raise exceptions.IOError('Lumimask file %s does not exist.' %
                                     options.lumimask)

    scheduler = 'condor'
    use_server = '0'
    grid = ''

    nevents = GetDatasetNEvents(options.dataset)
    njobs = int(nevents * options.eventsize / (1000 * options.filesize))

    site = SelectSite(options.dataset, options.fraction)

    if site == 'T2_US':
        scheduler = 'remoteGlidein'
        use_server = '0'
        grid = '[GRID]\nse_white_list = T2_US_*'
    elif site == 'T2':
        print 'Warning: Neither FNAL nor T2_US has the dataset.'
        print 'This could mean more chances of problems in the stageout (exit code 60317).'
        print 'Increasing the number of jobs by a factor of 4.'
        scheduler = 'remoteGlidein'
        use_server = '0'
        njobs = 4 * njobs
    elif site != 'FNAL':
        raise exceptions.ValueError('No T2 site contains this dataset.')

    if njobs > 5000:
        print 'Warning: the number of jobs for this sample was reduced to 5000.'
        njobs = 5000

    if not isdata:
        datasetblock = 'total_number_of_events = -1\n'
        datasetblock = datasetblock + 'number_of_jobs = %d' % int(njobs)

        pycfg_params = 'tlbsmTag=%s useData=0' % options.tag.lower()
        if options.pycfg:
            pycfg_params = pycfg_params + ' ' + options.pycfg

        publish_data_name = options.dataset.split('/')[2] + '_' + options.tag
        ui_working_dir = options.dataset.replace('/AODSIM', '').replace(
            '/', '_')[1:] + '_' + options.tag
    else:
        datasetblock = 'total_number_of_lumis = -1\n'
        datasetblock = datasetblock + 'number_of_jobs = %d\n' % int(njobs)
        datasetblock = datasetblock + 'lumi_mask = %s' % options.lumimask

        pycfg_params = 'tlbsmTag=%s useData=1' % options.tag.lower()
        if options.pycfg:
            pycfg_params = pycfg_params + ' ' + options.pycfg

        publish_data_name = options.dataset.split('/')[2] + '_' + options.tag
        ui_working_dir = options.dataset.replace('/AOD', '').replace(
            '/', '_')[1:] + '_' + options.tag

    if options.extension > 0 and isdata:
        publish_data_name = publish_data_name + '_extension_v%d' % options.extension
        ui_working_dir = ui_working_dir + '_extension_v%d' % options.extension

    if options.bugfix > 0:
        publish_data_name = publish_data_name + '_bugfix_v%d' % options.bugfix
        ui_working_dir = ui_working_dir + '_bugfix_v%d' % options.bugfix

    settings = {
        'scheduler': scheduler,
        'use_server': use_server,
        'datasetpath': options.dataset,
        'pycfg_params': pycfg_params,
        'datasetblock': datasetblock,
        'publish_data_name': publish_data_name,
        'ui_working_dir': ui_working_dir,
        'grid': grid
    }

    filename = '%s/src/TopQuarkAnalysis/TopPairBSM/test/crab_template.cfg' % os.environ[
        'CMSSW_BASE']
    file = open(filename)
    template = string.Template(file.read())
    file.close()
    file = open('crab_%s.cfg' % ui_working_dir, 'w')
    file.write(template.safe_substitute(settings))
    file.close()
Example #16
    def writeNameList(self, verbose=False, fail=False, **params):
        """
        Modify existing namelist files using information generated via genConversion
        Existing files will be copied to .bak
        :param verbose (optional -- default is False). If True provide more information on what is going on.
        :param fail (optional default is False). If True fail if a parameter not found.
        :keyword arguments are parameters and values.
        :return:  ordered dict of parameters and values used.
        """
        if self._readOnly:
            raise exceptions.IOError("Model is read only")

        params_used = collections.OrderedDict()  #
        files = collections.OrderedDict()  # list of files to be modified.
        # Extract data from the conversions, indexed by file.
        # Could this code be moved into genVarToNameList, as it is really a different view of the same data?
        # No, as we would need to do this only once we've finished generating the namelist translate tables.
        # A potential optimisation might be to cache this and trigger an error in writeNameList if called after genNameList.
        for param, value in params.iteritems():
            # search functions first
            if param in self._metaFn:  # got a meta function.
                if verbose:
                    print "Running function %s" % self._metaFn[param].func_name
                # call the meta param function, which returns a dict
                metaFnValues = self._metaFn[param](value)
                params_used[param] = metaFnValues  # and update return var
                for conv, v in metaFnValues.iteritems():  # iterate over result of fn.
                    if conv.file not in files:
                        files[conv.file] = []  # first time we meet this file: start an empty list
                    files[conv.file].append((v, conv))  # append the value & conversion info.
            elif param in self._convNameList:  # got it in convNameList?
                for conv in self._convNameList[param]:
                    if conv.file not in files:
                        files[conv.file] = []  # first time we meet this file: start an empty list
                    files[conv.file].append((value, conv))  # append the value & conversion
                    params_used[param] = value  # and update return var
            elif fail:
                raise exceptions.KeyError(
                    "Failed to find %s in metaFn or convNameList " % param)
            else:
                pass

        # now have conversion tuples ordered by file so let's process the files
        for file in files.keys():  # iterate over files
            # Need to create a backup? Only do so if no backup exists. This allows
            # generateNameList to be run multiple times doing updates. The first time it
            # runs we assume we have a directory ready to be modified.
            filePath = os.path.join(self.dirPath, file)  # full path to namelist file
            # check the file exists; if not, skip it
            if not os.path.isfile(filePath):
                # raise exceptions.IOError("file %s does not exist" % (filePath))
                continue  # skip this file.
            backup_file = filePath + "_nl.bak"  # and full path to backup file.
            if not os.path.isfile(backup_file):
                shutil.copyfile(filePath, backup_file)
            # now create the namelist file. Need a temp file
            with tempfile.NamedTemporaryFile(dir=self.dirPath, delete=False) as tmpNL:
                # Construct the patch for the namelist file from all conversion tuples.
                nlPatch = collections.OrderedDict()  # patch to apply to the existing namelist file
                for (value, conv) in files[file]:
                    if conv.namelist not in nlPatch:
                        nlPatch[conv.namelist] = collections.OrderedDict()  # don't have an ordered dict so make it
                    if type(value) is np.ndarray:  # convert numpy array to list for writing.
                        value = value.tolist()
                    elif isinstance(value, unicode):
                        value = str(value)  # f90nml can't cope with unicode so convert it to string.
                    nlPatch[conv.namelist][conv.var] = copy.copy(value)  # store a copy of the value rather than a reference.
                    if verbose:
                        print "Setting %s,%s to %s in %s" % (
                            conv.namelist, conv.var, value, filePath)
                try:
                    p = f90nml.patch(filePath, nlPatch, tmpNL.name)  # patch the namelist file
                    tmpNL.close()  # close the temp file once done.
                except StopIteration:
                    print "Problem in f90nml for %s writing to %s" % (
                        filePath, tmpNL.name), nlPatch
                    raise  # raise exception.

                if verbose: print "Patched %s to %s" % (filePath, tmpNL.name)
                shutil.move(tmpNL.name, filePath)  # and move the patched file back in place.

        return params_used
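
A hedged usage sketch (the parameter name VF1 and its value are placeholders; real names come from the conversion tables registered via genVarToNameList or the meta-function table):

used = model.writeNameList(verbose=True, fail=True, VF1=1.5)
print used  # OrderedDict of the parameters actually written
# A read-only model raises IOError before touching any namelist file.
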
Example #17
    def import_kernel(self, name, base):
        if not base:
            base = self.my_dict['base']
            basename = self.getbasename()
        else:
            basename = self.get_basename(base)
        if name not in self.get_current_kernels(base):
            raise exceptions.IOError(
                "Requested kernel %s is not in module %s" % (name, base))

        inpath = os.path.abspath(base)
        if len(basename) > 0:
            top = 'volk_' + basename + '_'
        else:
            top = 'volk_'
        oldvolk = re.compile(top[:-1])

        self.convert_kernel(oldvolk, name, base, inpath, top)

        kernel = re.compile(name)
        search_kernels = Set([kernel])

        profile = re.compile('^\s*VOLK_PROFILE')
        puppet = re.compile('^\s*VOLK_PUPPET')
        infile = open(
            os.path.join(inpath, 'apps/', oldvolk.pattern + '_profile.cc'))
        otherinfile = open(
            os.path.join(self.my_dict['destination'],
                         'volk_' + self.my_dict['name'],
                         'apps/volk_' + self.my_dict['name'] + '_profile.cc'))
        dest = os.path.join(
            self.my_dict['destination'], 'volk_' + self.my_dict['name'],
            'apps/volk_' + self.my_dict['name'] + '_profile.cc')
        lines = infile.readlines()
        otherlines = otherinfile.readlines()
        open(dest, 'w+').write('')
        insert = False
        inserted = False
        for otherline in otherlines:

            if self.lastline.match(otherline):
                insert = True
            if insert and not inserted:
                inserted = True
                for line in lines:
                    if kernel.search(line):
                        if profile.match(line):
                            outline = re.sub(oldvolk,
                                             'volk_' + self.my_dict['name'],
                                             line)
                            open(dest, 'a').write(outline)
                        elif puppet.match(line):
                            outline = re.sub(oldvolk,
                                             'volk_' + self.my_dict['name'],
                                             line)
                            open(dest, 'a').write(outline)
                            args = re.search("(?<=VOLK_PUPPET_PROFILE).*",
                                             line)
                            m_func = args.group(0).split(',')[0]
                            func = re.search('(?<=' + top + ').*', m_func)
                            search_kernels.add(re.compile(func.group(0)))
                            self.convert_kernel(oldvolk, func.group(0), base,
                                                inpath, top)
            write_okay = True
            for kernel in search_kernels:
                if kernel.search(otherline):
                    write_okay = False
            if write_okay:
                open(dest, 'a').write(otherline)

        for kernel in search_kernels:
            print("Adding kernel %s from module %s" % (kernel.pattern, base))

        infile = open(os.path.join(inpath, 'lib/testqa.cc'))
        otherinfile = open(
            os.path.join(self.my_dict['destination'],
                         'volk_' + self.my_dict['name'], 'lib/testqa.cc'))
        dest = os.path.join(self.my_dict['destination'],
                            'volk_' + self.my_dict['name'], 'lib/testqa.cc')
        lines = infile.readlines()
        otherlines = otherinfile.readlines()
        open(dest, 'w+').write('')
        inserted = False
        insert = False
        for otherline in otherlines:
            if re.match('\s*', otherline) is None or re.match(
                    '\s*#.*', otherline) is None:
                insert = True
            if insert and not inserted:
                inserted = True
                for line in lines:
                    for kernel in search_kernels:
                        if kernel.search(line):
                            if self.volk_run_tests.match(line):
                                outline = re.sub(
                                    oldvolk, 'volk_' + self.my_dict['name'],
                                    line)
                                open(dest, 'a').write(outline)
            write_okay = True
            for kernel in search_kernels:
                if kernel.search(otherline):
                    write_okay = False
            if write_okay:
                open(dest, 'a').write(otherline)
Example #18
    def remove_kernel(self, name):
        basename = self.my_dict['name']
        if len(basename) > 0:
            top = 'volk_' + basename + '_'
        else:
            top = 'volk_'
        base = os.path.join(self.my_dict['destination'], top[:-1])

        if name not in self.get_current_kernels():
            raise exceptions.IOError(
                "Requested kernel %s is not in module %s" % (name, base))

        inpath = os.path.abspath(base)
        kernel = re.compile(name)
        search_kernels = Set([kernel])
        profile = re.compile('^\s*VOLK_PROFILE')
        puppet = re.compile('^\s*VOLK_PUPPET')
        src_dest = os.path.join(inpath, 'apps/', top[:-1] + '_profile.cc')
        infile = open(src_dest)
        otherlines = infile.readlines()
        open(src_dest, 'w+').write('')

        for otherline in otherlines:
            write_okay = True
            if kernel.search(otherline):
                write_okay = False
                if puppet.match(otherline):
                    args = re.search("(?<=VOLK_PUPPET_PROFILE).*", otherline)
                    m_func = args.group(0).split(',')[0]
                    func = re.search('(?<=' + top + ').*', m_func)
                    search_kernels.add(re.compile(func.group(0)))
            if write_okay:
                open(src_dest, 'a').write(otherline)

        src_dest = os.path.join(inpath, 'lib/testqa.cc')
        infile = open(src_dest)
        otherlines = infile.readlines()
        open(src_dest, 'w+').write('')

        for otherline in otherlines:
            write_okay = True

            for kernel in search_kernels:
                if kernel.search(otherline):
                    write_okay = False

            if write_okay:
                open(src_dest, 'a').write(otherline)

        for kernel in search_kernels:
            infile = os.path.join(
                inpath,
                'kernels/' + top[:-1] + '/' + top + kernel.pattern + '.h')
            print("Removing kernel %s" % kernel.pattern)
            if os.path.exists(infile):
                os.remove(infile)
        # remove the orc proto-kernels if they exist. There are no puppets here
        # so just need to glob for files matching kernel name
        print(
            glob.glob(inpath + '/kernel/volk/asm/orc/' + top + name + '*.orc'))
        for orcfile in glob.glob(inpath + '/kernel/volk/asm/orc/' + top +
                                 name + '*.orc'):
            print(orcfile)
            if os.path.exists(orcfile):
                os.remove(orcfile)
Example #19
#
#  make_grid_desc.py   ../Grids/ns_grid.sum    /home/data/GUDP-VIND_test/VINDdata/setup/ns_grid_depth_v5   ns_grid.nc
#
#####################################################################################
from numpy import *
import netCDF4 as netcdf

import sys
import exceptions
import datetime

usage_str = "usage: %s  <grid_summary_file>   <grid_description>  <netCDF_descriptor> " % sys.argv[
    0]

if len(sys.argv) != 4:
    raise exceptions.IOError(("%s expects 3 arguments\n\n" % sys.argv[0]) +
                             usage_str)
#
# ------------ read header file ------------
#
fin = open(sys.argv[1])
nx, ny, nz = map(int, fin.readline().split()[:3])
dlon, dlat = map(float, fin.readline().split()[:2])
lon0, lat0 = map(float, fin.readline().split()[:2])
fin.close()

#
# ------------ read grid_description ------------
#

fgr = open(sys.argv[2])
data = fgr.read().split()
Example #20
# The signature below is reconstructed from the function body; the original
# function name and defaults were lost when this snippet was captured.
def rest_request(host, url, method='GET', body=None, headers={},
                 timeout=None, status=None):
    """
    Convenience function to make restful HTTP requests.
    """
    conn = None
    try:
        conn = httplib.HTTPConnection(host, timeout=timeout)
        conn.request(method, url, body, headers)
        response = conn.getresponse()
        if status: assert (response.status == status)
        data = response.read()
        if response.length == 0: data = None
    except httplib.HTTPException, e:
        raise httplib.HTTPException("HTTP error: %d" % e.code)
    except socket.error, e:
        raise socket.error("Socket error: %s" % e.message)
    except exceptions.IOError, e:
        raise exceptions.IOError("IO error: %s" % e.message)
    finally:
        if conn: conn.close()
    return {
        "body": data,
        "status": response.status,
        "headers": dict(response.getheaders())
    }
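
A usage sketch against the reconstructed signature above (the name rest_request is an assumption made for this listing; the host and URL are placeholders):

resp = rest_request("localhost:8000", "/api/v1/clusters/", status=200)
print resp["status"], resp["body"]
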


class ClusterAddress(models.Model):
    """
    Holds metadata about all the known clusters.
    """
    owner = models.ForeignKey(User, db_index=True)
    address = models.CharField(max_length=128, unique=True)
Example #21
    def compressFeatureData(self, s_ftfile):
        """
        @return:  1. the zip file path; 2. layer name at geoserver 
        """
        from osgeo import ogr

        def zipFiles(zipname, files, arcnames):
            assert len(files) == len(
                arcnames), "size of file names and rename container not equal"
            zipf = zipfile.ZipFile(zipname, 'w')
            for i in range(len(files)):
                if os.path.exists(files[i]):
                    zipf.write(files[i], arcnames[i])
            zipf.close()

        ft = ogr.Open(s_ftfile)
        ftDrv = ft.GetDriver()

        sft = os.path.splitext(s_ftfile)[0]
        archive_files = [
            sft + '.shp', sft + '.shx', sft + '.prj', sft + '.dbf'
        ]
        filename = os.path.split(sft)[1]
        arcnames = [
            filename + '.shp', filename + '.shx', filename + '.prj',
            filename + '.dbf'
        ]

        logging.info("the driver of vector data {0} is {1}".format(
            s_ftfile, ftDrv.name))
        if (ftDrv.name != "ESRI Shapefile"):
            tempshpf = os.path.join(
                config.getConfigValue('server', 'tempPath'), str(uuid.uuid4()))
            shpDrv = ogr.GetDriverByName("ESRI Shapefile")

            shpft = shpDrv.CopyDataSource(ft, tempshpf + '.shp')
            if not shpft:
                raise exceptions.IOError(
                    "converting {0} vector data to shapefile format failed".format(
                        s_ftfile))

            # close the vector datasource
            ft = None
            shpft = None

            # zip the shape files and delete them
            # create a default .prj file for this shapefile if there is no projection information
            if not os.path.exists(tempshpf + '.prj'):
                f = open(tempshpf + '.prj', 'w')
                f.write(
                    'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]'
                )
                f.close()
            archive_files = [
                tempshpf + '.shp', tempshpf + '.shx', tempshpf + '.prj',
                tempshpf + '.dbf'
            ]
            zipFiles(tempshpf + '.zip', archive_files, arcnames)

            for f in archive_files:
                if os.path.exists(f):
                    os.remove(f)

            return tempshpf + '.zip', filename
        else:
            zipFiles(sft + '.zip', archive_files, arcnames)
            return sft + '.zip', filename
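
A hedged usage sketch (handler stands for a hypothetical instance of the surrounding class; the GeoJSON path is a placeholder chosen so the non-shapefile CopyDataSource branch runs):

zippath, layer = handler.compressFeatureData("/data/rivers.geojson")
# zippath names a .zip of .shp/.shx/.prj/.dbf ready for GeoServer upload
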