Example #1
def findpath(startdir,target):
    os.chdir(startdir)
    file=os.listdir(os.curdir)
    for eachfile in file:
        if os.path.isfile(eachfile):
            if eachfile==target:
                print(os.getcwd()+os.sep+target)
        if os.path.isdir(eachfile):
            findpath(eachfile,target)
            os.chdir(os.pardir)
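The chdir-based recursion above is fragile: any exception leaves the process in a different working directory, and the parent-directory bookkeeping is easy to get wrong. Below is a minimal sketch of the same search built on os.walk instead; the names find_paths and start_dir are illustrative, not from the original snippet.

import os

def find_paths(start_dir, target):
    # Yield the absolute path of every file named `target` under start_dir.
    for root, _dirs, files in os.walk(start_dir):
        if target in files:
            yield os.path.join(os.path.abspath(root), target)

# Hypothetical usage:
# for hit in find_paths(os.curdir, 'notes.txt'):
#     print(hit)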
Example #2
def chazhaolujing(startdir,target):
    os.chdir(startdir)
    file=os.listdir(os.curdir)
    for eachfile in file:
        if os.path.isfile(eachfile):
            ext=os.path.splitext(eachfile)[1]
            if ext in target:
                return (os.getcwd()+os.sep+ext+os.linesep)
        if os.path.isdir(eachfile):
            chazhaolujing(eachfile,target)
            os.chdir(os.pardir)
Example #3
def zonalStats(inputf,raster):
    """ Run zonal statistics """
    try:
        logging.info("Computing zonal statistics...")
        output = os.path.join(os.path.dirname(inputf),'zonalstats.dbf')
        logging.error('%s %s %s' % (inputf,raster,output))
        os.chdir('/tmp')
        arcpy.gp.ZonalStatisticsAsTable_sa(inputf,"NEAR_FID",raster, output, "DATA","ALL")
        #arcpy.gp.ZonalStatisticsAsTable_sa('/tmp/tmp9RkU25/hotspot_areas.shp',"NEAR_FID",'/tmp/tmp9RkU25/scaled_20100607.010500.img','/tmp/tmp9RkU25/output.dbf',"DATA","ALL")
        os.chdir(os.path.dirname(inputf))
    except:
        logging.error("Something happened during %s" % funcname())
        logging.error(sys.exc_info())
Example #4
 def save_as_bitmap(self):
     if self.selected_dir is None:
         self.selected_dir = os.curdir
     save_filename = tkFileDialog.asksaveasfilename(initialdir=self.selected_dir, defaultextension='.bmp',
                                                    filetypes=[('Bitmap Image', '.bmp')])
     print(save_filename)
     self.modified_mask_image_label.pil_image.save(save_filename)
Example #5
    def Execute(self, temp_dir, main_form):
        """ Run a simulation.
            var
            OldDir: String
            AppTitle: String
        """
        #  Change the current directory to the application's temporary directory
        old_dir = os.getcwd()
        os.chdir(temp_dir)

        #  Update the form's display
        #Update

        #  Save the title Windows' uses for the application
        app_title = main_form.getText()

        #  Run the simulation
        self.RunSimulation()

        #  Restore the application's title
        main_form.setText(app_title)

        #  Change back to the original directory
        os.chdir(old_dir)

        #  Display the run's status on the ResultsPage of the form
        self.DisplayRunStatus()
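The save/chdir/restore pattern in Execute() is a natural fit for a context manager, so the original directory is restored even if the simulation raises. A small sketch follows; pushd is a hypothetical helper name.

import os
from contextlib import contextmanager

@contextmanager
def pushd(path):
    # Temporarily switch the working directory, restoring it on exit.
    old_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old_dir)

# Hypothetical usage:
# with pushd(temp_dir):
#     self.RunSimulation()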
Example #6
class Mapper:
    #map out files{} -> classes{} & Imports{} -> functions{{inputs:[],returns:[]}
    #moduleMap = files{{classes{functions{{inputs:[],returns:[]},"imports":[]}
    moduleMap = {"files": {}}
    RootJsfileList = []
    RootDir = os.curdir  # or js library folder path

    def __init__(self):
        pass

    def find_all_js_files(self, RootDir=RootDir):
        for root, dirnames, filenames in os.walk(RootDir):
            for filename in fnmatch.filter(filenames, '*.js'):
                self.moduleMap["files"] += {str(filename): ""}

    def find_imports_in_file(self):
        # TODO: re.findall needs a pattern and the file's source text here
        imports = []
        return imports

    def find_all_classes_in_file(self):
        pass

    def find_all_functions_in_class(self):
        pass

    def get_inputs_from_function(self):
        pass

    def parseJSfile(self):
        pass
Example #7
    def test_merge_utility(self, resource_group):
        app_name = 'helloWorldApp'
        yaml_files_path = "%s,%s,%s" % (_get_test_data_file('app.yaml'),
                                        _get_test_data_file('service.yaml'),
                                        _get_test_data_file('network.yaml'))
        self.kwargs.update({
            'resource_id':
            '',
            'resource_group':
            resource_group,
            'deployment_name':
            self.create_random_name(prefix='cli', length=24),
            'app_name':
            app_name,
            'input_yaml_files':
            yaml_files_path
        })

        # Test create
        self.cmd(
            'az mesh deployment create -g {rg} --input-yaml-files {input_yaml_files} --name {deployment_name}'
        )

        # Test delete
        self.cmd('az mesh app delete -g {rg} --name {app_name} --yes')

        # Delete the generated ARM template
        os.remove(os.path.join(os.curdir, 'merged-arm_rp.json'))
Example #8
 def __init__(self, web_url, username=None, password=None, logpath=None):
     self.web_url = web_url
     self.username = username
     self.password = password
     if logpath:
         self.logpath = logpath
     else:
         self.logpath = os.curdir
Example #9
def trainWordEmbeddGensim():
    if "model.bin" in os.curdir("."):
        return
    else:
        model = Word2Vec(sentences, size=100, window=5, min_count=25,\
                         workers=8)
        model.save("model.bin")
        return model
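Scanning a directory listing is not needed just to detect the trained model; a shorter sketch of the same guard using os.path.exists (the filename follows the snippet above):

import os

if os.path.exists("model.bin"):
    print("model already trained, skipping")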
Example #10
def get_config_dir():
    """Get configuration directory."""
    confdir = os.environ.get("XDG_CONFIG_HOME")
    if not confdir:
        confdir = os.path.expanduser("~")
        if confdir == "~":
            confdir = os.curdir
        confdir = os.path.join(confdir, ".config")
    return os.path.join(confdir, "rganalysis")
Example #11
def checkVideoDurationSeconds():
    # type: () -> None

    for video in os.listdir(os.curdir):
        if not video.endswith("mp4"):
            continue
        print(video)
        os.system(
            'ffprobe -i {} -show_entries format=duration -v quiet -of csv="p=0"'
            .format(os.path.join(os.curdir, video)))
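Building the ffprobe command with str.format and os.system breaks on filenames containing spaces or quotes. A sketch of the same probe via subprocess.run with an argument list (assumes ffprobe is on PATH):

import os
import subprocess

for video in os.listdir(os.curdir):
    if not video.endswith("mp4"):
        continue
    result = subprocess.run(
        ["ffprobe", "-i", video, "-show_entries", "format=duration",
         "-v", "quiet", "-of", "csv=p=0"],
        capture_output=True, text=True)
    print(video, result.stdout.strip())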
Example #12
 def __init__(self, host, file_, do_piped_output=True):
     self.dir = os.curdir
     self.cmd = "python"
     self.is_piped = do_piped_output
     thread = subprocess.Popen("", executable=self.cmd)
     # TODO: savex please refactor code below
     self.file_ = file_
     self.host = host
     # TODO: implement method get_configuration in utils
     # or use existing config file which stores private key path and user
     self._config = utils.get_configuration(__file__)
     self.priv_key = self._config['priv_key_path']
     self.user = self._config['user']
Example #13
def clean(path: str) -> str:
    """
    Cleans the specified path by expanding shorthand elements, redirecting to
    the real path for symbolic links, and removing any relative components to
    return a complete, absolute path to the specified location.

    :param path:
        The source path to be cleaned
    """

    if not path or path == ".":
        path = os.curdir

    if path.startswith("~"):
        path = os.path.expanduser(path)

    return os.path.realpath(os.path.abspath(path))
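For reference, the three normalisation steps clean() chains together, applied one at a time to a hypothetical path:

import os

p = "~/projects/../projects/./demo"
p = os.path.expanduser(p)   # expand "~" to the home directory
p = os.path.abspath(p)      # anchor any relative components to the current directory
p = os.path.realpath(p)     # resolve symlinks, "." and ".." segments
print(p)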
Example #15
 def render_GET(self,request):
     index_name = 'index.html'
     try:
       # determine scheduler index.html
       if hasattr(self.bh.scheduler,'index_html'):
            index_name = self.bh.scheduler.index_html
       # determine if application is a script file or frozen exe
       if hasattr(sys, 'frozen'):
             application_path = os.path.dirname(sys.executable)
       elif __file__:
             application_path = os.path.dirname(__file__)          
       index = os.path.join(application_path, index_name)
     except:
       index = os.path.join(os.curdir, index_name)
     file = open(index, 'r')
     linestring = file.read()
     file.close()
     request.write(linestring)
     request.finish()
     return server.NOT_DONE_YET
Example #16
def convertmodisvihdf2envi(hdffile, outpath): # This converts a LEDAPS-generated HDF to ENVI format via GDAL
    hdf=gdal.Open(hdffile)
    basename=os.path.basename(hdffile)
    dirname=os.path.dirname(os.path.abspath(hdffile))
    outpath=os.path.abspath(outpath)
    dtaglist=[]
    taglist=[]
    if len(dirname)==0:
        dirname=os.curdir
    if len(outpath)==0:
        outpath=dirname
    print('Exporting '+basename+' to '+outpath)
    i=basename.rfind('/')+1
    j=basename.rfind('.')
    dataset=basename[i:j]
    sdsdict=hdf.GetSubDatasets()
    NDVIdat=outpath+'/ndvi/'+dataset+'_NDVI.dat'
    EVIdat=outpath+'/evi/'+dataset+'_EVI.dat'
    VIQdat=outpath+'/viq/'+dataset+'_VIQ.dat'
    PRdat=outpath+'/pixel_reliability/'+dataset+'_PR.dat'
    proclist=[['gdalTranslate', '-of','ENVI',[x for x in sdsdict if 'NDVI' in x[0]][0][0],NDVIdat],
    ['gdalTranslate', '-of','ENVI',[x for x in sdsdict if 'EVI' in x[0]][0][0],EVIdat],
    ['gdalTranslate', '-of','ENVI',[x for x in sdsdict if 'VI Quality' in x[0]][0][0],VIQdat],
    ['gdalTranslate', '-of','ENVI',[x for x in sdsdict if 'pixel reliability' in x[0]][0][0],PRdat]]
    b1name=proclist[0][3]
    b1=gdal.Open(b1name)
    rmlist=[]
    x=0
    for y in proclist:
        bandname=y[3]
        print('Processing ' +bandname)
        i=bandname.rfind(':')+1
        btag=bandname[i:]
        p=Popen(y)
        print(p.communicate())
    #    print('ENVI file created, updating header.')
        makemodishdrfile(y[4])
    hdf=None
    b1=None
    print('Processing complete.')
Example #17
	def new_sign_mark(self, name="", text=""):
		"""
		Find and create new sign mark
		"""
		old_dir = os.getcwd()
		os.chdir(self.directory)

		sh.git("pull")
		mark = self.last_sign_mark() + 1

		new_dir = str(mark).zfill(3) + '-' + name
		readme = os.path.join(new_dir, 'readme.md')

		os.mkdir(new_dir)

		with open(readme, "w") as file:
			file.write('# Spis ' + str(mark) + name)
			file.write(text)

		sh.git("add", readme)
		sh.git("push")

		os.chdir(old_dir)
Example #18
    def new_sign_mark(self, name="", text=""):
        """
		Find and create new sign mark
		"""
        old_dir = os.getcwd()
        os.chdir(self.directory)

        sh.git("pull")
        mark = self.last_sign_mark() + 1

        new_dir = str(mark).zfill(3) + '-' + name
        readme = os.path.join(new_dir, 'readme.md')

        os.mkdir(new_dir)

        with open(readme, "w") as file:
            file.write('# Spis ' + str(mark) + name)
            file.write(text)

        sh.git("add", readme)
        sh.git("push")

        os.chdir(old_dir)
Example #19
    def doLS(self,ev=None):
        error = ''
        tdir = self.cwd.get()  # what is this used for?
        if not tdir:
            tdir=os.curdir
        if not os.path.exists(tdir):
            error = tdir+':no such file'
        elif not os.path.isdir(tdir):
            error = tdir+':not a directory'

        if error:
            self.cwd.set(error)
            self.top.update()
            sleep(2)
            if not (hasattr(self,'last') and self.last): 
                # not (has a 'last' attribute and 'last' has a value); inverted, this is true when either condition fails
                self.last=os.curdir

            self.cwd.set(self.last)
            self.dirs.config(selectbackground='LightSkyBlue')
            self.top.update()
            return

        self.cwd.set('FETCHING DIRECTORY CONTENTS...')
        self.top.update()
        dirlist = os.listdir(tdir)
        dirlist.sort()
        os.chdir(tdir)
        self.dirl.config(text=os.getcwd())
        self.dirs.delete(0,END)
        self.dirs.insert(END,os.curdir)
        self.dirs.insert(END,os.pardir)
        for eachFile in dirlist:
            self.dirs.insert(END,eachFile)
        self.cwd.set(os.curdir)
        self.dirs.config(selectbackground='LightSkyBlue')
Example #20
def make_pf(source, target):
    curfolder = os.getcwd()
    src_folder = source
    tar_folder = target

    if (not (src_folder.endswith("/"))):
        src_folder += '/'
    dic = {}
    lst = os.listdir(src_folder)
    for item in lst:
        if (item != "Blank.jpg"):
            im = Image.open(src_folder + item, "r")
            xdim, ydim = im.size
            try:
                im = im.resize((int((xdim / ydim) * 150), 150))
                item = item.replace(".jpg", "")
                item = item.replace(".png", "")
                dic[item] = textExtract(im)
            except:
                pass

    try:
        os.chdir("profiles")
    except:
        os.mkdir("profiles")
        os.chdir("profiles")

    try:
        os.chdir(tar_folder)
    except:
        os.mkdir(tar_folder)
        os.chdir(tar_folder)

    for key in dic:
        dic[key].save(key + ".png")
    os.chdir(curfolder)
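The try/except blocks that create and then enter "profiles" and the target folder can be collapsed with os.makedirs(exist_ok=True); a short sketch, where the target name is hypothetical:

import os

target_dir = os.path.join("profiles", "some_target")  # hypothetical target name
os.makedirs(target_dir, exist_ok=True)  # creates both levels, ignores existing dirs
os.chdir(target_dir)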
Example #21
import os

if __name__=='__main__':
	print('somecode %s' % os.curdir)
Example #22
    def link(self,
             target_desc,
             objects,
             output_filename,
             output_dir=None,
             libraries=None,
             library_dirs=None,
             runtime_library_dirs=None,
             export_symbols=None,
             debug=0,
             extra_preargs=None,
             extra_postargs=None,
             build_temp=None,
             target_lang=None):
        # First fixup.
        (objects, output_dir) = self._fix_object_args(objects, output_dir)
        (libraries, library_dirs, runtime_library_dirs) = \
            self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)

        # First examine a couple of options for things that aren't implemented yet
        if not target_desc in (self.SHARED_LIBRARY, self.SHARED_OBJECT):
            raise DistutilsPlatformError, 'Can only make SHARED_LIBRARY or SHARED_OBJECT targets on the Mac'
        if runtime_library_dirs:
            raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
        if extra_preargs or extra_postargs:
            raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
        if len(export_symbols) != 1:
            raise DistutilsPlatformError, 'Need exactly one export symbol'
        # Next there are various things for which we need absolute pathnames.
        # This is because we (usually) create the project in a subdirectory of
        # where we are now, and keeping the paths relative is too much work right
        # now.
        sources = map(self._filename_to_abs, self.__sources)
        include_dirs = map(self._filename_to_abs, self.__include_dirs)
        if objects:
            objects = map(self._filename_to_abs, objects)
        else:
            objects = []
        if build_temp:
            build_temp = self._filename_to_abs(build_temp)
        else:
            build_temp = os.curdir
        if output_dir:
            output_filename = os.path.join(output_dir, output_filename)
        # The output filename needs special handling: splitting it into dir and
        # filename part. Actually I'm not sure this is really needed, but it
        # can't hurt.
        output_filename = self._filename_to_abs(output_filename)
        output_dir, output_filename = os.path.split(output_filename)
        # Now we need the short names of a couple of things for putting them
        # into the project.
        if output_filename[-8:] == '.ppc.slb':
            basename = output_filename[:-8]
        elif output_filename[-11:] == '.carbon.slb':
            basename = output_filename[:-11]
        else:
            basename = os.path.splitext(output_filename)[0]
        projectname = basename + '.mcp'
        targetname = basename
        xmlname = basename + '.xml'
        exportname = basename + '.mcp.exp'
        prefixname = 'mwerks_%s_config.h' % basename
        # Create the directories we need
        distutils.dir_util.mkpath(build_temp, dry_run=self.dry_run)
        distutils.dir_util.mkpath(output_dir, dry_run=self.dry_run)
        # And on to filling in the parameters for the project builder
        settings = {}
        settings['mac_exportname'] = exportname
        settings['mac_outputdir'] = output_dir
        settings['mac_dllname'] = output_filename
        settings['mac_targetname'] = targetname
        settings['sysprefix'] = sys.prefix
        settings['mac_sysprefixtype'] = 'Absolute'
        sourcefilenames = []
        sourcefiledirs = []
        for filename in sources + objects:
            dirname, filename = os.path.split(filename)
            sourcefilenames.append(filename)
            if not dirname in sourcefiledirs:
                sourcefiledirs.append(dirname)
        settings['sources'] = sourcefilenames
        settings['libraries'] = libraries
        settings[
            'extrasearchdirs'] = sourcefiledirs + include_dirs + library_dirs
        if self.dry_run:
            print 'CALLING LINKER IN', os.getcwd()
            for key, value in settings.items():
                print '%20.20s %s' % (key, value)
            return
        # Build the export file
        exportfilename = os.path.join(build_temp, exportname)
        log.debug("\tCreate export file %s", exportfilename)
        fp = open(exportfilename, 'w')
        fp.write('%s\n' % export_symbols[0])
        fp.close()
        # Generate the prefix file, if needed, and put it in the settings
        if self.__macros:
            prefixfilename = os.path.join(os.getcwd(),
                                          os.path.join(build_temp, prefixname))
            fp = open(prefixfilename, 'w')
            fp.write('#include "mwerks_shcarbon_config.h"\n')
            for name, value in self.__macros:
                if value is None:
                    fp.write('#define %s\n' % name)
                else:
                    fp.write('#define %s %s\n' % (name, value))
            fp.close()
            settings['prefixname'] = prefixname

        # Build the XML file. We need the full pathname (only lateron, really)
        # because we pass this pathname to CodeWarrior in an AppleEvent, and CW
        # doesn't have a clue about our working directory.
        xmlfilename = os.path.join(os.getcwd(),
                                   os.path.join(build_temp, xmlname))
        log.debug("\tCreate XML file %s", xmlfilename)
        xmlbuilder = mkcwproject.cwxmlgen.ProjectBuilder(settings)
        xmlbuilder.generate()
        xmldata = settings['tmp_projectxmldata']
        fp = open(xmlfilename, 'w')
        fp.write(xmldata)
        fp.close()
        # Generate the project. Again a full pathname.
        projectfilename = os.path.join(os.getcwd(),
                                       os.path.join(build_temp, projectname))
        log.debug('\tCreate project file %s', projectfilename)
        mkcwproject.makeproject(xmlfilename, projectfilename)
        # And build it
        log.debug('\tBuild project')
        mkcwproject.buildproject(projectfilename)
Example #23
def parseExptConfig(configFile, librariesToSublibrariesDict):
    parser = ConfigParser()
    results = parser.read(configFile)
    if len(results) == 0:
        return None, 1, 'Experiment config file not found'

    #output variables
    paramDict = dict()
    exitStatus = 0
    warningString = ''

    ##check all sections
    expectedSections = set([
        'experiment_settings', 'library_settings', 'counts_files',
        'filter_settings', 'sgrna_analysis', 'growth_values', 'gene_analysis'
    ])

    parsedSections = set(parser.sections())

    if len(expectedSections) != len(parsedSections) and len(
            expectedSections) != len(
                expectedSections.intersection(parsedSections)):
        return paramDict, 1, 'Config file does not have all required sections or has extraneous sections!\nExpected:' + ','.join(
            expectedSections) + '\nFound:' + ','.join(parsedSections)

    ##experiment settings
    if parser.has_option('experiment_settings', 'output_folder'):
        paramDict['output_folder'] = parser.get(
            'experiment_settings',
            'output_folder')  #ways to check this is a valid path?
    else:
        warningString += 'No output folder specified, defaulting to current directory\n.'
        paramDict['output_folder'] = os.curdir

    if parser.has_option('experiment_settings', 'experiment_name'):
        paramDict['experiment_name'] = parser.get('experiment_settings',
                                                  'experiment_name')
    else:
        warningString += 'No experiment name specified, defaulting to \'placeholder_expt_name\'\n.'
        paramDict['experiment_name'] = 'placeholder_expt_name'

    ##library settings
    libraryDict = librariesToSublibrariesDict
    if parser.has_option('library_settings', 'library'):
        parsedLibrary = parser.get('library_settings', 'library')

        if parsedLibrary.lower() in libraryDict:
            paramDict['library'] = parsedLibrary.lower()
        else:
            warningString += 'Library name \"%s\" not recognized\n' % parsedLibrary
            exitStatus += 1

    else:
        warningString += 'No library specified\n'
        exitStatus += 1
        parsedLibrary = ''

    if 'library' in paramDict:
        if parser.has_option('library_settings', 'sublibraries'):
            parsedSubList = parser.get('library_settings',
                                       'sublibraries').strip().split('\n')

            paramDict['sublibraries'] = []

            for sub in parsedSubList:
                sub = sub.lower()
                if sub in libraryDict[paramDict['library']]:
                    paramDict['sublibraries'].append(sub)

                else:
                    warningString += 'Sublibrary %s not recognized\n' % sub

        else:
            paramDict['sublibraries'] = libraryDict[paramDict['library']]

    ##counts files
    if parser.has_option('counts_files', 'counts_file_string'):
        countsFileString = parser.get('counts_files',
                                      'counts_file_string').strip()

        paramDict['counts_file_list'] = []

        for stringLine in countsFileString.split('\n'):
            stringLine = stringLine.strip()

            if len(stringLine.split(':')) != 2 or len(
                    stringLine.split('|')) != 2:
                warningString += 'counts file entry could not be parsed: ' + stringLine + '\n'
                exitStatus += 1

            else:
                parsedPath = stringLine.split(':')[0]

                if os.path.isfile(parsedPath) == False:
                    warningString += 'Counts file not found: ' + parsedPath + '\n'
                    exitStatus += 1

                condition, replicate = stringLine.split(':')[1].split('|')

                paramDict['counts_file_list'].append(
                    (condition, replicate, parsedPath))

    else:
        warningString += 'No counts files entered\n'
        exitStatus += 1

    ##filter settings
    filterOptions = ['either', 'both']
    if parser.has_option('filter_settings', 'filter_type') and parser.get(
            'filter_settings', 'filter_type').lower() in filterOptions:
        paramDict['filter_type'] = parser.get('filter_settings',
                                              'filter_type').lower()
    else:
        warningString += 'Filter type not set or not recognized, defaulting to \'either\'\n'
        paramDict['filter_type'] = 'either'

    if parser.has_option('filter_settings', 'minimum_reads'):
        try:
            paramDict['minimum_reads'] = parser.getint('filter_settings',
                                                       'minimum_reads')
        except ValueError:
            warningString += 'Minimum read value not an integer, defaulting to 0\n'  #recommended value is 50 but seems arbitrary to default to that
            paramDict['minimum_reads'] = 0
    else:
        warningString += 'Minimum read value not found, defaulting to 0\n'  #recommended value is 50 but seems arbitrary to default to that
        paramDict['minimum_reads'] = 0

    ##sgRNA Analysis
    if parser.has_option('sgrna_analysis', 'condition_string'):
        conditionString = parser.get('sgrna_analysis',
                                     'condition_string').strip()

        paramDict['condition_tuples'] = []

        if 'counts_file_list' in paramDict:
            expectedConditions = set(
                list(zip(*paramDict['counts_file_list']))[0])
        else:
            expectedConditions = []

        enteredConditions = set()

        for conditionStringLine in conditionString.split('\n'):
            conditionStringLine = conditionStringLine.strip()

            if len(conditionStringLine.split(':')) != 3:
                warningString += 'Phenotype condition line not understood: ' + conditionStringLine + '\n'
                exitStatus += 1
            else:
                phenotype, condition1, condition2 = conditionStringLine.split(
                    ':')

                if condition1 not in expectedConditions or condition2 not in expectedConditions:
                    warningString += 'One of the conditions entered does not correspond to a counts file: ' + conditionStringLine + '\n'
                    exitStatus += 1
                else:
                    paramDict['condition_tuples'].append(
                        (phenotype, condition1, condition2))
                    enteredConditions.add(condition1)
                    enteredConditions.add(condition2)

        if len(paramDict['condition_tuples']) == 0:
            warningString += 'No phenotype score/condition pairs found\n'
            exitStatus += 1

        unusedConditions = list(expectedConditions - enteredConditions)
        if len(unusedConditions) > 0:
            warningString += 'Some conditions assigned to counts files will not be incorporated in sgRNA analysis:\n' \
                + ','.join(unusedConditions) + '\n'

    else:
        warningString += 'No phenotype score/condition pairs entered\n'
        exitStatus += 1

    pseudocountOptions = ['zeros only', 'all values', 'filter out']
    if parser.has_option(
            'sgrna_analysis', 'pseudocount_behavior') and parser.get(
                'sgrna_analysis',
                'pseudocount_behavior').lower() in pseudocountOptions:
        paramDict['pseudocount_behavior'] = parser.get(
            'sgrna_analysis', 'pseudocount_behavior').lower()
    else:
        warningString += 'Pseudocount behavior not set or not recognized, defaulting to \'zeros only\'\n'
        paramDict['pseudocount_behavior'] = 'zeros only'

    if parser.has_option('sgrna_analysis', 'pseudocount'):
        try:
            paramDict['pseudocount'] = parser.getfloat('sgrna_analysis',
                                                       'pseudocount')
        except ValueError:
            warningString += 'Pseudocount value not an number, defaulting to 0.1\n'
            paramDict['pseudocount'] = 0.1
    else:
        warningString += 'Pseudocount value not found, defaulting to 0.1\n'
        paramDict['pseudocount'] = 0.1

    ##Growth Values
    if parser.has_option('growth_values', 'growth_value_string') and len(
            parser.get('growth_values', 'growth_value_string').strip()) != 0:
        growthValueString = parser.get('growth_values',
                                       'growth_value_string').strip()

        if 'condition_tuples' in paramDict and 'counts_file_list' in paramDict:
            expectedComparisons = set(
                list(zip(*paramDict['condition_tuples']))[0])
            expectedReplicates = set(
                list(zip(*paramDict['counts_file_list']))[1])

            expectedTupleList = []

            for comp in expectedComparisons:
                for rep in expectedReplicates:
                    expectedTupleList.append((comp, rep))
        else:
            expectedTupleList = []

        enteredTupleList = []
        growthValueTuples = []

        for growthValueLine in growthValueString.split('\n'):
            growthValueLine = growthValueLine.strip()

            linesplit = growthValueLine.split(':')

            if len(linesplit) != 3:
                warningString += 'Growth value line not understood: ' + growthValueLine + '\n'
                exitStatus += 1
                continue

            comparison = linesplit[0]
            replicate = linesplit[1]

            try:
                growthVal = float(linesplit[2])
            except ValueError:
                warningString += 'Growth value not a number: ' + growthValueLine + '\n'
                exitStatus += 1
                continue

            curTup = (comparison, replicate)
            if curTup in expectedTupleList:
                if curTup not in enteredTupleList:
                    enteredTupleList.append(curTup)
                    growthValueTuples.append(
                        (comparison, replicate, growthVal))

                else:
                    warningString += ':'.join(
                        curTup) + ' has multiple growth values entered\n'
                    exitStatus += 1
            else:
                warningString += ':'.join(
                    curTup
                ) + ' was not expected given the specified counts file assignments and sgRNA phenotypes\n'
                exitStatus += 1

        #because we enforced no duplicates or unexpected values these should match up unless there were values not entered
        #require all growth values to be explictly entered if some were
        if len(enteredTupleList) != len(expectedTupleList):
            warningString += 'Growth values were not entered for all expected comparisons/replicates. Expected: ' + \
                ','.join([':'.join(tup) for tup in expectedTupleList]) + '\nEntered: ' + \
                ','.join([':'.join(tup) for tup in enteredTupleList]) + '\n'
            exitStatus += 1
        else:
            paramDict['growth_value_tuples'] = growthValueTuples

    else:
        warningString += 'No growth values--all phenotypes will be reported as log2enrichments\n'

        paramDict['growth_value_tuples'] = []

        if 'condition_tuples' in paramDict and 'counts_file_list' in paramDict:
            expectedComparisons = set(
                list(zip(*paramDict['condition_tuples']))[0])
            expectedReplicates = set(
                list(zip(*paramDict['counts_file_list']))[1])

            for comp in expectedComparisons:
                for rep in expectedReplicates:
                    paramDict['growth_value_tuples'].append((comp, rep, 1))

    ##Gene Analysis
    if parser.has_option('gene_analysis', 'collapse_to_transcripts'):
        try:
            paramDict['collapse_to_transcripts'] = parser.getboolean(
                'gene_analysis', 'collapse_to_transcripts')
        except ValueError:
            warningString += 'Collapse to transcripts entry not a recognized boolean value\n'
            exitStatus += 1
    else:
        paramDict['collapse_to_transcripts'] = True
        warningString += 'Collapse to transcripts defaulting to True\n'

    #pseudogene parameters
    if parser.has_option('gene_analysis', 'generate_pseudogene_dist'):
        paramDict['generate_pseudogene_dist'] = parser.get(
            'gene_analysis', 'generate_pseudogene_dist').lower()

        if paramDict['generate_pseudogene_dist'] not in [
                'auto', 'manual', 'off'
        ]:
            warningString += 'Generate pseudogene dist entry not a recognized option\n'
            exitStatus += 1
    else:
        paramDict['generate_pseudogene_dist'] = False
        warningString += 'Generate pseudogene dist defaulting to False\n'

    if 'generate_pseudogene_dist' in paramDict and paramDict[
            'generate_pseudogene_dist'] == 'manual':
        if parser.has_option('gene_analysis', 'pseudogene_size'):
            try:
                paramDict['pseudogene_size'] = parser.getint(
                    'gene_analysis', 'pseudogene_size')
            except ValueError:
                warningString += 'Pseudogene size entry not a recognized integer value\n'
                exitStatus += 1
        else:
            warningString += 'No pseudogene size provided\n'
            exitStatus += 1

        if parser.has_option('gene_analysis', 'num_pseudogenes'):
            try:
                paramDict['num_pseudogenes'] = parser.getint(
                    'gene_analysis', 'num_pseudogenes')
            except ValueError:
                warningString += 'Pseudogene number entry not a recognized integer value\n'
                exitStatus += 1
        else:
            warningString += 'No pseudogene size provided\n'

    #list possible analyses in param dict as dictionary with keys = analysis and values = analysis-specific params

    paramDict['analyses'] = dict()

    #analyze by average of best n
    if parser.has_option('gene_analysis', 'calculate_ave'):
        try:
            if parser.getboolean('gene_analysis', 'calculate_ave') == True:
                paramDict['analyses']['calculate_ave'] = []
        except ValueError:
            warningString += 'Calculate ave entry not a recognized boolean value\n'
            exitStatus += 1

        if 'calculate_ave' in paramDict['analyses']:
            if parser.has_option('gene_analysis', 'best_n'):
                try:
                    paramDict['analyses']['calculate_ave'].append(
                        parser.getint('gene_analysis', 'best_n'))
                except ValueError:
                    warningString += 'Best_n entry not a recognized integer value\n'
                    exitStatus += 1
            else:
                warningString += 'No best_n value provided for average analysis function\n'
                exitStatus += 1
    else:
        warningString += 'Best n average analysis not specified, defaulting to False\n'

    #analyze by Mann-Whitney
    if parser.has_option('gene_analysis', 'calculate_mw'):
        try:
            if parser.getboolean('gene_analysis', 'calculate_mw') == True:
                paramDict['analyses']['calculate_mw'] = []
        except ValueError:
            warningString += 'Calculate Mann-Whitney entry not a recognized boolean value\n'
            exitStatus += 1

    #analyze by K-S, skipping for now

    #analyze by nth best sgRNA
    if parser.has_option('gene_analysis', 'calculate_nth'):
        try:
            if parser.getboolean('gene_analysis', 'calculate_nth') == True:
                paramDict['analyses']['calculate_nth'] = []
        except ValueError:
            warningString += 'Calculate best Nth sgRNA entry not a recognized boolean value\n'
            exitStatus += 1

        if 'calculate_nth' in paramDict['analyses']:
            if parser.has_option('gene_analysis', 'nth'):
                try:
                    paramDict['analyses']['calculate_nth'].append(
                        parser.getint('gene_analysis', 'nth'))
                except ValueError:
                    warningString += 'Nth best sgRNA entry not a recognized integer value\n'
                    exitStatus += 1
            else:
                warningString += 'No Nth best value provided for that analysis function\n'
                exitStatus += 1
    else:
        warningString += 'Nth best sgRNA analysis not specified, defaulting to False\n'

    if len(paramDict['analyses']) == 0:
        warningString += 'No analyses selected to compute gene scores\n'  #should this raise exitStatus?

    return paramDict, exitStatus, warningString
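For orientation, here is a hypothetical config file covering the seven sections parseExptConfig() expects; the option names come from the parser calls above, while all values are placeholders rather than a working experiment setup:

import configparser

cfg = configparser.ConfigParser()
cfg.read_dict({
    'experiment_settings': {'output_folder': 'results', 'experiment_name': 'demo'},
    'library_settings': {'library': 'example_library', 'sublibraries': 'sublib_a'},
    'counts_files': {'counts_file_string': 'counts/t0.txt:untreated|rep1'},
    'filter_settings': {'filter_type': 'either', 'minimum_reads': '50'},
    'sgrna_analysis': {'condition_string': 'gamma:untreated:untreated',
                       'pseudocount_behavior': 'zeros only', 'pseudocount': '0.1'},
    'growth_values': {'growth_value_string': ''},
    'gene_analysis': {'collapse_to_transcripts': 'True',
                      'calculate_ave': 'True', 'best_n': '3'},
})
with open('experiment_config.ini', 'w') as handle:
    cfg.write(handle)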
Example #24
def teste():
    print("teste")
    print(os.curdir)
Example #25
 def path(self):
     return self.config.path or os.curdir
Example #26
                  help='Site name for preset data directory (%default)')
parser.add_option('-D',
                  '--datdir',
                  default=None,
                  help='Sentinel-1 data directory (%default)')
(opts, args) = parser.parse_args()
if opts.datdir is None:
    if opts.site is not None:
        if opts.site.lower() == 'cihea':
            opts.datdir = os.path.join(DATDIR, 'Cihea', 'sigma0_speckle')
        elif opts.site.lower() == 'bojongsoang':
            opts.datdir = os.path.join(DATDIR, 'Bojongsoang', 'sigma0_speckle')
        else:
            raise ValueError('Error, unknown site >>> ' + opts.site)
    else:
        opts.datdir = os.curdir

angs = []
for f in sorted(os.listdir(opts.datdir)):
    m = re.search('(' + '\d' * 8 + ')_resample.tif', f)
    if not m:
        continue
    fnam = os.path.join(opts.datdir, f)
    dstr = m.group(1)
    #print(dstr)
    command = 'gdalinfo'
    command += ' ' + fnam
    out = check_output(command, shell=True)
    iangle = None
    direction = None
    for line in out.decode().splitlines():
Example #27
os.getegid()
os.geteuid()
os.getpid()
os.getppid()
os.getenv()
os.getenvb()
os.abort()
os.chmod()
os.chown()
os.close()
os.cpu_count()
os.kill()
os.open()
os.getgid()
os.chdir("dirname")                     # 改变当前脚本工作目录;相当于shell下cd
os.curdir()                             # 返回当前目录: ('.')
os.pardir()                             # 获取当前目录的父目录字符串名:('..')
os.mkdir('dirname')                     # 生成单级目录;相当于shell中mkdir dirname
os.makedirs('dirname1/dirname2')        # 可生成多层递归目录
os.rmdir('dirname')                     # 删除单级空目录,若目录不为空则无法删除,报错;相当于shell中rmdir dirname
os.removedirs('dirname1')               # 若目录为空,则删除,并递归到上一级目录,如若也为空,则删除,依此类推
os.listdir('dirname')                   # 列出指定目录下的所有文件和子目录,包括隐藏文件,并以列表方式打印
os.remove('1.txt')                      # 删除一个文件
os.rename("oldname","newname")          # 重命名文件/目录/移动目录/,只能同设备(在同一块磁盘,不同设备看下面shutil模块)。
os._exit(n)                             # 直接推出,不抛异常,不执行相关清理工作。常用在子进程中
os.stat('path/filename')                # 获取文件/目录信息
    # l = os.stat('path/filename')
    # print(list(l))
    # print(list(l)[2])
os.sep()                                # 输出操作系统特定的路径分隔符,win下为"\\",Linux下为"/"
os.linesep()                            # 输出当前平台使用的行分隔符,win下为"\t\n",Linux下为"\n"
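Note that os.curdir, os.pardir, os.sep and os.linesep are plain string constants, not callables; a quick demonstration:

import os

print(os.curdir)                         # '.'
print(os.pardir)                         # '..'
print(os.path.join(os.curdir, 'data'))   # './data' ('.\\data' on Windows)
print(repr(os.linesep))                  # '\r\n' on Windows, '\n' on Linux
print(os.getcwd())                       # absolute path of the current working directory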
Example #28
def parseExptConfig(configFile, librariesToSublibrariesDict):
    parser = SafeConfigParser()
    results = parser.read(configFile)
    if len(results) == 0:
        return None, 1, "Experiment config file not found"

    # output variables
    paramDict = dict()
    exitStatus = 0
    warningString = ""

    ##check all sections
    expectedSections = set(
        [
            "experiment_settings",
            "library_settings",
            "counts_files",
            "filter_settings",
            "sgrna_analysis",
            "growth_values",
            "gene_analysis",
        ]
    )

    parsedSections = set(parser.sections())

    if len(expectedSections) != len(parsedSections) and len(expectedSections) != len(
        expectedSections.intersection(parsedSections)
    ):
        return (
            paramDict,
            1,
            "Config file does not have all required sections or has extraneous sections!\nExpected:"
            + ",".join(expectedSections)
            + "\nFound:"
            + ",".join(parsedSections),
        )

    ##experiment settings
    if parser.has_option("experiment_settings", "output_folder"):
        paramDict["output_folder"] = parser.get(
            "experiment_settings", "output_folder"
        )  # ways to check this is a valid path?
    else:
        warningString += "No output folder specified, defaulting to current directory\n."
        paramDict["output_folder"] = os.curdir()

    if parser.has_option("experiment_settings", "experiment_name"):
        paramDict["experiment_name"] = parser.get("experiment_settings", "experiment_name")
    else:
        warningString += "No experiment name specified, defaulting to 'placeholder_expt_name'\n."
        paramDict["experiment_name"] = "placeholder_expt_name"

    ##library settings
    libraryDict = librariesToSublibrariesDict
    if parser.has_option("library_settings", "library"):
        parsedLibrary = parser.get("library_settings", "library")

        if parsedLibrary.lower() in libraryDict:
            paramDict["library"] = parsedLibrary.lower()
        else:
            warningString += 'Library name "%s" not recognized\n' % parsedLibrary
            exitStatus += 1

    else:
        warningString += "No library specified\n"
        exitStatus += 1
        parsedLibrary = ""

    if "library" in paramDict:
        if parser.has_option("library_settings", "sublibraries"):
            parsedSubList = parser.get("library_settings", "sublibraries").strip().split("\n")

            paramDict["sublibraries"] = []

            for sub in parsedSubList:
                sub = sub.lower()
                if sub in libraryDict[paramDict["library"]]:
                    paramDict["sublibraries"].append(sub)

                else:
                    warningString += "Sublibrary %s not recognized\n" % sub

        else:
            paramDict["sublibraries"] = libraryDict[paramDict["library"]]

    ##counts files
    if parser.has_option("counts_files", "counts_file_string"):
        countsFileString = parser.get("counts_files", "counts_file_string").strip()

        paramDict["counts_file_list"] = []

        for stringLine in countsFileString.split("\n"):
            stringLine = stringLine.strip()

            if len(stringLine.split(":")) != 2 or len(stringLine.split("|")) != 2:
                warningString += "counts file entry could not be parsed: " + stringLine + "\n"
                exitStatus += 1

            else:
                parsedPath = stringLine.split(":")[0]

                if os.path.isfile(parsedPath) == False:
                    warningString += "Counts file not found: " + parsedPath + "\n"
                    exitStatus += 1

                condition, replicate = stringLine.split(":")[1].split("|")

                paramDict["counts_file_list"].append((condition, replicate, parsedPath))

    else:
        warningString += "No counts files entered\n"
        exitStatus += 1

    ##filter settings
    filterOptions = ["either", "both"]
    if (
        parser.has_option("filter_settings", "filter_type")
        and parser.get("filter_settings", "filter_type").lower() in filterOptions
    ):
        paramDict["filter_type"] = parser.get("filter_settings", "filter_type").lower()
    else:
        warningString += "Filter type not set or not recognized, defaulting to 'either'\n"
        paramDict["filter_type"] = "either"

    if parser.has_option("filter_settings", "minimum_reads"):
        try:
            paramDict["minimum_reads"] = parser.getint("filter_settings", "minimum_reads")
        except ValueError:
            warningString += (
                "Minimum read value not an integer, defaulting to 0\n"
            )  # recommended value is 50 but seems arbitrary to default to that
            paramDict["minimum_reads"] = 0
    else:
        warningString += (
            "Minimum read value not found, defaulting to 0\n"
        )  # recommended value is 50 but seems arbitrary to default to that
        paramDict["minimum_reads"] = 0

    ##sgRNA Analysis
    if parser.has_option("sgrna_analysis", "condition_string"):
        conditionString = parser.get("sgrna_analysis", "condition_string").strip()

        paramDict["condition_tuples"] = []

        if "counts_file_list" in paramDict:
            expectedConditions = set(zip(*paramDict["counts_file_list"])[0])
        else:
            expectedConditions = []

        enteredConditions = set()

        for conditionStringLine in conditionString.split("\n"):
            conditionStringLine = conditionStringLine.strip()

            if len(conditionStringLine.split(":")) != 3:
                warningString += "Phenotype condition line not understood: " + conditionStringLine + "\n"
                exitStatus += 1
            else:
                phenotype, condition1, condition2 = conditionStringLine.split(":")

                if condition1 not in expectedConditions or condition2 not in expectedConditions:
                    warningString += (
                        "One of the conditions entered does not correspond to a counts file: "
                        + conditionStringLine
                        + "\n"
                    )
                    exitStatus += 1
                else:
                    paramDict["condition_tuples"].append((phenotype, condition1, condition2))
                    enteredConditions.add(condition1)
                    enteredConditions.add(condition2)

        if len(paramDict["condition_tuples"]) == 0:
            warningString += "No phenotype score/condition pairs found\n"
            exitStatus += 1

        unusedConditions = list(expectedConditions - enteredConditions)
        if len(unusedConditions) > 0:
            warningString += (
                "Some conditions assigned to counts files will not be incorporated in sgRNA analysis:\n"
                + ",".join(unusedConditions)
                + "\n"
            )

    else:
        warningString += "No phenotype score/condition pairs entered\n"
        exitStatus += 1

    pseudocountOptions = ["zeros only", "all values", "filter out"]
    if (
        parser.has_option("sgrna_analysis", "pseudocount_behavior")
        and parser.get("sgrna_analysis", "pseudocount_behavior").lower() in pseudocountOptions
    ):
        paramDict["pseudocount_behavior"] = parser.get("sgrna_analysis", "pseudocount_behavior").lower()
    else:
        warningString += "Pseudocount behavior not set or not recognized, defaulting to 'zeros only'\n"
        paramDict["pseudocount_behavior"] = "zeros only"

    if parser.has_option("sgrna_analysis", "pseudocount"):
        try:
            paramDict["pseudocount"] = parser.getfloat("sgrna_analysis", "pseudocount")
        except ValueError:
            warningString += "Pseudocount value not an number, defaulting to 0.1\n"
            paramDict["pseudocount"] = 0.1
    else:
        warningString += "Pseudocount value not found, defaulting to 0.1\n"
        paramDict["pseudocount"] = 0.1

    ##Growth Values
    if (
        parser.has_option("growth_values", "growth_value_string")
        and len(parser.get("growth_values", "growth_value_string").strip()) != 0
    ):
        growthValueString = parser.get("growth_values", "growth_value_string").strip()

        if "condition_tuples" in paramDict and "counts_file_list" in paramDict:
            expectedComparisons = set(zip(*paramDict["condition_tuples"])[0])
            expectedReplicates = set(zip(*paramDict["counts_file_list"])[1])

            expectedTupleList = []

            for comp in expectedComparisons:
                for rep in expectedReplicates:
                    expectedTupleList.append((comp, rep))
        else:
            expectedTupleList = []

        enteredTupleList = []
        growthValueTuples = []

        for growthValueLine in growthValueString.split("\n"):
            growthValueLine = growthValueLine.strip()

            linesplit = growthValueLine.split(":")

            if len(linesplit) != 3:
                warningString += "Growth value line not understood: " + growthValueLine + "\n"
                exitStatus += 1
                continue

            comparison = linesplit[0]
            replicate = linesplit[1]

            try:
                growthVal = float(linesplit[2])
            except ValueError:
                warningString += "Growth value not a number: " + growthValueLine + "\n"
                exitStatus += 1
                continue

            curTup = (comparison, replicate)
            if curTup in expectedTupleList:
                if curTup not in enteredTupleList:
                    enteredTupleList.append(curTup)
                    growthValueTuples.append((comparison, replicate, growthVal))

                else:
                    warningString += ":".join(curTup) + " has multiple growth values entered\n"
                    exitStatus += 1
            else:
                warningString += (
                    ":".join(curTup)
                    + " was not expected given the specified counts file assignments and sgRNA phenotypes\n"
                )
                exitStatus += 1

        # because we enforced no duplicates or unexpected values these should match up unless there were values not entered
        # require all growth values to be explictly entered if some were
        if len(enteredTupleList) != len(expectedTupleList):
            warningString += (
                "Growth values were not entered for all expected comparisons/replicates. Expected: "
                + ",".join([":".join(tup) for tup in expectedTupleList])
                + "\nEntered: "
                + ",".join([":".join(tup) for tup in enteredTupleList])
                + "\n"
            )
            exitStatus += 1
        else:
            paramDict["growth_value_tuples"] = growthValueTuples

    else:
        warningString += "No growth values--all phenotypes will be reported as log2enrichments\n"

        paramDict["growth_value_tuples"] = []

        if "condition_tuples" in paramDict and "counts_file_list" in paramDict:
            expectedComparisons = set(zip(*paramDict["condition_tuples"])[0])
            expectedReplicates = set(zip(*paramDict["counts_file_list"])[1])

            for comp in expectedComparisons:
                for rep in expectedReplicates:
                    paramDict["growth_value_tuples"].append((comp, rep, 1))

    ##Gene Analysis
    if parser.has_option("gene_analysis", "collapse_to_transcripts"):
        try:
            paramDict["collapse_to_transcripts"] = parser.getboolean("gene_analysis", "collapse_to_transcripts")
        except ValueError:
            warningString += "Collapse to transcripts entry not a recognized boolean value\n"
            exitStatus += 1
    else:
        paramDict["collapse_to_transcripts"] = True
        warningString += "Collapse to transcripts defaulting to True\n"

    # pseudogene parameters
    if parser.has_option("gene_analysis", "generate_pseudogene_dist"):
        paramDict["generate_pseudogene_dist"] = parser.get("gene_analysis", "generate_pseudogene_dist").lower()

        if paramDict["generate_pseudogene_dist"] not in ["auto", "manual", "off"]:
            warningString += "Generate pseudogene dist entry not a recognized option\n"
            exitStatus += 1
    else:
        paramDict["generate_pseudogene_dist"] = False
        warningString += "Generate pseudogene dist defaulting to False\n"

    if "generate_pseudogene_dist" in paramDict and paramDict["generate_pseudogene_dist"] == "manual":
        if parser.has_option("gene_analysis", "pseudogene_size"):
            try:
                paramDict["pseudogene_size"] = parser.getint("gene_analysis", "pseudogene_size")
            except ValueError:
                warningString += "Pseudogene size entry not a recognized integer value\n"
                exitStatus += 1
        else:
            warningString += "No pseudogene size provided\n"
            exitStatus += 1

        if parser.has_option("gene_analysis", "num_pseudogenes"):
            try:
                paramDict["num_pseudogenes"] = parser.getint("gene_analysis", "num_pseudogenes")
            except ValueError:
                warningString += "Pseudogene number entry not a recognized integer value\n"
                exitStatus += 1
        else:
            warningString += "No pseudogene size provided\n"

    # list possible analyses in param dict as dictionary with keys = analysis and values = analysis-specific params

    paramDict["analyses"] = dict()

    # analyze by average of best n
    if parser.has_option("gene_analysis", "calculate_ave"):
        try:
            if parser.getboolean("gene_analysis", "calculate_ave") == True:
                paramDict["analyses"]["calculate_ave"] = []
        except ValueError:
            warningString += "Calculate ave entry not a recognized boolean value\n"
            exitStatus += 1

        if "calculate_ave" in paramDict["analyses"]:
            if parser.has_option("gene_analysis", "best_n"):
                try:
                    paramDict["analyses"]["calculate_ave"].append(parser.getint("gene_analysis", "best_n"))
                except ValueError:
                    warningString += "Best_n entry not a recognized integer value\n"
                    exitStatus += 1
            else:
                warningString += "No best_n value provided for average analysis function\n"
                exitStatus += 1
    else:
        warningString += "Best n average analysis not specified, defaulting to False\n"

    # analyze by Mann-Whitney
    if parser.has_option("gene_analysis", "calculate_mw"):
        try:
            if parser.getboolean("gene_analysis", "calculate_mw") == True:
                paramDict["analyses"]["calculate_mw"] = []
        except ValueError:
            warningString += "Calculate Mann-Whitney entry not a recognized boolean value\n"
            exitStatus += 1

    # analyze by K-S, skipping for now

    # analyze by nth best sgRNA
    if parser.has_option("gene_analysis", "calculate_nth"):
        try:
            if parser.getboolean("gene_analysis", "calculate_nth") == True:
                paramDict["analyses"]["calculate_nth"] = []
        except ValueError:
            warningString += "Calculate best Nth sgRNA entry not a recognized boolean value\n"
            exitStatus += 1

        if "calculate_nth" in paramDict["analyses"]:
            if parser.has_option("gene_analysis", "nth"):
                try:
                    paramDict["analyses"]["calculate_nth"].append(parser.getint("gene_analysis", "nth"))
                except ValueError:
                    warningString += "Nth best sgRNA entry not a recognized integer value\n"
                    exitStatus += 1
            else:
                warningString += "No Nth best value provided for that analysis function\n"
                exitStatus += 1
    else:
        warningString += "Nth best sgRNA analysis not specified, defaulting to False\n"

    if len(paramDict["analyses"]) == 0:
        warningString += "No analyses selected to compute gene scores\n"  # should this raise exitStatus?

    return paramDict, exitStatus, warningString
Example #29
 soup=BeautifulSoup(temp)
 frames=soup.find_all(attrs={'class':'list-item'})
 for item in frames:
     sec_frame=item.find(attrs={'class':'detail'})
     title=sec_frame.h3.a['title']
     href=sec_frame.h3.a['href']
     try:judge_href=sec_frame.find(attrs={'class':'score-dot'})['href']
     except:judge_href='-'
     store_title=sec_frame.find(attrs={'class':'store'})['title']
     store_href=sec_frame.find(attrs={'class':'store'})['href']
     try:is_top_rate_seller=sec_frame.find(attrs={'class':'top-rated-seller'}).contents[0]
     except:is_top_rate_seller='NO'
     thr_frame=item.find(attrs={'class':'info infoprice'})
     price=thr_frame.span.span.contents[0]
     price_unit=thr_frame.span.find_all('span')[-1].text
     try:del_price=thr_frame.find(attrs={'class':'original-price'}).contents[0]
     except:del_price='-'
     try:shipping_service=thr_frame.strong.contents[0]
     except:shipping_service='-'
     try:
         product_rate=thr_frame.find(itemprop="ratingValue").contents[0]
         feedback=thr_frame.find(attrs={'class':'rate-num'}).contents[0]
     except:product_rate='-';feedback='-'
     orders=thr_frame.find(title="Total Orders").contents[0]
     result=[title,href,judge_href,store_title,is_top_rate_seller,store_href,price,price_unit,
             del_price,shipping_service,product_rate,feedback,orders]
     print('='*80)
     for item in result:
         print(item)
 os.chdir(file_name)
Example #30
 def path(self):
     return self.config.path or os.curdir
Example #31
 def curdir(self):
     f = self.environment['__file__']
     if f is None:
         return os.curdir
     else:
         return os.path.dirname(f)
Example #32
"""distutils.mwerkscompiler
Example #33
def main():
    print("__file__", __file__)
    print("os.curdir()", os.curdir())

    print()
Example #34
    def __init__(self, parent=None):
        super(FrontEnd, self).__init__(parent)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

        # Header image.. Need to get a Quality image
        self.hlabel = QLabel()
        self.hpixmap = QPixmap(os.curdir + '/images/header.png')  # os.curdir is '.', not callable; use a forward-slash path like the footer image below
        self.hlabel.setPixmap(self.hpixmap)
        #self.hlabel.setMaximumWidth(400)

        # Password Stuff
        self._password = qt.QLabel('Please Provide a Password')
        self._passwordEdit = qt.QLineEdit()
        self._passwordEdit.setMaximumWidth(500)
        self._passwordEdit.setText("Password")
        self._passwordEdit.setEchoMode(qt.QLineEdit.Password)
        self._passwordEdit.setMaximumWidth(400)

        # Get the folder path
        self._path = qt.QLabel("Please Provide a Path")
        self._path.setMaximumWidth(500)
        self._pathEdit = qt.QLineEdit()
        self._pathEdit.setText("/home or C:\\")
        self._pathEdit.setMaximumWidth(400)

        # Encrypt Button
        self._encryptButton = qt.QPushButton("Encrypt")
        self._encryptButton.clicked.connect(self.encryptFolder)
        #self._encryptButton.setStyleSheet("background-color: #79ff4d")
        self._encryptButton.setMaximumWidth(400)

        # Decrypt Button
        self._decryptButton = qt.QPushButton("Decrypt")
        self._decryptButton.clicked.connect(self.decryptFolder)
        #self._decryptButton.setStyleSheet("background-color: #ff4d4d")
        self._decryptButton.setMaximumWidth(400)

        # Footer Need to get a Quality image
        self.flabel = QLabel()
        self.fpixmap = QPixmap(os.curdir + '/images/footer.png')
        self.flabel.setPixmap(self.fpixmap)

        # Add widgets to the grid
        self.widget = QWidget()
        self.grid = qt.QGridLayout(self.widget)
        self.grid.setSpacing(10)
        self.grid.maximumSize()
        self.grid.addWidget(self.hlabel, 0, 0)
        self.grid.addWidget(self._password, 1, 0)
        self.grid.addWidget(self._passwordEdit, 2, 0)
        self.grid.addWidget(self._path, 3, 0)
        self.grid.addWidget(self._pathEdit, 4, 0)
        self.grid.addWidget(self._encryptButton, 5, 0)
        self.grid.addWidget(self._decryptButton, 6, 0)
        self.grid.addWidget(self.flabel, 7, 0)

        self.setLayout(self.grid)

        self.setGeometry(350, 300, 350, 300)
        self.setWindowTitle("Enkryptor- Keeps your files safe")
        self.show()
Example #35
0
def to_abs_model_file_path(model_file_path):
    if model_file_path is None:
        model_file_path = os.getcwd()  # os.curdir is just '.'; getcwd() returns the absolute current directory
    elif not os.path.isabs(model_file_path):
        model_file_path = os.path.join(os.getcwd(), model_file_path)  # anchor relative paths to the working directory
    return model_file_path
Example #36
0
import numpy as np
import pandas as pd  # needed for pd.read_csv below
from sklearn import metrics
import time
import os
import glob
import re

# In[14]:

#read in the answers
answers = pd.read_csv('test_actuals.csv',
                      index_col=0,
                      header=None,
                      names=['Genres'])

path = os.getcwd()  # os.curdir is '.', not a function; getcwd() returns the working-directory path
extension = 'csv'
os.chdir(path)
files = glob.glob('*.{}'.format(extension))
print(files)

# In[35]:

#create dictionary to store results
results = {}

# In[ ]:

# loop over the list read the file, and score the answers

# In[36]:
Example #37
0
 def get_idx_mock():
     ""
     p = os.getcwd()  # os.curdir is the string '.', not a callable
     return list_files_ext(p,'png')
Example #38
0
def getDrive(path=None):
    if not path: path = os.getcwd()  # a real path is needed to extract a drive letter; os.curdir is just '.'
    match = drivePattern.search(path)
    if not match:
        return ''
    return match.group('drive')
Example #39
0
def main():
    node_regs = ["CFixedIntervalsNRD", "CICPCriteriaNRD"]
    edge_regs = ["CICPCriteriaERD", "CLoopCloserERD"]
    optimizers = ["CLevMarqGSO"]

    datasets_dir = os.environ.get('dataset') or os.getcwd()  # .get() lets a missing variable fall through instead of raising KeyError; os.curdir is not callable
    datasets_dir += os.sep
    dataset_files = ["action_observations_map_short", "observation_only_short"]
    datasets = [datasets_dir + os.sep + d + os.sep for d in dataset_files]

    config_file = "".join([
        "{home}".format(home=os.environ["HOME"]), os.sep,
        "sharedWros/codes/various/mrpt/share/mrpt/config_files/",
        "graphslam-engine/odometry_2DRangeScans.ini"
    ])

    visuals = [True, False]

    combs = [[i, j, k, l, m] for i in node_regs for j in edge_regs
             for k in optimizers for l in datasets for m in visuals]
    total_combs = len(combs)

    # find the graphslam-engine executable
    bash_mrpt_var = "MRPT_DIR"

    if bash_mrpt_var in os.environ:
        mrpt_path = os.environ[bash_mrpt_var]
        app_path = "".join(
            [mrpt_path, os.sep, "bin", os.sep, "graphslam-engine"])
    elif subprocess.call(["which", "graphslam-engine"]) == 0:
        # `which` exits with 0 when the executable is found on the user's PATH
        app_path = "graphslam-engine"  # already in path
    else:  # can't be found - inform user
        raise MRPTExecutableNotFoundError("graphslam-engine")

    errors_num = 0

    # take all the possible combinations and execute graphSLAM
    for comb_i, curr_comb in enumerate(combs):
        #
        # Announce operation
        #
        intro_msg = "# {}/{}".format(comb_i + 1, total_combs)
        tPrint(intro_msg)
        node = curr_comb[0]
        edge = curr_comb[1]
        opt = curr_comb[2]
        dataset = curr_comb[3]
        disable_visuals = curr_comb[4] is False

        current_run_input = ""
        current_run_input += "\tNode Registration Decider: {}\n".format(node)
        current_run_input += "\tEdge Registration Decider: {}\n".format(edge)
        current_run_input += "\tGraphSLAM Optimizer      : {}\n".format(opt)
        current_run_input += "\tDataset                  : {}\n".format(
            dataset)
        current_run_input += "\t.ini Configuration File  : {}\n".format(
            config_file)
        current_run_input += "\tDisable visuals          : {}\n".format(
            disable_visuals)
        print(current_run_input)

        #
        # Execute it
        #
        redirect_null = True
        command_parts = [app_path]
        command_parts.extend(["--node-reg", node])
        command_parts.extend(["--edge-reg", edge])
        command_parts.extend(["--optimizer", opt])
        command_parts.extend(["--ini-file", config_file])
        command_parts.extend(["--rawlog", dataset + "basic.rawlog"])
        command_parts.extend(
            ["--ground-truth", dataset + "basic.rawlog.GT.txt"])
        if disable_visuals:
            command_parts.extend(["--disable-visuals"])

        if redirect_null:
            with open(os.devnull, 'w') as fp:
                result = subprocess.Popen(command_parts, stdout=fp).wait()
        else:
            result = subprocess.Popen(command_parts).wait()

        #
        # Announce results
        #
        output_dir_name_init = "graphslam_results"
        dir_exists = os.path.isdir(output_dir_name_init)
        if result == 0:
            sPrint(intro_msg)
            if dir_exists:
                print("Removing the graphslam_results directory")
                shutil.rmtree(output_dir_name_init)
        else:
            errors_num += 1
            code = "Error code: {r}".format(r=result)

            # is there a graphslam_results directory generated?
            if dir_exists:
                # If directory was created, rename it and add a new file in it
                # with the statistics of the current run

                # date string
                date_string = "Automatically generated file - {}".format(
                    time.strftime("%c"))
                date_string.replace(":", "_")
                date_string.replace(" ", "_")

                # new file
                with open(output_dir_name_init + os.sep + "test_info",
                          "w") as f:
                    f.writelines([
                        i + os.linesep
                        for i in [date_string, "=" * 60, current_run_input]
                    ])

                # moving results file
                output_dir_name_moved = "{d}_graphslam_results_test_{i}_of_{t}".format(
                    d=date_string, i=comb_i + 1, t=total_combs)
                os.rename(output_dir_name_init, output_dir_name_moved)
                logdir = "For more info on this see directory \"{d}\"".format(
                    d=output_dir_name_moved)
            else:
                logdir = "Results directory was not generated."

            ePrint("\t\n".join([intro_msg, code, logdir]))

    # total report
    iPrint("Summary:")
    if errors_num == 0:
        print("No execution errors were reported... {col}OK".format(
            col=Fore.GREEN))  # green for the success message; the failure branch below stays red
    else:
        print("Failed executions: {}/{}... {col}ERROR".format(errors_num,
                                                              total_combs,
                                                              col=Fore.RED))
Example #40
0
#_*_coding:utf-8_*_

# View the current directory

import os

print os.getcwd()
print os.curdir  # the constant string '.', not a function
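An editorial aside on the two lines above: os.getcwd() is the call that returns the working directory, while os.curdir is only the literal string '.'; os.path.abspath bridges the two. A minimal Python 3 sketch:

import os

print(os.curdir)                    # '.' - a plain string constant
print(os.getcwd())                  # absolute path of the current working directory
print(os.path.abspath(os.curdir))   # same value as os.getcwd()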
Example #41
0
import sys
print sys.argv
command = sys.argv[1]
path = sys.argv[2]
import time
for i in range(10):
    sys.stdout.write('#')
    time.sleep(0.1)
    sys.stdout.flush()  # without flush() the ten '#' characters would sit in the buffer and appear all at once;
    # with flush() each one is written immediately, so it behaves like a progress bar


# 4: the os module
import os
os.getcwd()  # get the current working directory
os.chdir("test1")  # change the script's working directory, like `cd` in a shell
os.curdir  # the current-directory string ('.') - a constant, not a function
os.pardir  # the parent-directory string ('..') - also a constant
os.makedirs("dirname/dirname1")  # create multi-level (recursive) directories
os.removedirs("dirname1")  # remove the directory if empty, then recurse up one level
os.mkdir("dirname")  # create a single-level directory
os.rmdir("dirname")  # remove a single-level empty directory
os.listdir(".")  # list all files and subdirectories of the given directory as a list
# If a directory is empty it is removed and the walk recurses upward: removing dirname1/dirname2
# deletes dirname2, then dirname1 as well if dirname1 is now empty
os.removedirs('dirname1/dirname2')
os.rename('name1', 'name2')
os.stat('day_01_str.py')  # get file/directory status information
os.sep  # the platform-specific path separator: '\\' on Windows, '/' on Linux
os.linesep  # the platform's line terminator: '\r\n' on Windows, '\n' on Linux
os.pathsep  # the separator used in search-path strings: ';' on Windows, ':' on Linux
os.system("bash command")  # run a shell command and show its output directly
os.environ  # mapping of environment variables (a dict-like object, not a function)
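To complement the reference list above, here is a short, self-contained demonstration of a few of those calls; it runs inside a temporary directory so nothing outside it is created or removed (the directory names are illustrative):

import os
import tempfile

start = os.getcwd()
with tempfile.TemporaryDirectory() as tmp:
    os.chdir(tmp)                          # change the working directory
    os.makedirs("dirname/dirname1")        # create nested directories recursively
    os.rename("dirname/dirname1", "dirname/dirname2")
    print(os.listdir("dirname"))           # ['dirname2']
    os.removedirs("dirname/dirname2")      # removes dirname2, then the now-empty dirname
    print(os.curdir, os.pardir, os.sep)    # '.', '..' and the platform path separator
    os.chdir(start)                        # return to the original directory before cleanup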
Example #42
0
def main():
    # Look for a ProPresenter installation to use default values from.
    if pro6_install:
        default_size = (pro6_install.output_width, pro6_install.output_height)
        scaling = {v: k
                   for k, v in SCALE_MODES.items()
                   }.get(pro6_install.foreground_scaling)
        library = pro6_install.get_library()
    else:
        default_size = (1280, 720)
        scaling = "fit"
        library = None

    parser = ArgumentParser(
        description=
        "Loads a directory or list of images into a ProPresenter document.")
    parser.add_argument("files",
                        type=str,
                        nargs='+',
                        help="Path(s) of files or directories to import.")
    parser.add_argument("--title", type=str, help="The title of the document.")
    parser.add_argument("--category",
                        type=str,
                        default="Presentation",
                        help="The type of document being created.")
    parser.add_argument("--width",
                        type=int,
                        default=default_size[0],
                        help="The width of the created document.")
    parser.add_argument("--height",
                        type=int,
                        default=default_size[1],
                        help="The height of the created document.")
    parser.add_argument("--interval",
                        type=int,
                        help="Seconds between each slide on the timeline.")
    parser.add_argument("--loop",
                        action="store_true",
                        help="If the timeline should loop.")
    parser.add_argument("--scaling",
                        type=str.lower,
                        choices=list(SCALE_MODES.keys()),
                        default=scaling,
                        help="How the image should be scaled to the document.")
    parser.add_argument(
        "--outdir",
        type=str,
        help="The directory where the document should be saved.")
    args = parser.parse_args()

    # Validate some arguments
    if args.width <= 0:
        print("Invalid image width: %i - must be a positive integer." %
              args.width)
    if args.height <= 0:
        print("Invalid image height: %i - must be a positive integer." %
              args.height)

    # Create a new document with the given settings.
    print("Creating '%s' document with resolution %i x %i..." %
          (args.category, args.width, args.height))
    doc = PresentationDocument(args.category, args.height, args.width)

    # Import files to the document.
    for item in args.files:
        import_file(doc, item)
    print("DONE! Imported %i files to document." % len(doc.slides()))

    # Setup the optional timeline.
    if args.interval:
        print("Setting timeline interval to %i seconds (loop: %s)." %
              (args.interval, args.loop))
        doc.create_slideshow(args.interval, args.loop)

    # Determine a title for the document if none specified.
    title = args.title
    if not title:
        if len(args.files) == 1:
            title = path.basename(path.splitext(args.files[0])[0])
        else:
            title = "Imported files"

    # Save the document to disk (prefers: --outdir, active library, current directory)
    doc.path = path.join(args.outdir or library or curdir, title + ".pro6")  # curdir is the constant '.', not a callable
    doc.write()
    if library and not args.outdir:
        print("Document saved to library: %s" % title)
    else:
        print("Document saved: %s" % doc.path)
Example #43
0
os.chdir(path)

out_path = r'E:\just language\reports\data\out'
is_exist = os.path.exists(out_path)

if not is_exist:
    os.makedirs(out_path)

# Does the file exist?
os.path.isfile(filename)

# List directories and files
os.listdir(path)

# The current directory ('.' - a constant; use os.getcwd() for the full path)
os.curdir

# Is it a directory?
os.path.isdir(path)

# Size in bytes
os.path.getsize(path)

# Split a path into directory and file name
os.path.split(path)

# Absolute path
os.path.abspath(path)

# Split off the file extension
os.path.splitext(path)
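A small sketch tying the helpers above together on one hypothetical path (none of these calls require the file to exist):

import os

p = os.path.join("reports", "data", "out", "summary.csv")   # hypothetical path
print(os.path.split(p))      # (directory part, file name)
print(os.path.splitext(p))   # (path without extension, '.csv')
print(os.path.abspath(p))    # resolved against the current working directory
print(os.path.isfile(p), os.path.isdir(p))   # both False unless the path really exists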
Example #44
0
def find_dat_files(directory=None):
    if directory is None:
        directory = os.curdir  # '.' is fine here since glob joins it with the pattern; os.curdir is not callable
    data_list = sorted(glob.glob(os.path.join(directory, '*.dat')))
    return data_list
Example #45
0
are messages and output files makes a call/return model less useful.
Suggested enhancement: could be extended to allow multiple sets
of command-line arguments and/or inputs per test script, to run a
script multiple times (glob for multiple ".in*" files in Inputs?; a sketch of that idea follows this example).
Might also seem simpler to store all test files in same directory
with different extensions, but this could grow large over time.
Could also save both stderr and stdout to Errors on failures, but
I prefer to have expected/actual output in Outputs on regressions.
################################################################################
"""

import sys, os, glob,time
from subprocess import Popen,PIPE

#configure args
testdir = sys.argv[1] if len(sys.argv) > 1 else os.curdir  # default to '.'; os.curdir is a string constant, not a function
forcegen = len(sys.argv) > 2
print('Start tester: ',time.asctime())
print('in ',os.path.abspath(testdir))

def verbose(*args):
    print('-'*80)
    for arg in args:
        print(arg)
        
def quiet(*args): pass
trace = quiet

#glob scripts to be tested
testpatt = os.path.join(testdir,'Scripts','*.py')
testfiles = glob.glob(testpatt)
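The docstring above floats the idea of globbing multiple ".in*" files per script; a minimal sketch of that enhancement, where run_one is a hypothetical stand-in for the tester's run-and-compare step (not part of the original):

import os, glob

def input_sets(testdir, script):
    """Return every Inputs/<script>.in* file for a script, or [None] if there are none."""
    base = os.path.basename(script)
    pattern = os.path.join(testdir, 'Inputs', base + '.in*')
    matches = sorted(glob.glob(pattern))
    return matches if matches else [None]

# for script in testfiles:
#     for inputfile in input_sets(testdir, script):
#         run_one(script, inputfile)   # hypothetical: run the script with this input and diff its output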
Example #46
0
import sqlite3
import os
import os.path
import sys

from twisted.internet.task import LoopingCall

try:
    # determine if application is a script file or frozen exe
    if hasattr(sys, 'frozen'):
        DB_DIR = os.path.dirname(sys.executable)
    else:
        DB_DIR = os.path.dirname(os.path.abspath(__file__))
except:
    DB_DIR = os.curdir  # fall back to '.'; os.curdir is a string constant, not a callable

VERSION_FN = os.path.join(DB_DIR, 'db-version')
DB_FN = os.path.join(DB_DIR, 'stats.db')

class Database():
    def __init__(self,bitHopper):
        self.curs = None
        self.bitHopper = bitHopper
        self.pool = bitHopper.pool
        self.check_database()
        
        self.shares = {}
        self.rejects = {}
        self.payout = {}
        self.user = {}
Example #47
0
import os, time, pickle
import collabFiltering, preprocess

start_time = time.time()

"""
Parameters
----------
"""
m = 5 # number of items to recommend
n = 100 # number of more similar neighbours to visit
dist = "cosine" # distance to use: "cosine", "jaccard" or "basic"
normalizing = False # "True" to use the normalizing datas, "False" otherwise
U = 6 # recommend movies to U

path = os.getcwd()  # remember the starting directory for the os.chdir below; os.curdir is only '.'

"""
Generate the "Movies" file
--------------------------
"""
listfiles = os.listdir(os.curdir)
if "Movies" not in listfiles:
    preprocess.CreateMovies()

fMovies = open("Movies","rb")
pM = pickle.Unpickler(fMovies)
Movies = pM.load()
fMovies.close()

os.chdir(path+"/training_set")
Example #48
0
    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        # First fixup.
        (objects, output_dir) = self._fix_object_args (objects, output_dir)
        (libraries, library_dirs, runtime_library_dirs) = \
            self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)

        # First examine a couple of options for things that aren't implemented yet
        if not target_desc in (self.SHARED_LIBRARY, self.SHARED_OBJECT):
            raise DistutilsPlatformError, 'Can only make SHARED_LIBRARY or SHARED_OBJECT targets on the Mac'
        if runtime_library_dirs:
            raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
        if extra_preargs or extra_postargs:
            raise DistutilsPlatformError, 'Extra pre/post args not implemented yet'
        if len(export_symbols) != 1:
            raise DistutilsPlatformError, 'Need exactly one export symbol'
        # Next there are various things for which we need absolute pathnames.
        # This is because we (usually) create the project in a subdirectory of
        # where we are now, and keeping the paths relative is too much work right
        # now.
        sources = map(self._filename_to_abs, self.__sources)
        include_dirs = map(self._filename_to_abs, self.__include_dirs)
        if objects:
            objects = map(self._filename_to_abs, objects)
        else:
            objects = []
        if build_temp:
            build_temp = self._filename_to_abs(build_temp)
        else:
            build_temp = os.curdir  # the '.' constant; os.curdir is not callable
        if output_dir:
            output_filename = os.path.join(output_dir, output_filename)
        # The output filename needs special handling: splitting it into dir and
        # filename part. Actually I'm not sure this is really needed, but it
        # can't hurt.
        output_filename = self._filename_to_abs(output_filename)
        output_dir, output_filename = os.path.split(output_filename)
        # Now we need the short names of a couple of things for putting them
        # into the project.
        if output_filename[-8:] == '.ppc.slb':
            basename = output_filename[:-8]
        elif output_filename[-11:] == '.carbon.slb':
            basename = output_filename[:-11]
        else:
            basename = os.path.splitext(output_filename)[0]  # os.path has no strip(); splitext drops the extension as intended
        projectname = basename + '.mcp'
        targetname = basename
        xmlname = basename + '.xml'
        exportname = basename + '.mcp.exp'
        prefixname = 'mwerks_%s_config.h'%basename
        # Create the directories we need
        distutils.dir_util.mkpath(build_temp, dry_run=self.dry_run)
        distutils.dir_util.mkpath(output_dir, dry_run=self.dry_run)
        # And on to filling in the parameters for the project builder
        settings = {}
        settings['mac_exportname'] = exportname
        settings['mac_outputdir'] = output_dir
        settings['mac_dllname'] = output_filename
        settings['mac_targetname'] = targetname
        settings['sysprefix'] = sys.prefix
        settings['mac_sysprefixtype'] = 'Absolute'
        sourcefilenames = []
        sourcefiledirs = []
        for filename in sources + objects:
            dirname, filename = os.path.split(filename)
            sourcefilenames.append(filename)
            if not dirname in sourcefiledirs:
                sourcefiledirs.append(dirname)
        settings['sources'] = sourcefilenames
        settings['libraries'] = libraries
        settings['extrasearchdirs'] = sourcefiledirs + include_dirs + library_dirs
        if self.dry_run:
            print 'CALLING LINKER IN', os.getcwd()
            for key, value in settings.items():
                print '%20.20s %s'%(key, value)
            return
        # Build the export file
        exportfilename = os.path.join(build_temp, exportname)
        log.debug("\tCreate export file %s", exportfilename)
        fp = open(exportfilename, 'w')
        fp.write('%s\n'%export_symbols[0])
        fp.close()
        # Generate the prefix file, if needed, and put it in the settings
        if self.__macros:
            prefixfilename = os.path.join(os.getcwd(), os.path.join(build_temp, prefixname))
            fp = open(prefixfilename, 'w')
            fp.write('#include "mwerks_shcarbon_config.h"\n')
            for name, value in self.__macros:
                if value is None:
                    fp.write('#define %s\n'%name)
                else:
                    fp.write('#define %s %s\n'%(name, value))
            fp.close()
            settings['prefixname'] = prefixname

        # Build the XML file. We need the full pathname (only lateron, really)
        # because we pass this pathname to CodeWarrior in an AppleEvent, and CW
        # doesn't have a clue about our working directory.
        xmlfilename = os.path.join(os.getcwd(), os.path.join(build_temp, xmlname))
        log.debug("\tCreate XML file %s", xmlfilename)
        xmlbuilder = mkcwproject.cwxmlgen.ProjectBuilder(settings)
        xmlbuilder.generate()
        xmldata = settings['tmp_projectxmldata']
        fp = open(xmlfilename, 'w')
        fp.write(xmldata)
        fp.close()
        # Generate the project. Again a full pathname.
        projectfilename = os.path.join(os.getcwd(), os.path.join(build_temp, projectname))
        log.debug('\tCreate project file %s', projectfilename)
        mkcwproject.makeproject(xmlfilename, projectfilename)
        # And build it
        log.debug('\tBuild project')
        mkcwproject.buildproject(projectfilename)
Example #49
0
def parseExptConfig(configFile, librariesToSublibrariesDict):
    parser = SafeConfigParser()
    parser.read(configFile)
    
    #output variables
    paramDict = dict()
    exitStatus = 0
    warningString = ''

    ##check all sections
    expectedSections = set(['experiment_settings',
         'library_settings',
         'counts_files',
         'filter_settings',
         'sgrna_analysis',
         'growth_values',
         'gene_analysis'])
         
    parsedSections = set(parser.sections())
    
    if len(expectedSections) != len(parsedSections) and len(expectedSections) != len(expectedSections.intersection(parsedSections)):
        return paramDict, 1, 'Config file does not have all required sections or has extraneous sections!\nExpected:' + ','.join(expectedSections)

    ##experiment settings
    if parser.has_option('experiment_settings','output_folder'):
        paramDict['output_folder'] = parser.get('experiment_settings','output_folder') #ways to check this is a valid path?
    else:
        warningString += 'No output folder specified, defaulting to current directory.\n'
        paramDict['output_folder'] = os.curdir  # '.'; os.curdir is a constant, not a function
        
    if parser.has_option('experiment_settings','experiment_name'):
        paramDict['experiment_name'] = parser.get('experiment_settings','experiment_name')
    else:
        warningString += 'No experiment name specified, defaulting to \'placeholder_expt_name\'.\n'
        paramDict['experiment_name'] = 'placeholder_expt_name'
        
        
    ##library settings
    libraryDict = librariesToSublibrariesDict
    if parser.has_option('library_settings','library'):
        parsedLibrary = parser.get('library_settings','library')
        
        if parsedLibrary.lower() in libraryDict:
            paramDict['library'] = parsedLibrary.lower()
        else:
            warningString += 'Library name not recognized\n'
            exitStatus += 1
            
    else:
        warningString += 'No library specified\n'
        exitStatus += 1
        parsedLibrary = ''
      
    if 'library' in paramDict:
        if parser.has_option('library_settings','sublibraries'):
            parsedSubList = parser.get('library_settings','sublibraries').strip().split('\n')
            
            paramDict['sublibraries'] = []
            
            for sub in parsedSubList:
                sub = sub.lower()
                if sub in libraryDict[paramDict['library']]:
                    paramDict['sublibraries'].append(sub)
                    
                else:
                    warningString += 'Sublibrary %s not recognized\n' % sub
                    
        else:
            paramDict['sublibraries'] = libraryDict[paramDict['library']]
            
    
    ##counts files
    if parser.has_option('counts_files','counts_file_string'):
        countsFileString = parser.get('counts_files','counts_file_string').strip()
        
        paramDict['counts_file_list'] = []
        
        for stringLine in countsFileString.split('\n'):
            stringLine = stringLine.strip()
            
            if len(stringLine.split(':')) != 2 or len(stringLine.split('|')) != 2:
                warningString += 'counts file entry could not be parsed: ' + stringLine + '\n'
                exitStatus += 1
            
            else:
                parsedPath = stringLine.split(':')[0]
                
                if os.path.isfile(parsedPath) == False:
                    warningString += 'Counts file not found: ' + parsedPath + '\n'
                    exitStatus += 1
                    
                condition, replicate = stringLine.split(':')[1].split('|')
                
                paramDict['counts_file_list'].append((condition, replicate,parsedPath))
            
    else:
        warningString += 'No counts files entered\n'
        exitStatus += 1
        
    
    ##filter settings
    filterOptions = ['either','both']
    if parser.has_option('filter_settings','filter_type') and parser.get('filter_settings','filter_type').lower() in filterOptions:
        paramDict['filter_type'] = parser.get('filter_settings','filter_type').lower()
    else:
        warningString += 'Filter type not set or not recognized, defaulting to \'either\'\n'
        paramDict['filter_type'] = 'either'
        
    if parser.has_option('filter_settings','minimum_reads'):
        try:
            paramDict['minimum_reads'] = parser.getint('filter_settings','minimum_reads')
        except ValueError:
            warningString += 'Minimum read value not an integer, defaulting to 0\n'  #recommended value is 50 but seems arbitrary to default to that
            paramDict['minimum_reads'] = 0
    else:
        warningString += 'Minimum read value not found, defaulting to 0\n'  #recommended value is 50 but seems arbitrary to default to that
        paramDict['minimum_reads'] = 0


    ##sgRNA Analysis
    if parser.has_option('sgrna_analysis','condition_string'):
        conditionString = parser.get('sgrna_analysis','condition_string').strip()

        paramDict['condition_tuples'] = []
        
        if 'counts_file_list' in paramDict:
            expectedConditions = set(zip(*paramDict['counts_file_list'])[0])
        else:
            expectedConditions = []

        enteredConditions = set()
        
        for conditionStringLine in conditionString.split('\n'):
            conditionStringLine = conditionStringLine.strip()

            if len(conditionStringLine.split(':')) != 3:
                warningString += 'Phenotype condition line not understood: ' + conditionStringLine + '\n'
                exitStatus += 1
            else:
                phenotype, condition1, condition2 = conditionStringLine.split(':')

                if condition1 not in expectedConditions or condition2 not in expectedConditions:
                    warningString += 'One of the conditions entered does not correspond to a counts file: ' + conditionStringLine + '\n'
                    exitStatus += 1
                else:
                    paramDict['condition_tuples'].append((phenotype,condition1,condition2))
                    enteredConditions.add(condition1)
                    enteredConditions.add(condition2)

        if len(paramDict['condition_tuples']) == 0:
            warningString += 'No phenotype score/condition pairs found\n'
            exitStatus += 1
            
        unusedConditions = list(expectedConditions - enteredConditions)
        if len(unusedConditions) > 0:
            warningString += 'Some conditions assigned to counts files will not be incorporated in sgRNA analysis:\n' \
                + ','.join(unusedConditions) + '\n'
        
    else:
        warningString += 'No phenotype score/condition pairs entered\n'
        exitStatus += 1
    
    
    pseudocountOptions = ['zeros only','all values','filter out']
    if parser.has_option('sgrna_analysis','pseudocount_behavior') and parser.get('sgrna_analysis','pseudocount_behavior').lower() in pseudocountOptions:
        paramDict['pseudocount_behavior'] = parser.get('sgrna_analysis','pseudocount_behavior').lower()
    else:
        warningString += 'Pseudocount behavior not set or not recognized, defaulting to \'zeros only\'\n'
        paramDict['pseudocount_behavior'] = 'zeros only'

    if parser.has_option('sgrna_analysis','pseudocount'):
        try:
            paramDict['pseudocount'] = parser.getfloat('sgrna_analysis','pseudocount')
        except ValueError:
            warningString += 'Pseudocount value not a number, defaulting to 0.1\n'
            paramDict['pseudocount'] = 0.1
    else:
        warningString += 'Pseudocount value not found, defaulting to 0.1\n'
        paramDict['pseudocount'] = 0.1

    
    ##Growth Values
    if parser.has_option('growth_values','growth_value_string') and len(parser.get('growth_values','growth_value_string').strip()) != 0:
        growthValueString = parser.get('growth_values','growth_value_string').strip()

        if 'condition_tuples' in paramDict and 'counts_file_list' in paramDict:
            expectedComparisons = set(zip(*paramDict['condition_tuples'])[0])
            expectedReplicates = set(zip(*paramDict['counts_file_list'])[1])

            expectedTupleList = []

            for comp in expectedComparisons:
                for rep in expectedReplicates:
                    expectedTupleList.append((comp,rep))
        else:
            expectedTupleList = []
             
        enteredTupleList = []
        growthValueTuples = []
        
        for growthValueLine in growthValueString.split('\n'):
            growthValueLine = growthValueLine.strip()
            
            linesplit = growthValueLine.split(':')
            
            if len(linesplit) != 3:
                warningString += 'Growth value line not understood: ' + growthValueLine + '\n'
                exitStatus += 1
                continue
                
            comparison = linesplit[0]
            replicate = linesplit[1]
            
            try:
                growthVal = float(linesplit[2])
            except ValueError:
                warningString += 'Growth value not a number: ' + growthValueLine + '\n'
                exitStatus += 1
                continue

            curTup = (comparison,replicate)
            if curTup in expectedTupleList:
                if curTup not in enteredTupleList:
                    enteredTupleList.append(curTup)
                    growthValueTuples.append((comparison,replicate,growthVal))
                    
                else:
                    warningString += ':'.join(curTup) + ' has multiple growth values entered\n'
                    exitStatus += 1
            else:
                warningString += ':'.join(curTup) + ' was not expected given the specified counts file assignments and sgRNA phenotypes\n'
                exitStatus += 1
        
        #because we enforced no duplicates or unexpected values these should match up unless there were values not entered
        #require all growth values to be explicitly entered if some were
        if len(enteredTupleList) != len(expectedTupleList):
            warningString += 'Growth values were not entered for all expected comparisons/replicates. Expected: ' + \
                ','.join([':'.join(tup) for tup in expectedTupleList]) + '\nEntered: ' + \
                ','.join([':'.join(tup) for tup in enteredTupleList]) + '\n'
            exitStatus += 1
        else:
            paramDict['growth_value_tuples'] = growthValueTuples
            
    else:
        warningString += 'No growth values--all phenotypes will be reported as log2 enrichments\n'
        
        paramDict['growth_value_tuples'] = []
        
        if 'condition_tuples' in paramDict and 'counts_file_list' in paramDict:
            expectedComparisons = set(zip(*paramDict['condition_tuples'])[0])
            expectedReplicates = set(zip(*paramDict['counts_file_list'])[1])

            for comp in expectedComparisons:
                for rep in expectedReplicates:
                    paramDict['growth_value_tuples'].append((comp,rep,1))
    
    ##Gene Analysis
    if parser.has_option('gene_analysis','collapse_to_transcripts'):
        try:
            paramDict['collapse_to_transcripts'] = parser.getboolean('gene_analysis','collapse_to_transcripts')
        except ValueError:
            warningString += 'Collapse to transcripts entry not a recognized boolean value\n'
            exitStatus += 1
    else:
        paramDict['collapse_to_transcripts'] = True
        warningString += 'Collapse to transcripts defaulting to True\n'


    #pseudogene parameters
    if parser.has_option('gene_analysis','generate_pseudogene_dist'):
        try:
            paramDict['generate_pseudogene_dist'] = parser.getboolean('gene_analysis','generate_pseudogene_dist')
        except ValueError:
            warningString += 'Generate pseudogene dist entry not a recognized boolean value\n'
            exitStatus += 1
    else:
        paramDict['generate_pseudogene_dist'] = False
        warningString += 'Generate pseudogene dist defaulting to False\n'

    if 'generate_pseudogene_dist' in paramDict and paramDict['generate_pseudogene_dist'] == True:
        if parser.has_option('gene_analysis','pseudogene_size'):
            try:
                paramDict['pseudogene_size'] = parser.getint('gene_analysis','pseudogene_size')
            except ValueError:
                warningString += 'Pseudogene size entry not a recognized integer value\n'
                exitStatus += 1
        else:
            warningString += 'No pseudogene size provided\n'
            exitStatus += 1

        if parser.has_option('gene_analysis','num_pseudogenes'):
            try:
                paramDict['num_pseudogenes'] = parser.getint('gene_analysis','num_pseudogenes')
            except ValueError:
                warningString += 'Pseudogene number entry not a recognized integer value\n'
                exitStatus += 1
        else:
            warningString += 'No pseudogene number provided\n'

    #list possible analyses in param dict as dictionary with keys = analysis and values = analysis-specific params (see the illustration after this function)
    
    paramDict['analyses'] = dict()

    #analyze by average of best n
    if parser.has_option('gene_analysis','calculate_ave'):
        try:
            if parser.getboolean('gene_analysis','calculate_ave') == True:
                paramDict['analyses']['calculate_ave'] = []
        except ValueError:
            warningString += 'Calculate ave entry not a recognized boolean value\n'
            exitStatus += 1

        if 'calculate_ave' in paramDict['analyses']:
            if parser.has_option('gene_analysis','best_n'):
                try:
                    paramDict['analyses']['calculate_ave'].append(parser.getint('gene_analysis','best_n'))
                except ValueError:
                    warningString += 'Best_n entry not a recognized integer value\n'
                    exitStatus += 1
            else:
                warningString += 'No best_n value provided for average analysis function\n'
                exitStatus += 1
    else:
        warningString += 'Best n average analysis not specified, defaulting to False\n'
    
    #analyze by Mann-Whitney
    if parser.has_option('gene_analysis','calculate_mw'):
        try:
            if parser.getboolean('gene_analysis','calculate_mw') == True:
                paramDict['analyses']['calculate_mw'] = []
        except ValueError:
            warningString += 'Calculate Mann-Whitney entry not a recognized boolean value\n'
            exitStatus += 1

    #analyze by K-S, skipping for now

    #analyze by nth best sgRNA
    if parser.has_option('gene_analysis','calculate_nth'):
        try:
            if parser.getboolean('gene_analysis','calculate_nth') == True:
                paramDict['analyses']['calculate_nth'] = []
        except ValueError:
            warningString += 'Calculate best Nth sgRNA entry not a recognized boolean value\n'
            exitStatus += 1

        if 'calculate_nth' in paramDict['analyses']:
            if parser.has_option('gene_analysis','nth'):
                try:
                    paramDict['analyses']['calculate_nth'].append(parser.getint('gene_analysis','nth'))
                except ValueError:
                    warningString += 'Nth best sgRNA entry not a recognized integer value\n'
                    exitStatus += 1
            else:
                warningString += 'No Nth best value provided for that analysis function\n'
                exitStatus += 1
    else:
        warningString += 'Nth best sgRNA analysis not specified, defaulting to False\n'


    if len(paramDict['analyses']) == 0:
        warningString += 'No analyses selected to compute gene scores\n' #should this raise exitStatus?

    return paramDict, exitStatus, warningString
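For reference, the 'analyses' dictionary assembled above maps each enabled analysis to its parameter list; with all three analyses turned on, a best_n of 3 and an nth of 2 (illustrative values, not defaults from the source), it would look like this:

analyses = {
    'calculate_ave': [3],    # average of the 3 best sgRNAs (best_n)
    'calculate_mw': [],      # Mann-Whitney needs no extra parameters
    'calculate_nth': [2],    # score by the 2nd best sgRNA (nth)
}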