Example No. 1
import os
from os import path, scandir


def cleanAmberProject(sourceDirectory):
    # Recursively delete every "AmberProblem.txt" report found under sourceDirectory.
    for file in scandir(sourceDirectory):
        if path.isdir(file):
            cleanAmberProject(file)
        elif "AmberProblem.txt" in file.name:
            os.remove(file)
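For comparison, the same cleanup can be done without recursion using os.walk; a minimal sketch assuming the same file name and intent as above (the function name here is illustrative, not from the original project):

import os

def clean_amber_project(source_directory):
    # Walk the whole tree once and delete every matching report file.
    for dirpath, _dirnames, filenames in os.walk(source_directory):
        for name in filenames:
            if "AmberProblem.txt" in name:
                os.remove(os.path.join(dirpath, name))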
Example No. 2
from os import mkdir, path


def creation_directory(directory: str):
    """Factor out the existence check used when creating the results directory."""
    if not path.isdir(directory):
        mkdir(directory)
Example No. 3
def mkdir_p(mypath):
    """Create a directory and its parents, ignoring the error if it already
    exists (the classic `mkdir -p` recipe)."""
    from errno import EEXIST
    from os import makedirs, path

    try:
        makedirs(mypath)
    except OSError as exc:
        if exc.errno == EEXIST and path.isdir(mypath):
            pass
        else:
            raise
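On Python 3.2+ the errno handling above is unnecessary: os.makedirs accepts an exist_ok flag that gives the same `mkdir -p` behaviour and also avoids the check-then-create race seen in the previous example. A minimal equivalent:

from os import makedirs

def mkdir_p(mypath):
    # Equivalent to the version above on Python 3.2+.
    makedirs(mypath, exist_ok=True)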
Example No. 4
from os import path, scandir


def foundLegacyKeyStore(repository):
    # Return True if any .jks (Java keystore) file exists anywhere in the project.
    found = False
    for file in scandir(repository):
        if path.isdir(file):
            found = foundLegacyKeyStore(file)
            if found:
                return found
        elif '.jks' in file.name.lower():
            return True
    return found
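The same check can be written without explicit recursion using pathlib's recursive glob; a sketch, assuming only the .jks extension matters (note that glob matching is case-sensitive on case-sensitive filesystems, unlike the .lower() check above):

from pathlib import Path

def found_legacy_key_store(repository):
    # any() stops at the first keystore found anywhere under the repository.
    return any(Path(repository).rglob("*.jks"))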
Example No. 5
def getTranslatorHome ():
	# Check that the path to the translator can be found before returning its absolute path
	from string import split
	from os import environ, path, pathsep

	translatorEnvVariable = 'IMPERIAL_TRANSLATOR_HOME'

	if not environ.has_key(translatorEnvVariable):
		errorMessage = "=" * 80 + \
"\nYou need to set the environment variable '" + translatorEnvVariable + "' to point to the base directory of the translator infrastructure." + \
"\nThis is the path ending with the directory 'OP2_ROSE_Fortran'." + \
"\nFor example, 'export IMPERIAL_TRANSLATOR_HOME=/usr/joe/bloggs/subdir/OP_ROSE_FORTRAN'\n" + "=" * 80 
		debug.exitMessage(errorMessage)
	
	translatorHome = split(environ.get(translatorEnvVariable), pathsep)[0]
	if not path.isdir(translatorHome):
		debug.exitMessage("The source-to-source translator path '%s' is not a directory" % (translatorHome))

	return translatorHome
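This snippet is Python 2 only: string.split and dict.has_key no longer exist in Python 3. A rough Python 3 equivalent, keeping debug.exitMessage as the original project's error helper (assumed available), might look like this:

from os import environ, path, pathsep

def getTranslatorHome():
    translatorEnvVariable = 'IMPERIAL_TRANSLATOR_HOME'
    if translatorEnvVariable not in environ:
        # debug.exitMessage is the original project's helper, not a standard API.
        debug.exitMessage("You need to set the environment variable '%s' to point to "
                          "the base directory of the translator infrastructure."
                          % translatorEnvVariable)

    translatorHome = environ[translatorEnvVariable].split(pathsep)[0]
    if not path.isdir(translatorHome):
        debug.exitMessage("The source-to-source translator path '%s' is not a directory"
                          % translatorHome)
    return translatorHome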
Example No. 6
def process_wattbike_cfg(**kwargs):
    '''
    Read in config variables, validate them, and return the indexes of the
    configured fields in the .dat file.

    Minimum kwargs expected:
      valid_config_keys, valid_headers, valid_mavg_units, valid_delims,
      header, delim, mavg, mavg_unit, dir_name, datfile_delim, dir_ext
    '''
    # Check that every required config key was passed in.
    for k in kwargs['valid_config_keys']:
        if k not in kwargs:
            raise MissingConfigException(k)
    
    fields = kwargs['header'].split(kwargs['delim'])

    # validate that config values are ok
    field_indexes = []
    for f in fields:
        try:
            index = kwargs['valid_headers'].index(f)
            field_indexes.append(index)
        except ValueError:
            raise UnknownHeaderException(f)

    if kwargs['mavg'] < 0 or kwargs['mavg'] > 100:
        raise MAVGOutOfRangeException(kwargs['mavg'])

    if kwargs['mavg_unit'] not in kwargs['valid_mavg_units']:
        raise InvalidMAVG_UNITException(kwargs['mavg_unit'])

    if kwargs['delim'] not in kwargs['valid_delims']:
        raise InvalidDELIMException(kwargs['delim'])

    if kwargs['datfile_delim'] not in kwargs['valid_delims']:
        raise InvalidDATFILE_DELIMException(kwargs['datfile_delim'])

    if not path.isdir(kwargs['dir_name']):
        raise InvalidDIR_NAMEException(kwargs['dir_name'])

    return field_indexes
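A hypothetical call, only to illustrate the shape of the expected kwargs (all concrete values and field names below are made up for the example):

field_indexes = process_wattbike_cfg(
    valid_config_keys=["header", "delim", "mavg", "mavg_unit",
                       "dir_name", "datfile_delim", "dir_ext"],
    valid_headers=["time", "power", "cadence", "heart_rate"],
    valid_mavg_units=["seconds", "samples"],
    valid_delims=[",", "\t"],
    header="time,power,cadence",
    delim=",",
    mavg=10,
    mavg_unit="seconds",
    dir_name="./data",          # must be an existing directory
    datfile_delim="\t",
    dir_ext=".dat",
)
# field_indexes -> [0, 1, 2]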
Example No. 7
from os import path, scandir


def findValueOfTheVersionOfDependencyRecurse(versionVar, repository):
    # Find all files with a .gradle or .properties extension and search them
    # for a "<versionVar>=<value>" line, returning the value if found.
    for file in scandir(repository):
        if path.isdir(file):
            result = findValueOfTheVersionOfDependencyRecurse(
                versionVar, file.path)
            if result is not None:
                return result
        elif ".gradle" in file.name or ".properties" in file.name:
            with open(file, "r") as fileStream:
                for line in fileStream:
                    if versionVar in line and "=" in line:
                        return line.split("=")[-1].replace("\n", "")
    return None
Example No. 8
from os import getcwd, listdir, path

import numpy as np
import pandas as pd

# Plotting helper from the original project (see docstring below).
from utilities.save_plot import save_average_plot_across_runs


def parse_stats_from_runs(experiment_name):
    """
    Analyses a list of given stats from a group of runs saved under an
    "experiment_name" folder. Creates a summary .csv file which can be used by
    plotting functions in utilities.save_plot. Saves a file of the format:

        run0_gen0       run1_gen0       .   .   .   run(n-1)_gen0
        run0_gen1       run1_gen1       .   .   .   run(n-1)_gen1
        run0_gen2       run1_gen2       .   .   .   run(n-1)_gen2
        .               .               .   .   .   .
        .               .               .   .   .   .
        .               .               .   .   .   .
        run0_gen(n-1)   run1_gen(n-1)   .   .   .   run(n-1)_gen(n-1)
        run0_gen(n)     run1_gen(n)     .   .   .   run(n-1)_gen(n)
        
    Generated file is compatible with
        
        utilities.save_plot.save_average_plot_across_runs()
    
    :param experiment_name: The name of a collecting folder within the
    ./results folder which holds multiple runs.
    :return: Nothing.
    """

    # Since results files are not kept in source directory, need to escape
    # one folder.
    file_path = path.join(getcwd(), "..", "results")

    # Check for use of experiment manager.
    if experiment_name:
        file_path = path.join(file_path, experiment_name)

    else:
        s = "scripts.parse_stats.parse_stats_from_runs\n" \
            "Error: experiment name not specified."
        raise Exception(s)

    # Find list of all runs contained in the specified folder.
    runs = [
        run for run in listdir(file_path)
        if path.isdir(path.join(file_path, run))
    ]

    # Place to store the header for full stats file.
    header = ""

    # Array to store all stats
    full_stats = []

    # Get list of all stats to parse. Check stats file of first run from
    # runs folder.
    ping_file = path.join(file_path, str(runs[0]), "stats.tsv")

    # Load in data and get the names of all stats.
    stats = list(pd.read_csv(ping_file, sep="\t"))

    # Make list of stats we do not wish to parse.
    no_parse_list = ["gen", "total_inds", "time_adjust"]

    for stat in [
            stat for stat in stats
            if stat not in no_parse_list and not stat.startswith("Unnamed")
    ]:
        # Iterate over all stats.
        print("Parsing", stat)
        summary_stats = []

        # Iterate over all runs
        for run in runs:
            # Get file name
            file_name = path.join(file_path, str(run), "stats.tsv")

            # Load in data
            data = pd.read_csv(file_name, sep="\t")

            try:
                # Try to extract specific stat from the data.
                if list(data[stat]):
                    summary_stats.append(list(data[stat]))
                else:
                    s = "scripts.parse_stats.parse_stats_from_runs\n" \
                        "Error: stat %s is empty for run %s." % (stat, run)
                    raise Exception(s)

            except KeyError:
                # The requested stat doesn't exist.
                s = "scripts.parse_stats.parse_stats_from_runs\nError: " \
                    "stat %s does not exist in run %s." % (stat, run)
                raise Exception(s)

        try:
            # Generate numpy array of all stats
            summary_stats = np.array(summary_stats)

            # Append Stat to header.
            header = header + stat + "_mean,"

            summary_stats_mean = np.nanmean(summary_stats, axis=0)
            full_stats.append(summary_stats_mean)

            # Append Stat to header.
            header = header + stat + "_std,"
            summary_stats_std = np.nanstd(summary_stats, axis=0)
            full_stats.append(summary_stats_std)
            summary_stats = np.transpose(summary_stats)

            # Save stats as a .csv file.
            np.savetxt(path.join(file_path, (stat + ".csv")),
                       summary_stats,
                       delimiter=",")

            # Graph stat by calling graphing function.
            save_average_plot_across_runs(path.join(file_path,
                                                    (stat + ".csv")))

        except FloatingPointError:
            print("scripts.stats_parser.parse_stats_from_runs\n"
                  "Warning: FloatingPointError encountered while parsing %s "
                  "stats." % (stat))

    # Convert and rotate full stats
    full_stats = np.array(full_stats)
    full_stats = np.transpose(full_stats)

    # Save full stats to csv file.
    np.savetxt(path.join(file_path, "full_stats.csv"),
               full_stats,
               delimiter=",",
               header=header[:-1])
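One assumption worth making explicit: NumPy only raises FloatingPointError (as caught above) when its error handling has been switched from warnings to exceptions, presumably somewhere earlier in the project (not shown in this snippet):

import numpy as np

np.seterr(all="raise")  # turn NumPy floating-point warnings into exceptions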
Example No. 9
    "SingleEleCR": "Wen",
    "SingleMuCR": "Wmn",
    "GammaCR": "gjets"
}
procmap = {
    "Data": "data",
    "ZJets": "zjets",
    "WJets": "wjets",
    "DYJets": "zll",
    "GJets": "gjets",
    "TTJets": "top",
    "DiBoson": "diboson",
    "QCD": "qcd"
}
signalmap = {"Axial": "axial"}
if not path.isdir("Systematics"): mkdir("Systematics")


def validHisto(hs, total=0, threshold=0.2):
    return hs.Integral() > threshold * total


def validShape(up, dn):
    # A shape variation is valid if up/down differ in at least one bin and
    # both histograms pass the integral threshold.
    differs = any(up[ibin] != dn[ibin] for ibin in range(1, up.GetNbinsX() + 1))
    return differs and validHisto(up) and validHisto(dn)


def SaveRegion(region, save):
    region = Region(path=region, show=False, autovar=True)
Example No. 10
def migrateGradleBuildFile(repository, projectMetaData):
    print(RED +
          '\n\n\n\nStarting a new Migration\n-------------------------\n')
    print(RED + 'Working dir:' + repository)
    print(RED + 'ROOT_PYTHON_PATH:' + ROOT_PYTHON_PATH)
    #check if it's an Eclipse project
    if not path.isdir(repository + '\\src'):
        print(RED + "Not an eclipse project, returning")
        return
    global REPO_NAME
    REPO_NAME = repository.split('\\')[-1]
    print(RED + 'RepoName is the following:' + REPO_NAME)
    #First open the source gradle file
    buildGradle = repository + "\\app\\build.gradle"
    #Parse it and paste it in a temp file:
    #Need to find the file
    referentGradleBuildFile = open(buildGradle, "r")
    destinationFile = open(buildGradle + '_temp', "w+")
    #and update it according to our generic config file

    dependenciesBloc = False
    signingConfigBloc = False
    #the depth when reading dependencies in {}
    dependencyDepth = 0
    signingConfigDepth = 0
    for line in referentGradleBuildFile:
        #When reaching signing config, keep source
        if (dependenciesBloc):
            # managing depth in the {} block
            if '{' in line:
                dependencyDepth = dependencyDepth + 1
            elif '}' in line:
                dependencyDepth = dependencyDepth - 1
            if (dependencyDepth == 0):
                # add the Crashlytics and the JUnit blocks
                dependenciesBloc = False

            # Do the work here: log the dependencies that are not found in the reference
            pass
        if (signingConfigBloc):

            # Probably a more general problem, linked with the keystore: has to be handled by ProjMetaData

            # managing depth in the {} block
            if '{' in line:
                signingConfigDepth = signingConfigDepth + 1
            elif '}' in line:
                signingConfigDepth = signingConfigDepth - 1
            if (signingConfigDepth == 0):
                signingConfigBloc = False

            # you are in the signing config, just copy the initial element from your ProjectMetaData
            # You also have this block later to handle :'(
            # buildTypes {
            #     release {
            #         signingConfig signingConfigs.release
            #         minifyEnabled true
            #         shrinkResources true
            #         useProguard true
            #         proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
            #         //add tests coverage using Jacoco
            #         testCoverageEnabled false
            #     }
            #     debug {
            #         // Run code coverage reports by default on debug builds.
            #         // testCoverageEnabled = true
            #         signingConfig signingConfigs.debug
            #         applicationIdSuffix '.debug'
            #         versionNameSuffix '.debug'
            #         //add tests coverage using Jacoco
            #         testCoverageEnabled true
            #         useProguard false
            #     }
            # }

            pass
        else:
            if 'dependencies' in line:
                dependencyDepth = 1
                dependenciesBloc = True
            elif 'signingConfigs {' in line:
                signingConfigDepth = 1
                signingConfigBloc = True
            # When reaching the dependencies block, use our own line if it is not already in the right format
            # List all the unknown libraries
            # else copy the line
            destinationFile.write(line)
    destinationFile.close()
    referentGradleBuildFile.close()
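The explicit open/close pair above can also be replaced with context managers, so both files are closed even if parsing raises; a minimal sketch of just that pattern (the block-tracking logic is omitted and the function name is illustrative):

def copy_gradle_file(build_gradle):
    # Files are closed automatically when the with-block exits.
    with open(build_gradle, "r") as source, \
         open(build_gradle + "_temp", "w+") as destination:
        for line in source:
            destination.write(line)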
Example No. 12
def recursiveCargoDoc(specificPath):
    # Process every "PARS MANIFESTS" PDF under specificPath, recursing into
    # sub-directories except the "Flattened" one.
    for filename in listdir(specificPath):
        if "PARS MANIFESTS" in filename and (filename[-4:] == ".pdf" or filename[-4:] == ".PDF"):
            cargoDoc(specificPath + '\\' + filename)
        elif path.isdir(specificPath + "\\" + filename) and filename != "Flattened":
            recursiveCargoDoc(specificPath + "\\" + filename)
Example No. 13
                            elif(k.getObject()['/T']=="Consignee"):
                                consignee=k.getObject()['/V']
                            elif(k.getObject()['/T']=="Shipper"):
                                shipper=k.getObject()['/V']
                            elif(k.getObject()['/T']=="ETA DATE"):
                                eta=k.getObject()['/V']
                            elif(k.getObject()['/T']=="undefined"):
                                portOfLoading=k.getObject()['/V']
                            elif(k.getObject()['/T']=="Port of Discharge"):
                                portOfDischarge=k.getObject()['/V']
                            elif(k.getObject()['/T']=="Description of goods"):
                                description=k.getObject()['/V']    
                        except KeyError:
                            pass
        

if __name__ == '__main__':
    
    # Re-join the command-line arguments into a single path (it may contain spaces).
    specificPath = " ".join(argv[1:])
#     destinationOfFiles = workOrderLocation[:workOrderLocation.rfind('\\')] + "\\"
#     print(specificPath)
    if path.isdir(specificPath):
        recursiveCargoDoc(specificPath)
    elif "PARS MANIFESTS" in specificPath and (specificPath[-4:] == ".pdf" or specificPath[-4:] == ".PDF"):
        cargoDoc(specificPath)