Example #1
def fillDAG_GHEP(jobsub, tag, xsec_a_path, out, main_tune):
    # check if job is done already
    if isDoneGHEP(out):
        msg.warning(
            "Standard mctest ghep files found in " + out + " ... " + msg.BOLD +
            "skipping standard:fillDAG_GHEP\n", 1)
        return
    # not done, add jobs to dag
    msg.info("\tAdding standard mctest (ghep) jobs\n")
    # in parallel mode
    jobsub.add("<parallel>")
    # common options
    inputFile = "gxspl-vA-" + tag + ".xml"
    if not (main_tune is None):
        inputFile = main_tune + "-gxspl-vA-" + tag + ".xml"

    options = " --seed " + mcseed + " --cross-sections input/" + inputFile
    if not (main_tune is None):
        options = options + " --tune " + main_tune

    # loop over keys and generate gevgen command
    for key in nuPDG.iterkeys():
        cmd = "gevgen -n " + nEvents[key] + " -e " + energy[key] + " -p " + nuPDG[key] + " -t " + targetPDG[key] + \
              " -r " + key + " --event-generator-list " + generatorList[key] + options
        logFile = "gevgen_" + key + ".log"
        jobsub.addJob(xsec_a_path + "/" + inputFile, out, logFile, cmd, None)
    # done
    jobsub.add("</parallel>")
Example #2
def partcheck(data1, data2, key, n=100):
    """Do full comparison entry by entry.

    Keyword arguments:
    data1 -- numpy array with data
    data2 -- numpy array with data
    key -- dataset being checked
    n -- number of entries to compare in each part of the file
    """

    msg.warning("%(key)s dataset too big to fit in memory. "
                "Comparing first / last / random %(n)d entries." % {
                    "key": key,
                    "n": n
                })

    N = len(data1)
    entries = range(n)
    entries.extend(range(N - n, N))
    entries.extend(np.random.randint(low=n, high=N - n, size=n))

    for i in entries:
        if not np.array_equal(data1[i], data2[i]):
            msg.error("Different entry %(id)d in %(key)s dataset." % {
                "key": key,
                "id": i
            })
            sys.exit(1)
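A hedged usage sketch for the helper above (it assumes the repo's msg module and numpy are importable alongside partcheck; the file names and the "images" dataset are illustrative, not from the original code):

import h5py

f1 = h5py.File("first.h5", "r")
f2 = h5py.File("second.h5", "r")
# compare the first, last and 100 random entries of the (hypothetical) "images" dataset;
# the dataset is assumed to have well over 2*n entries
partcheck(f1["images"], f2["images"], "images", n=100)
f1.close()
f2.close()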
Example #3
def goto_city(driver):
    msg.info("Goint to city tab...")
    try:
        driver.execute_script("switchMenu(1)")
        msg.info("done")
    except:
        msg.warning("failed")
Example #4
def fillDAGMerge(jobsub, tag, out):
    # check if job is done already
    if isDoneMerge(tag, out):
        msg.warning(
            "Nucleus merged splines found in " + out + " ... " + msg.BOLD +
            "skipping nua:fillDAGMerge\n", 1)
        return
    # not done, add jobs to dag
    msg.info("\tAdding nucleus splines (merge) jobs\n")
    # in serial mode
    jobsub.add("<serial>")
    # common options
    xmlFile = "gxspl-vA-" + tag + ".xml"
    # merge splines job
    cmd = "gspladd -d input -o " + xmlFile
    inputs = out + "/*.xml"
    logFile = "gspladd.log"
    jobsub.addJob(inputs, out, logFile, cmd)
    # convert to root job
    rootFile = "xsec-vA-" + tag + ".root"
    cmd = "gspl2root -p " + nuPDG + " -t " + ",".join(
        targets) + " -o " + rootFile + " -f input/" + xmlFile
    inputs = out + "/" + xmlFile
    logFile = "gspl2root.log"
    jobsub.addJob(inputs, out, logFile, cmd)
    # done
    jobsub.add("</serial>")
Example #5
def update_data(data, keys, skip=None):
    """Remove not requested datasets.

    Keyword arguments:
    data -- dictionary with data
    keys -- user-requested keys
    skip -- the key not to delete
    """

    loopkeys = list(data.keys())
    for key in loopkeys:
        if key == skip:
            continue
        if key not in keys:
            del data[key]

    if not len(data):
        msg.error("No datasets to process.")
        sys.exit(1)

    check.get_size(data)

    for key in keys:
        if key not in data.keys():
            msg.warning("%s requested, but not found." % key)
Example #6
def update_data(data, keys, skip=None):

    """Remove not requested datasets.

    Keyword arguments:
    data -- dictionary with data
    keys -- user-requested keys
    skip -- the key not to delete
    """

    for key in data.keys():
        if key == skip:
            continue
        if key not in keys:
            del data[key]

    if not len(data):
        msg.error("No datasets to process.")
        sys.exit(1)

    check.get_size(data)

    for key in keys:
        if key not in data.keys():
            msg.warning("%s requested, but not found." % key)
Example #7
def fillDAG_data (jobsub, tag, date, xsec_a_path, outEvents, outRep, outRepSng):
  # check if job is done already
  if isDoneData (tag, date, outRep, outRepSng):
    msg.warning ("xsec validation plots found in " + outRep + " ... " + msg.BOLD + "skipping xsecval:fillDAG_data\n", 1)
    return
  # not done, add jobs to dag
  msg.info ("\tAdding xsec validation (data) jobs\n")    
  # in parallel mode
  jobsub.add ("<parallel>")
  # one job for all comparisons without errors
  inFile  = "file_list-" + tag + "-" + date + ".xml"
  outFile = "genie_" + tag + "-" + date + "-world_nu_xsec_data_comp-all-withref"
  cmd = "gvld_nu_xsec -g input/" + inFile + " -o " + outFile
  # add the command to dag
  inputs = outRep + "/" + inFile + " " + xsec_a_path + "/xsec-vA-" + tag + ".root " + outEvents + "/*.ghep.root"
  logFile = "gvld_nu_xsec_all.log"
  jobsub.addJob (inputs, outRep, logFile, cmd)
  # job per comparison with error
  for comp in comparisons:
    outFile = "genie_" + tag + "-" + date + "-world_nu_xsec_data_comp-" + comp
    cmd = "gvld_nu_xsec -e -g input/" + inFile + " -o " + outFile + " -c " + comp
    logFile = "gvld_nu_xsec_" + comp + ".log"
    jobsub.addJob (inputs, outRepSng, logFile, cmd)
  # done
  jobsub.add ("</parallel>")
Example #8
    def generate_remote_synthesis_makefile(self):
        if self.connection.ssh_user == None or self.connection.ssh_server == None:
            p.warning("Connection data is not given. "
                "Accessing environmental variables in the makefile")
        p.info("Generating makefile for remote synthesis.")

        top_mod = self.modules_pool.get_top_module()
        if not os.path.exists(top_mod.fetchto):
            p.warning("There are no modules fetched. "
                "Are you sure it's correct?")

        ise_path = self.__figure_out_ise_path()
        tcl = self.__search_tcl_file()

        if tcl == None:
            self.__generate_tcl()
            tcl = "run.tcl"
        files = self.modules_pool.build_very_global_file_list()

        sff = SourceFileFactory()
        files.add(sff.new(tcl))
        files.add(sff.new(top_mod.syn_project))

        self.make_writer.generate_remote_synthesis_makefile(files=files, name=top_mod.syn_name,
        cwd=os.getcwd(), user=self.connection.ssh_user, server=self.connection.ssh_server, ise_path=ise_path)
Example #9
def fillDAG_GST(jobsub, out, tunes):
    # check if job is done already
    if isDoneGST(out, tunes):
        msg.warning(
            "hadronization test gst files found in " + out + " ... " +
            msg.BOLD + "skipping hadronization:fillDAG_GST\n", 1)
        return
    # not done, add jobs to dag
    msg.info("\tAdding hadronization test (gst) jobs\n")

    # in parallel mode
    jobsub.add("<parallel>")
    # loop over keys and generate gntpc command
    for key in nuPDG.iterkeys():
        inputFile = "gntp." + key + ".ghep.root"
        logFile = "gntpc" + key + ".log"
        cmd = "gntpc -f gst -i input/" + inputFile
        jobsub.addJob(out + "/" + inputFile, out, logFile, cmd, None)
        # same for tunes if specified
        if not (tunes is None):
            for tn in range(len(tunes)):
                cmdTune = "gntpc -f gst -i input/" + tunes[tn] + "-" + inputFile
                jobsub.addJob(
                    out + "/" + tunes[tn] + "/" + tunes[tn] + "-" + inputFile,
                    out + "/" + tunes[tn], tunes[tn] + "-" + logFile, cmdTune,
                    None)

    # done
    jobsub.add("</parallel>")
Example #10
def fillDAG_sanity (jobsub, events, out):
  # check if job is done already
  if isDoneSanity (out):
    msg.warning ("Standard mctest sanity checks log files found in " + out + " ... " + msg.BOLD + \
                 "skipping standard:fillDAG_sanity\n", 1)
    return
  # not done, add jobs to dag
  msg.info ("\tAdding mctest sanity checks jobs\n")
  # in parallel mode
  jobsub.add ("<parallel>")
  # common options
  options = " --add-event-printout-in-error-log --event-record-print-level 2 --max-num-of-errors-shown 10 " + \
            " --check-energy-momentum-conservation " + \
            " --check-charge-conservation " + \
            " --check-for-pseudoparticles-in-final-state " + \
            " --check-for-off-mass-shell-particles-in-final-state " + \
            " --check-for-num-of-final-state-nucleons-inconsistent-with-target " + \
            " --check-vertex-distribution " + \
            " --check-decayer-consistency"
  # loop over keys and generate gvld_sample_scan command
  for key in nuPDG.iterkeys():
    inputFile = "gntp." + key + ".ghep.root"
    output = "gntp." + key + ".ghep.root.sanity.log"
    cmd = "gvld_sample_scan -f input/" + inputFile + " -o " + output + options
    logFile = "gvld_sample_scan." + key + ".log"
    jobsub.addJob (events + "/" + inputFile, out, logFile, cmd)
  # done
  jobsub.add ("</parallel>")
Example #11
    def __init__(self, config: Config):
        self.config = config
        self.visited = set()
        self.to_be_visited = set()

        self.state_file_path = os.path.join(config.directory, _STATE_FILENAME)

        if os.path.exists(self.state_file_path):  # there is a state file
            with open(self.state_file_path, "rb") as state_file:
                self.visited, self.to_be_visited = pickle.load(state_file)
                msg.info(
                    f"Read state (visited: {len(self.visited)}, to be visited: {len(self.to_be_visited)})"
                )
        else:
            msg.info("Initializing...")

            initial_set = set(["/"])
            for category in config.categories:
                initial_set.update(category.seed)

            # TODO: Add links from the sitemap
            # for sitemap_url in url.filter_valid_links(sitemap_urls, categories, base_url):
            # 	to_be_visited.add(sitemap_url)

            for link in initial_set:
                page_content, page_links = url.content_and_links(
                    config.base_url + link)

                if page_content is None:
                    msg.warning(
                        f"Unable to reach {link} (no internet connection?)")
                    continue

                self.add_links(page_links)
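The state file read above is a pickled (visited, to_be_visited) tuple of sets; a small sketch of the matching save step (the save_state helper is illustrative, not taken from the original class):

import pickle

def save_state(state_file_path, visited, to_be_visited):
    # persist the crawler state in the same format that __init__ reads back
    with open(state_file_path, "wb") as state_file:
        pickle.dump((visited, to_be_visited), state_file)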
Example #12
    def generate_remote_synthesis_makefile(self):
        if self.connection.ssh_user == None or self.connection.ssh_server == None:
            p.warning("Connection data is not given. "
                      "Accessing environmental variables in the makefile")
        p.info("Generating makefile for remote synthesis.")

        top_mod = self.modules_pool.get_top_module()
        if not os.path.exists(top_mod.fetchto):
            p.warning("There are no modules fetched. "
                      "Are you sure it's correct?")

        ise_path = self.__figure_out_ise_path()
        tcl = self.__search_tcl_file()

        if tcl == None:
            self.__generate_tcl()
            tcl = "run.tcl"
        files = self.modules_pool.build_very_global_file_list()

        sff = SourceFileFactory()
        files.add(sff.new(tcl))
        files.add(sff.new(top_mod.syn_project))

        self.make_writer.generate_remote_synthesis_makefile(
            files=files,
            name=top_mod.syn_name,
            cwd=os.getcwd(),
            user=self.connection.ssh_user,
            server=self.connection.ssh_server,
            ise_path=ise_path)
Example #13
def fillDAG_sanity(jobsub, events, out):
    # check if job is done already
    if isDoneSanity(out):
        msg.warning ("Standard mctest sanity checks log files found in " + out + " ... " + msg.BOLD + \
                     "skipping standard:fillDAG_sanity\n", 1)
        return
    # not done, add jobs to dag
    msg.info("\tAdding mctest sanity checks jobs\n")
    # in parallel mode
    jobsub.add("<parallel>")
    # common options
    options = " --add-event-printout-in-error-log --event-record-print-level 2 --max-num-of-errors-shown 10 " + \
              " --check-energy-momentum-conservation " + \
              " --check-charge-conservation " + \
              " --check-for-pseudoparticles-in-final-state " + \
              " --check-for-off-mass-shell-particles-in-final-state " + \
              " --check-for-num-of-final-state-nucleons-inconsistent-with-target " + \
              " --check-vertex-distribution " + \
              " --check-decayer-consistency"
    # loop over keys and generate gvld_sample_scan command
    for key in nuPDG.iterkeys():
        inputFile = "gntp." + key + ".ghep.root"
        output = "gntp." + key + ".ghep.root.sanity.log"
        cmd = "gvld_sample_scan -f input/" + inputFile + " -o " + output + options
        logFile = "gvld_sample_scan." + key + ".log"
        jobsub.addJob(events + "/" + inputFile, out, logFile, cmd)
    # done
    jobsub.add("</parallel>")
Example #14
def getBuild(tag, date, path):
    # get build with defined tag and date and save in path
    buildName = "genie_" + tag + "_buildmaster_" + date
    # check if build already exists
    if os.path.isdir(path + "/" + buildName):
        msg.warning(
            path + "/" + buildName + " already exists ... " + msg.BOLD +
            "skipping jenkins:getBuild\n", 1)
        return buildName
    # no build
    tarball = buildName + ".tgz"
    # check if the build is available
    if tarball not in getBuildList():
        msg.error("There is no artifact for " + msg.BOLD + tarball + "\n")
        print "Available artifacts:\n"
        for artifact in getBuildList():
            print "\t" + artifact + "\n"
        sys.exit(1)
    # download build
    msg.info("Downloading " + msg.BOLD + tarball)
    urllib.urlretrieve(url + "/artifact/genie_builds/" + tarball,
                       path + "/" + tarball)
    # extract the build
    msg.info("Extracting to " + msg.BOLD + path + "/" + buildName + "\n")
    tarfile.open(path + "/" + tarball, 'r').extractall(path + "/" + buildName)
    # return buildName
    return buildName
Example #15
 def alchemist(driver):
     try:
         msg.info("Goint to alchemist tab...")
         return driver.find_element_by_css_selector(
             "#submenu1 > a:nth-child(7)")
     except:
         msg.warning("couldn't find alchemist tab")
         return False
Example #16
 def magnus_hermeticus(driver):
     try:
         msg.info("Goint to magnus hermeticus tab...")
         return driver.find_element_by_css_selector(
             "#submenu1 > a:nth-child(14)")
     except:
         msg.warning("couldn't find magnus hermeticus tab")
         return False
Example #17
 def maleficia(driver):
     try:
         msg.info("Goint to maleficia tab...")
         return driver.find_element_by_css_selector(
             "#submenu1 > a:nth-child(9)")
     except:
         msg.warning("couldn't find maleficia tab")
         return False
Example #18
 def city_gate(driver):
     try:
         msg.info("Goint to city gate tab...")
         return driver.find_element_by_css_selector(
             "#submenu1 > a:nth-child(17)")
     except:
         msg.warning("couldn't find city gate tab")
         return False
Example #19
 def mercenary(driver):
     try:
         msg.info("Goint to mercenary tab...")
         return driver.find_element_by_css_selector(
             "#submenu1 > a:nth-child(8)")
     except:
         msg.warning("couldn't find mercenary tab")
         return False
Example #20
 def armour_smith(driver):
     try:
         msg.info("Goint to armour smith tab...")
         return driver.find_element_by_css_selector(
             "#submenu1 > a:nth-child(5)")
     except:
         msg.warning("couldn't find armour smith tab")
         return False
Example #21
 def general_goods(driver):
     try:
         msg.info("Goint to general goods tab...")
         return driver.find_element_by_css_selector(
             "#submenu1 > a:nth-child(6)")
     except:
         msg.warning("couldn't find general goods tab")
         return False
Example #22
 def overview(driver):
     try:
         msg.info("Goint to overview tab...")
         return driver.find_element_by_css_selector(
             "#mainmenu > a:nth-child(1)")
     except:
         msg.warning("couldn't find overview tab")
         return False
Example #23
 def country_zone(driver, zone_number):
     try:
         msg.info(f"Goint to country zone {zone_number}...")
         return driver.find_element_by_css_selector(
             f"#submenu2 > a:nth-child({zone_number})")
     except:
         msg.warning(f"couldn't find country zone {zone_number}")
         return False
Example #24
 def recruiting(driver):
     try:
         msg.info("Goint to recruiting tab...")
         return driver.find_element_by_css_selector(
             "#mainmenu > a:nth-child(5)")
     except:
         msg.warning("couldn't find recruiting tab")
         return False
Example #25
 def highscore(driver):
     try:
         msg.info("Goint to highscore tab...")
         return driver.find_element_by_css_selector(
             "#mainmenu > a:nth-child(4)")
     except:
         msg.warning("couldn't find highscore tab")
         return False
Example #26
 def premium(driver):
     try:
         msg.info("Goint to premium tab...")
         return driver.find_element_by_css_selector(
             "#mainmenu > a:nth-child(6)")
     except:
         msg.warning("couldn't find premium tab")
         return False
Example #27
 def work(driver):
     try:
         msg.info("Goint to work tab...")
         return driver.find_element_by_css_selector(
             "#submenu1 > a:nth-child(1)")
     except:
         msg.warning("couldn't find work tab")
         return False
Example #28
 def auction_house(driver):
     try:
         msg.info("Goint to auction house tab...")
         return driver.find_element_by_css_selector(
             "#submenu1 > a:nth-child(15)")
     except:
         msg.warning("couldn't find auction house tab")
         return False
Example #29
 def event_zone(driver):
     try:
         msg.info("Goint to event zone...")
         return driver.find_element_by_css_selector(
             f"#submenu2 > a:last-child")
     except:
         msg.warning("couldn't find event zone")
         return False
Example #30
 def subtab(driver, subtab_number):
     try:
         msg.info(f"Going to subtab {subtab_number}...")
         return driver.find_element_by_css_selector(
             f"ul#mainnav > li > table > tbody > tr > td:nth-child({subtab_number}) > a"
         )
     except:
         msg.warning("Couldn't find subtab {subtab_number}")
         return False
Example #31
def merge_data(data1,
               data2,
               match,
               print_warnings=True,
               show_progress=False,
               sorted=True):
    """Merge data1 and data2 respect to match key

    Keyword arguments:
    data1 -- dictionary with data
    data2 -- dictionary with data
    match -- common key use to order data

    if the order of the eventids (or matching idx) is sorted, we can consider:
        index2 = np.array([np.searchsorted(data2[match], i)])
    """

    data, keys1, keys2 = build_data_dict(data1, data2, match)

    # don't use enumerate here because we only want to increment the counter
    # when we have a match
    ct = 0
    for i in data1[match]:
        index1 = np.array([ct])
        index2, = np.where(data2[match] == i)
        if not index2.size:
            if print_warnings:
                msg.warning("%(key)s = %(val)d found in the first file, "
                            "but not in the second one." % {
                                "key": match,
                                "val": i
                            })
            continue
        data[match][ct] = i
        for key in keys1:
            data[key][ct] = data1[key][index1]
        for key in keys2:
            data[key][ct] = data2[key][index2]

        if show_progress:
            if ct % 100 == 0:
                print("finished event {}".format(ct))
        ct += 1

    # TODO - pass in a value here; generally speaking, it is not right to
    # never allow the match index value to be zero - it might be so
    # legitimately; but for now...
    badidx = np.where(data[match] == 0)
    if len(badidx[0]) > 0:
        data[match] = np.delete(data[match], badidx, axis=0)
    for key in keys1:
        data[key] = np.delete(data[key], badidx, axis=0)
    for key in keys2:
        data[key] = np.delete(data[key], badidx, axis=0)

    return data
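The docstring above notes that, when data2[match] is already sorted, the linear np.where scan can be replaced by a binary search. A small self-contained sketch of that idea (the helper name find_match_sorted is illustrative, not part of the original code):

import numpy as np

def find_match_sorted(sorted_ids, value):
    # binary-search variant of np.where(sorted_ids == value)[0] for sorted ids
    idx = np.searchsorted(sorted_ids, value)
    if idx < len(sorted_ids) and sorted_ids[idx] == value:
        return np.array([idx])
    return np.array([], dtype=int)

ids = np.array([1, 3, 7, 9])
print(find_match_sorted(ids, 7))   # -> [2]
print(find_match_sorted(ids, 5))   # -> []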
Example #32
 def submit(self):
   self.dag.close()
   msg.info ("Done with dag file. Ready to submit.\n")
   # check if run is not empty
   if os.stat(self.dagFile).st_size == 0:
     msg.warning ("Dag file: " + self.dagFile + " is empty. " + msg.RED + msg.BOLD + "NO JOBS TO RUN!!!\n")
     exit (0)
   # submit dag
   msg.info ("Submitting: " + self.dagFile + "\n")
   subprocess.Popen (self.setup + self.subdag, shell=True, executable="/bin/bash")
Example #33
def fillDAG_cmp( jobsub, tag, date, xsec_a_path, eventdir, reportdir, main_tune, tunes, regretags, regredir ):

   # check if job is done already
   if resultsExist ( tag, date, reportdir ):
      msg.warning ("MINERvA comparisons plots found in " + reportdir + " ... " + msg.BOLD + "skipping minerva:fillDAG_cmp\n", 1)
      return

   # not done, add jobs to dag
   msg.info ("\tAdding MINERvA comparisons (plots) jobs\n")    
   # in serial mode
   jobsub.add ("<serial>")
   config  = "global-minerva-cfg-" + tag + "_" + date + ".xml"
   plotfile = "genie_" + tag + "-minerva.pdf"
   tablechi2 = "genie_" + tag + "-minerva-summary-chi2.txt"
   tableks = "genie_" + tag + "-minerva-summary-KS.txt"
   cmd = "gvld_general_comparison --no-root-output --global-config input/" + config + " -o " + plotfile 
   cmd = cmd + " --summary-chi2-table " + tablechi2
   cmd = cmd + " --summary-KS-table " + tableks
   
   # add the command to dag
   # --> old format --> inputs = reportdir + "/*.xml " + eventdir + "/*.ghep.root "
   inputs = reportdir + "/*.xml " + xsec_a_path + "/xsec-vA-" + tag + ".root " + eventdir + "/*.ghep.root "

   if not (main_tune is None):
      inputs = reportdir + "/*.xml " + xsec_a_path + "/" + main_tune + "-xsec-vA-" + tag + ".root " + eventdir + "/*.ghep.root "
   
   if not (tunes is None):
      for tn in range(len(tunes)):
         # --> old format --> inputs = " " + inputs + eventdir + "/" + tunes[tn] + "/*.ghep.root " 
	 inputs = " " + inputs + xsec_a_path + "/" + tunes[tn] + "/" + tunes[tn] + "-xsec-vA-" + tag + ".root " \
	           + eventdir + "/" + tunes[tn] + "/*.ghep.root "
   
   logfile = "gvld_general_comparison.log"
   
   regre = None
   if not (regretags is None):
      # --> now we need XSec --> regreOK = commonFunctions.regreInputOK( "minerva", regretags, regredir, len(data_struct), None, None )
      regreOK = commonFunctions.regreInputOK( "minerva", regretags, regredir, len(data_struct), "vA", "/xsec/nuA" )
      if regreOK:
         regre = ""
         for rt in range(len(regretags)):
            # NOTE: no need to fetch rtune because we don't get xsec, otherwise it's part of regretags 
            # regre = regre + regredir + "/" + regretags[rt] + "/events/minerva/*.ghep.root "
            # NOTE (11/12/19): now we DO NEED to fetch xsec...
            rversion, rtune = regretags[rt].split("/")
            regre = regre + regredir + "/" + regretags[rt] + "/xsec/nuA/" + rtune + "-xsec-vA-" + rversion + ".root "
            regre = regre + regredir + "/" + regretags[rt] + "/events/minerva/*.ghep.root "
      else:
         msg.info( "\t\tNO input for regression will be copied over \n" )
         regre = None
   
   jobsub.addJob ( inputs, reportdir, logfile, cmd, regre )
   
   # done
   jobsub.add ("</serial>")
Example #34
def fillDAGMerge(jobsub, tag, out, main_tune, tunes):
    # check if job is done already
    if isDoneMerge(tag, out, main_tune, tunes):
        msg.warning(
            "Nucleus merged splines found in " + out + " ... " + msg.BOLD +
            "skipping nua:fillDAGMerge\n", 1)
        return
    # not done, add jobs to dag
    msg.info("\tAdding nucleus splines (merge) jobs\n")

    # in serial mode
    jobsub.add("<serial>")

    # common options
    xmlFile = "gxspl-vA-" + tag + ".xml"
    if not (main_tune is None):
        xmlFile = main_tune + "-gxspl-vA-" + tag + ".xml"
    # merge splines job
    cmd = "gspladd -d input -o " + xmlFile
    inputs = out + "/gxspl*.xml"
    logFile = "gspladd.log"
    jobsub.addJob(inputs, out, logFile, cmd, None)
    # convert to root job
    rootFile = "xsec-vA-" + tag + ".root"
    if not (main_tune is None):
        rootFile = main_tune + "-xsec-vA-" + tag + ".root"
    cmd = "gspl2root -p " + ",".join(nuPDG) + " -t " + ",".join(
        targets) + " -o " + rootFile + " -f input/" + xmlFile
    if not (main_tune is None):
        cmd = cmd + " --tune " + main_tune
    inputs = out + "/" + xmlFile
    logFile = "gspl2root.log"
    jobsub.addJob(inputs, out, logFile, cmd, None)
    # same for tunes if specified
    if not (tunes is None):
        for tn in range(len(tunes)):
            xmlTune = tunes[tn] + "-gxspl-vA-" + tag + ".xml"
            cmdTune = "gspladd -d input -o " + xmlTune
            logTune = tunes[tn] + "-gspladd.log"
            jobsub.addJob(out + "/" + tunes[tn] + "/" + tunes[tn] + "*.xml",
                          out + "/" + tunes[tn], logTune, cmdTune, None)
            rootTune = tunes[tn] + "-xsec-vA-" + tag + ".root"
            logTune = tunes[tn] + "-gspl2root.log"
            cmdTune = "gspl2root -p " + ",".join(nuPDG) + " -t " + ",".join(
                targets
            ) + " -o " + rootTune + " -f input/" + xmlTune + " --tune " + tunes[
                tn]
            jobsub.addJob(out + "/" + tunes[tn] + "/" + xmlTune,
                          out + "/" + tunes[tn], logTune, cmdTune, None)

    # done
    jobsub.add("</serial>")
Example #35
def merge_data(data1, data2, match,
               print_warnings=True, show_progress=False, sorted=True):

    """Merge data1 and data2 respect to match key

    Keyword arguments:
    data1 -- dictionary with data
    data2 -- dictionary with data
    match -- common key use to order data

    if the order of the eventids (or matching idx) is sorted, we can consider:
        index2 = np.array([np.searchsorted(data2[match], i)])
    """

    data, keys1, keys2 = build_data_dict(data1, data2, match)

    # don't use enumerate here because we only want to increment the counter
    # when we have a match
    ct = 0
    for i in data1[match]:
        index1 = np.array([ct])
        index2, = np.where(data2[match] == i)
        if not index2.size:
            if print_warnings:
                msg.warning("%(key)s = %(val)d found in the first file, "
                            "but not in the second one."
                            % {"key": match, "val": i})
            continue
        data[match][ct] = i
        for key in keys1:
            data[key][ct] = data1[key][index1]
        for key in keys2:
            data[key][ct] = data2[key][index2]

        if show_progress:
            if ct % 100 == 0:
                print("finished event {}".format(ct))
        ct += 1

    # TODO - pass in a value here; generally speaking, it is not right to 
    # never allow the match index value to be zero - it might be so
    # legitimately; but for now...
    badidx = np.where(data[match] == 0)
    if len(badidx[0]) > 0:
        data[match] = np.delete(data[match], badidx, axis=0)
    for key in keys1:
        data[key] = np.delete(data[key], badidx, axis=0)
    for key in keys2:
        data[key] = np.delete(data[key], badidx, axis=0)

    return data
Example #36
def copy(source, output, keys):

    """Copy requested datasets.

    Keyword arguments:
    source -- input file
    output -- output file
    keys -- keys to be copied
    """

    for k in keys:
        if k not in source:
            msg.warning("%s requested, but not found." % k)
            continue
        else:
            msg.info("Copying %s" % k)
            source.copy(k, output)
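A minimal usage sketch for the helper above (it assumes h5py and the repo's msg module are available; the file and dataset names are illustrative):

import h5py

source = h5py.File("input.h5", "r")
output = h5py.File("subset.h5", "w")
copy(source, output, ["images", "labels"])  # hypothetical dataset names
source.close()
output.close()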
Example #37
def fillDAG_GST (jobsub, out):
  # check if job is done already
  if isDoneGST (out):
    msg.warning ("xsec validation gst files found in " + out + " ... " + msg.BOLD + "skipping xsecval:fillDAG_GST\n", 1)
    return
  # not done, add jobs to dag
  msg.info ("\tAdding xsec validation (gst) jobs\n")
  # in parallel mode
  jobsub.add ("<parallel>")
  # loop over keys and generate gntpc command
  for key in nuPDG.iterkeys():
    inputFile = "gntp." + key + ".ghep.root"
    logFile = "gntpc" + key + ".log"
    cmd = "gntpc -f gst -i input/" + inputFile
    jobsub.addJob (out + "/" + inputFile, out, logFile, cmd)
  # done
  jobsub.add ("</parallel>")
Example #38
def fillDAG_data (jobsub, tag, date, xsec_n_path, outEvents, outRep):
  # check if job is done already
  if isDoneData (tag, date, outRep):
    msg.warning ("hadronization test plots found in " + outRep + " ... " + msg.BOLD + "skipping hadronization:fillDAG_data\n", 1)
    return
  # not done, add jobs to dag
  msg.info ("\tAdding hadronization test (plots) jobs\n")    
  # in serial mode
  jobsub.add ("<serial>")
  inFile  = "file_list-" + tag + "-" + date + ".xml"
  outFile = "genie_" + tag + "-hadronization_test.ps"
  cmd = "gvld_hadronz_test -g input/" + inFile + " -o " + outFile
  # add the command to dag
  inputs = outRep + "/" + inFile + " " + xsec_n_path + "/xsec-vN-" + tag + ".root " + outEvents + "/*.ghep.root"
  logFile = "gvld_hadronz_test.log"
  jobsub.addJob (inputs, outRep, logFile, cmd)
  # done
  jobsub.add ("</serial>")
Example #39
def fillDAGPart (jobsub, tag, out):
  # check if job is done already
  if isDonePart (out):
    msg.warning ("Nucleons splines found in " + out + " ... " + msg.BOLD + "skipping nun:fillDAGPart\n", 1)
    return
  # not done, add jobs to dag
  msg.info ("\tAdding nucleon splines (part) jobs\n")
  # in parallel mode
  jobsub.add ("<parallel>")
  # common options
  inputs = "none"
  # loop over keys and generate proper command
  for key in nuPDG.iterkeys():
    cmd = "gmkspl -p " + nuPDG[key] + " -t " + targetPDG[key] + " -n " + nKnots + " -e " + maxEnergy \
          + " -o " + outXML[key] + " --event-generator-list " + generatorList[key]
    logFile = "gmkspl." + outXML[key] + ".log"
    jobsub.addJob (inputs, out, logFile, cmd)
  # done
  jobsub.add ("</parallel>")
Example #40
def fillDAGEv (jobsub, tag, xsec_a_path, out):
  # check if job is done already
  if isDoneEv (out):
    msg.warning ("Repeatability test events found in " + out + " ... " + msg.BOLD + "skipping reptest:fillDAGEv\n", 1)
    return
  # not done, add jobs to dag
  msg.info ("\tAdding repeatability test (gevgen) jobs\n")
  # in parallel mode
  jobsub.add ("<parallel>")
  # common options
  inputFile = "gxspl-vA-" + tag + ".xml"
  options = " -p 14 -t 1000260560 -e 0.1,50 -f 1/x --seed 123456 --cross-sections input/" + inputFile
  # loop over runs and generate gevgen command
  for run in runs:
    cmd = "gevgen " + options + " -r " + run
    logFile = "gevgen_" + run + ".log"
    jobsub.addJob (xsec_a_path + "/" + inputFile, out, logFile, cmd)
  # done
  jobsub.add ("</parallel>")
Example #41
def fillDAG_GHEP (jobsub, tag, xsec_a_path, out):
  # check if job is done already
  if isDoneGHEP (out):
    msg.warning ("xsec validation ghep files found in " + out + " ... " + msg.BOLD + "skipping xsecval:fillDAG_GHEP\n", 1)
    return
  #not done, add jobs to dag
  msg.info ("\tAdding xsec validation (ghep) jobs\n")
  # in parallel mode
  jobsub.add ("<parallel>")
  # common configuration
  inputFile = "gxspl-vA-" + tag + ".xml"
  options   = " -n " + nEvents + " -e " + energy + " -f " + flux + " --seed " + mcseed + \
              " --cross-sections input/" + inputFile + " --event-generator-list " + generatorList
  # loop over keys and generate gevgen command
  for key in nuPDG.iterkeys():
    cmd = "gevgen " + options + " -p " + nuPDG[key] + " -t " + targetPDG[key] + " -r " + key
    logFile = "gevgen_" + key + ".log"
    jobsub.addJob (xsec_a_path + "/" + inputFile, out, logFile, cmd)
  # done
  jobsub.add ("</parallel>")
Example #42
    def __find_provider_verilog_file(self, req, v_file):
        from srcfile import SourceFileFactory
        import os
        vf_dirname = v_file.dirname
        sff = SourceFileFactory()

        h_file = os.path.join(vf_dirname, req)
        if os.path.exists(h_file) and not os.path.isdir(h_file):
            return sff.new(h_file)

        inc_dirs = self.__parse_vlog_opt(v_file.vlog_opt)

        for dir in inc_dirs:
            dir = os.path.join( os.getcwd(), dir)
            if not os.path.exists(dir) or not os.path.isdir(dir):
                p.warning("Include path "+dir+" doesn't exist")
                continue
            h_file = os.path.join(dir, req)
            if os.path.exists(h_file) and not os.path.isdir(h_file):
                return sff.new(h_file)
        return None
Example #43
def fillDAGTest (jobsub, events, out):
  # check if job is done already
  if isDoneTest (out):
    msg.warning ("Repeatability test logs found in " + out + " ... " + msg.BOLD + "skipping reptest:fillDAGTest\n", 1)
    return
  # not done, add jobs to dag
  msg.info ("\tAdding repeatability test (gvld) jobs\n")
  # in parallel mode
  jobsub.add ("<parallel>")
  # common options
  options = " --add-event-printout-in-error-log --max-num-of-errors-shown 10 "
  input1 = "gntp." + runs[0] + ".ghep.root" 
  # loop over runs and generate proper command
  for run in runs[1:]:
    input2 = "gntp." + run + ".ghep.root"
    output = "reptest_runs" + runs[0] + "vs" + run + ".log"
    logFile = "gvld_repeatability_test_" + runs[0] + "vs" + run + ".log"
    cmd = "gvld_repeatability_test --first-sample input/" + input1 + \
          " --second-sample input/" + input2 + options + " -o " + output
    jobsub.addJob (events + "/*.ghep.root", out, logFile, cmd)
  # done  
  jobsub.add ("</parallel>")
Example #44
def fillDAGPart (jobsub, tag, xsec_n_path, out):
  # check if job is done already
  if isDonePart (tag, out):
    msg.warning ("Nucleus splines found in " + out + " ... " + msg.BOLD + "skipping nua:fillDAGPart\n", 1)
    return
  # not done, add jobs to dag
  msg.info ("\tAdding nucleus splines (part) jobs\n")
  # in parallel mode
  jobsub.add ("<parallel>")
  # common options
  inputFile = "gxspl-vN-" + tag + ".xml"
  inputs = xsec_n_path + "/*.xml"
  options = " --input-cross-sections input/" + inputFile
  # loop over targets and generate proper command
  for t in targets:
    outputFile = "gxspl_" + t + ".xml"
    cmd = "gmkspl -p " + nuPDG + " -t " + t + " -n " + nKnots + " -e " + maxEnergy + options + \
          " --output-cross-sections " + outputFile
    logFile = "gxspl_" + t + ".xml.log"
    jobsub.addJob (inputs, out, logFile, cmd)
  # done
  jobsub.add ("</parallel>")
Example #45
def getBuild (tag, date, path):
  # get build with defined tag and date and save in path 
  buildName = "genie_" + tag + "_buildmaster_" + date
  # check if build already exists
  if os.path.isdir (path + "/" + buildName):
    msg.warning (path + "/" + buildName + " already exists ... " + msg.BOLD + "skipping jenkins:getBuild\n", 1)
    return buildName
  # no build
  tarball = buildName + ".tgz"
  # check if the build is available
  if tarball not in getBuildList():
    msg.error ("There is no artifact for " + msg.BOLD + tarball + "\n")
    print "Available artifacts:\n"
    for artifact in getBuildList(): print "\t" + artifact + "\n"
    sys.exit (1)
  # download build
  msg.info ("Downloading " + msg.BOLD + tarball)
  urllib.urlretrieve (url + "/artifact/genie_builds/" + tarball, path + "/" + tarball)
  # extract the build
  msg.info ("Extracting to " + msg.BOLD + path + "/" + buildName + "\n")
  tarfile.open(path + "/" + tarball, 'r').extractall(path + "/" + buildName)
  # return buildName
  return buildName
Example #46
def merge_data(data1, data2, match,
               print_warnings=True, show_progress=False, sorted=True):

    """Merge data1 and data2 respect to match key

    Keyword arguments:
    data1 -- dictionary with data
    data2 -- dictionary with data
    match -- common key use to order data

    if the order of the eventids (or matching idx) is sorted, we can consider:
        index2 = np.array([np.searchsorted(data2[match], i)])
    """

    data, keys1, keys2 = build_data_dict(data1, data2, match)

    for ct, i in enumerate(data1[match]):
        index1 = np.array([ct])
        index2, = np.where(data2[match] == i)
        if not index2.size:
            if print_warnings:
                msg.warning("%(key)s = %(val)d found in the first file, "
                            "but not in the second one."
                            % {"key": match, "val": i})
            continue
        data[match][ct] = i
        for key in keys1:
            data[key][ct] = data1[key][index1]
        for key in keys2:
            data[key][ct] = data2[key][index2]

        if show_progress:
            if ct % 100 == 0:
                print("finished event {}".format(ct))

    return data
Example #47
def get_keys(source, keys, skip):

    """Compare user-requested keys with datasets in source file.
    Return keys to copy.

    Keyword arguments:
    source -- input file
    keys -- user-requested keys
    skip -- matching dataset already copied
    """

    if not keys:
        keys = source.keys()
    else:
        keys = [k.strip() for k in keys.split(',')]
        for k in list(keys):  # iterate over a copy, since keys is modified inside the loop
            if k not in source.keys():
                keys.remove(k)
                msg.warning("%s requested, but not found." % k)

    if skip in keys:
        keys.remove(skip)

    return keys
Example #48
    def run_remote_synthesis(self):
        ssh = self.connection
        cwd = os.getcwd()

        p.vprint("The program will be using ssh connection: "+str(ssh))
        if not ssh.is_good():
            p.error("SSH connection failure. Remote host doesn't response.")
            quit()

        if not os.path.exists(self.top_module.fetchto):
            p.warning("There are no modules fetched. Are you sure it's correct?")

        files = self.modules_pool.build_very_global_file_list()
#        tcl = self.__search_tcl_file()
#        if tcl == None:
        self.__generate_tcl()
        tcl = "run.tcl"

        sff = SourceFileFactory()
        files.add(sff.new(tcl))
        files.add(sff.new(self.top_module.syn_project))

        dest_folder = ssh.transfer_files_forth(files,
            dest_folder=self.top_module.syn_name)
        syn_cmd = "cd "+dest_folder+cwd+" && xtclsh run.tcl"

        p.vprint("Launching synthesis on " + str(ssh) + ": " + syn_cmd)
        ret = ssh.system(syn_cmd)
        if ret == 1:
            p.error("Synthesis failed. Nothing will be transfered back")
            quit()

        cur_dir = os.path.basename(cwd)
        os.chdir("..")
        ssh.transfer_files_back(what=dest_folder+cwd, where=".")
        os.chdir(cur_dir)
Example #49
def fillDAGMerge (jobsub, tag, out): 
  # check if job is done already
  if isDoneMerge (tag, out):
    msg.warning ("Nucleons merged splines found in " + out + " ... " + msg.BOLD + "skipping nun:fillDAGMerge\n", 1)
    return
  # not done, add jobs to dag
  msg.info ("\tAdding nucleon splines (merge) jobs\n")
  # in serial mode
  jobsub.add ("<serial>")
  # common options
  xmlFile = "gxspl-vN-" + tag + ".xml"  
  # merge splines job
  cmd = "gspladd -d input -o " + xmlFile
  inputs = out + "/*.xml"
  logFile = "gspladd.log"
  jobsub.addJob (inputs, out, logFile, cmd)
  # convert to root job
  rootFile = "xsec-vN-" + tag + ".root"
  cmd = "gspl2root -p 12,-12,14,-14,16,-16 -t 1000010010,1000000010 -o " + rootFile + " -f input/" + xmlFile
  inputs = out + "/" + xmlFile
  logFile = "gspladd.log"
  jobsub.addJob (inputs, out, logFile, cmd)
  # done
  jobsub.add ("</serial>")
Example #50
            msg.warning("%s requested, but not found." % k)
            continue
        else:
            msg.info("Copying %s" % k)
            source.copy(k, output)


if __name__ == '__main__':

    msg.box("HDF5 MANIPULATOR: EXTRACT")

    args = parser()
    f = h5py.File(args.input, 'r')
    o = h5py.File(args.output, 'w')

    print "The following datasets were found in %s:\n" % args.input
    msg.list_dataset(f)

    copy(f, o, [k.strip() for k in args.keys.split(',')])

    if len(o):
        print "\nThe following dataset were saved in %s:\n" % args.output
        msg.list_dataset(o)
    else:
        msg.warning("No datasets were copied.")

    f.close()
    o.close()

    msg.info("Done")
Example #51
    def __search_use_clauses(self):
        """
        Reads a file and looks for 'use' clauses. For every 'use' of a
        non-standard library, a tuple (lib, file) is returned in a list.

        """
        # get set for top module in time. FIX this
        std_libs = ['std', 'ieee']
        if global_mod.top_module.action == "simulation":
            try:
                if global_mod.sim_tool == "isim":
                    std_libs = flow.XilinxsiminiReader().get_libraries()
                elif global_mod.sim_tool == "vsim":
                    std_libs = flow.ModelsiminiReader().get_libraries()
                else:   # global_mod.sim_tool == None:
                    p.warning("Could not determine simulation tool. Defaulting to ISim")
                    std_libs =  flow.ISIM_STARDAND_LIBS
            except RuntimeError as e:
                #std_libs =  flow.MODELSIM_STANDARD_LIBS
                print "I/O error: ({0})".format(e.message)
                p.error("Picking standard ISim simulation libraries. Try to fix the error.")
                std_libs =  flow.ISIM_STARDAND_LIBS
        elif global_mod.top_module.action == "synthesis":
            #print("setting std libs for synthesis...")
            if global_mod.top_module.target == "xilinx":
                std_libs = flow.ISE_STANDARD_LIBS
            elif global_mod.top_module.target == "altera":
                std_libs = flow.QUARTUS_STANDARD_LIBS

        import re
        try:
            f = open(self.path, "r")
            text = f.readlines()
        except UnicodeDecodeError:
            return []

        use_pattern = re.compile("^[ \t]*use[ \t]+([^ ]+)[ \t]*.*$")
        lib_pattern = re.compile("([^.]+)\.([^.]+)\.all")

        use_lines = []
        for line in text:
            #identifiers and keywords are case-insensitive in VHDL
            line_lower = line.lower()
            m = re.match(use_pattern, line_lower)
            if m != None:
                use_lines.append(m.group(1))


        #print("use_lines: " + ' '.join(use_lines))
        #print("std_libs: " + ' '.join(std_libs))
        ret = set() 
        for line in use_lines:
            m = re.match(lib_pattern, line)
            if m != None:
                #omit standard libraries. Fix this. Ignore ISim std libraries
                #print("m.group(1): " + (m.group(1)).lower())
                #print("std_libs: " + ' '.join(std_libs))
                if (m.group(1)).lower() in std_libs:
                    continue
                #print("Did not take the continue statement")
                if self.library != "work":
                    #if a file is put in a library, `work' points this library
                    new = (self.library.lower(), m.group(2).lower())
                else:
                    new = (m.group(1).lower(), m.group(2).lower())
                
                #p.vprint("new: " + ' '.join(new))
                #dont add if the tuple is already in the list
                if new in self.dep_provides:
                    continue
                ret.add(new)
                
        f.close()
        return ret
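A quick standalone check of the two regular expressions used above (the sample VHDL line is illustrative):

import re

use_pattern = re.compile("^[ \t]*use[ \t]+([^ ]+)[ \t]*.*$")
lib_pattern = re.compile("([^.]+)\.([^.]+)\.all")

line = "use ieee.std_logic_1164.all;".lower()
m = re.match(use_pattern, line)
if m is not None:
    dep = re.match(lib_pattern, m.group(1))
    if dep is not None:
        print((dep.group(1), dep.group(2)))  # ('ieee', 'std_logic_1164')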
Example #52
    def solve(self, fileset):
        n_iter = 0
        max_iter = 100
        import copy

        fset = fileset.filter(IDependable);
        f_nondep = []

        done = False
        while not done and (n_iter < max_iter):
            n_iter = n_iter+1
            done = True
            for f in fset:
                if not f._dep_fixed:
                    idx = fset.index(f)
                    k = self.__lookup_post_provider(files=fset, start_index=idx, file=f);

                    if k:
                        done = False
                        #swap
                        fset[idx], fset[k] = fset[k], fset[idx]

        if(n_iter == max_iter):
            p.error("Maximum number of iterations reached when trying to solve the dependencies."+
            "Perhaps a cyclic inter-dependency problem.");
            return None

        for f in fset:
            if f._dep_fixed:
                f_nondep.append(copy.copy(f))
                del f

        f_nondep.sort(key=lambda f: f.dep_index)
        from srcfile import VHDLFile, VerilogFile
        for f in [file for file in fset if isinstance(file, VHDLFile)]:
            p.vprint(f.path)
            if f.dep_requires:
                for req in f.dep_requires:
                    pf = self.__find_provider_vhdl_file([file for file in fset if isinstance(file, VHDLFile)], req)
                    if not pf:
                        p.error("Missing dependency in file "+str(f)+": " + req[0]+'.'+req[1])
                    else:
                        p.vprint("--> " + pf.path);
                        if pf.path != f.path:
                            f.dep_depends_on.append(pf)
            #get rid of duplicates by making a set from the list and vice versa
            f.dep_depends_on = list(set(f.dep_depends_on))

        import srcfile as sf

        for f in [file for file in fset if isinstance(file, VerilogFile)]:
            p.vprint(f.path)
            if f.dep_requires:
                for req in f.dep_requires:
                    pf = self.__find_provider_verilog_file(req, f)
                    if not pf:
                        p.warning("Cannot find depending for file "+str(f)+": "+req)
                    else:
                        p.vprint("--> " + pf.path)
                        f.dep_depends_on.append(pf)
            #get rid of duplicates by making a set from the list and vice versa
            f.dep_depends_on = list(set(f.dep_depends_on))



        newobj = sf.SourceFileSet();
        newobj.add(f_nondep);
        for f in fset:
            try:
                if not f._dep_fixed:
                    newobj.add(f)
            except:
                newobj.add(f)

    #search for SV includes (BFS algorithm)
        from srcfile import SVFile
        for f in [file for file in newobj if isinstance(file, SVFile)]:
            stack = f.dep_depends_on[:]
            while stack:
                qf = stack.pop(0)
                if qf.dep_requires:
                    f.dep_requires.extend(qf.dep_requires)
                    for req in qf.dep_requires:
                        pf = self.__find_provider_verilog_file(req, f)
                        if not pf:
                            p.warning("Cannot find include for file "+str(f)+": "+req)
                        else:
                            p.vprint("--> " + pf.path)
                            f.dep_depends_on.append(pf)
                            stack.append(pf)
             #get rid of duplicates by making a set from the list and vice versa
            f.dep_depends_on = list(set(f.dep_depends_on))
      

        for k in newobj:
            p.vprint(str(k.dep_index) + " " + k.path + str(k._dep_fixed))
        return newobj
Example #53
    def parse_manifest(self):
        if self.isparsed == True or self.isfetched == False:
            return
        if self.manifest == None:
            self.manifest = self.__search_for_manifest()
        if self.path == None:
            raise RuntimeError()
        manifest_parser = ManifestParser()

		# For non-top modules
        if(self.parent != None):
            manifest_parser.add_arbitrary_code("target=\""+str(global_mod.top_module.target)+"\"")
            manifest_parser.add_arbitrary_code("action=\""+str(global_mod.top_module.action)+"\"")
			# syn_device and sim_tool will be set for non-top modules
            manifest_parser.add_arbitrary_code("syn_device=\""+str(global_mod.top_module.syn_device)+"\"")

        manifest_parser.add_arbitrary_code("__manifest=\""+self.path+"\"")
        manifest_parser.add_arbitrary_code(global_mod.options.arbitrary_code)

        if self.manifest == None:
            p.vprint("No manifest found in module "+str(self))
        else:
            manifest_parser.add_manifest(self.manifest)
            p.vprint("Parsing manifest file: " + str(self.manifest))

        opt_map = None
        try:
            opt_map = manifest_parser.parse()
        except NameError as ne:
            p.echo("Error while parsing {0}:\n{1}: {2}.".format(self.manifest, type(ne), ne))
            quit()

        if(opt_map["fetchto"] != None):
            fetchto = path_mod.rel2abs(opt_map["fetchto"], self.path)
            self.fetchto = fetchto
        else:
            fetchto = self.fetchto

        if self.ise == None:
            self.ise = "13.1"

        if "local" in opt_map["modules"]:
            local_paths = self.__make_list(opt_map["modules"]["local"])
            local_mods = []
            for path in local_paths:
                if path_mod.is_abs_path(path):
                    p.error("Found an absolute path (" + path + ") in a manifest")
                    p.rawprint("(" + self.path + ")")
                    quit()
                path = path_mod.rel2abs(path, self.path)
                local_mods.append(self.pool.new_module(parent=self, url=path, source="local", fetchto=fetchto))
            self.local = local_mods
        else:
            self.local = []

        self.vmap_opt = opt_map["vmap_opt"]
        self.vcom_opt = opt_map["vcom_opt"]
        self.vsim_opt = opt_map["vsim_opt"]
        self.vlog_opt = opt_map["vlog_opt"]

        #if self.vlog_opt == "":
        #    self.vlog_opt = global_mod.top_module.vlog_opt
        #if self.vcom_opt == "":
        #    self.vcom_opt = global_mod.top_module.vcom_opt
        #if self.vsim_opt == "":
        #    self.vsim_opt = global_mod.top_module.vsim_opt
       # if self.vmap_opt == "":
        #    self.vmap_opt = global_mod.top_module.vmap_opt

        self.library = opt_map["library"]
        self.include_dirs = []
        if opt_map["include_dirs"] != None:
            if isinstance(opt_map["include_dirs"], basestring):
                self.include_dirs.append(opt_map["include_dirs"])
            else:
                self.include_dirs.extend(opt_map["include_dirs"])

        for dir in self.include_dirs:
            if path_mod.is_abs_path(dir):
                p.warning(self.path + " contains absolute path to an include directory: " +
                dir)
            if not os.path.exists(dir):
                p.warning(self.path + " has an unexisting include directory: " + dir)

        if opt_map["files"] == []:
            self.files = SourceFileSet()
        else:
            opt_map["files"] = self.__make_list(opt_map["files"])
            paths = []
            for path in opt_map["files"]:
                if not path_mod.is_abs_path(path):
                    path = path_mod.rel2abs(path, self.path)
                    paths.append(path)
                else:
                    p.warning(path + " is an absolute path. Omitting.")
                if not os.path.exists(path):
                    p.error("File listed in " + self.manifest.path + " doesn't exist: "
                    + path +".\nExiting.")
                    quit()

            from srcfile import VerilogFile, VHDLFile
            self.files = self.__create_file_list_from_paths(paths=paths);
            for f in self.files:
                if isinstance(f, VerilogFile):
                    f.vsim_opt = self.vsim_opt
                elif isinstance(f, VHDLFile):
                    f.vcom_opt = self.vcom_opt

        if "svn" in opt_map["modules"]:
            opt_map["modules"]["svn"] = self.__make_list(opt_map["modules"]["svn"])
            svn_mods = []
            for url in opt_map["modules"]["svn"]:
                svn_mods.append(self.pool.new_module(parent=self, url=url, source="svn", fetchto=fetchto))
            self.svn = svn_mods
        else:
            self.svn = []

        if "git" in opt_map["modules"]:
            opt_map["modules"]["git"] = self.__make_list(opt_map["modules"]["git"])
            git_mods = []
            for url in opt_map["modules"]["git"]:
                git_mods.append(self.pool.new_module(parent=self, url=url, source="git", fetchto=fetchto))
            self.git = git_mods
        else:
            self.git = []

        self.target = opt_map["target"]
        self.action = opt_map["action"]

        if opt_map["syn_name"] == None and opt_map["syn_project"] != None:
            self.syn_name = opt_map["syn_project"][:-5] #cut out .xise from the end
        else:
            self.syn_name = opt_map["syn_name"]
        self.syn_device = opt_map["syn_device"];
        self.syn_grade = opt_map["syn_grade"];
        self.syn_package= opt_map["syn_package"];
        self.syn_project = opt_map["syn_project"];
        self.syn_top = opt_map["syn_top"];

        self.isparsed = True

        for m in self.submodules():
            m.parse_manifest()