Esempio n. 1
0
def validate(dump_dir, p4_file, log_file, config):
    """
    Validate the translation of a P4 program and archive artifacts when
    something goes wrong.

    On a timeout the program and log are copied to TIMEOUT_DIR and
    util.EXIT_FAILURE is returned. On a validation failure the artifacts
    are copied to UNDEF_DIR (unstable code) or VALIDATION_BUG_DIR and,
    if configured, the pruner is launched on the offending program.

    Returns the result code produced by validate_p4.
    """
    try:
        result = validate_p4(p4_file, dump_dir, config["compiler_bin"],
                             log_file)
    except TimeoutError:
        log.error("Validation timed out.")
        dump_file(TIMEOUT_DIR, p4_file)
        dump_file(TIMEOUT_DIR, log_file)
        # reset the dump directory
        return util.EXIT_FAILURE
    if result == util.EXIT_SUCCESS:
        return result
    info_file = p4_file.with_suffix("").joinpath(f"{p4_file.stem}_info.json")
    if result == util.EXIT_UNDEF:
        log.error("Found instance of unstable code!")
        bug_dir = UNDEF_DIR
    else:
        log.error("Failed to validate the P4 code!")
        bug_dir = VALIDATION_BUG_DIR
    log.error("Rerun the example with:")
    out_file = bug_dir.joinpath(p4_file.name)
    log.error("python3 bin/validate_p4_translation -u -i %s", out_file)
    # archive everything needed to reproduce the failure
    for artifact in (log_file, p4_file, info_file):
        dump_file(bug_dir, artifact)
    if config["do_prune"]:
        info_file = bug_dir.joinpath(f"{p4_file.stem}_info.json")
        p4_cmd = f"{PRUNER_BIN} "
        p4_cmd += f"--config {info_file} "
        p4_cmd += f" {bug_dir.joinpath(f'{p4_file.stem}.p4')} "
        p4_cmd += f" --working-dir {bug_dir.joinpath(f'{p4_file.stem}')}"
        log.info("Pruning P4 file with command %s ", p4_cmd)
        util.start_process(p4_cmd)
    return result
Esempio n. 2
0
def check(idx, config):
    """
    Generate a random P4 program and push it through the test pipeline.

    A program is generated from a logged random seed, compiled, and then
    — depending on *config* — validated or run through the blackbox
    tests. Artifacts of failing stages are archived in the matching bug
    directories; the per-test dump directory is removed in all paths.

    @param idx Index used to build a unique test name.
    @param config Dict providing at least "compiler_bin", "do_prune",
           "do_validate", and "use_blackbox".
    @return Exit code of the failing stage, or the result of the last
            stage that ran.
    """
    test_id = generate_id()
    test_name = f"{test_id}_{idx}"
    dump_dir = OUTPUT_DIR.joinpath(f"dmp_{test_name}")
    util.check_dir(dump_dir)
    log_file = dump_dir.joinpath(f"{test_name}.log")
    p4_file = dump_dir.joinpath(f"{test_name}.p4")
    # 64 bits of OS entropy; the seed is logged so a run can be replayed
    seed = int.from_bytes(os.getrandom(8), "big")
    log.info("Testing P4 program: %s - Seed: %s", p4_file.name, seed)
    # generate a random program
    result, p4_file = generate_p4_prog(P4RANDOM_BIN, p4_file, config, seed)
    if result.returncode != util.EXIT_SUCCESS:
        # fixed grammar (was "Failed generate P4 code!")
        log.error("Failed to generate P4 code!")
        dump_result(result, GENERATOR_BUG_DIR, p4_file)
        # reset the dump directory
        util.del_dir(dump_dir)
        return result.returncode
    # check compilation
    result = compile_p4_prog(config["compiler_bin"], p4_file, dump_dir)
    if result.returncode != util.EXIT_SUCCESS:
        if not is_known_bug(result):
            log.error("Failed to compile the P4 code!")
            log.error("Found a new bug!")
            dump_result(result, CRASH_BUG_DIR, p4_file)
            dump_file(CRASH_BUG_DIR, p4_file)
            if config["do_prune"]:
                info_file = CRASH_BUG_DIR.joinpath(f"{p4_file.stem}_info.json")
                info = validation.INFO
                # customize the main info with the new information
                info["compiler"] = str(config["compiler_bin"])
                info["exit_code"] = result.returncode
                info["p4z3_bin"] = str(P4Z3_BIN)
                info["out_dir"] = str(CRASH_BUG_DIR)
                info["input_file"] = str(p4_file)
                info["allow_undef"] = False
                info["err_string"] = result.stderr.decode("utf-8")
                log.error("Dumping configuration to %s.", info_file)
                with open(info_file, 'w') as json_file:
                    json.dump(info, json_file, indent=2, sort_keys=True)
                p4_cmd = f"{PRUNER_BIN} "
                p4_cmd += f"--config {info_file} "
                p4_cmd += f" {CRASH_BUG_DIR.joinpath(f'{p4_file.stem}.p4')} "
                log.error("Pruning P4 file with command %s ", p4_cmd)
                util.start_process(p4_cmd)
        # reset the dump directory
        util.del_dir(dump_dir)
        return result
    # check validation
    if config["do_validate"]:
        result = validate(dump_dir, p4_file, log_file, config)
    elif config["use_blackbox"]:
        result = run_p4_test(dump_dir, p4_file, log_file, config)

    # reset the dump directory
    util.del_dir(dump_dir)
    return result
Esempio n. 3
0
def ovs_vsctl_is_ovs_bridge(bridge):
    """
    Check whether *bridge* names an existing OVS bridge.

    Returns True when "ovs-vsctl br-exists" reports success.
    """
    exit_code, _out, _err = util.start_process(
        ["ovs-vsctl", "br-exists", bridge])
    return exit_code == 0
Esempio n. 4
0
def ovs_vsctl_add_port_to_bridge(bridge, iface):
    """
    Attach the interface *iface* to the OVS bridge *bridge*.

    Returns the exit status of the "ovs-vsctl add-port" invocation.
    """
    cmd = ["ovs-vsctl", "add-port", bridge, iface]
    status, _out, _err = util.start_process(cmd)
    return status
Esempio n. 5
0
def ovs_vsctl_is_ovs_bridge(bridge):
    """
    Return True if the given name refers to an existing OVS bridge.
    """
    # "br-exists" exits with 0 only when the bridge is present
    result = util.start_process(["ovs-vsctl", "br-exists", bridge])
    return result[0] == 0
Esempio n. 6
0
def ovs_vsctl_add_port_to_bridge(bridge, iface):
    """
    Add the given interface to the bridge; return the command's exit
    code.
    """
    args = ["ovs-vsctl", "add-port", bridge, iface]
    exit_code, _stdout, _stderr = util.start_process(args)
    return exit_code
Esempio n. 7
0
def ovs_get_physical_interface(bridge):
    """
    Find a physical interface attached to *bridge*.

    Lists all interfaces on the bridge and returns the first one whose
    OVS Interface "type" column is empty ('""') or "system". Returns
    None when the listing fails or no physical interface is found.
    """
    status, listing, _err = util.start_process(
        ["ovs-vsctl", "list-ifaces", bridge])
    if status != 0:
        return None
    for candidate in listing.splitlines():
        status, iface_type, _err = util.start_process(
            ["ovs-vsctl", "get", "Interface", candidate, "type"])
        if status == 0 and ('""' in iface_type or 'system' in iface_type):
            # an empty or "system" type marks a physical interface
            return candidate
    return None
Esempio n. 8
0
def ovs_get_physical_interface(bridge):
    """
    Return the first physical interface assigned to the given bridge.

    A physical interface is one whose OVS Interface "type" is empty
    ('""') or "system". Returns None when no match exists or the
    interface listing fails.
    """
    ret, out, _err = util.start_process(["ovs-vsctl", "list-ifaces", bridge])
    if ret != 0:
        return None
    ifaces = out.splitlines()
    for iface in ifaces:
        ret, type_out, _err = util.start_process(
            ["ovs-vsctl", "get", "Interface", iface, "type"])
        if ret != 0:
            continue
        if ('""' in type_out) or ('system' in type_out):
            # first physical interface wins
            return iface
    return None
Esempio n. 9
0
def start_grafana(grafana_home_path, grafana_port):
    """
    Launch a grafana server for the given home path.

    Returns the process handle from util.start_process.
    """
    cmd = [
        GRAFANA_BIN,
        '--homepath', grafana_home_path,
        '--config', 'custom.ini',
    ]
    log_path = path.join(PROMTIMER_DIR, 'logs/grafana.log')
    logging.info(
        'starting grafana server (on localhost:{}; logging to {})'.format(
            grafana_port, log_path))
    # Don't specify a log file as it is done within the custom.ini file
    # otherwise the output is duplicated.
    return util.start_process(cmd, None, PROMTIMER_DIR)
def launch_prometheus():
    """
    Port-forward the istio-system Prometheus pod to localhost:9090.

    Exits the program when Kubernetes is not reachable. Returns the
    forwarding process and a PrometheusConnect API handle.
    """
    if kube_env.check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        sys.exit(util.EXIT_FAILURE)
    pod_query = "kubectl get pods -n istio-system -lapp=prometheus "
    pod_query += " -o jsonpath={.items[0].metadata.name}"
    prom_pod_name = util.get_output_from_proc(pod_query).decode("utf-8")
    forward_cmd = f"kubectl port-forward -n istio-system {prom_pod_name} 9090"
    # own process group so the forwarder can be signaled as a unit
    prom_proc = util.start_process(forward_cmd, preexec_fn=os.setsid)
    # give the port-forward a moment to establish
    time.sleep(2)
    prom_api = PrometheusConnect(url="http://localhost:9090", disable_ssl=True)
    return prom_proc, prom_api
def launch_storage_mon():
    """
    Port-forward the storage-upstream pod's port 8080 to localhost:8090.

    Exits the program when Kubernetes is not reachable. Returns the
    forwarding process.
    """
    if kube_env.check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        sys.exit(util.EXIT_FAILURE)
    pod_query = "kubectl get pods -lapp=storage-upstream "
    pod_query += " -o jsonpath={.items[0].metadata.name}"
    storage_pod_name = util.get_output_from_proc(pod_query).decode("utf-8")
    forward_cmd = f"kubectl port-forward {storage_pod_name} 8090:8080"
    storage_proc = util.start_process(forward_cmd, preexec_fn=os.setsid)
    # Let settle things in a bit
    time.sleep(2)
    return storage_proc
Esempio n. 12
0
def ovs_vsctl_set(table, record, column, key, value):
    """
    Alter the OVS database: set *column* (or *column:key* for map
    columns) of *record* in *table* to *value*.

    Pass key=None for non-map columns. Returns the exit status of
    ovs-vsctl.
    """
    if key is None:
        target = column
    else:
        target = "%s:%s" % (column, key)
    assignment = "%s=%s" % (target, value)
    status, _out, _err = util.start_process(
        ["ovs-vsctl", "set", table, record, assignment])
    return status
Esempio n. 13
0
def ovs_vsctl_set(table, record, column, key, value):
    """
    Set a column (or a map entry column:key) of a record in the OVS
    database and return the exit status of ovs-vsctl.

    The key should be None when the column is not a map.
    """
    index = column if key is None else "%s:%s" % (column, key)
    index_value = "%s=%s" % (index, value)
    cmd = ["ovs-vsctl", "set", table, record, index_value]
    ret, _out, _err = util.start_process(cmd)
    return ret
Esempio n. 14
0
def runFire(scenario):
    """!
    Run fire for given scenario
    @param scenario Scenario to run simulations for
    @return stdout as a string
    @return stderr as a string
    """
    raster = None
    if scenario.perim:
        # burn the perimeter into a raster; may also rewrite scenario.perim
        scenario.perim, raster = rasterize_perim(scenario.run_output,
                                                 scenario.perim,
                                                 scenario.year,
                                                 scenario.fire)
    cmd = os.path.join(Settings.HOME_DIR, Settings.BINARY)
    # positional arguments: quoted output dir, day, lat, lon, start time
    a = ['"' + scenario.run_output + '"', scenario.day, str(scenario.lat), str(scenario.lon), scenario.start_time]
    if scenario.keep_intensity:
        a = a + ["-i"]
    args = ' '.join(a)
    if scenario.actuals_only:
        args = args + " -a"
    if scenario.no_async:
        args += " -s"
    # only pass --size when no perimeter raster is available
    if scenario.current_size and 1 < int(scenario.current_size) and not raster:
        args += " --size " + str(int(scenario.current_size))
    if raster:
        args = args + ' --perim "' + raster + '"'
    # optional startup indices / weather overrides taken from the scenario
    if scenario.ffmc:
        args += " --ffmc " + scenario.ffmc
    if scenario.dmc:
        args += " --dmc " + scenario.dmc
    if scenario.dc:
        args += " --dc " + scenario.dc
    if scenario.apcp_0800:
        args += " --apcp_0800 " + scenario.apcp_0800
    if scenario.score:
        args += " --score " + scenario.score
    # run generated command for parsing data
    # backslashes are normalized to forward slashes before shlex splitting
    run_what = [cmd] + shlex.split(args.replace('\\', '/'))
    logging.info("Running: " + ' '.join(run_what))
    t0 = timeit.default_timer()
    stdout, stderr = finish_process(start_process(run_what, scenario.simulation_flags, Settings.HOME_DIR))
    t1 = timeit.default_timer()
    logging.info("Took {}s to run simulations".format(t1 - t0))
    return stdout, stderr
Esempio n. 15
0
 def maybe_start(self, log_dir):
     """
     Starts the Prometheus instance that serves stats for this source.

     @param log_dir Directory that receives the prom-<short_name>.log
            file for this instance.
     @return The process handle returned by util.start_process.
     """
     log_path = path.join(log_dir, 'prom-{}.log'.format(self._short_name))
     # listen on all interfaces at this source's dedicated port
     listen_addr = '0.0.0.0:{}'.format(self.port())
     args = [
         Source.PROMETHEUS_BIN, '--config.file',
         path.join(util.get_root_dir(),
                   'noscrape.yml'), '--storage.tsdb.path',
         path.join(self._cbcollect_dir,
                   'stats_snapshot'), '--storage.tsdb.no-lockfile',
         '--storage.tsdb.retention.time', '10y', '--query.lookback-delta',
         '600s', '--web.listen-address', listen_addr
     ]
     logging.info(
         'starting prometheus server on {} against {}; logging to {}'.
         format(listen_addr, path.join(self._cbcollect_dir,
                                       'stats_snapshot'), log_path))
     return util.start_process(args, log_path)
Esempio n. 16
0
def start_prometheuses(cbcollects, base_port, log_dir):
    """
    Start one prometheus server per cbcollect directory.

    Server i listens on base_port + i and logs to prom-<i>.log inside
    *log_dir*. Returns the list of started process handles.
    """
    nodes = []
    for idx, snapshot_dir in enumerate(cbcollects):
        log_path = path.join(log_dir, 'prom-{}.log'.format(idx))
        listen_addr = '0.0.0.0:{}'.format(base_port + idx)
        args = [
            PROMETHEUS_BIN,
            '--config.file', path.join(util.get_root_dir(), 'noscrape.yml'),
            '--storage.tsdb.path', path.join(snapshot_dir, 'stats_snapshot'),
            '--storage.tsdb.retention.time', '10y',
            '--web.listen-address', listen_addr,
        ]
        logging.info(
            'starting prometheus server {} (on {}; logging to {})'.format(
                idx, listen_addr, log_path))
        nodes.append(util.start_process(args, log_path))
    return nodes
Esempio n. 17
0
def run_bmv2_test(out_dir, p4_input, use_psa=False):
    """
    Run the p4c bmv2 test driver on a single P4 program.

    @param out_dir Directory containing the program; also used as the
           working directory of the test process.
    @param p4_input Path of the P4 program under test.
    @param use_psa Pass -p to the driver (PSA architecture mode).
    @return Tuple of (test process, stdout, stderr).
    """
    cmd = "python3 "
    cmd += f"{P4C_DIR}/backends/bmv2/run-bmv2-test.py "
    cmd += f"{P4C_DIR} -v "
    if use_psa:
        cmd += "-p "
    cmd += f"-bd {P4C_DIR}/build "
    cmd += f"{out_dir}/{p4_input.name} "
    test_proc = util.start_process(cmd, cwd=out_dir)

    # forward interrupts to the test process before exiting ourselves
    def signal_handler(sig, frame):
        log.warning("run_bmv2_test: Caught Interrupt, exiting...")
        os.kill(test_proc.pid, signal.SIGINT)
        sys.exit(1)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # block until the test driver finishes
    stdout, stderr = test_proc.communicate()
    return test_proc, stdout, stderr
Esempio n. 18
0
def ovs_vsctl_show():
    """
    Print the current OVS configuration ("ovs-vsctl show") to stdout.

    Returns the exit status of the command (previously nothing was
    returned).
    """
    ret, out, _err = util.start_process(["ovs-vsctl", "show"])
    # was Python-2 "print _out", a SyntaxError under Python 3
    print(out)
    return ret
Esempio n. 19
0
def ovs_vsctl_add_trunk_port(port, trunk):
    """
    Configure the trunk setting of an OVS port.

    The original command list was missing the "ovs-vsctl" executable, so
    it attempted to run a program literally named "set".
    NOTE(review): *trunk* is passed verbatim; callers presumably supply
    a full "trunks=<vlans>" assignment — confirm against call sites.

    Returns the exit status of ovs-vsctl.
    """
    ret, _out, err = util.start_process(["ovs-vsctl", "set", "port", port,
                                         trunk])
    return ret
Esempio n. 20
0
def ovs_vsctl_del_port_from_bridge(port):
    """
    Remove *port* from its OVS bridge; return the command's exit code.
    """
    status, _out, _err = util.start_process(["ovs-vsctl", "del-port", port])
    return status
Esempio n. 21
0
def ifconfig(ip, port):
    # Assign an IP address to a network device via "ip addr add".
    # NOTE(review): unlike the ovs_vsctl_* helpers, which return the exit
    # code, this returns the captured stderr — confirm callers rely on
    # the error text rather than the status.
    ret, _out, err = util.start_process(["ip", "addr", "add", ip, "dev", port])
    return err
Esempio n. 22
0
def iplink(port, status):
    # Set a network device's link state ("up"/"down" or other values
    # accepted by "ip link set").
    # NOTE(review): returns the captured stderr rather than the exit
    # code — confirm callers expect the error text.
    ret, _out, err = util.start_process(["ip", "link", "set", port, status])
    return err
Esempio n. 23
0
def ovs_vsctl_del_bridge(bridge):
    """
    Delete the named OVS bridge and return the exit status.
    """
    exit_code, _out, _err = util.start_process(
        ["ovs-vsctl", "del-br", bridge])
    return exit_code
Esempio n. 24
0
def ovs_vsctl_del_bridge(bridge):
    """
    Remove the OVS bridge *bridge*; returns the ovs-vsctl exit code.
    """
    cmd = ["ovs-vsctl", "del-br", bridge]
    ret, _out, _err = util.start_process(cmd)
    return ret
Esempio n. 25
0
def ovs_vsctl_admin_port(bridge, port, tag):
    """
    Add *port* to *bridge* with the given VLAN tag.

    Returns the exit status of ovs-vsctl.
    """
    tag_option = "%s=%s" % ("tag", tag)
    status, _out, _err = util.start_process(
        ["ovs-vsctl", "add-port", bridge, port, tag_option])
    return status
Esempio n. 26
0
def ovs_vsctl_set_admin(port, type):
    """
    Set the OVS Interface "type" column of *port*.

    Returns the exit status of ovs-vsctl. The original computed the
    status but never returned it, unlike every sibling helper.
    """
    final_type = "type=" + type
    ret, _out, err = util.start_process(
        ["ovs-vsctl", "set", "Interface", port, final_type])
    return ret
def start_fortio(gateway_url):
    """
    Launch fortio load generation against the product page reachable
    through *gateway_url* and return the started process.
    """
    fortio_cmd = f"{FILE_DIR}/bin/fortio "
    fortio_cmd += "load -c 50 -qps 300 -jitter -t 0 -loglevel Warning "
    fortio_cmd += f"http://{gateway_url}/productpage"
    # own process group so the whole load generator can be signaled at once
    return util.start_process(fortio_cmd, preexec_fn=os.setsid)
Esempio n. 28
0
def run_tofino_test(out_dir, p4_input, stf_file_name):
    """
    Build a P4 program for the Tofino target, start the model and
    switchd in the background, then run the STF tests against them.

    @param out_dir Build/working directory for the target.
    @param p4_input Path of the P4 program under test.
    @param stf_file_name STF test file copied into the test directory.
    @return Tuple of (test process, stdout, stderr); on configure/make
            failure, (result, result.stdout, result.stderr) instead.
    """
    # we need to change the working directory
    # tofino scripts make some assumptions where to dump files
    prog_name = p4_input.stem
    # we need to create a specific test dir in which we can run tests
    test_dir = out_dir.joinpath("test_dir")
    util.check_dir(test_dir)
    util.copy_file(stf_file_name, test_dir)
    template_name = test_dir.joinpath(f"{prog_name}.py")
    # use a test template that runs stf tests
    util.copy_file(f"{FILE_DIR}/tofino_test_template.py", template_name)

    # initialize the target install
    log.info("Building the tofino target...")
    config_cmd = f"{TOFINO_DIR}/pkgsrc/p4-build/configure "
    config_cmd += "--with-tofino --with-p4c=bf-p4c "
    config_cmd += f"--prefix={TOFINO_DIR}/install "
    config_cmd += f"--bindir={TOFINO_DIR}/install/bin "
    config_cmd += f"P4_NAME={prog_name} "
    config_cmd += f"P4_PATH={p4_input.resolve()} "
    config_cmd += "P4_VERSION=p4-16 "
    config_cmd += "P4_ARCHITECTURE=tna "
    result = util.exec_process(config_cmd, cwd=out_dir)
    if result.returncode != util.EXIT_SUCCESS:
        return result, result.stdout, result.stderr
    # create the target
    make_cmd = f"make -C {out_dir} "
    result = util.exec_process(make_cmd)
    if result.returncode != util.EXIT_SUCCESS:
        return result, result.stdout, result.stderr
    # install the target in the tofino folder
    make_cmd = f"make install -C {out_dir} "
    result = util.exec_process(make_cmd)
    if result.returncode != util.EXIT_SUCCESS:
        return result, result.stdout, result.stderr
    # background processes that must be cleaned up when the test ends
    procs = []
    test_proc = None
    # start the target in the background
    log.info("Starting the tofino model...")
    os_env = os.environ.copy()
    os_env["SDE"] = f"{TOFINO_DIR}"
    os_env["SDE_INSTALL"] = f"{TOFINO_DIR}/install"

    model_cmd = f"{TOFINO_DIR}/run_tofino_model.sh "
    model_cmd += f"-p {prog_name} "
    # own process group (os.setsid) so the model can be killed as a unit
    proc = util.start_process(model_cmd,
                              preexec_fn=os.setsid,
                              env=os_env,
                              cwd=out_dir)
    procs.append(proc)
    # start the binary for the target in the background
    log.info("Launching switchd...")
    os_env = os.environ.copy()
    os_env["SDE"] = f"{TOFINO_DIR}"
    os_env["SDE_INSTALL"] = f"{TOFINO_DIR}/install"

    switch_cmd = f"{TOFINO_DIR}/run_switchd.sh "
    switch_cmd += "--arch tofino "
    switch_cmd += f"-p {prog_name} "
    proc = util.start_process(switch_cmd,
                              preexec_fn=os.setsid,
                              env=os_env,
                              cwd=out_dir)
    procs.append(proc)

    # wait for a bit
    time.sleep(2)
    # finally we can run the test
    log.info("Running the actual test...")
    test_cmd = f"{TOFINO_DIR}/run_p4_tests.sh "
    test_cmd += f"-t {test_dir} "
    os_env = os.environ.copy()
    os_env["SDE"] = f"{TOFINO_DIR}"
    os_env["SDE_INSTALL"] = f"{TOFINO_DIR}/install"
    # inserting this path is necessary for the tofino_test_template.py
    os_env["PYTHONPATH"] = f"${{PYTHONPATH}}:{ROOT_DIR}"
    test_proc = util.start_process(test_cmd, env=os_env, cwd=out_dir)

    # make sure the background model/switchd die when we are interrupted
    def signal_handler(sig, frame):
        log.warning("run_tofino_test: Caught Interrupt, exiting...")
        cleanup(procs)
        os.kill(test_proc.pid, signal.SIGINT)
        os.kill(test_proc.pid, signal.SIGTERM)
        sys.exit(1)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    stdout, stderr = test_proc.communicate()
    cleanup(procs)
    return test_proc, stdout, stderr
Esempio n. 29
0
def ovs_vsctl_add_bridge(bridge):
    """
    Create an OVS bridge with the given name; return the exit status.
    """
    status, _out, _err = util.start_process(["ovs-vsctl", "add-br", bridge])
    return status
Esempio n. 30
0
def run(config_file, args):
    """!
    Run fire based on configuration file and command line arguments
    @param config_file Configuration file to read Scenario from
    @param args argparser to pass to Scenario
    @return None
    """
    scenario = Scenario(config_file, args)
    had_output = scenario.is_current()
    ensure_dir(os.path.dirname(scenario.run_output))
    # NOTE(review): sizes is assigned but never used in this function
    sizes = None
    # try to find a perimeter and import it
    changed = False
    # remove the in-progress map flag, if any
    def clean_flag():
        if os.path.exists(scenario.mapflag):
            os.remove(scenario.mapflag)
    # discard partial simulation output and clear the map flag
    def cleanup():
        if os.path.exists(scenario.run_output):
            logging.error("Removing output after run failed for " + scenario.fire)
            shutil.rmtree(scenario.run_output, True)
        clean_flag()
    if scenario.force or not (had_output or os.path.exists(scenario.runflag)):
        # HACK: do this right away so that running more than once shouldn't do the same fire in both processes
        try:
            ensure_dir(scenario.run_output)
            # marker file signals that this fire is being worked on
            write_file(scenario.run_output, "running", " ")
            stdout, stderr = runFire(scenario)
            # normalize Windows line endings before saving the output
            write_file(scenario.run_output, "output.txt", '\n'.join(stdout.split('\r\n')))
            os.remove(scenario.runflag)
            changed = True
        except KeyboardInterrupt:
            # this doesn't work for some reason
            cleanup()
        except Exception as e:
            logging.fatal("Error running " + scenario.fire)
            print(e)
            traceback.print_exc()
            cleanup()
            return
    # don't delete output if maps fail
    if (changed or scenario.force_maps or scenario.check_maps) and not (scenario.no_maps or os.path.exists(scenario.runflag)):
        t0 = timeit.default_timer()
        try:
            scenario.save_point_shp()
            from mxd import makeMaps
            pdf_out = makeMaps(scenario, scenario.run_output, scenario.force_maps or changed, scenario.hide)
        except Exception as e:
            logging.fatal(e)
            traceback.print_exc()
            cleanup()
            # run didn't work before so run it now
            # NOTE(review): recursive retry; confirm this cannot loop
            # indefinitely when makeMaps keeps failing
            run(config_file, args)
        try:
            if scenario.outbase == Settings.OUTPUT_DEFAULT:
                # copy the pdf and assets csv into the per-district fire plan folder
                district_folder = os.path.join(Settings.FIRE_ROOT, FOLDER_BY_TLA[scenario.fire[:3]])
                to_folder = os.path.join(district_folder, scenario.fire)
                to_file = os.path.join(to_folder, 'FireSTARR', os.path.basename(pdf_out))
                if not os.path.exists(to_folder):
                    print("*********************************************")
                    print("Need to make fire folder for {}".format(scenario.fire))
                    print("*********************************************")
                    cmd = r'C:\Windows\System32\cscript.exe'
                    run_what = [cmd, os.path.join(district_folder, "CreateFireFolder.vbe"), str(int(scenario.fire[3:]))]
                    logging.debug("Running: " + ' '.join(run_what))
                    finish_process(start_process(run_what, Settings.PROCESS_FLAGS, district_folder))
                    if not os.path.exists(to_folder):
                        logging.fatal("MAKING FOLDER FAILED")
                        sys.exit(-1)
                if not os.path.exists(to_file):
                    try_copy(pdf_out, to_file)
                csv_out = os.path.splitext(pdf_out)[0] + "_assets.csv"
                csv_to = os.path.splitext(to_file)[0] + "_assets.csv"
                if not os.path.exists(csv_to):
                    try_copy(csv_out, csv_to)
        except Exception as e:
            logging.fatal("Couldn't copy to file plan")
            print(e)
            traceback.print_exc()
            clean_flag()
        t1 = timeit.default_timer()
        logging.info("Took {}s to make maps".format(t1 - t0))
Esempio n. 31
0
def ovs_vsctl_add_bridge(bridge):
    """
    Create the OVS bridge *bridge* and return ovs-vsctl's exit code.
    """
    cmd = ["ovs-vsctl", "add-br", bridge]
    ret, _out, _err = util.start_process(cmd)
    return ret
Esempio n. 32
0
def makeMaps(scenario, run_output, force_maps, hide):
    """!
    @param scenario Scenario to use settings from
    @param run_output Folder where simulation output resides
    @param force_maps Whether or not to force making maps if they already exist
    @param hide Whether or not to show perimeter closest to date on map
    @return Path to final output pdf
    """
    import pdf
    from pdf import makePDF
    perimeters = PerimeterList(scenario.year, scenario.fire)
    sim_output = readSimOutput(run_output)
    startup = find_lines(sim_output, 'Startup indices ')
    startup = startup[0] if (
        len(startup) > 0) else "Startup indices are not valid"
    prefix = 'actuals_' if scenario.actuals_only else 'wxshield_'
    fire_prefix = scenario.fire + "_" + ('actual_'
                                         if scenario.actuals_only else '')
    # probability rasters produced by the simulation (one per day)
    probs = [
        x for x in os.listdir(os.path.join(Settings.HOME_DIR, run_output))
        if x.startswith(prefix) and x[-3:] == "asc"
    ]
    # day numbers become relative to the day before the first output
    day0 = find_day(probs[0]) - 1
    # NOTE(review): these map() results are indexed below (dates[0],
    # days[-1]) — this relies on Python 2 semantics where map() returns
    # a list; confirm this module runs under Python 2 (xrange is used too)
    jds = map(find_day, probs)
    dates = map(find_date, probs)
    days = map(lambda x: x - day0, jds)
    extent = None
    perim = None
    ensure_dir(scenario.outbase)
    out_dir = os.path.join(scenario.outbase, scenario.fire[:3])
    ensure_dir(out_dir)
    for_time = os.path.basename(scenario.run_output)
    pdf_output = os.path.abspath(
        os.path.join(out_dir, fire_prefix + for_time + ".pdf"))
    copied = os.path.join(scenario.outbase, os.path.basename(pdf_output))
    # HACK: if any one map is required then make them all
    if not (force_maps or not os.path.exists(pdf_output)):
        logging.info("Maps already exist for " + scenario.fire)
        return copied
    for_time = os.path.basename(scenario.run_output)
    # flag file guards against two processes mapping the same fire
    mapflag = os.path.join(out_dir,
                           scenario.fire + "_" + for_time + "_mapsinprogress")
    if os.path.exists(mapflag):
        logging.info("Maps already being made for " + scenario.fire)
        return copied
    write_file(os.path.dirname(mapflag), os.path.basename(mapflag), " ")
    map_output = getMapOutput(run_output)
    logging.info("Making maps for " + scenario.fire)
    # HACK: run in parallel but assume this works for now
    wxshield = getWxSHIELDFile(dates[0], scenario.fire, map_output)
    processes = []
    run_what = r'python.exe firestarr\getWxshield.py {} {} {} {} {} "{}"'.format(
        scenario.lat, scenario.lon, dates[0], days[-1], scenario.fire,
        map_output)
    if 'overridden' in startup:
        # parse the startup indices out of the parenthesized list
        startup_values = map(lambda x: x.strip(),
                             startup[startup.find('(') + 1:-1].split(','))
        logging.debug(startup_values)
        # HACK: just use known positions for now
        #~ (0.0mm, FFMC 92.0, DMC 59.0, DC 318.0)
        #~ print(startup_values[0][:-2], startup_values[1][5:].strip(), startup_values[2][4:].strip(), startup_values[0][3:].strip())
        apcp = float(startup_values[0][:-2])
        ffmc = float(startup_values[1][5:].strip())
        dmc = float(startup_values[2][4:].strip())
        dc = float(startup_values[3][3:].strip())
        run_what += ' --apcp_0800 {} --ffmc {} --dmc {} --dc {}'.format(
            apcp, ffmc, dmc, dc)
    logging.debug(run_what)
    processes.append(
        start_process(run_what, Settings.PROCESS_FLAGS, Settings.HOME_DIR))
    arcpy.env.overwriteOutput = True
    ensure_dir(os.path.dirname(out_dir))
    ensure_dir(out_dir)
    # keep these until the end so they lock the file names
    mxd_paths = []
    mxd_names = []
    risk_paths = []
    risk_names = []
    scores = []
    txtFuelRaster = find_line(sim_output, 'Fuel raster is ', 'Fuel raster is ')
    suffix = findSuffix(txtFuelRaster)
    env_push()
    png_processes = []
    arcpy.env.scratchWorkspace = ensure_dir(
        arcpy.CreateScratchName(scenario.fire + os.path.basename(run_output),
                                "", "Workspace",
                                arcpy.GetSystemEnvironment('TEMP')))
    # kick off size-plot renders in parallel (reverse day order)
    for i in reversed(xrange(len(days))):
        f = os.path.join(
            run_output, probs[i].replace(prefix,
                                         'sizes_').replace('.asc', '.csv'))
        run_what = r'python.exe firestarr\plotsize.py "{}" "{}"'.format(
            f, days[i])
        png_processes = [
            start_process(run_what, Settings.PROCESS_FLAGS, Settings.HOME_DIR)
        ] + png_processes
    # build per-day probability and risk maps
    for i in reversed(xrange(len(days))):
        finish_process(png_processes[i])
        arcpy.env.addOutputsToMap = False
        prob_input = os.path.join(run_output, probs[i])
        c_prob = arcpy.sa.Int(arcpy.sa.Raster(prob_input) * 10)
        shp_class = os.path.join(
            map_output, probs[i].replace(".asc",
                                         "_class_poly.shp").replace("-", "_"))
        # keep getting 'WARNING: Error of opening hash table for code page.' when we save to file plan
        poly = "in_memory\poly"
        logging.debug("Converting to polygon")
        arcpy.RasterToPolygon_conversion(c_prob, poly, "SIMPLIFY")
        del c_prob
        #~ print(shp_class)
        arcpy.CopyFeatures_management(poly, shp_class)
        del poly
        perim = None if hide else perimeters.find_perim(
            scenario.fire, dates[i])
        copyMXD = None
        if len(days) - 1 == i:
            # we need to get the extent from the last map
            copyMXD, theMXD, extent = getProjectionMXD(i,
                                                       scenario.actuals_only,
                                                       scenario.run_output,
                                                       scenario.fire, extent,
                                                       perim)
            run_what = r'python.exe firestarr\saveboth.py "{}" "{}"'.format(
                copyMXD, fire_prefix + dates[i] + ".png")
            processes.append(
                start_process(run_what, Settings.PROCESS_FLAGS,
                              Settings.HOME_DIR))
            del theMXD
            run_what = r'python.exe firestarr\assets.py {} "{}" {} "{}" {}'.format(
                i, scenario.run_output, scenario.fire, extent, prefix)
        else:
            copyMXD = getProjectionMXDName(i, scenario.actuals_only,
                                           scenario.run_output, scenario.fire,
                                           extent, perim)
            run_what = r'python.exe firestarr\getProjectionMXD.py {} "{}" {} "{}"'.format(
                i, scenario.run_output, scenario.fire, extent)
            if scenario.actuals_only:
                run_what += ' --actuals'
            if perim:
                run_what += ' --perim "{}"'.format(perim)
        processes.append(
            start_process(run_what, Settings.PROCESS_FLAGS, Settings.HOME_DIR))
        mxd_paths = [copyMXD] + mxd_paths
        mxd_names = [fire_prefix + dates[i] + ".png"] + mxd_names
        start_raster = os.path.join(run_output, scenario.fire + '.tif')
        fire_raster = None
        if os.path.exists(start_raster):
            fire_raster = arcpy.sa.Raster(start_raster)
        # need to make sure the extent is the same for all rasters or they don't add properly
        env_push()
        setSnapAndExtent(prob_input)

        # probability-weighted burn raster for one intensity class
        def by_intensity(intensity):
            letter = intensity.upper()[0]
            prob_i = os.path.join(
                run_output,
                prob_input.replace(prefix, 'intensity_{}_'.format(letter)))
            ra = Settings.RAMPART_MASK.format(intensity, suffix)
            logging.debug(prob_i)
            raster = arcpy.sa.Int(
                arcpy.sa.Raster(prob_i) * arcpy.sa.Raster(ra))
            if fire_raster is not None:
                # don't count anything in the starting perimeter
                # HACK: will not consider fires that start from just a size
                raster = arcpy.sa.Con(arcpy.sa.IsNull(fire_raster), raster, 0)
            raster = arcpy.sa.Con(arcpy.sa.IsNull(raster), 0, raster)
            return raster

        low_raster = by_intensity('low')
        moderate_raster = by_intensity('moderate')
        high_raster = by_intensity('high')
        total_raster = low_raster + moderate_raster + high_raster
        total_raster = arcpy.sa.SetNull(0 == total_raster, total_raster)
        total_path = os.path.join(
            map_output,
            prob_input.replace(prefix, 'RA_').replace('.asc', '.tif'))
        total_raster.save(total_path)
        del low_raster
        del moderate_raster
        del high_raster
        score = arcpy.RasterToNumPyArray(total_raster, nodata_to_value=0).sum()
        # .58 so that 10 for social & economic gives a 10 total score
        score = fixK(score / 1000000.0 / 0.58)
        env_pop()
        run_what = r'python.exe firestarr\getRiskMXD.py {} "{}" {} "{}" "{}"'.format(
            i, scenario.run_output, scenario.fire, extent, score)
        if scenario.actuals_only:
            run_what += ' --actuals'
        if perim:
            run_what += ' --perim "{}"'.format(perim)
        processes.append(
            start_process(run_what, Settings.PROCESS_FLAGS, Settings.HOME_DIR))
        copyMXD = getRiskMXDName(i, scenario.actuals_only, scenario.run_output,
                                 scenario.fire, extent, perim)
        risk_paths = [copyMXD] + risk_paths
        risk_names = [
            os.path.join(os.path.dirname(copyMXD),
                         fire_prefix + dates[i] + "_risk.png")
        ] + risk_names
        scores = [score] + scores
    env_pop()
    # fuels map goes in front of the per-day maps
    copyMXD = getFuelMXDName(fire_prefix, scenario.run_output, scenario.fire,
                             extent, perim)
    run_what = r'python.exe firestarr\getFuelMXD.py {} "{}" {} "{}"'.format(
        fire_prefix, scenario.run_output, scenario.fire, extent)
    if perim:
        run_what += ' --perim "{}"'.format(perim)
    processes.append(
        start_process(run_what, Settings.PROCESS_FLAGS, Settings.HOME_DIR))
    mxd_paths = [copyMXD] + mxd_paths
    mxd_names = [fire_prefix + "_fuels.png"] + mxd_names
    mxd_names = map(lambda x: os.path.abspath(os.path.join(map_output, x)),
                    mxd_names)
    # impact map goes in front of the per-day risk maps
    copyMXD = getImpactMXDName(fire_prefix, scenario.run_output, scenario.fire,
                               extent, perim)
    run_what = r'python.exe firestarr\getImpactMXD.py {} "{}" {} "{}"'.format(
        fire_prefix, scenario.run_output, scenario.fire, extent)
    if perim:
        run_what += ' --perim "{}"'.format(perim)
    processes.append(
        start_process(run_what, Settings.PROCESS_FLAGS, Settings.HOME_DIR))
    risk_paths = [copyMXD] + risk_paths
    risk_names = [
        os.path.join(os.path.dirname(copyMXD), fire_prefix + "_impact.png")
    ] + risk_names
    # wait for all the renderers started above before assembling the pdf
    for process in processes:
        finish_process(process)
    # HACK: put in not generated images for any missing maps
    if len(mxd_names) < 6:
        mxd_names = (
            mxd_names +
            [os.path.join(Settings.HOME_DIR, 'not_generated.png')] * 6)[:6]
    if len(risk_names) < 6:
        risk_names = (
            risk_names +
            [os.path.join(Settings.HOME_DIR, 'not_generated.png')] * 6)[:6]
    logging.debug(mxd_names + [wxshield] + risk_names)
    makePDF(scenario.fire, days, dates, mxd_names, wxshield, risk_names,
            sim_output, pdf_output, scores)
    try_copy(pdf_output, copied)
    # HACK: use known file name for assets
    csv_orig = os.path.abspath(
        os.path.join(run_output, fire_prefix + for_time + "_assets.csv"))
    csv_output = os.path.abspath(
        os.path.join(out_dir, os.path.basename(csv_orig)))
    csv_copied = os.path.join(scenario.outbase, os.path.basename(csv_orig))
    try_copy(csv_orig, csv_output)
    try_copy(csv_orig, csv_copied)
    fixtime(scenario.fire, parse(for_time.replace('_', ' ')),
            [pdf_output, copied, csv_orig, csv_copied])
    try:
        tryForceRemove(mapflag)
    except:
        pass
    # shouldn't need any of these intermediary outputs
    shutil.rmtree(map_output, True)
    return copied
Esempio n. 33
0
def ovs_vsctl_del_port_from_bridge(port):
    """
    Detach *port* from its OVS bridge and return the exit status.
    """
    cmd = ["ovs-vsctl", "del-port", port]
    ret, _out, _err = util.start_process(cmd)
    return ret