Example #1
def CreateCapacityFactors(M):
    """
	Steps to creating capacity factors:
	1. Collect all possible processes
	2. Find the ones _not_ specified in CapacityFactorProcess
	3. Set them, based on CapacityFactorTech.
	"""
    # Shorter names, for us lazy programmer types
    CFP = M.CapacityFactorProcess

    # Step 1
    processes = set((t, v) for i, t, v, o in M.Efficiency.sparse_iterkeys())

    all_cfs = set(
        (s, d, t, v)
        for s, d, (t, v) in cross_product(M.time_season, M.time_of_day, processes)
    )

    # Step 2
    unspecified_cfs = all_cfs.difference(CFP.sparse_iterkeys())

    # Step 3

    # Some hackery: We futz with _constructed because Pyomo thinks that this
    # Param is already constructed.  However, in our view, it is not yet,
    # because we're specifically targeting values that have not yet been
    # constructed, that we know are valid, and that we will need.

    if unspecified_cfs:
        # CFP._constructed = False
        for s, d, t, v in unspecified_cfs:
            CFP[s, d, t, v] = M.CapacityFactorTech[s, d, t]
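Here cross_product is itertools.product (Example #3 below shows that import). A minimal, self-contained sketch of the same fill-in pattern, with made-up process data standing in for the Pyomo sets:

# Illustration only: the data below is invented, not from the model above.
from itertools import product as cross_product

processes = {('coal', 2020), ('wind', 2025)}         # (tech, vintage) pairs
seasons = ['summer', 'winter']
days = ['day', 'night']

all_cfs = set((s, d, t, v)
              for s, d, (t, v) in cross_product(seasons, days, processes))
specified = {('summer', 'day', 'coal', 2020)}        # user-supplied entries
unspecified = all_cfs - specified                    # keys that need a default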
Example #2
def get_spec_2017_jobs_iterator():
    name = 'spec-2017'
    params = input_space.name_params_map[name]
    for p in cross_product(params.kernels, params.cpu_types, params.workloads,
                           params.sizes):
        kwargs = lists_to_dict(['kernel', 'cpu', 'workload', 'size'], p)
        yield kwargs
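The lists_to_dict helper used here (and in Examples #4-#6) is not shown in these examples; a plausible implementation simply zips the key names with the values from each cross-product tuple:

# Hypothetical sketch of lists_to_dict; the real implementation is not shown.
def lists_to_dict(keys, values):
    # e.g. lists_to_dict(['kernel', 'cpu'], ('v5.4', 'atomic'))
    #        -> {'kernel': 'v5.4', 'cpu': 'atomic'}
    return dict(zip(keys, values))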
Example #3
    def build_full_task_dag(cls):
        """
        A class method which builds a full :class:`TaskDAG` where only
        subclasses of ``cls`` are included in the DAG.
        If ``cls`` is :class:`BaseTask` then the DAG contains all tasks.

        The :class:`TaskDAG` instance represents the dependencies between
        :class:`BaseTask` subclasses based on the events they produce and
        depend on.

        :rtype: :class:`TaskDAG`
        """
        dag = TaskDAG()
        # Add all existing tasks to the dag.
        for task in BaseTask.plugins:
            if task is not cls and issubclass(task, cls):
                dag.add_task(task)

        # Create the edges of the graph by creating an edge between each pair of
        # tasks T1, T2 where T1 produces an event E and T2 depends on the event
        # E.
        from itertools import product as cross_product
        events = cls.build_task_event_dependency_graph()
        for event_producers, event_consumers in events.values():
            for task1, task2 in cross_product(event_producers, event_consumers):
                dag.add_dependency(task1, task2)

        return dag
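The edge-creation step in isolation, with stand-in task names (not from the source above):

# Illustration only: pair every producer of an event with every consumer.
from itertools import product as cross_product

event_producers = ['FetchTask', 'ParseTask']   # tasks that produce event E
event_consumers = ['IndexTask']                # tasks that depend on event E
edges = list(cross_product(event_producers, event_consumers))
# [('FetchTask', 'IndexTask'), ('ParseTask', 'IndexTask')]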
Example #4
def get_parsec_20_04_jobs_iterator():
    name = 'parsec-20.04'
    params = input_space.name_params_map[name]
    for p in cross_product(params.kernels, params.cpu_types, params.mem_sys,
                           params.num_cpus, params.workloads, params.sizes):
        kwargs = lists_to_dict(
            ['kernel', 'cpu', 'mem_sys', 'num_cpu', 'workload', 'size'], p)
        yield kwargs
Example #5
def get_boot_exit_jobs_iterator():
    name = 'boot-exit'
    params = input_space.name_params_map[name]
    for p in cross_product(params.kernels, params.cpu_types, params.mem_sys,
                           params.num_cpus, params.boot_types):
        kwargs = lists_to_dict(
            ['kernel', 'cpu', 'mem_sys', 'num_cpu', 'boot_type'], p)
        yield kwargs
Example #6
def get_gapbs_jobs_iterator():
    name = 'gapbs'
    params = input_space.name_params_map[name]
    for p in cross_product(params.kernels, params.cpu_types, params.num_cpus,
                           params.mem_sys, params.workloads, params.synthetic,
                           params.n_nodes):
        kwargs = lists_to_dict([
            'kernel', 'cpu', 'num_cpu', 'mem_sys', 'workload', 'synthetic',
            'n_nodes'
        ], p)
        yield kwargs
Example #7
    def _chain_synthesis_pathways(self, reaction):
        """Determine the synthesis pathway of a reaction.

        Arguments:
            reaction (str): The reaction to check.

        Returns:
            Set[FrozenSet[str]]: All the synthesis pathways of this reaction.
        """
        reactant_networks = [
            self.graph.nodes[reactant]['pathways']
            for reactant in self.graph.predecessors(reaction)
        ]
        pathways = set()
        for networks in cross_product(*reactant_networks):
            pathways.add(frozenset(set.union({reaction}, *networks)))
        return pathways
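A toy run of the same combination step, with made-up reaction and pathway names:

# Illustration only: one pathway choice per reactant, unioned with the reaction.
from itertools import product as cross_product

reactant_networks = [
    [{'r1'}, {'r2'}],   # pathways known for reactant A
    [{'r3'}],           # pathways known for reactant B
]
pathways = set()
for networks in cross_product(*reactant_networks):
    pathways.add(frozenset(set.union({'rxn'}, *networks)))
# pathways == {frozenset({'rxn', 'r1', 'r3'}), frozenset({'rxn', 'r2', 'r3'})}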
Example #8
def CreateDemands(M):
    """
	Steps to create the demand distributions
	1. Use Demand keys to ensure that all demands in commodity_demand are used
	2. Find any slices not set in DemandDefaultDistribution, and set them based
	on the associated SegFrac slice.
	3. Validate that the DemandDefaultDistribution sums to 1.
	4. Find any per-demand DemandSpecificDistribution values not set, and set
	set them from DemandDefaultDistribution.  Note that this only sets a
	distribution for an end-use demand if the user has *not* specified _any_
	anything for that end-use demand.  Thus, it is up to the user to fully
	specify the distribution, or not.  No in-between.
	 5. Validate that the per-demand distributions sum to 1.
	"""

    # Step 0: some setup for a couple of reusable items

    # iget(2): 2 = magic number to specify the third column.  Currently the
    # demand in the tuple (s, d, dem)
    DSD_dem_getter = iget(2)

    # Step 1
    used_dems = set(dem for p, dem in M.Demand.sparse_iterkeys())
    unused_dems = sorted(M.commodity_demand.difference(used_dems))
    for dem in unused_dems:
        SE.write("Warning: Demand '{}' is unused\n".format(dem))

    # Step 2
    DDD = M.DemandDefaultDistribution  # Shorter, for us lazy programmer types
    unset_defaults = set(M.SegFrac.sparse_iterkeys())
    unset_defaults.difference_update(DDD.sparse_iterkeys())
    if unset_defaults:
        # Some hackery because Pyomo thinks that this Param is constructed.
        # However, in our view, it is not yet, because we're specifically
        # targeting values that have not yet been constructed, that we know are
        # valid, and that we will need.
        # DDD._constructed = False
        for tslice in unset_defaults:
            DDD[tslice] = M.SegFrac[tslice]
        # DDD._constructed = True

    # Step 3
    total = sum(DDD.itervalues())
    if abs(value(total) - 1.0) > 0.001:
        # We can't explicitly test for "!= 1.0" because of incremental rounding
        # errors associated with the specification of demand shares by time slice,
        # but we check to make sure it is within the specified tolerance.

        key_padding = max(map(get_str_padding, DDD.sparse_iterkeys()))

        format = "%%-%ds = %%s" % key_padding
        # Works out to something like "%-25s = %s"

        items = sorted(DDD.items())
        items = '\n   '.join(format % (str(k), v) for k, v in items)

        msg = (
            'The values of the DemandDefaultDistribution parameter do not '
            'sum to 1.  The DemandDefaultDistribution specifies how end-use '
            'demands are distributed among the time slices (i.e., time_season, '
            'time_of_day), so together, the data must total to 1.  Current '
            'values:\n   {}\n\tsum = {}')

        raise Exception(msg.format(items, total))

    # Step 4
    DSD = M.DemandSpecificDistribution

    demands_specified = set(map(DSD_dem_getter, DSD.sparse_iterkeys()))
    unset_demand_distributions = used_dems.difference(demands_specified)
    unset_distributions = set(
        cross_product(M.time_season, M.time_of_day,
                      unset_demand_distributions))

    if unset_distributions:
        # Some hackery because Pyomo thinks that this Param is constructed.
        # However, in our view, it is not yet, because we're specifically
        # targeting values that have not yet been constructed, that we know are
        # valid, and that we will need.
        # DSD._constructed = False
        for s, d, dem in unset_distributions:
            DSD[s, d, dem] = DDD[s, d]
        # DSD._constructed = True

    # Step 5
    for dem in used_dems:
        keys = (k for k in DSD.sparse_iterkeys() if DSD_dem_getter(k) == dem)
        total = sum(DSD[i] for i in keys)

        if abs(value(total) - 1.0) > 0.001:
            # We can't explicitly test for "!= 1.0" because of incremental rounding
            # errors associated with the specification of demand shares by time slice,
            # but we check to make sure it is within the specified tolerance.

            keys = [
                k for k in DSD.sparse_iterkeys() if DSD_dem_getter(k) == dem
            ]
            key_padding = max(map(get_str_padding, keys))

            format = "%%-%ds = %%s" % key_padding
            # Works out to something like "%-25s = %s"

            items = sorted((k, DSD[k]) for k in keys)
            items = '\n   '.join(format % (str(k), v) for k, v in items)

            msg = (
                'The values of the DemandSpecificDistribution parameter do not '
                'sum to 1.  The DemandSpecificDistribution specifies how end-use '
                'demands are distributed per time-slice (i.e., time_season, '
                'time_of_day).  Within each end-use Demand, then, the distribution '
                'must total to 1.\n\n   Demand-specific distribution in error: '
                ' {}\n\n   {}\n\tsum = {}')

            raise Exception(msg.format(dem, items, total))
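The tolerance check used in Steps 3 and 5, reduced to its essentials (the share values here are illustrative):

# Illustration only: a distribution must sum to 1 within a rounding tolerance.
shares = {('summer', 'day'): 0.25, ('summer', 'night'): 0.25,
          ('winter', 'day'): 0.25, ('winter', 'night'): 0.25}
total = sum(shares.values())
if abs(total - 1.0) > 0.001:
    raise ValueError('distribution sums to {}, not 1'.format(total))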
Example #9
def get_local_remote_ip(printer=StatusPrinter()):

    printer("Probing Network From Both Sides")

    Address = namedtuple("Address", "ip_str int")

    def read_ip(msg, ip_str, printer):
        if '127.0.0.1' not in ip_str:
            printer("{:>10}:  {}".format(msg, ip_str))
            return Address(ip_str, int(ipaddress.IPv4Address(ip_str)))
        else:
            return None

    # get all ipv4 addresses among the local network adapters
    printer("Local Addresses:")
    local_addresses = set()
    with Indent(printer):
        adapters = ifaddr.get_adapters()
        for adapter in adapters:
            for ip in adapter.ips:
                ip_str = str(ip.ip)
                if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', ip_str):
                    address = read_ip(adapter.nice_name, ip_str, printer)
                    if address is not None:
                        local_addresses.add(address)
        printer('')

    # get all ipv4 addresses among the device's known routes
    printer("Device Address Candidates:")
    device_addresses = set()
    with Indent(printer):
        # keep only things that look like ip addresses
        device_ip_strs = set(re.findall(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+',
            str(
                sed(
                    # TODO: this would be cleaner with 'adb shell netcfg' which I discovered too late

                    # dump the routing table
                    adb(['shell', 'ip', 'route']),

                    # print only lines containing 'src', and only the part after the 'src'
                    ['-n', r's#^.*src\(.*\)$#\1#p']
                    )
                ).strip()))

        for ip_str in device_ip_strs:
            address = read_ip("route entry", ip_str, printer)
            if address is not None:
                device_addresses.add(address)
        printer('')

    # In an IP address, the more significant bits identify the network (subnet)
    # and the less significant bits identify the host.  XORing two addresses in
    # the same subnet zeroes out the shared prefix bits, so smaller distances
    # returned by this function mean the two addresses are more likely to be
    # able to talk to each other.
    def subnet_distance(ip_a, ip_b):
        return ip_a.int ^ ip_b.int

    # Ping local from remote, and ping remote from local
    # Return true if both succeed
    def can_talk(local, remote, printer):

        printer("Pinging {} -> {}".format(remote, local))
        with Indent(printer):
            printer('''adb shell 'ping -c 4 {} && echo SUCCESS || echo FAIL' '''.format(local))
            with Indent(printer):
                remote2local = str(adb(['shell', 'ping -c 4 {} && echo SUCCESS || echo FAIL'.format(local)]))
                printer(remote2local)

        if 'SUCCESS' in remote2local:
            printer("Pinging {} -> {}".format(local, remote))
            with Indent(printer):
                printer('ping -c 4 {}'.format(remote))
                with Indent(printer):
                    try:
                        local2remote = ping(['-c', '4', remote])
                    except sh.ErrorReturnCode as err:
                        local2remote = err
                    printer(local2remote)

            if local2remote.exit_code == 0:
                return True
        return False

    # sort local/remote pairs by distance
    matches = SortedDict()
    for local_ip, remote_ip in cross_product(local_addresses, device_addresses):
        matches[subnet_distance(local_ip, remote_ip)] = (local_ip.ip_str, remote_ip.ip_str)

    # check connectivity (nearest first)
    for local, remote in matches.values():
        if can_talk(local, remote, printer):
            return (local, remote)
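A quick check of the subnet_distance idea with concrete addresses:

# Illustration only: XOR distance is smaller for addresses in the same subnet.
import ipaddress

a = int(ipaddress.IPv4Address('192.168.1.10'))
b = int(ipaddress.IPv4Address('192.168.1.20'))   # same /24 as a
c = int(ipaddress.IPv4Address('10.0.0.5'))       # different network entirely
assert (a ^ b) < (a ^ c)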