Example #1
def start_with_graph():
    global dag
    dag = DAG()
    dag.from_dict({'a': ['b', 'c'],
                   'b': ['d'],
                   'c': ['d'],
                   'd': []})
Example #2
def generate_dag(optimal_indvidual, stage_name, num_nodes):
    # create nodes for the graph
    nodes = np.empty(0, dtype=str)  # np.str was removed in NumPy 1.24; plain str is equivalent
    for n in range(1, (num_nodes + 1)):
        nodes = np.append(nodes, ''.join([stage_name, "_", str(n)]))

    # initialize directed acyclic graph (DAG) and add nodes to it
    dag = DAG()
    for n in nodes:
        dag.add_node(n)

    # split the best individual found via GA to identify vertex connections and wire them into the DAG
    edges = np.split(optimal_indvidual, np.cumsum(range(num_nodes - 1)))[1:]
    v2 = 2
    for e in edges:
        v1 = 1
        for i in e:
            if i:
                dag.add_edge(''.join([stage_name, "_",
                                      str(v1)]),
                             ''.join([stage_name, "_",
                                      str(v2)]))
            v1 += 1
        v2 += 1

    # delete nodes not connected to any other node from the DAG
    for n in nodes:
        if len(dag.predecessors(n)) == 0 and len(dag.downstream(n)) == 0:
            dag.delete_node(n)
            nodes = np.delete(nodes, np.where(nodes == n)[0][0])

    return dag, nodes
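The np.split/np.cumsum line is the subtle part: for num_nodes nodes the GA bit-string has length 1 + 2 + ... + (num_nodes - 1), and the cumulative sums mark where each node's block of incoming-edge bits begins. A minimal sketch for num_nodes = 4 (the bit values are illustrative):

import numpy as np

bits = np.array([1, 0, 1, 1, 1, 0])              # length 1 + 2 + 3 = 6
edges = np.split(bits, np.cumsum(range(3)))[1:]  # drops the empty s[:0] slice
# edges == [array([1]), array([0, 1]), array([1, 1, 0])]
# block k (0-based) holds the bits for edges from nodes 1..k+1 into node k+2
for v2, e in enumerate(edges, start=2):
    for v1, bit in enumerate(e, start=1):
        if bit:
            print('s1_%d -> s1_%d' % (v1, v2))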
Example #3
def start_with_graph():
    global dag
    dag = DAG()
    dag.from_dict({'a': ['b', 'c'],
                   'b': ['d'],
                   'c': ['d'],
                   'd': []})
Example #4
def test_all_downstreams_pass_graph():
    dag2 = DAG()
    dag2.from_dict({'a': ['c'],
                    'b': ['d'],
                    'c': ['d'],
                    'd': []})
    assert dag.all_downstreams('a', dag2.graph) == ['c', 'd']
    assert dag.all_downstreams('b', dag2.graph) == ['d']
    assert dag.all_downstreams('d', dag2.graph) == []
Example #5
def test_all_downstreams_pass_graph():
    dag2 = DAG()
    dag2.from_dict({'a': ['c'],
                    'b': ['d'],
                    'c': ['d'],
                    'd': []})
    assert dag.all_downstreams('a', dag2.graph) == ['c', 'd']
    assert dag.all_downstreams('b', dag2.graph) == ['d']
    assert dag.all_downstreams('d', dag2.graph) == []
Example #6
def get_bags(data):
    dag = DAG()
    for line in data:
        bag, contains = line.split(" bags contain ")
        for c in contains.split(","):
            m = RE_CONTAIN.match(c.strip())
            if m:
                dag.add_edge(bag, m.group(2), int(m.group(1)))
    return dag
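RE_CONTAIN is defined elsewhere in that project. For the usual "bags" puzzle input ("light red bags contain 1 bright white bag, 2 muted yellow bags."), it is presumably something along these lines (an assumption, not the project's actual pattern):

import re

# assumed: group(1) is the count, group(2) the two-word bag name
RE_CONTAIN = re.compile(r'(\d+) (\w+ \w+) bags?')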
Example #7
 def __init__(self, hps=None):
     """Init HyperparameterSpace."""
     self._hyperparameters = OrderedDict()
     self._condition_dict = OrderedDict()
     self._forbidden_list = []
     self._hp_count = 0
     self._dag = DAG()
     if hps is not None:
         self._hps2ds(hps)
Example #8
    def test_prune_dag_should_remove_nodes_preceding_given_node(self):
        dag = DAG()
        dag.add_edges(sources=['b', 'c'], target='a')
        dag.add_edges(sources=['a'], target='d')
        dag.add_edges(sources=['d'], target='e')
        self.assertCountEqual(['c', 'b', 'a', 'd', 'e'], dag.to_string())

        dag.prune_upto('d')
        self.assertCountEqual(['d', 'e'], dag.to_string())
Example #9
 def test_add_edge(self):
     dag = DAG()
     dag.add_edges(sources=['b', 'c'], target='a')
     self.assertCountEqual(dag.heads(), ['a'])
     self.assertTrue(dag.has('a'))
     self.assertTrue(dag.has('b'))
     self.assertTrue(dag.has('c'))
     self.assertFalse(dag.has('d'))
     self.assertTrue(dag.has_path('b', 'a'))
     self.assertFalse(dag.has_path('a', 'b'))
Example #10
def inital_graph():
    dag = DAG()
    #dag.from_dict({0:[2],
    #               1:[2,3],
    #               2:[4],
    #               3:[4,5],
    #               4:[],
    #               5:[]})
    dag.from_dict({0: [], 1: [], 2: [], 3: [0, 2], 4: [0], 5: [0, 2, 3]})
    return dag
Example #11
def allocate(participants, constraints=Constraints()):
	# first construct the DAG with the constraints
	dag = DAG()
	for giver in participants:
		for taker in participants:
			if constraints.is_allowed(giver,taker):
				dag.connect(dag.node(giver),dag.node(taker))
	completed = allocate_loop(dag)
	pairs = extract_pairs(completed)

	return pairs
Example #12
 def test_add(self):
     g = DAG()
     g.add_node(1)
     g.add_node(2)
     g.add_node(3, inputs = {1,2})
     g.add_node(0, outputs={1})
     self.assertEqual(g._head_nodes, g.to_nodes({0,2}))
     self.assertEqual(g._leaf_nodes, g.to_nodes({3}))
Example #13
class Chemistry:
    def __init__(self, data):
        self.reactions = collections.defaultdict(list)
        pattern = r'(\d+ \w+)'
        self.dag = DAG('FUEL')
        self.dag.AddNode('ORE', 1)
        for line in data:
            chems = re.findall(pattern, line)
            # Only one result for these inputs.
            result = chems.pop().split(' ')
            # result = (X, node_name)
            self.dag.AddNode(result[1], result[0])
            reactors = dict([
                (n, int(x)) for x, n in [p.strip().split(' ') for p in chems]
            ])
            value = (int(result[0]), reactors)
            self.reactions[result[1]] = value
        for k in self.reactions:
            self.dag.AddEdges(k, self.reactions[k][1])

    def GetDesired(self, desired, formula_source, formula_dest):
        return int(math.ceil(desired / (formula_dest * 1.0)) * formula_source)

    def ComputeIngredients(self, desired_fuel=1):
        sorted_nodes = self.dag.TopologicalSort()
        desired_amounts = collections.defaultdict(int)
        desired_amounts['FUEL'] = desired_fuel
        for node in sorted_nodes:
            if node == 'ORE':
                break
            formula_dest, reactors = self.reactions[node]
            desired = desired_amounts[node]
            for reactor_name in reactors:
                formula_source = reactors[reactor_name]
                new = self.GetDesired(desired, formula_source, formula_dest)
                desired_amounts[reactor_name] += new
        return desired_amounts['ORE']

    def ComputeMaxFuel(self, total_ore):
        # binary search over whole-fuel amounts; integer division keeps the
        # Python 2 semantics of the original, and the else branch avoids a
        # non-terminating loop when ore == total_ore exactly
        ore_start = self.ComputeIngredients()
        start_fuel = total_ore // ore_start
        end_fuel = 2 * start_fuel
        while abs(end_fuel - start_fuel) > 1:
            mid_fuel = (start_fuel + end_fuel) // 2
            ore = self.ComputeIngredients(mid_fuel)

            if ore < total_ore:
                start_fuel = mid_fuel
            elif ore > total_ore:
                end_fuel = mid_fuel
            else:
                return mid_fuel
        return start_fuel
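GetDesired is the whole-batch rounding step: to obtain desired units of a chemical produced formula_dest units at a time, you need ceil(desired / formula_dest) batches, each consuming formula_source units of the ingredient. A quick standalone check:

import math

def get_desired(desired, formula_source, formula_dest):
    # whole batches only: round the batch count up, then scale the input
    return int(math.ceil(desired / (formula_dest * 1.0)) * formula_source)

assert get_desired(7, 10, 3) == 30  # ceil(7 / 3) = 3 batches x 10 ORE each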
Example #14
File: node.py  Project: wholooks/d11dpool
 def __init__(self, *, env, name):
     self.seq_no = 0
     self.env = env
     self.name = name
     self.out_pipe = BroadcastPipe(env=env, sender=self.name)
     self.in_pipe = simpy.Store(self.env)
     self.neighbours = set()
     self.dag = DAG()
     # required to help prune dag for large simulations
     self.blocks_received = []
     self.shares_sent = []
     # Track num_shares_sent separately as shares_sent is pruned when block is received
     self.num_shares_sent = 0
     self.shares_not_rewarded = {}
     self.num_blocks = 0
Example #15
 def __init__(self, desc=None):
     """Init SearchSpace."""
     super(SearchSpace, self).__init__()
     if desc is None:
         desc = SearchSpaceConfig().to_json()
     for name, item in desc.items():
         self.__setattr__(name, item)
         self.__setitem__(name, item)
     self._params = OrderedDict()
     self._condition_dict = OrderedDict()
     self._forbidden_list = []
     self._hp_count = 0
     self._dag = DAG()
     if desc is not None:
         self.form_desc(desc)
Example #16
File: p4sc_CLI.py  Project: P4SC/p4sc
    def do_end_of_sfc(self, line):
        """End of SFC"""
        self.exactly_n_args(line.split(), 0)
        if not self.haveAssignedDir or self.targetDir == '':
            print "Please use assign_dir first to indicate the work directory"
        else:
            # validate the DAG structure
            if self.dag.validate()[0] is False:
                print "You have constructed an SFC with a non-DAG structure; rejecting it"
            else:
                topoOrder, topoOrder_str = self.dag.topological_sort(), ""
                for item in topoOrder:
                    topoOrder_str += item
                    topoOrder_str += ","
                topoOrder_str = topoOrder_str[:-1]

                # create sfc request in terms of txt
                cmd = "echo \"%s\" >> %s/sfc%d.txt" % (
                    topoOrder_str, self.targetDir, self.sfcNumber)
                status, output = commands.getstatusoutput(cmd)
                if status != 0:
                    print "Cannot create SFC request"
                    print output
                    return

            self.dag = DAG()
            self.sfcNumber += 1
Example #17
def getPrograms(input, output):
	states = [i for i in range(len(output)+1)]
	delta = getDelta(states, input, output)
	start = 0 
	accept = len(output)
	dag = DAG(states, delta, start, accept)
	return dag
Example #18
 def __init__(self, desc=None):
     """Init SearchSpace."""
     super(SearchSpace, self).__init__()
     if desc is None:
         desc = SearchSpaceConfig().to_dict()
         if desc.type is not None:
             desc = ClassFactory.get_cls(ClassType.SEARCHSPACE,
                                         desc.type).get_space(desc)
     for name, item in desc.items():
         self.__setattr__(name, item)
         self.__setitem__(name, item)
     self._params = OrderedDict()
     self._condition_dict = OrderedDict()
     self._forbidden_list = []
     self._hp_count = 0
     self._dag = DAG()
     if desc is not None:
         self.form_desc(desc)
Example #19
 def __init__(self, data):
     self.reactions = collections.defaultdict(list)
     pattern = r'(\d+ \w+)'
     self.dag = DAG('FUEL')
     self.dag.AddNode('ORE', 1)
     for line in data:
         chems = re.findall(pattern, line)
         # Only one result for these inputs.
         result = chems.pop().split(' ')
         # result = (X, node_name)
         self.dag.AddNode(result[1], result[0])
         reactors = dict([
             (n, int(x)) for x, n in [p.strip().split(' ') for p in chems]
         ])
         value = (int(result[0]), reactors)
         self.reactions[result[1]] = value
     for k in self.reactions:
         self.dag.AddEdges(k, self.reactions[k][1])
Example #20
def test_from_dict_at_instantiation():
    temp_dict = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}
    dag = DAG(temp_dict)
    assert dag.graph == {
        'a': set(['b', 'c']),
        'b': set('d'),
        'c': set('d'),
        'd': set()
    }
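Note that from_dict stores each adjacency list as a set (set('d') is simply {'d'}), so passing the dict to the constructor and calling from_dict afterwards should be interchangeable; a sketch, assuming the py-dag behavior the test above asserts:

dag_a = DAG({'a': ['b'], 'b': []})
dag_b = DAG()
dag_b.from_dict({'a': ['b'], 'b': []})
assert dag_a.graph == dag_b.graph == {'a': {'b'}, 'b': set()}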
Example #21
def generate_dag(optimal_indvidual, stage_name, num_nodes):
    # optimal_indvidual is the binary string encoding this stage
    # create nodes for the graph
    nodes = np.empty(0, dtype=str)  # np.str was removed in NumPy 1.24; plain str is equivalent
    # name the stage's nodes, e.g. for stage s1 the nodes are s1_1, s1_2, ...
    for n in range(1, (num_nodes + 1)):
        nodes = np.append(nodes, ''.join([stage_name, "_", str(n)]))

    # initialize directed acyclic graph (DAG) and add nodes to it
    # add all the nodes
    dag = DAG()
    for n in nodes:
        dag.add_node(n)

    # split the best individual found via GA to identify vertex connections and wire them into the DAG
    # cumsum is the cumulative sum: cumsum([0, 1, 2, 3]) returns [0, 1, 3, 6]
    # e.g. with 4 nodes the binary string has length 6 and is split into s[:0], s[0:1], s[1:3], s[3:6],
    # i.e. the connection bits for each node
    # the leading empty slice (s[:0] above) is then dropped
    edges = np.split(optimal_indvidual, np.cumsum(range(num_nodes - 1)))[1:]
    v2 = 2
    # iterate over every node's connection bits
    for e in edges:
        v1 = 1
        # walk this node's binary string;
        # whenever a bit is 1, add the corresponding edge to the dag
        # (enumerating v1 with a for loop would read better here)
        for i in e:
            if i:
                dag.add_edge(''.join([stage_name, "_",
                                      str(v1)]),
                             ''.join([stage_name, "_",
                                      str(v2)]))
            v1 += 1
        v2 += 1

    # delete nodes not connected to any other node from the DAG
    # i.e. remove the isolated nodes
    for n in nodes:
        if len(dag.predecessors(n)) == 0 and len(dag.downstream(n)) == 0:
            dag.delete_node(n)
            nodes = np.delete(nodes, np.where(nodes == n)[0][0])

    return dag, nodes
Example #22
 def make_diamond():
     g = DAG()
     g.add_node(0)
     g.add_node(1, inputs={0})
     g.add_node(2, inputs={0})
     g.add_node(3, inputs={1,2})
     
     return g
Example #23
    def __init__(self, input_file):
        self.cfg = self._read(input_file)

        self.info = self.cfg['pipeline']

        self.owner = self.info['owner']
        self.basename = self.info['basename']
        self.version = self.info['version']
        self.dag = DAG()
        self.stages = {}

        for name in self.info['stages']:
            self.stages[name] = self.load_stage(name)
            self.dag.add_node(name)

        for name in self.info['stages']:
            stage_info = self.cfg[name]
            for parent in stage_info['depends-on']:
                self.dag.add_edge(parent, name)
Example #24
 def test_find_not_reachable_with_some_reachable_reversed_order(self):
     dag = DAG()
     dag.add_edges(sources=['b', 'c'], target='a')
     dag.add_edges(sources=['d'], target='e')
     # The first argument is the shares sent by a node. They are sent in the
     # order d, e, b, c. We assume that if there is a path from c to a,
     # then there is a path d -> e -> b -> c, and therefore return an empty
     # list
     self.assertCountEqual(
         dag.find_not_reachable(['d', 'e', 'b', 'c'], 'a'), [])
Example #25
def allocate(participants, constraints=Constraints()):
	# first construct the DAG with the constraints
	dag = DAG()
	for giver in participants:
		for taker in participants:
			if constraints.is_allowed(giver,taker):
				dag.connect(dag.node(giver),dag.node(taker))
	completed = None

	# This loop is a sanity check and a hack, fix it!
	# while True:
	completed = allocate_loop(dag)
	# correct = True
	# for x in completed.nodes():
	# 	if not constraints.is_allowed(x.obj,completed.children(x)[0].obj):
	# 		correct = False
	# 		break
	# if correct: break
	pairs = extract_pairs(completed)

	return pairs
Example #26
def validate_dependency_debug(graph):
    assert graph, "Graph is empty, please check!"
    #convert_to_std_list_graph(graph)
    #dag.from_dict(graph)
    #noinspection PyBroadException
    is_valid = False
    dag = DAG()
    try:
        for key, value in graph.items():
            dag.add_node(key)

        for key, value in graph.items():
            for v in value:
                if v != '':  # 'is not' compared identity, not equality
                    dag.add_edge(key, v)
    except KeyError as e:
        logger.log_error("KeyError while adding to dag, msg is {}".format(e))
    except DAGValidationError as e:
        logger.log_error(
            "DAGValidationError while adding to dag, msg is {}; please check the dependency relationships"
            .format(e))
    except Exception as e:
        logger.log_error("Exception while adding to dag, msg is {}".format(e))
    else:
        is_valid = True

    assert is_valid, "===> key(testcase): {}, value(dependent): {}".format(
        key, value)
Example #27
    def __init__(self, host='127.0.0.1', port=5000, analytics=False, light_client=False, own_key=None):
        """
        Client that interacts with other peers in the network.

        light_client=True, to disable peering and storing peer information.
        """
        self.host = host
        self.port = port
        self.is_shutdown = False

        self.state = StateDB()
        self.keypair = Keypair.from_genesis_file(read_genesis_state())

        if own_key:
            self.keypair = own_key

        self.dag = DAG()
        self.peers = Peers(port)
        self.FIXED_PEERS = (('127.0.0.1',5000),('127.0.0.1',5001),('127.0.0.1',5003))
        self.sessions = {}

        self.logger = logging.getLogger('main')
        self.is_light_client = light_client
        self.lock = Lock()

        self.broadcast_executor = ThreadPoolExecutor(max_workers=8)
        self.tx_executor = ThreadPoolExecutor(max_workers=8)
        self.query_executor = ThreadPoolExecutor(max_workers=4)

        self.analytics_enabled = analytics
        self.analytics_doc_id = None

        self.metrics_lock = Lock()
        self.collect_metrics = False
        self.metrics_start = None
        self.metrics_end = None
        self.transactions_count = 0

        self.txn_insert_times = {}
Example #28
def create_random_dag(node_names, node_user_map):
    """
    Randomly generates a DAG. Start off by creating a list that represents the hierarchy:
    each element in the list is another list holding the nodes in that level,
    and the number of nodes per level is random. Then use this hierarchy to create nodes
    and edges. Edges are created by randomly selecting a node in the previous level
    as a parent.

    Args:
        node_names (list): list of all the nodes to be used
        node_user_map (dict): maps a node name to all of that node's users
    
    Returns:
        graph (DAG): returns a randomly generated DAG object
    """
    # the number of nodes to create will be the same as the length of the node_names list
    node_num = len(node_names)
    hierarchy = []
    curr_num_of_nodes = 0
    hierarchy.append([curr_num_of_nodes])
    curr_num_of_nodes += 1
    # create a hierarchy for the nodes
    while curr_num_of_nodes < node_num:
        nodes_to_create = random.choice(
            list(range(curr_num_of_nodes, node_num)))
        level = [i for i in range(curr_num_of_nodes, nodes_to_create + 1)]
        curr_num_of_nodes += len(level)
        hierarchy.append(level)

    # create empty graph object without passing in input matrix
    graph = DAG(node_names, node_user_map)

    # use the hierarchy to create the nodes and edges
    for level in range(len(hierarchy)):
        if level == 0:
            graph.add_node("Node 0", node_user_map["Node 0"])
        else:
            for num in hierarchy[level]:
                curr_node_name = f"Node {num}"
                graph.add_node(curr_node_name, node_user_map[curr_node_name])
                parent_level = level - 1
                # randomly choose a node a level above in the hierarchy as the parent
                parent_node_num = random.choice(hierarchy[parent_level])
                parent_node_name = f"Node {parent_node_num}"
                graph.add_edge(parent_node_name, curr_node_name)

    # for node in graph.node_list:
    # print(f"node: {node}, edges: {graph.node_list[node].edges.keys()}")
    return graph
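A minimal invocation sketch (the names and user map below are illustrative; node_names is only used for its length, and node_user_map must be keyed by the "Node <n>" names the function generates):

names = ['Node %d' % i for i in range(3)]
users = {name: [] for name in names}
graph = create_random_dag(names, users)  # every non-root node gets one parent from the level above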
Example #29
    def test_prune_should_remove_nodes_preceding_given_hash(self):
        dag = DAG()
        dag.add_edges(sources=['b', 'c'], target='a')
        dag.add_edges(sources=['a'], target='d')
        dag.add_edges(sources=['d'], target='e')

        env = simpy.Environment()
        node = Node(env=env, name='node 1')
        node.dag = dag
        node.blocks_received = ['c', 'b', 'a', 'd', 'e']

        self.assertCountEqual(['c', 'b', 'a', 'd', 'e'], dag.to_string())
        node._prune()
        self.assertCountEqual(['d', 'e'], node.dag.to_string())
Example #30
def ops2dag(merged_ops):
    """Load ops dict into dag."""
    dag = DAG()
    outs = {
        op['outputs'].name: op
        for name, op in merged_ops.items() if op['outputs'] is not None
    }
    for name, node in merged_ops.items():
        inps = node['inputs']
        pre_node_name = 'root'
        dag.add_node_if_not_exists(name)
        if inps is not None:
            for inp in inps:
                pre_node = outs.get(inp.name)
                if pre_node is not None:
                    pre_node_name = pre_node.op_name
                    dag.add_edge(pre_node_name, name)
        else:
            dag.add_edge(pre_node_name, name)
    return dag
Example #31
File: test.py  Project: kd0kfo/dag
 def test_dag_pickle(self):
     import os.path as OP
     import os
     from dag import DAG,Process,load
     # PICKLE
     d = DAG()
     d.add_process(Process("ple",['a','b'],['c','d'],"-brief"))
     d.add_process(Process("ple",['c','e'],['f']))
     filename = d.save()
     self.assertTrue(OP.isfile(filename))
     
     # UNPICKLE
     try:
         with open(filename,"rb") as file:
             d2 = load(file)
     finally:
         os.unlink(filename)
Example #32
def ops2dag(merged_ops):
    """Load ops dict into dag."""
    dag = DAG()
    dot = DagGraphVisual()
    dot.node(name='root', label='root')
    outs = {op['outputs'].name: op for name, op in merged_ops.items() if op['outputs'] is not None}
    outs = {k.replace('Conv2D:0', 'BiasAdd:0'): v for k, v in outs.items()}
    for name, node in merged_ops.items():
        inps = node['inputs']
        pre_node_name = 'root'
        dag.add_node_if_not_exists(name)
        dot.node(name=name, label=name)
        if inps is not None:
            for inp in inps:
                pre_node = outs.get(inp.name)
                if pre_node is not None:
                    pre_node_name = pre_node.op_name
                    dag.add_edge(pre_node_name, name)
                    dot.edge(pre_node_name, name)
        else:
            dag.add_edge(pre_node_name, name)
            dot.edge(pre_node_name, name)
    dot.show()
    return dag
Example #33
# -*- coding:utf-8 -*-

import sys

from dag import DAG
from processor import Processor
import config
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-c", "--config_path")

if __name__ == "__main__":
    (options, args) = parser.parse_args()

    config.json_path = options.config_path
    config.json_dump_path = config.json_path + "_dump"

    dag = DAG()
    dag.build_dag(config.json_path)

    proc = Processor(dag)
    proc.start()

    err = dag.dump_graph()
    sys.exit(err)
Example #34
def test_all_downstreams_pass_graph():
    dag2 = DAG()
    dag2.from_dict({"a": ["c"], "b": ["d"], "c": ["d"], "d": []})
    assert dag.all_downstreams("a", dag2.graph) == ["c", "d"]
    assert dag.all_downstreams("b", dag2.graph) == ["d"]
    assert dag.all_downstreams("d", dag2.graph) == []
Example #35
File: fetch.py  Project: gpruim/caac-map
def fetch_resources_by_topic(worksheets):
    """Smooth out into a nice JSON-able data structure.

    { "deadbeef": { "id": "deadbeef"
                  , "subtopics": { "fa1afe1": { "id": "fa1afe1"
                                              , "topic_id": "deadbeef"
                                              , "dag": { "names": ["fadedfad"]
                                                       , "vertices": [{"incomingNames": "feeddeaf"}]
                                                        }
                                              , "resources": { "fadedfad": { "id": "fadedfad"
                                                                           , "topic_id": "deadbeef"
                                                                           , "subtopic_id": "fa1afe1"
                                                                           , "etc": "stuff"
                                                                            }
                                                              }
                                               }
                   }
    """
    topics = {}
    for topic_id, csvurl in worksheets:
        topic = {}
        topic['id'] = topic_id
        topic['subtopics'] = subtopics = defaultdict(lambda: defaultdict(dict))

        raw = _get(csvurl)
        reader = csv.reader(io.StringIO(raw))
        headers = next(reader)

        for row in reader:
            resource = dict(zip(headers, row))
            resource['id'] = resource['uid']
            resource['topic_id'] = topic_id

            subtopic = subtopics[resource['subtopic_id']]
            subtopic['resources'][resource['id']] = resource

            if 'dag' not in subtopic:
                # First time seeing it. Populate!
                subtopic['dag'] = DAG()
                subtopic['id'] = resource['subtopic_id']
                subtopic['topic_id'] = topic_id

        # Populate DAGs.
        # ==============
        # We have to do this in a second loop so that we can tell whether
        # before_this and after_this are in fact in the same subtopic as a
        # given resource. The base data is not clean on this point.

        for subtopic in subtopics.values():
            for resource in subtopic['resources'].values():

                # Relax the py-dag API to be more like the js DAG we had.
                d = subtopic['dag']
                add_node = lambda node: d.add_node(
                    node) if node and node not in d.graph else None
                add_edge = lambda a, b: d.add_edge(
                    a, b) if a and b and a != b else None

                add_node(resource['id'])
                if resource['before_this'] in subtopic['resources']:
                    add_node(resource['before_this'])
                    add_edge(resource['before_this'], resource['id'])
                if resource['after_this'] in subtopic['resources']:
                    add_node(resource['after_this'])
                    add_edge(resource['id'], resource['after_this'])

        # Convert DAGs to the format that the JavaScript expects.
        for subtopic in subtopics.values():
            dag = subtopic['dag']
            subtopic['dag'] = { "names": dag.topological_sort()
                              , "vertices": \
                                      {k: {"incomingNames": list(dag.graph[k])} for k in dag.graph}
                               }

        topics[topic_id] = topic
    return topics
Example #36
class SearchSpace(dict):
    """A search space for HyperParameters.

    :param desc: a dict that contains hyperparameters, condition and forbidden definitions.
    :type desc: dict, default is `None`.
    """
    def __init__(self, desc=None):
        """Init SearchSpace."""
        super(SearchSpace, self).__init__()
        if desc is None:
            desc = SearchSpaceConfig().to_dict()
            if desc.type is not None:
                desc = ClassFactory.get_cls(ClassType.SEARCHSPACE,
                                            desc.type).get_space(desc)
        for name, item in desc.items():
            self.__setattr__(name, item)
            self.__setitem__(name, item)
        self._params = OrderedDict()
        self._condition_dict = OrderedDict()
        self._forbidden_list = []
        self._hp_count = 0
        self._dag = DAG()
        if desc is not None:
            self.form_desc(desc)

    @classmethod
    def get_space(cls, desc):
        """Get Space."""
        return desc

    def form_desc(self, desc):
        """Create SearchSpace base on hyper-parameters object."""
        if 'hyperparameters' not in desc:
            return
        for space_dict in desc["hyperparameters"]:
            param = ParamsFactory.create_search_space(
                param_name=space_dict.get("key"),
                param_slice=space_dict.get('slice'),
                param_type=PARAM_TYPE_MAP[space_dict.get("type").upper()],
                param_range=space_dict.get("range"),
                generator=space_dict.get("generator"),
                sample_num=space_dict.get('sample_num'))
            self.add_hp(param)
        if "condition" in desc:
            for condition in desc["condition"]:
                _condition = ParamsFactory.create_condition(
                    self.get_hp(condition.get("child")),
                    self.get_hp(condition.get("parent")),
                    CONDITION_TYPE_MAP[condition.get("type").upper()],
                    condition.get("range"))
                self.add_condition(_condition)
        if "forbidden" in desc:
            for forbiddens in desc["forbidden"]:
                _forbiddens = []
                for _name, _value in forbiddens.items():
                    _forbiddens.append(
                        ForbiddenEqualsClause(param_name=self.get_hp(_name),
                                              value=_value))
                self.add_forbidden_clause(ForbiddenAndConjunction(_forbiddens))

    def sample(self):
        """Get the Sample of SearchSpace."""
        return self.decode(self.get_sample_space(1)[0])

    def verify_constraints(self, sample):
        """Verify condition."""
        for condition in self.get("condition", []):
            _type = condition["type"]
            child = condition["child"]  # eg. trainer.optimizer.params.momentum
            parent = condition["parent"]  # eg. trainer.optimizer.type
            _range = condition["range"]  # eg. range': ['SGD']
            if _type == "EQUAL" or _type == "IN":
                if parent in sample and sample[parent] in _range:
                    if child not in sample:
                        sample[child] = self.get_hp(child).sample()[0]
                elif child in sample:
                    del sample[child]
            if _type == "NOT_EQUAL":
                if parent in sample and sample[parent] in _range:
                    if child in sample:
                        del sample[child]
                elif child not in sample:
                    sample[child] = self.get_hp(child).sample()[0]
            # TODO condition type: IN, parent type: range
        return sample

    def size(self):
        """Get the size of the SearchSpace, i.e. the number of HyperParameters it contains.

        :return: the size of the SearchSpace.
        :rtype: int.

        """
        return self._hp_count

    def add_params(self, params):
        """Add params to the search space.

        :param list params: List[HyperParameter].
        :return: List of added hyperparameters (same as input)
        :rtype: list

        """
        for param in params:
            if not ParamsFactory.is_params(param):
                raise TypeError("HyperParameter '%s' is not an instance of "
                                "SearchSpace.common.hyper_parameter."
                                "HyperParameter." % str(param))

        for param in params:
            self._add_hp(param)
        self._sort_hps()
        return self

    def add_hp(self, hyperparameter):
        """Add one hyperparameter to the hyperparameter space.

        :param HyperParameter hyperparameter: instance of `HyperParameter` to add.
        :return: hyperparameter (same as input)
        :rtype: HyperParameter

        """
        if not ParamsFactory.is_params(hyperparameter):
            raise TypeError("The method add_hp must be called "
                            "with an instance of SearchSpace."
                            "hyper_parameter.HyperParameter.")

        self._add_hp(hyperparameter)
        return self

    def _add_hp(self, hyperparameter):
        """Add one hyperparameter to the hyperparameter space.

        :param HyperParameter hyperparameter: instance of `HyperParameter` to add.

        """
        if hyperparameter.name in self._params:
            raise ValueError("HyperParameter `%s` is already in SearchSpace!" %
                             hyperparameter.name)
        self._params[hyperparameter.name] = hyperparameter
        self._hp_count = self._hp_count + 1
        self._dag.add_node(hyperparameter.name)

    def add_condition(self, condition):
        """Add new condition to the current SearchSpace.

        :param condition: `condition` that need to add.
        :type condition: instance of `Condition`.
        """
        if not ParamsFactory.is_condition(condition):
            raise ValueError('Not a valid condition {}'.format(condition))
        child_name = condition.child.name
        parent_name = condition.parent.name
        try:
            self._dag.add_edge(parent_name, child_name)
        except KeyError:
            raise KeyError('A hyperparameter in condition {} does not exist '
                           'in the current SearchSpace.'.format(condition))
        except DAGValidationError:
            raise KeyError('Condition {} violates the DAG rule of the current '
                           'SearchSpace and cannot be added!'.format(condition))
        if parent_name not in self._condition_dict:
            self._condition_dict[parent_name] = {}
        self._condition_dict[parent_name][child_name] = condition

    def add_forbidden_clause(self, forbidden_conjunction):
        """Add new ForbiddenAndConjunction to the current SearchSpace.

        :param forbidden_conjunction:  ForbiddenAndConjunction
        :type forbidden_conjunction: instance of `ForbiddenAndConjunction`.
        """
        if not isinstance(forbidden_conjunction, ForbiddenAndConjunction):
            raise ValueError(
                'Not a valid condition {}'.format(forbidden_conjunction))
        self._forbidden_list.append(forbidden_conjunction)

    def _sort_hps(self):
        """Sort the hyperparameter dictionary."""
        return

    def params(self):
        """Return the list of all hyperparameters.

        :return: List[HyperParameter]
        :rtype: list

        """
        return list(self._params.values())

    def get_hp_names(self):
        """Return the list of name of all hyperparameters.

        :return: List[str]
        :rtype: list

        """
        return list(self._params.keys())

    def get_hp(self, name):
        """Get HyperParameter by its name.

        :param str name: The name of HyperParameter.
        :return: HyperParameter
        :rtype: HyperParameter

        """
        hp = self._params.get(name)

        if hp is None:
            raise KeyError("HyperParameter '%s' does not exist in this "
                           "configuration space." % name)
        else:
            return hp

    def get_sample_space(self, n=1000, gridding=False):
        """Get the sampled param space from the current SearchSpace.

        :param int n: number of samples.
        :param bool gridding: use gridding sample or random sample.
        :return: shape is (n, len(self._hyperparameters)).
        :rtype: np.array

        """
        if gridding:
            return self._get_grid_sample_space()
        else:
            return self._get_random_sample_space(n)

    def _get_random_sample_space(self, n):
        """Get the sampled param space from the current SearchSpace.

        here we use the random sample, and return a np array of shape
        n*_hp_count, which is a sampled param space for GP or
        other model to predict.

        :param int n: sample count.
        :return: shape is (n, len(self._hyperparameters)).
        :rtype: np.array

        """
        parameters_array = np.zeros((n, self._hp_count))
        i = 0
        for _, hp in self._params.items():
            column = hp.sample(n=n, decode=False)
            parameters_array[:, i] = column
            i = i + 1
        return parameters_array

    def _generate_grid(self):
        """Get the all possible values for each of the tunables."""
        grid_axes = []
        for _, hp in self._params.items():
            grid_axes.append(hp.get_grid_axis(hp.slice))
        return grid_axes

    def _get_grid_sample_space(self):
        """Get the sampled param space from the current SearchSpace.

        here we use grid sampling, and return an np array of shape
        n*len(_hyperparameters), which is a sampled param space for a GP or
        other model to predict.

        :return: np.array, shape is (n, len(self._hyperparameters)).
        :rtype: np.array

        """
        param_list = [[]]
        params_grid = self._generate_grid()
        for param_grid in params_grid:
            param_list = [
                param_x + [param_y] for param_x in param_list
                for param_y in param_grid
            ]
        return param_list

    def decode(self, param_list):
        """Inverse transform a param list to original param dict.

        :param list param_list: the param list come from a search,
            in which params order are same with self._hyperparameters
        :return: the inverse transformed param dictionary.
        :rtype: dict

        """
        if len(param_list) != self._hp_count:
            raise ValueError(
                "param_list length not equal to SearchSpace size!")
        i = 0
        assigned_forbidden_dict = {}
        inversed_param_dict = {}
        final_param_dict = {}
        for name, hp in self._params.items():
            param_value = param_list[i]

            forbidden_flag = False
            forbidden_value = []
            for forbidden_conjunction in self._forbidden_list:
                if name in forbidden_conjunction._forbidden_dict:
                    forbidden_flag = True

                    total_len = len(assigned_forbidden_dict) + \
                        len(forbidden_conjunction._forbidden_dict)
                    union_len = len(
                        set(
                            list(assigned_forbidden_dict.items()) +
                            list(forbidden_conjunction._forbidden_dict.items())
                        ))
                    # if assigned_forbidden_dict has same or similar forbidden conjunction
                    #  with `forbidden_conjunction`.
                    if (total_len - union_len) == \
                            len(forbidden_conjunction._forbidden_dict) - 1:
                        forbidden_value.append(
                            forbidden_conjunction._forbidden_dict.get(name))

            inversed_param_dict[name] = hp.decode(param_value, forbidden_value)
            if forbidden_flag:
                assigned_forbidden_dict[name] = inversed_param_dict[name]

            i = i + 1
        # check that each condition is valid:
        # use a DAG breadth-first search to evaluate every condition
        q = Queue()
        for ind_name in self._dag.ind_nodes():
            q.put(ind_name)
        while not q.empty():
            parent = q.get()
            final_param_dict[parent] = inversed_param_dict[parent]
            child_list = self._dag.downstream(parent)
            for child in child_list:
                condition = self._condition_dict[parent][child]
                if condition.evaluate(inversed_param_dict[parent]):
                    q.put(child)
        return final_param_dict
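The breadth-first search at the end of decode is what makes conditional hyperparameters work: a child is only visited, and therefore kept in final_param_dict, when its parent's decoded value satisfies the condition on the edge between them. A stripped-down sketch of that gating, with hypothetical stand-ins for self._dag and self._condition_dict:

from queue import Queue

roots = ['optimizer.type']                       # plays the role of ind_nodes()
downstream = {'optimizer.type': ['optimizer.momentum'],
              'optimizer.momentum': []}
condition = {('optimizer.type', 'optimizer.momentum'): lambda v: v == 'SGD'}
sampled = {'optimizer.type': 'Adam', 'optimizer.momentum': 0.9}

final = {}
q = Queue()
for name in roots:
    q.put(name)
while not q.empty():
    parent = q.get()
    final[parent] = sampled[parent]
    for child in downstream[parent]:
        if condition[(parent, child)](final[parent]):
            q.put(child)
assert final == {'optimizer.type': 'Adam'}  # momentum dropped: type != 'SGD'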
Example #37
def DAG_Attack(model, test_dataset, args):

    # Hyperparameters for DAG

    num_iterations = 20
    gamma = 0.5
    num = 15

    gpu = args.gpu

    # set device configuration
    device_ids = []

    if gpu == 'gpu':

        if not torch.cuda.is_available():
            print("No cuda available")
            raise SystemExit

        device = torch.device(args.device1)

        device_ids.append(args.device1)

        if args.device2 != -1:
            device_ids.append(args.device2)

        if args.device3 != -1:
            device_ids.append(args.device3)

        if args.device4 != -1:
            device_ids.append(args.device4)

    else:
        device = torch.device("cpu")

    if len(device_ids) > 1:
        model = nn.DataParallel(model, device_ids=device_ids)

    model = model.to(device)

    adversarial_examples = []

    for batch_idx in range(len(test_dataset)):
        image, label = test_dataset.__getitem__(batch_idx)

        image = image.unsqueeze(0)
        pure_label = label.squeeze(0).numpy()

        image, label = image.clone().detach().requires_grad_(
            True).float(), label.clone().detach().float()
        image, label = image.to(device), label.to(device)

        # Change labels from [batch_size, height, width] to [batch_size, num_classes, height, width]
        label_oh = make_one_hot(label.long(), n_classes, device)

        if args.attacks == 'DAG_A':

            adv_target = torch.zeros_like(label_oh)

        elif args.attacks == 'DAG_B':

            adv_target = generate_target_swap(label_oh.cpu().numpy())
            adv_target = torch.from_numpy(adv_target).float()

        elif args.attacks == 'DAG_C':

            # randomly choose one class, excluding the background class (0)
            unique_label = torch.unique(label)
            target_class = int(random.choice(unique_label[1:]).item())

            adv_target = generate_target(label_oh.cpu().numpy(),
                                         target_class=target_class)
            adv_target = torch.from_numpy(adv_target).float()

        else:
            print(
                "wrong adversarial attack type: must be DAG_A, DAG_B, or DAG_C"
            )
            raise SystemExit

        adv_target = adv_target.to(device)

        _, _, _, _, _, image_iteration = DAG(model=model,
                                             image=image,
                                             ground_truth=label_oh,
                                             adv_target=adv_target,
                                             num_iterations=num_iterations,
                                             gamma=gamma,
                                             no_background=True,
                                             background_class=0,
                                             device=device,
                                             verbose=False)

        if len(image_iteration) >= 1:

            adversarial_examples.append([image_iteration[-1], pure_label])

        del image_iteration

    print('total {} {} images are generated'.format(len(adversarial_examples),
                                                    args.attacks))

    return adversarial_examples
Example #38
def blank_setup():
    global dag
    dag = DAG()
Example #39
def start_with_graph():
    global dag
    dag = DAG()
    dag.from_dict({"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []})
Example #40
File: gsub.py  Project: kd0kfo/dag
def create_dag(input_filename, parsers, init_file=None,
               engine=dag.Engine.SHELL, num_cores=None):
    """
    Takes an input file that contains a list of commands and generates a dag.
    Jobs that have all of their prerequisites met are started, unless the
    --setup_only flag is provided.

    @param input_filename: Filename to be parsed into a DAG
    @type input_filename: str
    @param parsers: Dictionary that maps string command names
    to functions that are used to create DAG.
    @type parsers: dict
    @param init_file: Optional file to be used for startup variables
    and routines.
    @type init_file: file
    @param num_cores: Optional number of cores used in multiprocessing.
    @type num_cores: int
    @return:  DAG object if successful. Otherwise, None is returned
    @rtype: dag.DAG
    """

    import dag.util as dag_utils
    from dag import DAG, Engine, DagException

    # PROJECT SPECIFIC DEFINES. FACTOR OUT.
    if not init_file:
        init_file = dag_utils.open_user_init()

    # If we still don't have the init file, there is a problem.
    if not init_file:
        if init_file is None:
            raise DagException("No init file provided.")
        else:
            raise DagException("Could not open init file ({0}). File not found."
                               .format(init_file.name))

    init_code = compile(init_file.read(), init_file.name, "exec")
    exec(init_code)

    root_dag = DAG()
    root_dag.engine = engine
    root_dag.num_cores = num_cores
    parser_kmap = {}  # used as the second argument of parser functions (below)
    # dependencies dict is used to allow the user
    # to define explicit dependencies.
    dependencies = {}
    with open(input_filename, "r") as infile:
        for line in infile:
            line = line.strip()
            if len(line) == 0:
                continue
            if line[0] == '#':  # comment line; skip
                continue
            if line[0] == '%':  # Preprocess
                (parser_kmap, extra_processes,
                 dependencies) = preprocess_line(line,
                                                 parser_kmap, dependencies)
                for extra_proc in extra_processes:
                    root_dag.add_process(extra_proc)
                continue
            tokens = line.split(' ')
            # drop empty tokens (removing from a list while iterating over it skips elements)
            tokens = [token for token in tokens if token]
            pname = tokens[0]
            parser_args = tokens[1:]  # used by function below
            
            # Is the process name set explicitly?
            process_name = None  # optional internal name for the process, a.k.a. workunit_name
            if pname[0] == '@':
                process_name = pname[1:]
                pname = parser_args[0]
                parser_args = parser_args[1:]

            if root_dag.engine == Engine.SHELL:
                import dag.shell
                proc_list = dag.shell.parse_shell(pname, parser_args,
                                                  parser_kmap, parsers, init_code)
                num_procs = len(root_dag.processes)
                for proc in proc_list:
                    proc.workunit_name = "%s-%d" % (proc.cmd, num_procs)
                    num_procs += 1
            else:
                if pname not in parsers:
                    print("No function for %s" % pname)
                    print("Known functions: ", parsers.keys())
                    raise DagException("Unknown Function: {0}".format(pname))

                funct = "%s(parser_args,parser_kmap)" % parsers[pname]
                print("Running %s" % funct)
                proc_list = eval(funct)   # uses parser_args

            if proc_list is None:
                continue

            # If given explicitly set workunit name
            if process_name:
                proc_count = 1
                use_suffix = len(proc_list) > 1
                for i in proc_list:
                    if use_suffix:
                        i.workunit_name = "%s-%d" % (process_name, proc_count)
                    else:
                        i.workunit_name = process_name
                    proc_count += 1


            for i in proc_list:
                root_dag.add_process(i)

    # Set explicit dependencies, if any
    for parent_name in dependencies:
        print("Added dependency of %s" % parent_name)
        NO_SUCH_FMT = "No such process '%s'"
        parent_process = root_dag.get_process(parent_name)
        if not parent_process:
            print(NO_SUCH_FMT % parent_name)
            continue
        for child in dependencies[parent_name]:
            child_proc = root_dag.get_process(child)
            if not child_proc:
                print(NO_SUCH_FMT % child)
                continue
            if child not in [proc.workunit_name for proc in parent_process.children]:
                parent_process.children.append(child_proc)
                print("%s depends on %s" % (child, parent_name))

    return root_dag
Example #41
 def test_find_not_reachable_with_some_reachable(self):
     dag = DAG()
     dag.add_edges(sources=['b', 'c'], target='a')
     dag.add_edges(sources=['d'], target='e')
     self.assertCountEqual(
         dag.find_not_reachable(['b', 'c', 'd', 'e'], 'a'), ['d', 'e'])