Example No. 1
    def setUp(self):
        # Module looks like:
        # op1 -> op2    ->     op3
        #          \          /
        #           -> op4 ->

        self.module = Module()
        self.op1 = dense.DenseS()
        self.op2 = dense.DenseM()
        self.op3 = dense.DenseL()
        self.module = mutation_ops.append(self.module, self.op1)
        self.module = mutation_ops.append(self.module, self.op2)
        self.module = mutation_ops.append(self.module, self.op3)
        self.op4 = dense.Dropout()
        # op4 branches from op2 to op3, matching the diagram above:
        self.module = mutation_ops.insert(self.module,
                                          self.op2,
                                          self.op3,
                                          op=self.op4,
                                          between=False)

        # Setting up sub module for those cases:
        #  first    ->      last
        #    \             /
        #     -> branch ->
        self.sub_module = Module()
        self.first, self.last, self.branch = dense.DenseS(), dense.DenseL(
        ), dense.DenseM()
        self.sub_module = mutation_ops.append(self.sub_module, self.first)
        self.sub_module = mutation_ops.append(self.sub_module, self.last)
        self.sub_module = mutation_ops.insert(self.sub_module, self.first,
                                              self.last, self.branch)
Example No. 2
def get_insertion_points_after(module: Module, target: Module) -> list:
    insertion_after = []
    last = module.find_last()[0]
    target_first = target.find_first()
    for child in module.children:
        # A 2D target can only follow 2D ops; a 1D target can follow anything:
        valid_dim = (is2D(target_first) and is2D(child)) or is1D(target_first)
        if valid_dim and child != last:
            insertion_after.append(child)
    return insertion_after
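
A minimal usage sketch (hypothetical: the construction mirrors Example No. 1, and the Module/mutation_ops API is assumed to behave as shown there):

# Build a two-op module and a 1D target module, as in Example No. 1:
module = Module()
module = mutation_ops.append(module, dense.DenseS())
module = mutation_ops.append(module, dense.DenseM())
target = Module()
target = mutation_ops.append(target, dense.DenseL())

# Since target starts with a 1D op, every child except the last qualifies:
points = get_insertion_points_after(module, target)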
Example No. 3
def combine(patterns, num_nets, min_size, max_size, include_optimal=False):

    all_patterns_used = False
    nets = []
    if include_optimal:
        optimal = combine_optimal(patterns,
                                  size=random.randint(min_size, max_size))
        if optimal:
            nets += [optimal]
            draw = shuffle([
                i for i, p in enumerate(patterns)
                if not any(p.ID == q.predecessor.ID for q in optimal.patterns)
            ])
        else:
            draw = randomized_index(patterns)
    else:
        draw = randomized_index(patterns)

    for i in range(num_nets):
        # Setup:
        net = Module()

        for _ in range(random.randint(min_size, max_size)):
            # Selecting random patterns:
            pattern, draw = patterns[draw[0]], draw[1:]

            # Adding to net:
            net.children += [copy.deepcopy(pattern)]
            net.children[-1].used_result = select_result(pattern)

            if len(draw) == 0:
                draw = randomized_index(patterns)
                all_patterns_used = True
                break  # --> Cannot use same pattern twice in a network...

        # Placing 2D layers first:
        net.children.sort(key=lambda x: 0 if x.type == "2D" else 1)

        # Connecting patterns together:
        ops = net.connect_all_sub_modules_sequential()

        net.patterns = net.children
        net.children = ops

        # Done
        nets += [net]

    if not all_patterns_used:
        nets += combine([patterns[x] for x in draw], 1, min_size, max_size)

    # Checking for duplicated networks:
    remove_duplicates(nets)
    # for net in nets:
    #     set_learning_rate(net)
    return nets
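
An illustrative call, assuming `patterns` is a list of previously evolved pattern Modules as consumed by the loop above; the numeric arguments are placeholders:

# Five nets of 2-4 patterns each, seeded with the optimal combination
# whenever every pattern carries an optimal result:
nets = combine(patterns, num_nets=5, min_size=2, max_size=4,
               include_optimal=True)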
Example No. 4
def get_insertion_points_before(module: Module, target: Module,
                                after: Base) -> list:
    insertion_before = []
    first = module.find_first()
    target_last = target.find_last()[0]
    for child in module.children:
        if not _is_before(child, after):
            continue
        # A 1D target can only precede 1D ops; a 2D target can precede anything:
        if child != first and child != after and (
                is2D(target_last) or (is1D(target_last) and is1D(child))):
            insertion_before.append(child)
    return insertion_before
Example No. 5
    def test_possible_insertion_points(self):
        conv = Conv3x3()
        after, before = mutation.get_possible_insertion_points(self.module, operation=conv)
        self.assertEqual(len(after), 0, "A 2D conv must not be insertable after a 1D layer.")
        self.assertEqual(len(before), len(self.module.children) - 1, "Should be insertable before any node except the first.")

        # Network setup:
        # 2D input -> conv1     ->      op1 -> op2
        #                 \            /
        #                  -> conv2 ->
        module = Module()
        conv1 = Conv5x5()
        conv2 = Conv3x3()
        op1 = dense.DenseS()
        op2 = dense.DenseL()
        module = mutation_ops.append(module, conv1)
        module = mutation_ops.append(module, op1)
        module = mutation_ops.append(module, op2)
        module = mutation_ops.insert(module, first=conv1, last=op1, op=conv2)

        after, before = mutation.get_possible_insertion_points(module, operation=conv)
        self.assertIn(conv1, after, "Should be able to insert after conv1")
        self.assertIn(conv2, after, "Should be able to insert after conv2")
        self.assertEqual(len(after), 2, "More nodes in 'after' than expected.")
        self.assertNotIn(conv1, before, "Should not be able to insert before first node.")

        op3 = dense.DenseM()
        after, before = mutation.get_possible_insertion_points(module, operation=op3)
        self.assertEqual(len(after), 3, "Should have been able to insert after every node except last.")
        self.assertIn(op1, after, "Op1 should have been in after list.")
        self.assertIn(op1, before, "Op1 should have been in before list.")
        self.assertIn(op2, before, "Op2 should have been in before list.")
Example No. 6
    def branching_factor(p: Module):
        def recursive_branching(current, branching, seen):
            if current in seen:
                return branching
            seen += [current]
            factor = []
            for _next in current.next:
                factor += [
                    max(
                        branching,
                        recursive_branching(
                            _next, branching + (len(current.next) - 1), seen))
                ]
            for _prev in current.prev:
                factor += [
                    max(
                        branching,
                        recursive_branching(
                            _prev, branching - (len(current.prev) - 1), seen))
                ]
            # An isolated node has no neighbours to recurse into:
            return max(factor) if factor else branching

        firsts = p.find_firsts()
        seen = []
        return max(recursive_branching(first, 0, seen) for first in firsts)
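
A self-contained sketch of the graph shape the walk traverses; the Node class below is a hypothetical stand-in exposing the same .next/.prev lists as the real operations:

class Node:
    """Hypothetical stand-in with the .next/.prev lists the walk expects."""

    def __init__(self, name):
        self.name, self.next, self.prev = name, [], []


def link(a, b):
    a.next.append(b)
    b.prev.append(a)


# op1 -> op2 -> op3 with a parallel op1 -> op4 -> op3 branch:
op1, op2, op3, op4 = Node("op1"), Node("op2"), Node("op3"), Node("op4")
link(op1, op2); link(op2, op3)
link(op1, op4); link(op4, op3)

# Leaving op1 adds len(op1.next) - 1 == 1 to the running count, and merging
# back at op3 subtracts len(op3.prev) - 1 == 1 again, so the maximum seen
# along any path (the branching factor) is 1.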
Example No. 7
    def test_assemble_with_pooling_op(self):
        with open("./datasets/cifar10-home-ssh.json", "r") as f:
            config = json.load(f)
        server = config["servers"][0]

        individ = Module()
        individ = mutation_ops.append(individ, Conv3x3())
        individ = mutation_ops.append(individ, Conv3x3())
        individ = mutation_ops.append(individ, dense.DenseL())
        individ = mutation_ops.insert(individ, individ.children[1],
                                      individ.children[2], dense.DenseL())
        individ = mutation_ops.insert(individ, individ.children[1],
                                      individ.children[2], MaxPooling2x2())
        individ = mutation_ops.insert(individ, individ.children[1],
                                      individ.children[2], AvgPooling2x2())
        individ = mutation_ops.append(individ, dense.DenseL())

        training, evaluation, name, inputs = cifar10.configure(
            config["classes_in_classifier"], server)

        model = assemble(individ, config["input_format"],
                         config["classes_in_classifier"])

        training_history = training(
            model=model,
            device="/gpu:0",  # server['device'],
            epochs=0,
            batch_size=1000)
Example No. 8
    def test_training_integration(self):
        import os
        os.chdir("..")
        from src.buildingblocks.module import Module
        from src.ea_nas.evolutionary_operations import mutation_operators as op

        from src.buildingblocks.ops.convolution import Conv3x3
        from src.buildingblocks.ops.dense import (
            DenseL as DenseLarge,
            Dropout,
        )
        from src.buildingblocks.ops.pooling import MaxPooling2x2

        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

        module = Module()
        module = op.append(module, Conv3x3())
        module = op.append(module, Dropout())
        module = op.append(module, Conv3x3())
        module = op.append(module, Dropout())
        module = op.append(module, Conv3x3())
        module = op.append(module, Dropout())
        module = op.append(module, MaxPooling2x2())
        module = op.append(module, DenseLarge())
        module = op.append(module, Dropout())
        module = op.append(module, DenseLarge())

        (x_train, y_train), (x_test,
                             y_test) = keras.datasets.cifar10.load_data()
        x_val = x_train[45000:] / 255
        y_val = y_train[45000:]
        x_train = x_train[:45000] / 255
        y_train = y_train[:45000]
        x_test = x_test / 255

        y_train = keras.utils.to_categorical(y_train, num_classes=10)
        y_test = keras.utils.to_categorical(y_test, num_classes=10)
        y_val = keras.utils.to_categorical(y_val, num_classes=10)

        # shuffle is assumed to return (x, y) shuffled in unison (e.g. sklearn.utils.shuffle):
        data, labels = shuffle(x_train, y_train)
        with tf.device("/GPU:0"):
            keras.backend.set_session(
                tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                    allow_growth=False,
                    per_process_gpu_memory_fraction=1.0,
                ),
                                                 allow_soft_placement=True,
                                                 log_device_placement=True)))
            model = assemble(module, in_shape=(32, 32, 3), classes=10)
            metrics = model.fit(
                data,
                labels,
                epochs=10,
                batch_size=250,
                verbose=0,
                validation_data=(x_val, y_val),
            )

            results = model.evaluate(x_test, y_test)
Example No. 9
    def setUp(self):
        self.module = Module()
        self.op1 = dense.DenseS()
        self.op2 = dense.DenseM()
        self.op3 = dense.DenseL()
        self.module = mutation_ops.append(self.module, self.op1)
        self.module = mutation_ops.append(self.module, self.op2)
        self.module = mutation_ops.append(self.module, self.op3)
Example No. 10
    def test_output_is_correct_shape2D(self):
        module = Module("ConvNet")
        l1, l2, l3, l4 = Conv5x5(), Conv5x5(), Conv5x5(), Conv5x5()
        module = mutate.append(module, l1)
        module = mutate.append(module, l2)
        module = mutate.append(module, l3)
        module = mutate.append(module, l4)

        from src.frameworks.keras import module_to_model
        classes = 10
        model = module_to_model(module, [32, 32, 3], classes=classes)
        self.assertTrue(model.output.shape[0].value is None, "Batch dimension should be unspecified.")
        self.assertEqual(model.output.shape[1].value, classes, "Output dimension should equal the number of classes.")
Example No. 11
def get_possible_insertion_points(module: Module,
                                  operation: Base) -> (list, list):
    insertion_after = []
    insertion_before = []
    first = module.find_first()
    last = module.find_last()[0]
    for child in module.children:
        if is1D(operation):
            # A 1D op cannot be inserted before a 2D op:
            if not is2D(child) and child != first:
                insertion_before.append(child)
            if child != last:
                insertion_after.append(child)  # Can be inserted after any op.
        elif is2D(operation):
            # A 2D op cannot be inserted after a 1D (linear) op:
            if not is1D(child) and child != last:
                insertion_after.append(child)
            if child != first:
                insertion_before.append(child)  # Can be inserted before any op.

        # Can only insert after input layer if shape of input is same as shape of first layer.

    return insertion_after, insertion_before
Example No. 12
def init_population(individs,
                    in_shape,
                    network_min_layers=1,
                    network_max_layers=10):
    population = []
    for _ in range(individs):
        root = Module()
        for i in range(random.randint(network_min_layers, network_max_layers)):
            if i > 0:
                root = mutate(root, make_copy=False)
            else:
                pool = operators1D_votes if len(in_shape) == 2 else operators2D_votes
                root = append(root, random_sample(pool)())
        population += [root]
    return population
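
An illustrative call; with a (32, 32, 3) image input, len(in_shape) != 2, so the first layer is drawn from operators2D_votes:

# Hypothetical usage: ten random networks of 1-10 layers for image input.
population = init_population(individs=10, in_shape=(32, 32, 3))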
Example No. 13
    def test_dropout_is_placed_correctly_in_simple_assembled_network(self):
        from tensorflow import keras
        from src.frameworks.keras import module_to_model
        module = Module(name="TestDropout")
        l1, l2, l3, l4 = DenseL(), DenseM(), DenseS(), DenseM()

        # Building model like:
        # l1 ->  l2  -> l3
        #    \         /
        #     -> l4 ->
        module = mutate.append(module, l1)
        module = mutate.append(module, l2)
        module = mutate.append(module, l3)
        module = mutate.insert(module, first=l1, last=l3, op=l4, between=False)

        # Assemble and display:
        model = module_to_model(module, input_shape=[784], classes=10)
        keras.utils.plot_model(model, to_file="tests/output/TestDropoutSimple.png")
Example No. 14
def rank_children(module: Module) -> None:
    """ Ranks all children of module in breadth first order. This
        makes sorting all nodes after Keras operations possible.
    """
    for node in module.children:
        node.rank = -1

    queue = [module.find_first()]
    rank = 0
    while queue:
        node = queue.pop(0)  # type: Base

        # Should wait to queue next nodes if one or more previous nodes are "unprocessed"
        if (not node.prev) or all(_prev.rank >= 0 for _prev in node.prev):
            queue += [_next for _next in node.next]
            node.rank = rank
            rank += 1
    if any(child.rank == -1 for child in module.children):
        raise ValueError("Ranking could not be computed due to bad graph")
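
A standalone sketch of the same breadth-first ranking on hypothetical stand-in nodes; the extra rank >= 0 guard (an addition, not in the original) keeps a merge node from being ranked twice when several predecessors queue it:

class Node:
    """Hypothetical stand-in with the .prev/.next/.rank fields used above."""

    def __init__(self):
        self.prev, self.next, self.rank = [], [], -1


# A diamond: a -> b -> d and a -> c -> d.
a, b, c, d = Node(), Node(), Node(), Node()
a.next = [b, c]
b.prev, b.next = [a], [d]
c.prev, c.next = [a], [d]
d.prev = [b, c]

queue, rank = [a], 0
while queue:
    node = queue.pop(0)
    if node.rank >= 0:
        continue  # d is queued by both b and c; rank it only once.
    # A node waits until every predecessor has been ranked:
    if (not node.prev) or all(p.rank >= 0 for p in node.prev):
        queue += node.next
        node.rank = rank
        rank += 1

print(a.rank, b.rank, c.rank, d.rank)  # 0 1 2 3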
Example No. 15
def assemble(module: Module,
             in_shape: tuple = (784, ),
             classes: int = 10,
             is_root: bool = True,
             indent=""):
    # 1. Rank and sort all child operations using breadth-first:
    rank_children(module)
    operations = sorted(module.children, key=attrgetter('rank'))

    # 2. Connect keras operations together:
    if not isinstance(in_shape[0], int) and not is_root:
        in_shape = tuple(dim.value for dim in in_shape if dim.value)
    model_input = keras.layers.Input(shape=in_shape)  # avoid shadowing builtin input()

    for node in operations:
        node.keras_tensor = connect_operation_to_previous(
            node, node.prev, model_input, indent)

    # GRABBING THE LAST TENSOR FROM THE CHILD OPERATIONS:
    previous_tensor = operations[-1].keras_tensor

    # DEFINING OUTPUT FOR THE MODEL (FLATTENING ANY CONV OUTPUT):
    if is_root:
        if len(previous_tensor.shape) > 2:
            previous_tensor = keras.layers.Flatten()(previous_tensor)
        output = keras.layers.Dense(units=classes,
                                    activation="softmax")(previous_tensor)
    else:
        output = previous_tensor
    # 3. Create a trainable keras.models.Model for module:
    try:
        module.keras_operation = keras.models.Model(inputs=[model_input],
                                                    outputs=[output],
                                                    name="-".join(
                                                        module.ID.split()))
    except ValueError as e:
        print(indent + "    - Crashed with input: {} and output: {}".format(
            model_input.name, output.shape))
        raise e
    return module.keras_operation
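
Isolated for clarity, the root classification head built in the is_root branch amounts to this standard Keras pattern (a sketch; the conv output shape is illustrative):

from tensorflow import keras

conv_out = keras.layers.Input(shape=(8, 8, 32))  # stand-in conv output
flat = keras.layers.Flatten()(conv_out)          # applied only when rank > 2
softmax = keras.layers.Dense(units=10, activation="softmax")(flat)
head = keras.models.Model(inputs=[conv_out], outputs=[softmax])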
Example No. 16
def apply_mutation_operator(module: Module, operator: str,
                            operators: list) -> Module:
    if operator == "append":
        last = module.find_last()[0]
        if is1D(last):
            operation = random_sample(operators1D_votes)()
        else:
            operation = random_sample(operators2D_votes + operators1D_votes)()
        module = append(module, operation)

    elif operator == "remove":
        module = remove(module, random_sample(module.children))

    elif operator == "insert" or operator == "insert-between":
        operation = random_sample(operators)()
        first, last = find_placement(module, operation)
        i = 0
        while not first or not last:
            operation = random_sample(operators)()
            first, last = find_placement(module, operation)
            i += 1
            if i == 20:
                break
        if first and last:
            module = insert(module, first, last, operation,
                            operator == "insert-between")
    elif operator == "connect":
        possibilities = list(range(len(module.children)))
        module = connect(
            module=module,
            first=module.children[possibilities.pop(
                random.randint(0,
                               len(possibilities) - 1))],
            last=module.children[possibilities.pop(
                random.randint(0,
                               len(possibilities) - 1))],
        )
    # Else: operator == "identity": do nothing...

    return module
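
A hedged driver sketch; the operator strings are exactly the cases dispatched above, and reusing operators1D_votes as the candidate pool is an assumption mirroring the append branch:

# Hypothetical mutation loop over the dispatched operator strings:
for op_name in ("append", "remove", "insert", "insert-between",
                "connect", "identity"):
    module = apply_mutation_operator(module, op_name,
                                     operators=operators1D_votes)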
Example No. 17
    def test_dropout_is_placed_correctly_in_complex_assembled_network(self):
        from tensorflow import keras
        from src.frameworks.keras import module_to_model
        module = Module(name="TestDropout")
        l1, l2, l3, l4, l5, l6 = (Conv5x5(), DenseL(), Conv3x3(dropout=False),
                                  AvgPooling2x2(), DenseL(),
                                  Conv3x3(dropout=False))

        # Building model with form:
        #         ->  l6 ->
        #      /            \
        #      |---------->  l3 ->
        #      |                   \
        # -> l1 -------> l2 ------> l5 ->
        #       \                  /
        #         -----> l4 ----->
        module = mutate.append(module, l1)
        module = mutate.append(module, l2)
        module = mutate.append(module, l5)
        module = mutate.insert(module, first=l1, last=l5, op=l3, between=False)
        module = mutate.insert(module, first=l1, last=l5, op=l4, between=False)
        module = mutate.insert(module, first=l1, last=l3, op=l6, between=False)

        # Assemble and display:
        model = module_to_model(module, input_shape=[32, 32, 3], classes=10)
        keras.utils.plot_model(model, to_file="tests/output/TestDropoutComplex.png")
Example No. 18
def combine_optimal(patterns, size):
    if not all(p.optimal_result() for p in patterns):
        return None

    optimal = Module()
    sorted_patterns = sorted(patterns,
                             key=lambda p: p.optimal_result().score(),
                             reverse=True)

    # Guard against size exceeding the number of available patterns:
    for _ in range(min(size, len(sorted_patterns))):
        pattern = sorted_patterns.pop(0)
        optimal.children += [copy.deepcopy(pattern)]
        optimal.children[-1].used_result = pattern.optimal_result()

    dim2 = [p for p in optimal.children if p.type == "2D"]
    dim2.sort(key=lambda p: p.used_result.distance)
    dim1 = [p for p in optimal.children if p.type == "1D"]
    dim1.sort(key=lambda p: p.used_result.distance)

    optimal.children = dim2 + dim1
    optimal.patterns = optimal.children
    optimal.children = optimal.connect_all_sub_modules_sequential()

    return optimal
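
Usage mirrors the call in Example No. 3; a guarded sketch with a placeholder size:

# Returns None unless every pattern carries an optimal result:
optimal = combine_optimal(patterns, size=3)
if optimal is not None:
    nets += [optimal]  # as in Example No. 3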
Example No. 19
    def fitness(p: Module):
        return p.test_acc()
Example No. 20
    def setUp(self):
        self.module = Module()
        self.module = mutation_ops.append(self.module, DenseS())
        self.module = mutation_ops.append(self.module, DenseM())
        self.module = mutation_ops.append(self.module, DenseL())
        self.module = mutation_ops.append(self.module, Dropout())