def test_create_more_nodes(self):
        from deephyper.nas.space import AutoKSearchSpace
        from deephyper.nas.space.node import VariableNode
        from deephyper.nas.space.op.op1d import Dense

        struct = AutoKSearchSpace((5, ), (1, ), regression=True)

        vnode1 = VariableNode()
        struct.connect(struct.input_nodes[0], vnode1)

        vnode1.add_op(Dense(10))

        vnode2 = VariableNode()
        vnode2.add_op(Dense(10))

        struct.connect(vnode1, vnode2)

        struct.set_ops([0, 0])

        falias = "test_auto_keras_search_spaceure"
        struct.draw_graphviz(f"{falias}.dot")

        model = struct.create_model()
        from tensorflow.keras.utils import plot_model

        plot_model(model, to_file=f"{falias}.png", show_shapes=True)
Example #2
def test_mirror_node():
    # imports assumed to follow the module paths used in the other examples on
    # this page (MirrorNode is expected in deephyper.nas.space.node)
    from deephyper.nas.space.node import VariableNode, MirrorNode
    from deephyper.nas.space.op.op1d import Dense

    vnode = VariableNode()
    vop = Dense(10)
    vnode.add_op(vop)
    vnode.add_op(Dense(20))

    # a MirrorNode reuses whatever operation the mirrored node selects
    mnode = MirrorNode(vnode)

    vnode.set_op(0)

    assert vnode.op == vop
    assert mnode.op == vop
Example #3
def add_mlp_op_(node):
    node.add_op(Dense(100, tf.nn.relu))
    node.add_op(Dense(100, tf.nn.tanh))
    node.add_op(Dense(100, tf.nn.sigmoid))
    node.add_op(Dense(500, tf.nn.relu))
    node.add_op(Dense(500, tf.nn.tanh))
    node.add_op(Dense(500, tf.nn.sigmoid))
    node.add_op(Dense(1000, tf.nn.relu))
    node.add_op(Dense(1000, tf.nn.tanh))
    node.add_op(Dense(1000, tf.nn.sigmoid))
Example #4
def test_mime_node():
    # imports assumed to follow the module paths used in the other examples on
    # this page (MimeNode is expected in deephyper.nas.space.node)
    from deephyper.nas.space.node import VariableNode, MimeNode
    from deephyper.nas.space.op.op1d import Dense

    vnode = VariableNode()
    vop = Dense(10)
    vnode.add_op(vop)
    vnode.add_op(Dense(20))

    # a MimeNode declares its own operations but mimics the choice index of the mimed node
    mnode = MimeNode(vnode)
    mop = Dense(30)
    mnode.add_op(mop)
    mnode.add_op(Dense(40))

    vnode.set_op(0)

    assert vnode.op == vop
    assert mnode.op == mop
Example #5
    def build(
        self,
        input_shape,
        output_shape,
        regression=True,
        num_units=(1, 11),
        num_layers=10,
        **kwargs
    ):
        """
        Args:
            input_shape (tuple, optional): True shape of inputs (no batch size dimension). Defaults to (2,).
            output_shape (tuple, optional): True shape of outputs (no batch size dimension).. Defaults to (1,).
            num_layers (int, optional): Maximum number of layers to have. Defaults to 10.
            num_units (tuple, optional): Range of number of units such as range(start, end, step_size). Defaults to (1, 11).
            regression (bool, optional): A boolean defining if the model is a regressor or a classifier. Defaults to True.

        Returns:
            AutoKSearchSpace: A search space object based on tf.keras implementations.
        """
        ss = AutoKSearchSpace(input_shape, output_shape, regression=regression)

        prev_node = ss.input_nodes[0]

        for _ in range(num_layers):
            vnode = VariableNode()
            vnode.add_op(Identity())
            for i in range(*num_units):
                vnode.add_op(Dense(i, tf.nn.relu))

            ss.connect(prev_node, vnode)
            prev_node = vnode

        return ss
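
A minimal usage sketch for the build method above, assuming it is defined on a search-space factory class; the class name MLPFactory is hypothetical, and only calls already shown in these examples (set_ops, create_model) are used.

# Hypothetical factory class exposing the `build` method shown above.
factory = MLPFactory()

# 5 input features, 1 regression target, at most 3 layers; each layer picks
# Identity or a Dense layer with 8..63 units in steps of 8 (range semantics).
space = factory.build(
    input_shape=(5,),
    output_shape=(1,),
    regression=True,
    num_units=(8, 64, 8),
    num_layers=3,
)

space.set_ops([0, 0, 0])  # one choice per VariableNode (3 layers -> 3 choices)
model = space.create_model()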
Example #6
def add_dense_to_(node):
    node.add_op(Identity())  # we do not want to create a layer in this case

    activations = [None, swish, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid]
    for units in range(16, 97, 16):
        for activation in activations:
            node.add_op(Dense(units=units, activation=activation))
Example #7
    def build(self,
              input_shape,
              output_shape,
              units=[128, 64, 32, 16, 8, 16, 32, 64, 128],
              num_layers=5,
              **kwargs):
        ss = KSearchSpace(input_shape, output_shape)

        inp = ss.input_nodes[0]

        # auto-encoder: `units` lists the encoder/decoder widths (default
        # [128, 64, 32, 16, 8, 16, 32, 64, 128]); `d` tracks the direction,
        # +1 while descending towards the bottleneck, -1 while ascending.
        prev_node = inp
        d = 1
        for i in range(len(units)):
            vnode = VariableNode()
            vnode.add_op(Identity())
            if d == 1 and units[i] < units[i + 1]:
                d = -1
                for u in range(min(2, units[i]), max(2, units[i]) + 1, 2):
                    vnode.add_op(Dense(u, tf.nn.relu))
                latent_space = vnode  # remember the bottleneck node for the regressor branch
            else:
                for u in range(min(units[i], units[i + d]),
                               max(units[i], units[i + d]) + 1, 2):
                    vnode.add_op(Dense(u, tf.nn.relu))
            ss.connect(prev_node, vnode)
            prev_node = vnode

        out2 = ConstantNode(op=Dense(output_shape[0][0], name="output_0"))
        ss.connect(prev_node, out2)

        # regressor
        prev_node = latent_space
        # prev_node = inp
        for _ in range(num_layers):
            vnode = VariableNode()
            for i in range(16, 129, 16):
                vnode.add_op(Dense(i, tf.nn.relu))

            ss.connect(prev_node, vnode)
            prev_node = vnode

        out1 = ConstantNode(op=Dense(output_shape[1][0], name="output_1"))
        ss.connect(prev_node, out1)

        return ss
Example #8
def create_search_space(
    input_shape=(32, 32, 3),
    output_shape=(10, ),
    num_filters=8,
    num_blocks=4,
    normal_cells=2,
    reduction_cells=1,
    repetitions=3,
    *args,
    **kwargs,
):

    ss = AutoKSearchSpace(input_shape, output_shape, regression=False)
    source = prev_input = ss.input_nodes[0]

    # keep the outputs of the two most recent cells as candidate skip-connection inputs
    hidden_states = collections.deque([source, source], maxlen=2)

    for ri in range(repetitions):
        for nci in range(normal_cells):
            # generate a normal cell
            cout = generate_cell(
                ss,
                hidden_states,
                num_blocks,
                strides=1,
                mime=ri + nci > 0,
                num_filters=num_filters,
            )
            hidden_states.append(cout)

        if ri < repetitions - 1:  # we don't want the last cell to be a reduction cell
            for rci in range(reduction_cells):
                # generate a reduction cell
                cout = generate_cell(
                    ss,
                    hidden_states,
                    num_blocks,
                    strides=2,
                    mime=ri + rci > 0,
                    num_filters=num_filters,
                )
                hidden_states.append(cout)

    # out_node = ConstantNode(op=Dense(100, activation=tf.nn.relu))
    out_dense = VariableNode()
    out_dense.add_op(Identity())
    for units in [10, 20, 50, 100, 200, 500, 1000]:
        out_dense.add_op(Dense(units, activation=tf.nn.relu))
    ss.connect(cout, out_dense)

    out_dropout = VariableNode()
    out_dropout.add_op(Identity())
    for drop_rate in [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 0.8]:
        out_dropout.add_op(Dropout(rate=drop_rate))
    ss.connect(out_dense, out_dropout)

    return ss
Example #9
def add_dense_op_(node):
    # node.add_op(Identity())
    node.add_op(Dense(units=10))
    node.add_op(Dense(units=50))
    node.add_op(Dense(units=100))
    node.add_op(Dense(units=200))
    node.add_op(Dense(units=250))
    node.add_op(Dense(units=500))
    node.add_op(Dense(units=750))
    node.add_op(Dense(units=1000))
Example #10
def add_mlp_op_(node):
    node.add_op(Identity())
    node.add_op(Dense(100, tf.nn.relu))
    node.add_op(Dense(100, tf.nn.tanh))
    node.add_op(Dense(100, tf.nn.sigmoid))
    node.add_op(Dropout(0.3))
    node.add_op(Dense(500, tf.nn.relu))
    node.add_op(Dense(500, tf.nn.tanh))
    node.add_op(Dense(500, tf.nn.sigmoid))
    node.add_op(Dropout(0.4))
    node.add_op(Dense(1000, tf.nn.relu))
    node.add_op(Dense(1000, tf.nn.tanh))
    node.add_op(Dense(1000, tf.nn.sigmoid))
    node.add_op(Dropout(0.5))
Example #11
def create_structure(input_shape=(2,), output_shape=(1,), *args, **kwargs):
    struct = AutoKSearchSpace(input_shape, output_shape, regression=False)

    n1 = ConstantNode(op=Conv1D(filter_size=20, num_filters=128), name='N')
    
    struct.connect(struct.input_nodes[0], n1)

    n2 = ConstantNode(op=Activation(activation='relu'), name='N')
    struct.connect(n1, n2)

    n3 = ConstantNode(op=MaxPooling1D(pool_size=1, padding='same'), name='N')
    struct.connect(n2, n3)

    n4 = ConstantNode(op=Conv1D(filter_size=10, num_filters=128), name='N')
    struct.connect(n3, n4)

    n5 = ConstantNode(op=Activation(activation='relu'), name='N')
    struct.connect(n4, n5)

    n6 = ConstantNode(op=MaxPooling1D(pool_size=10, padding='same'), name='N')
    struct.connect(n5, n6)

    n7 = ConstantNode(op=Flatten(), name='N')
    struct.connect(n6, n7)

    n8 = ConstantNode(op=Dense(units=200), name='N')
    struct.connect(n7, n8)

    n9 = ConstantNode(op=Activation(activation='relu'), name='N')
    struct.connect(n8, n9)

    n10 = ConstantNode(op=Dropout(rate=0.1), name='N')
    struct.connect(n9, n10)

    n11 = ConstantNode(op=Dense(units=20), name='N')
    struct.connect(n10, n11)

    n12 = ConstantNode(op=Activation(activation='relu'), name='N')
    struct.connect(n11, n12)

    n13 = ConstantNode(op=Dropout(rate=0.1), name='N')
    struct.connect(n12, n13)

    return struct
Example #12
def create_structure(input_shape=[(1, ), (942, ), (5270, ), (2048, )],
                     output_shape=(1, ),
                     num_cells=2,
                     *args,
                     **kwargs):
    struct = AutoKSearchSpace(input_shape, output_shape, regression=True)
    input_nodes = struct.input_nodes

    output_submodels = [input_nodes[0]]

    for i in range(1, 4):
        cnode1 = ConstantNode(name='N', op=Dense(1000, tf.nn.relu))
        struct.connect(input_nodes[i], cnode1)

        cnode2 = ConstantNode(name='N', op=Dense(1000, tf.nn.relu))
        struct.connect(cnode1, cnode2)

        vnode1 = VariableNode(name='N3')
        add_mlp_op_(vnode1)
        struct.connect(cnode2, vnode1)

        output_submodels.append(vnode1)

    merge1 = ConstantNode(name='Merge')
    # merge1.set_op(Concatenate(struct, merge1, output_submodels))
    merge1.set_op(Concatenate(struct, output_submodels))

    cnode4 = ConstantNode(name='N', op=Dense(1000, tf.nn.relu))
    struct.connect(merge1, cnode4)

    prev = cnode4

    for i in range(num_cells):
        cnode = ConstantNode(name='N', op=Dense(1000, tf.nn.relu))
        struct.connect(prev, cnode)

        merge = ConstantNode(name='Merge')
        # merge.set_op(AddByPadding(struct, merge, [cnode, prev]))
        merge.set_op(AddByPadding(struct, [cnode, prev]))

        prev = merge

    return struct
Example #13
    def test_create_one_vnode_with_wrong_output_shape(self):
        from deephyper.nas.space import KSearchSpace

        struct = KSearchSpace((5, ), (1, ))

        from deephyper.nas.space.node import VariableNode

        vnode = VariableNode()

        struct.connect(struct.input_nodes[0], vnode)

        from deephyper.nas.space.op.op1d import Dense

        vnode.add_op(Dense(10))

        struct.set_ops([0])

        import pytest

        # exception path assumed from older DeepHyper releases
        from deephyper.core.exceptions.nas.space import WrongOutputShape

        with pytest.raises(WrongOutputShape):
            struct.create_model()
Example #14
    def test_create_multiple_inputs_with_one_vnode(self):
        from deephyper.nas.space import KSearchSpace
        from deephyper.nas.space.node import VariableNode, ConstantNode
        from deephyper.nas.space.op.op1d import Dense
        from deephyper.nas.space.op.merge import Concatenate

        struct = KSearchSpace([(5, ), (5, )], (1, ))

        merge = ConstantNode()
        merge.set_op(Concatenate(struct, struct.input_nodes))

        vnode1 = VariableNode()
        struct.connect(merge, vnode1)

        vnode1.add_op(Dense(1))

        struct.set_ops([0])

        struct.create_model()
Example #15
def create_search_space(
        input_shape=(11937, ), output_shape=(86, ), num_layers=10, *args,
        **kwargs):

    # print("input_shape:", input_shape, ", output_shape:", output_shape,", num_layers:", num_layers)
    arch = KSearchSpace(input_shape, output_shape)
    source = prev_input = arch.input_nodes[0]

    # look over skip connections within a range of the 3 previous nodes
    anchor_points = collections.deque([source], maxlen=3)

    for _ in range(num_layers):
        vnode = VariableNode()
        add_dense_to_(vnode)

        arch.connect(prev_input, vnode)

        # * Cell output
        cell_output = vnode

        cmerge = ConstantNode()
        cmerge.set_op(AddByProjecting(arch, [cell_output], activation='relu'))

        for anchor in anchor_points:
            skipco = VariableNode()
            skipco.add_op(Tensor([]))
            skipco.add_op(Connect(arch, anchor))
            arch.connect(skipco, cmerge)

        # ! for next iter
        prev_input = cmerge
        anchor_points.append(prev_input)

    cout = ConstantNode(op=Dense(output_shape[0], activation="sigmoid"))  # use the requested output size
    arch.connect(prev_input, cout)
    return arch
Example #16
    def test_create_one_vnode(self):
        from deephyper.nas.space import KSearchSpace

        struct = KSearchSpace((5, ), (1, ))

        from deephyper.nas.space.node import VariableNode

        vnode = VariableNode()

        struct.connect(struct.input_nodes[0], vnode)

        from deephyper.nas.space.op.op1d import Dense

        vnode.add_op(Dense(1))

        struct.set_ops([0])

        falias = "test_keras_search_spaceure"
        struct.draw_graphviz(f"{falias}.dot")

        model = struct.create_model()
        from tensorflow.keras.utils import plot_model

        plot_model(model, to_file=f"{falias}.png", show_shapes=True)
Example #17
    def gen_vnode(self) -> VariableNode:
        # variable node whose choices are Dense layers with 1 to 10 units
        vnode = VariableNode()
        for i in range(1, 11):
            vnode.add_op(Dense(i, tf.nn.relu))
        return vnode