Code Example #1
def create_search_space(
    input_shape=(32, 32, 3),
    output_shape=(10, ),
    num_filters=8,
    num_blocks=4,
    normal_cells=2,
    reduction_cells=1,
    repetitions=3,
    *args,
    **kwargs,
):
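    """Cell-based search space: each of the `repetitions` groups stacks
    `normal_cells` stride-1 cells followed by `reduction_cells` stride-2 cells
    (skipped after the last group), then optional Dense and Dropout output nodes.
    """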

    ss = AutoKSearchSpace(input_shape, output_shape, regression=False)
    source = prev_input = ss.input_nodes[0]

    # consider skip connections from the 2 most recent hidden states
    hidden_states = collections.deque([source, source], maxlen=2)

    for ri in range(repetitions):
        for nci in range(normal_cells):
            # generate a normal cell
            cout = generate_cell(
                ss,
                hidden_states,
                num_blocks,
                strides=1,
                mime=ri + nci > 0,
                num_filters=num_filters,
            )
            hidden_states.append(cout)

        if ri < repetitions - 1:  # we don't want the last cell to be a reduction cell
            for rci in range(reduction_cells):
                # generate a reduction cell
                cout = generate_cell(
                    ss,
                    hidden_states,
                    num_blocks,
                    strides=2,
                    mime=ri + rci > 0,
                    num_filters=num_filters,
                )
                hidden_states.append(cout)

    # out_node = ConstantNode(op=Dense(100, activation=tf.nn.relu))
    out_dense = VariableNode()
    out_dense.add_op(Identity())
    for units in [10, 20, 50, 100, 200, 500, 1000]:
        out_dense.add_op(Dense(units, activation=tf.nn.relu))
    ss.connect(cout, out_dense)

    out_dropout = VariableNode()
    out_dropout.add_op(Identity())
    for drop_rate in [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 0.8]:
        out_dropout.add_op(Dropout(rate=drop_rate))
    ss.connect(out_dense, out_dropout)

    return ss
Code Example #2
File: dense_skipco.py  Project: bigwater/NASBigData
def add_dense_to_(node):
    node.add_op(Identity())  # we do not want to create a layer in this case

    activations = [None, swish, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid]
    for units in range(16, 97, 16):
        for activation in activations:
            node.add_op(Dense(units=units, activation=activation))
Code Example #3
File: feed_forward.py  Project: bigwater/deepspace
    def build(
        self,
        input_shape,
        output_shape,
        regression=True,
        num_units=(1, 11),
        num_layers=10,
        **kwargs
    ):
        """
        Args:
            input_shape (tuple): Shape of the inputs (without the batch dimension).
            output_shape (tuple): Shape of the outputs (without the batch dimension).
            regression (bool, optional): Whether the model is a regressor rather than a classifier. Defaults to True.
            num_units (tuple, optional): Range of the number of units per layer, expanded as range(start, end[, step_size]). Defaults to (1, 11).
            num_layers (int, optional): Maximum number of layers. Defaults to 10.

        Returns:
            AutoKSearchSpace: A search space object based on tf.keras implementations.
        """
        ss = AutoKSearchSpace(input_shape, output_shape, regression=regression)

        prev_node = ss.input_nodes[0]

        for _ in range(num_layers):
            vnode = VariableNode()
            vnode.add_op(Identity())
            for i in range(*num_units):
                vnode.add_op(Dense(i, tf.nn.relu))

            ss.connect(prev_node, vnode)
            prev_node = vnode

        return ss
Code Example #4
def add_dropout_op_(node):
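    # Candidate operations for this node: Identity (no dropout) or Dropout with one of several rates.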
    node.add_op(Identity())
    node.add_op(Dropout(rate=0.5))
    node.add_op(Dropout(rate=0.4))
    node.add_op(Dropout(rate=0.3))
    node.add_op(Dropout(rate=0.2))
    node.add_op(Dropout(rate=0.1))
    node.add_op(Dropout(rate=0.05))
Code Example #5
def add_dense_op_(node):
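    # Candidate operations: Identity (skip the layer) or a Dense layer with 10 to 1000 units.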
    node.add_op(Identity())
    node.add_op(Dense(units=10))
    node.add_op(Dense(units=50))
    node.add_op(Dense(units=100))
    node.add_op(Dense(units=200))
    node.add_op(Dense(units=250))
    node.add_op(Dense(units=500))
    node.add_op(Dense(units=750))
    node.add_op(Dense(units=1000))
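Code Examples #4 and #5 only define per-node candidate sets. The sketch below is not from the original project: it merely alternates the two helpers in a build function, following the pattern of Code Example #3; the AutoKSearchSpace call, the default of 5 layers, and the omitted imports (which depend on the deephyper version) are assumptions.

def build_mlp_space(input_shape, output_shape, num_layers=5):
    # Hedged sketch: alternate the Dense and Dropout candidate nodes defined above.
    ss = AutoKSearchSpace(input_shape, output_shape, regression=True)
    prev_node = ss.input_nodes[0]
    for _ in range(num_layers):
        dense = VariableNode()
        add_dense_op_(dense)  # Identity or Dense (Code Example #5)
        ss.connect(prev_node, dense)

        dropout = VariableNode()
        add_dropout_op_(dropout)  # Identity or Dropout (Code Example #4)
        ss.connect(dense, dropout)

        prev_node = dropout
    return ss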
Code Example #6
def generate_conv_node(strides, mime=False):
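    # A MimeNode reuses the operation chosen by the node it wraps (here, the matching
    # node of the first normal or reduction cell); otherwise a fresh VariableNode is
    # created and recorded so that later cells can mimic it.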
    if mime:
        if strides > 1:
            node = MimeNode(next(cycle_reduction_nodes), name="Conv")
        else:
            node = MimeNode(next(cycle_normal_nodes), name="Conv")
    else:
        node = VariableNode(name="Conv")
        if strides > 1:
            reduction_nodes.append(node)
        else:
            normal_nodes.append(node)

    padding = "valid" if strides > 1 else "same"
    node.add_op(Identity())
    node.add_op(
        Conv2D(filters=8, kernel_size=(1, 1), strides=strides,
               padding=padding))
    node.add_op(
        Conv2D(filters=8, kernel_size=(3, 3), strides=strides,
               padding=padding))
    node.add_op(
        Conv2D(filters=8, kernel_size=(5, 5), strides=strides,
               padding=padding))
    node.add_op(AvgPool2D(pool_size=(3, 3), strides=strides, padding=padding))
    node.add_op(MaxPool2D(pool_size=(3, 3), strides=strides, padding=padding))
    node.add_op(MaxPool2D(pool_size=(5, 5), strides=strides, padding=padding))
    node.add_op(MaxPool2D(pool_size=(7, 7), strides=strides, padding=padding))
    node.add_op(
        SeparableConv2D(kernel_size=(3, 3),
                        filters=8,
                        strides=strides,
                        padding=padding))
    node.add_op(
        SeparableConv2D(kernel_size=(5, 5),
                        filters=8,
                        strides=strides,
                        padding=padding))
    node.add_op(
        SeparableConv2D(kernel_size=(7, 7),
                        filters=8,
                        strides=strides,
                        padding=padding))
    if strides == 1:
        node.add_op(
            Conv2D(
                filters=8,
                kernel_size=(3, 3),
                strides=strides,
                padding=padding,
                dilation_rate=2,
            ))
    return node
Code Example #7
File: conv_lstm_2d.py  Project: bigwater/NASBigData
def add_convlstm_to_(node):
    node.add_op(Identity())  # we do not want to create a layer in this case
    activations = [None, tf.nn.swish, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid]
    # for filters in range(16, 97, 16):
    filters = 8
    for activation in activations:
        node.add_op(
            tf.keras.layers.ConvLSTM2D(
                filters=filters,
                kernel_size=1,
                activation=activation,
                padding="same",
                return_sequences=True,
            ))
Code Example #8
File: uno_mlp_1.py  Project: bigwater/candlepb
def add_mlp_op_(node):
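    # Candidate operations: Identity, Dense layers (100/500/1000 units, relu/tanh/sigmoid), or Dropout.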
    node.add_op(Identity())
    node.add_op(Dense(100, tf.nn.relu))
    node.add_op(Dense(100, tf.nn.tanh))
    node.add_op(Dense(100, tf.nn.sigmoid))
    node.add_op(Dropout(0.3))
    node.add_op(Dense(500, tf.nn.relu))
    node.add_op(Dense(500, tf.nn.tanh))
    node.add_op(Dense(500, tf.nn.sigmoid))
    node.add_op(Dropout(0.4))
    node.add_op(Dense(1000, tf.nn.relu))
    node.add_op(Dense(1000, tf.nn.tanh))
    node.add_op(Dense(1000, tf.nn.sigmoid))
    node.add_op(Dropout(0.5))
Code Example #9
    def build(self,
              input_shape,
              output_shape,
              units=[128, 64, 32, 16, 8, 16, 32, 64, 128],
              num_layers=5,
              **kwargs):
        ss = KSearchSpace(input_shape, output_shape)

        inp = ss.input_nodes[0]

        # auto-encoder
        units = [128, 64, 32, 16, 8, 16, 32, 64, 128]
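        # note: this hard-coded schedule overrides the `units` argument of build()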
        prev_node = inp
        d = 1
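        # d == 1 while the encoder is shrinking; it flips to -1 at the bottleneck (latent space)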
        for i in range(len(units)):
            vnode = VariableNode()
            vnode.add_op(Identity())
            if d == 1 and units[i] < units[i + 1]:
                d = -1
                for u in range(min(2, units[i]), max(2, units[i]) + 1, 2):
                    vnode.add_op(Dense(u, tf.nn.relu))
                latente_space = vnode
            else:
                for u in range(min(units[i], units[i + d]),
                               max(units[i], units[i + d]) + 1, 2):
                    vnode.add_op(Dense(u, tf.nn.relu))
            ss.connect(prev_node, vnode)
            prev_node = vnode

        out2 = ConstantNode(op=Dense(output_shape[0][0], name="output_0"))
        ss.connect(prev_node, out2)

        # regressor
        prev_node = latente_space
        # prev_node = inp
        for _ in range(num_layers):
            vnode = VariableNode()
            for i in range(16, 129, 16):
                vnode.add_op(Dense(i, tf.nn.relu))

            ss.connect(prev_node, vnode)
            prev_node = vnode

        out1 = ConstantNode(op=Dense(output_shape[1][0], name="output_1"))
        ss.connect(prev_node, out1)

        return ss
Code Example #10
File: search_space.py  Project: bigwater/NASBigData
def add_lstm_seq_(node):
    node.add_op(Identity())  # we do not want to create a layer in this case
    for units in range(16, 1025, 16):
        node.add_op(LSTM(units=units, return_sequences=True, stateful=False))
Code Example #11
def add_conv_op_(node):
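    # Candidate operations: Identity or a Conv1D with filter_size 3 to 6 and num_filters=8.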
    node.add_op(Identity())
    node.add_op(Conv1D(filter_size=3, num_filters=8))
    node.add_op(Conv1D(filter_size=4, num_filters=8))
    node.add_op(Conv1D(filter_size=5, num_filters=8))
    node.add_op(Conv1D(filter_size=6, num_filters=8))
Code Example #12
def add_pooling_op_(node):
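    # Candidate operations: Identity or MaxPooling1D with pool_size 3 to 6.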
    node.add_op(Identity())
    node.add_op(MaxPooling1D(pool_size=3, padding='same'))
    node.add_op(MaxPooling1D(pool_size=4, padding='same'))
    node.add_op(MaxPooling1D(pool_size=5, padding='same'))
    node.add_op(MaxPooling1D(pool_size=6, padding='same'))
Code Example #13
def add_activation_op_(node):
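    # Candidate operations: Identity or a relu/tanh/sigmoid Activation layer.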
    node.add_op(Identity())
    node.add_op(Activation(activation='relu'))
    node.add_op(Activation(activation='tanh'))
    node.add_op(Activation(activation='sigmoid'))
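Code Examples #11 to #13 build the candidate sets for a 1D convolutional block. As an illustration only (not code from the original project), the sketch below chains the three helpers the way Code Example #9 chains VariableNodes with an explicit ConstantNode output; the KSearchSpace call, the three-stage depth, the single-output Dense head, and the omitted imports are assumptions.

def build_conv1d_space(input_shape, output_shape, num_stages=3):
    # Hedged sketch: stack conv -> activation -> pooling stages from the helpers above.
    ss = KSearchSpace(input_shape, output_shape)
    prev_node = ss.input_nodes[0]
    for _ in range(num_stages):
        conv = VariableNode()
        add_conv_op_(conv)  # Identity or Conv1D (Code Example #11)
        ss.connect(prev_node, conv)

        act = VariableNode()
        add_activation_op_(act)  # Identity or Activation (Code Example #13)
        ss.connect(conv, act)

        pool = VariableNode()
        add_pooling_op_(pool)  # Identity or MaxPooling1D (Code Example #12)
        ss.connect(act, pool)

        prev_node = pool

    # single output head, assuming output_shape like (1,)
    output = ConstantNode(op=Dense(output_shape[0]))
    ss.connect(prev_node, output)
    return ss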
Code Example #14
def generate_conv_node(strides, mime=False, first=False, num_filters=8):
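    # Same mimicking scheme as Code Example #6, but non-first nodes also get a Zero op
    # (letting the search drop the branch entirely), "same" padding is used throughout,
    # and dilated convolutions are only offered when strides == 1.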
    if mime:
        if strides > 1:
            node = MimeNode(next(cycle_reduction_nodes), name="Conv")
        else:
            node = MimeNode(next(cycle_normal_nodes), name="Conv")
    else:
        node = VariableNode(name="Conv")
        if strides > 1:
            reduction_nodes.append(node)
        else:
            normal_nodes.append(node)

    padding = "same"
    if first:
        node.add_op(Identity())
    else:
        node.add_op(Zero())
    node.add_op(Identity())
    node.add_op(
        Conv2D(
            filters=num_filters,
            kernel_size=(3, 3),
            strides=strides,
            padding=padding,
            activation=tf.nn.relu,
        ))
    node.add_op(
        Conv2D(
            filters=num_filters,
            kernel_size=(5, 5),
            strides=strides,
            padding=padding,
            activation=tf.nn.relu,
        ))
    node.add_op(AvgPool2D(pool_size=(3, 3), strides=strides, padding=padding))
    node.add_op(MaxPool2D(pool_size=(3, 3), strides=strides, padding=padding))
    node.add_op(
        SeparableConv2D(kernel_size=(3, 3),
                        filters=num_filters,
                        strides=strides,
                        padding=padding))
    node.add_op(
        SeparableConv2D(kernel_size=(5, 5),
                        filters=num_filters,
                        strides=strides,
                        padding=padding))
    if strides == 1:
        node.add_op(
            Conv2D(
                filters=num_filters,
                kernel_size=(3, 3),
                strides=strides,
                padding=padding,
                dilation_rate=2,
            ))
        node.add_op(
            Conv2D(
                filters=num_filters,
                kernel_size=(5, 5),
                strides=strides,
                padding=padding,
                dilation_rate=2,
            ))
    return node