Example #1
    def generate(self, model_len=None, model_width=None):
        if model_width is None:
            model_width = Constant.MODEL_WIDTH
        graph = Graph(self.input_shape, False)
        temp_input_channel = self.input_shape[-1]
        output_node_id = 0
        # output_node_id = graph.add_layer(StubReLU(), output_node_id)
        # Stem: 3x3 convolution followed by batch norm.
        output_node_id = graph.add_layer(
            self.conv(temp_input_channel, model_width, kernel_size=3),
            output_node_id)
        output_node_id = graph.add_layer(self.batch_norm(model_width),
                                         output_node_id)
        # output_node_id = graph.add_layer(self.pooling(kernel_size=3, stride=2, padding=1), output_node_id)

        # Four stages of two blocks each; the width doubles between stages,
        # and the later stages downsample with stride 2.
        output_node_id = self._make_layer(graph, model_width, 2,
                                          output_node_id, 1)
        model_width *= 2
        output_node_id = self._make_layer(graph, model_width, 2,
                                          output_node_id, 2)
        model_width *= 2
        output_node_id = self._make_layer(graph, model_width, 2,
                                          output_node_id, 2)
        model_width *= 2
        output_node_id = self._make_layer(graph, model_width, 2,
                                          output_node_id, 2)

        # Classifier head: global average pooling into a dense layer.
        output_node_id = graph.add_layer(self.global_avg_pooling(),
                                         output_node_id)
        graph.add_layer(
            StubDense(model_width * self.block_expansion, self.n_output_node),
            output_node_id)
        return graph
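
The three `model_width *= 2` steps mean the classifier sees eight times the initial width. A minimal standalone sketch (not from the source) of that bookkeeping:

def resnet_stage_widths(model_width, num_stages=4):
    """Width of each stage, doubling between stages as in Example #1."""
    widths = [model_width]
    for _ in range(num_stages - 1):
        model_width *= 2
        widths.append(model_width)
    return widths

print(resnet_stage_widths(64))  # [64, 128, 256, 512]; the final StubDense sees 512 * block_expansion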
Example #2
    def generate(self,
                 model_len=Constant.MLP_MODEL_LEN,
                 model_width=Constant.MLP_MODEL_WIDTH):
        """Generates a Multi-Layer Perceptron.

        Args:
            model_len: An integer. Number of hidden layers.
            model_width: An integer or a list of integers of length `model_len`. If it is a list, it represents the
                number of nodes in each hidden layer. If it is an integer, all hidden layers have nodes equal to this
                value.

        Returns:
            An instance of the class Graph. Represents the neural architecture graph of the generated model.
        """
        if isinstance(model_width, list) and len(model_width) != model_len:
            raise ValueError(
                "The length of 'model_width' does not match 'model_len'")
        elif isinstance(model_width, int):
            model_width = [model_width] * model_len

        graph = Graph(self.input_shape, False)
        output_node_id = 0
        n_nodes_prev_layer = self.input_shape[0]
        for width in model_width:
            output_node_id = graph.add_layer(
                StubDense(n_nodes_prev_layer, width), output_node_id)
            output_node_id = graph.add_layer(
                StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id)
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            n_nodes_prev_layer = width

        graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node),
                        output_node_id)
        return graph
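
The list-or-int handling above normalizes `model_width` before the loop. A minimal standalone sketch (not from the source) of the same normalization:

def normalize_width(model_width, model_len):
    # An int fans out to one entry per hidden layer; a list must already
    # match model_len, mirroring the check in Example #2.
    if isinstance(model_width, list):
        if len(model_width) != model_len:
            raise ValueError("The length of 'model_width' does not match 'model_len'")
        return model_width
    return [model_width] * model_len

print(normalize_width(64, 3))        # [64, 64, 64]
print(normalize_width([32, 64], 2))  # [32, 64]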
Example #3
    def generate(self, model_len=None, model_width=None):
        """Generates a Multi-Layer Perceptron.

        Args:
            model_len: An integer. Number of hidden layers.
            model_width: An integer or a list of integers of length `model_len`. If it is a list, it represents the
                number of nodes in each hidden layer. If it is an integer, all hidden layers have nodes equal to this
                value.

        Returns:
            An instance of the class Graph. Represents the neural architecture graph of the generated model.
        """
        if model_len is None:
            model_len = Constant.MODEL_LEN
        if model_width is None:
            model_width = Constant.MODEL_WIDTH
        if isinstance(model_width, list) and len(model_width) != model_len:
            raise ValueError("The length of 'model_width' does not match 'model_len'")
        elif isinstance(model_width, int):
            model_width = [model_width] * model_len

        graph = Graph(self.input_shape, False)
        output_node_id = 0
        n_nodes_prev_layer = self.input_shape[0]
        for width in model_width:
            output_node_id = graph.add_layer(StubDense(n_nodes_prev_layer, width), output_node_id)
            output_node_id = graph.add_layer(StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id)
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            n_nodes_prev_layer = width

        graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)
        return graph
Example #4
    def generate(self, model_len, model_width):
        graph = Graph(self.input_shape, False)
        temp_input_channel = self.input_shape[-1]
        output_node_id = 0
        # Stem: ReLU -> 7x7 conv -> batch norm -> strided pooling.
        output_node_id = graph.add_layer(StubReLU(), output_node_id)
        output_node_id = graph.add_layer(
            self.conv(temp_input_channel, model_width, kernel_size=7),
            output_node_id)
        output_node_id = graph.add_layer(self.batch_norm(model_width),
                                         output_node_id)
        output_node_id = graph.add_layer(
            self.pooling(kernel_size=3, stride=2, padding=1), output_node_id)
        for layer in self.layers:
            output_node_id = self._make_layer(graph, model_width, layer,
                                              output_node_id)
            model_width *= 2
        output_node_id = graph.add_layer(self.global_avg_pooling(),
                                         output_node_id)
        # model_width was doubled once more after the final stage, so the
        # last stage's actual width is model_width // 2.
        graph.add_layer(
            StubDense(model_width // 2 * self.block_expansion,
                      self.n_output_node), output_node_id)
        return graph
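
Because the loop doubles `model_width` after every stage, including the last, the classifier width is half the post-loop value. A minimal standalone sketch (not from the source):

def final_stage_width(model_width, num_stages):
    for _ in range(num_stages):
        stage_width = model_width  # width actually used by this stage
        model_width *= 2           # doubled even after the last stage
    return stage_width, model_width

print(final_stage_width(64, 4))  # (512, 1024): the StubDense input is 512 * block_expansion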
Example #5
    def generate(self, model_len=Constant.MLP_MODEL_LEN, model_width=Constant.MLP_MODEL_WIDTH):
        if isinstance(model_width, list) and len(model_width) != model_len:
            raise ValueError("The length of 'model_width' does not match 'model_len'")
        elif isinstance(model_width, int):
            model_width = [model_width] * model_len

        graph = Graph(self.input_shape, False)
        output_node_id = 0
        n_nodes_prev_layer = self.input_shape[0]
        for width in model_width:
            output_node_id = graph.add_layer(StubDense(n_nodes_prev_layer, width), output_node_id)
            output_node_id = graph.add_layer(StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id)
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            n_nodes_prev_layer = width

        graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)
        return graph
Example #6
    def generate(self, model_len=None, model_width=None):
        if model_width is None:
            model_width = Constant.MODEL_WIDTH
        graph = Graph(self.input_shape, False)
        temp_input_channel = self.input_shape[-1]
        output_node_id = 0
        # output_node_id = graph.add_layer(StubReLU(), output_node_id)
        output_node_id = graph.add_layer(self.conv(temp_input_channel, model_width, kernel_size=3), output_node_id)
        output_node_id = graph.add_layer(self.batch_norm(model_width), output_node_id)
        # output_node_id = graph.add_layer(self.pooling(kernel_size=3, stride=2, padding=1), output_node_id)

        output_node_id = self._make_layer(graph, model_width, 2, output_node_id, 1)
        model_width *= 2
        output_node_id = self._make_layer(graph, model_width, 2, output_node_id, 2)
        model_width *= 2
        output_node_id = self._make_layer(graph, model_width, 2, output_node_id, 2)
        model_width *= 2
        output_node_id = self._make_layer(graph, model_width, 2, output_node_id, 2)

        output_node_id = graph.add_layer(self.global_avg_pooling(), output_node_id)
        graph.add_layer(StubDense(model_width * self.block_expansion, self.n_output_node), output_node_id)
        return graph
Example #7
def get_add_skip_model():
    graph = Graph((32, 32, 3), False)
    output_node_id = 0

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv2d(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization2d(3),
                                     output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv2d(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization2d(3),
                                     output_node_id)

    temp_node_id = output_node_id

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv2d(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization2d(3),
                                     output_node_id)

    temp_node_id = graph.add_layer(StubReLU(), temp_node_id)
    temp_node_id = graph.add_layer(StubConv2d(3, 3, 1), temp_node_id)
    temp_node_id = graph.add_layer(StubBatchNormalization2d(3), temp_node_id)
    output_node_id = graph.add_layer(StubAdd(), [output_node_id, temp_node_id])

    temp_node_id = output_node_id

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv2d(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization2d(3),
                                     output_node_id)

    temp_node_id = graph.add_layer(StubReLU(), temp_node_id)
    temp_node_id = graph.add_layer(StubConv2d(3, 3, 1), temp_node_id)
    temp_node_id = graph.add_layer(StubBatchNormalization2d(3), temp_node_id)
    output_node_id = graph.add_layer(StubAdd(), [output_node_id, temp_node_id])

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv2d(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization2d(3),
                                     output_node_id)

    output_node_id = graph.add_layer(StubFlatten(), output_node_id)
    output_node_id = graph.add_layer(StubDropout2d(Constant.CONV_DROPOUT_RATE),
                                     output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(
        StubDense(graph.node_list[output_node_id].shape[0], 5), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubDense(5, 5), output_node_id)
    graph.add_layer(StubSoftmax(), output_node_id)

    graph.produce_model().set_weight_to_graph()

    return graph
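
The repeated pattern above is a residual block: trunk and shortcut branch from the same node, and StubAdd merges them. A condensed sketch (not from the source; it assumes the same Graph and Stub* classes that Example #7 already has in scope):

def add_skip_block(graph, node_id, channels=3):
    # Trunk: ReLU -> 3x3 conv -> batch norm.
    trunk = graph.add_layer(StubReLU(), node_id)
    trunk = graph.add_layer(StubConv2d(channels, channels, 3), trunk)
    trunk = graph.add_layer(StubBatchNormalization2d(channels), trunk)
    # Shortcut: the same stack with a 1x1 conv, branching from the same input node.
    shortcut = graph.add_layer(StubReLU(), node_id)
    shortcut = graph.add_layer(StubConv2d(channels, channels, 1), shortcut)
    shortcut = graph.add_layer(StubBatchNormalization2d(channels), shortcut)
    # Merge the two branches into a single output node.
    return graph.add_layer(StubAdd(), [trunk, shortcut])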
Example #8
    def generate(self,
                 model_len=Constant.MODEL_LEN,
                 model_width=Constant.MODEL_WIDTH):
        """Generates a CNN.

        Args:
            model_len: An integer. Number of convolutional layers.
            model_width: An integer. Number of filters for the convolutional layers.

        Returns:
            An instance of the class Graph. Represents the neural architecture graph of the generated model.
        """
        pooling_len = int(model_len / 4)
        graph = Graph(self.input_shape, False)
        temp_input_channel = self.input_shape[-1]
        output_node_id = 0
        for i in range(model_len):
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            output_node_id = graph.add_layer(
                self.conv(temp_input_channel, model_width, kernel_size=3),
                output_node_id)
            output_node_id = graph.add_layer(self.batch_norm(model_width),
                                             output_node_id)
            temp_input_channel = model_width
            if pooling_len == 0 or ((i + 1) % pooling_len == 0
                                    and i != model_len - 1):
                output_node_id = graph.add_layer(self.pooling(),
                                                 output_node_id)

        output_node_id = graph.add_layer(self.global_avg_pooling(),
                                         output_node_id)
        output_node_id = graph.add_layer(
            self.dropout(Constant.CONV_DROPOUT_RATE), output_node_id)
        output_node_id = graph.add_layer(
            StubDense(graph.node_list[output_node_id].shape[0], model_width),
            output_node_id)
        output_node_id = graph.add_layer(StubReLU(), output_node_id)
        graph.add_layer(StubDense(model_width, self.n_output_node),
                        output_node_id)
        return graph
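
The pooling condition inserts a pooling layer after every `pooling_len` convolutions, skipping the last one (and pools after every convolution when `model_len < 4`). A minimal standalone sketch (not from the source) of where those layers land:

def pooling_positions(model_len):
    pooling_len = model_len // 4
    positions = []
    for i in range(model_len):
        if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):
            positions.append(i)
    return positions

print(pooling_positions(8))  # [1, 3, 5]: pooling after every second conv, but not after the last
print(pooling_positions(3))  # [0, 1, 2]: pooling_len == 0, so pooling after every conv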
Example #9
    def generate(self,
                 model_len=Constant.MODEL_LEN,
                 model_width=Constant.MODEL_WIDTH):
        pooling_len = int(model_len / 4)
        graph = Graph(self.input_shape, False)
        temp_input_channel = self.input_shape[-1]
        output_node_id = 0
        for i in range(model_len):
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            output_node_id = graph.add_layer(
                StubConv(temp_input_channel, model_width, kernel_size=3),
                output_node_id)
            output_node_id = graph.add_layer(
                StubBatchNormalization(model_width), output_node_id)
            temp_input_channel = model_width
            if pooling_len == 0 or ((i + 1) % pooling_len == 0
                                    and i != model_len - 1):
                output_node_id = graph.add_layer(StubPooling(), output_node_id)

        output_node_id = graph.add_layer(StubGlobalPooling(), output_node_id)
        output_node_id = graph.add_layer(
            StubDropout(Constant.CONV_DROPOUT_RATE), output_node_id)
        output_node_id = graph.add_layer(
            StubDense(graph.node_list[output_node_id].shape[0], model_width),
            output_node_id)
        output_node_id = graph.add_layer(StubReLU(), output_node_id)
        graph.add_layer(StubDense(model_width, self.n_output_node),
                        output_node_id)
        return graph
Example #10
    def generate(self, model_len=None, model_width=None):
        """Generates a CNN.

        Args:
            model_len: An integer. Number of convolutional layers.
            model_width: An integer. Number of filters for the convolutional layers.

        Returns:
            An instance of the class Graph. Represents the neural architecture graph of the generated model.
        """
        if model_len is None:
            model_len = Constant.MODEL_LEN
        if model_width is None:
            model_width = Constant.MODEL_WIDTH
        pooling_len = int(model_len / 4)
        graph = Graph(self.input_shape, False)
        temp_input_channel = self.input_shape[-1]
        output_node_id = 0
        stride = 1
        for i in range(model_len):
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            output_node_id = graph.add_layer(self.batch_norm(graph.node_list[output_node_id].shape[-1]), output_node_id)
            output_node_id = graph.add_layer(self.conv(temp_input_channel,
                                                       model_width,
                                                       kernel_size=3,
                                                       stride=stride), output_node_id)
            # if stride == 1:
            #     stride = 2
            temp_input_channel = model_width
            if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):
                output_node_id = graph.add_layer(self.pooling(), output_node_id)

        output_node_id = graph.add_layer(self.global_avg_pooling(), output_node_id)
        output_node_id = graph.add_layer(self.dropout(Constant.CONV_DROPOUT_RATE), output_node_id)
        output_node_id = graph.add_layer(StubDense(graph.node_list[output_node_id].shape[0], model_width),
                                         output_node_id)
        output_node_id = graph.add_layer(StubReLU(), output_node_id)
        graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)
        return graph
Example #11
    def generate(self, model_len=None, model_width=None):
        if model_len is None:
            model_len = Constant.MODEL_LEN
        if model_width is None:
            model_width = Constant.MODEL_WIDTH
        graph = Graph(self.input_shape, False)
        temp_input_channel = self.input_shape[-1]
        # First convolution
        output_node_id = 0
        output_node_id = graph.add_layer(self.conv(temp_input_channel, model_width, kernel_size=7),
                                         output_node_id)
        output_node_id = graph.add_layer(self.batch_norm(num_features=self.num_init_features), output_node_id)
        output_node_id = graph.add_layer(StubReLU(), output_node_id)
        db_input_node_id = graph.add_layer(self.max_pooling(kernel_size=3, stride=2, padding=1), output_node_id)
        # Each dense block
        num_features = self.num_init_features
        for i, num_layers in enumerate(self.block_config):
            db_input_node_id = self._dense_block(num_layers=num_layers, num_input_features=num_features,
                                                 bn_size=self.bn_size, growth_rate=self.growth_rate,
                                                 drop_rate=self.drop_rate,
                                                 graph=graph, input_node_id=db_input_node_id)
            num_features = num_features + num_layers * self.growth_rate
            if i != len(self.block_config) - 1:
                db_input_node_id = self._transition(num_input_features=num_features,
                                                    num_output_features=num_features // 2,
                                                    graph=graph, input_node_id=db_input_node_id)
                num_features = num_features // 2
        # Final batch norm
        out = graph.add_layer(self.batch_norm(num_features), db_input_node_id)

        out = graph.add_layer(StubReLU(), out)
        out = graph.add_layer(self.adaptive_avg_pooling(), out)
        # Linear layer
        graph.add_layer(StubDense(num_features, self.n_output_node), out)
        return graph
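
The `num_features` arithmetic above grows the channel count by `growth_rate` per layer inside each dense block and halves it at each transition. A minimal standalone sketch (not from the source):

def densenet_features(num_init_features, block_config, growth_rate):
    num_features = num_init_features
    for i, num_layers in enumerate(block_config):
        num_features += num_layers * growth_rate  # each layer adds growth_rate channels
        if i != len(block_config) - 1:
            num_features //= 2                    # transition between blocks halves the channels
    return num_features

# DenseNet-121-style values, used here only for illustration:
print(densenet_features(64, (6, 12, 24, 16), 32))  # 1024 features into the final StubDense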
Example #12
def get_pooling_model():
    graph = Graph((32, 32, 3), False)
    output_node_id = 0

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)

    output_node_id = graph.add_layer(StubPooling(2), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)

    output_node_id = graph.add_layer(StubFlatten(), output_node_id)
    output_node_id = graph.add_layer(StubDropout(Constant.CONV_DROPOUT_RATE),
                                     output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(
        StubDense(graph.node_list[output_node_id].shape[0], 5), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubDense(5, 5), output_node_id)
    graph.add_layer(StubSoftmax(), output_node_id)

    graph.produce_model().set_weight_to_graph()

    return graph
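
The first StubDense width in Example #12 is read off the graph at build time. A minimal standalone sketch (not from the source) of the expected value, assuming the Stub convolutions preserve spatial size (same padding) and StubPooling(2) halves each spatial dimension:

def flatten_width(height, width, channels, num_poolings, pool_size=2):
    for _ in range(num_poolings):
        height //= pool_size
        width //= pool_size
    return height * width * channels

print(flatten_width(32, 32, 3, num_poolings=1))  # 768 inputs to the first StubDense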
Example #13
    def generate(self, model_len=None, model_width=None):
        graph = Graph(self.input_shape, False)
        temp_input_channel = self.input_shape[-1]
        output_node_id = 0

        # out_planes = self.in_planes * self.n_output_node
        out_planes = 24

        # Stem: 3x3 conv + batch norm + ReLU.
        output_node_id = graph.add_layer(self.conv(temp_input_channel,
                                                   self.in_planes,
                                                   kernel_size=3,
                                                   stride=1,
                                                   padding=1), output_node_id)
        output_node_id = graph.add_layer(self.batch_norm(graph.node_list[output_node_id].shape[-1]), output_node_id)
        output_node_id = graph.add_layer(StubReLU(), output_node_id)

        output_node_id = self._make_layer(graph, output_node_id)

        # Bottleneck head: 1x1 conv expands the channels four-fold.
        output_node_id = graph.add_layer(self.conv(out_planes,
                                                   out_planes * 4,
                                                   kernel_size=1,
                                                   stride=1,
                                                   padding=0), output_node_id)
        output_node_id = graph.add_layer(self.batch_norm(graph.node_list[output_node_id].shape[-1]), output_node_id)
        output_node_id = graph.add_layer(StubReLU(), output_node_id)

        output_node_id = graph.add_layer(self.global_avg_pooling(), output_node_id)
        graph.add_layer(StubDense(out_planes * 4, self.n_output_node), output_node_id)
        return graph