Example #1: MLPooling processor run()
    def run(self) -> None:
        self.LOGGER.info("MLPooling Processor running!")
        # Cap GPU memory usage on the selected device before any TensorFlow work.
        TensorFlowUtils.device_memory_limit(self.device_idx, self.mem_limit)
        # self.ds_train, self.ds_test = self._make_dataset()
        # The (train, test) dataset pair is handed in pre-built via org_dataset.
        self.ds_train = self.org_dataset[0]
        self.ds_test = self.org_dataset[1]
        self.LOGGER.info("TensorFlow dataset objects ready!")
        self._do_work()
        self.LOGGER.info("MLPooling Processor terminated!")
Example #2: DNN (MLP) build()
    def build(self):
        ## Model name / algorithm type
        model_nm = self.param_dict["model_nm"]
        algorithm_type = self.param_dict["algorithm_type"]

        ## DNN Parameter Setting
        input_units = self.param_dict["input_units"]
        output_units = self.param_dict["output_units"]
        hidden_units = self.param_dict["hidden_units"]
        initial_weight = self.param_dict.get("initial_weight", 0.1)
        act_fn = TensorFlowUtils.get_active_fn(
            self.param_dict.get("act_fn", "relu"))
        dropout_prob = self.param_dict.get("dropout_prob", 0.1)
        optimizer_fn = TensorFlowUtils.get_optimizer_fn(
            self.param_dict.get("optimizer_fn", "adam"))
        learning_rate = self.param_dict.get("learning_rate", 0.1)
        units = TensorFlowUtils.get_units(input_units, hidden_units,
                                          output_units)

        ### Keras model
        self.model = tf.keras.Sequential()
        self.inputs = tf.keras.Input(shape=(units[0],), name=model_nm + '_X')
        # Register the input layer explicitly, as the RNN/CNN builders below do.
        self.model.add(self.inputs)

        # Multi-Layer Perceptron
        TensorFlowUtils.mlp_block(self.model, units, act_fn, dropout_prob,
                                  initial_weight, model_nm + "mlp",
                                  algorithm_type)
        TensorFlowUtils.compile_model(self.model, algorithm_type, output_units,
                                      optimizer_fn, learning_rate)
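For reference, a plausible param_dict for this MLP builder; the keys match what build() reads, but the values are purely illustrative, algorithm_type's legal values live in TensorFlowUtils, and get_units is assumed to concatenate the widths into [input_units, *hidden_units, output_units]:

param_dict = {
    "model_nm": "mlp_demo",
    "algorithm_type": "classifier",  # assumed label; valid values depend on TensorFlowUtils
    "input_units": 10,
    "output_units": 2,
    "hidden_units": [64, 32],        # assumed list form, given get_units(input, hidden, output)
    "initial_weight": 0.1,
    "act_fn": "relu",
    "dropout_prob": 0.1,
    "optimizer_fn": "adam",
    "learning_rate": 0.001,
}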
Example #3: RNN build()
    def build(self):
        ## Model name / algorithm type
        model_nm = self.param_dict["model_nm"]
        algorithm_type = self.param_dict["algorithm_type"]

        ## DNN Parameter Setting
        input_units = self.param_dict["input_units"]
        output_units = self.param_dict["output_units"]
        hidden_units = self.param_dict["hidden_units"]
        initial_weight = self.param_dict.get("initial_weight", 0.1)
        act_fn = TensorFlowUtils.get_active_fn(
            self.param_dict.get("act_fn", "relu"))
        dropout_prob = self.param_dict.get("dropout_prob", 0.1)
        optimizer_fn = TensorFlowUtils.get_optimizer_fn(
            self.param_dict.get("optimizer_fn", "adam"))
        learning_rate = self.param_dict.get("learning_rate", 0.1)
        cell_units = self.param_dict["cell_units"]
        rnn_cell = self.param_dict["rnn_cell"]
        seq_length = self.param_dict["seq_length"]

        # Resolve the recurrent layer class from its configured name
        # (get_rnn_cell presumably maps e.g. "lstm" to tf.keras.layers.LSTM).
        cell = TensorFlowUtils.get_rnn_cell(rnn_cell)

        ### Keras model
        self.model = tf.keras.Sequential()
        self.inputs = tf.keras.Input(shape=(seq_length * input_units,),
                                     name=model_nm + '_X')
        self.model.add(self.inputs)

        # Un-flatten the input back into (timesteps, features) for the RNN cell.
        self.model.add(
            tf.keras.layers.Reshape(
                (seq_length, input_units),
                name="{}_input_reshape".format(model_nm)
            )
        )

        self.model.add(
            cell(
                units=cell_units,
                activation=act_fn,
                dropout=dropout_prob,
                name="{}_cell".format(model_nm),
            )
        )

        units = TensorFlowUtils.get_units(cell_units, hidden_units, output_units)

        # Multi-Layer Perceptron
        TensorFlowUtils.mlp_block(
            self.model, units, act_fn, dropout_prob, initial_weight,
            model_nm + "mlp", algorithm_type
        )

        self.model.summary()
        TensorFlowUtils.compile_model(self.model, algorithm_type, output_units,
                                      optimizer_fn, learning_rate)
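A standalone shape check, under assumed sizes, of the flatten-then-reshape convention this builder relies on: the Input width must equal seq_length * input_units so the Reshape can recover the (timesteps, features) tensor the recurrent layer expects:

import tensorflow as tf

seq_length, input_units = 5, 4                   # assumed sizes for illustration
flat = tf.zeros((2, seq_length * input_units))   # a batch of 2 flattened windows
seq = tf.keras.layers.Reshape((seq_length, input_units))(flat)
print(seq.shape)  # (2, 5, 4): batch x timesteps x features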
Example #4: CNN build()
    def build(self):
        ## Model name / algorithm type
        model_nm = self.param_dict["model_nm"]
        algorithm_type = self.param_dict["algorithm_type"]

        ## DNN Parameter Setting
        input_units = self.param_dict["input_units"]
        output_units = self.param_dict["output_units"]
        hidden_units = self.param_dict["hidden_units"]
        initial_weight = self.param_dict.get("initial_weight", 0.1)
        act_fn = TensorFlowUtils.get_active_fn(
            self.param_dict.get("act_fn", "relu"))
        dropout_prob = self.param_dict.get("dropout_prob", 0.1)
        optimizer_fn = TensorFlowUtils.get_optimizer_fn(
            self.param_dict.get("optimizer_fn", "adam"))
        learning_rate = self.param_dict.get("learning_rate", 0.1)
        filter_sizes = self.param_dict["filter_sizes"]
        pool_sizes = self.param_dict["pool_sizes"]
        num_filters = self.param_dict["num_filters"]
        pooling_fn = self.param_dict["pooling_fn"]
        conv_fn = self.param_dict["conv_fn"]

        pooling = TensorFlowUtils.get_pooling_fn(pooling_fn)
        conv = TensorFlowUtils.get_conv_fn(conv_fn)
        conv_stride = None      # only the 1D branch below assigns real strides
        pooling_stride = None

        ### Keras model
        self.model = tf.keras.Sequential()
        self.inputs = tf.keras.Input(shape=(input_units,), name=model_nm + '_X')
        self.model.add(self.inputs)

        if "1D" in conv_fn:
            conv_stride = 1
            pooling_stride = 2
            self.model.add(
                tf.keras.layers.Reshape(
                    (input_units, 1),
                    name="{}_input_reshape".format(model_nm)
                )
            )

        for i, filter_size in enumerate(filter_sizes):
            # Convolution Layer
            conv_cls = conv(
                kernel_size=filter_size,
                filters=num_filters,
                strides=conv_stride,
                padding="SAME",
                activation=act_fn,
                name="{}_conv_{}".format(model_nm, i)
            )
            self.model.add(conv_cls)

            # Pooling Layer
            pooled_cls = pooling(
                pool_size=pool_sizes[i],
                strides=pooling_stride,
                padding="same",
                name="{}_pool_{}".format(model_nm, i))
            self.model.add(pooled_cls)

        # Flatten the conv/pool feature maps and regularize before the MLP head.
        flatten_cls = tf.keras.layers.Flatten()
        self.model.add(flatten_cls)
        self.model.add(tf.keras.layers.Dropout(dropout_prob))

        units = TensorFlowUtils.get_units(self.model.output_shape[1],
                                          hidden_units, output_units)

        # Multi-Layer Perceptron
        TensorFlowUtils.mlp_block(
            self.model, units, act_fn, dropout_prob, initial_weight,
            model_nm + "mlp", algorithm_type
        )
        self.model.summary()
        TensorFlowUtils.compile_model(self.model, algorithm_type, output_units,
                                      optimizer_fn, learning_rate)
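A standalone trace of the 1D branch, under assumed sizes, showing why conv_stride=1 with "same" padding preserves sequence length while the stride-2 pooling halves it, so each conv/pool block shrinks the sequence before Flatten fixes the width that get_units receives:

import tensorflow as tf

m = tf.keras.Sequential([
    tf.keras.Input(shape=(32,)),
    tf.keras.layers.Reshape((32, 1)),             # add the channel axis
    tf.keras.layers.Conv1D(filters=8, kernel_size=3, strides=1,
                           padding="same", activation="relu"),  # length stays 32
    tf.keras.layers.MaxPool1D(pool_size=2, strides=2,
                              padding="same"),    # length halves to 16
    tf.keras.layers.Flatten(),
])
print(m.output_shape)  # (None, 128): 16 steps x 8 filters feed the MLP head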