Example #1
    def layering(self):
        input_node = self.input_node
        for idx in range(self.hidden_layer_num):
            self.layers['affine' + str(idx)] = tfl.Affine(
                self.params['W' + str(idx)],
                input_node,
                self.params['b' + str(idx)],
                name='affine' + str(idx),
                graph=self)
            self.layers['activation' + str(idx)] = self.activator(
                self.layers['affine' + str(idx)],
                name='activation' + str(idx),
                graph=self)
            input_node = self.layers['activation' + str(idx)]

        idx = self.hidden_layer_num
        self.layers['affine' + str(idx)] = tfl.Affine(
            self.params['W' + str(idx)],
            self.layers['activation' + str(idx - 1)],
            self.params['b' + str(idx)],
            name='affine' + str(idx),
            graph=self)
        self.output = self.layers['affine' + str(idx)]

        self.error = tfl.SoftmaxWithCrossEntropyLoss(self.output,
                                                     self.target_node,
                                                     name="SCEL",
                                                     graph=self)
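For reference, here is a minimal plain-NumPy sketch of the computation this graph encodes, assuming `tfl.Affine` computes `x @ W + b`, the activator is ReLU, and `SoftmaxWithCrossEntropyLoss` is the usual softmax followed by cross-entropy. The `forward` helper and its signature are hypothetical, not part of the framework:

import numpy as np

def forward(x, target, params, hidden_layer_num,
            activate=lambda u: np.maximum(u, 0)):
    # target: integer class labels, shape (N,)
    for idx in range(hidden_layer_num):
        u = x @ params['W' + str(idx)] + params['b' + str(idx)]   # affine<idx>
        x = activate(u)                                           # activation<idx>
    idx = hidden_layer_num
    scores = x @ params['W' + str(idx)] + params['b' + str(idx)]  # output affine
    # softmax with cross-entropy loss ("SCEL")
    e = np.exp(scores - scores.max(axis=1, keepdims=True))
    probs = e / e.sum(axis=1, keepdims=True)
    loss = -np.mean(np.log(probs[np.arange(len(target)), target] + 1e-12))
    return scores, loss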
Example #2
    def layering(self, activator=tfe.Activator.ReLU.value):
        self.activator = activator

        self.affine0 = tfl.Affine(self.params['W0'],
                                  self.input_node,
                                  self.params['b0'],
                                  name="A0",
                                  graph=self)
        self.activation0 = activator(self.affine0, name="O0", graph=self)

        self.affine1 = tfl.Affine(self.params['W1'],
                                  self.input_node,
                                  self.params['b1'],
                                  name="A1",
                                  graph=self)
        self.activation1 = activator(self.affine1, name="O1", graph=self)

        self.affine2 = tfl.Affine2(self.params['W2'],
                                   self.activation0,
                                   self.activation1,
                                   self.params['b2'],
                                   name="A2",
                                   graph=self)
        self.output = activator(self.affine2, name="O2", graph=self)

        self.error = tfl.SquaredError(self.output,
                                      self.target_node,
                                      name="SE",
                                      graph=self)
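Note the topology here: unlike Example #1, both `affine0` and `affine1` read from `self.input_node`, so the network is a diamond rather than a chain — two parallel affine/activation branches that `tfl.Affine2` merges (with its own weights `W2` and bias `b2`) before the final activation and the squared-error loss.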
Example #3
    def layering(self, refitting=False):
        input_node = self.input_node

        if not refitting:
            self.output_mean_list['affine'] = {}
            self.output_variance_list['affine'] = {}
            self.output_skewness_list['affine'] = {}
            self.output_kurtosis_list['affine'] = {}

            self.output_mean_list['activation'] = {}
            self.output_variance_list['activation'] = {}
            self.output_skewness_list['activation'] = {}
            self.output_kurtosis_list['activation'] = {}

        for idx in range(self.hidden_layer_num):
            self.layers['affine' + str(idx)] = tfl.Affine(
                self.params['W' + str(idx)],
                input_node,
                self.params['b' + str(idx)],
                name='affine' + str(idx),
                graph=self)
            self.layers['activation' + str(idx)] = self.activator(
                self.layers['affine' + str(idx)],
                name='activation' + str(idx),
                graph=self)
            input_node = self.layers['activation' + str(idx)]

            if not refitting:
                self.output_mean_list['affine'][idx] = []
                self.output_variance_list['affine'][idx] = []
                self.output_skewness_list['affine'][idx] = []
                self.output_kurtosis_list['affine'][idx] = []

                self.output_mean_list['activation'][idx] = []
                self.output_variance_list['activation'][idx] = []
                self.output_skewness_list['activation'][idx] = []
                self.output_kurtosis_list['activation'][idx] = []

        idx = self.hidden_layer_num
        self.layers['affine' + str(idx)] = tfl.Affine(
            self.params['W' + str(idx)],
            self.layers['activation' + str(idx - 1)],
            self.params['b' + str(idx)],
            name='affine' + str(idx),
            graph=self)
        self.output = self.layers['affine' + str(idx)]

        if not refitting:
            self.output_mean_list['affine'][idx] = []
            self.output_variance_list['affine'][idx] = []
            self.output_skewness_list['affine'][idx] = []
            self.output_kurtosis_list['affine'][idx] = []

        self.error = tfl.SoftmaxWithCrossEntropyLoss(self.output,
                                                     self.target_node,
                                                     name="SCEL",
                                                     graph=self)
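The empty lists initialized above presumably accumulate per-layer output statistics over training. Here is a minimal sketch of the recording step such a class might pair with them, assuming NumPy arrays and SciPy's moment functions; the `record_output_statistics` name and the `out` argument are hypothetical:

import numpy as np
from scipy.stats import skew, kurtosis

def record_output_statistics(self, kind, idx, out):
    # Append the four moments of one layer's output ('affine' or
    # 'activation', index idx) to the tracking dicts set up in layering().
    out = np.asarray(out).ravel()
    self.output_mean_list[kind][idx].append(out.mean())
    self.output_variance_list[kind][idx].append(out.var())
    self.output_skewness_list[kind][idx].append(skew(out))
    self.output_kurtosis_list[kind][idx].append(kurtosis(out))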
Example #4
    def layering(self, activator=tfe.Activator.ReLU.value):
        self.activator = activator
        input_node = self.input_node

        for idx in range(self.hidden_layer_num):
            self.layers['affine' + str(idx)] = tfl.Affine(
                self.params['W' + str(idx)],
                input_node,
                self.params['b' + str(idx)],
                name='affine' + str(idx),
                graph=self)
            self.layers['activation' + str(idx)] = activator(
                self.layers['affine' + str(idx)],
                name='activation' + str(idx),
                graph=self)
            # Chain: the next affine consumes this activation, not the raw input.
            input_node = self.layers['activation' + str(idx)]

        idx = self.hidden_layer_num
        self.layers['affine' + str(idx)] = tfl.Affine(
            self.params['W' + str(idx)],
            input_node,
            self.params['b' + str(idx)],
            name='affine' + str(idx),
            graph=self)
        self.output = activator(self.layers['affine' + str(idx)], name='output', graph=self)

        self.error = tfl.SquaredError(self.output, self.target_node, name="SE", graph=self)
Example #5
    def layering(self, activator=tfe.Activator.ReLU.value):
        self.activator = activator
        u0 = tfl.Affine(self.params['W0'],
                        self.input_node,
                        self.params['b0'],
                        name="A0")
        o0 = activator(u0, name="O0")
        u1 = tfl.Affine(self.params['W1'], o0, self.params['b1'], name="A1")
        self.output = activator(u1, name="O1")
        self.error = tfl.SquaredError(self.output, self.target_node, name="SE")
        # When the model itself is a networkx graph, register the dataflow
        # edges by hand (no graph= keyword is passed to the layers here).
        if isinstance(self, nx.Graph):
            self.add_edge(self.params['W0'], u0)
            self.add_edge(self.input_node, u0)
            self.add_edge(self.params['b0'], u0)
            self.add_edge(u0, o0)
            self.add_edge(self.params['W1'], u1)
            self.add_edge(o0, u1)
            self.add_edge(self.params['b1'], u1)
            self.add_edge(u1, self.output)
            self.add_edge(self.output, self.error)
            self.add_edge(self.error, self.target_node)
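The manual edge registration above can be illustrated with plain networkx and string stand-ins for the node objects. A `DiGraph` is used here instead of the example's undirected `nx.Graph`, with the final edge oriented target → error, so the dataflow order falls out of a topological sort:

import networkx as nx

# Standalone illustration of the topology Example #5 registers.
g = nx.DiGraph()
g.add_edges_from([
    ('W0', 'A0'), ('input', 'A0'), ('b0', 'A0'),
    ('A0', 'O0'),
    ('W1', 'A1'), ('O0', 'A1'), ('b1', 'A1'),
    ('A1', 'O1'),
    ('O1', 'SE'), ('target', 'SE'),
])
print(list(nx.topological_sort(g)))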
Example #6
    def layering(self, activator=tfe.Activator.ReLU.value):
        self.activator = activator
        self.affine = tfl.Affine(self.params['W0'],
                                 self.input_node,
                                 self.params['b0'],
                                 name="A",
                                 graph=self)
        self.output = activator(self.affine, name="O", graph=self)
        self.error = tfl.SquaredError(self.output,
                                      self.target_node,
                                      name="SE",
                                      graph=self)
Example #7
    def layering(self, activator=tfe.Activator.ReLU.value):
        self.activator = activator
        u = tfl.Affine(self.params['W0'],
                       self.input_node,
                       self.params['b0'],
                       name="A")
        self.output = activator(u, name="O")
        self.error = tfl.SquaredError(self.output, self.target_node, name="SE")
        if isinstance(self, nx.Graph):
            self.add_edge(self.params['W0'], u)
            self.add_edge(self.input_node, u)
            self.add_edge(self.params['b0'], u)
            self.add_edge(u, self.output)
            self.add_edge(self.output, self.error)
            self.add_edge(self.error, self.target_node)
Example #8
    def layering(self, refitting=False):
        input_node = self.input_node

        if not refitting:
            self.output_mean_list['conv'] = {}
            self.output_variance_list['conv'] = {}
            self.output_skewness_list['conv'] = {}
            self.output_kurtosis_list['conv'] = {}

            self.output_mean_list['pooling'] = {}
            self.output_variance_list['pooling'] = {}
            self.output_skewness_list['pooling'] = {}
            self.output_kurtosis_list['pooling'] = {}

            self.output_mean_list['affine'] = {}
            self.output_variance_list['affine'] = {}
            self.output_skewness_list['affine'] = {}
            self.output_kurtosis_list['affine'] = {}

            self.output_mean_list['activation'] = {}
            self.output_variance_list['activation'] = {}
            self.output_skewness_list['activation'] = {}
            self.output_kurtosis_list['activation'] = {}

        for idx, cnn_param in enumerate(self.cnn_param_list):
            if cnn_param['type'] == 'conv':
                self.layers['conv' + str(idx)] = tfl.Convolution(
                    w=self.params['W' + str(idx)],
                    x=input_node,
                    b=self.params['b' + str(idx)],
                    pad=cnn_param['pad'],
                    stride=cnn_param['stride'],
                    name='conv' + str(idx),
                    graph=self
                )

                if self.use_batch_normalization:
                    self.layers['batch_normal' + str(idx)] = tfl.BatchNormalization(
                        x=self.layers['conv' + str(idx)],
                        gamma=self.params['gamma' + str(idx)],
                        beta=self.params['beta' + str(idx)],
                        running_mean=self.params['running_mean' + str(idx)],
                        running_var=self.params['running_var' + str(idx)],
                        name='batch_normal' + str(idx),
                        graph=self
                    )
                    next_input_node = self.layers['batch_normal' + str(idx)]
                else:
                    next_input_node = self.layers['conv' + str(idx)]

                self.layers['activation' + str(idx)] = self.activator(
                    u=next_input_node,
                    name='activation' + str(idx),
                    graph=self
                )

                if self.use_dropout:
                    self.layers['dropout' + str(idx)] = tfl.Dropout(
                        x=self.layers['activation' + str(idx)],
                        dropout_ratio=self.dropout_ratio_list[idx],
                        name='dropout' + str(idx),
                        graph=self
                    )
                    input_node = self.layers['dropout' + str(idx)]
                else:
                    input_node = self.layers['activation' + str(idx)]

                if not refitting:
                    self.output_mean_list['conv'][idx] = []
                    self.output_variance_list['conv'][idx] = []
                    self.output_skewness_list['conv'][idx] = []
                    self.output_kurtosis_list['conv'][idx] = []

                    self.output_mean_list['activation'][idx] = []
                    self.output_variance_list['activation'][idx] = []
                    self.output_skewness_list['activation'][idx] = []
                    self.output_kurtosis_list['activation'][idx] = []

            elif cnn_param['type'] == 'pooling':
                self.layers['pooling' + str(idx)] = tfl.Pooling(
                    x=input_node,
                    filter_h=cnn_param['filter_h'],
                    filter_w=cnn_param['filter_w'],
                    stride=cnn_param['stride'],
                    name='pooling' + str(idx),
                    graph=self
                )
                input_node = self.layers['pooling' + str(idx)]

                if not refitting:
                    self.output_mean_list['pooling'][idx] = []
                    self.output_variance_list['pooling'][idx] = []
                    self.output_skewness_list['pooling'][idx] = []
                    self.output_kurtosis_list['pooling'][idx] = []

        self.layers['reshape'] = tfl.Reshape(
            u=input_node,
            p_shape=self.shape_before_fc,
            n_shape=self.num_neurons_flatten_for_fc,
            name='reshape',
            graph=self
        )

        idx += 1
        self.layers['affine' + str(idx)] = tfl.Affine(
            w=self.params['W' + str(idx)],
            x=self.layers['reshape'],
            b=self.params['b' + str(idx)],
            name='affine' + str(idx),
            graph=self
        )

        if self.use_batch_normalization:
            self.layers['batch_normal' + str(idx)] = tfl.BatchNormalization(
                x=self.layers['affine' + str(idx)],
                gamma=self.params['gamma' + str(idx)],
                beta=self.params['beta' + str(idx)],
                running_mean=self.params['running_mean' + str(idx)],
                running_var=self.params['running_var' + str(idx)],
                name='batch_normal' + str(idx),
                graph=self
            )
            next_input_node = self.layers['batch_normal' + str(idx)]
        else:
            next_input_node = self.layers['affine' + str(idx)]

        self.layers['activation' + str(idx)] = self.activator(
            u=next_input_node,
            name='activation' + str(idx),
            graph=self
        )

        if self.use_dropout:
            self.layers['dropout' + str(idx)] = tfl.Dropout(
                x=self.layers['activation' + str(idx)],
                dropout_ratio=self.dropout_ratio_list[idx],
                name='dropout' + str(idx),
                graph=self
            )
            input_node = self.layers['dropout' + str(idx)]
        else:
            input_node = self.layers['activation' + str(idx)]

        if not refitting:
            self.output_mean_list['affine'][idx] = []
            self.output_variance_list['affine'][idx] = []
            self.output_skewness_list['affine'][idx] = []
            self.output_kurtosis_list['affine'][idx] = []

            self.output_mean_list['activation'][idx] = []
            self.output_variance_list['activation'][idx] = []
            self.output_skewness_list['activation'][idx] = []
            self.output_kurtosis_list['activation'][idx] = []

        idx += 1
        self.layers['affine' + str(idx)] = tfl.Affine(
            w=self.params['W' + str(idx)],
            x=input_node,
            b=self.params['b' + str(idx)],
            name='affine' + str(idx),
            graph=self
        )

        self.last_layer_idx = idx

        if not refitting:
            self.output_mean_list['affine'][idx] = []
            self.output_variance_list['affine'][idx] = []
            self.output_skewness_list['affine'][idx] = []
            self.output_kurtosis_list['affine'][idx] = []

        self.output = self.layers['affine' + str(idx)]

        self.error = tfl.SoftmaxWithCrossEntropyLoss(self.output, self.target_node, name="SCEL", graph=self)
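Based on the parameter names passed above (`gamma`, `beta`, `running_mean`, `running_var`), `tfl.BatchNormalization` presumably applies the standard batch-norm transform. A minimal inference-time sketch under that assumption:

import numpy as np

def batch_norm_inference(x, gamma, beta, running_mean, running_var, eps=1e-7):
    # Normalize with the running statistics, then scale and shift.
    x_hat = (x - running_mean) / np.sqrt(running_var + eps)
    return gamma * x_hat + beta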
Example #9
    def layering(self, refitting=False):
        input_node = self.input_node

        for idx, cnn_param in enumerate(self.cnn_param_list):
            if cnn_param['type'] == 'conv':
                self.layers['conv' + str(idx)] = tfl.Convolution(
                    w=self.params['W' + str(idx)],
                    x=input_node,
                    b=self.params['b' + str(idx)],
                    pad=cnn_param['pad'],
                    stride=cnn_param['stride'],
                    name='conv' + str(idx),
                    graph=self)
                self.layers['activation' + str(idx)] = self.activator(
                    u=self.layers['conv' + str(idx)],
                    name='activation' + str(idx),
                    graph=self)
                input_node = self.layers['activation' + str(idx)]
            elif cnn_param['type'] == 'pooling':
                # Pooling has no learned parameters; it only needs the window
                # size and stride (cf. the tfl.Pooling call in Example #8).
                self.layers['pooling' + str(idx)] = tfl.Pooling(
                    x=input_node,
                    filter_h=cnn_param['filter_h'],
                    filter_w=cnn_param['filter_w'],
                    stride=cnn_param['stride'],
                    name='pooling' + str(idx),
                    graph=self)
                input_node = self.layers['pooling' + str(idx)]

        self.layers['reshape'] = tfl.Reshape(
            u=input_node,
            p_shape=self.shape_before_fc,
            n_shape=self.num_neurons_flatten_for_fc,
            name='reshape',
            graph=self)

        idx += 1
        self.layers['affine' + str(idx)] = tfl.Affine(
            w=self.params['W' + str(idx)],
            x=self.layers['reshape'],
            b=self.params['b' + str(idx)],
            name='affine' + str(idx),
            graph=self)
        self.layers['activation' + str(idx)] = self.activator(
            u=self.layers['affine' + str(idx)],
            name='activation' + str(idx),
            graph=self)

        idx += 1
        self.layers['affine' + str(idx)] = tfl.Affine(
            w=self.params['W' + str(idx)],
            x=self.layers['activation' + str(idx - 1)],
            b=self.params['b' + str(idx)],
            name='affine' + str(idx),
            graph=self)

        self.output = self.layers['affine' + str(idx)]

        self.error = tfl.SoftmaxWithCrossEntropyLoss(self.output,
                                                     self.target_node,
                                                     name="SCEL",
                                                     graph=self)
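Judging from the `filter_h`, `filter_w`, and `stride` arguments in Example #8, `tfl.Pooling` is presumably max pooling over fixed windows. A minimal NumPy sketch of that assumed behavior for NCHW inputs:

import numpy as np

def max_pool(x, filter_h, filter_w, stride):
    # x: (batch, channels, height, width); no padding, as in Example #8.
    n, c, h, w = x.shape
    out_h = (h - filter_h) // stride + 1
    out_w = (w - filter_w) // stride + 1
    out = np.empty((n, c, out_h, out_w), dtype=x.dtype)
    for i in range(out_h):
        for j in range(out_w):
            window = x[:, :, i*stride:i*stride+filter_h, j*stride:j*stride+filter_w]
            out[:, :, i, j] = window.max(axis=(2, 3))
    return out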
Example #10
    def layering(self, refitting=False):
        input_node = self.input_node

        if not refitting:
            self.output_mean_list['affine'] = {}
            self.output_variance_list['affine'] = {}
            self.output_skewness_list['affine'] = {}
            self.output_kurtosis_list['affine'] = {}

            self.output_mean_list['activation'] = {}
            self.output_variance_list['activation'] = {}
            self.output_skewness_list['activation'] = {}
            self.output_kurtosis_list['activation'] = {}

        for idx in range(self.hidden_layer_num):
            self.layers['affine' + str(idx)] = tfl.Affine(
                self.params['W' + str(idx)],
                input_node,
                self.params['b' + str(idx)],
                name='affine' + str(idx),
                graph=self
            )

            if self.use_batch_normalization:
                self.layers['batch_normal' + str(idx)] = tfl.BatchNormalization(
                    x=self.layers['affine' + str(idx)],
                    gamma=self.params['gamma' + str(idx)],
                    beta=self.params['beta' + str(idx)],
                    running_mean=self.params['running_mean' + str(idx)],
                    running_var=self.params['running_var' + str(idx)],
                    name='batch_normal' + str(idx),
                    graph=self
                )
                next_input_node = self.layers['batch_normal' + str(idx)]
            else:
                next_input_node = self.layers['affine' + str(idx)]

            self.layers['activation' + str(idx)] = self.activator(
                next_input_node,
                name='activation' + str(idx),
                graph=self
            )

            if self.use_dropout:
                self.layers['dropout' + str(idx)] = tfl.Dropout(
                    x=self.layers['activation' + str(idx)],
                    dropout_ratio=self.dropout_ratio_list[idx],
                    name='dropout' + str(idx),
                    graph=self
                )
                input_node = self.layers['dropout' + str(idx)]
            else:
                input_node = self.layers['activation' + str(idx)]

            if not refitting:
                self.output_mean_list['affine'][idx] = []
                self.output_variance_list['affine'][idx] = []
                self.output_skewness_list['affine'][idx] = []
                self.output_kurtosis_list['affine'][idx] = []

                self.output_mean_list['activation'][idx] = []
                self.output_variance_list['activation'][idx] = []
                self.output_skewness_list['activation'][idx] = []
                self.output_kurtosis_list['activation'][idx] = []

        idx = self.hidden_layer_num

        if self.use_dropout:
            input_node = self.layers['dropout' + str(idx - 1)]
        else:
            input_node = self.layers['activation' + str(idx - 1)]

        self.layers['affine' + str(idx)] = tfl.Affine(
            self.params['W' + str(idx)],
            input_node,
            self.params['b' + str(idx)],
            name='affine' + str(idx),
            graph=self
        )

        if not refitting:
            self.output_mean_list['affine'][idx] = []
            self.output_variance_list['affine'][idx] = []
            self.output_skewness_list['affine'][idx] = []
            self.output_kurtosis_list['affine'][idx] = []

        self.output = self.layers['affine' + str(idx)]

        self.error = tfl.SoftmaxWithCrossEntropyLoss(self.output, self.target_node, name="SCEL", graph=self)
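Finally, a minimal sketch of what `tfl.Dropout` presumably does with `dropout_ratio`, assuming the common inverted-dropout formulation (scale at training time so inference is a plain pass-through):

import numpy as np

def dropout(x, dropout_ratio, train=True):
    if not train:
        return x
    # Zero out units with probability dropout_ratio, rescale the survivors.
    mask = np.random.rand(*x.shape) >= dropout_ratio
    return x * mask / (1.0 - dropout_ratio)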