def __init__(self, input_dim=(1, 28, 28),
                 conv_param={'filter_num': 30,
                             'filter_size': 5, 'pad': 0, 'stride': 1},
                 hidden_size=100, output_size=10, weight_init_std=0.01):
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]  # assumes square input (height == width)
        conv_output_size = (input_size - filter_size + 2 *
                            filter_pad) // filter_stride + 1
        pool_output_size = int(
            filter_num * (conv_output_size/2) * (conv_output_size/2))

        self.params = {
            'W1': weight_init_std * np.random.randn(filter_num, input_dim[0], filter_size, filter_size),
            'b1': np.zeros(filter_num),
            'W2': weight_init_std * np.random.randn(pool_output_size, hidden_size),
            'b2': np.zeros(hidden_size),
            'W3': weight_init_std * np.random.randn(hidden_size, output_size),
            'b3': np.zeros(output_size)
        }

        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(
            self.params['W1'], self.params['b1'], conv_param['stride'], conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
        self.last_layer = SoftmaxWithLoss()
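For context, here is a minimal sketch of the predict and loss methods that typically accompany this constructor, assuming each layer object exposes a forward(x) method and SoftmaxWithLoss exposes forward(x, t); the method names are illustrative, not taken from the snippet above.

    def predict(self, x):
        # Run the input through every layer in insertion order.
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        # Score the predictions against the labels t.
        y = self.predict(x)
        return self.last_layer.forward(y, t)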
    def __init__(self,
                 input_size,
                 hidden_size,
                 output_size,
                 weight_init_std=0.01,
                 use_batchnorm=False):
        self.params = {
            'W1': weight_init_std * np.random.randn(input_size, hidden_size),
            'b1': np.zeros(hidden_size),
            'W2': weight_init_std * np.random.randn(hidden_size, output_size),
            'b2': np.zeros(output_size)
        }

        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        if use_batchnorm:
            self.params['gamma1'] = np.ones(hidden_size)
            self.params['beta1'] = np.zeros(hidden_size)
            self.layers['BatchNorm1'] = BatchNormalization(
                gamma=self.params['gamma1'], beta=self.params['beta1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        self.use_batchnorm = use_batchnorm

        self.lastLayer = SoftmaxWithLoss()
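A quick usage sketch for the batch-normalization variant, assuming the enclosing class is named TwoLayerNet (the class name is not shown in the snippet) and MNIST-sized inputs:

    net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10,
                      use_batchnorm=True)
    # gamma1/beta1 only exist when batch normalization is enabled.
    print(sorted(net.params.keys()))
    # -> ['W1', 'W2', 'b1', 'b2', 'beta1', 'gamma1']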
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        self.params = {
            'W1': weight_init_std * np.random.randn(input_size, hidden_size),
            'b1': np.zeros(hidden_size),
            'W2': weight_init_std * np.random.randn(hidden_size, output_size),
            'b2': np.zeros(output_size)
        }

        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])

        self.lastLayer = SoftmaxWithLoss()
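The matching gradient method in this book-style API usually backpropagates through the layers in reverse; a minimal sketch, assuming a loss(x, t) helper like the one sketched earlier, that each layer implements backward(dout), that SoftmaxWithLoss.backward accepts an initial dout of 1, and that the Affine layers store dW/db during backward:

    def gradient(self, x, t):
        # Forward pass (stores intermediates inside each layer).
        self.loss(x, t)

        # Backward pass in reverse layer order.
        dout = self.lastLayer.backward(1)
        for layer in reversed(list(self.layers.values())):
            dout = layer.backward(dout)

        # Parameter gradients were stored on the Affine layers.
        return {
            'W1': self.layers['Affine1'].dW, 'b1': self.layers['Affine1'].db,
            'W2': self.layers['Affine2'].dW, 'b2': self.layers['Affine2'].db,
        }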
Example #4
    def __init__(self,
                 input_dim=(1, 28, 28),
                 conv_param={
                     'filter_num': 30,
                     'filter_size': 5,
                     'pad': 0,
                     'stride': 1
                 },
                 hidden_size=100,
                 output_size=10,
                 weight_init_std=0.01):

        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        # conv_output_size: 24
        conv_output_size = (input_size - filter_size +
                            2 * filter_pad) // filter_stride + 1
        # pool_output_size: 30 * 12 * 12 = 4320
        pool_output_size = int(filter_num * (conv_output_size / 2) *
                               (conv_output_size / 2))

        self.params = {}
        # W1.shape: (30, 1, 5, 5)
        self.params['W1'] = weight_init_std * \
            np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        # b1.shape: (30,)
        self.params['b1'] = np.zeros(filter_num)
        # output of convolution: (N, 30, 24, 24)

        # W2.shape: (4320, 100)
        self.params['W2'] = weight_init_std * \
            np.random.randn(pool_output_size, hidden_size)
        # b2.shape: (100,)
        self.params['b2'] = np.zeros(hidden_size)
        # output of Affine1: (N, 100)

        self.params['W3'] = weight_init_std * \
            np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'],
                                           self.params['b1'], filter_stride,
                                           filter_pad)
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])

        self.last_layer = SoftmaxWithLoss()
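The shape comments above follow from plain arithmetic; a standalone check for the default (1, 28, 28) input:

    input_size, filter_size, pad, stride = 28, 5, 0, 1
    conv_output_size = (input_size - filter_size + 2 * pad) // stride + 1
    print(conv_output_size)                  # 24
    # 2x2 pooling with stride 2 halves each spatial dimension.
    pool_output_size = 30 * (conv_output_size // 2) ** 2
    print(pool_output_size)                  # 30 * 12 * 12 = 4320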
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        W1 = 0.01 * np.random.randn(I, H)
        b1 = np.zeros(H)
        W2 = 0.01 * np.random.randn(H, O)
        b2 = np.zeros(O)

        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
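Because this variant collects layer.params and layer.grads into flat, position-aligned lists, a whole update fits in one loop; a minimal SGD sketch, assuming the instance is named model and model.grads was filled by a backward pass (the learning rate is an illustrative value):

    lr = 0.1
    for param, grad in zip(model.params, model.grads):
        # In-place update, so the layers keep seeing the same arrays.
        param -= lr * grad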
    def __init__(self,
                 input_dim=(1, 10, 10),
                 conv_param={
                     'filter_num': 30,
                     'filter_size': 5,
                     'pad': 0,
                     'stride': 1
                 },
                 hidden_size=10,
                 output_size=10,
                 weight_init_std=0.01):
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        pad = conv_param['pad']
        stride = conv_param['stride']
        input_size = input_dim[1]

        conv_output_size = 1 + (input_size - filter_size + 2 * pad) // stride
        pool_output_size = int(filter_num * (conv_output_size / 2) *
                               (conv_output_size / 2))

        self.params = {}
        self.params['w1'] = weight_init_std * np.random.randn(
            filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['w2'] = weight_init_std * np.random.randn(
            pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['w3'] = weight_init_std * np.random.randn(
            hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['w1'],
                                           self.params['b1'],
                                           conv_param['stride'],
                                           conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(2, 2, stride=2)

        self.layers['Affine1'] = Affine(self.params['w2'], self.params['b2'])
        self.layers['Relu2'] = Relu()

        self.layers['Affine2'] = Affine(self.params['w3'], self.params['b3'])
        self.output_layer = SoftmaxWithLoss()
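Unlike the 28x28 examples above, the (1, 10, 10) default here yields much smaller layers; the arithmetic, checked standalone:

    conv_output_size = 1 + (10 - 5 + 2 * 0) // 1       # 6
    pool_output_size = 30 * (6 // 2) * (6 // 2)        # 30 * 3 * 3 = 270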
Example #8
    def __init__(self,
                 input_size,
                 hidden_size,
                 output_size,
                 weight_init_std=0.01):
        self.params = {}
        self.params['w1'] = weight_init_std * np.random.randn(
            input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['w2'] = weight_init_std * np.random.randn(
            hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['w1'], self.params['b1'])
        self.layers['Relu'] = Relu()
        self.layers['Affine2'] = Affine(self.params['w2'], self.params['b2'])
        self.output_layer = SoftmaxWithLoss()
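A common companion to this constructor is an accuracy check; a minimal sketch, assuming a predict(x) method that runs self.layers in order and labels t given as class indices rather than one-hot vectors:

    def accuracy(self, x, t):
        # Fraction of samples whose argmax matches the label.
        y = np.argmax(self.predict(x), axis=1)
        return float(np.mean(y == t))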
Example #9
class GNN:
    def __init__(self, D, T):
        self.T = T

        # Initialize parameters
        self.params = {}
        self.params['W'] = np.random.normal(0, 0.4, (D, D)).astype(np.float64)
        self.params['A'] = np.random.normal(0, 0.4, (1, D)).astype(np.float64)
        self.params['b'] = np.array([0], dtype=np.float64)

        self.MaskADD = MaskADD()
        self.MatMul = MatMul(self.params['W'])
        self.Relu = Relu()
        self.ADD = ADD()
        self.Affine = Affine(self.params['A'], self.params['b'])
        self.Sigmoid = Sigmoid()
        self.sigmoid_loss = SigmoidWithLoss()

    def forward(self, x, H):
        # T rounds of aggregation: combine node features through the
        # adjacency mask H, project with W, then apply ReLU.
        for _ in range(self.T):
            a = self.MaskADD.forward(x, H)
            r = self.MatMul.forward(a)
            x = self.Relu.forward(r)

        # Readout: sum the node features and score them with the linear head.
        h = self.ADD.forward(x)
        s = self.Affine.forward(h)
        return s
    
    def predict(self, x, H):
        s = self.forward(x, H)
        p = self.Sigmoid.forward(s)
        return p.flatten()
    
    def loss(self, x, H, y):
        s = self.forward(x, H)
        L = self.sigmoid_loss.forward(s, y)
        return L
    
    def get_gradient(self, x, H, y):
        # Numerical gradients: the closure ignores its argument because
        # gradient() is expected to perturb each parameter array in place.
        f = lambda w: self.loss(x, H, y)

        grads = {}
        grads['W'] = gradient(f, self.params['W'])
        grads['A'] = gradient(f, self.params['A'])
        grads['b'] = gradient(f, self.params['b'])

        return grads
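A usage sketch for the class above. MaskADD, MatMul, ADD, Affine, Sigmoid, SigmoidWithLoss, and gradient are custom helpers this listing assumes; the shapes below (x as (N, D) node features, H as an (N, N) adjacency matrix) are assumptions about those helpers, not taken from the snippet:

    D, T = 8, 2
    gnn = GNN(D, T)
    N = 4                                       # assumed number of nodes
    x = np.zeros((N, D))                        # assumed node-feature shape
    H = np.random.randint(0, 2, size=(N, N))    # assumed adjacency shape
    y = np.array([1])                           # binary target

    grads = gnn.get_gradient(x, H, y)
    # One gradient-descent step; the in-place "-=" matters because MatMul
    # and Affine keep references to params['W'] and params['A'].
    for key in gnn.params:
        gnn.params[key] -= 0.0001 * grads[key]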