def _convert_to_nn(self, svm_model, y_train, x_val):
        # convert the fitted SVM into an equivalent linear NN
        print('converting {} model to linear NN'.format(
            self.__class__.__name__))
        W = svm_model.coef_.T    # coef_ has shape (n_outputs, n_features); transpose to (n_features, n_outputs)
        B = svm_model.intercept_

        if numpy.unique(y_train).size == 2:
            # binary case: expand the single decision function f(x) = w.x + b
            # into two class scores (-f, +f) so that argmax reproduces sign(f)
            linear_layer = Linear(W.shape[0], 2)
            linear_layer.W = numpy.concatenate([-W, W], axis=1)
            linear_layer.B = numpy.concatenate([-B, B], axis=0)
        else:
            # multi-class case: one output unit per class, weights used as-is
            linear_layer = Linear(*W.shape)
            linear_layer.W = W
            linear_layer.B = B

        svm_model = self.model  # compare against the stored model in the sanity check below
        nn_model = Sequential([Flatten(), linear_layer])
        if not self.use_gpu: nn_model.to_numpy()

        #sanity check model conversion
        self._sanity_check_model_conversion(svm_model, nn_model, x_val)
        print('model conversion sanity check passed')
        return nn_model
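
The binary branch above relies on the fact that negating a single decision function yields the score of the opposite class. A minimal NumPy sketch of that expansion, assuming scikit-learn-style coef_/intercept_ shapes (all names and values below are illustrative):

import numpy as np

coef = np.array([[0.5, -1.0, 2.0]])   # assumed binary SVM weights, shape (1, n_features)
intercept = np.array([0.25])          # assumed bias, shape (1,)

W = coef.T                            # (n_features, 1)
B = intercept

# expand f(x) = x @ W + B into two class scores (-f, +f)
W2 = np.concatenate([-W, W], axis=1)  # (n_features, 2)
B2 = np.concatenate([-B, B], axis=0)  # (2,)

x = np.array([[1.0, 0.0, 1.0]])       # one sample
f = x @ W + B
scores = x @ W2 + B2

# argmax over the two scores reproduces the SVM decision sign(f)
assert np.argmax(scores, axis=1)[0] == int(f[0, 0] > 0)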
Code example #2
    def test_Linear(self):
        np.random.seed(42)
        torch.manual_seed(42)

        batch_size, n_in, n_out = 2, 3, 4
        for _ in range(100):
            # layers initialization
            torch_layer = torch.nn.Linear(n_in, n_out)
            custom_layer = Linear(n_in, n_out)
            custom_layer.W = torch_layer.weight.data.numpy()
            custom_layer.b = torch_layer.bias.data.numpy()

            layer_input = np.random.uniform(
                -10, 10, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.uniform(
                -10, 10, (batch_size, n_out)).astype(np.float32)

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input),
                                       requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(
                np.allclose(torch_layer_output_var.data.numpy(),
                            custom_layer_output,
                            atol=1e-6))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(
                layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(
                np.allclose(torch_layer_grad_var.data.numpy(),
                            custom_layer_grad,
                            atol=1e-6))

            # 3. check layer parameters grad
            custom_layer.accGradParameters(layer_input, next_layer_grad)
            weight_grad = custom_layer.gradW
            bias_grad = custom_layer.gradb
            torch_weight_grad = torch_layer.weight.grad.data.numpy()
            torch_bias_grad = torch_layer.bias.grad.data.numpy()
            self.assertTrue(
                np.allclose(torch_weight_grad, weight_grad, atol=1e-6))
            self.assertTrue(np.allclose(torch_bias_grad, bias_grad, atol=1e-6))
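
The three checks above pin down the standard linear-layer math. A rough sketch of what a custom layer has to compute to pass them, following torch's convention of W with shape (n_out, n_in); the class below is illustrative, not the implementation under test:

import numpy as np

class LinearSketch:
    # illustrative sketch; mirrors the torch.nn.Linear conventions used in the test above
    def __init__(self, W, b):
        self.W, self.b = W, b

    def updateOutput(self, X):
        # forward pass: Y = X W^T + b
        return X @ self.W.T + self.b

    def updateGradInput(self, X, gradOutput):
        # gradient w.r.t. the input: dL/dX = dL/dY . W
        return gradOutput @ self.W

    def accGradParameters(self, X, gradOutput):
        # parameter gradients: dL/dW = (dL/dY)^T . X, dL/db = sum over the batch
        self.gradW = gradOutput.T @ X
        self.gradb = gradOutput.sum(axis=0)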
Code example #3
    def _read_txt_helper(path):
        with open(path, 'r') as f:  # text mode: the file is parsed line by line as strings
            content = f.read().split('\n')

            modules = []
            c = 0
            line = content[c]

            while len(line) > 0:
                if line.startswith(Linear.__name__): # @UndefinedVariable import error suppression for PyDev users
                    '''
                    Format of linear layer
                    Linear <rows_of_W> <columns_of_W>
                    <flattened weight matrix W>
                    <flattened bias vector>
                    '''
                    _, m, n = line.split(); m = int(m); n = int(n)
                    layer = Linear(m,n)
                    layer.W = np.array([float(weightstring) for weightstring in content[c+1].split() if len(weightstring) > 0]).reshape((m,n))
                    layer.B = np.array([float(weightstring) for weightstring in content[c+2].split() if len(weightstring) > 0])
                    modules.append(layer)
                    c+=3 # the description of a linear layer spans three lines

                elif line.startswith(Convolution.__name__): # @UndefinedVariable import error suppression for PyDev users
                    '''
                    Format of convolution layer
                    Convolution <rows_of_W> <columns_of_W> <depth_of_W> <number_of_filters_W> <stride_axis_0> <stride_axis_1>
                    <flattened filter block W>
                    <flattened bias vector>
                    '''

                    _,h,w,d,n,s0,s1 = line.split()
                    h = int(h); w = int(w); d = int(d); n = int(n); s0 = int(s0); s1 = int(s1)
                    layer = Convolution(filtersize=(h,w,d,n), stride=(s0,s1))
                    layer.W = np.array([float(weightstring) for weightstring in content[c+1].split() if len(weightstring) > 0]).reshape((h,w,d,n))
                    layer.B = np.array([float(weightstring) for weightstring in content[c+2].split() if len(weightstring) > 0])
                    modules.append(layer)
                    c+=3 #the description of a convolution layer spans three lines

                elif line.startswith(SumPool.__name__): # @UndefinedVariable import error suppression for PyDev users
                    '''
                    Format of sum pooling layer
                    SumPool <mask_height> <mask_width> <stride_axis_0> <stride_axis_1>
                    '''

                    _,h,w,s0,s1 = line.split()
                    h = int(h); w = int(w); s0 = int(s0); s1 = int(s1)
                    layer = SumPool(pool=(h,w),stride=(s0,s1))
                    modules.append(layer)
                    c+=1 # one line of parameterized layer description

                elif line.startswith(MaxPool.__name__): # @UndefinedVariable import error suppression for PyDev users
                    '''
                    Format of max pooling layer
                    MaxPool <mask_height> <mask_width> <stride_axis_0> <stride_axis_1>
                    '''

                    _,h,w,s0,s1 = line.split()
                    h = int(h); w = int(w); s0 = int(s0); s1 = int(s1)
                    layer = MaxPool(pool=(h,w),stride=(s0,s1))
                    modules.append(layer)
                    c+=1 # one line of parameterized layer description

                elif line.startswith(Flatten.__name__): # @UndefinedVariable import error suppression for PyDev users
                    modules.append(Flatten()); c += 1  # one line of parameterless layer description
                elif line.startswith(Rect.__name__): # @UndefinedVariable import error suppression for PyDev users
                    modules.append(Rect()); c += 1  # one line of parameterless layer description
                elif line.startswith(Tanh.__name__): # @UndefinedVariable import error suppression for PyDev users
                    modules.append(Tanh()); c += 1  # one line of parameterless layer description
                elif line.startswith(SoftMax.__name__): # @UndefinedVariable import error suppression for PyDev users
                    modules.append(SoftMax()); c += 1  # one line of parameterless layer description
                else:
                    raise ValueError('Layer type identifier "' + [s for s in line.split() if len(s) > 0][0] + '" not supported for reading from plain text file')

                #skip info of previous layers, read in next layer header
                line = content[c]

        return Sequential(modules)
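
The docstrings above imply a simple plain-text layout: one header line per layer, followed by its flattened parameters. A minimal sketch of a writer that produces that layout for a single Linear layer so the parser can load it back (write_linear_txt and the file name are illustrative, not part of the original code):

import numpy as np

def write_linear_txt(path, W, B):
    # illustrative writer matching the format parsed above:
    #   Linear <rows_of_W> <columns_of_W>
    #   <flattened weight matrix W>
    #   <flattened bias vector>
    m, n = W.shape
    with open(path, 'w') as f:
        f.write('Linear {} {}\n'.format(m, n))
        f.write(' '.join(str(v) for v in W.flatten()) + '\n')
        f.write(' '.join(str(v) for v in B.flatten()) + '\n')

# example: a 3x2 linear layer that _read_txt_helper could read back
write_linear_txt('linear_layer.txt', np.random.randn(3, 2), np.zeros(2))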