Example #1
import numpy as np
# Layer classes are assumed to come from the toolbox's local `modules` package.
from modules import Sequential, Linear, Rect, Tanh, SoftMax, BinStep, NegAbs


def _read_txt_old(path):
    print('loading plain text model from', path)

    # open in text mode so split('\n') operates on strings, not bytes
    with open(path, 'r') as f:
        content = f.read().split('\n')

        modules = []
        c = 0
        line = content[c]
        while len(line) > 0:
            if line.startswith(
                    Linear.__name__
            ):  # @UndefinedVariable import error suppression for PyDev users
                lineparts = line.split()
                m = int(lineparts[1])
                n = int(lineparts[2])
                mod = Linear(m, n)
                for i in range(m):
                    c += 1
                    mod.W[i, :] = np.array([
                        float(val) for val in content[c].split()
                        if len(val) > 0
                    ])

                c += 1
                mod.B = np.array([float(val) for val in content[c].split()])
                modules.append(mod)

            elif line.startswith(
                    Rect.__name__
            ):  # @UndefinedVariable import error suppression for PyDev users
                modules.append(Rect())
            elif line.startswith(
                    Tanh.__name__
            ):  # @UndefinedVariable import error suppression for PyDev users
                modules.append(Tanh())
            elif line.startswith(
                    SoftMax.__name__
            ):  # @UndefinedVariable import error suppression for PyDev users
                modules.append(SoftMax())
            elif line.startswith(
                    BinStep.__name__
            ):  # @UndefinedVariable import error suppression for PyDev users
                modules.append(BinStep())
            elif line.startswith(
                    NegAbs.__name__
            ):  # @UndefinedVariable import error suppression for PyDev users
                modules.append(NegAbs())
            else:
                raise ValueError('Layer type ' +
                                 [s for s in line.split() if len(s) > 0][0] +
                                 ' not supported by legacy plain text format.')

            c += 1
            line = content[c]

        return Sequential(modules)
Example #2
    def test_SoftMax(self):
        np.random.seed(42)
        torch.manual_seed(42)

        batch_size, n_in = 2, 4
        for _ in range(100):
            # layers initialization
            torch_layer = torch.nn.Softmax(dim=1)
            custom_layer = SoftMax()

            layer_input = np.random.uniform(
                -10, 10, (batch_size, n_in)).astype(np.float32)
            next_layer_grad = np.random.random(
                (batch_size, n_in)).astype(np.float32)
            next_layer_grad /= next_layer_grad.sum(axis=-1, keepdims=True)
            next_layer_grad = next_layer_grad.clip(1e-5, 1.)
            next_layer_grad = 1. / next_layer_grad

            # 1. check layer output
            custom_layer_output = custom_layer.updateOutput(layer_input)
            layer_input_var = Variable(torch.from_numpy(layer_input),
                                       requires_grad=True)
            torch_layer_output_var = torch_layer(layer_input_var)
            self.assertTrue(
                np.allclose(torch_layer_output_var.data.numpy(),
                            custom_layer_output,
                            atol=1e-5))

            # 2. check layer input grad
            custom_layer_grad = custom_layer.updateGradInput(
                layer_input, next_layer_grad)
            torch_layer_output_var.backward(torch.from_numpy(next_layer_grad))
            torch_layer_grad_var = layer_input_var.grad
            self.assertTrue(
                np.allclose(torch_layer_grad_var.data.numpy(),
                            custom_layer_grad,
                            atol=1e-5))
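
The test above pins down the expected interface. For context, a minimal sketch of a SoftMax layer satisfying the updateOutput/updateGradInput convention used here (an illustration only, not the implementation under test):

import numpy as np

class SoftMaxSketch:
    def updateOutput(self, input):
        # subtract the row-wise max for numerical stability
        shifted = input - input.max(axis=1, keepdims=True)
        exp = np.exp(shifted)
        self.output = exp / exp.sum(axis=1, keepdims=True)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # softmax Jacobian-vector product:
        # dL/dx = y * (dL/dy - sum_j(dL/dy_j * y_j))
        y = self.updateOutput(input)
        dot = (gradOutput * y).sum(axis=1, keepdims=True)
        self.gradInput = y * (gradOutput - dot)
        return self.gradInput
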
Example #3
    def _read_txt_helper(path):
        with open(path, 'r') as f:  # text mode: the parser works on strings
            content = f.read().split('\n')

            modules = []
            c = 0
            line = content[c]

            while len(line) > 0:
                if line.startswith(
                        Linear.__name__
                ):  # @UndefinedVariable import error suppression for PyDev users
                    '''
                    Format of linear layer
                    Linear <rows_of_W> <columns_of_W>
                    <flattened weight matrix W>
                    <flattened bias vector>
                    '''
                    _, m, n = line.split()
                    m = int(m)
                    n = int(n)
                    layer = Linear(m, n)
                    layer.W = np.array([
                        float(weightstring)
                        for weightstring in content[c + 1].split()
                        if len(weightstring) > 0
                    ]).reshape((m, n))
                    layer.B = np.array([
                        float(weightstring)
                        for weightstring in content[c + 2].split()
                        if len(weightstring) > 0
                    ])
                    modules.append(layer)
                    c += 3  # the description of a linear layer spans three lines

                elif line.startswith(
                        Convolution.__name__
                ):  # @UndefinedVariable import error suppression for PyDev users
                    '''
                    Format of convolution layer
                    Convolution <rows_of_W> <columns_of_W> <depth_of_W> <number_of_filters_W> <stride_axis_0> <stride_axis_1>
                    <flattened filter block W>
                    <flattened bias vector>
                    '''

                    _, h, w, d, n, s0, s1 = line.split()
                    h = int(h)
                    w = int(w)
                    d = int(d)
                    n = int(n)
                    s0 = int(s0)
                    s1 = int(s1)
                    layer = Convolution(filtersize=(h, w, d, n),
                                        stride=(s0, s1))
                    layer.W = np.array([
                        float(weightstring)
                        for weightstring in content[c + 1].split()
                        if len(weightstring) > 0
                    ]).reshape((h, w, d, n))
                    layer.B = np.array([
                        float(weightstring)
                        for weightstring in content[c + 2].split()
                        if len(weightstring) > 0
                    ])
                    modules.append(layer)
                    c += 3  # the description of a convolution layer spans three lines

                elif line.startswith(
                        SumPool.__name__
                ):  # @UndefinedVariable import error suppression for PyDev users
                    '''
                    Format of sum pooling layer
                    SumPool <mask_height> <mask_width> <stride_axis_0> <stride_axis_1>
                    '''

                    _, h, w, s0, s1 = line.split()
                    h = int(h)
                    w = int(w)
                    s0 = int(s0)
                    s1 = int(s1)
                    layer = SumPool(pool=(h, w), stride=(s0, s1))
                    modules.append(layer)
                    c += 1  # one line of parameterized layer description

                elif line.startswith(
                        MaxPool.__name__
                ):  # @UndefinedVariable import error suppression for PyDev users
                    '''
                    Format of max pooling layer
                    MaxPool <mask_height> <mask_width> <stride_axis_0> <stride_axis_1>
                    '''

                    _, h, w, s0, s1 = line.split()
                    h = int(h)
                    w = int(w)
                    s0 = int(s0)
                    s1 = int(s1)
                    layer = MaxPool(pool=(h, w), stride=(s0, s1))
                    modules.append(layer)
                    c += 1  # one line of parameterized layer description

                elif line.startswith(
                        Flatten.__name__
                ):  # @UndefinedVariable import error suppression for PyDev users
                    modules.append(Flatten())
                    c += 1  # one line of parameterless layer description
                elif line.startswith(
                        Rect.__name__
                ):  # @UndefinedVariable import error suppression for PyDev users
                    modules.append(Rect())
                    c += 1  # one line of parameterless layer description
                elif line.startswith(
                        Tanh.__name__
                ):  # @UndefinedVariable import error suppression for PyDev users
                    modules.append(Tanh())
                    c += 1  # one line of parameterless layer description
                elif line.startswith(
                        SoftMax.__name__
                ):  # @UndefinedVariable import error suppression for PyDev users
                    modules.append(SoftMax())
                    c += 1  # one line of parameterless layer description
                else:
                    raise ValueError(
                        'Layer type identifier ' +
                        [s for s in line.split() if len(s) > 0][0] +
                        ' not supported for reading from plain text file')

                # skip info of previous layers, read in next layer header
                line = content[c]

        return Sequential(modules)
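
Following the docstrings above, a model file in this plain-text format might look like the following (a hypothetical two-layer net; the numbers are illustrative only):

Linear 2 3
0.1 -0.2 0.3 0.4 -0.5 0.6
0.0 0.1 -0.1
Tanh
Linear 3 2
1.0 0.0 -1.0 0.5 0.5 -0.5
0.0 0.0
SoftMax

A trailing empty line ends parsing, since the reader loops only while len(line) > 0.
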
Example #4
train_x, test_x, train_y, test_y = X[:60000], X[60000:], Y[:60000], Y[60000:]

#### build model
net = Sequential()
net.add(Dense(784, 400))
net.add(ReLU())
# net.add(Sigmoid())
# net.add(SoftPlus())
# net.add(Dropout())
net.add(Dense(400, 128))
net.add(ReLU())
# net.add(BatchMeanSubtraction())
# net.add(ReLU())
# net.add(Dropout())
net.add(Dense(128, 10))
net.add(SoftMax())

# criterion = MultiLabelCriterion()  # loss function
criterion = CrossEntropyCriterion()
###############################
#### optimizer config
# Optimizer params
# optimizer_config = {'learning_rate': 1e-2, 'momentum': 0.9}
#

optimizer_config = {
    'alpha': 0.01,
    'beta_1': 0.9,
    'beta_2': 0.999,
    'epsilon': 1e-8
}
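
These keys are the standard Adam hyperparameters. As a rough sketch of the update such an optimizer would perform (a hypothetical adam_step helper; the actual optimizer used with this config is not shown in the snippet):

import numpy as np

def adam_step(params, grads, config, state):
    # one Adam update over matching lists of parameter and gradient arrays
    state.setdefault('m', [np.zeros_like(p) for p in params])
    state.setdefault('v', [np.zeros_like(p) for p in params])
    state['t'] = state.get('t', 0) + 1
    t = state['t']
    for p, g, m, v in zip(params, grads, state['m'], state['v']):
        m *= config['beta_1']
        m += (1 - config['beta_1']) * g          # first-moment estimate
        v *= config['beta_2']
        v += (1 - config['beta_2']) * g ** 2     # second-moment estimate
        m_hat = m / (1 - config['beta_1'] ** t)  # bias correction
        v_hat = v / (1 - config['beta_2'] ** t)
        p -= config['alpha'] * m_hat / (np.sqrt(v_hat) + config['epsilon'])
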
DNN_HIDDEN_UNITS_DEFAULT = '20'
LEARNING_RATE_DEFAULT = 1e-2
MAX_EPOCHS_DEFAULT = 1500
EVAL_FREQ_DEFAULT = 20

training_data = None
training_label = None
test_data = None
test_label = None
training_label_encoded = None
test_label_encoded = None

FLAGS = None

relu = ReLU()
softmax = SoftMax()
cross = CrossEntropy()

plot_train = []
plot_test = []
plot_loss = []
test_loss = []
x = []
descent = 0

def encode(label):
    # two-column encoding for binary labels: column 0 is the label,
    # column 1 its complement
    encoded = np.zeros((len(label), 2))
    for i in range(len(label)):
        encoded[i][0] = label[i]
        encoded[i][1] = 1 - label[i]
    return encoded
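
For example (assuming numpy is imported as np, as elsewhere in this snippet):

labels = np.array([0, 1, 1])
print(encode(labels))
# [[0. 1.]
#  [1. 0.]
#  [1. 0.]]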

optimizer_state = {}

# Looping params
n_epoch = 20
batch_size = 1024

# `activations` is assumed to map names to layer classes, e.g. {'ReLU': ReLU}
for activation_name, activation in activations.items():
    nn1 = Sequential()
    nn1.add(Dense(784, 100))
    nn1.add(activation())
    nn1.add(Dense(100, 50))
    nn1.add(activation())
    nn1.add(Dense(50, 10))
    nn1.add(SoftMax())

    print("****************************************************")
    print(f"Training NN with {activation_name} without Batch Normalization")
    print("****************************************************")

    loss_history1 = fit(X_train, y_train, X_val, y_val, nn1, n_epoch,
                        batch_size, criterion, optimizer, optimizer_config,
                        optimizer_state)

    nn2 = Sequential()
    nn2.add(Dense(784, 100))
    nn2.add(BatchMeanSubtraction(1))
    nn2.add(activation())
    nn2.add(Dense(100, 50))
    nn2.add(BatchMeanSubtraction(1))