Example #1
        def __init__(self,
                     kernel_initializer,
                     kernel_regularizer,
                     name='upsample_merge'):
            super().__init__(name=name)

            self.conv_lateral = Sequential([
                tf.layers.Conv2D(
                    256,
                    1,
                    1,
                    use_bias=False,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer),
                Normalization()
            ])

            self.conv_merge = Sequential([
                tf.layers.Conv2D(
                    256,
                    3,
                    1,
                    padding='same',
                    use_bias=False,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer),
                Normalization()
            ])
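
For context, a minimal sketch of the call method that usually pairs with this constructor: the coarser top-down pyramid level is upsampled to the lateral feature's size and summed with it, then smoothed by the 3x3 merge conv. The method signature and the use of tf.image.resize_images are assumptions, not taken from the source.

        def call(self, bottom_up, top_down, training):
            # project the bottom-up feature map to 256 channels
            lateral = self.conv_lateral(bottom_up, training=training)
            # nearest-neighbor upsample of the coarser top-down map to the lateral's size
            top_down = tf.image.resize_images(
                top_down, (tf.shape(lateral)[1], tf.shape(lateral)[2]),
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            # element-wise sum, then the 3x3 merge conv to reduce upsampling artifacts
            return self.conv_merge(lateral + top_down, training=training)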
Example #2
    def build(self, input_shape):
        self.expand_conv = Sequential([
            tf.layers.Conv2D(input_shape[3] * self._expansion_factor,
                             1,
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            Normalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        self.depthwise_conv = Sequential([
            DepthwiseConv2D(3,
                            strides=self._strides,
                            padding='same',
                            use_bias=False,
                            kernel_initializer=self._kernel_initializer,
                            kernel_regularizer=self._kernel_regularizer),
            Normalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        self.linear_conv = Sequential([
            tf.layers.Conv2D(self._filters,
                             1,
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            Normalization(),
            tf.layers.Dropout(self._dropout_rate)
        ])

        super().build(input_shape)
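
A plausible call for this inverted-residual block, sketched under the assumption that it follows the MobileNetV2 pattern: expand, depthwise-filter, project linearly, and add the input back when shapes are preserved.

    def call(self, input, training):
        x = self.expand_conv(input, training=training)
        x = self.depthwise_conv(x, training=training)
        x = self.linear_conv(x, training=training)
        # identity shortcut only when stride and channel width are preserved
        if self._strides == 1 and input.shape[-1] == x.shape[-1]:
            x = x + input
        return x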
Example #3
    def __init__(self,
                 num_anchors,
                 activation,
                 kernel_initializer,
                 kernel_regularizer,
                 name='regression_subnet'):
        super().__init__(name=name)

        self.num_anchors = num_anchors

        self.pre_conv = Sequential([
            Sequential([
                tf.layers.Conv2D(
                    256,
                    3,
                    1,
                    padding='same',
                    use_bias=False,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer),
                Normalization(),
                activation,
            ]) for _ in range(4)
        ])

        self.out_conv = tf.layers.Conv2D(
            num_anchors * 4,
            3,
            1,
            padding='same',
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer)
Example #4
    def __init__(self):
        # constructor
        # the model inherits from the Module class
        Module.__init__(self)
        # the model consists of 2 linear layers with activation layers
        self.l1 = Sequential(Linear(2, 16), ReLu(), Linear(16, 92))
        self.s1 = TanhS()
        self.l2 = Linear(92, 2)
Example #5
    def __init__(self,
                 activation,
                 kernel_initializer,
                 kernel_regularizer,
                 name='feature_pyramid_network'):
        super().__init__(name=name)

        self.p6_from_c5 = Sequential([
            tf.layers.Conv2D(
                256,
                3,
                2,
                padding='same',
                use_bias=False,
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer),
            Normalization()
        ])

        self.p7_from_p6 = Sequential([
            activation,
            tf.layers.Conv2D(
                256,
                3,
                2,
                padding='same',
                use_bias=False,
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer),
            Normalization()
        ])

        self.p5_from_c5 = Sequential([
            tf.layers.Conv2D(
                256,
                1,
                1,
                use_bias=False,
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer),
            Normalization()
        ])

        self.p4_from_c4p5 = FeaturePyramidNetwork.UpsampleMerge(
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='upsample_merge_c4p5')
        self.p3_from_c3p4 = FeaturePyramidNetwork.UpsampleMerge(
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='upsample_merge_c3p4')
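
For orientation, a sketch of how these pieces typically compose into the P3-P7 outputs; passing the backbone features as a dict keyed 'C3'/'C4'/'C5' is an assumption, not taken from the source.

    def call(self, bottom_up, training):
        p5 = self.p5_from_c5(bottom_up['C5'], training=training)
        p4 = self.p4_from_c4p5(bottom_up['C4'], p5, training=training)
        p3 = self.p3_from_c3p4(bottom_up['C3'], p4, training=training)
        p6 = self.p6_from_c5(bottom_up['C5'], training=training)
        p7 = self.p7_from_p6(p6, training=training)
        return {'P3': p3, 'P4': p4, 'P5': p5, 'P6': p6, 'P7': p7}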
Example #6
class Custom_Model(Module):
    def __init__(self):
        # constructor
        # the model inherits from the Module class
        Module.__init__(self)
        # the model consists of 2 linear layers with activation layers
        self.l1 = Sequential(Linear(2, 16), ReLu(), Linear(16, 92))
        self.s1 = TanhS()
        self.l2 = Linear(92, 2)

    def forward(self, input):
        # forward pass, defined as in PyTorch
        input = self.l1.forward(input)
        output = self.l2.forward(self.s1.forward(input))
        return output

    def backward(self, dlp):
        # the backward pass just calls backward on every layer in reverse order
        dlp = self.l2.backward(dlp)
        dlp = self.s1.backward(dlp)
        dlp = self.l1.backward(dlp)
        return dlp
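
A quick smoke test of this custom module, assuming the layers operate on torch tensors of shape (N, 2) as in the training script later in this listing:

import torch

model = Custom_Model()
x = torch.empty(100, 2).normal_()  # batch of 100 two-dimensional points
out = model.forward(x)             # output has shape (100, 2)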
Example #7
    def __init__(self,
                 num_anchors,
                 num_classes,
                 activation,
                 kernel_initializer,
                 kernel_regularizer,
                 name='classification_subnet'):
        super().__init__(name=name)

        self.num_anchors = num_anchors
        self.num_classes = num_classes

        self.pre_conv = Sequential([
            Sequential([
                tf.layers.Conv2D(
                    256,
                    3,
                    1,
                    padding='same',
                    use_bias=False,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer),
                Normalization(),
                activation,
            ]) for _ in range(4)
        ])

        pi = 0.01
        bias_prior_initializer = tf.constant_initializer(-math.log((1 - pi) / pi))

        self.out_conv = tf.layers.Conv2D(
            num_anchors * num_classes,
            3,
            1,
            padding='same',
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_initializer=bias_prior_initializer)
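
The bias initializer above is the RetinaNet prior trick: with b = -log((1 - pi) / pi), sigmoid(b) = pi, so every anchor starts out predicting roughly 1% foreground probability and training is not swamped by easy negatives in the first iterations. A quick numeric check:

import math

pi = 0.01
b = -math.log((1 - pi) / pi)
# sigmoid(b) recovers pi
assert abs(1 / (1 + math.exp(-b)) - pi) < 1e-12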
Example #8
def build_model(parameters):
    model = Sequential(
        optimizer=stochastic_gradient_descent(learning_rate=parameters[1],
                                              decay=parameters[2],
                                              momentum=parameters[3]))
    for units_of_layer in parameters[0]:
        model.add(Dense(units_of_layer))
    model.add(Dense(1))
    return model
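
A hypothetical call, only to document the expected shape of parameters: index 0 holds the hidden-layer widths and indices 1-3 the SGD hyperparameters (the concrete values below are illustrative, not from the source):

# parameters = [units_per_hidden_layer, learning_rate, decay, momentum]
model = build_model([[64, 32], 0.01, 1e-6, 0.9])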
Example #9
    def __init__(self,
                 backbone,
                 levels,
                 num_classes,
                 activation,
                 dropout_rate,
                 kernel_initializer,
                 kernel_regularizer,
                 name='retinanet_base'):
        super().__init__(name=name)

        self.backbone = build_backbone(backbone, activation=activation, dropout_rate=dropout_rate)

        if backbone == 'densenet':
            # TODO: check if this is necessary
            # DenseNet uses a preactivation architecture,
            # so normalization and activation must be applied before passing features to the FPN
            self.postprocess_bottom_up = {
                cn: Sequential([
                    Normalization(),
                    activation
                ])
                for cn in ['C3', 'C4', 'C5']
            }
        else:
            self.postprocess_bottom_up = None

        self.fpn = FeaturePyramidNetwork(
            activation=activation,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer)

        self.classification_subnet = ClassificationSubnet(
            num_anchors=levels.num_anchors,  # TODO: level anchor boxes
            num_classes=num_classes,
            activation=activation,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='classification_subnet')

        self.regression_subnet = RegressionSubnet(
            num_anchors=levels.num_anchors,  # TODO: level anchor boxes
            activation=activation,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='regression_subnet')
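
A sketch of how the forward pass would wire these parts together; the dict keys and the per-level application of the shared subnets are assumptions based on the RetinaNet design, not taken from the source.

    def call(self, input, training):
        bottom_up = self.backbone(input, training=training)
        if self.postprocess_bottom_up is not None:
            bottom_up = {
                cn: self.postprocess_bottom_up[cn](bottom_up[cn], training=training)
                for cn in ['C3', 'C4', 'C5']}
        features = self.fpn(bottom_up, training=training)
        # apply the shared subnets to every pyramid level
        classifications = {
            pn: self.classification_subnet(features[pn], training=training)
            for pn in features}
        regressions = {
            pn: self.regression_subnet(features[pn], training=training)
            for pn in features}
        return classifications, regressions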
Example #10
from links import *
from model import Sequential
from dqn import DQN
import numpy as np
from numpy.random import *
import gym
from copy import deepcopy

env = gym.make("CartPole-v0")
obs = env.reset()

model = Sequential()
model.add(Linear(4, 400, activation="relu", initialization="HeNormal"))
#model.add(Linear(400,400,activation="relu",initialization="HeNormal"))
#model.add(Linear(100,100,activation="relu",initialization="HeNormal"))
model.add(Linear(400, 2, initialization="zeros"))
model.compile(optimizer="MomentumSGD")
target_model = deepcopy(model)
Memory = DQN()
initial_exploration = 100
replay_size = 32

epsilon = 0.3
gamma = 0.95
time = 0
episode = 0
last_obs = deepcopy(obs)

#ReplayMemory = [None for i in range(10**5)]
#m_size = 0
step = 0
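
A minimal epsilon-greedy action selection to pair with this setup, assuming model(x) returns Q-values for a batch of observations (the call API of this custom Sequential is an assumption):

def select_action(obs, epsilon):
    # explore with probability epsilon
    if rand() < epsilon:
        return env.action_space.sample()
    # otherwise act greedily on the current Q-value estimates
    q = model(obs.reshape(1, -1))
    return int(np.argmax(q))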
Example #11
#!/usr/bin/python3
from keras.datasets import cifar10
from model import Sequential
from layers.pool import MaxPool
from one_hot import one_hot

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

x_train, x_test = x_train / 255, x_test / 255
y_train = one_hot(y_train)

model = Sequential(x_train, y_train)

model.add_Conv(32, (3, 3))
model.add_Activation()
model.add_Pool()

model.add_Conv(32, (3, 3))
model.add_Activation()
model.add_Pool()

model.add_Conv(64, (3, 3))
model.add_Activation()
model.add_Pool()

model.add_Dense(512)
model.out(10)

model.compile(1, 32)  # positional arguments are presumably (epochs, batch_size) in this custom API
Example #12
from model import Sequential
from layer import Dense, Conv2D, MaxPool2D, Flatten
from loss import BinaryCrossEntropy
from activation import Sigmoid, ReLU
from optimizer import GradientDescentOptimizer

if __name__ == "__main__":
    from sklearn.datasets import load_digits
    data = load_digits(n_class=2)
    X, y = data['data'].reshape(-1, 8, 8, 1) / 16, data['target'].reshape(
        -1, 1)

    model = Sequential()
    model.add(Conv2D,
              ksize=3,
              stride=1,
              activation=ReLU(),
              input_size=(8, 8, 1),
              filters=7,
              padding=0)
    model.add(MaxPool2D, ksize=2, stride=1, padding=0)
    model.add(Conv2D,
              ksize=2,
              stride=1,
              activation=ReLU(),
              filters=5,
              padding=0)
    model.add(Flatten)
    model.add(Dense, units=1, activation=Sigmoid())
    model.summary()
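
A hypothetical continuation using the otherwise-unused loss and optimizer imports; the compile/fit signature is a guess at this custom API, not taken from the repository:

    # assumed API: compile with a loss and optimizer, then fit on the digits data
    model.compile(loss=BinaryCrossEntropy(),
                  optimizer=GradientDescentOptimizer(learning_rate=0.01))
    model.fit(X, y, epochs=10, batch_size=32)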
Example #13
    return nb_data_errors


if __name__ == "__main__":

    # The seed is set to 0 for the model-definition part. The weights of the linear layers are randomly
    # initialized, and some draws do not let the model converge (whether ours or the PyTorch one, since
    # the architecture is the same). In a real use case the user would simply relaunch the process, but
    # since this script is for evaluation purposes, we prefer it to produce a predictable result.
    # In a nutshell, we initialize the weights with deterministic values.
    torch.manual_seed(0)

    # Model definitions
    model = Sequential(Linear(2, 25), ReLu(), Linear(25, 25), ReLu(),
                       Linear(25, 25), ReLu(), Dropout(0.2), Linear(25, 2),
                       ReLu())
    model_torch = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 25),
                                nn.ReLU(), nn.Linear(25, 25), nn.ReLU(),
                                nn.Dropout(0.2), nn.Linear(25, 2), nn.ReLU())
    # Creating toy data

    # Set the seed to a random value, this time to generate data randomly (and for the dropout layers)
    torch.manual_seed(random.randint(0, 2**32 - 1))

    train_input, train_target, label = generate_data(10000)
    test_input, test_target, test_label = generate_data(200)

    # Training models
    train_model(model, train_input, train_target, 500)
    model_torch.train()
Example #14
    def __init__(self,
                 blocks,
                 growth_rate,
                 compression_factor,
                 bottleneck,
                 activation,
                 dropout_rate,
                 kernel_initializer,
                 kernel_regularizer,
                 name='densenet_bc_imagenet'):
        super().__init__(name=name)

        self.conv1 = Sequential([
            tf.layers.Conv2D(2 * growth_rate,
                             7,
                             2,
                             padding='same',
                             use_bias=False,
                             kernel_initializer=kernel_initializer,
                             kernel_regularizer=kernel_regularizer,
                             name='conv1'),
            Normalization(),
            activation,
        ])
        self.conv1_max_pool = tf.layers.MaxPooling2D(3, 2, padding='same')

        self.dense_block_1 = DenseNet_Block(
            growth_rate,
            depth=blocks[1],
            bottleneck=bottleneck,
            activation=activation,
            dropout_rate=dropout_rate,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='dense_block1')

        self.transition_layer_1 = TransitionLayer(
            input_filters=blocks[1] * growth_rate + 64,
            compression_factor=compression_factor,
            dropout_rate=dropout_rate,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='transition_layer_1')

        self.dense_block_2 = DenseNet_Block(
            growth_rate,
            depth=blocks[2],
            bottleneck=bottleneck,
            activation=activation,
            dropout_rate=dropout_rate,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='dense_block2')

        self.transition_layer_2 = TransitionLayer(
            input_filters=blocks[2] * growth_rate +
            self.transition_layer_1.layers[1].filters,  # FIXME:
            compression_factor=compression_factor,
            dropout_rate=dropout_rate,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='transition_layer_2')

        self.dense_block_3 = DenseNet_Block(
            growth_rate,
            depth=blocks[3],
            bottleneck=bottleneck,
            activation=activation,
            dropout_rate=dropout_rate,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='dense_block3')

        self.transition_layer_3 = TransitionLayer(
            input_filters=blocks[3] * growth_rate +
            self.transition_layer_2.layers[1].filters,  # FIXME:
            compression_factor=compression_factor,
            dropout_rate=dropout_rate,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='transition_layer_3')

        self.dense_block_4 = DenseNet_Block(
            growth_rate,
            depth=blocks[4],
            bottleneck=bottleneck,
            activation=activation,
            dropout_rate=dropout_rate,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='dense_block4')
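
For reference, a minimal sketch of the forward pass implied by these attributes, alternating dense blocks and transition layers (the method signature is an assumption):

    def call(self, input, training):
        # stem: strided 7x7 conv followed by max pooling
        x = self.conv1(input, training=training)
        x = self.conv1_max_pool(x)
        x = self.dense_block_1(x, training=training)
        x = self.transition_layer_1(x, training=training)
        x = self.dense_block_2(x, training=training)
        x = self.transition_layer_2(x, training=training)
        x = self.dense_block_3(x, training=training)
        x = self.transition_layer_3(x, training=training)
        x = self.dense_block_4(x, training=training)
        return x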
Example #15
    def build(self, input_shape):
        self.input_conv = Sequential([
            tf.layers.Conv2D(32,
                             3,
                             strides=2,
                             padding='same',
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            Normalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        self.bottleneck_1_1 = Bottleneck(
            16,
            expansion_factor=1,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_2_1 = Bottleneck(
            24,
            expansion_factor=6,
            strides=2,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_2_2 = Bottleneck(
            24,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_3_1 = Bottleneck(
            32,
            expansion_factor=6,
            strides=2,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_3_2 = Bottleneck(
            32,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_3_3 = Bottleneck(
            32,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_4_1 = Bottleneck(
            64,
            expansion_factor=6,
            strides=2,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_4_2 = Bottleneck(
            64,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_4_3 = Bottleneck(
            64,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_4_4 = Bottleneck(
            64,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_5_1 = Bottleneck(
            96,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_5_2 = Bottleneck(
            96,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_5_3 = Bottleneck(
            96,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_6_1 = Bottleneck(
            160,
            expansion_factor=6,
            strides=2,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_6_2 = Bottleneck(
            160,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_6_3 = Bottleneck(
            160,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_7_1 = Bottleneck(
            320,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.output_conv = Sequential([
            tf.layers.Conv2D(32,
                             1,
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            Normalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        super().build(input_shape)
Example #16
    return X, y, X_test, y_test


# Create dataset
X, y, X_test, y_test = create_data_mnist('fashion_mnist_images')
# Shuffle the training dataset
keys = np.array(range(X.shape[0]))
np.random.shuffle(keys)
X = X[keys]
y = y[keys]
# Scale and reshape samples
X = (X.reshape(X.shape[0], -1).astype(np.float32) - 127.5) / 127.5
X_test = (X_test.reshape(X_test.shape[0], -1).astype(np.float32) -
          127.5) / 127.5
# Instantiate the model
model = Sequential()
# Add layers
model.add(Layer_Dense(X.shape[1], 128))
model.add(Activation_ReLU())
model.add(Layer_Dense(128, 128))
model.add(Activation_ReLU())
model.add(Layer_Dense(128, 10))
model.add(Activation_softmax())
# Set loss, optimizer and accuracy objects
model.compile(loss=Loss_CategoricalCrossentropy(),
              optimizer=Optimizer_Adam(decay=1e-4),
              metrics=Accuracy_Categorical())
# model.fit(X, y, validation_data = (X_test, y_test), epochs = 10 , batch_size = 128 , steps_per_epoch = 100 )
# model.save('fashion_mnist.model')
# model.evaluate(X_test, y_test)
model = model.load('fashion_mnist.model')  # reload the saved model from disk (assumed to return a new model instance)
Example #17
from tensor import Tensor
from optimizer import SGD
from layer import MSELoss, Linear, Tanh, Sigmoid
from model import Sequential

import numpy as np

# Toy example of using the Tensor class
np.random.seed(0)
data = Tensor(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]), requires_grad=True)
target = Tensor(np.array([[0], [1], [0], [1]]), requires_grad=True)
# Every parameter is a Tensor object representing a weight matrix
model = Sequential(
    Linear(2, 3),
    Tanh(),
    Linear(3, 3),
    Tanh(),
    Linear(3, 1),
)
optim = SGD(parameters=model.get_parameters(), lr=0.1)
criterion = MSELoss()
for i in range(10):
    pred = model(data)
    loss = criterion(pred, target)
    loss.backward(Tensor(np.ones_like(loss.data), is_grad=True))
    optim.step()
    print(loss.data)
print(
    "------------------------------------------------------------------------")
Example #18
from links import *
from model import Sequential
import numpy as np
from numpy.random import *
import chainer
from tqdm import tqdm

model = Sequential()
model.add(Linear(784, 500, activation="relu", initialization="HeNormal"))
model.add(Linear(500, 500, activation="relu", initialization="HeNormal"))
model.add(Linear(500, 10, activation="softmax"))
model.compile(optimizer="Adam")

train, test = chainer.datasets.get_mnist()
train_data, train_label = train._datasets
test_data, test_label = test._datasets
#print train_label[0:100]

count = 0
count2 = 0
loss = 0
for i in tqdm(range(6000000)):
    #if train_label[i%60000]>1:
    #    continue
    #count2 += 1
    #inp = randint(0,2,(1,2))
    inp = np.zeros((1, 784))
    inp[0] = train_data[i % 60000]
    y = model(inp)
    t = np.zeros((1, 10))
    #t[0][0] = train_label[i%60000]