Example #1
    def __init__(self, args, device='cuda'):
        super().__init__()
        self.args = args
        self.device = device
        self.img_channels = 3
        self.depths = [args.zdim, 256, 256, 256, 128, 128]
        self.didx = 0
        self.alpha = 1.

        # init G
        self.G = nn.ModuleList()
        blk = nn.ModuleList()
        blk.append(ll.Conv2d(self.depths[0], self.depths[0], 4, padding=3)) # to 4x4
        blk.append(ll.Conv2d(self.depths[0], self.depths[0], 3, padding=1))
        self.G.append(blk)
        self.toRGB = nn.ModuleList()
        self.toRGB.append(ll.Conv2d(self.depths[0], self.img_channels, 1, lrelu=False, pnorm=False)) # toRGB

        # init D
        self.fromRGB = nn.ModuleList()
        self.fromRGB.append(ll.Conv2d(self.img_channels, self.depths[0], 1)) # fromRGB
        self.D = nn.ModuleList()
        blk = nn.ModuleList()
        blk.append(ll.MinibatchStddev())
        blk.append(ll.Conv2d(self.depths[0]+1, self.depths[0], 3, padding=1))
        blk.append(ll.Conv2d(self.depths[0], self.depths[0], 4, stride=4)) # to 1x1
        blk.append(ll.Flatten())
        blk.append(ll.Linear(self.depths[0], 1))
        self.D.append(blk)

        self.doubling = nn.Upsample(scale_factor=2)
        self.halving = nn.AvgPool2d(2, 2)
        self.set_optimizer()
        self.criterion = losses.GANLoss(loss_type=args.loss_type, device=device)
        self.loss_type = args.loss_type
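A quick sanity check of the `# to 4x4` comment above: with a 4x4 kernel and padding 3, a 1x1 latent map grows to a 4x4 feature map. A minimal sketch using plain torch.nn.Conv2d (assuming the project's ll.Conv2d wrapper keeps standard conv spatial semantics; 512 stands in for args.zdim):

import torch
import torch.nn as nn

# W_out = floor((W - F + 2P)/S) + 1 = floor((1 - 4 + 6)/1) + 1 = 4
z = torch.randn(1, 512, 1, 1)  # latent vector viewed as a 1x1 spatial map
to_4x4 = nn.Conv2d(512, 512, kernel_size=4, padding=3)
print(to_4x4(z).shape)  # torch.Size([1, 512, 4, 4])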
Example #2
 def __init__(self, input_shape):
     super(LocalDiscriminator, self).__init__()
     self.input_shape = input_shape
     self.output_shape = (1024,)
     self.img_c = input_shape[0]
     self.img_h = input_shape[1]
     self.img_w = input_shape[2]
     self.conv1 = nn.Conv2d(self.img_c, 64, kernel_size=5, stride=2, padding=2)
     self.bn1 = nn.BatchNorm2d(64)
     self.act1 = nn.ReLU()
     self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
     self.bn2 = nn.BatchNorm2d(128)
     self.act2 = nn.ReLU()
     self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
     self.bn3 = nn.BatchNorm2d(256)
     self.act3 = nn.ReLU()
     self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
     self.bn4 = nn.BatchNorm2d(512)
     self.act4 = nn.ReLU()
     self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
     self.bn5 = nn.BatchNorm2d(512)
     self.act5 = nn.ReLU()
     in_features = 512 * (self.img_h//32) * (self.img_w//32)
     self.flatten6 = layers.Flatten()
     self.linear6 = nn.Linear(in_features, 1024)
     self.act6 = nn.ReLU()
Example #3
def Encoder(img_size, in_channel, conv_channel, filter_size, latent_dim,
            dense_size, bn):
    inner_conv_channel = conv_channel // 2
    if img_size % 4 != 0:
        print("WARNING: image size mod 4 != 0, may produce bug.")
    # Input feature count for the first dense layer: the two stride-2 convs
    # shrink the spatial size to (img_size / 2 / 2) on each side.
    flatten_img_size = inner_conv_channel * (img_size // 4) * (img_size // 4)

    # The first two conv layers use padding=2 so that each stride-2 conv halves
    # the spatial size: W_out = floor((W - F + 2P)/S) + 1 = W/2 for S=2, F=5, P=2.
    if VERBOSE:
        print(img_size, in_channel, conv_channel, filter_size, latent_dim, bn)
    model = nn.Sequential(
        layers.ConvLayer(in_channel,
                         conv_channel,
                         filter_size,
                         stride=2,
                         padding=2,
                         bn=bn),
        layers.ConvLayer(conv_channel,
                         inner_conv_channel,
                         filter_size,
                         stride=2,
                         padding=2,
                         bn=bn),
        layers.ConvLayer(inner_conv_channel,
                         inner_conv_channel,
                         filter_size,
                         stride=1,
                         padding=2,
                         bn=bn), layers.Flatten(),
        layers.Dense(flatten_img_size, dense_size),
        layers.Dense(dense_size, latent_dim))
    model = model.to(device=device, dtype=dtype)
    model = torch.nn.DataParallel(model, device_ids=GPU_IDs)
    return model
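The padding comment above can be checked numerically; a minimal sketch with torch.nn.Conv2d (assuming layers.ConvLayer wraps the same conv arithmetic; the 64x64 input size is illustrative):

import torch
import torch.nn as nn

# A stride-2, 5x5, padding-2 conv halves the spatial size:
# W_out = floor((W - F + 2P)/S) + 1 = floor((64 - 5 + 4)/2) + 1 = 32
x = torch.randn(1, 3, 64, 64)
conv = nn.Conv2d(3, 32, kernel_size=5, stride=2, padding=2)
print(conv(x).shape)  # torch.Size([1, 32, 32, 32])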
Example #4
    def __init__(self, input_shape, arc='places2'):
        super(GlobalDiscriminator, self).__init__()
        self.arc = arc
        self.input_shape = input_shape
        self.output_shape = (1024, )
        self.img_c = input_shape[0]
        self.img_h = input_shape[1]
        self.img_w = input_shape[2]

        self.conv1 = nn.Conv2d(self.img_c,
                               64,
                               kernel_size=5,
                               stride=2,
                               padding=2)
        self.bn1 = nn.BatchNorm2d(64)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
        self.bn2 = nn.BatchNorm2d(128)
        self.act2 = nn.ReLU()
        self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
        self.bn3 = nn.BatchNorm2d(256)
        self.act3 = nn.ReLU()
        self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
        self.bn4 = nn.BatchNorm2d(512)
        self.act4 = nn.ReLU()
        self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
        self.bn5 = nn.BatchNorm2d(512)
        self.act5 = nn.ReLU()
        if arc == 'celeba':
            in_features = 512 * (self.img_h // 32) * (self.img_w // 32)
            self.flatten6 = layers.Flatten()
            self.linear6 = nn.Linear(in_features, 1024)
            self.act6 = nn.ReLU()
        elif arc == 'places2':
            self.conv6 = nn.Conv2d(512,
                                   512,
                                   kernel_size=5,
                                   stride=2,
                                   padding=2)
            self.bn6 = nn.BatchNorm2d(512)
            self.act6 = nn.ReLU()
            in_features = 512 * (self.img_h // 64) * (self.img_w // 64)
            self.flatten7 = layers.Flatten()
            self.linear7 = nn.Linear(in_features, 1024)
            self.act7 = nn.ReLU()
        else:
            raise ValueError('Unsupported architecture \'%s\'.' % self.arc)
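The in_features expressions follow from the stride-2 convolutions: five of them divide H and W by 2**5 = 32 (the 'celeba' branch), and the extra conv6 in the 'places2' branch divides by 2**6 = 64. A quick check with a hypothetical 256x256 input:

# Hypothetical input size, for illustration only.
img_h, img_w = 256, 256
print(512 * (img_h // 32) * (img_w // 32))  # 32768 features into linear6 ('celeba')
print(512 * (img_h // 64) * (img_w // 64))  # 8192 features into linear7 ('places2')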
Example #5
 def __init__(self, inputSize, outputSize):
     super().__init__()
     self.outputSize = outputSize
     self.Flatten = layers.Flatten()
     self.fc1 = layers.FullyConnected(inputSize,
                                      10,
                                      activation=activate.SoftMax())
     self.inputLayer = self.Flatten
     self.outputLayer = self.fc1
     self.Flatten.addSon(self.fc1)
Example #6
def test_layers():
    x = torch.randn(3, 64, 224, 224)
    m = layers.ConvBatchnormRelu2d(64, 32, 3, padding=1)
    x = m(x)
    m = layers.Flatten()
    x = m(x)
    m = layers.SingSqrt()
    x = m(x)
    m = layers.L2Norm()
    x = m(x)
Example #7
 def __init__(self, output_size=5, hidden_size=64, layers=4):
     super().__init__()
     self.hidden_size = hidden_size
     self.base = ConvBase(output_size=hidden_size,
                          hidden=hidden_size,
                          channels=1,
                          max_pool=False,
                          layers=layers)
     self.features = M.Sequential(
         kl.Lambda(lambda x: F.reshape(x, (-1, 1, 28, 28))),
         self.base,
         kl.Lambda(lambda x: kf.mean(x, axis=[2, 3])),
         kl.Flatten(),
     )
     self.classifier = M.Linear(hidden_size, output_size, bias=True)
Example #8
 def build_raw_branch(self, resnet_kwargs):
     branch_layers = []
     blocks = self.trunk.build_blocks(resnet_kwargs['block'],
                                      resnet_kwargs['num_features'][-2],
                                      resnet_kwargs['num_features'][-1],
                                      resnet_kwargs['strides'][-1],
                                      resnet_kwargs['num_blocks'][-1])
     branch_layers.append(blocks)
     # Spatial size after the strided trunk blocks.
     shape = (resnet_kwargs['input_shape'][0] // prod(resnet_kwargs['strides']),
              resnet_kwargs['input_shape'][1] // prod(resnet_kwargs['strides']))
     if resnet_kwargs['use_maxpool']:
         shape = (shape[0] // 2, shape[1] // 2)
     if resnet_kwargs['use_avgpool']:
         avgpool = nn.AvgPool2d([*shape], 1)
         branch_layers.append(avgpool)
         shape = (1, 1)
     if resnet_kwargs['feature_layer_dim'] is not None:
         # Flattened feature count feeding the optional embedding layer.
         fc1 = nn.Sequential(
             layers.Flatten(),
             layers.linear(resnet_kwargs['num_features'][-1] * shape[0] * shape[1],
                           resnet_kwargs['feature_layer_dim'],
                           activation_fn=None,
                           pre_activation=False,
                           use_batchnorm=resnet_kwargs['use_batchnorm']))
         branch_layers.append(fc1)
     return nn.Sequential(*branch_layers)
Example #9
 def __init__(self, input_size, output_size, sizes=None):
     super().__init__()
     if sizes is None:
         sizes = [256, 128, 64, 64]
     layers = [
         LinearBlock(input_size, sizes[0]),
     ]
     for s_i, s_o in zip(sizes[:-1], sizes[1:]):
         layers.append(LinearBlock(s_i, s_o))
     layers = M.Sequential(*layers)
     self.features = M.Sequential(
         kl.Flatten(),
         layers,
     )
     self.classifier = M.Linear(sizes[-1], output_size)
     fc_init_(self.classifier)
     self.input_size = input_size
Example #10
 def __init__(self, inputSize, outputSize):
     super().__init__()
     self.outputSize = outputSize
     self.conv1 = layers.Convolution(5, 1)
     self.Pool1 = layers.Pool(5, stride=2)
     self.Flatten = layers.Flatten()
     self.fc1 = layers.FullyConnected(14 * 14 * 5,
                                      50,
                                      activation=activate.Relu())
     self.fc2 = layers.FullyConnected(50,
                                      outputSize,
                                      activation=activate.SoftMax())
     self.inputLayer = self.conv1
     self.outputLayer = self.fc2
     self.conv1.addSon(self.Pool1)
     self.Pool1.addSon(self.Flatten)
     self.Flatten.addSon(self.fc1)
     self.fc1.addSon(self.fc2)
Example #11
    def __init__(self, lossfunc, optimizer, batch_size):
        super().__init__(lossfunc, optimizer, batch_size)

        self.conv0 = L.Convolution_(n_filter=8, filter_size=(3, 3), stride=1)
        self.conv1 = L.Convolution_(n_filter=16, filter_size=(3, 3), stride=1)

        self.fc0 = L.Linear_(output_size=1024)
        self.fc1 = L.Linear_(output_size=10)

        self.bn0 = L.BatchNormalization_()
        self.bn1 = L.BatchNormalization_()
        self.bn4 = L.BatchNormalization_()

        self.acti0 = L.ELU()
        self.acti1 = L.ELU()
        self.acti4 = L.ELU()

        self.pool0 = L.MaxPooling(7, 7)
        self.pool1 = L.MaxPooling(5, 5)

        self.flat = L.Flatten()

        self.drop0 = L.Dropout(0.5)
        self.drop1 = L.Dropout(0.5)

        self.layers = [
            self.conv0,
            self.acti0,
            self.pool0,
            self.bn0,
            #self.drop0,
            self.conv1,
            self.acti1,
            self.pool1,
            self.bn1,
            #self.drop1,
            self.flat,
            self.fc0,
            self.acti4,
            self.bn4,
            self.fc1,
        ]
Example #12
def test_mnist_with_cov2d():
    (train_x, train_y), (test_x, test_y) = mnist.load_data(flatten=False)
    val_x = train_x[50000:]
    val_y = train_y[50000:]
    train_x = train_x[:50000]
    train_y = train_y[:50000]
    batch_size = 200
    model = models.Sequential()
    model.add(
        layers.Conv2D(4, (3, 3),
                      stride=1,
                      pad=1,
                      input_shape=(None, 1, 28, 28)))
    model.add(layers.ReLU())
    model.add(layers.MaxPool2D((2, 2), stride=2))
    model.add(layers.Flatten())
    model.add(layers.Linear(10))
    model.add(layers.Softmax())
    acc = losses.categorical_accuracy.__name__
    model.compile(losses.CrossEntropy(),
                  optimizers.SGD(lr=0.001),
                  metrics=[losses.categorical_accuracy])
    model.summary()
    history = model.train(train_x,
                          train_y,
                          batch_size,
                          epochs=4,
                          validation_data=(val_x, val_y))
    epochs = range(1, len(history["loss"]) + 1)
    plt.plot(epochs, history["loss"], 'ro', label="Traning loss")
    plt.plot(epochs, history["val_loss"], 'go', label="Validating loss")
    plt.plot(epochs, history[acc], 'r', label="Traning accuracy")
    plt.plot(epochs, history["val_" + acc], 'g', label="Validating accuracy")
    plt.title('Training/Validating loss/accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Loss/Accuracy')
    plt.legend()
    plt.show(block=True)
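The shapes behind this network's Flatten/Linear stage follow from the declared 28x28 MNIST input; a short check of the arithmetic:

# Conv2D(4 filters, 3x3, stride 1, pad 1): 1x28x28 -> 4x28x28 (padding preserves size)
# MaxPool2D(2x2, stride 2):                4x28x28 -> 4x14x14
# Flatten:                                 4x14x14 -> 784 features into Linear(10)
print(4 * 14 * 14)  # 784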
Example #13
sig = mf.sigmoid()
# MaxPool layer 2x2 with a stride of 2
pool1 = layers.Maxpool(conv1.out_dim, size=2, stride=2)
# Conv layer with 2 filters kernel size of 3x3 stride of 1 and no padding
conv2 = layers.Conv(pool1.out_dim,
                    n_filter=2,
                    h_filter=3,
                    w_filter=3,
                    stride=1,
                    padding=0)
# activation for layer 2 rectified linear
relu = mf.ReLU()
# MaxPool layer 2x2 stride 1
pool2 = layers.Maxpool(conv2.out_dim, size=2, stride=1)
# Flatten the matrix
flat = layers.Flatten()
# Fully connected layer with 50 neurons
fc1 = layers.FullyConnected(np.prod(pool2.out_dim), 50)
# Activation for fully connected layer of 50 neurons is tanh
tanh = mf.TanH()

# Fully connected layer with 10 neurons 'output layer'
out = layers.FullyConnected(50, num_classes)

cnn = layers.CNN([conv1, sig, pool1, conv2, relu, pool2, flat, fc1, tanh, out])

mf.model_summary(cnn, 'cnn_model_plot.png', f)

e_nnet, e_accuracy, e_validate, e_loss, e_loss_val = mf.sgd(cnn,
                                                            x_train,
                                                            y_train,
Example #14
    layers.Convolution("conv5",
                       kernel_size=3,
                       kernel_channels=384,
                       n_kernels=256,
                       stride=1,
                       same_padding=True,
                       logging=False))
graph.add_layer(layers.Activation(activation="relu", logging=False))

graph.add_layer(
    layers.MaxPooling(window_size=3,
                      window_channels=256,
                      stride=2,
                      logging=False))

graph.add_layer(layers.Flatten((9216, 1), logging=False))
graph.add_layer(
    layers.FullyConnected("fc1", 9216, 4096, activation="relu", logging=False))
graph.add_layer(
    layers.FullyConnected("fc2", 4096, 4096, activation="relu", logging=False))
graph.add_layer(
    layers.FullyConnected("fc3",
                          4096,
                          n_classes,
                          activation="softmax",
                          logging=False))

try:
    print()
    print("[+] N_EXAMPLES:", n_examples)
    for e in range(epochs):
Example #15
    def get_accuracy(self, X, Y):
        # print("Expected:    ", Y)
        Yhat = self.predict(X)
        # print("Predictions: ", Yhat)
        matches = (Yhat == Y.reshape(self.m))
        # print(matches)
        return np.sum(matches) / len(matches)


if __name__ == "__main__":
    nx = 10
    m = 12
    X = np.random.randn(nx, m)
    Y = np.array([[0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1]])
    nn = NeuralNetwork()
    nn.add_layer(layers.Flatten(nx))
    nn.add_layer(
        layers.Dense(20,
                     activation=ActivationFunctions.relu,
                     initialization="He"))
    nn.add_layer(
        layers.Dense(30,
                     activation=ActivationFunctions.relu,
                     initialization="He"))
    nn.add_layer(
        layers.Dense(1,
                     activation=ActivationFunctions.sigmoid,
                     initialization="He"))

    costs = nn.train(X, Y, alpha=0.01, num_epochs=1500, print_cost=True)
Example #16
activation = layers.Activation(activation="relu")
X = activation.forward(X)
print("[+] ACTIVATION_shape:", X.shape)
dActivation = activation.backward(np.ones_like(X))
print("dActivation_SHAPE:", dActivation.shape)
print()

maxpool = layers.MaxPooling(window_size=2, window_channels=10, stride=2)
X = maxpool.forward(X)
print("[+] MAXPOOLED_shape:", X.shape)
dMaxpool = maxpool.backward(np.ones_like(X))
print("dMaxpool_SHAPE:", dMaxpool.shape)

N, C, H, W = X.shape

flatten = layers.Flatten((C * H * W, N))
X = flatten.forward(X)
print(X.shape)

I, N = X.shape

fc1 = layers.FullyConnected("fc1", I, 1000, activation="relu")
X = fc1.forward(X)
print(X.shape)

fc2 = layers.FullyConnected("fc2", 1000, 10, activation="softmax")
Y = fc2.forward(X)
print(Y.shape)

cross_entropy = layers.CrossEntropy()
loss = cross_entropy.forward(Y, T)
Example #17
import numpy as np

import layers
import functions

x = np.asarray([[1, 1, 1], [2, 2, 2], [3, 3, 3]])

a = x.shape[0] // 2

w = x[0, :]

x = np.sqrt(1e-8) / np.sqrt(1e-8)

net = layers.Network(layers.Adam(0.01))
net.add(layers.Dense(2, 5, functions.LeakyReLu(), '1st hidden'))
net.add(layers.Dense(5, 20, functions.LeakyReLu(), '1st hidden'))
net.add(layers.Dense(20, 5, functions.LeakyReLu(), '2nd hidden'))
net.add(layers.Flatten())
net.add(layers.Dense(5, 3, functions.LeakyReLu(), '3rd hidden'))
net.add(layers.Dense(3, 1, functions.Sigmoid(), 'Out'))

inp = np.asarray([[0, 0], [1, 0], [0, 1], [1, 1]])
shape = inp.shape

x = np.reshape(inp, (8))
y = np.reshape(x, shape)

print(inp)
target = np.asarray([0, 1, 1, 0])
print(target)

result = net.propagate(inp[0])
print(result)
Example #18
weight_decay = config['weight_decay']
net = []
regularizers = []
inputs = np.random.randn(config['batch_size'], 1, 28, 28)
net += [layers.Convolution(inputs, 16, 5, "conv1")]
regularizers += [
    layers.L2Regularizer(net[-1].weights, weight_decay, 'conv1_l2reg')
]
net += [layers.MaxPooling(net[-1], "pool1")]
net += [layers.ReLU(net[-1], "relu1")]
net += [layers.Convolution(net[-1], 32, 5, "conv2")]
regularizers += [
    layers.L2Regularizer(net[-1].weights, weight_decay, 'conv2_l2reg')
]
net += [layers.MaxPooling(net[-1], "pool2")]
net += [layers.ReLU(net[-1], "relu2")]
## 7x7
net += [layers.Flatten(net[-1], "flatten3")]
net += [layers.FC(net[-1], 512, "fc3")]
regularizers += [
    layers.L2Regularizer(net[-1].weights, weight_decay, 'fc3_l2reg')
]
net += [layers.ReLU(net[-1], "relu3")]
net += [layers.FC(net[-1], 10, "logits")]

data_loss = layers.SoftmaxCrossEntropyWithLogits()
loss = layers.RegularizedLoss(data_loss, regularizers)

nn.train(train_x, train_y, valid_x, valid_y, net, loss, config)
nn.evaluate("Test", test_x, test_y, net, loss, config)
Example #19
    def __init__(
        self,
        lossfunc,
        optimizer,
        batch_size=32,
    ):
        self.lossfunc = lossfunc
        self.optimizer = optimizer
        self.batch_size = batch_size

        input_size = 64
        hidden_size = 3136
        output_size = 10

        # self.lr = 0.001
        # self.alpha = 0.9
        self.l1 = 1e-4
        self.l2 = 1e-4
        self.optimizer = optimizers.Adam(l1=self.l1, l2=self.l2)

        self.conv0 = L.Convolution_(n_filter=8, filter_size=(3, 3), stride=1)
        self.conv1 = L.Convolution_(n_filter=16, filter_size=(3, 3), stride=1)
        self.conv2 = L.Convolution_(n_filter=32, filter_size=(5, 5), stride=1)
        self.conv3 = L.Convolution_(n_filter=64, filter_size=(5, 5), stride=1)

        self.fc0 = L.Linear_(output_size=1024)
        self.fc1 = L.Linear_(output_size=10)

        self.bn0 = L.BatchNormalization_()
        self.bn1 = L.BatchNormalization_()
        self.bn2 = L.BatchNormalization_()
        self.bn3 = L.BatchNormalization_()
        self.bn4 = L.BatchNormalization_()

        self.acti0 = L.ELU()
        self.acti1 = L.ELU()
        self.acti2 = L.ELU()
        self.acti3 = L.ELU()
        self.acti4 = L.ELU()

        self.pool0 = L.MaxPooling(7, 7)
        self.pool1 = L.MaxPooling(5, 5)
        self.pool2 = L.MaxPooling(3, 3)
        self.pool3 = L.MaxPooling(3, 3)

        self.flat = L.Flatten()

        self.drop0 = L.Dropout(0.5)
        self.drop1 = L.Dropout(0.5)
        self.drop2 = L.Dropout(0.5)
        self.drop3 = L.Dropout(0.25)

        self.layers = [
            self.conv0,
            self.acti0,
            self.pool0,
            self.bn0,
            #self.drop0,
            self.conv1,
            self.acti1,
            self.pool1,
            self.bn1,
            #self.drop1,

            #self.conv2,
            #self.acti2,
            #self.pool2,
            #self.bn2,
            #self.drop2,

            #self.conv3,
            #self.acti3,
            #self.pool3,
            #self.bn3,
            #self.drop3,
            self.flat,
            self.fc0,
            self.acti4,
            self.bn4,
            self.fc1,
        ]