Пример #1
0
def make_model():
    """Build and compile a plain CNN classifier: 5 conv layers + MLP head.

    Relies on the module-level ``input_shape`` and the project's
    Keras-like layer/optimizer API (``Sequential``, ``Conv2D``, ...).

    Returns:
        The compiled ``Sequential`` model.
    """
    model = Sequential()
    model.add(Input(shape=input_shape))

    # Five identical 3x3 conv blocks; a loop replaces the copy-pasted
    # Conv2D/ReLU stanzas of the original.
    for _ in range(5):
        model.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
        model.add(ReLU())

    # Classifier head: two wide hidden layers, then 10-way softmax.
    model.add(Flatten())
    model.add(Dense(2500, kernel_initializer='He'))
    model.add(ReLU())
    model.add(Dense(1500, kernel_initializer='He'))
    model.add(ReLU())
    model.add(Dense(10, kernel_initializer='He'))
    model.add(Softmax())

    model.summary()
    model.compile(Adam(), 'categorical_crossentropy', 'accuracy')

    return model
Пример #2
0
def make_model():
    """Build and compile a ResNet-style classifier (3 stages x 3 blocks).

    Relies on the module-level ``input_shape`` and the project helper
    ``add_residual_block``.

    Returns:
        The compiled ``Sequential`` model.
    """
    model = Sequential()
    model.add(Input(shape=input_shape))

    # Stem: 3x3 conv -> BN -> ReLU.
    model.add(
        Conv2D(16,
               kernel_size=3,
               strides=1,
               padding='same',
               kernel_regularizer=l2(1e-4)))
    model.add(BatchNormalization_v2())
    model.add(ReLU())

    # Three stages of three residual blocks each.  From the second stage
    # on, the first block downsamples (strides=2) and projects the
    # shortcut with a conv.
    for stage, n_filters in enumerate((16, 32, 64)):
        if stage == 0:
            add_residual_block(model, num_filters=n_filters)
        else:
            add_residual_block(model,
                               num_filters=n_filters,
                               strides=2,
                               cnn_shortcut=True)
        add_residual_block(model, num_filters=n_filters)
        add_residual_block(model, num_filters=n_filters)
    model.add(AveragePooling2DAll())

    model.add(Flatten())
    model.add(Dense(10, kernel_initializer='He'))
    model.add(Softmax())

    model.summary()
    model.compile(Adam(lr=0.001, decay=1e-4), 'categorical_crossentropy',
                  'accuracy')

    return model
Пример #3
0
def make_model():
    """Build and compile a VGG-style CNN: two conv/BN blocks + MLP head.

    Relies on the module-level ``input_shape`` and the project's
    Keras-like layer API.

    Returns:
        The compiled ``Sequential`` model.
    """
    model = Sequential()
    model.add(Input(shape=input_shape))

    # Two stages: (conv -> BN -> ReLU) x 2, then a 2x2 max-pool.
    # Stage filter counts: 32 then 64.
    for n_filters in (32, 64):
        for _ in range(2):
            model.add(Conv2D(n_filters, kernel_size=(3, 3), padding='same'))
            model.add(BN_LAYER())
            model.add(ReLU())
        model.add(MaxPooling2D(2, 2, stride=2))

    model.add(Flatten())
    model.add(Dense(512, kernel_initializer='He'))
    model.add(BN_LAYER())
    model.add(ReLU())
    model.add(Dense(10, kernel_initializer='He'))
    model.add(Softmax())

    model.summary()
    # Pass loss/metric positionally for consistency with the other
    # make_model() examples in this file (the original used the keyword
    # `metric=`, which no sibling call uses and which would be `metrics=`
    # under Keras conventions).
    model.compile(Adam(), 'categorical_crossentropy', 'accuracy')

    return model
Пример #4
0
def main():
    """Train a GAN on the full MNIST image set (train + test merged).

    Relies on module-level ``epochs``, ``batchSize`` and ``plot_freq``
    and the project helpers ``make_mnist_data``, ``make_generator``,
    ``make_discriminator``, ``make_gan_model`` and ``train``.
    """
    # valid_ratio=0: no validation split.  Labels are discarded — the
    # GAN only consumes raw images.
    (X_train, _), (_, _), (X_test, _) = make_mnist_data(valid_ratio=0)
    X_train = np.concatenate([X_train, X_test])

    # One optimizer instance shared by generator, discriminator and the
    # combined model (lr/beta_1 look like the usual DCGAN settings —
    # presumably intentional; confirm against the training code).
    adam = Adam(lr=0.0002, beta_1=0.5)
    generator, discriminator = make_generator(adam), make_discriminator(adam)
    gan_model = make_gan_model(generator, discriminator, adam)

    train(X_train, generator, discriminator,
          gan_model, epochs=epochs, batchSize=batchSize, plot_freq=plot_freq)
Пример #5
0
def main():
    """Train a ResNet56-v2 model on CIFAR-10 with torchvision data.

    Relies on module-level ``input_shape``, ``depth``, ``n_classes``,
    ``batch_size`` and ``epochs``, plus the project helpers
    ``resnet_v2``, ``ReduceLROnPlateau`` and ``plot_history``.
    """
    transform_train = torchvision.transforms.Compose([
        torchvision.transforms.RandomCrop(32, padding=4, padding_mode='edge'),
        torchvision.transforms.RandomHorizontalFlip(),
        # torchvision.transforms.RandomRotation(15),
        torchvision.transforms.ToTensor(),
        # CIFAR-10 per-channel mean / std.
        torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    training_data = torchvision.datasets.CIFAR10('./data/cifar10', train=True, transform=transform_train, download=True)
    testing_data = torchvision.datasets.CIFAR10('./data/cifar10', train=False, transform=transform_test, download=True)

    model = resnet_v2(input_shape, depth, n_classes)
    model.summary()
    model.compile(Adam(lr=0.001), 'categorical_crossentropy', 'accuracy')

    # Fixed was the original's `lr_schduler` typo (local name only).
    def lr_scheduler(model):
        """Step-decay schedule: shrink the LR at epochs 80/120/160/180."""
        lr = 1e-3
        if model.n_epoch > 180:
            lr *= 0.5e-3
        elif model.n_epoch > 160:
            lr *= 1e-3
        elif model.n_epoch > 120:
            lr *= 1e-2
        elif model.n_epoch > 80:
            lr *= 1e-1
        model.optimizer.lr = lr
        print('Learning rate: ', lr)
        return lr

    # NOTE(review): lr_reducer is constructed but never passed to
    # `callbacks`, so it has no effect — it probably belongs in the
    # callbacks list alongside the scheduler; confirm before enabling.
    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=6,
                                   min_lr=0.5e-6)

    history = model.fit_torchvision(training_data, batch_size=batch_size, epochs=epochs,
                                    validation_data=testing_data, callbacks=[lr_scheduler])

    plot_history(history, 'cifar10_resnet56_v2.jpg')

    # NOTE(review): '.h8' looks like a typo for '.h5' — verify what
    # extension the project's save() expects before changing it.
    model.save('resnet56_v2.h8')
Пример #6
0
def main():
    """Train a GAN on CIFAR-10 images flattened to vectors.

    Relies on module-level ``epochs``, ``batchSize`` and ``plot_freq``
    and the project helpers ``make_cifar10_data``, ``make_generator``,
    ``make_discriminator``, ``make_gan_model`` and ``train``.
    """
    # No validation split; labels are discarded — only raw images are
    # needed for GAN training.
    splits = make_cifar10_data(valid_ratio=0,
                               image_data_format='channels_last')
    (X_train, _), (_, _), (X_test, _) = splits

    # Merge train and test, then flatten each image to a 1-D vector.
    X_train = np.concatenate([X_train, X_test])
    X_train = X_train.reshape(X_train.shape[0], -1)

    # One optimizer instance shared by all three models.
    adam = Adam(lr=0.0002, beta_1=0.5)
    generator = make_generator(adam)
    discriminator = make_discriminator(adam)
    gan_model = make_gan_model(generator, discriminator, adam)

    train(X_train, generator, discriminator, gan_model,
          epochs=epochs, batchSize=batchSize, plot_freq=plot_freq)
Пример #7
0
def make_model():
    """Build and compile a deep MLP classifier: five 4096-unit layers.

    Relies on the module-level ``input_shape`` and the project's
    Keras-like layer/optimizer API.

    Returns:
        The compiled ``Sequential`` model.
    """
    model = Sequential()
    model.add(Input(shape=input_shape))

    # Five identical fully-connected blocks; a loop replaces the
    # copy-pasted Dense/ReLU stanzas of the original.
    for _ in range(5):
        model.add(Dense(4096))
        model.add(ReLU())
    model.add(Dense(10))
    model.add(Softmax())

    model.summary()
    model.compile(Adam(), 'categorical_crossentropy', 'accuracy')

    return model
Пример #8
0
def main():
    """Train a YOLO-style detector on a COCO-format shapes dataset.

    Relies on module-level ``anchors``, ``input_shape``, ``n_classes``,
    ``batch_size`` and ``epochs``, plus the project helpers
    ``make_my_yolo``, ``yolo_loss`` and ``DataLoader``.
    """
    class bbox_transform:
        """Convert a COCO annotation list to an (N, 6) target array.

        Column layout: [sample_index, category_id, x, y, w, h], with
        coordinates normalized by ``img_size``.  Column 0 is left zero
        here and filled in later by ``collate_fn``.
        """

        def __init__(self, img_size):
            self.img_size = img_size

        def __call__(self, x):
            bbox_num = len(x)
            target = np.zeros((bbox_num, 6))

            for index, bbox in enumerate(x):
                target[index, 1] = bbox['category_id']
                target[index, 2:] = bbox['bbox']

            # Normalize box coordinates to [0, 1] by the source image size.
            target[:, 2:] /= self.img_size

            return target

    transform_train = torchvision.transforms.Compose([
        torchvision.transforms.Resize((112, 112)),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465),
                                         (0.2023, 0.1994, 0.2010)),
    ])

    # NOTE(review): the val split reuses transform_train; presumably
    # fine since it has no random augmentation, but confirm.
    training_data = torchvision.datasets.CocoDetection(
        '../datasets/shape/train',
        '../datasets/shape/annotations/instances_train.json',
        transform=transform_train,
        target_transform=bbox_transform(512))
    testing_data = torchvision.datasets.CocoDetection(
        '../datasets/shape/val',
        '../datasets/shape/annotations/instances_val.json',
        transform=transform_train,
        target_transform=bbox_transform(512))

    def collate_fn(batch):
        """Stack images and concatenate per-sample targets for a batch."""
        imgs, targets = list(zip(*batch))

        # Remove empty boxes
        targets = [boxes for boxes in targets if boxes is not None]

        # set the sample index
        for b_i, boxes in enumerate(targets):
            boxes[:, 0] = b_i
        targets = np.concatenate(targets, 0)
        imgs = np.stack([img for img in imgs])
        return imgs, targets

    # Fixed was the original's `training_laoder` typo (local name only).
    training_loader = DataLoader(training_data,
                                 batch_size=8,
                                 collate_fn=collate_fn)

    loss_params = {
        "scaled_anchors": anchors,
        "ignore_th": 0.5,
        "obj_scale": 1,
        "noobj_scale": 100,
    }

    model = make_my_yolo(input_shape, len(anchors), n_classes)
    model.summary()
    model.compile(Adam(lr=0.001), yolo_loss(**loss_params), None)

    history = model.fit_dataloader(training_loader,
                                   batch_size=batch_size,
                                   epochs=epochs)