Example #1
def main():
    # Parsing arguments
    parser = argparse.ArgumentParser(description='Training GANs or VAEs')
    parser.add_argument('--model', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--initialepoch', type=int, default=0)
    parser.add_argument('--batchsize', type=int, default=50)
    parser.add_argument('--output', default='output')
    parser.add_argument('--zdims', type=int, default=256)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--testmode', action='store_true')

    args = parser.parse_args()

    # select gpu
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    # Make output directory if it does not exist
    if not os.path.isdir(args.output):
        os.mkdir(args.output)

    # Load datasets
    if args.dataset == 'mnist':
        datasets = mnist.load_data()
    elif args.dataset == 'fashion_mnist':
        datasets = mnist.load_data()
    elif args.dataset == 'svhn':
        datasets = svhn.load_data()
    else:
        datasets = dts.load_data(args.dataset)

    # Construct model
    if args.model not in models:
        raise Exception('Unknown model:', args.model)

    model = models[args.model](input_shape=datasets.shape[1:],
                               z_dims=args.zdims,
                               output=args.output)

    if args.testmode:
        model.test_mode = True

    if args.resume is not None:
        model.load_model(args.resume)

    # Training loop
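    # Scale images from [0, 1] to [-1, 1] (assuming the loader returns values in [0, 1])
    # and draw a fixed batch of latent vectors so generated samples stay comparable across epochs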
    datasets = datasets.images * 2.0 - 1.0
    samples = np.random.normal(size=(100, args.zdims)).astype(np.float32)
    model.main_loop(datasets,
                    samples,
                    epochs=args.epoch,
                    initial_epoch=args.initialepoch,
                    batchsize=args.batchsize,
                    reporter=['loss', 'g_loss', 'd_loss', 'g_acc', 'd_acc'])
Example #2
def main():
    yml_path = sys.argv[1]
    with open(yml_path) as f:
        config = yaml.safe_load(f)

    inlier_classes = config['train_data_params']['labels']
    outlier_classes = [i for i in range(10) if i not in inlier_classes]

    x, y = load_data('test', normalization='tanh', with_label=True)
    y = np.array(y)
    x = tf.constant(x, dtype=tf.float32)

    model = DeepSVDD(**config['model_params'])
    model.build(input_shape=(None, 32, 32, 1))

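    # Hypersphere center and radius learned during Deep SVDD training; restored from the checkpoint below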
    center = tf.Variable(np.zeros((model.output_dim)), dtype=tf.float32)
    radius = tf.Variable(0., dtype=tf.float32)
    checkpoint = tf.train.Checkpoint(center=center, radius=radius)

    model.load_weights(
        os.path.join(config['logdir'], 'model',
                     'model_%d' % config['test_epoch']))
    checkpoint.restore(
        os.path.join(config['logdir'], 'model',
                     'center_radius_%d' % config['test_epoch']))

    outputs = model.predict(x)
    df_inlier = create_dataframe(x, y, center, outputs, inlier_classes,
                                 model.output_dim, 'inlier')
    df_outlier = create_dataframe(x, y, center, outputs, outlier_classes,
                                  model.output_dim, 'outlier')
    df = pd.concat([df_inlier, df_outlier], axis=0)
    df.to_csv(os.path.join(config['logdir'], 'outputs.csv'), index=None)
Example #3
def main():
    yml_path = sys.argv[1]
    with open(yml_path) as f:
        config = yaml.safe_load(f)

    inlier_classes = config['train_data_params']['labels']
    outlier_classes = [i for i in range(10) if i not in inlier_classes]

    x, y = load_data('test',
                     with_label=True,
                     normalization='tanh')
    y = np.array(y)

    discriminator = Discriminator(**config['discriminator_params'])
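    # A single dummy forward pass builds the subclassed model's variables so the .h5 weights can be loaded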
    _ = tf.nn.sigmoid(discriminator(tf.constant(x[0][None, :], dtype=tf.float32), training=False))
    discriminator.load_weights(os.path.join(config['logdir'], 'model/ocgan', 'discriminator_%d.h5' % config['test_epoch']))
    outputs = tf.nn.sigmoid(discriminator(tf.constant(x, dtype=tf.float32), training=False))

    outputs = np.squeeze(np.asarray(outputs))
    inlier_outputs = np.zeros(shape=(0, ))
    for c in inlier_classes:
        inlier_outputs = np.append(inlier_outputs,
                                   outputs[y == c])
    df_inlier = pd.DataFrame({'normality': inlier_outputs,
                              'label': 'inlier'})

    outlier_outputs = np.zeros(shape=(0, ))
    for c in outlier_classes:
        outlier_outputs = np.append(outlier_outputs,
                                    outputs[y == c])
    df_outlier = pd.DataFrame({'normality': outlier_outputs,
                               'label': 'outlier'})

    df = pd.concat([df_inlier, df_outlier], axis=0)
    df.to_csv(os.path.join(config['logdir'], 'outputs.csv'), index=None)
Example #4
def test_mnist():
    (train_x, train_y), (test_x, test_y) = mnist.load_data()
    val_x = train_x[50000:]
    val_y = train_y[50000:]
    train_x = train_x[:50000]
    train_y = train_y[:50000]
    batch_size = 200
    model = models.Sequential()
    model.add(layers.Linear(28, input_shape=(None, train_x.shape[1])))
    model.add(layers.ReLU())
    model.add(layers.Linear(10))
    model.add(layers.ReLU())
    model.add(layers.Linear(10))
    model.add(layers.Softmax())
    acc = losses.categorical_accuracy.__name__
    model.compile(losses.CrossEntropy(),
                  optimizers.SGD(lr=0.001),
                  metrics=[losses.categorical_accuracy])
    model.summary()
    history = model.train(train_x,
                          train_y,
                          batch_size,
                          epochs=32,
                          validation_data=(val_x, val_y))
    epochs = range(1, len(history["loss"]) + 1)
    plt.plot(epochs, history["loss"], 'ro', label="Traning loss")
    plt.plot(epochs, history["val_loss"], 'go', label="Validating loss")
    plt.plot(epochs, history[acc], 'r', label="Traning accuracy")
    plt.plot(epochs, history["val_" + acc], 'g', label="Validating accuracy")
    plt.title('Training/Validating loss/accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Loss/Accuracy')
    plt.legend()
    plt.show(block=True)
Example #5
def main(_):
    # Parsing arguments
    parser = argparse.ArgumentParser(description='Training GANs or VAEs')
    parser.add_argument('--model', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--datasize', type=int, default=-1)
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--batchsize', type=int, default=50)
    parser.add_argument('--output', default='output')
    parser.add_argument('--zdims', type=int, default=256)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--testmode', action='store_true')

    args = parser.parse_args()

    # select gpu
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    # Make output directory if it does not exist
    if not os.path.isdir(args.output):
        os.mkdir(args.output)

    # Load datasets
    if args.dataset == 'mnist':
        datasets = mnist.load_data()
    elif args.dataset == 'svhn':
        datasets = svhn.load_data()
    else:
        # datasets = load_data(args.dataset, args.datasize)
        datasets = load_images(args.dataset)

    # Construct model
    if args.model not in models:
        raise Exception('Unknown model:', args.model)


    print("input_shape", datasets.shape[1:])

    model = models[args.model](batchsize=args.batchsize,
                               input_shape=datasets.shape[1:],
                               attr_names=datasets.attr_names,
                               z_dims=args.zdims,
                               output=args.output,
                               resume=args.resume)

    if args.testmode:
        model.test_mode = True

    tf.set_random_seed(12345)

    # Training loop
    datasets.images = datasets.images.astype('float32') * 2.0 - 1.0
    print("datasets.images", datasets.images.shape)

    model.main_loop(datasets, epochs=args.epoch)
Example #6
def main():
    yml_path = sys.argv[1]
    with open(yml_path) as f:
        config = yaml.safe_load(f)

    noise_sampler = NoiseSampler('normal')

    inlier_classes = config['train_data_params']['labels']
    outlier_classes = [i for i in range(10) if i not in inlier_classes]

    x, y = load_data('test', with_label=True, normalization='tanh')
    y = np.array(y)

    generator = Generator(**config['generator_params'])
    discriminator = Discriminator(**config['discriminator_params'])

    generator.build(input_shape=(None, generator.latent_dim))
    discriminator.build(input_shape=(None, 32, 32, 1))

    generator.load_weights(
        os.path.join(config['logdir'], 'model',
                     'generator_%d' % config['test_epoch']))
    discriminator.load_weights(
        os.path.join(config['logdir'], 'model',
                     'discriminator_%d' % config['test_epoch']))

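    # Collect the test images of each class, then pick one random inlier and one random outlier to visualize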
    inliers = np.zeros(shape=(0, *x.shape[1:]))
    for c in inlier_classes:
        inliers = np.append(inliers, x[y == c], axis=0)
    outliers = np.zeros(shape=(0, *x.shape[1:]))
    for c in outlier_classes:
        outliers = np.append(outliers, x[y == c], axis=0)

    inlier = inliers[np.random.randint(len(inliers))]
    outlier = outliers[np.random.randint(len(outliers))]

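    # Compute anomaly scores for the two samples, keeping the per-step history ('score', 'generated', 'z') used for the GIFs below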
    computer = ScoreComputer(generator, discriminator,
                             **config['score_computer_params'])
    _, inlier_history = computer.compute_score(inlier[None, :, :, :],
                                               noise_sampler,
                                               **config['compute_params'],
                                               with_history=True)
    _, outlier_history = computer.compute_score(outlier[None, :, :, :],
                                                noise_sampler,
                                                **config['compute_params'],
                                                with_history=True)
    inlier_scores = np.array(inlier_history['score']).squeeze()
    inlier_gens = np.array(inlier_history['generated']).squeeze()
    inlier_z = np.array(inlier_history['z']).squeeze()
    outlier_scores = np.array(outlier_history['score']).squeeze()
    outlier_gens = np.array(outlier_history['generated']).squeeze()
    outlier_z = np.array(outlier_history['z']).squeeze()

    plot_gen_score(inlier, inlier_gens, inlier_scores, 'inlier.gif')
    plot_gen_score(outlier, outlier_gens, outlier_scores, 'outlier.gif')
Example #7
def test_mnist_with_cov2d():
    (train_x, train_y), (test_x, test_y) = mnist.load_data(flatten=False, one_hot_label=False)
    val_x = train_x[50000:]
    val_y = train_y[50000:]
    train_x = train_x[:50000]
    train_y = train_y[:50000]
    net = Net()
    dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    net.to(dev)
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    epochs = 20
    batch_size = 200
    batches = len(train_x)//batch_size
    history = {"loss":[], "val_loss":[], "accuracy":[], "val_accuracy":[]}
    for epoch in range(epochs):
        net.train()
        epoch_loss = 0.0
        epoch_acc = 0.0
        for batch in range(batches):
            x_true = torch.tensor(train_x[batch*batch_size: (batch+1)*batch_size], dtype=torch.float).to(dev)
            y_true = torch.tensor(train_y[batch*batch_size: (batch+1)*batch_size], dtype=torch.long).to(dev)
            y_pred = net(x_true)
            loss = F.cross_entropy(y_pred, y_true)
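            # Backpropagate, update the weights, then clear gradients for the next mini-batch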
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            epoch_loss += loss.item()
            epoch_acc += accuracy(y_pred, y_true)
        train_loss = epoch_loss/batches
        train_acc = epoch_acc/batches
        history["loss"].append(train_loss)
        history["accuracy"].append(train_acc)
        # Do validation after each epoch
        net.eval()
        with torch.no_grad():
            x_true = torch.tensor(val_x, dtype=torch.float).to(dev)
            y_true = torch.tensor(val_y, dtype=torch.long).to(dev)
            y_pred = net(x_true)
            val_loss = F.cross_entropy(y_pred, y_true).item()
            val_acc = accuracy(y_pred, y_true)
        history["val_loss"].append(val_loss)
        history["val_accuracy"].append(val_acc)
        print(f"Epoch: {epoch+1} loss: {train_loss} accuracy:{train_acc} val_loss: {val_loss}, val_accuracy:{val_acc}")
    plt.plot(history["loss"], 'ro', label="Training loss")
    plt.plot(history["val_loss"], 'go',label="Validating loss")
    plt.plot(history["accuracy"], 'r', label="Training accuracy")
    plt.plot(history["val_accuracy"], 'g', label="Validating accuracy")
    plt.title('Training/Validating loss/accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Loss/Accuracy')
    plt.legend()
    plt.show(block=True)
Example #8
def main():
    yml_path = sys.argv[1]
    with open(yml_path) as f:
        config = yaml.safe_load(f)

    inlier_classes = config['train_data_params']['labels']
    outlier_classes = [i for i in range(10) if i not in inlier_classes]

    x, y = load_data('test', normalization='tanh', with_label=True)
    y = np.array(y)
    x = tf.constant(x, dtype=tf.float32)

    autoencoder = AutoEncoder(**config['autoencoder_params'])
    estimation_network = EstimationNetwork(**config['estimator_params'])
    gmm = GMM(config['estimator_params']['dense_units'][-1],
              config['autoencoder_params']['latent_dim'] + 1)

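    # Build each sub-network (the GMM via a dummy call below) so that load_weights can find matching variables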
    autoencoder.build(input_shape=(1, 32, 32, 1))
    estimation_network.build(
        input_shape=(1, config['autoencoder_params']['latent_dim'] + 1))
    gmm([
        tf.random.normal((1, config['autoencoder_params']['latent_dim'] + 1)),
        tf.random.normal((1, config['estimator_params']['dense_units'][-1]))
    ])

    # tf 2.1.0 doesn't accept
    # gmm.build(input_shape=[(1, config['autoencoder_params']['latent_dim']+1),
    #                        (1, config['estimator_params']['dense_units'][-1])])

    dagmm = DAGMM(autoencoder, estimation_network, gmm)

    dagmm.load_weights(
        os.path.join(config['logdir'], 'model',
                     'dagmm_%d.h5' % config['test_epoch']))
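    # The DAGMM forward pass returns a per-sample energy; higher energy indicates a more anomalous sample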
    outputs = dagmm(x, training=False)
    outputs = np.squeeze(np.asarray(outputs))
    inlier_outputs = np.zeros(shape=(0, ))
    for c in inlier_classes:
        inlier_outputs = np.append(inlier_outputs, outputs[y == c])
    df_inlier = pd.DataFrame({'energy': inlier_outputs, 'label': 'inlier'})

    outlier_outputs = np.zeros(shape=(0, ))
    for c in outlier_classes:
        outlier_outputs = np.append(outlier_outputs, outputs[y == c])
    df_outlier = pd.DataFrame({'energy': outlier_outputs, 'label': 'outlier'})

    df = pd.concat([df_inlier, df_outlier], axis=0)
    df.to_csv(os.path.join(config['logdir'], 'outputs.csv'), index=None)
Example #9
def main():
    yml_path = sys.argv[1]
    with open(yml_path) as f:
        config = yaml.safe_load(f)

    noise_sampler = NoiseSampler('normal')

    inlier_classes = config['train_data_params']['labels']
    outlier_classes = [i for i in range(10) if i not in inlier_classes]

    x, y = load_data('test', with_label=True, normalization='tanh')
    y = np.array(y)

    generator = Generator(**config['generator_params'])
    discriminator = Discriminator(**config['discriminator_params'])

    generator.build(input_shape=(None, generator.latent_dim))
    discriminator.build(input_shape=(None, 32, 32, 1))

    generator.load_weights(
        os.path.join(config['logdir'], 'model',
                     'generator_%d' % config['test_epoch']))
    discriminator.load_weights(
        os.path.join(config['logdir'], 'model',
                     'discriminator_%d' % config['test_epoch']))

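    # Compute an anomaly score for every test image using the trained generator/discriminator pair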
    computer = ScoreComputer(generator, discriminator,
                             **config['score_computer_params'])
    outputs = computer.compute_score(x, noise_sampler,
                                     **config['compute_params'])

    inlier_outputs = np.zeros(shape=(0, ))
    for c in inlier_classes:
        inlier_outputs = np.append(inlier_outputs, outputs[y == c])
    df_inlier = pd.DataFrame({'score': inlier_outputs, 'label': 'inlier'})

    outlier_outputs = np.zeros(shape=(0, ))
    for c in outlier_classes:
        outlier_outputs = np.append(outlier_outputs, outputs[y == c])
    df_outlier = pd.DataFrame({'score': outlier_outputs, 'label': 'outlier'})

    df = pd.concat([df_inlier, df_outlier], axis=0)
    df.to_csv(os.path.join(config['logdir'], 'outputs.csv'), index=None)
Example #10
def test_mnist():
    (train_x, train_y), (test_x, test_y) = mnist.load_data()
    val_x = train_x[50000:]
    val_y = train_y[50000:]
    train_x = train_x[:50000]
    train_y = train_y[:50000]
    model = models.Sequential()
    model.add(layers.Dense(28, activation="relu", input_shape=(train_x.shape[1],)))
    model.add(layers.Dense(10, activation="relu"))
    model.add(layers.Dense(10, activation="softmax"))
    model.compile(optimizer=optimizers.SGD(lr=0.001), loss=losses.categorical_crossentropy, metrics=['accuracy'])
    model.summary()
    history = model.fit(train_x, train_y, batch_size=200, epochs=32, validation_data=(val_x, val_y)).history
    epochs = range(1, len(history["loss"])+1)
    plt.plot(epochs, history["loss"], 'ro', label="Traning loss")
    plt.plot(epochs, history["val_loss"], 'go',label="Validating loss")
    plt.plot(epochs, history["accuracy"], 'r', label="Traning accuracy")
    plt.plot(epochs, history["val_accuracy"], 'g', label="Validating accuracy")
    plt.title('Training/Validating loss/accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Loss/Accuracy')
    plt.legend()
    plt.show(block=True)
Example #11
def test_mnist_with_cov2d():
    (train_x, train_y), (test_x, test_y) = mnist.load_data(flatten=False)
    val_x = train_x[50000:]
    val_y = train_y[50000:]
    train_x = train_x[:50000]
    train_y = train_y[:50000]
    model = models.Sequential()
    model.add(layers.Conv2D(4, (3, 3), strides=(1,1), padding='same', data_format="channels_first", activation="relu", input_shape=(1, 28, 28)))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2), data_format="channels_first"))
    model.add(layers.Flatten())
    model.add(layers.Dense(10, activation="softmax"))
    model.compile(optimizer=optimizers.SGD(lr=0.001), loss=losses.categorical_crossentropy, metrics=['accuracy'])
    model.summary()
    history = model.fit(train_x, train_y, batch_size=200, epochs=20, validation_data=(val_x, val_y)).history
    epochs = range(1, len(history["loss"])+1)
    plt.plot(epochs, history["loss"], 'ro', label="Traning loss")
    plt.plot(epochs, history["val_loss"], 'go',label="Validating loss")
    plt.plot(epochs, history["acc"], 'r', label="Traning accuracy")
    plt.plot(epochs, history["val_acc"], 'g', label="Validating accuracy")
    plt.title('Training/Validating loss/accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Loss/Accuracy')
    plt.legend()
    plt.show(block=True)
Example #12
        Parameters
        ----------
        filename: str, optional
            Name of the ``.npz`` compressed binary to be saved


        """
        if not os.path.exists('models'):
            os.makedirs('models')
        np.savez_compressed(
            file=os.path.join(os.curdir, 'models', filename),
            weights=self.weights,
            biases=self.biases,
            batch_size=self.batch_size,
            epochs=self.epochs,
            eta=self.eta,
            cost=self.cost,
        )


if __name__ == '__main__':
    training_data, validation_data, test_data = mnist.load_data()
    nn = Network(sizes=[784, 30, 10], cost='cross_entropy')
    print "--------------- Training ---------------"
    nn.fit(training_data=training_data, validation_data=validation_data)
    print "Neural Network accuracy on test data is {} %".format(
        nn.validate(test_data) / 100.00)
    nn.save()
    print " -------------- Complete ---------------"
Example #13
def load_dataset(dataset_name):
    if dataset_name == 'mnist':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
        )
    elif dataset_name == 'mnist-original':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
            original=True)
    elif dataset_name == 'mnist-rgb':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
            use_rgb=True)
    elif dataset_name == 'svhn':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = svhn.load_data(
        )
    elif dataset_name == 'svhn-extra':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = svhn.load_data(
            include_extra=True)
    elif dataset_name == 'mnist-svhn':
        anchor = load_dataset('mnist-rgb')
        mirror = load_dataset('svhn')
        dataset = CrossDomainDatasets(dataset_name.replace('-', ''), anchor,
                                      mirror)
    elif dataset_name == 'cifar10':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = cifar10.load_data(
        )
    elif dataset_name == 'moving-mnist':
        data = moving_mnist.load_data()
        dataset = TimeCorelatedDataset(dataset_name.replace('-', ''), data)
    elif dataset_name == 'lsun-bedroom':
        datapath = lsun.load_data()
        dataset = LargeDataset(datapath)
    elif dataset_name == 'celeba':
        datapath = celeba.load_data()
        dataset = LargeDataset(datapath)
    else:
        raise KeyError("Dataset not implemented")

    return dataset
Example #14
def load_dataset(dataset_name):
    if dataset_name == 'mnist':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
        )
    elif dataset_name == 'stacked-mnist':
        dataset = StackedDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
        )
    elif dataset_name == 'mnist-original':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
            original=True)
    elif dataset_name == 'mnist-rgb':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = mnist.load_data(
            use_rgb=True)
    elif dataset_name == 'svhn':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = svhn.load_data(
        )
    elif dataset_name == 'svhn-extra':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = svhn.load_data(
            include_extra=True)
    elif dataset_name == 'mnist-svhn':
        anchor = load_dataset('mnist-rgb')
        mirror = load_dataset('svhn')
        dataset = CrossDomainDatasets(dataset_name.replace('-', ''), anchor,
                                      mirror)
    elif dataset_name == 'cifar10':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.x_test, dataset.y_test, dataset.attr_names = cifar10.load_data(
        )
    elif dataset_name == 'moving-mnist':
        data = moving_mnist.load_data()
        dataset = TimeCorelatedDataset(dataset_name.replace('-', ''), data)
    elif dataset_name == 'lsun-bedroom':
        datapath = lsun.load_data()
        dataset = LargeDataset(datapath)
    elif dataset_name == 'celeba':
        datapath = celeba.load_data()
        dataset = LargeDataset(datapath, buffer_size=20000)
    elif dataset_name == 'celeba-128':
        datapath = celeba.load_data(image_size=128)
        dataset = LargeDataset(datapath, buffer_size=5000)
    elif dataset_name == 'celeba-crop-128':
        datapath = celeba.load_data(image_size=128, center_crop=True)
        dataset = LargeDataset(datapath, buffer_size=5000)
    elif dataset_name == 'celeba-crop-64':
        datapath = celeba.load_data(image_size=64, center_crop=True)
        dataset = LargeDataset(datapath, buffer_size=20000)
    elif dataset_name == 'synthetic-8ring':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.attr_names = mixture.load_data(
            type="ring", n=8, std=.05, r=1, density=5000)
    elif dataset_name == 'synthetic-25grid':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.attr_names = mixture.load_data(
            type="grid", n=25, std=.05, density=2500)
    elif dataset_name == 'synthetic-high-dim':
        dataset = ConditionalDataset(name=dataset_name.replace('-', ''))
        dataset.images, dataset.attrs, dataset.attr_names = mixture.load_data(
            type="high-dim", n=10, d=1200, std=1., density=5000)
    elif dataset_name == 'mnist-anomaly':
        x, y, x_t, y_t, y_names = mnist.load_data()
        dataset = SimulatedAnomalyDetectionDataset(dataset_name.replace(
            '-', ''),
                                                   x,
                                                   y,
                                                   anomaly_class=0,
                                                   test_set=(x_t, y_t))
    elif dataset_name == 'svhn-anomaly':
        x, y, x_t, y_t, y_names = svhn.load_data()
        dataset = SimulatedAnomalyDetectionDataset(dataset_name.replace(
            '-', ''),
                                                   x,
                                                   y,
                                                   anomaly_class=0,
                                                   test_set=(x_t, y_t))
    elif dataset_name == 'cifar10-anomaly':
        x, y, x_t, y_t, y_names = cifar10.load_data()
        dataset = SimulatedAnomalyDetectionDataset(dataset_name.replace(
            '-', ''),
                                                   x,
                                                   y,
                                                   anomaly_class=0,
                                                   test_set=(x_t, y_t))
    else:
        raise KeyError("Dataset not implemented")

    return dataset
Example #15
File: LeNet.py Project: iiharu/NN
        return model


CLASSES = 10
ROWS, COLS, CHS = 28, 28, 1

TRAIN_SIZE = 50000
TEST_SIZE = 10000
VALIDATION_SPLIT = TEST_SIZE / TRAIN_SIZE

BATCH_SIZE = 128
EPOCHS = 16

if __name__ == '__main__':
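    # Assumes this project's mnist.load_data() already returns images shaped (28, 28, 1), scaled appropriately, with one-hot labels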
    (X_train, Y_train), (X_test, Y_test) = mnist.load_data()

    model = LeNet().build(input_shape=(ROWS, COLS, CHS), classes=CLASSES)

    # keras.utils.plot_model(model, to_file='model.png')

    model.compile(optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True), loss=keras.losses.categorical_crossentropy, metrics=[keras.metrics.categorical_accuracy])

    # history = model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(X_test, Y_test), verbose=2)
    model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(X_test, Y_test), verbose=2, callbacks=[keras.callbacks.TensorBoard()])

    score = model.evaluate(X_test, Y_test, verbose=0)

    print("loss: ", score[0])
    print("acc: ", score[1])