Example #1
def train_cifar10():
    (x_train, y_train), (x_test, y_test) = load_cifar10()

    idg = ImageDataGenerator(
        horizontal_flip=True,
        height_shift_range=4,
        width_shift_range=4,
        fill_mode='reflect'
    )

    idg.fit(x_train)

    n = 16
    k = 8
    mdl = create_wide_residual_network(x_train.shape[1:], 10, n, k)
    mdl.compile(SGDTorch(lr=.1, momentum=0.9, nesterov=True), 'categorical_crossentropy', ['acc'])

    lr_cb = LearningRateScheduler(lambda e: 0.1 * (0.2 ** (e >= 160 and 3 or e >= 120 and 2 or e >= 60 and 1 or 0)))

    batch_size = 128
    mdl.fit_generator(
        generator=idg.flow(x_train, to_categorical(y_train), batch_size=batch_size),
        epochs=200,
        validation_data=(idg.standardize(x_test), to_categorical(y_test)),
        callbacks=[lr_cb]
    )
    mdl.save_weights('cifar10_WRN_{}-{}.h5'.format(n, k))
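# The lambda above is dense; a readable equivalent of the same step schedule
# (Keras passes 0-indexed epochs to LearningRateScheduler; `step_lr` is a
# sketch, not part of the source):
def step_lr(epoch):
    if epoch >= 160:
        return 0.1 * 0.2 ** 3
    if epoch >= 120:
        return 0.1 * 0.2 ** 2
    if epoch >= 60:
        return 0.1 * 0.2
    return 0.1
# LearningRateScheduler(step_lr) behaves identically to the lambda in lr_cb.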
Example #2
def build_cifar10(model_state_dict=None, optimizer_state_dict=None, **kwargs):
    epoch = kwargs.pop('epoch')
    ratio = kwargs.pop('ratio')
    train_queue, valid_queue = utils.load_cifar10(args.child_batch_size, args.child_cutout_size)  # subset random sample

    model = NASWSNetworkCIFAR(10, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob,
                              args.child_drop_path_keep_prob,
                              args.child_use_aux_head, args.steps)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.child_lr_max,
        momentum=0.9,
        weight_decay=args.child_l2_reg,
    )
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.child_epochs, args.child_lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
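# A hypothetical resume flow (the checkpoint keys and the `ratio` value are
# illustrative assumptions, not from the source):
# state = torch.load('checkpoint.pt')
# train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = \
#     build_cifar10(model_state_dict=state['model'],
#                   optimizer_state_dict=state['optimizer'],
#                   epoch=state['epoch'], ratio=0.9)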
Example #3
def transformation_cifar10_vs_tinyimagenet():
    _, (x_test, y_test) = load_cifar10()
    x_test_out = load_tinyimagenet('/home/izikgo/Imagenet_resize/Imagenet_resize/')

    transformer = Transformer(8, 8)
    n = 16
    k = 8
    base_mdl = create_wide_residual_network(x_test.shape[1:], 10, n, k)

    transformations_cls_out = Activation('softmax')(Dense(transformer.n_transforms)(base_mdl.get_layer(index=-3).output))

    mdl = Model(base_mdl.input, [base_mdl.output, transformations_cls_out])
    mdl.load_weights('cifar10_WRN_doublehead-transformations_{}-{}.h5'.format(n, k))

    scores_mdl = Model(mdl.input, mdl.output[1])
    x_test_all = np.concatenate((x_test, x_test_out))
    preds = np.zeros((len(x_test_all), transformer.n_transforms))
    for t in range(transformer.n_transforms):
        preds[:, t] = scores_mdl.predict(transformer.transform_batch(x_test_all, [t] * len(x_test_all)),
                                         batch_size=128)[:, t]

    labels = np.concatenate((np.ones(len(x_test)), np.zeros(len(x_test_out))))
    scores = preds.mean(axis=-1)

    save_roc_pr_curve_data(scores, labels, 'cifar10-vs-tinyimagenet_transformations.npz')
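    # To read the saved curve back, a minimal sketch (assumes scikit-learn, and
    # that the .npz stores the arrays under these key names -- check data.files):
    # data = np.load('cifar10-vs-tinyimagenet_transformations.npz')
    # from sklearn.metrics import roc_auc_score
    # print(roc_auc_score(data['labels'], data['scores']))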
Example #4
def test(args):
    batch_size = args.batch_size
    classifier_weight = args.weight

    # load test dataset
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    X_train, y_train, X_test, y_test = utils.load_cifar10(cifar10_path)

    assert X_test.shape[0] == y_test.shape[0]
    print(f"Dataset (test) size = {y_test.shape[0]}")

    test_data = utils.MyDataset(x=X_test, y=y_test, transform=transform)
    #test_data = utils.MyDataset(x=X_train, y=y_train, transform=transform)
    test_loader = DataLoader(test_data,
                             batch_size,
                             shuffle=False,
                             num_workers=1)

    # model, optimizer, loss function
    model = Classifier().to(device)
    model.load_state_dict(torch.load(classifier_weight))

    # test
    correct = 0
    total = 0
    y_pred = []
    with torch.no_grad():
        for i, data in enumerate(test_loader, 0):
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            # forward
            outputs, _ = model(inputs)

            _, predicted_labels = torch.max(outputs.data, 1)

            total += labels.size(0)
            correct += (predicted_labels == labels).sum().item()

            y_pred += predicted_labels.cpu().numpy().tolist()

    print(f"Accuracy: {100 * correct / total:.2f}%")

    y_pred = np.array(y_pred)
    for i, label in enumerate(CIFAR10_CLASSES):
        correct = (y_test == i) & (y_pred == y_test)
        class_accuracy = 100 * correct.sum() / (y_test == i).sum()
        print(f"{label}: {class_accuracy:.1f}%")

    confusion_matrix = torch.zeros(len(CIFAR10_CLASSES), len(CIFAR10_CLASSES))
    for t, p in zip(y_test, y_pred):
        confusion_matrix[t, p] += 1
    print(confusion_matrix)
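    # The same per-class accuracies can be read off the confusion matrix:
    # the diagonal holds correct counts and each row sums to the class total.
    print(confusion_matrix.diag() / confusion_matrix.sum(dim=1))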
Example #5
def train_cifar10_transformations():
    (x_train, y_train), _ = load_cifar10()

    transformer = Transformer(8, 8)

    def data_gen(x, y, batch_size):
        while True:
            ind_permutation = np.random.permutation(len(x))
            for b_start_ind in range(0, len(x), batch_size):
                batch_inds = ind_permutation[b_start_ind:b_start_ind + batch_size]
                x_batch = x[batch_inds]
                y_batch = y[batch_inds].flatten()

                if K.image_data_format() == 'channels_first':
                    x_batch = np.transpose(x_batch, (0, 2, 3, 1))

                y_t_batch = np.random.randint(0, transformer.n_transforms, size=len(x_batch))

                x_batch = transformer.transform_batch(x_batch, y_t_batch)

                if K.image_data_format() == 'channels_first':
                    x_batch = np.transpose(x_batch, (0, 3, 1, 2))

                yield (x_batch, [to_categorical(y_batch, num_classes=10), to_categorical(y_t_batch, num_classes=transformer.n_transforms)])

    n = 16
    k = 8
    base_mdl = create_wide_residual_network(x_train.shape[1:], 10, n, k)

    transformations_cls_out = Activation('softmax')(Dense(transformer.n_transforms)(base_mdl.get_layer(index=-3).output))

    mdl = Model(base_mdl.input, [base_mdl.output, transformations_cls_out])

    mdl.compile(SGDTorch(lr=.1, momentum=0.9, nesterov=True), 'categorical_crossentropy', ['acc'])

    lr_cb = LearningRateScheduler(lambda e: 0.1 * (0.2 ** (e >= 160 and 3 or e >= 120 and 2 or e >= 60 and 1 or 0)))

    batch_size = 128
    mdl.fit_generator(
        generator=data_gen(x_train, y_train, batch_size=batch_size),
        steps_per_epoch=len(x_train) // batch_size,
        epochs=200,
        callbacks=[lr_cb]
    )
    mdl.save_weights('cifar10_WRN_doublehead-transformations_{}-{}.h5'.format(n, k))
Example #6
def main():
    '''
    Predict the model defined above.
    '''

    dataset_name = 'cifar_10'
    model_name = 'MobileNet'
    image_size = 128
    channels = 3
    batch_size = 1

    if (dataset_name == 'mnist'):
        (x_train,
         y_train), (x_val, y_val), (x_test,
                                    y_test) = load_mnist(image_size, channels)
    elif (dataset_name == 'cifar_10'):
        (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_cifar10(
            image_size, channels)
    elif (dataset_name == 'cifar_100'):
        (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_cifar100(
            image_size, channels)
    elif (dataset_name == 'fashion_mnist'):
        (x_train, y_train), (x_val, y_val), (x_test,
                                             y_test) = load_fashion_mnist(
                                                 image_size, channels)

    model = load_model(f'{dataset_name}_{model_name}')

    optimizer = optimizers.SGD(lr=0.001,
                               momentum=0.9,
                               decay=0.0001,
                               nesterov=False)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    start = time.time()
    predict = model.predict(x_val[:32], verbose=1, batch_size=batch_size)
    end = time.time()

    print(str(end - start))
Example #7
def main(unused_argv):
    # dataset = load_mnist()
    # x,y,x_test,y_test = dataset
    # train(dataset,'mnist')

    dataset = load_cifar10()
    x, y, x_test, y_test = dataset
    # y = tf.one_hot(y, depth=10)
    # input(y)
    train(dataset, 'cifar10')

    # later...

    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()

    loaded_model = tf.keras.models.model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.h5")
    # print("Loaded model from disk")
    # # predict(x, batch_size=None, verbose=0, steps=None, callbacks=None)
    print(loaded_model.predict(x))
Example #8
def save_data():
    print('-' * 10 + 'SAVING DATA TO DISK' + '-' * 10 + '\n')
    MODEL_PATH = './model/'
    DATA_PATH = './data/'

    if not os.path.exists(MODEL_PATH):
        os.makedirs(MODEL_PATH)

    if not os.path.exists(DATA_PATH):
        os.makedirs(DATA_PATH)

    # Choosing dataset
    if "local" in args.dataset:
        print("USING LOCAL DATASET")
        x, y, test_x, test_y = load_dataset(args.train_feat, args.train_label,
                                            args.test_feat, args.test_label)
    elif "mnist" in args.dataset:
        print("USING MNIST DATASET")
        x, y, test_x, test_y = load_mnist_keras()
    elif "cifar10" in args.dataset:
        print("USING CIFAR10 DATASET")
        x, y, test_x, test_y = load_cifar10()

    # x, y, test_x, test_y = load_cifar10()
    if test_x is None:
        print('Splitting train/test data with ratio {}/{}'.format(
            1 - args.test_ratio, args.test_ratio))
        x, test_x, y, test_y = train_test_split(x,
                                                y,
                                                test_size=args.test_ratio,
                                                stratify=y)

    # need to partition target and shadow model data
    assert len(x) > 2 * args.target_data_size

    target_data_indices, shadow_indices = get_data_indices(
        len(x), target_train_size=args.target_data_size)
    np.savez(MODEL_PATH + 'data_indices.npz', target_data_indices,
             shadow_indices)

    # target model's data
    print('Saving data for target model')
    train_x, train_y = x[target_data_indices], y[target_data_indices]
    size = len(target_data_indices)
    if size < len(test_x):
        test_x = test_x[:size]
        test_y = test_y[:size]
    # save target data
    np.savez(DATA_PATH + 'target_data.npz', train_x, train_y, test_x, test_y)

    # shadow model's data
    target_size = len(target_data_indices)
    shadow_x, shadow_y = x[shadow_indices], y[shadow_indices]
    shadow_indices = np.arange(len(shadow_indices))

    for i in range(args.n_shadow):
        print('Saving data for shadow model {}'.format(i))
        shadow_i_indices = np.random.choice(shadow_indices,
                                            2 * target_size,
                                            replace=False)
        shadow_i_x, shadow_i_y = shadow_x[shadow_i_indices], shadow_y[
            shadow_i_indices]
        train_x, train_y = shadow_i_x[:target_size], shadow_i_y[:target_size]
        test_x, test_y = shadow_i_x[target_size:], shadow_i_y[target_size:]
        np.savez(DATA_PATH + 'shadow{}_data.npz'.format(i), train_x, train_y,
                 test_x, test_y)
Example #9
def train_cifar10():
    logging.info("Args = %s", args)
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)

    global_step = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32)
    epoch = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32)
    best_acc_top1 = tf.Variable(initial_value=0.0,
                                trainable=False,
                                dtype=tf.float32)

    ################################################ model setup #######################################################
    train_ds, test_ds = utils.load_cifar10(args.batch_size, args.cutout_size)
    total_steps = int(np.ceil(50000 / args.batch_size)) * args.epochs

    model = NASNetworkCIFAR(classes=10,
                            reduce_distance=args.cells,
                            num_nodes=args.nodes,
                            channels=args.channels,
                            keep_prob=args.keep_prob,
                            drop_path_keep_prob=args.drop_path_keep_prob,
                            use_aux_head=args.use_aux_head,
                            steps=total_steps,
                            arch=args.arch)

    temp_ = tf.random.uniform((64, 32, 32, 3),
                              minval=0,
                              maxval=1,
                              dtype=tf.float32)
    temp_ = model(temp_, step=1, training=True)
    model.summary()
    model_size = utils.count_parameters_in_MB(model)
    print("param size = {} MB".format(model_size))
    logging.info("param size = %fMB", model_size)

    criterion = keras.losses.CategoricalCrossentropy(from_logits=True)
    learning_rate = keras.experimental.CosineDecay(
        initial_learning_rate=args.initial_lr,
        decay_steps=total_steps,
        alpha=0.0001)
    # learning_rate = keras.optimizers.schedules.ExponentialDecay(
    #     initial_learning_rate=args.initial_lr, decay_steps=total_steps, decay_rate=0.99, staircase=False, name=None
    # )
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)

    ########################################## restore checkpoint ######################################################
    if args.train_from_scratch:
        utils.clean_dir(args.model_dir)

    checkpoint_path = os.path.join(args.model_dir, 'checkpoints')
    ckpt = tf.train.Checkpoint(model=model,
                               optimizer=optimizer,
                               global_step=global_step,
                               epoch=epoch,
                               best_acc_top1=best_acc_top1)
    ckpt_manager = tf.train.CheckpointManager(ckpt,
                                              checkpoint_path,
                                              max_to_keep=3)
    # if a checkpoint exists, restore the latest checkpoint.
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Latest checkpoint restored!!')

    ############################################# training process #####################################################
    acc_train_result = []
    loss_train_result = []
    acc_test_result = []
    loss_test_result = []

    while epoch.numpy() < args.epochs:
        print('epoch {} lr {}'.format(epoch.numpy(),
                                      optimizer._decayed_lr(tf.float32)))

        train_acc, train_loss, step = train(train_ds,
                                            model,
                                            optimizer,
                                            global_step,
                                            criterion,
                                            classes=10)
        test_acc, test_loss = valid(test_ds, model, criterion, classes=10)

        acc_train_result.append(train_acc)
        loss_train_result.append(train_loss)
        acc_test_result.append(test_acc)
        loss_test_result.append(test_loss)

        logging.info('epoch %d lr %e', epoch.numpy(),
                     optimizer._decayed_lr(tf.float32))
        logging.info(acc_train_result)
        logging.info(loss_train_result)
        logging.info(acc_test_result)
        logging.info(loss_test_result)

        is_best = False
        if test_acc > best_acc_top1:
            best_acc_top1.assign(test_acc)  # update the checkpointed tf.Variable in place
            is_best = True
        epoch.assign_add(1)
        if (epoch.numpy() + 1) % 1 == 0:  # "% 1" is always 0, so this saves every epoch
            ckpt_save_path = ckpt_manager.save()
            print('Saving checkpoint for epoch {} at {}'.format(
                epoch.numpy() + 1, ckpt_save_path))
        if is_best:
            pass

    utils.plot_single_list(acc_train_result,
                           x_label='epochs',
                           y_label='acc',
                           file_name='acc_train')
    utils.plot_single_list(loss_train_result,
                           x_label='epochs',
                           y_label='loss',
                           file_name='loss_train')
    utils.plot_single_list(acc_test_result,
                           x_label='epochs',
                           y_label='acc',
                           file_name='acc_test')
    utils.plot_single_list(loss_test_result,
                           x_label='epochs',
                           y_label='loss',
                           file_name='loss_test')
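    # Note: optimizer._decayed_lr is private TF API; since learning_rate is a
    # schedule object here, the current value can also be read publicly as
    # optimizer.learning_rate(optimizer.iterations).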
Example #10
import utils
import config
import numpy as np
import cupy as cp

print "Loading CIFAR10" if config.fargs[
    'dataset'] == "cifar10" else "Loading Fashion MNIST"
X_train, X_test = utils.load_cifar10(
    "data"
) if config.fargs['dataset'] == "cifar10" else utils.load_fashion_mnist("data")
X_train = (X_train / 255.0).astype(np.float32)
X_test = (X_test / 255.0).astype(np.float32)
mean = X_train.mean(axis=(0, 2, 3))
std = X_train.std(axis=(0, 2, 3))
X_train = (X_train - mean[:, None, None]) / std[:, None, None]
X_test = (X_test - mean[:, None, None]) / std[:, None, None]
X_train_flip = np.flip(X_train, 3)
X_test_flip = np.flip(X_test, 3)
X_train = cp.asarray(X_train).reshape(X_train.shape[0], X_train.shape[1],
                                      config.pixel * config.pixel)
X_test = cp.asarray(X_test).reshape(X_test.shape[0], X_test.shape[1],
                                    config.pixel * config.pixel)
X_train_flip = cp.asarray(X_train_flip).reshape(X_train_flip.shape[0],
                                                X_train_flip.shape[1],
                                                config.pixel * config.pixel)
X_test_flip = cp.asarray(X_test_flip).reshape(X_test_flip.shape[0],
                                              X_test_flip.shape[1],
                                              config.pixel * config.pixel)


def featurize(st, ed, train, flip=False):
    # body restored from the identical helper in Example #15 below
    if flip:
        return X_train_flip[st:ed] if train else X_test_flip[st:ed]
    else:
        return X_train[st:ed] if train else X_test[st:ed]
Example #11
def train(args):
    num_epoch = args.epoch
    lr = args.learning_rate
    batch_size = args.batch_size
    weight = args.weight
    alpha = args.alpha

    # preprocessing
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Pad(padding=4, padding_mode="reflect"),
        transforms.RandomCrop((32, 32)),
        transforms.ColorJitter(),
        #transforms.RandomRotation(degrees=20),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        transforms.RandomErasing(),
    ])

    # load training dataset
    X_train, y_train, _, _ = utils.load_cifar10(cifar10_path)

    assert X_train.shape[0] == y_train.shape[0]
    print(f"Dataset size (train) = {y_train.shape[0]}")

    train_data = utils.MyDataset(x=X_train, y=y_train, transform=transform)

    # weighted sampling
    weighted_sampler = utils.set_weighted_sampler(label_data=y_train)
    train_loader = DataLoader(train_data,
                              batch_size,
                              shuffle=False,
                              sampler=weighted_sampler,
                              num_workers=2)

    # normal sampling
    #train_loader = DataLoader(train_data, batch_size, shuffle=True, num_workers=2)

    # model, optimizer, loss function
    model = Classifier().to(device)
    if len(weight) > 0:
        model.load_state_dict(torch.load(weight))

    # optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=learning_rate_schedule)

    # loss function
    cross_entropy_loss = nn.CrossEntropyLoss().to(device)
    mse_loss = nn.MSELoss().to(device)

    # train
    train_loss = []
    for epoch in range(num_epoch):
        train_tmp_loss = []
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            # forward
            outputs, autoencoder_outputs = model(inputs)

            # calc categorical cross entropy
            loss1 = cross_entropy_loss(outputs, labels)
            # calc mean squared loss
            loss2 = mse_loss(inputs, autoencoder_outputs)

            loss = loss1 + alpha * loss2

            # backpropagate losses
            loss.backward()
            train_tmp_loss.append(loss.item() * len(inputs))
            optimizer.step()

            if i % 100 == 99:
                print(
                    f"epoch: {epoch+1}, batch: {i+1}, loss: {loss.item():.3f}")

        train_loss.append(sum(train_tmp_loss) / y_train.shape[0])

        # adjust learning rate according to the schedule
        scheduler.step()

        # save parameters
        weight_path = pathlib.Path(f"weight/epoch_{epoch+1:03d}_{alpha}.pth")
        torch.save(model.state_dict(), weight_path)

        print(train_loss)
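# learning_rate_schedule (used by LambdaLR above) is defined elsewhere in the
# project; a plausible stand-in (an assumption) must return a multiplicative
# factor on the base lr, e.g.:
# def learning_rate_schedule(epoch):
#     return 0.1 ** (epoch // 50)  # cut the lr 10x every 50 epochs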
Example #12
    if ARGS.dataset == 'mnist':
        from utils import load_mnist

        _, _, adv_testloader, input_shape, n_classes = load_mnist(
            ARGS.batch_size,
            ARGS.data_dir,
            augmentation=False,
            stddev=0.0,
            adv_subset=ARGS.samples,
            workers=0)

        from models import LeNet as Model

    elif ARGS.dataset == 'cifar10':
        from utils import load_cifar10

        _, _, adv_testloader, input_shape, n_classes = load_cifar10(
            ARGS.batch_size,
            ARGS.data_dir,
            augmentation=False,
            stddev=0.0,
            adv_subset=ARGS.samples,
            workers=0)

        from models import ResNet20_v2 as Model

    else:
        raise NotImplementedError('Only MNIST and CIFAR10 are supported.')

    # Start evaluation
    eval_robustness(ARGS)
Example #13
dataset_model = 'cifar_10'
image_size = 128
channels = 3
n_classes = 10
epochs = 300
batch_size = 32
layers_conv, layers_hidden = 3, 1

if (dataset_model == 'mnist'):
    (x_train, y_train), (x_val,
                         y_val), (x_test,
                                  y_test) = load_mnist(image_size, channels)
elif (dataset_model == 'cifar_10'):
    (x_train, y_train), (x_val,
                         y_val), (x_test,
                                  y_test) = load_cifar10(image_size, channels)
elif (dataset_model == 'cifar_100'):
    (x_train,
     y_train), (x_val, y_val), (x_test,
                                y_test) = load_cifar100(image_size, channels)
elif (dataset_model == 'fashion_mnist'):
    (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_fashion_mnist(
        image_size, channels)

#print('x_train shape:', x_train.shape)
#print('x_val shape:', x_val.shape)
#print('x_test shape:', x_test.shape)
#print('y_train shape:', y_train.shape)
#print('y_val shape:', y_val.shape)
#print('y_test shape:', y_test.shape)
#print('Loaded Images ', type(x_train[0][0][0][0]))
Example #14
                        type=float,
                        default=40,
                        help='Iterations of PGD attack')

    ARGS, unparsed = parser.parse_known_args()

    # Load dataset and specify model
    if ARGS.dataset == 'mnist':
        from utils import load_mnist

        trainloader, testloader, adv_testloader, input_shape, n_classes = load_mnist(
            ARGS.batch_size, ARGS.data_dir, ARGS.noise, ARGS.stddev,
            ARGS.adv_subset)

        from models import LeNet as Model

    elif ARGS.dataset == 'cifar10':
        from utils import load_cifar10

        trainloader, testloader, adv_testloader, input_shape, n_classes = load_cifar10(
            ARGS.batch_size, ARGS.data_dir, ARGS.noise, ARGS.stddev,
            ARGS.adv_subset)

        from models import ResNet20_v2 as Model

    else:
        raise NotImplementedError('Only MNIST and CIFAR10 are supported.')

    # Start training
    train(ARGS)
Example #15
import utils
import config
import numpy as np
import cupy as cp

X_train, X_test = utils.load_cifar10("data") #if config.fargs['dataset'] == "cifar10" else utils.load_cifar100("data")
X_train = (X_train / 255.0).astype(np.float32) 
X_test = (X_test / 255.0).astype(np.float32) 
mean = X_train.mean(axis = (0, 2, 3))
std = X_train.std(axis = (0, 2, 3))
X_train = (X_train - mean[:, None, None]) / std[:, None, None]
X_test = (X_test - mean[:, None, None]) / std[:, None, None]
X_train_flip = np.flip(X_train, 3)
X_test_flip = np.flip(X_test, 3)

X_train = cp.pad(cp.asarray(X_train).reshape(50000, 3, 32, 32), ((0, 0), (0, 0), (1, 1), (1, 1)), mode = "constant").reshape(50000, 3, 34 * 34)
X_test = cp.pad(cp.asarray(X_test).reshape(10000, 3, 32, 32), ((0, 0), (0, 0), (1, 1), (1, 1)), mode = "constant").reshape(10000, 3, 34 * 34)
X_train_flip = cp.pad(cp.asarray(X_train_flip).reshape(50000, 3, 32, 32), ((0, 0), (0, 0), (1, 1), (1, 1)), mode = "constant").reshape(50000, 3, 34 * 34)
X_test_flip = cp.pad(cp.asarray(X_test_flip).reshape(10000, 3, 32, 32), ((0, 0), (0, 0), (1, 1), (1, 1)), mode = "constant").reshape(10000, 3, 34 * 34)

def featurize(st, ed, train, flip = False):
    if flip:
        return X_train_flip[st:ed] if train else X_test_flip[st:ed] 
    else:
        return X_train[st:ed] if train else X_test[st:ed]
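# Quick sanity check of the padded, flattened shapes produced above:
assert X_train.shape == (50000, 3, 34 * 34)
assert X_test.shape == (10000, 3, 34 * 34)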
Example #16
# For creating the model
from tensorflow.keras import utils, Sequential, optimizers
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dropout, LeakyReLU
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.regularizers import l2

# Get the load_data() and save_keras_model functions
from utils import load_cifar10, save_keras_model, plot_history

# Load the test CIFAR-10 data
(x_train, y_train), (x_test, y_test) = load_cifar10()

# Normalize to 0-1 range
x_train = x_train / 255.
x_test = x_test / 255.
# Pre-process targets
n_classes = 3
y_train = utils.to_categorical(y_train, n_classes)
y_test = utils.to_categorical(y_test, n_classes)

# Build model
model = Sequential()
''' # ------ THIS PART IS WITH NORMAL ReLU --------
model.add(Conv2D(8, (5, 5), strides = (1, 1), activation = 'relu', input_shape = (32, 32, 3)))
'''
# Add the first convolutional layer with 8 filters of size 5x5, stride of 1x1 and LeakeReLU activation (for bonus)
model.add(
    Conv2D(8, (5, 5),
           strides=(1, 1),
           activation='linear',
Example #17
def _load_cifar10(self):
    # Convert Keras' channels-last (N, H, W, C) arrays to channels-first
    # (N, C, H, W); 'f' and 'i' are the NumPy codes for float32 and int32.
    (x_train, y_train), (x_test, y_test) = load_cifar10(to_categoric=False)
    self.x_train = np.transpose(x_train.astype('f'), (0, 3, 1, 2))
    self.y_train = y_train.flatten().astype('i')
    self.x_test = np.transpose(x_test.astype('f'), (0, 3, 1, 2))
    self.y_test = y_test.flatten().astype('i')
Example #18
def main(_):
    print('BEGIN Cifar10 ...')
    cifar10Net = Cifar10Net()

    # Get data
    print('Getting/Transforming Data.')
    images, targets, test_images, test_targets = load_cifar10(cfg.data_dir)

    # Declare Model
    print('Creating the CIFAR10 Model.')
    with tf.variable_scope('model_definition') as scope:
        # Declare the training network model
        model_output = cifar10Net.cifar_cnn_model(images, cfg.batch_size)
        # This is very important: we must set the scope to REUSE the variables;
        # otherwise, building the test network would create new random
        # variables and we would get random evaluations on the test batches.
        scope.reuse_variables()
        test_output = cifar10Net.cifar_cnn_model(test_images, cfg.batch_size)

    # Declare loss function
    print('Declare Loss Function.')
    loss = cifar10Net.cifar_loss(model_output, targets)

    # Create accuracy function
    accuracy = cifar10Net.accuracy_of_batch(test_output, test_targets)

    # Create training operations
    print('Creating the Training Operation.')
    generation_num = tf.Variable(0, trainable=False)
    train_op = cifar10Net.train_step(loss, generation_num)

    print('Initializing the Variables.')
    # Initialize Variables
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        # Initialize queue (This queue will feed into the model, so no placeholders necessary)
        tf.train.start_queue_runners(sess=sess)

        # Train CIFAR Model
        print('Starting Training')
        train_loss = []
        test_accuracy = []

        # Saving accuracy results
        path = cfg.result_dir + '/accuracy.csv'
        if not os.path.exists(cfg.result_dir):
            os.mkdir(cfg.result_dir)
        elif os.path.exists(path):
            os.remove(path)

        fd_results = open(path, 'w')
        fd_results.write('Epoch,TrainLoss,TestAcc,CostTime\n')

        # start with a fresh, empty model directory
        if os.path.exists(cfg.model_dir):
            shutil.rmtree(cfg.model_dir)
        os.mkdir(cfg.model_dir)

        for epoch in range(cfg.epochs):
            startTime = time.time()
            _, loss_value = sess.run([train_op, loss])

            if (epoch + 1) % cfg.train_sum_freq == 0:
                train_loss.append(loss_value)
                output = 'Epoch {}: Loss = {:.5f}'.format((epoch + 1),
                                                          loss_value)
                print(output)

            if (epoch + 1) % cfg.test_sum_freq == 0:
                [temp_accuracy] = sess.run([accuracy])
                test_accuracy.append(temp_accuracy)
                acc_output = ' --- Test Accuracy = {:.2f}%.'.format(
                    100. * temp_accuracy)
                print(acc_output)

                endTime = time.time()
                costTime = (endTime - startTime) * 1000
                fd_results.write(
                    str(epoch + 1) + ',' + str(loss_value) + ',' +
                    str(temp_accuracy) + ',' + str(costTime) + '\n')
                fd_results.flush()

            if epoch % cfg.save_freq == 0:
                saver.save(sess, cfg.model_dir + '/model_epoch_%04d' % (epoch))

        fd_results.close()
        tf.logging.info('Training done')

        if cfg.if_showplt:
            # Print loss and accuracy
            # Matlotlib code to plot the loss and accuracies
            eval_indices = range(0, cfg.epochs, cfg.test_sum_freq)
            output_indices = range(0, cfg.epochs, cfg.train_sum_freq)
            # Plot loss over time
            plt.plot(output_indices, train_loss, 'k-')
            plt.title('Softmax Loss per Generation')
            plt.xlabel('Generation')
            plt.ylabel('Softmax Loss')
            plt.show()

            # Plot accuracy over time
            plt.plot(eval_indices, test_accuracy, 'k-')
            plt.title('Test Accuracy')
            plt.xlabel('Generation')
            plt.ylabel('Accuracy')
            plt.show()

    print('END Cifar10')
Example #19
"""

import numpy as np
from layers import Convolution, MaxPooling, ReLU, Reshape, Dense, Softmax, Dropout, BatchNorm
from network import Network
from utils import IncrementalAverage, VectorCrossEntropy, Adam, load_cifar10

# Path to the unzipped CIFAR-10 folder
DATA_PATH = ""
# Will print progress message after number of batches is completed.
LOG_FREQ = 100
STARTING_EPOCH = 1
EPOCHS = 100
BATCH_SIZE = 50

training_data, training_labels, test_data, test_labels, _ = load_cifar10(
    DATA_PATH, normalize=True)


def make_batch(data, label, size):
    for start, end in zip(range(0, len(data), size),
                          range(size,
                                len(data) + 1, size)):
        yield data[start:end, ...], label[start:end, ...]

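# make_batch yields consecutive, equally-sized slices and silently drops any
# trailing remainder; a quick illustration (`_demo` is not from the source):
_demo = np.arange(10).reshape(10, 1)
assert [xb.ravel().tolist() for xb, _ in make_batch(_demo, _demo, 4)] == \
    [[0, 1, 2, 3], [4, 5, 6, 7]]  # the last 2 samples are dropped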

network = Network(
    Convolution(input_shape=(32, 32),
                input_depth=3,
                n_filters=32,
                filter_dim=(3, 3),
                stride=(1, 1),
Example #20
def _transformations_experiment(dataset_load_fn, dataset_name,
                                single_class_ind, gpu_q):
    gpu_to_use = gpu_q.get()
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_to_use

    (x_train, y_train), (x_test, y_test) = dataset_load_fn()

    if dataset_name in ['cats-vs-dogs']:
        transformer = Transformer(16, 16)
        n, k = (16, 8)
    else:
        transformer = Transformer(8, 8)
        n, k = (10, 4)
    #mdl = create_wide_residual_network(x_train.shape[1:], transformer.n_transforms, n, k)
    '''mdl.compile('adam',
                'categorical_crossentropy',
                ['acc'])'''

    x_train_task = x_train[y_train.flatten() == single_class_ind]
    #x_train_task =     x_train_task[0:15][:][:][:]
    transformations_inds = np.tile(np.arange(transformer.n_transforms),
                                   len(x_train_task))
    x_train_task_transformed = transformer.transform_batch(
        np.repeat(x_train_task, transformer.n_transforms, axis=0),
        transformations_inds)
    batch_size = 32
    '''mdl.fit(x=x_train_task_transformed, y=to_categorical(transformations_inds),
                batch_size=batch_size, epochs=int(np.ceil(200/transformer.n_transforms)))
    mdl_weights_name = '{}_transformations_{}_weights.h5'.format(dataset_name,
                                                               get_class_name_from_index(single_class_ind, dataset_name))
    #mdl.load_weights('cifar10/'+mdl_weights_name)

    #################################################################################################
    # simplified normality score
    #################################################################################################
    preds = np.zeros((len(x_test), transformer.n_transforms))
    for t in [1]:#range(1):
         preds[:, t] = mdl.predict(transformer.transform_batch(x_test, [t] * len(x_test)),
                                   batch_size=batch_size)[:, t]
    
    labels = y_test.flatten() == single_class_ind
    scores = preds.mean(axis=-1)
    #print("Accuracy : "+ str(scores))
    #################################################################################################'''
    dset = CustomDataset(x_train_task_transformed, transformations_inds)
    trainLoader = DataLoader(dset, batch_size=32, shuffle=True)
    ce_criterion = nn.CrossEntropyLoss()
    C = wrn.WideResNet(28, num_classes=72, dropout_rate=0,
                       widen_factor=10).cuda()
    C = nn.DataParallel(C)
    optimizer_c = optim.Adam(C.parameters(), lr=0.001, betas=(0.9, 0.999))
    C.train(mode=True)
    '''for epoch in range(int(1)):
        for (inputs, labels) in trainLoader:
            inputs = inputs.permute(0,3,2,1)
            if True:
                inputs = inputs.cuda()
                labels = labels.cuda()
            act,_ = C(inputs)
            loss_cc = ce_criterion(act, labels)
            optimizer_c.zero_grad()
            loss_cc.backward()
            optimizer_c.step()
            #running_cc += loss_cc.data


            running_cc = 0.0
    torch.save(C.cpu().state_dict(),  'out.pth')'''

    C = C.cpu()
    C.load_state_dict(torch.load('out.pth'))
    (x_train, y_train), (x_test, y_test) = load_cifar10()
    transformer = Transformer(8, 8)
    glabels = y_test.flatten() == single_class_ind
    C.cuda().eval()
    scores = np.array([[]])
    features = []

    correct = 0
    total = 0
    preds = np.zeros((len(x_test), transformer.n_transforms))
    for t in [6, 6]:  #range(72):
        score = []
        x_test0 = transformer.transform_batch(x_test, [t] * len(x_test))
        dset = CustomDataset(x_test0, [t] * len(x_test))
        testLoader = DataLoader(dset, batch_size=128, shuffle=False)
        for i, (inputs, labels) in enumerate(testLoader):
            inputs = inputs.permute(0, 3, 2, 1)
            if True:
                inputs = inputs.cuda()
                labels = labels.cuda()
            act, f = C(inputs)
            features += f.detach().cpu().tolist()
            #act = torch.nn.functional.softmax(act, dim=1)
            score += act[:, t].detach().cpu().tolist()
        preds[:, t] = list(score)
        fpr, tpr, thresholds = metrics.roc_curve(glabels, score)
        AUC = metrics.auc(fpr, tpr)
        print('AUROC: ' + str(AUC))

    scores = preds.sum(axis=1)
    fpr, tpr, thresholds = metrics.roc_curve(glabels, scores)
    AUC = metrics.auc(fpr, tpr)
    print('AUROC: ' + str(AUC))

    def calc_approx_alpha_sum(observations):
        N = len(observations)
        f = np.mean(observations, axis=0)

        return (N * (len(f) - 1) *
                (-psi(1))) / (N * np.sum(f * np.log(f)) -
                              np.sum(f * np.sum(np.log(observations), axis=0)))

    def inv_psi(y, iters=5):
        # initial estimate
        cond = y >= -2.22
        x = cond * (np.exp(y) + 0.5) + (1 - cond) * -1 / (y - psi(1))

        for _ in range(iters):
            x = x - (psi(x) - y) / polygamma(1, x)
        return x

    def fixed_point_dirichlet_mle(alpha_init, log_p_hat, max_iter=1000):
        alpha_new = alpha_old = alpha_init
        for _ in range(max_iter):
            alpha_new = inv_psi(psi(np.sum(alpha_old)) + log_p_hat)
            if np.sqrt(np.sum((alpha_old - alpha_new)**2)) < 1e-9:
                break
            alpha_old = alpha_new
        return alpha_new

    def dirichlet_normality_score(alpha, p):
        return np.sum((alpha - 1) * np.log(p), axis=-1)

    '''scores = np.zeros((len(x_test),))
    observed_data = x_train_task
    for t_ind in range(transformer.n_transforms):
        observed_dirichlet = mdl.predict(transformer.transform_batch(observed_data, [t_ind] * len(observed_data)),
                                         batch_size=64)
        log_p_hat_train = np.log(observed_dirichlet).mean(axis=0)

        alpha_sum_approx = calc_approx_alpha_sum(observed_dirichlet)
        alpha_0 = observed_dirichlet.mean(axis=0) * alpha_sum_approx

        mle_alpha_t = fixed_point_dirichlet_mle(alpha_0, log_p_hat_train)

        x_test_p = mdl.predict(transformer.transform_batch(x_test, [t_ind] * len(x_test)),
                               batch_size=64)
        scores += dirichlet_normality_score(mle_alpha_t, x_test_p)'''

    scores /= transformer.n_transforms
    labels = y_test.flatten() == single_class_ind
    r = (roc_auc_score(labels, scores))
    f = open("guru99.txt", "a")
    f.write(str(r) + "\n")
    f.close()
    '''res_file_name = '{}_transformations_{}_{}.npz'.format(dataset_name,
Example #21
with tf.control_dependencies(d_update_ops):
    d_optim = tf.train.AdamOptimizer(FLAGS.learning_rate,
                                     beta1=FLAGS.adam_beta1,
                                     beta2=FLAGS.adam_beta2).minimize(
                                         d_loss, var_list=d_vars)
with tf.control_dependencies(g_update_ops):
    g_optim = tf.train.AdamOptimizer(FLAGS.learning_rate,
                                     beta1=FLAGS.adam_beta1,
                                     beta2=FLAGS.adam_beta2).minimize(
                                         g_loss, var_list=g_vars)

# test
fake_images, _ = generator(zc, is_training=False, reuse=True)

# dataset
trainX, trainY, testX, testY = utils.load_cifar10(data_dir)
train_gen = utils.get_batch(trainX, trainY, FLAGS.batch_size)
test_gen = utils.get_batch(testX, testY, FLAGS.batch_size)

# for test
sample_z = np.random.normal(size=[FLAGS.batch_size, FLAGS.z_dim])
sample_c = np.random.uniform(0., 1., size=[FLAGS.batch_size, 2])


def get_inception_score(sess):
    fake_list = []
    for i in range(10):
        noise_z = np.random.normal(size=[100, FLAGS.z_dim])
        noise_c = np.random.uniform(0., 1., size=[100, 2])
        _fake, = sess.run([fake_images], feed_dict={z: noise_z, c: noise_c})
        fake_list.append(_fake)
Example #22
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='Embedding extraction module')
    parser.add_argument('--net',
                        default='lenet5',
                        help='DNN name (default=lenet5)')
    parser.add_argument('--root',
                        default='data',
                        help='rootpath (default=data)')
    parser.add_argument('--dataset',
                        default='imagenet',
                        help='dataset (default=imagenet)')
    parser.add_argument('--tensor_folder',
                        default='tensor_pub',
                        help='tensor_folder (default=tensor_pub)')
    parser.add_argument('--layer-info',
                        default='layer_info',
                        help='layer-info (default=layer_info)')
    parser.add_argument('--gpu-id',
                        default='1',
                        type=str,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('-j',
                        '--workers',
                        default=8,
                        type=int,
                        metavar='N',
                        help='number of data loading workers (default: 8)')
    parser.add_argument('-b',
                        '--batch-size',
                        default=1,
                        type=int,
                        metavar='N',
                        help='should be 1')
    args = parser.parse_args()
    use_cuda = True
    # Define what device we are using
    print("CUDA Available: ", torch.cuda.is_available())

    root = args.root
    dataset = args.dataset
    net = args.net
    tensor_folder = args.tensor_folder
    layers, cols = utils.get_layer_info(root, dataset, net, args.layer_info)
    print(dataset)
    print(root, dataset, net)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    if dataset.startswith('imagenet'):
        if net == 'resnet50':
            model = utils.load_resnet50_model(True)
        elif net == 'vgg16':
            model = utils.load_vgg_model(pretrained=True, net=net)
        else:
            model = utils.load_resnet_model(pretrained=True)

        sub_models = utils.load_imagenet_sub_models(
            utils.get_model_root(root, dataset, net), layers, net, cols)
        # sub_models = utils.load_resnet_sub_models(utils.get_model_root(root,
        # dataset, net), layers, net)
        test_loader = utils.load_imagenet_test(args.batch_size, args.workers)
        anatomy(model, sub_models, test_loader, root, dataset, tensor_folder,
                net, layers)

    else:  # cifar10, cifar100, mnist
        device = torch.device("cuda" if (
            use_cuda and torch.cuda.is_available()) else "cpu")
        nclass = 10
        if dataset == 'cifar100':
            nclass = 100
        model = utils.load_model(
            net, device, utils.get_pretrained_model(root, dataset, net),
            dataset)
        weight_models = utils.load_weight_models(
            net, device, utils.get_model_root(root, dataset, net), layers,
            cols, nclass)
        if dataset == 'mnist':
            train_loader, test_loader = utils.load_mnist(
                utils.get_root(root, dataset, 'data', net))
        elif dataset == 'cifar10':
            train_loader, test_loader = utils.load_cifar10(
                utils.get_root(root, dataset, 'data', net))
        elif dataset == 'cifar100':
            train_loader, test_loader = utils.load_cifar100(
                utils.get_root(root, dataset, 'data', net))
        else:  #default mnist
            train_loader, test_loader = utils.load_mnist(
                utils.get_root(root, dataset, 'data', net))
        anatomy(model, weight_models, test_loader, root, dataset,
                tensor_folder, net, layers)
Example #23
def main():
    '''
    Train the model defined above.
    '''

    dataset_model = 'mnist'
    image_size = 128
    channels = 1
    n_classes = 10
    epochs = 3
    batch_size = 32

    layers_conv, layers_hidden = 16, 2

    if (dataset_model == 'mnist'):
        (x_train,
         y_train), (x_val, y_val), (x_test,
                                    y_test) = load_mnist(image_size, channels)
    elif (dataset_model == 'cifar_10'):
        (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_cifar10(
            image_size, channels)
    elif (dataset_model == 'cifar_100'):
        (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_cifar100(
            image_size, channels)
    elif (dataset_model == 'fashion_mnist'):
        (x_train, y_train), (x_val, y_val), (x_test,
                                             y_test) = load_fashion_mnist(
                                                 image_size, channels)

    print('x_train shape:', x_train.shape)
    print('x_val shape:', x_val.shape)
    print('x_test shape:', x_test.shape)
    print('y_train shape:', y_train.shape)
    print('y_val shape:', y_val.shape)
    print('y_test shape:', y_test.shape)
    print('Loaded Images ')

    #x_train, y_train = generate_aug(x_train, y_train)
    #train_generator = get_train_generator()

    # mobile_model = MobileNet('MobileNet', image_size, channels, n_classes)
    mobile_model = VGG16('VGG16', image_size, channels, n_classes)
    mobile_model.summary()

    mobile_model.compile()
    mobile_model.fit((x_train, y_train), (x_val, y_val), epochs, batch_size)
    '''
    loss_and_metrics = mobile_model.evaluate((x_train, y_train), batch_size = batch_size)
    print('\n\n[Evaluation on the train dataset]\n', loss_and_metrics, '\n\n')
    mobile_model.predict(x_train, y_train, batch_size = batch_size)

    
    loss_and_metrics = mobile_model.evaluate((x_val, y_val), batch_size = batch_size)
    print('[Evaluation on the val dataset]\n', loss_and_metrics, '\n\n')
    mobile_model.predict(x_val, y_val, batch_size = batch_size)
    
    loss_and_metrics = mobile_model.evaluate((x_test, y_test), batch_size = batch_size)
    print('[Evaluation on the test dataset]\n', loss_and_metrics, '\n\n')
    mobile_model.predict(x_test, y_test, batch_size = batch_size)
    '''
    mobile_model.save_model(dataset_model + "_teste")

    import numpy as np
    predictions_valid = mobile_model.get_model().predict(x_test,
                                                         batch_size=batch_size,
                                                         verbose=1)
    np.set_printoptions(precision=4)
    # np.set_printoptions(suppress=True)

    with open('files/Preds.txt', 'w+') as f:
        for i in predictions_valid:
            for j in i:
                f.write('%.4f ' % j)
            f.write("\n")

    with open('files/ConvLayersW.txt', 'a+') as file:
        file.write('%d\n' % (layers_conv))
    with open('files/HiddenLayersW.txt', 'a+') as file:
        file.write('%d\n' % (layers_hidden))

    with open('files/conf.txt', 'a+') as conf:
        id_layer = 8
        list_mark = [0] * len(mobile_model.get_model().layers)
        layers = mobile_model.get_model().layers[id_layer]
        busca_em_profun(mobile_model.get_model(), layers,
                        len(mobile_model.get_model().layers), conf, list_mark,
                        id_layer)

        conf.write('flatten 1 %s\n' % layers.name)

        layers = mobile_model.get_model().layers[10]
        with open('files/HiddenLayersW.txt', 'a+') as f:
            print_hidden_soft_layer_W(f, layers, conf)

        with open('files/HiddenLayersB.txt', 'a+') as f:
            print_hidden_soft_layer_b(f, layers)

        layers = mobile_model.get_model().layers[11]
        with open('files/SmrLayerW.txt', 'a+') as f:
            print_hidden_soft_layer_W(f, layers, conf)
        with open('files/SmrLayerB.txt', 'a+') as f:
            print_hidden_soft_layer_b(f, layers)

        conf.write('endconf 1 %s\n' % layers.name)
Example #24
def main():
    '''
    Train the model defined above.
    '''

    dataset_name = 'cifar_10'
    model_name = 'MobileNet'
    image_size = 224
    channels = 3
    n_classes = 10
    epochs = 40
    batch_size = 32

    layers_conv, layers_hidden = 14, 1

    if (dataset_name == 'mnist'):
        (x_train,
         y_train), (x_val, y_val), (x_test,
                                    y_test) = load_mnist(image_size, channels)
    elif (dataset_name == 'cifar_10'):
        (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_cifar10(
            image_size, channels)
    elif (dataset_name == 'cifar_100'):
        (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_cifar100(
            image_size, channels)
    elif (dataset_name == 'fashion_mnist'):
        (x_train, y_train), (x_val, y_val), (x_test,
                                             y_test) = load_fashion_mnist(
                                                 image_size, channels)

    print('x_train shape:', x_train.shape)
    print('x_val shape:', x_val.shape)
    print('x_test shape:', x_test.shape)
    print('y_train shape:', y_train.shape)
    print('y_val shape:', y_val.shape)
    print('y_test shape:', y_test.shape)
    print('Loaded Images ')

    #x_train, y_train = generate_aug(x_train, y_train)
    #train_generator = get_train_generator()

    #mobile_model = AlexNet(f"{dataset_name}_{model_name}", image_size, channels, n_classes)
    mobile_model = MobileNet(f"{dataset_name}_{model_name}", image_size,
                             channels, n_classes)
    #mobile_model = VGG16(f"{dataset_name}_{model_name}", image_size, channels, n_classes)
    #mobile_model = Inception(f"{dataset_name}_{model_name}", image_size, channels, n_classes)
    mobile_model.summary()

    mobile_model.compile()
    mobile_model.fit((x_train, y_train), (x_val, y_val), epochs, batch_size)
    '''
    loss_and_metrics = mobile_model.evaluate((x_train, y_train), batch_size = batch_size)
    print('\n\n[Evaluation on the train dataset]\n', loss_and_metrics, '\n\n')
    mobile_model.predict(x_train, y_train, batch_size = batch_size)

    
    loss_and_metrics = mobile_model.evaluate((x_val, y_val), batch_size = batch_size)
    print('[Evaluation on the val dataset]\n', loss_and_metrics, '\n\n')
    mobile_model.predict(x_val, y_val, batch_size = batch_size)
    
    loss_and_metrics = mobile_model.evaluate((x_test, y_test), batch_size = batch_size)
    print('[Evaluation on the test dataset]\n', loss_and_metrics, '\n\n')
    mobile_model.predict(x_test, y_test, batch_size = batch_size)
    '''
    mobile_model.save_model(f"{dataset_name}_{model_name}")

    mobile_model.get_model().load_weights(
        f"files/{dataset_name}_{model_name}.h5")
    mobile_model.compile()

    import numpy as np
    predictions_valid = mobile_model.get_model().predict(x_test,
                                                         batch_size=batch_size,
                                                         verbose=1)
    np.set_printoptions(precision=4)
    # np.set_printoptions(suppress=True)

    with open('files/Preds.txt', 'w+') as f:
        for i in predictions_valid:
            for j in i:
                f.write('%.4f ' % j)
            f.write("\n")

    with open('files/ConvLayersW.txt', 'a+') as file:
        file.write('%d\n' % (layers_conv))
    with open('files/HiddenLayersW.txt', 'a+') as file:
        file.write('%d\n' % (layers_hidden))

    c, h, s = 95, 97, 98
    #AlexNet 8, 10, 11
    #Mobilenet 95, 97, 98
    #Inception 87, 93, 96
    #VGG16 31, 33, 34
    with open('files/conf.txt', 'a+') as conf:
        id_layer = c
        list_mark = [0] * len(mobile_model.get_model().layers)
        layers = mobile_model.get_model().layers[id_layer]
        busca_em_profun(mobile_model.get_model(), layers,
                        len(mobile_model.get_model().layers), conf, list_mark,
                        id_layer)

        conf.write('flatten 1 %s\n' % layers.name)

        layers = mobile_model.get_model().layers[h]
        with open('files/HiddenLayersW.txt', 'a+') as f:
            print_hidden_soft_layer_W(f, layers, conf)
            #layers = mobile_model.get_model().layers[35]
            #print_hidden_soft_layer_W(f, layers, conf)

        layers = mobile_model.get_model().layers[h]
        with open('files/HiddenLayersB.txt', 'a+') as f:
            print_hidden_soft_layer_b(f, layers)
            #layers = mobile_model.get_model().layers[35]
            #print_hidden_soft_layer_b(f, layers)

        layers = mobile_model.get_model().layers[s]
        with open('files/SmrLayerW.txt', 'a+') as f:
            print_hidden_soft_layer_W(f, layers, conf)
        with open('files/SmrLayerB.txt', 'a+') as f:
            print_hidden_soft_layer_b(f, layers)

        conf.write('endconf 1 %s\n' % layers.name)
Example #25
def _load_cifar10(self):
    (self.x_train, self.y_train), (self.x_test,
                                   self.y_test) = load_cifar10()
""" Accuracy Comparison """

# import modules
import utils
from tensorflow.keras import utils as tf_utils
import numpy as np
from scipy.stats import norm, ttest_rel
from sklearn.metrics import roc_curve, auc, f1_score
import matplotlib.pyplot as plt

if __name__ == '__main__':

    # Load the test CIFAR-10 data
    (x_train, y_train), (x_test, y_test) = utils.load_cifar10()

    # Pre-processing
    # Normalize each pixel of each channel so that the range is [0, 1];
    # each pixel is represented by an integer value in the 0-255 range.
    x_train, x_test = x_train / 255., x_test / 255.

    # Create one-hot encoding of the labels;
    # pre-process targets in order to perform multi-class classification.
    n_classes = 3
    y_train = tf_utils.to_categorical(y_train, n_classes)
    y_test = tf_utils.to_categorical(y_test, n_classes)

    # Load the trained models
    model_task2 = utils.load_keras_model('../deliverable/nn_task2.h5')
    model_task1 = utils.load_keras_model('../deliverable/nn_task1.h5')

    # Predict on the given samples
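    # The snippet is truncated here; given the ttest_rel import above, a
    # plausible continuation (an assumption, not the source's code) would
    # compare per-sample correctness of the two models with a paired t-test:
    # acc1 = model_task1.predict(x_test).argmax(1) == y_test.argmax(1)
    # acc2 = model_task2.predict(x_test).argmax(1) == y_test.argmax(1)
    # print(ttest_rel(acc1, acc2))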
Example #27
File: main.py  Project: mozzielol/Report
    model.plot('res', name)

    return acc_test_d1, acc_test_d2, acc_test_d3


def save_acc(name, d):
    import json
    for i in range(3):
        path = './logs/permuted/acc{}/{}.txt'.format(str(i + 1), name)
        with open(path, 'w') as f:
            json.dump(d[i], f)


if __name__ == '__main__':
    #Train the model.
    train_task, test_task = load_cifar10()
    model = Model('cnn')
    model.val_data(test_task[0][0], test_task[0][1])
    model.fit(train_task[0][0], train_task[0][1])
    #model.set_fisher_data(train_task[0][0], train_task[0][1])
    model.fit(train_task[1][0], train_task[1][1])
    '''
    print('--'*10,'kal','--'*10)
    kal_d1,kal_d2,kal_d3 = train('kal')
    save_acc('kal',[kal_d1,kal_d2,kal_d3])

    print('--' * 10, 'nor', '--' * 10)
    nor_d1, nor_d2, nor_d3 = train('nor')
    save_acc('nor', [nor_d1, nor_d2, nor_d3])

Example #28
import argparse

import utils as utils
from models.model import ModelGAN

# Parsing
parser = argparse.ArgumentParser()
parser.add_argument("--load-model",
                    action='store_true',
                    help="Loads previous saved models",
                    default=False)
args = parser.parse_args()
load_model = args.load_model

data, _ = utils.load_cifar10()

model = ModelGAN(data)
if load_model:
    model.load_model()

model.compile_models()
model.train()