Example #1
    def train(self, flags):
        self.network.train()
        self.best_accuracy_test = -1

        for epoch in range(0, flags.epochs):
            for i, (images_train, labels_train) in enumerate(self.train_loader):

                # move the inputs and labels to the GPU
                inputs, labels = images_train.cuda(), labels_train.cuda()

                # forward pass
                outputs, _ = self.network(x=inputs)

                # compute the loss
                loss = self.loss_fn(outputs, labels)

                # zero the gradients before the backward pass
                self.optimizer.zero_grad()

                # backpropagate
                loss.backward()

                # update the parameters and the learning rate
                self.optimizer.step()
                self.scheduler.step()

                if epoch < 5 or epoch % 5 == 0:
                    print(
                        'epoch:', epoch, 'ite', i, 'total loss:', loss.cpu().item(), 'lr:',
                        self.scheduler.get_lr()[0])

                flags_log = os.path.join(flags.logs, 'loss_log.txt')
                write_log(str(loss.item()), flags_log)

            self.test_workflow(epoch, flags)
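
For context, every method in these examples receives its hyper-parameters through a single flags object. A usage sketch only, with flag names taken from the snippets and a hypothetical ModelBaseline trainer class standing in for whatever class owns setup() and train():

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--logs', type=str, default='logs/')
    flags = parser.parse_args()

    trainer = ModelBaseline(flags)  # hypothetical trainer exposing setup()/train()
    trainer.setup(flags)
    trainer.train(flags)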
Example #2
    def setup(self, flags):
        torch.backends.cudnn.deterministic = flags.deterministic
        print('torch.backends.cudnn.deterministic:', torch.backends.cudnn.deterministic)
        fix_all_seed(flags.seed)

        if flags.dataset == 'cifar10':
            num_classes = 10
        else:
            num_classes = 100

        if flags.model == 'densenet':
            self.network = densenet(num_classes=num_classes)
        elif flags.model == 'wrn':
            self.network = WideResNet(flags.layers, num_classes, flags.widen_factor, flags.droprate)
        elif flags.model == 'allconv':
            self.network = AllConvNet(num_classes)
        elif flags.model == 'resnext':
            self.network = resnext29(num_classes=num_classes)
        else:
            raise ValueError('Unknown model: %s' % flags.model)
        self.network = self.network.cuda()

        print(self.network)
        print('flags:', flags)
        if not os.path.exists(flags.logs):
            os.makedirs(flags.logs)

        flags_log = os.path.join(flags.logs, 'flags_log.txt')
        write_log(flags, flags_log)
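
Both setup routines lean on two helpers, write_log and fix_all_seed, whose definitions are not shown. A minimal sketch of what they might look like, assuming write_log appends one line per call and fix_all_seed seeds every RNG the training loop touches:

    import random

    import numpy as np
    import torch


    def write_log(message, log_path):
        # append one line per call; the caller creates the directory
        with open(log_path, 'a') as f:
            f.write(str(message) + '\n')


    def fix_all_seed(seed):
        # seed Python, NumPy and PyTorch (CPU and GPU) for repeatable runs
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)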
Example #3
    def setup_path(self, flags):

        root_folder = 'data'
        data, data_loaders = get_data_loaders()

        # leave-one-domain-out split: train on the seen domain, test on all others
        seen_index = flags.seen_index
        self.train_data = data[seen_index]
        self.test_data = [x for index, x in enumerate(data) if index != seen_index]

        if not os.path.exists(flags.logs):
            os.makedirs(flags.logs)

        flags_log = os.path.join(flags.logs, 'path_log.txt')
        write_log(str(self.train_data), flags_log)
        write_log(str(self.test_data), flags_log)

        self.batImageGenTrain = BatchImageGenerator(flags=flags, stage='train', file_path=root_folder,
                                                    data_loader=data_loaders[seen_index], b_unfold_label=False)

        self.batImageGenTests = []
        for index, test_loader in enumerate(data_loaders):
            if index != seen_index:
                batImageGenTest = BatchImageGenerator(flags=flags, stage='test', file_path=root_folder,
                                                      data_loader=test_loader, b_unfold_label=False)
                self.batImageGenTests.append(batImageGenTest)
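
To make the split concrete: seen_index picks the single source domain for training and every other loader becomes a held-out test domain. A small worked example with hypothetical domain names:

    data = ['mnist', 'svhn', 'usps']  # hypothetical domain list
    seen_index = 1

    train_data = data[seen_index]                                   # 'svhn'
    test_data = [x for i, x in enumerate(data) if i != seen_index]  # ['mnist', 'usps']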
Example #4
    def train(self, flags):
        counter_k = 0
        counter_ite = 0
        self.best_accuracy_test = -1

        for epoch in range(0, flags.epochs):
            if ((epoch + 1) % flags.epochs_min == 0) and (counter_k < flags.k):  # every epochs_min epochs, add a new round of adversarial data (up to k rounds)
                print('Generating adversarial images [iter {}]'.format(counter_k))
                images, labels = self.maximize(flags)
                self.train_data.data = np.concatenate([self.train_data.data, images])
                self.train_data.targets.extend(labels)
                counter_k += 1

            self.network.train()
            self.train_data.transform = self.train_transform
            self.train_loader = torch.utils.data.DataLoader(
                self.train_data,
                batch_size=flags.batch_size,
                shuffle=True,
                num_workers=flags.num_workers,
                pin_memory=True)
            self.scheduler.T_max = counter_ite + len(self.train_loader) * (flags.epochs - epoch)

            for i, (images_train, labels_train) in enumerate(self.train_loader):
                counter_ite += 1

                # move the inputs and labels to the GPU
                inputs, labels = images_train.cuda(), labels_train.cuda()

                # forward pass
                outputs, _ = self.network(x=inputs)

                # compute the loss
                loss = self.loss_fn(outputs, labels)

                # zero the gradients before the backward pass
                self.optimizer.zero_grad()

                # backpropagate
                loss.backward()

                # update the parameters and the learning rate
                self.optimizer.step()
                self.scheduler.step()

                if epoch < 5 or epoch % 5 == 0:
                    print(
                        'epoch:', epoch, 'ite', i, 'total loss:', loss.cpu().item(), 'lr:',
                        self.scheduler.get_lr()[0])

                flags_log = os.path.join(flags.logs, 'loss_log.txt')
                write_log(str(loss.item()), flags_log)

            self.test_workflow(epoch, flags)
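
One detail worth noting in this variant: maximize() appends adversarial images to the dataset, so len(self.train_loader) grows over time, and the loop re-derives the cosine scheduler's horizon (T_max) each epoch. A small worked example with hypothetical numbers:

    # hypothetical numbers: 100 epochs, 390 batches per epoch before augmentation
    counter_ite = 3900  # iterations completed after 10 epochs
    epoch = 10
    batches_now = 429   # batches per epoch after adversarial images were added

    T_max = counter_ite + batches_now * (100 - epoch)  # 3900 + 429 * 90 = 42510
    # the annealing schedule now ends exactly at the final iteration,
    # even though the dataset grew mid-training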
Example #5
    def maximize(self, flags):
        self.network.eval()

        self.train_data.transform = self.preprocess
        self.train_loader = torch.utils.data.DataLoader(
            self.train_data,
            batch_size=flags.batch_size,
            shuffle=False,
            num_workers=flags.num_workers,
            pin_memory=True)
        images, labels = [], []

        for i, (images_train, labels_train) in enumerate(self.train_loader):

            # move the inputs and labels to the GPU
            inputs, targets = images_train.cuda(), labels_train.cuda()

            # record the embedding of the clean inputs as the anchor
            inputs_embedding = self.network(x=inputs)[-1]['Embedding'].detach().clone()
            inputs_embedding.requires_grad_(False)

            inputs_max = inputs.detach().clone()
            inputs_max.requires_grad_(True)
            optimizer = sgd(parameters=[inputs_max], lr=flags.lr_max)

            for ite_max in range(flags.loops_adv):
                tuples = self.network(x=inputs_max)

                # adversarial objective: task loss plus entropy, minus distance to the clean embedding
                loss = self.loss_fn(tuples[0], targets) + flags.eta * entropy_loss(tuples[0]) - \
                       flags.gamma * self.dist_fn(tuples[-1]['Embedding'], inputs_embedding)

                # zero the gradients before the backward pass
                self.network.zero_grad()
                optimizer.zero_grad()

                # gradient ascent: negate the loss so the step maximizes it
                (-loss).backward()

                # update the adversarial inputs
                optimizer.step()

                flags_log = os.path.join(flags.logs, 'max_loss_log.txt')
                write_log('ite_adv:{}, {}'.format(ite_max, loss.item()), flags_log)

            inputs_max = inputs_max.detach().clone().cpu()
            for j in range(len(inputs_max)):
                input_max = self.image_denormalise(inputs_max[j])
                input_max = self.image_transform(input_max.clamp(min=0.0, max=1.0))
                images.append(input_max)
                labels.append(labels_train[j].item())

        return np.stack(images), labels
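
The adversarial objective above maximizes the task loss plus an entropy bonus while penalizing drift from the clean embedding, but entropy_loss, self.dist_fn and self.image_denormalise are not defined in these snippets. A plausible sketch, assuming entropy_loss is the mean Shannon entropy of the softmax over the logits, dist_fn is a mean squared distance, and the denormaliser inverts a standard transforms.Normalize:

    import torch
    import torch.nn.functional as F


    def entropy_loss(logits):
        # mean Shannon entropy of the softmax distribution over classes
        log_p = F.log_softmax(logits, dim=1)
        return -(log_p.exp() * log_p).sum(dim=1).mean()


    def mse_dist(embedding, reference):
        # mean squared distance between the current and clean embeddings
        return ((embedding - reference) ** 2).mean()


    class Denormalise:
        # invert transforms.Normalize: x * std + mean, per channel
        def __init__(self, mean, std):
            self.mean = torch.tensor(mean).view(-1, 1, 1)
            self.std = torch.tensor(std).view(-1, 1, 1)

        def __call__(self, tensor):
            return tensor * self.std + self.mean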
Example #6
    def train(self, flags):
        counter_k = 0
        self.best_accuracy_test = -1

        for ite in range(flags.loops_train):
            if ((ite + 1) % flags.loops_min == 0) and (counter_k < flags.k):  # every loops_min iterations, add a new round of adversarial data (up to k rounds)
                print('Generating adversarial images [iter {}]'.format(counter_k))
                images, labels = self.maximize(flags)
                self.batImageGenTrain.images = np.concatenate((self.batImageGenTrain.images, images))
                self.batImageGenTrain.labels = np.concatenate((self.batImageGenTrain.labels, labels))
                self.batImageGenTrain.shuffle()
                counter_k += 1

            self.network.train()
            self.scheduler.step(epoch=ite)

            # get the inputs and labels from the data reader
            images_train, labels_train = self.batImageGenTrain.get_images_labels_batch()

            inputs, labels = torch.from_numpy(np.array(images_train, dtype=np.float32)), torch.from_numpy(
                np.array(labels_train, dtype=np.float32))

            # wrap the inputs and labels in Variables and move them to the GPU
            inputs, labels = Variable(inputs, requires_grad=False).cuda(), \
                             Variable(labels, requires_grad=False).long().cuda()

            # forward pass
            outputs, _ = self.network(x=inputs)

            # compute the loss
            loss = self.loss_fn(outputs, labels)

            # zero the gradients before the backward pass
            self.optimizer.zero_grad()

            # backpropagate
            loss.backward()

            # update the parameters
            self.optimizer.step()

            if ite < 500 or ite % 500 == 0:
                print(
                    'ite:', ite, 'total loss:', loss.cpu().item(), 'lr:',
                    self.scheduler.get_lr()[0])

            flags_log = os.path.join(flags.logs, 'loss_log.txt')
            write_log(str(loss.item()), flags_log)

            if ite % flags.test_every == 0 and ite != 0:
                self.test_workflow(self.batImageGenTests, flags, ite)
Example #7
    def setup(self, flags):
        torch.backends.cudnn.deterministic = flags.deterministic
        print('torch.backends.cudnn.deterministic:', torch.backends.cudnn.deterministic)
        fix_all_seed(flags.seed)

        self.network = LeNet5(num_classes=flags.num_classes)
        self.network = self.network.cuda()

        print(self.network)
        print('flags:', flags)
        if not os.path.exists(flags.logs):
            os.makedirs(flags.logs)

        flags_log = os.path.join(flags.logs, 'flags_log.txt')
        write_log(flags, flags_log)
Example #8
    def maximize(self, flags):
        self.network.eval()

        images_train, labels_train = self.batImageGenTrain.images, self.batImageGenTrain.labels
        images, labels = [], []

        # iterate over the training set in batches of flags.batch_size
        for start in range(0, len(labels_train), flags.batch_size):
            end = min(start + flags.batch_size, len(labels_train))
            inputs, targets = torch.from_numpy(
                np.array(images_train[start:end], dtype=np.float32)), torch.from_numpy(
                np.array(labels_train[start:end], dtype=np.float32))

            # wrap the inputs and labels in Variables and move them to the GPU
            inputs, targets = Variable(inputs, requires_grad=False).cuda(), \
                              Variable(targets, requires_grad=False).long().cuda()

            # record the embedding of the clean inputs as the anchor
            inputs_embedding = self.network(x=inputs)[-1]['Embedding'].detach().clone()
            inputs_embedding.requires_grad_(False)

            inputs_max = inputs.detach().clone()
            inputs_max.requires_grad_(True)
            optimizer = sgd(parameters=[inputs_max], lr=flags.lr_max)

            for ite_max in range(flags.loops_adv):
                tuples = self.network(x=inputs_max)

                # adversarial objective: task loss plus entropy, minus distance to the clean embedding
                loss = self.loss_fn(tuples[0], targets) + flags.eta * entropy_loss(tuples[0]) - \
                       flags.gamma * self.dist_fn(tuples[-1]['Embedding'], inputs_embedding)

                # zero the gradients before the backward pass
                self.network.zero_grad()
                optimizer.zero_grad()

                # gradient ascent: negate the loss so the step maximizes it
                (-loss).backward()

                # update the adversarial inputs
                optimizer.step()

                flags_log = os.path.join(flags.logs, 'max_loss_log.txt')
                write_log('ite_adv:{}, {}'.format(ite_max, loss.item()), flags_log)

            inputs_max = inputs_max.detach().clone().clamp(min=0.0, max=1.0)
            images.append(inputs_max.cpu().numpy())
            labels.append(targets.cpu().numpy())

        return np.concatenate(images), np.concatenate(labels)
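
Both maximize variants build the inner optimizer through an sgd(...) helper rather than calling torch.optim.SGD directly. Assuming it is just a thin wrapper that forwards its arguments, a minimal sketch:

    import torch


    def sgd(parameters, lr):
        # thin wrapper so the adversarial loop can swap optimizers in one place
        return torch.optim.SGD(parameters, lr=lr)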
Example #9
    def train(self, flags):
        self.network.train()
        self.best_accuracy_test = -1

        for ite in range(flags.loops_train):

            self.scheduler.step(epoch=ite)

            # get the inputs and labels from the data reader
            images_train, labels_train = self.batImageGenTrain.get_images_labels_batch()

            inputs, labels = torch.from_numpy(np.array(images_train, dtype=np.float32)), torch.from_numpy(
                np.array(labels_train, dtype=np.float32))

            # wrap the inputs and labels in Variables and move them to the GPU
            inputs, labels = Variable(inputs, requires_grad=False).cuda(), \
                             Variable(labels, requires_grad=False).long().cuda()

            # forward pass
            outputs, _ = self.network(x=inputs)

            # compute the loss
            loss = self.loss_fn(outputs, labels)

            # zero the gradients before the backward pass
            self.optimizer.zero_grad()

            # backpropagate
            loss.backward()

            # update the parameters
            self.optimizer.step()

            if ite < 500 or ite % 500 == 0:
                print(
                    'ite:', ite, 'total loss:', loss.cpu().item(), 'lr:',
                    self.scheduler.get_lr()[0])

            flags_log = os.path.join(flags.logs, 'loss_log.txt')
            write_log(str(loss.item()), flags_log)

            if ite % flags.test_every == 0 and ite != 0:
                self.test_workflow(self.batImageGenTests, flags, ite)
Example #10
def train_net(args):
    data_dir = config.dataset_path
    image_size = config.image_shape[0:2]
    assert len(image_size) == 2
    assert image_size[0] == image_size[1]
    print('image_size', image_size)
    print('num_classes', config.num_classes)
    training_path = os.path.join(data_dir, "train.tfrecords")

    print('Called with argument:', args, config)
    train_dataset, batches_per_epoch = data_input.training_dataset(
        training_path, default.per_batch_size)

    extractor, classifier = build_model((image_size[0], image_size[1], 3),
                                        args)

    global_step = 0
    ckpt_path = os.path.join(
        args.models_root, '%s-%s-%s' % (args.network, args.loss, args.dataset),
        'model-{step:04d}.ckpt')
    ckpt_dir = os.path.dirname(ckpt_path)
    print('ckpt_path', ckpt_path)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    if len(args.pretrained) == 0:
        latest = tf.train.latest_checkpoint(ckpt_dir)
        if latest:
            global_step = int(latest.split('-')[-1].split('.')[0])
            classifier.load_weights(latest)
    else:
        print('loading', args.pretrained, args.pretrained_epoch)
        load_path = '%s-%s.ckpt' % (args.pretrained, args.pretrained_epoch)
        classifier.load_weights(load_path)

    initial_epoch = global_step // batches_per_epoch
    rest_batches = global_step % batches_per_epoch

    lr_decay_steps = [(int(x), args.lr * np.power(0.1, i + 1))
                      for i, x in enumerate(args.lr_steps.split(','))]
    print('lr_steps', lr_decay_steps)

    valid_datasets = data_input.load_valid_set(data_dir, config.val_targets)

    classifier.compile(
        optimizer=keras.optimizers.SGD(lr=args.lr, momentum=args.mom),
        loss=keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=[keras.metrics.SparseCategoricalAccuracy()])
    classifier.summary()

    tensor_board = keras.callbacks.TensorBoard(ckpt_dir)
    tensor_board.set_model(classifier)

    train_names = ['train_loss', 'train_acc']
    train_results = []
    highest_score = 0
    for epoch in range(initial_epoch, default.end_epoch):
        for batch in range(rest_batches, batches_per_epoch + 1):
            utils.update_learning_rate(classifier, lr_decay_steps, global_step)
            train_results = classifier.train_on_batch(train_dataset,
                                                      reset_metrics=False)
            global_step += 1
            if global_step % 1000 == 0:
                print('lr-batch-epoch:',
                      float(K.get_value(classifier.optimizer.lr)), batch,
                      epoch)
            if global_step >= 0 and global_step % args.verbose == 0:
                acc_list = []
                for key in valid_datasets:
                    data_set, data_set_flip, is_same_list = valid_datasets[key]
                    embeddings = extractor.predict(data_set)
                    embeddings_flip = extractor.predict(data_set_flip)
                    embeddings_parts = [embeddings, embeddings_flip]
                    x_norm = 0.0
                    x_norm_cnt = 0
                    for part in embeddings_parts:
                        for i in range(part.shape[0]):
                            embedding = part[i]
                            norm = np.linalg.norm(embedding)
                            x_norm += norm
                            x_norm_cnt += 1
                    x_norm /= x_norm_cnt
                    embeddings = embeddings_parts[0] + embeddings_parts[1]
                    embeddings = sklearn.preprocessing.normalize(embeddings)
                    print(embeddings.shape)
                    _, _, accuracy, val, val_std, far = verification.evaluate(
                        embeddings, is_same_list, folds=10)
                    acc, std = np.mean(accuracy), np.std(accuracy)

                    print('[%s][%d]XNorm: %f' % (key, batch, x_norm))
                    print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' %
                          (key, batch, acc, std))
                    acc_list.append(acc)

                if len(acc_list) > 0:
                    score = sum(acc_list)
                    if highest_score == 0:
                        highest_score = score
                    elif highest_score >= score:
                        print('\nStep %05d: score did not improve from %0.5f' %
                              (global_step, highest_score))
                    else:
                        path = ckpt_path.format(step=global_step)
                        print(
                            '\nStep %05d: score improved from %0.5f to %0.5f,'
                            ' saving model to %s' %
                            (global_step, highest_score, score, path))
                        highest_score = score
                        classifier.save_weights(path)

        # only a resumed run should start part-way through an epoch
        rest_batches = 0
        utils.write_log(tensor_board, train_names, train_results, epoch)
        classifier.reset_metrics()
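
utils.update_learning_rate is not shown, but lr_decay_steps pairs each boundary step with a pre-computed rate (args.lr scaled by a further 0.1 at each boundary). A minimal sketch, assuming the helper simply applies the new rate once the boundary step is reached:

    import tensorflow.keras.backend as K


    def update_learning_rate(model, lr_decay_steps, global_step):
        # lr_decay_steps is a list of (boundary_step, new_lr) pairs
        for boundary, new_lr in lr_decay_steps:
            if global_step == boundary:
                K.set_value(model.optimizer.lr, new_lr)
                print('learning rate set to', new_lr)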