Example #1
def test(model, args):
    # Compile the model
    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=[margin_loss, 'mse'],
                  loss_weights=[1., args.lam_recon],
                  metrics={
                      'capsnet': [
                          'accuracy', top_3_categorical_accuracy,
                          'top_k_categorical_accuracy'
                      ]
                  })

    # Evaluate the model using custom generator
    scores = model.evaluate_generator(generator=custom_generator(
        get_iterator(args.filepath, subset="test"), testing=args.testing),
                                      steps=int(40000 / args.batch_size))
    print(scores)

    # Reconstruct batch of images
    if args.recon:
        x_test_batch, y_test_batch = get_iterator(args.filepath,
                                                  subset="test").next()
        y_pred, x_recon = model.predict(x_test_batch)

        # Save reconstructed and original images
        save_recons(x_recon, x_test_batch, y_pred, y_test_batch, args.save_dir)
Example #2
def on_end_epoch(state):
    print(
        '[Epoch %d] Training Loss: %.4f Top1 Accuracy: %.2f%% Top5 Accuracy: %.2f%%'
        % (state['epoch'], meter_loss.value()[0], meter_accuracy.value()[0],
           meter_accuracy.value()[1]))

    train_loss_logger.log(state['epoch'], meter_loss.value()[0])
    train_top1_accuracy_logger.log(state['epoch'], meter_accuracy.value()[0])
    train_top5_accuracy_logger.log(state['epoch'], meter_accuracy.value()[1])

    # learning rate scheduler
    scheduler.step(meter_loss.value()[0], epoch=state['epoch'])

    reset_meters()

    engine.test(processor, get_iterator(False, BATCH_SIZE, USE_DA))

    test_loss_logger.log(state['epoch'], meter_loss.value()[0])
    test_top1_accuracy_logger.log(state['epoch'], meter_accuracy.value()[0])
    test_top5_accuracy_logger.log(state['epoch'], meter_accuracy.value()[1])
    confusion_logger.log(confusion_meter.value())

    print(
        '[Epoch %d] Testing Loss: %.4f Top1 Accuracy: %.2f%% Top5 Accuracy: %.2f%%'
        % (state['epoch'], meter_loss.value()[0], meter_accuracy.value()[0],
           meter_accuracy.value()[1]))

    torch.save(model.state_dict(), 'epochs/%d.pth' % (state['epoch']))

    # visualization
    test_image, _ = next(iter(get_iterator(False, 25, USE_DA)))
    test_image_logger.log(
        make_grid(test_image, nrow=5, normalize=True).numpy())
Example #3
def train(model, args):
    # Define callbacks
    log = callbacks.CSVLogger(args.save_dir + '/log.csv', append=True)
    tb = callbacks.TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',
                               batch_size=args.batch_size,
                               histogram_freq=int(args.debug))
    lr_decay = callbacks.LearningRateScheduler(
        schedule=lambda epoch: args.lr * (args.lr_decay**epoch))
    checkpoint = callbacks.ModelCheckpoint(args.save_dir +
                                           '/weights-{epoch:02d}.h5',
                                           monitor='val_capsnet_acc',
                                           save_best_only=True,
                                           save_weights_only=True,
                                           verbose=args.verbose)
    early_stopper = callbacks.EarlyStopping(monitor='val_capsnet_loss',
                                            patience=args.patience,
                                            verbose=args.verbose)

    # Compile the model
    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=[margin_loss, 'mse'],
                  loss_weights=[1., args.lam_recon],
                  metrics={
                      'capsnet': [
                          'accuracy', top_3_categorical_accuracy,
                          'top_k_categorical_accuracy'
                      ]
                  })

    # Start training using custom generator
    model.fit_generator(
        generator=custom_generator(get_iterator(args.filepath,
                                                args.input_size,
                                                args.shift_fraction,
                                                args.hor_flip,
                                                args.whitening,
                                                args.rotation_range,
                                                args.brightness_range,
                                                args.shear_range,
                                                args.zoom_range,
                                                subset="train"),
                                   testing=args.testing),
        steps_per_epoch=int(210000 / args.batch_size),
        epochs=args.epochs,
        validation_data=custom_generator(get_iterator(args.filepath,
                                                      subset="val"),
                                         testing=args.testing),
        validation_steps=int(40000 / args.batch_size),
        callbacks=[log, tb, checkpoint, lr_decay, early_stopper],
        initial_epoch=args.initial_epoch)

    # Save the model
    model_path = '/t_model.h5'
    model.save(args.save_dir + model_path)
    print('The model saved to \'%s%s\'' % (args.save_dir, model_path))
Example #4
def on_end_epoch(state):
    # train
    msg = '[%s] [Epoch %d] Training Loss: %.4f (Accuracy: %.2f%%)' % (
        visdom_env, state['epoch'], meter_loss.value()[0],
        meter_accuracy.value()[0])
    if args.log_dir != '':
        f = open(log_path + '/train.txt', 'a')
        f.write(msg + "\n")
        f.close()
    if args.track:
        print(msg)
        train_loss_logger.log(state['epoch'], meter_loss.value()[0])
        train_error_logger.log(state['epoch'], meter_accuracy.value()[0])
    reset_meters()
    # test
    engine.test(
        processor,
        get_iterator(args.dataset,
                     False,
                     args.batch_size,
                     trans=args.transform))
    msg = '[%s] [Epoch %d] Testing Loss: %.4f (Accuracy: %.2f%%)' % (
        visdom_env, state['epoch'], meter_loss.value()[0],
        meter_accuracy.value()[0])
    if args.track:
        print(msg)
        test_loss_logger.log(state['epoch'], meter_loss.value()[0])
        test_accuracy_logger.log(state['epoch'], meter_accuracy.value()[0])
        confusion_logger.log(confusion_meter.value())
    if args.log_dir != '':
        f = open(log_path + '/test.txt', 'a')
        f.write(msg + "\n")
        f.close()
Example #5
def run_epoch(train_dataloader, validation_dataloader, model, optimizer,
              scores_per_epoch, verbose, evaluator):
    train_losses = []
    train_accuracies = []
    validation_losses = []
    validation_accuracies = []

    model.train()
    n_updates = 0
    for x, m, y in get_iterator(train_dataloader, verbose):
        lm_logits, task_logits = model(x)
        double_head_loss, task_loss, lm_loss = evaluator.compute_double_head_loss(
            x, y, m, lm_logits, task_logits)
        double_head_loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        n_updates += 1
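        # score roughly scores_per_epoch times per epoch, and always after the final batch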
        if n_updates % math.ceil(
                float(len(train_dataloader)) / float(scores_per_epoch)
        ) == 0 or n_updates == len(train_dataloader):
            train_loss, train_accuracy = score(train_dataloader,
                                               model,
                                               verbose=verbose,
                                               evaluator=evaluator)
            validation_loss, validation_accuracy = score(validation_dataloader,
                                                         model,
                                                         verbose=verbose,
                                                         evaluator=evaluator)
            train_losses.append(train_loss)
            train_accuracies.append(train_accuracy)
            validation_losses.append(validation_loss)
            validation_accuracies.append(validation_accuracy)

    return train_losses, train_accuracies, validation_losses, validation_accuracies
Example #6
def train_epoch(dh_model, optimizer, dataloader, evaluator, module_indices, update_indices, verbose):
    for x, m, y in get_iterator(dataloader, False):
        lm_logits, task_logits = dh_model(x)
        double_head_loss, task_loss, lm_loss = evaluator.compute_double_head_loss(x, y, m, lm_logits, task_logits)
        dh_model.zero_grad()
        double_head_loss.backward()
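        # a StackedOptimizer is assumed to apply the update itself and
        # return a tuned copy; a plain optimizer just steps in place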
        if isinstance(optimizer, StackedOptimizer):
            tuned_dh_model = optimizer(dh_model, double_head_loss, module_indices, update_indices)
        else:
            optimizer.step()
            tuned_dh_model = dh_model
    return tuned_dh_model
Example #7
def on_end_epoch(state):
    print('[Epoch %d] Training Loss: %.4f (Accuracy: %.2f%%)' %
          (state['epoch'], meter_loss.value()[0], meter_accuracy.value()[0]))

    train_loss_logger.log(state['epoch'], meter_loss.value()[0])
    train_accuracy_logger.log(state['epoch'], meter_accuracy.value()[0])

    reset_meters()

    engine.test(processor, utils.get_iterator(False))
    test_loss_logger.log(state['epoch'], meter_loss.value()[0])
    test_accuracy_logger.log(state['epoch'], meter_accuracy.value()[0])
    confusion_logger.log(confusion_meter.value())

    print('[Epoch %d] Testing Loss: %.4f (Accuracy: %.2f%%)' %
          (state['epoch'], meter_loss.value()[0], meter_accuracy.value()[0]))

    torch.save(model.state_dict(), 'epochs/epoch_%d.pt' % state['epoch'])

    # reconstruction visualization

    test_sample = next(iter(utils.get_iterator(False)))

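    # scale raw uint8 pixels to [0, 1] and add a channel dimension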
    ground_truth = (test_sample[0].unsqueeze(1).float() / 255.0)
    if torch.cuda.is_available():
        _, reconstructions = model(Variable(ground_truth).cuda())
    else:
        _, reconstructions = model(Variable(ground_truth))
    reconstruction = reconstructions.cpu().view_as(ground_truth).data

    ground_truth_logger.log(
        make_grid(ground_truth,
                  nrow=int(config.BATCH_SIZE**0.5),
                  normalize=True,
                  range=(0, 1)).numpy())
    reconstruction_logger.log(
        make_grid(reconstruction,
                  nrow=int(config.BATCH_SIZE**0.5),
                  normalize=True,
                  range=(0, 1)).numpy())
Example #8
def score(dataloader, model, verbose, evaluator):
    losses = []
    accuracies = []
    with torch.no_grad():
        model.eval()
        for x, m, y in get_iterator(dataloader, verbose):
            lm_logits, task_logits = model(x)
            double_head_loss, task_loss, lm_loss = evaluator.compute_double_head_loss(
                x, y, m, lm_logits, task_logits)
            accuracy = evaluator.compute_score(y, task_logits)
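            # repeat per-batch values x.shape[0] times so the mean weights every example equally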
            losses.extend([double_head_loss.cpu().item()] * x.shape[0])
            accuracies.extend([accuracy.cpu().item()] * x.shape[0])
    return np.mean(losses), np.mean(accuracies)
Example #9
def test_epoch(dh_model, dataloader, evaluator, verbose):
    losses = []
    accuracies = []
    for x, m, y in get_iterator(dataloader, False):
        lm_logits, task_logits = dh_model(x)
        double_head_loss, task_loss, lm_loss = evaluator.compute_double_head_loss(x, y, m, lm_logits, task_logits)
        accuracy = evaluator.compute_score(y, task_logits)
        losses.append(double_head_loss.detach())
        accuracies.append(accuracy.cpu().item())
    losses = torch.cat([loss.unsqueeze(-1) for loss in losses], dim=-1)
    loss = losses.mean(-1)
    accuracy = np.mean(accuracies)
    return loss, accuracy
Example #10
    def testGetIterator(self):
        tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_file(
            'vocab.txt', default_value=0)
        # src_data = utils.load_data(self.src_data_dir)
        # tgt_data = utils.load_data(self.tgt_data_dir)
        # src_dataset = tf.data.Dataset.from_tensor_slices(tf.constant(src_data))
        # tgt_dataset = tf.data.Dataset.from_tensor_slices(tf.constant(tgt_data))
        src_dataset = tf.data.TextLineDataset(self.src_data_dir)
        tgt_dataset = tf.data.TextLineDataset(self.tgt_data_dir)

        iterator = utils.get_iterator(src_dataset=src_dataset,
                                      tgt_dataset=tgt_dataset,
                                      src_vocab_table=src_vocab_table,
                                      tgt_vocab_table=tgt_vocab_table,
                                      batch_size=self.batch_size,
                                      random_seed=123,
                                      shuffle=False,
                                      source_reverse=False)

        tables_initializer = tf.tables_initializer()
        source = iterator.source
        target_input = iterator.target_input
        target_output = iterator.target_output
        src_seq_len = iterator.source_sequence_length
        tgt_seq_len = iterator.target_sequence_length

        with tf.Session() as sess:
            sess.run(tables_initializer)
            sess.run(iterator.initializer)

            (source_v, src_len_v, target_input_v, target_output_v,
             tgt_len_v) = (sess.run((source, src_seq_len, target_input,
                                     target_output, tgt_seq_len)))

            print(source_v)
            print(src_len_v)
            print(target_input_v)
            print(target_output_v)
            print(tgt_len_v)
Example #11
def train(model, eval_model, args):
    # Compile the model
    if args.metric_type == "euclidean":
        model.compile(optimizer=optimizers.Adam(lr=args.lr),
                      loss=[triplet_eucliden_loss, "mse", "mse", "mse"],
                      loss_weights=[1., 0., 0., 0.])
    elif args.metric_type == "cosine":
        model.compile(
            optimizer=optimizers.Adam(lr=args.lr),
            loss=[triplet_cosine_loss, margin_loss, margin_loss, margin_loss],
            loss_weights=[1., 0.2, 0.2, 0.2])
    else:
        raise Exception(
            "Wrong metric type. Available: ['euclidean', 'cosine']")

    if not os.path.isdir(os.path.join(args.save_dir, "tensorboard-logs")):
        os.mkdir(os.path.join(args.save_dir, "tensorboard-logs"))

    tensorboard = callbacks.TensorBoard(log_dir=os.path.join(
        args.save_dir, "tensorboard-logs"),
                                        histogram_freq=0,
                                        batch_size=args.batch_size,
                                        write_graph=True,
                                        write_grads=True)
    tensorboard.set_model(model)

    lr_scheduler = callbacks.LearningRateScheduler(
        schedule=lambda epoch: args.lr * (args.lr_decay**epoch))
    lr_scheduler.set_model(model)

    train_iterator = get_iterator(os.path.join(args.filepath,
                                               "train"), args.input_size,
                                  args.batch_size, args.shift_fraction,
                                  args.hor_flip, args.whitening,
                                  args.rotation_range, args.brightness_range,
                                  args.shear_range, args.zoom_range)
    train_generator = custom_generator(train_iterator)

    losses = list()
    for i in range(args.initial_epoch, args.epochs):
        total_loss, total_triplet_loss = 0, 0
        total_anchor_xentr, total_positive_xentr, total_negative_xentr = 0, 0, 0

        print("Epoch (" + str(i + 1) + "/" + str(args.epochs) + "):")
        t_start = time.time()
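        # drive the Keras callbacks by hand since training uses train_on_batch rather than fit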
        lr_scheduler.on_epoch_begin(i)
        if i > 0:
            print("\nLearning rate is reduced to {:.8f}.".format(
                K.get_value(model.optimizer.lr)))

        for j in tqdm(range(len(train_iterator)),
                      ncols=100,
                      desc="Training",
                      bar_format="{l_bar}%s{bar}%s{r_bar}" %
                      (Fore.GREEN, Fore.RESET)):
            x, y = next(train_generator)

            loss, triplet_loss_, anchor_xentr, positive_xentr, negative_xentr = model.train_on_batch(
                x, y)
            total_loss += loss
            total_triplet_loss += triplet_loss_
            total_anchor_xentr += anchor_xentr
            total_positive_xentr += positive_xentr
            total_negative_xentr += negative_xentr

            if args.metric_type == "euclidean":
                print("\tTotal Loss: {:.4f}"
                      "\tTriplet Loss: {:.4f}".format(
                          total_loss / (j + 1), total_triplet_loss / (j + 1)),
                      "\r",
                      end="")
            elif args.metric_type == "cosine":
                print("\tTotal Loss: {:.4f}"
                      "\tTriplet: {:.4f}"
                      "\tA X-Ent: {:.4f}"
                      "\tP X-Ent: {:.4f}"
                      "\tN X-Ent: {:.4f}".format(
                          total_loss / (j + 1), total_triplet_loss / (j + 1),
                          total_anchor_xentr / (j + 1),
                          total_positive_xentr / (j + 1),
                          total_negative_xentr / (j + 1)),
                      "\r",
                      end="")
            else:
                raise Exception(
                    "Wrong metric type. Available: ['euclidean', 'cosine']")

        print("\nEpoch ({}/{}) completed in {:5.6f} secs.".format(
            i + 1, args.epochs,
            time.time() - t_start))

        if i % 5 == 0:
            print("\nEvaluating the model...")
            test(model=eval_model, args=args)

        # On epoch end loss and improved or not
        on_epoch_end_loss = total_loss / len(train_iterator)
        on_epoch_end_triplet = total_triplet_loss / len(train_iterator)
        on_epoch_end_a_xentr = total_anchor_xentr / len(train_iterator)
        on_epoch_end_p_xentr = total_positive_xentr / len(train_iterator)
        on_epoch_end_n_xentr = total_negative_xentr / len(train_iterator)
        print("On epoch end loss: {:.6f}".format(on_epoch_end_loss))
        if len(losses) > 0:
            if np.min(losses) > on_epoch_end_loss:
                print("\nSaving weights to {}".format(
                    os.path.join(args.save_dir,
                                 "weights-" + str(i + 1) + ".h5")))
                # if os.path.isfile(os.path.join(args.save_dir, "weights-" + str(np.argmin(losses)) + ".h5")):
                #     os.remove(os.path.join(args.save_dir, "weights-" + str(np.argmin(losses)) + ".h5"))
                model.save_weights(
                    os.path.join(args.save_dir,
                                 "weights-" + str(i + 1) + ".h5"))
            else:
                print("\nLoss value {:.6f} did not improve from {:.6f}".format(
                    on_epoch_end_loss, np.min(losses)))
        else:
            print("\nSaving weights to {}".format(
                os.path.join(args.save_dir, "weights-" + str(i + 1) + ".h5")))
            model.save_weights(
                os.path.join(args.save_dir, "weights-" + str(i + 1) + ".h5"))

        losses.append(on_epoch_end_loss)

        # LR scheduling
        lr_scheduler.on_epoch_end(i)

        # Tensorboard
        tensorboard.on_epoch_end(
            i, {
                "Total Loss": on_epoch_end_loss,
                "Triplet Loss": on_epoch_end_triplet,
                "Anchor X-Entropy Loss": on_epoch_end_a_xentr,
                "Positive X-Entropy Loss": on_epoch_end_p_xentr,
                "Negative X-Entropy Loss": on_epoch_end_n_xentr,
                "Learning rate": K.get_value(model.optimizer.lr)
            })

    tensorboard.on_train_end(None)

    # Model saving
    model_path = 't_model.h5'
    model.save(os.path.join(args.save_dir, model_path))
    print("The model file saved to \"{}\"".format(
        os.path.join(args.save_dir, model_path)))
Example #12
                        choices=['test_single', 'test_multi'],
                        help='visualized data mode')
    parser.add_argument('--num_iterations',
                        default=3,
                        type=int,
                        help='routing iterations number')
    opt = parser.parse_args()

    DATA_TYPE = opt.data_type
    DATA_MODE = opt.data_mode
    NUM_ITERATIONS = opt.num_iterations
    batch_size = 16 if DATA_MODE == 'test_single' else 8
    nrow = 4 if DATA_MODE == 'test_single' else 2

    images, labels = next(
        iter(get_iterator(DATA_TYPE, DATA_MODE, batch_size, False)))
    save_image(images,
               filename='vis_%s_%s_original.png' % (DATA_TYPE, DATA_MODE),
               nrow=nrow,
               normalize=True,
               padding=4,
               pad_value=255)

    for NET_MODE in ['Capsule_ps', 'Capsule_fc', 'CNN']:
        if NET_MODE == 'Capsule_ps':
            model = MixNet(data_type=DATA_TYPE,
                           capsule_type='ps',
                           num_iterations=NUM_ITERATIONS,
                           return_prob=True)
            AM_method = ProbAM(model)
        elif NET_MODE == 'Capsule_fc':
Example #13
    confusion_meter = tnt.meter.ConfusionMeter(10, normalized=True)

    train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'})
    train_top1_accuracy_logger = VisdomPlotLogger(
        'line', opts={'title': 'Train Top1 Accuracy'})
    train_top5_accuracy_logger = VisdomPlotLogger(
        'line', opts={'title': 'Train Top5 Accuracy'})
    test_loss_logger = VisdomPlotLogger('line', opts={'title': 'Test Loss'})
    test_top1_accuracy_logger = VisdomPlotLogger(
        'line', opts={'title': 'Test Top1 Accuracy'})
    test_top5_accuracy_logger = VisdomPlotLogger(
        'line', opts={'title': 'Test Top5 Accuracy'})
    confusion_logger = VisdomLogger('heatmap',
                                    opts={'title': 'Confusion Matrix'})
    test_image_logger = VisdomLogger('image',
                                     opts={
                                         'title': 'Test Image',
                                         'width': 371,
                                         'height': 335
                                     })

    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch

    engine.train(processor,
                 get_iterator(True, BATCH_SIZE, USE_DA),
                 maxepoch=NUM_EPOCHS,
                 optimizer=optimizer)
Example #14
from utils import augmentation, get_iterator


def process(data):
    if args.dataset in ['mnist', 'fashion']:
        data = data.unsqueeze(1)
    elif args.dataset == 'cifar10':
        data = data.permute(0, 3, 1, 2)
    elif args.dataset == 'svhn':
        pass  # explicit
    else:
        raise ValueError
    assert torch.max(data) > 2  # To ensure everything needs to be scaled down
    return data.float() / 255.0


def processor(sample):
    data, labels, training = sample
    data = augmentation(process(data))
    labels = torch.LongTensor(labels)
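    # one-hot encode the labels by selecting rows of an identity matrix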
    labels = torch.sparse.torch.eye(num_classes).index_select(dim=0,
                                                              index=labels)
    data = Variable(data).cuda()
    classes = F.softmax(model(data).cuda(), dim=1)
    labels = Variable(labels, requires_grad=False).cuda()
    loss = capsule_loss(classes, labels)
    return loss, classes

engine.train(processor, get_iterator(args.dataset, True, test=False), \
             maxepoch=args.max_epochs, optimizer=optimizer)
Example #15
def on_end_epoch(state):
    print('[Epoch %d] Training Loss: %.4f Training Accuracy: %.2f%%' %
          (state['epoch'], meter_loss.value()[0], meter_accuracy.value()[0]))

    train_loss_logger.log(state['epoch'], meter_loss.value()[0])
    train_accuracy_logger.log(state['epoch'], meter_accuracy.value()[0])
    train_confusion_logger.log(meter_confusion.value())
    results['train_loss'].append(meter_loss.value()[0])
    results['train_accuracy'].append(meter_accuracy.value()[0])

    # test single
    reset_meters()
    engine.test(processor,
                get_iterator(DATA_TYPE, 'test_single', BATCH_SIZE, USE_DA))
    test_single_loss_logger.log(state['epoch'], meter_loss.value()[0])
    test_single_accuracy_logger.log(state['epoch'], meter_accuracy.value()[0])
    test_confusion_logger.log(meter_confusion.value())
    results['test_single_loss'].append(meter_loss.value()[0])
    results['test_single_accuracy'].append(meter_accuracy.value()[0])
    print(
        '[Epoch %d] Testing Single Loss: %.4f Testing Single Accuracy: %.2f%%'
        % (state['epoch'], meter_loss.value()[0], meter_accuracy.value()[0]))

    # test multi
    engine.test(processor,
                get_iterator(DATA_TYPE, 'test_multi', BATCH_SIZE, USE_DA))
    test_multi_accuracy_logger.log(state['epoch'],
                                   meter_multi_accuracy.value()[0])
    test_multi_confidence_accuracy_logger.log(state['epoch'],
                                              meter_multi_accuracy.value()[1])
    results['test_multi_accuracy'].append(meter_multi_accuracy.value()[0])
    results['test_multi_confidence_accuracy'].append(
        meter_multi_accuracy.value()[1])
    print(
        '[Epoch %d] Testing Multi Accuracy: %.2f%% Testing Multi Confidence Accuracy: %.2f%%'
        % (state['epoch'], meter_multi_accuracy.value()[0],
           meter_multi_accuracy.value()[1]))

    # save best model
    global best_acc
    if meter_accuracy.value()[0] > best_acc:
        if NET_MODE == 'Capsule':
            torch.save(
                model.state_dict(),
                'epochs/%s_%s_%s.pth' % (DATA_TYPE, NET_MODE, CAPSULE_TYPE))
        else:
            torch.save(model.state_dict(),
                       'epochs/%s_%s.pth' % (DATA_TYPE, NET_MODE))
        best_acc = meter_accuracy.value()[0]
    # save statistics at every 10 epochs
    if state['epoch'] % 10 == 0:
        out_path = 'statistics/'
        data_frame = pd.DataFrame(data={
            'train_loss':
            results['train_loss'],
            'train_accuracy':
            results['train_accuracy'],
            'test_single_loss':
            results['test_single_loss'],
            'test_single_accuracy':
            results['test_single_accuracy'],
            'test_multi_accuracy':
            results['test_multi_accuracy'],
            'test_multi_confidence_accuracy':
            results['test_multi_confidence_accuracy']
        },
                                  index=range(1, state['epoch'] + 1))
        if NET_MODE == 'Capsule':
            data_frame.to_csv(out_path + DATA_TYPE + '_' + NET_MODE + '_' +
                              CAPSULE_TYPE + '_results.csv',
                              index_label='epoch')
        else:
            data_frame.to_csv(out_path + DATA_TYPE + '_' + NET_MODE +
                              '_results.csv',
                              index_label='epoch')
Example #16
    train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'})
    train_accuracy_logger = VisdomPlotLogger('line',
                                             opts={'title': 'Train Accuracy'})
    test_loss_logger = VisdomPlotLogger('line', opts={'title': 'Test Loss'})
    test_accuracy_logger = VisdomPlotLogger('line',
                                            opts={'title': 'Test Accuracy'})
    confusion_logger = VisdomLogger('heatmap',
                                    opts={
                                        'title':
                                        'Confusion Matrix',
                                        'columnnames':
                                        list(range(config.NUM_CLASSES)),
                                        'rownames':
                                        list(range(config.NUM_CLASSES))
                                    })
    ground_truth_logger = VisdomLogger('image', opts={'title': 'Ground Truth'})
    reconstruction_logger = VisdomLogger('image',
                                         opts={'title': 'Reconstruction'})

    capsule_loss = CapsuleLoss()

    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch

    engine.train(processor,
                 utils.get_iterator(True),
                 maxepoch=config.NUM_EPOCHS,
                 optimizer=optimizer)
Example #17
        'line', env=DATA_TYPE, opts={'title': 'Test Multi Accuracy'})
    test_multi_confidence_accuracy_logger = VisdomPlotLogger(
        'line',
        env=DATA_TYPE,
        opts={'title': 'Test Multi Confidence Accuracy'})
    train_confusion_logger = VisdomLogger('heatmap',
                                          env=DATA_TYPE,
                                          opts={
                                              'title':
                                              'Train Confusion Matrix',
                                              'columnnames': class_name,
                                              'rownames': class_name
                                          })
    test_confusion_logger = VisdomLogger('heatmap',
                                         env=DATA_TYPE,
                                         opts={
                                             'title': 'Test Confusion Matrix',
                                             'columnnames': class_name,
                                             'rownames': class_name
                                         })

    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch

    engine.train(processor,
                 get_iterator(DATA_TYPE, 'train', BATCH_SIZE, USE_DA),
                 maxepoch=NUM_EPOCHS,
                 optimizer=optimizer)
Example #18
def on_end_epoch(state):
    # train
    msg = '[%s] [Epoch %d] Training Loss: %.4f (Accuracy: %.2f%%)' % (
        visdom_env, state['epoch'], meter_loss.value()[0],
        meter_accuracy.value()[0])
    if args.log_dir != '':
        f = open(log_path + '/train.txt', 'a')
        f.write(msg + "\n")
        f.close()
    if args.track:
        print(msg)
        train_loss_logger.log(state['epoch'], meter_loss.value()[0])
        writer.add_scalar("train/loss", meter_loss.value()[0], state['epoch'])
        train_error_logger.log(state['epoch'], meter_accuracy.value()[0])
        writer.add_scalar("train/accuracy",
                          meter_accuracy.value()[0], state['epoch'])
    reset_meters()
    # test
    engine.test(
        processor,
        get_iterator(args.dataset,
                     False,
                     args.batch_size,
                     trans=args.transform))
    msg = '[%s] [Epoch %d] Testing Loss: %.4f (Accuracy: %.2f%%)' % (
        visdom_env, state['epoch'], meter_loss.value()[0],
        meter_accuracy.value()[0])
    if args.track:
        print(msg)
        test_loss_logger.log(state['epoch'], meter_loss.value()[0])
        writer.add_scalar("test/loss", meter_loss.value()[0], state['epoch'])
        test_accuracy_logger.log(state['epoch'], meter_accuracy.value()[0])
        writer.add_scalar("test/accuracy",
                          meter_accuracy.value()[0], state['epoch'])
        confusion_logger.log(confusion_meter.value())
        # reconstructions
        reconstruction_iter = iter(
            get_iterator(
                args.dataset, False,
                trans=args.transform))  # False sets value of train mode
        all_mat = defaultdict(list)
        all_metadata = []
        all_label_img = []
        # Accumulate more examples for embedding
        for i in range(3 if args.test else 100):
            test_sample = next(reconstruction_iter)
            if i == 0:
                ground_truth = process(test_sample[0])
                _, reconstruction, perturbations = model(
                    Variable(ground_truth).cuda(),
                    perturb=state['epoch'] % args.batch_size)
                reconstruction = reconstruction.cpu().view_as(
                    ground_truth).data
                size = list(ground_truth.size())
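                # presumably a 16-dim capsule perturbed over 11 steps (matches the nrow=11 grid below)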
                size[0] = 16 * 11
                perturbation = perturbations.cpu().view(size).data
            embedding(test_sample, all_mat, all_metadata, all_label_img)
        all_metadata = torch.cat(all_metadata)
        all_label_img = torch.cat(all_label_img)
        for j in range(args.num_routing_iterations):
            all_mat[j] = torch.cat(all_mat[j])
            writer.add_embedding(
                all_mat[j],
                metadata=all_metadata,
                label_img=all_label_img,
                global_step=state['epoch'] * 100 + j + 1,
                tag="Ep {} Iter {}".format(state['epoch'], j + 1),
            )
        combined_mat = torch.cat(
            [all_mat[j] for j in range(args.num_routing_iterations)])
        combined_metadata = torch.cat([
            torch.ones(all_mat[j].size(0)) * (j + 1)
            for j in range(args.num_routing_iterations)
        ])
        combined_label_img = all_label_img.repeat(args.num_routing_iterations,
                                                  1, 1, 1)
        writer.add_embedding(
            combined_mat,
            metadata=combined_metadata,
            label_img=combined_label_img,
            global_step=state['epoch'] * 100,
            tag="Ep {} Comb".format(state['epoch']),
        )

        gt_image = make_grid(ground_truth,
                             nrow=int(args.batch_size**0.5),
                             normalize=True,
                             range=(0, 1))
        writer.add_image("groundtruth", gt_image, state['epoch'])
        ground_truth_logger.log(gt_image.numpy())
        r_image = make_grid(reconstruction,
                            nrow=int(args.batch_size**0.5),
                            normalize=True,
                            range=(0, 1))
        writer.add_image("reconstruction", r_image, state['epoch'])
        reconstruction_logger.log(r_image.numpy())
        p_image = make_grid(perturbation,
                            nrow=11,
                            normalize=True,
                            range=(0, 1))
        writer.add_image("perturbation", p_image, state['epoch'])
        perturbation_sample_logger.log(p_image.numpy())

        # fig 3: all reconstructions across all target classes
        num_good, num_bad = 1, 1
        good_samples, bad_samples = [], []
        for sample in iter(get_iterator(args.dataset, False, 1)):
            img, true_lbl = sample
            true_lbl = int(true_lbl[0])

            if len(good_samples) == num_good and len(bad_samples) == num_bad:
                break

            ground_truth = process(img)

            classes, reconstruction, all_reconstructions = model(
                Variable(ground_truth).cuda(), all_reconstructions=True)

            # get prediction
            _, max_length_indices = classes.max(dim=1)
            pred_lbl = max_length_indices.data[0]

            # for the given image, get all reconstructions for each class
            all_reconstructions = all_reconstructions.cpu().view(
                num_classes, img_channels, img_width, img_width
            ).data  # reconstructions: [torch.FloatTensor of size 10x1x28x28]
            candidate = (ground_truth, true_lbl, pred_lbl, all_reconstructions)

            # collect good samples
            if len(good_samples) < num_good and true_lbl == pred_lbl:
                good_samples.append(candidate)

            if len(bad_samples) < num_bad and true_lbl != pred_lbl:
                bad_samples.append(candidate)

        cats = []
        for sample in good_samples + bad_samples:
            ground_truth, true_lbl, pred_lbl, all_reconstructions = sample
            cat = torch.cat([ground_truth, all_reconstructions])
            maximum = cat.max()
            if cat.size(1) == 1:
                cat = cat.repeat(1, 3, 1, 1)
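            # outline the true class in green (channel 1) and, if wrong, the predicted class in red (channel 0)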
            cat[true_lbl + 1, 1, -2:] = maximum
            cat[true_lbl + 1, 1, :2] = maximum
            cat[true_lbl + 1, 1, :, -2:] = maximum
            cat[true_lbl + 1, 1, :, :2] = maximum
            if pred_lbl != true_lbl:
                cat[pred_lbl + 1, 0, -2:] = maximum
                cat[pred_lbl + 1, 0, :2] = maximum
                cat[pred_lbl + 1, 0, :, -2:] = maximum
                cat[pred_lbl + 1, 0, :, :2] = maximum
            cats.append(cat)
        image = make_grid(torch.cat(cats),
                          nrow=1 + num_classes,
                          normalize=True,
                          range=(0, 1))
        writer.add_image("reconstruction_all", image, state['epoch'])
        all_reconstruction_logger.log(image.numpy())

    if args.log_dir != '':
        f = open(log_path + '/test.txt', 'a')
        f.write(msg + "\n")
        f.close()

    torch.save(model.state_dict(),
               model_path + '/epoch_%d.pt' % state['epoch'])
Example #19
    config.FLAGS.action = args.action
    config.FLAGS.pred_file = args.data

    action = config.FLAGS.action

    # Get the total vocabulary size.
    vocab_size = get_src_vocab_size()
    src_unknown_id = tgt_unknown_id = vocab_size
    src_padding = vocab_size + 1

    src_vocab_table, tgt_vocab_table = create_vocab_tables(src_vocab_file, tgt_vocab_file, src_unknown_id,
                                                           tgt_unknown_id)
    embedding = load_word2vec_embedding(vocab_size)

    if action == 'train':
        iterator = get_iterator(src_vocab_table, tgt_vocab_table, vocab_size, BATCH_SIZE)
    elif action == 'predict':
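        # predict one example at a time; DROPOUT_RATE = 1.0 presumably means keep probability 1 (dropout off)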
        BATCH_SIZE = 1
        DROPOUT_RATE = 1.0
        iterator = get_predict_iterator(src_vocab_table, vocab_size, BATCH_SIZE)
    else:
        print("Only 'train' and 'predict' actions are supported.")
        exit(0)

    tag_table = tag_to_id_table()
    net = NER_net("ner", iterator, embedding, BATCH_SIZE)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(iterator.initializer)
        tf.tables_initializer().run()
Example #20
def main():
    model, params, stats = models.__dict__[opt.model](N=opt.N, J=opt.scat)

    iter_test = get_iterator(False, opt)

    scat = Scattering(M=opt.N, N=opt.N, J=opt.scat, pre_pad=False).cuda()

    epoch = 0
    if opt.resume != '':
        resumeFile = opt.resume
        if not resumeFile.endswith('pt7'):
            resumeFile = torch.load(opt.resume + '/latest.pt7')['latest_file']
        state_dict = torch.load(resumeFile)

        model.load_state_dict(state_dict['state_dict'])
        print('model was restored from epoch:', epoch)

    print('\nParameters:')
    print(
        pd.DataFrame([(key, v.size(), torch.typename(v.data))
                      for key, v in params.items()]))
    print('\nAdditional buffers:')
    print(
        pd.DataFrame([(key, v.size(), torch.typename(v))
                      for key, v in stats.items()]))
    n_parameters = sum(
        [p.numel() for p in list(params.values()) + list(stats.values())])
    print('\nTotal number of parameters: %f' % n_parameters)

    meter_loss = meter.AverageValueMeter()
    classacc = meter.ClassErrorMeter(topk=[1, 5], accuracy=False)
    timer_data = meter.TimeMeter('s')
    timer_sample = meter.TimeMeter('s')
    timer_train = meter.TimeMeter('s')
    timer_test = meter.TimeMeter('s')

    def h(sample):
        inputs = sample[0].cuda()
        if opt.scat > 0:
            inputs = scat(inputs)
        inputs = Variable(inputs)
        targets = Variable(sample[1].cuda().long())
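        # sample[2] is the train/eval flag appended by the on_sample hook below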
        if sample[2]:
            model.train()
        else:
            model.eval()

        # y = model.forward(inputs)
        y = torch.nn.parallel.data_parallel(model, inputs,
                                            np.arange(opt.ngpu).tolist())
        return F.cross_entropy(y, targets), y

    def on_sample(state):
        global data_time
        data_time = timer_data.value()
        timer_sample.reset()
        state['sample'].append(state['train'])

    def on_forward(state):
        prev_sum5 = classacc.sum[5]
        prev_sum1 = classacc.sum[1]
        classacc.add(state['output'].data,
                     torch.LongTensor(state['sample'][1]))
        meter_loss.add(state['loss'].data[0])

        next_sum5 = classacc.sum[5]
        next_sum1 = classacc.sum[1]
        n = state['output'].data.size(0)
        curr_top5 = 100.0 * (next_sum5 - prev_sum5) / n
        curr_top1 = 100.0 * (next_sum1 - prev_sum1) / n
        sample_time = timer_sample.value()
        timer_data.reset()
        if (state['train']):
            txt = 'Train:'
        else:
            txt = 'Test'

        print(
            '%s [%i,%i/%i] ; loss: %.3f (%.3f) ; err5: %.2f (%.2f) ; err1: %.2f (%.2f) ; data %.3f ; time %.3f'
            % (txt, state['epoch'], state['t'] % len(state['iterator']),
               len(state['iterator']), state['loss'].data[0],
               meter_loss.value()[0], curr_top5, classacc.value(5), curr_top1,
               classacc.value(1), data_time, sample_time))

    def on_start(state):
        state['epoch'] = epoch

    def on_start_epoch(state):
        classacc.reset()
        meter_loss.reset()
        timer_train.reset()

        epoch = state['epoch'] + 1

    def on_end_epoch(state):
        train_loss = meter_loss.value()
        train_acc = classacc.value()
        train_time = timer_train.value()
        meter_loss.reset()
        classacc.reset()
        timer_test.reset()

        engine.test(h, iter_test)

    engine = Engine()
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.hooks['on_start'] = on_start
    engine.test(h, iter_test)
    print(classacc.value())
Example #21
def main(_):
    # create input tensor
    inputs = tf.placeholder(dtype=tf.float32,
                            shape=[None, 256, 256, 3],
                            name="input")
    style = tf.placeholder(dtype=tf.float32,
                           shape=[None, 256, 256, 3],
                           name="style")

    # init image data
    style_image = load_image(FLAGS.style_image_path)
    iterator = get_iterator(glob.glob(FLAGS.train_dataset), FLAGS.batch_size,
                            FLAGS.epoch)

    # build transfer model
    optimizer, trans, total_loss, content_loss, style_loss = \
        build_model(inputs, style, FLAGS.learning_rate,
                    FLAGS.content_loss_weight, FLAGS.style_loss_weight)

    with tf.Session() as sess:
        # load pre-trained parameters
        vgg_vars = slim.get_variables_to_restore(include=['vgg_16'])
        variable_restore_op = slim.assign_from_checkpoint_fn(
            FLAGS.vgg_path, vgg_vars, ignore_missing_vars=True)
        variable_restore_op(sess)

        # get trainable parameters
        variables_to_save = slim.get_variables_to_restore(include=['transfer'])
        saver = tf.train.Saver(variables_to_save)

        all_var = tf.global_variables()
        init_var = [v for v in all_var if 'vgg_16' not in v.name]
        init = tf.variables_initializer(var_list=init_var)
        sess.run(init)
        sess.run(tf.local_variables_initializer())
        style_image = sess.run(style_image)

        # config visualization parameters
        tf.summary.scalar('losses/total_loss', total_loss)
        tf.summary.scalar('losses/content_loss', content_loss)
        tf.summary.scalar('losses/style_loss', style_loss)
        tf.summary.image('transformed', trans)
        tf.summary.image('origin', inputs)
        summary = tf.summary.merge_all()
        writer = tf.summary.FileWriter(FLAGS.summary_path, sess.graph)

        coord = tf.train.Coordinator()
        thread = tf.train.start_queue_runners(sess=sess, coord=coord)
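        # queue runners feed the input pipeline; exhausting it raises OutOfRangeError, caught below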
        counter = 0
        try:
            while not coord.should_stop():
                images = sess.run(iterator)
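                # pair every content image in the batch with the same style image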
                feed_dict = {
                    inputs: images,
                    style: [style_image for _ in range(images.shape[0])]
                }
                sess.run([optimizer], feed_dict=feed_dict)
                counter += 1

                if counter % 10 == 0:
                    result = sess.run(summary, feed_dict=feed_dict)
                    # update summary
                    writer.add_summary(result, counter)

                if counter % 1000 == 0:
                    # save model parameters
                    saver.save(sess, FLAGS.model_path, global_step=counter)

        except tf.errors.OutOfRangeError:
            coord.request_stop()
            coord.join(thread)

        writer.close()