Example #1
def validate(val_loader, net):
    top1 = [AverageMeter(), AverageMeter()]
    top5 = [AverageMeter(), AverageMeter()]

    # switch to evaluate mode
    net.eval()

    prefetcher = DataPrefetcher(val_loader)
    inputs, labels = prefetcher.next()
    with torch.no_grad():
        while inputs is not None:
            inputs = inputs.float().cuda()
            labels = labels.cuda()

            stu_outputs, tea_outputs = net(inputs)

            pred_s = accuracy(stu_outputs[-1], labels, topk=(1, 5))
            pred_t = accuracy(tea_outputs[-1], labels, topk=(1, 5))

            top1[0].update(pred_s[0].item(), inputs.size(0))
            top5[0].update(pred_s[1].item(), inputs.size(0))

            top1[1].update(pred_t[0].item(), inputs.size(0))
            top5[1].update(pred_t[1].item(), inputs.size(0))

            inputs, labels = prefetcher.next()

    return top1[0].avg, top5[0].avg, top1[1].avg, top5[1].avg
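Note: the snippets in this collection call `AverageMeter` and a top-k `accuracy` helper without defining them. Below is a minimal sketch of both in the common PyTorch ImageNet-example style; the exact versions used by each snippet may differ (some return percentages, some fractions, some per-class meters).

import torch


class AverageMeter:
    """Tracks the latest value, running sum, count and average of a metric."""

    def __init__(self, name='', fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


def accuracy(output, target, topk=(1,)):
    """Computes top-k accuracy (in percent) for each k in `topk`."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # indices of the top-k predictions, shape (maxk, batch_size)
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res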
Example #2
def cross_validation(x_train, t_train, x_val, t_val, x_test, t_test, iter_max,
                     random_state, alpha_vals):

    K = t_train.shape[1]  # number of classes
    n = x_train.shape[1]  # number of features

    # initialize arrays
    train_error = []
    val_error = []

    i = 0
    # try different values of alpha
    for alpha in alpha_vals:
        print('--> alpha = %f' % alpha)

        # initialize weights
        weights = initialize_weights(K, n, random_state)

        # gradient descent
        gradient_descent(weights=weights,
                         x=x_train,
                         t=t_train,
                         iter_max=iter_max,
                         alpha=alpha)

        train_error.append(cross_entropy(weights, x_train, t_train, alpha))
        val_error.append(cross_entropy(weights, x_val, t_val, 0))

        if i == 0:  # initial optimal values
            alpha_optimal = alpha
            weights_optimal = weights

        if len(train_error) != 1:  # skip first alpha
            if val_error[i - 1] < val_error[i]:
                break
            else:
                alpha_optimal = alpha
                weights_optimal = weights
        i += 1

    print('\n --> Training set size: ' + str(x_train.shape))
    print('=== After applying logistic regression: ===')
    print('Optimal alpha = ' + str(alpha_optimal))
    print('Train: Error = ' +
          str(cross_entropy(weights_optimal, x_train, t_train, alpha_optimal)))

    print('Val: Error = ' +
          str(cross_entropy(weights_optimal, x_val, t_val, 0)))
    print('Val: Accuracy = ' + str(
        accuracy(t_val.argmax(axis=1), predictions(x_val, weights_optimal))))

    print('Test: Error = ' +
          str(cross_entropy(weights_optimal, x_test, t_test, 0)))
    print('Test: Accuracy = ' + str(
        accuracy(t_test.argmax(axis=1), predictions(x_test, weights_optimal))))

    return [weights_optimal, train_error, val_error]
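Examples #2 and #3 rely on NumPy helpers (`initialize_weights`, `gradient_descent`, `cross_entropy`, `predictions`, `accuracy`) that are not shown. The following is a rough sketch of what the multinomial variant used in Example #2 could look like, inferred from the call sites; the learning rate and the L2 regularization convention are assumptions, and Example #3's binary variant uses a slightly different `initialize_weights` signature.

import numpy as np


def softmax(z):
    z = z - z.max(axis=1, keepdims=True)  # for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)


def initialize_weights(num_classes, num_features, random_state):
    rng = np.random.RandomState(random_state)
    return rng.normal(scale=0.01, size=(num_features, num_classes))


def predictions(x, weights):
    # predicted class index for each row of x
    return softmax(x @ weights).argmax(axis=1)


def cross_entropy(weights, x, t, alpha):
    # mean cross-entropy over one-hot targets t, plus an L2 penalty scaled by alpha
    y = softmax(x @ weights)
    ce = -np.mean(np.sum(t * np.log(y + 1e-12), axis=1))
    return ce + alpha * np.sum(weights ** 2)


def gradient_descent(weights, x, t, iter_max, alpha, lr=0.01):
    # plain batch gradient descent; updates `weights` in place,
    # which is what the calling code depends on
    for _ in range(iter_max):
        y = softmax(x @ weights)
        grad = x.T @ (y - t) / x.shape[0] + 2 * alpha * weights
        weights -= lr * grad


def accuracy(t_idx, pred_idx):
    # fraction of matching class indices
    return np.mean(t_idx == pred_idx)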
Example #3
def cross_validation(x_train, t_train, x_val, t_val, x_test, t_test, iter_max,
                     seed, alpha_vals, labels):
    """
    Apply cross validation to find the optimal regularization parameter.
    """

    # initialize arrays
    train_error = []
    val_error = []

    i = 0
    # try different values of alpha
    for alpha in alpha_vals:
        print('--> alpha = %f' % alpha)

        # initialize weights
        weights = initialize_weights(x_train.shape[1], seed)

        # gradient descent
        gradient_descent(x_train, t_train, weights, iter_max, alpha)

        train_error.append(cross_entropy(weights, x_train, t_train, alpha))
        val_error.append(cross_entropy(weights, x_val, t_val, 0))

        if i == 0:  # initial optimal values
            alpha_optimal = alpha
            weights_optimal = weights

        if len(train_error) != 1:  # skip first alpha
            if val_error[i - 1] < val_error[i]:
                break
            else:
                alpha_optimal = alpha
                weights_optimal = weights
        i += 1

    print('\n --> Training set size: ' + str(x_train.shape))
    print('=== After applying logistic regression: ===')
    print('Error: Optimal alpha = ' + str(alpha_optimal))
    print('Train: Error = ' +
          str(cross_entropy(weights_optimal, x_train, t_train, alpha_optimal)))

    print('Val: Error = ' +
          str(cross_entropy(weights_optimal, x_val, t_val, 0)))
    print('Val: Accuracy = ' +
          str(accuracy(t_val, predictions(x_val, weights_optimal))))

    print('Test: Error = ' +
          str(cross_entropy(weights_optimal, x_test, t_test, 0)))
    print('Test: Accuracy = ' +
          str(accuracy(t_test, predictions(x_test, weights_optimal))))

    return [weights_optimal, train_error, val_error]
Example #4
def accumulate_acc(output, target, task, meter):
    if 'All' in output.keys(): # Single-headed model
        meter.update(accuracy(output['All'], target), len(target))
    else:  # outputs from multi-headed (multi-task) model
        for t, t_out in output.items():
            inds = [i for i in range(len(task)) if task[i] == t]  # The index of inputs that matched specific task
            if len(inds) > 0:
                t_out = t_out[inds]
                t_target = target[inds]
                meter.update(accuracy(t_out, t_target), len(inds))

    return meter
Example #5
    def valid_epoch(self, epoch):
        self.model.eval()
        val_loss = []
        val_acc = []
        val_acc5 = []
        with torch.no_grad():
            for batch_idx, (sentences,
                            gt_topics) in enumerate(self.valid_loader):
                sentences, gt_topics = sentences.to(self.device), gt_topics.to(
                    self.device)

                pred_topics = self.model(sentences)
                loss = self.cls_loss(pred_topics, gt_topics)
                acc = accuracy(pred_topics, gt_topics)

                val_loss.append(loss.item())
                val_acc.append(acc[0].item())
                val_acc5.append(acc[1].item())

        self.writer.set_step(epoch, mode='val')
        self.val_metrics.update('loss', np.mean(val_loss))
        self.val_metrics.update('acc_1', np.mean(val_acc))
        self.val_metrics.update('acc_5', np.mean(val_acc5))

        return self.val_metrics.result()
Example #6
    def train_epoch(self, epoch):
        self.model.train()
        for batch_idx, (sentences, gt_topics) in enumerate(self.train_loader):
            sentences = sentences.to(self.device)
            gt_topics = gt_topics.to(self.device)

            pred_topics = self.model(sentences)
            loss = self.cls_loss(pred_topics, gt_topics)
            acc = accuracy(pred_topics, gt_topics)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # add loss summary only when updating the generator, to save memory
            self.writer.set_step(
                (epoch - 1) * len(self.train_loader) + batch_idx, mode='train')
            self.train_metrics.update('loss', loss.item())
            self.train_metrics.update('acc_1', acc[0].item())
            self.train_metrics.update('acc_5', acc[1].item())

            # log on console
            if batch_idx % self.config.summary_step == 0:
                self.logger.info(
                    'Train Epoch: {} {} Loss:{:.4f}, Acc_1: {:.2f}, Acc_5: {:.2f}'
                    .format(epoch, self._progress(batch_idx), loss.item(),
                            acc[0].item(), acc[1].item()))

        self.lr_scheduler.step()
        log = self.train_metrics.result()
        val_log = self.valid_epoch(epoch)
        log.update(**{'val_' + k: v for k, v in val_log.items()})
        return log
Example #7
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    # train_y = get_one_hot(train_y, 2)

    net = Net([Dense(100), ReLU(), Dense(30), ReLU(), Dense(1)])

    model = Model(net=net,
                  loss=SigmoidCrossEntropy(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_y_idx = np.asarray(test_y).reshape(-1)
        test_pred = model.forward(test_x)
        test_pred[test_pred > 0] = 1
        test_pred[test_pred <= 0] = 0
        test_pred_idx = test_pred.reshape(-1)
        res = accuracy(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Example #8
    def test(self, epoch):
        self.model.eval()
        top1 = AverageMeter()
        all_result = []
        for batch_idx, data in enumerate(self.test_dataloader):
            images, labels, images_path = data['images'], data['labels'], data[
                'images_path']
            if self.use_cuda:
                images, labels = images.cuda(), labels.cuda()
            outputs = self.model(images)
            prec1 = accuracy(outputs.data, labels.data, topk=(1, ))
            top1.update(prec1[0].detach().item(), images.size(0))
            self.writer.add_scalar('test/acc', top1.val, self.iters)

            if self.args.is_save:
                probs, preds = outputs.softmax(dim=1).max(dim=1)
                probs, preds = probs.view(-1), preds.view(-1)
                for idx in range(images.size(0)):
                    result = '{}\t{}\t{}\t{}\n'.format(images_path[idx],
                                                       labels[idx].item(),
                                                       preds[idx].item(),
                                                       probs[idx].item())
                    all_result.append(result)
        if self.args.is_save:
            with open('result.txt', 'w') as f:
                f.writelines(all_result)
        self.acc = top1.avg
        print('Test epoch:{}, acc:{}'.format(epoch, top1.avg))
Example #9
    def train(self, epoch):
        self.model.train()
        losses = AverageMeter()
        top1 = AverageMeter()
        pbar = tqdm(self.train_dataloader)
        for batch_idx, data in enumerate(pbar):
            self.iters += 1
            images, labels = data['images'], data['labels']
            if self.use_cuda:
                images, labels = images.cuda(), labels.cuda()
            outputs = self.model(images)
            loss = self.criterion(outputs, labels)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            if self.args.optimizer_type == 'sgd':
                adjust_learning_rate(self.optimizer, self.args.base_lr, epoch,
                                     self.args.lr_decay_epoch)
            prec1 = accuracy(outputs.data, labels.data, topk=(1, ))
            loss = loss.view(-1)
            losses.update(loss.data[0], images.size(0))
            top1.update(prec1[0].detach().item(), images.size(0))
            self.writer.add_scalar('train/loss', loss.data[0], self.iters)
            self.writer.add_scalar('train/acc', top1.val, self.iters)

            if batch_idx % self.args.log_interval == 0:
                print('epoch:{}, iter:{}/{}, loss:{}, acc:{}'.format(
                    epoch, batch_idx, self.iters, losses.avg,
                    round(top1.avg, 6)))
                losses.reset(), top1.reset()
Example #10
    def train():
        epoch_loss, epoch_acc = np.zeros(2)

        # Sets the module in training mode; this only affects modules such as Dropout or BatchNorm.
        model.train()

        stamp = time.time()
        for partial_epoch, (volume, label) in enumerate(train_data_loader, 1):
            volume_var = Variable(volume).float().cuda()
            label_var = Variable(label).cuda()

            out = model(volume_var)

            loss = nll_loss(out, label_var)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            acc = accuracy(out, label_var)
            epoch_loss += loss.data[0]
            epoch_acc += acc.data[0]

        consume = time.time() - stamp
        avg_loss, avg_acc = np.array([epoch_loss, epoch_acc]) / partial_epoch
        print('===> Training epoch: {:.2f}/{}\t'.format(i / train_compress, TRAIN.expect_epoch),
              'Loss: {:.5f} | Accuracy: {:.5f}'.format(avg_loss, avg_acc),
              ' |  Elapsed: {:.3f}s / batch({})'.format(consume / len(train_data_loader), TRAIN.batch_size))

        return avg_loss, avg_acc, volume
Example #11
    def train_epoch(self, epoch):
        self.model.train()
        # use free adversarial training to improve the model's generalization and robustness to noise
        for batch_idx, (images, labels) in enumerate(self.train_loader):
            images = images.to(self.device)
            labels = labels.to(self.device)

            noise = Variable(self.noise[0:images.size(0)],
                             requires_grad=True).to(self.device)
            noisy_images = images + noise
            noisy_images.clamp_(0, 1.0)
            self.norm.do(noisy_images)
            outputs = self.model(noisy_images)
            loss = self.cls_loss(outputs, labels)
            acc = accuracy(outputs, labels)

            self.optimizer.zero_grad()
            loss.backward()

            # Update the noise for the next iteration
            pert = self.config.fgsm_step * torch.sign(noise.grad)
            self.noise[:images.size(0)] += pert.data
            self.noise.clamp_(-self.config.clip_eps, self.config.clip_eps)

            self.optimizer.step()
            self.lr_scheduler.step()

            # add loss summary only when updating the generator, to save memory
            self.writer.set_step(
                (epoch - 1) * len(self.train_loader) + batch_idx, mode='train')
            self.train_metrics.update('loss', loss.item())
            self.train_metrics.update('acc_avg', np.mean(acc))
            # use a distinct loop variable so `acc` keeps the full per-attribute array for the log below
            for attr_acc, attr in zip(acc, self.config.attrs):
                self.train_metrics.update('acc_' + attr, attr_acc)

            # log on console
            if batch_idx % self.config.summary_step == 0:
                self.logger.info(
                    'Train Epoch: {} {} Loss:{:.4f}, Acc: {:.2f}'.format(
                        epoch, self._progress(batch_idx), loss.item(),
                        np.mean(acc)))

        log = self.train_metrics.result()
        val_log = self.valid_epoch(epoch)
        log.update(**{'val_' + k: v for k, v in val_log.items()})
        return log
Example #12
def train(train_loader, model, criterion, optimizer, epoch, cfgs):
    logger = logging.getLogger('{}.train'.format(cfgs['log_name']))

    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))

    # switch to train mode
    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        # compute output
        output = model(images)
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % cfgs['print_freq'] == 0:
            logger.info(progress.display(i))
Example #13
def validate(val_loader, model, criterion, cfgs):
    logger = logging.getLogger('{}.validate'.format(cfgs['log_name']))

    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5],
                             prefix='Test: ')

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % cfgs['print_freq'] == 0:
                logger.info(progress.display(i))

        # TODO: this should also be done with the ProgressMeter
        logger.info(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(
            top1=top1, top5=top5))

    return top1.avg
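Examples #12 and #13 above (and the `metric.ProgressMeter` in Examples #21 and #28) also assume a small progress formatter. A sketch in the spirit of the torchvision ImageNet reference script is shown below; here `display` returns the formatted line so it can be passed to `logger.info`, which is an assumption based on how it is called above, and it relies on the `__str__` of the `AverageMeter` sketched after Example #1. The `metric.ProgressMeter` variant instead takes its meters as varargs and exposes a `print` method.

class ProgressMeter:
    """Formats a progress line from the batch index and a list of AverageMeters."""

    def __init__(self, num_batches, meters, prefix=""):
        num_digits = len(str(num_batches))
        self.batch_fmtstr = '[{:' + str(num_digits) + 'd}/' + str(num_batches) + ']'
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        return '\t'.join(entries)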
Example #14
    def training(self, epoch):
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()

        self.model.train()

        for i, (input, target) in tqdm(enumerate(self.train_loader), total=len(self.train_loader)):
            output = self.model(input)
            loss = self.criterion(output, target)

            # print(output) # Tensor(shape=[256, 1000]
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.numpy()[0], input.shape[0])
            top1.update(prec1.numpy()[0], input.shape[0])
            top5.update(prec5.numpy()[0], input.shape[0])

            self.optimizer.clear_grad()
            loss.backward()
            self.optimizer.step()

            if i % self.cfg.Log_print_freq == 0:
                self.logger.info('Epoch: [{0}][{1}/{2}]\t'
                                 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                                 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                                 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                    epoch, i, len(self.train_loader), loss=losses, top1=top1, top5=top5))

        prec1, prec5 = self.validate()

        if self.cfg.visualDL:
            with LogWriter(logdir=self.logDir) as writer:
                # record scalar values with the scalar component
                writer.add_scalar(tag="loss", step=epoch, value=losses.avg)
                writer.add_scalar(tag="prec1", step=epoch, value=prec1)
                writer.add_scalar(tag="prec5", step=epoch, value=prec5)

        self.logger.info("Epoch {}: prec1: {} prec5: {}".format(epoch, prec1, prec5))

        return prec1, prec5, losses
Example #15
def validate(val_loader, net):
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    net.eval()

    prefetcher = DataPrefetcher(val_loader)
    inputs, labels = prefetcher.next()
    with torch.no_grad():
        while inputs is not None:
            inputs = inputs.float().cuda()
            labels = labels.cuda()

            stu_outputs, _ = net(inputs)

            pred1, pred5 = accuracy(stu_outputs[-1], labels, topk=(1, 5))
            top1.update(pred1.item(), inputs.size(0))
            top5.update(pred5.item(), inputs.size(0))
            inputs, labels = prefetcher.next()

    return top1.avg, top5.avg
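Examples #1, #15, #23 and #25 pull batches through a `DataPrefetcher` rather than iterating the loader directly. A minimal sketch of such a prefetcher, modeled on NVIDIA's APEX ImageNet example, is shown below: it copies the next batch to the GPU on a side CUDA stream so the transfer overlaps with computation on the current batch. Normalization and dtype handling are omitted and left to the caller, as in the snippets above.

import torch


class DataPrefetcher:
    """Wraps a DataLoader and moves the next batch to the GPU asynchronously."""

    def __init__(self, loader):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self.preload()

    def preload(self):
        try:
            self.next_inputs, self.next_labels = next(self.loader)
        except StopIteration:
            self.next_inputs = None
            self.next_labels = None
            return
        with torch.cuda.stream(self.stream):
            # async copies overlap with compute on the current batch
            # (assumes the DataLoader uses pin_memory=True)
            self.next_inputs = self.next_inputs.cuda(non_blocking=True)
            self.next_labels = self.next_labels.cuda(non_blocking=True)

    def next(self):
        # make sure the copy issued in preload() has finished before using the batch
        torch.cuda.current_stream().wait_stream(self.stream)
        inputs, labels = self.next_inputs, self.next_labels
        self.preload()
        return inputs, labels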
Example #16
def validate(testloader, model, gpu, size):
    model.eval()

    acc = 0
    acc_cnt = 0
    with torch.no_grad():
        for idx, data in enumerate(testloader):
            if size is None or idx < size:
                data, target, task = data
                if gpu:
                    with torch.no_grad():
                        data = data.cuda()
                        target = target.cuda()

                outputs = model.forward(data, task)

                acc += accuracy(outputs, target)
                acc_cnt += 1

            else:
                break
    return acc / acc_cnt
Example #17
    def validate(self):
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()

        # switch to evaluate mode
        self.model.eval()

        for i, (input, target) in enumerate(self.val_loader):
            output = self.model(input)
            loss = self.criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.numpy()[0], input.shape[0])
            top1.update(prec1.numpy()[0], input.shape[0])
            top5.update(prec5.numpy()[0], input.shape[0])

        self.logger.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
                         .format(top1=top1, top5=top5))

        return top1.avg, top5.avg
Example #18
    def test():
        epoch_loss, epoch_acc = np.zeros(2)

        # Sets the module in evaluation mode; this only affects modules such as Dropout or BatchNorm.
        model.eval()

        for partial_epoch, (volume, label) in enumerate(test_data_loader, 1):
            volume_var = Variable(volume, volatile=True).float().cuda()
            label_var = Variable(label, volatile=True).cuda()

            out = model(volume_var)

            loss = nll_loss(out, label_var)
            acc = accuracy(out, label_var)
            epoch_loss += loss.data[0]
            epoch_acc += acc.data[0]

        avg_loss, avg_acc = np.array([epoch_loss, epoch_acc]) / partial_epoch
        if i % TRAIN.val_interval == 0:
            print('{}\n===> Validation  | '.format('-' * 130),
                  'Loss: {:.5f} | Accuracy: {:.5f}'.format(avg_loss, avg_acc),
                  'Current time: {}\n{}'.format(time.strftime('%Y-%m-%d %H:%M:%S'), '-' * 130))
        return avg_loss, avg_acc, volume
Example #19
    def valid_epoch(self, epoch):
        self.model.eval()
        val_loss = []
        val_acc = []
        with torch.no_grad():
            for batch_idx, (images, labels) in enumerate(self.valid_loader):
                images, labels = images.to(self.device), labels.to(self.device)

                outputs = self.model(images)
                loss = self.cls_loss(outputs, labels)
                acc = accuracy(outputs, labels)

                val_loss.append(loss.item())
                val_acc.append(acc)

        self.writer.set_step(epoch, mode='val')
        self.val_metrics.update('loss', np.mean(val_loss))
        attr_acc = np.mean(val_acc, axis=0)
        self.val_metrics.update('acc_avg', np.mean(attr_acc))
        for acc, attr in zip(attr_acc, self.config.attrs):
            self.val_metrics.update('acc_' + attr, acc)

        return self.val_metrics.result()
Example #20
def test_resnet():
    model = Resnet50_new()

    top1 = AverageMeter()
    top5 = AverageMeter()
    accu_20 = []
    for i in range(80):
        accu_20.append(AverageMeter())

    for step, sample in enumerate(gd.val_loader):
        weight = sample[0].shape[0]
        s = Variable(sample[0].cuda())
        pre = model(s)
        prec1, prec5 = accuracy(pre.data, sample[1].cuda(), topk=(1, 5))
        top1.update(prec1[0], n=weight)
        top5.update(prec5[0], n=weight)
        update_class_acc(accu_20, pre.data, sample[1].cuda())


        print("Step: {step}, top1: {top1.avg:.3f}({top1.val:.3f}), "
              "top5: {top5.avg:.3f}({top5.val:.3f})".format(step=step, top1=top1, top5=top5))

    for k, j in enumerate(accu_20):
        print("{k}: {top1.avg:.3f}({top1.val:.3f}), ".format(k=k, top1=j))
Example #21
def train(train_loader,
          model,
          optimizer,
          scheduler,
          epoch,
          args,
          streams=None,
          scaler=None):
    """training function"""
    batch_time = metric.AverageMeter('Time', ':6.3f')
    data_time = metric.AverageMeter('Data', ':6.3f')
    avg_ce_loss = metric.AverageMeter('ce_loss', ':.4e')
    avg_cot_loss = metric.AverageMeter('cot_loss', ':.4e')

    # record the top1 accuracy of each small network
    top1_all = []
    for i in range(args.loop_factor):
        # ce_losses_l.append(metric.AverageMeter('{}_CE_Loss'.format(i), ':.4e'))
        top1_all.append(metric.AverageMeter('{}_Acc@1'.format(i), ':6.2f'))
    avg_top1 = metric.AverageMeter('Avg_Acc@1', ':6.2f')
    #if args.dataset == 'imagenet':
    #	avg_top5 = metric.AverageMeter('Avg_Acc@1', ':6.2f')

    # show all
    total_iters = len(train_loader)
    progress = metric.ProgressMeter(total_iters,
                                    batch_time,
                                    data_time,
                                    avg_ce_loss,
                                    avg_cot_loss,
                                    *top1_all,
                                    avg_top1,
                                    prefix="Epoch: [{}]".format(epoch))

    # switch to train mode
    model.train()
    end = time.time()

    # prefetch data
    prefetcher = prefetch.data_prefetcher(train_loader)
    images, target = prefetcher.next()
    i = 0
    """Another way to load the data
	for i, (images, target) in enumerate(train_loader):
	
		# measure data loading time
		data_time.update(time.time() - end)

		if args.gpu is not None:
			images = images.cuda(args.gpu, non_blocking=True)
		target = target.cuda(args.gpu, non_blocking=True)
	"""
    optimizer.zero_grad()
    while images is not None:
        # measure data loading time
        data_time.update(time.time() - end)
        # adjust the lr first
        scheduler(optimizer, i, epoch)
        i += 1

        # compute outputs and losses
        if args.is_amp:
            # Runs the forward pass with autocasting.
            with amp.autocast():
                ensemble_output, outputs, ce_loss, cot_loss = model(
                    images,
                    target=target,
                    mode='train',
                    epoch=epoch,
                    streams=streams)
        else:
            ensemble_output, outputs, ce_loss, cot_loss = model(
                images,
                target=target,
                mode='train',
                epoch=epoch,
                streams=streams)

        # measure accuracy and record loss
        batch_size_now = images.size(0)
        # note: indices i and j are distinct, to avoid mixing them up
        for j in range(args.loop_factor):
            acc1 = metric.accuracy(outputs[j, ...], target, topk=(1, ))
            top1_all[j].update(acc1[0].item(), batch_size_now)

        # simply average outputs of small networks
        avg_acc1 = metric.accuracy(ensemble_output, target, topk=(1, ))
        avg_top1.update(avg_acc1[0].item(), batch_size_now)
        # avg_top5.update(avg_acc1[0].item(), batch_size_now)

        avg_ce_loss.update(ce_loss.mean().item(), batch_size_now)
        avg_cot_loss.update(cot_loss.mean().item(), batch_size_now)

        # compute gradient and do SGD step
        total_loss = (ce_loss + cot_loss) / args.iters_to_accumulate

        if args.is_amp:
            # Scales loss.  Calls backward() on scaled loss to create scaled gradients.
            # Backward passes under autocast are not recommended.
            # Backward ops run in the same dtype autocast chose for corresponding forward ops.
            scaler.scale(total_loss).backward()

            if i % args.iters_to_accumulate == 0 or i == total_iters:
                # scaler.step() first unscales the gradients of the optimizer's assigned params.
                # If these gradients do not contain infs or NaNs, optimizer.step() is then called,
                # otherwise, optimizer.step() is skipped.
                scaler.step(optimizer)
                # Updates the scale for next iteration.
                scaler.update()
                optimizer.zero_grad()
        else:
            total_loss.backward()
            if i % args.iters_to_accumulate == 0 or i == total_iters:
                optimizer.step()
                optimizer.zero_grad()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if not args.multiprocessing_distributed or (args.rank %
                                                    args.ngpus_per_node == 0):
            if i % (args.print_freq * args.iters_to_accumulate) == 0:
                progress.print(i)
        images, target = prefetcher.next()
Example #22
# save model
new_path = os.path.join(loc.densenet_dir, "version")
if not os.path.exists(new_path):
    os.mkdir(new_path)
torch.save(densenet, os.path.join(new_path, "model_best.pth.tar"))

# evaluate on val
densenet = torch.load(loc.densenet_model_dir)
top1 = AverageMeter()
top5 = AverageMeter()
accu_20 = []
for i in range(80):
    accu_20.append(AverageMeter())

for step, sample in enumerate(gd.val_loader):
    weight = sample[0].shape[0]
    s = Variable(sample[0].cuda())
    pre = densenet(s)
    prec1, prec5 = accuracy(pre.data, sample[1].cuda(), topk=(1, 5))
    top1.update(prec1[0], n=weight)
    top5.update(prec5[0], n=weight)
    update_class_acc(accu_20, pre.data, sample[1].cuda())

    print("Step: {step}, top1: {top1.avg:.3f}({top1.val:.3f}), "
          "top5: {top5.avg:.3f}({top5.val:.3f})".format(step=step,
                                                        top1=top1,
                                                        top5=top5))

# for k, j in enumerate(accu_20):
#     print("{k}: {top1.avg:.3f}({top1.val:.3f}), ".format(k=k, top1=j))
Example #23
def train(train_loader, net, discriminator, criterion, optimizer_logit,
          scheduler_logit, optimizer_s_fmap, scheduler_s_fmap,
          optimizer_t_fmap, scheduler_t_fmap, epoch, logger):
    top1 = [AverageMeter(), AverageMeter()]
    top5 = [AverageMeter(), AverageMeter()]
    loss_total = [AverageMeter(), AverageMeter()]

    # switch to train mode
    net.train()

    iters = len(train_loader.dataset) // Config.batch_size
    prefetcher = DataPrefetcher(train_loader)  # speeds up data loading
    inputs, labels = prefetcher.next()

    iter = 1
    while inputs is not None:
        inputs, labels = inputs.float().cuda(), labels.cuda()

        # Adversarial ground truths
        valid = Variable(torch.cuda.FloatTensor(inputs.shape[0], 1, 1,
                                                1).fill_(1.0),
                         requires_grad=False)
        fake = Variable(torch.cuda.FloatTensor(inputs.shape[0], 1, 1,
                                               1).fill_(0.0),
                        requires_grad=False)

        # -----------------
        #  Train Generator
        # -----------------

        # zero the parameter gradients
        # student
        optimizer_logit[0].zero_grad()  # s_g_logit
        optimizer_s_fmap[0].zero_grad()  # s_g_fmap
        # teacher
        optimizer_logit[1].zero_grad()  # t_g_logit
        optimizer_t_fmap[0].zero_grad()  # t_g_fmap

        # forward + backword + optimize
        stu_outputs, tea_outputs = net(inputs)  # [x1, x2, x3, x4, x]

        # student
        loss_stu_logit = criterion[0](stu_outputs[-1], labels) + criterion[1](
            stu_outputs[-1], tea_outputs[-1].detach())

        loss_stu_g = criterion[2](valid,
                                  discriminator.discri_s(stu_outputs[-2]))
        loss_stu = loss_stu_logit + loss_stu_g

        # teacher
        loss_tea_logit = criterion[0](tea_outputs[-1], labels) + criterion[1](
            tea_outputs[-1], stu_outputs[-1].detach())
        loss_tea_g = criterion[2](valid,
                                  discriminator.discri_t(tea_outputs[-2]))
        loss_tea = loss_tea_logit + loss_tea_g

        # student
        loss_stu.backward()
        optimizer_logit[0].step()  # s_g_logit
        optimizer_s_fmap[0].step()  # s_g_fmap

        # teacher
        loss_tea.backward()
        optimizer_logit[1].step()  # t_g_logit
        optimizer_t_fmap[0].step()  # t_g_fmap

        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_s_fmap[1].zero_grad()  # s_d
        optimizer_t_fmap[1].zero_grad()  # t_d

        # discriminator loss
        # student
        loss_stu_d = criterion[2](valid, discriminator.discri_s(tea_outputs[-2].detach())) + \
                     criterion[2](fake, discriminator.discri_s(stu_outputs[-2].detach()))
        # teacher
        loss_tea_d = criterion[2](valid, discriminator.discri_t(stu_outputs[-2].detach())) + \
                     criterion[2](fake, discriminator.discri_t(tea_outputs[-2].detach()))

        # student
        loss_stu_d.backward()
        optimizer_s_fmap[1].step()

        # teacher
        loss_tea_d.backward()
        optimizer_t_fmap[1].step()

        # student
        prec_s = accuracy(stu_outputs[-1], labels, topk=(1, 5))
        top1[0].update(prec_s[0].item(), inputs.size(0))
        top5[0].update(prec_s[1].item(), inputs.size(0))
        loss_total[0].update(loss_stu.item(), inputs.size(0))

        # teacher
        prec_t = accuracy(tea_outputs[-1], labels, topk=(1, 5))
        top1[1].update(prec_t[0].item(), inputs.size(0))
        top5[1].update(prec_t[1].item(), inputs.size(0))
        loss_total[1].update(loss_tea.item(), inputs.size(0))

        inputs, labels = prefetcher.next()  # fetch the next batch

        if iter % 20 == 0:
            loss_log = f"train: epoch {epoch:0>3d}, iter [{iter:0>4d}, {iters:0>4d}]\n"
            loss_log += f"Student detail:\n "
            loss_log += f"top1 acc: {prec_s[0]:.2f}%, top5 acc: {prec_s[1]:.2f}%, "
            loss_log += f"loss_total: {loss_stu.item():3f}, "
            loss_log += f"loss_logit: {loss_stu_logit.item():3f} "
            loss_log += f"loss_g: {loss_stu_g.item():3f} "
            loss_log += f"loss_d: {loss_stu_d.item():3f} "

            loss_log += f"\nTeacher detail:\n "
            loss_log += f"top1 acc: {prec_t[0]:.2f}%, top5 acc: {prec_t[1]:.2f}%, "
            loss_log += f"loss_total: {loss_tea.item():3f}, "
            loss_log += f"loss_logit: {loss_tea_logit.item():3f} "
            loss_log += f"loss_g: {loss_tea_g.item():3f} "
            loss_log += f"loss_d: {loss_tea_d.item():3f} "
            logger.info(loss_log)
        iter += 1

    scheduler_logit[0].step()
    scheduler_s_fmap[0].step()
    scheduler_logit[1].step()
    scheduler_t_fmap[0].step()
    scheduler_s_fmap[1].step()
    scheduler_t_fmap[1].step()

    return top1[0].avg, top1[1].avg, top5[0].avg, top5[1].avg, loss_total[
        0].avg, loss_total[1].avg
Example #24
def accumulate_acc(output, target, meter):
    # Single-headed model
    meter.update(accuracy(output, target), len(target))
    return meter
Example #25
def train_baseline(train_loader, net, criterion, optimizer, scheduler, epoch,
                   logger):
    top1 = [AverageMeter(), AverageMeter()]
    top5 = [AverageMeter(), AverageMeter()]
    loss_total = [AverageMeter(), AverageMeter()]

    # switch to train mode
    net.train()

    iters = len(train_loader.dataset) // Config.batch_size
    prefetcher = DataPrefetcher(train_loader)  # speeds up data loading
    inputs, labels = prefetcher.next()

    iter = 1
    while inputs is not None:
        inputs, labels = inputs.float().cuda(), labels.cuda()

        # zero the parameter gradients
        optimizer[0].zero_grad()
        optimizer[1].zero_grad()

        # forward + backword + optimize
        stu_outputs, tea_outputs = net(inputs)  # [x1, x2, x3, x4, x]

        loss_stu = criterion(stu_outputs[-1], labels)
        loss_tea = criterion(tea_outputs[-1], labels)

        loss_stu.backward()
        loss_tea.backward()
        optimizer[0].step()
        optimizer[1].step()

        # student
        prec_s = accuracy(stu_outputs[-1], labels, topk=(1, 5))
        top1[0].update(prec_s[0].item(), inputs.size(0))
        top5[0].update(prec_s[1].item(), inputs.size(0))
        loss_total[0].update(loss_stu.item(), inputs.size(0))

        # teacher
        prec_t = accuracy(tea_outputs[-1], labels, topk=(1, 5))
        top1[1].update(prec_t[0].item(), inputs.size(0))
        top5[1].update(prec_t[1].item(), inputs.size(0))
        loss_total[1].update(loss_tea.item(), inputs.size(0))

        inputs, labels = prefetcher.next()  # fetch the next batch

        if iter % 20 == 0:
            loss_log = f"train: epoch {epoch:0>3d}, iter [{iter:0>4d}, {iters:0>4d}]\n"
            loss_log += f"Student detail:\n "
            loss_log += f"top1 acc: {prec_s[0]:.2f}%, top5 acc: {prec_s[1]:.2f}%, "
            loss_log += f"loss_s: {loss_stu.item():3f}, "

            loss_log += f"\nTeacher detail:\n "
            loss_log += f"top1 acc: {prec_t[0]:.2f}%, top5 acc: {prec_t[1]:.2f}%, "
            loss_log += f"loss_t: {loss_tea.item():3f}, "
            logger.info(loss_log)
        iter += 1
    scheduler[0].step()
    scheduler[1].step()
    return top1[0].avg, top1[1].avg, top5[0].avg, top5[1].avg, loss_total[
        0].avg, loss_total[1].avg
Example #26
    def validation(self, test_loader, from_train=1):
        # this might possibly change for other incremental scenario
        # This function doesn't distinguish tasks.
        batch_timer = Timer()
        acc = AverageMeter()
        losses = AverageMeter()
        acc_5 = AverageMeter()
        acc_class = [
            AverageMeter()
            for i in range(len(self.train_loader.dataset.class_list))
        ]  #[AverageMeter()] *  len(self.train_loader.dataset.class_list)
        acc_class_5 = [
            AverageMeter()
            for i in range(len(self.train_loader.dataset.class_list))
        ]
        batch_timer.tic()
        orig_mode = self.training
        self.eval()
        for i, (input, target) in enumerate(test_loader):

            if self.gpu:
                with torch.no_grad():
                    input = input.cuda()
                    target = target.cuda()
                    output = self.forward(input)
                    loss = self.criterion(output, target)

            losses.update(loss, input.size(0))
            # Summarize the performance over all tasks, or a single task, depending on the dataloader.
            # Averaged over the total number of samples.

            t_acc, acc_class = accuracy(
                output, target, topk=(1, ), avg_meters=acc_class
            )  #self.accumulate_acc(output, target, acc)
            t_acc_5, acc_class_5 = accuracy(output,
                                            target,
                                            topk=(5, ),
                                            avg_meters=acc_class_5)
            acc.update(t_acc, len(target))
            acc_5.update(t_acc_5, len(target))

        class_list = self.train_loader.dataset.class_list.inverse
        acc_cl_1 = {}
        acc_cl_5 = {}

        # from the per-class accuracies, build accuracies grouped by instance count
        inst_clss_lst = self.train_loader.dataset.class_inst_list
        for ins_clss_, insts in inst_clss_lst.items():
            cls_sum = sum([acc_class[inst].sum for inst in insts])
            cls_cnt = sum([acc_class[inst].count for inst in insts])
            if cls_cnt == 0:
                import pdb
                pdb.set_trace()
            inst_avg = cls_sum / cls_cnt

            self.writer.add_scalar(self.str_ + '/Acc_1_{}'.format(ins_clss_),
                                   inst_avg, self.n_iter)

            cls_sum_5 = sum([acc_class_5[inst].sum for inst in insts])
            cls_cnt_5 = sum([acc_class_5[inst].count for inst in insts])
            inst_avg_5 = cls_sum_5 / cls_cnt_5
            self.writer.add_scalar(self.str_ + '/Acc_5_{}'.format(ins_clss_),
                                   inst_avg_5, self.n_iter)

        for idx, cl_ in class_list.items():
            acc_cl_1[cl_] = [
                acc_class[idx].avg, acc_class[idx].sum, acc_class[idx].count
            ]
            acc_cl_5[cl_] = [
                acc_class_5[idx].avg, acc_class_5[idx].sum,
                acc_class_5[idx].count
            ]
            # self.log(' * Val Acc {acc.avg:.3f} for class {cls}, {acc.sum} / {acc.count} '
            #       .format(acc=acc_class[idx], cls=cl_))

        self.train(orig_mode)

        self.log(' * Val Acc {acc.avg:.3f}, Total time {time:.2f}'.format(
            acc=acc, time=batch_timer.toc()))
        if from_train:
            return acc, losses
        else:
            return acc, acc_5, acc_cl_1, acc_cl_5, losses
Example #27
    def accumulate_acc(self, output, target, meter):
        meter.update(accuracy(output, target), len(target))
        return meter
Example #28
def validate(val_loader, model, args, streams=None):
    """validate function"""
    batch_time = metric.AverageMeter('Time', ':6.3f')
    avg_ce_loss = metric.AverageMeter('ce_loss', ':.4e')

    # record the top1 accuracy of each small network
    top1_all = []
    for i in range(args.loop_factor):
        top1_all.append(metric.AverageMeter('{}_Acc@1'.format(i), ':6.2f'))
    avg_top1 = metric.AverageMeter('Avg_Acc@1', ':6.2f')
    avg_top5 = metric.AverageMeter('Avg_Acc@5', ':6.2f')
    progress = metric.ProgressMeter(len(val_loader),
                                    batch_time,
                                    avg_ce_loss,
                                    *top1_all,
                                    avg_top1,
                                    avg_top5,
                                    prefix='Test: ')

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

            # compute outputs and losses
            if args.is_amp:
                with amp.autocast():
                    ensemble_output, outputs, ce_loss = model(images,
                                                              target=target,
                                                              mode='val')
            else:
                ensemble_output, outputs, ce_loss = model(images,
                                                          target=target,
                                                          mode='val')

            # measure accuracy and record loss
            batch_size_now = images.size(0)
            for j in range(args.loop_factor):
                acc1, acc5 = metric.accuracy(outputs[j, ...],
                                             target,
                                             topk=(1, 5))
                top1_all[j].update(acc1[0].item(), batch_size_now)

            # simply average outputs of small networks
            avg_acc1, avg_acc5 = metric.accuracy(ensemble_output,
                                                 target,
                                                 topk=(1, 5))
            avg_top1.update(avg_acc1[0].item(), batch_size_now)
            avg_top5.update(avg_acc5[0].item(), batch_size_now)

            avg_ce_loss.update(ce_loss.mean().item(), batch_size_now)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                progress.print(i)

        acc_all = []
        acc_all.append(avg_top1.avg)
        acc_all.append(avg_top5.avg)
        acc_info = '* Acc@1 {:.3f} Acc@5 {:.3f}'.format(acc_all[0], acc_all[1])
        for j in range(args.loop_factor):
            acc_all.append(top1_all[j].avg)
            acc_info += '\t {}_acc@1 {:.3f}'.format(j, top1_all[j].avg)

        print(acc_info)

    # torch.cuda.empty_cache()
    return acc_all
Example #29
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    train_set, valid_set, test_set = mnist(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

    if args.model_type == "cnn":
        train_x = train_x.reshape((-1, 28, 28, 1))
        test_x = test_x.reshape((-1, 28, 28, 1))

    if args.model_type == "cnn":
        # a LeNet-5 model with activation function changed to ReLU
        net = Net([
            Conv2D(kernel=[5, 5, 1, 6], stride=[1, 1], padding="SAME"),
            ReLU(),
            MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
            Conv2D(kernel=[5, 5, 6, 16], stride=[1, 1], padding="SAME"),
            ReLU(),
            MaxPool2D(pool_size=[2, 2], stride=[2, 2]),
            Flatten(),
            Dense(120),
            ReLU(),
            Dense(84),
            ReLU(),
            Dense(10)
        ])
    elif args.model_type == "dense":
        net = Net([
            Dense(200),
            ReLU(),
            Dense(100),
            ReLU(),
            Dense(70),
            ReLU(),
            Dense(30),
            ReLU(),
            Dense(10)
        ])
    else:
        raise ValueError("Invalid argument: model_type")

    model = Model(net=net,
                  loss=SoftmaxCrossEntropy(),
                  optimizer=Adam(lr=args.lr))

    iterator = BatchIterator(batch_size=args.batch_size)
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(test_y)
        res = accuracy(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Example #30
 def metric(self, logit, truth, threshold=0.5):
     prob = F.sigmoid(logit)
     acc = accuracy(prob, truth, threshold=threshold, is_average=True)
     return acc