Example #1
            # Top-1 accuracy on the source and target batches
            _, predicted = torch.max(out_s.data, dim=1)
            correct_s += predicted.eq(y_s.data).cpu().sum().item()
            _, predicted = torch.max(out_t.data, dim=1)
            correct_t += predicted.eq(y_t.data).cpu().sum().item()

            loss_t += losses_t.item()

            total += y_s.size(0)

            progress_bar(batch_idx, min(len(source_loader), len(target_loader)),
                         "[Training] Source acc: %.3f%% | Target acc: %.3f%%"
                         % (100.0 * correct_s / total, 100.0 * correct_t / total))

            #######################
            # Record Training log #
            #######################
            source_recorder.update(loss=losses_s.item(), acc=accuracy(out_s.data, y_s.data, (1, 5)),
                                   batch_size=out_s.shape[0], cur_lr=optimizer_s.param_groups[0]['lr'], end=end)

            target_recorder.update(loss=losses_t.item(), acc=accuracy(out_t.data, y_t.data, (1, 5)),
                                   batch_size=out_t.shape[0], cur_lr=optimizer_t.param_groups[0]['lr'], end=end)

        # Test target acc
        test_acc = mask_test(target_net, target_mask_dict, target_test_loader)
        print('\n[Epoch %d] Test Acc: %.3f' % (epoch, test_acc))
        target_recorder.update(loss=None, acc=test_acc, batch_size=0, end=None, is_train=False)

        # Checkpoint the source network whenever the target test accuracy improves
        if best_test_acc < test_acc:
            best_test_acc = test_acc
            if not os.path.isdir('%s/checkpoint' % save_root):
                os.makedirs('%s/checkpoint' % save_root)
            torch.save(source_net.state_dict(), '%s/checkpoint/%s-temp.pth' % (save_root, source_dataset_name))
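
Both recorders above call an accuracy(outputs, targets, (1, 5)) helper that the snippet does not show. A minimal sketch consistent with those call sites, modeled on the top-k accuracy utility from the official PyTorch ImageNet example (the repo's actual version may differ):

def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for the given logits and labels."""
    maxk = max(topk)
    batch_size = target.size(0)

    # Indices of the k highest-scoring classes per sample, shape (maxk, batch)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res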
Example #2
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            optimizer.zero_grad()
            outputs = net(inputs, cur_CR)
            losses = nn.CrossEntropyLoss()(outputs, targets)

            # Add an L1-style penalty on the channel saliencies of FBS layers,
            # pushing the network toward suppressing unimportant channels
            for _layer_name, layer_idx in net.layer_name_list:
                layer = get_layer(net, layer_idx)
                if isinstance(layer, (FBS_CNN, FBS_Linear)):
                    losses += saliency_penalty * torch.sum(layer.saliency)

            losses.backward()
            optimizer.step()

            recorder.print_training_result(batch_idx, len(train_loader))
            recorder.update(loss=losses.item(), acc=accuracy(outputs.data, targets.data, (1, 5)),
                            batch_size=outputs.shape[0], cur_lr=optimizer.param_groups[0]['lr'], end=end)

            end = time.time()

        test_acc = test(net, CR=cur_CR, test_loader=test_loader, dataset_name=dataset_name)

        recorder.update(loss=None, acc=test_acc, batch_size=0, end=None, is_train=False)
        # Adjust lr
        recorder.adjust_lr(optimizer)

print('Best test acc: %s' % recorder.get_best_test_acc())
recorder.close()
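
Example #2 also depends on a get_layer(net, layer_idx) helper that is not part of the snippet. A plausible sketch, assuming layer_idx is a path of attribute names and child indices leading from the root module to the target layer (the real helper may be defined differently):

def get_layer(model, layer_idx):
    # Walk the module tree along the given path, e.g.
    # ['features', 3] -> model.features[3]. The path format is an assumption.
    layer = model
    for step in layer_idx:
        layer = layer[step] if isinstance(step, int) else getattr(layer, step)
    return layer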
Example #3
def test(net,
         quantized_type,
         test_loader,
         use_cuda=True,
         dataset_name='CIFAR10',
         n_batches_used=None):

    net.eval()

    # Small datasets: plain top-1 accuracy. ImageNet: top-1/top-5 with timing.
    if dataset_name != 'ImageNet':

        correct = 0
        total = 0

        for batch_idx, (inputs, targets) in enumerate(test_loader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            with torch.no_grad():
                outputs = net(inputs, quantized_type)

            _, predicted = torch.max(outputs.data, dim=1)
            correct += predicted.eq(targets.data).cpu().sum().item()
            total += targets.size(0)
            progress_bar(batch_idx, len(test_loader),
                         "Test Acc: %.3f%%" % (100.0 * correct / total))

        return 100.0 * correct / total

    else:

        batch_time = AverageMeter()
        test_loss = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()

        with torch.no_grad():
            end = time.time()
            for batch_idx, (inputs, targets) in enumerate(test_loader):
                if use_cuda:
                    inputs, targets = inputs.cuda(), targets.cuda()
                outputs = net(inputs, quantized_type)
                losses = nn.CrossEntropyLoss()(outputs, targets)

                prec1, prec5 = accuracy(outputs.data,
                                        targets.data,
                                        topk=(1, 5))
                test_loss.update(losses.item(), inputs.size(0))
                top1.update(prec1.item(), inputs.size(0))
                top5.update(prec5.item(), inputs.size(0))

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if batch_idx % 200 == 0:
                    print('Test: [{0}/{1}]\t'
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              batch_idx,
                              len(test_loader),
                              batch_time=batch_time,
                              loss=test_loss,
                              top1=top1,
                              top5=top5))

                if n_batches_used is not None and batch_idx >= n_batches_used:
                    break

        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                    top5=top5))

        return top1.avg, top5.avg
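
Example #3 leans on the AverageMeter bookkeeping class familiar from the official PyTorch examples. A minimal version matching the .val/.avg/.update(value, n) usage above, in case the repo's copy differs:

class AverageMeter:
    """Track the latest value and a running average."""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count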
Example #4
        # NOTE: the snippet is truncated here; the call that computed the conv1
        # statistics below is missing. The helper name is a placeholder (see the
        # sketch after this example):
        conv1_pos, conv1_neg, conv1_pos_rate, conv1_neg_rate, conv1_prune_rate = conv1_weight_stats(
            net.conv1)
        # Log the conv1 statistics and flush so progress survives interruption
        conv1_pos_file.write(
            '%d, %.3f, %.3f\n' %
            (batch_idx + epoch * len(train_loader), conv1_pos, conv1_pos_rate))
        conv1_neg_file.write(
            '%d, %.3f, %.3f\n' %
            (batch_idx + epoch * len(train_loader), conv1_neg, conv1_neg_rate))
        conv1_prune_file.write(
            '%d, %.3f\n' %
            (batch_idx + epoch * len(train_loader), conv1_prune_rate))
        conv1_pos_file.flush()
        conv1_neg_file.flush()
        conv1_prune_file.flush()

        recorder.update(loss=losses.item(),
                        acc=accuracy(outputs.data, targets.data, (1, 5)),
                        batch_size=outputs.shape[0],
                        cur_lr=optimizer.param_groups[0]['lr'],
                        end=end)

        recorder.print_training_result(
            batch_idx, len(train_loader),
            append='pos: %.3f, neg: %.3f, pos rate: %.3f, neg rate: %.3f, prune rate: %.3f'
                   % (conv1_pos, conv1_neg, conv1_pos_rate, conv1_neg_rate, conv1_prune_rate))
        end = time.time()

    test_acc = test(net, test_loader=test_loader, dataset_name=dataset_name)
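
The statistics logged in Example #4 (positive and negative weight counts plus a prune rate for the first convolution) come from code cut off at the top of the snippet. A hypothetical helper that would produce values matching the log format, assuming "pruned" means exactly-zero weights:

def conv1_weight_stats(conv1):
    # Count positive, negative, and zeroed weights in the first conv layer.
    # The function name and the exact definition of "pruned" are assumptions.
    w = conv1.weight.data
    total = w.numel()
    pos = (w > 0).sum().item()
    neg = (w < 0).sum().item()
    pruned = (w == 0).sum().item()
    return pos, neg, pos / total, neg / total, pruned / total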