Example #1
import copy
import utils, twothirds_protocol
from families import Family

def demo(family: Family, goods: set):
    # split one family into two identically-valued groups and run the two-thirds protocol
    family1 = copy.copy(family)
    family1.name = "Group 1"
    family2 = copy.copy(family)
    family2.name = "Group 2"
    utils.demo(twothirds_protocol.allocate, [family1, family2], goods)
    print("\n")
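
A hypothetical invocation, reusing the BinaryAgent, Family and fairness-criteria constructors that appear in the later examples (the specific agents and goods are illustrative assumptions, not from the original):

import fairness_criteria
from agents import BinaryAgent

# illustrative family: two binary agents judged by a 1-of-2 maximin-share criterion
family = Family([BinaryAgent("wx", 1), BinaryAgent("yz", 1)],
                fairness_criteria.MaximinShareOneOfC(2), name="Demo group")
demo(family, set("wxyz"))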
Example #2
import line_protocol, logging
from agents import *
from families import Family
from utils import demo
import fairness_criteria

line_protocol.logger.setLevel(logging.INFO)

# Define goods and the PROP1 criterion (proportionality except one good):
u='u'; v='v'; w='w'; x='x'; y='y'; z='z'
PROP1 = fairness_criteria.ProportionalExceptC(num_of_agents=2, c=1)

# The same three families as in Example #3, here judged by PROP1:
family1 = Family([
    AdditiveAgent({u:1, v:1, w:2, x:4, y:8, z:16}, 7),
    AdditiveAgent({u:16, v:16, w:8, x:4, y:2, z:1}, 2)],
    PROP1, name="Group 1")
family2 = Family([
    AdditiveAgent({u:1, v:1, w:1, x:3, y:3, z:4}, 5),
    AdditiveAgent({u:4, v:4, w:3, x:1, y:3, z:1}, 1)],
    PROP1, name="Group 2")
family3 = Family([
    AdditiveAgent({u:1, v:1, w:1, x:2, y:3, z:3}, 9),
    AdditiveAgent({u:3, v:3, w:3, x:2, y:1, z:1}, 3)],
    PROP1, name="Group 3")

print("\n\n\ndemocratic EF1/PROP1 allocation between two groups:")
demo(line_protocol.allocate, [family1, family2], "uvwxyz")
print("\n\n\n")
demo(line_protocol.allocate, [family1, family3], "uvwxyz")
print("\n\n\n")
demo(line_protocol.allocate, [family2, family3], "zyxwvu")
Example #3
import line_protocol, logging
from agents import *
from families import Family
from utils import demo
import fairness_criteria


line_protocol.logger.setLevel(logging.INFO)

# Define the fairness criterion PROP* (proportionality except k-1 goods):
goods = "uvwxyz"
u='u'; v='v'; w='w'; x='x'; y='y'; z='z'
k = 3  # number of families
PropStar = fairness_criteria.ProportionalExceptC(num_of_agents=k, c=k-1)
family1 = Family([
    AdditiveAgent({u:1, v:1, w:2, x:4, y:8, z:16}, 7),
    AdditiveAgent({u:16, v:16, w:8, x:4, y:2, z:1}, 2)],
    PropStar, name="Group 1")
family2 = Family([
    AdditiveAgent({u:1, v:1, w:1, x:3, y:3, z:4}, 5),
    AdditiveAgent({u:4, v:4, w:3, x:1, y:3, z:1}, 1)],
    PropStar, name="Group 2")
family3 = Family([
    AdditiveAgent({u:1, v:1, w:1, x:2, y:3, z:3}, 9),
    AdditiveAgent({u:3, v:3, w:3, x:2, y:1, z:1}, 3)],
    PropStar, name="Group 3")

print("\n\n\ndemocratic-fair allocation among three groups:")
demo(line_protocol.allocate, [family1, family2, family3], goods)

Example #4
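# One worker process runs per GPU; 'gpu' is the local GPU index on this node.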
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu

    print("INFO:PyTorch: Initialize process group for distributed training")
    if args.dist_url == "env://" and args.rank == -1:
        args.rank = int(os.environ["RANK"])

    # global rank of this process = node rank * GPUs per node + local GPU index
    args.rank = args.rank * ngpus_per_node + gpu

    distributed.init_process_group(backend=args.dist_backend,
                                   init_method=args.dist_url,
                                   world_size=args.world_size,
                                   rank=args.rank)

    if args.gpu is not None:
        if not args.evaluate:
            print(
                "INFO:PyTorch: Use GPU: {} for training, the rank of this GPU is {}"
                .format(args.gpu, args.rank))
        else:
            print(
                "INFO:PyTorch: Use GPU: {} for evaluating, the rank of this GPU is {}"
                .format(args.gpu, args.rank))

    # unreduced per-element MSE; the model class named by args.arch is instantiated via eval
    criterion = nn.MSELoss(reduction='none')
    model = eval(args.arch)(criterion=criterion, args=args)

    if args.discriminator:
        criterion_D = FocalWithLogitsLoss(alpha=1, gamma=2, reduction=True)
        D = eval(args.discriminator)(criterion=criterion_D, args=args)

    # print the number of parameters in the model
    print("INFO:PyTorch: The number of parameters is {}".format(
        get_the_number_of_params(model)))
    if args.is_summary:
        summary.summary(model,
                        torch.rand((1, 3, args.h, args.w)),
                        target=torch.ones(1, dtype=torch.long))
        return None

    if args.is_syncbn:
        print(
            "INFO:PyTorch: convert torch.nn.BatchNormND layer in the model to torch.nn.SyncBatchNorm layer"
        )
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
        D = nn.SyncBatchNorm.convert_sync_batchnorm(
            D) if args.discriminator else None

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model.cuda(args.gpu)
        D = D.cuda(args.gpu) if args.discriminator else None

        # one process drives one GPU, so divide the per-node batch size and
        # data-loading workers among the ngpus_per_node processes
        args.batch_size = int(args.batch_size / ngpus_per_node)
        args.workers = int(
            (args.workers + ngpus_per_node - 1) / ngpus_per_node)

    else:
        model.cuda()
        D = D.cuda() if args.discriminator else None

    param_groups = model.parameters()
    args.param_groups = param_groups

    if args.discriminator:
        param_groups_D = D.parameters()
        args.param_groups_D = param_groups_D
        optimizer_D = torch.optim.SGD(
            param_groups_D,
            args.lr * 10,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            nesterov=args.is_nesterov)

    if args.optim == 'adam':
        optimizer = torch.optim.Adam(param_groups,
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
    elif args.optim == 'adamW':
        optimizer = optim.AdamW(param_groups,
                                lr=args.lr,
                                weight_decay=args.weight_decay,
                                eps=args.epsilon)
    else:
        optimizer = torch.optim.SGD(
            param_groups,
            args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            nesterov=args.is_nesterov)

    # AMP GradScaler: scales losses so float16 gradients do not underflow
    if args.is_amp:
        print("INFO:PyTorch: => Using PyTorch AMP to accelerate training")
        args.scaler = GradScaler()
        args.scaler_D = GradScaler() if args.discriminator else None

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("INFO:PyTorch: => loading checkpoint '{}'".format(
                args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)

            args.start_epoch = checkpoint['epoch']
            #best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])

            if args.discriminator:
                D.load_state_dict(checkpoint['D'])
                optimizer_D.load_state_dict(checkpoint['optimizer_D'])

            print("INFO:PyTorch: => loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("INFO:PyTorch: => no checkpoint found at '{}'".format(
                args.resume))

    if args.lr_mode == 'step':
        scheduler = MultiStepLR(optimizer,
                                milestones=args.lr_milestones,
                                gamma=args.lr_step_multiplier,
                                last_epoch=args.start_epoch - 1)
    else:
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                         T_max=args.epochs)
        # scheduler = lr_scheduler.lr_scheduler(init_lr=args.lr,
        #                             mode=args.lr_mode,
        #                             max_iter=args.epochs,
        #                             lr_milestones=args.lr_milestones,
        #                             slow_start_steps=args.slow_start_epochs,
        #                             slow_start_lr=args.slow_start_lr,
        #                             end_lr=args.end_lr,
        #                             lr_step_multiplier=args.lr_step_multiplier,
        #                             multiplier=args.lr_multiplier
        #                         )

    if args.discriminator:
        scheduler_D = MultiStepLR(optimizer_D,
                                  milestones=args.lr_milestones,
                                  gamma=args.lr_step_multiplier,
                                  last_epoch=args.start_epoch - 1)

    # accelerate training: let cuDNN benchmark and cache the fastest conv algorithms
    torch.backends.cudnn.benchmark = True

    if args.demo:  # TODO: Update code
        with torch.no_grad():
            demo(model, args)
        return None

    # Data loading code
    traindir = os.path.join(args.dataset_path, args.dataset_type,
                            args.training_folder)
    testdir = os.path.join(args.dataset_path, args.dataset_type,
                           args.testing_folder)
    labeldir = os.path.join(args.dataset_path, args.dataset_type,
                            args.label_folder) if args.label_folder else None
    train_batch, test_batch, train_sampler = load_dataset(
        traindir, testdir, labeldir, args)

    if args.evaluate:
        with torch.no_grad():
            if args.discriminator:
                acc, test_average_loss = evaluate_new_D(
                    model, D, test_batch, args)
            else:
                acc, test_average_loss = evaluate_new(model, test_batch, args)
        print('Test AUC: {}%'.format(acc * 100))
        return None

    # LOGGING
    setproctitle.setproctitle(args.dataset_type + '_' + args.arch +
                              '_rank{}'.format(args.rank))
    log_path = os.path.join(
        '/home/miaobo/project/anomaly_demo2', 'runs', '_'.join([
            args.suffix, args.dataset_type, args.arch,
            args.discriminator if args.discriminator else ""
        ]))
    val_writer = SummaryWriter(log_dir=log_path)
    print("Tensorboard log: {}".format(log_path))

    for epoch in range(args.start_epoch, args.epochs + 1):
        # re-seed the distributed sampler so each epoch gets a different shuffle
        train_sampler.set_epoch(epoch)

        # if args.lr_mode != 'step':
        #     scheduler(optimizer, epoch)

        if args.discriminator:
            train_average_loss = train_D(train_batch, model, D, optimizer,
                                         optimizer_D, epoch, args)
            scheduler_D.step()
        else:
            train_average_loss = train(train_batch, model, optimizer, epoch,
                                       args)

        scheduler.step()

        if epoch % args.eval_per_epoch == 0:
            print("Starting EVALUATION ......")
            a = time.time()
            with torch.no_grad():
                if args.discriminator:
                    acc1, test_average_loss = evaluate_new_D(
                        model, D, test_batch, args)
                else:
                    acc1, test_average_loss = evaluate_new(
                        model, test_batch, args)
            print("EVALUATION TIME COST: {} min".format(
                int(time.time() - a) / 60))

            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)

            # only the first process on each node (local rank 0) logs and saves checkpoints
            if args.rank % ngpus_per_node == 0:

                print("epoch: {}, EVALUATION AUC: {}, HISTORY BEST AUC: {}".
                      format(epoch, acc1 * 100, best_acc1 * 100))
                # summary per epoch
                val_writer.add_scalar('avg_acc1', acc1, global_step=epoch)
                val_writer.add_scalar('best_acc1',
                                      best_acc1,
                                      global_step=epoch)
                val_writer.add_scalars('losses', {
                    'TrainLoss': train_average_loss,
                    'TestLoss': test_average_loss
                },
                                       global_step=epoch)
                val_writer.add_scalar('learning_rate',
                                      optimizer.param_groups[0]['lr'],
                                      global_step=epoch)
                val_writer.add_scalar('training_loss',
                                      train_average_loss,
                                      global_step=epoch)
                # val_writer.add_scalar('testing_loss', test_average_loss, global_step=epoch)

                # save checkpoints
                filename = '_'.join([
                    args.suffix, args.arch,
                    args.discriminator if args.discriminator else "",
                    args.dataset_type, "checkpoint.pth.tar"
                ])
                ckpt_dict = {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'best_acc1': best_acc1,
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                }
                if args.discriminator:
                    ckpt_dict['D'] = D.state_dict()
                    ckpt_dict['optimizer_D'] = optimizer_D.state_dict()

                metric.save_checkpoint(ckpt_dict,
                                       is_best,
                                       args,
                                       filename=filename)

                # reload best model every n epoch
                if args.reload_best and epoch % args.reload_interval == 0:
                    print(
                        "Reloading model from best_{}; acc restored from {} to {}"
                        .format(filename, acc1, best_acc1))
                    if args.gpu is None:
                        checkpoint = torch.load(
                            os.path.join(args.model_dir, "best_" + filename))
                    else:
                        # Map model to be loaded to specified single gpu.
                        loc = 'cuda:{}'.format(args.gpu)
                        checkpoint = torch.load(os.path.join(
                            args.model_dir, "best_" + filename),
                                                map_location=loc)
                    model.load_state_dict(checkpoint['model'])
                    if args.discriminator:
                        D.load_state_dict(checkpoint['D'])

    torch.cuda.empty_cache()
    val_writer.close()
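
main_worker follows PyTorch's standard one-process-per-GPU launch pattern, so it is normally started through torch.multiprocessing.spawn. A minimal launcher sketch under that assumption (main and its world_size handling are hypothetical, not part of this example):

import torch
import torch.multiprocessing as mp

def main(args):
    # hypothetical launcher: spawn passes the process index (the local GPU id)
    # to main_worker as its first argument
    ngpus_per_node = torch.cuda.device_count()
    args.world_size = ngpus_per_node * args.world_size  # total processes over all nodes
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
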
Example #5
import rwav_protocol, enhanced_rwav_protocol, logging
import fairness_criteria
from agents import BinaryAgent
from families import Family
from utils import demo

rwav_protocol.logger.setLevel(logging.INFO)
rwav_protocol.choose_good.logger.setLevel(logging.INFO)
rwav_protocol.member_weight.logger.setLevel(logging.INFO)


# Define fairness criteria:
fairness_1_of_2_mms  = fairness_criteria.MaximinShareOneOfC(2)
fairness_1_of_best_2 = fairness_criteria.OneOfBestC(2)

# Define families:
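# (Here BinaryAgent(goods, n) is read as n identical agents, each wanting exactly the listed goods.)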
family1 = Family([
    BinaryAgent("vx",2),
    BinaryAgent("vxy",1),
    BinaryAgent("wxyz",5),
    BinaryAgent("zw",3)], fairness_1_of_2_mms, name="Group 1")
family2 = Family([
    BinaryAgent("wxyz",2),
    BinaryAgent("vz",3)], fairness_1_of_best_2, name="Group 2")

# Run the protocol:
print("\n\n\nRWAV protocol - {} plays first".format(family1.name))
demo(rwav_protocol.allocate, [family1, family2], "vwxyz")

print("\n\n\nRWAV protocol - {} plays first".format(family2.name))
demo(rwav_protocol.allocate, [family2, family1], "vwxyz")

fairness_1_of_best_2 = fairness_criteria.OneOfBestC(2)

# Define families:
family1 = Family([
    BinaryAgent("vw", 3),
    BinaryAgent("vx", 3),
    BinaryAgent("vy", 2),
    BinaryAgent("vz", 2)
],
                 fairness_1_of_best_2,
                 name="Group 1")
print(family1)
family2 = Family([
    BinaryAgent(goods, 1)
    for goods in ["vw", "vx", "vy", "vz", "wx", "wy", "wz", "xy", "xz", "yz"]
],
                 fairness_1_of_best_2,
                 name="Group 2")
print(family2)

# Run the protocol:
print("\n\nRWAV protocol - group 1 plays first:")
demo(rwav_protocol.allocate, [family1, family2], "vwxyz")

print("\n\nRWAV protocol - group 1 and group 2 exchange roles:")
demo(rwav_protocol.allocate, [family2, family1], "vwxyz")

threshold = 0.6
print("\n\nEnhanced RWAV protocol with threshold {}:".format(threshold))
demo(enhanced_rwav_protocol.allocate, [family1, family2], "vwxyz", threshold)
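
Throughout these examples, utils.demo apparently forwards any extra arguments (such as the threshold above) to the protocol's allocate function. A plausible sketch of such a helper, purely as an assumption about the library's behavior:

def demo(allocate, families, goods, *extra_args):
    # print each family, run the protocol, and print the resulting allocation
    for family in families:
        print(family)
    allocation = allocate(families, goods, *extra_args)
    print(allocation)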