# Exemplo n.º 1
# 0
def eva_predict(args):
    """Run single-model inference on the evaluation set and submit predictions.

    Loads a ModifiedXception checkpoint, predicts class indices for every
    batch of the evaluation loader, and streams the integer predictions to
    the Submitter batch by batch.

    Args:
        args: namespace with device, is_divide_variance, batch_size,
            nb_class, drop_rate, decay, ckpt_file.
    """
    # set up cuda device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    eva_loader = EVA_Loader(
        args.is_divide_variance).eva(batch_size=args.batch_size)

    from xception import ModifiedXception
    model = ModifiedXception(num_classes=args.nb_class,
                             drop_rate=args.drop_rate,
                             decay=args.decay)

    model.load_state_dict(torch.load(args.ckpt_file)['model_state_dict'])

    model.to(device)

    model.eval()

    submitter = Submitter()

    # no_grad: pure inference — eval() alone does not stop autograd from
    # recording the graph, which wastes GPU memory on large eval sets.
    with torch.no_grad():
        for x in eva_loader:
            x = x.to(device)
            batch_pred = model(x)
            pred_int = torch.argmax(batch_pred, dim=1)
            submitter.submit_batch(pred_int.cpu().numpy())
def get_class_wise_accuracy(args):
    """Compute class-wise and overall accuracy for a single model.

    Selects a loader variant by ``args.method`` ('normal', 'medfilter' or
    'meansub'), evaluates one checkpoint on both the train and validation
    splits, and reports accuracy per scene label and overall.

    Args:
        args: namespace with device, method, batch_size, nb_class,
            drop_rate, decay, ckpt_file and the per-model
            is_divide_variance flags.

    Returns:
        (train_acc_cw, val_acc_cw, train_acc, val_acc) where the *_cw
        values are dicts mapping scene label -> accuracy, or None if
        ``args.method`` is not recognised.
    """
    # single model

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    if args.method == 'normal':
        loader = EVA_Loader(is_divide_variance=args.model1_is_divide_variance)
    elif args.method == 'medfilter':
        loader = EVA_Medfilter_Loader(
            is_divide_variance=args.model2_is_divide_variance)
    elif args.method == 'meansub':
        loader = EVA_MeanSub_Loader(
            is_divide_variance=args.model3_is_divide_variance)
    else:
        print('Error. Please choose one of ["normal", "medfilter", "meansub"]')
        return None

    train_loader = loader.train(batch_size=args.batch_size)
    val_loader = loader.val(batch_size=args.batch_size)

    from xception import ModifiedXception
    model = ModifiedXception(num_classes=args.nb_class,
                             drop_rate=args.drop_rate,
                             decay=args.decay)
    model.load_state_dict(torch.load(args.ckpt_file)['model_state_dict'])
    model.to(device)
    # eval(): disable dropout for deterministic predictions — the sibling
    # inference functions in this file all call it before predicting.
    model.eval()

    train_pred_prob = get_pred_prob(model, train_loader, device)
    train_pred = np.argmax(train_pred_prob, axis=1)

    val_pred_prob = get_pred_prob(model, val_loader, device)
    val_pred = np.argmax(val_pred_prob, axis=1)

    train_target = get_target(train_loader)

    val_target = get_target(val_loader)

    train_acc_class_wise = calculate_accuracy(target=train_target,
                                              predict=train_pred,
                                              classes_num=args.nb_class)
    train_acc = (np.array(train_target) == np.array(train_pred)).mean()

    val_acc_class_wise = calculate_accuracy(target=val_target,
                                            predict=val_pred,
                                            classes_num=args.nb_class)
    val_acc = (np.array(val_target) == np.array(val_pred)).mean()

    labels = [
        'airport', 'bus', 'metro', 'metro_station', 'park', 'public_square',
        'shopping_mall', 'street_pedestrian', 'street_traffic', 'tram'
    ]

    # map each scene label to its accuracy (labels[i] <-> class index i)
    train_acc_cw = dict(zip(labels, train_acc_class_wise))
    val_acc_cw = dict(zip(labels, val_acc_class_wise))

    return train_acc_cw, val_acc_cw, train_acc, val_acc
def combine_predict(args):
    """Fuse two checkpoints of the same architecture on the leaderboard set.

    Both checkpoints are evaluated on the same loader; their probability
    outputs are summed and the argmax class is submitted.
    """
    # set up cuda device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    loader = LB_Loader(args.is_divide_variance).lb(batch_size=args.batch_size)

    from xception import ModifiedXception
    net = ModifiedXception(num_classes=args.nb_class,
                           drop_rate=args.drop_rate,
                           decay=args.decay)

    # first checkpoint
    net.load_state_dict(torch.load(args.ckpt_file)['model_state_dict'])
    net.to(device)
    net.eval()
    first_prob = get_pred(net, loader, device)

    # second checkpoint — same architecture, different weights
    net.load_state_dict(torch.load(args.ckpt_file1)['model_state_dict'])
    net.to(device).eval()
    second_prob = get_pred(net, loader, device)

    # sum of probabilities, then hard decision
    fused = first_prob + second_prob
    final_pred = np.argmax(fused, axis=1)

    Submitter().submit_batch(final_pred)
# Exemplo n.º 4
# 0
def run(args):
    """Train a ModifiedXception model with mixup and checkpointing.

    Sets up logging/CUDA, builds train and validation loaders, trains for
    ``args.run_epochs`` epochs with mixup, tracks per-device-split ('a',
    'bc', 'abc') validation history, optionally reduces the LR on plateau,
    and saves checkpoints monitored on the averaged 'abc' accuracy.

    Args:
        args: namespace with seed, device, is_divide_variance, batch_size,
            nb_class, drop_rate, decay, optimizer, init_lr, l2, lr_factor,
            lr_patience, continue_run, exp, ckpt_prefix, run_epochs,
            mix_alpha, plot.

    Raises:
        ValueError: if ``args.optimizer`` is not 'sgd' or 'adam'.
    """
    set_seed(args.seed)

    set_logging(ROOT_DIR, args)
    import pprint
    logging.info(
        pprint.pformat(vars(args)) if not isinstance(args, dict) else pprint.
        pformat(args))

    # set up cuda device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    loader = Dev_Loader(is_divide_variance=args.is_divide_variance)

    train_loader = loader.train(batch_size=args.batch_size)
    val_loader = loader.val(batch_size=args.batch_size)

    # model = getattr(net_archs, args.net)(args).cuda()
    from xception import ModifiedXception
    model = ModifiedXception(num_classes=args.nb_class,
                             drop_rate=args.drop_rate,
                             decay=args.decay).cuda()

    if args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.init_lr,
                              momentum=0.9,
                              nesterov=True)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.init_lr,
                               weight_decay=args.l2)
    else:
        # fail fast with a clear message instead of a NameError later
        raise ValueError(
            "args.optimizer must be 'sgd' or 'adam', got {!r}".format(
                args.optimizer))

    # scheduler only exists when LR decay is requested; every use below is
    # guarded by the same condition
    if args.lr_factor < 1.0:
        scheduler = ReduceLROnPlateau(optimizer,
                                      mode='max',
                                      verbose=True,
                                      factor=args.lr_factor,
                                      patience=args.lr_patience)

    train_hist = History(name='train')
    # 'a' / 'bc' are device splits; 'abc' is their average
    test_list = ['a', 'bc', 'abc']
    val_hist = dict()
    for d in test_list:
        val_hist[d] = History(name='val/{}'.format(d))

    if args.continue_run:
        ckpt_file = Reporter(exp=args.exp).select_last(
            args.ckpt_prefix[0:5]).selected_ckpt
        logging.info('continue training from {}'.format(ckpt_file))

        ckpt_dicts = torch.load(ckpt_file)

        model.load_state_dict(ckpt_dicts['model_state_dict'])
        model.cuda()

        optimizer.load_state_dict(ckpt_dicts['optimizer_state_dict'])

        start_epoch = ckpt_dicts['epoch'] + 1
    else:
        start_epoch = 1

    # checkpoint after new History, order matters
    ckpter = CheckPoint(model=model,
                        optimizer=optimizer,
                        path='{}/ckpt/{}'.format(ROOT_DIR, args.exp),
                        prefix=args.ckpt_prefix,
                        interval=1,
                        save_num=1)

    for epoch in range(start_epoch, args.run_epochs):

        train_mixup_all(train_loader,
                        model,
                        optimizer,
                        device,
                        mix_alpha=args.mix_alpha)

        train_hist.add(logs=eval_model(train_loader, model, device),
                       epoch=epoch)

        # evaluate each device split, then average loss/acc for 'abc'
        a_logs = eval_model(val_loader['a'], model, device)
        bc_logs = eval_model(val_loader['bc'], model, device)
        avg_loss = (a_logs['loss'] + bc_logs['loss']) / 2
        avg_acc = (a_logs['acc'] + bc_logs['acc']) / 2
        avg_logs = {'loss': avg_loss, 'acc': avg_acc}
        val_hist['a'].add(logs=a_logs, epoch=epoch)
        val_hist['bc'].add(logs=bc_logs, epoch=epoch)
        val_hist['abc'].add(logs=avg_logs, epoch=epoch)

        if args.lr_factor < 1.0:
            scheduler.step(val_hist['abc'].recent['acc'])

        # plotting
        if args.plot:
            train_hist.clc_plot()
            for d in test_list:
                val_hist[d].plot()

        # logging
        logging.info("Epoch{:04d},{:6},{}".format(epoch, train_hist.name,
                                                  str(train_hist.recent)))
        for d in test_list:
            logging.info("Epoch{:04d},{:6},{}".format(epoch, val_hist[d].name,
                                                      str(val_hist[d].recent)))

        ckpter.check_on(epoch=epoch,
                        monitor='acc',
                        loss_acc=val_hist['abc'].recent)

    # explicitly save last
    ckpter.save(epoch=args.run_epochs - 1,
                monitor='acc',
                loss_acc=val_hist['abc'].recent)
# Exemplo n.º 5
# 0
def combine_predict3(args):
    """Fuse three checkpoints, each with its own preprocessed eval loader.

    Model 1 uses the variance-normalised loader, model 2 the median-filter
    loader and model 3 the mean-subtraction loader. Probabilities are
    summed and the argmax class is submitted.
    """
    # set up cuda device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    loader_a = EVA_Loader(is_divide_variance=True).eva(
        batch_size=args.batch_size)
    loader_b = EVA_Medfilter_Loader(is_divide_variance=False).eva(
        batch_size=args.batch_size)
    loader_c = EVA_MeanSub_Loader(is_divide_variance=False).eva(
        batch_size=args.batch_size)

    from xception import ModifiedXception
    net = ModifiedXception(num_classes=args.nb_class,
                           drop_rate=args.drop_rate,
                           decay=args.decay)

    # checkpoint 1 (non-strict load, preserved from the original code —
    # presumably this checkpoint has extra/missing keys)
    net.load_state_dict(torch.load(args.ckpt_file)['model_state_dict'],
                        strict=False)
    net.to(device).eval()
    prob_a = get_pred(net, loader_a, device)

    # checkpoint 2
    net.load_state_dict(torch.load(args.ckpt_file1)['model_state_dict'])
    net.to(device).eval()
    prob_b = get_pred(net, loader_b, device)

    # checkpoint 3
    net.load_state_dict(torch.load(args.ckpt_file2)['model_state_dict'])
    net.to(device).eval()
    prob_c = get_pred(net, loader_c, device)

    fused = prob_a + prob_b + prob_c
    final_pred = np.argmax(fused, axis=1)

    Submitter().submit_batch(final_pred)
def get_device_wise_accuracy(args):
    """Compute device-wise and class-wise validation accuracy (single model).

    Evaluates one checkpoint on three device-specific validation loaders
    (index 0, 1, 2), then additionally pools devices 1 and 2 ("b" and "c")
    into a combined fourth entry.

    Args:
        args: namespace with device, method, batch_size, nb_class,
            drop_rate, decay, ckpt_file and the per-model
            is_divide_variance flags.

    Returns:
        (val_acc_cw, val_acc_mean): a list of four dicts (label -> acc) and
        a list of four overall accuracies — one per device plus the pooled
        b+c entry — or None if ``args.method`` is not recognised.
    """
    # single model

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    if args.method == 'normal':
        loader = Device_Wise_Val_Loader(
            is_divide_variance=args.model1_is_divide_variance)
    elif args.method == 'medfilter':
        loader = Device_Wise_Medfilter_Val_Loader(
            is_divide_variance=args.model2_is_divide_variance)
    elif args.method == 'meansub':
        loader = Device_Wise_MeanSub_Val_Loader(
            is_divide_variance=args.model3_is_divide_variance)
    else:
        print('Error. Please choose one of ["normal", "medfilter", "meansub"]')
        return None

    val_loader = loader.val(batch_size=args.batch_size)

    from xception import ModifiedXception
    model = ModifiedXception(num_classes=args.nb_class,
                             drop_rate=args.drop_rate,
                             decay=args.decay)
    model.load_state_dict(
        torch.load(os.path.join(ROOT_DIR, args.ckpt_file))['model_state_dict'])
    model.to(device)
    # eval(): disable dropout so validation predictions are deterministic
    model.eval()

    labels = [
        'airport', 'bus', 'metro', 'metro_station', 'park', 'public_square',
        'shopping_mall', 'street_pedestrian', 'street_traffic', 'tram'
    ]
    val_acc_cw = []
    val_acc_mean = []
    bc_val_pred = []
    bc_val_target = []
    for i in range(3):
        val_pred_prob = get_pred_prob(model, val_loader[i], device)
        val_pred = np.argmax(val_pred_prob, axis=1)
        val_target = get_target(val_loader[i])
        # devices 1 and 2 ("b"/"c") are pooled into a combined entry below
        if i > 0:
            bc_val_pred.append(val_pred)
            bc_val_target.append(val_target)
        val_acc = calculate_accuracy(target=val_target,
                                     predict=val_pred,
                                     classes_num=args.nb_class)
        acc_mean = (np.array(val_target) == np.array(val_pred)).mean()
        val_acc_mean.append(acc_mean)

        # use a distinct loop variable — the original shadowed the outer `i`
        val_acc_class_wise = dict()
        for c in range(args.nb_class):
            val_acc_class_wise[labels[c]] = val_acc[c]
        val_acc_cw.append(val_acc_class_wise)

    # pooled metrics over devices b and c
    bc_val_target = np.concatenate(bc_val_target)
    bc_val_pred = np.concatenate(bc_val_pred)
    bc_val_acc = calculate_accuracy(target=bc_val_target,
                                    predict=bc_val_pred,
                                    classes_num=args.nb_class)
    bc_val_acc_mean = (np.array(bc_val_target) == np.array(bc_val_pred)).mean()
    val_acc_class_wise = dict()
    for c in range(args.nb_class):
        val_acc_class_wise[labels[c]] = bc_val_acc[c]
    val_acc_cw.append(val_acc_class_wise)
    val_acc_mean.append(bc_val_acc_mean)
    return val_acc_cw, val_acc_mean
def get_device_wise_accuracy_multi_models(args):
    """Device-wise, class-wise accuracy for a three-model probability fusion.

    For each of the three device-specific validation loaders, evaluates
    three checkpoints (each paired with its own preprocessing loader), sums
    their probability outputs and takes the argmax. Devices 1 and 2
    ("b"/"c") are additionally pooled into a combined fourth entry.

    Args:
        args: namespace with device, batch_size, nb_class, drop_rate,
            decay, ckpt_file/ckpt_file1/ckpt_file2 and the per-model
            is_divide_variance flags.

    Returns:
        (val_acc_cw, val_acc_mean): a list of four dicts (label -> acc) and
        a list of four overall accuracies — one per device plus the pooled
        b+c entry.
    """
    # device-wise, class-wise, multi-model fusion

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
    device = torch.device('cuda')

    loaders = get_loaders(batch_size=args.batch_size,
                          model1_var=args.model1_is_divide_variance,
                          model2_var=args.model2_is_divide_variance,
                          model3_var=args.model3_is_divide_variance)

    from xception import ModifiedXception

    def _load_model(ckpt_file):
        # build the architecture, load one checkpoint and move it to the
        # GPU in eval mode (dropout off) — factored out of the triplicated
        # per-checkpoint code
        m = ModifiedXception(num_classes=args.nb_class,
                             drop_rate=args.drop_rate,
                             decay=args.decay)
        m.load_state_dict(
            torch.load(os.path.join(ROOT_DIR, ckpt_file))['model_state_dict'])
        m.to(device)
        m.eval()
        return m

    labels = [
        'airport', 'bus', 'metro', 'metro_station', 'park', 'public_square',
        'shopping_mall', 'street_pedestrian', 'street_traffic', 'tram'
    ]
    val_acc_cw = []
    val_acc_mean = []
    bc_val_pred = []
    bc_val_target = []
    for i in range(3):
        # each model is re-loaded per device to mirror the original flow
        val_pred_prob1 = get_pred_prob(_load_model(args.ckpt_file),
                                       loaders[0][i], device)
        val_pred_prob2 = get_pred_prob(_load_model(args.ckpt_file1),
                                       loaders[1][i], device)
        val_pred_prob3 = get_pred_prob(_load_model(args.ckpt_file2),
                                       loaders[2][i], device)
        # probability-sum fusion, then hard decision
        val_pred_prob = val_pred_prob1 + val_pred_prob2 + val_pred_prob3
        val_pred = np.argmax(val_pred_prob, axis=1)

        # all loader variants share the same targets; use the first
        val_target = get_target(loaders[0][i])

        val_acc = calculate_accuracy(target=val_target,
                                     predict=val_pred,
                                     classes_num=args.nb_class)
        # devices 1 and 2 ("b"/"c") are pooled into a combined entry below
        if i > 0:
            bc_val_pred.append(val_pred)
            bc_val_target.append(val_target)
        acc_mean = (np.array(val_target) == np.array(val_pred)).mean()
        val_acc_mean.append(acc_mean)

        # use a distinct loop variable — the original shadowed the outer `i`
        val_acc_class_wise = dict()
        for c in range(args.nb_class):
            val_acc_class_wise[labels[c]] = val_acc[c]
        val_acc_cw.append(val_acc_class_wise)

    # average of device (b, c)
    bc_val_target = np.concatenate(bc_val_target)
    bc_val_pred = np.concatenate(bc_val_pred)
    bc_val_acc = calculate_accuracy(target=bc_val_target,
                                    predict=bc_val_pred,
                                    classes_num=args.nb_class)
    bc_val_acc_mean = (np.array(bc_val_target) == np.array(bc_val_pred)).mean()
    val_acc_class_wise = dict()
    for c in range(args.nb_class):
        val_acc_class_wise[labels[c]] = bc_val_acc[c]
    val_acc_cw.append(val_acc_class_wise)
    val_acc_mean.append(bc_val_acc_mean)
    return val_acc_cw, val_acc_mean