Example #1
0
def logits_and_y_true_for_test_set(net, num_models):
    """Collect per-model logits and ground-truth labels over the whole test set.

    Args:
        net: model wrapper exposing ``logits(imgs, single_result, model_dropout)``
            which returns per-model logits for a batch.
        num_models: number of ensemble members M in the leading model axis.

    Returns:
        (logits, y_true) where ``logits`` has shape (M, num_examples, num_classes)
        and ``y_true`` is a flat list of the corresponding labels.
    """
    logits = []
    y_true = []
    for imgs, labels in data.test_dataset(batch_size=100):
        logits.append(net.logits(imgs, single_result=False,
                                 model_dropout=False))
        y_true.extend(labels)
    # (num_batches, M, B, C) -> (M, num_batches, B, C) -> (M, num_batches*B, C).
    # Using -1 and the inferred class dim instead of the hard-coded 2700/10
    # keeps this correct for any test-set size, batch count, or class count.
    logits = jnp.stack(logits)
    logits = logits.transpose((1, 0, 2, 3))
    logits = logits.reshape((num_models, -1, logits.shape[-1]))
    return logits, y_true
# Resolve test-data paths and the checkpoint/output directory from the parsed
# command-line options. NOTE(review): `opt`, `image_root`, `gt_root`,
# `depth_root` and `test_image_root` are presumably defined earlier in the
# file — not visible in this chunk.
test_gt_root = opt.test_gt_root
test_depth_root = opt.test_depth_root
save_path = opt.save_path

# Create the output directory if it does not exist yet.
if not os.path.exists(save_path):
    os.makedirs(save_path)

# load data
print('load data...')
train_loader = get_loader(image_root,
                          gt_root,
                          depth_root,
                          batchsize=opt.batchsize,
                          trainsize=opt.trainsize,
                          num_workers=2)
test_loader = test_dataset(test_image_root, test_gt_root, test_depth_root,
                           opt.trainsize)
total_step = len(train_loader)  # number of batches per epoch

# Log to stdout with timestamps at INFO level; the final call records the full
# training configuration so a run is reproducible from its log alone.
logging.basicConfig(
    stream=sys.stdout,
    format='[%(asctime)s-%(filename)s-%(levelname)s:%(message)s]',
    level=logging.INFO,
    datefmt='%Y-%m-%d %I:%M:%S %p')
logging.info("BBSNet-Train")
logging.info("Config")
logging.info(
    'epoch:{};lr:{};batchsize:{};trainsize:{};clip:{};decay_rate:{};load:{};save_path:{};decay_epoch:{}'
    .format(opt.epoch, opt.lr, opt.batchsize, opt.trainsize, opt.clip,
            opt.decay_rate, opt.load, save_path, opt.decay_epoch))

# set loss function
# NOTE(review): the comment above is stale — this block actually builds the
# generator network and loads a fixed checkpoint from a hard-coded absolute
# path; consider making the path a command-line option.
generator = Generator(channel=64)
generator.load_state_dict(torch.load('/home1/bowen/models/gan_semi/Modeltexture_29_gen.pth'))

# Move to GPU and switch to inference mode (freezes dropout/batch-norm).
generator.cuda()
generator.eval()

# Run the trained generator over each saliency benchmark and write the
# min-max-normalized prediction maps to disk, one PNG per input image.
test_datasets = ['ECSSD','DUT','DUTS','PASCAL','THUR','HKU-IS']

for dataset in test_datasets:

    save_path = '/home1/bowen/results/ResNet50/' + dataset + '/'

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    image_root = dataset_path + dataset + '/'

    test_loader = test_dataset(image_root, opt.testsize)
    for i in range(test_loader.size):
        # Fix: `print i` is Python-2 syntax and a SyntaxError under Python 3.
        print(i)
        image, HH, WW, name = test_loader.load_data()
        image = image.cuda()
        generator_pred, _ = generator.forward(image)
        res = torch.sigmoid(generator_pred)
        # F.upsample is deprecated; F.interpolate is the drop-in replacement.
        # NOTE(review): interpolate expects size=(H, W) — confirm the loader
        # really returns (HH, WW) swapped, otherwise this is transposed.
        res = F.interpolate(res, size=[WW, HH], mode='bilinear', align_corners=False)
        # Fix: sigmoid was applied a second time here on already-sigmoided
        # values, compressing predictions into ~[0.5, 0.73] before the min-max
        # normalization below.
        res = res.data.cpu().numpy().squeeze()
        res = (res - res.min()) / (res.max() - res.min() + 1e-8)
        # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2; migrate to
        # imageio.imwrite when dependencies allow.
        misc.imsave(save_path + name, res)
Example #4
0
#test_datasets = [ '\\LFSD']
#test_datasets = [ '\\NJUD\\test_data']
#test_datasets = [ '\\NLPR\\test_data']
#test_datasets = [ '\\RGBD135']
#test_datasets = [ '\\SSD']
#test_datasets = [ '\\STEREO']
#test_datasets = [ '\\DUTS-TEST']
#time_start=time.time()
# Evaluate `model` on each RGB-D benchmark and time the forward pass.
# NOTE(review): this snippet appears truncated — `res` is upsampled but never
# normalized or written out before the chunk ends; confirm against the full file.
for dataset in test_datasets:
    save_path = '' + dataset + '\\results\\'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    image_root = dataset_path + dataset + '\\images\\'
    gt_root = dataset_path + dataset + '\\gts\\'
    depth_root = dataset_path + dataset + '\\depths\\'
    test_loader = test_dataset(image_root, gt_root,depth_root, opt.testsize)
    for i in range(test_loader.size):
        image, gt,depth, name = test_loader.load_data()
        # Normalize GT and depth to [0, 1]; 1e-8 guards against all-zero maps.
        gt = np.asarray(gt, np.float32)
        gt /= (gt.max() + 1e-8)
        depth /= (depth.max() + 1e-8)
        image = Variable(image).cuda()
        depth = Variable(depth).cuda()
        n,c, h, w = image.size()
        # Replicate single-channel depth across the image's channel dimension:
        # view to (n, h, w, 1), repeat to (n, h, w, c), then the two transposes
        # permute it to NCHW (n, c, h, w) to match `image`.
        depth1 = depth.view(n,h, w, 1).repeat(1,1, 1, c)
        depth1 = depth1.transpose(3, 2)
        depth1 = depth1.transpose(2, 1)
        time_start = time.time()
        _, res, _, _ = model(image, depth1, depth)
        # NOTE(review): F.upsample is deprecated in favor of F.interpolate.
        res = F.upsample(res, size=gt.shape, mode='bilinear', align_corners=False)
        time_end = time.time()
Example #5
0
def print_validation_test_accuracy(net):
    """Print *net*'s accuracy on the validation and test sets (batch size 100)."""
    val_acc = util.accuracy(net, data.validation_dataset(batch_size=100))
    print("validation %0.3f" % val_acc)
    test_acc = util.accuracy(net, data.test_dataset(batch_size=100))
    print("test %0.3f" % test_acc)
Example #6
0
# Build the base model, then lift its apply function over two leading
# parameter axes with nested vmap so one call evaluates every ensemble member:
# innermost vmap maps over models_per_device, outermost over num_devices.
# in_axes=(0, None) batches the params argument but broadcasts the images.
model = models.build_model(opts)

# plumb batch dimension for models_per_device
all_models_apply = vmap(model.apply, in_axes=(0, None))

# plumb batch dimension for num_devices
all_models_apply = vmap(all_models_apply, in_axes=(0, None))

num_classes = 10  # 10-way classification — presumably CIFAR/MNIST-style; TODO confirm
# convert to a prediction function that ensembles over all models
@jit
def predict_fn(imgs):
    """Ensemble prediction: pool logits over every model, then argmax.

    Runs all (num_devices, models_per_device) models on `imgs`, flattens the
    two model axes into one, sums logits across members (equivalent to
    averaging for argmax purposes), and returns the per-example class index.
    """
    per_model = all_models_apply(params, imgs)
    n_batch = per_model.shape[-2]
    flat = per_model.reshape((-1, n_batch, num_classes))  # (M, B, 10)
    pooled = jnp.sum(flat, axis=0)                        # (B, 10)
    return jnp.argmax(pooled, axis=-1)                    # (B)


# check against validation set
accuracy = util.accuracy(predict_fn, data.validation_dataset(batch_size=128))
print("validation accuracy %0.3f" % accuracy)

# check against test set
accuracy = util.accuracy(predict_fn, data.test_dataset(batch_size=128))
print("test accuracy %0.3f" % accuracy)