def test():
    # Evaluate the model on the test set and return the mean per-sample accuracy.
    model.eval()
    test_loss = 0
    total_length = 0

    for data, target in test_loader:

        if CUDA_ON:
            data, target = data.cuda(), target.cuda()

        # volatile=True marks inference-only Variables (pre-0.4 PyTorch API)
        data, target = Var(data, volatile=True), Var(target)

        output = nn.functional.sigmoid(model(data))

        for i in range(output.size(0)):
            try:
                # skip degenerate samples: no labels at all, or 12 or more labels
                if target.data[i].sum() == 0: continue
                if target.data[i].sum() >= 12: continue
                test_loss += accuracy(output.data[i], target.data[i])
                total_length += 1
            except Exception as e:
                print(e)
                print(target.data[i])
                sys.exit()
    return test_loss / total_length
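
The `accuracy` helper called by these evaluation snippets is not part of the listing; only its `accuracy(prediction, target)` call signature is visible. A minimal sketch of what such a per-sample, multi-label metric could look like (the thresholding logic is an assumption, not the original implementation):

import torch

def accuracy(output, target, threshold=0.5):
    # HYPOTHETICAL sketch; the real helper is defined elsewhere in the project.
    # Fraction of genre labels predicted correctly after thresholding
    # the sigmoid probabilities of one sample.
    pred = (output > threshold).float()
    return (pred == target.float()).float().mean()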
Example #2
def test_3rd_snapshot():
    # Evaluate the 3rd snapshot (SmallerNetwork) on the un-augmented test split.
    te_set = PosterSet(POSTER_PATH, split, 'test',  gen_d=gen_d, augment=False, resize=None, ten_crop=None)#, debug=True)
    te_load = DataLoader(te_set, batch_size=64, shuffle=False, num_workers=3, drop_last=True)
    model = SmallerNetwork(INP_SIZE, 23)
    state = torch.load(SNAP_PATH + "snap3rd.nn")
    model.load_state_dict(state['state_dict'])
    if CUDA_ON: model.cuda()
    model.eval()
    loss = 0
    skipped = 0

    for X, y in tqdm(te_load, desc='3rd'):
        X, y = Var(X, volatile=True), Var(y)
        if CUDA_ON:
            X, y = X.cuda(), y.cuda()

        out = model(X)

        for i in range(out.size(0)):
            try:
                loss += accuracy(out.data[i], y.data[i])
            except Exception:
                # count samples the metric could not score
                skipped += 1

    return loss / (len(te_set) - skipped)
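
All of these loops use the pre-0.4 PyTorch idiom `Var(X, volatile=True)` to disable autograd during inference. Since PyTorch 0.4 the same effect is obtained with `torch.no_grad()`; a sketch of the equivalent loop, keeping the names used above:

with torch.no_grad():                     # replaces volatile=True
    for X, y in tqdm(te_load, desc='3rd'):
        if CUDA_ON:
            X, y = X.cuda(), y.cuda()
        out = model(X)                    # plain tensors, no Var() wrapper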
Example #3
def test_5th_snapshot():
    # Evaluate the 5th snapshot (MidrangeNetwork) with ten-crop test-time augmentation.
    te_set = PosterSet(POSTER_PATH, split, 'test',  gen_d=gen_d, augment=False, resize=None, ten_crop=CROP_SIZE)#, debug=True)
    te_load = DataLoader(te_set, batch_size=64, shuffle=False, num_workers=3, drop_last=True)
    model = MidrangeNetwork(CROP_SIZE, 23)
    state = torch.load(SNAP_PATH + "snap5th.nn")
    model.load_state_dict(state['state_dict'])
    if CUDA_ON: model.cuda()
    model.eval()
    loss = 0
    skipped = 0

    for X, y in tqdm(te_load, desc='5th'):
        X, y = Var(X, volatile=True), Var(y)

        # ten-crop batches arrive as (batch, n_crops, channels, height, width)
        bs, ncrops, c, h, w = X.size()

        if CUDA_ON:
            X, y = X.cuda(), y.cuda()

        # fold the crops into the batch dimension, then average the
        # per-crop predictions back into one prediction per sample
        out = model(X.view(-1, c, h, w))
        out = out.view(bs, ncrops, -1).mean(1)

        for i in range(out.size(0)):
            try:
                loss += accuracy(out.data[i], y.data[i])
            except Exception:
                skipped += 1

    return loss / (len(te_set) - skipped)
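
The reshape-and-average idiom above is the standard way to consume ten-crop batches; torchvision's `transforms.TenCrop` produces exactly that extra crop dimension. A sketch of a transform pipeline that would yield such batches (how PosterSet actually builds its transforms from `ten_crop=CROP_SIZE` is an assumption):

import torch
from torchvision import transforms

# Yields tensors of shape (n_crops, c, h, w); the DataLoader then batches
# them into the (bs, ncrops, c, h, w) layout unpacked above.
tencrop = transforms.Compose([
    transforms.TenCrop(CROP_SIZE),
    transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(c) for c in crops])),
])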
Example #4
def test_distribution():
    # Baseline: score one fixed, input-independent prediction vector against every sample.
    te_set = PosterSet(POSTER_PATH, split, 'test',  gen_d=gen_d, augment=False, resize=None, ten_crop=None)#, debug=True)
    te_load = DataLoader(te_set, batch_size=64, shuffle=False, num_workers=3, drop_last=True)
    # constant per-genre prediction, reused for every sample
    pred = torch.Tensor([0.28188283, 0.23829031, 0.27430824, 0.39426496, 0.38900912, 0.22754676, 0.24563302, 0.11153192, 0.29865512, 0.14260318, 0.17011903, 0.0307621 , 0.20026279, 0.2485701 , 0.24037718, 0.17645695, 1. , 0.42100788, 0.11593755, 0.31264492, 0.62699026, 0.1946205 , 0.27446282])
    loss = 0
    skipped = 0
    for X, y in tqdm(te_load, desc='dis'):
        for i in range(X.size(0)):
            try:
                loss += accuracy(pred, y[i])
            except Exception:
                skipped += 1

    return loss / (len(te_set) - skipped)
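
The hard-coded 23-entry vector plays the role of a trivial baseline. One plausible way to obtain such a vector is from the per-genre label frequencies of the training data; this is a guess, since the listing does not show where the numbers come from:

import torch

# HYPOTHETICAL derivation: tr_set is assumed to yield (image, 23-dim multi-hot label) pairs.
labels = torch.stack([y for _, y in tr_set])
pred = labels.float().mean(0)   # per-genre frequency in [0, 1]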
Example #5
def loss(self,
         cls_score,
         bbox_pred,
         labels,
         label_weights,
         bbox_targets,
         bbox_weights,
         reduce=True):
    losses = dict()
    if cls_score is not None:
        # classification branch: weighted cross-entropy, plus accuracy for logging
        losses['loss_cls'] = weighted_cross_entropy(cls_score,
                                                    labels,
                                                    label_weights,
                                                    reduce=reduce)
        losses['acc'] = accuracy(cls_score, labels)
    if bbox_pred is not None:
        # regression branch: weighted smooth L1 on the predicted box deltas
        losses['loss_reg'] = weighted_smoothl1(
            bbox_pred,
            bbox_targets,
            bbox_weights,
            avg_factor=bbox_targets.size(0))
    return losses
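
`weighted_cross_entropy` and `weighted_smoothl1` come from the surrounding detection codebase (this method follows the mmdetection bbox-head pattern). A minimal sketch of a weighted smooth L1 loss with this signature, for illustration only (not the library's actual implementation):

import torch

def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
    # HYPOTHETICAL re-implementation: element-wise smooth L1, masked by
    # weight and normalized by avg_factor.
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)
    if avg_factor is None:
        avg_factor = weight.sum().clamp(min=1.0)
    return (loss * weight).sum() / avg_factor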