Example #1
def valid(datacfg,
          darknetcfg,
          learnetcfg,
          weightfile,
          outfile,
          use_baserw=False):
    options = read_data_cfg(datacfg)
    valid_images = options['valid']
    metadict = options['meta']
    # name_list = options['names']
    # backup = cfg.backup
    ckpt = weightfile.split('/')[-1].split('.')[0]
    backup = weightfile.split('/')[-2]
    ckpt_pre = '/ene_' if use_baserw else '/ene'
    prefix = 'results/' + backup.split('/')[-1] + ckpt_pre + ckpt
    print('saving to: ' + prefix)
    # prefix = 'results/' + weightfile.split('/')[1]
    # names = load_class_names(name_list)

    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]

    m = Darknet(darknetcfg, learnetcfg)
    m.print_network()
    m.load_weights(weightfile)
    m.cuda()
    m.eval()

    valid_dataset = dataset.listDataset(valid_images,
                                        shape=(m.width, m.height),
                                        shuffle=False,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                        ]))
    valid_batchsize = 2
    assert valid_batchsize > 1

    kwargs = {'num_workers': 4, 'pin_memory': True}
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=valid_batchsize,
                                               shuffle=False,
                                               **kwargs)

    if False:  # disabled branch: single-pass weight generation, kept for reference
        metaset = dataset.MetaDataset(metafiles=metadict,
                                      train=False,
                                      ensemble=True)
        metaloader = torch.utils.data.DataLoader(metaset,
                                                 batch_size=len(metaset),
                                                 shuffle=False,
                                                 **kwargs)
        metaloader = iter(metaloader)
        n_cls = len(metaset.classes)

        print('===> Generating dynamic weights...')
        metax, mask = next(metaloader)  # Python 3: next(), not .next()
        metax, mask = metax.cuda(), mask.cuda()
        with torch.no_grad():  # replaces deprecated Variable(..., volatile=True)
            dynamic_weights, _ = m.meta_forward(metax, mask)

        for i in range(len(dynamic_weights)):
            assert dynamic_weights[i].size(0) == sum(metaset.meta_cnts)
            inds = np.cumsum([0] + metaset.meta_cnts)
            new_weight = []
            for j in range(len(metaset.meta_cnts)):
                new_weight.append(
                    torch.mean(dynamic_weights[i][inds[j]:inds[j + 1]], dim=0))
            dynamic_weights[i] = torch.stack(new_weight)
            print(dynamic_weights[i].shape)
    else:
        metaset = dataset.MetaDataset(metafiles=metadict,
                                      train=False,
                                      ensemble=True,
                                      with_ids=True)
        metaloader = torch.utils.data.DataLoader(metaset,
                                                 batch_size=64,
                                                 shuffle=False,
                                                 **kwargs)
        # metaloader = iter(metaloader)
        n_cls = len(metaset.classes)

        enews = [0.0] * n_cls
        cnt = [0.0] * n_cls
        print('===> Generating dynamic weights...')
        kkk = 0
        for metax, mask, clsids in metaloader:
            print('===> {}/{}'.format(kkk, len(metaset) // 64))
            kkk += 1
            with torch.no_grad():
                # no_grad() already disables autograd here; the old
                # Variable(..., volatile=True) wrappers are deprecated no-ops.
                metax, mask = metax.cuda(), mask.cuda()
                dws, _ = m.meta_forward(metax, mask)
            dw = dws[0]
            for ci, c in enumerate(clsids):
                # Incremental mean: new_mean = old_mean * n/(n+1) + x/(n+1)
                enews[c] = enews[c] * cnt[c] / (cnt[c] +
                                                1) + dw[ci] / (cnt[c] + 1)
                cnt[c] += 1
        dynamic_weights = [torch.stack(enews)]  # assumes every class was seen at least once

        # import pickle
        # with open('data/rws/voc_novel2_.pkl', 'wb') as f:
        #     tmp = [x.data.cpu().numpy() for x in dynamic_weights]
        #     pickle.dump(tmp, f)

        if use_baserw:
            import pickle
            # f = 'data/rws/voc_novel{}_.pkl'.format(cfg.novelid)
            f = 'data/rws/voc_novel{}_.pkl'.format(0)
            print('===> Loading from {}...'.format(f))
            with open(f, 'rb') as fh:  # avoid shadowing the path variable
                rws = pickle.load(fh)
                rws = [torch.from_numpy(rw).cuda() for rw in rws]
                tki = cfg._real_base_ids
                for i in range(len(rws)):
                    dynamic_weights[i][tki] = rws[i][tki]
                    # dynamic_weights[i] = rws[i]

    if not os.path.exists(prefix):
        # os.mkdir(prefix)
        os.makedirs(prefix)

    fps = [0] * n_cls
    for i, cls_name in enumerate(metaset.classes):
        buf = '%s/%s%s.txt' % (prefix, outfile, cls_name)
        fps[i] = open(buf, 'w')

    lineId = -1

    conf_thresh = 0.005
    nms_thresh = 0.45
    for batch_idx, (data, target) in enumerate(valid_loader):
        data = data.cuda()
        with torch.no_grad():  # replaces deprecated Variable(..., volatile=True)
            output = m.detect_forward(data, dynamic_weights)

        if isinstance(output, tuple):
            output = (output[0].data, output[1].data)
        else:
            output = output.data

        batch_boxes = get_region_boxes_v2(output, n_cls, conf_thresh,
                                          m.num_classes, m.anchors,
                                          m.num_anchors, 0, 1)

        if isinstance(output, tuple):
            bs = output[0].size(0)
        else:
            assert output.size(0) % n_cls == 0
            bs = output.size(0) // n_cls

        for b in range(bs):
            lineId = lineId + 1
            imgpath = valid_dataset.lines[lineId].rstrip()
            print(imgpath)
            imgid = os.path.basename(imgpath).split('.')[0]
            width, height = get_image_size(imgpath)
            for i in range(n_cls):
                # oi = i * bs + b
                oi = b * n_cls + i
                boxes = batch_boxes[oi]
                boxes = nms(boxes, nms_thresh)
                for box in boxes:
                    x1 = (box[0] - box[2] / 2.0) * width
                    y1 = (box[1] - box[3] / 2.0) * height
                    x2 = (box[0] + box[2] / 2.0) * width
                    y2 = (box[1] + box[3] / 2.0) * height

                    det_conf = box[4]
                    for j in range((len(box) - 5) // 2):  # integer floor division
                        cls_conf = box[5 + 2 * j]
                        cls_id = box[6 + 2 * j]
                        prob = det_conf * cls_conf
                        fps[i].write('%s %f %f %f %f %f\n' %
                                     (imgid, prob, x1, y1, x2, y2))

    for i in range(n_cls):
        fps[i].close()
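For context, a minimal driver showing how `valid` might be invoked from the command line. The CLI shape and the script name are illustrative assumptions, not the repository's actual entry point:

if __name__ == '__main__':
    import sys
    if len(sys.argv) == 6:
        datacfg, darknetcfg, learnetcfg, weightfile, outfile = sys.argv[1:6]
        valid(datacfg, darknetcfg, learnetcfg, weightfile, outfile)
    else:
        print('Usage: python valid.py datacfg darknetcfg learnetcfg weightfile outfile')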
Example #2
def train(epoch):
    global processed_batches
    t0 = time.time()
    if ngpus > 1:
        cur_model = model.module
    else:
        cur_model = model
    train_loader = torch.utils.data.DataLoader(dataset.listDataset(
        trainlist,
        shape=(init_width, init_height),
        shuffle=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
        ]),
        train=True,
        seen=cur_model.seen,
        batch_size=batch_size,
        num_workers=num_workers),
                                               batch_size=batch_size,
                                               shuffle=False,
                                               **kwargs)

    lr = adjust_learning_rate(optimizer, processed_batches)
    logging('epoch %d/%d, processed %d samples, lr %f' %
            (epoch, max_epochs, epoch * len(train_loader.dataset), lr))
    model.train()
    t1 = time.time()
    avg_time = torch.zeros(9)
    for batch_idx, (data, target) in enumerate(train_loader):
        t2 = time.time()
        adjust_learning_rate(optimizer, processed_batches)
        processed_batches = processed_batches + 1
        #if (batch_idx+1) % dot_interval == 0:
        #    sys.stdout.write('.')

        if use_cuda:
            data = data.cuda()
            #target= target.cuda()
        t3 = time.time()
        data, target = Variable(data), Variable(target)
        t4 = time.time()
        optimizer.zero_grad()
        t5 = time.time()
        output = model(data)
        t6 = time.time()
        region_loss.seen = region_loss.seen + data.size(0)  # .data is deprecated
        loss = region_loss(output, target)
        t7 = time.time()
        loss.backward()
        t8 = time.time()
        optimizer.step()
        t9 = time.time()
        if False and batch_idx > 1:  # per-stage timing breakdown (disabled)
            avg_time[0] = avg_time[0] + (t2 - t1)
            avg_time[1] = avg_time[1] + (t3 - t2)
            avg_time[2] = avg_time[2] + (t4 - t3)
            avg_time[3] = avg_time[3] + (t5 - t4)
            avg_time[4] = avg_time[4] + (t6 - t5)
            avg_time[5] = avg_time[5] + (t7 - t6)
            avg_time[6] = avg_time[6] + (t8 - t7)
            avg_time[7] = avg_time[7] + (t9 - t8)
            avg_time[8] = avg_time[8] + (t9 - t1)
            print('-------------------------------')
            print('       load data : %f' % (avg_time[0] / (batch_idx)))
            print('     cpu to cuda : %f' % (avg_time[1] / (batch_idx)))
            print('cuda to variable : %f' % (avg_time[2] / (batch_idx)))
            print('       zero_grad : %f' % (avg_time[3] / (batch_idx)))
            print(' forward feature : %f' % (avg_time[4] / (batch_idx)))
            print('    forward loss : %f' % (avg_time[5] / (batch_idx)))
            print('        backward : %f' % (avg_time[6] / (batch_idx)))
            print('            step : %f' % (avg_time[7] / (batch_idx)))
            print('           total : %f' % (avg_time[8] / (batch_idx)))
        t1 = time.time()
    print('')
    t1 = time.time()
    logging('training with %f samples/s' % (len(train_loader.dataset) /
                                            (t1 - t0)))
    if (epoch + 1) % cfg.save_interval == 0:
        logging('save weights to %s/%06d.weights' % (backupdir, epoch + 1))
        cur_model.seen = (epoch + 1) * len(train_loader.dataset)
        cur_model.save_weights('%s/%06d.weights' % (backupdir, epoch + 1))
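`adjust_learning_rate` is referenced above but not shown. A minimal sketch of the step-decay schedule this style of trainer typically uses; `learning_rate`, `steps`, `scales`, and `batch_size` are assumed globals, not values taken from this snippet:

def adjust_learning_rate(optimizer, processed_batches):
    # Step decay: apply each scale whose batch threshold has been passed;
    # lr is divided by batch_size to match a per-sample loss convention.
    lr = learning_rate
    for i in range(len(steps)):
        if processed_batches >= steps[i]:
            lr = lr * scales[i]
        else:
            break
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr / batch_size
    return lr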
Example #3
processed_batches = model.seen // batch_size  # integer division (Python 3)
init_width = model.width
init_height = model.height
init_epoch = 0 if cfg.tuning else model.seen // nsamples  # integer division
max_epochs = max_batches * batch_size // nsamples + 1
max_epochs = int(math.ceil(cfg.max_epoch * 1. /
                           cfg.repeat)) if cfg.tuning else max_epochs
# init_epoch        = model.seen/nsamples

print(nsamples, max_batches, max_epochs)

kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(dataset.listDataset(
    testlist,
    shape=(init_width, init_height),
    shuffle=False,
    transform=transforms.Compose([
        transforms.ToTensor(),
    ]),
    train=False),
                                          batch_size=batch_size,
                                          shuffle=False,
                                          **kwargs)

if use_cuda:
    if ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

params_dict = dict(model.named_parameters())
params = []
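The snippet stops just after collecting `params_dict`. A hedged sketch of how such trainers usually finish this block, giving batch-norm and bias parameters no weight decay; the exact grouping rule and the `decay`, `momentum`, and `learning_rate` values are assumptions, not taken from this snippet:

import torch.optim as optim

for key, value in params_dict.items():
    if key.find('.bn') >= 0 or key.find('.bias') >= 0:
        # no weight decay on batch-norm and bias parameters
        params += [{'params': [value], 'weight_decay': 0.0}]
    else:
        params += [{'params': [value], 'weight_decay': decay * batch_size}]
optimizer = optim.SGD(params,
                      lr=learning_rate / batch_size,
                      momentum=momentum,
                      dampening=0,
                      weight_decay=decay * batch_size)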
Example #4
def valid(datacfg, cfgfile, weightfile, outfile):
    options = read_data_cfg(datacfg)
    valid_images = options['valid']
    # backup = cfg.backup
    backup = weightfile.split('/')[-2]
    ckpt = weightfile.split('/')[-1].split('.')[0]
    prefix = 'results/' + backup.split('/')[-1] + '/e' + ckpt
    print('saving to: ' + prefix)
    names = cfg.classes

    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]
    
    m = Darknet(cfgfile)
    m.print_network()
    m.load_weights(weightfile)
    m.cuda()
    m.eval()

    valid_dataset = dataset.listDataset(valid_images,
                                        shape=(m.width, m.height),
                                        shuffle=False,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                        ]))
    valid_batchsize = 2
    assert valid_batchsize > 1

    kwargs = {'num_workers': 4, 'pin_memory': True}
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=valid_batchsize, shuffle=False, **kwargs) 

    fps = [0]*m.num_classes
    if not os.path.exists(prefix):
        # os.mkdir(prefix)
        os.makedirs(prefix)
    for i in range(m.num_classes):
        buf = '%s/%s%s.txt' % (prefix, outfile, names[i])
        fps[i] = open(buf, 'w')
   
    lineId = -1
    
    conf_thresh = 0.005
    nms_thresh = 0.45
    for batch_idx, (data, target) in enumerate(valid_loader):
        data = data.cuda()
        with torch.no_grad():  # replaces deprecated Variable(..., volatile=True)
            output = m(data).data
        batch_boxes = get_region_boxes(output, conf_thresh, m.num_classes,
                                       m.anchors, m.num_anchors, 0, 1)
        for i in range(output.size(0)):
            lineId = lineId + 1
            fileId = os.path.basename(valid_files[lineId]).split('.')[0]
            width, height = get_image_size(valid_files[lineId])
            print(valid_files[lineId])
            boxes = batch_boxes[i]
            boxes = nms(boxes, nms_thresh)
            for box in boxes:
                x1 = (box[0] - box[2]/2.0) * width
                y1 = (box[1] - box[3]/2.0) * height
                x2 = (box[0] + box[2]/2.0) * width
                y2 = (box[1] + box[3]/2.0) * height

                det_conf = box[4]
                for j in range((len(box) - 5) // 2):  # Python 3: range() needs an int
                    cls_conf = box[5 + 2 * j]
                    cls_id = box[6 + 2 * j]
                    prob = det_conf * cls_conf
                    fps[cls_id].write('%s %f %f %f %f %f\n' % (fileId, prob, x1, y1, x2, y2))
                    # fps[cls_id].write('%s %f %f %f %f %f %f\n' % (fileId, det_conf, cls_conf, x1, y1, x2, y2))

    for i in range(m.num_classes):
        fps[i].close()
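The corner conversion above assumes each box is laid out as [cx, cy, w, h, det_conf, cls_conf_1, cls_id_1, ...] with coordinates relative to image size. A self-contained restatement of that conversion; the helper name is illustrative, not from the repository:

def box_to_corners(box, width, height):
    # (cx, cy, w, h) in relative units -> (x1, y1, x2, y2) in pixels
    x1 = (box[0] - box[2] / 2.0) * width
    y1 = (box[1] - box[3] / 2.0) * height
    x2 = (box[0] + box[2] / 2.0) * width
    y2 = (box[1] + box[3] / 2.0) * height
    return x1, y1, x2, y2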
Example #5
processed_batches = 0 if cfg.tuning else model.seen // batch_size  # integer division (Python 3)
trainlist         = dataset.build_dataset(data_options)
nsamples          = len(trainlist)
init_width        = model.width
init_height       = model.height
init_epoch        = 0 if cfg.tuning else model.seen // nsamples
max_epochs        = max_batches * batch_size // nsamples + 1
max_epochs        = int(math.ceil(cfg.max_epoch * 1. / cfg.repeat)) if cfg.tuning else max_epochs
print(cfg.repeat, nsamples, max_batches, batch_size)
print(num_workers)

kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
    dataset.listDataset(testlist, shape=(init_width, init_height),
                        shuffle=False,
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                        ]),
                        train=False),
    batch_size=batch_size, shuffle=False, **kwargs)

test_metaset = dataset.MetaDataset(metafiles=metadict, train=True)
test_metaloader = torch.utils.data.DataLoader(
    test_metaset,
    batch_size=test_metaset.batch_size,
    shuffle=False,
    num_workers=num_workers//2,
    pin_memory=True
)

# Adjust learning rate
factor = len(test_metaset.classes)
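To make the epoch arithmetic above concrete, a worked example with assumed values (these numbers are illustrative, not from the config):

max_batches, batch_size, nsamples = 80200, 64, 16551    # assumed values
max_epochs = max_batches * batch_size // nsamples + 1   # 5132800 // 16551 + 1 = 311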
Example #6
def train(epoch, repeat_time, mask_ratio):
    global processed_batches
    t0 = time.time()
    if ngpus > 1:
        cur_model = model.module
    else:
        cur_model = model

    train_loader = torch.utils.data.DataLoader(
        dataset.listDataset(trainlist, shape=(init_width, init_height),
                            shuffle=False,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                            ]),
                            train=True,
                            seen=cur_model.seen,
                            batch_size=batch_size,
                            num_workers=num_workers),
        batch_size=batch_size, shuffle=False, **kwargs)

    metaset = dataset.MetaDataset(metafiles=metadict, train=True, with_ids=True)
    metaloader = torch.utils.data.DataLoader(
        metaset,
        batch_size=metaset.batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True
    )
    metaloader = iter(metaloader)

    lr = adjust_learning_rate(optimizer, processed_batches)
    logging('epoch %d/%d, processed %d samples, lr %f' % (epoch, max_epochs, epoch * len(train_loader.dataset), lr))

    model.train()
    t1 = time.time()
    avg_time = torch.zeros(9)
    
    novel_id = cfg['novel_ids']
    for batch_idx, (data, target) in enumerate(train_loader):
        metax, mask, target_cls_ids = next(metaloader)  # Python 3: next(), not .next()

        # Flag each sampled class: 0 for novel classes, 1 for base classes.
        novel_cls_flag = torch.zeros(len(target_cls_ids))
        for index, j in enumerate(target_cls_ids):
            if j in novel_id:
                novel_cls_flag[int(index)] = 0
            else:
                novel_cls_flag[int(index)] = 1
                
        t2 = time.time()
        adjust_learning_rate(optimizer, processed_batches)
        processed_batches = processed_batches + 1

        if use_cuda:
            data = data.cuda()
            metax = metax.cuda()
            mask = mask.cuda()
            target_cls_ids = target_cls_ids.cuda()
            #target= target.cuda()
        t3 = time.time()
        data, target = Variable(data), Variable(target)
        metax, mask, target_cls_ids = Variable(metax, requires_grad=True), Variable(mask), Variable(target_cls_ids)
        t4 = time.time()
        for i in range(repeat_time):
            optimizer.zero_grad()
            t5 = time.time()
            metax_disturbance = metax
            if i == 0:
                # Clone so the in-place edits below cannot corrupt the original mask.
                mask_disturbance = mask.clone()
            else:
                for index, flag_each in enumerate(novel_cls_flag):
                    if flag_each == 0:
                        mask_disturbance[index] = mask[index]
                    elif flag_each == 1:
                        # Base class: suppress its highest-gradient support regions.
                        mask_disturbance[index] = mask[index] * metax_mask[0][index]
                    else:
                        print("error")
            output, dynamic_weights = model(data, metax_disturbance, mask_disturbance)
            t6 = time.time()
            region_loss.seen = region_loss.seen + data.data.size(0)
            if i == 0:
                loss = region_loss(output, target, dynamic_weights, target_cls_ids)
                dynamic_weights_store = dynamic_weights
                target_cls_ids_store = target_cls_ids
            else:
                with torch.no_grad():
                    for index, flag_each in enumerate(novel_cls_flag):
                        if flag_each == 1:  # base class: append its disturbed weights
                            dynamic_weights_store = [torch.cat(
                                (dynamic_weights_store[0],
                                 dynamic_weights[0][index].unsqueeze(0)), dim=0)]
                    Tensor_novel_id = torch.Tensor(novel_id).long().cuda()
                    for num in range(int(torch.sum(novel_cls_flag)) // len(novel_id)):
                        target_cls_ids_store = torch.cat((target_cls_ids_store, Tensor_novel_id), 0)
                loss = region_loss(output, target, dynamic_weights_store, target_cls_ids_store)
                
            t7 = time.time()
            loss.backward()
            metax_mask = Inverted_gradient([metax.grad], mask_ratio, mask)
            
            t8 = time.time()
            optimizer.step()
            t9 = time.time()
        t1 = time.time()
    t1 = time.time()
    logging('training with %f samples/s' % (len(train_loader.dataset)/(t1-t0)))

    if (epoch+1) % cfg.save_interval == 0:
        logging('save weights to %s/%06d.weights' % (backupdir, epoch+1))
        cur_model.seen = (epoch + 1) * len(train_loader.dataset)
        cur_model.save_weights('%s/%06d.weights' % (backupdir, epoch+1))
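`Inverted_gradient` is called on `[metax.grad]` above but not defined in this snippet. A minimal sketch of one plausible implementation, assuming it turns the support-image gradients into binary masks that zero out the top `mask_ratio` fraction of gradient magnitudes per sample; this is an assumption for illustration, not the repository's actual code:

def Inverted_gradient(grads, mask_ratio, mask):
    out = []
    for g in grads:
        mag = g.abs().sum(dim=1, keepdim=True)       # per-location gradient magnitude
        flat = mag.flatten(1)                        # (N, H*W)
        k = max(1, int(flat.size(1) * mask_ratio))
        # threshold at the k-th largest magnitude for each sample
        thresh = flat.topk(k, dim=1).values[:, -1].view(-1, 1, 1, 1)
        out.append((mag < thresh).float())           # keep only low-gradient regions
    return out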