Example #1
# Imports assumed by all five examples below: torch with the legacy
# autograd.Variable API, the project-local peaknet_dataset module, and
# YOLOv2-style helpers (get_region_boxes, nms) assumed to live in utils.
import time

import torch
from torch.autograd import Variable

import peaknet_dataset
from utils import get_region_boxes, nms


def validate_batch(model,
                   imgs,
                   labels,
                   json_file=None,
                   mini_batch_size=32,
                   box_size=7,
                   use_cuda=True,
                   writer=None,
                   verbose=False):
    overall_recall = 0.0

    debug = verbose  # gate the debug prints on the verbose flag instead of always-on

    if json_file is None:
        val_loader = torch.utils.data.DataLoader(peaknet_dataset.listDataset(
            imgs,
            labels,
            shape=(imgs.shape[2], imgs.shape[3]),
            predict=False,
            box_size=box_size,
        ),
                                                 batch_size=mini_batch_size,
                                                 shuffle=False)
    else:
        val_loader = torch.utils.data.DataLoader(peaknet_dataset.psanaDataset(
            json_file,
            predict=False,
            box_size=box_size,
        ),
                                                 batch_size=mini_batch_size,
                                                 shuffle=False)

    model.eval()
    region_loss = model.loss
    region_loss.seen = model.seen

    val_seen = 0
    for batch_idx, (data, target) in enumerate(val_loader):
        if use_cuda:
            data = data.cuda()
            target = target.cuda()
        data, target = Variable(data), Variable(target)  # legacy pre-0.4 autograd API
        output, _ = model(data.float())
        val_seen += data.size(0)
        if debug:
            print("output", output.size())
            print("label length", len(target))
            print("label[0] length", len(target[0]))
        loss, recall = region_loss(output, target)
        overall_recall += data.size(0) * float(recall)
        if writer is not None:
            writer.add_scalar('loss_val', loss, model.seen)
            writer.add_scalar('recall_val', recall, model.seen)

    overall_recall /= float(val_seen)
    return overall_recall
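A minimal usage sketch with illustrative shapes (it assumes model is a PeakNet-style detector exposing .loss and .seen, and that labels are flattened 5-value box tuples, as the verbose dump in Example #4 suggests):

imgs = torch.randn(64, 1, 192, 192)   # NCHW stack of detector inputs (hypothetical shape)
labels = torch.zeros(64, 50 * 5)      # up to 50 boxes per image, 5 values per box (assumed layout)
recall = validate_batch(model, imgs, labels,
                        mini_batch_size=16,
                        use_cuda=torch.cuda.is_available())
print("validation recall: %.3f" % recall)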
Example #2
def predict_batch(model,
                  imgs,
                  conf_thresh=0.15,
                  nms_thresh=0.45,
                  batch_size=32,
                  box_size=7,
                  use_cuda=True,
                  writer=None,
                  verbose=False):

    test_loader = torch.utils.data.DataLoader(peaknet_dataset.listDataset(
        imgs,
        None,
        shape=(imgs.shape[2], imgs.shape[3]),
        predict=True,
        box_size=box_size,
    ),
                                              batch_size=batch_size,
                                              shuffle=False)
    model.eval()

    batch_nms_boxes = []

    for batch_idx, data in enumerate(test_loader):
        if verbose:
            print(batch_idx, data.size())
        if use_cuda:
            data = data.cuda()
        data = Variable(data)
        output, _ = model(data.float())
        output = output.data

        boxes = get_region_boxes(output, conf_thresh, model.num_classes,
                                 model.anchors, model.num_anchors)
        # run non-maximum suppression on each image's candidate boxes
        nms_boxes = []
        for box in boxes:
            nms_boxes.append(nms(box, nms_thresh))
        batch_nms_boxes.append(nms_boxes)

    return batch_nms_boxes
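A sketch of consuming the nested result, assuming the boxes follow the YOLOv2 convention of normalized [x, y, w, h, conf, ...] values (unverified here):

batch_boxes = predict_batch(model, imgs, conf_thresh=0.2, nms_thresh=0.45)
H, W = imgs.shape[2], imgs.shape[3]
for nms_boxes in batch_boxes:        # one entry per DataLoader batch
    for img_boxes in nms_boxes:      # one entry per image in that batch
        for box in img_boxes:
            x, y, bw, bh = box[0] * W, box[1] * H, box[2] * W, box[3] * H
            print("peak at (%.1f, %.1f), box %.1fx%.1f, conf %.2f" % (x, y, bw, bh, box[4]))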
Example #3
def valid_batch(model,
                imgs,
                labels,
                batch_size=1,
                box_size=7,
                use_cuda=True,
                writer=None):

    data_loader = torch.utils.data.DataLoader(peaknet_dataset.listDataset(
        imgs,
        labels,
        shape=(imgs.shape[2], imgs.shape[3]),
        shuffle=False,
        train=False,
        box_size=box_size,
        batch_size=batch_size),
                                              batch_size=batch_size,
                                              shuffle=False)

    model.eval()
    region_loss = model.loss  # fix: region_loss was used below but never defined
    region_loss.seen = model.seen

    for batch_idx, (data, target) in enumerate(data_loader):  # fix: was train_loader

        if use_cuda:
            data = data.cuda()
            target = target.cuda()

        data, target = Variable(data), Variable(target)
        output, _ = model(data)

        loss, recall = region_loss(output, target)

        if writer is not None:
            writer.add_scalar('dev-loss', loss, model.seen)
            writer.add_scalar('dev-recall', recall, model.seen)
Example #4
def train_batch(model,
                imgs,
                labels,
                mini_batch_size=32,
                box_size=7,
                use_cuda=True,
                writer=None,
                verbose=False):

    train_loader = torch.utils.data.DataLoader(peaknet_dataset.listDataset(
        imgs,
        labels,
        shape=(imgs.shape[2], imgs.shape[3]),
        predict=False,
        box_size=box_size,
    ),
                                               batch_size=mini_batch_size,
                                               shuffle=True)

    model.train()
    region_loss = model.loss
    region_loss.seen = model.seen

    for batch_idx, (data, target) in enumerate(train_loader):
        #print("data min", data.min())
        #print("data max", data.max())
        t2 = time.time()
        #         optimizer.zero_grad()
        # adjust_learning_rate(optimizer, processed_batches)
        # processed_batches = processed_batches + 1
        #if (batch_idx+1) % dot_interval == 0:
        #    sys.stdout.write('.')
        #print("timgs type", data.type())
        if verbose:
            # dump each ground-truth box; targets are flattened 5-value tuples and
            # a near-zero fourth field marks end-of-list padding
            for i in range(int(target.size(0))):
                for j in range(int(target.size(1) / 5)):
                    if target[i, j * 5 + 3] < 0.001:
                        break
                    print(i, j, target[i, j * 5 + 0], target[i, j * 5 + 1],
                          target[i, j * 5 + 2], target[i, j * 5 + 3],
                          target[i, j * 5 + 4])
        if use_cuda:
            data = data.cuda()
            target = target.cuda()
        data, target = Variable(data), Variable(target)
        output, _ = model(data)
        region_loss.seen = region_loss.seen + data.data.size(0)
        model.seen = region_loss.seen
        if verbose:
            print("output", output.size())
            print("label length", len(target))
            print("label[0] length", len(target[0]))

        loss, recall = region_loss(output, target)

        # note: only backward() runs here; no optimizer is created, so the
        # caller is expected to zero and step its own optimizer
        loss.backward()
        if writer is not None:
            writer.add_scalar('loss', loss, model.seen)
            writer.add_scalar('recall', recall, model.seen)
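A hypothetical outer loop wiring train_batch and validate_batch together; train_imgs, train_labels, val_imgs, val_labels, the Adagrad hyperparameters, and the tensorboardX writer are all illustrative, not part of the original:

from tensorboardX import SummaryWriter  # assumption: any writer with add_scalar works

writer = SummaryWriter("runs/peaknet")
optimizer = torch.optim.Adagrad(model.parameters(), lr=0.001, weight_decay=0.0005)

for epoch in range(10):
    optimizer.zero_grad()
    # train_batch only calls backward(), so zeroing and stepping happen out here
    train_batch(model, train_imgs, train_labels, mini_batch_size=16, writer=writer)
    optimizer.step()
    recall = validate_batch(model, val_imgs, val_labels, mini_batch_size=16, writer=writer)
    print("epoch %d, val recall %.3f" % (epoch, recall))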
Example #5
def train_batch(model, imgs, labels, batch_size=32, box_size=7, use_cuda=True):
    train_loader = torch.utils.data.DataLoader(
        peaknet_dataset.listDataset(
            imgs,
            labels,
            shape=(imgs.shape[2], imgs.shape[3]),
            shuffle=True,
            transform=None,
            train=True,
            box_size=box_size,
            batch_size=batch_size
        ),
        batch_size=batch_size,
        shuffle=False)

    model.train()
    region_loss = model.loss
    region_loss.seen = model.seen
    t1 = time.time()
    avg_time = torch.zeros(9)
    for batch_idx, (data, target) in enumerate(train_loader):
        t2 = time.time()

        if use_cuda:
            data = data.cuda()
            target = target.cuda()
        t3 = time.time()
        #print( "before", data )
        data, target = Variable(data), Variable(target)
        t4 = time.time()
        # optimizer.zero_grad()
        t5 = time.time()
        #print( "after", data )
        output = model(data.float())
        t6 = time.time()
        region_loss.seen = region_loss.seen + data.data.size(0)
        loss = region_loss(output, target)
        t7 = time.time()
        loss.backward()
        t8 = time.time()
        # optimizer.step()
        t9 = time.time()
        if False and batch_idx > 1:  # flip False to True to print the per-stage timing report
            avg_time[0] = avg_time[0] + (t2 - t1)
            avg_time[1] = avg_time[1] + (t3 - t2)
            avg_time[2] = avg_time[2] + (t4 - t3)
            avg_time[3] = avg_time[3] + (t5 - t4)
            avg_time[4] = avg_time[4] + (t6 - t5)
            avg_time[5] = avg_time[5] + (t7 - t6)
            avg_time[6] = avg_time[6] + (t8 - t7)
            avg_time[7] = avg_time[7] + (t9 - t8)
            avg_time[8] = avg_time[8] + (t9 - t1)
            print('-------------------------------')
            print('       load data : %f' % (avg_time[0] / (batch_idx)))
            print('     cpu to cuda : %f' % (avg_time[1] / (batch_idx)))
            print('cuda to variable : %f' % (avg_time[2] / (batch_idx)))
            print('       zero_grad : %f' % (avg_time[3] / (batch_idx)))
            print(' forward feature : %f' % (avg_time[4] / (batch_idx)))
            print('    forward loss : %f' % (avg_time[5] / (batch_idx)))
            print('        backward : %f' % (avg_time[6] / (batch_idx)))
            print('            step : %f' % (avg_time[7] / (batch_idx)))
            print('           total : %f' % (avg_time[8] / (batch_idx)))
        t1 = time.time()  # fix: reset the reference time so per-batch timings aren't cumulative