def test_solver(model, data_loader, output_dir):
    """Run inference over ``data_loader`` and dump detections as a JSON file.

    Args:
        model: network to evaluate; weights are loaded from ``output_dir[0]``.
        data_loader: yields per-image tensors
            (feature, rank_score, box, label, origin_score, origin_box,
            image_id, keep_mask).
        output_dir: ``(checkpoint_path, log/result_directory)`` pair.
    """
    # load checkpoint
    load_checkpoint(model, output_dir[0])
    # mapping from contiguous class index -> original COCO category id
    New2Old = cvb.load('/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    result_path = os.path.join(output_dir[1], 'result.json')
    log_dir = output_dir[1]
    count = 0
    logger = solver_log(os.path.join(log_dir, 'test_'+ time.strftime('%Y%m%d_%H%M%S', time.localtime()) +'.log'))
    results = []
    for box_feature, rank_score, box_box, box_label, box_score_origin, box_box_origin, image_id, box_keep_np in data_loader:
        image_id = int(image_id.numpy())
        bboxes = []
        start = time.time()
        box_feature_variable = Variable(box_feature).cuda()
        box_score_variable = Variable(rank_score).cuda()
        box_box_variable = Variable(box_box).cuda()

        output = test(box_feature_variable, box_score_variable, box_box_variable, model)
        # BUG FIX: np.float / np.int were removed in NumPy 1.24; use builtins
        box_score_origin = box_score_origin.cpu().numpy().astype(float)
        box_keep_np = box_keep_np.cpu().numpy().astype(int)
        # rescore: detector confidence modulated by the network output
        final_score = box_score_origin * output

        for index in range(final_score.shape[0]):
            # classes kept (keep flag == 1) for this box
            cls_all_index = np.where(box_keep_np[index, :] == 1)[0]
            for cls_index in cls_all_index:
                x1, y1, x2, y2 = box_box_origin[index, cls_index*4:cls_index*4+4]
                score = final_score[index, cls_index]
                category_id = New2Old[str(cls_index+1)][1]
                # COCO result bbox format is [x, y, width, height]
                bboxes.append({'bbox': [int(x1), int(y1), int(x2)-int(x1)+1, int(y2)-int(y1)+1], 'score': float(score), 'category_id': category_id, 'image_id': int(image_id)})
        count += 1
        end = time.time()
        print_time = float(end-start)
        results.extend(bboxes)
        logger.info('index:{}, image_id:{}, cost:{}'.format(count, image_id, print_time))
    cvb.dump(results, result_path)

        
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Run the two-stage rescoring model over this worker's shard of
    ``dataset`` and return COCO-format detection dicts.

    Args:
        model: two-stage network; its forward returns pre- and post-stage
            outputs plus the post-stage bookkeeping tensors.
        dataset: indexable dataset returning per-image numpy arrays.
        output_dir: ``(checkpoint_dir, log_dir)`` pair; only ``log_dir`` is
            used here (weights are assumed to be loaded by the caller).
        thread_index, thread_num: shard selector — item ``i`` is processed
            when ``i % thread_num == thread_index``.

    Returns:
        list of COCO-style detection dicts.
    """
    New2Old = cvb.load('/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
    log_dir = output_dir[1]
    logger = solver_log(os.path.join(log_dir, 'test_'+ time.strftime('%Y%m%d_%H%M%S', time.localtime()) +'.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        if count % thread_num != thread_index:
            continue
        data_np = dataset[count]
        # dataset item layout (by position): 0 feature, 1 box, 2 score,
        # 3 label, 4 weight (unused here), 5 origin score, 6 origin box,
        # 7 gt boxes, 8 unique_class, 9 unique_class_len, 10 image id
        all_class_box_feature, all_class_box_box, all_class_box_score = torch.FloatTensor(data_np[0]), torch.FloatTensor(data_np[1]), torch.FloatTensor(data_np[2])
        all_class_box_label = data_np[3]
        all_class_box_origin_score, all_class_box_origin_box = torch.FloatTensor(data_np[5]), torch.FloatTensor(data_np[6])
        gts_box = torch.FloatTensor(data_np[7])
        unique_class, unique_class_len = torch.FloatTensor(data_np[8]), torch.FloatTensor(data_np[9])
        pre_unique_class, pre_unique_class_len = data_np[8], data_np[9]
        image_id = int(data_np[10])
        bboxes = []
        start = time.time()

        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(all_class_box_origin_score).cuda()
        all_class_box_origin_box_variable = Variable(all_class_box_origin_box).cuda()
        gts_box_tensor = gts_box.cuda()
        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()

        pre_stage_output, post_stage_output, post_stage_label, post_stage_weight, post_stage_box_origin_score_variable, post_stage_box_origin_box_tensor, post_unique_class, post_unique_class_len = model(all_class_box_feature_variable, all_class_box_box_variable, all_class_box_score_variable, all_class_box_origin_score_variable, all_class_box_origin_box_variable, gts_box_tensor, unique_class_cuda, unique_class_len_cuda)
        # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float
        pre_output = pre_stage_output.data.cpu().numpy().reshape(-1, 1).astype(float)

        # pre-stage: scores/boxes as the detector produced them
        pre_score = all_class_box_origin_score_variable.data.cpu().numpy().astype(float)[:,0:1].reshape(-1, 1)
        pre_box = all_class_box_origin_box_variable.data.cpu().numpy()
        pre_label = all_class_box_label
        # post-stage: after the second refinement pass inside the model
        post_score = post_stage_box_origin_score_variable.data.cpu().numpy().astype(float)[:,0:1].reshape(-1,1)
        post_box = post_stage_box_origin_box_tensor.cpu().numpy()
        post_output = post_stage_output.data.cpu().numpy().reshape(-1, 1).astype(float)
        post_unique_class_np = post_unique_class.cpu().numpy()
        post_unique_class_len_np = post_unique_class_len.cpu().numpy()
        post_label = post_stage_label.data.cpu().numpy()

        torch.cuda.empty_cache()
        # debug switch: evaluate the pre-stage head instead of the post-stage
        pre_flag = False
        if pre_flag:
            final_score = pre_score * pre_output
            unique_class_np = pre_unique_class
            unique_class_len_np = pre_unique_class_len
            final_box = pre_box
            final_label = pre_label
        else:
            final_score = post_score * post_output
            unique_class_np = post_unique_class_np
            unique_class_len_np = post_unique_class_len_np
            final_box = post_box
            final_label = post_label

        for cls_index in range(80):
            # skip classes with no surviving boxes for this image
            if unique_class_np[cls_index] == 0:
                continue
            # boxes of class cls_index live in the half-open range
            # [unique_class_len[cls_index], unique_class_len[cls_index+1])
            start_ = int(unique_class_len_np[cls_index])
            end_ = int(unique_class_len_np[cls_index+1])

            for index in range(start_, end_):
                x1, y1, x2, y2 = final_box[index, 0:4]
                score = final_score[index, 0]
                category_id = New2Old[str(cls_index+1)][1]
                # COCO result bbox format is [x, y, width, height]
                bboxes.append({'bbox': [int(x1), int(y1), int(x2-x1+1), int(y2-y1+1)], 'score': float(score), 'category_id': category_id, 'image_id': int(image_id)})

        end = time.time()
        print_time = float(end-start)
        results.extend(bboxes)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(thread_index, count, image_id, print_time))
    return results

        
# Example #3
# 0
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Evaluate ``model`` on this worker's shard of ``dataset`` and return
    COCO-format detection dicts.

    Args:
        model: network to evaluate; weights are loaded from ``output_dir[0]``.
        dataset: indexable dataset returning per-image numpy arrays.
        output_dir: ``(checkpoint_path, log_directory)`` pair.
        thread_index, thread_num: shard selector — item ``i`` is processed
            when ``i % thread_num == thread_index``.

    Returns:
        list of COCO-style detection dicts.
    """
    # load checkpoint
    load_checkpoint(model, output_dir[0])
    # mapping from contiguous class index -> original COCO category id
    New2Old = cvb.load(
        '/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    log_dir = output_dir[1]
    logger = solver_log(
        os.path.join(
            log_dir, 'test_' +
            time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        if count % thread_num != thread_index:
            continue
        data_np = dataset[count]
        box_feature, rank_score, box_box, box_label, box_score_origin, box_box_origin, image_id, box_keep_np = torch.FloatTensor(
            data_np[0]), torch.FloatTensor(data_np[1]), torch.FloatTensor(
                data_np[2]), torch.FloatTensor(data_np[3]), torch.FloatTensor(
                    data_np[4]), torch.FloatTensor(
                        data_np[5]), torch.IntTensor(
                            [data_np[6]]), torch.FloatTensor(data_np[7])
        image_id = int(image_id.numpy())
        bboxes = []
        start = time.time()
        box_feature_variable = Variable(box_feature).cuda()
        box_score_variable = Variable(rank_score).cuda()
        box_box_variable = Variable(box_box).cuda()

        output = test(box_feature_variable, box_score_variable,
                      box_box_variable, model)
        # BUG FIX: np.float / np.int were removed in NumPy 1.24; use builtins
        box_score_origin = box_score_origin.cpu().numpy().astype(float)
        box_keep_np = box_keep_np.cpu().numpy().astype(int)
        # rescore: detector confidence modulated by the network output
        final_score = box_score_origin * output

        for index in range(final_score.shape[0]):
            # classes kept (keep flag == 1) for this box
            cls_all_index = np.where(box_keep_np[index, :] == 1)[0]
            for cls_index in cls_all_index:
                # per-class box coordinates are packed 4-wide per class
                x1, y1, x2, y2 = box_box_origin[index, cls_index *
                                                4:cls_index * 4 + 4]
                score = final_score[index, cls_index]
                category_id = New2Old[str(cls_index + 1)][1]
                # COCO result bbox format is [x, y, width, height]
                bboxes.append({
                    'bbox': [
                        int(x1),
                        int(y1),
                        int(x2) - int(x1) + 1,
                        int(y2) - int(y1) + 1
                    ],
                    'score': float(score),
                    'category_id': category_id,
                    'image_id': int(image_id)
                })
        end = time.time()
        print_time = float(end - start)
        results.extend(bboxes)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(
            thread_index, count, image_id, print_time))
    return results
# Example #4
# 0
def solver(model,
           data_loader,
           n_epochs,
           output_dir,
           print_every=1,
           save_every=1,
           learning_rate=0.01,
           step=10,
           pos_neg_weight=100,
           load_file=None,
           continue_epochs=None,
           continue_iters=None):
    """Train ``model`` with SGD over ``data_loader`` for ``n_epochs`` epochs.

    Args:
        model: network to train; parameters are updated in place.
        data_loader: yields (feature, rank_score, box, label, weight,
            class_label, class_weight) tuples per batch.
        n_epochs: number of training epochs.
        output_dir: ``(checkpoint_dir, log_dir)`` pair.
        print_every: log averaged statistics every N iterations.
        save_every: save a checkpoint every N iterations.
        learning_rate: initial SGD learning rate; decayed by 0.1 every
            ``step`` epochs.
        step: epoch interval for learning-rate decay.
        pos_neg_weight: unused here; kept for interface compatibility.
        load_file: unused here; kept for interface compatibility.
        continue_epochs / continue_iters: resume point — epochs before
            ``continue_epochs`` are skipped and the iteration counter
            starts from ``continue_iters``.
    """
    if continue_epochs is None:
        continue_epochs = -1
        continue_iters = -1
        first_count = None
    else:
        first_count = continue_iters
    # running totals, reset every `print_every` iterations
    print_loss_total = 0
    print_pos_acc_total = 0
    print_neg_acc_total = 0
    print_time_total = 0

    model_optimizer = optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=0.9)
    model_dir = output_dir[0]
    log_dir = output_dir[1]

    logger = solver_log(
        os.path.join(
            log_dir, 'train_' +
            time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))

    for epoch_index in range(n_epochs):
        # skip epochs already completed when resuming
        if (epoch_index < continue_epochs):
            continue
        if first_count is None:
            count = 0
        else:
            count = first_count
        for box_feature, rank_score, box_box, box_label, box_weight, box_class_label, box_class_weight in data_loader:
            start = time.time()
            box_feature_variable = Variable(box_feature).cuda()
            box_score_variable = Variable(rank_score).cuda()
            box_label_variable = Variable(box_label).cuda()
            box_class_label_variable = Variable(box_class_label).cuda()
            box_box_variable = Variable(box_box).cuda()
            # per-sample weighted binary cross-entropy, one per head
            criterion_box = nn.BCELoss(weight=box_weight.cuda())
            criterion_box_class = nn.BCELoss(weight=box_class_weight.cuda())
            loss, pos_accuracy, neg_accuracy = train(
                box_feature_variable, box_score_variable, box_box_variable,
                box_label_variable, box_class_label_variable, model,
                model_optimizer, criterion_box, criterion_box_class)
            count += 1
            print_loss_total += loss
            print_pos_acc_total += pos_accuracy
            print_neg_acc_total += neg_accuracy
            end = time.time()
            print_time_total += float(end - start)
            if count % print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_pos_acc_avg = print_pos_acc_total / print_every
                print_neg_acc_avg = print_neg_acc_total / print_every
                print_time_avg = print_time_total / print_every

                print_loss_total = 0
                print_pos_acc_total = 0
                print_neg_acc_total = 0
                print_time_total = 0
                logger.info(
                    'epoch:{}, iter:{}, lr:{}, avg_time:{:.3f}, avg_loss:{:.10f}, accuracy:{:.3f}, recall:{:.3f}'
                    .format(epoch_index, count, learning_rate, print_time_avg,
                            print_loss_avg, print_pos_acc_avg,
                            print_neg_acc_avg))
            if count % save_every == 0:
                save_checkpoint(model, epoch_index, count, model_dir)

        # drop partial-window totals so they do not leak into the next epoch
        print_loss_total = 0
        print_pos_acc_total = 0
        print_neg_acc_total = 0
        print_time_total = 0

        if epoch_index % step == 0 and epoch_index > 0:
            learning_rate = learning_rate * 0.1
            for param_group in model_optimizer.param_groups:
                param_group['lr'] = learning_rate
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Collect detector-score detections for this worker's shard of
    ``dataset`` and return them as COCO-format dicts.

    In this variant the network forward pass is disabled (the ``test(...)``
    call is commented out upstream); final scores are the detector's
    original confidences.

    Args:
        model: network whose weights are loaded from ``output_dir[0]``
            (loaded but not invoked here).
        dataset: indexable dataset returning per-image numpy arrays.
        output_dir: ``(checkpoint_path, log_directory)`` pair.
        thread_index, thread_num: shard selector — item ``i`` is processed
            when ``i % thread_num == thread_index``.

    Returns:
        list of COCO-style detection dicts.
    """
    # load checkpoint
    load_checkpoint(model, output_dir[0])
    # mapping from contiguous class index -> original COCO category id
    New2Old = cvb.load('/data/luqi/coco-master/PythonAPI/Newlabel.pkl')
    log_dir = output_dir[1]
    logger = solver_log(os.path.join(log_dir, 'test_'+ time.strftime('%Y%m%d_%H%M%S', time.localtime()) +'.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        if count % thread_num != thread_index:
            continue
        data_np = dataset[count]
        # dataset item layout (by position): 0 feature, 1 box, 2 score,
        # 3 label, 7 class index, 8 origin score, 9 origin box,
        # 10 unique_class, 11 unique_class_len, 12 image id
        # BUG FIX: the original listed these names as a bare expression
        # (NameError at runtime) and built Variables from the undefined
        # names box_feature/rank_score/box_box; both removed.
        all_class_box_feature, all_class_box_box, all_class_box_score = torch.FloatTensor(data_np[0]), torch.FloatTensor(data_np[1]), torch.FloatTensor(data_np[2])
        all_class_box_label = data_np[3]
        all_class_box_class, all_class_box_origin_score, all_class_box_origin_box = data_np[7], torch.FloatTensor(data_np[8]), data_np[9]
        unique_class, unique_class_len = torch.FloatTensor(data_np[10]), torch.FloatTensor(data_np[11])
        image_id = int(data_np[12])
        bboxes = []
        start = time.time()

        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(all_class_box_origin_score).cuda()

        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()

        # np.float was removed in NumPy 1.24; use the builtin float instead
        box_score_origin = all_class_box_origin_score_variable.data.cpu().numpy().astype(float)[:,0:1].reshape(-1, 1)
        # network rescoring disabled: use the detector confidence as-is
        final_score = box_score_origin
        for index in range(final_score.shape[0]):
            cls_index = int(all_class_box_class[index, 0])
            x1, y1, x2, y2 = all_class_box_origin_box[index, 0:4]
            score = final_score[index, 0]
            category_id = New2Old[str(cls_index+1)][1]
            # COCO result bbox format is [x, y, width, height]
            bboxes.append({'bbox': [int(x1), int(y1), int(x2)-int(x1)+1, int(y2)-int(y1)+1], 'score': float(score), 'category_id': category_id, 'image_id': int(image_id)})
        end = time.time()
        print_time = float(end-start)
        results.extend(bboxes)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(thread_index, count, image_id, print_time))
    return results

        
# Example #6
# 0
def solver(model,
           data_loader,
           n_epochs,
           output_dir,
           print_every=1,
           save_every=1,
           learning_rate=0.01,
           step=10,
           pos_neg_weight=100,
           load_file=None,
           continue_epochs=None,
           continue_iters=None):
    """Train ``model`` with SGD using a BCE loss plus a margin ranking loss.

    Args:
        model: network to train; parameters are updated in place.
        data_loader: yields per-batch tensors (per-box features/scores/boxes
            plus the all-class variants, labels, weights and the
            unique-class index tables).
        n_epochs: number of training epochs.
        output_dir: ``(checkpoint_dir, log_dir)`` pair.
        print_every: log averaged statistics every N iterations.
        save_every: save a checkpoint every N iterations.
        learning_rate: initial SGD learning rate; decayed by 0.1 every
            ``step`` epochs.
        step: epoch interval for learning-rate decay.
        pos_neg_weight: unused here; kept for interface compatibility.
        load_file: unused here; kept for interface compatibility.
        continue_epochs / continue_iters: resume point — epochs before
            ``continue_epochs`` are skipped and the iteration counter
            starts from ``continue_iters``.
    """
    if continue_epochs is None:
        continue_epochs = -1
        continue_iters = -1
        first_count = None
    else:
        first_count = continue_iters
    # running totals, reset every `print_every` iterations
    print_loss_total = 0
    print_pos_acc_total = 0
    print_neg_acc_total = 0
    print_time_total = 0

    model_optimizer = optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=0.9)
    model_dir = output_dir[0]
    log_dir = output_dir[1]

    logger = solver_log(
        os.path.join(
            log_dir, 'train_' +
            time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))

    for epoch_index in range(n_epochs):
        # skip epochs already completed when resuming
        if (epoch_index < continue_epochs):
            continue
        if first_count is None:
            count = 0
        else:
            count = first_count
        for box_feature, rank_score, box_box, box_label, box_weight, unique_class, unique_class_len, all_class_box_feature, all_class_box_box, all_class_box_score, all_class_box_label, all_class_box_weight, all_class_box_origin_score in data_loader:
            start = time.time()
            box_feature_variable = Variable(box_feature).cuda()
            box_score_variable = Variable(rank_score).cuda()
            box_label_variable = Variable(box_label).cuda()
            box_box_variable = Variable(box_box).cuda()

            # rank score is the detector's original confidence column
            all_class_box_rank_score = all_class_box_origin_score[:,
                                                                  0:1].clone()
            all_class_box_label_np = all_class_box_label.numpy()
            rank_index = np.where(all_class_box_label_np == 0)[0]
            if rank_index.shape[0] == 0:
                # no negative boxes: ranking loss is undefined, skip batch
                count += 1
                continue
            # MarginRankingLoss targets: +1 for positives, -1 for negatives
            all_class_box_rank_label = torch.ones(all_class_box_label.size())
            all_class_box_rank_label[rank_index, :] = -1
            all_class_box_rank_label_variable = Variable(
                torch.FloatTensor(all_class_box_rank_label)).cuda()
            all_class_box_rank_score_variable = Variable(
                all_class_box_rank_score).cuda()

            all_class_box_label_variable = Variable(all_class_box_label).cuda()
            all_class_box_score_variable = Variable(all_class_box_score).cuda()
            all_class_box_box_variable = Variable(all_class_box_box).cuda()
            all_class_box_feature_variable = Variable(
                all_class_box_feature).cuda()
            all_class_box_origin_score_variable = Variable(
                all_class_box_origin_score).cuda()

            unique_class_cuda = unique_class.cuda()
            unique_class_len_cuda = unique_class_len.cuda()

            # weighted BCE for classification + margin ranking for ordering
            criterion = nn.BCELoss(weight=all_class_box_weight.cuda())
            criterion_rank = nn.MarginRankingLoss()
            loss, pos_accuracy, neg_accuracy = train(
                box_feature_variable, box_score_variable, box_box_variable,
                box_label_variable, all_class_box_feature_variable,
                all_class_box_score_variable, all_class_box_box_variable,
                all_class_box_label_variable,
                all_class_box_origin_score_variable,
                all_class_box_rank_score_variable,
                all_class_box_rank_label_variable, model, model_optimizer,
                criterion, criterion_rank, unique_class_cuda,
                unique_class_len_cuda)

            count += 1
            print_loss_total += loss
            print_pos_acc_total += pos_accuracy
            print_neg_acc_total += neg_accuracy
            end = time.time()
            print_time_total += float(end - start)
            if count % print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_pos_acc_avg = print_pos_acc_total / print_every
                print_neg_acc_avg = print_neg_acc_total / print_every
                print_time_avg = print_time_total / print_every

                print_loss_total = 0
                print_pos_acc_total = 0
                print_neg_acc_total = 0
                print_time_total = 0
                logger.info(
                    'epoch:{}, iter:{}, lr:{}, avg_time:{:.3f}, avg_loss:{:.10f}, accuracy:{:.3f}, recall:{:.3f}'
                    .format(epoch_index, count, learning_rate, print_time_avg,
                            print_loss_avg, print_pos_acc_avg,
                            print_neg_acc_avg))
            if count % save_every == 0:
                save_checkpoint(model, epoch_index, count, model_dir)

        # drop partial-window totals so they do not leak into the next epoch
        print_loss_total = 0
        print_pos_acc_total = 0
        print_neg_acc_total = 0
        print_time_total = 0

        if epoch_index % step == 0 and epoch_index > 0:
            learning_rate = learning_rate * 0.1
            for param_group in model_optimizer.param_groups:
                param_group['lr'] = learning_rate
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Evaluate the rescoring model on this worker's shard of ``dataset``
    and return COCO-format detection dicts.

    Args:
        model: network to evaluate; weights are loaded from ``output_dir[0]``.
        dataset: indexable dataset returning per-image numpy arrays.
        output_dir: ``(checkpoint_path, log_directory)`` pair.
        thread_index, thread_num: shard selector — item ``i`` is processed
            when ``i % thread_num == thread_index``.

    Returns:
        list of COCO-style detection dicts.
    """
    # load checkpoint
    load_checkpoint(model, output_dir[0])
    # mapping from contiguous class index -> original COCO category id
    New2Old = cvb.load(
        '/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
    log_dir = output_dir[1]
    logger = solver_log(
        os.path.join(
            log_dir, 'test_' +
            time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        if count % thread_num != thread_index:
            continue
        data_np = dataset[count]
        # dataset item layout (by position): 0 feature, 1 box, 2 score,
        # 3 label, 4 weight, 5 origin score, 6 origin box, 7 unique_class,
        # 8 unique_class_len, 9 image id
        all_class_box_feature, all_class_box_box, all_class_box_score = torch.FloatTensor(
            data_np[0]), torch.FloatTensor(data_np[1]), torch.FloatTensor(
                data_np[2])
        all_class_box_label = data_np[3]
        all_class_box_weight = data_np[4]
        if all_class_box_weight.shape[0] == 0:
            # image with no candidate boxes: nothing to score
            continue
        all_class_box_origin_score, all_class_box_origin_box = torch.FloatTensor(
            data_np[5]), data_np[6]
        unique_class, unique_class_len = torch.FloatTensor(
            data_np[7]), torch.FloatTensor(data_np[8])
        unique_class_np, unique_class_len_np = data_np[7], data_np[8]
        image_id = int(data_np[9])
        bboxes = []
        start = time.time()

        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(
            all_class_box_origin_score).cuda()

        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()

        output = test(all_class_box_feature_variable,
                      all_class_box_box_variable, all_class_box_score_variable,
                      all_class_box_origin_score_variable, unique_class_cuda,
                      unique_class_len_cuda, model)

        # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float
        box_score_origin = all_class_box_origin_score_variable.data.cpu(
        ).numpy().astype(float)[:, 0:1].reshape(-1, 1)
        # rescore: detector confidence modulated by the network output
        final_score = box_score_origin * output
        for cls_index in range(80):
            # skip classes with no surviving boxes for this image
            if unique_class_np[cls_index] == 0:
                continue
            # boxes of class cls_index live in the half-open range
            # [unique_class_len[cls_index], unique_class_len[cls_index+1])
            start_ = int(unique_class_len_np[cls_index])
            end_ = int(unique_class_len_np[cls_index + 1])
            for index in range(start_, end_):
                x1, y1, x2, y2 = all_class_box_origin_box[index, 0:4]
                score = final_score[index, 0]
                category_id = New2Old[str(cls_index + 1)][1]
                # COCO result bbox format is [x, y, width, height]
                bboxes.append({
                    'bbox':
                    [int(x1),
                     int(y1),
                     int(x2 - x1 + 1),
                     int(y2 - y1 + 1)],
                    'score': float(score),
                    'category_id': category_id,
                    'image_id': int(image_id)
                })

        end = time.time()
        print_time = float(end - start)
        results.extend(bboxes)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(
            thread_index, count, image_id, print_time))
    return results
# Example #8
# 0
def solver(model,
           data_loader,
           n_epochs,
           output_dir,
           print_every=1,
           save_every=1,
           learning_rate=0.01,
           step=10,
           pre_post_weight=1,
           load_file=None,
           continue_epochs=None,
           continue_iters=None):
    """Train the two-stage (pre/post) rescoring model with SGD.

    Args:
        model: two-stage network to train; parameters updated in place.
        data_loader: yields per-image tensors (features, boxes, scores,
            labels, weights, origin scores/boxes, gt boxes, unique-class
            tables, image id).
        n_epochs: number of training epochs.
        output_dir: ``(checkpoint_dir, log_dir)`` pair.
        print_every: log averaged statistics every N iterations.
        save_every: unused here — a checkpoint is saved once per epoch;
            kept for interface compatibility.
        learning_rate: initial SGD learning rate; decayed by 0.1 every
            ``step`` epochs.
        step: epoch interval for learning-rate decay.
        pre_post_weight: relative weighting of the pre- vs post-stage loss,
            forwarded to ``train``.
        load_file: unused here; kept for interface compatibility.
        continue_epochs / continue_iters: resume point — epochs before
            ``continue_epochs`` are skipped and the iteration counter
            starts from ``continue_iters``.
    """
    if continue_epochs is None:
        continue_epochs = -1
        continue_iters = -1
        first_count = None
    else:
        first_count = continue_iters

    # running totals (pre- and post-stage), reset every `print_every` iters
    print_pre_loss_total = 0
    print_post_loss_total = 0
    print_precision_total = [0, 0]
    print_recall_total = [0, 0]
    print_time_total = 0

    model_optimizer = optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=0.9)
    model_dir = output_dir[0]
    log_dir = output_dir[1]

    logger = solver_log(
        os.path.join(
            log_dir, 'train_' +
            time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))

    for epoch_index in range(n_epochs):
        # skip epochs already completed when resuming
        if (epoch_index < continue_epochs):
            continue
        if first_count is None:
            count = 0
        else:
            count = first_count
        for all_class_box_feature, all_class_box_box, all_class_box_score, all_class_box_label, all_class_box_weight, all_class_box_origin_score, all_class_box_origin_box, gts_box, unique_class, unique_class_len, img_id in data_loader:
            start = time.time()
            all_class_box_feature_variable = Variable(
                all_class_box_feature).cuda()
            all_class_box_box_variable = Variable(all_class_box_box).cuda()
            all_class_box_score_variable = Variable(all_class_box_score).cuda()
            all_class_box_label_variable = Variable(all_class_box_label).cuda()
            all_class_box_origin_score_variable = Variable(
                all_class_box_origin_score).cuda()
            all_class_box_origin_box_variable = Variable(
                all_class_box_origin_box).cuda()
            all_class_box_weight_tensor = all_class_box_weight.cuda()
            gts_box_tensor = gts_box.cuda()
            image_id = int(img_id.numpy())
            unique_class_cuda = unique_class.cuda()
            unique_class_len_cuda = unique_class_len.cuda()

            loss, precision, recall = train(
                all_class_box_feature_variable, all_class_box_box_variable,
                all_class_box_score_variable, all_class_box_label_variable,
                all_class_box_origin_score_variable,
                all_class_box_origin_box_variable, all_class_box_weight_tensor,
                gts_box_tensor, unique_class_cuda, unique_class_len_cuda,
                model, model_optimizer, pre_post_weight)
            count += 1
            # loss / precision / recall come back as (pre_stage, post_stage)
            print_pre_loss_total += loss[0]
            print_post_loss_total += loss[1]
            print_precision_total[0] += precision[0]
            print_precision_total[1] += precision[1]
            print_recall_total[0] += recall[0]
            print_recall_total[1] += recall[1]

            end = time.time()
            print_time_total += float(end - start)
            if count % print_every == 0:
                print_pre_loss_avg = print_pre_loss_total / print_every
                print_post_loss_avg = print_post_loss_total / print_every
                print_pre_pre_avg = print_precision_total[0] / print_every
                print_post_pre_avg = print_precision_total[1] / print_every
                print_pre_rec_avg = print_recall_total[0] / print_every
                print_post_rec_avg = print_recall_total[1] / print_every
                print_time_avg = print_time_total / print_every

                print_pre_loss_total = 0
                print_post_loss_total = 0
                print_precision_total = [0, 0]
                print_recall_total = [0, 0]
                print_time_total = 0
                logger.info(
                    'epoch:{}, iter:{}, lr:{}, avg_time:{:.3f}, pre_loss:{:.10f}, post_loss:{:.10f}, pre_pre:{:.3f}, post_pre:{:.3f}, pre_rec:{:.3f}, post_rec:{:.3f}'
                    .format(epoch_index, count, learning_rate, print_time_avg,
                            print_pre_loss_avg, print_post_loss_avg,
                            print_pre_pre_avg, print_post_pre_avg,
                            print_pre_rec_avg, print_post_rec_avg))
        # checkpoint once per epoch
        save_checkpoint(model, epoch_index, count, model_dir)

        # BUG FIX: the original reset print_loss_total / print_pos_acc_total /
        # print_neg_acc_total here — copy-paste leftovers that do not exist in
        # this function — so the real accumulators leaked across epochs.
        # Reset the actual pre/post totals instead.
        print_pre_loss_total = 0
        print_post_loss_total = 0
        print_precision_total = [0, 0]
        print_recall_total = [0, 0]
        print_time_total = 0

        if epoch_index % step == 0 and epoch_index > 0:
            learning_rate = learning_rate * 0.1
            for param_group in model_optimizer.param_groups:
                param_group['lr'] = learning_rate
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Run inference on this thread's shard of `dataset`, draw the predicted
    boxes onto the source images, and return COCO-style detection dicts.

    Args:
        model: network to evaluate; weights are restored from checkpoint.
        dataset: indexable dataset; `dataset[i]` yields a tuple of numpy
            arrays (features, boxes, scores, labels, weights, origin scores,
            origin boxes, unique_class, unique_class_len, image_id).
        output_dir: pair `(checkpoint_path, log_dir)`.
        thread_index: which shard this worker processes.
        thread_num: total number of worker shards.

    Returns:
        list of dicts with keys 'bbox' (xywh), 'score', 'category_id',
        'image_id' for every processed image.

    NOTE(review): a later definition of `test_solver` in this file shadows
    this one at import time — confirm which variant is intended.
    """
    # Restore model weights from the checkpoint path.
    load_checkpoint(model, output_dir[0])
    # Mapping from contiguous class index (as str) to original COCO category id.
    New2Old = cvb.load('/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
    log_dir = output_dir[1]
    color_map = _get_voc_color_map()
    logger = solver_log(os.path.join(
        log_dir,
        'test_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        # Round-robin sharding: each worker handles every thread_num-th sample.
        if count % thread_num != thread_index:
            continue
        # NOTE(review): debug limit — only the first 100 dataset indices are
        # ever processed; remove for a full evaluation run.
        if count >= 100:
            break
        data_np = dataset[count]
        all_class_box_feature = torch.FloatTensor(data_np[0])
        all_class_box_box = torch.FloatTensor(data_np[1])
        all_class_box_score = torch.FloatTensor(data_np[2])
        all_class_box_label = data_np[3]
        all_class_box_weight = data_np[4]  # unused here; kept to document layout
        all_class_box_origin_score = torch.FloatTensor(data_np[5])
        all_class_box_origin_box = data_np[6]
        unique_class = torch.FloatTensor(data_np[7])
        unique_class_len = torch.FloatTensor(data_np[8])
        unique_class_np, unique_class_len_np = data_np[7], data_np[8]
        image_id = int(data_np[9])
        img_name = str(image_id).zfill(12)
        im_file = '/mnt/lustre/liushu1/qilu_ex/dataset/coco/fpn_bn_base/img/' + img_name + '.jpg'
        im = cv2.imread(im_file)
        bboxes = []
        start = time.time()

        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(all_class_box_origin_score).cuda()

        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()

        output = test(all_class_box_feature_variable, all_class_box_box_variable,
                      all_class_box_score_variable, all_class_box_origin_score_variable,
                      unique_class_cuda, unique_class_len_cuda, model)

        # np.float was removed in NumPy 1.24; np.float64 is the same dtype.
        box_score_origin = all_class_box_origin_score_variable.data.cpu().numpy().astype(
            np.float64)[:, 0:1].reshape(-1, 1)
        # Rescore: network output modulates the detector's original confidence.
        final_score = box_score_origin * output
        for cls_index in range(80):
            if unique_class_np[cls_index] == 0:
                continue
            # unique_class_len holds per-class [start, end) offsets into the
            # flattened box arrays.
            start_ = int(unique_class_len_np[cls_index])
            end_ = int(unique_class_len_np[cls_index + 1])
            for index in range(start_, end_):
                x1, y1, x2, y2 = all_class_box_origin_box[index, 0:4]
                score = final_score[index, 0]
                category_id = cls_index + 1
                # COCO bbox format is [x, y, width, height].
                bboxes.append({'bbox': [int(x1), int(y1), int(x2 - x1 + 1), int(y2 - y1 + 1)],
                               'score': float(score),
                               'category_id': category_id,
                               'image_id': int(image_id)})

        # Draw every box above a small confidence threshold for visual inspection.
        for bbox_single in bboxes:
            cls_indx = int(bbox_single['category_id'])
            x1, y1, w, h = bbox_single['bbox']
            score = bbox_single['score']
            if score < 0.01:
                continue
            cv2.rectangle(im, (int(x1), int(y1)), (int(x1 + w - 1), int(y1 + h - 1)),
                          tuple(color_map[cls_indx, :]), 2)
        save_path = os.path.join('/mnt/lustre/liushu1/qilu_ex/Post_vis/', img_name + '.jpg')
        cv2.imwrite(save_path, im)
        end = time.time()
        print_time = float(end - start)
        # Fix: the function returned `results` but never populated it — the
        # extend below was commented out in the original.
        results.extend(bboxes)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(
            thread_index, count, image_id, print_time))
    return results

        
def test_solver(model, dataset, output_dir, thread_index, thread_num):
    """Run inference on this thread's shard of `dataset` and dump, per image,
    the (network_output, original_score) pairs to a pickle file for later
    offline rescoring.

    Args:
        model: network to evaluate; weights are restored from checkpoint.
        dataset: indexable dataset; `dataset[i]` yields a tuple of numpy
            arrays (features, boxes, scores, labels, weights, origin scores,
            origin boxes, unique_class, unique_class_len, image_id).
        output_dir: pair `(checkpoint_path, log_dir)`.
        thread_index: which shard this worker processes.
        thread_num: total number of worker shards.

    Returns:
        an empty list — this variant only writes score pickles as a side
        effect; detection assembly is intentionally disabled.

    NOTE(review): this definition shadows an earlier `test_solver` in the
    same file; only this one is effective at import time.
    """
    # Restore model weights from the checkpoint path.
    load_checkpoint(model, output_dir[0])
    # Mapping from contiguous class index (as str) to original COCO category
    # id; loaded for parity with the other variants, unused while detection
    # assembly is disabled.
    New2Old = cvb.load(
        '/mnt/lustre/liushu1/mask_rcnn/coco-master/PythonAPI/Newlabel.pkl')
    np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
    log_dir = output_dir[1]
    logger = solver_log(os.path.join(
        log_dir,
        'test_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.log'))
    results = []
    data_num = len(dataset)
    for count in range(data_num):
        # Round-robin sharding: each worker handles every thread_num-th sample.
        if count % thread_num != thread_index:
            continue
        data_np = dataset[count]
        all_class_box_feature = torch.FloatTensor(data_np[0])
        all_class_box_box = torch.FloatTensor(data_np[1])
        all_class_box_score = torch.FloatTensor(data_np[2])
        all_class_box_label = data_np[3]
        # Skip images with no candidate boxes.
        if all_class_box_label.shape[0] == 0:
            continue
        all_class_box_weight = data_np[4]  # unused here; kept to document layout
        all_class_box_origin_score = torch.FloatTensor(data_np[5])
        all_class_box_origin_box = data_np[6]
        unique_class = torch.FloatTensor(data_np[7])
        unique_class_len = torch.FloatTensor(data_np[8])
        unique_class_np, unique_class_len_np = data_np[7], data_np[8]
        image_id = int(data_np[9])

        bboxes = []
        start = time.time()

        all_class_box_score_variable = Variable(all_class_box_score).cuda()
        all_class_box_box_variable = Variable(all_class_box_box).cuda()
        all_class_box_feature_variable = Variable(all_class_box_feature).cuda()
        all_class_box_origin_score_variable = Variable(
            all_class_box_origin_score).cuda()

        unique_class_cuda = unique_class.cuda()
        unique_class_len_cuda = unique_class_len.cuda()

        output = test(all_class_box_feature_variable,
                      all_class_box_box_variable, all_class_box_score_variable,
                      all_class_box_origin_score_variable, unique_class_cuda,
                      unique_class_len_cuda, model)

        # np.float was removed in NumPy 1.24; np.float64 is the same dtype.
        box_score_origin = all_class_box_origin_score_variable.data.cpu(
        ).numpy().astype(np.float64)[:, 0:1].reshape(-1, 1)
        # Column 0: network rescoring output; column 1: detector's original score.
        save_score = np.concatenate((output, box_score_origin), 1)

        save_path = ('/mnt/lustre/liushu1/qilu_ex/dataset/test_dev/panet/score/'
                     + str(image_id).zfill(12) + '.pkl')
        cvb.dump(save_score, save_path)

        end = time.time()
        print_time = float(end - start)
        logger.info('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(
            thread_index, count, image_id, print_time))
    return results