# Example 1
# 0
def run(thread_index, thread_num, result, args):
    """Evaluation worker: locate a fixed checkpoint, build the test dataset,
    and run ``test_solver`` over this thread's share of the data.

    Args:
        thread_index: 0-based index of this worker.
        thread_num: total number of workers (used by ``test_solver`` to
            partition the dataset).
        result: shared list; this worker's outputs are appended to it.
        args: namespace providing ``output_dir``, ``base_path``, ``img_list``,
            ``hidden_size``, ``attn_type`` and ``context_type``.

    Raises:
        FileNotFoundError: if the hard-coded checkpoint file is missing.
    """
    # Checkpoint produced by training (hard-coded epoch/iteration).
    model_dir = os.path.join(args.output_dir, 'model/')
    model_path = os.path.join(model_dir, 'epoch_6_iter_117266.pth')
    if not osp.exists(model_path):
        # Bug fix: the original `raise "there is no latest.pth "` raised a
        # plain string, which is itself a TypeError in Python 3 — raise a
        # proper exception carrying the missing path instead.
        raise FileNotFoundError(
            'there is no latest.pth: {}'.format(model_path))

    # NOTE(review): `result_dir` is not defined anywhere in the visible code —
    # presumably a module-level path; verify before running.
    output_dir = [model_path, result_dir]
    use_cuda = torch.cuda.is_available()

    # 81 placeholder class labels expected by TrainDataset.
    cls_list = ['_' for _ in range(81)]
    # datasets
    val = TrainDataset(args.base_path,
                       args.img_list,
                       'msra',
                       cls_list,
                       phase='test')

    # model
    model = Encoder_Decoder(args.hidden_size,
                            attn_type=args.attn_type,
                            context_type=args.context_type)

    if use_cuda:
        model = model.cuda()

    # Inference mode; test_solver receives the checkpoint path via output_dir
    # (presumably it loads the weights itself — confirm against test_solver).
    model.eval()
    thread_result = test_solver(model, val, output_dir, thread_index,
                                thread_num)
    result.extend(thread_result)
def run(thread_index, thread_num, result, args):
    """Worker that converts the dataset's per-class detections into
    COCO-style bbox records for every sample assigned to this thread,
    appending them to the shared ``result`` list.

    Args:
        thread_index: 0-based index of this worker.
        thread_num: total number of workers; samples are split round-robin.
        result: shared list receiving the produced bbox dicts.
        args: namespace providing ``base_path``, ``img_list`` and ``use_mode``.
    """
    cls_list = ['_'] * 81
    # datasets
    val = TrainDataset(args.base_path,
                       args.img_list,
                       args.use_mode,
                       cls_list,
                       phase='test')
    # Category-id remapping and image-level multilabel scores (pickled).
    New2Old = cvb.load('/data/luqi/coco-master/PythonAPI/Newlabel.pkl')
    multilabel = cvb.load('/data/luqi/dataset/pytorch_data/multilabel.pkl')

    thread_result = []
    for sample_idx in range(len(val)):
        # Round-robin partitioning of samples across workers.
        if sample_idx % thread_num != thread_index:
            continue
        tic = time.time()
        (all_class_box_feature, all_class_box_box, all_class_box_score,
         all_class_box_label, all_class_box_weight, all_class_box_origin_score,
         all_class_box_origin_box, unique_class, unique_class_len,
         image_id, phase_np) = val[sample_idx]
        bboxes = []
        for cls_index in range(80):
            # Skip classes absent from this image.
            if unique_class[cls_index] == 0:
                continue
            img_name = str(int(image_id)).zfill(12)
            # Image-level score for this class (currently not folded into
            # the per-box score below).
            class_score = float(multilabel[img_name][cls_index])
            start = int(unique_class_len[cls_index])
            end = int(unique_class_len[cls_index + 1])
            for box_idx in range(start, end):
                x1, y1, x2, y2 = all_class_box_origin_box[box_idx, 0:4]
                score = all_class_box_origin_score[box_idx, 0]
                category_id = New2Old[str(cls_index + 1)][1]
                # COCO bbox layout: [x, y, width, height].
                bboxes.append({
                    'bbox': [int(x1), int(y1),
                             int(x2) - int(x1) + 1,
                             int(y2) - int(y1) + 1],
                    'score': float(score),
                    'category_id': category_id,
                    'image_id': int(image_id),
                })
        thread_result.extend(bboxes)
        toc = time.time()
        print('thread_index:{}, index:{}, image_id:{}, cost:{}'.format(
            thread_index, sample_idx, image_id, float(toc - tic)))
    result.extend(thread_result)
    # NOTE(review): the lines below read like training-setup code spliced in
    # after the evaluation loop; `model_dir`, `log_info_dir` and (further
    # down) `load_pth` are never assigned in the visible code — presumably
    # set earlier from `args`; verify before running.
    if not osp.exists(model_dir):
        os.makedirs(model_dir)

    if not osp.exists(log_info_dir):
        os.makedirs(log_info_dir)

    # Checkpoint directory and log directory, passed on as a pair.
    output_dir = [model_dir, log_info_dir]

    use_cuda = torch.cuda.is_available()

    # 81 placeholder class labels expected by TrainDataset.
    cls_list = ['_' for _ in range(81)]
    # dataset
    train = TrainDataset(args.base_path,
                         args.img_list,
                         args.use_mode,
                         cls_list,
                         weight=args.weight)
    train_loader = torch.utils.data.DataLoader(train,
                                               batch_size=1,
                                               num_workers=1,
                                               collate_fn=unique_collate,
                                               pin_memory=False,
                                               shuffle=True)

    # model
    model = Encoder_Decoder(args.hidden_size,
                            attn_type=args.attn_type,
                            context_type=args.context_type)
    if args.load:
        # Resume from a checkpoint; returns the epoch/iteration to continue
        # from (the rest of this routine is outside the visible source).
        continue_epochs, continue_iters = load_checkpoint(model, load_pth)