Example #1
    def __init__(self, res_path, info):
        # Build the matching network and restore its trained weights.
        net = net_1024.net_1024()
        net_path = "SaveModel/net_1024_beta2.pth"
        print("------->  loading net_1024")
        self.net = LoadModel(net, net_path)

        self.sequence = []

        print("------->  initializing  MOT17-{}-{} ...".format(
            info[0], info[1]))
        self.sequence.append(VideoData(info, res_path))
        print("------->  initialize  MOT17-{}-{}  done".format(
            info[0], info[1]))

        self.vis_save_path = "test/visualize"
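This constructor excerpt builds the net_1024 model, restores its weights through LoadModel, and wraps one MOT17 sequence in a VideoData object. A minimal usage sketch follows; the enclosing class name Tracker and the ("02", "FRCNN") sequence info are assumptions for illustration, not from the source:

# Hypothetical call to the constructor above; the class name and the
# info tuple (sequence number, detector name) are assumed.
tracker = Tracker(res_path="results/MOT17", info=("02", "FRCNN"))
print(len(tracker.sequence))  # one VideoData entry was appended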
Example #2
    def __init__(self, res_path, info):
        net = net_1024.net_1024()
        # net_path = "SaveModel/net_1024_beta2.pth"
        # net_path = "/hdd/yongxinw/MOT17/experiments/debug11/net_1024.pth"
        # net_path = "/hdd/yongxinw/MOT17/experiments/train_mot15/net_1024.pth"
        net_path = "/hdd/yongxinw/MOT15/new_experiments/train_mot15_train/checkpoints/net_39500.pth"
        print("------->  loading net_1024")
        print("-----------------> resuming from {}".format(net_path))
        self.net = LoadModel(net, net_path)

        self.sequence = []

        # MOT17
        print("------->  initializing  MOT17-{}-{} ...".format(
            info[0], info[1]))
        self.sequence.append(VideoData(info, res_path))
        print("------->  initialize  MOT17-{}-{}  done".format(
            info[0], info[1]))
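Both constructors restore weights through a LoadModel helper that is not shown on this page. A minimal sketch of what it plausibly does, modeled on the explicit torch.load / load_state_dict pattern in Example #3 below; the signature and return convention are assumptions:

import torch

def LoadModel(net, net_path):
    # Sketch only: assumes the {"state_dict": ...} checkpoint layout that
    # Example #3 loads; the real helper may differ (device mapping, strictness).
    checkpoint = torch.load(net_path)
    net.load_state_dict(checkpoint["state_dict"])
    return net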
Example #3
# Assumed imports for this excerpt; net_1024, visualize_boxes, and
# random_colors are project-local helpers.
import os
import os.path as osp

import numpy as np
import torch


def val(parser, generator, log, log_path):
    print("validation\n")
    model = net_1024.net_1024()

    # "----------------- pretrained model loading -----------------"
    # print("loading pretrained model")
    # checkpoint = torch.load("/home/lallazhao/MOT/result/Oct-25-at-02-17-net_1024/net_1024_88.4.pth")
    checkpoint = torch.load(
        "/hdd/yongxinw/Det/experiments/train_mot15_w_detect_3anchors/net_1024.pth"
    )
    model.load_state_dict(checkpoint["state_dict"])
    # "------------------------------------------------------------"

    image_dir = osp.join(log_path, "images")
    os.makedirs(image_dir, exist_ok=True)

    model = model.cuda()
    model.eval()

    for it in range(1):  # single debug iteration on one fixed frame
        frame = 6
        (cur_crop, pre_crop, cur_motion, pre_motion, gt_matrix, pos_mask,
         neg_mask, anchor_class, offsets, curr_image, curr_data, prev_image,
         prev_data, anchors) = generator(frame=frame)

        with torch.no_grad():
            s0, s1, s2, s3, adj1, adj, box_pred, cls_pred = model(
                pre_crop, cur_crop, pre_motion, cur_motion)

        # predicted matching score
        adj_sig = torch.sigmoid(adj)  # F.sigmoid is deprecated

        if parser.use_gt_match:
            adj_sig = torch.from_numpy(gt_matrix).cuda()

        # use top k adj scores for match
        scores, match_idx = torch.topk(adj_sig.t(), 1, dim=1)

        # print(scores)
        # print(match_idx)

        # mask the indices that are below the threshold
        match_idx[scores < parser.threshold] = -1

        # x_inds = torch.arange(match_idx.shape[0]).view(-1, 1).repeat(1, match_idx.shape[1]).view(-1)
        prev_boxes = prev_data[:, 2:6]
        gt_ids = prev_data[:, 1]
        max_color = 10
        colors = random_colors(max_color, bright=True)

        # Visualize GT boxes on the previous frame
        curr_image_copy = prev_image.copy()
        for i, gt_box in enumerate(prev_boxes):
            id = gt_ids[i]
            color_tmp = tuple(
                [int(tmp * 255) for tmp in colors[int(id % max_color)]])
            curr_image_copy = visualize_boxes(curr_image_copy, [gt_box],
                                              width=5,
                                              outline=color_tmp)

        curr_image_copy.save(
            osp.join(image_dir, "prev_{:03d}.jpg".format(frame + 1)))

        # Visualize anchor matching
        # curr_image_copy = curr_image.copy()
        # for j, anchor_box in enumerate(anchors):
        #     matches = match_idx[j]
        #     for k in range(len(matches)):
        #         if matches[k] != -1:
        #             match_gt_id = gt_ids[matches[k]]
        #             width = 3
        #             outline = tuple([int(tmp * 255) for tmp in colors[int(match_gt_id % max_color)]])
        #         else:
        #             width = 1
        #             outline = "white"
        #
        #         curr_image_copy = visualize_boxes(curr_image_copy, [anchor_box], width=width, outline=outline)
        # curr_image_copy.save(osp.join(image_dir, "curr_match_{:03d}.jpg".format(frame)))

        # Visualize anchor detection+classification
        print(cls_pred)
        print(anchor_class)
        print(pos_mask)

        # Visualize detections
        curr_boxes = curr_data[:, 2:6]

        curr_image_copy = curr_image.copy()

        colors = random_colors(max_color, bright=False)
        # Draw negative anchors
        for j, anchor_box in enumerate(anchors):
            # predicted class
            cls_j = np.argmax(cls_pred.detach().cpu().numpy()[j])

            # if we are in debug mode and want to use some gt information, specify in config
            if parser.use_gt_anchor_class:
                cls_j = anchor_class[j]

            if cls_j == 0:
                curr_image_copy = visualize_boxes(curr_image_copy,
                                                  [anchor_box],
                                                  width=1,
                                                  outline='white')

        # Draw positive anchors
        for j, anchor_box in enumerate(anchors):
            # predicted class
            cls_j = np.argmax(cls_pred.detach().cpu().numpy()[j])

            # predicted offset
            offset_j = box_pred.detach().cpu().numpy()[j]

            # if we are in debug mode and want to use some gt information, specify in config
            if parser.use_gt_anchor_class:
                cls_j = anchor_class[j]
            if parser.use_gt_offsets:
                offset_j = offsets[j]
            if cls_j == 1:
                match = match_idx[j]
                match_gt_id = gt_ids[match]
                outline = tuple([
                    int(tmp * 255)
                    for tmp in colors[int(match_gt_id % max_color)]
                ])
                if parser.show_aligned_anchors:
                    gw, gh = np.exp(offset_j[2:]) * anchor_box[2:]
                    gleft, gtop = offset_j[:2] * anchor_box[2:] + anchor_box[:2]
                    anchor_box_aligned = [gleft, gtop, gw, gh]
                    curr_image_copy = visualize_boxes(curr_image_copy,
                                                      [anchor_box_aligned],
                                                      width=3,
                                                      outline=outline)
                else:
                    curr_image_copy = visualize_boxes(curr_image_copy,
                                                      [anchor_box],
                                                      width=3,
                                                      outline=outline)

        # Visualize the GT boxes on the current frame
        # (note: gt_ids was taken from prev_data above)
        for i, gt_box in enumerate(curr_boxes):
            id = gt_ids[i]
            color_tmp = tuple(
                [int(tmp * 255) for tmp in colors[int(id % max_color)]])
            curr_image_copy = visualize_boxes(curr_image_copy, [gt_box],
                                              width=5,
                                              outline=color_tmp)
        curr_image_copy.save(
            osp.join(image_dir, "curr_det_{:03d}.jpg".format(frame + 1)))
Example #4
# Assumed imports for this excerpt; AverageMeter, print_log,
# convert_secs2time, adjust_learning_rate, train_net_1024,
# init_visual_validation, and visual_log are project-local helpers.
import time
import os.path as osp

import torch


def train(parser, generator, log, log_path):
    print("training final\n")
    model = net_1024.net_1024()

    # "----------------- pretrained model loading -----------------"
    # print("loading pretrained model")
    # checkpoint = torch.load("/home/lallazhao/MOT/result/Oct-25-at-02-17-net_1024/net_1024_88.4.pth")
    # checkpoint = torch.load("/hdd/yongxinw/MOT17/experiments/debug1/net_1024.pth")
    # model.load_state_dict(checkpoint["state_dict"])
    # "------------------------------------------------------------"

    model = model.cuda()
    net_param_dict = model.parameters()

    # Weight positives 10x in the matching BCE loss.
    weight = torch.tensor([10.0])
    criterion_BCE = torch.nn.BCEWithLogitsLoss(pos_weight=weight).cuda()
    criterion_CE = torch.nn.CrossEntropyLoss().cuda()
    criterion_MSE = torch.nn.MSELoss().cuda()
    criterion_SMOOTHL1 = torch.nn.SmoothL1Loss().cuda()

    if parser.optimizer == "SGD":
        optimizer = torch.optim.SGD(net_param_dict,
                                    lr=parser.learning_rate,
                                    momentum=parser.momentum,
                                    weight_decay=parser.decay,
                                    nesterov=True)
    elif parser.optimizer == "Adam":
        optimizer = torch.optim.Adam(net_param_dict,
                                     lr=parser.learning_rate,
                                     weight_decay=parser.decay)
    elif parser.optimizer == "RMSprop":
        optimizer = torch.optim.RMSprop(net_param_dict,
                                        lr=parser.learning_rate,
                                        weight_decay=parser.decay,
                                        momentum=parser.momentum)
    else:
        raise NotImplementedError

    # Main Training and Evaluation Loop
    start_time, epoch_time = time.time(), AverageMeter()

    Batch_time = AverageMeter()
    Loss = AverageMeter()
    CLoss = AverageMeter()
    RLoss = AverageMeter()
    Acc = AverageMeter()
    Acc_pos = AverageMeter()

    # Initialize visual validation
    val_parser, val_generator, val_log_path = init_visual_validation()

    for epoch in range(parser.start_epoch, parser.epochs):
        all_lrs = adjust_learning_rate(optimizer, epoch, parser.gammas,
                                       parser.schedule)
        need_hour, need_mins, need_secs = convert_secs2time(
            epoch_time.avg * (parser.epochs - epoch))

        # ----------------------------------- train for one epoch -----------------------------------
        batch_time, loss, classification_loss, regression_loss, acc, acc_pos = \
            train_net_1024(model, generator, optimizer, criterion_BCE, criterion_CE, criterion_MSE, criterion_SMOOTHL1)

        Batch_time.update(batch_time)
        Loss.update(loss.item())
        CLoss.update(classification_loss.item())
        RLoss.update(regression_loss.item())
        Acc.update(acc)
        Acc_pos.update(acc_pos)

        if epoch % parser.print_freq == 0 or epoch == parser.epochs - 1:
            print_log(
                'Epoch: [{:03d}/{:03d}]\t'
                'Time {batch_time.val:5.2f} ({batch_time.avg:5.2f})\t'
                'Match Loss {loss.val:6.3f} ({loss.avg:6.3f})\t'
                'Cls Loss {closs.val:6.3f} ({closs.avg:6.3f})\t'
                'Reg Loss {rloss.val:6.3f} ({rloss.avg:6.3f})\t'
                "Acc {acc.val:6.3f} ({acc.avg:6.3f})\t"
                "Acc_pos {acc_pos.val:6.3f} ({acc_pos.avg:6.3f})\t".format(
                    epoch,
                    parser.epochs,
                    batch_time=Batch_time,
                    loss=Loss,
                    closs=CLoss,
                    rloss=RLoss,
                    acc=Acc,
                    acc_pos=Acc_pos), log)

            visual_log(model, epoch, val_parser, val_generator, val_log_path)

            Batch_time.reset()
            Loss.reset()
            CLoss.reset()
            RLoss.reset()

        if epoch in parser.schedule:
            print_log(
                "------------------- adjust learning rate -------------------",
                log)
        # -------------------------------------------------------------------------------------------

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

        if epoch % 100 == 0:
            save_file_path = osp.join(log_path, "net_1024.pth")
            states = {
                "state_dict": model.state_dict(),
            }
            torch.save(states, save_file_path)
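train() checkpoints only the model weights every 100 epochs, under the same {"state_dict": ...} layout that val() in Example #3 loads. A minimal sketch of restoring such a checkpoint, assuming the same log_path:

import os.path as osp
import torch

# Reload the periodic checkpoint written by train() above.
model = net_1024.net_1024()
checkpoint = torch.load(osp.join(log_path, "net_1024.pth"))
model.load_state_dict(checkpoint["state_dict"])
model = model.cuda().eval()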