Example No. 1
def extract_features(model,
                     data_loader,
                     is_flip=False,
                     print_freq=1,
                     metric=None):
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    features = OrderedDict()

    end = time.time()
    if is_flip:
        print('flip')
        for i, (imgs, flip_imgs, fnames) in enumerate(data_loader):
            data_time.update(time.time() - end)

            outputs = extract_cnn_feature(model, imgs)
            flip_outputs = extract_cnn_feature(model, flip_imgs)
            # average features of the original and horizontally flipped images
            final_outputs = (outputs + flip_outputs) / 2
            for fname, output in zip(fnames, final_outputs):
                features[fname] = output.numpy()

            batch_time.update(time.time() - end)
            end = time.time()

            if (i + 1) % print_freq == 0:
                print('Extract Features: [{}/{}]\t'
                      'Time {:.3f} ({:.3f})\t'
                      'Data {:.3f} ({:.3f})\t'.format(i + 1, len(data_loader),
                                                      batch_time.val,
                                                      batch_time.avg,
                                                      data_time.val,
                                                      data_time.avg))
    else:
        print('no flip')
        for i, (imgs, fnames) in enumerate(data_loader):
            data_time.update(time.time() - end)

            outputs = extract_cnn_feature(model, imgs)
            for fname, output in zip(fnames, outputs):
                features[fname] = output.numpy()

            batch_time.update(time.time() - end)
            end = time.time()

            if (i + 1) % print_freq == 0:
                print('Extract Features: [{}/{}]\t'
                      'Time {:.3f} ({:.3f})\t'
                      'Data {:.3f} ({:.3f})\t'.format(i + 1, len(data_loader),
                                                      batch_time.val,
                                                      batch_time.avg,
                                                      data_time.val,
                                                      data_time.avg))
    return features
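Every timing loop in these examples relies on an AverageMeter helper that is not shown on this page. A minimal sketch of what such a class typically looks like (an assumption; the repo's own version may differ in detail):

class AverageMeter(object):
    """Tracks the most recent value and a running average."""

    def __init__(self):
        self.val = 0.0    # most recent value passed to update()
        self.sum = 0.0    # running sum of values
        self.count = 0    # number of samples seen
        self.avg = 0.0    # running mean

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count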
Example No. 2
def reid_draw(frame, b_b, model, shujuku, frame_size):
    # shujuku ("database") maps each person id to a list of gallery features
    size = frame_size

    left = int((b_b[0] - b_b[2] / 2.0) * size[0])
    top = int((b_b[1] - b_b[3] / 2.0) * size[1])
    right = int((b_b[0] + b_b[2] / 2.0) * size[0])
    bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])
    if top >= bottom or left >= right or top <= 0 or left <= 0:
        return
        
    img1 = jieduan(frame, left, top, right, bottom)  # jieduan = crop the box region
    img = preprocess(img1)
    feature = extract_cnn_feature(model, img.cuda())

    minsim = -1  # smallest distance seen so far; -1 means "not set yet"

    #for feature2,filename in shujuku:
    customer_id = -1
    for query in shujuku:
        for fea in shujuku[query]:
            distan = pairwise_distance(feature,fea)
            if minsim > distan or minsim == -1:
                minsim = distan
                customer_id = int(query)
    #add to debug customer_id
    #customer_id = 1
    cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
    cv2.putText(frame,str(customer_id),(left,top),cv2.FONT_HERSHEY_COMPLEX,6,(255,0,0),2)

    customer_name = "name_"+str(customer_id)
    assert isinstance(customer_id, int)  # must be a number

    # return frame
    #print("pub msg finish")
    return customer_name, customer_id
Example No. 3
def extract_features(model, data_loader, print_freq=50, metric=None):
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    features = OrderedDict()
    labels = OrderedDict()

    end = time.time()
    for i, (imgs, fnames, pids, _) in enumerate(data_loader):
        data_time.update(time.time() - end)
        outputs = extract_cnn_feature(model, imgs)
        for fname, output, pid in zip(fnames, outputs, pids):
            features[fname] = output
            labels[fname] = pid

        batch_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % print_freq == 0:
            print('Extract Features: [{}/{}]\t'
                  'Time {:.3f} ({:.3f})\t'
                  'Data {:.3f} ({:.3f})\t'.format(i + 1, len(data_loader),
                                                  batch_time.val,
                                                  batch_time.avg,
                                                  data_time.val,
                                                  data_time.avg))

    return features, labels
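A hedged usage sketch for this variant: model and query_loader stand in for a trained re-ID model and its evaluation loader. Since features is an OrderedDict keyed by filename, it stacks into a single matrix with labels kept aligned:

import torch

features, labels = extract_features(model, query_loader)
feat_matrix = torch.stack(list(features.values()), dim=0)  # [N, D]
pids = [labels[fname] for fname in features]               # aligned person ids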
Example No. 4
def extract_features_cross_cam(model, data_loader):
    model.eval()
    cross_cam_features = []
    cross_cam_fnames = []
    cross_cam_distribute = []
    cams = []
    cam_number = len(model.classifier)
    print("Start extract features cross camera")
    for imgs, fnames, _, camid in tqdm(data_loader):

        with torch.no_grad():
            outputs = extract_cnn_feature(model, imgs, norm=False)
            # L2-normalize the backbone features
            fnorm = torch.norm(outputs, p=2, dim=1, keepdim=True)
            norm_outputs = outputs.div(fnorm.expand_as(outputs))
            for i in range(cam_number):

                x = model.classifier[i](outputs)
                if i == 0:
                    distribute = F.softmax(x.data, dim=1)
                else:
                    distribute_tmp = F.softmax(x.data, dim=1)
                    distribute = torch.cat((distribute, distribute_tmp), dim=1)

        for fname, output, cam, dis in zip(fnames, norm_outputs, camid,
                                           distribute):
            cam = cam.item()
            cross_cam_fnames.append(fname)
            cross_cam_features.append(output)
            cams.append(cam)
            cross_cam_distribute.append(dis.cpu().numpy())
    return cross_cam_features, cross_cam_fnames, cross_cam_distribute, cams
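The inner loop above concatenates one softmax score vector per camera-specific classifier head into a single cross-camera distribution. A compact equivalent, assuming model.classifier is an iterable of per-camera heads as in the example:

import torch
import torch.nn.functional as F

def cross_cam_distribution(model, outputs):
    # one softmax per camera head, concatenated along the class dimension
    scores = [F.softmax(head(outputs), dim=1) for head in model.classifier]
    return torch.cat(scores, dim=1)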
Example No. 5
    def extract_features(model, data_loader, eval_only, print_freq=100):
        model.eval()
        batch_time = AverageMeter()
        data_time = AverageMeter()

        features = []
        labels = []
        cameras = []

        end = time.time()
        for i, (imgs, fnames, pids, cids) in enumerate(data_loader):
            data_time.update(time.time() - end)

            outputs = extract_cnn_feature(model, imgs, eval_only)
            for fname, output, pid, cid in zip(fnames, outputs, pids, cids):
                features.append(output)
                labels.append(int(pid.numpy()))
                cameras.append(int(cid.numpy()))

            batch_time.update(time.time() - end)
            end = time.time()

            if (i + 1) % print_freq == 0:
                print('Extract Features: [{}/{}]\t'
                      'Time {:.3f} ({:.3f})\t'
                      'Data {:.3f} ({:.3f})\t'
                      .format(i + 1, len(data_loader),
                              batch_time.val, batch_time.avg,
                              data_time.val, data_time.avg))

        output_features = torch.stack(features, 0)

        return output_features, labels, cameras
Example No. 6
def reid_draw(frame, b_b, model, cfg):
    global size
    id_name = 0
    cfg.cuda()
    left = int((b_b[0] - b_b[2] / 2.0) * size[0])
    top = int((b_b[1] - b_b[3] / 2.0) * size[1])
    right = int((b_b[0] + b_b[2] / 2.0) * size[0])
    bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])

    #print("bottom is {}".format(bottom))
    if left < 0 or right < 0 or top < 0 or bottom < 0:
        return left, top, right, bottom, 999

    # handle half-body boxes: filter out squat detections near the frame bottom
    if bottom > 530:
        ratio = float(bottom - top) / (right - left)
        #print("ratio is: {}".format(ratio))
        if ratio < 1.5:
            #print("ratio is: {}".format(ratio))
            #print('filtered out')
            return left, top, right, bottom, 999

    frame_reid = copy.deepcopy(frame)
    # draw the shangpin ("product") area
    left_x, top_y, right_m, bottom_n = shangpin_area()
    cv2.rectangle(frame, (left_x, top_y), (right_m, bottom_n), (0, 255, 0), 2)

    # second area: huojia ("shelf") no. 2
    left_x_2, top_y_2, right_m_2, bottom_n_2 = shangpin_area_huojia2()
    cv2.rectangle(frame, (left_x_2, top_y_2), (right_m_2, bottom_n_2),
                  (255, 0, 0), 2)

    img1 = jieduan(frame_reid, left, top, right, bottom)
    img = preprocess(img1)
    feature = extract_cnn_feature(model, img.cuda())

    minsim = -1

    try:
        pkl_file = open('/data/reid/renti/data.pkl', 'rb')
        shujuku = pickle.load(pkl_file)
        pkl_file.close()
    except Exception:
        # fall back to the backup copy of the feature database
        pkl_file = open('/data/reid/renti/data_bu.pkl', 'rb')
        shujuku = pickle.load(pkl_file)
        pkl_file.close()

    # for feature2,filename in shujuku:
    for query in shujuku:
        for fea in shujuku[query]:
            distan = pairwise_distance(feature, fea)
            if minsim > distan or minsim == -1:
                minsim = distan
                id_name = int(query)

    cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
    cv2.putText(frame, str(id_name), (left, top), cv2.FONT_HERSHEY_COMPLEX, 6,
                (255, 0, 0), 2)

    return left, top, right, bottom, id_name
Example No. 7
def extract_n_save(model,
                   data_loader,
                   args,
                   root,
                   num_cams,
                   is_detection=True,
                   use_fname=True,
                   gt_type='reid'):
    model.eval()
    print_freq = 1000
    batch_time = AverageMeter()
    data_time = AverageMeter()

    if_created = [0 for _ in range(num_cams)]
    lines = [[] for _ in range(num_cams)]

    end = time.time()
    for i, (imgs, fnames, pids, cams) in enumerate(data_loader):
        data_time.update(time.time() - end)
        cams += 1  # shift camera ids from 0-based to 1-based
        outputs = extract_cnn_feature(model, imgs)
        for fname, output, pid, cam in zip(fnames, outputs, pids, cams):
            if is_detection:
                pattern = re.compile(r'c(\d+)_f(\d+)')
                cam, frame = map(int, pattern.search(fname).groups())
                # f_names[cam - 1].append(fname)
                # features[cam - 1].append(output.numpy())
                line = np.concatenate(
                    [np.array([cam, 0, frame]),
                     output.numpy()])
            else:
                if use_fname:
                    pattern = re.compile(r'(\d+)_c(\d+)_f(\d+)')
                    pid, cam, frame = map(int, pattern.search(fname).groups())
                else:
                    cam, pid = cam.numpy(), pid.numpy()
                    frame = -1 * np.ones_like(pid)
                # line = output.numpy()
                line = np.concatenate(
                    [np.array([cam, pid, frame]),
                     output.numpy()])
            lines[cam - 1].append(line)
        batch_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % print_freq == 0:
            print('Extract Features: [{}/{}]\t'
                  'Time {:.3f} ({:.3f})\t'
                  'Data {:.3f} ({:.3f})\t'.format(i + 1, len(data_loader),
                                                  batch_time.val,
                                                  batch_time.avg,
                                                  data_time.val,
                                                  data_time.avg))

            if_created = save_file(lines, args, root, if_created)

            lines = [[] for _ in range(num_cams)]

    save_file(lines, args, root, if_created)
    return
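The two regular expressions assume DukeMTMC-style filenames that embed the camera and frame index (and, for ground-truth crops, the person id). A self-contained check with made-up names:

import re

det_pattern = re.compile(r'c(\d+)_f(\d+)')       # detections: cam, frame
gt_pattern = re.compile(r'(\d+)_c(\d+)_f(\d+)')  # ground truth: pid, cam, frame

cam, frame = map(int, det_pattern.search('c2_f0051236.jpg').groups())
pid, cam2, frame2 = map(int, gt_pattern.search('0042_c3_f0040012.jpg').groups())
print(cam, frame, pid, cam2, frame2)  # 2 51236 42 3 40012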
Example No. 8
def inference(model, query_loader, gallery_loader, use_gpu):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf = []
        for batch_idx, (imgs, _) in enumerate(query_loader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = extract_cnn_feature(model, imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.extend(list(features))

        gf, g_paths = [], []
        for batch_idx, (imgs, path) in enumerate(gallery_loader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = extract_cnn_feature(model, imgs)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.extend(list(features))
            g_paths.extend(list(path))

    print('=> BatchTime(s): {:.3f}'.format(batch_time.avg))

    x = torch.stack(qf, 0)
    y = torch.stack(gf, 0)
    m, n = x.size(0), y.size(0)
    x = x.view(m, -1)
    y = y.view(n, -1)
    # squared Euclidean distances: ||x||^2 + ||y||^2 - 2 * x @ y^T
    dist = torch.pow(x, 2).sum(1).unsqueeze(1).expand(m, n) + \
           torch.pow(y, 2).sum(1).unsqueeze(1).expand(n, m).t()
    dist.addmm_(x, y.t(), beta=1, alpha=-2)

    return dist
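The distance block uses the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2·x·y, with addmm_ contributing the -2·x·y term in one fused call. A quick sanity check of that expansion against torch.cdist on random data:

import torch

x = torch.randn(4, 128)
y = torch.randn(6, 128)
dist = torch.pow(x, 2).sum(1).unsqueeze(1).expand(4, 6) + \
       torch.pow(y, 2).sum(1).unsqueeze(1).expand(6, 4).t()
dist.addmm_(x, y.t(), beta=1, alpha=-2)
assert torch.allclose(dist, torch.cdist(x, y).pow(2), atol=1e-3)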
Example No. 9
    def execute(self, frame: FrameType) -> bytes:
        trans_im = cv2.resize(frame, (self.width, self.height))

        image = Image.fromarray(trans_im)
        img = self.test_transformer(image)
        img_list = [img]
        imgs = torch.stack(img_list)

        img_feat = extract_cnn_feature(self.model, imgs)[0]

        output = pickle.dumps(img_feat)
        return output
Example No. 10
def extract_features(model, data):
    model.eval()
    features = OrderedDict()
    labels = OrderedDict()
    uids, pids, imgs = data
    outputs = extract_cnn_feature(model, imgs)

    for uid, pid, output in zip(uids, pids, outputs):
        features[uid] = output
        labels[uid] = pid

    return features, labels
Example No. 11
    def execute(self, frame: FrameType) -> FrameType:
        trans_im = cv2.resize(frame, (self.width, self.height))

        image = Image.fromarray(trans_im)
        img = self.test_transformer(image)
        img_list = [img]
        imgs = torch.stack(img_list)

        t1 = datetime.now()

        img_feat = extract_cnn_feature(self.model, imgs)[0]

        t2 = datetime.now()

        # print("extract features in ", (t2 - t1).total_seconds() * 1000, " ms.")
        # print(img_feat)

        return trans_im
Example No. 12
def reid_draw(frame, b_b, model, cfg):
    global size
    id_name = 'new'
    cfg.cuda()
    left = int((b_b[0] - b_b[2] / 2.0) * size[0])
    top = int((b_b[1] - b_b[3] / 2.0) * size[1])
    right = int((b_b[0] + b_b[2] / 2.0) * size[0])
    bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])
    #print left,top,right,bottom
    if left < 0 or right < 0 or top < 0 or bottom < 0:
        return left, top, right, bottom, id_name

    img1 = jieduan(frame, left, top, right, bottom)
    img = preprocess(img1)
    feature = extract_cnn_feature(model, img.cuda())

    minsim = -1

    rentidir = '/home/tujh/renti/'
    pkl_file = open('/data/reid/renti/data.pkl', 'rb')
    shujuku = pickle.load(pkl_file)
    pkl_file.close()

    #for feature2,filename in shujuku:
    for query in shujuku:
        for fea in shujuku[query]:
            distan = pairwise_distance(feature, fea)
            if minsim > distan or minsim == -1:
                minsim = distan
                id_name = int(query)
    cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
    cv2.putText(frame, str(id_name), (left, top), cv2.FONT_HERSHEY_COMPLEX, 6,
                (255, 0, 0), 2)

    # draw shangpin area
    left_x, top_y, right_m, bottom_n = shangpin_area()
    cv2.rectangle(frame, (left_x, top_y), (right_m, bottom_n), (0, 255, 0), 2)

    left_x_2, top_y_2, right_m_2, bottom_n_2 = shangpin_area_huojia2()
    cv2.rectangle(frame, (left_x_2, top_y_2), (right_m_2, bottom_n_2),
                  (255, 0, 0), 2)

    #print(left, top, right, bottom)

    return left, top, right, bottom, id_name
Example No. 13
def extract_features(model,
                     data_loader,
                     print_freq=1,
                     save_name='feature.mat'):
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    ids = []
    cams = []
    features = []
    query_files = []
    end = time.time()
    for i, (imgs, fnames) in enumerate(data_loader):
        data_time.update(time.time() - end)

        outputs = extract_cnn_feature(model, imgs)
        #for test time augmentation
        #bs, ncrops, c, h, w = imgs.size()
        #outputs = extract_cnn_feature(model, imgs.view(-1,c,h,w))
        #outputs = outputs.view(bs,ncrops,-1).mean(1)
        for fname, output in zip(fnames, outputs):
            if fname[0] == '-':
                ids.append(-1)               # junk/distractor image
                cams.append(int(fname[4]))
            else:
                ids.append(int(fname[:4]))   # pid prefix of the filename
                cams.append(int(fname[6]))
            features.append(output.numpy())
            query_files.append(fname)
        batch_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % print_freq == 0:
            print('Extract Features: [{}/{}]\t'
                  'Time {:.3f} ({:.3f})\t'
                  'Data {:.3f} ({:.3f})\t'.format(i + 1, len(data_loader),
                                                  batch_time.val,
                                                  batch_time.avg,
                                                  data_time.val,
                                                  data_time.avg))

    return features, ids, cams, query_files
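The string slicing above assumes Market-1501-style filenames: the first four characters are the person id, the digit after 'c' is the camera id, and junk images are prefixed with '-1'. An illustration with made-up names:

fname = '0002_c1s1_000451_03.jpg'   # pid 0002, camera 1
pid, cam = int(fname[:4]), int(fname[6])
junk = '-1_c3s1_000551_01.jpg'      # distractor: pid -1, camera 3
jpid, jcam = -1, int(junk[4])
print(pid, cam, jpid, jcam)  # 2 1 -1 3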
Example No. 14
def loadDataset():
    torch.cuda.set_device(0)
    logs_dir = 'market-1501-Exper33/RPP/'
    num_features = 256
    num_classes = 751
    T = 1
    dim = 256
    dropout = 0.5

    ###
    model = models.create('resnet50_rpp',
                          num_features=num_features,
                          dropout=dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True,
                          T=T,
                          dim=dim)
    model = model.cuda()
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])

    res = []
    frame_number = 0

    # -- build the query feature database ("shujuku"), one entry per identity
    shujuku = {}
    rentidir = '/data/reid/renti/queries'
    for query in os.listdir(rentidir):
        query_dir = osp.join(rentidir, query)
        shujuku[query] = []
        for img in os.listdir(query_dir):
            _img = cv2.imread(osp.join(query_dir, img))
            _img = np.transpose(_img, (2, 0, 1)).astype(np.float32)
            _img = torch.from_numpy(_img)
            _img = torch.unsqueeze(_img, 0)
            _feature = extract_cnn_feature(model, _img.cuda())
            shujuku[query].append(_feature)

            # --

    return model, shujuku
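A possible driver for this loader, assuming the returned model and database are then fed to one of the reid_draw variants above frame by frame:

model, shujuku = loadDataset()
# for each decoded video frame and detected bounding box b_b:
#     reid_draw(frame, b_b, model, shujuku, frame_size)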
Example No. 15
def extract_features_per_cam(model, data_loader):
    model.eval()
    per_cam_features = {}
    per_cam_fname = {}
    print("Start extract features per camera")
    for imgs, fnames, _, camid in tqdm(data_loader):
        camid = list(camid)
        for cam in camid:
            cam = cam.item()
            if cam not in per_cam_features.keys():
                per_cam_features[cam] = []
                per_cam_fname[cam] = []
        with torch.no_grad():
            outputs = extract_cnn_feature(model, imgs)

        for fname, output, cam in zip(fnames, outputs, camid):
            cam = cam.item()
            per_cam_features[cam].append(output)
            per_cam_fname[cam].append(fname)
    return per_cam_features, per_cam_fname
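Usage sketch (model and data_loader are assumptions): the returned dictionaries are keyed by camera id, so per-camera statistics or clustering follow directly:

per_cam_features, per_cam_fname = extract_features_per_cam(model, data_loader)
for cam in sorted(per_cam_features):
    print('camera {}: {} features'.format(cam, len(per_cam_features[cam])))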
Example No. 16
def compute_distance(frame, b_b):
    # model, shujuku and size are module-level globals in the original script
    left = int((b_b[0] - b_b[2] / 2.0) * size[0])
    top = int((b_b[1] - b_b[3] / 2.0) * size[1])
    right = int((b_b[0] + b_b[2] / 2.0) * size[0])
    bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])
    img1 = jieduan(frame, left, top, right, bottom)
    img = np.transpose(img1, (2, 0, 1)).astype(np.float32)  # HWC -> CHW
    img = torch.from_numpy(img)
    img = torch.unsqueeze(img, 0)  # add batch dimension
    feature = extract_cnn_feature(model, img.cuda())

    minsim = -1
    # for feature2, filename in shujuku:

    for query in shujuku:
        for fea in shujuku[query]:
            distan = pairwise_distance(feature, fea)
            if minsim > distan or minsim == -1:
                minsim = distan
    return minsim
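pairwise_distance itself is not shown on this page. Given how it is used here (a scalar comparison between one probe feature and one gallery feature), a minimal sketch assuming plain Euclidean distance could be:

import torch

def pairwise_distance(x, y):
    # x, y: feature tensors of equal total size; returns a Python float
    return torch.dist(x.view(1, -1), y.view(1, -1), p=2).item()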
Example No. 17
def reid_draw(frame, b_b, model, cfg, huojia1_id, pre_res, change_idnum):
    global size
    global save_box_no
    id_name = 0
    cfg.cuda()
    left = int((b_b[0] - b_b[2] / 2.0) * size[0])
    top = int((b_b[1] - b_b[3] / 2.0) * size[1])
    right = int((b_b[0] + b_b[2] / 2.0) * size[0])
    bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])

    if left < 0 or right < 0 or top < 0 or bottom < 0:
        return left, top, right, bottom, 999

    # if bottom > 530:
    #     ratio = float(bottom - top) / (right - left)
    #     #print("ratio is: {}".format(ratio))
    #     if ratio < 1.5:
    #         #print("ratio is: {}".format(ratio))
    #         print('filtered out')
    #         return left, top, right, bottom, 999

    frame_reid = copy.deepcopy(frame)
    # draw shangpin area
    left_x, top_y, right_m, bottom_n = shangpin_area(huojia1_id)
    cv2.rectangle(frame, (left_x, top_y), (right_m, bottom_n), (0, 255, 0), 2)

    ratio = float(bottom - top) / (right - left)
    # # print(ratio)
    # if ratio < 2.0:
    #     # print('filtered out')
    #     return left, top, right, bottom, 999

    img1 = jieduan(frame_reid, left, top, right, bottom)

    img = preprocess(img1)

    feature = extract_cnn_feature(model, img.cuda())

    minsim = -1

    try:
        pkl_file = open('/data/reid/renti/data.pkl', 'rb')
        shujuku = pickle.load(pkl_file)
        pkl_file.close()
    except Exception:
        # fall back to the backup copy of the feature database
        pkl_file = open('/data/reid/renti/data_bu.pkl', 'rb')
        shujuku = pickle.load(pkl_file)
        pkl_file.close()

    rentidir = '/home/tujh/renti/'
    # pkl_file = open('/data/reid/renti/data.pkl', 'rb')
    # shujuku = pickle.load(pkl_file)
    # pre_item_huoid={}
    # for id_name, pre_item in pre_res.items():
    #     if huojia1_id == pre_item[-1]:
    #         pre_item_huoid[id_name]=pre_item ##person id in front of huojia_id

    if change_idnum:  #len(pre_res) == len(shujuku) and pre_item_huoid:
        id_name = reid_draw_multi(pre_res, b_b)
        pre_fix = 'B:'
    else:
        # for feature2,filename in shujuku:
        for query in shujuku:
            for fea in shujuku[query]:
                distan = pairwise_distance(feature, fea)
                if minsim > distan or minsim == -1:
                    minsim = distan
                    id_name = int(query)
        pre_fix = 'R:'
    cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
    cv2.putText(frame, pre_fix + str(id_name), (left, top),
                cv2.FONT_HERSHEY_COMPLEX, 6, (255, 0, 0), 2)
    cv2.imwrite(
        '/home/zhaocy/yhr/tmp_imgs/' + str(save_box_no) + '_' + str(id_name) +
        '.jpg', img1)
    save_box_no += 1

    return left, top, right, bottom, id_name
Example No. 18
def reid_draw(frame, b_b, model, cfg, shujuku, threadPubMsg, camera_id, flag):
    global size

    # print("size = ", size)
    # print("b_b = ", b_b)
    # print model

    cfg.cuda()

    left = int((b_b[0] - b_b[2] / 2.0) * size[0])
    top = int((b_b[1] - b_b[3] / 2.0) * size[1])
    right = int((b_b[0] + b_b[2] / 2.0) * size[0])
    bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])
    img1 = jieduan(frame, left, top, right, bottom)
    img = np.transpose(img1, (2, 0, 1)).astype(np.float32)
    img = torch.from_numpy(img)
    img = torch.unsqueeze(img, 0)

    feature = extract_cnn_feature(model, img.cuda())

    minsim = -1
    id_name = 'new'
    rentidir = '/home/tujh/renti/'
    #for feature2,filename in shujuku:

    for query in shujuku:
        for fea in shujuku[query]:
            distan = pairwise_distance(feature, fea)
            if minsim > distan or minsim == -1:
                minsim = distan
                id_name = query

    cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
    cv2.putText(frame, id_name, (left, top), cv2.FONT_HERSHEY_COMPLEX, 6,
                (255, 0, 0), 2)
    print(frame.shape)
    # left_x = 230
    # top_y = 520
    # right_m = 480
    # bottom_n = 800
    # left_x = 89
    # top_y = 236
    # right_m = 470
    # bottom_n = 508
    left_x = 600
    top_y = 30
    right_m = 800
    bottom_n = 520
    cv2.rectangle(frame, (left_x, top_y), (right_m, bottom_n), (0, 255, 0), 2)
    calcIOU1 = calcIOU(left, top, right, bottom, left_x, top_y, right_m,
                       bottom_n)

    if calcIOU1 <= 0 and flag == 0:  # back
        flag = 0
        flag1 = 0
        flag2 = 0
    elif calcIOU1 > 0 and flag == 0:  # in
        flag = 1
        flag1 = 1
        flag2 = 0
    elif calcIOU1 <= 0 and flag == 1:  # out
        flag = 0
        flag1 = 0
        flag2 = 1
    # elif calcIOU1 > 0 and flag == 1:  # back
    else:
        flag = 1
        flag1 = 0
        flag2 = 0
    print(flag)
    print(calcIOU1)

    print("set customer message")
    customer_name = "name" + str(id_name)
    customer_id = id_name  # number
    threadPubMsg.set_customer(name=customer_name,
                              person_id=12,  # note: hardcoded in this variant
                              camera_id=camera_id,
                              x=left,
                              y=top,
                              w=right,
                              h=bottom)
    threadPubMsg.set_commodity_recognition_trigger(camera_id=camera_id,
                                                   flag1=flag1,
                                                   flag2=flag2)
    # return frame
    return flag
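calcIOU is another helper not shown here. Based on how its result is thresholded above, a plausible sketch of an axis-aligned box IoU (an assumption; the repo's version may differ):

def calcIOU(l1, t1, r1, b1, l2, t2, r2, b2):
    # intersection-over-union of two boxes given as (left, top, right, bottom)
    iw = max(0, min(r1, r2) - max(l1, l2))
    ih = max(0, min(b1, b2) - max(t1, t2))
    inter = iw * ih
    union = (r1 - l1) * (b1 - t1) + (r2 - l2) * (b2 - t2) - inter
    return inter / float(union) if union > 0 else 0.0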
Example No. 19
def update_shujuku(frame, b_b, new_name, newout):
    global print_frame
    global x_history
    left = int((b_b[0] - b_b[2] / 2.0) * size[0])
    top = int((b_b[1] - b_b[3] / 2.0) * size[1])
    right = int((b_b[0] + b_b[2] / 2.0) * size[0])
    bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])
    center_x = b_b[0] * size[0]
    #print(right- left)
    print('bottom:{}'.format(bottom))
    print('width:{}'.format(right-left))
    print('height:{}'.format(bottom-top))
    if top < 0 or top >= bottom or left >= right or left < 0 or right < 0 or bottom < 0:  # or left < 300:
        return False
    if right - left < 110:
        # print('bounding box width too small')
        return False
    if bottom - top < 150:
        # print('bounding box height too small')
        return False

    # if bottom < 480:
    #     return False
    print('center_x:{}'.format(center_x))
    x_history.append(center_x)
    print('x_history:')
    print(x_history)
    img1 = jieduan(frame, left, top, right, bottom)

    print_frame += 1
    img = preprocess(img1)

    # print(left)
    # print(top)
    # print(right)
    # print(bottom)
    # print(img.shape)
    feature = extract_cnn_feature(model, img.cuda())
    if len(shujuku):
        minsim = -1
        id_name = 'new'
        # rentidir = '/home/tujh/renti/'
        # for feature2, filename in shujuku:
        for query in shujuku:
            for fea in shujuku[query]:
                distan = pairwise_distance(feature, fea)
                if minsim > distan or minsim == -1:
                    minsim = distan
                    id_name = query

        print('minsim:{}'.format(minsim))
        print('new_in:' + str(new_in))

        new_people_shujuku(feature, new_name)
        cv2.imwrite('./tmp2/' + str(print_frame) + '_' + str(new_name) + '.jpg',
                    img1)
        features_at_door.append(feature)

        return False
    else:
        new_people_shujuku(feature, new_name)
        features_at_door.append(feature)
        cv2.imwrite('./tmp2/' + str(print_frame) + '_' + str(new_name) + '.jpg',
                    img1)
        return False
Example No. 20
def evaluate():
    print('Get dataloader... ')
    single_train_loader, single_query_loader, multi_query_loader, \
        gallery_loader = get_dataloader()

    print('Create and load pre-trained model...')
    model = models.create('resnet50',
                          dropout=0.0,
                          num_features=2048,
                          num_classes=632)
    # checkpoint = load_checkpoint('./logs/deep-person-1-new-augmentation/market1501-resnet50/model_best.pth.tar')
    checkpoint = load_checkpoint('./logs/duke2market/model_best.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])
    model = nn.DataParallel(model).cuda()
    model.eval()

    print('Extract train&single_query&gallery feature...')
    single_train_feat, single_train_ids, single_train_cams, train_files = extract_features(
        model, single_train_loader)
    single_query_feat, single_query_ids, single_query_cams, query_files = extract_features(
        model, single_query_loader)
    gallery_feat, gallery_ids, gallery_cams, _ = extract_features(
        model, gallery_loader)

    print('Get multi_query feature...')
    multi_query_dict = dict()
    for i, (imgs, fnames) in enumerate(multi_query_loader):
        outputs = extract_cnn_feature(model, imgs)
        # test time augmentation
        for fname, output in zip(fnames, outputs):
            # group multi-query features by the 7-char "pid_cam" filename prefix
            if multi_query_dict.get(fname[:7]) is None:
                multi_query_dict[fname[:7]] = []
            multi_query_dict[fname[:7]].append(output.numpy())

    query_max_feat = []
    query_avg_feat = []
    for query_file in query_files:
        index = query_file[:7]
        multi_features = multi_query_dict[index]
        multi_features = normalize(multi_features)
        query_max_feat.append(np.max(multi_features, axis=0))
        query_avg_feat.append(np.mean(multi_features, axis=0))

    assert len(query_max_feat) == len(query_avg_feat) == len(single_query_feat)

    print('Write to mat file...')
    import scipy.io as sio
    if not os.path.exists('./matdata'):
        os.mkdir('./matdata')
    sio.savemat('./matdata/trainID.mat',
                {'trainID': np.array(single_train_ids)})
    sio.savemat('./matdata/queryID.mat',
                {'queryID': np.array(single_query_ids)})
    sio.savemat('./matdata/trainCAM.mat',
                {'trainCAM': np.array(single_train_cams)})
    sio.savemat('./matdata/queryCAM.mat',
                {'queryCAM': np.array(single_query_cams)})
    sio.savemat('./matdata/testID.mat', {'testID': np.array(gallery_ids)})
    sio.savemat('./matdata/testCAM.mat', {'testCAM': np.array(gallery_cams)})
    sio.savemat('./matdata/Hist_train.mat',
                {'Hist_train': np.array(single_train_feat)})
    sio.savemat('./matdata/Hist_query.mat',
                {'Hist_query': np.array(single_query_feat)})
    sio.savemat('./matdata/Hist_test.mat',
                {'Hist_test': np.array(gallery_feat)})
    sio.savemat('./matdata/Hist_query_max.mat',
                {'Hist_max': np.array(query_max_feat)})
    sio.savemat('./matdata/Hist_query_avg.mat',
                {'Hist_avg': np.array(query_avg_feat)})

    return
Example No. 21
def reid_draw(frame, b_b, model, cfg, shujuku, threadPubMsg, camera_id):
    global size

    # print("size = ", size)
    # print("b_b = ", b_b)
    # print model
    id_name = 0

    cfg.cuda()

    left = int((b_b[0] - b_b[2] / 2.0) * size[0])
    top = int((b_b[1] - b_b[3] / 2.0) * size[1])
    right = int((b_b[0] + b_b[2] / 2.0) * size[0])
    bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])
    if top >= bottom or left >= right or top <= 0 or left <= 0:
        return 0, 0, 0, 0, 0

    ratio = float(bottom - top) / (right - left)
    # print(ratio)
    # if ratio < 2.0:
    #     # print('filtered out')
    #     return left, top, right, bottom, 999

    if bottom > 530:
        ratio = float(bottom - top) / (right - left)
        #print("ratio is: {}".format(ratio))
        if ratio < 1.5:
            #print("ratio is: {}".format(ratio))
            #print('filtered out')
            return left, top, right, bottom, 999

    img1 = jieduan(frame, left, top, right, bottom)

    img = preprocess(img1)

    feature = extract_cnn_feature(model, img.cuda())

    minsim = -1
    # id_name = 1
    rentidir = '/home/tujh/renti/'
    # for feature2,filename in shujuku:

    for query in shujuku:
        for fea in shujuku[query]:
            distan = pairwise_distance(feature, fea)
            if minsim > distan or minsim == -1:
                minsim = distan
                id_name = query
            # disabled alternative: average the distance over all gallery
            # features of each identity instead of taking the minimum
            # distan = 0
            # discount = 0
            # for fea in shujuku[query]:
            #     distan += pairwise_distance(feature, fea)
            #     discount += 1
            # if discount != 0:
            #     distan = distan / discount
            # if minsim > distan or minsim == -1 or distan != 0:
            #     minsim = distan
            #     id_name = query

    cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
    cv2.putText(frame, str(id_name), (left, top), cv2.FONT_HERSHEY_COMPLEX, 6,
                (255, 0, 0), 2)
    # print(frame.shape)
    # left_x = 450
    # top_y = 30
    # right_m = 570
    # bottom_n = 130

    left_x = 86
    top_y = 130
    right_m = 413
    bottom_n = 479
    #cv2.rectangle(frame, (left_x, top_y), (right_m, bottom_n), (0, 255, 0), 2)
    calcIOU1 = calcIOU(left, top, right, bottom, left_x, top_y, right_m,
                       bottom_n)
    print("calIOU : {}".format(calcIOU1))

    if calcIOU1 > 0.8:
        flag_check = 1
    else:
        flag_check = 0

    print('flag', flag_check)
    print(calcIOU1)
    print("set customer message")
    customer_name = "name" + str(id_name)
    customer_id = id_name  # number
    global isAskPerson
    print(isAskPerson)
    # if isAskPerson == True and flag_check==1:
    # cv2.imwrite('/home/zhaocy/catkin_touching_AI/checkout_img/checkout_' + str(customer_id) + '.jpg')
    if flag_check == 1:
        threadPubMsg.set_customer(name=customer_name,
                                  person_id=customer_id,
                                  camera_id=camera_id,
                                  x=left,
                                  y=top,
                                  w=right,
                                  h=bottom)
        threadPubMsg.set_commodity_recognition_trigger(camera_id=camera_id,
                                                       person_id=customer_id,
                                                       flag=1,
                                                       flag1=0,
                                                       flag2=0)
        print('save_images')
        cv2.imwrite(
            '/home/zhaocy/catkin_touching_AI/checkout_img/checkout_' +
            str(customer_id) + '.jpg', frame)
    else:
        # print(1111111111111)
        threadPubMsg.set_customer(name=customer_name,
                                  person_id=customer_id,
                                  camera_id=0,
                                  x=left,
                                  y=top,
                                  w=right,
                                  h=bottom)
        threadPubMsg.set_commodity_recognition_trigger(camera_id=camera_id,
                                                       person_id=customer_id,
                                                       flag=0,
                                                       flag1=0,
                                                       flag2=0)
    # return frame
    # note: unlike the other variants, this one returns (left, right, top, bottom)
    return left, right, top, bottom, id_name