Example #1
def _level(img, bbox, landmark, cnns, padding):
    """
        LEVEL-? (patch refinement): each of the 5 landmarks is adjusted by
        averaging the outputs of two patch CNNs cropped with different paddings.
    """
    for i in range(5):
        x, y = landmark[i]
        patch, patch_bbox = getPatch(img, bbox, (x, y), padding[0])
        patch = cv2.resize(patch, (15, 15)).reshape((1, 1, 15, 15))
        patch = processImage(patch)
        d1 = cnns[2 * i].forward(patch)  # size = 1x2
        patch, patch_bbox = getPatch(img, bbox, (x, y), padding[1])
        patch = cv2.resize(patch, (15, 15)).reshape((1, 1, 15, 15))
        patch = processImage(patch)
        d2 = cnns[2 * i + 1].forward(patch)

        d1 = bbox.project(patch_bbox.reproject(d1[0]))
        d2 = bbox.project(patch_bbox.reproject(d2[0]))
        landmark[i] = (d1 + d2) / 2
    return landmark
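A minimal driver sketch for the routine above. The idea that getCNNs(level=2) returns the ten per-landmark patch CNNs is an assumption based on the other examples here, and the padding values are illustrative only.

# Hypothetical usage (names and padding values are illustrative):
cnns = getCNNs(level=2)            # assumed: ten patch CNNs, two per landmark
landmark = level1(img, bbox)       # coarse 5-point estimate (see Example #3)
landmark = _level(img, bbox, landmark, cnns, [0.16, 0.18])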
Example #2
def main(args):
    inputfile = args.input
    checkpoint = args.checkpoint
    top_k = args.top_k
    category_names = args.category_names
    gpu = args.gpu
    print(args)

    # load checkpoint
    model, optimizer = utils.loadCheckPoint(checkpoint)

    # get tensor image
    image = torch.from_numpy(utils.processImage(inputfile))
    image.unsqueeze_(
        0
    )  # https://discuss.pytorch.org/t/expected-stride-to-be-a-single-integer-value-or-a-list/17612/4
    image = Variable(image)

    # check whether CUDA is available
    cuda = utils.isCudaAvaliable(gpu)
    if cuda:
        model.cuda()
        image = image.cuda()
    else:
        model.cpu()

    # predict image
    model.eval()
    with torch.no_grad():
        output = model.forward(image.float())
        ps = torch.exp(output).data

    probs, classes = ps.topk(top_k)

    if cuda:
        probs = probs.cpu().numpy()
        classes = classes.cpu().numpy()
    else:
        probs = probs.numpy()
        classes = classes.numpy()

    classes = np.vectorize(model.idx_to_class.get)(classes)

    # class name mapping
    if category_names:
        category_names = utils.getJSONFile(category_names)
        classes = np.vectorize(category_names.get)(classes)

    classes = classes[0]
    probs = probs[0]

    probs_max_index = np.argmax(probs)
    np.set_printoptions(suppress=True)
    print(classes[probs_max_index], probs[probs_max_index])
    return classes[probs_max_index], probs[probs_max_index]
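A hedged sketch of the command-line wiring this main appears to expect; the argument names are inferred from the attribute accesses above, while defaults and flag styles are guesses.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Predict an image class from a saved checkpoint')
    parser.add_argument('input', help='path to the input image')
    parser.add_argument('checkpoint', help='path to the model checkpoint')
    parser.add_argument('--top_k', type=int, default=5, help='number of top classes to return')
    parser.add_argument('--category_names', default=None, help='JSON file mapping class ids to names')
    parser.add_argument('--gpu', action='store_true', help='use CUDA if available')
    main(parser.parse_args())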
Example #3
def level1(img, bbox, FOnly=True):
    """
        LEVEL-1
        img: gray image
        bbox: bounding box of face
    """
    F, EN, NM = getCNNs(level=1)
    # F
    f_bbox = bbox.subBBox(-0.05, 1.05, -0.05, 1.05)
    f_face = img[f_bbox.top:f_bbox.bottom + 1, f_bbox.left:f_bbox.right + 1]
    f_face = cv2.resize(f_face, (39, 39))
    en_face = f_face[:31, :]
    nm_face = f_face[8:, :]

    f_face = f_face.reshape((1, 1, 39, 39))
    f_face = processImage(f_face)
    f = F.forward(f_face)
    if FOnly:
        return f
    # EN
    # en_bbox = bbox.subBBox(-0.05, 1.05, -0.04, 0.84)
    # en_face = img[en_bbox.top:en_bbox.bottom+1,en_bbox.left:en_bbox.right+1]
    en_face = cv2.resize(en_face, (31, 39)).reshape((1, 1, 31, 39))
    en_face = processImage(en_face)
    en = EN.forward(en_face)
    # NM
    # nm_bbox = bbox.subBBox(-0.05, 1.05, 0.18, 1.05)
    # nm_face = img[nm_bbox.top:nm_bbox.bottom+1,nm_bbox.left:nm_bbox.right+1]
    nm_face = cv2.resize(nm_face, (31, 39)).reshape((1, 1, 31, 39))
    nm_face = processImage(nm_face)
    nm = NM.forward(nm_face)

    landmark = np.zeros((5, 2))
    landmark[0] = (f[0] + en[0]) / 2
    landmark[1] = (f[1] + en[1]) / 2
    landmark[2] = (f[2] + en[2] + nm[0]) / 3
    landmark[3] = (f[3] + nm[1]) / 2
    landmark[4] = (f[4] + nm[2]) / 2
    return landmark
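The landmarks returned above are in bbox-normalized coordinates. A small hedged sketch of mapping them back to pixel coordinates, assuming the same reprojectLandmark helper used in the training-data examples further down (color_img is a hypothetical input):

gray = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)   # level1 expects a gray image
landmark = level1(gray, bbox, FOnly=False)
landmark_px = bbox.reprojectLandmark(landmark)        # back to image coordinates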
Example #4
def EN(img, bbox):
    """
        LEVEL-1, EN
        img: gray image
        bbox: bounding box of face
    """
    bbox = bbox.expand(0.05)
    face = img[bbox.top:bbox.bottom + 1, bbox.left:bbox.right + 1]
    face = cv2.resize(face, (39, 39)).reshape((1, 1, 39, 39))
    face = processImage(face)

    F, EN, NM = getCNNs(level=1)  # TODO more flexible load needed.
    landmark = EN.forward(face[:, :, :31, :])
    return landmark
def level1_Forward(img, bbox):
    """
        LEVEL-1
        img: gray image
        bbox: bounding box of face
    """
    F, _, _ = getCNNs(level=1)
    # F
    f_bbox = bbox.subBBox(-0.05, 1.05, -0.05, 1.05)
    f_face = img[f_bbox.top:f_bbox.bottom + 1, f_bbox.left:f_bbox.right + 1]
    f_face = cv2.resize(f_face, (39, 39))

    f_face = f_face.reshape((1, 1, 39, 39))
    f_face = processImage(f_face)
    F.forward(f_face)
    return F
def generate(ftxt, mode, argument=False):
    """
        Generate Training Data for LEVEL-2
        mode = train or test
    """
    data = getDataFromTxt(ftxt)

    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert (img is not None)
        logger("process %s" % imgPath)

        landmarkPs = randomShiftWithArgument(landmarkGt, 0.05)
        if not argument:
            landmarkPs = [landmarkPs[0]]

        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx],
                                             padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)

    for idx, name, padding in types:
        logger('writing training data of %s' % name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)

        shuffle_in_unison_scary(patches, landmarks)

        with h5py.File('train/2_%s/%s.h5' % (name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open('train/2_%s/%s.txt' % (name, mode), 'w') as fd:
            fd.write('train/2_%s/%s.h5' % (name, mode))
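Note that the writer assumes the train/2_&lt;name&gt;/ directories already exist. A hedged setup snippet, reusing the createDir helper seen in the HDF5 examples below:

# Hypothetical setup step; `types` is the same (idx, name, padding) table iterated above.
for _, name, _ in types:
    createDir('train/2_%s' % name)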
def generate_hdf5(ftxt, output, fname, argument=False):

    data = getDataFromTxt(ftxt)
    F_imgs = []
    F_landmarks = []

    for (imgPath, landmarkGt, bbox) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        assert (img is not None)
        logger("process %s" % imgPath)
        # plt.imshow(img)
        # plt.show()

        f_face = img[int(bbox[0]):int(bbox[2]), int(bbox[1]):int(bbox[3])]
        plt.imshow(f_face)
        plt.show()

        f_face = cv2.resize(f_face, (39, 39))

        f_face = f_face.reshape((1, 39, 39))

        f_landmark = landmarkGt.reshape((10))
        F_imgs.append(f_face)
        F_landmarks.append(f_landmark)

    F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)

    F_imgs = processImage(F_imgs)
    shuffle_in_unison_scary(F_imgs, F_landmarks)

    # full face
    base = join(OUTPUT, '1_F')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)

    with h5py.File(output, 'w') as h5:
        h5['data'] = F_imgs.astype(np.float32)
        h5['landmark'] = F_landmarks.astype(np.float32)
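A quick sanity check of the generated file using the standard h5py API; the path below is illustrative and depends on OUTPUT and fname.

import h5py
with h5py.File('train/1_F/train.h5', 'r') as h5:       # illustrative path
    print(h5['data'].shape, h5['landmark'].shape)       # e.g. (N, 1, 39, 39) and (N, 10)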
Example #11
###########end config###########

caffe.set_mode_gpu()
caffe.set_device(0)
model_def = 'prototxts/rpn.pt'
model_weight = 'models/vgg16_faster_reshape.caffemodel'
net = caffe.Net(model_def, model_weight, caffe.TEST)
rt = '/home/xing/py-faster-rcnn/data/drh_data/' + data_set + '/'
imgs = os.listdir(rt)
feats = {}
indx = -1
im_id = 0
for i in imgs:
    indx += 1
    im = cv2.imread(rt + i, 1)
    try:
        im = caffe.io.load_image(rt + i)
    except:
        continue
    net.blobs['data'].reshape(1, 3, im.shape[0], im.shape[1])
    pro_img = utils.processImage(im)
    info = [im_id, im.shape[0], im.shape[1]]
    net.blobs['info'].data[...] = np.array(info).reshape(1, 1, 1, 3)
    feat = utils.getNormMac(net, pro_img)
    feats[i] = feat
    im_id += 1
    print(feat.shape)
    print(indx, len(feat), i)
np.savez('outputs/' + save_rpn_feat_name, feats=feats)
print(save_rpn_feat_name, 'save done!')
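A hedged reader for the saved features, assuming save_rpn_feat_name has no extension so np.savez appends ".npz"; allow_pickle=True is needed because the dict is stored inside an object array.

data = np.load('outputs/' + save_rpn_feat_name + '.npz', allow_pickle=True)
feats = data['feats'].item()        # back to a {filename: feature} dict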
Example #12
 def main(self):
     try:
         # newImgNameLst=['a','b','c','d','e']
         newImgNameLst = ['2a', '2b', '2c', '2d', '2e']
         # loop over each output image/label size
         for size in self.sizeLst:
             tagLst = readTag(self.tagPath)
             # loop over every tag line that was read
             for i, line in enumerate(tagLst):
                 if i > 25000:
                     return
                 imageInfo = line.split()  # split the record into its fields
                 imgName = imageInfo[0]
                 x = int(imageInfo[1])
                 y = int(imageInfo[2])
                 # shrink the default label box, since the original box is slightly too large
                 w = int(imageInfo[3]) * 0.9
                 h = int(imageInfo[4]) * 0.85
                 if w == 0 or h == 0:
                     continue
                 x2 = x + w
                 y2 = y + h
                 # center point coordinates
                 centX = x + w / 2
                 centY = y + h / 2
                 # repeat the steps below several times to produce multiple crops
                 j = 0
                 bigsize = 0
                 while j < 2:
                     bigsize = bigsize + 1
                     if bigsize > 1000:  # give up after 1000 attempts without a match
                         j = j + 1
                     # 1. shift by +/-0.2 of min(w, h); side length drawn between 0.8*min(w, h) and 1.2*max(w, h)
                     # 2. randomly shift to get the new box and compute its coordinates
                     movePosition1 = random.uniform(-0.2, 0.2) * min(w, h)
                     movePosition2 = random.uniform(-0.2, 0.2) * min(w, h)
                     rand = random.uniform(0.8, 1.2)
                     side = random.uniform(rand * min(w, h),
                                           rand * max(w, h))
                     # center point of the new box
                     newCentX = centX + movePosition1
                     newCentY = centY + movePosition2
                     newImgLeftTopX = newCentX - side / 2
                     newImgLeftTopY = newCentY - side / 2
                     newImgRightBottomX = newCentX + side / 2
                     newImgRightBottomY = newCentY + side / 2
                     offsetX1 = (newImgLeftTopX - x) / side
                     offsetY1 = (newImgLeftTopY - y) / side
                     offsetX2 = (newImgRightBottomX - x2) / side
                     offsetY2 = (newImgRightBottomY - y2) / side
                     # compute IoU between the original and the new box
                     p1 = (x, y)
                     p2 = (x2, y2)
                     newP1 = (newImgLeftTopX, newImgLeftTopY)
                     newP2 = (newImgRightBottomX, newImgRightBottomY)
                     newImgPosition = (newImgLeftTopX, newImgLeftTopY,
                                       newImgRightBottomX,
                                       newImgRightBottomY)
                     iouValue = iouFun((p1, p2, 0), (newP1, newP2, 0))
                     # classify the crop by IoU thresholds
                     # then: 1. crop from the original image 2. save crops at each size 3. save the coordinate file
                     imgPath2 = ''
                     confidence = 0
                     if iouValue > 0.7:
                         imgPath2 = 'positive'
                         confidence = MyEnum.positive.value
                     elif iouValue < 0.3:
                         imgPath2 = 'negative'
                         confidence = MyEnum.negative.value
                     # elif iouValue>0.1 and iouValue<0.35:
                     # if iouValue>=0 and iouValue<0.35:
                     #     imgPath2='part'
                     #     confidence=MyEnum.part.value
                     if imgPath2:
                         newImgName = newImgNameLst[j] + imgName
                         offset = (newImgName, confidence, offsetX1,
                                   offsetY1, offsetX2, offsetY2)
                         j = j + 1
                         print("第{}轮,第{}次".format(i, j))
                         processImage(newImgName,
                                      imgName,
                                      self.imagePath,
                                      self.saveImgPath,
                                      imgPath2,
                                      self.saveTagPath,
                                      offset,
                                      newImgPosition,
                                      outImgSize=size)
         print("Done...............", size)
     except Exception as e:
         print("ERROR:", "main" + str(e))
def generate_hdf5_data(filelist, output, fname, argument=False):
    data = getDataFromTxt(filelist)
    F_imgs = []
    F_landmarks = []
    EN_imgs = []
    EN_landmarks = []
    NM_imgs = []
    NM_landmarks = []
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert (img is not None)
        logger("process %s" % imgPath)
        # Paper Table2 jitter F-layer
        f_bbox = bbox.subBBox(-0.05, 1.05, -0.05, 1.05)
        f_face = img[f_bbox.top:f_bbox.bottom + 1,
                     f_bbox.left:f_bbox.right + 1]

        ## data augmentation
        if argument and np.random.rand() > -1:
            ### flip
            face_flipped, landmark_flipped = flip(f_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (39, 39))
            F_imgs.append(face_flipped.reshape((1, 39, 39)))
            F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation +5 degrees
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), 5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha,
                                                   (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha,
                                                      landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation -5 degrees
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), -5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha,
                                                   (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha,
                                                      landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))

        f_face = cv2.resize(f_face, (39, 39))
        en_face = f_face[:31, :]
        nm_face = f_face[8:, :]

        f_face = f_face.reshape((1, 39, 39))
        f_landmark = landmarkGt.reshape((10))
        F_imgs.append(f_face)
        F_landmarks.append(f_landmark)

        ## data augmentation for EN
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(en_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape(
                (1, 31, 39))
            landmark_flipped = landmark_flipped[:3, :].reshape((6))
            EN_imgs.append(face_flipped)
            EN_landmarks.append(landmark_flipped)

        en_face = cv2.resize(en_face, (31, 39)).reshape((1, 31, 39))
        en_landmark = landmarkGt[:3, :].reshape((6))
        EN_imgs.append(en_face)
        EN_landmarks.append(en_landmark)
        ## data augmentation for NM

        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(nm_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape(
                (1, 31, 39))
            landmark_flipped = landmark_flipped[2:, :].reshape((6))
            NM_imgs.append(face_flipped)
            NM_landmarks.append(landmark_flipped)

        nm_face = cv2.resize(nm_face, (31, 39)).reshape((1, 31, 39))
        nm_landmark = landmarkGt[2:, :].reshape((6))
        NM_imgs.append(nm_face)
        NM_landmarks.append(nm_landmark)
    # Convert the lists to arrays
    F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)
    EN_imgs, EN_landmarks = np.asarray(EN_imgs), np.asarray(EN_landmarks)
    NM_imgs, NM_landmarks = np.asarray(NM_imgs), np.asarray(NM_landmarks)
    ### normalize the data and shuffle
    F_imgs = processImage(F_imgs)
    shuffle_in_unison_scary(F_imgs, F_landmarks)
    EN_imgs = processImage(EN_imgs)
    shuffle_in_unison_scary(EN_imgs, EN_landmarks)
    NM_imgs = processImage(NM_imgs)
    shuffle_in_unison_scary(NM_imgs, NM_landmarks)

    # full face
    base = join(OUTPUT, '1_F')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = F_imgs.astype(np.float32)
        h5['landmark'] = F_landmarks.astype(np.float32)

    # eye and nose
    base = join(OUTPUT, '1_EN')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = EN_imgs.astype(np.float32)
        h5['landmark'] = EN_landmarks.astype(np.float32)

    # nose and mouth
    base = join(OUTPUT, '1_NM')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = NM_imgs.astype(np.float32)
        h5['landmark'] = NM_landmarks.astype(np.float32)
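A hedged invocation sketch; the file names and the train/test split are assumptions, but they mirror the mode-dependent naming used in the LEVEL-2 generator above.

# Hypothetical calls (paths are illustrative):
generate_hdf5_data('dataset/train.txt', OUTPUT, 'train.h5', argument=True)
generate_hdf5_data('dataset/test.txt', OUTPUT, 'test.h5', argument=False)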
Example #15
                        # then: 1. crop from the original image 2. save crops at each size 3. save the coordinate file
                        imgPath2=''
                        confidence=0
                        # if iouValue>0.65:
                        #     imgPath2='positive'
                        #     confidence=MyEnum.positive.value
                        # el
                        # if iouValue<0.3:
                        #     imgPath2='negative'
                        #     confidence=MyEnum.negative.value
                        # el
                        if iouValue>0.4 and iouValue<0.65:
                            imgPath2='part'
                            confidence=MyEnum.part.value
                        if imgPath2:
                            newImgName='b'+"-"+str(s)+"-"+str(j)+"-"+image_name_detail
                            if imgPath2=='negative':
                                offset=(newImgName,confidence,0,0,0,0)
                            else:
                                offset=(newImgName,confidence,offset_x1,offset_y1,offset_x2,offset_y2)
                            j=j+1
                            print("生成{}尺寸,第{}轮,第{}次".format(my_format,i,j))
                            processImage(newImgName,image_name_detail,image_name,save_images_folder,imgPath2,save_tag_path,offset,newImgPosition,outImgSize=my_format)
            except Exception as e:
                print("ERROR:","__name__"+str(e))