import cv2
import h5py
import numpy as np
from collections import defaultdict

# Helpers such as logger, getData, getTrainData, getDataFromTxt, getPatch,
# processImage, shuffle_in_unison_scary and the `types` table are assumed to
# come from the project's utility modules.


def generateSamples(trainData, data):
    t = 0
    print('##########################################################################')
    for (imgPath, landmarkGt, bbox) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        assert img is not None
        logger("process %s" % imgPath)
        height, width = img.shape[:2]
        # downsampled by 3: 3x3 patch
        cephaImg = cv2.resize(img, (int(width / 3), int(height / 3)),
                              interpolation=cv2.INTER_NEAREST)
        # raw data
        # trainData, t = getData(trainData, landmarkGt, cephaImg, t)
        # print('After getting raw data, there are %d samples' % t)
        r1 = 20 // 3
        r2 = 20   # 60 / 3
        r3 = 400  # 400 / 3
        for idx, landmark in enumerate(landmarkGt):  # 19 landmarks
            # 25 positive samples
            landmarkPs25 = randomShiftWithArgument(landmark, 0, r1, 25)
            trainData, t = getData(trainData, landmarkPs25, cephaImg, t)
            print('After getting 25 positive samples, there are %d samples' % t)
            # 500 negative samples
            landmarkNs500 = randomShiftWithArgument(landmark, r2, r3, 500)
            trainData, t = getData(trainData, landmarkNs500, cephaImg, t)
            print('After getting 25 positive and 500 negative samples, '
                  'there are %d samples' % t)
            if idx == 1:  # debug: stop after the first two landmarks
                break
    return trainData
def generateSamples(trainData, data, landmarks):
    t = 0
    for (imgPath, bbox) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        assert img is not None
        logger('process %s' % imgPath)
        height, width = img.shape[:2]
        # downsampled by 3: 3x3 patch
        size = (int(width / 3), int(height / 3))
        img = cv2.resize(img, size, interpolation=cv2.INTER_NEAREST)
        trainData, t = getTrainData(trainData, landmarks, img)
        print('After getting raw data, there are %d samples' % t)
        r2 = 20 // 3
        r3 = 10  # 400 / 3
        for idx, landmark in enumerate(landmarks):
            print('@@@@@@@@@@@@@@' + str(idx))
            # 25 positive samples
            landmarkPs25 = randomShiftWithArgument(landmark, 0, r2, 25)
            trainData, t = getTrainData(trainData, landmarkPs25, img)
            # print('After getting 25 positive samples, there are %d samples' % t)
            # 500 negative samples
            landmarkNs500 = randomShiftWithArgument(landmark, r2, r3, 500)
            trainData, t = getTrainData(trainData, landmarkNs500, img)
            print('After getting 25 positive and 500 negative samples, '
                  'there are %d samples' % t)
    return trainData
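# Both generateSamples variants above call a four-argument
# randomShiftWithArgument(landmark, min_radius, max_radius, n) that is not
# defined in this file. Below is a minimal sketch of a plausible
# implementation, assuming it draws n points uniformly from the annulus
# min_radius <= r <= max_radius around a single landmark; the project's real
# helper may differ.
import numpy as np


def randomShiftWithArgument(landmark, min_radius, max_radius, n):
    """Return n copies of an (x, y) landmark, each shifted by a random offset."""
    shifted = np.zeros((n, 2))
    for i in range(n):
        r = np.random.uniform(min_radius, max_radius)  # shift distance
        theta = np.random.uniform(0.0, 2.0 * np.pi)    # shift direction
        shifted[i] = (landmark[0] + r * np.cos(theta),
                      landmark[1] + r * np.sin(theta))
    return shifted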
def generate(ftxt, mode, argument=False):
    """
    Build the stage-2 (LEVEL-2) training data.
    :param ftxt: location of the data source file and its labels
    :param mode: train or test
    :param argument: whether to augment with randomly shifted landmarks
    :return:
    """
    data = getDataFromTxt(ftxt)
    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        assert img is not None
        logger("process %s" % imgPath)
        landmarkPs = randomShiftWithArgument(landmarkGt, 0.05)
        if not argument:
            landmarkPs = [landmarkPs[0]]
        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx], padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)
    for idx, name, padding in types:
        logger('writing training data of %s' % name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)
        shuffle_in_unison_scary(patches, landmarks)
        with h5py.File('/python/face_key_point/data_hdf5/train/2_%s/%s.h5'
                       % (name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open('/python/face_key_point/data_hdf5/train/2_%s/%s.txt'
                  % (name, mode), 'w') as fd:
            fd.write('/python/face_key_point/data_hdf5/train/2_%s/%s.h5'
                     % (name, mode))
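# generate() shuffles patches and landmarks with the same permutation via
# shuffle_in_unison_scary, which is not shown in this file. A common
# implementation (assumed here) saves and restores the RNG state so both
# arrays receive the identical shuffle:
import numpy as np


def shuffle_in_unison_scary(a, b):
    """Shuffle two arrays in place with the same random permutation."""
    rng_state = np.random.get_state()
    np.random.shuffle(a)
    np.random.set_state(rng_state)  # replay the same permutation on b
    np.random.shuffle(b)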
def generate(ftxt, mode, argument=False):
    """
    Generate training data for LEVEL-3.
    mode = train or test
    """
    data = getDataFromTxt(ftxt)
    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        assert img is not None
        logger("process %s" % imgPath)
        landmarkPs = randomShiftWithArgument(landmarkGt, 0.01)
        if not argument:
            landmarkPs = [landmarkPs[0]]
        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx], padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)
    for idx, name, padding in types:
        logger('writing training data of %s' % name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)
        shuffle_in_unison_scary(patches, landmarks)
        with h5py.File('/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s/%s.h5'
                       % (name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open('/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s/%s.txt'
                  % (name, mode), 'w') as fd:
            fd.write('/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s/%s.h5'
                     % (name, mode))
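# The LEVEL-2/LEVEL-3 generate() functions call a two-argument
# randomShiftWithArgument(landmarkGt, shift), unlike the four-argument sampler
# sketched earlier. A hypothetical version, assuming the landmarks are an
# array of (x, y) pairs normalized to the face bbox and that a small number
# of shifted copies is returned (N = 2 is an assumption):
import numpy as np


def randomShiftWithArgument(landmarkGt, shift):
    """Return N randomly shifted copies of the whole landmark set."""
    landmarkGt = np.asarray(landmarkGt)
    N = 2
    landmarkPs = np.zeros((N,) + landmarkGt.shape)
    for i in range(N):
        landmarkPs[i] = landmarkGt + np.random.uniform(
            -shift, shift, landmarkGt.shape)
    return landmarkPs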
def generateSamples(testData, data, landmarks):
    for (imgPath, bbox) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        assert img is not None
        logger('process %s' % imgPath)
        height, width = img.shape[:2]
        # downsampled by 3: 3x3 patch
        # size = (int(width / 3), int(height / 3))
        size = (width, height)  # no downsampling
        img = cv2.resize(img, size, interpolation=cv2.INTER_NEAREST)
        # testData is a dict here, storing sample points and their image patches
        testData = getData(testData, landmarks, img)
        print('After getting raw data, there are %d samples' % len(testData))
        for idx, landmark in enumerate(landmarks):
            # generate candidate sample points
            landmark_samples = randomShiftWithArgument(landmark, 0, 100, 150)
            testData = getData(testData, landmark_samples, img)
    return testData
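# getData is also undefined in this file, and its call sites disagree on
# arity (the training samplers pass a running counter t; the test sampler
# above does not). A hypothetical sketch matching the test-time call,
# assuming testData maps each sample point to the patch cropped around it
# (the name and patch_size are assumptions):
def getData(testData, points, img, patch_size=9):
    """Crop a fixed-size patch around each candidate point into testData."""
    half = patch_size // 2
    for (x, y) in points:
        xi, yi = int(round(x)), int(round(y))
        patch = img[yi - half:yi + half + 1, xi - half:xi + half + 1]
        if patch.shape == (patch_size, patch_size):  # skip out-of-bounds points
            testData[(xi, yi)] = patch
    return testData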
def generate(ftxt, mode, argument=False):
    """
    Generate training data for LEVEL-3 (variant with relative output paths).
    mode = train or test
    """
    data = getDataFromTxt(ftxt)
    trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))
    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        assert img is not None
        logger("process %s" % imgPath)
        landmarkPs = randomShiftWithArgument(landmarkGt, 0.01)
        if not argument:
            landmarkPs = [landmarkPs[0]]
        for landmarkP in landmarkPs:
            for idx, name, padding in types:
                patch, patch_bbox = getPatch(img, bbox, landmarkP[idx], padding)
                patch = cv2.resize(patch, (15, 15))
                patch = patch.reshape((1, 15, 15))
                trainData[name]['patches'].append(patch)
                _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))
                trainData[name]['landmarks'].append(_)
    for idx, name, padding in types:
        logger('writing training data of %s' % name)
        patches = np.asarray(trainData[name]['patches'])
        landmarks = np.asarray(trainData[name]['landmarks'])
        patches = processImage(patches)
        shuffle_in_unison_scary(patches, landmarks)
        with h5py.File('train/3_%s/%s.h5' % (name, mode), 'w') as h5:
            h5['data'] = patches.astype(np.float32)
            h5['landmark'] = landmarks.astype(np.float32)
        with open('train/3_%s/%s.txt' % (name, mode), 'w') as fd:
            fd.write('train/3_%s/%s.h5' % (name, mode))
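# processImage runs over the stacked patches before they are written to HDF5.
# It is not defined in this file; a common normalization for this kind of
# pipeline (assumed here) is per-image zero mean and unit variance:
import numpy as np


def processImage(imgs):
    """Normalize each image of an (N, C, H, W) batch to zero mean, unit variance."""
    imgs = imgs.astype(np.float32)
    for i, img in enumerate(imgs):
        mean, std = img.mean(), img.std()
        imgs[i] = (img - mean) / (std + 1e-8)  # guard against zero variance
    return imgs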