def show(camera):
    """Generator: continuously pull frames from *camera* and yield them as
    multipart MJPEG chunks (``--frame`` boundary + JPEG payload), suitable
    for an HTTP streaming response.

    NOTE(review): relies on module-level ``auto``, ``imgP`` and ``cv2``.
    """
    # The multipart header never changes, so build it once outside the loop.
    part_header = b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n'
    while True:
        jpeg_bytes, raw_img = camera.getStreaming()
        if auto:
            imgP.imageProcess(raw_img)
        cv2.waitKey(41)  # Rasp
        yield part_header + jpeg_bytes + b'\r\n'
def DoCutImage(resImageIndex, imageFilenameX, atxBaseDirStr, resTitlePy): if atxBaseDirStr != None: imageFileDir, imageFileName = os.path.split(imageFilenameX) imageFilenameX = os.path.join(atxBaseDirStr, imageFileDir, imageFileName) imageProcessObj = imageProcess.imageProcess() imageProcessObj.OpenPNG(imageFilenameX) iWidth, iHeight = imageProcessObj.imageObj.size cutedImageObj = imageProcessObj.CutBlackBorder() cutedImageData = imageProcessObj.GetRGB555Data() resCutedWidth, resCutedHeight = cutedImageObj.size ## #根据图片动态计算 resTitlePy ## resCutedTop = imageProcessObj.cutTop ## resTitlePy = resCutedTop - 5 ## #根据 resCutedTop 计算出 resTitlePy ## resCutedTop = imageProcessObj.cutTop ## resTitlePy = 0 ## if resCutedTop >= 40: ## resTitlePy = 40 ## if resCutedTop >= 45: ## resTitlePy = 40 ## if resCutedTop >= 70: ## resTitlePy = 65 ## if resCutedTop >= 95: ## resTitlePy = 90 ## if resCutedTop >= 115: ## resTitlePy = 110 imageHeaderAndDataDict = { 'resImageIndex' : resImageIndex, 'resSize' : len(cutedImageData), #RGB555 'resTitlePy' : resTitlePy, 'resCutedLeft' : imageProcessObj.cutLeft, 'resCutedTop' : imageProcessObj.cutTop, 'resCutedWidth' : resCutedWidth, 'resCutedHeight' : resCutedHeight, 'cutedImageData' : cutedImageData, } global lock lock.acquire() ##print '%s' % resImageIndex if imageProcessObj.isBlackImage: print '%s' % resImageIndex, else: print '.', lock.release() return imageHeaderAndDataDict
def PrintImageInfo(self): imageProcessObj = imageProcess.imageProcess() print 'len(self.packageImageHeaderList):', len(self.packageImageHeaderList) print 'len(self.packageImageDatalist):', len(self.packageImageDatalist) for x in range(len(self.packageImageHeaderList)): #计算出图片宽度 packageImageHeaderX = self.packageImageHeaderList[x] width = packageImageHeaderX['resCutedWidth'] height = packageImageHeaderX['resCutedHeight'] print width, height, packageImageHeaderX['resSize'] imageProcessObj.PrintRGB555Data(width, height, self.packageImageDatalist[x]) return
def getProfilePicture(self):
    """Download (at most once) and pixelate this user's Facebook profile picture.

    Returns the saved image's bare file name (not the full path).
    """
    url = "https://graph.facebook.com/" + self.username + "/picture?width=400&height=400"
    savingToDirectory = "/srv/www/davidguo.ca/public_html/hackathon2013/hackathon2013/core/images/"
    imgFileName = self.id + "img.jpg"
    # os.path.isfile replaces the original os.listdir() scan; the name we
    # look for always ends in ".jpg", so the old endswith() check was
    # redundant, and "== False" was an anti-idiom.
    if not os.path.isfile(savingToDirectory + imgFileName):
        urllib.urlretrieve(url, savingToDirectory + imgFileName)
    self.ip = imageProcess.imageProcess()
    self.ip.fileName = savingToDirectory + imgFileName
    self.ip.pixelate()
    return imgFileName
def GetPNGFromAnRPFile(self, packageName=None):
    """Re-create one PNG per image stored in ``<packageName>.anrp``.

    packageName -- package base name; defaults to ``self.packageName``.

    Each image's RGB555 payload is seek+read back out of the .anrp file,
    converted to RGB888, and saved as ``<packageName>_<i>``.
    """
    if packageName is None:
        packageName = self.packageName
    # Open the package file once for all images: the original re-opened it
    # on every iteration and never closed it, leaking file handles.
    with open('%s.anrp' % packageName, 'rb') as f:
        for i in range(len(self.packageImageDatalist)):
            imageHeaderDictX = self.packageImageHeaderList[i]
            width = imageHeaderDictX['resCutedWidth']
            height = imageHeaderDictX['resCutedHeight']
            f.seek(imageHeaderDictX['resImageDataPos'])
            packFormatStr = '%dB' % imageHeaderDictX['resSize']
            packBufferStr = f.read(imageHeaderDictX['resSize'])
            rgb555Data = struct.unpack(packFormatStr, packBufferStr)
            imageProcessObj = imageProcess.imageProcess()
            rgb888DataList = imageProcessObj.GetRGB888from555(width, height, rgb555Data)
            imageObj = imageProcessObj.CreateFromRGB888(width, height, rgb888DataList)
            imageProcessObj.Save2PNG(imageObj, '%s_%s' % (packageName, i))
    return
def getTaggedPhoto(self):
    """Find the first friend photo this user is tagged in, download it,
    and return the local file name.

    Returns None implicitly if no tagged photo is found.
    """
    url = self.makeURL("me?fields=friends.uid(" + self.id + ").fields(photos.fields(tags,id))")
    request = urllib.urlopen(url)
    response = json.loads(request.read())
    listOfPhotos = response["friends"]["data"][0]["photos"]["data"]
    for photo in listOfPhotos:
        if "tags" not in photo:
            continue
        for tag in photo["tags"]["data"]:
            if tag["id"] == self.id:
                photoId = photo["id"]
                tagXCoor = tag["x"]  # x coordinate of tag
                tagYCoor = tag["y"]  # y coordinate of tag
                url = self.makeURL(photoId)
                imgFileName = self.id + "img.jpg"
                urllib.urlretrieve(url, imgFileName)
                self.ip = imageProcess.imageProcess()
                self.ip.fileName = imgFileName
                # Removed the original's dead "img = Image()" assignment and
                # the bare "img" expression statement: neither had any
                # effect, and no Image callable is imported here.
                return imgFileName
"(Default: C:\\Users\\pylak\\Documents\\Fall 2019\\MWDB\\Project\\Phase1\\Hands_test2\\) : " ) if path == '': path = 'C:\\Users\\pylak\\Documents\\Fall 2019\\MWDB\\Project\\Phase1\\Hands_test2\\' dim = dimReduction(path, '*.jpg') feature = input( 'Please choose a feature model - SIFT(s), Moments(m), LBP(l), Histogram(h): ' ) if feature not in ('s', 'm', 'l', 'h'): print('Please enter a valid feature model!') exit() technique = input( 'Please choose a dimensionality reduction technique - PCA(pca), SVD(svd), NMF(nmf), LDA(lda)' ) image = input("Insert the name of your image: ") k = input('Please provide the number of latent semantics(k): ') db = 'imagedata_' + feature + '_' + technique task2 = imageProcess( "/home/anhnguyen/ASU/CSE-515/Project/Phase 2/Project - Phase 2/Data/testset1/" ) task2.similarity(feature, technique, db, int(k), image) # similarity(feature, technique, db, int(k), image) # imgs_sort, feature_sort = dim.saveDim(feature, technique, db, int(k)) # phase1 = imageProcess("/home/anhnguyen/ASU/CSE-515/Project/Phase 2/Project - Phase 2/Data/testset1/") # print('\n') # print('Data Latent Semantics Saved to Output Folder!') # dim.writeFile(imgs_sort, 'Output\\Task1\\Data_ls_{x}_{y}_{z}.txt'.format(x=feature,y=technique,z=k)) # print('\n') # print('Feature Latent Semantics Saved to Output Folder!') # dim.writeFile(imgs_sort, 'Output\\Task1\\Feature_ls_{x}_{y}_{z}.txt'.format(x=feature,y=technique,z=k))
def saveDim(self, feature, model, dbase, k, password='******', host='localhost', database='mwdb', user='******', port=5432, label=None, meta=False, negative_handle='n'):
    """Fetch stored feature vectors, reduce them with the chosen technique,
    persist the reduced vectors to Postgres, and return the rankings.

    feature -- feature-model code ('s' SIFT, 'm' Moments, 'l' LBP, 'h' Histogram)
    model   -- reduction technique: 'nmf', 'lda', 'pca', or 'svd'
    dbase   -- source table name; a '_<model>[_<label>]' suffix is appended
               for the destination table
    k       -- number of latent semantics
    label   -- optional metadata label used to filter the images
    meta    -- when True, (re)create the metadata table first
    negative_handle -- 'h' = histogram transform, otherwise normalize
                       (applied to Moments data, which can be negative)

    Returns (imgs_sort, feature_sort): images resp. features ranked per
    latent semantic.

    NOTE(review): ``no_clusters``, ``PostgresDB`` and ``KMeans_SIFT`` come
    from module scope; an unknown *model* string would leave ``imgs_red``
    unbound and raise NameError at the zip below.
    """
    imageDB = imageProcess(self.dirpath)
    imgs = imageDB.dbProcess(password=password, process='f', model=feature, dbase=dbase)
    kmeans_model = 'kmeans_' + str(no_clusters)
    technique_model = feature + '_' + model
    if label is not None:
        filteredImage = imageDB.CSV(label)
        label = label.replace(" ", "_")
        dbase += '_' + model + '_' + label
        kmeans_model += '_' + label
        technique_model += '_' + label
    else:
        dbase += '_' + model
    # print(technique_model)
    imgs_data = []
    imgs_meta = []
    # Manual index walk (not a for-loop) because entries filtered out by
    # *label* are deleted from ``imgs`` in place, shifting later indices.
    i = -1
    while i < len(imgs) - 1:
        # print (x[1].shape)
        i += 1
        if label is not None and imgs[i][0] not in filteredImage:
            # print("label")
            del imgs[i]
            i -= 1
            continue
        # SIFT ('s') yields many descriptors per image -> extend; every
        # other feature is one flat vector per image -> append.
        if feature != "s":
            imgs_data.append(imgs[i][1].reshape((-1)))
        else:
            imgs_data.extend(imgs[i][1])
        # print (image_cmp.shape)
        imgs_meta.append(imgs[i][0])
    # print(i)
    # print(len(imgs))
    #Handle Negative Value of NMF
    if feature == 'm':
        if negative_handle == 'h':
            imgs_data = self.hist(imgs_data)
        else:
            imgs_data = self.normalize(imgs_data)
    imgs_data = np.asarray(imgs_data)
    # print(imgs_data.shape)
    # print(imgs_meta)
    # imgs_meta = [x[0] if x[0] in filteredImage for x in imgs]
    imgs_zip = list(zip(imgs_meta, imgs_data))
    db = PostgresDB(password=password, host=host, database=database, user=user, port=port)
    conn = db.connect()
    if meta:
        imageDB.createInsertMeta(conn)
    model = model.lower()
    # SIFT descriptors are clustered into a bag-of-visual-words matrix
    # before reduction, so every image gets a fixed-length vector.
    if feature == "s":
        if imgs_data.shape[0] < no_clusters:
            Kmeans = KMeans_SIFT(imgs_data.shape[0] // 2)
        else:
            Kmeans = KMeans_SIFT(no_clusters)
        clusters = Kmeans.kmeans_process(imgs_data)
        # print (imgs_zip)
        imgs_data = Kmeans.newMatrixSift(imgs, clusters, kmeans_model)
        imgs_zip = list(zip(imgs_meta, imgs_data))
    # Each branch produces: imgs_red (reduced data), imgs_sort (image
    # rankings) and feature_sort (feature rankings).
    if model == 'nmf':
        w, h = self.nmf(imgs_data, k, technique_model)
        imgs_red = np.dot(imgs_data, h.T).tolist()
        print(np.asarray(w).shape)
        print(np.asarray(h).shape)
        imgs_sort = self.imgSort(w.T, imgs_meta)
        feature_sort = self.imgFeatureSort(h, imgs_zip)
    elif model == 'lda':
        w, h = self.lda(imgs_data, k, technique_model)
        imgs_red = np.dot(imgs_data, h.T).tolist()
        print(np.asarray(w).shape)
        print(np.asarray(h).shape)
        imgs_sort = self.imgSort(w.T, imgs_meta)
        feature_sort = self.imgFeatureSort(h, imgs_zip)
    elif model == 'pca':
        data, U, Vt = self.pca(imgs_data, k, technique_model)
        imgs_red = data.tolist()
        imgs_sort = self.imgSort(U.T, imgs_meta)
        feature_sort = self.imgFeatureSort(Vt, imgs_zip)
    elif model == 'svd':
        # print(imgs_data.shape)
        data, U, Vt = self.svd(imgs_data, k, technique_model)
        imgs_red = data.tolist()
        # print(im)
        # U[:,:self.k].dot(Sigma[:self.k, :self.k]).dot(V[:self.k,:])
        imgs_sort = self.imgSort(U.T, imgs_meta)
        feature_sort = self.imgFeatureSort(Vt, imgs_zip)
    # print("=======================")
    # print(imgs_sort)
    # print("=======================")
    # print(feature_sort)
    # Process the reduced Images
    imgs_red = list(zip(imgs_meta, imgs_red))
    # print (np.asarray(imgs_sort).shape)
    # print(img_sort)
    # print (np.asarray(feature_sort).shape)
    # imgs_red = self.convString(imgs_red)
    print(imgs_red)
    self.createInsertDB(dbase, imgs_red, conn)
    return imgs_sort, feature_sort
# Interactive Phase-1 driver: task 1 saves features to the DB, task 2
# fetches and dumps one image's stored features, task 3 ranks the k most
# similar images.  (This chunk may continue past the visible end.)
import imageProcess
import numpy as np

ip = imageProcess.imageProcess()
arg = input("Which task would you like to run (1/2/3)? ")
if arg == '1':
    # Task 1: extract the chosen feature model and persist it ('s' = save).
    inp = input(
        "Would you like to save SIFT features or Moment features (s/m)?")
    ip.dbProcess(password='******', model=inp, process='s')
elif arg == '2':
    # Task 2: fetch ('f') one image's feature vector and write it to a file.
    inp1 = input(
        "Would you like to fetch SIFT features or Moment features (s/m)?")
    inp = input("Provide the image ID which you would like to display:")
    rec = ip.dbProcess(password='******', model=inp1, process='f')
    rec_arr = np.array(rec[0][1])
    print('Features:', np.array(rec_arr))
    print('Size', rec_arr.shape)
    ip.writeFile(rec[0][1],
                 'Output\\Task2\\task2_{x}_{y}.txt'.format(x=inp1, y=inp))
    print('File saved to Output folder')
elif arg == '3':
    # Task 3: compare the given image against all stored features.
    inp1 = input(
        "Would you like to Compare SIFT features or Moment features (s/m)?")
    inp = input("Provide the image ID which you would like to display:")
    inp2 = input(
        "Provide the number of similar images you would like to return?")
    recs = ip.dbProcess(password='******', model=inp1, process='f')
    sim = ip.SimCalc(inp, recs, imgmodel=inp1, k=int(inp2))
"Please enter the home directory for the images " "(Default: C:\\Users\\pylak\\Documents\\Fall 2019\\MWDB\\Project\\Dataset\\) : " ) if path == '': path = 'C:\\Users\\pylak\\Documents\\Fall 2019\\MWDB\\Project\\Dataset\\' dim = dimReduction(path, '*.jpg') feature = input( 'Please choose a feature model - SIFT(s), Moments(m), LBP(l), Histogram(h): ' ) if feature not in ('s', 'm', 'l', 'h'): print('Please enter a valid feature model!') exit() technique = input( 'Please choose a dimensionality reduction technique - PCA(pca), SVD(svd), NMF(nmf), LDA(lda): ' ) image = input("Insert the name of your image: ") k = input('Please provide the number of latent semantics(k): ') label = input("Which label do you want: ") label = label.replace(" ", "_") db = 'imagedata_' + feature + '_' + technique + '_' + label dim = dimReduction(path, '*.jpg') task4 = imageProcess(path) # filteredImage = task4.CSV(label) task4.similarity(feature, technique, db, int(k), image, label) # print('\n') # print('Data Latent Semantics Saved to Output Folder!') # dim.writeFile(imgs_sort, 'Output\\Task1\\Data_ls_{x}_{y}_{z}.txt'.format(x=feature,y=technique,z=k)) # print('\n') # print('Feature Latent Semantics Saved to Output Folder!') # dim.writeFile(imgs_sort, 'Output\\Task1\\Feature_ls_{x}_{y}_{z}.txt'.format(x=feature,y=technique,z=k))
# Interactive Phase-2 driver over Dataset2: task 1 batch-saves all four
# feature models, task 2 fetches one image's features, task 3 collects the
# comparison inputs.  NOTE(review): the visible chunk ends right after the
# task-3 prompts, before the gathered inputs are used — likely truncated.
import imageProcess
import numpy as np

ip = imageProcess.imageProcess(
    dirpath='C:\\ASU\\Fall 2019\\MWDB\\Project\\Phase 2\\Dataset2\\')
arg = input("Which task would you like to run (1/2/3)? ")
if arg == '1':
    # inp = input("Would you like to save SIFT features or Moment features (s/m)?")
    # Save every feature model in turn: SIFT, Moments, LBP, Histogram.
    for inp in ['s', 'm', 'l', 'h']:
        print(inp)
        ip.dbProcess(password='******', model=inp, process='s')
elif arg == '2':
    # Fetch ('f') one image's stored feature vector and dump it to a file.
    inp1 = input(
        "Would you like to fetch SIFT features or Moment features (s/m)?")
    inp = input("Provide the image ID which you would like to display:")
    rec = ip.dbProcess(password='******', model=inp1, process='f')
    rec_arr = np.array(rec[0][1])
    print('Features:', np.array(rec_arr))
    print('Size', rec_arr.shape)
    ip.writeFile(rec[0][1],
                 'Output\\Task2\\task2_{x}_{y}.txt'.format(x=inp1, y=inp))
    print('File saved to Output folder')
elif arg == '3':
    # Collect the comparison parameters (model, image ID, result count).
    inp1 = input(
        "Would you like to Compare SIFT features or Moment features (s/m)?")
    inp = input("Provide the image ID which you would like to display:")
    inp2 = input(
        "Provide the number of similar images you would like to return?")
if __name__ == "__main__":
    # Play ``videoName`` on repeat, pushing every frame through the
    # detection pipeline until ESC is pressed.
    video = cv2.VideoCapture(videoName)
    showTrackbar()
    while True:
        ok, raw = video.read()
        if not ok:
            # End of file (or a read error): reopen the clip and loop it.
            video = cv2.VideoCapture(videoName)
            continue
        resized = cv2.resize(raw, (640, 480))
        encoded_ok, jpeg_buf = cv2.imencode('.jpg', resized)
        jpeg_bytes = jpeg_buf.tobytes()
        shown_img, was_detected, params = imageProcess(resized)
        key = cv2.waitKey(33) & 0xff
        if key == 27:  # press 'ESC' to quit
            break
        if key == 64:
            time.sleep(10)
    video.release()
    cv2.destroyAllWindows()
for d in kp: idx = kmeans.predict([d]) histo[ idx] += 1 / nkp # Because we need normalized histograms, I prefere to add 1/nkp directly histo_list.append(histo) # print(np.asarray(histo_list).shape) path = os.path.normpath(os.getcwd() + os.sep + os.pardir + os.sep + 'Models' + os.sep) with open(path + os.sep + model + '.joblib', 'wb') as f1: joblib.dump(kmeans, f1) return np.asarray(histo_list) imageDB = imageProcess( '/home/anhnguyen/ASU/CSE-515/Project/Phase 2/Project - Phase 2/Data/Dataset2/' ) imgs = imageDB.dbProcess(password='******', process='f', model='s', dbase='imagedata_s') # imgs_data = np.asarray(imgs) imgs_data = [] i = -1 while i < len(imgs) - 1: # print (x[1].shape) i += 1 imgs_data.extend(imgs[i][1]) imgs_data = np.asarray(imgs_data)
# Interactive driver over the Dataset folder: task 1 batch-saves all four
# feature models, task 2 fetches one image's features, task 3 ranks the k
# most similar images.  (The chunk appears to continue past the final
# print, so module-level names below may be used later.)
import imageProcess
import numpy as np

ip = imageProcess.imageProcess('C:\\Users\\pylak\\Documents\\Fall 2019\\MWDB\\Project\\Dataset\\')
arg = input("Which task would you like to run (1/2/3)? ")
if arg == '1':
    # Save every feature model in turn: SIFT, LBP, Histogram, Moments.
    for x in ['s', 'l', 'h', 'm']:
        print(x)
        ip.dbProcess(password='******', model=x, process='s')
elif arg == '2':
    # Fetch ('f') one image's stored feature vector and dump it to a file.
    inp1 = input("Would you like to fetch SIFT features or Moment features (s/m)?")
    inp = input("Provide the image ID which you would like to display:")
    rec = ip.dbProcess(password='******', model=inp1, process='f')
    rec_arr = np.array(rec[0][1])
    print('Features:', np.array(rec_arr))
    print('Size', rec_arr.shape)
    ip.writeFile(rec[0][1], 'Output\\Task2\\task2_{x}_{y}.txt'.format(x=inp1, y=inp))
    print('File saved to Output folder')
elif arg == '3':
    # Rank the inp2 most similar images to image ``inp``.
    inp1 = input("Would you like to Compare SIFT features or Moment features (s/m)?")
    inp = input("Provide the image ID which you would like to display:")
    inp2 = input("Provide the number of similar images you would like to return?")
    recs = ip.dbProcess(password='******', model=inp1, process='f')
    sim = ip.SimCalc(inp, recs, imgmodel=inp1, k=int(inp2))
    # Put the query image itself at the head of the similarity list.
    sim_list = [im[0] for im in sim]
    sim_list.insert(0, inp)
    print('\n')
def saveDim(self, feature, model, dbase, k, password='******', host='localhost', database='postgres', user='******', port=5432, label=None, meta=True, negative_handle='n'):
    """Fetch stored feature vectors, reduce them with the chosen technique,
    persist the reduced vectors to Postgres, and return rankings plus the
    factor matrices.

    feature -- feature-model code ('s' SIFT, 'm' Moments, 'l' LBP, 'h' Histogram)
    model   -- reduction technique: 'nmf', 'lda', 'pca', or 'svd'
    dbase   -- source table name; a '_<model>[_<label>]' suffix is appended
               for the destination table
    k       -- number of latent semantics
    label   -- optional metadata label used to filter the images (via CSV)
    meta    -- when True, (re)create the metadata table first
    negative_handle -- retained for interface compatibility; the negative-
                       value handling below is commented out

    Returns (imgs_sort, feature_sort, U, Vt): image rankings, feature
    rankings, and the two factor matrices of the decomposition.

    NOTE(review): ``no_clusters``, ``PostgresDB`` and ``KMeans_SIFT`` come
    from module scope; an unknown *model* string would leave ``imgs_red``,
    ``U`` and ``Vt`` unbound and raise NameError below.
    """
    imageDB = imageProcess(self.dirpath)
    imgs = imageDB.dbProcess(password=password, process='f', model=feature, dbase=dbase)
    kmeans_model = 'kmeans_' + str(no_clusters) + '_' + feature
    technique_model = feature + '_' + model
    db = PostgresDB(password=password, host=host, database=database, user=user, port=port)
    conn = db.connect()
    if meta:
        imageDB.createInsertMeta(conn)
    # Label filtering replaces ``imgs`` wholesale with the filtered rows.
    if label is not None:
        imgs = imageDB.CSV(conn, dbase, label)
        label = label.replace(" ", "_")
        dbase += '_' + model + '_' + label
        kmeans_model += '_' + label
        technique_model += '_' + label
    else:
        dbase += '_' + model
    # print(technique_model)
    imgs_data = []
    imgs_meta = []
    for img in imgs:
        # SIFT (and Moments under nmf/lda) carry many rows per image ->
        # extend; every other combination is one flat vector -> append.
        if feature == "s" or (feature == "m" and model in ("nmf", "lda")):
            imgs_data.extend(img[1])
        else:
            imgs_data.append(img[1].reshape((-1)))
        # print (image_cmp.shape)
        imgs_meta.append(img[0])
    # print(i)
    # print(len(imgs))
    # print(imgs_meta)
    # print(len(imgs_meta))
    #Handle Negative Value of NMF
    # if feature == 'm' and (model == 'lda' or model == 'nmf'):
    #     print ("Normalize")
    #     if negative_handle == 'h':
    #         imgs_data = self.hist(imgs_data)
    #     else:
    #         imgs_data = self.normalize(imgs_data)
    imgs_data = np.asarray(imgs_data)
    # print(imgs_data.shape)
    # print(imgs_data.shape)
    # print(imgs_meta)
    # imgs_meta = [x[0] if x[0] in filteredImage for x in imgs]
    imgs_zip = list(zip(imgs_meta, imgs_data))
    model = model.lower()
    # Multi-row features are clustered into a bag-of-visual-words matrix
    # before reduction, so every image gets a fixed-length vector.
    if feature == "s" or (feature == "m" and model in ("nmf", "lda")):
        if imgs_data.shape[0] < no_clusters:
            Kmeans = KMeans_SIFT(imgs_data.shape[0] // 2)
        else:
            Kmeans = KMeans_SIFT(no_clusters)
        clusters = Kmeans.kmeans_process(imgs_data)
        # print (imgs_zip)
        imgs_data = Kmeans.newMatrixSift(imgs, clusters, kmeans_model)
        imgs_zip = list(zip(imgs_meta, imgs_data))
    # Each branch produces imgs_red (reduced data), imgs_sort / feature_sort
    # (rankings), and the factor matrices U / Vt that are also returned.
    if model == 'nmf':
        w, h = self.nmf(imgs_data, k, technique_model)
        imgs_red = np.dot(imgs_data, h.T).tolist()
        # print(np.asarray(w).shape)
        # print(np.asarray(h).shape)
        imgs_sort = self.imgSort(w.T, imgs_meta)
        feature_sort = self.imgFeatureSort(h, imgs_zip)
        U = w
        Vt = h
    elif model == 'lda':
        w, h = self.lda(imgs_data, k, technique_model)
        imgs_red = np.dot(imgs_data, h.T).tolist()
        # print(np.asarray(w).shape)
        # print(np.asarray(h).shape)
        imgs_sort = self.imgSort(w.T, imgs_meta)
        feature_sort = self.imgFeatureSort(h, imgs_zip)
        U = w
        Vt = h
    elif model == 'pca':
        data, U, Vt = self.pca(imgs_data, k, technique_model)
        imgs_red = data.tolist()
        imgs_sort = self.imgSort(U.T, imgs_meta)
        feature_sort = self.imgFeatureSort(Vt, imgs_zip)
    elif model == 'svd':
        # print(imgs_data.shape)
        data, U, Vt = self.svd(imgs_data, k, technique_model)
        imgs_red = data.tolist()
        # print(im)
        # U[:,:self.k].dot(Sigma[:self.k, :self.k]).dot(V[:self.k,:])
        # print(U.T.shape)
        # print(imgs_meta.shape)
        imgs_sort = self.imgSort(U.T, imgs_meta)
        feature_sort = self.imgFeatureSort(Vt, imgs_zip)
    # print("=======================")
    # print(imgs_sort)
    # print("=======================")
    # print(feature_sort)
    # Process the reduced Images
    imgs_red = list(zip(imgs_meta, imgs_red))
    # print (np.asarray(imgs_sort).shape)
    # print(img_sort)
    # print (np.asarray(feature_sort).shape)
    self.createInsertDB(dbase, imgs_red, conn)
    return imgs_sort, feature_sort, U, Vt
"""Live-camera driver: grab 720p frames, crop to a square-ish center strip,
and run the imageProcess pipeline on each frame until ESC is pressed."""
import cv2
import numpy as np
import time  # was missing: time.sleep() below raised NameError when k == 64
import imageProcess

if __name__ == "__main__":
    camera = cv2.VideoCapture(0)
    camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    camera.set(cv2.CAP_PROP_FPS, 30)
    imageProcess.showTrackbar()
    while True:
        success, ori_img = camera.read()
        if not success:
            break
        # Crop the 1280-wide frame to its central 960 columns, then scale.
        img = cv2.resize(ori_img[:, 160:1120], (640, 480))
        # NOTE(review): the JPEG encoding result is never used here —
        # kept for parity with the streaming variant of this loop.
        ret, buffer = cv2.imencode('.jpg', img)
        frame = buffer.tobytes()
        imageProcess.imageProcess(img)
        k = cv2.waitKey(33) & 0xff
        if k == 27:  # press 'ESC' to quit
            break
        elif k == 64:
            time.sleep(5)
    camera.release()
    cv2.destroyAllWindows()
# Task-2 similarity driver: prompt for a feature model, a reduction
# technique, an image and k, then rank similar images via
# imageProcess.similarity().
import numpy as np
from imageProcess import imageProcess

# NOTE(review): the prompt advertises a default of ...\Project\Hands_test2\
# but the code falls back to ...\Project\Dataset\ — one of the two is stale;
# confirm which directory is intended.
path = input("Please enter the home directory for the images "
             "(Default: C:\\Users\\pylak\\Documents\\Fall 2019\\MWDB\\Project\\Hands_test2\\) : ")
if path == '':
    path = 'C:\\Users\\pylak\\Documents\\Fall 2019\\MWDB\\Project\\Dataset\\'
# NOTE(review): dimReduction is not imported in the visible imports above —
# verify it is brought into scope elsewhere, otherwise this raises NameError.
dim = dimReduction(path, '*.jpg')
feature = input('Please choose a feature model - SIFT(s), Moments(m), LBP(l), Histogram(h): ')
if feature not in ('s', 'm', 'l', 'h'):
    print('Please enter a valid feature model!')
    exit()
technique = input('Please choose a dimensionality reduction technique - PCA(pca), SVD(svd), NMF(nmf), LDA(lda): ')
image = input("Insert the name of your image: ")
k = input('Please provide the number of latent semantics(k): ')
# Table name encodes the feature model and reduction technique.
db = 'imagedata_' + feature + '_' + technique
task2 = imageProcess(path)
task2.similarity(feature, technique, db, int(k), image)
# similarity(feature, technique, db, int(k), image)
# imgs_sort, feature_sort = dim.saveDim(feature, technique, db, int(k))
# phase1 = imageProcess("/home/anhnguyen/ASU/CSE-515/Project/Phase 2/Project - Phase 2/Data/testset1/")
# print('\n')
# print('Data Latent Semantics Saved to Output Folder!')
# dim.writeFile(imgs_sort, 'Output\\Task1\\Data_ls_{x}_{y}_{z}.txt'.format(x=feature,y=technique,z=k))
# print('\n')
# print('Feature Latent Semantics Saved to Output Folder!')
# dim.writeFile(imgs_sort, 'Output\\Task1\\Feature_ls_{x}_{y}_{z}.txt'.format(x=feature,y=technique,z=k))