def analyze(self, request_iterator, context):
    """gRPC handler: decode a streamed image request, save it, run prediction, reply.

    Relies on the module globals set by loadmodel() (load_time, engine, labels,
    face_engine, class_arr, emb_arr) and on the module-level `Path` prefix.

    NOTE(review): returns after the FIRST request in the stream — confirm the
    remaining requests are meant to be ignored.
    """
    print("Calling Function Analyzeing")
    for req in request_iterator:
        img = req.data              # raw encoded image bytes
        # print(type(img))
        file_name = req.name
        time_start = req.elapsed_time
        file_name = Path + file_name  # Path: module-level directory prefix string
        print("Image Location:" + file_name)
        # np.frombuffer replaces the deprecated (removed in NumPy 2.x)
        # np.fromstring for decoding raw byte buffers — identical result.
        nparr = np.frombuffer(img, np.uint8)
        picture = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        cv2.imwrite(file_name, picture)
        ImageProcess.prediction(file_name, load_time, engine, labels,
                                face_engine, class_arr, emb_arr)
        # NOTE(review): "Sucess" typo kept — clients may match on this string.
        response = Test_pb2.Result(result="Sucess", elapsed_time=10,
                                   process_time=20)
        return response
def __init__(self, ic: ImgClass):
    """Keep copies of the source image, binarize an 800x600 resize, and show contours."""
    self.original = ic.img.copy()
    img = ic.img.copy()
    self.resize = cv2.resize(img, (800, 600))
    bp = ip.Binarization(self.resize, 80)
    # cv2.findContours returns (image, contours, hierarchy) on OpenCV 3.x but
    # (contours, hierarchy) on 4.x; the contour list is second-from-last in
    # both, so index instead of unpacking a fixed arity.
    found = cv2.findContours(bp, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = found[-2]
    pic = np.zeros(self.resize.shape).astype(np.uint8)
    cv2.drawContours(pic, contours, -1, (0, 0, 255), 2)
    ip.CvShow(pic)
def __init__(self):
    """Build the Tk main window: capture labels/buttons plus init/start/pause controls.

    Fix: Label(...).grid(...) returns None, so every self.lable_* attribute
    previously stored None; the widget is now created first and gridded in a
    second statement so the attribute holds the actual Label.
    """
    self.imPro = ImageProcess()
    self.km = KeyAndMouse()
    self.boolStart = 0
    self.root = Tk()
    self.root.title('剑灵取色卡刀')
    self.lable_Welcome = Label(
        self.root, text='这是剑灵的取色卡刀软件,使用详情请阅读 README.txt文件')
    self.lable_Welcome.grid(row=3, columnspan=7, padx=10, pady=10,
                            ipadx=5, ipady=5)
    self.lable_LB = Label(self.root, text='LB')
    self.lable_LB.grid(row=5, column=0, padx=10, pady=10, ipadx=5, ipady=5)
    self.lable_RB = Label(self.root, text='RB')
    self.lable_RB.grid(row=6, column=0, padx=10, pady=10, ipadx=5, ipady=5)
    self.lable_F = Label(self.root, text='F')
    self.lable_F.grid(row=7, column=0, padx=10, pady=10, ipadx=5, ipady=5)
    self.lable_4 = Label(self.root, text='4')
    self.lable_4.grid(row=8, column=0, padx=10, pady=10, ipadx=5, ipady=5)
    self.lable_V = Label(self.root, text='v')
    self.lable_V.grid(row=9, column=0, padx=10, pady=10, ipadx=5, ipady=5)
    # One capture button per key row, then the three control buttons.
    self.ButtonItem('捕捉图片', 5, 1, self.LB_cal)
    self.ButtonItem('捕捉图片', 6, 1, self.RB_cal)
    self.ButtonItem('捕捉图片', 7, 1, self.F_cal)
    self.ButtonItem('捕捉图片', 8, 1, self.K_4_cal)
    self.ButtonItem('捕捉图片', 9, 1, self.V_cal)
    self.ButtonItem('初始化', 10, 0, self.Init_cal, 1, 2)
    self.ButtonItem('开始', 10, 3, self.Start_cal, 1, 2)
    self.ButtonItem('暂停', 10, 6, self.Pause_cal, 1, 2)
def display(self):
    """Grab one camera frame, forward it to the image processor, return read status."""
    ok, frame = self.capturer.read()
    if not ok:
        return ok
    self.imgpr.setFrame(cv2.flip(frame, 1))
    if self.key == -1:
        # No key pressed yet: run the default 'o' processing path.
        ImP.processImg(self.imgpr, ord('o'))
        return ok
    pressed = chr(self.key)
    if pressed in self.keys:
        # Known trackbar key: re-run its trackbar callback with the cached value.
        idx = self.keys.index(pressed)
        self.trackBarProcess[idx](self.memVar[idx])
    else:
        ImP.processImg(self.imgpr, self.key)
    return ok
def imageLoadEnv(argv):
    """Open the image at argv[0] in a processor window and dispatch key presses
    to ImP.processImg until ESC (27) is pressed."""
    processor = ImP.ImageProcessor(argv[0], 'Image Processing')
    processor.showImg()
    while True:
        pressed = cv2.waitKey(0)
        if pressed == 27:  # ESC exits the interactive loop
            break
        ImP.processImg(processor, pressed)
    cv2.destroyAllWindows()
def loadmodel():
    """Load all models/embeddings into module globals and return the load time.

    Side effects: sets load_time, engine, labels, face_engine, class_arr and
    emb_arr at module scope — analyze() depends on this running first.

    Returns:
        float: seconds elapsed while loading.
    """
    global load_time
    global engine
    global labels
    global face_engine
    global class_arr
    global emb_arr
    load_time = time.time()
    print("load_time", type(load_time), load_time)
    # Initialize engine.
    engine = DetectionEngine(Model_weight)
    print("engine", type(engine), engine)
    labels = None
    print("labels", type(labels), labels)
    # Face recognize engine
    face_engine = ClassificationEngine(FaceNet_weight)
    #print(face_engine)
    print("face_engine", type(face_engine), face_engine)
    # read embedding (known classes and their embedding vectors)
    class_arr, emb_arr = ImageProcess.read_embedding(Embedding_book)
    print("class_arr", type(class_arr), class_arr)
    print("emb_arr", type(emb_arr), emb_arr)
    l = time.time() - load_time
    return l
def Predict(self, ic, option=0):
    """Classify each contour of *ic* with the KNN model and keep those labelled 1.

    Args:
        ic: image wrapper exposing img, BinaryProcess() and Draw().
        option: when 1, also display the result via ip.CvShow.

    Returns:
        (pic, NewContours): a 450x600 canvas with the kept contours drawn,
        and the list of kept contours.
    """
    self.LoadData()
    self.CreateKNN()
    Target = ic
    original = Target.img.copy()
    process = Target.BinaryProcess()
    final, contours = Target.Draw()
    NewContours = []
    for cnt in contours:
        [x, y, w, h] = cv2.boundingRect(cnt)
        cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # Crop the binarized patch, shrink to 10x10 and flatten into the
        # 100-feature float vector the KNN model was trained on.
        TargetPxs = process[y:y + h, x:x + w]
        TargetPxsSmall = cv2.resize(TargetPxs, (10, 10))
        sample = TargetPxsSmall.reshape((1, 100))
        sample = np.float32(sample)
        retval, results, neigh_resp, dists = self.model.findNearest(sample, k=3)
        # results is a 1x1 array; `== 1` relies on single-element truthiness.
        if (results == 1):
            NewContours.append(cnt)
    pic = np.zeros((450, 600, 3)).astype(np.uint8)
    cv2.drawContours(pic, NewContours, -1, (255, 255, 255), 2)
    if (option == 1):
        ip.CvShow('Classification', pic)
    return pic, NewContours
def Predict(self, ic: ImgClass, contours):
    """Run KNN (k=1) on each sufficiently large contour and draw the decoded
    label text onto a blank canvas, shown via ip.CvShow."""
    self.LoadData()
    self.CreateKNN()
    Target = ic
    original = Target.img.copy()
    process = Target.BinaryProcess()
    out = np.zeros(Target.img.shape, np.uint8)
    for cnt in contours:
        if cv2.contourArea(cnt) > 50:  # skip specks / noise contours
            [x, y, w, h] = cv2.boundingRect(cnt)
            cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # Same 10x10 -> 100-feature preprocessing used at training time.
            TargetPxs = process[y:y + h, x:x + w]
            TargetPxsSmall = cv2.resize(TargetPxs, (10, 10))
            sample = TargetPxsSmall.reshape((1, 100))
            sample = np.float32(sample)
            retval, results, neigh_resp, dists = self.model.findNearest(
                sample, k=1)
            # Map the numeric KNN label back to its string code and draw it
            # at the bottom-left corner of the bounding box.
            string = self.Code.deCode(results[0][0])
            cv2.putText(out, string, (x, y + h), 0, 1, (0, 255, 0))
    ip.CvShow('Out', out)
def tiledTraining(imList, species, overlap, n):
    """Take in a list of training images and their corresponding species.
    Create a new set of images through tiling and return a list of training
    metrics and species for each subimage."""
    metricList = []    # metrics for every tile across every image
    speciesList = []   # one species label per tile, aligned with metricList
    # Tile size must match what will be used at test time.
    tile = int(overlap * n)
    for img_path, label in zip(imList, species):
        image = Image.open(img_path)
        width, length = image.size
        per_image = []  # metrics for this image's tiles only
        # Walk the whole image in non-clipped steps of `tile` pixels.
        for left in range(0, width - tile, tile):
            for top in range(0, length - tile, tile):
                window = image.crop((left, top, left + tile, top + tile))
                # Metric calculation for the cropped sub-rectangle.
                per_image.append(IP.getMetrics(window))
        # Every tile of this image carries the image's species label.
        metricList += per_image
        speciesList += [label] * len(per_image)
    return metricList, speciesList
def LineNotify(message, img=None):
    """Send *message* (optionally with an OpenCV image attachment) to LINE Notify.

    Args:
        message: text to post.
        img: optional OpenCV BGR image; sent as a JPEG when given.
    """
    # API endpoint and token settings
    line_notify_api = 'https://notify-api.line.me/api/notify'
    # SECURITY(review): access token hard-coded in source — it should be
    # loaded from environment/config, and this exposed token revoked.
    line_notify_token = '1iMvggeXCGlou8YL2VIN779puSIs1p6a622N3dAmuVR'
    headers = {'Authorization': 'Bearer ' + line_notify_token}
    # message payload
    payload = {'message': message}
    # branch on whether an image is attached
    if img is None:
        # NOTE(review): HTTP response ignored — delivery failures are silent.
        requests.post(line_notify_api, data=payload, headers=headers)
    else:
        # convert the OpenCV image to a PIL image
        pilImg = IP.OpenCV2PIL(img)
        # encode the PIL image to JPEG bytes in memory
        output = io.BytesIO()
        pilImg.save(output, format='JPEG')
        jpgImg = output.getvalue()
        files = {"imageFile": jpgImg}
        #files = args[0]
        requests.post(line_notify_api, data=payload, headers=headers,
                      files=files)
def BinaryProcess(self, value=80):
    """Binarize self.img at threshold *value*.

    Stores the threshold on self.threshold and returns the binary image.
    """
    self.threshold = value
    return ip.Binarization(self.img, self.threshold)
def login(self):
    """Log in to the site, OCR-solving the image captcha; retries up to
    self.count times. On success sets self.isLogin and self.jsessionid.
    (Python 2 code — print statements.)"""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'
    }
    while self.count > 0:
        # visit the login index page to obtain session cookies
        r = self.requests.get(self.urlIndex, headers=headers)
        # fetch the captcha image and save it to a temp file
        r = self.requests.get(self.urlImg, headers=headers)
        fileName = self.imgPath
        with open(fileName, 'wb') as imgFile:
            imgFile.write(r.content)
        jsessionid = r.cookies['JSESSIONID']
        # use ImageProcess to decode the captcha from the saved image
        code = ImageProcess.getCode(fileName)
        # if the decoded captcha is not 4 characters, fetch a fresh one
        if len(code) != 4:
            self.count -= 1
            continue
        # build the login request headers/data/cookies
        headers.update({
            'Host': '210.42.121.241',
            'Origin': 'http://210.42.121.241',
            'Referer': 'http://210.42.121.241/servlet/Login'
        })
        data = {
            'id': self.uid,
            'pwd': hashlib.md5(self.pwd.encode()).hexdigest(),
            'xdvfb': code
        }
        cookies = {
            'sto-id-20480': self.sto_id_20480,
            'JSESSIONID': jsessionid
        }
        # disable redirects so the 302 (successful login) can be detected
        r = self.requests.post(self.urlLogin,
                               data=data,
                               headers=headers,
                               cookies=cookies,
                               allow_redirects=False)
        if str(r.status_code) == '302':
            self.isLogin = True
            self.jsessionid = jsessionid
            break
        self.count -= 1
    # report the login result
    if self.isLogin:
        print "Login Successfully"
        print "JSESSIONID:", self.jsessionid
    else:
        print "Login Failed,Please try again"
    # remove the temporary captcha image file
    os.remove(self.imgPath)
def InitSplit(self):
    """Build the vertical splitter: image panel on the left, info panel on the right."""
    # split-pane container
    self.splitter = wx.SplitterWindow(self, -1, wx.DefaultPosition,
                                      wx.DefaultSize,
                                      wx.SP_3DBORDER | wx.SP_LIVE_UPDATE)
    self.rightPanel = RightPanel.MyPanel(self.everything, self.curPicList,
                                         self.splitter)
    self.imagePanel = ImageProcess.MyImage(self.everything, self.curPicList,
                                           self.rightPanel, self.splitter)
    # share the same state dictionary between the two panels
    self.rightPanel.everything = self.imagePanel.everything
    # 730 px initial sash position for the left (image) pane
    self.splitter.SplitVertically(self.imagePanel, self.rightPanel, 730)
    # minimum pane size; applies to the left/right layout
    self.splitter.SetMinimumPaneSize(330)
    pass
def setKey(self, key):
    """Record the pressed key and dispatch it to the image processor.

    'w' is processed immediately without being recorded; 'h' prints the help
    text; any other key is stored, the memo variables are reset, and keys
    with a registered callback are processed with that callback.
    """
    pressed = chr(key)
    if pressed == 'w':
        ImP.processImg(self.imgpr, key)
        return
    if pressed == 'h':
        print(self.imgpr.displayHelp())
        return
    self.key = key
    self.resetMemVar()
    # Only keys with a registered callback trigger processing here.
    if pressed in self.keys:
        callback = self.callbacks[self.keys.index(pressed)]
        ImP.processImg(self.imgpr, self.key, callback)
def deBug(self, x):
    """Debug helper: show '5799.jpg' via ip.CvShow and return a Tk PhotoImage
    of '0.jpg'.

    Fix: removed an unreachable print(self.CurrentFile) that sat after the
    return statement (dead code).
    """
    im = Image.open('0.jpg')
    img = ImageTk.PhotoImage(image=im)
    cvim = cv2.imread('5799.jpg')
    ip.CvShow('5799', cvim)
    return img
def Url2ic(self):
    """Read one frame from the URL in the url_src entry, wrap it in an
    ImgClass, and (re)build the classifier and main system around it.

    Fix: the cv2.VideoCapture handle was never released — it is now closed
    in a finally block regardless of whether a frame was read.
    """
    if self.url_src.get():
        url = self.url_src.get()
        cap = cv2.VideoCapture(url)
        try:
            if cap.isOpened():
                ret, img = cap.read()
                if ret:
                    self.ic = ImgClass('')
                    self.ic.FromUrlSrc(img)
                    self.classfy = Classification(self.ic, self)
                    self.program = Mainsys(self.ic, self)
                    ip.CvShow('N', img)
        finally:
            cap.release()
def main():
    """CLI entry point: load optional category names JSON, run prediction on
    the input image, and print the top-K probabilities and flower type."""
    print(torch.cuda.is_available())
    args = get_input_args()
    names_path = args.category_names
    category_names = None
    # Only load the mapping when a path was actually supplied.
    if names_path:
        with open(names_path, 'r') as fh:
            category_names = json.load(fh)
    top_probs, flower_label = ImageProcess.predict(args.input_path,
                                                   args.checkpoint,
                                                   category_names,
                                                   args.top_k,
                                                   args.gpu)
    print(top_probs)
    print(flower_label)
def __init__(self, parent=None):
    """Main-window setup: timers, camera handle, detection state, and the
    stacked result/settings pages."""
    super(UiMainWindow, self).__init__(parent)
    self.ui = MainWindowUi.Ui_MainWindow()
    self.ui.setupUi(self)
    self.setWindowIcon(QtGui.QIcon("./ICON/Icon.jpg"))
    self.timer_camera = QtCore.QTimer()   # timer pacing camera frame grabs
    self.timer_get_pic = QtCore.QTimer()  # timer pacing frames sent to detection
    self.cap = cv2.VideoCapture()
    self.CAM_NUM = 0
    self.flag_show_detected = 0   # whether to show the annotated full frame
    self.detected_flag = 0        # whether detection has finished
    self.image = None             # last frame read from the camera
    self.detected_pic = None      # frame after detection/annotation
    # per-part results list: [center point, tilt angle, type, score, image]
    self.result_infos_pics = None
    self.detect_min_square = 50
    self.detect_max_square = 20000
    self.display_page_flag = 0
    self.updated_result_flag = False
    self.switch_display_widget = Qt.QStackedLayout(self.ui.switch_frame)
    self.modify_bar_page = ModifyBarPage(self.detect_min_square,
                                         self.detect_max_square)
    self.pic_info_page = PicInfoPage()
    self.switch_display_widget.addWidget(self.pic_info_page)
    self.switch_display_widget.addWidget(self.modify_bar_page)
    self.switch_display_widget.setCurrentIndex(self.display_page_flag)
    # Collect the four result labels into lists so they can be updated in a loop.
    self.three_pic_label = [
        self.pic_info_page.ui.dict_1, self.pic_info_page.ui.dict_2,
        self.pic_info_page.ui.dict_3, self.pic_info_page.ui.dict_4
    ]
    self.three_info_label = [
        self.pic_info_page.ui.dict_1_info, self.pic_info_page.ui.dict_2_info,
        self.pic_info_page.ui.dict_3_info, self.pic_info_page.ui.dict_4_info
    ]
    # **********************
    self.refresh_flag = 0
    self.slot_init()
    self.detecter = ImageProcess.Detect()
def __init__(self):
    """Fetch self.url, parse every <img> tag, and display each image that can
    be opened as a capture source."""
    r = requests.get(self.url)
    self.r = r
    if r.status_code == requests.codes.ok:
        # parse the HTML response with BeautifulSoup
        soup = BeautifulSoup(r.text, 'html.parser')
        # pull out the tags of interest
        #print(soup)
        imgs = soup.find_all('img')
        print(len(imgs))
        for img in imgs:
            c = img.get('alt')
            print(c)
            urlsrc = img.get('src')
            print(urlsrc)
            # NOTE(review): `img` (the BeautifulSoup tag) is shadowed below by
            # the decoded frame, and `cap` is never released — one capture
            # handle leaks per image.
            cap = cv2.VideoCapture(urlsrc)
            if (cap.isOpened()):
                ret, img = cap.read()
                if ret:
                    ip.CvShow('N', img)
    else:
        print('Wrong Request')
# NOTE(review): Python 2 fragment — the enclosing per-patient (pt_id) loop and
# the branch this `continue` belongs to start above this excerpt.
print 'pt_id {}: VOI JSON file was NOT found!'.format(pt_id)
continue
for jj in voi_lps_paths:
    # determine breast side from the VOI info file name
    info_fname = os.path.basename(jj)
    check = re.search(r'{}_(\w+)_(\d+).info.json'.format(pt_id), info_fname)
    if check:
        breast_side = check.group(1)
    else:
        # NOTE(review): breast_side keeps its previous value here — confirm
        # whether this patient file should be skipped instead.
        print 'pt_id {}: cannot determine breast side!'.format(pt_id)
    print 'breast_side: {}'.format(breast_side)
    # initialize a Dataframe for each patient data chunk
    CENTER, HALFLENGTHS, time_sorted_IMNAMES = ImSeg.GetImInfo(jj)
    # DMI MRI volumes (from brtool output)
    pt_series_dir = glob.glob('{}/{:0>3d}/*/*'.format(imdir, pt_id))[0]
    pt_dmi_list = [
        '{}/dce/{}'.format(pt_series_dir, ff) for ff in time_sorted_IMNAMES
    ]
    print 'image file dmi name: {}'.format(pt_dmi_list[0])
    dceSeries = dmi.DMI(pt_dmi_list[0])
    mri_itk_img = ITKImageHelper.generate_oriented_itkImage(dceSeries)
    IG_mri_img = image_geometry.ImageGeometry(mri_itk_img)
    voxsize = IG_mri_img.samplingRCS  # per-axis voxel spacing
    for ii in range(len(voxsize)):
        tmp_dict = {}
# Imports from Driver import Driver, Direction import ImageProcess import pygame import picamera import tty import sys import NeuralNetworkThree # Initializing variables continue=True stream=io.BytesIO() #where we store the pictures processer = ImageProcess.ImageProcess() driver = Driver() cam = picamera.PiCamera() cam.hflip = True cam.vflip=True cam.resolution = (1280, 720) cam.framerate=80 print("Auto mode started") # Loop me til you kill me while continue: # take a picture cam.capture(stream,use_video_port=True) # Read In Photo try: stream.seek(0)
""" # Read Three-day image directory from local root = Tk() root.withdraw() filename = askopenfilename() # Get test image path print filename path_1 = askdirectory() # Get First Day Image path_2 = askdirectory() # Get Second Day Image path_3 = askdirectory() # Get Third Day Image root.destroy() path = filename.rsplit('/', 1)[0] print path # Get SKY REGION MASK sky_region_mask = ImageProcess.getSkyRegion(path) # Get Sun Orbit among three-day image stream using SUN REGION MASK sun_orbit = [] sun_orbit = ImageProcess.getSunOrbit(path_1, path_2, path_3, sky_region_mask) # Get Centroid of all the Detected Sun in sun_orbit. centroid_list = [] centroid_list = ImageProcess.getCentroidList(sun_orbit) # Fit the centroid list sample to a quadratic equation(general parabola). theta, coeffs = ImageProcess.generalParabola(centroid_list) # Detect sun of test image img_centroid_radius = ImageProcess.sunDetect(filename, sky_region_mask)
class GUIProcess:
    """Tk GUI for the color-sampling auto-attack tool.

    Fixes: (1) Label(...).grid(...) returns None, so the self.lable_*
    attributes used to store None — widgets are now created and gridded in
    separate statements; (2) the deprecated Thread.setDaemon(True) is replaced
    by the Thread.daemon attribute.
    """

    def __init__(self):
        self.imPro = ImageProcess()
        self.km = KeyAndMouse()
        self.boolStart = 0  # 1 = paused: simul_click spins without working
        self.root = Tk()
        self.root.title('剑灵取色卡刀')
        self.lable_Welcome = Label(
            self.root, text='这是剑灵的取色卡刀软件,使用详情请阅读 README.txt文件')
        self.lable_Welcome.grid(row=3, columnspan=7, padx=10, pady=10,
                                ipadx=5, ipady=5)
        self.lable_LB = Label(self.root, text='LB')
        self.lable_LB.grid(row=5, column=0, padx=10, pady=10, ipadx=5, ipady=5)
        self.lable_RB = Label(self.root, text='RB')
        self.lable_RB.grid(row=6, column=0, padx=10, pady=10, ipadx=5, ipady=5)
        self.lable_F = Label(self.root, text='F')
        self.lable_F.grid(row=7, column=0, padx=10, pady=10, ipadx=5, ipady=5)
        self.lable_4 = Label(self.root, text='4')
        self.lable_4.grid(row=8, column=0, padx=10, pady=10, ipadx=5, ipady=5)
        self.lable_V = Label(self.root, text='v')
        self.lable_V.grid(row=9, column=0, padx=10, pady=10, ipadx=5, ipady=5)
        # One capture button per key row, then the three control buttons.
        self.ButtonItem('捕捉图片', 5, 1, self.LB_cal)
        self.ButtonItem('捕捉图片', 6, 1, self.RB_cal)
        self.ButtonItem('捕捉图片', 7, 1, self.F_cal)
        self.ButtonItem('捕捉图片', 8, 1, self.K_4_cal)
        self.ButtonItem('捕捉图片', 9, 1, self.V_cal)
        self.ButtonItem('初始化', 10, 0, self.Init_cal, 1, 2)
        self.ButtonItem('开始', 10, 3, self.Start_cal, 1, 2)
        self.ButtonItem('暂停', 10, 6, self.Pause_cal, 1, 2)

    def ButtonItem(self, text, row, column, callback, rowspan=1, columnspan=1):
        """Create a button bound (left click) to *callback* and grid it."""
        bu = Button(self.root, text=text)
        bu.bind("<Button-1>", callback)
        bu.grid(row=row, column=column, padx=10, pady=10, ipadx=5, ipady=5,
                rowspan=rowspan, columnspan=columnspan)

    def LableCathItem(self, row, prefix=''):
        """Capture a screenshot for *prefix* and show the newest image in its row."""
        self.imPro.saveImage(prefix)
        imNum = self.imPro._getNum(prefix)
        im = Image.open(self.imPro.IMAGEPATH + prefix + '_' + str(imNum - 1)
                        + '.png')
        bm = ImageTk.PhotoImage(im)
        lb = Label(self.root, image=bm)
        lb.image = bm  # keep a reference so Tk doesn't garbage-collect the photo
        lb.grid(row=row, column=imNum + 1, padx=10, pady=10, ipadx=5, ipady=5)

    def LB_cal(self, event):
        self.LableCathItem(5, 'LB')

    def RB_cal(self, event):
        self.LableCathItem(6, 'RB')

    def F_cal(self, event):
        self.LableCathItem(7, 'F')

    def K_4_cal(self, event):
        self.LableCathItem(8, '4')

    def V_cal(self, event):
        self.LableCathItem(9, 'V')

    def LabelInitItem(self, prefix, row):
        """Show all previously captured images for *prefix* in its row."""
        imNum = self.imPro._getNum(prefix)
        if imNum == 0:
            return
        for n in range(0, imNum):
            im = Image.open(self.imPro.IMAGEPATH + prefix + '_' + str(n)
                            + '.png')
            bm = ImageTk.PhotoImage(im)
            lb = Label(self.root, image=bm)
            lb.image = bm
            lb.grid(row=row, column=n + 2, padx=10, pady=10, ipadx=5, ipady=5)

    def Init_cal(self, event):
        """Populate every key row with its saved capture images."""
        self.LabelInitItem('LB', 5)
        self.LabelInitItem('RB', 6)
        self.LabelInitItem('F', 7)
        self.LabelInitItem('4', 8)
        self.LabelInitItem('V', 9)

    def simul_click(self):
        """Worker loop: compare captured key images and press matching keys.

        NOTE(review): busy-waits with no sleep (pegs a core), and while paused
        (boolStart == 1) it spins doing nothing; behavior kept as-is.
        """
        while (True):
            if self.boolStart == 1:
                continue
            if self.imPro.compareImage('LB') == True:
                pass
                #self.km.click_LB()
            if self.imPro.compareImage('RB') == True:
                pass
                #self.km.click_RB()
            if self.imPro.compareImage('F') == True:
                self.km.click_F()
            if self.imPro.compareImage('4') == True:
                self.km.click_4()
            if self.imPro.compareImage('V') == True:
                self.km.click_V()

    def Start_cal(self, event):
        """Prime the comparison templates and start the clicking thread."""
        self.imPro.readyToCompare("LB")
        self.imPro.readyToCompare("RB")
        self.imPro.readyToCompare("F")
        self.imPro.readyToCompare("4")
        self.imPro.readyToCompare("V")
        t = threading.Thread(target=self.simul_click)
        t.daemon = True  # replaces deprecated t.setDaemon(True)
        t.start()

    def Pause_cal(self, event):
        """Toggle pause/resume for the clicking loop."""
        self.boolStart = 1 - self.boolStart
parser.add_argument("scale", help="scale for the costs", type=int) args = parser.parse_args() n = args.n samples = args.samples cscale = args.scale # initialize training data print("Generating Data...") gs.Initialize("Shapes", 250) #replace 250 with samples gg.Initialize(n * 200, n) trueGrid = pf.Grid("grid.txt", impassable=[7]) ground = np.array(trueGrid.collapseGrid()) # train and predict print("Training Model...") predictedGrid = ip.train(samples, n, ground) predictedGrid = pf.Grid(predictedGrid.tolist(), scale=cscale, impassable=[7]) # wrap grid trueGrid.scaleGrid(cscale) # pathfind using predicted grid print("Pathfinding...") algos1 = [ pf.AStar(predictedGrid, pf.ManhattanHeuristic()), pf.AStar(predictedGrid, pf.EuclideanHeuristic()), pf.Dijkstra(predictedGrid), pf.DFSB(predictedGrid), pf.BFS(predictedGrid), pf.GreedyDFS(predictedGrid) ]
# NOTE(review): Python 2 fragment — the enclosing per-patient loop (pt_id,
# voi_lps_paths, im_info_dir) starts above this excerpt.
continue
for jj in voi_lps_paths:
    # initialize a Dataframe for each patient data chunk
    pt_features_data = pd.DataFrame()
    # determine breast side from the VOI info file name
    info_fname = os.path.basename(jj)
    check = re.search(r'{}_(\w+)_(\d+).info.json'.format(pt_id), info_fname)
    if check:
        breast_side = check.group(1)
    else:
        # NOTE(review): breast_side keeps its previous value here.
        print 'pt_id {}: cannot determine breast side!'.format(pt_id)
    CENTER, HALFLENGTHS, time_sorted_IMNAMES = ImSeg.GetImInfo(jj)
    # Get the SER mask (.dmi) from the brtool output
    findserdmi = glob.glob('{}/{}_{}*.SER_MASK.dmi'.format(
        im_info_dir, pt_id, breast_side))
    if findserdmi:
        ser_mask_fname = findserdmi[0]
    else:
        print 'pt_id {}: SER mask DMI file was NOT found!'.format(
            pt_id)
        continue
    ser_mask_itk = ITKImageHelper.generate_oriented_itkImage(
        dmi.DMI(ser_mask_fname))
    ser_mask_sagittal = ITKImageHelper.itkImage_orient_to_sagittal(
        ser_mask_itk)
# Save the best model (by validation loss) to weights.hdf5 during training.
checkpointer = ModelCheckpoint(filepath="weights.hdf5",
                               verbose=1,
                               save_best_only=True)
batch_size = 64
nb_classes = 12
nb_epoch = 200
data_augmentation = False

# input image dimensions
img_rows, img_cols = 224, 224
# The images are RGB.
img_channels = 3

# The data, shuffled and split between train and test sets:
(X_train, Y_train), (X_test, Y_test) = ImageProcess.load_data()

# Convert class vectors to binary class matrices.
# Y_train = np_utils.to_categorical(y_train, nb_classes)
# Y_test = np_utils.to_categorical(y_test, nb_classes)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# subtract mean and normalize
# The train-set mean is subtracted from BOTH splits (no test-set leakage),
# then pixel values are scaled down by 255.
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_test -= mean_image
X_train /= 255.
X_test /= 255.
# NOTE(review): fragment — the ImageDataGenerator(...) call that this
# `cval=0)` closes begins above this excerpt, and the excerpt is also cut
# off mid-call at the end.
cval=0)
batch = generator.flow_from_directory(directory=directory,
                                      target_size=(img_width, img_height),
                                      batch_size=8,
                                      subset='training',
                                      seed=1)
x_batch = next(batch)[0]  # first yielded batch: (images, labels) -> images
multi_images_plot(x_batch)
'''
@preprocess input by inceptionV3 preprocess
inceptionV3.preprocess_input() take a 4D ndarray as input,
it is same as imagnet_utils.preprocess_input()
'''
from keras.applications import inception_v3
x_batch = ImageProcess.BatchImagesRead('./WR/noTag/')
multi_images_plot(inception_v3.preprocess_input(x_batch))
'''
@preprocess from original inceptv3 model
'''
generator = ImageDataGenerator(
    preprocessing_function=inception_v3.preprocess_input,
    # zca_whitening=True,
    # zca_epsilon=.1,
    # rotation_range=360,
    # width_shift_range=0.2,
    # height_shift_range=0.2,
    # shear_range=0.2,
    # zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
from ImageProcess import getImages
import ImageProcess

processer = ImageProcess.ImageProcess()  # create the image processing object


def imageProcesser(images):
    """Process every image path in *images* through the shared processor and
    return the accumulated list of processed images in ARRAY FORM
    (processer.pictures)."""
    for item in images:
        processer.imageProcesser(item)
        # helps visualize what my code is doing
        print(item + " was the " + str(len(processer.pictures)))
    # NOTE: the greyscale function is MUCH slower than this ImageProcesser
    # path, since it checks each pixel individually and averages the R+G+B
    # values of non-red pixels to convert each pixel to greyscale manually.
    return processer.pictures


# commenting out this line will cause the program to not process old or new images
images = getImages()
convertedImages = imageProcesser(images)  # list of processed IMAGES IN ARRAY FORM
# Shows the list of image arrays. NOTE(review): images processed by greyscale
# look slightly different from images processed by the image processor, though
# they look the same once saved — verify against the other conversion code
# whether all images must be converted the same way.
print(convertedImages)
# -*- coding: utf-8 -*- """ Created on 10/11/16 3:52 PM @author: shuang Shih-ying Huang @goal: cast the data type for the nrrd files for the PET tumor mask to a given datatype for ease of I/O for radiomics processing """ import ImageProcess as ImP import glob rootdir = '/data/francgrp1' mask_dir = '{}/breast_radiomics/her2/ALL_TUMORS/mevis_manual_segmentation'.format( rootdir) all_nrrd_fname = glob.glob('{}/*.nrrd'.format(mask_dir)) the_cast_dtype = 'unsigned char' for ff in all_nrrd_fname: print 'cast nrrd image {}'.format(ff) ImP.NRRDImageCast(ff, the_cast_dtype)
# Extract images from Sequence File images_d1 = images_d1.map(ImageProcess.extract_image) images_d2 = images_d2.map(ImageProcess.extract_image) images_d3 = images_d3.map(ImageProcess.extract_image) # Save images RDD in memory for later use (get correlation coefficient) images_d1.persist(StorageLevel.MEMORY_ONLY) images_d2.persist(StorageLevel.MEMORY_ONLY) images_d3.persist(StorageLevel.MEMORY_ONLY) '''======================Sky Region Detection Part======================''' # Get new RDD: Sky Region for each image images_sky_region = images_d2.map(lambda x: ImageProcess.checkSkyRegion(x[1])) # Combine sky_region_color and sky_region_edge skyRegionPixelCount = (np.zeros((HEIGHT, WIDTH), np.uint8), np.zeros((HEIGHT, WIDTH), np.uint8), 0) pixel = sc.accumulator(skyRegionPixelCount, MatrixAccumulatorParam()) images_sky_region.foreach(lambda x: pixel.add(x)) images_count = pixel.value[2] # Generate sky-region-binary-image beta1 = 0.75 beta2 = 0.5 resultPic = ImageProcess.getSkyRegionMask(pixel.value[0], pixel.value[1], images_count * beta1, images_count * beta2) # Erode the sky-region-binary-image and mark three largest (if any) contour area. kernel = np.ones((5, 5), np.uint8)
def import_winxuan_to_sql(server, urls):
    """Import winxuan book pages into the ECS MySQL store and upload images over FTP.

    Args:
        server: dict with "mysql" (host, user, password, db, charset) and
                "ftp" (host, user, password, remote image path) sequences.
        urls: mapping of page URL -> payload tag (only "json" is handled).

    Side effects: inserts goods/attribute/gallery rows, uploads resized
    images via FTP, prints progress; URLs whose fetch fails are reported
    at the end.

    Fixes:
      * the SQL statement used to be assigned to the name `sql`, clobbering
        the mysql config list — `sql[3]` then selected no tables for every
        URL after the first; the statement is now named `stmt`.
      * in the small-image branch the thumbnail path was assigned to a
        misspelled variable (`humbImg`), leaving goods_thumb empty.
    """
    print("Starting Import winxuan article to database...\n")
    ignored = []  # URLs that returned no page text
    sql = server["mysql"]
    ftp = server["ftp"]
    connection = pymysql.connect(sql[0], sql[1], sql[2], sql[3], charset=sql[4])
    fconn = ftplib.FTP(ftp[0], ftp[1], ftp[2])
    try:
        for url, tag in urls.items():
            text = utility.post_html_text(url)
            if not text:
                ignored.append(url)
                continue
            data = ""
            if tag == "json":
                data = json.loads(text)
            # Select the test vs. production database tables / image path.
            goodstable = ""
            goodsattrtable = ""
            goodscattable = ""
            goodsgallerytable = ""
            goodsimagepath = ""
            if sql[3] == 'zhongw_test':
                goodstable = "ecs_test_goods"
                goodsattrtable = "ecs_test_goods_attr"
                goodscattable = "ecs_test_goods_cat"
                goodsgallerytable = "ecs_test_goods_gallery"
                goodsimagepath = "test/" + ftp[3]
            elif sql[3] == 'zhongwenshu_db1':
                goodstable = "ecs_goods"
                goodsattrtable = "ecs_goods_attr"
                goodscattable = "ecs_goods_cat"
                goodsgallerytable = "ecs_goods_gallery"
                goodsimagepath = ftp[3]
            # Goods categories are defined in ecs_category; stage new items
            # under the "pending shelf" category '134'.
            catid = "134"
            # Unique goods serial number.
            sn = ""
            title = ""
            if "title" in data["shop_items"][0]:
                title = data["shop_items"][0]["title"]
            if title:
                sn = SpiderToSQL.generate_sn(data["shop_items"][0]["title"])
            # stock quantity
            goodsnumber = 0
            if "stock" in data["shop_items"][0]:
                goodsnumber = data["shop_items"][0]["stock"]
            # ISBN
            isbn = ""
            if "barcode" in data["shop_items"][0]:
                isbn = data["shop_items"][0]["barcode"]
            # prices (original list price, shop price, market price)
            oriprice = "0.00"
            shopprice = 0.00
            marketprice = "0.00"
            if "list_price" in data["shop_items"][0]:
                oriprice = u"%s%.2f" % (u"¥",
                                        data["shop_items"][0]["list_price"])
                shopprice = data["shop_items"][0]["list_price"] * 2 / 7
                marketprice = u"%.2f" % (
                    data["shop_items"][0]["list_price"] * 1.2)
            # weight in kg
            goodsweight = 0.000
            # goods images
            homeimgUrl = ""
            largeimgUrls = {}
            img = ImageProcess.Processor("")
            if "shop_item_images" in data["shop_items"][0]:
                for image in data["shop_items"][0]["shop_item_images"]:
                    if image["image_type"] == "HOME_IMAGE":
                        homeimgUrl = image["winxuan_image_url"]
                        img = ImageProcess.Processor(homeimgUrl)
                    elif image["image_type"] == "LARGE_IMAGE":
                        largeimgUrls[
                            image["index"]] = image["winxuan_image_url"]
            oriImg = ""
            goodsImg = ""
            thumbImg = ""
            galleryOriImg = ""
            galleryGoodsImg = ""
            galleryThumbImg = ""
            if img.Loaded():
                fconn.cwd(goodsimagepath)
                # full-size source image + gallery copies
                src = img.Save("./temp", sn, img.Format())
                target = img.Upload(fconn, src, "source_img", sn, img.Format())
                if target:
                    oriImg = ftp[3] + target
                target = img.Upload(fconn, src, "source_img", sn + "_P",
                                    img.Format())
                if target:
                    galleryOriImg = ftp[3] + target
                target = img.Upload(fconn, src, "goods_img", sn + "_G_P",
                                    img.Format())
                if target:
                    galleryGoodsImg = ftp[3] + target
                if img.Width() > 230 and img.Height() > 230:
                    # large enough: produce a 230x230 goods image and
                    # 100x100 thumbnails
                    img.Thumb(230, 230)
                    src = img.Save("./temp", sn + "_G", img.Format())
                    target = img.Upload(fconn, src, "goods_img", sn + "_G",
                                        img.Format())
                    if target:
                        goodsImg = ftp[3] + target
                    img.Thumb(100, 100)
                    src = img.Save("./temp", sn + "_T", img.Format())
                    target = img.Upload(fconn, src, "thumb_img", sn + "_T",
                                        img.Format())
                    if target:
                        thumbImg = ftp[3] + target
                    target = img.Upload(fconn, src, "thumb_img", sn + "_T_P",
                                        img.Format())
                    if target:
                        galleryThumbImg = ftp[3] + target
                else:
                    # small image: reuse it directly as goods image
                    target = img.Upload(fconn, src, "goods_img", sn + "_G",
                                        img.Format())
                    if target:
                        goodsImg = ftp[3] + target
                    if img.Width() > 100 and img.Height() > 100:
                        img.Thumb(100, 100)
                        src = img.Save("./temp", sn + "_T", img.Format())
                        target = img.Upload(fconn, src, "thumb_img",
                                            sn + "_T", img.Format())
                        if target:
                            thumbImg = ftp[3] + target  # fix: was `humbImg`
                        target = img.Upload(fconn, src, "thumb_img",
                                            sn + "_T_P", img.Format())
                        if target:
                            galleryThumbImg = ftp[3] + target
                    else:
                        target = img.Upload(fconn, src, "thumb_img",
                                            sn + "_T", img.Format())
                        if target:
                            thumbImg = ftp[3] + target
                        target = img.Upload(fconn, src, "thumb_img",
                                            sn + "_T_P", img.Format())
                        if target:
                            galleryThumbImg = ftp[3] + target
            # goods detail sections (HTML fragments assembled per field)
            fields = [
                "feature", "editor_recommendation", "content_introduce",
                "author_introduce", "catalog", "preface", "media_comment"
            ]
            sections = {
                "feature": {
                    "id": u"feature",
                    "title": u"产品特色"
                },
                "editor_recommendation": {
                    "id": u"abstract",
                    "title": u"编辑推荐"
                },
                "content_introduce": {
                    "id": u"content",
                    "title": u"内容简介"
                },
                "author_introduce": {
                    "id": u"authorIntroduction",
                    "title": u"作者简介"
                },
                "catalog": {
                    "id": u"catalog",
                    "title": u"目 录"
                },
                "preface": {
                    "id": u"preface",
                    "title": u"在线试读"
                },
                "media_comment": {
                    "id": u"media",
                    "title": u"媒体评论"
                }
            }
            prodtext = u""
            if "shop_item_attribute" in data["shop_items"][0]:
                for field in fields:
                    sectiontext = u''
                    if field in data["shop_items"][0]["shop_item_attribute"]:
                        sectiontext += (u'<div class="section" id="'
                                        + sections[field]["id"] + u'">'
                                        u'<div class="title"><span>'
                                        + sections[field]["title"]
                                        + u'</span></div>'
                                        u'<div class="descrip">')
                        sectiontext += data["shop_items"][0][
                            "shop_item_attribute"][field]
                        sectiontext += u'<div> </div></div></div>'
                    else:
                        # "feature" falls back to the large product images
                        if field == "feature":
                            if largeimgUrls:
                                sectiontext += (u'<div class="section" id="'
                                                + sections[field]["id"] + u'">'
                                                u'<div class="title"><span>'
                                                + sections[field]["title"]
                                                + u'</span></div>'
                                                u'<div class="descrip">')
                                for url in largeimgUrls.values():
                                    sectiontext += ('<img alt="" src="' + url
                                                    + '" />')
                                sectiontext += u'<div> </div></div></div>'
                    prodtext += sectiontext
            if prodtext:
                zwsprodtext = (u"<div><zws-product>" + prodtext
                               + u"</zws-product></div>")
            else:
                zwsprodtext = u"<p>本商品暂无详情。</p>"
            # author / publisher / publish date / size / binding / ISBN / price
            attrs = {
                "author": "",
                "publish_house": "",
                "publish_date": "",
                "size": "",
                "binding": ""
            }
            if "shop_item_attribute" in data["shop_items"][0]:
                for key in attrs:
                    if key in data["shop_items"][0]["shop_item_attribute"]:
                        attrs[key] = data["shop_items"][0][
                            "shop_item_attribute"][key]
            # timestamp
            addtime = str(int(time.time()))
            # goods types are defined in ecs_goods_type; '1' means book
            gtype = '1'
            with connection.cursor() as cursor:
                stmt = ("INSERT INTO " + goodstable +
                        " (`goods_id`, `cat_id`, `goods_sn`,`goods_name`,"
                        " `goods_name_style`, `click_count`, `brand_id`, `provider_name`, `goods_number`,"
                        " `goods_weight`, `market_price`, `virtual_sales`, `shop_price`, `promote_price`,"
                        " `promote_start_date`, `promote_end_date`, `warn_number`, `keywords`, `goods_brief`,"
                        " `goods_desc`, `goods_thumb`, `goods_img`, `original_img`, `is_real`, `extension_code`,"
                        " `is_on_sale`, `is_alone_sale`, `is_shipping`, `integral`, `add_time`, `sort_order`,"
                        " `is_delete`, `is_best`, `is_new`, `is_hot`, `is_promote`, `bonus_type_id`, `last_update`,"
                        " `goods_type`, `seller_note`, `give_integral`, `rank_integral`, `suppliers_id`, `is_check`) "
                        "VALUES (NULL, %s, %s, %s,"
                        " '+', '0', '0', '', %s,"
                        " %s, %s, '', %s, '0.00',"
                        " '0', '0', '1', '', '',"
                        " %s, %s, %s, %s, '1', '',"
                        " '1', '1', '0', '0', %s, '100',"
                        " '0', '0', '0', '0', '0', '0', '0',"
                        " %s, '', '-1', '-1', '0', NULL)")
                cursor.execute(stmt,
                               (catid, sn, title, goodsnumber, goodsweight,
                                marketprice, shopprice, zwsprodtext, thumbImg,
                                goodsImg, oriImg, addtime, gtype))
                # book attribute ids as defined in ecs_attribute
                attridx = {
                    1: attrs["author"],
                    2: attrs["publish_house"],
                    3: isbn,
                    4: attrs["publish_date"],
                    5: attrs["size"],
                    7: attrs["binding"],
                    232: oriprice
                }
                # look up the auto-assigned unique goods id
                stmt = ("SELECT `goods_id` FROM " + goodstable +
                        " WHERE `goods_sn`=%s")
                cursor.execute(stmt, sn)
                goodsid = cursor.fetchone()[0]
                print(goodsid)
                # insert the book attribute rows
                for attrid, attr in attridx.items():
                    stmt = ("INSERT INTO " + goodsattrtable +
                            " (`goods_attr_id`, `goods_id`, `attr_id`,"
                            " `attr_value`, `attr_price`)"
                            " VALUES (NULL, %s, %s, %s, '0')")
                    cursor.execute(stmt, (goodsid, attrid, attr))
                # insert the gallery row when a gallery original exists
                if galleryOriImg:
                    stmt = ("INSERT INTO " + goodsgallerytable +
                            " (`img_id`, `goods_id`, `img_url`, `img_desc`,"
                            " `thumb_url`, `img_original`)"
                            " VALUES (NULL, %s, %s, '', %s, %s)")
                    cursor.execute(stmt, (goodsid, galleryGoodsImg,
                                          galleryThumbImg, galleryOriImg))
                connection.commit()
    finally:
        connection.close()
        fconn.quit()
    for url in ignored:
        print(url + " ignored!\n")
    print("Finished.")
# NOTE(review): Python 2 fragment — the per-patient loop variables (pt_id,
# voi_lps_paths, im_info_dir) are defined above this excerpt, and the excerpt
# is cut off mid-statement at the end.
for jj in voi_lps_paths:
    # initialize a Dataframe for each patient data chunk
    pt_features_data = pd.DataFrame()
    # Get the images and files needed for MRI
    # determine breast side from the VOI info file name
    info_fname = os.path.basename(jj)
    check = re.search(r'{}_(\w+)_(\d+).info.json'.format(pt_id), info_fname)
    if check:
        breast_side = check.group(1)
    else:
        # NOTE(review): breast_side keeps its previous value here.
        print 'pt_id {}: cannot determine breast side!'.format(pt_id)
    CENTER, HALFLENGTHS, time_sorted_IMNAMES = ImP.GetImInfo(jj)
    # Get the SER mask (.dmi) from the brtool output
    findserdmi = glob.glob('{}/{}_{}*.SER_MASK.dmi'.format(
        im_info_dir, pt_id, breast_side))
    if findserdmi:
        ser_mask_fname = findserdmi[0]
    else:
        print 'pt_id {}: SER mask DMI file was NOT found!'.format(
            pt_id)
        continue
    ser_mask_dmi = dmi.DMI(ser_mask_fname)
    ser_mask_itk = ITKImageHelper.generate_oriented_itkImage(
        ser_mask_dmi)
    ser_mask_sagittal = ITKImageHelper.itkImage_orient_to_sagittal(
ig_mask = image_geometry.ImageGeometry(mask_itk) mask_array = ITKImageHelper.itkImage_to_ndarray(mask_itk) if petct_array.shape != mask_array.shape: dcimg_diff_size_id.append('{}_{}'.format(id,breast_side)) if petct_array.shape == mask_array.shape: print 'image and mask arrays have the same size!' # find the slice where tumor mask exists check = np.nonzero(mask_array) s1_idx = np.min(check[0]) - 2 s2_idx = np.max(check[0]) + 2 s1_idx = s1_idx if s1_idx >= 0 else 0 s2_idx = s2_idx if s2_idx <= petct_array.shape[0] else petct_array.shape[0] ImP.display_overlay_volume(petct_array[s1_idx:s2_idx], mask_array[s1_idx:s2_idx], 'PID {} side {}: PETCT image + Tumor mask'.format(id, breast_side), aspect=ig_pet.samplingRCS[0]/ig_pet.samplingRCS[1]) else: print 'image and mask arrays have DIFFERENT size! O_O, pet img size: {} vs mask size: {}'.format(petct_array.shape,mask_array.shape) # retro-fit image geometry of mask to image geometry of the dicom series PET data ss, rr, cc = np.nonzero(mask_array) src_lst = zip(ss, rr, cc) mask_array_retrofit2pet = np.zeros(petct_array.shape) for src in src_lst: world_coord = ig_mask.idx_to_coords(src[2], src[1], src[0]) pet_img_idx = ig_pet.coords_to_idx(world_coord[0], world_coord[1], world_coord[2]) print 'world_coord: {}, pet_img_idx: {}'.format(world_coord, pet_img_idx) mask_array_retrofit2pet[pet_img_idx[2], pet_img_idx[0], pet_img_idx[1]] = 1 # plot the retro-fit-to-PET mask with the PET dicom series data ImP.display_overlay_volume(petct_array, mask_array_retrofit2pet, 'PID {} side {}, Retrofit PET image + Tumor mask'.format(id,breast_side),