def convert2(path):
    """Flatten the image at *path* onto a white background and save as PNG.

    The output is written next to the input with a ``.png`` extension.
    """
    image = Image.open(path)
    out = os.path.splitext(path)[0] + ".png"
    if image.mode != 'RGBA':
        # A source without an alpha channel cannot serve as its own paste
        # mask (paste() would raise ValueError) — normalize to RGBA first.
        image = image.convert('RGBA')
    # White canvas; the source's alpha channel is the mask, so transparent
    # regions come out white.
    non_transparent = Image.new('RGBA', image.size, (255, 255, 255, 255))
    non_transparent.paste(image, (0, 0), image)
    non_transparent.save(out)
def findCorners(self, horizontal, vertical, colorEdges):
    '''
    Finds corners at intersection of horizontal and vertical lines.
    '''
    # Intersect every vertical line with every horizontal line.
    corners = [list(v.find_intersection(h)) for v in vertical for h in horizontal]

    # Keep a corner only if no already-kept corner lies within 20 px of it.
    dedupeCorners = []
    for candidate in corners:
        too_close = any(
            math.sqrt((kept[0] - candidate[0]) * (kept[0] - candidate[0]) +
                      (kept[1] - candidate[1]) * (kept[1] - candidate[1])) < 20
            for kept in dedupeCorners
        )
        if not too_close:
            dedupeCorners.append(candidate)

    # Circle each surviving corner on the debug overlay.
    for corner in dedupeCorners:
        cv2.circle(colorEdges, (corner[0], corner[1]), 10, (0, 0, 255))

    if board_Recognition.debug:
        # Show image with corners circled
        Image.show("Corners", colorEdges)
    return dedupeCorners
def Readh5SI(filename):
    """ Read hdf5 data file (written for CL data from Odemis)"""
    fuzzing = False
    print('Loading...', filename)
    data = h5py.File(filename, 'r')
    SEM = np.array([])
    survey = np.array([])
    # Initialize so a file without a spectrum-image acquisition fails
    # gracefully instead of raising NameError at the fuzzing check/return.
    SI = np.array([])
    Wavelengths = np.array([])
    for kk in list(data)[:4]:
        # for kk in data.keys()[:4]:
        if 'Acquisition' in kk:
            AcqData = data[kk]['ImageData']['Image']
            DataShape = np.shape(AcqData)
            if DataShape[0] != 1:
                # Spectrum image: wavelength axis comes first.
                SI = np.squeeze(np.array(AcqData))
                Wavelengths = np.array(data[kk]['ImageData']['DimensionScaleC'])
            elif DataShape[1] != 1:
                _ = AcqData  # Drift image
            elif DataShape[3] == DataShape[4] == 512:
                survey = Image(np.squeeze(AcqData),
                               calibration=np.array(data[kk]['ImageData']['DimensionScaleX']))
            else:
                SEM = Image(np.squeeze(AcqData),
                            calibration=np.array(data[kk]['ImageData']['DimensionScaleX']))
    # A "fuzzed" acquisition oversamples the SEM image 4x along the fast axis.
    # Short-circuit on an empty SI so SEM.data is not touched needlessly.
    if np.size(SI) and np.shape(SEM.data)[-1] == 4 * np.shape(SI)[-1]:
        fuzzing = True
    return SI, Wavelengths, SEM, survey, fuzzing
def draw(self, passes=8):
    # Render a stereo pair: the left camera fills the left half of the image,
    # the right camera the right half. Each pixel averages `passes` samples.
    if self.world == None:
        self.set_world()
    ppu = self.camera_r.ppu  # pixels per world unit
    w = int(ppu * self.camera_r.width)  # width of ONE eye's view
    width = 2 * w  # final image holds both views side by side
    height = int(ppu * self.camera_r.height)
    self.image = Image(width, height)
    c = Color()
    for y in range(height):
        print 'drawing line', y + 1, 'of', height
        for x in range(w):
            # draw pixel from left camera
            c.set_rgb(0.0, 0.0, 0.0)
            for p in range(passes):
                c = c + self.world.sample(self.camera_l.get_ray(x, y))
            self.image.set_pixel(x, y, c.dim(1.0 / passes))  # average the samples
            # draw pixel from right camera
            c.set_rgb(0.0, 0.0, 0.0)
            for p in range(passes):
                c = c + self.world.sample(self.camera_r.get_ray(x, y))
            self.image.set_pixel(x + w, y, c.dim(1.0 / passes))
    self._drawn = True
    return self
def draw(self, passes = 8):
    # Render the scene; each pixel averages `passes` Monte-Carlo samples.
    # Prints per-line timing and two ETA estimates as it goes.
    if self._world == None:
        self.set_world()
    if self._camera == None:
        self.set_camera()
    ppu = self._camera.ppu  # pixels per world unit
    width = int(ppu * self._camera.width)
    height = int(ppu * self._camera.height)
    self._image = Image(width, height)
    c = Color()
    t_0 = time.time()  # start of the whole render (for overall ETA)
    for y in range(height):
        lt0 = time.time()  # start of this scan line
        print 'drawing line', y + 1, 'of', height
        for x in range(width):
            c.set_rgb(0.0,0.0,0.0)
            for p in range(passes):
                c = c + self._world.sample(self._camera.get_ray(x,y))
            self._image.set_pixel(x,y,c.dim(1.0 / passes))  # average the samples
        lt1 = time.time()
        ltime = lt1 - lt0  # seconds spent on this line
        ttime = lt1 - t_0  # total seconds so far
        lleft = height - 1 - y  # lines remaining
        # ETA from the last line alone vs. from the running average.
        mleft1 = ltime * lleft / 60
        mleft2 = ttime / (y + 1) * lleft / 60
        print 'line took {0:.3} seconds.'.format(ltime),
        print '{0:.5} to {1:.5} minutes left'.format(mleft1, mleft2)
    self._drawn = True
    return self
def load(self):
    """ Load the image and return an :class:`Image` instance. """
    bits_per_pixel = self.get_bits_per_sample()
    height = self.get_image_length()
    samples_per_pixel = self.get_sample_per_pixel()
    width = self.get_image_width()
    photometric = self.get_photometric()
    planar_config = self.get_planar_config()
    # Tiff.PLANARCONFIG_CONTIG = 1 = Chunky format
    # Tiff.PLANARCONFIG_SEPARATE = 2 = Planar format
    is_planar = planar_config == Tiff.PLANARCONFIG_SEPARATE

    # Map the TIFF sample layout onto one of the two supported pixel formats.
    if samples_per_pixel == 1 and bits_per_pixel == 16 and photometric == Tiff.PHOTOMETRIC_MINISBLACK:
        format_ = 'gray16'
    elif samples_per_pixel == 3 and bits_per_pixel == 8 and photometric == Tiff.PHOTOMETRIC_RGB:
        format_ = 'rgb8'
    else:
        raise NameError('Image format of %s is not supported' % (self.file_name))

    image = Image(format_, width, height, is_planar)
    image.reshape_to_linear()
    # Dispatch to the matching scanline reader.
    readers = {
        'gray16': Tiff.read_scanline_gray16,
        'rgb8': Tiff.read_scanline_rgb8,
    }
    try:
        reader = readers[format_]
    except KeyError:
        raise NotImplementedError
    reader(self.tif, image.buffer)
    image.reshape()
    return image
def main():
    """CLI entry point: dispatch Video/Image-to-Ascii conversions."""
    # set command argument
    parser = argparse.ArgumentParser(description="Convertion between Video, Image and Ascii")
    parser.add_argument('--file', dest='file', required=True,
                        help="Default path in Video2Ascii/files")
    parser.add_argument('--mode', dest='mode', required=True,
                        help="Video2Image or Video2Ascii or Image2Ascii")
    parser.add_argument('--output', dest='outFile', required=False,
                        help="Default path in Video2Ascii/output", default='output')
    # FIX: the defaults are numeric, but command-line values arrived as strings;
    # `type=` keeps downstream code seeing numbers either way.
    parser.add_argument('--cols', dest='cols', required=False, type=int,
                        help="Numbers of Ascii column", default=200)
    parser.add_argument('--scale', dest='scale', required=False, type=float,
                        help="vertical scaling factor - higher leads a longer face",
                        default=0.5)
    # parser.add_argument('--morelevels', dest='moreLevels', action='store_true')#default:False(less levels)

    # get command argument
    args = parser.parse_args()
    if args.mode == 'Video2Image':
        video = Video(args.file, args.outFile, args.cols, args.scale)
        video.Video2Image()
    elif args.mode == 'Video2Ascii':
        video = Video(args.file, args.outFile, args.cols, args.scale)
        video.Video2Ascii()
    elif args.mode == 'Image2Ascii':
        image = Image(args.file, args.outFile, args.cols, args.scale)
        image.Image2Ascii()
    else:
        print("Arguments not found!")
def findLines(self, edges, colorEdges):
    '''
    Finds the lines in the photo and sorts into vertical and horizontal
    '''
    # Infer lines based on edges
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, np.array([]), 100, 80)
    if lines is None:
        # FIX: HoughLinesP returns None (not an empty array) when no lines are
        # found; indexing it would raise. Report no lines of either kind.
        return [], []
    # Draw lines
    a, b, c = lines.shape
    for i in range(a):
        cv2.line(colorEdges, (lines[i][0][0], lines[i][0][1]),
                 (lines[i][0][2], lines[i][0][3]), (0, 255, 0), 2, cv2.LINE_AA)
    if board_Recognition.debug:
        # Show image with lines drawn
        Image.show("Lines", colorEdges)
    # Create line objects and sort them by orientation (horizontal or vertical)
    horizontal = []
    vertical = []
    for l in range(a):
        [[x1, y1, x2, y2]] = lines[l]
        newLine = Line(x1, x2, y1, y2)
        if newLine.orientation == 'horizontal':
            horizontal.append(newLine)
        else:
            vertical.append(newLine)
    return horizontal, vertical
def open_image(self, imageWidget: dict, channel: int) -> None:
    # Load an image into the given channel (1 or 2). `2//channel` maps
    # 1 -> 2 and 2 -> 1, i.e. it addresses the *other* channel for
    # size-compatibility checks.
    image = Image()
    if not image.path:
        # No file chosen — nothing to do.
        return
    if len(self.img) == 1:
        if f'Image {2//channel}' in self.img:
            # Other channel already loaded: the new image must match its size.
            if not image.compare(self.img[f'Image {2//channel}']['image']):
                qtw.QMessageBox.warning(self, 'failed', 'The Two Images Must be of the same size')
                return
            else :
                self.img[f'Image {channel}'] = {'image': image, 'widgets': imageWidget}
                if f'Image {channel}' not in self.available_images:
                    self.available_images[f'Image {channel}'] = f'Image {channel}'
                self.append_outputs(isOneChanneled=False)
        else :
            # Re-loading the same (only) channel: just replace it.
            self.img[f'Image {channel}'] = {'image': image, 'widgets': imageWidget}
    elif len(self.img) >= 2:
        # Both channels present: replacement must match the other channel's size.
        if not image.compare(self.img[f'Image {2//channel}']['image']):
            qtw.QMessageBox.warning(self, 'failed', 'The Two Images Must be of the same size')
            return
        self.img[f'Image {channel}']["image"] = image
        self.img[f'Image {channel}']["widgets"] = imageWidget
    else :
        # First image overall.
        self.img[f'Image {channel}'] = {'image': image, 'widgets': imageWidget}
        if f'Image {channel}' not in self.available_images:
            self.available_images[f'Image {channel}'] = f'Image {channel}'
        self.append_outputs(channel=self.available_images[f'Image {channel}'])
    # Show a scaled preview and enable the downstream controls.
    imageWidget['original'].setPixmap(image.get_pixmap().scaled(300,300, aspectRatioMode=qtc.Qt.KeepAspectRatio, transformMode=qtc.Qt.SmoothTransformation))
    imageWidget['picker'].setDisabled(False)
    self.ui.output_select.setDisabled(False)
def getAll(self, env=None):
    """Return all image links, each annotated with its image's URI."""
    links = Model.getAll(self, env)
    image_model = Image()
    for link in links:
        record = image_model.get(link.Imageid)
        # Missing image record -> empty URI rather than an attribute error.
        link['uri'] = record.uri if record is not None else ''
    return links
def Create_Instance(self, instance_name, image_id, flavor_id):
    """Create a server from an active image and the given flavor via the Nova API.

    Returns the parsed JSON metadata of the created instance.
    NOTE(review): `image_id` is accepted for interface compatibility, but the
    image href actually used is whatever get_anactiveimagehref() returns —
    confirm whether the id should be honored.
    """
    image = Image()
    image_href = image.get_anactiveimagehref()
    flavor = Flavor()
    flavor_href = flavor.get_specificflavorhref(flavor_id)
    instance = {
        "server": {
            "name": instance_name,
            "imageRef": image_href,
            "flavorRef": flavor_href
        }
    }
    params = json.dumps(instance)
    headers = {
        "X-Auth-Token": self.apitoken,
        "Content-type": "application/json"
    }
    conn = httplib.HTTPConnection(self.apiurlt[1])
    try:
        # FIX: close the connection even if the request or parsing fails.
        conn.request("POST", "%s/servers" % self.apiurlt[2], params, headers)
        request = conn.getresponse()
        instance_metadata = json.loads(request.read())
    finally:
        conn.close()
    return instance_metadata
def GetImage(self):
    # Clear the image boxes and the text input boxes.
    for i, obj in enumerate(self.inputBoxs):
        obj.clear()
    for i,obj in enumerate(self.pixBoxs):
        obj.clear()
    # Fetch the captcha image stream.
    s = requests.Session()
    req = s.get('http://railway.hinet.net/ImageOut.jsp')
    im = pilIm.open(StringIO(req.content)).convert('RGB')
    io = StringIO()
    im.save(io, format='png')
    qimg = QtGui.QImage.fromData(io.getvalue())
    self.captchaPic.qimage = qimg
    # Raw (unprocessed) captcha image.
    self.captchaPic.setPixmap(QtGui.QPixmap(qimg))
    Img = Image(req.content)
    # Get the processed character images.
    self.imgarr = Img.StartProcess()
    # Stores the QPixmap versions of the images, used for saving to disk.
    self.PixMaparr = []
    for index,img in enumerate(self.imgarr):
        try:
            height, width,channel = img.shape
            bytes = 3*width  # bytes per scan line for an RGB888 image
            qimg = QtGui.QImage(img.data, width, height,bytes, QtGui.QImage.Format_RGB888)
            pixmap = QtGui.QPixmap(qimg)
            self.pixBoxs[index].qimage = qimg
            self.pixBoxs[index].setPixmap(pixmap)
            self.PixMaparr.append(pixmap)
        except:
            # NOTE(review): bare except silently skips images that fail to
            # convert (e.g. unexpected shape) — consider narrowing.
            pass
def createImageList():
    """Build the digit-template list.

    Returns Image objects in digit order (blank, then 1..9); each carries
    its grayscale template, digit value, match threshold, and a BGR color.
    """
    # (filename stem, digit value, threshold, B, G, R) — values unchanged
    # from the previous copy-pasted version.
    specs = [
        ('blank', 0, 0.85, 255, 255, 153),
        ('one',   1, 0.85,   0,   0, 255),
        ('two',   2, 0.87,  65, 153, 255),
        ('three', 3, 0.87,  51, 255, 255),
        ('four',  4, 0.85,   0, 255, 128),
        ('five',  5, 0.87, 153, 255,  51),
        ('six',   6, 0.85, 255, 153,  51),
        ('seven', 7, 0.87, 255,   0, 127),
        ('eight', 8, 0.84, 255,  51, 255),
        ('nine',  9, 0.86,   0,   0,   0),
    ]
    imageList = []
    for stem, digit, threshold, b, g, r in specs:
        template = cv.imread('photos/%s.png' % stem, 0)  # 0 = grayscale
        imageList.append(Image(template, digit, threshold, b, g, r))
    return imageList
def importFolder(self, folderPath):
    # Parse LogFile.txt and pair each logged frame with its image file.
    # Log line format (colon-separated):
    #   frameNumber:leftMotorPower:rightMotorPower:duration:timestamp
    breakMarker = False
    with open(folderPath + "/LogFile.txt") as logFile:
        for line in logFile:
            lineData = line.split(":")
            # frameNumber = lineData[0]
            # leftMotorPower = lineData[1]
            # rightMotorPower = lineData[2]
            # duration = lineData[3]
            # timestamp = lineData[4]
            file = glob.glob(folderPath + "/" + self.framesFolder + "/FRAME_" + lineData[0] + "_*")
            if len(file) != 1:
                # Frame image missing (or ambiguous glob match) — skip it.
                print("Frame " + lineData[0] + " cannot be found")
                if len(self.dataArray) > 1:
                    # NOTE(review): once frames already exist, a gap zeroes the
                    # motor data of all LATER frames (see breakMarker below) —
                    # confirm this is intended.
                    breakMarker = True
                continue
            if breakMarker:
                # After a gap: keep the timestamp but zero power/duration.
                self.dataArray.append(
                    Image(file[0], 0.0, 0.0, 0.0, float(lineData[4].rstrip())))
            else:
                self.dataArray.append(
                    Image(file[0], float(lineData[1]), float(lineData[2]), float(lineData[3]), float(lineData[4].rstrip())))
def run(self):
    # Resize every training image listed in the CSV and append a
    # "path,label" entry for each generated file to the output list.
    with tf.Session() as sess:
        for train in self.csv_file.read():
            try:
                image = Image(
                    train[0],
                    int(config.get(CONFIG_SECTION, "IMAGE_SIZE_PX")))
                label = train[1]
                to_image_path = self.get_output_filepath(
                    image.file_path, label)
                # Record the new training sample in the output list file.
                csv = CsvFile(self.output_file_list_path)
                csv.append(to_image_path + "," + label + "\n")
                if self.overwrite == 0 and path.exists(to_image_path):
                    # Existing file and overwrite disabled: skip the write.
                    continue
                print("\nFrom : %s \nTo => %s\n" %
                      (image.file_path, to_image_path))
                image.write(sess, to_image_path)
            except Exception as e:
                print("Exception!! args:", e.args)
            except:
                # NOTE(review): this clause is only reachable for
                # BaseException subclasses (KeyboardInterrupt etc.) since
                # Exception is caught above — confirm it is still wanted.
                print("All error catch precure!! :")
                # print(sys.exc_info())
                import traceback
def output(id):
    '''Renders output page with lightbox image and related photos.
    Lightbox title is absent from the page source unless logged into
    iStockphoto so making title flat and editable on page'''
    file_contents = get_lightbox_source(id)
    exp_imgs = re.compile(r"file_thumbview_approve\\\/(\d+)\\\/")
    ubbstr = ''
    thumbs = []
    # Collect the unique image ids referenced by the lightbox page.
    ids = list(set(exp_imgs.findall(file_contents)))
    for image_id in ids:
        img = Image(None, None, None, image_id)
        ubbstr += img.get_ubb_string() + ' '
        thumbs.append({
            'id': image_id,
            'src': img.get_thumb_src(),
            'ubb': img.get_ubb_string(),
        })
    return render_template('output.html', id=id, name='Lightbox', thumbs=thumbs, ubbstring=ubbstr)
def initialize_mask(self, adaptiveThresh, img): ''' Finds border of chessboard and blacks out all unneeded pixels ''' # Find contours (closed polygons) _, contours, hierarchy = cv2.findContours(adaptiveThresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Create copy of original image imgContours = img.copy() for c in range(len(contours)): # Area area = cv2.contourArea(contours[c]) # Perimenter perimeter = cv2.arcLength(contours[c], True) # Filtering the chessboard edge / Error handling as some contours are so small so as to give zero division #For test values are 70-40, for Board values are 80 - 75 - will need to recalibrate if change #the largest square is always the largest ratio if c == 0: Lratio = 0 if perimeter > 0: ratio = area / perimeter if ratio > Lratio: largest = contours[c] Lratio = ratio Lperimeter = perimeter Larea = area else: pass # Draw contours cv2.drawContours(imgContours, [largest], -1, (0, 0, 0), 1) if board_Recognition.debug: # Show image with contours drawn Image.show("Chess Board Contours", imgContours) # Epsilon parameter needed to fit contour to polygon epsilon = 0.1 * Lperimeter # Approximates a polygon from chessboard edge chessboardEdge = cv2.approxPolyDP(largest, epsilon, True) # Create new all black image mask = np.zeros((img.shape[0], img.shape[1]), 'uint8') * 125 # Copy the chessboard edges as a filled white polygon size of chessboard edge cv2.fillConvexPoly(mask, chessboardEdge, 255, 1) # Assign all pixels that are white (i.e the polygon, i.e. the chessboard) extracted = np.zeros_like(img) extracted[mask == 255] = img[mask == 255] # remove strip around edge extracted[np.where((extracted == [125, 125, 125]).all(axis=2))] = [0, 0, 20] if board_Recognition.debug: # Show image with mask drawn Image.show("mask", extracted) return extracted
def loadH36M_all(self, batch_idx, frmStartNum=0, mode='train', replace=False, tApp=False,with_background=False):
    # Load (or build and pickle-cache) one batch of H36M frames into
    # self.frmList. Each batch covers 64*200 consecutive sample indices.
    if not hasattr(self, 'frmList'):
        self.frmList = []
    if not tApp:
        self.frmList = []
    with_back = ''
    if with_background:
        with_back = 'with_back'
    self.frmList = []
    from h36m import H36M
    data = H36M(mode)
    nSamples = data.nSamples
    nums_in_onebatch = (64*200)  # frames per cache file
    nbatch = nSamples//nums_in_onebatch
    print('Processing batch {} in {}'.format(batch_idx, nbatch))
    pickleCachePath = '{}h36m_{}_{}_{}_{}.pkl'.format(
        self.cache_base_path, mode, 'all', with_back, batch_idx)
    if os.path.isfile(pickleCachePath) and not replace:
        # Cached batch exists: load and return it directly.
        print('direct load from the cache')
        t1 = time.time()
        f = open(pickleCachePath, 'rb')
        # (self.frmList) += pickle.load(f)
        (self.frmList) += pickle.load(f)
        t1 = time.time() - t1
        print('loaded with {}s'.format(t1))
        return self.frmList
    self.frmList = []
    frmStartNum = batch_idx*nums_in_onebatch
    frmEndNum = min((batch_idx+1)*nums_in_onebatch, nSamples)
    for frmIdx in tqdm(range(frmStartNum, frmEndNum)):
        # Walk forward past frames whose image files or skeleton are missing.
        while True:
            if frmIdx==frmEndNum:
                break
            [frmPath, label] = data.getImgName_onehotLabel(frmIdx)
            frmPath_rgb = data.getImgName_RGB(frmIdx)
            skel = np.asarray(data.getSkel(frmIdx))
            if os.path.exists(frmPath) and os.path.exists(frmPath_rgb) and skel.shape != ():
                break
            else:
                frmIdx = frmIdx+1
        skel = np.asarray(data.getSkel(frmIdx))
        if skel.shape == ():
            # Still no skeleton after skipping — drop this sample.
            continue
        skel.shape = (-1)  # flatten the skeleton to 1-D
        img=[]
        if with_background:
            # Depth/background image is only loaded when requested.
            img = Image('H36M', frmPath)
        img_RGB = Image('H36M', frmPath_rgb, RGB=True)
        self.frmList.append(Frame(img, img_RGB, skel, label, frmPath))
    if not os.path.exists(self.cache_base_path):
        os.makedirs(self.cache_base_path)
    # Write the freshly built batch to the cache for the next run.
    f = open(pickleCachePath, 'wb')
    pickle.dump((self.frmList), f, protocol=pickle.HIGHEST_PROTOCOL)
    f.close()
    print('loaded with {} frames'.format(len(self.frmList)))
def main(args): try: # Initializing init = pyossimtest.Init.instance() init.initialize() #Input and open image file imageFileName = raw_input() img = Image() if img.open(imageFileName): print "Opened: " + str(imageFileName) # Display bands selection bands = list() bands = img.getBandSelection() print bands if bands is not None: i = 0 for i in range(0, len(bands)): print "band[" + i + "]: " + bands[i] i = 0 for i in range(0, len(bands)): bands[i] = len(bands) - i + 1 img.selectBands(bands) else: print "Image.getBandSelection returned Null" #Set the histogram ext = str("his") im = imageFileName.split('.') im[0] = im[0] + ext his = str(im[0]) if img.setHistogram(his): his2 = img.getHistogramFile() print "Image.getHistogramFile() result: " + his2 else: print "Image.getHistogramFile() returned false!" #Set the overview ext = str("ovr") im = imageFileName.split('.') im[0] = im[0] + ext ovr = str(im[0]) if img.setOverview(ovr): ovr2 = img.getOverviewFile() print "Image.getOverviewFile() result: " + ovr2 else: print "Image.getOverviewFile() returned false!" except (Exception): print "Caught Exception"
def convert(path):
    """Flatten the image at *path* onto a white RGB background and save as PNG."""
    from PIL import Image
    out = os.path.splitext(path)[0] + ".png"
    im = Image.open(path)
    bg = Image.new("RGB", im.size, (255, 255, 255))
    # FIX: paste()'s second positional argument is the box, not the mask;
    # `bg.paste(im, im)` passed an Image as the box and raised. Paste at the
    # origin and use the alpha channel (when present) as the mask so
    # transparent areas become white.
    if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
        bg.paste(im, (0, 0), im.convert('RGBA'))
    else:
        bg.paste(im, (0, 0))
    bg.save(out)
def deserialize(self, serial):
    """Rebuild the image collection from its serialized dict representation."""
    entries = ast.literal_eval(serial)
    for key in entries:
        # Reconstruct each image from its serialized blob and register it.
        img = Image()
        img.deserialize(entries[key])
        self.addimage(img, key)
def __init__(self, img, rect, name, transparent=0):
    """Widget backed by a loaded image, positioned by *rect*."""
    self.screen = pygame.display.get_surface()
    # Load the artwork through the shared Image loader.
    loader = Image()
    self.image = loader.load_image(img, transparent)
    self.rect = pygame.Rect(rect)
    self.name = name
    self.visible = True
    self.font_handler = FontHandler()
def _calibration_image_preparation(self, calibration_image):
    """Load the calibration image (if given) and record its center pixel."""
    if calibration_image is None:
        # Nothing to prepare.
        return
    self.image = Image(self.cparams, self.plant_db)
    self.image.load(calibration_image)
    self.calibration_params['center_pixel_location'] = self.get_image_center(
        self.image.images['current'])
    self.image.calibration_debug = self.debug
def deserialize(self,serial):
    """Rebuild the image collection from its serialized dict representation."""
    entries = ast.literal_eval(serial)
    for key in entries:
        # Reconstruct each image from its serialized blob and register it.
        img = Image()
        img.deserialize(entries[key])
        self.addimage(img, key)
def __init__(self, image, **kwargs):
    """Set up chain-code processing state for *image*."""
    # Pull configuration out of kwargs first, leaving defaults in place.
    self.connectivity = kwargs.pop("connectivity", "8")
    self.folder = kwargs.pop("folder", "chain")
    self.filename = image
    self.image = Image(filename=self.filename)
    # Load the chain code and derived per-character statistics.
    self.guz, self.theta0 = self.loadChain()
    self.gux = self.countChars()
    self.theta = numpy.copy(self.theta0)
    self.allThetas = {1: list(self.theta)}
def getImages(s, c):
    """Populate s.images with Image rows for this tweet from DB cursor *c*.

    SECURITY FIX: the tweet id was concatenated into the SQL string; use a
    parameterized query (qmark style, as used by sqlite3) instead.
    """
    s.images = []
    rows = c.execute(
        """SELECT tweet_id, file, url, checksum, downloaded
           FROM tweets WHERE tweet_id=?""", (s.tweet_id,))
    for r in rows:
        image = Image(r[0], r[1], r[2])
        image.checksum = r[3]
        image.downloaded = r[4]
        s.images.append(image)
def loadSkate(self, Fsize, frmStartNum=0, mode='train', replace=False, tApp=False):
    from skate import SKATE
    '''
    mode: if train, only save the cropped image
    replace: replace the previous cache file if exists
    tApp: append to previous loaded file if True
    '''
    # Sample Fsize evenly spaced frames from the dataset and pickle-cache
    # the resulting frame list.
    if not hasattr(self, 'frmList'):
        self.frmList = []
    if not tApp:
        self.frmList = []
    pickleCachePath = '{}skate_{}_{}.pkl'.format(
        self.cache_base_path, mode, Fsize)
    if os.path.isfile(pickleCachePath) and not replace:
        # Cached list exists: load and return it directly.
        print('direct load from the cache')
        t1 = time.time()
        f = open(pickleCachePath, 'rb')
        # (self.frmList) += pickle.load(f)
        (self.frmList) += pickle.load(f)
        t1 = time.time() - t1
        print('loaded with {}s'.format(t1))
        return self.frmList
    data = SKATE(mode)
    frmEndNum =data.nSamples
    # Step = nSamples/Fsize, so Fsize frames are sampled across the dataset.
    for frmIdx in tqdm(range(frmStartNum, int(frmEndNum/Fsize)*Fsize, int(frmEndNum/Fsize))):
        while True:
            [frmPath, label] = data.getImgName_onehotLabel(frmIdx)
            frmPath_rgb = data.getImgName_RGB(frmIdx)
            if mode=='train':
                # In training mode, skip forward until both files exist.
                if os.path.exists(frmPath) and os.path.exists(frmPath_rgb):
                    break
                else:
                    frmIdx = frmIdx+1
            else:
                break
        # skel = np.asarray(data.getSkel(frmIdx))
        # if skel.shape == ():
        #     continue
        # skel.shape = (-1)
        img = Image('Skate', frmPath)
        img_RGB = Image('Skate', frmPath_rgb, RGB=True)
        self.frmList.append(
            Frame(img, img_RGB, frmPath, label=label))
        # self.frmList[-1].saveOnlyForTrain()
    if not os.path.exists(self.cache_base_path):
        os.makedirs(self.cache_base_path)
    # Persist the freshly built list for the next run.
    f = open(pickleCachePath, 'wb')
    pickle.dump((self.frmList), f, protocol=pickle.HIGHEST_PROTOCOL)
    f.close()
    print('loaded with {} frames'.format(len(self.frmList)))
def __init__(self, **kwargs):
    """Random-circle image generator; all settings arrive as keyword args."""
    self.connectivity = kwargs.pop("connectivity", 8)
    self.size = kwargs.pop("size", 100)
    self.iters = kwargs.pop("iters", 1000)
    self.r = kwargs.pop("r", (5, 10))  # radius range (min, max)
    self.intensity = kwargs.pop("intensity", 1)
    self.params = numpy.array(list(kwargs.pop("params", (0, 0, 0))))
    # Start from an all-white square canvas.
    canvas = numpy.ones((self.size, self.size), dtype=numpy.uint8) * 255
    self.image = Image(matrix=canvas)
    self.circles = list()
def __init__(self, inv, size):
    # Build the inventory view: set up the image loader, index the inventory,
    # and compute which grid cells are occupied.
    self.img_loader = Image()
    self.seen_images = {}  # cache of already-loaded images
    self.m_x, self.m_y = size  # grid dimensions
    self.inv = inv
    self.f_inv = {}  # populated by fill_inv() below
    self.filled_cells = set()  # populated by find_filled_cells() below
    self.find_filled_cells()
    self.fill_inv()
def loadImage(self,name):
    '''Open image with given name in viewer'''
    # Release any previously loaded image before replacing it.
    if self.image:
        del self.image
    self.image = Image(name)
    if not self.image:
        # Image failed to load (Image is falsy on failure).
        return False
    self.flushImage()
    return True
def readImages(self, image_dir):
    """Return a numpy array of Image objects, one per file in *image_dir*."""
    images = []
    for fname in os.listdir(image_dir):
        entry = Image()
        entry.path = image_dir
        entry.fileName = fname
        images.append(entry)
    return np.array(images)
def _ami_backup():
    # Bake an AMI for every EC2 instance carrying the configured tag.
    ec2_filtered = EC2().filter(tag_key=tag_key, tag_value=tag_value)
    ec2_info: list = ec2_filtered.get_instance_ids_and_names()
    image = Image()
    for ec2 in ec2_info:
        name = ec2.get('instance_name')
        ec2_id = ec2.get('instance_id')
        image.bake(ec2_id=ec2_id, name=name)
def __init__(self, SI=None, Wavelengths=None, SEM=None, survey=None, fuzzing=False):
    """Bundle a CL dataset: spectrum image, SEM image and survey image.

    Wavelengths arrive in metres and are converted to nm for the spectrum
    image. FIX: the old defaults were shared mutable ``np.array([])``
    instances; use None sentinels (backward compatible — omitted arguments
    still behave as empty arrays).
    """
    SI = np.array([]) if SI is None else SI
    Wavelengths = np.array([]) if Wavelengths is None else Wavelengths
    SEM = np.array([]) if SEM is None else SEM
    survey = np.array([]) if survey is None else survey
    self.fuzzing = fuzzing
    self.SI = SpectrumImage.CLSpectrumImage(SI, Wavelengths * 1e9)  # m -> nm
    self.SEM = Image(SEM)
    self.survey = Image(survey)
def getDataFromFolder(self, path):
    """Return a numpy array of Image objects for every non-hidden file in *path*."""
    data = []
    for fname in os.listdir(path):
        if fname[0] == '.':
            # Skip hidden files (e.g. .DS_Store).
            continue
        entry = Image()
        entry.path = path
        entry.fileName = fname
        data.append(entry)
    return np.array(data)
def __init__(self, buttonNum):
    """Button widget whose artwork is chosen by its number (1, 2, or other)."""
    super().__init__()
    self.buttonNum = buttonNum
    # Numbers 1 and 2 get dedicated skins; anything else gets the fallback.
    skins = {1: "beckyBlueButton.png", 2: "orangeButton.png"}
    self.setImage(Image(skins.get(buttonNum, "yellowLemonVomitButton.png")))
    self.mouseDown = False
def getImg(allImgList, imgNum):
    """Download up to 10 images from queued URLs, extending *allImgList*.

    FIX: ``urlImgQueue.not_empty`` is a threading.Condition object and is
    therefore ALWAYS truthy — the loop never stopped on an empty queue.
    Use ``not urlImgQueue.empty()`` instead.
    """
    localImgNum = 0
    while localImgNum < 10 and not urlImgQueue.empty():
        url = urlImgQueue.get()
        thisHtml = Html(url)
        html = thisHtml.getHtml()
        if html is not None:
            thisImg = Image(html, localImgNum, allImgList, imgNum)
            localImgNum, allImgList = thisImg.getImg()
def __init__(self, image):
    # Wrap either an existing Image (sharing its texture and geometry) or a
    # raw pygame.Surface (building a texture from it).
    Box.__init__(self)
    Visible.__init__(self)
    if issubclass(image.__class__, Image):
        # Copy geometry and share the already-created texture.
        self.w = image.w
        self.h = image.h
        self.nativeW = image.nativeW
        self.nativeH = image.nativeH
        self._texture = image._texture
    elif issubclass(image.__class__, pygame.Surface):
        # Texture is created lazily by initFromSurface.
        self._texture = None
        Image.initFromSurface(self, image)
def generateImageFromFile(file_path, output_dir):
    'Generates an image file from each slide. Returns the paths to the images.'
    # Count the slides, then discard the extracted presentation folder.
    presentation = PptxFile(file_path)
    slide_count = len(presentation.getSlides())
    presentation.destroy()  # Delete extracted folder
    pptx_path = os.path.abspath(file_path)
    out_path = os.path.abspath(output_dir)
    return Image().generateFromPpt(pptx_path, out_path, slide_count)
def getImages(self):
    """ returns a dict of image_tag => Image instance """
    self.images = {}
    for info in self.client.images(all=True):
        img = Image(info["Id"])
        img.tags = [ImageTag.parse(t) for t in info["RepoTags"]]
        # Index the image under each of its (truthy) tags.
        for tag in img.tags:
            if tag:
                self.images[tag] = img
    return self.images
def getImage(self, name):
    """ returns an Image for the given name, or None if not found """
    img_data = self.client.inspect_image(name)
    if not img_data:
        return None
    cfg = img_data.get("config", {})
    result = Image(img_data["id"])
    result.tags = [name if isinstance(name, ImageTag) else ImageTag.parse(name)]
    result.entrypoint = cfg.get("Entrypoint", [None])
    result.command = cfg.get("Cmd", [None])
    return result
def Create_Instance(self,instance_name,image_id,flavor_id):
    """Create a server from an active image and the given flavor via the Nova API.

    Returns the parsed JSON metadata of the created instance.
    NOTE(review): `image_id` is accepted for interface compatibility, but the
    image href actually used is whatever get_anactiveimagehref() returns —
    confirm whether the id should be honored.
    """
    image = Image()
    image_href = image.get_anactiveimagehref()
    flavor = Flavor()
    flavor_href = flavor.get_specificflavorhref(flavor_id)
    instance = {
        "server": {
            "name": instance_name,
            "imageRef": image_href,
            "flavorRef": flavor_href}
    }
    params = json.dumps(instance)
    headers = {
        "X-Auth-Token":self.apitoken,
        "Content-type":"application/json"
    }
    conn = httplib.HTTPConnection(self.apiurlt[1])
    try:
        # FIX: close the connection even if the request or parsing fails.
        conn.request("POST", "%s/servers" % self.apiurlt[2], params, headers)
        request = conn.getresponse()
        instance_metadata = json.loads(request.read())
    finally:
        conn.close()
    return instance_metadata
def image_print(m, name='lines.png'):
    # Dump the 2-D matrix of RGB tuples `m` to a PNG file.
    shape = m3d.shape(m)
    # NOTE(review): Image.new takes (width, height) while the loops index m
    # as [row][col]; this only lines up if m3d.shape returns (width, height)
    # in that order, or the matrix is square — confirm m3d.shape's convention.
    im = Image.new('RGB', shape)
    for j in xrange(shape[0]):
        for i in xrange(shape[1]):
            im.putpixel((i,j), m[j][i])
    im.save(name)
def compute(self): im = self.get_input("Input Image") #check for input PixelType if self.has_input("Input PixelType"): inPixelType = self.get_input("Input PixelType") else: inPixelType = im.getPixelType() #check for dimension if self.has_input("Dimension"): dim = self.get_input("Dimension") else: dim = im.getDim() #setup filter inImgType = itk.Image[inPixelType._type, dim] up = self.get_input("Upper Value") lo = self.get_input("Lower Value") self.filter_ = itk.ThresholdImageFilter[inImgType].New(im.getImg()) self.filter_.SetUpper(up) self.filter_.SetLower(lo) # self.filter_.ThresholdAbove(up) self.filter_.Update() #setup output image outIm = Image() outIm.setImg(self.filter_.GetOutput()) outIm.setPixelType(inPixelType) outIm.setDim(dim) self.set_output("Output Image", outIm)
def compute(self): im = self.get_input("Input Image") #check for input PixelType if self.has_input("Input PixelType"): inPixelType = self.get_input("Input PixelType") else: inPixelType = im.getPixelType() #check for output PixelType if self.has_input("Output PixelType"): outPixelType = self.get_input("Output PixelType") else: outPixelType = inPixelType #check for dimension if self.has_input("Dimension"): dim = self.get_input("Dimension") else: dim = im.getDim() #set up filter inImgType = itk.Image[inPixelType._type, dim] outImgType = itk.Image[outPixelType._type, dim] if self.has_input("Upper Threshold"): upper_threshold = self.get_input("Upper Threshold") else: upper_threshold = 255 lower_threshold = self.get_input("Lower Threshold") if self.has_input("Outside Value"): outside_value = self.get_input("Outside Value") else: outside_value = 0 if self.has_input("Inside Value"): inside_value = self.get_input("Inside Value") else: inside_value = 255 self.filter_ = itk.BinaryThresholdImageFilter[inImgType, outImgType].New(im.getImg()) self.filter_.SetUpperThreshold(upper_threshold) self.filter_.SetLowerThreshold(lower_threshold) self.filter_.SetOutsideValue(outside_value) self.filter_.SetInsideValue(inside_value) self.filter_.Update() #setup output image outIm = Image() outIm.setImg(self.filter_.GetOutput()) outIm.setPixelType(outPixelType) outIm.setDim(dim) #set results self.set_output("Output Image", outIm) self.set_output("Filter", self) self.set_output("Output PixelType", outPixelType)
def test_accessor_methods(self):
    """Check the generated accessors agree with values computed directly."""
    # FIX: assertEquals is a deprecated alias of assertEqual (removed in
    # Python 3.12) — use the canonical name.
    self.assertEqual(self.pl.get_test_photosize(), self.s)
    self.assertEqual(self.pl.get_test_size(),
                     Image.open(self.pl.get_test_filename()).size)
    self.assertEqual(self.pl.get_test_url(),
                     self.pl.cache_url() + '/' +
                     self.pl._get_filename_for_size(self.s))
    self.assertEqual(self.pl.get_test_filename(),
                     os.path.join(self.pl.cache_path(),
                                  self.pl._get_filename_for_size(self.s)))
def __init__(self, parent=None, rect=None,
             widgetBackground=None, widgetForground=None,
             start=0, orientation=Direction.Horizontal):
    """Bar widget built from a background and a foreground image.

    FIX: the old signature used a shared mutable default
    (``rect=sf.Rectangle()``), so every instance created without an explicit
    rect shared ONE rectangle object. A ``None`` sentinel creates a fresh
    rectangle per instance and is backward compatible.
    """
    if rect is None:
        rect = sf.Rectangle()
    Widget.__init__(self, parent, rect)
    self._currentValue = start
    self._widgetBackground = widgetBackground
    if self._widgetBackground:
        self._widgetBackground.setParent(self)
    else:
        # Default background: 20x20 solid black image.
        self._widgetBackground = Image(self, sf.Image.create(20, 20, sf.Color.BLACK))
    self._widgetForground = widgetForground
    if self._widgetForground:
        self._widgetForground.setParent(self)
    else:
        # Default foreground: 20x20 solid green image.
        self._widgetForground = Image(self, sf.Image.create(20, 20, sf.Color.GREEN))
    # The bar's child images should never take focus themselves.
    self._widgetBackground.canFocus = False
    self._widgetForground.canFocus = False
    self._orientation = orientation
    # Re-assign rect to trigger the property setter's layout logic.
    self.rect = self.rect
def getImage(self, name):
    """ returns an Image for the given name, or None if not found """
    img_data = self.client.inspect_image(name)
    if not img_data:
        return None
    result = Image.fromJson(img_data)
    # Record the requested name as the image's sole tag.
    result.tags = [name if isinstance(name, ImageTag) else ImageTag.parse(name)]
    return result
def __init__(self,format='%d-%m-%Y'):
    # Composite date-entry widget: a text box plus a pop-up calendar link
    # and a "Today" shortcut.
    self.format = format
    self.tbox = TextBox()
    self.tbox.setVisibleLength(10)
    # assume valid sep is - / . or nothing
    if format.find('-') >= 0:
        self.sep = '-'
    elif format.find('/') >= 0:
        self.sep = '/'
    elif format.find('.') >= 0:
        self.sep = '.'
    else:
        self.sep = ''
    # self.sep = format[2] # is this too presumptious?
    self.calendar = Calendar()
    img = Image("icon_calendar.gif")
    img.addStyleName("calendar-img")
    self.calendarLink = HyperlinkImage(img)
    self.todayLink = Hyperlink('Today')
    self.todayLink.addStyleName("calendar-today-link")
    #
    # lay it out
    #
    hp = HorizontalPanel()
    hp.setSpacing(2)
    vp = VerticalPanel()
    hp.add(self.tbox)
    vp.add(self.calendarLink)
    vp.add(self.todayLink)
    #vp.add(self.calendar)
    hp.add(vp)
    Composite.__init__(self)
    self.initWidget(hp)
    #
    # done with layout, so now set up some listeners
    #
    self.tbox.addFocusListener(self)  # hook to onLostFocus
    self.calendar.addSelectedDateListener(getattr(self,"onDateSelected"))
    self.todayLink.addClickListener(getattr(self,"onTodayClicked"))
    self.calendarLink.addClickListener(getattr(self,"onShowCalendar"))
def compute(self): im = self.get_input("Input Image") #check for input PixelType if self.has_input("Input PixelType"): inPixelType = self.get_input("Input PixelType") else: inPixelType = im.getPixelType() #check for dimension if self.has_input("Dimension"): dim = self.get_input("Dimension") else: dim = im.getDim() #set up filter inImgType = itk.Image[inPixelType._type, dim] try: self.filter_ = itk.CurvatureAnisotropicDiffusionImageFilter[inImgType, inImgType].New(im.getImg()) except: raise ModuleError(self, "Filter requires a decimal PixelType") #default values are recommended if self.has_input("Iterations"): iterations = self.get_input("Iterations") else: iterations = 5 if self.has_input("TimeStep"): timestep = self.get_input("TimeStep") else: if dim == 2: timestep = 0.125 else: timestep = 0.0625 if self.has_input("Conductance"): conductance = self.get_input("Conductance") else: conductance = 3.0 self.filter_.SetNumberOfIterations(iterations) self.filter_.SetTimeStep(timestep) self.filter_.SetConductanceParameter(conductance) self.filter_.Update() #setup output image outIm = Image() outIm.setImg(self.filter_.GetOutput()) outIm.setPixelType(inPixelType) outIm.setDim(dim) self.set_output("Output Image", outIm) self.set_output("Filter", self)
def compute(self):
    """Run itk.ConfidenceConnectedImageFilter on the input image.

    PixelTypes and Dimension default to values read from the input
    image when their ports are not connected; the seed comes from
    Seed2D when present, otherwise Seed3D.
    """
    im = self.get_input("Input Image")

    def pick(port, fallback):
        # Value from the port when connected, otherwise the lazily
        # evaluated fallback (a zero-argument callable).
        if self.has_input(port):
            return self.get_input(port)
        return fallback()

    inPixelType = pick("Input PixelType", im.getPixelType)
    outPixelType = pick("Output PixelType", lambda: inPixelType)
    dim = pick("Dimension", im.getDim)
    seed = (self.get_input("Seed2D") if self.has_input("Seed2D")
            else self.get_input("Seed3D"))
    replace = self.get_input("Replace Value")
    multiplier = self.get_input("Multiplier")
    iterations = self.get_input("Iterations")
    radius = self.get_input("Neighborhood Radius")
    # setup filter
    inImgType = itk.Image[inPixelType._type, dim]
    outImgType = itk.Image[outPixelType._type, dim]
    self.filter_ = itk.ConfidenceConnectedImageFilter[
        inImgType, outImgType].New(im.getImg())
    self.filter_.SetReplaceValue(replace)
    self.filter_.SetMultiplier(multiplier)
    self.filter_.SetNumberOfIterations(iterations)
    self.filter_.SetInitialNeighborhoodRadius(radius)
    self.filter_.SetSeed(seed.ind_)
    self.filter_.Update()
    # setup output image
    outIm = Image()
    outIm.setImg(self.filter_.GetOutput())
    outIm.setPixelType(outPixelType)
    outIm.setDim(dim)
    self.set_output("Output Image", outIm)
    self.set_output("Output PixelType", outPixelType)
def compute(self):
    """Run itk.RegionOfInterestImageFilter on the input image.

    The region comes from "Input Region" when connected; otherwise a
    region is built from "Region Size" and setStart().  PixelTypes and
    dimensions default to values read from the input image.
    """
    im = self.getInputFromPort("Input Image")
    # check for input PixelType
    if self.hasInputFromPort("Input PixelType"):
        inPixelType = self.getInputFromPort("Input PixelType")
    else:
        inPixelType = im.getPixelType()
    # check for output PixelType
    if self.hasInputFromPort("Output PixelType"):
        outPixelType = self.getInputFromPort("Output PixelType")
    else:
        outPixelType = inPixelType
    # check for dimension
    if self.hasInputFromPort("Input Dimension"):
        dim = self.getInputFromPort("Input Dimension")
    else:
        dim = im.getDim()
    if self.hasInputFromPort("Output Dimension"):
        outDim = self.getInputFromPort("Output Dimension")
    else:
        outDim = dim
    # set up filter
    inImgType = itk.Image[inPixelType._type, dim]
    outImgType = itk.Image[outPixelType._type, outDim]
    self.filter_ = itk.RegionOfInterestImageFilter[inImgType, outImgType].New()
    # TODO this is not correct, needs fixing
    if self.hasInputFromPort("Input Region"):
        self.region_ = self.getInputFromPort("Input Region").region_
    else:
        # BUGFIX: the original referenced an undefined name `indim`,
        # raising NameError whenever "Input Region" was not connected;
        # the input dimension `dim` is what was intended.
        self.region_ = itk.ImageRegion[dim]()
        self.setStart(dim)
        self.region_.SetSize(self.getInputFromPort("Region Size").size_)
    self.filter_.SetRegionOfInterest(self.region_)
    self.filter_.SetInput(im.getImg())
    self.filter_.Update()
    # setup output image
    outIm = Image()
    outIm.setImg(self.filter_.GetOutput())
    outIm.setPixelType(outPixelType)
    outIm.setDim(outDim)
    self.setResult("Output Image", outIm)
    self.setResult("Output PixelType", outPixelType)
    self.setResult("Output Dimension", outDim)
    self.setResult("Filter", self)
def compute(self):
    """Run itk.IsolatedWatershedImageFilter on the input image.

    Seeds, replace values and threshold are all optional and only
    forwarded to the filter when their port is connected.
    """
    im = self.get_input("Input Image")
    # PixelTypes / dimension default to values read from the input.
    inPixelType = (self.get_input("Input PixelType")
                   if self.has_input("Input PixelType")
                   else im.getPixelType())
    outPixelType = (self.get_input("Output PixelType")
                    if self.has_input("Output PixelType")
                    else inPixelType)
    dim = (self.get_input("Dimension")
           if self.has_input("Dimension")
           else im.getDim())
    # set up filter
    inImgType = itk.Image[inPixelType._type, dim]
    outImgType = itk.Image[outPixelType._type, dim]
    self.filter_ = itk.IsolatedWatershedImageFilter[
        inImgType, outImgType].New(im.getImg())
    # Optional ports: (port name, filter setter, value adapter).
    optional_ports = (
        ("Seed1", self.filter_.SetSeed1, lambda s: s.ind_),
        ("Seed2", self.filter_.SetSeed2, lambda s: s.ind_),
        ("ReplaceValue1", self.filter_.SetReplaceValue1, lambda v: v),
        ("ReplaceValue2", self.filter_.SetReplaceValue2, lambda v: v),
        ("Threshold", self.filter_.SetThreshold, lambda v: v),
    )
    for port, setter, adapt in optional_ports:
        if self.has_input(port):
            setter(adapt(self.get_input(port)))
    self.filter_.Update()
    # setup output image
    outIm = Image()
    outIm.setImg(self.filter_.GetOutput())
    outIm.setPixelType(outPixelType)
    outIm.setDim(dim)
    self.set_output("Output Image", outIm)
    self.set_output("Output PixelType", outPixelType)
    self.set_output("Filter", self)
def scale_one(size, smooth, sourceImage, targetImage):
    """Shrink sourceImage so both sides fit within `size`, saving to targetImage.

    Returns Result(1, 0) when the source already fits (saved unchanged),
    or Result(0, 1) when a scaled copy was produced.  `smooth` selects
    smooth scaling over fast subsampling.
    """
    src = Image.from_file(sourceImage)
    # Guard clause: nothing to do when the image already fits.
    if src.width <= size and src.height <= size:
        src.save(targetImage)
        return Result(1, 0)
    if smooth:
        factor = min(size / src.width, size / src.height)
        shrunk = src.scale(factor)
    else:
        step = int(math.ceil(max(src.width / size, src.height / size)))
        shrunk = src.subsample(step)
    shrunk.save(targetImage)
    return Result(0, 1)
def generateImageFromData(data, image_names):
    """Generate an image file from each slide.

    data: binary contents of a .pptx slide or deck.
    image_names: destination path for each generated slide image.
    Returns image_names.  Raises Exception (after removing the generated
    files) when the number of slides differs from len(image_names).
    """
    temp_folder = relative_path("Files/Temp")
    # Renamed from `bin`, which shadowed the builtin.
    bin_path = temp_folder + "/File.bin"
    pptx = temp_folder + "/File.pptx"
    if not os.path.exists(temp_folder):
        os.makedirs(temp_folder)
    # Close the handle promptly (the original leaked an open file
    # object), then give the payload its .pptx extension.
    with open(bin_path, "wb") as f:
        f.write(data)
    shutil.move(bin_path, pptx)
    presentation = PptxFile(pptx)
    no_of_slides = len(presentation.getSlides())
    presentation.destroy()  # Delete extracted folder
    image = Image()
    pptx_dir = os.path.abspath(pptx)
    temp_paths = image.generateFromPpt(pptx_dir, temp_folder, no_of_slides)
    n = len(temp_paths)
    if len(image_names) != n:
        # Clean up the partial output before failing.
        for path in temp_paths:
            os.remove(path)
        raise Exception("Amount of generated slides differs from length of input parameter")
    for i in xrange(0, n):
        shutil.move(temp_paths[i], image_names[i])
    os.remove(pptx)
    return image_names
def compute(self):
    """Apply itk.GradientAnisotropicDiffusionImageFilter to the input image.

    Timestep, Conductance and Iterations are required ports; PixelTypes
    and Dimension default to values read from the input image.  Raises
    ModuleError when the filter cannot be instantiated for the pixel type.
    """
    im = self.get_input("Input Image")
    # check for input PixelType
    if self.has_input("Input PixelType"):
        inPixelType = self.get_input("Input PixelType")
    else:
        inPixelType = im.getPixelType()
    # check for output PixelType
    if self.has_input("Output PixelType"):
        outPixelType = self.get_input("Output PixelType")
    else:
        outPixelType = inPixelType
    # check for dimension
    if self.has_input("Dimension"):
        dim = self.get_input("Dimension")
    else:
        dim = im.getDim()
    # set up filter
    inImgType = itk.Image[inPixelType._type, dim]
    outImgType = itk.Image[outPixelType._type, dim]
    timestep = self.get_input("Timestep")
    conductance = self.get_input("Conductance")
    iterations = self.get_input("Iterations")
    try:
        self.filter_ = itk.GradientAnisotropicDiffusionImageFilter[
            inImgType, outImgType].New(im.getImg())
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed.
        raise ModuleError(self, "Requires Decimal PixelType")
    self.filter_.SetTimeStep(timestep)
    self.filter_.SetConductanceParameter(conductance)
    self.filter_.SetNumberOfIterations(iterations)
    self.filter_.Update()
    # setup output image
    outIm = Image()
    outIm.setImg(self.filter_.GetOutput())
    outIm.setPixelType(outPixelType)
    outIm.setDim(dim)
    # set results
    self.set_output("Output Image", outIm)
    self.set_output("Filter", self)
    self.set_output("Output PixelType", outPixelType)
def scale_one(size, smooth, scr_image, dest_image):
    """Shrink scr_image so both sides fit within `size`, saving to dest_image.

    Returns Result(copied, scaled, dest_image): (1, 0, ...) when the
    source already fits and is saved unchanged, (0, 1, ...) when a
    scaled copy was written.  `smooth` selects smooth scaling over
    fast subsampling.
    """
    img = Image.from_file(scr_image)
    needs_resize = img.width > size or img.height > size
    if needs_resize:
        if smooth:
            resized = img.scale(min(size / img.width, size / img.height))
        else:
            resized = img.subsample(
                int(math.ceil(max(img.width / size, img.height / size))))
        resized.save(dest_image)
        return Result(0, 1, dest_image)
    img.save(dest_image)
    return Result(1, 0, dest_image)
def compute(self):
    """Run itk.ConnectedThresholdImageFilter on the input image.

    PixelTypes and Dimension default to values read from the input
    image; the seed comes from Seed2D when connected, otherwise Seed3D.
    """
    im = self.get_input("Input Image")
    # Optional ports fall back to values derived from the input image.
    src_pixel = (self.get_input("Input PixelType")
                 if self.has_input("Input PixelType")
                 else im.getPixelType())
    dst_pixel = (self.get_input("Output PixelType")
                 if self.has_input("Output PixelType")
                 else src_pixel)
    ndim = (self.get_input("Dimension")
            if self.has_input("Dimension")
            else im.getDim())
    seed_port = "Seed2D" if self.has_input("Seed2D") else "Seed3D"
    seed = self.get_input(seed_port)
    replace = self.get_input("Replace Value")
    t_lower = self.get_input("Lower Value")
    t_upper = self.get_input("Upper Value")
    # setup filter
    in_type = itk.Image[src_pixel._type, ndim]
    out_type = itk.Image[dst_pixel._type, ndim]
    self.filter_ = itk.ConnectedThresholdImageFilter[
        in_type, out_type].New(im.getImg())
    self.filter_.SetSeed(seed.ind_)
    self.filter_.SetReplaceValue(replace)
    self.filter_.SetLower(t_lower)
    self.filter_.SetUpper(t_upper)
    self.filter_.Update()
    # setup output image
    outIm = Image()
    outIm.setImg(self.filter_.GetOutput())
    outIm.setPixelType(dst_pixel)
    outIm.setDim(ndim)
    self.set_output("Output Image", outIm)
    self.set_output("Output PixelType", dst_pixel)
def GenThumb(self, size=160):
    """Generate a JPEG thumbnail for the video.

    Dumps the first frame with mplayer into a temp directory, shrinks
    it with PIL to fit a size x size box, sets self.thumbName to
    "<fullPath stem>--Thumb.jpg", and removes the temp directory.
    """
    tempdir = tempfile.mkdtemp()
    sub = subprocess.Popen(
        [mplayer, self.fullPath, "-vo", "jpeg:outdir=%s" % tempdir,
         "-ao", "null", "-frames", "1"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    sub.wait()
    listThumb = [os.path.join(tempdir, i) for i in os.listdir(tempdir)]
    if len(listThumb) != 1:
        # mplayer should produce exactly one frame; warn otherwise.
        # (Typo fixed: "ther" -> "there".)
        print ("Unexpected result ... have a look at %s there should only be one jpeg image" % tempdir)
    self.thumbName = OP.splitext(self.fullPath)[0] + "--Thumb.jpg"
    # NOTE(review): if mplayer produced no frame at all this still
    # raises IndexError, as before — confirm desired failure mode.
    img = Image.open(listThumb[0])
    img.thumbnail((size, size))
    img.save(self.thumbName)
    for i in listThumb:
        os.remove(i)
    os.rmdir(tempdir)
def compute(self):
    """Apply itk.DiscreteGaussianImageFilter to the input image.

    Gaussian Variance and Max Kernel Width are required ports;
    PixelTypes and Dimension default to values read from the input
    image.  Raises ModuleError when the filter cannot be instantiated
    for the pixel type.
    """
    im = self.get_input("Input Image")
    # check for input PixelType
    if self.has_input("Input PixelType"):
        inPixelType = self.get_input("Input PixelType")
    else:
        inPixelType = im.getPixelType()
    # check for output PixelType
    if self.has_input("Output PixelType"):
        outPixelType = self.get_input("Output PixelType")
    else:
        outPixelType = inPixelType
    # check for dimension
    if self.has_input("Dimension"):
        dim = self.get_input("Dimension")
    else:
        dim = im.getDim()
    # set up filter
    inImgType = itk.Image[inPixelType._type, dim]
    outImgType = itk.Image[outPixelType._type, dim]
    gaussian_variance = self.get_input("Gaussian Variance")
    max_kernel_width = self.get_input("Max Kernel Width")
    try:
        self.filter_ = itk.DiscreteGaussianImageFilter[
            inImgType, outImgType].New(im.getImg())
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed.
        raise ModuleError(self, "Requires Signed PixelType")
    self.filter_.SetVariance(gaussian_variance)
    self.filter_.SetMaximumKernelWidth(max_kernel_width)
    self.filter_.Update()
    # setup output image
    outIm = Image()
    outIm.setImg(self.filter_.GetOutput())
    outIm.setPixelType(outPixelType)
    outIm.setDim(dim)
    # set results
    self.set_output("Output Image", outIm)
    self.set_output("Filter", self)
    self.set_output("Output PixelType", outPixelType)