def __init__(self, starterWindow):
    super(phaseMonster, self).setupUi(starterWindow)
    self.imageName = None  # holds the image path
    self.imageFormat = None  # holds the image format
    self.mixer = image.mixer2Image()  # holds the mixer object from image.py
    self.image1 = image.image()  # holds the image object from image.py
    self.image2 = image.image()  # holds the image object from image.py
    self.out1 = image.image()  # holds the image object of the first output
    self.out2 = image.image()  # holds the image object of the second output
    self.imagesShapes = []  # holds the shapes of loaded images so they can be checked
    self.images = [self.image1, self.image2]  # not used; may be deleted later
    self.logger = logging.getLogger()
    self.logger.setLevel(logging.DEBUG)

    # UI components
    self.imageWidgets = [
        self.imageOneOrigin, self.imageTwoOrigin, self.imageOneMods,
        self.imageTwoMods, self.output1, self.output2
    ]
    self.sliders = [self.slider1, self.slider2]
    self.showCmbxs = [self.image1Cmbx, self.image2Cmbx]
    self.mixerCmbxs = [self.mixerCmbx1, self.mixerCmbx2]
    self.componentCmbxs = [self.component1, self.component2]
    self.loadbtns = [self.img1Load, self.img2Load]

    # imageView settings
    for img in self.imageWidgets:
        img.ui.histogram.hide()
        img.ui.roiBtn.hide()
        img.ui.menuBtn.hide()
        img.ui.roiPlot.hide()
        img.view.setBackgroundColor([255.0, 255.0, 255.0])

    # button settings
    for btn in self.loadbtns:
        btn.clicked.connect(partial(self.loadImage, btn.property('image')))
    for bx in self.showCmbxs:
        bx.activated.connect(partial(self.selectComponents, bx.property('image')))

    self.mixerOutput.currentTextChanged.connect(self.chosenOutput)
    for mxrbx in self.mixerCmbxs:
        mxrbx.currentTextChanged.connect(self.chosenOutput)
        mxrbx.currentTextChanged.connect(self.setComponents)
    for component in self.componentCmbxs:
        component.currentTextChanged.connect(self.chosenOutput)
    for slider in self.sliders:
        slider.valueChanged.connect(self.chosenOutput)

def main(arg):
    hostname = arg['-s']
    version = arg['-v']
    password = arg['-p']
    username = arg['-u']
    vm = arg["--vm"]
    logfile_name = 'Image_firstnet_provision_patch_%s.log' % hostname
    fp, out, err, log = cf.logfile_name_gen_open(logfile_name)
    image.cleanup(hostname, username, password)
    sys.stdout.flush()
    time.sleep(300)
    print "Will image the host %s" % hostname
    sys.stdout.flush()
    image.image(hostname, version, vm)
    ips = configure_firstnet.configure_firstnet(hostname, version, vm)
    print "Finished configuring firstnet; you can use the following IPs to configure:"
    print ips
    sys.stdout.flush()
    if not configure_firstnet.is_dcs(hostname, version):
        print "This host is an OAK stack, please continue to deploy to %s manually!" % hostname
        sys.stdout.flush()
    else:
        print "Will do the provision and patch to the latest version!"
        sys.stdout.flush()
        host = oda_lib.Oda_ha(ips[0], "root", "welcome1")
        d_p_p.provision_patch(host)
        sys.stdout.flush()
    cf.closefile(fp, out, err)
    print "Done, please check the log %s for details!" % log

def __init__(self, parent=None):
    super(MyWidget, self).__init__(parent)
    self.setupUi(self)
    self.beach_dir = ""
    self.forest_dir = ""
    self.d = weights.data("weights")
    self.ow, self.oh, self.inpf, self.inpb, self.w, self.m = self.d.load()
    if self.ow == -1:
        QtGui.QMessageBox.about(self, "ERROR", "error in weights file")
        exit(0)
    self.s = som.som(self.ow * self.oh * 3, self.inpf + self.inpb, 0.01)
    self.s.init()
    print self.ow * self.oh * 3 * (self.inpf + self.inpb), " ", len(self.m)
    self.s.put_weights(self.w, self.m)
    self.img1 = image.image(self.ow, self.oh)
    self.img2 = image.image(self.ow, self.oh)
    self.text = ""
    self.f_dir = ""
    self.b_dir = ""
    self.timer = QtCore.QTimer()
    self.timer.setInterval(1000)
    self.timer.timeout.connect(self.re_write)
    self.timer.start()
    self.start.clicked.connect(self.start_func)
    self.open_b.clicked.connect(self.open_b_dir)
    self.open_f.clicked.connect(self.open_f_dir)
    self.run = False

def append_tags(imagepath, width, height, title, url, query, result):
    tags = list()
    # add query
    tags.append(query)
    # add attributes from Face++
    tags.append(get_zn_from_dict(std_format(result['age']['value'])))
    if result['race']['confidence'] > 80:
        tags.append(get_zn_from_dict(std_format(result['race']['value'])))
    if result['gender']['confidence'] > 80:
        tags.append(get_zn_from_dict(std_format(result['gender']['value'])))
    if result['smiling']['value'] > 60:
        tags.append(get_zn_from_dict(std_format('smile')))
    # parse the title and add its words to the tags
    title_tag_list = text2word_list(title)
    for t in title_tag_list:
        tags.append(t)
    # remove repeated elements
    tags = list(set(tags))
    # save img to mongodb
    if len(title_tag_list) > 20:
        img = image(imagepath, query, url, width, height, tags)
    else:
        img = image(imagepath, title, url, width, height, tags)
    img.save()
    # save tags info
    updateTags(tags, imagepath)
    return

def initiate():
    fixedNumber = ""
    print request.form['number']
    print "A"
    print request.values.get('number')
    for elem in request.values.get('number'):
        if elem.isnumeric():
            fixedNumber = fixedNumber + str(elem)
    print "B"
    if len(fixedNumber) == 10:
        fixedNumber = "+1" + fixedNumber
    elif len(fixedNumber) == 11:
        fixedNumber = "+" + fixedNumber
    print "C"
    r = Record(fixedNumber, request.values.get("image"))
    phones = Record.query.filter_by(phone=fixedNumber).all()
    print "D"
    if not phones:  # .all() returns a list, so check for emptiness rather than None
        db.session.add(r)
        db.session.commit()
    print "E"
    image(fixedNumber, request.values.get("image"))
    client = TwilioRestClient(twilio_account_sid, twilio_auth_token)
    message = client.messages.create(
        to=fixedNumber,
        from_=twilio_number,
        body="Hey! Want to HEAR what your PICTURE looks like? Send \"yes\" to this SMS!")
    return "None"

def generateFresh(self):
    after = self.request.args.get('after')
    if after is None:
        msg = None
        if g.user:
            msg = Database.query(
                '''select image from uploads ul
                   where (select score from imagescores where image=ul.image) < ?
                   and ul.image not in (select image from likes where userid=?)
                   order by (select score from imagescores where image=ul.image)
                   limit ?''',
                [config.get('freshCeil'), session['userid'], config.get('perPage')])
        else:
            msg = Database.query(
                '''select image from uploads ul
                   where (select score from imagescores where image=ul.image) < ?
                   order by (select score from imagescores where image=ul.image)
                   limit ?''',
                [config.get('freshCeil'), config.get('perPage')])
        l = [image(f['image']) for f in msg]
        return l

    score = Database.query('select score from imagescores where image=?',
                           [after], one=True)['score']
    if g.user:
        msg = Database.query(
            '''select caption, image, image_url from uploads ul
               where (select score from imagescores where image=ul.image) < ?
               and (select score from imagescores where image=ul.image) < ?
               and ul.image not in (select image from likes where userid=?)
               order by (select score from imagescores where image=ul.image)
               limit ?''',
            [config.get('freshCeil'), score, session['userid'], config.get('perPage')])
    else:
        msg = Database.query(
            '''select caption, image, image_url from uploads ul
               where (select score from imagescores where image=ul.image) < ?
               and (select score from imagescores where image=ul.image) < ?
               order by (select score from imagescores where image=ul.image)
               limit ?''',
            [config.get('freshCeil'), score, config.get('perPage')])
    l = [image(f['image']) for f in msg]
    return l  # return the image objects, matching the first branch

def setType(self, type):
    # Delete old panels
    for i in self.GetChildren():
        if i.GetName() in ["imagePanel", "customPanel", "colorPanel",
                           "rotationPanel", "textPanel", "vartextPanel"]:
            i.Destroy()
    if type == "color":
        self.color = color(self)
        self.customSizer.Add(self.color, 1, wx.EXPAND | wx.ALL, 4)
    if type == "image":
        self.image = image(self)
        self.customSizer.Add(self.image, 1, wx.EXPAND | wx.ALL, 4)
    if type == "text":
        self.rotation = rotation(self)
        self.customSizer.Add(self.rotation, 0, wx.EXPAND | wx.ALL, 4)
        self.text = text(self)
        self.customSizer.Add(self.text, 1, wx.EXPAND | wx.ALL, 4)
    if type == "vartext":
        self.rotation = rotation(self)
        self.customSizer.Add(self.rotation, 0, wx.EXPAND | wx.ALL, 4)
        self.vartext = vartext(self)
        self.customSizer.Add(self.vartext, 1, wx.EXPAND | wx.ALL, 4)
    self.SetSizerAndFit(self.sizer)
    self.Layout()
    self.parent.Layout()
    self.parent.propertiesPanel.Layout()

def imread(imageFile, dimension=None, fileFormat='', crop=None, module=''):
    newImage = image.image(imageFile, dimension=dimension, fileFormat=fileFormat,
                           crop=crop, module=module)
    return newImage

def __createFeed(self, hot, recs):
    if hot == []:
        l = [f for f in recs]
        random.shuffle(l)
        random.shuffle(l)
        return [image(f) for f in l]
    a = set()
    lHot = hot[-1]
    hot = hot[:-1]
    [a.add(f.permID) for f in hot]
    [a.add(f.permID) for f in recs]
    l = [f for f in a]
    random.shuffle(l)
    random.shuffle(l)
    l = [image(f) for f in l]
    l.append(lHot)
    return l

def __init__(self, rootdir="caltech", dirlist=None):
    self.categories = list(set(dirlist))
    self.imgs = {}
    for i in self.categories:
        tmplist = []
        for j in glob.glob(rootdir + "/" + i + "/*.jpg"):
            tmplist.append(im.image(j, i))
        self.imgs[i] = c.copy(tmplist)

def openfn(self):
    dir_path = filedialog.askdirectory()
    self.img_index = 0
    self.images = [
        image(imgPath, self.master)
        for imgPath in glob(os.path.join(dir_path, '*.png'))
    ]
    self.drawImage()

def fileOpen(self):
    self.openFile = tkFileDialog.askopenfilename(filetypes=(('TARGA Image', '*.tga'),))
    if self.openFile:
        TGA = image(self.openFile)
        self.fnLabel.config(state=NORMAL)
        self.fnLabel.delete('1.0', END)
        self.fnLabel.insert('1.0', self.openFile)
        self.fnLabel.config(state=DISABLED)

def parse_image(self, path):
    pathes = [
        path + global_vars.split + f for f in os.listdir(path)
        if os.path.isfile(path + global_vars.split + f)
        and os.path.splitext(f)[1] in global_vars.image_valid_extension
    ]
    images = [image(p) for p in pathes]
    return images

def accum_mean(self, source, num_channel, out_size):
    print 'Loading mean', source
    frame_num = self._get_frame_num(source)
    self.count += frame_num
    for c in range(num_channel):
        for i in range(frame_num):
            image_name = os.path.join(source, 'ch%i_%i_image.jpg' % (c, i))
            self.image_sums['ch%i_image' % (c,)] += \
                image(image_name).load(out_size, out_size)

def setup(directory):
    # gather all the ms subdirectories in mslist
    mslist = [
        name for name in os.listdir(directory)
        if os.path.isdir(os.path.join(directory, name))
    ]
    images = []
    for d in mslist:
        images.append(image.image(d, directory + "/" + d))  # format mslist
    return images
    # unreachable leftover kept for reference:
    # images = imagearray.imageArray(directory)
    # return images

def __init__(self, imagecsvs=None):
    if imagecsvs is not None:
        rawfile = pd.read_csv(imagecsvs)
        self.categories = list(set(rawfile['category'].tolist()))
        self.imgs = {}
        for i in self.categories:
            tmplist = []
            for j in rawfile[rawfile['category'] == i]['paths'].tolist():
                tmplist.append(im.image(j, i))
            self.imgs[i] = c.copy(tmplist)

def openImages(imgRange, imgDir, imgParams, imgSet):
    images = []
    for i in imgRange:
        images.append(
            im.image(imgDir[imgSet]['dir'] + str(i) + imgDir[imgSet]['ext'],
                     imgParams))
    return images

def predict_and_visualize2(self, img, newimg):
    dummyimg = image.image("dummyimage.jpg", "None")
    dummyimg2 = image.image("dummyimage2.jpg", "None")
    dummyimg3 = image.image("dummyimage3.jpg", "None")
    dummyimg4 = image.image("dummyimage4.jpg", "None")
    if img.data.shape[0] > 600:
        # integer width so cv2.resize accepts the target size
        b = img.data.shape[1] * 600 // img.data.shape[0]
        img.data = cv2.resize(img.data, (b, 600))
    self.getDescriptors([dummyimg, dummyimg2, img, dummyimg3, dummyimg4])
    if self.pca_before_kmeans is True:
        self.PCA_test()
        self.KMeans_test()
    else:
        self.KMeans_test()
        self.PCA_test()
    prediction = self.predict()
    ptext = prediction[2]
    cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
    pt = (0, 3 * img.data.shape[0] // 4)
    cv2.putText(img.data, ptext, pt, cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 2,
                [0, 255, 0], 2)
    cv2.imwrite(newimg, img.data)

def load_image(filename, label):
    pic_size = (28, 28)
    img = image.image(filename)
    img.resize(pic_size)
    data = []
    width, height = pic_size
    for y in range(height):
        for x in range(width):
            p = img.img.getpixel((x, y))
            # convert RGB to grayscale using the standard luminance weights
            data.append(p[0] * 0.2989 + p[1] * 0.5870 + p[2] * 0.1140)
    return (np.array(data), label)

def load_tledata(dirs, nprims, masks, pps, yn, vn):
    retval = {}
    for nprim, dirr in zip(nprims, dirs):
        fyn = mkpath(dirr, yn)
        fvn = mkpath(dirr, vn)
        # if not os.path.isfile(fyn) or not os.path.isfile(fvn):
        #     fyn = mkpath(dirr, yn[:-4] + '10.mhd')
        #     fvn = mkpath(dirr, vn[:-4] + '10.mhd')
        imagey = image.image(fyn, pps=pps, nprim=nprim, type='yield')
        imagev = image.image(fvn, pps=pps, nprim=nprim, type='var')
        imagey.applymask(*masks)
        imagev.applymask(*masks)
        # imagev.divide(10)  # divide variance by number of jobs in batch
        # print "assuming 10 jobs in batch..."
        retval[nprim] = {}
        retval[nprim]['yield'] = imagey
        retval[nprim]['var'] = imagev
    return retval

async def on_message(ctx):
    # If the message starts with a command
    # Add more if-statements when more commands are added
    if ctx.content.startswith('.sanders'):
        # Removing the prefix
        msg = ctx.content.replace(".sanders", "").strip()
        # If nothing else is after the prefix
        if len(msg) == 0:
            await ctx.channel.send("Make sure to enter a valid string after `.sokkafy`! For example, `.sokkafy please stop`")
            return
        # If the text goes out of the permitted space in the image
        elif len(msg) > 27:
            await ctx.channel.send("Make sure that your string is under 28 characters!")
            return
        # Getting the avatar url of the message sender
        url = str(ctx.author.avatar_url).replace("1024", "256")
        print(url)
        # Deriving a filename from the URL
        filename = url.split('/')[-1].split("?")[0].replace("webp", "jpg")
        # Downloading the image
        img_data = requests.get(url).content
        with open(f'pfps/{filename}.jpg', 'wb') as handler:
            handler.write(img_data)
        # Resetting the sokka object by redefining it
        sokka = image("pls.jpg")
        # Setting the self.txt variable to the value of the message
        sokka.set_txt(msg)
        # Pasting the profile picture
        sokka.paste(filename)
        # Writing to the image
        sokka.write()
        # Saving to /downloaded
        name = sokka.save()
        # Getting the file to send from the /downloaded directory
        to_send = discord.File(f"downloaded/{name}.jpg")
        # Sending the image
        await ctx.channel.send(file=to_send)
        # Deleting the image from /downloaded
        os.remove(f"downloaded/{name}.jpg")
        # Deleting the pfp image from /pfps
        os.remove(f"pfps/{filename}.jpg")

def scrapeImages():
    """Main entry point to scrape images off the given subreddit."""
    listings = redditConnection.get_subreddit(subreddit).get_hot(limit=count)
    for idx, submission in enumerate(listings):
        uprint('#' + str(idx + 1) + ' getting: ' + submission.url[:35] +
               ' titled: ' + submission.title[:25])
        data = image(submission.title, submission.url)
        if data.imageType != FILE_TYPES[0] and data.imageType != FILE_TYPES[1]:
            print('^ERROR! Could not download the file. No proper extension was given^')
            continue
        path = TOP_FOLDER + '\\' + data.folderName
        os.makedirs(path, exist_ok=True)
        if not os.path.isfile(path + '\\' + data.fileName):
            urllib.request.urlretrieve(
                data.url,
                TOP_FOLDER + '\\' + data.folderName + '\\' + data.fileName)

def __getImagesFromUsers(self, userList):
    unlikedImages = set()
    userLikes = Database.query('select image from likes where userid=?',
                               [self.userID])
    userSet = set()
    [userSet.add(f['image']) for f in userLikes]
    for user in userList:
        otherLikesQuery = Database.query('select image from likes where userid=?',
                                         [user.userID])
        otherLikes = [f['image'] for f in otherLikesQuery]
        difLikes = [x for x in otherLikes if x not in userSet]
        [unlikedImages.add(f) for f in difLikes]
    unlikedImages = [image(f) for f in unlikedImages]
    imageWeights = [(f, self.__distance(f)) for f in unlikedImages]
    imageWeights = sorted(imageWeights, key=itemgetter(1))
    return [f[0] for f in imageWeights]

def __init__(self, parent=None):
    super(MyWidget, self).__init__(parent)
    self.setupUi(self)
    self.ow = 80
    self.oh = 60
    self.d = weights.data("weights")
    self.img = image.image(self.ow, self.oh)
    self.timer = QtCore.QTimer()
    self.timer.setInterval(1500)
    self.timer.timeout.connect(self.re_write)
    self.timer.start()
    self.start.clicked.connect(self.start_func)
    self.b_open.clicked.connect(self.open_b)
    self.f_open.clicked.connect(self.open_f)
    self.run = False

def main():
    paths = find_images()
    height = 600
    width = 600
    window = pygame.display.set_mode((width, height))
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        img = image.image(choice(paths))
        img.scale(width, height)
        img.random_filter()()
        window.fill((255, 255, 255))
        window.blit(img.get_surface(), (0, 0))
        pygame.display.update()
        pygame.time.wait(2000)

def loadASCII(filePath):
    img = image.image()
    img.data = np.loadtxt(filePath)
    with open(filePath, 'r') as f:
        line = f.readline()
        if len(line) >= 8:
            if line[2:7] != 'ASCII':
                raise Exception('Not an ASCII image file from medImgProc.')
        else:
            raise Exception('Not an ASCII image file from medImgProc.')
        while line[2:-1] != 'ENDOFPROPERTIES':
            if len(line) >= 11:
                if line[2:10] == 'dtype = ':
                    if line[10:-1] == 'None':
                        img.dtype = None
                    else:
                        img.dtype = line[10:-1]
                elif line[2:8] == 'dim = ':
                    if line[8:-1] == 'None':
                        img.dim = None
                    else:
                        img.dim = line[9:-2].split(',')
                elif line[2:11] == 'dimlen = ':
                    if line[11:-1] == 'None':
                        img.dimlen = None
                    else:
                        dimlenstr = line[12:-2].split(',')
                        img.dimlen = {}
                        for n in range(len(dimlenstr)):
                            dimlenstrsplit = dimlenstr[n].split(':')
                            img.dimlen[dimlenstrsplit[0]] = float(dimlenstrsplit[1])
                elif line[2:10] == 'shape = ':
                    if line[10:-1] != 'None':
                        shape = tuple(map(int, line[11:-2].split(',')))
                        img.data = img.data.reshape(shape, order='F')
            line = f.readline()
    if img.data is not None and img.dtype is not None:
        datamin, datamax = image.datatypeMinMax(np.dtype(img.dtype))
        img.data = np.maximum(datamin, np.minimum(datamax, img.data)).astype(img.dtype)
    return img

def process(options):
    dbfile = options.dbfile
    overwrite = options.overwrite
    resfile = os.path.splitext(dbfile)[0] + '.txt'
    if overwrite:
        if os.path.exists(resfile):
            logger.info('%s exists. overwrite', resfile)
    else:
        if os.path.exists(resfile):
            logger.info('%s exists. skip', resfile)
            return
    img = image.image(dbfile)
    conn = sqlite3.connect(dbfile)
    conn.text_factory = str
    cursor = conn.execute(
        "SELECT user_id, image_id, submit_time, suggested_sentence, rank, "
        "submitted_sentence, submitted_label, real_image_id FROM STATE")
    fw = open(resfile, 'w')
    img_list = []
    user_list = []
    for row in cursor:
        user_list.append(row[0])
        # image_id = row[7]  # img.getimagename(row[1])
        image_id = img.getimagename(row[1])
        submit_time = row[2]
        suggested_sentence = row[3]
        rank = row[4]
        submitted_sentence = row[5]
        submitted_label = row[6]
        fw.write('%s\t%s\t%s\t%s\n' %
                 (image_id, row[0], submitted_sentence, submitted_label))
        img_list.append(image_id)
    fw.close()
    logger.info('Number of annotations: %d', len(img_list))
    logger.info('Number of images: %d', len(set(img_list)))
    logger.info('Number of users: %d', len(set(user_list)))

def get_sentence(self, user_id, page):
    data = []
    img = image.image(self.db_file)
    conn = sqlite3.connect(self.db_file)
    conn.text_factory = str
    cursor = conn.execute("SELECT count(image_id) FROM STATE WHERE user_id=%d" % user_id)
    for row in cursor:
        count = row[0]
    if (page - 1) * PAGE_LIMIT > count:
        return False, None, None
    cursor = conn.execute(
        "SELECT user_id, image_id, submit_time, suggested_sentence, rank, "
        "submitted_sentence, submitted_label FROM STATE "
        "WHERE user_id = %d ORDER BY submit_time DESC LIMIT %d OFFSET %d" %
        (user_id, PAGE_LIMIT, (page - 1) * PAGE_LIMIT))
    import userControl as u
    user_control = u.user(self.db_file)
    for row in cursor:
        image_id = row[1]
        # import userControl as u
        # user_control = u.user(self.db_file)
        j, iid, image_id = user_control.getimageid(user_id, image_id)
        url = IMAGE_ROOT + img.getimagename(iid)
        submit_time = row[2]
        suggested_sentence = row[3]
        rank = row[4]
        submitted_sentence = row[5]
        submitted_label = row[6]
        set = {'image_id': image_id,
               'url': url,
               'submit_time': submit_time,
               'suggested_sentence': suggested_sentence.encode('gbk'),
               'rank': rank,
               'submitted_sentence': submitted_sentence.encode('gbk'),
               'submitted_label': submitted_label}
        data = data + [set]
    conn.close()
    import math
    return True, data, math.ceil(float(count) / PAGE_LIMIT)

def loadmat(fileName, arrayName='', dim=[], dimlen={}, dtype=None):
    newImage = image.image()
    try:
        matVariables = sio.loadmat(fileName)
        if not arrayName:
            for key in matVariables:
                if type(matVariables[key]) == np.ndarray:
                    newImage.data = matVariables[key]
                    break
            else:
                print('Error Loading matlab file.')
                return
    except NotImplementedError:
        import h5py
        matVariables = h5py.File(fileName)
        if not arrayName:
            for key in matVariables:
                print(type(matVariables[key].value))
                if type(matVariables[key].value) == np.ndarray:
                    newImage.data = matVariables[key].value
                    break
            else:
                print('Error Loading matlab file.')
                return
    if len(dim) != len(newImage.data.shape):
        newImage.dim = image.DEFAULT_SEGMENTATION_DIMENSION[-len(newImage.data.shape):]
    else:
        newImage.dim = dim[:]
    newImage.dimlen = dict(dimlen)
    if len(newImage.dim) != len(newImage.dimlen):
        for dimension in newImage.dim:
            if dimension not in newImage.dimlen:
                newImage.dimlen[dimension] = 1.
    if dtype is None:
        newImage.dtype = newImage.data.dtype
    else:
        newImage.data = newImage.data.astype(dtype)
        newImage.dtype = dtype
    return newImage

def make_gallery_from_coefficients(self, norm=False):
    '''Creates a gallery of shapelets according to their coefficients.
    If norm is set to True, all shapelets are plotted as if their coefficients were unity.'''
    xmin = 0.0
    xmax = self.n1 * self.n2
    ymin = xmin
    ymax = xmax
    nx = 100
    ny = 100
    array = np.zeros((nx, ny))
    image = im.image(array, xmin, xmax, ymin, ymax)
    image.array[:, :] = 0.0
    for ni in range(self.n1):
        for nj in range(self.n2):
            shape = self.create_shapelet(ni, nj)
            shape.add_to_image(image, offsetx=ni, offsety=nj)
    return image

yn = 'mean3d.mhd'  # yield name
vn = 'var3d.mhd'   # var name
yn4d = 'mean4d.mhd'  # yield name
vn4d = 'var4d.mhd'   # var name

### tledata
tleprims = [1e3, 1e4, 1e5, 1e6]
# tledirs = ['run.wzd0', 'run.Ex3U', 'run.NSYS', 'tle1M']
tledirs = ['tle1k', 'tle10k', 'tle100k', 'tle1M']

### analog
anprims = [1e6, 1e7, 1e8, 1e9]
andirs = ['analog1M', 'analog10M', 'analog100M', 'analog1B']

### helpers, masks
mask90pc = image.image("analog1B/mean3d.mhd")
mask90pc.to90pcmask()
# mask90pc.save90pcmask()
maskbeam = image.image("mask-beamline/maskfile.mhd")
maskspect = image.image("mask-spect1-8/finalmask.mhd")
maskbox2 = image.image("mask-box2/phantom_Parodi_TNS_2005_52_3_modified.maskfile2.mhd")
maskbox8 = image.image("mask-box8/phantom_Parodi_TNS_2005_52_3_modified.maskfile8.mhd")
# worstcaseim = image.image("analog1B/var.mhd", pps=ppsanalog, nprim=1e9, type='var')

### init
tle3d = tle.load_tledata(tledirs, tleprims, [mask90pc], ppstle, yn, vn)
an3d = tle.load_tledata(andirs, anprims, [mask90pc], ppsan, yn, vn)
tle4d = tle.load_tledata(tledirs, tleprims, [mask90pc, maskspect], ppstle, yn4d, vn4d)
an4d = tle.load_tledata(andirs, anprims, [mask90pc, maskspect], ppsan, yn4d, vn4d)
tle4dbeam = tle.load_tledata(tledirs, tleprims, [mask90pc, maskbeam, maskspect], ppstle, yn4d, vn4d)

import image as im
import coefficients as c
import numpy as np

imagefile = raw_input("What is the image filename? ")
coeff_file = "coefficients_" + imagefile

n1 = input("What is the first index of the shapelet to subtract? ")
n2 = input("What is the second index of the shapelet to subtract? ")
n1 = int(n1)
n2 = int(n2)

# Define image and coefficient objects
inputimage = im.image(np.zeros((1, 1)), 0.0, 0.0, 0.0, 0.0)
inputcoeff = c.coefficients(1.0, 4.0)

# Load image
inputimage.load_image(imagefile)

# Load coefficients
inputcoeff.read_from_file(coeff_file)

# Define shapelet to subtract
shapelet = inputcoeff.create_shapelet(n1, n2)

# Subtract shapelet
shapelet.subtract_from_image(inputimage)

print "Image centroid after subtraction is ", inputimage.centroid()

import image as im
import coefficients as c
import numpy as np

inputfile = raw_input("What is the input filename? ")
nmax = input("What is the maximum order of decomposition? ")
sigmamin = input("Define minimum blur in physical units: ")
sigmamax = input("Define maximum blur in physical units: ")
nsigma = input("How many blurring evaluations? ")

# Read in image
print 'Reading in image from file', inputfile
initialimage = im.image(np.zeros((1, 1)), 0.0, 0.0, 0.0, 0.0)
initialimage.load_image(inputfile)

dsigma = (sigmamax - sigmamin) / float(nsigma)

for i in range(nsigma):
    sigma = sigmamin + i * dsigma
    print 'Carrying out decomposition for sigma ', sigma
    inputimage = initialimage.clone()
    inputimage.gaussian_blur(sigma)
    # Write this blurred image to file
    blur_file = 'blur_' + str(sigma) + '_' + inputfile
    inputimage.write_to_file(blur_file)

rtpfile = 'data/plan.txt'
doseimage = 'results.vF3d/dosespotid.2gy.mhd'
field = 2
geolayer = slice(-2, None)

#################################################################################
# CODE

orirtp = rtplan.rtplan([rtpfile], norm2nprim=False)  # ,MSWtoprotons=False
MSW = []
for spot in orirtp.spots:
    if spot[0] == 100 + field:
        MSW.append(spot)

spotim_ct = image.image(doseimage)  # "results.vF3d/dosespotid.2gy.mhd"
ct = spotim_ct.imdata.reshape(spotim_ct.imdata.shape[::-1])

xhist = np.linspace(-150, 150, 76)  # 4mm voxels, endpoints
x = np.linspace(-148, 148, 75)      # bin centers
# print x[0], x[1], x[-1], len(x)
# print xhist[0], xhist[1], xhist[-1], len(xhist)

falloffs = []
for spindex, spot in enumerate(ct):
    crush = [0, 1, 1]  # x
    crush = crush[::-1]

#!/usr/bin/env python
import sys, image

im1 = image.image(sys.argv[-1])
im1.printpixel([0, 76, 6, 21])

#!/usr/bin/env python
import image, argparse, numpy as np

parser = argparse.ArgumentParser(description='secret')
parser.add_argument('--ct')
parser.add_argument('--mask')
parser.add_argument('--frac')
args = parser.parse_args()

tmpval = -7389.

ctim = image.image(str(args.ct))
mskim = image.image(str(args.mask))

assert(ctim.imdata.shape == mskim.imdata.shape)

# binnen = np.ma.masked_where(mskim.imdata == 1., mskim.imdata)
# buiten = np.ma.masked_where(mskim2.imdata == 0., mskim2.imdata)
binnen = np.ma.masked_equal(mskim.imdata, 1.)
buiten = np.ma.masked_equal(mskim.imdata, 0.)

ct_mean = np.ma.masked_array(ctim.imdata, mask=buiten).mean()  # where pixel(buiten) == 1: ignore those values
# ct_mean = 0.

print 'Setting volume in mask to', ct_mean
print np.ma.masked_array(ctim.imdata, mask=binnen).mean()

ctim.imdata = np.ma.masked_array(ctim.imdata, mask=binnen).filled(ct_mean)
ctim.saveas('maskedtest')

#!/usr/bin/env python
import image, numpy as np, rtplan, auger, pickle

rtplan = rtplan.rtplan(['data/plan.txt'], norm2nprim=False)  # ,noproc=True
doseimage = 'output/new_dosespotid-ct.mhd'
shifttolerance = 8

################################################################################

im = image.image(doseimage)
ct = im.imdata.reshape(im.imdata.shape[::-1]).squeeze()
ct_xhist = np.linspace(-150, 150, 301)  # 2mm voxels, endpoints
ct_x = np.linspace(-149.5, 149.5, 300)  # bin centers

falloffs = []
falloffs_valid = []
for spindex, spot in enumerate(ct):
    fop = auger.get_fop(ct_x, spot)
    falloffs.append(fop)
    # if shifttolerance:
    # ensure a shift calculated at 20% and 80% height is within 1mm of the shift computed at the default 50%
    fop2 = auger.get_fop(ct_x, spot, threshold=0.2)
    fop8 = auger.get_fop(ct_x, spot, threshold=0.8)
    if np.isclose([fop2, fop8], fop, atol=float(shifttolerance)).all():

def detectAreaRatio(self, folder=None):
    loadImg = LoadImages()
    if folder is None:
        folder = "../images/isolated_images/"
    self.image_list = loadImg.load(folder)

    # load the image and resize it to a smaller factor so that
    # the shapes can be approximated better
    index = 0
    index_obj = 0
    for img in self.image_list:
        resized = imutils.resize(img, width=300)
        ratio = img.shape[0] / float(resized.shape[0])

        # convert the resized image to grayscale, blur it slightly,
        # and threshold it
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

        # find contours in the thresholded image and initialize the
        # shape detector
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        sd = ShapeDetector()

        # create image objects
        img_obj = image(img)
        img_obj.id = index
        self.image_obj_list.append(img_obj)

        # loop over the contours
        for c in cnts:
            # area = cv2.contourArea(c)
            # print('area is:', area)
            if cv2.contourArea(c) <= 200:
                continue

            # compute the center of the contour, then detect the name of the
            # shape using only the contour
            M = cv2.moments(c)
            cX = 0
            cY = 0
            if M["m00"] != 0:
                cX = int((M["m10"] / M["m00"]) * ratio)
                cY = int((M["m01"] / M["m00"]) * ratio)
            shape = sd.detect(c)

            # multiply the contour (x, y)-coordinates by the resize ratio,
            # then draw the contours and the name of the shape on the image
            c = c.astype("float")
            c *= ratio
            c = c.astype("int")
            cv2.drawContours(img, [c], -1, (0, 255, 0), 2)
            # cv2.putText(img, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
            #             0.5, (255, 255, 255), 2)

            # Rotated Rectangle
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            imgRotRect = cv2.drawContours(img, [box], 0, (0, 0, 255), 2)

            # calculate the areas
            area_obj = cv2.contourArea(c)
            area_box = self.findAreaRotRect(box)

            # create dna_object
            d_obj = dna_object()
            d_obj.id = index_obj
            d_obj.getAttributes(c, area_obj, area_box)
            self.dna_object_list.append(d_obj)

            r = d_obj.area_ratio
            print("# ", index)
            print("area obj: %.2f" % area_obj)
            print("area box: %.2f" % area_box)
            print("area ratio: %.2f" % r)
            index_obj += 1

        # cv2.imshow(str(index), imgRotRect)
        # cv2.waitKey(0)
        index += 1

count_line = 0
count_right_line = 0  # pages where both the line count and the character counts are correct
count_wrong_line = 0  # pages with a wrong line count

# del pages[0]
for page in pages:
    print(page[0].split(".")[0])  # , right, "/", count_page
    # print(page[1])
    if page[1] == [] or len(page[0].split(".")) > 1 or page[0] not in files:
        print("Not Exist!")
        continue
    # count_page += 1
    # if count_page < 2642:
    #     continue
    count_page += 1
    p = image(
        input_folder + "PDF_to_JPG" + "/" + page[0].split(".")[0] + '.jpg',
        page[1])
    # p = image("DD1379BX3000021-101.jpg")
    p.pre_process()
    p.new_Vertical()
    img = p.new_CutWord()
    for words in page[1]:
        count_word += len(words)
    count_line += len(page[1])
    # print("segmented lines: " + str(len(img)) + "/" + str(len(page[1])))
    if len(page[1]) == len(img):  # line count is correct
        count = 0
        for i in range(len(page[1])):
            if len(page[1][i]) == len(img[i]):
                count += 1
        count_right_line += count

def rebuild_glyphs(self):
    """Recreate all glyphs to represent the current state of the text image."""
    glyphs = []
    indent = 0
    linewrap = self.linewrap
    px, py = self.pos
    self.font.pygame_font.set_underline(self._underline)
    self.font.pygame_font.set_italic(self._italic)
    self.font.pygame_font.set_bold(self._bold)
    skip = 0
    num = 0
    image_positions = {}
    text = self.text
    if self.font.images:
        for s in self.font.images:
            last = 0
            while 1:
                n = text.find(s, last)
                if n >= 0:
                    image_positions[n] = s
                    last = n + len(s)
                else:
                    break
    if self.font.images and image_positions:
        word = ""
        indent = 0
        downdent = 0
        newh = 0
        _w = 0
        for i in text:
            if skip:
                skip -= 1
                num += 1
                continue
            elif num in image_positions:
                if word:
                    i = image.Image(self.font.pygame_font.render(word, True, (255, 255, 255)))
                    i.colorize = self.color
                    w, h = i.get_size()
                    if linewrap and indent and indent + w > linewrap:
                        if indent > _w:
                            _w = indent
                        indent = 0
                        downdent += newh
                        newh = h
                    newh = max((newh, h))
                    i.pos = (indent, downdent)
                    indent += w
                    word = ""
                    glyphs.append(i)
                a = image_positions[num]
                i = self.font.images[a].copy()
                w, h = i.get_size()
                if linewrap and indent and indent + w > linewrap:
                    if indent > _w:
                        _w = indent
                    indent = 0
                    downdent += newh
                    newh = h
                newh = max((newh, h))
                i.pos = (indent, downdent)
                indent += w
                glyphs.append(i)
                skip = len(a) - 1
            elif i == "\n":
                if indent > _w:
                    _w = indent
                indent = 0
                downdent += newh
                newh = 0
            elif i == " " and linewrap and indent and (
                    indent + self.font.pygame_font.size(word + " ")[0] > linewrap):
                i = image.image(self.font.pygame_font.render(word, True, (255, 255, 255)))
                i.colorize = self.color
                i.pos = (indent, downdent)
                w, h = i.get_size()
                indent = 0
                downdent += max((h, newh))
                newh = 0
                glyphs.append(i)
            else:
                word += i
            num += 1
        if word:
            i = image.Image(self.font.pygame_font.render(word, True, (255, 255, 255)))
            i.colorize = self.color
            w, h = i.get_size()
            if linewrap and indent and indent + w > linewrap:
                if indent > _w:
                    _w = indent
                indent = 0
                downdent += newh
                newh = h
            newh = max((newh, h))
            i.pos = (indent, downdent)
            indent += w
            word = ""
            glyphs.append(i)
        if indent > _w:
            _w = indent
        if newh:
            downdent += newh
    else:
        indent = 0
        downdent = 0
        newh = 0
        _w = 0
        for line in text.split("\n"):
            _l = ""
            for word in line.split(" "):
                if linewrap and indent and (
                        indent + self.font.pygame_font.size(_l + " " + word)[0] > linewrap):
                    i = image.Image(self.font.pygame_font.render(_l, True, (255, 255, 255)))
                    i.colorize = self.color
                    x, y = i.get_size()
                    i.pos = (indent, downdent)
                    downdent += newh
                    newh = y
                    indent += x
                    if indent > _w:
                        _w = int(indent)
                    indent = 0
                    glyphs.append(i)
                    _l = word
                else:
                    if _l:
                        _l += " " + word
                    else:
                        _l += word
            i = image.Image(self.font.pygame_font.render(_l, True, (255, 255, 255)))
            i.colorize = self.color
            x, y = i.get_size()
            i.pos = (indent, downdent)
            downdent += max((newh, y))
            newh = 0
            indent += x
            if indent > _w:
                _w = int(indent)
            indent = 0
            glyphs.append(i)
    self.glyphs = glyphs
    self.size = (_w, downdent)
    if self._compiled:
        self.compile()
    self.font.pygame_font.set_underline(False)
    self.font.pygame_font.set_italic(False)
    self.font.pygame_font.set_bold(False)

def thread_func(self):
    self.run = True
    self.text = ""
    self.text = "Loading Images ..."
    self.img1 = image.image(self.ow, self.oh)
    self.img2 = image.image(self.ow, self.oh)
    self.nb = self.img1.get_all_images(self.b_dir)
    self.nf = self.img2.get_all_images(self.f_dir)
    self.text = self.text + " done !!!\n"
    self.text = self.text + "\ncategory - beach"
    self.text = self.text + "\n------------------\n"
    miss1 = 0
    for i in range(0, self.nb):
        dataset = self.img1.get_data_set(i)
        name = self.img1.get_name(i)
        ct = int(self.s.find(dataset))
        if ct < self.inpf:
            cat = "forest"
            miss1 = miss1 + 1
        else:
            ct = ct - self.inpf
            cat = "beach"
        self.text = self.text + "image " + name[0] + " = "
        self.text = self.text + str(cat) + "\n"
    self.text = self.text + "\nnumber of errors = " + str(miss1)
    self.text = self.text + " total = " + str(self.nb) + "\n"
    self.text = self.text + "\ncategory - forest"
    self.text = self.text + "\n------------------\n"
    miss2 = 0
    for i in range(0, self.nf):
        dataset = self.img2.get_data_set(i)
        name = self.img2.get_name(i)
        ct = int(self.s.find(dataset))
        if ct < self.inpf:
            cat = "forest"
        else:
            ct = ct - self.inpf
            cat = "beach"
            miss2 = miss2 + 1
        self.text = self.text + "image " + name[0] + " = "
        self.text = self.text + str(cat) + "\n"
    self.text = self.text + "\nnumber of errors = " + str(miss2)
    self.text = self.text + " total = " + str(self.nf) + "\n"
    miss = miss1 + miss2
    total = self.nb + self.nf
    self.text = self.text + "\ntotal error = "
    self.text = self.text + str(float(miss) / float(total))
    self.text = self.text + "\n\n -----TESTING COMPLETED----\n\n"
    time.sleep(1)
    self.run = False

#!/usr/bin/env python
import image, sys, numpy

infile = sys.argv[-1]  # in goes mhd

print >> sys.stderr, 'Processing', infile
print "This tool will set all voxels to water (HU=0)."

img = image.image(infile)
img.towater()
img.saveas(".water")

        'std_dust_pixel_value': np.std(dust_pixels),
        'percent_dust_pixels': dust_pixels.size / image.size
    }


start = time.time()
np.seterr(all='ignore')

path, dirs, files = next(os.walk("PATH_TO_DIRECTORY_OF_IMAGES"))
print(f"Found {len(files)} files.")

a = []
for i, j in enumerate(files):
    print(f"Processing file: {i+1} of {len(files)} - {j}", end="\r")
    filedata = image(path + "\\" + j)
    metadata = filedata.get_metadata()
    quality = filedata.get_quality_indicators()
    c4_cal_quality = quality['calibration_quality_flags'][:, 1]
    c5_cal_quality = quality['calibration_quality_flags'][:, 2]
    rad_to_temp_coeffs4 = [
        metadata['ch_4_central_wavenumber'] / (10**3),
        metadata['ch_4_constant_1'] / (10**5),
        metadata['ch_4_constant_2'] / (10**6)
    ]
    rad_to_temp_coeffs5 = [
        metadata['ch_5_central_wavenumber'] / (10**3),
        metadata['ch_5_constant_1'] / (10**5),
        metadata['ch_5_constant_2'] / (10**6)
    ]

def do_image(self):
    self.metadata()
    from image import image
    return image(), noduplicates()

def do_image(self):
    from image import image
    return image(), noduplicates()

def b_open_clicked(self):
    filename = QFileDialog.getOpenFileName(
        caption="Открыть изображение",        # "Open image"
        filter="Файлы изображений (*.jpg)")   # "Image files (*.jpg)"
    if filename != ('', ''):
        self.im = image(filename[0])
        self.output_image(self.im, 1)

def __init__(self, directory):
    self.directory = directory
    self.mslist = [
        name for name in os.listdir(self.directory)
        if os.path.isdir(os.path.join(self.directory, name))
    ]
    self.images = []
    for d in self.mslist:
        self.images.append(image.image(d, directory + d))

#!/usr/bin/env python
import image, sys

if len(sys.argv) < 3:
    print "Supply an image and a mask."
    sys.exit()

maskfile = sys.argv[-1]  # mask
infile = sys.argv[-2]    # image

print >> sys.stderr, 'Processing', infile, maskfile

img = image.image(infile)
msk = image.image(maskfile)
img.applymask(msk)

print img.getmean()

### params
ppsan1 = (72.0856 + 71.8095) / 2
ppstle1 = (69.2312 + 67.7542) / 2
ppsan2 = (72.2114 + 71.9978) / 2
ppstle2 = (68.1091 + 69.5367) / 2
ppsan5 = (72.2351 + 74.2769) / 2
ppstle5 = (68.3652 + 69.5583) / 2

# yn = 'mean3d.mhd'  # yield name
# vn = 'var3d.mhd'   # var name
yn4d = 'mean4d.mhd'  # yield name
vn4d = 'var4d.mhd'   # var name

### helpers, masks
# TODO make mask90pc for 1,5mm voxels
maskspect1 = image.image("vox/1mm1-8msk.mhd")
maskspect2 = image.image("mask-spect1-8/finalmask.mhd")
maskspect5 = image.image("vox/5mm1-8msk.mhd")
mask901 = image.image("vox/90pcmask1.mhd")
mask902 = image.image("analog1B/mean.90pcmask.mhd")
mask905 = image.image("vox/90pcmask5.mhd")

### init
tle1mm = tle.load_tledata(['vox/tle1mm'], [1e4], [mask901, maskspect1], ppstle1, yn4d, vn4d)
tle2mm = tle.load_tledata(['tle10k'], [1e4], [mask902, maskspect2], ppstle2, yn4d, vn4d)
tle5mm = tle.load_tledata(['vox/tle5mm'], [1e4], [mask905, maskspect5], ppstle5, yn4d, vn4d)
an1mm = tle.load_tledata(['vox/an1mm'], [1e7], [mask901, maskspect1], ppsan1, yn4d, vn4d)
an2mm = tle.load_tledata(['analog10M'], [1e7], [mask902, maskspect2], ppsan2, yn4d, vn4d)
an5mm = tle.load_tledata(['vox/an5mm'], [1e7], [mask905, maskspect5], ppsan5, yn4d, vn4d)

# to 3d

import ctypes, numpy as np, os, math, time
from os import path
import gpumcd, image

print('Start of program.')

sett = gpumcd.Settings("d:\\postdoc\\gpumcd_data")
print(sett.planSettings.goalSfom)

casedir = "d:\\postdoc\\analyses\\gpumcd_python"

ct_image = image.image(path.join(casedir, 'ct.xdr'))
ct_image.ct_to_hu(1000, 1)
ct_image.resample([3, 3, 3])
ct = gpumcd.CT(sett, ct_image)  # for dicoms, don't set intercept, slope

machfile = "d:/postdoc/gpumcd_data/machines/machine_van_sami/brentAgility.beamlets.gpumdt"
engine = gpumcd.Engine(sett, ct, machfile)
print('gpumcd init done.')
print(engine.lasterror())

start_time = time.time()

frame1size = 5
frame2size = 3

BeamFrames = gpumcd.make_c_array(gpumcd.BeamFrame, 2)

        continue
    newline = [x.strip() for x in newline.split('=')]
    if 'ElapsedTime' in newline[0]:
        runtime = newline[1]

dosephs = glob.glob(indir + "/**/epid-entry.root")[0]  # 1B doesn't have this, only without dash
doseim = glob.glob(indir + "/**/epiddose-Dose.mhd")[0]
doseuncim = glob.glob(indir + "/**/epiddose-Dose-Uncertainty.mhd")[0]
dosetleim = glob.glob(indir + "/**/epiddose-tle-Dose.mhd")[0]
dosetleuncim = glob.glob(indir + "/**/epiddose-tle-Dose-Uncertainty.mhd")[0]
# electronprod = glob.glob(indir + "/**/epid-entry.root")[0]

all = dump.get2D(dosephs, ['X', 'Y'])

doseim_ = image.image(doseim, type='yield')
doseuncim_ = image.image(doseuncim, type='relunc')
doseuncim_.imdata = doseuncim_.imdata.squeeze() * 100.
dosetleim_ = image.image(dosetleim, type='yield')
dosetleuncim_ = image.image(dosetleuncim, type='relunc')
dosetleuncim_.imdata = dosetleuncim_.imdata.squeeze() * 100.

unc_axis = np.linspace(0, 50, 50)

f, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plot.subplots(nrows=2, ncols=3, sharex=False, sharey=False)  # ,figsize=(28,10)
# f.subplots_adjust(hspace=.5)
# f.subplots_adjust(wspace=.5)
f.suptitle('Runtime: ' + runtime + 's', fontsize=10)

ax1.set_title("PhS, X,Y")

#!/usr/bin/env python
import image, sys

infile = sys.argv[-1]  # in goes mhd

print >> sys.stderr, 'Processing', infile

img = image.image(infile)
img.savenormalize()

#!/usr/bin/env python
import plot, numpy as np, auger, image, rtplan
from scipy.ndimage.filters import gaussian_filter

###########################################################################################################

smooth_param = 8.5  # 20 mm FWHM
volume_offset = -141.59 + 7.96  # spot sources

## 61
pgelay = image.image('data/ct/source-ct-LAYERID-elayspot61.mhd')
rppgelay = image.image('data/rpct/source-rpct-LAYERID-elayspot61.mhd')
pggeolay = image.image('data/ct/source-ct-LAYERID-geolayspot61.mhd')
rppggeolay = image.image('data/rpct/source-rpct-LAYERID-geolayspot61.mhd')
ctelay = image.image('../doseactortest/output/dose-ct-LAYERID-elayspot61-Dose.mhd')
rpctelay = image.image('../doseactortest/output/dose-rpct-LAYERID-elayspot61-Dose.mhd')
ctgeolay = image.image('../doseactortest/output/dose-ct-LAYERID-geolayspot61-Dose.mhd')
rpctgeolay = image.image('../doseactortest/output/dose-rpct-LAYERID-geolayspot61-Dose.mhd')

## 40
# pgelay = image.image('data/ct/source-ct-LAYERID-elayspot61.mhd')
# rppgelay = image.image('data/rpct/source-rpct-LAYERID-elayspot61.mhd')
## pggeolay = image.image('data/ct/source-ct-LAYERID-geolayspot40.mhd')
## rppggeolay = image.image('data/rpct/source-rpct-LAYERID-geolayspot40.mhd')
# pggeolay = image.image('data/ct/source-ct-LAYERID-geolayspot40.mhd')
# rppggeolay = image.image('data/rpct/source-rpct-LAYERID-geolayspot40.mhd')

## 29
# pgelay = image.image('data/ct/source-ct-LAYERID-elayspot29.mhd')