def save_diff_image(expected, actual, output):
    """Write an amplified difference image between two PNG files.

    Parameters are file paths: the expected image, the actual image and
    the output path for the difference image.  Raises
    ImageComparisonFailure if the (cropped) images differ in shape.
    """
    expectedImage = _png.read_png(expected)
    actualImage = _png.read_png(actual)
    # Crop both images to a common region before comparing pixel-wise.
    actualImage, expectedImage = crop_to_same(
        actual, actualImage, expected, expectedImage)
    expectedImage = np.array(expectedImage).astype(float)
    actualImage = np.array(actualImage).astype(float)
    if expectedImage.shape != actualImage.shape:
        raise ImageComparisonFailure(
            "Image sizes do not match expected size: {0} "
            "actual size {1}".format(expectedImage.shape, actualImage.shape))
    absDiffImage = np.abs(expectedImage - actualImage)
    # expand differences in luminance domain
    absDiffImage *= 255 * 10
    save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
    height, width, depth = save_image_np.shape
    # The PDF renderer doesn't produce an alpha channel, but the
    # matplotlib PNG writer requires one, so expand the array
    if depth == 3:
        with_alpha = np.empty((height, width, 4), dtype=np.uint8)
        with_alpha[:, :, 0:3] = save_image_np
        save_image_np = with_alpha
    # Hard-code the alpha channel to fully solid
    save_image_np[:, :, 3] = 255
    _png.write_png(save_image_np, output)
def __init__(self, color, figName, path=None):
    """Load the ribbon-box template PNG named *figName* and tint it with *color*.

    When *path* is None the image is looked up under the running wx
    application's install directory, falling back to the script location
    (``sys.argv[0]``) when no wx app is available.
    """
    if path is None:  # was `path == None`; identity test is the correct idiom
        try:
            path1 = wx.GetApp().installDir
        except Exception:  # was a bare except; no wx app running -> use script dir
            path1 = sys.argv[0]
            path1 = path1.decode(sys.getfilesystemencoding())
        IMAGESPATH = os.path.join(path1, 'nicePlot', 'images')
        self.original_image = read_png(str(os.path.relpath(
            os.path.join(IMAGESPATH, "barplot", figName + '.png'))))
    else:
        self.original_image = read_png(str(os.path.relpath(
            os.path.join(path, figName + '.png'))))
    self.cut_location = 70
    # Blue channel carries the grayscale shape; blue-minus-red marks the
    # tintable region; channel 3 is the template's alpha.
    self.b_and_h = self.original_image[:, :, 2]
    self.color = self.original_image[:, :, 2] - self.original_image[:, :, 0]
    self.alpha = self.original_image[:, :, 3]
    self.nx = self.original_image.shape[1]
    rgb = matplotlib.colors.colorConverter.to_rgb(color)
    im = np.empty(self.original_image.shape, self.original_image.dtype)
    im[:, :, :3] = self.b_and_h[:, :, np.newaxis]
    im[:, :, :3] -= self.color[:, :, np.newaxis] * (1. - np.array(rgb))
    im[:, :, 3] = self.alpha
    self.im = im
def save_diff_image(expected, actual, output):
    """Write an amplified difference image between two PNG files.

    Parameters are file paths: the expected image, the actual image and
    the output path.  Asserts that the (cropped) images agree in rank
    and shape before differencing.
    """
    expectedImage = _png.read_png(expected)
    actualImage = _png.read_png(actual)
    actualImage, expectedImage = crop_to_same(
        actual, actualImage, expected, expectedImage)
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # it was always just an alias of the builtin float.
    expectedImage = np.array(expectedImage).astype(float)
    actualImage = np.array(actualImage).astype(float)
    assert expectedImage.ndim == actualImage.ndim
    assert expectedImage.shape == actualImage.shape
    absDiffImage = abs(expectedImage - actualImage)
    # expand differences in luminance domain
    absDiffImage *= 255 * 10
    save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
    height, width, depth = save_image_np.shape
    # The PDF renderer doesn't produce an alpha channel, but the
    # matplotlib PNG writer requires one, so expand the array
    if depth == 3:
        with_alpha = np.empty((height, width, 4), dtype=np.uint8)
        with_alpha[:, :, 0:3] = save_image_np
        save_image_np = with_alpha
    # Hard-code the alpha channel to fully solid
    save_image_np[:, :, 3] = 255
    _png.write_png(save_image_np.tostring(), width, height, output)
def rna_draw(seq, struct, name, out_type='svg'):
    """Render an RNA secondary structure with RNAplot.

    For out_type='png', returns the rendered raster read back via
    matplotlib; for out_type='svg', returns the first polygon parsed
    from the generated SVG.  Any other out_type raises.
    """
    lines = '{0}\n{1}\n'.format(seq, struct)
    if out_type == 'png':
        outfile = cfg.dataPath('rnafold/{0}.png'.format(name))
        rprc = spc.Popen('RNAplot -o svg; convert rna.svg {0}'.format(outfile),
                         shell=True, stdin=spc.PIPE, stdout=spc.PIPE)
        out = rprc.communicate(input=lines)[0].splitlines()
        from matplotlib._png import read_png
        # BUGFIX: the result used to be bound to `image` while `arr` was
        # returned, raising UnboundLocalError on the png path.
        arr = read_png(outfile)
    elif out_type == 'svg':
        outfile = cfg.dataPath('rnafold/{0}.svg'.format(name))
        tempdir = 'tmp_{0}'.format(name)
        # Run RNAplot in a scratch directory so concurrent calls don't clash.
        rprc = spc.Popen(
            'mkdir {1}; cd {1}; RNAplot -o svg; mv rna.svg {0}; cd ..; rm -r {1};'
            .format(outfile, tempdir),
            shell=True, stdin=spc.PIPE, stdout=spc.PIPE)
        out = rprc.communicate(input=lines)[0].splitlines()
        struct_svg = open(outfile).read()
        data = xparse.parse(struct_svg)
        arr = svg.get_polys(data)[0]
    else:
        raise Exception()
    return arr
def __init__(self, ax, n):
    """Draw the static legend lines, the counter text and the logo onto *ax*."""
    self.ax = ax
    # Renamed from `dir`, which shadowed the builtin.
    basedir = os.path.abspath(os.path.dirname(sys.argv[0]))
    ax.axis([0, 1, 0, 1])
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    # BUGFIX: `ax.set_frame_on = True` replaced the bound method with a bool
    # instead of enabling the frame; call the setter as intended.
    ax.set_frame_on(True)
    ax.plot([0.1, 0.2], [0.96, 0.96], color='blue', linewidth=2)
    ax.plot([0.1, 0.2], [0.91, 0.91], color='green', linewidth=2)
    ax.plot([0.1, 0.2], [0.86, 0.86], color='red', linewidth=1)
    self.text1 = ax.text(0.3, self.ypos(2), '%d' % n)
    fn = get_sample_data("%s/coolr-logo-poweredby-48.png" % basedir,
                         asfileobj=False)
    arr = read_png(fn)
    imagebox = OffsetImage(arr, zoom=0.4)
    ab = AnnotationBbox(imagebox, (0, 0), xybox=(.75, .12),
                        xycoords='data', boxcoords="axes fraction", pad=0.5)
    ax.add_artist(ab)
def main():
    """Embed a precomputed distance matrix with MDS and plot each patch image
    at its embedded 2-D coordinate."""
    # load sample data and normalise to [0, 1]
    raw = np.loadtxt("distmat799.txt", delimiter=",")
    dists = raw / np.amax(raw)
    # collect the patch images
    img_files = [img for img in os.listdir("799_patch")
                 if re.search(r"\.png", img)]
    # 2-D multidimensional scaling on the precomputed dissimilarities
    mds = MDS(n_components=2, dissimilarity="precomputed")
    results = mds.fit(dists)
    # place every patch image at its embedded coordinate
    fig, ax = plt.subplots()
    for idx, img_file in enumerate(img_files):
        arr = read_png(os.path.join("799_patch", img_file))
        box = OffsetImage(arr, zoom=2.0)
        xy = tuple(results.embedding_[idx, :])
        ax.add_artist(AnnotationBbox(box, xy))
    ax.set_xlim(-1.0, 1.0)
    ax.set_ylim(-1.0, 1.0)
    plt.show()
def createMovie(self):
    '''
    open all movie-template-*.png's and create a movie out of it

    Looks up the per-frame screenshots written earlier, then shells out to
    mencoder to encode them.  Both failure paths deliberately print a
    message and return (best-effort, never raises to the caller).
    '''
    print "createMovie(): writing image data"
    # The first frame defines the movie's (width, height).
    frameSizeImage = read_png(''.join([self.templateMovDataDirectory, '/.screenShot', str(0), '.png']))
    frameSize = (np.shape(frameSizeImage)[1], np.shape(frameSizeImage)[0])
    try:
        FFMpegWriter = animation.writers['mencoder']
    except:
        print "ERROR: Visualisation3D.createMovie(): mencoder libary is not installed, could not create movie!"; return
    try:
        fileName = ''.join([self.movieSaveDirectory, '/', self.networkName, '_', str(self.movieNumber), self.movieFileType])
        # mf:// glob pattern consumed by mencoder, not by Python.
        imageName = ''.join(['mf://', self.templateMovDataDirectory, '/.screenShot%d.png'])
        imageType = ''.join(['type=png:w=', str(frameSize[0]), ':h=', str(frameSize[1]), ':fps=24'])
        command = ('mencoder', imageName, '-mf', imageType, '-ovc', 'lavc', '-lavcopts', 'vcodec=mpeg4', '-oac', 'copy', '-o', fileName)
        os.spawnvp(os.P_WAIT, 'mencoder', command)
        self.movieNumber = self.movieNumber + 1
        print "createMovie(): created movie sucessfull"
    except:
        print "ERROR: Visualisation3D.createMovie(): mencoder libary is not installed, could not create movie!"; return
def paste_image(filename):
    """Plot a point and attach the image at *filename* beside it in an
    offset annotation box."""
    fig, ax = plt.subplots()
    anchor = (0.5, 0.7)
    ax.plot(anchor[0], anchor[1], ".r")
    png_path = get_sample_data(filename, asfileobj=False)
    pixels = read_png(png_path)
    box = OffsetImage(pixels, zoom=0.2)
    annotation = AnnotationBbox(
        box, anchor,
        xybox=(120., -80.),
        xycoords='data',
        boxcoords="offset points",
        pad=0.5,
        arrowprops=dict(arrowstyle="->",
                        connectionstyle="angle,angleA=0,angleB=90,rad=3"))
    ax.add_artist(annotation)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.draw()
    plt.show()
def getPic(pic):
    # Return the first PNG under ./stimuli/ whose filename starts with `pic`.
    # NOTE(review): implicitly returns None when no file matches -- confirm
    # callers handle that case.
    for filename in os.listdir("./stimuli/"):
        if filename.startswith(pic):
            print filename
            picture = read_png('./stimuli/' + str(filename))
            return picture
def get_grey(self, tex, fontsize=None, dpi=None):
    """returns the alpha channel"""
    key = tex, self.get_font_config(), fontsize, dpi
    cached = self.grey_arrayd.get(key)
    if cached is None:
        # Rasterize the TeX snippet and keep only the alpha plane.
        pngfile = self.make_png(tex, fontsize, dpi)
        rgba = read_png(os.path.join(self.texcache, pngfile))
        cached = rgba[:, :, -1]
        self.grey_arrayd[key] = cached
    return cached
def add_logo_on_map(imagepath, ax, position, zoom, zorder):
    """Pin the PNG at *imagepath* onto *ax* at *position* with the given
    zoom level and stacking order."""
    artwork = read_png(imagepath)
    box = OffsetImage(artwork, zoom=zoom)
    # coordinates to position this image
    annotation = AnnotationBbox(box, position,
                                xybox=(0.0, 0.0),
                                xycoords="data",
                                pad=0.0,
                                boxcoords="offset points")
    annotation.zorder = zorder
    ax.add_artist(annotation)
def logo_box(self):
    """Build an anchored offset box holding the logo image and the logo
    text packed horizontally."""
    image = OffsetImage(read_png(get_sample_data(logo_location,
                                                 asfileobj=False)),
                        zoom=0.25, resample=1, dpi_cor=1)
    caption = TextArea(logo_text,
                       textprops=dict(color='#444444', fontsize=50,
                                      weight='bold'))
    packed = HPacker(children=[image, caption],
                     align="center", pad=0, sep=25)
    return AnchoredOffsetbox(loc=2, child=packed, pad=0.8,
                             frameon=False, borderpad=0.)
def get_grey(self, tex, fontsize=None, dpi=None): """Return the alpha channel.""" from matplotlib import _png key = tex, self.get_font_config(), fontsize, dpi alpha = self.grey_arrayd.get(key) if alpha is None: pngfile = self.make_png(tex, fontsize, dpi) X = _png.read_png(os.path.join(self.texcache, pngfile)) self.grey_arrayd[key] = alpha = X[:, :, -1] return alpha
def save_diff_image(expected, actual, output):
    '''
    Parameters
    ----------
    expected : str
        File path of expected image.
    actual : str
        File path of actual image.
    output : str
        File path to save difference image to.
    '''
    from matplotlib import _png
    # Drop alpha channels, similarly to compare_images.
    expected_rgb = _png.read_png(expected)[..., :3]
    actual_rgb = _png.read_png(actual)[..., :3]
    actual_rgb, expected_rgb = crop_to_same(
        actual, actual_rgb, expected, expected_rgb)
    expected_arr = np.array(expected_rgb).astype(float)
    actual_arr = np.array(actual_rgb).astype(float)
    if expected_arr.shape != actual_arr.shape:
        raise ImageComparisonFailure(
            "Image sizes do not match expected size: {} "
            "actual size {}".format(expected_arr.shape, actual_arr.shape))
    # Amplify differences so small deviations become visible.
    diff = np.abs(expected_arr - actual_arr) * (255 * 10)
    diff_u8 = np.clip(diff, 0, 255).astype(np.uint8)
    height, width, depth = diff_u8.shape
    # The PDF renderer doesn't produce an alpha channel, but the
    # matplotlib PNG writer requires one, so expand the array
    if depth == 3:
        expanded = np.empty((height, width, 4), dtype=np.uint8)
        expanded[:, :, :3] = diff_u8
        diff_u8 = expanded
    # Force a fully opaque alpha channel.
    diff_u8[:, :, 3] = 255
    _png.write_png(diff_u8, output)
def initUI(self):
    """Build the Qt layout, embed the matplotlib canvas, load the mologram
    images and wire up the click handler."""
    # layout
    vbox = QtGui.QVBoxLayout()
    self.setLayout(vbox)
    # set up canvas
    self.figure = plt.figure(figsize=(10, 5), facecolor='None', edgecolor='None')
    self.canvas = FigureCanvas(self.figure)
    vbox.addWidget(self.canvas)
    # image parameters, axes refer to setup axes
    self.x_max = 2
    self.y_max = 4
    # Pre-load both orientations of the mologram artwork.
    self.mologram = read_png('./images/mologram.png')
    self.mologram_turned = read_png('./images/mologram_turned.png')
    # initial display
    self.initDisplay()
    cid = self.figure.canvas.mpl_connect('button_press_event', self.onclick)
def get_weather_icons(ww, time):
    """Get the path to a png given the weather representation.

    Returns a list of image arrays, one per timestamp, preferring the
    day ('d') or night ('n') glyph variant based on the hour and falling
    back to the suffix-less glyph when the variant file is missing.
    """
    # BUGFIX: the docstring used to sit *after* the import statement, making
    # it a discarded string expression rather than the function docstring.
    from matplotlib._png import read_png
    weather = [WMO_GLYPH_LOOKUP_PNG[w.astype(int).astype(str)]
               for w in ww.values]
    weather_icons = []
    for date, weath in zip(time, weather):
        # Day glyphs for 06..18 local hours, night glyphs otherwise.
        if date.hour >= 6 and date.hour <= 18:
            add_string = 'd'
        elif date.hour >= 0 and date.hour < 6:
            add_string = 'n'
        elif date.hour > 18 and date.hour < 24:
            add_string = 'n'
        pngfile = folder_glyph + '%s.png' % (weath + add_string)
        if os.path.isfile(pngfile):
            weather_icons.append(read_png(pngfile))
        else:
            # Fall back to the glyph without a day/night suffix.
            pngfile = folder_glyph + '%s.png' % weath
            weather_icons.append(read_png(pngfile))
    return weather_icons
def put_team_logo_annotationbbox(filename, xy, xybox=(40., +80.)):
    """Attach a team-logo image at data point *xy*, offset by *xybox*
    points, on the module-level axes `ax`."""
    png_path = get_sample_data(filename, asfileobj=False)
    arr_lena = read_png(png_path)
    box = OffsetImage(arr_lena, zoom=0.02)
    # xy = [4, 75]
    annotation = AnnotationBbox(
        box, xy,
        xybox=xybox,
        xycoords='data',
        boxcoords="offset points",
        pad=0.0, frameon=False,
        arrowprops=dict(arrowstyle="->", alpha=.75, linestyle='dashed',
                        connectionstyle="angle,angleA=50,angleB=90,rad=3"))
    ax.add_artist(annotation)
def draw_ant(position, color, ant_img, args):
    """Draw an ant at *position*: as an image when args.images is set,
    otherwise as a coloured dot.

    Returns the dot's artist list when a dot was plotted, else None.
    """
    # NOTE(review): `i` is always 5, so the image branch depends only on
    # args.images; the `ant_img` argument is immediately overwritten --
    # confirm both are intentional.
    i = 5
    # BUGFIX: `ant` was unbound when the image branch ran, so the final
    # `return (ant)` raised NameError.
    ant = None
    ax = fig.add_subplot(111)
    ant_img = read_png('./images/Fourmisse.png')
    imagebox = OffsetImage(ant_img, zoom=.03)
    if args.images == True and i == 5:
        xy = [position[0], position[1]]
        ab = AnnotationBbox(imagebox, xy, xycoords='data',
                            boxcoords="offset points")
        ax.add_artist(ab)
    else:
        ant = plt.plot([position[0]], [position[1]], color=color,
                       marker='.', markersize=40.0)
    return (ant)
def get_grey(self, tex, fontsize=None, dpi=None):
    """returns the alpha channel"""
    key = tex, self.get_font_config(), fontsize, dpi
    alpha = self.grey_arrayd.get(key)
    if alpha is None:
        pngfile = self.make_png(tex, fontsize, dpi)
        X = read_png(os.path.join(self.texcache, pngfile))
        # An explicit rcParam overrides the autodetected dvipng behaviour.
        if rcParams['text.dvipnghack'] is not None:
            hack = rcParams['text.dvipnghack']
        else:
            if TexManager._dvipng_hack_alpha is None:
                # Detect once per process and cache on the class.
                TexManager._dvipng_hack_alpha = dvipng_hack_alpha()
            hack = TexManager._dvipng_hack_alpha

        if hack:
            # hack the alpha channel
            # dvipng assumed a constant background, whereas we want to
            # overlay these rasters with antialiasing over arbitrary
            # backgrounds that may have other figure elements under them.
            # When you set dvipng -bg Transparent, it actually makes the
            # alpha channel 1 and does the background compositing and
            # antialiasing itself and puts the blended data in the rgb
            # channels.  So what we do is extract the alpha information
            # from the red channel, which is a blend of the default dvipng
            # background (white) and foreground (black).  So the amount of
            # red (or green or blue for that matter since white and black
            # blend to a grayscale) is the alpha intensity.  Once we
            # extract the correct alpha information, we assign it to the
            # alpha channel properly and let the users pick their rgb.  In
            # this way, we can overlay tex strings on arbitrary
            # backgrounds with antialiasing
            #
            # red = alpha*red_foreground + (1-alpha)*red_background
            #
            # Since the foreground is black (0) and the background is
            # white (1) this reduces to red = 1-alpha or alpha = 1-red
            #alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
            alpha = 1 - X[:, :, 0]
        else:
            # Normal case: dvipng produced a real alpha channel.
            alpha = X[:, :, -1]
        self.grey_arrayd[key] = alpha
    return alpha
def get_grey(self, tex, fontsize=None, dpi=None):
    """returns the alpha channel"""
    key = tex, self.get_font_config(), fontsize, dpi
    cached = self.grey_arrayd.get(key)
    if cached is None:
        pngfile = self.make_png(tex, fontsize, dpi)
        X = read_png(os.path.join(self.texcache, pngfile))
        # Decide where the alpha information lives (see text.dvipnghack).
        if rcParams['text.dvipnghack'] is not None:
            hack = rcParams['text.dvipnghack']
        else:
            hack = self._dvipng_hack_alpha
        if hack:
            # dvipng blended fg/bg into RGB; recover alpha from the red plane.
            cached = 1 - X[:, :, 0]
        else:
            cached = X[:, :, -1]
        self.grey_arrayd[key] = cached
    return cached
def AddLogo(logo_name, ax, zoom=1.2):
    """Read logo from PNG file and add it to axes."""
    pixels = read_png(logo_name)
    figure = ax.get_figure()
    fig_dpi = figure.dpi
    fig_size = figure.get_size_inches()
    # NOTE: This scaling is kinda ad hoc...
    scale = .1 / 1.2 * fig_dpi * fig_size[0] / np.shape(pixels)[0]
    scale *= zoom
    box = OffsetImage(pixels, zoom=scale)
    annotation = AnnotationBbox(box, [0., 1.],
                                xybox=(2., -3.),
                                xycoords="axes fraction",
                                boxcoords="offset points",
                                box_alignment=(0., 1.),
                                pad=0.,
                                frameon=False)
    ax.add_artist(annotation)
def add_cut_glyph(parent_fig=None, parent_axes=None, pos=(0.8, 0.8), size=(.1), cut='NS', pol='NS'):
    """Inset a small glyph axes (selected by *cut*/*pol*) into the parent
    axes at *pos* (axes-fraction coordinates).

    Note: `size=(.1)` is just a parenthesised float, not a tuple.
    """
    # transformation stuff to put the little axis in the desired subplot
    if parent_fig is None:
        parent_fig = gcf()
    if parent_axes is None:
        parent_axes = gca()
    axtrans = parent_axes.transAxes
    figtrans = parent_fig.transFigure.inverted()
    figpos = figtrans.transform(axtrans.transform(pos))  # get the desired position in figure units
    pos = tuple(figpos) + (size, size)  # form up the extent box tuple
    ax = parent_fig.add_axes(pos, 'square')
    ax.set_yticks([])
    ax.set_xticks([])
    ax.set_frame_on(False)
    myglyph = '../figures/cut_glyphs_cut{cut}_rx{pol}_tx{pol}.png'.format(cut=cut, pol=pol)
    glyph = read_png(myglyph)
    ax.imshow(glyph, interpolation='none')
    return
def error():
    """ """
    # http://matplotlib.org/examples/pylab_examples/demo_annotation_box.html
    from matplotlib._png import read_png
    from matplotlib.cbook import get_sample_data
    from matplotlib.offsetbox import OffsetImage
    from matplotlib.offsetbox import AnnotationBbox
    import os
    # Resolve the bundled white background image relative to this module.
    png_path = get_sample_data(
        os.path.join(os.path.dirname(__file__), "static",
                     "white-background.png"),
        asfileobj=False)
    box = OffsetImage(read_png(png_path))
    return AnnotationBbox(box, (0.5, 0.5))
def add_logo(imagepath, ax, position, zoom, zorder):
    """Add an image on the figure

    :param imagepath: path to the image to add
    :param ax: axes object
    :param position: relative position on the map
    :param zoom: zoom level
    :param zorder:
    :return:
    """
    artwork = read_png(imagepath)
    box = OffsetImage(artwork, zoom=zoom)
    # coordinates to position this image
    annotation = AnnotationBbox(box, position,
                                xybox=(0., 0.),
                                xycoords='data',
                                pad=0.0,
                                boxcoords="offset points")
    annotation.zorder = zorder
    ax.add_artist(annotation)
class RibbonBox(object):
    # Template image, loaded once at class-definition (import) time; the
    # sample data file must be available then.
    original_image = read_png(get_sample_data("Minduka_Present_Blue_Pack.png",
                                              asfileobj=False))
    cut_location = 70
    # Blue channel carries the grayscale shape; blue-minus-red marks the
    # tintable region; channel 3 is the template's alpha.
    b_and_h = original_image[:, :, 2]
    color = original_image[:, :, 2] - original_image[:, :, 0]
    alpha = original_image[:, :, 3]
    nx = original_image.shape[1]

    def __init__(self, color):
        """Tint the class-level template with *color*; result in self.im."""
        rgb = matplotlib.colors.colorConverter.to_rgb(color)
        im = np.empty(self.original_image.shape, self.original_image.dtype)
        im[:, :, :3] = self.b_and_h[:, :, np.newaxis]
        im[:, :, :3] -= self.color[:, :, np.newaxis] * (1. - np.array(rgb))
        im[:, :, 3] = self.alpha
        self.im = im

    def get_stretched_image(self, stretch_factor):
        """Return self.im stretched vertically by repeating the row at
        cut_location; caches the result on self._cached_im."""
        stretch_factor = max(stretch_factor, 1)
        ny, nx, nch = self.im.shape
        ny2 = int(ny * stretch_factor)
        # Fill everything with the 'cut' row, then paste the original top
        # and bottom back so only the middle section is repeated.
        stretched_image = np.empty((ny2, nx, nch), self.im.dtype)
        cut = self.im[self.cut_location, :, :]
        stretched_image[:, :, :] = cut
        stretched_image[:self.cut_location, :, :] = \
            self.im[:self.cut_location, :, :]
        stretched_image[-(ny - self.cut_location):, :, :] = \
            self.im[-(ny - self.cut_location):, :, :]
        self._cached_im = stretched_image
        return stretched_image
def plot(fname, z, quality=5):
    # Texture a horizontal plane at height `z` of the module-level 3-D axes
    # `ax` with the PNG named `fname` (resolved under a hard-coded directory).
    fn = get_sample_data(
        "/Users/instance/Desktop/fword/ritz/fishDomain/plots/coop_film_chart/ff/aa/" + fname,
        asfileobj=True)
    arr = read_png(fn)
    # 10 is equal length of x and y axises of your surface
    stepX, stepY = 10. / arr.shape[0], 10. / arr.shape[1]
    X1 = np.arange(-5, 5, stepX)
    Y1 = np.arange(-5, 5, stepY)
    X1, Y1 = np.meshgrid(X1, Y1)
    # stride args allows to determine image quality
    # stride = 1 work slow
    Z = np.ones(shape=Y1.shape) * z
    # arr = arr.reshape(100,96,4)
    print(X1.shape, Y1.shape, arr.shape, Z.shape)
    # Note: Z is passed as the Y argument, so the image plane is vertical in
    # the axes' coordinate convention here.
    ax.plot_surface(X1, Z, Y1, rstride=quality, cstride=quality, facecolors=arr)
def logo_box(self):
    """Return an AnchoredOffsetbox that packs the logo image next to the
    logo text."""
    image_part = OffsetImage(
        read_png(get_sample_data(logo_location, asfileobj=False)),
        zoom=0.25, resample=1, dpi_cor=1)
    text_part = TextArea(
        logo_text,
        textprops=dict(color='#444444', fontsize=50, weight='bold'))
    row = HPacker(children=[image_part, text_part],
                  align="center", pad=0, sep=25)
    return AnchoredOffsetbox(loc=2, child=row, pad=0.8,
                             frameon=False, borderpad=0.)
def generate_visualization(img_name, rotvec, tvec):
    """Plot the camera position derived from *rotvec*/*tvec* in 3-D,
    together with its pointing direction and a plane for the QR code."""
    # If our Z coordinate will be negative, we need to reverse the vector
    # because all photos are taken from above.
    if rotvec[2] < 0:
        rvec = -1 * rotvec
    else:
        rvec = rotvec
    xs = [rvec[0] * tvec[0]]
    ys = [rvec[1] * tvec[1]]
    zs = [rvec[2] * tvec[2]]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xs, ys, zs=zs, label="Camera")
    #ax.scatter([0],[0], label="Image")
    # plot the vector of where there camera points
    l = 5  # to scale the arrows
    ax.quiver(xs[0], ys[0], zs[0],
              -rvec[0] * l, -rvec[1] * l, -rvec[2] * l,
              arrow_length_ratio=0.1)
    # try to add the image of the QR code to the graph
    fn = get_sample_data("pattern.png", asfileobj=False)
    # NOTE(review): `img` is loaded but never used below (plot_surface gets
    # no facecolors) -- confirm whether texturing was intended.
    img = read_png(fn)
    x, y = np.mgrid[-(QR_LENGTH) / 2.:(QR_LENGTH) / 2.,
                    -(QR_LENGTH) / 2.:(QR_LENGTH) / 2.]
    ax.plot_surface(x, y, 0, rstride=2, cstride=2)
    ax.set_xlabel('x distance (cm)')
    ax.set_ylabel('y distance (cm)')
    ax.set_zlabel(' Height (cm)')
    plt.legend(loc='best')
    title = "Camera location in " + img_name
    plt.title(title)
    plt.show()
class RibbonBox(object):
    """A tintable, vertically stretchable ribbon-box built from the sample
    'present' PNG (loaded once at class-definition time)."""

    original_image = read_png(get_sample_data("Minduka_Present_Blue_Pack.png",
                                              asfileobj=False))
    cut_location = 70
    # Blue channel carries the grayscale shape; blue-minus-red marks the
    # tintable region; channel 3 is the template's alpha.
    b_and_h = original_image[:, :, 2]
    color = original_image[:, :, 2] - original_image[:, :, 0]
    alpha = original_image[:, :, 3]
    nx = original_image.shape[1]

    def __init__(self, color):
        """Tint the class-level template with *color*; result in self.im."""
        rgb = matplotlib.colors.colorConverter.to_rgb(color)
        im = np.empty(self.original_image.shape, self.original_image.dtype)
        im[:, :, :3] = self.b_and_h[:, :, np.newaxis]
        im[:, :, :3] -= self.color[:, :, np.newaxis] * (1. - np.array(rgb))
        im[:, :, 3] = self.alpha
        self.im = im

    def get_stretched_image(self, stretch_factor):
        """Return self.im stretched vertically by repeating the row at
        cut_location; caches the result on self._cached_im."""
        stretch_factor = max(stretch_factor, 1)
        ny, nx, nch = self.im.shape
        ny2 = int(ny * stretch_factor)
        # Fill with the 'cut' row, then paste the original top and bottom
        # back so only the middle section is repeated.
        stretched_image = np.empty((ny2, nx, nch), self.im.dtype)
        cut = self.im[self.cut_location, :, :]
        stretched_image[:, :, :] = cut
        stretched_image[:self.cut_location, :, :] = \
            self.im[:self.cut_location, :, :]
        stretched_image[-(ny - self.cut_location):, :, :] = \
            self.im[-(ny - self.cut_location):, :, :]
        self._cached_im = stretched_image
        return stretched_image


class RibbonBoxImage(BboxImage):
    """BboxImage that re-renders a RibbonBox stretched to its bbox aspect."""

    zorder = 1

    def __init__(self, bbox, color,
                 cmap=None, norm=None, interpolation=None, origin=None,
                 filternorm=1, filterrad=4.0, resample=False, **kwargs):
        BboxImage.__init__(self, bbox,
                           cmap=cmap, norm=norm,
                           interpolation=interpolation, origin=origin,
                           filternorm=filternorm, filterrad=filterrad,
                           resample=resample, **kwargs)
        self._ribbonbox = RibbonBox(color)
        self._cached_ny = None

    def draw(self, renderer, *args, **kwargs):
        # Only re-stretch the ribbon when the rendered height changed.
        bbox = self.get_window_extent(renderer)
        stretch_factor = bbox.height / bbox.width
        ny = int(stretch_factor * self._ribbonbox.nx)
        if self._cached_ny != ny:
            arr = self._ribbonbox.get_stretched_image(stretch_factor)
            self.set_array(arr)
            self._cached_ny = ny
        BboxImage.draw(self, renderer, *args, **kwargs)


if 1:
    from matplotlib.transforms import Bbox, TransformedBbox
    from matplotlib.ticker import ScalarFormatter

    fig, ax = plt.subplots()

    years = np.arange(2004, 2009)
    box_colors = [(0.8, 0.2, 0.2),
                  (0.2, 0.8, 0.2),
                  (0.2, 0.2, 0.8),
                  (0.7, 0.5, 0.8),
                  (0.3, 0.8, 0.7),
                  ]
    heights = np.random.random(years.shape) * 7000 + 3000

    fmt = ScalarFormatter(useOffset=False)
    ax.xaxis.set_major_formatter(fmt)

    # One ribbon box per year, stretched to the bar height.
    for year, h, bc in zip(years, heights, box_colors):
        bbox0 = Bbox.from_extents(year - 0.4, 0., year + 0.4, h)
        bbox = TransformedBbox(bbox0, ax.transData)
        rb_patch = RibbonBoxImage(bbox, bc, interpolation="bicubic")
        ax.add_artist(rb_patch)
        ax.annotate(r"%d" % (int(h / 100.) * 100),
                    (year, h), va="bottom", ha="center")

    patch_gradient = BboxImage(ax.bbox, interpolation="bicubic", zorder=0.1)
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the type it aliased.
    gradient = np.zeros((2, 2, 4), dtype=float)
    gradient[:, :, :3] = [1, 1, 0.]
    gradient[:, :, 3] = [[0.1, 0.3], [0.3, 0.5]]  # alpha channel
    patch_gradient.set_array(gradient)
    ax.add_artist(patch_gradient)

    ax.set_xlim(years[0] - 0.5, years[-1] + 0.5)
    ax.set_ylim(0, 10000)

    fig.savefig('ribbon_box.png')
    plt.show()
def show_graphs():
    """Animate predicted drone positions over a map (figure 1), optitrack
    tracks (figure 3) and the matching camera frames (figure 2)."""
    plt.ion()
    predictions = np.load("filtered.npy")
    path = "/home/pold87/Downloads/imgs_first_flight/"
    # First set up the figure, the axis, and the plot element we want to animate
    plt.figure(1)
    ax = plt.axes(xlim=(0, 4300), ylim=(-2343, 2343))
    line, = ax.plot([], [], lw=2)
    plt.title('Predictions')
    xs = predictions[:, 0]
    ys = predictions[:, 1]
    start_pic = 30
    minidrone = read_png("img/minidrone.png")
    imagebox = OffsetImage(minidrone, zoom=1)
    background_map = plt.imread("../draug/img/bestmap.png")
    plt.imshow(background_map, zorder=0, extent=[0, 4300, -2343, 2343])
    plt.figure(3)
    optitrack = np.load("optitrack_coords.npy")
    ax_opti = plt.axes(xlim=(-10, 10), ylim=(-10, 10))
    line_opti, = plt.plot([], [], lw=2)
    xs_opti = optitrack[:, 0]
    ys_opti = optitrack[:, 1]
    # Rotate optitrack coordinates into the map frame (37 degrees).
    ys_opti, xs_opti = rotate_coordinates(xs_opti, ys_opti, np.radians(37))
    for i in range(start_pic, len(xs)):
        img_path = path + str(i) + ".jpg"
        xy = (xs[i], ys[i])
        plt.figure(1)
        # Remove last frame's drone icon before placing the new one.
        if i != start_pic:
            drone_artist.remove()
        ab = AnnotationBbox(imagebox, xy, xycoords='data', pad=0.0, frameon=False)
        line.set_xdata(xs[max(start_pic, i - 13):i])  # update the data
        line.set_ydata(ys[max(start_pic, i - 13):i])
        drone_artist = ax.add_artist(ab)
        plt.figure(3)
        line_opti.set_xdata(xs_opti[max(start_pic, i - 25):i])  # update the data
        line_opti.set_ydata(ys_opti[max(start_pic, i - 25):i])
        print(xs_opti[max(start_pic, i - 13):i])
        plt.figure(2)
        pic = mpimg.imread(img_path)
        if i == start_pic:
            ax_img = plt.gca()
            img_artist = ax_img.imshow(pic)
        else:
            img_artist.set_data(pic)
        plt.pause(.001)
import matplotlib.pyplot as PLT from matplotlib.offsetbox import AnnotationBbox, OffsetImage from matplotlib._png import read_png PLT.rcParams['savefig.dpi'] = 300 #图片像素 PLT.rcParams['figure.dpi'] = 300 #分辨率 PLT.figure(figsize=(20, 20)) fig = PLT.gcf() fig.clf() ax = PLT.subplot(111) # add a first image arr_hand = read_png('logo_20200611212020.png') imagebox = OffsetImage(arr_hand, zoom=.1) print(arr_hand.shape) xy = [0.5, 0.5] # coordinates to position this image ab = AnnotationBbox(imagebox, xy, xybox=(0., 0), xycoords='data', boxcoords="offset points") ax.add_artist(ab) # add second image arr_vic = read_png('logo_20200611212020.png') imagebox = OffsetImage(arr_vic, zoom=.1) # xy = [.6, .3] # coordinates to position 2nd image ab = AnnotationBbox(imagebox, xy,
# ********************************************************************** # Nothing to be changed below # ********************************************************************** culture = array([[0 for i in range(width)] for j in range(height)]) seeds = [] for _ in range(no_of_seeds): seeds.append((r.randint(0, height - 1), r.randint(0, width - 1))) population = 0 kim = read_png('assets/kim.png') imagebox = OffsetImage(kim, zoom=.1) kim_pos = [0,0] def simulate(): next_gen = culture for _ in range(width): for __ in range(height): if _ == 0 or __ == 0: if _ == 0: u = 0 if __ == 0: l = 0 else: u = next_gen[_ - 1, __] l = next_gen[_, __ - 1]
# Script fragment: load the feature and elevation maps for a 3-D surface plot.
Rot.angle = 0
axes.Draw()
# Control parameters
# Flag and Gaussian kernel size for smoothing
Smooth = 0
SmoothBy = 250
# Scale factor for elevation. Scales grayscale value of pixel
scalefactor = 1.0
# Image with surface features to draw
pngfile = 'region3_map.png'
img = read_png(pngfile)
# Image with underlying elevation map
pngfile = 'region3_el.png'
elev = read_png(pngfile)
if Smooth:
    img = SmoothX(img, SmoothBy)  # If you want to smooth the surface features
    elev = SmoothX(elev, SmoothBy)  # If you want to smooth the elevation map
# Can either use grayscale combination of all three color channels for
# elevation, or just pick one
#gray = 0.2989 * elev[:,:,0] + 0.5870 * elev[:,:,1] + 0.1140 * elev[:,:,2]
gray = elev[:, :, 2]
# Local shorthands for the trig helpers used below.
pi = np.pi
cos = np.cos
sin = np.sin
title = "Global Average Temperature vs. Number of Pirates" plt.plot(years, temperature, lw=2) plt.xlabel(xlabel) plt.ylabel(ylabel) # for every data point annotate with image and number for x in xrange(len(years)): # current data coordinate xy = years[x], temperature[x] # add image ax.plot(xy[0], xy[1], "ok") # load pirate image pirate = read_png('tall-ship.png') # zoom coefficient (move image with size) zoomc = int(pirates[x]) * (1 / 90000.) # create OffsetImage imagebox = OffsetImage(pirate, zoom=zoomc) # create anotation bbox with image and setup properties ab = AnnotationBbox(imagebox, xy, xybox=(-200.*zoomc, 200.*zoomc), xycoords='data', boxcoords="offset points", pad=0.1, arrowprops=dict(arrowstyle="->", connectionstyle="angle,angleA=0,angleB=-30,rad=3")
def main():
    """Run the sea-monkeys simulator.

    Command-line arguments (all optional): (1) number of eggs,
    (2) number of timesteps, (3) zoom level (percent).  Each timestep
    redraws every shrimp as a state/gender-specific glyph and saves a
    'Timestep_<i>.png' snapshot.
    """
    if len(sys.argv) == 1:  # no arguments are given after python3 ShrimpSimBase.py
        numstart_fish = 20  # set default values
        timesteps = 50
        zoomy = 0.03
        print('\nInput at the end of the command line includes: (1) number of eggs, (2) number of timesteps, (3) zoom level\n')  # remind user
        print('Using default input: eggs = 20, timesteps = 50, zoom level = 3...\n')
    elif len(sys.argv) == 2:
        numstart_fish = int(sys.argv[1])  # number of eggs is set to the input
        timesteps = 50  # default time steps set
        zoomy = 0.03  # default zoom level set
        print('\nInput at the end of the command line includes: (1) number of eggs, (2) number of timesteps, (3) zoom level\n')
        print('Using default input: timesteps = 50, zoom level = 3...\n')
    elif len(sys.argv) == 3:  # #eggs and #time steps is specified
        numstart_fish = int(sys.argv[1])
        timesteps = int(sys.argv[2]) + 1
        zoomy = 0.03  # default zoom level set
        print('\nInput at the end of the command line includes: (1) number of eggs, (2) number of timesteps, (3) zoom level\n')
        print('Using default input: zoom level = 3...\n')
    elif len(sys.argv) == 4:  # all arguments specified: command line input is used for #eggs, #timesteps, #zoomlevel
        numstart_fish = int(sys.argv[1])
        timesteps = int(sys.argv[2]) + 1
        zoomy = int(sys.argv[3]) / 100
    if len(sys.argv) <= 4:  # test in case input arguments is greater than required
        fig = plt.gcf()
        fig.clf()
        ax = plt.subplot(111)
        # from grautur
        paths = ['egg.png', 'hatchling_M.png', 'hatchling_F.png',
                 'juvenile_M.png', 'juvenile_F.png', 'adult_M.png',
                 'adult_F.png', 'dead.png']  # index png files in same directory
        boxes = []
        for im in paths:
            im_array = read_png(im)
            boxes.append(OffsetImage(im_array, zoom=zoomy))  # read images and convert to plottable format, with zoom ratio as specified
        eggbox = boxes[0]  # assigning the plottable images to become annotation bboxes (refer to grataur)
        hatMbox = boxes[1]
        hatFbox = boxes[2]
        juvMbox = boxes[3]
        juvFbox = boxes[4]
        adultMbox = boxes[5]
        adultFbox = boxes[6]
        deadbox = boxes[7]
        print('\n# ---- SEA MONKEYS SIMULATOR ---- #\n')
        XMAX = 1000  # Tank size is 1000 x 1000
        YMAX = 1000
        shrimps = []
        for i in range(numstart_fish):  # generate number of eggs as specified at command line
            randX = random.randint(0, XMAX)
            randY = random.randint(int(zoomy * 170 * 7), YMAX)
            # eggs start at random positions, at least 7 timesteps away from bottom
            # (or however many timesteps are set for the egg stage)
            ID = i  # give each shrimp a unique ID for pairing
            shrimps.append(Shrimp([randX, randY], i, zoomy))
        for i in range(timesteps):  # produce number of outputs = timesteps specified by command line
            print("\n ### TIMESTEP ", i, "###")
            plt.cla()  # to auto-update the plot
            timestep = i  # timestep is an argument used in the functions in shrimp.py
            # NOTE(review): this loop mutates `shrimps` (remove) while
            # iterating it, and `ab` may be stale after a 'remove' state --
            # confirm both are intended.
            for fish in shrimps:
                fish.stepChange(shrimps, XMAX, YMAX, numstart_fish, timestep)  # apply stepChange() and all functions associated to the shrimp
                # for following ifs showing annotation bboxes:
                # create plottable annotation box for matching fish state and gender
                if fish.state == 'egg':
                    ab = AnnotationBbox(eggbox, fish.pos, frameon=False)
                elif fish.state == 'hatchling':
                    if fish.gender == 'single M' or fish.gender == 'coupled M':
                        ab = AnnotationBbox(hatMbox, fish.pos, frameon=False)
                    else:
                        ab = AnnotationBbox(hatFbox, fish.pos, frameon=False)
                elif fish.state == 'juvenile':
                    if fish.gender == 'single M' or fish.gender == 'coupled M':
                        ab = AnnotationBbox(juvMbox, fish.pos, frameon=False)
                    else:
                        ab = AnnotationBbox(juvFbox, fish.pos, frameon=False)
                elif fish.state == 'adult':
                    if fish.gender == 'single M' or fish.gender == 'coupled M':
                        ab = AnnotationBbox(adultMbox, fish.pos, frameon=False)
                    else:
                        ab = AnnotationBbox(adultFbox, fish.pos, frameon=False)
                elif fish.state == 'dead':
                    ab = AnnotationBbox(deadbox, fish.pos, frameon=False)
                elif fish.state == 'remove':  # if fish.state becomes remove, remove the fish from the environment
                    shrimps.remove(fish)
                ax.add_artist(ab)  # Note plt origin is bottom left
            if i == timesteps - 1:
                print('\n# ---- END ---- #\n')
            plt.title('Timestep ' + str(i))
            plt.xlim(0, XMAX)
            plt.ylim(0, YMAX)
            plt.pause(0.5)
            plt.savefig('Timestep_' + str(i) + '.png')
            #plt.show(block=False)
    else:
        print('\nInclude at the end of the command line: (1) number of eggs, (2) number of timesteps, (3) zoom level = 1 for default\n' + 'Please try again...\n')
def DrawContourfAndLegend(contourf, legend, clipborder, patch, cmap, levels, extend, extents, x, y, z, m):
    """Draw the filled-contour layer, clip it to the region border and add
    a legend (colorbar or legend picture).

    :param contourf: filled-contour configuration (visibility flag)
    :param legend: legend configuration (values, colors, position, style)
    :param clipborder: clipping-border configuration
    :param patch: clip path applied to the contour collections
    :param cmap: colormap for the automatic-legend case
    :param levels: contour levels for the automatic-legend case
    :param extend: contour 'extend' mode
    :param extents: plot extents (xmin/xmax/ymin/ymax)
    :param x: gridded x coordinates
    :param y: gridded y coordinates
    :param z: gridded values
    :param m: basemap instance, or plt when drawing without a map
    :return:
    """
    # The filled contours, the legend and the clipping are handled together.
    xmax = extents.xmax
    xmin = extents.xmin
    ymax = extents.ymax
    ymin = extents.ymin
    if contourf.contourfvisible:
        # Draw the filled-contour layer.
        if legend.micapslegendvalue:
            CS = m.contourf(x, y, z, cmap=cmap, levels=levels,
                            extend=extend, orientation='vertical')
        else:
            CS = m.contourf(x, y, z,
                            # cax=axins,
                            levels=legend.legendvalue,
                            colors=legend.legendcolor,
                            extend=extend,
                            orientation='vertical',
                            hatches=legend.hatches)
        # Clip the filled contours with the region border.
        if clipborder.path is not None and clipborder.using:
            for collection in CS.collections:
                collection.set_clip_on(True)
                collection.set_clip_path(patch)
        # BUGFIX: CB was unbound when legend.islegendpic was true, so the
        # final `if CB is not None` raised NameError.
        CB = None
        if m is plt:
            # Insert a new axes so the legend is drawn inside the plot area.
            ax2 = plt.gca()
            axins = inset_axes(ax2, width="100%", height="100%",
                               loc=1, borderpad=0)
            axins.axis('off')
            axins.margins(0, 0)
            axins.xaxis.set_ticks_position('bottom')
            axins.yaxis.set_ticks_position('left')
            axins.set_xlim(xmin, xmax)
            axins.set_ylim(ymin, ymax)
            # Draw the legend.
            if legend.islegendpic:
                # Use a picture as the legend.
                arr_lena = read_png(legend.legendpic)
                image_box = OffsetImage(arr_lena, zoom=legend.legendopacity)
                ab = AnnotationBbox(image_box, legend.legendpos, frameon=False)
                plt.gca().add_artist(ab)
            else:
                ticks = fmt = None
                CB = plt.colorbar(CS, cmap='RdBu', anchor=legend.anchor,
                                  shrink=legend.shrink, ticks=ticks,
                                  # fraction=0.15,  # products.fraction,
                                  drawedges=True,  # not products.micapslegendvalue,
                                  filled=False,
                                  spacing='uniform',
                                  use_gridspec=False,
                                  orientation=legend.orientation,
                                  # extendfrac='auto',
                                  format=fmt)
        else:
            CB = m.colorbar(CS, location=legend.location,
                            size=legend.size, pad=legend.pad)
        if CB is not None:
            CB.ax.tick_params(axis='y', direction='in', length=0)
def plot_mutinf(mat_I, vec_s1, order, title=None):
    """Polar "mutual information" plot of orbitals placed on a circle.

    mat_I: pairwise mutual-information matrix, indexed 0-based.
    vec_s1: single-orbital entropies; scaled (x500) into marker areas.
    order: 1-based orbital ordering around the circle (used as labels).
    title: optional plot title.
    """
    #MUTUAL INFORMATION PLOT:
    plt.figure()
    N = len(mat_I)
    theta = np.zeros(N)
    r = np.zeros(N)
    labels = np.zeros(N)
    area = np.zeros(N)
    # `order` is 1-based; `o` converts it to 0-based indices into mat_I/vec_s1.
    o = np.array(order) - 1
    # Negative slice => sites are laid out clockwise.
    slice_ = -2 * pi / N
    for i in range(N):
        # Evenly spaced angles on the unit circle, starting near the top.
        theta[i] = i * slice_ + pi / 2 + slice_ / 2
        r[i] = 1.0
        labels[i] = order[i]
        area[i] = vec_s1[o[i]] * 500
    ax = plt.subplot(111, polar=True)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.grid(b=False)
    c = plt.scatter(theta, r, c="Red", s=area)
    if title is not None:
        plt.title(title)
    #this is dummy: invisible scatter (s=0) that pads the axis limits
    c1 = plt.scatter(theta - slice_ / 2, (r + 0.1), c="red", s=0)
    # generation of orbital images. If "-i" switch is passed to the script,
    # the script will incorporate orbital pictures into the image. Pictures
    # must be present in the current directory with names #.png where # is
    # the number of each site. Such images can be conveniently generated with
    # gabedit, vmd or any other orbital plotting program you desire.
    # Generation of pictures requires new python and matplotlib versions
    pics = False
    if (len(sys.argv) > 2):
        if (sys.argv[2] == '-i'):
            pics = True
    legendlines = {}
    for i in range(N):
        #plt.annotate(int(labels[i]),xy=(theta[i],(r[i]+0.2)),size='xx-large',)
        #plt.text(theta[i],(r[i]+0.18),int(labels[i]),size='xx-small',ha='center',va='center')
        plt.text(theta[i], (r[i] + 0.18), int(labels[i]),
                 size='x-small', ha='center', va='center')
        if (pics):
            # generate pictures.
            from matplotlib.offsetbox import OffsetImage, AnnotationBbox
            from matplotlib.cbook import get_sample_data
            from matplotlib._png import read_png
            img = OffsetImage(
                read_png(str(int(labels[i])) + ".png"), zoom=0.125
            )  # The zoom factor should be ideally adjusted to the size of the images
            ab = AnnotationBbox(
                img, [theta[i], r[i] + 0.47], frameon=False
            )  # pass Frameon=False to disable the frames around the images
            ax.add_artist(ab)
        # Chords between sites: line style/width encodes the magnitude of I(i,j).
        for j in range(i, N):
            x = [theta[i], theta[j]]
            y = [1, 1]
            Iij = mat_I[o[i], o[j]]
            if Iij >= 0.1:
                line = lines.Line2D(x, y, linewidth=2 * 10 * Iij, color='black',
                                    linestyle='-', alpha=1, label='0.1')
                legendlines['0.1'] = line
                ax.add_line(line)
            elif Iij >= 0.01:
                line = lines.Line2D(x, y, linewidth=2 * 30 * Iij, color='gray',
                                    linestyle='--', alpha=1, label='0.01')
                legendlines['0.01'] = line
                ax.add_line(line)
            elif Iij >= 0.001:
                line = lines.Line2D(x, y, linewidth=1.5, color='lime',
                                    linestyle=':', alpha=1, label='0.001')
                legendlines['0.001'] = line
                ax.add_line(line)
    #plt.tight_layout(h_pad = 0.5)
    #plt.subplots_adjust(bottom=0.2)
    # One legend entry per magnitude band actually present.
    ax.legend(legendlines.values(),
              [l.get_label() for l in legendlines.values()],
              bbox_to_anchor=(0.00, 1.0), fancybox=True, shadow=True)
def make_movie():
    """Render per-timestep frames of a WASP-43b transit light-curve animation.

    Builds a phase-curve + transit model (via `web` and `batman`), then for
    each timestep draws the star image, the sliced planet temperature map,
    and the light curve, saving each frame to plots/blooper_###.png.
    No parameters; all system parameters are hard-coded below.
    """
    exp_time = 103.129
    # Hard-coded WASP-43b orbital/system parameters.
    tc = 200.0
    per = 0.81347753
    a = 0.01526
    inc = 82.33
    ecc = 0.0
    omega = 90
    a_rs = 4.855
    rp = 0.15938366414961666
    T_n = 1384.463713618203
    delta_T = 495.64620450883035
    xi = 0.30020002792596134
    # NOTE(review): the three assignments below overwrite the three above —
    # only this second parameter set is actually used.
    T_n = 1127.8096575845159
    delta_T = 941.89297550917024
    xi = 0.64556829219408363
    u1 = 0
    u2 = 0
    T_s = 4520
    n_slice = 5
    n_ts = 100
    # One full orbit of timestamps ending at the transit center.
    ts = np.linspace(tc - per, tc, n_ts)
    ps = ((ts - tc) / per)
    data = np.loadtxt('WASP43_HST13467/lc_cor_out.txt')
    ps_corr = data[:, 0]
    lc_corr = data[:, 1]
    # Wrap phases into (0, 1].
    for i in range(0, len(ps)):
        if (ps[i] > 1):
            ps[i] = ps[i] - np.floor(ps[i])
        if (ps[i] <= 0):
            ps[i] = ps[i] - np.ceil(ps[i]) + 1
        if (ps[i] == 1):
            ps[0] = 0
    # Thermal phase curve from the `web` model.
    pc = web.lightcurve(n_slice, ts, tc, per, a, inc, ecc, omega, a_rs, rp,
                        xi, T_n, delta_T, u1, u2, T_s)
    # Transit model via batman, supersampled to the HST exposure time.
    bat_params = batman.TransitParams()
    bat_params.t0 = tc
    bat_params.t_secondary = tc + per / 2.0
    bat_params.per = per
    bat_params.rp = rp
    bat_params.a = a_rs
    bat_params.inc = inc
    bat_params.ecc = ecc
    bat_params.w = omega
    bat_params.u = [0.4, 0]
    bat_params.limb_dark = "quadratic"
    #FIXME - specify this value in one of the config files
    m = batman.TransitModel(bat_params, ts, supersample_factor=3,
                            exp_time=exp_time / 24. / 60. / 60.)
    # Combined light curve: phase curve + (transit - 1).
    lc = pc + (m.light_curve(bat_params) - 1.0)
    phase_c = []
    phases = []
    ratio = 1 / rp
    star_r = ratio
    # Temperature-to-color normalization range.
    dp = (T_n * 0.1)
    min_temp = T_n
    max_temp = (T_n + delta_T)
    star_r_pix = 180
    star_offset_pix = 200
    # Planet radius in image pixels.
    p_imrat = star_r_pix * rp
    fn = 'sun_transp.png'
    image = read_png(fn)
    # zero_temp = min_temp - dp
    zero_temp = min_temp
    # Dummy image only used to create a mappable for the colorbar later.
    data = [np.linspace(zero_temp, max_temp, 1000)] * 2
    cax = plt.imshow(data, interpolation='none', cmap=plt.cm.inferno)
    plt.close()
    for i in range(0, n_ts - 1):
        t = ts[i]
        coords = web.separation_of_centers(t, tc, per, a, inc, ecc, omega,
                                           a_rs, ratio)
        star_x = 0.0 - coords[0]
        star_y = 0.0 - coords[1]
        star_z = 0.0 - coords[2]
        ni = str(i)
        if len(ni) < 2:
            ni = '0' + ni
        plotname = 'plots/blooper_' + '{:03d}'.format(i) + '.png'
        # Orbital phase in [0, 1).
        phase = ((t - tc) / per)
        if (phase > 1):
            phase = phase - np.floor(phase)
        if (phase < 0):
            phase = phase + np.ceil(phase) + 1
        # Sub-observer longitude/latitude for the planet map.
        lambda0 = (np.pi + phase * 2 * np.pi)
        phi0 = np.arctan2(star_y, star_z)
        if (lambda0 > 2 * np.pi):
            lambda0 = lambda0 - 2 * np.pi
        if (lambda0 < -2 * np.pi):
            lambda0 = lambda0 + 2 * np.pi
        phases += [phase]
        planet = np.array(
            web.generate_planet(n_slice, xi, T_n, delta_T, lambda0, phi0,
                                u1, u2))
        ax1 = plt.subplot(211, aspect='equal')
        gs = gridspec.GridSpec(2, 2, width_ratios=[3, 1])
        # with sb.axes_style("ticks"):
        #     ax2 = plt.subplot(gs[2])
        #     sb.despine()
        # NOTE(review): ax2 is referenced throughout the rest of this loop but
        # its creation (the gs[2] subplot just above) is commented out — this
        # raises NameError unless ax2 exists in an enclosing scope. Confirm.
        ax3 = plt.subplot(gs[3], aspect='equal')
        phase_c += [np.sum(planet[:, 16])]
        # Central slice of the planet (columns 10/11 = angular extent,
        # 13/14 = radii, 17 = temperature — assumed from usage; TODO confirm
        # against web.generate_planet).
        thetas = np.linspace(planet[0][10], planet[0][11], 100)
        r = planet[0][14]
        radii = [0, r]
        xs = star_offset_pix + p_imrat * np.outer(
            radii, np.cos(thetas)) + coords[0] * p_imrat
        ys = star_offset_pix + p_imrat * np.outer(
            radii, np.sin(thetas)) + coords[1] * p_imrat
        # Reverse the outer arc so fill() traces a closed annular wedge.
        xs[1, :] = xs[1, ::-1]
        ys[1, :] = ys[1, ::-1]
        val = (dp + planet[0][17] - min_temp) / (dp + max_temp - min_temp)
        c = plt.cm.inferno(val)
        ax1.fill(np.ravel(xs), np.ravel(ys), edgecolor=c, color=c, zorder=2)
        ax3.fill(np.ravel(xs), np.ravel(ys), edgecolor=c, color=c, zorder=2)
        # Remaining slices, colored by slice temperature.
        for j in range(1, len(planet)):
            val = (dp + planet[j][17] -
                   min_temp) / (dp + max_temp - min_temp)
            c = plt.cm.inferno(val)
            n = planet[j]
            r1 = n[13]
            r2 = n[14]
            radii = [r1, r2]
            thetas = np.linspace(n[10], n[11], 100)
            xs = star_offset_pix + p_imrat * np.outer(
                radii, np.cos(thetas)) - coords[0] * p_imrat
            ys = star_offset_pix + p_imrat * np.outer(
                radii, np.sin(thetas)) - coords[1] * p_imrat
            xs[1, :] = xs[1, ::-1]
            ys[1, :] = ys[1, ::-1]
            ax1.fill(np.ravel(xs), np.ravel(ys), edgecolor=c, color=c,
                     zorder=2)
            ax3.fill(np.ravel(xs), np.ravel(ys), edgecolor=c, color=c,
                     zorder=2)
        thetas = np.linspace(planet[0][10], planet[0][11], 100)
        radii = [0, star_r]
        xs = np.outer(radii, np.cos(thetas))
        ys = np.outer(radii, np.sin(thetas))
        xs[1, :] = xs[1, ::-1]
        ys[1, :] = ys[1, ::-1]
        c = plt.cm.inferno(1.0)
        # Draw the star behind or in front of the planet depending on phase.
        if (abs(abs(phase) - 0.5) > 0.25):
            #in front
            s_zorder = 1
        else:
            s_zorder = 3
        ax1.set_axis_bgcolor('black')
        ax1.set_xlim(star_offset_pix + 40 * p_imrat,
                     star_offset_pix + -40 * p_imrat)
        ax1.set_ylim(star_offset_pix + -10 * p_imrat,
                     star_offset_pix + 10 * p_imrat)
        ax1.get_xaxis().set_visible(False)
        ax1.get_yaxis().set_visible(False)
        im = ax1.imshow(image, zorder=s_zorder)
        # Clip the square star image to a disc.
        patch = patches.Circle((200, 200), radius=200, transform=ax1.transData)
        im.set_clip_path(patch)
        ax1.set_axis_bgcolor('black')
        ax2.set_axis_bgcolor('black')
        ax3.set_axis_bgcolor('black')
        ax1.spines['bottom'].set_color("black")
        ax1.spines['left'].set_color("black")
        ax3.spines['bottom'].set_color("black")
        ax3.spines['left'].set_color("black")
        # Neon-blue styling for the light-curve panel.
        ax2.spines['bottom'].set_color("#04d9ff")
        ax2.spines['left'].set_color("#04d9ff")
        ax2.xaxis.label.set_color("#04d9ff")
        ax2.yaxis.label.set_color("#04d9ff")
        # Light curve up to the current frame, plus the corrected data points.
        ax2.plot(ps[:(i + 1)], lc[:(i + 1)], lw=2, color=("#04d9ff"))
        ax2.plot(ps_corr, lc_corr, ',', color=("#04d9ff"))
        # ax2.set_xlim(ps[0],ps[-1])
        # ax2.set_ylim(np.median(lc)+1.1*(np.min(lc) - np.median(lc)),np.median(lc)+1.1*(np.max(lc) - np.median(lc)))
        ax2.set_xlim(0 - 0.02, 1 + 0.02)
        ax2.set_ylabel('Relative flux')
        ax2.set_xlabel('Phase')
        ax2.set_ylim(0.9995, 1.0009)
        ax2.set_yticks(
            [0.9996, 0.9998, 1.0000, 1.0002, 1.0004, 1.0006, 1.0008])
        ax2.set_yticklabels([
            '0.9996', '0.9998', '1.0000', '1.0002', '1.0004', '1.0006',
            '1.0008'
        ])
        # Zoomed-in planet panel centered on the planet.
        bs = 1.1 * p_imrat
        p_xpos = star_offset_pix - coords[0] * p_imrat
        p_ypos = star_offset_pix - coords[1] * p_imrat
        ax3.set_xlim(p_xpos + bs, p_xpos - bs)
        ax3.set_ylim(p_ypos - bs, p_ypos + bs)
        ax3.get_xaxis().set_visible(False)
        ax3.get_yaxis().set_visible(False)
        im = ax1.imshow(image, zorder=s_zorder)
        fig = plt.gcf()
        divider3 = make_axes_locatable(ax3)
        # Append axes to the right of ax3, with 20% width of ax3
        cax3 = divider3.append_axes("right", size="20%", pad=0.05)
        cbar3 = plt.colorbar(cax, cax=cax3,
                             ticks=[1100, 1300, 1500, 1700, 1900])
        cbar3.ax.tick_params(colors=("#04d9ff"))
        cbar3.set_label('T (K)', color=("#04d9ff"))
        # horizontal colorbar
        ax2.tick_params(axis='x', colors=("#04d9ff"))
        ax2.tick_params(axis='y', colors=("#04d9ff"))
        plt.tight_layout(pad=1.0)
        plt.savefig(plotname, bbox_inches='tight', pad_inches=0.1,
                    facecolor='black')
        # plt.show()
        plt.close()
def get_star_png():
    """Return the bundled star artwork (art/sun_transp.png) from the
    installed `sp` package directory as an image array."""
    package_dir = os.path.dirname(sp.__file__)
    artwork_path = os.path.join(package_dir, 'art/sun_transp.png')
    return read_png(artwork_path)
# Top-level setup for the ant-farm animation: validate CLI args, build the
# tunnel graph, lay it out, and start the animation loop.
if args.solution and not os.path.exists(args.solution):
    raise IOError("Solution file not found")
data = parse(args.map, args.solution)
# Graph
G = nx.Graph()
G.add_edges_from(data['tunnels'])
# Fruchterman-Reingold force-directed algorithm's to set rooms.
pos = nx.spring_layout(G)
# Ants speed (animation sub-steps per game turn)
speed_rate = 7
# create an array of * on the ants object
ants = make_ants(pos, speed_rate, data)
# size of the graph
fig = plt.figure(figsize = (15, 10))
ant_img = read_png('./images/Fourmisse.png')
# One frame per (turn, sub-step); re_init redraws everything each frame.
ani = FuncAnimation(
    fig,
    re_init,
    frames=data['num_turns'] * speed_rate,
    fargs=(G, pos, speed_rate, data, ants, args, ant_img),
    interval=30,
    repeat=True)
# Demo: draggable legend, draggable text annotation, and a draggable
# image annotation (AnnotationBbox) on the same axes.
import matplotlib.pyplot as plt
ax = plt.subplot(111)
ax.plot([1,2,3], label="test")
l = ax.legend()
# Make the legend draggable with the mouse.
d1 = l.draggable()
xy = 1, 2
txt = ax.annotate("Test", xy, xytext=(-30, 30),
                  textcoords="offset points",
                  bbox=dict(boxstyle="round",fc=(0.2, 1, 1)),
                  arrowprops=dict(arrowstyle="->"))
d2 = txt.draggable()
from matplotlib._png import read_png
from matplotlib.cbook import get_sample_data
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
# Sample image shipped with matplotlib (path, not file object).
fn = get_sample_data("lena.png", asfileobj=False)
arr_lena = read_png(fn)
imagebox = OffsetImage(arr_lena, zoom=0.2)
ab = AnnotationBbox(imagebox, xy,
                    xybox=(120., -80.),
                    xycoords='data',
                    boxcoords="offset points",
                    pad=0.5,
                    arrowprops=dict(
                        arrowstyle="->",
                        connectionstyle="angle,angleA=0,angleB=90,rad=3")
                    )
ax.add_artist(ab)
# use_blit speeds up dragging by only redrawing the moved artist.
d3 = ab.draggable(use_blit=True)
plt.show()
# Script: recover a depth map from the Yale face images, then render it as a
# 3-D surface textured with the recovered albedo image.
from albedo_recover import *
num = 2
images = cache_images_yale(num)
print("images cache complete")
depth = get_f(images)
print("depth complete")
# Downsample depth by 2 in both directions to match the mgrid below.
depth = depth[::2, ::2]
save_albedo_colored_sized(168 / 2, 192 / 2, num)
print("albedo complete")
x, y = np.mgrid[0:168:2, 0:192:2]
# Absolute Windows path — presumably where save_albedo_colored_sized wrote
# its output; TODO confirm.
fn = get_sample_data("D:\\library\\python\\opencv\\04\\albedo.png",
                     asfileobj=False)
img = read_png(fn)
ax = plt.subplot(111, projection='3d')
# facecolors paints each surface quad with the albedo image pixel colors.
ax.plot_surface(x, y, depth, rstride=2, cstride=1, cmap=plt.cm.coolwarm,
                facecolors=img)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
mpatches.PathPatch.draw(self, renderer) if 1: usetex = plt.rcParams["text.usetex"] fig = plt.figure(1) # EXAMPLE 1 ax = plt.subplot(211) from matplotlib._png import read_png fn = get_sample_data("lena.png", asfileobj=False) arr = read_png(fn) text_path = TextPath((0, 0), "!?", size=150) p = PathClippedImagePatch(text_path, arr, ec="k", transform=IdentityTransform()) #p.set_clip_on(False) # make offset box offsetbox = AuxTransformBox(IdentityTransform()) offsetbox.add_artist(p) # make anchored offset box ao = AnchoredOffsetbox(loc=2, child=offsetbox, frameon=True, borderpad=0.2) ax.add_artist(ao)
def plot_trigger_efficiency_curves(trigger_1, trigger_2, pT_upper_limit=800): properties = parse_file_turn_on('/home/aashish/turn_on.dat') # properties = parse_file_turn_on('./trigger_proper_turn_on.dat') event_numbers = properties['event_number'] pTs = properties['corrected_hardest_pts'] trigger_names = properties['trigger_names'] prescales = properties['prescale'] colors = ['magenta', 'blue', 'orange', 'green', 'black', 'red'] expected_trigger_names = [ "HLT\_Jet180U", "HLT\_Jet140U", "HLT\_Jet100U", "HLT\_Jet70U", "HLT\_Jet50U", "HLT\_Jet30U" ] color = colors[expected_trigger_names.index(trigger_1.replace("_", "\_"))] pt_hist_trigger_1 = Hist(100, 0, pT_upper_limit, title=trigger_1[4:], color=color, markersize=1.0, linewidth=5) pt_hist_trigger_2 = Hist(100, 0, pT_upper_limit, title=trigger_2[4:], color=color, markersize=1.0, linewidth=5) for i in range(0, len(pTs)): if trigger_1 in trigger_names[i]: pt_hist_trigger_1.Fill(pTs[i], prescales[i]) # The len thingy is to make sure trigger names like HLT_Jet15U_HcalNoiseFiltered_v3 are excluded. 
# if trigger_2 in trigger_names[i] and len(trigger_names[i]) > (len(trigger_2) + 3): if trigger_2 in trigger_names[i]: pt_hist_trigger_2.Fill(pTs[i], prescales[i]) pt_hist_trigger_1.Divide(pt_hist_trigger_2) rplt.errorbar(pt_hist_trigger_1, color=color, markersize=10, pickradius=8, capthick=5, capsize=8, elinewidth=5) data_plot = rplt.errorbar(pt_hist_trigger_1, color=color, markersize=10, pickradius=8, capthick=5, capsize=8, elinewidth=5) data_points_x = data_plot[0].get_xdata() data_points_y = np.log(data_plot[0].get_ydata()) # print data_points_y[0:5] fitted_poly_coeffs = np.polyfit(data_points_x, data_points_y, 1) print fitted_poly_coeffs fitted_poly = np.polynomial.Polynomial(fitted_poly_coeffs) x = np.arange(120, 200, 5) plt.plot(x, fitted_poly(x), lw=5, color="red") plt.gca().xaxis.set_tick_params(width=5, length=20, labelsize=70) plt.gca().yaxis.set_tick_params(width=5, length=20, labelsize=70) ab = AnnotationBbox(OffsetImage(read_png( get_sample_data( "/home/aashish/root-6.04.06/macros/MODAnalyzer/mod_logo.png", asfileobj=False)), zoom=0.15, resample=1, dpi_cor=1), (0.23, 0.895), xycoords='figure fraction', frameon=0) plt.gca().add_artist(ab) plt.gcf().text(0.29, 0.885, "Prelim. (20\%)", fontsize=50, weight='bold', color='#444444', multialignment='center') plt.gcf().set_snap(1) # Horizontal Line. plt.plot(list(pt_hist_trigger_1.x()), [1] * len(list(pt_hist_trigger_1.x())), color="black", linewidth=5, linestyle="dashed") if trigger_1 == "HLT_L1Jet6U": lower_pT = 37 elif trigger_1 == "HLT_Jet15U": lower_pT = 56 elif trigger_1 == "HLT_Jet30U": lower_pT = 84 elif trigger_1 == "HLT_Jet50U": lower_pT = 114 elif trigger_1 == "HLT_Jet70U": lower_pT = 153 elif trigger_1 == "HLT_Jet100U": lower_pT = 196 else: lower_pT = 0 if lower_pT != 0: # CMS Vertical line. plt.plot([lower_pT, lower_pT], [plt.gca().get_ylim()[0], 1.], color=color, linewidth=3, linestyle="dashed") # # MOD Vertical line. 
# efficient_pt_x = 0.0 # efficient_pt_y = 0.0 # efficient_pt_x_s = [] # efficient_pt_y_s = [] # distance_to_previous_point_close_to_one = 0 # for i in range(0, len(list(pt_hist_trigger_1.x()))): # if abs(list(pt_hist_trigger_1.y())[i] - 1.00) < 0.1: # if distance_to_previous_point_close_to_one > 25: # # Last point close to one too far away. # # Empty the lists. # efficient_pt_x_s, efficient_pt_y_s = [], [] # efficient_pt_x_s.append(list(pt_hist_trigger_1.x())[i]) # efficient_pt_y_s.append(list(pt_hist_trigger_1.y())[i]) # distance_to_previous_point_close_to_one = 0 # else: # distance_to_previous_point_close_to_one += 1 # if len(efficient_pt_x_s) > 75: # efficient_pt_x = efficient_pt_x_s[0] # efficient_pt_y = efficient_pt_y_s[0] # break # mod_efficient_pTs = [325, 260, 196, 153, 114, 84, 50, 32] # mod_efficient_pT = mod_efficient_pTs[expected_trigger_names.index(trigger_1.replace("_", "\_"))] # plt.plot([mod_efficient_pT, mod_efficient_pT], [plt.gca().get_ylim()[0], 1.], color="purple", linewidth=3, linestyle="dashed") # if lower_pT != 0: # plt.gca().annotate("CMS\n" + str(lower_pT) + " GeV", xy=(lower_pT, 1.), xycoords='data', xytext=(-100, 250), textcoords='offset points', color=color, size=40, va="center", ha="center", arrowprops=dict(arrowstyle="simple", facecolor=color, zorder=9999, connectionstyle="angle3,angleA=0,angleB=90") ) # plt.gca().annotate("MOD\n" + str(int(mod_efficient_pT)) + " GeV", xy=(mod_efficient_pT, 1.), xycoords='data', xytext=(250, 200), textcoords='offset points', color="purple", size=40, va="center", ha="center", arrowprops=dict(arrowstyle="simple", facecolor="purple", zorder=9999, connectionstyle="angle3,angleA=45,angleB=-90") ) plt.yscale('log') plt.gca().set_ylim(plt.gca().get_ylim()[0], 100) plt.legend(frameon=0) plt.xlabel('$p_T~\mathrm{(GeV)}$', fontsize=55, rotation=0) plt.ylabel('$\mathrm{A.U.}$', fontsize=75, rotation=0, labelpad=50.) 
plt.gcf().set_size_inches(30, 21.4285714, forward=1) plt.gcf().set_snap(True) plt.tight_layout(pad=1.08, h_pad=1.08, w_pad=1.08) plt.savefig("plots/Version 4/trigger_efficiency_fit/efficiency_curves_" + trigger_1 + ".pdf") # plt.show() plt.clf()
ab = AnnotationBbox(im, xy, xybox=(-50., 50.), xycoords='data', boxcoords="offset points", pad=0.3, arrowprops=dict(arrowstyle="->")) #arrowprops=None) ax.add_artist(ab) # another image from matplotlib._png import read_png fn = get_sample_data("grace_hopper.png", asfileobj=False) arr_lena = read_png(fn) imagebox = OffsetImage(arr_lena, zoom=0.2) ab = AnnotationBbox(imagebox, xy, xybox=(120., -80.), xycoords='data', boxcoords="offset points", pad=0.5, arrowprops=dict( arrowstyle="->", connectionstyle="angle,angleA=0,angleB=90,rad=3")) ax.add_artist(ab)
transform=ax.transAxes, fontsize=16, color=palette[i], verticalalignment='top', horizontalalignment='left', bbox=props) ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(7)) ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(7)) first = False if i == 2 and ic == 1: # Molecule Inset dimer = '{0}.png'.format(args.molecule) arr_fig = read_png(dimer) imagebox = OffsetImage(arr_fig, zoom=0.25) xy = [0.1, 0.1] xybox = [0.68, 0.30] ab = AnnotationBbox( imagebox, xy, xybox=xybox, xycoords='axes fraction', boxcoords="axes fraction", frameon=False, ) #arrowprops=dict(arrowstyle="->")) mol = ax.add_artist(ab) if args.asymptotic:
def test_plot_weir_setting():
    """Run the SWMM weir-setting model and animate flow rate and weir setting
    for link 'C3', with a schematic image in a side panel.

    Steps the simulation in 300 s strides, ramping the weir setting from 0.9
    down to 0.0 and back to 1.0 at fixed step counts, then (optionally) shows
    or saves the animation and closes the model.
    """
    swmmobject = PySWMM(*get_model_files(MODEL_WEIR_SETTING_PATH))
    swmmobject.swmm_open()
    swmmobject.swmm_start(True)

    # Layout: flow (top-left), setting (bottom-left, shared x), image (right).
    fig = plt.figure()
    ax = fig.add_subplot(221)
    ax.set_ylabel('Flow Rate')
    line, = ax.plot([], [], label='C3')
    ax.grid()
    ax.legend()
    ax2 = fig.add_subplot(223, sharex=ax)
    ax2.set_ylabel('Setting')
    line2, = ax2.plot([], [], label='C3')
    ax2.grid()
    xdata, ydata = [], []
    ydata2 = []
    ax3 = fig.add_subplot(2, 2, (2, 4))
    arr_lena = read_png(IMAGE_WEIR_SETTING_PATH)
    imagebox = OffsetImage(arr_lena, zoom=0.45)
    ab = AnnotationBbox(
        imagebox, (0.5, 0.5),
        xybox=(0.5, 0.5),
        xycoords='data',
        boxcoords="offset points",
        pad=0.0,
    )
    ax3.add_artist(ab)
    ax3.axis('off')

    def data_gen(t=0):
        # Frame generator: advance the simulation one stride per frame and
        # yield the current sim time; adjusts the weir setting at fixed steps.
        i = 0
        while (True):
            time = swmmobject.swmm_stride(300)
            i += 1
            # Ramp the weir down 0.9 -> 0.0, then reopen fully at step 220.
            if i == 80:
                swmmobject.setLinkSetting('C3', 0.9)
            elif i == 90:
                swmmobject.setLinkSetting('C3', 0.8)
            elif i == 100:
                swmmobject.setLinkSetting('C3', 0.7)
            elif i == 110:
                swmmobject.setLinkSetting('C3', 0.6)
            elif i == 120:
                swmmobject.setLinkSetting('C3', 0.5)
            elif i == 130:
                swmmobject.setLinkSetting('C3', 0.4)
            elif i == 140:
                swmmobject.setLinkSetting('C3', 0.3)
            elif i == 150:
                swmmobject.setLinkSetting('C3', 0.2)
            elif i == 160:
                swmmobject.setLinkSetting('C3', 0.1)
            elif i == 170:
                swmmobject.setLinkSetting('C3', 0.0)
            elif i == 220:
                swmmobject.setLinkSetting('C3', 1.0)
            # swmm_stride returns 0.0 at end of simulation.
            if i > 0 and time == 0.0:
                break
            if i > 0 and time > 0:
                yield time

    def run(t):
        # Per-frame update: append current flow/setting and rescale axes.
        xdata.append(t)
        new_y = swmmobject.getLinkResult('C3', tka.LinkResults.newFlow)
        ydata.append(new_y)
        new_y2 = swmmobject.getLinkResult('C3', tka.LinkResults.setting)
        ydata2.append(new_y2)
        xmin, xmax = ax.get_xlim()
        ymin, ymax = ax.get_ylim()
        ymin2, ymax2 = ax2.get_ylim()
        # ax
        if new_y > ymax:
            ax.set_ylim(-0.1, 1.5 * ymax)
        if t >= xmax:
            ax.set_xlim(xmin, 1.5 * xmax)
            ax.figure.canvas.draw()
        line.set_data(xdata, ydata)
        # ax1
        if new_y2 > ymax2:
            ax2.set_ylim(-0.1, 1.2 * ymax2)
        line2.set_data(xdata, ydata2)

    ani = animation.FuncAnimation(fig, run, data_gen, blit=False,
                                  repeat=False, save_count=800, interval=10)
    show_fig = False
    if show_fig:
        plt.show()
    else:
        movie_path = os.path.join(DATA_PATH, "weir_setting.mp4")
        print(movie_path, ani)
        # ani.save(movie_path, fps=20, dpi=170, bitrate=50000)
    plt.close()
    swmmobject.swmm_end()
    swmmobject.swmm_close()
    print("Check Passed")
def plot_triad_motif_significance_profile(network, num_runs=20, title=None):
    """
    Plots the triad motif significance profile for the input network.

    Arguments:
        network  => The input network (directed or undirected).
        num_runs => The number of extraction iterations performed on the
                    network.
        title    => A custom title for the plot (let's you specify which plot
                    you're looking at).

    Returns:
        Nothing but produces a plot showing the triad motif significance
        profile of several extraction runs on the network such that the
        results are overlayed on each other.
    """
    # Determine if the network is directed or not.
    directed = nx.is_directed(network)

    # Create a figure with hardcoded dimensions and one subplot.
    fig = plt.figure(figsize=(16, 10))
    ax = fig.add_subplot(1, 1, 1)

    # Set some parameters specific to the directionality of the network.
    if directed:
        num_motifs = 13
        ax.margins(0.05, 0.05)
        image_prefix = "../project/static/directed_motif_"
    else:
        num_motifs = 2
        ax.margins(0.5, 0.5)
        image_prefix = "../project/static/undirected_motif_"

    # Load one thumbnail per motif (replaces 15 copy-pasted OffsetImage
    # lines with a single comprehension; same files, same order, same zoom).
    motif_images = [
        OffsetImage(read_png("{0}{1}.png".format(image_prefix, i)), zoom=0.6)
        for i in range(1, num_motifs + 1)
    ]

    # Define x values (integer for each motif).
    X = np.arange(1, num_motifs + 1)
    plt.xticks(X)

    # Iterate through the extraction instances (synonymous with each color
    # instance).
    for _ in range(num_runs):
        # Run the extraction code.
        Y = extract_triad_motif_significance_profile(network)
        # Plot the y-values with straight lines connecting them.
        ax.plot(X, Y)

    # Define the x-y coordinate offsets for the images on the plot.
    y_offset = -50
    x_offset = 0

    # Add each image to the plot.
    for image, x in zip(motif_images, X):
        ax.add_artist(AnnotationBbox(image, (x, ax.get_ylim()[0]),
                                     xybox=(x_offset, y_offset),
                                     xycoords='data',
                                     boxcoords='offset points',
                                     frameon=False))

    # Title and label the plot.
    plt.title(title or "Triad Motif Significance Profile")
    plt.xlabel("Triad Motifs", labelpad=75)
    plt.ylabel("Normalized Z-Score")

    # Add the y = 0 line for readability.
    ax.axhline(y=0, color="black")

    # Set the y-axis tick marks.
    ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
    ax.yaxis.set_minor_locator(ticker.MultipleLocator(1))

    # Add extra spacing at the bottom of the plot for the images.
    plt.subplots_adjust(bottom=0.2)

    # Show the resulting plot.
    plt.show()
def plot_footprint(footprints, labels, stderr=None, motif=None, title=None): import os from matplotlib.offsetbox import AnnotationBbox, OffsetImage from matplotlib._png import read_png num = len(footprints) # colors = [colorsys.hsv_to_rgb(h,0.9,0.7) for h in np.linspace(0,1,num+2)[:-1]] figure = plot.figure() subplot = figure.add_subplot(111) subplot = vizutils.remove_spines(subplot) fwdmax = 0 revmax = 0 for num, (footprint,color,label) in enumerate(zip(footprints,colors,labels)): width = footprint.size/2 fwd = footprint[:width] rev = footprint[width:] xvals = np.arange(-width/2,width/2) alpha = 0.7 subplot.plot(xvals, fwd, color=color, linestyle='-', linewidth=1, label=label, alpha=alpha) subplot.plot(xvals, -1*rev, color=color, linestyle='-', linewidth=1, label="_nolabel_", alpha=alpha) if stderr is not None: if stderr[num] is not None: subplot.fill_between(xvals, footprint[:width]-stderr[num][:width]/2, footprint[:width]+stderr[num][:width]/2, \ alpha=0.3, edgecolor=color, facecolor=color) subplot.fill_between(xvals, -(footprint[width:]+stderr[num][width:]/2), \ -(footprint[width:]-stderr[num][width:]/2), alpha=0.3, edgecolor=color, \ facecolor=color) fwdmax = max([fwdmax, fwd.max()]) revmax = max([revmax, rev.max()]) subplot.axhline(0, linestyle='--', linewidth=0.2) subplot.axvline(0, linestyle='--', linewidth=0.2) subplot.axis([xvals.min(), xvals.max(), -1*revmax, fwdmax]) legend = subplot.legend(loc=4) for text in legend.texts: text.set_fontsize('8') legend.set_frame_on(False) if motif: subplot.axvline(len(motif)-1, linestyle='--', c='g', linewidth=0.2) if motif: # overlap motif diagram over footprint motif.has_instances = False tag = '_'.join(title.split(' / ')) tmpfile = "/mnt/lustre/home/anilraj/linspec/fig/footprints/pwmlogo_%s.png"%tag motif.weblogo(tmpfile) zoom = 0.15*len(motif)/15. 
try: handle = read_png(tmpfile) imagebox = OffsetImage(handle, zoom=zoom) xy = [len(motif)/2-1,0] ab = AnnotationBbox(imagebox, xy, xycoords='data', frameon=False) subplot.add_artist(ab) except RuntimeError: print "Could not retrieve weblogo" pass os.remove(tmpfile) if title: plot.suptitle(title, fontsize=10) return figure
# We need a float array between 0-1, rather than # a uint8 array between 0-255 im2 = n.array(im).astype(n.float) / 255 fig = p.figure() ax=p.subplot(111) from matplotlib._png import read_png import matplotlib.pyplot as plt from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox from matplotlib.cbook import get_sample_data #fn = get_sample_data("../figuras/cabeca.png") arr_lena = read_png("../../figuras/cabeca.png") imagebox = OffsetImage(arr_lena, zoom=0.2) xy = (0.5, 0.7) ab = AnnotationBbox(imagebox, xy, xybox=(120., -80.), xycoords='data', boxcoords="offset points", pad=0.5, arrowprops=dict(arrowstyle="->",connectionstyle="angle,angleA=0,angleB=90,rad=3") ) ax.add_artist(ab)
def plot_triad_motif_counts(network, title=None):
    """
    Plots the triad motif counts for the input network.

    Arguments:
        network => The input network (directed or undirected).
        title   => A custom title for the plot (let's you specify which plot
                   you're looking at).

    Returns:
        Nothing but produces a plot showing the triad motif counts.
    """
    # Determine if the network is directed or not.
    directed = nx.is_directed(network)

    # Create a figure with hardcoded dimensions and one subplot.
    fig = plt.figure(figsize=(16, 10))
    ax = fig.add_subplot(1, 1, 1)

    # Set some parameters specific to the directionality of the network.
    if directed:
        num_motifs = 13
        ax.margins(0.05, 0.05)
        image_prefix = "../project/static/directed_motif_"
    else:
        num_motifs = 2
        ax.margins(0.5, 0.5)
        image_prefix = "../project/static/undirected_motif_"

    # Load one thumbnail per motif (replaces 15 copy-pasted OffsetImage
    # lines with a single comprehension; same files, same order, same zoom).
    motif_images = [
        OffsetImage(read_png("{0}{1}.png".format(image_prefix, i)), zoom=0.6)
        for i in range(1, num_motifs + 1)
    ]

    # Define x values (integer for each motif).
    X = np.arange(1, num_motifs + 1)

    # Run the extraction code.
    Y = count_triad_motifs(network)

    # Plot the y-values with straight lines connecting them.
    ax.scatter(X, Y, s=150)

    # Define the x-y coordinate offsets for the images on the plot.
    y_offset = -50
    x_offset = 0

    # Add each image to the plot.
    for image, x in zip(motif_images, X):
        ax.add_artist(AnnotationBbox(image, (x, ax.get_ylim()[0]),
                                     xybox=(x_offset, y_offset),
                                     xycoords='data',
                                     boxcoords='offset points',
                                     frameon=False))

    # Title and label the plot.
    plt.title(title or "Triad Motif Counts")
    plt.xlabel("Triad Motifs", labelpad=75)
    plt.ylabel("Frequency")

    # Set the y-axis tick marks.
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_minor_locator(ticker.MultipleLocator(5))

    # Add extra spacing at the bottom of the plot for the images.
    plt.subplots_adjust(bottom=0.2)

    # Show the resulting plot.
    plt.show()
def plot_integrated_recorded_lumi(cumulative=False):
    """Plot CMS 2010 recorded vs delivered luminosity per day (or cumulative).

    Reads run/lumi records via parse_file/filter_run_lumi, converts
    luminosities from (ub)^-1 to (pb)^-1, draws weighted date histograms,
    and saves the figure under plots/Version 6/. Python 2 code (print
    statements).

    cumulative: if True, plot the running integral and save
        lumi_cumulative.pdf; otherwise plot per-day values and save lumi.pdf.
    """
    properties = filter_run_lumi(parse_file(input_analysis_file))
    timestamps = sorted(properties['time'])
    # Sort both luminosity series by their timestamps.
    intg_rec_lumi = [x for (y, x) in sorted(zip(properties['time'],
                                                properties['intg_rec_lumi']))]
    intg_del_lumi = [x for (y, x) in sorted(zip(properties['time'],
                                                properties['intg_del_lumi']))]
    # Convert from (ub)-1 to (pb)-1
    intg_rec_lumi = [x * 1e-6 for x in intg_rec_lumi]
    intg_del_lumi = [x * 1e-6 for x in intg_del_lumi]
    dates = [datetime.datetime.fromtimestamp(int(time))
             for time in timestamps]
    total_rec_lumi = sum(intg_rec_lumi)
    total_del_lumi = sum(intg_del_lumi)
    max_lumi = max(intg_rec_lumi)
    print "Total Rec. Luminosity: {}".format(total_rec_lumi)
    print "Total Del. Luminosity: {}".format(total_del_lumi)
    # print "Max Luminosity: {}".format(max_lumi)
    # rec = [(36.1 * i) / total_del_lumi for i in intg_rec_lumi]
    # deli = [(36.1 * i) / total_del_lumi for i in intg_del_lumi]
    # intg_rec_lumi, intg_del_lumi = rec, deli
    # dates = [datetime.datetime.fromtimestamp( int(time) ).strftime('%m-%d') for time in timestamps]
    # NOTE(review): `label` is built here but never passed to any artist —
    # the hist calls below use their own label= strings. Dead code?
    if cumulative:
        label = "CMS Recorded: " + str(round(total_rec_lumi, 2)) + " $\mathrm{pb}^{-1}$"
    else:
        label = "CMS Recorded, max " + str(round(max_lumi, 2)) + " $\mathrm{pb}^{-1}$/day"
    print "Min date = ", min(dates)
    print "Max date = ", max(dates)
    # Luminosity-weighted histograms over dates (step outline style).
    plt.hist(mpl.dates.date2num(dates), label="Recorded",
             weights=intg_rec_lumi, lw=8, bins=50, cumulative=cumulative,
             histtype='step', color='orange')
    plt.hist(mpl.dates.date2num(dates), label="Delivered",
             weights=intg_del_lumi, lw=8, bins=50, cumulative=cumulative,
             histtype='step', color='green')
    years = mdates.YearLocator()  # every year
    months = mdates.MonthLocator()  # every month
    yearsFmt = mdates.DateFormatter('%Y-%M')
    # Format the ticks
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
    # plt.gca().xaxis.set_major_locator(mdates.MonthLocator())
    plt.xlabel("Date (2010)", labelpad=25, fontsize=70)
    plt.gca().ticklabel_format(axis='y', style='sci')
    if cumulative:
        plt.ylabel("Integrated Luminosity [A.U.]", labelpad=50, fontsize=70)
    else:
        plt.ylabel("Average Luminosity [A.U.]", labelpad=50, fontsize=70)
    plt.gca().get_yaxis().set_ticks([])
    print max(intg_rec_lumi),
    # Reverse legend entries so "Delivered" appears above "Recorded".
    handles, labels = plt.gca().get_legend_handles_labels()
    legend = plt.gca().legend(handles[::-1], labels[::-1],
                              bbox_to_anchor=[0.007, 0.99], frameon=False,
                              loc='upper left', fontsize=70)
    plt.gca().add_artist(legend)
    plt.gcf().set_size_inches(30, 21.4285714, forward=1)
    # plt.gca().get_yaxis().get_major_formatter().set_powerlimits((0, 0))
    plt.autoscale()
    if cumulative:
        plt.ylim(0, 330)
    else:
        plt.ylim(0, 55)
    plt.xlim(datetime.date(2010, 9, 20), datetime.date(2010, 10, 31))
    plt.locator_params(axis='x', nbins=5)
    # months = DayLocator([4, 10, 16, 22, 29])
    # months = DayLocator([1, 8, 15, 22, 29])
    # Major x ticks every Friday.
    months = WeekdayLocator(mdates.FR)
    monthsFmt = DateFormatter("%b '%y")
    plt.gca().xaxis.set_major_locator(months)
    # plt.gca().xaxis.set_major_formatter(monthsFmt)
    # plt.gca().set_xticks(plt.gca().get_xticks()[1:])
    plt.tick_params(which='major', width=5, length=25, labelsize=70)
    plt.tick_params(which='minor', width=3, length=15)
    # plt.gca().xaxis.set_minor_locator(MultipleLocator(0.02))
    if cumulative:
        plt.gca().yaxis.set_minor_locator(MultipleLocator(10))
    else:
        plt.gca().yaxis.set_minor_locator(MultipleLocator(2))
    # Invisible rectangle used as a blank legend handle for the text stamp.
    extra = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none',
                      linewidth=0)
    outside_text = plt.gca().legend([extra], ["CMS 2010 Open Data"],
                                    frameon=0, borderpad=0, fontsize=50,
                                    bbox_to_anchor=(1.0, 1.005),
                                    loc='lower right')
    plt.gca().add_artist(outside_text)
    # plt.xlim()
    # MOD logo placement (figure-fraction coordinates).
    if cumulative:
        logo = [0.062, 0.985]
    else:
        logo = [0.051, 0.978]
    logo_offset_image = OffsetImage(
        read_png(get_sample_data(
            "/home/aashish/root/macros/MODAnalyzer/mod_logo.png",
            asfileobj=False)),
        zoom=0.25, resample=1, dpi_cor=1)
    text_box = TextArea("", textprops=dict(color='#444444', fontsize=50,
                                           weight='bold'))
    logo_and_text_box = HPacker(children=[logo_offset_image, text_box],
                                align="center", pad=0, sep=25)
    anchored_box = AnchoredOffsetbox(loc=2, child=logo_and_text_box, pad=0.8,
                                     frameon=False, borderpad=0.,
                                     bbox_to_anchor=logo,
                                     bbox_transform=plt.gcf().transFigure)
    plt.gca().add_artist(anchored_box)
    plt.tight_layout()
    plt.gcf().set_size_inches(30, 24, forward=1)
    if cumulative:
        plt.savefig("plots/Version 6/lumi_cumulative.pdf")
    else:
        plt.savefig("plots/Version 6/lumi.pdf")
    plt.clf()
def plot_embedding_unsuper_num_clusters(Z_tsne, y_tsne, num_clusters, title=None, legend=True, withClustersImg=True):
    """Plot a 2-D t-SNE embedding as labeled text markers, optionally with
    per-cluster ROI thumbnails connected to the cluster centroids.

    Parameters
    ----------
    Z_tsne : array-like, shape (n_samples + n_extra, 2)
        Embedded coordinates. Rows 0..len(y_tsne)-1 are the samples; the
        rows after the samples are assumed to hold the embedded cluster
        centroids (layout assumption -- TODO confirm against the caller).
    y_tsne : sequence
        Per-sample labels; the string label 'K' is treated as "unlabeled"
        and drawn as a grey dot instead of its label text.
    num_clusters : int
        Number of cluster centroids appended to Z_tsne; also the number of
        thumbnail axes drawn along the top row of the figure.
    title : str or None
        Optional plot title.
    legend : bool
        If True, draw one legend patch per class (grey for unlabeled).
    withClustersImg : bool
        If True, load the precomputed "Closeness" PNG of the lesion whose
        embedding is nearest to each centroid and display it in the top row.

    Returns None; all drawing happens on a newly created pyplot figure.
    """
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches
    from matplotlib import offsetbox
    from matplotlib.offsetbox import TextArea, AnnotationBbox
    import matplotlib.gridspec as gridspec
    from matplotlib.patches import ConnectionPatch
    import scipy.spatial as spatial
    from matplotlib._png import read_png
    # Min-max normalize all embedded coordinates (samples and centroids
    # together) to the unit square.
    x_min, x_max = np.min(Z_tsne, 0), np.max(Z_tsne, 0)
    Z_tsne = (Z_tsne - x_min) / (x_max - x_min)
    # Split the normalized coordinates: centroid rows vs. sample rows.
    # NOTE(review): the slice starts at len(y_tsne)-1, which includes the
    # last *sample* row and drops the last centroid row -- looks like an
    # off-by-one; confirm the intended layout before relying on it.
    muZ_tsne = Z_tsne[len(y_tsne)-1:-1,:]
    Z_tsne = Z_tsne[0:len(y_tsne),:]
    # Build the figure: top row holds one small axes per cluster, the
    # remaining grid rows hold the main t-SNE scatter axes.
    figTSNE = plt.figure(figsize=(32, 24))
    numclusters_plot = num_clusters+1
    G = gridspec.GridSpec(numclusters_plot,numclusters_plot)
    # main axes for the t-SNE scatter
    ax1 = plt.subplot(G[1:numclusters_plot,0:numclusters_plot])
    # one axes per cluster for the lesion thumbnail images
    axes = []
    for ind in range(num_clusters):
        ax = plt.subplot(G[0,ind])
        axes.append(ax)
    # hide all tick marks/labels on every axes
    ax1.get_xaxis().set_visible(False)
    ax1.get_yaxis().set_visible(False)
    for ax in axes:
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.sca(ax1)
    # One color per distinct label, sampled from the Spectral colormap.
    classes = [str(c) for c in np.unique(y_tsne)]
    colors=plt.cm.Spectral(np.linspace(0,1,len(classes)))
    # (alternative colormap: plt.cm.PiYG)
    c_patchs = []
    greyc_U = np.array([0.5,0.5,0.5,0.5])  # RGBA grey for unlabeled ('K') points
    for k in range(len(classes)):
        if(str(classes[k])!='K'):
            c_patchs.append(mpatches.Patch(color=colors[k], label=classes[k]))
        else:
            c_patchs.append(mpatches.Patch(color=greyc_U, label='unlabeled'))
    # Draw each sample as its label text (colored by class), or a grey dot
    # when the label is 'K' (unlabeled).
    for i in range(Z_tsne.shape[0]):
        for k in range(len(classes)):
            if str(y_tsne[i])==classes[k]:
                colori = colors[k]
        if(str(y_tsne[i])!='K'):
            plt.text(Z_tsne[i, 0], Z_tsne[i, 1], str(y_tsne[i]), color=colori, fontdict={'weight': 'bold', 'size': 10})
        else:
            #print('{}..{}'.format(i,str(named_y[i])))
            plt.text(Z_tsne[i, 0], Z_tsne[i, 1], '.', color=greyc_U, fontdict={'weight': 'bold', 'size': 32})
    if(legend):
        plt.legend(handles=c_patchs, loc='center left', bbox_to_anchor=(1, 0.5), prop={'size':14})
    if title is not None:
        plt.title(title)
    if(withClustersImg):
        # Plot the closest image to each cluster centroid: one per cluster.
        from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
        fig_path = r'Z:\Cristina\Section3\NME_DEC\figs'
        # Load the per-lesion graph-feature table (pickled, gzip-compressed).
        # NME_nxgraphs, gzip, pickle and os are module-level names.
        with gzip.open(os.path.join(NME_nxgraphs,'nxGdatafeatures_allNMEs_descStats.pklz'), 'rb') as fin:
            nxGdatafeatures = pickle.load(fin)
        # Row 0 = lesion ids, row 2 = class names; used below to build the
        # thumbnail filenames and titles.
        YnxG_allNME = np.asarray([nxGdatafeatures['lesion_id'].values,
                    nxGdatafeatures['roi_id'].values,
                    nxGdatafeatures['classNME'].values,
                    nxGdatafeatures['nme_dist'].values,
                    nxGdatafeatures['nme_int'].values])
        # KD-tree over the sample embeddings for nearest-neighbor lookup.
        Z_embedding_tree = spatial.cKDTree(Z_tsne, compact_nodes=True)
        tsne_id_clus_centroids = []
        for ind in range(num_clusters):
            # index of the sample nearest to this centroid
            distance,index = Z_embedding_tree.query(muZ_tsne[ind])
            tsne_id_clus_centroids.append(index)
            #############################
            ###### 2) load tsne_id and display (png)
            #############################
            # NOTE(review): bare expression below has no effect (leftover
            # debugging statement).
            Z_tsne[tsne_id_clus_centroids[ind]]
            # A ConnectionPatch connects points that live in different axes:
            # here the thumbnail axes (axes fraction) to the centroid (data).
            con1 = ConnectionPatch(xyA=(0.5,0), xyB=muZ_tsne[ind], coordsA='axes fraction', coordsB='data',
                                   axesA=axes[ind], axesB=ax1, arrowstyle="simple",connectionstyle='arc3')
            # NOTE(review): `ax` still refers to the last thumbnail axes from
            # the setup loop above -- this probably should be axes[ind];
            # confirm intent before changing.
            ax.add_artist(con1)
            # Thumbnail filename: "<lesion_id>_<classNME>_Closeness.png"
            img_ROI = read_png( os.path.join(fig_path, '{}_{}_Closeness.png'.format(str(YnxG_allNME[0][tsne_id_clus_centroids[ind]]),
                                                                                    str(YnxG_allNME[2][tsne_id_clus_centroids[ind]])) ) )
            axes[ind].imshow(img_ROI, cmap=plt.cm.gray)
            axes[ind].set_adjustable('box-forced')
            axes[ind].set_title('MST_'+str(YnxG_allNME[0][tsne_id_clus_centroids[ind]]))
    #ax1.set_xlim(-0.1,1.1)
    #ax1.set_ylim(-0.1,1.1)
    return
def plot_inst_lumi():
    """Histogram the average instantaneous luminosity per date and save it
    as "plots/Version 6/lumi/inst_lumi_number.pdf".

    Reads `properties['time']` (unix timestamps) and
    `properties['avg_inst_lumi']` from the module-level
    `parse_file(input_analysis_file)`. Luminosities are scaled by 1e-3
    before plotting (unit conversion -- presumably to Hz/ub; TODO confirm).
    Draws the CMS 2010 Open Data label, the MOD logo and a "Prelim." tag,
    then clears the current figure. Returns None.
    """
    properties = parse_file(input_analysis_file)
    timestamps = sorted(properties['time'])
    # Sort luminosities by their timestamps so they line up with `dates`.
    inst_lumi = [x for (y,x) in sorted(zip(properties['time'], properties['avg_inst_lumi']))]
    max_lumi = max(inst_lumi)
    print "Max Inst Luminosity: {}".format(max_lumi)
    # scale by 1e-3 (unit conversion; see docstring)
    inst_lumi = [x * 1e-3 for x in inst_lumi]
    dates = [datetime.datetime.fromtimestamp( int(time) ) for time in timestamps]
    # dates = [datetime.datetime.fromtimestamp( int(time) ).strftime('%m-%d') for time in timestamps]
    label = "Max. Inst. Lumi.: " + str(round(max_lumi, 2)) + " Hz/$\mathrm{\mu b}$"
    # Weighted histogram: each date bin accumulates its luminosity values.
    plt.hist(mpl.dates.date2num(dates), label=label, weights=inst_lumi, bins=25, color='orange', edgecolor='darkorange')
    years = mdates.YearLocator()   # every year (unused below)
    months = mdates.MonthLocator()  # every month (unused below)
    yearsFmt = mdates.DateFormatter('%Y-%M')  # unused below
    # format the ticks: day + abbreviated month, one major tick per month
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
    plt.gca().xaxis.set_major_locator(mdates.MonthLocator())
    plt.xlabel("Date (UTC)", labelpad=40, fontsize=60)
    plt.ylabel("Peak Delivered Luminosity (Hz/$\mathrm{\mu b}$)", labelpad=50, fontsize=60)
    plt.legend(bbox_to_anchor=[0.007, 0.85], frameon=False, loc='upper left', fontsize=50)
    plt.gcf().set_size_inches(30, 21.4285714, forward=1)
    plt.gcf().autofmt_xdate()
    plt.autoscale()
    # Fix the x range to the 2010 data-taking window.
    plt.xlim(datetime.date(2010, 3, 30), datetime.date(2010, 10, 31))
    # plt.gca().xaxis.set_minor_locator(MultipleLocator(0.02))
    # Invisible rectangle used as a legend handle so a second legend can
    # carry the "CMS 2010 Open Data" text without a marker.
    extra = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
    outside_text = plt.gca().legend( [extra], ["CMS 2010 Open Data"], frameon=0, borderpad=0, fontsize=50, bbox_to_anchor=(1.0, 1.005), loc='lower right')
    # add_artist keeps this second legend alive alongside the first one
    plt.gca().add_artist(outside_text)
    plt.tick_params(which='major', width=5, length=15, labelsize=60)
    # Place the MOD logo at a fixed figure-fraction position.
    ab = AnnotationBbox(OffsetImage(read_png(get_sample_data("/home/aashish/root/macros/MODAnalyzer/mod_logo.png", asfileobj=False)), zoom=0.15, resample=1, dpi_cor=1), (0.205, 0.840), xycoords='figure fraction', frameon=0)
    plt.gca().add_artist(ab)
    preliminary_text = "Prelim. (20\%)"
    plt.gcf().text(0.270, 0.825, preliminary_text, fontsize=60, weight='bold', color='#444444', multialignment='center')
    plt.savefig("plots/Version 6/lumi/inst_lumi_number.pdf")
    plt.clf()
mpatches.PathPatch.draw(self, renderer) if 1: usetex = plt.rcParams["text.usetex"] fig = plt.figure(1) # EXAMPLE 1 ax = plt.subplot(211) from matplotlib._png import read_png fn = get_sample_data("grace_hopper.png", asfileobj=False) arr = read_png(fn) text_path = TextPath((0, 0), "!?", size=150) p = PathClippedImagePatch(text_path, arr, ec="k", transform=IdentityTransform()) #p.set_clip_on(False) # make offset box offsetbox = AuxTransformBox(IdentityTransform()) offsetbox.add_artist(p) # make anchored offset box ao = AnchoredOffsetbox(loc=2, child=offsetbox, frameon=True, borderpad=0.2)
# Demo script: scatter-plot a set of house locations (as a PNG marker image)
# and a battery location; the script continues past this excerpt.
import matplotlib.pyplot as plt
import numpy as np
import csv
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from matplotlib._png import read_png
# make a subplot to allow for add_artist
ax = plt.subplot(111)
# get the house image (loaded from the working directory) and wrap it in an
# OffsetImage so it can be placed on the axes as a marker
house = read_png('house.png')
houseimg = OffsetImage(house, zoom=.1)
# get the house coordinates (hard-coded [x, y] demo positions)
xy = []
battery = []
cable_length = 0
xy.append([3, 13])
xy.append([1, 7])
xy.append([5, 6])
xy.append([11, 1])
xy.append([15, 8])
xy.append([19, 12])
xy.append([19, 4])
battery.append([11, 8])
# mark the (single) battery position
plt.scatter(battery[0][0], battery[0][1])
# search bounds -- initialized to the battery x; presumably expanded by the
# code that follows this excerpt (TODO confirm)
x_min = 11
x_max = 11