def add_cbar(fig, ax, im, **kwargs):
    # deal with kwargs
    kw = update_dict(add_cbar_kwargs_default, kwargs)
    find_bad_keys(add_cbar_kwargs_default, kwargs, 'add_cbar')

    # get fig dimensions
    fig_width_inches, fig_height_inches = fig.get_size_inches()
    fig_aspect = fig_height_inches / fig_width_inches

    # get ax dimensions
    ax_left, ax_right, ax_bottom, ax_top = axis_range(ax)
    ax_width = ax_right - ax_left
    ax_height = ax_top - ax_bottom

    if kw.cbar_pos == 'bottom':
        orientation = 'horizontal'
        cbar_height = kw.cbar_thick / fig_height_inches
        cbar_width = cbar_height / kw.cbar_aspect * fig_aspect
        if cbar_width > kw.tol * ax_width:  # don't let cbar be thicker than plot!
            cbar_width = kw.tol * ax_width
        # centrally position colorbar underneath the axes
        label_buff = 3 / 8 / fig_height_inches  # needs to contain
        # the colorbar ticklabels and little buffer space
        lilbit = 1 / 16 / fig_height_inches
        cbar_left = ax_left + 0.5 * ax_width - 0.5 * cbar_width
        cbar_bottom = ax_bottom - lilbit - cbar_height -\
            (kw.cbar_no - 1) * (label_buff + cbar_height)
    elif kw.cbar_pos == 'right':
        orientation = 'vertical'
        cbar_width = kw.cbar_thick / fig_width_inches
        cbar_height = cbar_width / kw.cbar_aspect / fig_aspect
        if cbar_height > kw.tol * ax_height:
            cbar_height = kw.tol * ax_height
        # centrally position colorbar to right of axes
        label_buff = 3 / 4 / fig_width_inches  # needs to contain
        # the colorbar ticklabels and little buffer space
        lilbit = 1 / 16 / fig_width_inches
        cbar_bottom = ax_bottom + 0.5 * ax_height - 0.5 * cbar_height
        cbar_left = ax_right + lilbit + (kw.cbar_no - 1) * (label_buff + cbar_width)

    cax = fig.add_axes((cbar_left, cbar_bottom, cbar_width, cbar_height))
    cbar = plt.colorbar(im, cax=cax, orientation=orientation)

    # deal with labels
    if kw.nosci or kw.logscale:
        cbar_label = kw.units
    else:
        cbar_label = (r'$\times10^{%i}\ $' % kw.exp) + kw.units

    # font size for the tick labels
    cax.tick_params(labelsize=kw.cbar_fs)
    #cbar.ax.tick_params(labelsize=fontsize)

    # ticklabel format
    if not kw.logscale:
        if kw.posdef:
            tickvals = [kw.minmax[0], kw.minmax[1]]
        else:
            tickvals = [kw.minmax[0], 0, kw.minmax[1]]
        if kw.cbar_labels is None:
            fmt = '%.' + str(kw.cbar_prec) + 'f'
            ticklabels = []
            for tickval in tickvals:
                ticklabels.append(fmt % tickval)
        else:
            ticklabels = kw.cbar_labels
        if kw.ticklabels is not None:
            ticklabels = kw.ticklabels
        if kw.tickvals is not None:
            tickvals = kw.tickvals
        cbar.set_ticks(tickvals)
        cbar.set_ticklabels(ticklabels)
    else:
        locator = ticker.LogLocator(subs='all')
        cbar.set_ticks(locator)

    if kw.cbar_pos == 'bottom':
        fig.text(cbar_left + cbar_width + lilbit * fig_aspect,
                 cbar_bottom + 0.5 * cbar_height, cbar_label,
                 ha='left', va='center', fontsize=kw.cbar_fs)
    elif kw.cbar_pos == 'right':
        #fig.text(cbar_left + cbar_width + lilbit/fig_aspect,
        #         cbar_bottom + 0.5*cbar_height, cbar_label,
        #         ha='left', va='center', fontsize=kw.fontsize)
        cax.set_title(cbar_label, ha='left', fontsize=kw.cbar_fs)
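# -- A minimal usage sketch for add_cbar (an assumption, not part of the
# -- original module): it presumes the module-level helpers update_dict,
# -- find_bad_keys, axis_range and the add_cbar_kwargs_default dictionary
# -- are importable, and that the defaults supply cbar_pos, minmax, units, etc.
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(np.random.rand(32, 32), cmap='viridis')
add_cbar(fig, ax, im)  # all options taken from add_cbar_kwargs_default
plt.show()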
def axis_label_and_ticks(self, axis, data_array, name, dim_to_shape):
    """
    Get dimensions and label (if present) from requested axis.
    Also return axes tick formatters and locators.
    """

    # Create some default axis tick formatter, depending on whether log
    # for that axis will be True or False
    formatter = {
        False: ticker.ScalarFormatter(),
        True: ticker.LogFormatterSciNotation()
    }

    locator = {False: ticker.AutoLocator(), True: ticker.LogLocator()}

    dim = axis
    # Convert to Dim object?
    if isinstance(dim, str):
        dim = sc.Dim(dim)

    underlying_dim = dim
    non_dimension_coord = False
    var = None

    if dim in data_array.coords:

        dim_coord_dim = data_array.coords[dim].dims[-1]
        tp = data_array.coords[dim].dtype

        if tp == sc.dtype.vector_3_float64:
            var = make_fake_coord(dim_coord_dim,
                                  dim_to_shape[dim_coord_dim],
                                  unit=data_array.coords[dim].unit)
            form = ticker.FuncFormatter(
                lambda val, pos: "(" + ",".join([
                    value_to_string(item, precision=2)
                    for item in self.scipp_obj_dict[name].coords[dim].values[int(val)]
                ]) + ")" if (int(val) >= 0
                             and int(val) < dim_to_shape[dim_coord_dim]) else "")
            formatter.update({False: form, True: form})
            locator[False] = ticker.MaxNLocator(integer=True)
            if dim != dim_coord_dim:
                underlying_dim = dim_coord_dim

        elif tp == sc.dtype.string:
            var = make_fake_coord(dim_coord_dim,
                                  dim_to_shape[dim_coord_dim],
                                  unit=data_array.coords[dim].unit)
            form = ticker.FuncFormatter(
                lambda val, pos: self.scipp_obj_dict[name].coords[dim].values[int(val)]
                if (int(val) >= 0 and int(val) < dim_to_shape[dim_coord_dim]) else "")
            formatter.update({False: form, True: form})
            locator[False] = ticker.MaxNLocator(integer=True)
            if dim != dim_coord_dim:
                underlying_dim = dim_coord_dim

        elif dim != dim_coord_dim:
            # non-dimension coordinate
            non_dimension_coord = True
            if dim_coord_dim in data_array.coords:
                var = data_array.coords[dim_coord_dim]
            else:
                var = make_fake_coord(dim_coord_dim, dim_to_shape[dim_coord_dim])
            underlying_dim = dim_coord_dim
            form = ticker.FuncFormatter(
                lambda val, pos: value_to_string(
                    data_array.coords[dim].values[np.abs(var.values - val).argmin()]))
            formatter.update({False: form, True: form})

        else:
            var = data_array.coords[dim]

    else:
        # dim not found in data_array.coords
        var = make_fake_coord(dim, dim_to_shape[dim])

    label = var
    if non_dimension_coord:
        label = data_array.coords[dim]

    return underlying_dim, var, label, formatter, locator
def gen_snap_all(rarr, dir_name, base_name, snap_num, size, title_name=''): rc('font', size=15) fig = plt.figure() fig.set_figwidth(10) fig.set_figheight(8.05) fig.subplots_adjust(left=0.08, right=0.96, top=0.95, bottom=0.06) plt.figtext(0.5, 0.97, title_name, ha='center', size='large') majorLocator = ticker.MultipleLocator(10) minorLocator = ticker.MultipleLocator(5) ################### snap_den #################################### ax = fig.add_subplot(221) arr = np.sort(rarr, order='den') x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] den = arr['den'][:] idw = np.where(arr['type'] == mp.TYPE_WARM) idc = np.where(arr['type'] == mp.TYPE_COLD) ids = np.where(arr['type'] == mp.TYPE_STAR_PARAL) width, height = rcParams['figure.figsize'] sz = min(width, height) # plt.xlabel('X [kpc]') plt.ylabel('Z [kpc]') cmap = mpl.cm.jet norm = mpl.colors.LogNorm() plt.scatter(x[idw], z[idw], s = 1, c = den[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = 1e-7, vmax = 10, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) cb = plt.colorbar(ticks=ticker.LogLocator(base=10.0)) cb.set_label('Density [cm$^{-3}$]') plt.xlim(-size, size) plt.ylim(-size, size) ######################## gen_snap_T #################################### ax = fig.add_subplot(222) ax.apply_aspect() pos = ax.get_position().get_points() x4 = pos[0][0] width = pos[1][0] - pos[0][0] height = pos[1][1] - pos[0][1] arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] T = arr['T'][:] idw = np.where(arr['type'] == mp.TYPE_WARM) idc = np.where(arr['type'] == mp.TYPE_COLD) ids = np.where(arr['type'] == mp.TYPE_STAR_PARAL) # plt.xlabel('X [kpc]') # plt.ylabel('Z [kpc]') cmap = mpl.cm.jet norm = mpl.colors.LogNorm() plt.scatter(x[idw], z[idw], s = 1, c = T[idw], marker = 'o', cmap = cmap, \ norm = norm, vmin = 1e3, vmax = 1e7, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) cb = plt.colorbar(ticks=ticker.LogLocator(base=10.0)) cb.set_label('Temperature [K]') plt.xlim(-size, size) plt.ylim(-size, size) ################### gen_snap_cold_mass #################################### ax = fig.add_subplot(223) ax.apply_aspect() pos = ax.get_position().get_points() y4 = pos[0][1] arr = np.sort(rarr, order='mass') # arr = rarr x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] mass = arr['mass'][:] idw = np.where(arr['type'] == mp.TYPE_WARM) idc = np.where(arr['type'] == mp.TYPE_COLD) ids = np.where(arr['type'] == mp.TYPE_STAR_PARAL) if np.size(idc) == 0: return plt.xlabel('X [kpc]') plt.ylabel('Z [kpc]') # plt.subplots_adjust(left = 0.13, right = 0.93, top = 0.93, bottom = 0.13) cmap = mpl.cm.jet norm = mpl.colors.LogNorm() plt.scatter(x[idc], z[idc], s = 2, c = mass[idc], marker = 'o', \ cmap = cmap, norm = norm, vmin = 1e3, vmax = 1e7, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) cb = plt.colorbar(ticks=ticker.LogLocator(base=10.0)) cb.set_label('Cold Cloud Mass [M$_\odot$]') plt.xlim(-size, size) plt.ylim(-size, size) ################### dm_star #################################### ax = fig.add_subplot(224, aspect='equal') ax.set_anchor('W') # ax = 
fig.add_axes([x4, y4, width, height]) arr = np.sort(rarr, order='mass') x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] mass = arr['mass'][:] idw = np.where(arr['type'] == mp.TYPE_WARM) idc = np.where(arr['type'] == mp.TYPE_COLD) ids = np.where(arr['type'] == mp.TYPE_STAR_PARAL) idm = np.where(arr['type'] == mp.TYPE_DM_PARAL) # if np.size(idc) == 0: # return plt.xlabel('X [kpc]') # plt.ylabel('Z [kpc]') # plt.subplots_adjust(left = 0.13, right = 0.93, top = 0.93, bottom = 0.13) plt.scatter(x[idm], z[idm], s = 1, c = 'k', marker = 'o', \ edgecolors = 'none') if (np.size(ids) > 0): plt.scatter(x[ids], z[ids], s = 2, c = 'r', marker = 'o', \ edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) plt.xlim(-size, size) plt.ylim(-size, size) ########################## save ###################################### print 'writing xz...' figure_name = 'Panel-xz-' + dir_name + '-' + base_name + '-' \ + snap_num + '.png' plt.savefig(figure_name) ###################################################################### ################################## X Y ############################### ###################################################################### plt.clf() fig = plt.figure() fig.set_figwidth(10) fig.set_figheight(8.05) fig.subplots_adjust(left=0.08, right=0.96, top=0.95, bottom=0.06) plt.figtext(0.5, 0.97, title_name, ha='center', size='large') ################### snap_den #################################### ax = fig.add_subplot(221) arr = np.sort(rarr, order='den') x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] den = arr['den'][:] idw = np.where(arr['type'] == mp.TYPE_WARM) idc = np.where(arr['type'] == mp.TYPE_COLD) ids = np.where(arr['type'] == mp.TYPE_STAR_PARAL) width, height = rcParams['figure.figsize'] sz = min(width, height) # plt.xlabel('X [kpc]') plt.ylabel('Y [kpc]') cmap = mpl.cm.jet norm = mpl.colors.LogNorm() plt.scatter(x[idw], y[idw], s = 1, c = den[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = 1e-7, vmax = 10, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) cb = plt.colorbar(ticks=ticker.LogLocator(base=10.0)) cb.set_label('Density [cm$^{-3}$]') plt.xlim(-size, size) plt.ylim(-size, size) ######################## gen_snap_T #################################### ax = fig.add_subplot(222) ax.apply_aspect() pos = ax.get_position().get_points() x4 = pos[0][0] width = pos[1][0] - pos[0][0] height = pos[1][1] - pos[0][1] arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] T = arr['T'][:] idw = np.where(arr['type'] == mp.TYPE_WARM) idc = np.where(arr['type'] == mp.TYPE_COLD) ids = np.where(arr['type'] == mp.TYPE_STAR_PARAL) # plt.xlabel('X [kpc]') # plt.ylabel('Z [kpc]') cmap = mpl.cm.jet norm = mpl.colors.LogNorm() plt.scatter(x[idw], y[idw], s = 1, c = T[idw], marker = 'o', cmap = cmap, \ norm = norm, vmin = 1e3, vmax = 1e7, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) cb = plt.colorbar(ticks=ticker.LogLocator(base=10.0)) cb.set_label('Temperature [K]') plt.xlim(-size, size) plt.ylim(-size, size) ################### gen_snap_cold_mass #################################### ax = 
fig.add_subplot(223) ax.apply_aspect() pos = ax.get_position().get_points() y4 = pos[0][1] arr = np.sort(rarr, order='mass') # arr = rarr x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] mass = arr['mass'][:] idw = np.where(arr['type'] == mp.TYPE_WARM) idc = np.where(arr['type'] == mp.TYPE_COLD) ids = np.where(arr['type'] == mp.TYPE_STAR_PARAL) if np.size(idc) == 0: return plt.xlabel('X [kpc]') plt.ylabel('Y [kpc]') # plt.subplots_adjust(left = 0.13, right = 0.93, top = 0.93, bottom = 0.13) cmap = mpl.cm.jet norm = mpl.colors.LogNorm() plt.scatter(x[idc], y[idc], s = 2, c = mass[idc], marker = 'o', \ cmap = cmap, norm = norm, vmin = 1e3, vmax = 1e7, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) cb = plt.colorbar(ticks=ticker.LogLocator(base=10.0)) cb.set_label('Cold Cloud Mass [M$_\odot$]') plt.xlim(-size, size) plt.ylim(-size, size) ################### dm_star #################################### ax = fig.add_subplot(224, aspect='equal') ax.set_anchor('W') # ax = fig.add_axes([x4, y4, width, height]) arr = np.sort(rarr, order='mass') x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] mass = arr['mass'][:] idw = np.where(arr['type'] == mp.TYPE_WARM) idc = np.where(arr['type'] == mp.TYPE_COLD) ids = np.where(arr['type'] == mp.TYPE_STAR_PARAL) idm = np.where(arr['type'] == mp.TYPE_DM_PARAL) # if np.size(idc) == 0: # return plt.xlabel('X [kpc]') # plt.ylabel('Z [kpc]') # plt.subplots_adjust(left = 0.13, right = 0.93, top = 0.93, bottom = 0.13) plt.scatter(x[idm], y[idm], s = 1, c = 'k', marker = 'o', \ edgecolors = 'none') if (np.size(ids) > 0): plt.scatter(x[ids], y[ids], s = 2, c = 'r', marker = 'o', \ edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) plt.xlim(-size, size) plt.ylim(-size, size) ########################## save ###################################### figure_name = 'Panel-xy-' + dir_name + '-' + base_name + '-' \ + snap_num + '.png' plt.savefig(figure_name)
def multi_T(dir_name, base_name, nproc, size, snap_num): title = dir_name rc('font', size=18) fig = plt.figure() fig.set_figwidth(10) fig.set_figheight(9) fig.subplots_adjust(left = 0.10, right = 0.85, top = 0.95, bottom = 0.10, \ wspace = 0.0, hspace = 0.0) # plt.figtext(0.5, 0.94, title, ha = 'center', size = 'large') majorLocator = ticker.MultipleLocator(20) minorLocator = ticker.MultipleLocator(4) nullFormatter = ticker.NullFormatter() cmap = mpl.cm.jet norm = mpl.colors.LogNorm() vmin = 1.0E3 vmax = 1.0E7 ####################################################################### ########################### X - Z #################################### ####################################################################### rc('font', size=18) plt.clf() fig = plt.figure() fig.set_figwidth(10) fig.set_figheight(9) fig.subplots_adjust(left = 0.10, right = 0.85, top = 0.95, bottom = 0.10, \ wspace = 0.0, hspace = 0.0) # plt.figtext(0.5, 0.94, title, ha = 'center', size = 'large') ############################### snap 0 #################################### ntot, time, rarr = mp.read_snap_paral(mp.prefix, dir_name, base_name, snap_num[0], nproc) ax = fig.add_subplot(221) arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] type = arr['type'][:] r = np.sqrt(x * x + y * y + z * z) T = arr['T'][:] boolarr = np.logical_and(type == mp.TYPE_WARM, r < 50.0) # boolarr = np.logical_and(boolarr, np.abs(z) < 1.0) idw = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_COLD, r < 50.0) idc = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_STAR_PARAL, r < 50.0) ids = np.where(boolarr) width, height = rcParams['figure.figsize'] sz = min(width, height) # plt.xlabel('X [kpc]') plt.ylabel('Z [kpc]') plt.scatter(x[idw], z[idw], s = 1, c = T[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = vmin, vmax = vmax, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) ax.xaxis.set_major_formatter(nullFormatter) # ax.yaxis.set_major_formatter(nullFormatter) plt.xlim(-size, size * 0.99) plt.ylim(-size, size) ################################# snap 1 #################################### ntot, time, rarr = mp.read_snap_paral(mp.prefix, dir_name, base_name, snap_num[1], nproc) ax = fig.add_subplot(222) arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] type = arr['type'][:] r = np.sqrt(x * x + y * y + z * z) T = arr['T'][:] boolarr = np.logical_and(type == mp.TYPE_WARM, r < 50.0) # boolarr = np.logical_and(boolarr, np.abs(z) < 1.0) idw = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_COLD, r < 50.0) idc = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_STAR_PARAL, r < 50.0) ids = np.where(boolarr) width, height = rcParams['figure.figsize'] sz = min(width, height) # plt.xlabel('X [kpc]') # plt.ylabel('Z [kpc]') plt.scatter(x[idw], z[idw], s = 1, c = T[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = vmin, vmax = vmax, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) ax.xaxis.set_major_formatter(nullFormatter) ax.yaxis.set_major_formatter(nullFormatter) plt.xlim(-size, size * 0.99) plt.ylim(-size, size) ################################# snap 2 #################################### ntot, time, rarr = 
mp.read_snap_paral(mp.prefix, dir_name, base_name, snap_num[2], nproc) ax = fig.add_subplot(223) arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] type = arr['type'][:] r = np.sqrt(x * x + y * y + z * z) T = arr['T'][:] boolarr = np.logical_and(type == mp.TYPE_WARM, r < 50.0) # boolarr = np.logical_and(boolarr, np.abs(z) < 1.0) idw = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_COLD, r < 50.0) idc = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_STAR_PARAL, r < 50.0) ids = np.where(boolarr) width, height = rcParams['figure.figsize'] sz = min(width, height) plt.xlabel('X [kpc]') plt.ylabel('Z [kpc]') plt.scatter(x[idw], z[idw], s = 1, c = T[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = vmin, vmax = vmax, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) # ax.xaxis.set_major_formatter(nullFormatter) # ax.yaxis.set_major_formatter(nullFormatter) plt.xlim(-size, size * 0.99) plt.ylim(-size, size) ################################ snap 3 #################################### ntot, time, rarr = mp.read_snap_paral(mp.prefix, dir_name, base_name, snap_num[3], nproc) ax = fig.add_subplot(224) arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] type = arr['type'][:] r = np.sqrt(x * x + y * y + z * z) T = arr['T'][:] boolarr = np.logical_and(type == mp.TYPE_WARM, r < 50.0) # boolarr = np.logical_and(boolarr, np.abs(z) < 1.0) idw = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_COLD, r < 50.0) idc = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_STAR_PARAL, r < 50.0) ids = np.where(boolarr) width, height = rcParams['figure.figsize'] sz = min(width, height) plt.xlabel('X [kpc]') # plt.ylabel('Z [kpc]') plt.scatter(x[idw], z[idw], s = 1, c = T[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = vmin, vmax = vmax, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) # ax.xaxis.set_major_formatter(nullFormatter) ax.yaxis.set_major_formatter(nullFormatter) plt.xlim(-size, size * 0.99) plt.ylim(-size, size) ######################### color bar ################################## # [xcoord, ycoord, width, height] cbaxes = fig.add_axes([0.87, 0.10, 0.03, 0.85]) cb = plt.colorbar(orientation='vertical', \ ticks = ticker.LogLocator(base = 10.0), cax = cbaxes) cb.set_label('Temperature [K]') ######################## label ###################################### label = 't = 0.5 Gyr' plt.figtext(0.28, 0.9, label, ha='center', size=20) label = 't = 1 Gyr' plt.figtext(0.68, 0.9, label, ha='center', size=20) label = 't = 2 Gyr' plt.figtext(0.28, 0.47, label, ha='center', size=20) label = 't = 3 Gyr' plt.figtext(0.68, 0.47, label, ha='center', size=20) ########################## save ###################################### figure_name = 'TPanel-xz-' + dir_name + '.png' plt.savefig(figure_name) ####################################################################### ########################### X - Y #################################### ####################################################################### rc('font', size=18) plt.clf() fig = plt.figure() fig.set_figwidth(10) fig.set_figheight(9) fig.subplots_adjust(left = 0.10, right = 0.85, top = 0.95, bottom = 0.10, \ wspace = 0.0, 
hspace = 0.0) # plt.figtext(0.5, 0.94, title, ha = 'center', size = 'large') ############################### snap 0 #################################### ntot, time, rarr = mp.read_snap_paral(mp.prefix, dir_name, base_name, snap_num[0], nproc) ax = fig.add_subplot(221) arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] type = arr['type'][:] r = np.sqrt(x * x + y * y + z * z) T = arr['T'][:] boolarr = np.logical_and(type == mp.TYPE_WARM, r < 50.0) # boolarr = np.logical_and(boolarr, np.abs(z) < 1.0) idw = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_COLD, r < 50.0) idc = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_STAR_PARAL, r < 50.0) ids = np.where(boolarr) width, height = rcParams['figure.figsize'] sz = min(width, height) # plt.xlabel('X [kpc]') plt.ylabel('Y [kpc]') plt.scatter(x[idw], y[idw], s = 1, c = T[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = vmin, vmax = vmax, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) ax.xaxis.set_major_formatter(nullFormatter) # ax.yaxis.set_major_formatter(nullFormatter) plt.xlim(-size, size * 0.99) plt.ylim(-size, size) ################################# snap 1 #################################### ntot, time, rarr = mp.read_snap_paral(mp.prefix, dir_name, base_name, snap_num[1], nproc) ax = fig.add_subplot(222) arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] type = arr['type'][:] r = np.sqrt(x * x + y * y + z * z) T = arr['T'][:] boolarr = np.logical_and(type == mp.TYPE_WARM, r < 50.0) # boolarr = np.logical_and(boolarr, np.abs(z) < 1.0) idw = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_COLD, r < 50.0) idc = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_STAR_PARAL, r < 50.0) ids = np.where(boolarr) width, height = rcParams['figure.figsize'] sz = min(width, height) # plt.xlabel('X [kpc]') # plt.ylabel('Y [kpc]') plt.scatter(x[idw], y[idw], s = 1, c = T[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = vmin, vmax = vmax, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) ax.xaxis.set_major_formatter(nullFormatter) ax.yaxis.set_major_formatter(nullFormatter) plt.xlim(-size, size * 0.99) plt.ylim(-size, size) ################################# snap 2 #################################### ntot, time, rarr = mp.read_snap_paral(mp.prefix, dir_name, base_name, snap_num[2], nproc) ax = fig.add_subplot(223) arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] type = arr['type'][:] r = np.sqrt(x * x + y * y + z * z) T = arr['T'][:] boolarr = np.logical_and(type == mp.TYPE_WARM, r < 50.0) # boolarr = np.logical_and(boolarr, np.abs(z) < 1.0) idw = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_COLD, r < 50.0) idc = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_STAR_PARAL, r < 50.0) ids = np.where(boolarr) width, height = rcParams['figure.figsize'] sz = min(width, height) plt.xlabel('X [kpc]') plt.ylabel('Y [kpc]') plt.scatter(x[idw], y[idw], s = 1, c = T[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = vmin, vmax = vmax, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) 
ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) # ax.xaxis.set_major_formatter(nullFormatter) # ax.yaxis.set_major_formatter(nullFormatter) plt.xlim(-size, size * 0.99) plt.ylim(-size, size) ################################ snap 3 #################################### ntot, time, rarr = mp.read_snap_paral(mp.prefix, dir_name, base_name, snap_num[3], nproc) ax = fig.add_subplot(224) arr = np.sort(rarr, order='T') arr = arr[::-1] x = arr['pos'][:, 0] y = arr['pos'][:, 1] z = arr['pos'][:, 2] type = arr['type'][:] r = np.sqrt(x * x + y * y + z * z) T = arr['T'][:] boolarr = np.logical_and(type == mp.TYPE_WARM, r < 50.0) # boolarr = np.logical_and(boolarr, np.abs(z) < 1.0) idw = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_COLD, r < 50.0) idc = np.where(boolarr) boolarr = np.logical_and(type == mp.TYPE_STAR_PARAL, r < 50.0) ids = np.where(boolarr) width, height = rcParams['figure.figsize'] sz = min(width, height) plt.xlabel('X [kpc]') # plt.ylabel('Y [kpc]') plt.scatter(x[idw], y[idw], s = 1, c = T[idw], marker = 'o', \ cmap = cmap, norm = norm, vmin = vmin, vmax = vmax, edgecolors = 'none') ax.xaxis.set_major_locator(majorLocator) ax.xaxis.set_minor_locator(minorLocator) ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_minor_locator(minorLocator) # ax.xaxis.set_major_formatter(nullFormatter) ax.yaxis.set_major_formatter(nullFormatter) plt.xlim(-size, size * 0.99) plt.ylim(-size, size) ######################### color bar ################################## # [xcoord, ycoord, width, height] cbaxes = fig.add_axes([0.87, 0.10, 0.03, 0.85]) cb = plt.colorbar(orientation='vertical', \ ticks = ticker.LogLocator(base = 10.0), cax = cbaxes) cb.set_label('Temperature [K]') ######################## label ###################################### label = 't = 0.5 Gyr' plt.figtext(0.28, 0.9, label, ha='center', size=20) label = 't = 1 Gyr' plt.figtext(0.68, 0.9, label, ha='center', size=20) label = 't = 2 Gyr' plt.figtext(0.28, 0.47, label, ha='center', size=20) label = 't = 3 Gyr' plt.figtext(0.68, 0.47, label, ha='center', size=20) ########################## save ###################################### figure_name = 'TPanel-xy-' + dir_name + '.png' plt.savefig(figure_name)
exact = np.zeros((model.nt, model.nx))
for k in range(0, model.nt):
    exact[k, :] = ufunc(model.dt * k, model.x, model.κ)
error_kappas[j, i] = LA.norm(Strang_Beam_kap - exact, np.inf)

xx, yy = np.meshgrid(1 / nxs, kappas)
Pe = 4 * xx / yy

from matplotlib import ticker, cm

fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(17, 8))

pes = ax[0].contourf(1 / nxs, kappas, Pe, levels=np.logspace(-3, 1, 9),
                     locator=ticker.LogLocator(), cmap=cm.PuBu_r)
cbar0 = fig.colorbar(pes, ax=ax[0])
cbar0.ax.set_title('$Pe$', fontsize='large', pad=20)
ax[0].set_ylabel(r'$\kappa$', fontsize='xx-large')
ax[0].set_xlabel(r'$\Delta x$', fontsize='xx-large')
ax[0].set_title('Peclet Number', fontsize='large')

cs = ax[1].contourf(1 / nxs, kappas, error_kappas, levels=np.logspace(-1, 2, 19),
                    locator=ticker.LogLocator(), cmap=cm.viridis)
cbar1 = fig.colorbar(cs, ax=ax[1])
cbar1.ax.set_title(r'$||U - u||_\infty$', fontsize='large', pad=20)
ax[1].set_xlabel(r'$\Delta x$', fontsize='xx-large')
ax[1].set_title('Error', fontsize='large')
plt.tight_layout()
def performance(tol):
    """
    ============================================================================
    Assesses the performance of Bracket Descent and Scipy L-BFGS-B Methods
    ============================================================================

    Parameters
    ------------
    tol : float
        Determines the tolerance for minimization

    Returns
    ------------
    This function will produce 4 figures. The first 3 represent a comparison
    of the precision of each method, while the 4th represents a comparison of
    the timing.

    The first three show the location of the computed minima for initial
    guesses of [-100,-3], [-50,-3], [-10,-3] and [-1,-3]. These are overlaid
    onto the original cost function; the Scipy L-BFGS-B results are
    represented by red diamonds while the Bracket Descent results are
    represented by blue diamonds. The three figures represent the cases when
    the noise amplitude is set to 0, 1, and 10.

    The final figure consists of four subplots: the upper row represents the
    computational time taken for convergence, given an initial x starting
    point, while the lower represents the number of iterations required. In
    each case the Scipy L-BFGS-B method is shown on the left and the Bracket
    Descent method on the right. A legend on each plot differentiates the
    cases when the noise amplitude is set to 0, 1, and 10.

    Trends Observed
    ----------------
    For all cases, the Scipy minimization function appears to be more
    consistent (to rely less on the initial guess) than the Fortran Bracket
    Descent method. This is seen in figures hw241-hw243, where the B-D results
    cover a broader spread of final coordinates. These figures also illustrate
    that as the level of noise of the cost function is increased, the Scipy
    L-BFGS-B method becomes increasingly favourable over the Bracket Descent
    approach, producing more precise results each time. This is a result of
    the lack of consideration for noise within the Bracket Descent method;
    that is to say, any random fluctuations which cause two neighbouring
    points (along the convergence path) to lie within the tolerance limit will
    be taken as the true minimum of the function as defined by the B-D method.
    However, it is likely that the Scipy L-BFGS-B method is adapted to smooth
    out noisy functions and hence find the true minimum more reliably.

    A consideration of figure hw244, however, demonstrates an advantage of the
    B-D method over the Scipy L-BFGS-B minimization in the form of timing. It
    can be seen that despite requiring more iterations before converging to
    within a set tolerance, the total computational time is smaller, to within
    a factor of 10.
""" plt.close('all') count = 0 hw2.tol = tol nintb = [] nintl = [] tlbfgsb = [] txfbd = [] lbfgsx = [] lbfgsy = [] xfbdx = [] xfbdy = [] cost.c_noise = True for cost.c_noise_amp in [0., 1., 10.]: count = count + 1 for [x, y] in [[-100., -3.], [-50., -3.], [-10., -3.], [-1., -3.]]: t12 = 0 t34 = 0 for i in range(0, 1000): t1 = time() scipy.optimize.minimize(cost.costj, [x, y], method='L-BFGS-B', tol=tol) t2 = time() t12 = t12 + (t2 - t1) t3 = time() hw2.bracket_descent([x, y]) t4 = time() t34 = t34 + (t4 - t3) tlbfgsb.append(t12 / 1000) txfbd.append(t34 / 1000) info = scipy.optimize.minimize(cost.costj, [x, y], method='L-BFGS-B', tol=tol) xfbd, jfbd, i2 = hw2.bracket_descent([x, y]) # print('method: ', 'Fortran Bracket Descent') # print('Value: ', jfbd) # print('number of iterations:', i2) # print('x: ', xfbd) # print('c_noise: ', cost.c_noise) # print(' ') #print(info) x = info.x lbfgsx.append(x[0]) lbfgsy.append(x[1]) xfbdx.append(xfbd[0]) xfbdy.append(xfbd[1]) nint = info.nit nintl.append(nint) nintb.append(i2) Minx = 1 + (min([ min(xfbdx[(count - 1) * 4:count * 4]), min(lbfgsx[(count - 1) * 4:count * 4]) ]) - 1) * 1.1 Maxx = 1 + (max([ max(xfbdx[(count - 1) * 4:count * 4]), max(lbfgsx[(count - 1) * 4:count * 4]) ]) - 1) * 1.1 Miny = 1 + (min([ min(xfbdy[(count - 1) * 4:count * 4]), min(lbfgsy[(count - 1) * 4:count * 4]) ]) - 1) * 1.1 Maxy = 1 + (max([ max(xfbdy[(count - 1) * 4:count * 4]), max(lbfgsy[(count - 1) * 4:count * 4]) ]) - 1) * 1.1 [X, Y] = np.linspace(Minx, Maxx, 200), np.linspace(Miny, Maxy, 200) #calculate noiseless cost function at each point on 2D grid j = [[cost.costj([xi, yi]) for xi in X] for yi in Y] #create contour plots of cost functions with and without noise fig, p4 = plt.subplots() cp = p4.contourf(X, Y, j, locator=ticker.LogLocator(), cmap=cm.GnBu) cbar = fig.colorbar(cp) BD, = p4.plot(xfbdx[(count - 1) * 4:count * 4], xfbdy[(count - 1) * 4:count * 4], 'b', linestyle='None', marker='d', markersize=6) Scipy, = p4.plot(lbfgsx[(count - 1) * 4:count * 4], lbfgsy[(count - 1) * 4:count * 4], 'r', linestyle='None', marker='d', markersize=6) BD.set_label('Fortran Bracket Descent') Scipy.set_label('Scipy optimize L-BFGS-B') plt.legend(loc='upper left', fontsize='small') plt.suptitle( 'Rosemary Teague, performance \n Comparison of converged values, Noise=' + str(int(cost.c_noise_amp))) #plt.tight_layout(pad=5) plt.savefig('hw24' + str(count), dpi=700) print(tlbfgsb) plt.close('all') f4, (p414, p424) = plt.subplots(2, 2, sharey=True) one, = p414[0].plot( tlbfgsb[:4], [np.abs(-100.), np.abs(-50.), np.abs(-10.), np.abs(-1.)], 'r', marker='x', markersize=12) two, = p414[0].plot( tlbfgsb[4:8], [np.abs(-100.), np.abs(-50.), np.abs(-10.), np.abs(-1.)], 'm', marker='x', markersize=12) three, = p414[0].plot( tlbfgsb[8:], [np.abs(-100.), np.abs(-50.), np.abs(-10.), np.abs(-1.)], '#c79fef', marker='x', markersize=12) one.set_label('No Noise') two.set_label('Noise = 1.0') three.set_label('Noise = 10.0') p414[0].set_title('Scipy Optimise L-BFGS-B') p414[0].set_xlabel('Time Taken') p414[0].legend(loc='upper right', fontsize='x-small') p414[0].xaxis.set_ticks(np.linspace(min(tlbfgsb), max(tlbfgsb), 3)) p414[0].ticklabel_format(useOffset=False) uno, = p414[1].plot(txfbd[:4], [ np.abs(-100. - xfbdx[0]), np.abs(-50. - xfbdx[1]), np.abs(-10. - xfbdx[2]), np.abs(-1. - xfbdx[3]) ], 'b', marker='x', markersize=12) dos, = p414[1].plot(txfbd[4:8], [ np.abs(-100. - xfbdx[4]), np.abs(-50. - xfbdx[5]), np.abs(-10. - xfbdx[6]), np.abs(-1. 
- xfbdx[7]) ], 'g', marker='x', markersize=12) tres, = p414[1].plot(txfbd[8:], [ np.abs(-100. - xfbdx[8]), np.abs(-50. - xfbdx[9]), np.abs(-10. - xfbdx[10]), np.abs(-1. - xfbdx[11]) ], 'c', marker='x', markersize=12) uno.set_label('No Noise') dos.set_label('Noise = 1.0') tres.set_label('Noise = 10.0') p414[1].set_title('Fortran Bracket Descent') p414[1].set_xlabel('Time Taken') p414[1].legend(loc='upper left', fontsize='x-small') p414[1].xaxis.set_ticks(np.linspace(min(txfbd), max(txfbd), 3)) one1, = p424[0].plot(nintl[:4], [ np.abs(-100. - lbfgsx[0]), np.abs(-50. - lbfgsx[1]), np.abs(-10. - lbfgsx[2]), np.abs(-1. - lbfgsx[3]) ], 'r', marker='x', markersize=12) two2, = p424[0].plot(nintl[4:8], [ np.abs(-100. - lbfgsx[4]), np.abs(-50. - lbfgsx[5]), np.abs(-10. - lbfgsx[6]), np.abs(-1. - lbfgsx[7]) ], 'm', marker='x', markersize=12) three3, = p424[0].plot(nintl[8:], [ np.abs(-100. - lbfgsx[8]), np.abs(-50. - lbfgsx[9]), np.abs(-10. - lbfgsx[10]), np.abs(-1. - lbfgsx[11]) ], '#c79fef', marker='x', markersize=12) one1.set_label('No Noise') two2.set_label('Noise = 1.0') three3.set_label('Noise = 10.0') p424[0].set_xlabel('Number of Iterations') p424[0].legend(loc='upper left', fontsize='x-small') p424[0].ticklabel_format(useOffset=False) uno1, = p424[1].plot(nintb[:4], [ np.abs(-100. - xfbdx[0]), np.abs(-50. - xfbdx[1]), np.abs(-10. - xfbdx[2]), np.abs(-1. - xfbdx[3]) ], 'b', marker='x', markersize=12) dos2, = p424[1].plot(nintb[4:8], [ np.abs(-100. - xfbdx[4]), np.abs(-50. - xfbdx[5]), np.abs(-10. - xfbdx[6]), np.abs(-1. - xfbdx[7]) ], 'g', marker='x', markersize=12) tres3, = p424[1].plot(nintb[8:], [ np.abs(-100. - xfbdx[8]), np.abs(-50. - xfbdx[9]), np.abs(-10. - xfbdx[10]), np.abs(-1. - xfbdx[11]) ], 'c', marker='x', markersize=12) uno1.set_label('No Noise') dos2.set_label('Noise = 1.0') tres3.set_label('Noise = 10.0') p424[1].set_xlabel('Number of Iterations') p424[1].legend(loc='upper left', fontsize='x-small') f4.text(0.04, 0.5, 'Initial x-distance from Converged minimum', va='center', rotation='vertical') plt.suptitle( 'Rosemary Teague, performance \n Time taken for values to converge', fontsize='large') plt.tight_layout(pad=3.5, h_pad=1, w_pad=1) plt.savefig('hw244', dpi=700)
f = figure(figsize=(3.2, 2.5), dpi=220 * 2.0 / 3.0 * 2.0)
N, K = meshgrid(nlist, klist, indexing='ij')
ylabel('k', fontsize=10)
xlabel('n', fontsize=10)
title(r'$\lambda$ = %i nm' % lamda)
minorticks_on()
gca().tick_params(axis='both', which='major', labelsize=10)

if use_logscale:
    cf_levels = compute_outer_log_levels(error_map, base10delta=0.1)
    CF = contourf(N, K, error_map, norm=LogNorm(), levels=cf_levels,
                  locator=ticker.LogLocator())
    cbar = colorbar(CF, ticks=compute_inner_log_levels(error_map, base10delta=0.2))
    cbar.ax.set_ylabel('RMS Error (%)')
    #cbar.ax.yaxis.set_ticks(log10(cf_levels), minor=True)
else:
    cf_level_delta = 0.5
    error_min = error_map.min()
    error_max = error_map.max()
    cfmax = (error_max - error_min) * 0.15 + error_min
    cf_levels = arange(floor(error_min), ceil(cfmax), cf_level_delta)
    CF = contourf(N, K, error_map, levels=cf_levels)
    cbar = colorbar(CF)
    cbar.ax.set_ylabel('RMS Error (%)')

min_indices = find_min_indices_2d_array(error_map)
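# -- compute_outer_log_levels and compute_inner_log_levels are not defined in
# -- this snippet. A minimal sketch of what such helpers might look like (an
# -- assumption, not the original implementation): contour levels spaced
# -- uniformly in log10 of the data, with a finer "outer" set covering the
# -- full range and a coarser "inner" set strictly inside it for colorbar ticks.
import numpy as np

def compute_outer_log_levels(error_map, base10delta=0.1):
    """Levels spanning the data range, every `base10delta` decades."""
    lo = np.floor(np.log10(error_map.min()) / base10delta) * base10delta
    hi = np.ceil(np.log10(error_map.max()) / base10delta) * base10delta
    return 10.0 ** np.arange(lo, hi + base10delta, base10delta)

def compute_inner_log_levels(error_map, base10delta=0.2):
    """Coarser levels strictly inside the data range (e.g. for colorbar ticks)."""
    lo = np.ceil(np.log10(error_map.min()) / base10delta) * base10delta
    hi = np.floor(np.log10(error_map.max()) / base10delta) * base10delta
    return 10.0 ** np.arange(lo, hi + 0.5 * base10delta, base10delta)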
#****************************************
# Plot Number 1 (start)
#****************************************
fig1, ax1 = plt.subplots(2, 1, sharex=True)
plt.xlabel('$P/P_{sat}$', fontname="Arial", fontsize=axis_Label_font_size)
plt.ylabel(' $\u03BE$ (No. waters / nm$^2$)', fontname="Arial",
           fontsize=axis_Label_font_size)

ax1[0].set_xticks(np.arange(0, 100, 10))
ax1[1].set_xticks(np.arange(0, 100, 10))
ax1[0].set_yticks(np.arange(-200, 200, E_per_nm_sq_10A_step))
ax1[1].set_yticks(np.arange(-200, 200, E_per_nm_sq_16A_step))
ax1[0].xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=5))
ax1[1].xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=5))

WSU_color = '#ff7f0e'
NDU_color = '#1f77b4'

ax1[0].errorbar(P_div_Po_10A_ads_Cass_list,
                Avg_of_E_per_area_10A_ads_Cass_list,
                Std_Dev_E_per_area_10A_ads_Cass_list,
                color=NDU_color, marker='D', linestyle='-',
                markersize=PointSizes + 2, linewidth=ConnectionLineSizes,
                fillstyle='full', label="Cassandra - adsorb",
def psd_plot(pore_widths,
             pore_dist,
             method=None,
             labeldiff='distribution',
             labelcum='cumulative',
             line_style=None,
             log=True,
             right=None,
             left=None,
             ax=None):
    """
    Draw a pore size distribution plot.

    Parameters
    ----------
    pore_widths : array
        Array of the pore radii which will become the x axis.
    pore_dist : array
        Contribution of each pore radius which will make up the y axis.
    method : str
        The method used. Will be a string part of the title.
    labeldiff : str
        The label for the plotted data, which will appear in the legend.
    labelcum : str, optional
        The label for the cumulative data, which will appear in the legend.
        Set to None to remove the cumulative distribution line.
    line_style : dict, optional
        The style dictionary to send to the plot() function.
    log : bool
        Whether to display a logarithmic graph.
    right : int
        Higher bound of the selected pore widths.
    left : int
        Lower bound of the selected pore widths.
    ax : matplotlib axes object, default None
        The axes object where to plot the graph if a new figure is
        not desired.

    Returns
    -------
    matplotlib.axes
        Matplotlib axes of the graph generated. The user can then apply their
        own styling if desired.
    """
    # Generate the figure if needed
    if ax is None:
        fig = plt.figure(figsize=(15, 5))
        ax = fig.add_subplot(111)

    lst = {'marker': '', 'color': 'k'}
    if line_style is not None:
        lst.update(line_style)

    l1 = ax.plot(pore_widths, pore_dist, label=labeldiff, **lst)
    if labelcum:
        ax2 = ax.twinx()
        l2 = ax2.plot(pore_widths[1:],
                      numpy.cumsum(pore_dist[1:] * numpy.diff(pore_widths)),
                      marker='', color='r', linestyle="--", label=labelcum)

    # Func formatter
    def formatter(x, pos):
        return "{0:g}".format(x)

    if log:
        ax.set_xscale('log')
        ax.xaxis.set_minor_formatter(ticker.FuncFormatter(formatter))
        ax.xaxis.set_major_formatter(ticker.FuncFormatter(formatter))
        ax.xaxis.set_major_locator(
            ticker.LogLocator(base=10.0, numticks=15, numdecs=20))
        ax.tick_params(axis='x', which='minor', width=0.75, length=2.5,
                       **TICK_STYLE)
        ax.tick_params(axis='x', which='major', width=2, length=10,
                       **TICK_STYLE)
        ax.set_xlim(left=left, right=right)
    else:
        if not left:
            left = 0
        ax.set_xlim(left=left, right=right)
        ax.tick_params(axis='both', which='major', **TICK_STYLE)

    ax.set_title("PSD plot " + str(method), **TITLE_STYLE)
    ax.set_xlabel('Pore width (nm)', **LABEL_STYLE)
    ax.set_ylabel('Distribution (dV/dw)', **LABEL_STYLE)
    if labelcum:
        ax2.set_ylabel('Cumulative Vol ($cm^3 g^{-1}$)', **LABEL_STYLE)

    lns = l1
    if labelcum:
        lns = l1 + l2
    labs = [l.get_label() for l in lns]
    ax.legend(lns, labs, loc='lower right')

    ax.set_ylim(bottom=0)
    if labelcum:
        ax2.set_ylim(bottom=0)
    ax.grid(True)

    return ax
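# -- A minimal usage sketch for psd_plot (an assumption: synthetic pore-size
# -- data, and that numpy, plt, ticker and the module constants TICK_STYLE,
# -- TITLE_STYLE, LABEL_STYLE are available as used above).
import numpy
import matplotlib.pyplot as plt

widths = numpy.logspace(-0.5, 2, 100)                                 # pore widths in nm
dist = numpy.exp(-0.5 * ((numpy.log(widths) - 1.0) / 0.4) ** 2)       # synthetic dV/dw

ax = psd_plot(widths, dist, method="hypothetical kernel", log=True)
plt.show()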
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker, cm

R = 1 + 0.3j
L = -2 - 0.1j


def y(omega, p, R=R, L=L):
    return omega * (R * (1 + p) + L * (1 - p))


ps = np.linspace(0, 1.)
omegas = np.logspace(-1, 1)
omegabar = 1
pbar = 1.


def chisquare(omega, p):
    return abs(y(omega, p) - y(omegabar, pbar))**2


O, P = np.meshgrid(omegas, ps)
cs = plt.contourf(O, P, chisquare(O, P), locator=ticker.LogLocator(numticks=9))
plt.colorbar(cs)
plt.xscale('log')

# %%
coeffs.write_file("/tmp/REXI_" + rexi_method + "_" + unique_id_string + "_txt.txt", False)
coeffs.write_file("/tmp/REXI_" + rexi_method + "_" + unique_id_string + "_bin.txt", True)

# Save last names used
met_names[imet] = unique_id_string

fig, axes = plt.subplots(m_met, 2, figsize=(12, 12))
plt.suptitle(r'$e^{ix}$' + " reconstruction error")

levels = np.geomspace(1e-16, 1, num=17)
cmap = cm.viridis
X, Y = np.meshgrid(x_points, K_tests)

axes1d = axes.reshape(-1)
for i, rexi_method in enumerate(rexi_methods):
    cs = axes1d[i].contourf(X, Y, errors[i], levels,
                            locator=ticker.LogLocator(), cmap=cmap)
    axes1d[i].set(xlabel="x", ylabel="Poles", title=str(met_names[i]))

fig.tight_layout(pad=4.0)
fig.subplots_adjust(right=0.8)
fig.subplots_adjust(top=0.9)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
cbar = fig.colorbar(cs, cax=cbar_ax)
cbar.set_label('Error in exp(ix)')

plt.figtext(0.3, 0.01, "lambda_include_imag=" + str(lambda_include_imag)
            + " lambda_max_real=" + str(lambda_max_real))
plt.savefig("expi_recon.png")
plt.show()
def test_bad_locator_subs(sub):
    ll = mticker.LogLocator()
    with pytest.raises(ValueError):
        ll.subs(sub)
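# -- A minimal sketch (not part of the test above) of `subs` values that
# -- LogLocator does accept: a sequence of numbers, or the keyword strings
# -- 'all'/'auto'; other strings are rejected, which is what the test checks.
import matplotlib.ticker as mticker

numeric_locator = mticker.LogLocator(base=10.0, subs=(1.0, 2.0, 5.0))
keyword_locator = mticker.LogLocator(base=10.0, subs='all')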
for i in range(times_reduced_gp3.shape[0]):
    parallel_efficiency_gp3[i] = times_reduced_gp3[0] / (times_reduced_gp3[i] * nprocs[i])

# -- We prepare the plot area.
bar_width = 1e-1  # Width of the bars in the bar plot.
fig = plt.figure(figsize=(4, 2.5))
ax = fig.add_subplot(111)
vphys.adjust_spines(ax, ['left', 'bottom'], 2)
#ax.spines['bottom'].set_position(('outward',0.0))

ax.set_xlim(0.75, 1.5 * np.amax(nprocs))
ax.set_ylim(0, 1.05)
ax.set_xscale('log')
ax.xaxis.set_major_locator(ticker.LogLocator(base=2))
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:1.0f}"))
ax.yaxis.set_major_formatter(ticker.StrMethodFormatter("{x:1.1f}"))
ax.set_xticks(nprocs)
ax.set_xlabel(r"Number of processors")
ax.set_ylabel(r"Parallel Efficiency on GP3", rotation='horizontal', ha='left')
ax.yaxis.set_label_coords(-0.1, 1.05)
ax.grid(True, color='gray', linestyle="-", lw=0.3, zorder=0)

# -- Remove minor tick marks.
for toc in ax.xaxis.get_minor_ticks():
    toc.tick1On = toc.tick2On = False

width = 1e-1
def GenerateData():
    plt.rcParams['font.serif'] = "Times New Roman"
    plt.rcParams['font.family'] = "serif"
    plt.rcParams['font.weight'] = "light"
    plt.rcParams['font.size'] = 14
    plt.rcParams['mathtext.fontset'] = 'cm'
    plt.rcParams['mathtext.rm'] = 'serif'

    M1 = 10
    M2 = 3
    A = 0.75
    comx, comy = COM(M1, M2, A)
    temp = Roche(comx, comy, M1, M2, A)
    print("COM (", comx, ",", comy, ") = ", temp)

    cblabel = "Gravitational Potential " r"$\Phi$"
    Factor = 1
    steps = 1000
    bound = 1
    #lines = [0, 2, 5, 6, 7, 10, 50, 75, 100, 150, 200, 250, 300, 400, 500]
    lines = 1000

    xlist = np.linspace(-bound, bound, steps)
    ylist = np.linspace(-bound, bound, steps)
    x, y = np.meshgrid(xlist, ylist)
    z = [[0 for k in range(steps)] for l in range(steps)]

    i = 0
    while i < steps:
        j = 0
        while j < steps:
            z[i][j] = (Factor * Roche(xlist[i], ylist[j], M1, M2, A))
            if (z[i][j] - 0)**2 < 3:
                print("0 = (", xlist[i], ", ", ylist[j], ") ")
            j = j + 1
        i = i + 1

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_aspect("equal")
    cp = plt.contourf(y, x, z, lines,
                      cmap=plt.cm.bone,
                      locator=ticker.LogLocator(base=10.0, subs="all"),
                      vmax=100)
    cb = fig.colorbar(cp, ticks=[1, 10, 100, 1000], label=cblabel)
    ax.text(A / 2 - 0.06, -0.26, r"$\mathbf{M_2}$", color='White',
            fontsize=14, fontweight='bold')
    ax.text(-A / 2 - 0.05, -0.26, r"$\mathbf{M_1}$", color='White',
            fontsize=14, fontweight='bold')

    #cp2 = plt.contour(cp, levels=[5.667865], colors='r')
    #cp3 = plt.contour(cp, levels=[5.66555], colors='r')
    cp4 = plt.contour(cp, levels=[4.9281], colors='r')

    plt.plot([A / 2], [0], "o", color="Yellow", markersize=6 * (M2 / (M1 + M2)))
    plt.plot([-A / 2], [0], "o", color="Yellow", markersize=6 * (M1 / (M1 + M2)))
    plt.axis('off')
    # plt.legend(loc="best")
    plt.savefig("RocheLobe.png", format='png', dpi=1200)
    plt.show()

    return x, y, z
def compute_dust_mass_volume_density(): # extra useful quantities (code units) Rinf = par.gas.redge[0:len(par.gas.redge) - 1] Rsup = par.gas.redge[1:len(par.gas.redge)] surface = np.zeros( par.gas.data.shape) # 2D array containing surface of each grid cell surf = np.pi * (Rsup * Rsup - Rinf * Rinf ) / par.gas.nsec # surface of each grid cell (code units) for th in range(par.gas.nsec): surface[:, th] = surf # Mass of gas in units of the star's mass Mgas = np.sum(par.gas.data * surface) # if hydro simulation is 2D, we first need to compute the dust's # surface density before getting its mass volume density if par.hydro2D == 'Yes': dustcube = compute_dust_mass_surface_density() print('--------- computing dust mass surface density ----------') print('--------- computing dust mass volume density ----------') DUSTOUT = open('dust_density.inp', 'w') DUSTOUT.write('1 \n') # iformat DUSTOUT.write(str(par.gas.nrad * par.gas.nsec * par.gas.ncol) + ' \n') # n cells DUSTOUT.write(str(int(par.nbin)) + ' \n') # nbin size bins # array (ncol, nbin, nrad, nsec) rhodustcube = np.zeros( (par.gas.ncol, par.nbin, par.gas.nrad, par.gas.nsec)) # dust aspect ratio as function of ibin and r (or actually, R, cylindrical radius) hd = np.zeros((par.nbin, par.gas.nrad)) # work out averaged Stokes number per size bin with fargo3d # Epstein regime assumed so far in the code -> St ~ pi/2 (s x rho_int) / Sigma_gas # where Sigma_gas should denote the azimuthally-averaged gas surface density here # so that St is understood as a 2D array (nbin, nrad) if par.fargo3d == 'Yes': Stokes_fargo3d = np.zeros((par.nbin, par.gas.nrad)) axirhogas = np.sum(par.gas.data, axis=1) / par.gas.nsec # in code units axirhogas *= (par.gas.cumass * 1e3) / ( (par.gas.culength * 1e2)**2.) # in g/cm^2 for ibin in range(par.nbin): if par.dustfluids != 'No': mysize = par.dust_size[par.dustfluids[0] - 1 + ibin] else: mysize = par.bins[ibin] print('mysize = ', mysize) Stokes_fargo3d[ibin, :] = 0.5 * np.pi * ( mysize * 1e2 ) * par.dust_internal_density / axirhogas # since dust size is in meters... #print('max Stokes number = ', Stokes_fargo3d[ibin,:].max()) # For the vertical expansion of the dust mass volume density, we # need to define a 2D array for the dust's aspect ratio: for ibin in range(par.nbin): if par.fargo3d == 'No': St = avgstokes[ibin] # avg stokes number for that bin if par.fargo3d == 'Yes' and par.dustfluids != 'No': St = Stokes_fargo3d[ibin] # gas aspect ratio (par.gas.rmed[i] = R in code units) hgas = par.aspectratio * (par.gas.rmed)**(par.flaringindex) # vertical extension depends on grain Stokes number: # T = theoretical: hd/hgas = sqrt(alpha/(St+alpha)) # T2 = theoretical: hd/hgas = sqrt(Dz/(St+Dz)) with Dz = 10xalpha here is the coefficient for # vertical diffusion at midplane, which can differ from alpha # F = extrapolation from the simulations by Fromang & Nelson 09 # G = Gaussian = same as gas (case of well-coupled dust for polarized intensity images) if par.z_expansion == 'F': hd[ibin, :] = 0.7 * hgas * ((St + 1. 
/ St) / 1000.)**(0.2) if par.z_expansion == 'T': hd[ibin, :] = hgas * np.sqrt(par.alphaviscosity / (par.alphaviscosity + St)) if par.z_expansion == 'T2': hd[ibin, :] = hgas * np.sqrt(10.0 * par.alphaviscosity / (10.0 * par.alphaviscosity + St)) if par.z_expansion == 'G': hd[ibin, :] = hgas # dust aspect ratio as function of ibin, r and phi (2D array for each size bin) hd2D = np.zeros((par.nbin, par.gas.nrad, par.gas.nsec)) for th in range(par.gas.nsec): hd2D[:, :, th] = hd # nbin, nrad, nsec # grid radius function of ibin, r and phi (2D array for each size bin) r2D = np.zeros((par.nbin, par.gas.nrad, par.gas.nsec)) for ibin in range(par.nbin): for th in range(par.gas.nsec): r2D[ibin, :, th] = par.gas.rmed # work out exponential and normalization factors exp(-z^2 / 2H_d^2) # with z = r cos(theta) and H_d = h_d x R = h_d x r sin(theta) # r = spherical radius, R = cylindrical radius for j in range(par.gas.ncol): rhodustcube[j, :, :, :] = dustcube * np.exp( -0.5 * (np.cos(par.gas.tmed[j]) / hd2D)**2.0) # ncol, nbin, nrad, nsec rhodustcube[j, :, :, :] /= (np.sqrt(2. * np.pi) * r2D * hd2D * par.gas.culength * 1e2 ) # quantity is now in g / cm^3 # Renormalize dust's mass volume density such that the sum over the 3D grid's volume of # the dust's mass volume density x the volume of each grid cell does give us the right # total dust mass, which equals ratio x Mgas. Do that every time except fargo3D simulations # carried out in 2D used dust fluids if par.hydro2D == 'Yes' and par.dustfluids == 'No': rhofield = np.sum(rhodustcube, axis=1) # sum over dust bins Redge, Cedge, Aedge = np.meshgrid( par.gas.redge, par.gas.tedge, par.gas.pedge) # ncol+1, nrad+1, Nsec+1 r2 = Redge * Redge jacob = r2[:-1, :-1, :-1] * np.sin(Cedge[:-1, :-1, :-1]) dphi = Aedge[:-1, :-1, 1:] - Aedge[:-1, :-1, :-1] # same as 2pi/nsec dr = Redge[:-1, 1:, :-1] - Redge[:-1, :-1, :-1] # same as Rsup-Rinf dtheta = Cedge[1:, :-1, :-1] - Cedge[:-1, :-1, :-1] # volume of a cell in cm^3 vol = jacob * dr * dphi * dtheta * ( (par.gas.culength * 1e2)**3) # ncol, nrad, Nsec total_mass = np.sum(rhofield * vol) normalization_factor = par.ratio * Mgas * (par.gas.cumass * 1e3) / total_mass rhodustcube = rhodustcube * normalization_factor if par.verbose == 'Yes': print('total dust mass after vertical expansion [g] = ', np.sum(np.sum(rhodustcube, axis=1) * vol), ' as normalization factor = ', normalization_factor) # finally write mass volume densities for all size bins for ibin in range(par.nbin): print('dust species in bin', ibin, 'out of ', par.nbin - 1) for k in range(par.gas.nsec): for j in range(par.gas.ncol): for i in range(par.gas.nrad): DUSTOUT.write(str(rhodustcube[j, ibin, i, k]) + ' \n') # print max of dust's mass volume density at each colatitude if par.verbose == 'Yes': for j in range(par.gas.ncol): print('max(rho_dustcube) [g cm-3] for colatitude index j = ', j, ' = ', rhodustcube[j, :, :, :].max()) DUSTOUT.close() # plot azimuthally-averaged dust density vs. 
radius and colatitude for smallest and largest bin sizes if par.plot_dust_quantities == 'Yes': from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.ticker as ticker from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator, LogLocator, LogFormatter) matplotlib.rcParams.update({'font.size': 20}) matplotlib.rc('font', family='Arial') fontcolor = 'white' # azimuthally-averaged dust density: axidens_smallest = np.sum(rhodustcube[:, 0, :, :], axis=2) / par.gas.nsec # (nol,nrad) axidens_largest = np.sum(rhodustcube[:, par.nbin - 1, :, :], axis=2) / par.gas.nsec # (nol,nrad) radius_matrix, theta_matrix = np.meshgrid(par.gas.redge, par.gas.tedge) R = radius_matrix * np.sin( theta_matrix) * par.gas.culength / 1.5e11 # in au Z = radius_matrix * np.cos( theta_matrix) * par.gas.culength / 1.5e11 # in au # midplane dust mass volume density: midplane_dens_smallest = rhodustcube[par.gas.ncol // 2 - 1, 0, :, :] # (nrad,nsec) midplane_dens_largest = rhodustcube[par.gas.ncol // 2 - 1, par.nbin - 1, :, :] # (nrad,nsec) radius_matrix, theta_matrix = np.meshgrid(par.gas.redge, par.gas.pedge) X = radius_matrix * np.cos( theta_matrix) * par.gas.culength / 1.5e11 # in au Y = radius_matrix * np.sin( theta_matrix) * par.gas.culength / 1.5e11 # in au print( '--------- a) plotting azimuthally-averaged dust density (R,z) ----------' ) # --- smallest bin size --- fig = plt.figure(figsize=(8., 8.)) plt.subplots_adjust(left=0.17, right=0.92, top=0.88, bottom=0.1) ax = plt.gca() ax.tick_params(top='on', right='on', length=5, width=1.0, direction='out') ax.tick_params(axis='x', which='minor', top=True) ax.tick_params(axis='y', which='minor', right=True) ax.set_xlabel('Radius [au]') ax.set_ylabel('Altitude [au]') ax.set_ylim(Z.min(), Z.max()) ax.set_xlim(R.min(), R.max()) if axidens_smallest.max() / axidens_smallest.min() > 1e3: mynorm = matplotlib.colors.LogNorm(vmin=1e-3 * axidens_smallest.max(), vmax=axidens_smallest.max()) else: mynorm = matplotlib.colors.LogNorm(vmin=axidens_smallest.min(), vmax=axidens_smallest.max()) CF = ax.pcolormesh(R, Z, axidens_smallest, cmap='nipy_spectral', norm=mynorm) divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="2.5%", pad=0.12) cb = plt.colorbar(CF, cax=cax, orientation='horizontal') cax.xaxis.tick_top() cax.xaxis.set_tick_params(labelsize=20, direction='out') cax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=10)) cax.xaxis.set_label_position('top') cax.set_xlabel('dust density ' + r'[g cm$^{-3}$]') cax.xaxis.labelpad = 8 fileout = 'dust_density_smallest_Rz.pdf' plt.savefig('./' + fileout, dpi=160) plt.close(fig) # --- repeat for largest bin size --- fig = plt.figure(figsize=(8., 8.)) plt.subplots_adjust(left=0.17, right=0.92, top=0.88, bottom=0.1) ax = plt.gca() ax.tick_params(top='on', right='on', length=5, width=1.0, direction='out') ax.tick_params(axis='x', which='minor', top=True) ax.tick_params(axis='y', which='minor', right=True) ax.set_xlabel('Radius [au]') ax.set_ylabel('Altitude [au]') ax.set_ylim(Z.min(), Z.max()) ax.set_xlim(R.min(), R.max()) if axidens_largest.max() / axidens_largest.min() > 1e3: mynorm = matplotlib.colors.LogNorm(vmin=1e-3 * axidens_largest.max(), vmax=axidens_largest.max()) else: mynorm = matplotlib.colors.LogNorm(vmin=axidens_largest.min(), vmax=axidens_largest.max()) CF = ax.pcolormesh(R, Z, axidens_largest, cmap='nipy_spectral', norm=mynorm) divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="2.5%", pad=0.12) cb = plt.colorbar(CF, cax=cax, 
orientation='horizontal') cax.xaxis.tick_top() cax.xaxis.set_tick_params(labelsize=20, direction='out') cax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=10)) cax.xaxis.set_label_position('top') cax.set_xlabel('dust density ' + r'[g cm$^{-3}$]') cax.xaxis.labelpad = 8 fileout = 'dust_density_largest_Rz.pdf' plt.savefig('./' + fileout, dpi=160) plt.close(fig) print('--------- b) plotting dust density (x,y) ----------') # --- smallest bin size --- fig = plt.figure(figsize=(8., 8.)) plt.subplots_adjust(left=0.17, right=0.92, top=0.88, bottom=0.1) ax = plt.gca() ax.tick_params(top='on', right='on', length=5, width=1.0, direction='out') ax.tick_params(axis='x', which='minor', top=True) ax.tick_params(axis='y', which='minor', right=True) ax.set_xlabel('x [au]') ax.set_ylabel('y [au]') ax.set_ylim(Y.min(), Y.max()) ax.set_xlim(X.min(), X.max()) if midplane_dens_smallest.max() / midplane_dens_smallest.min() > 1e3: mynorm = matplotlib.colors.LogNorm( vmin=1e-3 * midplane_dens_smallest.max(), vmax=midplane_dens_smallest.max()) else: mynorm = matplotlib.colors.LogNorm( vmin=midplane_dens_smallest.min(), vmax=midplane_dens_smallest.max()) midplane_dens_smallest = np.transpose(midplane_dens_smallest) CF = ax.pcolormesh(X, Y, midplane_dens_smallest, cmap='nipy_spectral', norm=mynorm, rasterized=True) #CF = ax.pcolormesh(X,Y,midplane_dens_smallest,cmap='nipy_spectral',norm=mynorm) divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="2.5%", pad=0.12) cb = plt.colorbar(CF, cax=cax, orientation='horizontal') cax.xaxis.tick_top() cax.xaxis.set_tick_params(labelsize=20, direction='out') cax.xaxis.set_label_position('top') cax.set_xlabel('midplane dust density ' + r'[g cm$^{-3}$]') cax.xaxis.labelpad = 8 fileout = 'dust_density_smallest_midplane.pdf' plt.savefig('./' + fileout, dpi=160) plt.close(fig) # --- repeat for largest bin size --- fig = plt.figure(figsize=(8., 8.)) plt.subplots_adjust(left=0.17, right=0.92, top=0.88, bottom=0.1) ax = plt.gca() ax.tick_params(top='on', right='on', length=5, width=1.0, direction='out') ax.tick_params(axis='x', which='minor', top=True) ax.tick_params(axis='y', which='minor', right=True) ax.set_xlabel('x [au]') ax.set_ylabel('y [au]') ax.set_ylim(Y.min(), Y.max()) ax.set_xlim(X.min(), X.max()) if midplane_dens_largest.max() / midplane_dens_largest.min() > 1e3: mynorm = matplotlib.colors.LogNorm( vmin=1e-3 * midplane_dens_largest.max(), vmax=midplane_dens_largest.max()) else: mynorm = matplotlib.colors.LogNorm( vmin=midplane_dens_largest.min(), vmax=midplane_dens_largest.max()) midplane_dens_largest = np.transpose(midplane_dens_largest) CF = ax.pcolormesh(X, Y, midplane_dens_largest, cmap='nipy_spectral', norm=mynorm, rasterized=True) #CF = ax.pcolormesh(X,Y,midplane_dens_largest,cmap='nipy_spectral',norm=mynorm) divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="2.5%", pad=0.12) cb = plt.colorbar(CF, cax=cax, orientation='horizontal') cax.xaxis.tick_top() cax.xaxis.set_tick_params(labelsize=20, direction='out') cax.xaxis.set_label_position('top') cax.set_xlabel('midplane dust density ' + r'[g cm$^{-3}$]') cax.xaxis.labelpad = 8 fileout = 'dust_density_largest_midplane.pdf' plt.savefig('./' + fileout, dpi=160) plt.close(fig) # free RAM memory del rhodustcube, dustcube, hd2D, r2D
def initial_enthalpy_plot(loading,
                          enthalpy,
                          fitted_enthalpy,
                          log=False,
                          title=None,
                          extras=None,
                          ax=None):
    """
    Draws the initial enthalpy calculation plot.

    Parameters
    ----------
    loading : array
        Loadings for which the initial enthalpy was calculated.
    enthalpy : array
        The enthalpy corresponding to each loading.
    fitted_enthalpy : array
        The predicted enthalpy corresponding to each loading.
    log : bool
        Whether to use a logarithmic x axis.
    title : str
        Name of the material to put in the title.
    extras : iterable, optional
        Extra (x, y, label) triples to plot as dashed lines.
    ax : matplotlib axes object, default None
        The axes object where to plot the graph if a new figure is
        not desired.

    Returns
    -------
    matplotlib.axes
        Matplotlib axes of the graph generated. The user can then apply
        their own styling if desired.
    """
    # Generate the figure if needed
    if ax is None:
        _, ax = plt.subplots()

    ax.plot(loading, enthalpy, marker='o', color='black', label='original',
            linestyle='')
    ax.plot(loading, fitted_enthalpy, color='r', label='fitted', linestyle='-')

    if extras is not None:
        for param in extras:
            ax.plot(param[0], param[1], label=param[2], linestyle='--')

    if log:
        ax.set_xscale('log')
        ax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=15))

    ax.set_title(title + " initial enthalpy fit")
    ax.set_xlabel('Loading')
    ax.set_ylabel('Enthalpy')
    ax.legend(loc='best')
    ax.set_ylim(bottom=0, top=(max(enthalpy) * 1.2))
    ax.set_xlim(left=0)
    ax.grid(True)

    return ax
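# A brief usage sketch for initial_enthalpy_plot above, on synthetic data
# (assumes the function and its matplotlib/ticker imports are in scope):
import numpy as np
import matplotlib.pyplot as plt

loading = np.geomspace(0.01, 10.0, 30)            # synthetic loadings
enthalpy = 20.0 + np.random.normal(0.0, 0.5, 30)  # noisy "measured" enthalpies
fitted = np.full_like(loading, 20.0)              # flat "fitted" curve

ax = initial_enthalpy_plot(loading, enthalpy, fitted, log=True, title='Sample')
plt.show()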
def bracket_descent_test(xg, display=False, compare=False, i=1):
    """
    Use the Bracket Descent method to minimize a cost function, j, defined in
    the cost module.

    Parameters
    ----------
    xg : list
        Initial guess.
    display : bool, optional
        If set to True, figures will be created to illustrate the optimization
        path taken and the distance from convergence at each step.
    compare : bool, optional
        If set to True, a figure will be created to directly compare the
        Newton and Bracket Descent methods.
    i : int, optional
        Sets the names of the figures as hw231_i.png and hw232_i.png.

    Returns
    -------
    xf : ndarray
        Computed location of the minimum.
    jf : float
        Computed minimum.
    output : tuple
        The time taken for the minimum to be found by each of the Newton and
        Bracket Descent methods, averaged over 10 runs. Only set if the
        compare parameter is True, otherwise empty.

    Calling this function produces two figures. The first contains two
    subplots: the location of each step in the minimization path, overlaid on
    the initial cost function, and the distance of j from the final, computed
    minimum at each iteration. The second figure (only produced when compare
    is True) shows the distance of each step from the final, converged minimum
    at each iteration, and illustrates that the Newton method requires
    significantly fewer steps and is hence faster.

    Trends observed
    ---------------
    Figures hw231_i show that the path taken during a bracket-descent
    convergence is much longer than in a Newton convergence (shown in figures
    hw22i). This is because the B-D method limits the size of a step to 2*L,
    where L is defined by the size of an equilateral triangle whose centroid
    moves with each step. The method is furthermore designed so that this
    triangle can only shrink from one iteration to the next, and hence the
    maximum step length can only decrease (never increase) during the
    convergence. The figures also show that steps are initially taken roughly
    perpendicular to the curvature, finding the minimum along that strip, and
    then converge down the parallel path until the tolerance is reached. In
    contrast, the Newton approach places no limit on step size and can hence
    converge in far fewer iterations, a consequence of its use of gradient
    information. Figures hw22i illustrate how each step crosses many bands of
    the contour plot (each representing a difference of one order of
    magnitude) as the method searches for the direction of minimisation.
    """
    cost.c_noise = False
    hw2.tol = 10**(-6)
    hw2.itermax = 1000
    t34 = 0
    output = ()
    if compare:
        N = 10
    else:
        N = 1
    for j in range(N):  # one run normally, ten runs when averaging timings
        t3 = time()
        hw2.bracket_descent(xg)
        t4 = time()
        t34 = t34 + (t4 - t3)
    X, Y = hw2.xpath
    xf = [X[-1], Y[-1]]
    jf = hw2.jpath[-1]
    d1 = np.sqrt((X - xf[0])**2 + (Y - xf[1])**2)

    if display:
        Minx = min(X) - 1
        Maxx = max(X) + 1
        Miny = min(Y) - 1
        Maxy = max(Y) + 1
        [Xj, Yj] = np.linspace(Minx, Maxx, 200), np.linspace(Miny, Maxy, 200)
        # calculate noiseless cost function at each point on 2D grid
        j = [[cost.costj([xi, yi]) for xi in Xj] for yi in Yj]
        f, (p1, p2) = plt.subplots(1, 2)
        p1.contourf(Xj, Yj, j, locator=ticker.LogLocator(), cmap=cm.GnBu)
        p1.plot(X, Y, 'g', marker='d')
        p1.set_xlabel('X1-location')
        p1.set_ylabel('X2-location')
        p1.set_title('Convergence Path')
        p2.semilogy(np.linspace(1, len(X), len(X)), hw2.jpath)
        p2.set_xlabel('Iteration number')
        p2.set_ylabel('distance from converged minimum')
        p2.set_title('Rate')
        plt.suptitle('Rosemary Teague, bracket_descent_test, initial guess =' +
                     str(xg) + ' \n Rate of convergence of a cost function')
        plt.tight_layout(pad=4)
        plt.savefig('hw231_' + str(i), dpi=700)

    if compare:
        plt.close('all')
        One, = plt.loglog(np.linspace(1, len(X), len(X)), hw2.jpath)
        xf2, jf2, outputn = newton_test(xg, timing=True)
        X2, Y2 = outputn[1], outputn[2]
        d2 = np.sqrt((X2 - xf2[0])**2 + (Y2 - xf2[1])**2)
        print(np.linspace(1, len(X2), len(X2)), outputn[3])
        Two, = plt.loglog(np.linspace(1, len(X2), len(X2)), outputn[3])
        One.set_label('Bracket Descent')
        Two.set_label('Newton')
        plt.xlabel('Iteration number')
        plt.ylabel('Distance from converged minimum')
        plt.legend()
        plt.title('Rosemary Teague, bracket_descent_test, initial guess =' +
                  str(xg) +
                  ' \n Comparison of Newton and Bracket Descent Methods')
        plt.savefig('hw232_' + str(i), dpi=700)
        output = (outputn[0], t34 / N)

    return xf, jf, output
def plot_data(df, title='', min=None, max=None, ylog=False, stack=False, subplot=111, fig=None, yticks=None, gf=False, timebase=28, c_mark=False): # Create new fig if fig is None: fig = plt.figure() # Add to existing fig ax = fig.add_subplot(subplot) # Create x and y data for plotting x = df.index.to_numpy() x_t = df.index.to_numpy(dtype='float64') y = df.to_numpy() # If the user wants a graph of Growth Rate, do some spooky mathemagic.... # This part needs better commenting! if gf: x = x[2:] y = y[2:] x_t = x_t[2:] y = np.diff(y, axis=0) y = np.insert(y, 0, y[0], axis=0) y = np.insert(y, -1, y[-1], axis=0) y = convolve2d(y, [[0.25], [0.5], [0.25]], mode='valid') y = np.diff(np.log(y + 1e-3), axis=0) y = np.insert(y, 0, y[0], axis=0) y = np.insert(y, -1, y[-1], axis=0) y = convolve2d(y, [[0.25], [0.5], [0.25]], mode='valid') x = x[2:] x_t = x_t[2:] #print(x.shape, y.shape) if stack: ax.stackplot(x, y.transpose(), labels=df.columns) else: # Mark data point every 7 days ax.plot(x, y, markevery=7) # Set axes, grid and ticks if min is not None and max is not None: ax.set_ylim([min, max]) if ylog: ax.set_yscale('log') locmin = plticker.LogLocator(base=10.0, subs=(1, 10)) ax.yaxis.set_major_formatter(plticker.FormatStrFormatter("%.0f")) ax.yaxis.set_major_locator(locmin) ax.grid(axis='y', which='major', linewidth=0.8) ax.grid(axis='y', which='minor', linewidth=0.3) ax.grid(axis='x', which='both') ax.xaxis.set_major_locator(plticker.MultipleLocator(base=timebase)) ax.xaxis.set_minor_locator(plticker.MultipleLocator(base=timebase / 2)) if stack: ax.legend(loc='upper left') else: for i, line in enumerate(ax.get_lines()): line.set_marker(MARKERS[i]) #line.get_color() if gf: # calc the trendline z = np.polyfit(x_t[4:], y[4:, i], 1) p = np.poly1d(z) ax.plot(x, p(x_t), "--", color=line.get_color()) ax.legend(ax.get_lines(), df.columns, loc='upper left') # Set graph title ax.set_title(title) ax.yaxis.tick_right() # place a text box in upper left in axes coords if c_mark: ax.text(0.99, 0.05, "(c) E. Maitland 2020", transform=ax.transAxes, fontsize=8.5, va='top', ha='right') # , bbox=props) return ax
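# The growth-rate branch above ("spooky mathemagic") is flagged as needing better
# comments. A standalone sketch of the same smoothing chain on synthetic cumulative
# counts (variable names here are descriptive, not from the original):
import numpy as np
from scipy.signal import convolve2d

cases = np.cumsum(np.random.poisson(5, size=(60, 2)), axis=0)  # cumulative counts, 2 series

y = np.diff(cases, axis=0)                                # daily increments
y = np.insert(y, 0, y[0], axis=0)                         # pad both ends so the
y = np.insert(y, -1, y[-1], axis=0)                       # 1-2-1 kernel keeps the length
y = convolve2d(y, [[0.25], [0.5], [0.25]], mode='valid')  # smooth along time
y = np.diff(np.log(y + 1e-3), axis=0)                     # day-on-day change of log counts
y = np.insert(y, 0, y[0], axis=0)
y = np.insert(y, -1, y[-1], axis=0)
growth_rate = convolve2d(y, [[0.25], [0.5], [0.25]], mode='valid')  # smoothed growth rate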
def newton_test(xg, display=False, i=1, timing=False):
    """
    Use Newton's method to minimize a cost function, j, defined in the cost
    module.

    Parameters
    ----------
    xg : list
        Initial guess.
    display : bool, optional
        If set to True, figures will be created to illustrate the optimization
        path taken and the distance from convergence at each step.
    i : int, optional
        Sets the name of the figure as hw22i.png.
    timing : bool, optional
        If set to True, an average time for finding the minimum will be
        calculated and included in the tuple output.

    Returns
    -------
    xf : ndarray
        Computed location of the minimum.
    jf : float
        Computed minimum.
    output : tuple
        The time taken for the minimum to be found (averaged over 10 runs when
        timing is True), followed by the X path, the Y path and the cost
        history at each iteration.

    Calling this function produces a figure containing two subplots. The first
    illustrates the location of each step in the minimization path, overlaid
    on the initial cost function. The second illustrates the distance from the
    final, computed minimum at each iteration.
    """
    cost.c_noise = False
    hw2.tol = 10**(-6)
    hw2.itermax = 1000
    t21 = 0
    output = ()
    if timing:
        N = 10
    else:
        N = 1
    for j in range(N):  # one run normally, ten runs when averaging timings
        t1 = time()
        hw2.newton(xg)
        t2 = time()
        t21 = t21 + (t2 - t1)
    X, Y = hw2.xpath
    xf = [X[-1], Y[-1]]
    jpathn = [j for j in hw2.jpath]
    jf = hw2.jpath[-1]
    output = (t21 / N, X, Y, jpathn)

    if display:
        Minx = min(X) - 1
        Maxx = max(X) + 1
        Miny = min(Y) - 1
        Maxy = max(Y) + 1
        [Xj, Yj] = np.linspace(Minx, Maxx, 200), np.linspace(Miny, Maxy, 200)
        # calculate noiseless cost function at each point on 2D grid
        j = [[cost.costj([xi, yi]) for xi in Xj] for yi in Yj]
        f, (p1, p2) = plt.subplots(1, 2)
        p1.contourf(Xj, Yj, j, locator=ticker.LogLocator(), cmap=cm.GnBu)
        p1.plot(X, Y, 'g', marker='d')
        p1.set_xlim(min(X) - 1, max(X) + 1)
        p1.set_xlabel('X1-location')
        p1.set_ylabel('X2-location')
        p1.set_title('Convergence Path')
        p2.plot(np.linspace(0, len(X) - 1, len(X)), hw2.jpath - jf)
        p2.set_xlabel('Iteration number')
        p2.set_ylabel('distance from converged minimum')
        p2.set_title('Rate')
        plt.suptitle('Rosemary Teague, Newton_test, initial guess =' + str(xg) +
                     ' \n Convergence of a cost function')
        plt.tight_layout(pad=4)
        plt.savefig('hw22' + str(i), dpi=700)

    return xf, jf, output
def log_minor_ticks(ax):
    # Place minor ticks on a log-scaled y axis and hide their labels
    locmin = ticker.LogLocator(base=10.0,
                               subs=(0.1, 0.2, 0.4, 0.6, 0.8, 1, 2, 4, 6, 8, 10))
    ax.yaxis.set_minor_locator(locmin)
    ax.yaxis.set_minor_formatter(ticker.NullFormatter())
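# Usage sketch for log_minor_ticks above (assumes the function and the ticker
# import are in scope):
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x = np.linspace(0, 10, 200)
ax.semilogy(x, np.exp(x))   # y spans several decades
log_minor_ticks(ax)         # unlabelled minor ticks between the decade marks
plt.show()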
setup(ax)
ax.plot(range(0, 5), [0]*5, color='White')
ax.xaxis.set_major_locator(ticker.IndexLocator(base=.5, offset=.25))
ax.text(0.0, 0.5, "IndexLocator(base=0.5, offset=0.25)", fontsize=14,
        transform=ax.transAxes)

# Auto Locator
ax = plt.subplot(n, 1, 6)
setup(ax)
ax.xaxis.set_major_locator(ticker.AutoLocator())
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.text(0.0, 0.5, "AutoLocator()", fontsize=14, transform=ax.transAxes)

# MaxN Locator
ax = plt.subplot(n, 1, 7)
setup(ax)
ax.xaxis.set_major_locator(ticker.MaxNLocator(4))
ax.xaxis.set_minor_locator(ticker.MaxNLocator(40))
ax.text(0.0, 0.5, "MaxNLocator(n=4)", fontsize=14, transform=ax.transAxes)

# Log Locator
ax = plt.subplot(n, 1, 8)
setup(ax)
ax.set_xlim(10**3, 10**10)
ax.set_xscale('log')
ax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=15))
ax.text(0.0, 0.5, "LogLocator(base=10, numticks=15)", fontsize=15,
        transform=ax.transAxes)

plt.show()
#b=parsec
for j in range(N_M):
    n_p = rho / M_p_bins[j]
    for i in range(N_a):
        b = calc_b_max(M_p_bins[j], v_rms, a_bins[i], m1, m2)
        for k in range(N_enc):
            (notBound, a_new, e_new) = impulseEncounter(m1, m2, v_rms, b,
                                                        a_bins[i], e, M_p_bins[j])
            a_frac_avg[i, j] += (a_new - a_bins[i]) / a_bins[i]
# Normalise a_frac_avg
a_frac_avg /= N_enc
# Plot
plt.title(r'Absolute average fractional change in semi-major axis due to single encounter at $b=b_{\mathrm{max}}$')
ax = plt.gca()
cs = ax.contourf(a_bins / au, M_p_bins / (2.0 * 10.0**30.0),
                 np.transpose(np.absolute(a_frac_avg)),
                 locator=ticker.LogLocator())
plt.colorbar(cs)
plt.ylabel(r'Perturber mass, $M_\odot$')
plt.xlabel('Initial semi-major axis, au')
plt.xscale('log')
plt.yscale('log')
plt.show()
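# Passing locator=ticker.LogLocator() to contourf makes it choose logarithmically
# spaced filled levels. A self-contained illustration on a synthetic positive field:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker

x = np.logspace(0, 3, 100)          # e.g. semi-major axis grid
y = np.logspace(-3, 1, 80)          # e.g. perturber mass grid
X, Y = np.meshgrid(x, y)
Z = (Y / y.max()) * (x.max() / X)   # positive field spanning several decades

cs = plt.contourf(X, Y, Z, locator=ticker.LogLocator())
plt.colorbar(cs)
plt.xscale('log')
plt.yscale('log')
plt.show()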
def plot_smps(d_mtx, fit_lognormal_modes=False, fig=None, ax=None, zlim=[10**1, 10**6], xlim=None, logscale_z=True, title='Size distribution', xlabel='Timestamp', ylabel='Mobility diameter (nm)', zlabel='dN/d(log$_{10}$d$_0$) ($cm^{-3}$)', saveorshowplot='show', output_path=None, output_filename='SMPS.png'): #https://matplotlib.org/examples/images_contours_and_fields/pcolormesh_levels.html d_mtx = _fill_smps_times(d_mtx) # Setup data input x0 = np.array([dates.date2num(d) for d in d_mtx.index]) # Time axis try: #try tsi first y0 = np.array([float(s) for s in d_mtx.columns.values]) # size axis except: #otherwise its grimm! y0 = np.array([float(s.split(' ')[0]) for s in d_mtx.columns.values]) x, y = np.meshgrid(x0, y0) z = d_mtx.as_matrix().transpose() with np.errstate( invalid='ignore'): # ignore runtimewarning about nan values z[z < zlim[0]] = zlim[ 0] # mask bad values so that there are no holes in the data # Plot contour cmap = plt.get_cmap('jet') if logscale_z: tick_loc = tck.LogLocator() cont_levels = np.logspace(np.log10(zlim[0]), np.log10(zlim[1]), 100) z_loc_arr = np.logspace(np.log10(zlim[0]), np.log10(zlim[1]), 6) else: tick_loc = tck.LinearLocator() cont_levels = np.linspace(zlim[0], zlim[1], 100) z_loc_arr = np.array( [round_to_1(x) for x in np.linspace(zlim[0], zlim[1], 6)]) if (fig is None) and (ax is None): fig, ax = plt.subplots(nrows=1, figsize=(15, 5)) elif (fig is None) or (ax is None): assert True, \ 'You must pass both a figure and axis object in to the \ size distribution plotting routine' cf = ax.contourf(x, np.log(y), z, levels=cont_levels, locator=tick_loc, cmap=cmap, interpolation=None) ax.set_title(title) # Format y axis labels y_loc_arr = np.logspace(np.log10(y0[0]), np.log10(y0[-1]), 7) rounding = [round_to_1(y) for y in y_loc_arr[1:-1]] y_loc_arr = np.append(np.insert(rounding, 0, np.ceil(y_loc_arr[0])), np.floor(y_loc_arr[-1])) y_loc = tck.FixedLocator(np.log(y_loc_arr)) y_loc_min = tck.FixedLocator( np.log( np.concatenate((np.arange(10, 100, 10), np.arange(100, 700, 100))))) ax.yaxis.set_major_locator(y_loc) ax.set_yticklabels(y_loc_arr) ax.yaxis.set_minor_locator(y_loc_min) ax.set_ylabel(ylabel) # Format x axis labels if xlim is not None: ax.set_xlim(xlim) x_loc_arr = get_time_ticks(x0, 6) x_loc = tck.FixedLocator(x_loc_arr) x_labels = np.array( [dates.num2date(dt).strftime('%d-%b\n%H:%M') for dt in x_loc_arr]) ax.xaxis.set_major_locator(x_loc) ax.set_xticklabels(x_labels) ax.set_xlabel(xlabel) # Format colorbar cbar = fig.colorbar(cf, ax=ax, ticks=z_loc_arr, pad=0.01) cbar.ax.set_xticklabels(z_loc_arr) cbar.set_label(zlabel) if fit_lognormal_modes: # Calculate the maximum in each mode using a lognormal fitting procedure mode_max = mode_max_from_dist_fit(d_mtx) # Overlay the mode sizes ax.plot(np.log(mode_max), '-k') atmosplots.saveorshowplot(plt, saveorshowplot, output_path, output_filename) return
# Generate the grid data
X, Y = np.meshgrid(x_value, y_value)

# Show the 3D surface
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
#ax.plot(X,Y,f(X,Y))
ax.plot_surface(X, Y, f(X, Y), cmap='jet')
#plt.show()

# Show the contour lines
plt.figure()
# Filled contours; the integer sets how many levels the range is divided into
plt.contourf(X, Y, f(X, Y), 5, alpha=0)
# Draw the contour lines
C = plt.contour(X, Y, f(X, Y), 8, locator=ticker.LogLocator(),
                colors='black', linewidths=0.01)
# Label the contour lines
plt.clabel(C, inline=True, fontsize=10)

alpha = 0.09
beta = 0.8
x = np.array([[-0.2], [0.4]])  # arbitrarily chosen starting point
eta = 0.000001  # learning rate
#print(df(x))
xv = [x[0, 0]]
yv = [x[1, 0]]
plt.plot(x[0, 0], x[1, 0], marker='o')
#plt.show()
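# The descent loop itself is not part of this fragment. A minimal sketch of a plain
# gradient-descent update, assuming f and its gradient df are defined earlier in the
# script (the momentum parameters alpha and beta are left unused here):
for _ in range(500):
    x = x - eta * df(x)      # basic update with learning rate eta
    xv.append(x[0, 0])
    yv.append(x[1, 0])
plt.plot(xv, yv, 'r.-', markersize=3)   # overlay the optimisation path on the contours
plt.show()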
win_levels = np.array([0, args.boundary])
all_levels = np.array([0, 1, 10, 100, 1000])
norm = cm.colors.Normalize(vmax=abs(z).max(), vmin=-abs(z).max())
cmap = cm.PRGn

fig, ax = plt.subplots()
ax.set_yscale("log")
ax.set_xlabel('Lag in arrival time (hrs)')
ax.set_ylabel('Challenger inoculum (CFU)')
cset1 = plt.contourf(x, y, z, win_levels, locator=ticker.LogLocator(),
                     colors=('#289600', '#ffffff'))
#                    cmap=cm.get_cmap(cmap, len(levels) - 1), norm=norm)

# t_com
plt.axvline(x=3.76, color='k', linestyle='--', label='t_com')
plt.text(2.7, 3000, 't_com', rotation=90)

#cset2 = plt.contour(x, y, z, all_levels, locator=ticker.LogLocator(), colors='k', linewidths=1)
#plt.clabel(cset2, fmt='%1.0f', inline=1, fontsize=10)

# Resident wins boundary
cset3 = plt.contour(x, y, z, win_levels, colors='k', linewidths=2)

plt.title('Isogenic challenger')
#plt.colorbar(cset1)

# legend
plt.savefig(args.output + ".pdf")
def pps_eigenmodes_eval(self): ## LOAD ComputeSaveNormalizedEigenError: fig1 fig1_data = np.load(self.eval_dir + '/ComputeSaveNormalizedEigenError_fig1.npz') normalized_relative_error = fig1_data['nre'] true_tsnap = fig1_data['tt'] linearEvolvingEigen = fig1_data['le'] relative_error = self.params['relative_error'] ## draw ComputeSaveNormalizedEigenError: fig1 # plot normalized relative error for each eigenmodes plt.figure(figsize=FIG_SIZE) cmap = self.get_cmap(normalized_relative_error.shape[1]) for i in range(normalized_relative_error.shape[1]): plt.plot(true_tsnap, normalized_relative_error[:, i], '-', c=cmap(i), label= str(i + 1) + 'th-eigenvalue: ' + "{0:.3f}".format(linearEvolvingEigen[i,i])) plt.xlabel('time',fontsize = 32) if relative_error: plt.ylabel('normalized error',fontsize = 32) else: plt.ylabel('error',fontsize = 32) plt.yscale('log') lgd = plt.legend(bbox_to_anchor=(1, 0.5)) plt.savefig(self.pps_dir + '/normalized_relative_eigen_error.png', bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close() ## LOAD ComputeSaveNormalizedEigenError: fig2 fig2_data = np.load(self.eval_dir + '/ComputeSaveNormalizedEigenError_fig2.npz') mean_normalized_relative_error = fig2_data['mre'] small_to_large_error_eigen_index = fig2_data['stli'] small_to_large_error_eigen_index_kou = fig2_data['stli_kou'] abs_sum_kou = fig2_data['abs_sum_kou'] error_reconstruct_state_list = fig2_data['ersl'] # bool_index_further = fig2_data['iestli'] ## it is removed.. self.small_to_large_error_eigen_index = small_to_large_error_eigen_index # self.bool_index_further = bool_index_further ## draw ComputeSaveNormalizedEigenError: fig2 # error ordered fig= plt.figure(figsize=FIG_SIZE) ax1 = fig.add_subplot(111) ax2 = ax1.twinx() ax1.plot(range(1, normalized_relative_error.shape[1] + 1), mean_normalized_relative_error[small_to_large_error_eigen_index], 'b-^', label='max relative eigenfunction error') ax1.set_xlabel(r'number of selected eigenmodes $\hat{L}$',fontsize = 32) # ax1.legend(bbox_to_anchor=(1, 0.5)) ax1.set_yscale('log') if relative_error: ax1.set_ylabel('max linear evolving normalized error', color='b',fontsize = 32) else: ax1.set_ylabel('max error', color='b',fontsize = 32) # plot error from reconstruction state from eigenfunction values ax2.plot(range(1, normalized_relative_error.shape[1] + 1), error_reconstruct_state_list,'r-o', label='reconstruction normalized error') if relative_error: ax2.set_ylabel('reconstruction normalized error', color='r',fontsize = 32) else: ax2.set_ylabel('reconstruction error', color='r',fontsize = 32) # ax2.set_ylim([-1,20]) ax2.set_yscale('log') # set up ticks yticks = ticker.LogLocator() ax1.yaxis.set_major_locator(yticks) ax2.yaxis.set_major_locator(yticks) # ax2.legend(bbox_to_anchor=(1, 0.5)) plt.savefig(self.pps_dir + '/reConstr_decay_normalized_relative_eigen_error.png', bbox_inches='tight') plt.close() ## LOAD ComputeSaveNormalizedEigenError: fig3 fig3_data = np.load(self.eval_dir + '/ComputeSaveNormalizedEigenError_fig3.npz') top_k_modes_list = fig3_data['tkm_index_list'] self.top_k_modes_list = top_k_modes_list # print out kou's result print('as a comparison: index chosen by Kou criterion: ') print(small_to_large_error_eigen_index_kou + 1) print('corresponding abs sum:') print(abs_sum_kou) self.index_selected_in_full = self.small_to_large_error_eigen_index[:self.top_k_modes_list[-1] + 1] # self.index_selected_in_full = self.small_to_large_error_eigen_index ## draw ComputeSaveNormalizedEigenError: fig3 # fig. 
3: plot normalized relative error for top K smallest error eigenmodes plt.figure(figsize=FIG_SIZE) cmap = self.get_cmap(len(top_k_modes_list)) for i in top_k_modes_list: i_s = small_to_large_error_eigen_index[i] plt.plot(true_tsnap, normalized_relative_error[:, i_s], '-', c=cmap(i), label= str(i_s + 1) + 'th-eigenvalue: ' + "{0:.3f}".format(linearEvolvingEigen[i_s,i_s])) # print eigenvectors # print 'no. eigen vectors ', i_s+1 # print self.model.KoopmanEigenV[:, i_s] plt.xlabel('time',fontsize = 32) if relative_error: plt.ylabel('normalized error',fontsize = 32) else: plt.ylabel('error',fontsize = 32) plt.yscale('log') lgd = plt.legend(bbox_to_anchor=(1, 0.5)) plt.savefig(self.pps_dir + '/top_' + str(len(top_k_modes_list)) + '_normalized_relative_eigen_error.png', bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close() # load MTENET fig4_data = np.load(self.eval_dir + '/MultiTaskElasticNet_result.npz') alphas_enet = fig4_data['alphas_enet'] coefs_enet = fig4_data['coefs_enet'] residual_array = fig4_data['residual_array'] # coefficients vs alpha & number non-zero num_target_components = coefs_enet.shape[0] alphas_enet_log_negative = -np.log10(alphas_enet) # print("coef_enet real= ", np.real(coefs_enet)) # print("coef_enet imag= ", np.imag(coefs_enet)) for i_component in range(num_target_components): plt.figure(figsize=FIG_SIZE) cmap = self.get_cmap(len(top_k_modes_list)) for i in top_k_modes_list: i_s = small_to_large_error_eigen_index[i] plt.plot(alphas_enet_log_negative, abs(coefs_enet[i_component,i,:]), '-*', c=cmap(i), label = 'No. ' + str(i + 1) + ', index = ' + str(i_s+1)) max_val = np.max(abs(coefs_enet[i_component, :, -1])) min_val = np.min(abs(coefs_enet[i_component, :, -1])) diss = (max_val - min_val)/2 mean = (max_val + min_val)/2 plt.xlabel(r'-$\log_{10}(\alpha)$',fontsize = 32) plt.ylabel('abs of coefficients',fontsize = 32) plt.ylim([mean - diss*1.05, mean + diss*3]) lgd = plt.legend(bbox_to_anchor=(1, 0.5)) plt.savefig(self.pps_dir + '/multi-elastic-net-coef-' + str(i_component+1) + '.png', bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close() # total number of non-zero terms1 plt.figure(figsize=FIG_SIZE) num_non_zeros = [len((coefs_enet[i_component, abs(coefs_enet[i_component, :, ii]) >0*np.max(abs(coefs_enet[i_component,:,ii])), ii])) for ii in range(coefs_enet.shape[2])] plt.plot(alphas_enet_log_negative, num_non_zeros , 'k^-') plt.xlabel(r'-$\log_{10}(\alpha)$',fontsize = 32) plt.ylabel('number of selected features',fontsize = 32) lgd = plt.legend(bbox_to_anchor=(1, 0.5)) plt.savefig(self.pps_dir + '/multi-elastic-net-coef-non-zeros-' + str(i_component+1) + '.png', bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close() num_non_zero_all_alpha = [] for ii in range(coefs_enet.shape[2]): non_zero_index_per_alpha = [] for i_component in range(num_target_components): # non_zero_index_per_alpha_per_target = abs(coefs_enet[i_component, :, ii]) > 0 non_zero_index_per_alpha_per_target = abs(coefs_enet[i_component, :, ii]) > 0*np.max(abs(coefs_enet[i_component, :, ii])) non_zero_index_per_alpha.append(non_zero_index_per_alpha_per_target) non_zero_index_per_alpha_all_targets = np.logical_or.reduce(non_zero_index_per_alpha) num_non_zero_all_alpha.append(np.sum(non_zero_index_per_alpha_all_targets)) num_non_zero_all_alpha = np.array(num_non_zero_all_alpha) # total residual vs alpha AND number of non-zero modes vs alpha fig=plt.figure(figsize=FIG_SIZE) ax1 = fig.add_subplot(111) ax2 = ax1.twinx() ax1.plot(alphas_enet_log_negative, residual_array, 'k*-') 
ax1.set_xlabel(r'-$\log_{10}(\alpha)$',fontsize = 32) ax1.set_ylabel('normalized reconstruction MSE',color='k',fontsize = 32) # ax1.set_yscale('log') ax2.plot(alphas_enet_log_negative, num_non_zero_all_alpha,'r*-') ax2.set_ylabel('number of selected features',color='r',fontsize = 32) lgd = plt.legend(bbox_to_anchor=(1, 0.5)) plt.savefig(self.pps_dir + '/multi-elastic-net-mse.png', bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close()
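# Note on the twin-axis figure above: a single LogLocator instance is registered on
# both y axes. Matplotlib locators keep a reference to one axis, so sharing an
# instance between axes is fragile; a safer, self-contained pattern gives each axis
# its own locator:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker

fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
x = np.arange(1, 11)
ax1.plot(x, 10.0**x, 'b-^')
ax2.plot(x, 2.0**x, 'r-o')
ax1.set_yscale('log')
ax2.set_yscale('log')
ax1.yaxis.set_major_locator(ticker.LogLocator())   # one instance per axis
ax2.yaxis.set_major_locator(ticker.LogLocator())
plt.show()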
# In[70]:

freqs = r[0]
vols = r[1:]

ls = [1.53e-3] * 33
ls = np.round(np.cumsum(ls), 5)

# In[104]:

plt.close('all')
plt.figure(figsize=(10, 10))
X, Y = np.meshgrid(freqs, ls)
plt.contourf(X, Y, np.abs(vols), locator=ticker.LogLocator(base=2),
             cmap=plt.cm.YlGnBu_r)
plt.xlabel("frequency")
plt.ylabel('distance')
plt.colorbar()
plt.show()

# In[98]:

plt.close('all')
#plt.figure(figsize=[10,10])
#plt.plot(vs_mat[1,:])
#plt.show()

# In[90]:
def plot_surface( self, surf, minvalue=None, maxvalue=None, contourlevels=None, xlabelrotation=None, colormap=None, logarithmic=False, ): # pylint: disable=too-many-statements """Input a surface and plot it.""" # need a deep copy to avoid changes in the original surf logger.info("The key contourlevels %s is not in use", contourlevels) usesurf = surf.copy() if usesurf.yflip < 0: usesurf.swapaxes() if abs(surf.rotation) > 0.001: usesurf.unrotate() xi, yi, zi = usesurf.get_xyz_values() zimask = ma.getmaskarray(zi).copy() # yes need a copy! legendticks = None if minvalue is not None and maxvalue is not None: minv = float(minvalue) maxv = float(maxvalue) step = (maxv - minv) / 10.0 legendticks = [] for i in range(10 + 1): llabel = float("{0:9.4f}".format(minv + step * i)) legendticks.append(llabel) zi.unshare_mask() zi[zi < minv] = minv zi[zi > maxv] = maxv # need to restore the mask: zi.mask = zimask # note use surf.min, not usesurf.min here ... notetxt = ("Note: map values are truncated from [" + str(surf.values.min()) + ", " + str(surf.values.max()) + "] " + "to interval [" + str(minvalue) + ", " + str(maxvalue) + "]") self._fig.text(0.99, 0.02, notetxt, ha="right", va="center", fontsize=8) logger.info("Legendticks: %s", legendticks) if minvalue is None: minvalue = usesurf.values.min() if maxvalue is None: maxvalue = usesurf.values.max() # this will override current instance colormap locally, and is # therefore reset afterwards keepcolor = self.colormap if colormap is not None: self.colormap = colormap levels = np.linspace(minvalue, maxvalue, self.contourlevels) logger.debug("Number of contour levels: %s", levels) plt.setp(self._ax.xaxis.get_majorticklabels(), rotation=xlabelrotation) # zi = ma.masked_where(zimask, zi) # zi = ma.masked_greater(zi, _cxtgeo.UNDEF_LIMIT) if ma.std(zi) > 1e-07: uselevels = levels else: uselevels = 1 try: if logarithmic is False: locator = None ticks = legendticks im = self._ax.contourf(xi, yi, zi, uselevels, locator=locator, cmap=self.colormap) else: logger.info("use LogLocator") locator = ticker.LogLocator() ticks = None uselevels = None im = self._ax.contourf(xi, yi, zi, locator=locator, cmap=self.colormap) self._fig.colorbar(im, ticks=ticks) except ValueError as err: logger.warning("Could not make plot: %s", err) plt.gca().set_aspect("equal", adjustable="box") self.colormap = keepcolor
def parameter_variation(m, sh, thing_to_vary, thing_to_divide_by, factors, num_points, draw_plot=True): """ Explore the effect of varying different parameters on the upconversion yield of the acceptor. Parameters ---------- m : tripletpairs.kineticmodelling.steadystatemodels.Merrifield or tripletpairs.kineticmodelling.steadystatemodels.MerrifieldExplicit1TT A pre-prepared instance of either :class:`tripletpairs.kineticmodelling.steadystatemodels.Merrifield` or :class:`tripletpairs.kineticmodelling.steadystatemodels.MerrifieldExplicit1TT`. sh : tripletpairs.spin.SpinHamiltonian A pre-prepared instance of :class:`tripletpairs.spin.SpinHamiltonian`. thing_to_vary : str The name of the rate constant to vary. thing_to_divide_by : str or None The name of the rate constant to normalise to, if desired. factors : 2-tuple of float Rate constant will be varied geometrically between its starting value divided by the first entry in factors and its starting value multiplied by the second entry in factors. num_points : int Number of rate constant values to sample. draw_plot : bool, optional Whether to draw a plot of the result. The default is True. Raises ------ TypeError If the model given is invalid. ValueError If either of the given parameters is invalid. Returns ------- rates : numpy.ndarray The rate constant values of thing_to_vary, note that these have not been divided by anything. ucy_actual : numpy.ndarray The upconversion yield as a function of rates. ucy_nospin : numpy.ndarray The upconversion yield as a function of rates, assuming no spin statistical effects. """ if (m.model_name not in ['Merrifield', 'MerrifieldExplicit1TT' ]) or (m._time_resolved): raise TypeError('invalid model') m.initial_weighting = {'T1': 1} ucy_nospin = np.zeros(num_points) ucy_actual = np.zeros(num_points) if thing_to_vary not in m.rates: if thing_to_vary not in ['G', 'J']: raise ValueError('invalid thing_to_vary') if thing_to_divide_by is not None: if thing_to_divide_by not in m.rates: raise ValueError('invalid thing_to_divide_by') elif thing_to_vary in ['G', 'J']: raise ValueError( 'thing_to_divide_by must be None if thing_to_vary is G or J') if thing_to_vary == 'J': vars_object = sh else: vars_object = m orig = vars(vars_object)[thing_to_vary] rates = np.geomspace( vars(vars_object)[thing_to_vary] / factors[0], vars(vars_object)[thing_to_vary] * factors[1], num_points) for i, rate in enumerate(rates): vars(vars_object)[thing_to_vary] = rate sh.calculate_everything() m.cslsq = sh.cslsq m.simulate() ucy0 = 2 * m.kSNR * m.S1 / m.G m.cslsq = np.ones(9) / 9 m.simulate() ucy1 = 2 * m.kSNR * m.S1 / m.G ucy_nospin[i] = 100 * ucy1 ucy_actual[i] = 100 * ucy0 if draw_plot: rate_labels = { 'kSF': 'Forwards SF Rate', 'k_SF': 'Backwards SF Rate', 'kHOP': 'Fowards Hop Rate', 'k_HOP': 'Backwards Hop Rate', 'kHOP2': 'Spin Loss Rate', 'kDISS': 'Spin Loss Rate', 'kTTA': 'TTA Rate', 'kTTNR': r'$^1$(TT) Decay Rate', 'kTNR': 'Triplet Decay Rate', 'kSNR': 'Singlet Decay Rate', 'G': 'Generation Rate', 'J': 'Exchange Energy' } if thing_to_divide_by is None: x = rates vline = orig xlabel_text = rate_labels[thing_to_vary] if thing_to_vary == 'kTTA': xlabel_unit = r' (nm$^3$ns$^{-1}$)' elif thing_to_vary == 'G': xlabel_unit = r' (nm$^{-3}$ns$^{-1}$)' elif thing_to_vary == 'J': xlabel_unit = r' ($\mu$eV)' else: xlabel_unit = r' (ns$^{-1}$)' else: x = rates / vars(vars_object)[thing_to_divide_by] vline = orig / vars(vars_object)[thing_to_divide_by] xlabel_text = rate_labels[thing_to_vary] + '/' + rate_labels[ thing_to_divide_by] if thing_to_vary 
== 'kTTA': xlabel_unit = r' (nm$^3$)' elif thing_to_divide_by == 'kTTA': xlabel_unit = r' (nm$^{-3}$)' else: xlabel_unit = '' fig, ax1 = plt.subplots(figsize=(5, 4)) ax1.semilogx(x, ucy_nospin, 'b--', label='loss') ax1.semilogx(x, ucy_actual, 'b-', label='loss') ax1.set_ylim([0, 100]) ax1.set_xlabel(xlabel_text + xlabel_unit, fontsize=20) ax1.set_ylabel('Upconversion Yield (%)', fontsize=20, color='b') ax1.tick_params(axis='y', labelcolor='b') ax2 = ax1.twinx() gain = 100 * (ucy_nospin - ucy_actual) / ucy_actual ax2.semilogx(x, gain, 'r:') ax2.set_ylim([0, 1.1 * max(gain)]) ax2.set_ylabel('Potential Gain (%)', fontsize=20, color='r') ax2.tick_params(axis='y', labelcolor='r') ax1.xaxis.set_major_locator(mticker.LogLocator(numticks=12)) ax1.xaxis.set_minor_locator( mticker.LogLocator(subs=np.linspace(0.1, 0.9, 9), numticks=12)) ax1.set_xlim([min(x), max(x)]) ax1.axvline(vline, color='k', linestyle=':', linewidth=1, alpha=0.5) for ax in [ax1, ax2]: ax.tick_params(axis='both', which='major', labelsize=20, width=1.4, length=6) ax.tick_params(axis='both', which='minor', labelsize=20, width=1.4, length=3) for axis in ['top', 'bottom', 'left', 'right']: ax.spines[axis].set_linewidth(1.4) vars(vars_object)[thing_to_vary] = orig return rates, ucy_actual, ucy_nospin
def histogram_1d(data, nbins=None, l_adjust_bins=False, l_xlog=False, x_label='', y_label='', legend_label=[], l_color=True, l_percentage=True, l_rel_mode=False, l_pope=False): """Probability distributions for multiple variables in a xarray-dataset.""" fig, ax = plt.subplots(figsize=(7 * 0.8, 5 * 0.8)) linestyle = [ 'solid', 'dashed', 'dotted', (0, (1, 1)), (0, (3, 5, 1, 5)), (0, (3, 1, 1, 1, 1, 1)) ] color = [ col.sol['magenta'], col.sol['blue'], 'k', col.sol['green'], col.sol['red'], col.sol['magenta'], col.sol['cyan'] ] lw = [1., 2., 2., 2., 2., 2.] for i, var in enumerate(data): # var = dataset[variable] if type(nbins) == int: bins = np.linspace(start=var.min(), stop=var.max(), num=nbins + 1) # 50 else: if l_adjust_bins: bins = np.linspace(start=m.sqrt(max(0, var.min())), stop=m.sqrt(var.max()), num=20)**2 else: bins = nbins[i] # sns.distplot(var[var.notnull()], bins=bins, kde=False, norm_hist=True) # hist_kws={'log': True}) total = var.notnull().sum().values # metric_clean = var.fillna(-1) # works if var is positive and thus bins as well metric_clean = var.where( var.notnull(), drop=True) # works if var is positive and thus bins as well h, edges = np.histogram(metric_clean, bins=bins) # , density=True) if l_rel_mode: total = h.max() bin_centre = 0.5 * (edges[1:] + edges[:-1]) dx = edges[1:] - edges[:-1] dlogx = dx / (bin_centre * m.log(10, m.e)) if l_xlog: h_normed = h / dlogx / total # equals density=True else: if l_percentage: h_normed = h / total * 100 else: h_normed = h / dx / total # equals density=True if l_color: plt.plot(bin_centre, h_normed, linestyle='-', marker='o', color=color[i], linewidth=lw[i]) else: h_normed_ext = np.zeros(shape=len(h_normed) + 1) h_normed_ext[0] = h_normed[0] h_normed_ext[1:] = h_normed # plot a step function instead of a continuous line h_to_plot = h_normed_ext # h_to_plot = np.cumsum(h_normed_ext) plt.step(edges, h_to_plot, color=color[i], linewidth=2.) #, linestyle=linestyle[i]) if l_xlog: plt.xscale('log') x_major = ticker.LogLocator(base=10.0, numticks=10) ax.xaxis.set_major_locator(x_major) x_minor = ticker.LogLocator(base=10.0, subs=np.arange(1.0, 10.0) * 0.1, numticks=20) ax.xaxis.set_minor_locator(x_minor) ax.xaxis.set_minor_formatter(ticker.NullFormatter()) plt.ylabel(y_label) plt.xlabel(x_label) #, **font) if len(legend_label) != 0: lg = plt.legend(legend_label, fontsize=14) # this sets only the legend background color to transparent (not the surrounding box) lg.get_frame().set_facecolor('none') if l_pope: ax.set_xticks([10, 30, 50, 70, 90], minor=True) ax.grid(b=True, which='both', axis='x') ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) if not l_pope: ax.xaxis.set_major_formatter(ticker.ScalarFormatter()) ax.xaxis.set_major_formatter( ticker.FuncFormatter(lambda y, _: '{:g}'.format(y))) ax.tick_params(axis='x', length=8) ax.spines['bottom'].set_position('zero') ax.tick_params(axis='x', direction='out') # ax.xaxis.set_ticks_position('none') # 'left', 'right' # ax.set_xlim(6) ax.tick_params(axis='y', direction='out') ax.yaxis.set_ticks_position('none') # 'left', 'right' return fig
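# The l_xlog branch above pairs a decade-spaced major LogLocator with sub-decade
# minors whose labels are hidden. That pattern in isolation, on synthetic data:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker

fig, ax = plt.subplots()
ax.plot(np.logspace(0, 4, 50), np.random.rand(50))
ax.set_xscale('log')
ax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=10))
ax.xaxis.set_minor_locator(ticker.LogLocator(base=10.0,
                                             subs=np.arange(1.0, 10.0) * 0.1,
                                             numticks=20))
ax.xaxis.set_minor_formatter(ticker.NullFormatter())   # keep minor ticks unlabelled
plt.show()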