def update_line(num, print_loss, data, axes, epochsInds, test_error, test_data, epochs_bins, loss_train_data,
                loss_test_data, colors, font_size=18, axis_font=16, x_lim=[0, 12.2], y_lim=[0, 1.08],
                x_ticks=[], y_ticks=[]):
    """Update the figure of the information plane for the movie"""
    # Build the line segments between the points of consecutive layers
    cmap = ListedColormap(LAYERS_COLORS)
    segs = []
    for i in range(0, data.shape[1]):
        x = data[0, i, num, :]
        y = data[1, i, num, :]
        points = np.array([x, y]).T.reshape(-1, 1, 2)
        segs.append(np.concatenate([points[:-1], points[1:]], axis=1))
    segs = np.array(segs).reshape(-1, 2, 2)
    axes[0].clear()
    if len(axes) > 1:
        axes[1].clear()
    lc = LineCollection(segs, cmap=cmap, linestyles='solid', linewidths=0.3, alpha=0.6)
    lc.set_array(np.arange(0, 5))
    # Plot the points of every layer
    for layer_num in range(data.shape[3]):
        axes[0].scatter(data[0, :, num, layer_num], data[1, :, num, layer_num], color=colors[layer_num], s=35,
                        edgecolors='black', alpha=0.85)
    axes[1].plot(epochsInds[:num], 1 - np.mean(test_error[:, :num], axis=0), color='r')
    title_str = 'Information Plane - Epoch number - ' + str(epochsInds[num])
    plt_ut.adjustAxes(axes[0], axis_font, title_str, x_ticks, y_ticks, x_lim, y_lim, set_xlabel=True, set_ylabel=True,
                      x_label='$I(X;T)$', y_label='$I(T;Y)$')
    title_str = 'Precision as function of the epochs'
    plt_ut.adjustAxes(axes[1], axis_font, title_str, x_ticks, y_ticks, x_lim, y_lim, set_xlabel=True, set_ylabel=True,
                      x_label='# Epochs', y_label='Precision')
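
# --- Usage sketch (illustrative, not part of the original module) ------------
# A minimal example of how update_line might be driven frame-by-frame with
# matplotlib's FuncAnimation to render the information-plane movie. The `data`
# tensor of shape [2, num_nets, num_epochs, num_layers], `epochsInds`,
# `test_error` and `colors` are assumed to be prepared by the caller exactly
# as update_line expects them; the helper name and save path are made up here.
def make_info_plane_movie(data, epochsInds, test_error, colors, save_name='info_plane.mp4'):
    import functools
    import matplotlib.animation as animation
    f, axes = plt.subplots(1, 2, figsize=(14, 6))
    # Bind every argument except the frame index `num`
    update = functools.partial(update_line, print_loss=False, data=data, axes=axes,
                               epochsInds=epochsInds, test_error=test_error, test_data=None,
                               epochs_bins=None, loss_train_data=None, loss_test_data=None,
                               colors=colors)
    movie = animation.FuncAnimation(f, update, frames=data.shape[2], repeat=False)
    movie.save(save_name, writer='ffmpeg', dpi=150)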
def plot_all_epochs(gen_data, I_XT_array, I_TY_array, axes, epochsInds, f, index_i, index_j, size_ind,
                    font_size, y_ticks, x_ticks, colorbar_axis, title_str, axis_font, bar_font, save_name,
                    plot_error=True, index_to_emphasis=1000):
    """Plot the information plane with the epochs in different colors"""
    # If we want to plot the train and test error
    if plot_error:
        fig_strs = ['train_error', 'test_error', 'loss_train', 'loss_test']
        fig_data = [np.squeeze(gen_data[fig_str]) for fig_str in fig_strs]
        f1 = plt.figure(figsize=(12, 8))
        ax1 = f1.add_subplot(111)
        mean_sample = False if len(fig_data[0].shape) == 1 else True
        if mean_sample:
            fig_data = [np.mean(fig_data_s, axis=0) for fig_data_s in fig_data]
        for i in range(len(fig_data)):
            ax1.plot(epochsInds, fig_data[i], ':', linewidth=3, label=fig_strs[i])
        ax1.legend(loc='best')
    f = plt.figure(figsize=(12, 8))
    axes = f.add_subplot(111)
    axes = np.array([[axes]])
    I_XT_array = np.squeeze(I_XT_array)
    I_TY_array = np.squeeze(I_TY_array)
    if len(I_TY_array[0].shape) > 1:
        I_XT_array = np.mean(I_XT_array, axis=0)
        I_TY_array = np.mean(I_TY_array, axis=0)
    max_index = size_ind if size_ind != -1 else I_XT_array.shape[0]
    cmap = plt.get_cmap('gnuplot')
    # Each epoch gets a different color
    colors = [cmap(i) for i in np.linspace(0, 1, epochsInds[max_index - 1] + 1)]
    # Change this if we have more than one network architecture
    nums_arc = -1
    # Go over all the epochs and plot them with the right color
    for index_in_range in range(0, max_index):
        XT = I_XT_array[index_in_range, :]
        TY = I_TY_array[index_in_range, :]
        # If this is the index that we want to emphasize
        if epochsInds[index_in_range] == index_to_emphasis:
            axes[index_i, index_j].plot(XT, TY, marker='o', linestyle=None, markersize=19, markeredgewidth=0.04,
                                        linewidth=2.1, color='g', zorder=10)
        else:
            axes[index_i, index_j].plot(XT[:], TY[:], marker='o', linestyle='-', markersize=12, markeredgewidth=0.01,
                                        linewidth=0.2, color=colors[int(epochsInds[index_in_range])])
    plt_ut.adjustAxes(axes[index_i, index_j], axis_font=axis_font, title_str=title_str, x_ticks=x_ticks,
                      y_ticks=y_ticks, x_lim=[0, 25.1], y_lim=None,
                      set_xlabel=index_i == axes.shape[0] - 1, set_ylabel=index_j == 0,
                      x_label='$I(X;T)$', y_label='$I(T;Y)$', set_xlim=False, set_ylim=False, set_ticks=True,
                      label_size=font_size)
    # Save the figure and add a color bar
    if index_i == axes.shape[0] - 1 and index_j == axes.shape[1] - 1:
        plt_ut.create_color_bar(f, cmap, colorbar_axis, bar_font, epochsInds, title='Epochs')
        f.savefig(save_name + '.jpg', dpi=500, format='jpg')
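
# --- Illustrative sketch of the epoch-to-color mapping used above ------------
# plot_all_epochs colors every epoch by sampling the 'gnuplot' colormap at a
# position proportional to the epoch index. The helper below reproduces that
# mapping on its own synthetic epoch values and attaches a standard matplotlib
# color bar instead of plt_ut.create_color_bar; everything here is a made-up
# demonstration, not part of the original plotting pipeline.
def demo_epoch_colors():
    import matplotlib as mpl
    epochsInds = np.arange(0, 1000, 50)  # hypothetical epoch indices
    cmap = plt.get_cmap('gnuplot')
    colors = [cmap(i) for i in np.linspace(0, 1, epochsInds[-1] + 1)]
    f, ax = plt.subplots(1, 1)
    for epoch in epochsInds:
        # one fake information-plane point per epoch, colored by its epoch index
        ax.plot(epoch / 100.0, epoch / 1000.0, marker='o', color=colors[int(epoch)])
    sm = mpl.cm.ScalarMappable(cmap=cmap, norm=mpl.colors.Normalize(vmin=0, vmax=epochsInds[-1]))
    sm.set_array([])
    f.colorbar(sm, ax=ax, label='Epochs')
    plt.show()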
def update_line_specipic_points(nums, data, axes, to_do, font_size, axis_font):
    """Update the lines in the axes for snapshots of the whole process"""
    colors = LAYERS_COLORS
    x_ticks = [0, 2, 4, 6, 8, 10]
    # Go over all the snapshots
    for i in range(len(nums)):
        num = nums[i]
        # Plot the right layer
        for layer_num in range(data.shape[3]):
            axes[i].scatter(data[0, :, num, layer_num], data[1, :, num, layer_num], color=colors[layer_num], s=105,
                            edgecolors='black', alpha=0.85)
        plt_ut.adjustAxes(axes[i], axis_font=axis_font, title_str='', x_ticks=x_ticks, y_ticks=[], x_lim=None,
                          y_lim=None, set_xlabel=to_do[i][0], set_ylabel=to_do[i][1], x_label='$I(X;T)$',
                          y_label='$I(T;Y)$', set_xlim=True, set_ylim=True, set_ticks=True, label_size=font_size)
def update_axes(axes, xlabel, ylabel, xlim, ylim, title, xscale, yscale, x_ticks, y_ticks, p_0, p_1,
                font_size=30, axis_font=25, legend_font=16):
    """Adjust the axes to the right scale/ticks and labels"""
    categories = 6 * ['']
    labels = ['$10^{-5}$', '$10^{-4}$', '$10^{-3}$', '$10^{-2}$', '$10^{-1}$', '$10^0$', '$10^1$']
    # The legends of the mean and the std
    leg1 = plt.legend(p_0, categories, title=r'$\|Mean\left(\nabla{W_i}\right)\|$', loc='best',
                      fontsize=legend_font, markerfirst=False, handlelength=5)
    leg2 = plt.legend(p_1, categories, title=r'$STD\left(\nabla{W_i}\right)$', loc='best',
                      fontsize=legend_font, markerfirst=False, handlelength=5)
    leg1.get_title().set_fontsize('21')  # legend title fontsize
    leg2.get_title().set_fontsize('21')  # legend title fontsize
    plt.gca().add_artist(leg1)
    plt.gca().add_artist(leg2)
    plt_ut.adjustAxes(axes, axis_font=20, title_str='', x_ticks=x_ticks, y_ticks=y_ticks, x_lim=xlim, y_lim=ylim,
                      set_xlabel=True, set_ylabel=True, x_label=xlabel, y_label=ylabel, set_xlim=True, set_ylim=True,
                      set_ticks=True, label_size=font_size, set_yscale=True, set_xscale=True, yscale=yscale,
                      xscale=xscale, ytick_labels=labels, genreal_scaling=True)
def plot_by_training_samples(I_XT_array, I_TY_array, axes, epochsInds, f, index_i, index_j, size_ind, font_size,
                             y_ticks, x_ticks, colorbar_axis, title_str, axis_font, bar_font, save_name,
                             samples_labels):
    """Plot the final epoch for all the different training-sample sizes"""
    max_index = size_ind if size_ind != -1 else I_XT_array.shape[2] - 1
    cmap = plt.get_cmap('gnuplot')
    colors = [cmap(i) for i in np.linspace(0, 1, max_index + 1)]
    # Plot the final epoch
    nums_epoch = -1
    # Go over all the sample sizes and plot them with the right color
    for index_in_range in range(0, max_index):
        XT, TY = [], []
        for layer_index in range(0, I_XT_array.shape[4]):
            XT.append(np.mean(I_XT_array[:, -1, index_in_range, nums_epoch, layer_index], axis=0))
            TY.append(np.mean(I_TY_array[:, -1, index_in_range, nums_epoch, layer_index], axis=0))
        axes[index_i, index_j].plot(XT, TY, marker='o', linestyle='-', markersize=12, markeredgewidth=0.2,
                                    linewidth=0.5, color=colors[index_in_range])
    plt_ut.adjustAxes(axes[index_i, index_j], axis_font=axis_font, title_str=title_str, x_ticks=x_ticks,
                      y_ticks=y_ticks, x_lim=None, y_lim=None, set_xlabel=index_i == axes.shape[0] - 1,
                      set_ylabel=index_j == 0, x_label='$I(X;T)$', y_label='$I(T;Y)$', set_xlim=True, set_ylim=True,
                      set_ticks=True, label_size=font_size)
    # Create the color bar and save the figure
    if index_i == axes.shape[0] - 1 and index_j == axes.shape[1] - 1:
        plt_ut.create_color_bar(f, cmap, colorbar_axis, bar_font, epochsInds, title='Training Data')
        f.savefig(save_name + '.jpg', dpi=150, format='jpg')
def update_line_each_neuron(num, print_loss, Ix, axes, Iy, train_data, accuracy_test, epochs_bins, loss_train_data,
                            loss_test_data, colors, epochsInds, font_size=18, axis_font=16, x_lim=[0, 12.2],
                            y_lim=[0, 1.08], x_ticks=[], y_ticks=[]):
    """Update the figure of the information plane for the movie"""
    # Clear the axes before plotting the current frame
    axes[0].clear()
    if len(axes) > 1:
        axes[1].clear()
    # Plot the points of every neuron in every layer
    for layer_num in range(Ix.shape[2]):
        for net_ind in range(Ix.shape[0]):
            axes[0].scatter(Ix[net_ind, num, layer_num], Iy[net_ind, num, layer_num], color=colors[layer_num],
                            s=35, edgecolors='black', alpha=0.85)
    title_str = 'Information Plane - Epoch number - ' + str(epochsInds[num])
    plt_ut.adjustAxes(axes[0], axis_font, title_str, x_ticks, y_ticks, x_lim, y_lim, set_xlabel=True, set_ylabel=True,
                      x_label='$I(X;T)$', y_label='$I(T;Y)$')
    # Plot the loss function and the error
    if len(axes) > 1:
        axes[1].plot(epochsInds[:num], 1 - np.mean(accuracy_test[:, :num], axis=0), color='g')
        if print_loss:
            axes[1].plot(epochsInds[:num], np.mean(loss_test_data[:, :num], axis=0), color='y')
        nearest_val = np.searchsorted(epochs_bins, epochsInds[num], side='right')
        axes[1].set_xlim([0, epochs_bins[nearest_val]])
        axes[1].legend(('Accuracy', 'Loss Function'), loc='best')
def plot_alphas(str_name, save_name='dist'):
    data_array = get_data(str_name)
    params = np.squeeze(np.array(data_array['information']))
    I_XT_array = np.squeeze(np.array(extract_array(params, 'local_IXT')))
    # Disabled debugging plot - one figure per index of the last axis
    """
    for i in range(I_XT_array.shape[2]):
        f1, axes1 = plt.subplots(1, 1)
        axes1.plot(I_XT_array[:, :, i])
    plt.show()
    return
    """
    I_XT_array_var = np.squeeze(np.array(extract_array(params, 'IXT_vartional')))
    I_TY_array_var = np.squeeze(np.array(extract_array(params, 'ITY_vartional')))
    I_TY_array = np.squeeze(np.array(extract_array(params, 'local_ITY')))
    # Disabled debugging plots - local vs. variational information values
    """
    f1, axes1 = plt.subplots(1, 1)
    #axes1.plot(I_XT_array, I_TY_array)
    f1, axes2 = plt.subplots(1, 1)
    axes1.plot(I_XT_array, I_TY_array_var)
    axes2.plot(I_XT_array, I_TY_array)
    f1, axes1 = plt.subplots(1, 1)
    axes1.plot(I_TY_array, I_TY_array_var)
    axes1.plot([0, 1.1], [0, 1.1], transform=axes1.transAxes)
    #axes1.set_title('Sigma=' + str(sigmas[i]))
    axes1.set_ylim([0, 1.1])
    axes1.set_xlim([0, 1.1])
    plt.show()
    return
    """
    sigmas = np.linspace(0, 0.3, 20)
    # Plot the local vs. the variational I(X;T) for each value of sigma
    for i in range(0, 20):
        print(i, sigmas[i])
        f1, axes1 = plt.subplots(1, 1)
        print(I_XT_array)
        axes1.plot(I_XT_array, I_XT_array_var[:, :, i], linewidth=5)
        axes1.plot([0, 15.1], [0, 15.1], transform=axes1.transAxes)
        axes1.set_title('Sigma=' + str(sigmas[i]))
        axes1.set_ylim([0, 15.1])
        axes1.set_xlim([0, 15.1])
    plt.show()
    return
    # The code below is unreachable because of the early return above;
    # it plots I(X;T) of every layer as a function of sigma
    epochs_s = data_array['params']['epochsInds']
    f, axes = plt.subplots(1, 1)
    colors = LAYERS_COLORS
    linestyles = ['--', '-.', '-', '', ' ', ':', '']
    epochs_s = [0, -1]
    for j in epochs_s:
        print(j)
        for i in range(0, I_XT_array.shape[1]):
            axes.plot(sigmas, I_XT_array_var[j, i, :], color=colors[i], linestyle=linestyles[j],
                      label='Layer-' + str(i) + ' Epoch - ' + str(epochs_s[j]))
    title_str = r'I(X;T) for different layers as a function of $\sigma$ (the width of the Gaussian)'
    x_label = r'$\sigma$'
    y_label = '$I(X;T)$'
    x_lim = [0, 3]
    plt_ut.adjustAxes(axes, axis_font=20, title_str=title_str, x_ticks=[], y_ticks=[], x_lim=x_lim, y_lim=None,
                      set_xlabel=True, set_ylabel=True, x_label=x_label, y_label=y_label, set_xlim=True,
                      set_ylim=False, set_ticks=False, label_size=20, set_yscale=False, set_xscale=False,
                      yscale=None, xscale=None, ytick_labels='', genreal_scaling=False)
    axes.legend()
    plt.show()
def plot_gradients(name_s):
    """Plot the gradients and the means of the networks over the batches"""
    data_array = get_data(name_s[0][0])
    gradients = data_array['var_grad_val']
    ws = data_array['ws']
    epochsInds = (data_array['params']['epochsInds']).astype(int)
    data = np.squeeze(np.array(data_array['information']))
    I_TY_array = np.array(extract_array(data, 'local_ITY'))
    fig_size = (14, 10)
    f_norms, (axes_norms) = plt.subplots(1, 1, figsize=fig_size)
    f_log, (axes_log) = plt.subplots(1, 1, figsize=fig_size)
    f_log.subplots_adjust(left=0.097, bottom=0.11, right=.95, top=0.95, wspace=0.03, hspace=0.03)
    colors = LAYERS_COLORS
    # TODO - infer the number of layers automatically
    num_of_layer = 6
    # Legend handles for the means (p_0) and the STDs (p_1), collected over the layers
    p_0, p_1 = [], []
    # Go over the layers
    for layer_index in range(0, num_of_layer - 1):
        traces_layers, means_layers, l2_norms = [], [], []
        print(layer_index)
        # We want to skip the biases, so we go in steps of 2
        layer = layer_index * 2
        # Go over the different networks
        for k in range(len(gradients)):
            grad = np.squeeze(gradients[k][0][0])
            ws_in = ws[k][0][0]
            cov_traces, means, layer_l2_norm = [], [], []
            # Go over all the epochs
            for epoch_number in range(len(ws_in)):
                print('epoch number', epoch_number)
                # The weights of the layer as a one-dimensional vector
                if type(ws_in[epoch_number][layer_index]) is list:
                    flatted_list = [item for sublist in ws_in[epoch_number][layer_index] for item in sublist]
                else:
                    flatted_list = ws_in[epoch_number][layer_index]
                layer_l2_norm.append(LA.norm(np.array(flatted_list), ord=2))
                gradients_list = []
                # For each batch, collect the gradients of all the neurons of this layer
                for i in range(len(grad[epoch_number])):
                    current_list_inner = []
                    for neuron in range(len(grad[epoch_number][0][layer])):
                        c_n = grad[epoch_number, i][layer][neuron]
                        current_list_inner.extend(c_n)
                    gradients_list.append(current_list_inner)
                # The gradients have dimensions [#batches, #weights]
                gradients_list = np.array(gradients_list)
                # The average over the batches
                average_vec = np.mean(gradients_list, axis=0)
                # Sqrt of AA^T - the norm of the mean gradient
                norm_mean = np.sqrt(np.dot(average_vec.T, average_vec))
                covs_mat = np.zeros((average_vec.shape[0], average_vec.shape[0]))
                # Go over all the batch vectors, subtract their mean and accumulate the covariance matrix
                for batch in range(gradients_list.shape[0]):
                    current_vec = gradients_list[batch, :] - average_vec
                    current_cov_mat = np.dot(current_vec[:, None], current_vec[None, :])
                    covs_mat += current_cov_mat
                # Take the mean covariance matrix
                mean_cov_mat = np.array(covs_mat) / gradients_list.shape[0]
                # The trace of the covariance matrix
                trac_cov = np.trace(np.array(mean_cov_mat))
                means.append(norm_mean)
                cov_traces.append(np.sqrt(trac_cov))
                # Second method, if we have a lot of neurons
                """
                #cov_traces.append(np.mean(grad_norms))
                #means.append(norm_mean)
                c_var, c_mean, total_w = [], [], []
                for neuron in range(len(grad[epoch_number][0][layer]) / 10):
                    gradients_list = np.array([grad[epoch_number][i][layer][neuron] for i in range(len(grad[epoch_number]))])
                    total_w.extend(gradients_list.T)
                    grad_norms1 = np.std(gradients_list, axis=0)
                    mean_la = np.abs(np.mean(np.array(gradients_list), axis=0))
                    #mean_la = LA.norm(gradients_list, axis=0)
                    c_var.append(np.mean(grad_norms1))
                    c_mean.append(np.mean(mean_la))
                #total_w is of size [num_of_total_weights, num_of_epochs]
                total_w = np.array(total_w)
                #c_var.append(np.sqrt(np.trace(np.cov(np.array(total_w).T)))/np.cov(np.array(total_w).T).shape[0])
                #print (np.mean(c_mean).shape)
                means.append(np.mean(c_mean))
                cov_traces.append(np.mean(c_var))
                """
            l2_norms.append(layer_l2_norm)
            means_layers.append(np.array(means))
            traces_layers.append(np.array(cov_traces))
        # Normalize by the L2 norms of the weights
        y_var = np.mean(np.array(traces_layers), axis=0) / np.mean(l2_norms, axis=0)
        y_mean = np.mean(np.array(means_layers), axis=0) / np.mean(l2_norms, axis=0)
        # Plot the STDs and the means of the gradients
        c_p1, = axes_log.plot(epochsInds[:], y_var, markersize=4, linewidth=4, color=colors[layer_index],
                              linestyle=':', markeredgewidth=0.2, dashes=[4, 4])
        c_p0, = axes_log.plot(epochsInds[:], y_mean, linewidth=2, color=colors[layer_index])
        # Plot the norms
        axes_norms.plot(epochsInds[:], np.mean(np.array(l2_norms), axis=0), linewidth=2, color=colors[layer_index])
        # For the legend
        p_0.append(c_p0)
        p_1.append(c_p1)
    # Adjust the figure according to the specific labels, scaling and legends
    # Change 'log'/'log' to 'linear' if you want linear scaling:
    # update_axes(reg_axes, '# Epochs', 'Normalized Mean and STD', [0, 10000], [0.000001, 10], '', 'log', 'log',
    #             [1, 10, 100, 1000, 10000], [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10], p_0, p_1)
    update_axes(axes_log, '# Epochs', 'Normalized Mean and STD', [0, 9000], [0.000001, 1000], '', 'log', 'log',
                [1, 10, 100, 1000, 20000], [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100], p_0, p_1)
    plt_ut.adjustAxes(axes_norms, axis_font=20, title_str='', set_xlabel=True, set_ylabel=True,
                      x_label='# Epochs', y_label='$L_2$')
    # The legend of the norms figure
    categories = [r'$\|W_1\|$', r'$\|W_2\|$', r'$\|W_3\|$', r'$\|W_4\|$', r'$\|W_5\|$', r'$\|W_6\|$']
    axes_norms.legend(categories, loc='best', fontsize=16)
    f_log.savefig('log_gradient.jpg', dpi=200, format='jpg')
    f_norms.savefig('norms.jpg', dpi=200, format='jpg')
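
# --- Illustrative sketch of the per-epoch gradient statistics ----------------
# For one layer and one epoch, plot_gradients reduces the [#batches, #weights]
# gradient matrix to two scalars: the L2 norm of the batch-averaged gradient
# ("mean") and the square root of the trace of the batch covariance matrix
# ("STD"). The helper below repeats that computation in isolation so it can be
# checked on a small synthetic array; the function name and inputs are made up.
def gradient_mean_and_std(gradients_list):
    """gradients_list: array of shape [#batches, #weights] for a single layer and epoch."""
    gradients_list = np.asarray(gradients_list)
    average_vec = np.mean(gradients_list, axis=0)
    norm_mean = np.sqrt(np.dot(average_vec, average_vec))  # ||E_batch[grad]||_2
    centered = gradients_list - average_vec
    # The trace of the covariance equals the sum of the per-weight variances,
    # so there is no need to build the full #weights x #weights matrix as the
    # loop in plot_gradients does.
    trace_cov = np.sum(np.mean(centered ** 2, axis=0))
    return norm_mean, np.sqrt(trace_cov)

# Example: gradient_mean_and_std(np.random.randn(128, 50)) returns the mean
# gradient magnitude and the gradient STD for 50 weights over 128 batches.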