def loadConfigs(pt, model):
    """Load the pruning configuration and optionally let the user edit it.

    Shows the current per-layer "filters to keep" dictionary (from
    ``dcfgs.dcdic``) and, if the user agrees, prompts for a new filter
    count for each conv layer of the network.

    Args:
        pt: path to the network prototxt.
        model: path to the caffemodel weights file.

    Returns:
        dict mapping conv-layer name -> number of filters kept after pruning.
    """
    if DEBUG_Mario:
        print("\n--- load configs ---")
        print("in pt:", pt)
        print("in model:", model)
    dcdic = dcfgs.dcdic
    print("\nCurrent pruning configuration (loaded from lib/cfgs.c.dcdic)")
    for key, value in dcdic.items():
        print(key, value)
    # Fixed typo in the prompt ("prunig" -> "pruning").
    changeConfigs = checkOperation(
        question='\nModify the pruning configuration?', default='no')
    if changeConfigs:
        net = Net(pt, model=model, phase=caffe.TEST)
        for conv in net.convs:
            print(
                'input number remaining filters of %s after pruning [original number of filters %d ]'
                % (conv, net.conv_param_num_output(conv)))
            dc = input("[leave blank to skip]: ")
            # Blank input keeps the current value untouched (the original
            # self-assignment was a no-op that could KeyError on a conv
            # missing from dcdic); anything else overrides it.
            if dc != '':
                dcdic[conv] = int(dc)
    return dcdic
def _sensitivitySweep(pt, model, noBNs, BNs_dic, layer, pruneFn, p_Nx,
                      n_batches, acc1_init, acc5_init):
    """Sweep one layer from 100% down to 100/p_Nx % of its filters.

    Resets the net, then repeatedly zeroes more filters of ``layer`` via
    ``pruneFn(net, bn, affine, dc)`` and measures accuracy after each step.

    Returns:
        [p_x, p_acc1, p_acc5]: remaining-filter counts and the matching
        top-1 / top-5 accuracies (first point is the unpruned baseline).
    """
    net = Net(pt, model=model, noTF=1)  # reset to the unpruned model
    bn, affine = checkIfBN(noBNs, layer, BNs_dic)
    dc_init = net.conv_param_num_output(layer)
    dc = dc_init
    p_x = [dc_init]          # plot points of the x axis
    p_acc1 = [acc1_init]
    p_acc5 = [acc5_init]
    # The first point was already added (initial conditions), so sweep
    # p_Nx - 1 more steps, shaving dc_init / p_Nx filters each time.
    for _ in range(p_Nx - 1):
        dc = round(dc - dc_init / p_Nx)
        net = pruneFn(net, bn, affine, dc)
        acc1, acc5 = accuracy(net, n_batches)
        p_x.append(dc)
        p_acc1.append(acc1)
        p_acc5.append(acc5)
    return [p_x, p_acc1, p_acc5]


def checkSensitivity(pt, model, graph_title='<Model>-<Dataset>'):
    """Prune the model layer by layer, from 100% down to 100/p_Nx % of its
    filters, measuring accuracy at each step.

    Plots the sensitivity curves and saves the plot data (via ``saver``)
    to a pickle file for later analysis.

    NOTE: The pruning performed here does not alter the model shape; it
    simply zeroes filters, which is enough for accuracy evaluation.

    Args:
        pt: path to the network prototxt.
        model: path to the caffemodel weights file.
        graph_title: title used when saving/plotting the results.

    Returns:
        dict mapping layer name -> [p_x, p_acc1, p_acc5] plot data.
    """
    p_dic = {}       # plotting dictionary: layer -> [p_x, p_acc1, p_acc5]
    p_Nx = 20        # number of plot points (5% pruning step in this case)
    n_batches = 100  # number of test batches per accuracy measurement

    net = Net(pt, model=model, noTF=1)  # ground-truth net
    convs = net.convs
    net, BNs_dic = registerBNs(net)
    noBNs = net.noBNs

    # Baseline accuracy of the unpruned model.
    print('@ Grand-truth model @')
    acc1_init, acc5_init = accuracy(net, n_batches)

    for conv, convnext in zip(convs, convs[1:]):
        # Interior layers: pruning conv also needs its successor convnext.
        # Defaults in the lambda bind conv/convnext at definition time.
        p_dic[conv] = _sensitivitySweep(
            pt, model, noBNs, BNs_dic, conv,
            lambda net, bn, affine, dc, c=conv, cn=convnext:
                pseudoPruneLayer(net, c, cn, bn, affine, dc),
            p_Nx, n_batches, acc1_init, acc5_init)
        if DEBUG_Mario:
            # Bug fix: print() does not %-format; interpolate explicitly.
            print("Layer %s done." % conv)
        if convnext == convs[-1]:
            # conv was the penultimate layer: sweep the last layer too,
            # which prunes without a successor.
            p_dic[convnext] = _sensitivitySweep(
                pt, model, noBNs, BNs_dic, convnext,
                lambda net, bn, affine, dc, c=convnext:
                    pseudoPruneLastLayer(net, c, bn, affine, dc),
                p_Nx, n_batches, acc1_init, acc5_init)
            if DEBUG_Mario:
                print("Last layer %s done." % convnext)

    saver(graph_title, p_dic)
    return p_dic