def manhattan(x, y, qbits):
    """Manhattan (L1) distance between unit-normalized, quantized copies of x and y."""
    qx = quantize(x / torch.norm(x), qbits)
    qy = quantize(y / torch.norm(y), qbits)
    diff = qx - qy
    return diff.abs().sum().item()
def chebyshev(x, y, qbits):
    """Chebyshev (L-infinity) distance between unit-normalized, quantized copies of x and y."""
    qx = quantize(x / torch.norm(x), qbits)
    qy = quantize(y / torch.norm(y), qbits)
    diff = qx - qy
    return diff.abs().max().item()
def dot(x, y, qbits):
    """Dot-product dissimilarity: 1 - <qx, qy> for quantized x and y.

    Args:
        x, y: 1-D torch tensors.
        qbits: quantization bit width forwarded to quantize().

    Returns:
        float: one minus the sum of the elementwise product of the
        quantized inputs.
    """
    qx = quantize(x, qbits)
    qy = quantize(y, qbits)
    # Fix: the original computed quantize(x - y, qbits) and immediately
    # overwrote the result — dead computation removed.
    return (1 - (qx * qy).sum()).item()
def run_net(net, epoch, trainSet, testSet, folder):
    """Train, test, quantize and generate code for `net`, checkpointing to
    checkpoint/<folder>/<net.name()>.pt and resuming from it when present.
    """
    # Class names come from the module-level dataset paths, not from the
    # trainSet/testSet arguments — presumably they point at the same data;
    # TODO(review): confirm.
    classes_training = list(os.listdir(trainset_path))
    classes_test = list(os.listdir(testset_path))
    # NOTE: comparison is order-sensitive, as in the original code.
    if classes_test != classes_training:
        print('Wrong dataset')
        return
    classes = tuple(sorted(classes_test))

    # Make sure the checkpoint directory exists.
    out_dir = os.path.join('checkpoint', folder)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Resume from an existing checkpoint when one is present.
    model_path = os.path.join(out_dir, net.name() + '.pt')
    if os.path.exists(model_path):
        net = torch.load(model_path)

    train_net(net, epoch, trainSet, classes)
    torch.save(net, model_path)
    test_net(net, testSet, classes)
    quantize(net, testSet, classes)
    gen_code(net, folder)
def cosine(x, y, qbits):
    """Cosine dissimilarity: 1 - <qx, qy> of unit-normalized, quantized inputs."""
    qx = quantize(x / torch.norm(x), qbits)
    qy = quantize(y / torch.norm(y), qbits)
    return (1 - (qx * qy).sum()).item()
def chebyshev(x, y, qbits=0):
    """Chebyshev (L-infinity) distance between quantized x and y.

    Args:
        x, y: 1-D numpy arrays.
        qbits: quantization bit width forwarded to quantize() (0 = none).

    Returns:
        Maximum absolute elementwise difference of the quantized inputs.
    """
    x = quantize(x, qbits)
    y = quantize(y, qbits)
    # Fix: Chebyshev distance is max|x_i - y_i|. The previous np.max(x - y)
    # omitted the absolute value and could return a wrong (even negative)
    # value when y exceeds x at the largest-gap index; the sibling
    # implementation of this metric uses np.abs as well.
    return np.max(np.abs(x - y))
def chebyshev(x, y):
    """Chebyshev distance of unit-normalized inputs, quantized with the
    module-level `qbits` setting."""
    qx = quantize(x / np.linalg.norm(x), qbits)
    qy = quantize(y / np.linalg.norm(y), qbits)
    return np.max(np.abs(qx - qy))
def dotproductdist(x, y):
    """Dot-product based dissimilarity using the module-level `qbits`.

    Full precision (qbits == 0): 1 - <x, y>.
    Quantized: 1 / (<x, y> + 0.01), so a larger overlap gives a smaller
    distance without subtraction.
    """
    total = np.sum(np.multiply(quantize(x, qbits), quantize(y, qbits)))
    return 1 - total if qbits == 0 else 1 / (total + 0.01)
def forward(self, x):
    """Forward pass with temporary weight quantization / noise injection.

    Eval mode with quant enabled: all layers in self.layers_list are
    quantized for this pass and restored before returning.
    Training mode with noise enabled: the active training stage gets
    noise added while every earlier stage is hard-quantized; all
    original weights are restored before returning.
    """
    temp_saved = {}
    if self.quant and not self.training:
        # Evaluation: quantize every layer, remembering the originals.
        temp_saved = quantize.backup_weights(self.layers_list, {})
        quantize.quantize(self.layers_list, bitwidth=self.bitwidth)
    elif self.noise and self.training:
        # Training: inject noise into the layers of the current stage...
        temp_saved = quantize.backup_weights(self.layers_steps[self.training_stage], {})
        quantize.add_noise(self.layers_steps[self.training_stage], bitwidth=self.bitwidth, training=self.training)
        # ...and hard-quantize all previously completed stages.
        for i in range(self.training_stage):
            temp_saved = quantize.backup_weights(self.layers_steps[i], temp_saved)
            quantize.quantize(self.layers_steps[i], bitwidth=self.bitwidth)
    # Standard ResNet pipeline: stem, four residual stages, pooled head.
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    x = self.avgpool(x)
    x = x.view(x.size(0), -1)  # flatten per-sample features for the FC head
    x = self.fc(x)
    if self.quant and not self.training:
        quantize.restore_weights(self.layers_list, temp_saved)
    elif self.noise and self.training:
        quantize.restore_weights(self.layers_steps[self.training_stage], temp_saved)  # Restore the noised layers
        for i in range(self.training_stage):
            quantize.restore_weights(self.layers_steps[i], temp_saved)  # Restore the quantized layers
    return x
def cosinedist(x, y, qbits=0):
    """Dissimilarity 1 - sum(quantize(x*y)) of quantized inputs (no normalization)."""
    qx = quantize(x, qbits)
    qy = quantize(y, qbits)
    prod = quantize(np.multiply(qx, qy), qbits)
    return 1 - np.sum(prod)
def euclidean(x, y, qbits):
    """Squared Euclidean distance of mean-centered, norm-scaled, quantized inputs.

    NOTE: the norms are taken *before* mean-centering, mirroring the
    original implementation.
    """
    xnorm = torch.norm(x)
    ynorm = torch.norm(y)
    qx = quantize((x - x.mean()) / xnorm, qbits)
    qy = quantize((y - y.mean()) / ynorm, qbits)
    return torch.pow(qx - qy, 2).sum().item()
def run_net(net, epoch, trainSet, testSet, folder):
    """Train, test, quantize and generate code for `net`, checkpointing
    under `folder + "Checkpoint"` and resuming when a checkpoint exists.

    Fix: the checkpoint path expression was rebuilt (and net.name()
    re-invoked) at every use; it is now computed once.  The resulting
    path string is byte-identical to the original concatenation.
    """
    ckpt_dir = folder + "Checkpoint"
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    model_path = ckpt_dir + "/" + net.name() + ".pt"
    # Resume from an existing checkpoint when one is present.
    if os.path.exists(model_path):
        net = torch.load(model_path)
    train_net(net, epoch, trainSet)
    torch.save(net, model_path)
    test_net(net, testSet)
    quantize(net, testSet)
    gen_code(net, folder)
def mcamdist(x, y):
    """MCAM conductance-based distance using the module-level `qbits`.

    Quantizes both inputs and sums a conductance-table lookup for every
    (y_i, x_i) code pair.

    Returns:
        Sum of the looked-up conductance values.

    Raises:
        Exception: when qbits is not 3 or 4 — the only bit widths with
        conductance tables.  The previous version silently returned 0.0
        in that case, hiding misconfiguration.
    """
    # Hoist the table choice out of the loop (also avoids re-testing
    # qbits per element).
    if qbits == 4:
        table = conductance.G_4bit
    elif qbits == 3:
        table = conductance.G_3bit
    else:
        raise Exception("MCAM only supports quantization bits up to 4")
    qx = quantize(x, qbits)
    qy = quantize(y, qbits)
    G = np.zeros(len(qx))
    for i in range(len(qx)):
        G[i] = table[int(qy[i])][int(qx[i])]
    return np.sum(G)
def mcam_ideal(x, y):
    """Ideal MCAM distance: conductance of the |x - y| codes, summed.

    Uses the module-level `qbits`; entries remain 0 when qbits is
    neither 3 nor 4 (matching the original behavior).
    """
    qx = quantize(x, qbits)
    qy = quantize(y, qbits)
    diff = abs(qx - qy)
    table = conductance.G_4bit if qbits == 4 else (conductance.G_3bit if qbits == 3 else None)
    G = np.zeros(len(qx))
    if table is not None:
        for i in range(len(qx)):
            G[i] = table[0][int(diff[i])]
    return np.sum(G)
def minkowski(x, y):
    """Minkowski-style distance using module-level `qbits` and `minkowski_p`.

    NOTE(review): the 1/p root is applied elementwise *before* the sum,
    so mathematically each term reduces to |x_i - y_i| (then quantized);
    this mirrors the original implementation — confirm it is intended.
    """
    qx = quantize(x / np.linalg.norm(x), qbits)
    qy = quantize(y / np.linalg.norm(y), qbits)
    per_elem = np.power(np.abs(np.power(qx - qy, minkowski_p)), 1 / minkowski_p)
    return np.sum(quantize(per_elem, qbits))
def forward(self, x):
    """Forward pass with temporary weight quantization / noise injection.

    Eval mode with quant enabled: all layers in self.layers_list are
    quantized for this pass and restored before returning.
    Training mode with noise enabled: the active training stage gets
    noise added while every earlier stage is hard-quantized; all
    original weights are restored before returning.
    """
    temp_saved = {}
    if self.quant and not self.training:
        # Evaluation: quantize every layer, remembering the originals.
        temp_saved = quantize.backup_weights(self.layers_list, {})
        quantize.quantize(self.layers_list, bitwidth=self.bitwidth)
    elif self.noise and self.training:
        # Training: inject noise into the layers of the current stage...
        temp_saved = quantize.backup_weights(self.layers_steps[self.training_stage], {})
        quantize.add_noise(self.layers_steps[self.training_stage], bitwidth=self.bitwidth, training=self.training)
        # ...and hard-quantize all previously completed stages.
        for i in range(self.training_stage):
            temp_saved = quantize.backup_weights(self.layers_steps[i], temp_saved)
            quantize.quantize(self.layers_steps[i], bitwidth=self.bitwidth)
    # AlexNet-style feature extractor followed by the classifier head.
    x = self.features(x)
    x = x.view(-1, 256 * 6 * 6)  # flatten — assumes a 256x6x6 feature map
    x = self.classifier(x)
    if self.quant and not self.training:
        quantize.restore_weights(self.layers_list, temp_saved)
    elif self.noise and self.training:
        quantize.restore_weights(self.layers_steps[self.training_stage], temp_saved)  # Restore the noised layers
        for i in range(self.training_stage):
            quantize.restore_weights(self.layers_steps[i], temp_saved)  # Restore the quantized layers
    return x
def mcam_ideal(x, y, qbits=0):
    """Ideal MCAM distance of the |x - y| codes for the given bit width.

    Entries remain 0 when qbits is neither 3 nor 4.
    """
    qx = quantize(x, qbits)
    qy = quantize(y, qbits)
    diff = abs(qx - qy)
    G = np.zeros(len(qx))
    for i in range(len(qx)):
        if qbits == 4:
            G[i] = conductance.G_4bit[0][int(diff[i])]
        elif qbits == 3:
            G[i] = conductance.G_3bit[0][int(diff[i])]
    return np.sum(G)
def minkowski(x, y, minkowski_p=2, qbits=0):
    """Minkowski distance of order `minkowski_p` between quantized inputs.

    The elementwise |x - y|**p terms are re-quantized before the sum and
    the final 1/p root.
    """
    qx = quantize(x, qbits)
    qy = quantize(y, qbits)
    terms = quantize(np.abs(np.power(qx - qy, minkowski_p)), qbits)
    return np.power(np.sum(terms), 1 / minkowski_p)
def cosinedist(x, y):
    """Cosine-based dissimilarity of unit-normalized inputs (module-level `qbits`).

    Full precision (qbits == 0): 1 - <x, y>.
    Quantized: 1 / (<x, y> + 0.01).
    """
    qx = quantize(x / np.linalg.norm(x), qbits)
    qy = quantize(y / np.linalg.norm(y), qbits)
    total = np.sum(np.multiply(qx, qy))
    return 1 - total if qbits == 0 else 1 / (total + 0.01)
def process_movie(mov, op, index, results):
    """Run the full analysis pipeline for one movie and store the final
    quantize() result in results[index].

    Args:
        mov: movie identifier/path passed to every pipeline stage.
        op: options object forwarded to every pipeline stage.
        index: slot in `results` to write the final value into.
        results: shared indexable container (suitable for worker use).
    """
    print(f"performing shot detection for {mov}")
    shot_detection(mov, op)
    print(f"Saving shots for {mov}")
    save_shots(mov, op)
    # Fix: corrected the 'procesing' typo in the progress message.
    print(f"processing shot colors for {mov}")
    shot_colors(mov, op)
    print(f"processing shot colors average for {mov}")
    shot_colors_avg(mov, op)
    print(f"processing movie colors for {mov}")
    movie_colors(mov, op)
    print(f"Calculating motion for {mov}")
    motion(mov, op)
    print(f"Sorting motion spectrum for {mov}")
    sort_motion_spectrum(mov, op)
    results[index] = quantize(mov, op)
    # Redundant trailing bare `return` removed.
def test_quantize_unsorted_input():
    """quantize() on a shuffled 0..9 array returns the sorted boundaries
    0..9 and preserves the float64 dtype."""
    x = list(range(10))
    random.shuffle(x)
    # Fix: np.float (a deprecated alias of the builtin float) was removed
    # in NumPy 1.24, so this test crashed on modern NumPy.  The builtin
    # float is the documented replacement and yields the same float64 dtype.
    x = np.array(x, dtype=float)
    boundaries, quantized = quantize(x, 10)
    assert np.all(list(range(10)) == boundaries)
    assert boundaries.dtype == np.float64
def mcam_ideal(x, y, qbits):
    """Ideal MCAM distance for torch inputs: summed conductances indexed
    by the |x - y| quantization codes.

    Raises:
        Exception: if qbits is not 3 or 4 (no conductance table exists).
    """
    if qbits == 3:
        table = conductance.G_3bit
    elif qbits == 4:
        table = conductance.G_4bit
    else:
        raise Exception("MCAM only supports quantization bits up to 4")
    qx = quantize(x, qbits).data.numpy()
    qy = quantize(y, qbits).data.numpy()
    G = numpy.zeros(len(qx))
    for i in range(len(qx)):
        G[i] = table[0][numpy.abs(int(qy[i]) - int(qx[i]))]
    return numpy.sum(G)
def main():
    """CLI entry point: calibrate an ONNX model against a data set and
    save the QLinearOps-quantized result."""
    parser = argparse.ArgumentParser(
        description='parsing model and test data set paths')
    parser.add_argument('--model_path', required=True)
    parser.add_argument('--dataset_path', required=True)
    parser.add_argument('--output_model_path', type=str,
                        default='calibrated_quantized_model.onnx')
    parser.add_argument(
        '--dataset_size', type=int, default=0,
        help=
        "Number of images or tensors to load. Default is 0 which means all samples"
    )
    parser.add_argument(
        '--data_preprocess', type=str, required=True,
        choices=['preprocess_method1', 'preprocess_method2', 'None'],
        help="Refer to Readme.md for guidance on choosing this option.")
    args = parser.parse_args()

    # Build the augmented model used to collect intermediate outputs.
    augmented_model_path = 'augmented_model.onnx'
    model = onnx.load(args.model_path)
    onnx.save(augment_graph(model), augmented_model_path)

    # Run inference over the calibration inputs.
    session = onnxruntime.InferenceSession(augmented_model_path, None)
    (samples, channels, height, width) = session.get_inputs()[0].shape
    if args.data_preprocess == "None":
        inputs = load_pb_file(args.dataset_path, args.dataset_size, samples,
                              channels, height, width)
    else:
        inputs = load_batch(args.dataset_path, height, width,
                            args.dataset_size, args.data_preprocess)
    print(inputs.shape)

    # Derive quantization thresholds and produce the quantized model.
    dict_for_quantization = get_intermediate_outputs(args.model_path, session,
                                                     inputs, "naive")
    quantization_params_dict = calculate_quantization_params(
        model, quantization_thresholds=dict_for_quantization)
    calibrated_quantized_model = quantize(
        onnx.load(args.model_path),
        quantization_mode=QuantizationMode.QLinearOps,
        quantization_params=quantization_params_dict)
    onnx.save(calibrated_quantized_model, args.output_model_path)
    print("Calibrated, quantized model saved.")
def main():
    """CLI wrapper around quantize.quantize(): translate flags into
    keyword arguments, quantize the model, and save it."""
    parser = argparse.ArgumentParser(
        description='Quantize model with specified parameters')
    parser.add_argument('--no_per_channel', '-t', action='store_true',
                        default=False)
    parser.add_argument('--nbits', type=int, default=8)
    parser.add_argument('--quantization_mode', default='Integer',
                        choices=('Integer', 'QLinear'))
    parser.add_argument('--static', '-s', action='store_true', default=False)
    parser.add_argument('--asymmetric_input_types', action='store_true',
                        default=False)
    parser.add_argument('--input_quantization_params', default='')
    parser.add_argument('--output_quantization_params', default='')
    parser.add_argument('model')
    parser.add_argument('output')
    args = parser.parse_args()

    # Invert the negative flag into the positive kwarg quantize() expects.
    args.per_channel = not args.no_per_channel
    del args.no_per_channel

    # Map the CLI mode name onto the library enum.
    mode_map = {
        'QLinear': quantize.QuantizationMode.QLinearOps,
        'Integer': quantize.QuantizationMode.IntegerOps,
    }
    args.quantization_mode = mode_map[args.quantization_mode]

    # Empty strings mean "no params"; otherwise parse the JSON payloads.
    args.input_quantization_params = (
        json.loads(args.input_quantization_params)
        if args.input_quantization_params else None)
    args.output_quantization_params = (
        json.loads(args.output_quantization_params)
        if args.output_quantization_params else None)

    # Load the model, then drop the positional paths so that vars(args)
    # contains only quantize() keyword arguments.
    model_file = args.model
    model = onnx.load(model_file)
    del args.model
    output_file = args.output
    del args.output

    print('Quantize config: {}'.format(vars(args)))
    quantized_model = quantize.quantize(model, **vars(args))
    print('Saving "{}" to "{}"'.format(model_file, output_file))
    onnx.save(quantized_model, output_file)
def create_models_train(self, no_of_hidden_states_list, no_epochs=10):
    """Quantize each class's training sequences and fit one DiscreteHMM
    per class using Baum-Welch.

    Args:
        no_of_hidden_states_list: hidden-state count for each class.
        no_epochs: number of Baum-Welch training epochs.
    """
    self.hmm_models = []
    for cls in range(self.n_classes):
        # Quantize every training sequence belonging to this class.
        quantized = [quantize(seq, self.q) for seq in self.data[cls]]
        model = DiscreteHMM(self.q, no_of_hidden_states_list[cls])
        self.hmm_models.append(model)
        model.train(quantized, method="BW", no_epochs=no_epochs)
def main():
    """Calibrate resnet50_v1.onnx against a local data set and save the
    QLinearOps-quantized model."""
    model_path = './resnet50_v1.onnx'
    reader = ResNet50DataReader('./calibration_data_set_test')
    # calibrate() produces the per-tensor zero points and scales.
    params = calibrate(model_path, reader)
    quantized = quantize(
        onnx.load(model_path),
        quantization_mode=QuantizationMode.QLinearOps,
        force_fusions=False,
        quantization_params=params)
    onnx.save(quantized, './calibrated_quantized_model.onnx')
    print('Calibrated and quantized model saved.')
def __init__(self, quant_epoch_step, quant_start_stage, quant=False, noise=False,
             bitwidth=32, step=2, quant_edges=True, act_noise=True,
             step_setup=[15, 9], act_bitwidth=32, act_quant=False, uniq=False,
             std_act_clamp=5, std_weight_clamp=3.45, wrpn=False,
             quant_first_layer=False, num_of_layers_each_step=1,
             noise_mask=0.05):
    """Initialize the staged-quantization training state for a UNIQNet.

    Args:
        quant_epoch_step: epochs per quantization stage.
        quant_start_stage: stage index to start quantizing from (also
            seeds quant_stage_for_grads).
        quant: enable weight quantization in eval-time forward passes.
        noise: enable noise injection during training (also gates
            noise_mask_init below).
        bitwidth: int, or a list of per-stage bit widths of length `step`.
        step: number of quantization stages.
        quant_edges, act_noise, act_bitwidth, act_quant: activation /
            edge-layer quantization switches stored for later use.
        step_setup: NOTE(review) — mutable default argument, and not
            referenced in this constructor; confirm it is consumed by a
            subclass before removing.
        uniq: not referenced in this constructor.
        std_act_clamp, std_weight_clamp: clamp widths forwarded to the
            quantize helper.
        wrpn: alternative (WRPN-style) quantization flag, stored only.
        quant_first_layer: whether the first layer is quantized too.
        num_of_layers_each_step: layers grouped into each stage.
        noise_mask: noise magnitude used when `noise` is enabled.
    """
    super(UNIQNet, self).__init__()
    self.quant_epoch_step = quant_epoch_step
    self.quant_start_stage = quant_start_stage
    self.quant = quant
    self.noise = noise
    self.wrpn = wrpn
    # Accept either one bit width for all stages or an explicit per-stage list.
    if isinstance(bitwidth, list):
        assert (len(bitwidth) == step)
        self.bitwidth = bitwidth
    else:
        self.bitwidth = [bitwidth for _ in range(step)]
    self.training_stage = 0  # current stage; advanced externally
    self.step = step
    self.num_of_layers_each_step = num_of_layers_each_step
    self.act_noise = act_noise
    self.act_quant = act_quant
    self.act_bitwidth = act_bitwidth
    self.quant_edges = quant_edges
    self.quant_first_layer = quant_first_layer
    # Hooks snapshot and restore module state around every forward call.
    self.register_forward_pre_hook(save_state)
    self.register_forward_hook(restore_state)
    self.layers_b_dict = None
    # Noise is disabled entirely (mask 0.) unless `noise` is requested.
    self.noise_mask_init = 0. if not noise else noise_mask
    self.quantize = quantize.quantize(bitwidth, self.act_bitwidth, None,
                                      std_act_clamp=std_act_clamp,
                                      std_weight_clamp=std_weight_clamp,
                                      noise_mask=self.noise_mask_init)
    self.statistics_phase = False
    self.allow_grad = False
    self.random_noise_injection = False
    self.open_grad_after_each_stage = True
    self.quant_stage_for_grads = quant_start_stage
    self.noise_level = 0
    self.noise_batch_counter = 0
def test_quantize(filename, result_dir=None):
    """Exercise quantize() at several color levels and report pass/fail.

    Args:
        filename: path of the input image.
        result_dir: when given, each quantized image is saved there.

    Fix: ported the Python 2 `print` statements to print() calls — the
    rest of this file uses Python 3 syntax (f-strings), so this block
    was a SyntaxError under Python 3.  The manual counter is replaced
    with enumerate().
    """
    input_img = Image.open(filename)
    cases = [128, 32, 8, 4, 2]
    for count, level in enumerate(cases, 1):
        print("Quantization Case %d" % (count, ))
        result = quantize(input_img, level)
        result_level = len(result.getcolors())
        comparison = "expected level %d, actual level %d" % (level,
                                                             result_level)
        expect(result_level == level,
               "[PASS] Quantization: " + comparison,
               "[FAIL] Quantization: " + comparison)
        if result_dir:
            result_name = 'quantize-%d.png' % (level, )
            result_path = os.path.join(result_dir, result_name)
            result.save(result_path)
            print(case_message('[Saved] ' + result_path))
import quantize as qt
import matplotlib.pyplot as plt

# Quantization SNR experiment: quantize an analog signal at several bit
# depths and print the resulting SNR in dB for each.
# (Original comments were in Portuguese; translated to English.)
s_analog = 4.5 * np.load('sample.npy')  # load and scale the analog signal
x = 0  # input signal sample
y = 0  # output (quantized) signal sample
xfinal = 0  # running sum for the input power
yfinal = 0  # running sum for the output power
xfinal2 = 0  # mean input power
yfinal2 = 0  # mean output power
lista_bits = [2, 3, 4, 5, 6, 7, 8]
for n in lista_bits:
    s_analog_qnt = qt.quantize(s_analog, n, 'midtread')
    # NOTE(review): xfinal/yfinal are never reset between bit depths, so
    # each SNR printed below also accumulates power from all previous
    # iterations — confirm whether this is intended.
    # NOTE(review): each inner loop adds the same np.square(...) value
    # 100 times and the /100 below cancels it; the loops look redundant.
    for t1 in range(0, 100, 1):  # assuming the number of samples equals 100
        y = np.square(s_analog_qnt)
        yfinal = yfinal + y
    for t2 in range(0, 100, 1):  # assuming the number of samples equals 100
        x = np.square(s_analog)
        xfinal = xfinal + x
    xfinal2 = np.sum(xfinal) / 100  # mean power of the input signal
    yfinal2 = np.sum(yfinal) / 100  # mean power of the output signal
    # Mean error power; abs() because log10 rejects negative values.
    z = abs(yfinal2 - xfinal2)
    print("SNRdB do bit " + str(n) + " = " + str(10 * np.log10(xfinal2 / z)))
plt.plot(s_analog, label='sinal original')
def lichtenstein(img, qtNewCols=DEFAULT_COLOURS, qtSigma=4, qtNCols=8, edSigma=1.4, edThresH=0.2, edThresL=0.1, edColour=(0,0,0), htBox=8, htColour=ht.AVERAGE_COLOUR_ON_WHITE, htCRatio=1, aalias=2):
    '''Generate a Roy Lichtenstein-style RGB PIL image from a PIL image.

    The input is quantized to a reduced palette, halftoned, and overlaid
    with detected edges; best results come from RGB inputs.

    Parameters:
        img       [PIL Image] : source image.
        qtNewCols [tuple] : 3-tuple RGB values used as the new colours
                            for the quantize process.
        qtSigma   [float] : gaussian-blur magnitude for noise reduction
                            before quantizing.
        qtNCols   [int]   : number of colours to reduce the image to.
        edSigma   [float] : gaussian-blur magnitude before edge detection.
        edThresH  [float] : upper threshold for edge linking/normalisation.
        edThresL  [float] : lower threshold for edge linking/normalisation.
        edColour  [tuple] : RGB colour of the detected edges.
        htBox     [int]   : halftoning sample-box width/height in pixels.
        htColour  [tuple] : (foreground, background) colours for the
                            halftone circles, or 'ht.AVERAGE_COLOUR'.
        htCRatio  [float] : circle-size ratio for the halftone dots.
        aalias    [int]   : anti-alias amount for all processes.

    On Exit:
        Returns the composed Lichtenstein image as an RGB PIL image.
    '''
    rgb = img.convert('RGB')
    quant_img = qt.quantize(rgb, qtNewCols, qtNCols, qtSigma, aalias)
    half_img = ht.halftoning(quant_img, htBox, htCRatio, aalias, htColour)
    edge_img = ed.canny_edge_detection(rgb, edSigma, edThresH, edThresL, edColour)
    edge_mask = ImageOps.invert(pila.convert_rgba_to_mask(edge_img))

    # Build a mask selecting the quantized image where a pixel was mapped
    # to one of the new colours, and the halftone everywhere else.
    half_mask = Image.new('1', rgb.size)
    half_mask_pix = half_mask.load()
    quant_pix = quant_img.load()
    for px, py in pila.pixel_generator(*rgb.size):
        half_mask_pix[px, py] = 1 if quant_pix[px, py] in qtNewCols else 0

    combined = Image.composite(quant_img, half_img, half_mask)   # quant over halftone
    final_img = Image.composite(combined, edge_img, edge_mask)   # overlay edges
    return final_img.convert('RGB')
output=args.onnx_model_path, opset=11) # ONNX optimization optimized_model = optimizer.optimize_model(args.onnx_model_path, model_type='bert', num_heads=12, hidden_size=768) optimized_onnx_model_path = os.path.join( os.path.dirname(args.onnx_model_path), 'bert_optimized.onnx') optimized_model.save_model_to_file(optimized_onnx_model_path) print('Optimized model saved at :', optimized_onnx_model_path) # ONNX quantization model = onnx.load(optimized_onnx_model_path) quantized_model = quantize(model, quantization_mode=QuantizationMode.IntegerOps, static=False) optimized_quantized_onnx_model_path = os.path.join( os.path.dirname(optimized_onnx_model_path), 'bert_optimized_quantized.onnx') onnx.save(quantized_model, optimized_quantized_onnx_model_path) print('Quantized&optimized model saved at :', optimized_quantized_onnx_model_path) #load data eval_df = pd.read_csv(args.eval_data_path) eval_df = eval_df[['text', 'class']] print('Number of examples: ', eval_df.shape[0]) examples = eval_df.text.values.tolist() # labels = eval_df.class.values.tolist()