def apply_finite_conductance_states_to_crossbar(crossbar, num_conductance_states):
    """Snap the crossbar's conductance matrix onto a finite set of states.

    Each device's admissible conductance range is [1/r_off, 1/r_on]
    (conductance is the reciprocal of resistance, so r_off gives the
    minimum and r_on the maximum).

    NOTE(review): the return value of ``quantization.quantize`` is
    discarded, so this assumes it mutates ``crossbar.conductance_matrix``
    in place — confirm against the quantization module.

    :param crossbar: crossbar whose ``devices`` expose ``r_on``/``r_off``.
    :param num_conductance_states: number of discrete states to quantize to.
    :return: the (mutated) crossbar, for call chaining.
    """
    # Per-device conductance bounds, flattened to line up with the
    # flattened conductance matrix, placed on the module-level `device`.
    r_off = np.vectorize(lambda dev: dev.r_off)(crossbar.devices)
    r_on = np.vectorize(lambda dev: dev.r_on)(crossbar.devices)
    g_min = torch.tensor(1 / r_off).view(-1).to(device).float()
    g_max = torch.tensor(1 / r_on).view(-1).to(device).float()
    quantization.quantize(crossbar.conductance_matrix,
                          num_conductance_states, g_min, g_max)
    return crossbar
def quantization_delegate(image, channels, *args):
    """Quantize *channels* using the globally configured bins.

    Returns the quantized channels together with the bins that apply;
    when every bin is zero/empty, ``(255, 255, 255)`` is reported instead.

    NOTE(review): the fallback bins are substituted *after* quantization
    has already run, so they describe the result rather than drive it —
    confirm this ordering is intentional. ``image`` and ``*args`` are
    accepted for the delegate signature but unused here.
    """
    bins = get_quantization_bins()
    quantized = quantization.quantize(channels, bins)
    if not any(bins):
        bins = (255, 255, 255)
    return quantized, bins
def run(self):
    """Quantize, evaluate and plot every configured experiment in turn.

    Note: running an experiment on a model leaves its parameters unchanged
    after completion — the state dict is backed up to ``BACKUP_PATH``
    before quantization and restored afterwards.

    Fixes over the previous version:
    - the backup is restored in a ``finally`` block, so a failure during
      quantization/evaluation no longer leaves the model corrupted;
    - ``os.remove`` is only attempted when a backup was actually written,
      so an empty experiment list no longer raises FileNotFoundError.
    """
    backup_written = False
    for experiment in self.experiments:
        # Preserve the pristine weights before quantizing in place.
        torch.save(experiment.model.state_dict(), self.BACKUP_PATH)
        backup_written = True
        try:
            quantization.quantize(experiment.model,
                                  experiment.parameter_types,
                                  experiment.quantizer_type,
                                  experiment.n,
                                  base=experiment.base)
            accuracy = utilities.testAccuracy(experiment.model,
                                              experiment.test_ds)
            print(experiment.id + " accuracy:", accuracy)
            self.accuracies.append(accuracy)
            self.visualizer.plot_value(accuracy, 1, "accuracy")
            for parameter_type in experiment.parameter_types:
                self.visualizer.plot_distribution(
                    experiment.model,
                    self.visualizer.layout.plot_resolution,
                    experiment.id,
                    parameter_type)
        finally:
            # Always restore the original weights, even if evaluation failed.
            experiment.model.load_state_dict(torch.load(self.BACKUP_PATH))
    if self.output_plots is True:
        self.visualizer.output_plots()
    if backup_written:
        os.remove(self.BACKUP_PATH)
def test_quantize(filename, dist):
    """Quantize the image at *filename* at several palette sizes and save each.

    :param filename: path of the source image.
    :param dist: output directory for the generated PNG files.

    Fixes: Python-2 ``print`` statements converted to the ``print()``
    function (single-argument form, so it behaves identically under
    Python 2 as well); "Quantiztion" typo corrected; the misspelled
    local ``expections`` renamed.
    """
    im = Image.open(filename)
    # Palette sizes to exercise, from near-lossless down to binary.
    levels = [128, 32, 8, 4, 2]
    print("Quantization")
    for level in levels:
        out = quantize(im, level)
        out_name = "quantization-%d.png" % level
        out_path = os.path.join(dist, out_name)
        out.save(out_path)
        print(" Picture %s has been saved to the assets folder !" % out_name)
def main():
    """Train and score a random forest on the quantized ADL dataset.

    Pipeline: vector-quantize the raw dataset into 5 clusters, encode the
    string labels, hold out 30% for testing, fit the forest and print the
    test-set accuracy.
    """
    n_clusters = 5
    X, labels = quantize(join('datasets', 'adl'), n_clusters)
    y, encoder, classes = encode_labels(labels)
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
    forest = RandomForestClassifier(
        tree_funcs=(learn_tree, predict_tree),
        n_trees=50,
        max_depth=3,
        min_leaf_size=5,
        min_split_size=10,
        feature_subset_size='sqrt',
    )
    forest.fit(X_train, y_train)
    predicted = forest.predict(X_test)
    acc = np.mean(predicted == y_test)
    print(f'Test dataset predictions accuracy: {acc:2.2%}')
# NOTE(review): fragment — it begins mid-way through a `parser.add_argument`
# call; the opening of that statement and the surrounding context (`parser`,
# the `training` flag, `X_train`/`X_test`) lie outside this excerpt, so the
# code is left untouched. Visible flow: register CLI hyper-parameters, init
# wandb, train and evaluate a model; when not training, reload a stored
# model, quantize it over the concatenated train/test data, print the
# per-entry info strings and pull out the weight/bias dicts.
action='store', dest='layer_size_hl1') parser.add_argument('--layer_size_hl2', action='store', dest='layer_size_hl2') parser.add_argument('--layer_size_hl3', action='store', dest='layer_size_hl3') parser.add_argument('--learning_rate', action='store', dest='learning_rate') parser.add_argument('--optimizer', action='store', dest='optimizer') args = parser.parse_args() wandb.init(project="amcpy-team", config=HyperParameter(None).get_dict()) config = wandb.config model_id = train_rna(config) loaded_model, loaded_model_id = get_model_from_id(model_id) evaluate_rna(loaded_model) if not training: loaded_model, _ = get_model_from_id(' ') load_dict, info_dict = quantization.quantize( loaded_model, np.concatenate((X_train, X_test))) for info in info_dict: print(info + ' -> ' + info_dict[info]) weights = load_dict['weights'] biases = load_dict['biases'] # serial_communication()
# NOTE(review): fragment truncated at both ends — it starts mid-call (the
# dataset-construction statement opens before this excerpt) and ends with a
# dangling `tensor_list=None,` inside `quantization.write_tensors(...)`, so
# the code is left untouched. Visible flow: load one representative image
# per directory, convert the model to TFLite (float and quantized variants),
# then optionally evaluate the two .tflite files and dump their tensors.
resize_randomness=Param.d['resize_random'], **Param.d) images = np.empty([len(image_dirs)] + list(Param.d['image_size']), dtype=np.float32) print('Loading representative images...', end=' ') for i, (idir, ldir) in enumerate(zip(image_dirs, label_dirs)): img, _ = train_set._load_function(idir, ldir) images[i] = img print('Done.') print('') (tflite_model_file, tflite_model_quant_file) = quantization.quantize(model, images, ckpt_to_load, Param.save_dir, overwrite=overwrite, saved_model=saved_model, **Param.d) if evaluate_models: quantization.evaluate_quantized_model(tflite_model_file, tflite_model_quant_file, test_set, evaluator, num_processes=Param.d.get( 'num_parallel_calls', 4), **Param.d) if write_tensors: quantization.write_tensors(tflite_model_file, images[0], tensor_list=None,