def info_tutorial():
    """Walk through the spatial-layer API: inspection, masking, training, and op counting."""
    nn = NeuralNet()
    x_shape = dat.shape()
    test_gen, _ = dat.testset(batch_size=cfg.BATCH_SIZE, max_samples=cfg.TEST_SET_SIZE)
    nn.test(test_gen, print_it=True)
    nn.net.initialize_spatial_layers(x_shape, cfg.BATCH_SIZE, PATCH_SIZE)
    nn.summary(x_shape, print_it=True)
    nn.print_weights()
    print(nn.output_size(x_shape))
    # Spatial Operations, defined on the net itself. Remember that after enabling a layer, ops are affected
    assert nn.net.num_spatial_layers() != 0
    nn.net.print_spatial_status()
    # nn.train(epochs=1, set_size=5000, lr=0.1, batch_size=cfg.BATCH_SIZE)  # Train to see fully disabled performance
    nn.net.print_ops_summary()
    nn.net.print_ops_summary(use_conv=True)  # Count convolution operations instead of MAC
    print(nn.net.num_ops())  # (ops_saved, total_ops)
    # Given x, we generate all spatial layer requirement sizes:
    spat_sizes = nn.net.generate_spatial_sizes(x_shape)
    print(spat_sizes)
    p_spat_sizes = nn.net.generate_padded_spatial_sizes(x_shape, PATCH_SIZE)
    print(p_spat_sizes)
    # Generate a constant 1 value mask over all spatial nets
    print(nn.net.enabled_layers())
    nn.net.fill_masks_to_val(1)
    print(nn.net.enabled_layers())
    print(nn.net.disabled_layers())
    nn.net.print_spatial_status()  # Now all are enabled, seeing the mask was set
    nn.train(epochs=1, set_size=5000, lr=0.1, batch_size=cfg.BATCH_SIZE)  # Train to see all layers enabled performance
    nn.net.print_ops_summary()
    nn.net.print_ops_summary(use_conv=True)  # Count convolution operations instead of MAC
    nn.net.reset_spatial()  # Disables layers as well
    nn.net.print_ops_summary()
    nn.net.print_ops_summary(use_conv=True)
    # Turns on 3 ids and turns off all others
    chosen_victims = random.sample(range(nn.net.num_spatial_layers()), 4)
    nn.net.strict_mask_update(update_ids=chosen_victims[0:3],
                              masks=[
                                  torch.zeros(p_spat_sizes[chosen_victims[0]]),
                                  torch.zeros(p_spat_sizes[chosen_victims[1]]),
                                  torch.zeros(p_spat_sizes[chosen_victims[2]])
                              ])
    # Turns on one additional id and *does not* turn off all others
    nn.net.lazy_mask_update(update_ids=[chosen_victims[3]],
                            masks=[torch.zeros(p_spat_sizes[chosen_victims[3]])])
    nn.net.print_spatial_status()
    # print(nn.net.enabled_layers())
    nn.train(epochs=1, set_size=5000, lr=0.1, batch_size=cfg.BATCH_SIZE)  # Run with 4 layers on
    nn.net.print_ops_summary()
    nn.net.print_ops_summary(use_conv=True)
def __init__(self, resume=True, ckp_name_prefix=None):
    """Build the wrapped network on the best available device, optionally resuming weights.

    :param resume: when True, search cfg.CHECKPOINT_DIR (via the strategy named by
                   cfg.RESUME_METHOD) and load the matching checkpoint if one exists.
    :param ckp_name_prefix: optional name suffix used to discriminate checkpoint files.
    """
    # Decide on device:
    if torch.cuda.is_available():
        # print('CUDA FOUND!')
        self.device = torch.device('cuda')
        cudnn.benchmark = True  # let cuDNN pick the fastest conv algorithms for fixed shapes
        if torch.cuda.device_count() > 1:
            raise NotImplementedError
            # This line enables multiple GPUs, but changes the layer names a bit
            # self.net = torch.nn.DataParallel(self.net)  # Useful if you have multiple GPUs - does not hurt otherwise
    else:
        self.device = torch.device('cpu')
        # torch.set_num_threads(4) # Presuming 4 cores
        print('WARNING: Found no valid GPU device - Running on CPU')
    # Build Model:
    # NOTE(review): `net` and `dat` are module-level globals (architecture class and dataset
    # wrapper) — confirm against the imports at the top of the file.
    print(f'==> Building model {net.__name__} on the dataset {dat.name()}')
    self.net = net(self.device, dat.num_classes(), dat.input_channels(), dat.shape())
    print(f'==> Detected family model of {self.net.family_name()}')
    if resume:
        print(f'==> Resuming from checkpoint via sorting method: {cfg.RESUME_METHOD}')
        assert os.path.isdir(cfg.CHECKPOINT_DIR), 'Error: no checkpoint directory found!'
        # resume_methods maps a method name to a checkpoint-selection callable
        ck_file = self.__class__.resume_methods[cfg.RESUME_METHOD](self.net.family_name(),
                                                                   ckp_name_prefix=ckp_name_prefix)
        if ck_file is None:
            print(f'-E- Found no valid checkpoints for {net.__name__} on {dat.name()}')
            self.best_val_acc = 0
            self.start_epoch = 0
        else:
            checkpoint = torch.load(ck_file, map_location=self.device)
            self._load_checkpoint(checkpoint['net'])
            self.best_val_acc = checkpoint['acc']
            self.start_epoch = checkpoint['epoch']
            # Guard against loading a checkpoint trained on a different dataset
            assert (dat.name() == checkpoint['dataset'])
            print(f'==> Loaded model with val-acc of {self.best_val_acc:.3f}')
    else:
        self.best_val_acc = 0
        self.start_epoch = 0
    self.net = self.net.to(self.device)
    # Build SGD Algorithm:
    self.criterion = torch.nn.CrossEntropyLoss()
    # Data generators and optimizer are created lazily in train()
    self.train_gen, self.val_gen, self.classes = (None, None, None)
    self.optimizer = None
def training():
    """Resume from the best checkpoint, train further, and report final test metrics."""
    # dat.data_summary(show_sample=False)
    model = NeuralNet(resume=True)  # Spatial layers are by default, disabled
    model.summary(dat.shape())
    model.train(epochs=50, lr=0.01)
    generator, _ = dat.testset(batch_size=cfg.BATCH_SIZE,
                               max_samples=cfg.TEST_SET_SIZE)
    loss, acc, n_samples = model.test(generator)
    print(f'==> Final testing results: test acc: {acc:.3f} with {n_samples}, test loss: {loss:.3f}')
def patch3x3(patterns_idx):
    """Run the acc-loss sweep for 3x3 patches using the pattern cluster at *patterns_idx*."""
    ones_range = (patterns_idx, patterns_idx + 1)
    gran = 10
    eval_baseline_and_runtimes(3, ones_range, gran, patterns_idx=patterns_idx)
    acc_loss = [10]
    # Same sweep for each uniformity mode, in the original order
    for mode in (Mode.UNIFORM_LAYER, Mode.UNIFORM_FILTERS, Mode.UNIFORM_PATCH):
        run_all_acc_loss_possibilities(3, ones_range, gran, mode,
                                       acc_loss_opts=acc_loss,
                                       patterns_idx=patterns_idx)
    plotting.plot_ops_saved_vs_max_acc_loss(cfg.NET.__name__, dat.name(), 3,
                                            ones_range, gran, acc_loss, 93.5)
def create_FR_after_retrain(self, mode, acc_loss, retrain=True, epochs=50, lr=0.01):
    """Create a final result record for (mode, acc_loss), optionally retraining under its mask.

    Re-evaluates accuracy and op counts with the retrained weights, updates the
    record, prints it, and persists it to cfg.RESULTS_DIR.
    """
    final_rec = self.create_FR_with_different_acc_loss(mode, acc_loss)
    if retrain:
        self.retrain_with_mask(final_rec, epochs=epochs, lr=lr)
    # Fresh network resuming from the checkpoint tagged with the record's retrain prefix
    retrain_nn = NeuralNet(ckp_name_prefix=final_rec.get_retrain_prefix())
    retrain_nn.net.initialize_spatial_layers(dat.shape(), cfg.BATCH_SIZE, self.ps)
    retrain_nn.net.reset_spatial()
    # Re-apply the record's mask to every spatial layer before measuring
    retrain_nn.net.strict_mask_update(update_ids=list(range(len(final_rec.mask))),
                                      masks=final_rec.mask)
    if INNAS_COMP:
        # Debug shortcut: skip the expensive evaluation, use placeholder values
        test_acc = 100
        ops_saved = 100
        ops_total = 100
    else:
        _, test_acc, _ = retrain_nn.test(self.test_gen)
        ops_saved, ops_total = retrain_nn.net.num_ops()
    final_rec.retrain_update(test_acc, ops_saved, ops_total, epochs, lr)
    print(final_rec)
    save_to_file(final_rec, path=cfg.RESULTS_DIR)
def gen_first_lvl_results(self, mode):
    """Generate (or resume) the first-level record: test accuracy and op counts per mask.

    If a partial record file exists, resumes from its saved position; results are
    checkpointed to disk every cfg.SAVE_INTERVAL iterations.

    :returns: the populated Record instance.
    """
    rec_filename = self.record_finder.find_rec_filename(mode, RecordType.FIRST_LVL_REC)
    if rec_filename is not None:
        rcs = load_from_file(rec_filename, path='')
        st_point = rcs.find_resume_point()
        if st_point is None:
            # Record already complete - nothing to compute
            return rcs
    layers_layout = self.nn.net.generate_spatial_sizes(dat.shape())
    self._init_nn()
    if rec_filename is None:
        # No previous record: build one from scratch (third arg flags default vs. input patterns)
        if self.input_patterns is None:
            rcs = Record(layers_layout, self.gran_thresh, True, mode, self.init_acc,
                         self.ps, self.ones_range)
        else:
            rcs = Record(layers_layout, self.gran_thresh, False, mode, self.init_acc,
                         self.input_patterns, self.ones_range)
        st_point = [0] * 4  # (layer, channel, patch, pattern) resume indices
    if INNAS_COMP:
        rcs.filename = 'DEBUG_' + rcs.filename
    print('==> Result will be saved to ' + os.path.join(cfg.RESULTS_DIR, rcs.filename))
    save_counter = 0
    for layer, channel, patch, pattern_idx, mask in tqdm(
            mf.gen_masks_with_resume(self.ps, rcs.all_patterns, rcs.mode,
                                     rcs.gran_thresh, layers_layout,
                                     resume_params=st_point)):
        self.nn.net.strict_mask_update(update_ids=[layer],
                                       masks=[torch.from_numpy(mask)])
        if INNAS_COMP:
            # Debug shortcut: placeholder metrics instead of running the net
            test_acc = 100
            ops_saved = 100
            ops_total = 100
        else:
            _, test_acc, _ = self.nn.test(self.test_gen)
            ops_saved, ops_total = self.nn.net.num_ops()
        self.nn.net.reset_spatial()
        rcs.addRecord(ops_saved, ops_total, test_acc, layer, channel, patch, pattern_idx)
        save_counter += 1
        if save_counter > cfg.SAVE_INTERVAL:
            save_to_file(rcs, True, cfg.RESULTS_DIR)
            save_counter = 0
    # Final save after the loop, regardless of the interval counter
    save_to_file(rcs, True, cfg.RESULTS_DIR)
    print('==> Result saved to ' + os.path.join(cfg.RESULTS_DIR, rcs.filename))
    return rcs
def main_plot_ops_saved_vs_max_acc_loss(ps, ones_range, gran_th, title=None):
    """Sweep every mode over ACC_LOSS_OPTS and plot ops saved vs. max accuracy loss."""
    init_acc = get_init_acc(ps, ones_range, gran_th)
    for uniform_mode in (Mode.UNIFORM_LAYER, Mode.UNIFORM_FILTERS, Mode.UNIFORM_PATCH):
        run_all_acc_loss_possibilities(ps, ones_range, gran_th, uniform_mode)
    plotting.plot_ops_saved_vs_max_acc_loss(cfg.NET.__name__, dat.name(), ps,
                                            ones_range, gran_th, ACC_LOSS_OPTS,
                                            init_acc, title=title)
    # Re-plot (without the custom title) once MAX_GRANULARITY results are in as well
    run_all_acc_loss_possibilities(ps, ones_range, gran_th, Mode.MAX_GRANULARITY)
    plotting.plot_ops_saved_vs_max_acc_loss(cfg.NET.__name__, dat.name(), ps,
                                            ones_range, gran_th, ACC_LOSS_OPTS,
                                            init_acc)
def base_line_result(self):
    """Measure accuracy and ops saved with the baseline mask applied to every layer,
    then print and persist the resulting BaselineResultRc."""
    layers_layout = self.nn.net.generate_spatial_sizes(dat.shape())
    self._init_nn()
    # One mask tensor per spatial layer, converted from the numpy baseline masks
    sp_list = [None] * len(layers_layout)
    for layer, layer_mask in enumerate(mf.base_line_mask(layers_layout, self.ps)):
        sp_list[layer] = torch.from_numpy(layer_mask)
    self.nn.net.strict_mask_update(update_ids=list(range(len(layers_layout))),
                                   masks=sp_list)
    _, test_acc, _ = self.nn.test(self.test_gen)
    ops_saved, ops_total = self.nn.net.num_ops()
    bl_rec = BaselineResultRc(self.init_acc, test_acc, ops_saved, ops_total,
                              self.ps, cfg.NET.__name__, dat.name())
    print(bl_rec)
    save_to_file(bl_rec, True, cfg.RESULTS_DIR)
def main_plot_ops_saved_vs_ones(mode):
    """Sweep the ones-count options at patch size 3 and plot ops saved per count for *mode*."""
    patch = 3
    ones_options = range(3, 8)
    # Baseline accuracy is measured once, using the first two ones-count values as the range
    baseline_acc = get_init_acc(patch, (ones_options[0], ones_options[1]), GRANULARITY_TH)
    run_all_ones_possibilities(patch, ones_options, GRANULARITY_TH, ACC_LOSS)
    plotting.plot_ops_saved_vs_ones(cfg.NET.__name__, dat.name(), patch,
                                    ones_options, GRANULARITY_TH, ACC_LOSS,
                                    baseline_acc, mode)
def __init__(self, patch_size, ones_range, gran_thresh, max_acc_loss,
             init_acc=None, test_size=cfg.TEST_SET_SIZE, patterns_idx=None):
    """Set up the simulation: network, spatial layers, test generator, and record finder.

    :param patch_size: spatial patch size (stored as self.ps).
    :param ones_range: (min, max) ones per pattern; overridden when patterns_idx is given.
    :param gran_thresh: granularity threshold forwarded to record generation.
    :param max_acc_loss: maximum tolerated accuracy loss.
    :param init_acc: baseline accuracy; measured on the test set when None.
    :param test_size: max number of test samples (defaults to cfg.TEST_SET_SIZE).
    :param patterns_idx: when given, load a clustered pattern file and derive
                         ones_range / input patterns from it.
    """
    self.ps = patch_size
    self.max_acc_loss = max_acc_loss
    self.gran_thresh = gran_thresh
    if patterns_idx is None:
        self.ones_range = ones_range
        self.input_patterns = None
    else:
        patterns_rec = load_from_file(
            f'all_patterns_ps{self.ps}_cluster{patterns_idx}.pkl',
            path=cfg.RESULTS_DIR)
        self.ones_range = (patterns_rec[1], patterns_rec[1] + 1)
        self.input_patterns = patterns_rec[2]
    self.full_net_run_time = None
    self.total_ops = None
    self.nn = NeuralNet()
    self.nn.net.initialize_spatial_layers(dat.shape(), cfg.BATCH_SIZE, self.ps)
    # BUGFIX: the test_size parameter was accepted but ignored - both the generator
    # and self.test_set_size hard-coded cfg.TEST_SET_SIZE. Honor the argument
    # (the default keeps the old behavior for callers that never passed it).
    self.test_gen, _ = dat.testset(batch_size=cfg.BATCH_SIZE, max_samples=test_size)
    self.test_set_size = test_size
    if INNAS_COMP:
        init_acc = DEBUG_INIT_ACC
    if init_acc is None:
        # No baseline supplied: measure it once on the test set
        _, test_acc, correct = self.nn.test(self.test_gen)
        print(f'==> Asserted test-acc of: {test_acc} [{correct}]\n ')
        self.init_acc = test_acc  # TODO - Fix initialize bug
    else:
        self.init_acc = init_acc
    self.record_finder = RecordFinder(cfg.NET.__name__, dat.name(), patch_size,
                                      ones_range, gran_thresh, max_acc_loss,
                                      self.init_acc)
def _checkpoint(self, val_acc, epoch, ckp_name_prefix=None):
    """Save a checkpoint when *val_acc* beats the best seen so far.

    With cfg.DONT_SAVE_REDUNDANT, also skips saving when an on-disk checkpoint
    already records a higher or equal validation accuracy (parsed from its filename).
    """
    # Decide on whether to checkpoint or not:
    save_it = val_acc > self.best_val_acc
    if save_it and cfg.DONT_SAVE_REDUNDANT:
        # target = os.path.join(cfg.CHECKPOINT_DIR, f'{self.net.family_name()}_{dat.name()}_*_ckpt.t7')
        # checkpoints = [os.path.basename(f) for f in glob.glob(target)]
        # if ckp_name_prefix is not None:
        #     target = os.path.join(cfg.CHECKPOINT_DIR, f'{self.net.family_name()}_{dat.name()}_*_ckpt_{ckp_name_prefix}.t7')
        #     checkpoints += [os.path.basename(f) for f in glob.glob(target)]
        if ckp_name_prefix is None:
            target = os.path.join(
                cfg.CHECKPOINT_DIR,
                f'{self.net.family_name()}_{dat.name()}_*_ckpt.t7')
        else:
            target = os.path.join(
                cfg.CHECKPOINT_DIR,
                f'{self.net.family_name()}_{dat.name()}_*_ckpt_{ckp_name_prefix}.t7')
        checkpoints = [os.path.basename(f) for f in glob.glob(target)]
        if checkpoints:
            # Filenames embed the accuracy; strip the family/dataset prefix and
            # parse the accuracy field (second '_'-separated token of the remainder)
            best_cp_val_acc = max([
                float(
                    f.replace(f'{self.net.family_name()}_{dat.name()}',
                              '').split('_')[1]) for f in checkpoints
            ])
            if best_cp_val_acc >= val_acc:
                save_it = False  # a better (or equal) checkpoint already exists on disk
                print(
                    f'\nResuming without save - Found valid checkpoint with higher val_acc: {best_cp_val_acc}'
                )
    # Do checkpoint
    val_acc = round(val_acc, 3)  # Don't allow too long a number
    if save_it:
        print(
            f'\nBeat val_acc record of {self.best_val_acc} with {val_acc} - Saving checkpoint'
        )
        state = {
            'net': self.net.state_dict(),
            'dataset': dat.name(),
            'acc': val_acc,
            'epoch': epoch,
        }
        if not os.path.isdir(cfg.CHECKPOINT_DIR):
            os.mkdir(cfg.CHECKPOINT_DIR)
        if ckp_name_prefix is None:
            cp_name = f'{self.net.family_name()}_{dat.name()}_{val_acc}_ckpt.t7'
        else:
            cp_name = f'{self.net.family_name()}_{dat.name()}_{val_acc}_ckpt_{ckp_name_prefix}.t7'
        torch.save(state, os.path.join(cfg.CHECKPOINT_DIR, cp_name))
        self.best_val_acc = val_acc
def plot_ops_saved_accuracy_uniform_network(self):
    """Apply each pattern uniformly across the whole network, measure ops saved and
    accuracy per pattern, plot both, and persist the raw data.

    :returns: [all_patterns, ops_saved_array, acc_array].
    """
    layers_layout = self.nn.net.generate_spatial_sizes(dat.shape())
    rcs = Record(layers_layout, self.gran_thresh, True, Mode.UNIFORM_LAYER,
                 self.init_acc, self.ps, self.ones_range)
    no_of_patterns = rcs.all_patterns.shape[2]
    ops_saved_array = [None] * no_of_patterns
    acc_array = [None] * no_of_patterns
    self._init_nn()
    for p_idx in range(no_of_patterns):
        # Build one mask per layer from the current pattern and apply them all
        sp_list = [None] * len(layers_layout)
        for layer, layer_mask in enumerate(
                mf.base_line_mask(layers_layout, self.ps,
                                  pattern=rcs.all_patterns[:, :, p_idx])):
            sp_list[layer] = torch.from_numpy(layer_mask)
        self.nn.net.strict_mask_update(update_ids=list(range(len(layers_layout))),
                                       masks=sp_list)
        _, test_acc, _ = self.nn.test(self.test_gen)
        ops_saved, ops_total = self.nn.net.num_ops()
        self.nn.net.reset_ops()
        ops_saved_array[p_idx] = ops_saved / ops_total
        acc_array[p_idx] = test_acc
    plt.figure()
    plt.subplot(211)
    plt.plot(list(range(no_of_patterns)), ops_saved_array, 'o')
    plt.xlabel('pattern index')
    plt.ylabel('ops_saved [%]')
    plt.title(f'ops saved for uniform network, patch_size:{self.ps}')
    plt.subplot(212)
    plt.plot(list(range(no_of_patterns)), acc_array, 'o')
    plt.xlabel('pattern index')
    plt.ylabel('accuracy [%]')
    plt.title(f'accuracy for uniform network, patch_size:{self.ps}')
    data = [rcs.all_patterns, ops_saved_array, acc_array]
    # BUGFIX: the .pkl filename used a plain string literal (missing the f-prefix),
    # so '{cfg.NET.__name__}_{dat.name()}' appeared verbatim in the saved filename
    # while the .pdf path below interpolated correctly. Share one basename so the
    # data file and the plot always match.
    base_name = (
        f'baseline_all_patterns_{cfg.NET.__name__}_{dat.name()}'
        f'acc{self.init_acc}_ps{self.ps}_ones{self.ones_range[0]}x{self.ones_range[1]}_mg{self.gran_thresh}'
    )
    save_to_file(data, False, cfg.RESULTS_DIR, base_name + '.pkl')
    plt.savefig(f'{cfg.RESULTS_DIR}/' + base_name + '.pdf')
    return data
def main_1x3_ones():
    """Sweep all modes at patch size 2 with ones range (1, 3) for acc-loss option 3.5."""
    acc_loss = [3.5]
    eval_baseline_and_runtimes(2, (1, 3), 10)
    for uniform_mode in (Mode.UNIFORM_LAYER, Mode.UNIFORM_PATCH, Mode.UNIFORM_FILTERS):
        run_all_acc_loss_possibilities(2, (1, 3), 10, uniform_mode,
                                       acc_loss_opts=acc_loss)
    plotting.plot_ops_saved_vs_max_acc_loss(cfg.NET.__name__, dat.name(), 2,
                                            (1, 3), 10, acc_loss, 93.5)
    run_all_acc_loss_possibilities(2, (1, 3), 10, Mode.MAX_GRANULARITY,
                                   acc_loss_opts=acc_loss)
    # Regenerate the plot now that the MAX_GRANULARITY results exist as well
    plotting.plot_ops_saved_vs_max_acc_loss(cfg.NET.__name__, dat.name(), 2,
                                            (1, 3), 10, acc_loss, 93.5)
def _save_final_rec(self):
    """Persist the resume record, build and save the FinalResultRc, and mark the run finished.

    :returns: the FinalResultRc, or None when no option satisfied self.min_acc
              (indicated by a zero total-ops counter in the resume record).
    """
    self.resume_rec.save_csv(self.resume_param_filename)
    save_to_file(self.resume_rec, False, cfg.RESULTS_DIR, self.resume_param_filename)
    if self.resume_rec.curr_tot_ops == 0:
        # Nothing met the accuracy floor - no final record to build
        print(f'==> No suitable Option was found for min accuracy of {self.min_acc}')
        return
    f_rec = FinalResultRc(self.min_acc + self.max_acc_loss,
                          self.resume_rec.curr_best_acc,
                          self.resume_rec.curr_saved_ops,
                          self.resume_rec.curr_tot_ops, self.mode,
                          self.resume_rec.curr_best_mask, self.patch_size,
                          self.max_acc_loss, self.ones_range,
                          cfg.NET.__name__, dat.name(), self.layers_layout)
    save_to_file(f_rec, True, cfg.RESULTS_DIR)
    print('==> result saved to ' + f_rec.filename)
    # Mark every entry finished, then persist the resume record again with that state
    self.resume_rec.mark_finished(mark_all=True)
    save_to_file(self.resume_rec, False, cfg.RESULTS_DIR, self.resume_param_filename)
    return f_rec
def main_ones(ones_range, acc_loss=None):
    """Sweep the uniform modes at patch size 2 over *ones_range* and plot the results."""
    eval_baseline_and_runtimes(2, ones_range, 10)
    if acc_loss is None:
        acc_loss = [1, 3.5, 5]
    for uniform_mode in (Mode.UNIFORM_LAYER, Mode.UNIFORM_PATCH, Mode.UNIFORM_FILTERS):
        run_all_acc_loss_possibilities(2, ones_range, 10, uniform_mode,
                                       acc_loss_opts=acc_loss)
    plotting.plot_ops_saved_vs_max_acc_loss(cfg.NET.__name__, dat.name(), 2,
                                            ones_range, 10, acc_loss, 93.5)
def find_final_mask(self, max_acc_loss, nn=None, test_gen=None, should_save=False):
    """Find the best mask combination within *max_acc_loss* of the initial accuracy.

    If the simulation has not finished and a network/test generator are supplied,
    resumes the simulation first.

    :returns: a FinalResultRc, or None when no option meets the accuracy floor.
    """
    init_acc = self.max_acc_loss + self.min_acc
    new_min_acc = init_acc - max_acc_loss
    if not self.resume_rec.is_finised():
        print('Simulation not finished!')
        if nn is not None and test_gen is not None:
            self.simulate(nn, test_gen)
    final_mask_indexes, best_ops_saved, best_acc = self.resume_rec.find_best_mask(new_min_acc)
    if final_mask_indexes is None:
        print(f'==> No suitable Option was found for min accuracy of {new_min_acc}')
        return None
    # Rebuild the per-layer mask list from the winning pattern indexes
    self.sp_list = [None] * len(final_mask_indexes)
    for l_idx, p_idx in enumerate(final_mask_indexes):
        self._update_layer(l_idx, p_idx)
    f_rec = FinalResultRc(init_acc, best_acc, best_ops_saved,
                          self.resume_rec.curr_tot_ops, self.mode, self.sp_list,
                          self.patch_size, max_acc_loss, self.ones_range,
                          cfg.NET.__name__, dat.name(), self.layers_layout)
    if should_save:
        save_to_file(f_rec, True, cfg.RESULTS_DIR)
        print('==> result saved to ' + f_rec.filename)
    return f_rec
def train(self, epochs, lr=0.1, set_size=None, batch_size=cfg.BATCH_SIZE, ckp_name_prefix=None):
    """Train for *epochs* epochs with validation, LR scheduling, and checkpointing.

    :param epochs: number of epochs to run (starting from self.start_epoch).
    :param lr: initial learning rate.
    :param set_size: max training samples; None means the full training set.
    :param batch_size: mini-batch size.
    :param ckp_name_prefix: checkpoint name suffix; when given, the best-val-acc
                            record is reset so this run saves its own checkpoints.
    """
    if ckp_name_prefix is not None:
        self.best_val_acc = 0
    # trainset returns ((train_gen, actual_size), (val_gen, _)); set_size is
    # rebound to the actual number of samples provided
    (self.train_gen, set_size), (self.val_gen, _) = dat.trainset(batch_size=batch_size,
                                                                 max_samples=set_size)
    print(f'==> Training on {set_size} samples with batch size of {batch_size} and lr = {lr}')
    if cfg.SGD_METHOD == 'Nesterov':
        # Only optimize parameters that require grad (frozen layers are skipped)
        self.optimizer = optim.SGD(filter(lambda x: x.requires_grad, self.net.parameters()),
                                   lr=lr, momentum=0.9, weight_decay=5e-4)
    elif cfg.SGD_METHOD == 'Adam':
        self.optimizer = torch.optim.Adam(filter(lambda x: x.requires_grad, self.net.parameters()),
                                          lr=lr)
    else:
        raise NotImplementedError
    # Reduce LR when validation loss plateaus
    self.scheduler = ReduceLROnPlateau(self.optimizer, 'min',
                                       patience=cfg.N_EPOCHS_TO_WAIT_BEFORE_LR_DECAY)
    p = Progbar(epochs)
    t_start = time.time()
    batches_per_step = math.ceil(set_size / batch_size)
    for epoch in range(self.start_epoch, self.start_epoch + epochs):
        if cfg.VERBOSITY > 0:
            banner(f'Epoch: {epoch}')
            t_step_start = time.time()
        train_loss, train_acc, train_count = self._train_step()
        val_loss, val_acc, val_count = self.test(self.val_gen)
        self.scheduler.step(val_loss)
        if cfg.VERBOSITY > 0:
            # Verbose mode also reports the average wall-time per batch
            t_step_end = time.time()
            batch_time = round((t_step_end - t_step_start) / batches_per_step, 3)
            p.add(1, values=[("t_loss", train_loss), ("t_acc", train_acc),
                             ("v_loss", val_loss), ("v_acc", val_acc),
                             ("batch_time", batch_time),
                             ("lr", self.optimizer.param_groups[0]['lr'])])
        else:
            p.add(1, values=[("t_loss", train_loss), ("t_acc", train_acc),
                             ("v_loss", val_loss), ("v_acc", val_acc),
                             ("lr", self.optimizer.param_groups[0]['lr'])])
        self._checkpoint(val_acc, epoch + 1, ckp_name_prefix=ckp_name_prefix)
    t_end = time.time()
    print(f'==> Total train time: {t_end - t_start:.3f} secs :: per epoch: {(t_end - t_start) / epochs:.3f} secs')
    banner('Training Phase - End')
def eval_run_time(self, mode, no_of_tries=5):
    """Estimate run counts and per-iteration run time for a full *mode* simulation.

    Times *no_of_tries* randomly chosen single-mask evaluations, then asks each
    quantizier stage for its iteration count.

    :returns: ((first_lvl_runs, second_lvl_runs, lQ_runs),
               (avg_iter_time_secs, full_net_run_time)).
    """
    layers_layout = self.nn.net.generate_spatial_sizes(dat.shape())
    # Third Record arg flags default patterns (True) vs. supplied input patterns (False)
    if self.input_patterns is None:
        recs_first_lvl = Record(layers_layout, self.gran_thresh, True, mode,
                                self.init_acc, self.ps, self.ones_range)
    else:
        recs_first_lvl = Record(layers_layout, self.gran_thresh, False, mode,
                                self.init_acc, self.input_patterns, self.ones_range)
    first_lvl_runs = recs_first_lvl.size
    self.nn.net.reset_spatial()
    run_time_for_iter = 0
    for idx in range(no_of_tries):
        # Pick a random (layer, channel, patch, pattern) and time one evaluation
        layer = random.randint(0, recs_first_lvl.no_of_layers - 1)
        channel = random.randint(0, recs_first_lvl.no_of_channels[layer] - 1)
        patch = random.randint(0, recs_first_lvl.no_of_patches[layer] - 1)
        pattern_idx = random.randint(0, recs_first_lvl.no_of_patterns[layer] - 1)
        pattern = recs_first_lvl.all_patterns[:, :, pattern_idx]
        mask = mf.get_specific_mask(layers_layout[layer], channel, patch,
                                    pattern, recs_first_lvl.patch_sizes[layer],
                                    mode)
        st_time = time.time()
        self.nn.net.reset_spatial()
        self.nn.net.strict_mask_update(update_ids=[layer],
                                       masks=[torch.from_numpy(mask)])
        _, test_acc, _ = self.nn.test(self.test_gen)
        end_time = time.time()
        run_time_for_iter += (end_time - st_time)
    run_time_for_iter = run_time_for_iter / no_of_tries
    recs_first_lvl.fill_empty()
    # Each mode has a different quantizier pipeline; count its iterations
    if mode == Mode.UNIFORM_LAYER:
        second_lvl_runs = 0
        lQ = LayerQuantizier(recs_first_lvl, self.init_acc, 0, self.ps,
                             self.ones_range, self.get_total_ops())
        lQ_runs = lQ.number_of_iters()
    elif mode == Mode.MAX_GRANULARITY:
        pQ = PatchQuantizier(recs_first_lvl, self.init_acc, 0, self.ps)
        pQ.output_rec.fill_empty()
        cQ = ChannelQuantizier(pQ.output_rec, self.init_acc, 0, self.ps)
        cQ.output_rec.fill_empty()
        second_lvl_runs = pQ.number_of_iters() + cQ.number_of_iters()
        lQ = LayerQuantizier(cQ.output_rec, self.init_acc, 0, self.ps,
                             self.ones_range, self.get_total_ops())
        lQ_runs = lQ.number_of_iters()
    elif mode == Mode.UNIFORM_FILTERS:
        pQ = PatchQuantizier(recs_first_lvl, self.init_acc, 0, self.ps)
        second_lvl_runs = pQ.number_of_iters()
        pQ.output_rec.fill_empty()
        lQ = LayerQuantizier(pQ.output_rec, self.init_acc, 0, self.ps,
                             self.ones_range, self.get_total_ops())
        lQ_runs = lQ.number_of_iters()
    elif mode == Mode.UNIFORM_PATCH:
        cQ = ChannelQuantizier(recs_first_lvl, self.init_acc, 0, self.ps)
        cQ.output_rec.fill_empty()
        second_lvl_runs = cQ.number_of_iters()
        lQ = LayerQuantizier(cQ.output_rec, self.init_acc, 0, self.ps,
                             self.ones_range, self.get_total_ops())
        lQ_runs = lQ.number_of_iters()
    no_of_runs = (first_lvl_runs, second_lvl_runs, lQ_runs)
    run_times = (round(run_time_for_iter, 3),
                 self.get_full_net_run_time(no_of_tries))
    return no_of_runs, run_times
def test():
    """Resume the best checkpoint and evaluate it on the test set, printing the result."""
    model = NeuralNet(resume=True)
    generator, _ = dat.testset(batch_size=cfg.BATCH_SIZE,
                               max_samples=cfg.TEST_SET_SIZE)
    test_loss, test_acc, count = model.test(generator, print_it=True)
def data_tutorial():
    """Print the dataset summary, with show_sample=True to include a sample display."""
    dat.data_summary(show_sample=True)