def select_data_mode(config_data):
    # __config_data is NOT YET IMPLEMENTED__
    submode = config_data['console_submode']
    print("select_data_mode(). submode:%s. INTERNAL SWITCH? YES." % (str(submode)))

    #### INTERNAL SWITCH ####
    verbose = 250
    if submode is None:
        print(DATA_INFO)
    elif str(submode) == '0':
        data_test(config_data)
    elif str(submode) == 'mnist':
        ioload.load_mnist_0001(config_data, verbose=verbose)
    elif str(submode) == 'showcase_mnist':
        show.showcase_mnist_0001(config_data, verbose=verbose)
    elif str(submode) == 'showcase_mnist_2':
        show.showcase_mnist_0002(config_data, verbose=verbose)
    elif str(submode) == 'showcase_mnist_3':
        show.showcase_mnist_0003(config_data, verbose=verbose)
    elif str(submode) == 'showcase_cifar':
        show_cifar.showcase_cifar10_0001(config_data, verbose=verbose)
    elif str(submode) == 'showcase_cifar_2':
        show_cifar.showcase_cifar10_0002(config_data, verbose=verbose)
    elif str(submode) == 'showcase_cifar_3':
        show_cifar.showcase_cifar10_0003(config_data, verbose=verbose)
    else:
        print(DATA_INFO)
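# A minimal sketch of how select_data_mode() might be driven. Everything below
# is illustrative: the nested config keys are assumptions inferred from the
# other functions in this module, not a confirmed schema.
def _example_select_data_mode():
    config_data = {
        'console_submode': 'showcase_mnist',  # picks a branch of the switch above
        'data_from_torch': {
            'mnist': {'resize': None, 'training_mode': True},
        },
        'general': {'batch_size': 9},
    }
    select_data_mode(config_data)  # dispatches to show.showcase_mnist_0001()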
def showcase_mnist_0003(config_data, verbose=0):
    print('showcase_mnist_0003()')
    from pipeline.data.load_data import load_mnist_0001

    # set to None for original size
    config_data['data_from_torch']['mnist']['resize'] = (128, 128)
    config_data['data_from_torch']['mnist']['training_mode'] = True

    dataloader = load_mnist_0001(config_data, batch_size=1, download=False,
        shuffle=False, verbose=0)
    this_iter = iter(dataloader)

    # Display only the first image, then stop.
    x, y = next(this_iter)
    print(x.shape)
    x1 = x.clone().detach().cpu().squeeze()

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.imshow(x1)
    plt.show()
def lrp_mnist_000X(net, state_tracker, config_data, relprop_mode,
        N_th_RUN_TO_LOAD, DEBUG_LOOP1_SIGNAL, DEBUG_LOOP2_SIGNAL,
        tab_level=0, verbose=0):
    N_SAVE_LRP = 10  # how many data points to LRP
    RELPROP_MODE = relprop_mode

    data_loader = load_mnist_0001(config_data, shuffle=True, batch_size=1, verbose=0)
    net.relprop_mode = RELPROP_MODE
    res = ResultWrapper()
    res.lrp_output = []

    pm.printvm('Start lrp processing...', tab_level=tab_level,
        verbose=verbose, verbose_threshold=250)
    for i, data in enumerate(data_loader, 0):
        x, y0 = data
        x = x.to(this_device)
        y = net(x)
        net.forward_lrp(x)
        if DEBUG_LOOP1(i, x, y, y0, net, DEBUG_LOOP1_SIGNAL, tab_level=tab_level + 1):
            break
        R = net.relprop(y)
        if DEBUG_LOOP2(i, x, y, y0, R, net, DEBUG_LOOP2_SIGNAL, tab_level=tab_level + 1):
            break
        res.lrp_output.append(compactify_result(x, y, y0, R))
        if i >= N_SAVE_LRP:
            break

    working_dir = config_data['working_dir']
    path_to_folder = os.path.join('checkpoint', state_tracker.training_series_name)
    filename = str(RELPROP_MODE) + '_' + str(N_th_RUN_TO_LOAD) + '.result'
    res.save_result(working_dir, path_to_folder, filename,
        verbose=250, tab_level=0, verbose_threshold=50)
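# A hypothetical driver for lrp_mnist_000X(). The relprop_mode string and the
# DEBUG signal values below are assumptions; consult the repo's relprop
# implementation for the mode names it actually accepts.
def _example_lrp_mnist_000X(net, state_tracker, config_data):
    lrp_mnist_000X(net, state_tracker, config_data,
        relprop_mode='relprop1',  # assumed mode name, not verified
        N_th_RUN_TO_LOAD=1,
        DEBUG_LOOP1_SIGNAL=False, DEBUG_LOOP2_SIGNAL=False,
        tab_level=0, verbose=250)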
def setup_training_and_data_loader(config_data, verbose=0, tab_level=0):
    config_data['data_from_torch']['mnist']['training_mode'] = True
    data_loader = load_mnist_0001(config_data, verbose=0)
    n_batch_train = len(data_loader)

    pm.printvm("setup_training_and_data_loader()",
        tab_level=tab_level, verbose=verbose, verbose_threshold=0)
    pm.printvm("n_batch_train = len(data_loader):%s" % (str(n_batch_train)),
        tab_level=tab_level + 1, verbose=verbose, verbose_threshold=0)
    return data_loader, n_batch_train
def eval_mnist_0001(config_data, state_tracker, net, debug_signal,
        tab_level=0, verbose=250):
    from utils.metrics import ClassAccuracy
    data_loader = load_mnist_0001(config_data, shuffle=False, batch_size=1, verbose=0)

    net.eval()
    Acc = ClassAccuracy()
    pm.printvm('Start evaluation...', tab_level=tab_level,
        verbose=verbose, verbose_threshold=250)
    for i, data in enumerate(data_loader, 0):
        x, y0 = data
        y = net(x.to(this_device)).squeeze(3).squeeze(2)
        y1 = torch.argmax(y.squeeze(1)).clone().detach().cpu().numpy()
        Acc.update_acc(int(y1), int(y0))
        if DEBUG_eval_smallnet_mnist_LOOP_0001(i, x, y0, y, net, debug_signal,
                tab_level=tab_level + 1):
            break

    Acc.compute_acc()
    Acc.display_stats(tab_level=0, verbose=250)
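# eval_mnist_0001() only exercises three methods of ClassAccuracy. A minimal
# stand-in consistent with those calls could look like the sketch below; the
# real utils.metrics.ClassAccuracy may track more statistics.
class _ClassAccuracySketch:
    def __init__(self):
        self.n_correct, self.n_total = 0, 0
        self.acc = None

    def update_acc(self, y_pred, y_true):
        # y_pred, y_true: predicted and ground-truth class indices (ints)
        self.n_total += 1
        if y_pred == y_true:
            self.n_correct += 1

    def compute_acc(self):
        self.acc = self.n_correct / max(self.n_total, 1)

    def display_stats(self, tab_level=0, verbose=250):
        print('  ' * tab_level + 'acc: %s/%s = %s' % (
            self.n_correct, self.n_total, str(self.acc)))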
def visual_mnist(config_data):
    print('visual_mnist()')
    import pipeline.data.load_data as lo

    config_data['general']['batch_size'] = 9
    data_loader = lo.load_mnist_0001(config_data)

    for i, data in enumerate(data_loader, 0):
        x, y = data
        x = x.detach().cpu().numpy()
        y = y.numpy()
        # Concatenate the batch along the width axis into one horizontal strip.
        stack = np.concatenate([img for img in x], 2)
        print('x.shape:%s,y:%s' % (str(x.shape), str(y)))
        print('stack.shape:%s' % (str(stack.shape)))
        plt.figure()
        plt.imshow(stack.squeeze())
        break
    plt.show()
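# visual_mnist() tiles the batch into one horizontal strip via
# np.concatenate(..., 2). A square-grid variant, assuming the same (N, C, H, W)
# batch layout and N divisible by n_cols, could be sketched as:
def _make_grid_sketch(x, n_cols=3):
    import numpy as np
    # Each row: concatenate n_cols images along the width axis (axis 2),
    # then stack the rows along the height axis (axis 1).
    rows = [np.concatenate(list(x[r:r + n_cols]), 2)
            for r in range(0, len(x), n_cols)]
    return np.concatenate(rows, 1)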
def showcase_mnist_0001(config_data, verbose=0):
    print('showcase_mnist_0001()')
    from pipeline.data.load_data import load_mnist_0001

    # set to None for original size
    config_data['data_from_torch']['mnist']['resize'] = (64, 64)
    config_data['data_from_torch']['mnist']['training_mode'] = True

    dataloader = load_mnist_0001(config_data, batch_size=1, download=False,
        shuffle=False, verbose=0)
    this_iter = iter(dataloader)
    iter_counter = 0
    n_iter = len(dataloader)
    while iter_counter < n_iter:
        iter_counter += 1
        x, y = next(this_iter)
        x1 = x.clone().detach().cpu().numpy()
        pm.print_in_loop('x.shape: %s, x max: %s , x min: %s , y:%s' % (str(x1.shape),
            str(np.max(x1)), str(np.min(x1)),
            str(y.clone().detach().cpu().numpy())),
            iter_counter, n_iter, first=5, last=2,
            tab_level=1, verbose=verbose, verbose_threshold=250)

    print("***Done*** the iterator is used up.")
    try:
        next(this_iter)
    except StopIteration:
        print("One more next() raises StopIteration. Reached the end!")
def train_smallnet_mnist_0001(config_data, tab_level=0, verbose=0):
    print('train_smallnet_mnist_0001().')
    from pipeline.data.load_data import load_mnist_0001
    from utils.loss import compute_loss

    data_loader = load_mnist_0001(config_data, verbose=0)
    state_tracker = setup_state_tracker(config_data, verbose=verbose, tab_level=tab_level)
    net = new_or_load_model(state_tracker, config_data, verbose=verbose, tab_level=tab_level)
    criterion, optimizer = setup_training_tools_0001(net, config_data,
        verbose=verbose, tab_level=tab_level + 1)

    pm.printv('Start training...', tab_level=tab_level)
    total_iter_in_this_run = 0
    l_epoch = 1 + state_tracker.get_latest_saved_epoch()
    for n_epoch in range(l_epoch, l_epoch + config_data['general']['epoch']):
        state_tracker.setup_for_this_epoch(n_epoch)
        for i, data in enumerate(data_loader, 0):
            optimizer.zero_grad()
            x, y0 = data
            if DEBUG_train_loop_0001(DEBUG_train_smallnet_mnist_LOOP_SIGNAL, net, x, y0,
                    tab_level=tab_level, verbose=verbose):
                return
            y = net(x.to(this_device))
            loss = compute_loss(criterion, y.squeeze(3).squeeze(2).cpu(), y0)
            loss.backward()
            optimizer.step()

            # FOR LOGGING
            total_iter_in_this_run += 1
            state_tracker.store_loss_by_epoch(loss.item(), n_epoch)

            stop_iter, stop_epoch = DEBUG_train_loop_0002(
                DEBUG_train_smallnet_mnist_LOOP2_SIGNAL, i, n_epoch - l_epoch,
                tab_level=tab_level + 1, verbose=verbose)
            if stop_iter:
                break
        state_tracker.update_epoch()
        if stop_epoch:
            break

    state_tracker.update_state(total_iter_in_this_run, config_data)
    save_model_by_n_th_run(net, state_tracker, tab_level=tab_level, verbose=verbose)
    state_tracker.display_end_state(tab_level=tab_level + 1, verbose=verbose)
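# train_smallnet_mnist_0001() delegates the loss computation to
# utils.loss.compute_loss. A plausible minimal form, assuming criterion is
# something like nn.CrossEntropyLoss, is sketched below; the repo's actual
# helper may do more (weighting, regularization, etc.).
def _compute_loss_sketch(criterion, y, y0):
    # y: (batch, n_classes) logits after the spatial squeeze; y0: class labels
    return criterion(y, y0)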