def train(args, sample_iter, nsamples, epoch, max_epochs, train_graph,
          yolo_solver, on_memory_data, total_batches):
    total_loss = []
    epoch_seen = 0
    tic = time.time()
    for each_batch in range(0, int(total_batches)):
        data_tensor, target_tensor = sample_iter.next()
        lr = yolo_solver.get_current_lr(train_graph.seen)
        nB = data_tensor.shape[0]
        print('size={}, '.format(data_tensor.shape[2]), end='')
        stats = train_graph.forward_backward(data_tensor, target_tensor)
        print('s=%d/%d, b=%d/%d, e=%d/%d, lr %g, mIoU %.3f, nGT %d, recall %d,'
              ' proposals %d, loss: %.3f, exec: %.1f [ms]'
              % ((stats.seen % nsamples), nsamples,
                 get_current_batch(stats.seen, args), args.max_batches,
                 epoch, max_epochs, lr, stats.mIoU, stats.nGT, stats.nCorrect,
                 stats.nProposals, stats.loss, stats.time))
        epoch_seen += nB
        total_loss.append(float(stats.loss))
        # Update parameters for every accum_times iterations.
        updated = yolo_solver.update_at_rate(lr)
    # Update if gradients are computed from previous update.
    yolo_solver.update_at_rate(lr, force=True)
    if (epoch + 1) % args.save_interval == 0:
        logging('save weights to %s/%06d.h5' % (args.output, epoch + 1))
        nn.save_parameters('%s/%06d.h5' % (args.output, epoch + 1))
    return np.sum(total_loss) / epoch_seen
def test_parameter_file_load_save_for_file_object(memory_buffer_format):
    module_creator = ModuleCreator(TSTNetNormal(),
                                   [(4, 3, 32, 32), (4, 3, 32, 32)])
    variable_inputs = module_creator.get_variable_inputs()
    a_module = module_creator.module
    outputs = a_module(*variable_inputs)

    another = TSTNetNormal()
    ref_outputs = another(*variable_inputs)

    extension = memory_buffer_format

    # Should not be equal
    with pytest.raises(AssertionError) as excinfo:
        forward_variable_and_check_equal(outputs, ref_outputs)

    with io.BytesIO() as param_file:
        nn.save_parameters(param_file,
                           a_module.get_parameters(),
                           extension=extension)
        # Load from the file object
        with nn.parameter_scope('', another.parameter_scope):
            nn.load_parameters(param_file, extension=extension)
        another.update_parameter()

    ref_outputs = another(*variable_inputs)

    # Should be equal
    forward_variable_and_check_equal(outputs, ref_outputs)
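# Illustrative sketch (not part of the original test suite): when writing to a
# file-like object there is no filename to infer the format from, so the
# `extension` argument must name the format explicitly. A minimal round trip
# through an in-memory buffer, assuming only the nnabla parameter API used in
# the test above:
import io

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((1, 8))
y = PF.affine(x, 4, name='fc')  # registers 'fc/affine/W' and 'fc/affine/b'

buf = io.BytesIO()
nn.save_parameters(buf, extension='.h5')  # '.protobuf' would also work
buf.seek(0)

nn.clear_parameters()
nn.load_parameters(buf, extension='.h5')
assert 'fc/affine/W' in nn.get_parameters()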
def test_save_load_parameters():
    v = nn.Variable([64, 1, 28, 28], need_grad=False)
    with nn.parameter_scope("param1"):
        with nn.parameter_scope("conv1"):
            h = PF.convolution(v, 32, (3, 3))
            b = PF.batch_normalization(h, batch_stat=True)
        with nn.parameter_scope("conv2"):
            h1 = PF.convolution(v, 32, (3, 3))
            b2 = PF.batch_normalization(h1, batch_stat=True)

    for k, v in nn.get_parameters(grad_only=False).items():
        v.data.cast(np.float32)[...] = np.random.randn(*v.shape)

    with nn.parameter_scope("param1"):
        param1 = nn.get_parameters(grad_only=False)
        nn.save_parameters("tmp.h5")
        nn.save_parameters("tmp.protobuf")

    with nn.parameter_scope("param2"):
        nn.load_parameters('tmp.h5')
        param2 = nn.get_parameters(grad_only=False)

    with nn.parameter_scope("param3"):
        nn.load_parameters('tmp.protobuf')
        param3 = nn.get_parameters(grad_only=False)

    for par2 in [param2, param3]:
        assert param1.keys() == par2.keys()  # Check order
        for (n1, p1), (n2, p2) in zip(sorted(param1.items()),
                                      sorted(par2.items())):
            assert n1 == n2
            assert np.all(p1.d == p2.d)
            assert p1.data.dtype == p2.data.dtype
            assert p1.need_grad == p2.need_grad
def validate(i):
    pose_ = ds.poses[conf.valid_index:conf.valid_index + 1, ...]
    intrinsic_ = ds.intrinsics[conf.valid_index:conf.valid_index + 1, ...]
    mask_obj_ = ds.masks[conf.valid_index:conf.valid_index + 1, ...]
    image = render(pose_, intrinsic_, mask_obj_, conf)
    monitor_image.add(i, image)
    nn.save_parameters(f"{monitor_path}/model_{i:05d}.h5")
def main():
    args = get_args()
    nn.load_parameters(args.input)
    params = nn.get_parameters(grad_only=False)
    processed = False

    # Convert memory layout
    layout = get_memory_layout(params)
    if args.memory_layout is None:
        pass
    elif args.memory_layout != layout:
        logger.info(f'Converting memory layout to {args.memory_layout}.')
        convert_memory_layout(params, args.memory_layout)
        processed |= True
    else:
        logger.info('No need to convert memory layout.')

    if args.force_3_channels:
        ret = force_3_channels(params, args.memory_layout)
        if ret:
            logger.info('Converted first conv to 3-channel input.')
        processed |= ret

    if not processed:
        logger.info(
            'No change has been made for the input. Not saving a new parameter file.')
        return

    logger.info(f'Save a new parameter file at {args.output}')
    for key, param in params.items():
        nn.parameter.set_parameter(key, param)
    nn.save_parameters(args.output)
def main():
    '''Main.
    '''
    # Parse arguments
    args = get_args()

    # Create YOLOv2 detection network
    x = nn.Variable((1, 3, args.width, args.width))
    y = yolov2.yolov2(x, args.anchors, args.classes, test=True)
    params = nn.get_parameters(grad_only=False)

    # Parse network parameters
    dn_weights = parser.load_weights_raw(args.input)
    cursor = 0
    for i in range(1, 19):  # 1 to 18
        cursor = parser.load_convolutional_and_get_next_cursor(
            dn_weights, cursor, params, 'c{}'.format(i))
    cursor = parser.load_convolutional_and_get_next_cursor(
        dn_weights, cursor, params, 'c18_19')
    cursor = parser.load_convolutional_and_get_next_cursor(
        dn_weights, cursor, params, 'c18_20')
    cursor = parser.load_convolutional_and_get_next_cursor(
        dn_weights, cursor, params, 'c13_14')
    cursor = parser.load_convolutional_and_get_next_cursor(
        dn_weights, cursor, params, 'c21')
    cursor = parser.load_convolutional_and_get_next_cursor(
        dn_weights, cursor, params, 'detection', no_bn=True)
    assert cursor == dn_weights.size

    # Save to an h5 file
    nn.save_parameters(args.output)
def test_save_load_parameters():
    v = nn.Variable([64, 1, 28, 28], need_grad=False)
    with nn.parameter_scope("param1"):
        with nn.parameter_scope("conv1"):
            h = PF.convolution(v, 32, (3, 3))
            b = PF.batch_normalization(h, batch_stat=True)
        with nn.parameter_scope("conv2"):
            h1 = PF.convolution(v, 32, (3, 3))
            b2 = PF.batch_normalization(h1, batch_stat=True)

    for k, v in iteritems(nn.get_parameters(grad_only=False)):
        v.data.cast(np.float32)[...] = np.random.randn(*v.shape)

    with nn.parameter_scope("param1"):
        param1 = nn.get_parameters(grad_only=False)
        nn.save_parameters("tmp.h5")
        nn.save_parameters("tmp.protobuf")

    with nn.parameter_scope("param2"):
        nn.load_parameters('tmp.h5')
        param2 = nn.get_parameters(grad_only=False)

    with nn.parameter_scope("param3"):
        nn.load_parameters('tmp.protobuf')
        param3 = nn.get_parameters(grad_only=False)

    for par2 in [param2, param3]:
        assert param1.keys() == par2.keys()  # Check order
        for (n1, p1), (n2, p2) in zip(sorted(param1.items()),
                                      sorted(par2.items())):
            assert n1 == n2
            assert np.all(p1.d == p2.d)
            assert p1.data.dtype == p2.data.dtype
            assert p1.need_grad == p2.need_grad
def test_parameter_file_load_save_for_files(parameter_file):
    module_creator = ModuleCreator(TSTNetNormal(),
                                   [(4, 3, 32, 32), (4, 3, 32, 32)])
    variable_inputs = module_creator.get_variable_inputs()
    a_module = module_creator.module
    outputs = a_module(*variable_inputs)

    another = TSTNetNormal()
    ref_outputs = another(*variable_inputs)

    # Should not be equal
    with pytest.raises(AssertionError) as excinfo:
        forward_variable_and_check_equal(outputs, ref_outputs)

    with create_temp_with_dir(parameter_file) as tmp_file:
        # Save to file
        nn.save_parameters(tmp_file, a_module.get_parameters())
        # Load from file
        with nn.parameter_scope('', another.parameter_scope):
            nn.load_parameters(tmp_file)
        another.update_parameter()

    ref_outputs = another(*variable_inputs)

    # Should be equal
    forward_variable_and_check_equal(outputs, ref_outputs)
def save_parameters(self, path, extension=".h5"):
    """Save parameters of this module to a file.

    Args:
        path: str or file-like object
        extension (str, optional): Format of the parameter file.
            Defaults to ".h5".
    """
    params = self.get_parameters()
    nn.save_parameters(path, params=params, extension=extension)
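# Usage sketch (the `net` instance and file paths are illustrative, not from
# the original source): the `extension` argument lets the same method write
# either supported format, and the plain function API restores the file into
# the current parameter scope.
net = MyModule()
net.save_parameters('/tmp/net.h5')                         # hdf5, the default
net.save_parameters('/tmp/net.pb', extension='.protobuf')  # protobuf instead

import nnabla as nn
nn.load_parameters('/tmp/net.h5')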
def save_models(epoch_num, losses):
    # Save the Wave-U-Net parameters
    with nn.parameter_scope('Wave-U-Net'):
        nn.save_parameters(os.path.join(
            args.model_save_path,
            'param_{:04}.h5'.format(epoch_num + 1)))

    # Save results
    np.save(os.path.join(args.model_save_path,
                         'losses_{:04}.npy'.format(epoch_num + 1)),
            np.array(losses))
def main():
    args = get_args()
    net, caffe_param_dims = load_caffe_net(args.prototxt, args.caffemodel)
    xy, params, params_dims = create_nnabla_net(args.resnext)
    assert caffe_param_dims == params_dims, \
        "Number of parameter dimensions must be the same."
    convert_weights(net, params, args.bgr,
                    not args.disable_category_reordering)
    nn.save_parameters(args.path_save)
def train(env, model, buffer, exploration, monitor, update_fn, eval_fn,
          final_step, update_start, update_interval, save_interval,
          evaluate_interval, loss_labels=[]):
    reward_monitor = MonitorSeries('reward', monitor, interval=1)
    eval_reward_monitor = MonitorSeries('eval_reward', monitor, interval=1)
    time_monitor = MonitorTimeElapsed('time', monitor, interval=10000)
    loss_monitors = []
    for label in loss_labels:
        loss_monitors.append(MonitorSeries(label, monitor, interval=10000))

    step = 0
    while step <= final_step:
        obs_t = env.reset()
        ter_tp1 = False
        cumulative_reward = 0.0
        model.reset(step)

        while not ter_tp1:
            # Select best action
            act_t = model.infer(obs_t)
            # Add exploration noise
            act_t = exploration.get(step, act_t)
            # Iterate environment
            obs_tp1, rew_tp1, ter_tp1, _ = env.step(act_t)
            # Store transition
            buffer.append(obs_t, [act_t], rew_tp1, obs_tp1, ter_tp1)

            # Update parameters
            if step > update_start and step % update_interval == 0:
                for i, loss in enumerate(update_fn(step)):
                    if loss is not None:
                        loss_monitors[i].add(step, loss)

            # Save parameters
            if step % save_interval == 0:
                path = os.path.join(monitor.save_path, 'model_%d.h5' % step)
                nn.save_parameters(path)
            if step % evaluate_interval == 0:
                eval_reward_monitor.add(step, np.mean(eval_fn()))

            step += 1
            cumulative_reward += rew_tp1
            obs_t = obs_tp1
            time_monitor.add(step)

        # Record metrics
        reward_monitor.add(step, cumulative_reward)
def save_parameters(self, path, grad_only=False):
    """Save all parameters into a file with the specified format.

    Currently hdf5 and protobuf formats are supported.

    Args:
        path: path or file object
        grad_only (bool, optional): If True, only parameters with
            `need_grad=True` are saved. Defaults to False.
    """
    params = self.get_parameters(grad_only=grad_only)
    nn.save_parameters(path, params)
def save_checkpoint(self, path, epoch, msg='', pixelcnn=False):
    file_name = os.path.join(path, 'epoch_' + str(epoch))
    os.makedirs(file_name, exist_ok=True)
    if pixelcnn:
        nn.save_parameters(os.path.join(file_name, 'pixelcnn_params.h5'))
        self.solver.save_states(
            os.path.join(file_name, 'pixelcnn_solver.h5'))
    else:
        nn.save_parameters(os.path.join(file_name, 'params.h5'))
        self.solver.save_states(os.path.join(file_name, 'solver.h5'))
    print(msg)
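# A plausible counterpart for restoring the checkpoint above (a sketch, not
# the project's actual resume code): nnabla solvers provide `load_states`
# symmetric to `save_states`, and `nn.load_parameters` restores the weights.
def load_checkpoint(self, path, epoch, pixelcnn=False):
    file_name = os.path.join(path, 'epoch_' + str(epoch))
    if pixelcnn:
        nn.load_parameters(os.path.join(file_name, 'pixelcnn_params.h5'))
        self.solver.load_states(
            os.path.join(file_name, 'pixelcnn_solver.h5'))
    else:
        nn.load_parameters(os.path.join(file_name, 'params.h5'))
        self.solver.load_states(os.path.join(file_name, 'solver.h5'))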
def save_parameters(self, path, grad_only=False):
    r"""Saves the parameters to a file.

    Args:
        path (str): Path to file.
        grad_only (bool, optional): If True, only parameters with
            `need_grad=True` are saved. Defaults to False.
    """
    params = self.get_parameters(grad_only=grad_only)
    # Save inside a fresh, empty parameter scope so the global scope is
    # not touched as a side effect.
    with nn.parameter_scope('', OrderedDict()):
        nn.save_parameters(path, params)
def train(self):
    # Variables for training
    tx_in = nn.Variable(
        [self._batch_size, self._x_input_length, self._cols_size])
    tx_out = nn.Variable(
        [self._batch_size, self._x_output_length, self._cols_size])
    tpred = self.network(tx_in, self._lstm_unit_name, self._lstm_units)
    tpred.persistent = True
    loss = F.mean(F.squared_error(tpred, tx_out))
    solver = S.Adam(self._learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Variables for validation
    vx_in = nn.Variable(
        [self._batch_size, self._x_input_length, self._cols_size])
    vx_out = nn.Variable(
        [self._batch_size, self._x_output_length, self._cols_size])
    vpred = self.network(vx_in, self._lstm_unit_name, self._lstm_units)

    # Data iterators
    tdata = self._load_dataset(self._training_dataset_path,
                               self._batch_size, shuffle=True)
    vdata = self._load_dataset(self._validation_dataset_path,
                               self._batch_size, shuffle=True)

    # Monitors
    from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
    monitor = Monitor(self._monitor_path)
    monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
    monitor_err = MonitorSeries("Training error", monitor, interval=10)
    monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_verr = MonitorSeries("Validation error", monitor, interval=10)

    # Training loop
    for i in range(self._max_iter):
        if i % self._val_interval == 0:
            ve = self._validate(vpred, vx_in, vx_out, vdata, self._val_iter)
            monitor_verr.add(i, ve / self._val_iter)
        te = self._train(tpred, solver, loss, tx_in, tx_out,
                         tdata.next(), self._weight_decay)
        monitor_loss.add(i, loss.d.copy())
        monitor_err.add(i, te)
        monitor_time.add(i)

    ve = self._validate(vpred, vx_in, vx_out, vdata, self._val_iter)
    monitor_verr.add(i, ve / self._val_iter)

    # Save the model parameters
    nn.save_parameters(self._model_params_path)
def test_load_save_parameters():
    module = MyModule(shape=(5, 5))
    params = module.get_parameters()
    if not os.path.exists('__nnabla_nas__'):
        os.makedirs('__nnabla_nas__')
    nn.save_parameters('__nnabla_nas__/params.h5', params)
    nn.load_parameters('__nnabla_nas__/params.h5')
    params0 = nn.get_parameters()
    for k, v in params.items():
        assert_allclose(v.d, params0[k].d)
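# The hard-coded '__nnabla_nas__' directory above leaks into the working
# tree; a variant of the same round trip using a throwaway directory (a
# sketch, reusing the same MyModule assumption as the test above):
import os
import tempfile


def test_load_save_parameters_tmpdir():
    module = MyModule(shape=(5, 5))
    params = module.get_parameters()
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, 'params.h5')
        nn.save_parameters(path, params)
        nn.load_parameters(path)
    params0 = nn.get_parameters()
    for k, v in params.items():
        assert_allclose(v.d, params0[k].d)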
def train_loop(env, model, num_actions, return_fn, logdir, eval_fn, args):
    monitor = Monitor(logdir)
    reward_monitor = MonitorSeries('reward', monitor, interval=1)
    eval_reward_monitor = MonitorSeries('eval_reward', monitor, interval=1)
    policy_loss_monitor = MonitorSeries('policy_loss', monitor, interval=10000)
    value_loss_monitor = MonitorSeries('value_loss', monitor, interval=10000)

    sample_action = lambda x: np.random.choice(num_actions, p=x)

    step = 0
    obs_t = env.reset()
    cumulative_reward = np.zeros(len(env.envs), dtype=np.float32)
    obss_t, acts_t, vals_t, rews_tp1, ters_tp1 = [], [], [], [], []
    while step <= args.final_step:
        # Inference
        probs_t, val_t = model.infer(pixel_to_float(obs_t))
        # Sample actions
        act_t = list(map(sample_action, probs_t))
        # Move environment
        obs_tp1, rew_tp1, ter_tp1, _ = env.step(act_t)
        # Clip reward between [-1.0, 1.0]
        clipped_rew_tp1 = np.clip(rew_tp1, -1.0, 1.0)

        obss_t.append(obs_t)
        acts_t.append(act_t)
        vals_t.append(val_t)
        rews_tp1.append(clipped_rew_tp1)
        ters_tp1.append(ter_tp1)

        # Update parameters
        if len(obss_t) == args.time_horizon:
            vals_t.append(val_t)
            rets_t = return_fn(vals_t, rews_tp1, ters_tp1)
            advs_t = rets_t - vals_t[:-1]
            policy_loss, value_loss = model.train(
                pixel_to_float(obss_t), acts_t, rets_t, advs_t, step)
            policy_loss_monitor.add(step, policy_loss)
            value_loss_monitor.add(step, value_loss)
            obss_t, acts_t, vals_t, rews_tp1, ters_tp1 = [], [], [], [], []

        cumulative_reward += rew_tp1
        obs_t = obs_tp1

        for i, ter in enumerate(ter_tp1):
            step += 1
            if ter:
                reward_monitor.add(step, cumulative_reward[i])
                cumulative_reward[i] = 0.0
            # Save parameters
            if step % 10**6 == 0:
                path = os.path.join(logdir, 'model_{}.h5'.format(step))
                nn.save_parameters(path)
                eval_reward_monitor.add(step, eval_fn())
def save_parameters(self, path, params=None, grad_only=False):
    r"""Saves the parameters to a file.

    Args:
        path (str): Path to file.
        params (OrderedDict, optional): An `OrderedDict` containing
            parameters. If params is `None`, then the current parameters
            will be saved.
        grad_only (bool, optional): If True, only parameters with
            `need_grad=True` are saved. Defaults to False.
    """
    # Note: an empty OrderedDict is falsy, so passing one falls back to
    # the module's own parameters.
    params = params or self.get_parameters(grad_only)
    nn.save_parameters(path, params)
def search_architecture(args, data_dict, controller_weights_dict):
    """
    Execute architecture search. Keep searching until either
    it finds an architecture which achieves higher validation accuracy
    than the threshold set by the user, or it has searched for a certain
    number of iterations.
    arguments: num_nodes > 2
    """
    val_change = list()
    arch_change = list()
    best_val = 0.0

    for k in range(args.max_search_iter):
        output_line = " Iteration {} / {} ".format((k + 1),
                                                   args.max_search_iter)
        print("\n{0:-^80s}\n".format(output_line))

        if k > 3:
            print("Previous 3 Accuracy Changes:",
                  "{:.2f}".format(100 * val_change[-3]), "% ->",
                  "{:.2f}".format(100 * val_change[-2]), "% ->",
                  "{:.2f}".format(100 * val_change[-1]), "%\n")
            print("The best accuracy so far is: {:.2f} %.".format(
                np.max(val_change) * 100))

        sample_arch, val_acc = sample_arch_and_train(args, data_dict,
                                                     controller_weights_dict)
        arch_change.append(sample_arch)
        val_change.append(val_acc)

        if val_acc > best_val:
            best_val = val_acc
            best_arch = sample_arch
            print("Achieved the best validation accuracy. "
                  "Saved the model architecture...")
            np.save(args.recommended_arch, np.array(best_arch))
            nn.save_parameters(
                os.path.join(args.model_save_path, 'controller_params.h5'))

        if args.early_stop_over < best_val:
            print("Reached the stop accuracy. Finishing architecture search.")
            break

    print("After {0} trials, the best accuracy is {1:.2f} %.".format(
        (k + 1), 100 * np.max(val_change)))
    return arch_change, best_arch
def train(args):
    # Create real images
    reals = create_real_images(args)

    # Save real images
    for i, real in enumerate(reals):
        image_path = os.path.join(args.logdir, 'real_%d.png' % i)
        imwrite(denormalize(np.transpose(real, [0, 2, 3, 1])[0]), image_path)

    # nnabla monitor
    monitor = Monitor(args.logdir)
    # Use cv2 backend at MonitorImage
    set_backend('cv2')

    prev_models = []
    Zs = []
    noise_amps = []
    for scale_num in range(len(reals)):
        fs = min(args.fs_init * (2**(scale_num // 4)), 128)
        min_fs = min(args.min_fs_init * (2**(scale_num // 4)), 128)

        model = Model(real=reals[scale_num],
                      num_layer=args.num_layer,
                      fs=fs,
                      min_fs=min_fs,
                      kernel=args.kernel,
                      pad=args.pad,
                      lam_grad=args.lam_grad,
                      alpha_recon=args.alpha_recon,
                      d_lr=args.d_lr,
                      g_lr=args.g_lr,
                      beta1=args.beta1,
                      gamma=args.gamma,
                      lr_milestone=args.lr_milestone,
                      scope=str(scale_num))

        z_curr = train_single_scale(args, scale_num, model, reals,
                                    prev_models, Zs, noise_amps, monitor)

        prev_models.append(model)
        Zs.append(z_curr)
        noise_amps.append(args.noise_amp)

    # Save data
    nn.save_parameters(os.path.join(args.logdir, 'models.h5'))
    save_pkl(Zs, os.path.join(args.logdir, 'Zs.pkl'))
    save_pkl(reals, os.path.join(args.logdir, 'reals.pkl'))
    save_pkl(noise_amps, os.path.join(args.logdir, 'noise_amps.pkl'))

    return Zs, reals, noise_amps
def main():
    # Training settings
    args = Yolov2OptionTraining().parse_args()
    nsamples = file_lines(args.train)
    set_default_context_by_args(args)

    # Training parameters
    max_epochs = args.max_batches * args.batch_size * args.accum_times \
        / nsamples + 1

    if not os.path.exists(args.output):
        os.mkdir(args.output)

    ###############
    # Load parameters
    print("Load", args.weight, "...")
    if args.fine_tune:
        nn.load_parameters(args.weight)
        nn.parameter.pop_parameter("detection/conv/W")
        nn.parameter.pop_parameter("detection/conv/b")
    else:
        nn.load_parameters(args.weight)

    train_graph = TrainGraph(args, (args.size_aug[-1], args.size_aug[-1]))
    yolo_solver = YoloSolver(args)

    if args.on_memory_data:
        on_memory_data = dataset.load_on_memory_data(args.train)
    else:
        on_memory_data = None
    prefetch_iterator = PrefetchIterator()

    from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
    monitor = Monitor(args.output)
    monitor_loss = MonitorSeries('Training loss', monitor, interval=1)
    monitor_time = MonitorTimeElapsed('Time per epoch', monitor, interval=1)

    # Epoch loop
    for epoch in range(0, int(max_epochs)):
        loss = train(args, epoch, max_epochs, train_graph, yolo_solver,
                     prefetch_iterator, on_memory_data)
        monitor_loss.add(epoch, loss)
        monitor_time.add(epoch)

    # Save the final parameters
    logging('save weights to %s/%06d.h5' % (args.output, epoch + 1))
    nn.save_parameters('%s/%06d.h5' % (args.output, epoch + 1))
def save_all_params(params_dict, c, k, j, bundle_size, step_size,
                    save_dir, epoch):
    params_dict[c] = nn.get_parameters(grad_only=False).copy()
    c += 1
    if c == bundle_size or j == step_size - 1:
        dn = os.path.join(save_dir, 'epoch%02d' % (epoch), 'weights')
        ensure_dir(dn)
        for cc, params in params_dict.items():
            fn = '%s/model_step%04d.h5' % (dn, k + cc)
            nn.save_parameters(fn, params=params, extension=".h5")
        k += c
        c = 0
        params_dict = {}
    return params_dict, c, k
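# Usage sketch for the stateful bundling above (the driver loop, step count
# and `train_one_step` are hypothetical, not from the original source): the
# caller threads (params_dict, c, k) through successive calls, and the
# function flushes a bundle of in-memory snapshots to disk every
# `bundle_size` steps or at the last step of the epoch.
params_dict, c, k = {}, 0, 0
step_size = 100
for j in range(step_size):
    train_one_step()  # hypothetical training step
    params_dict, c, k = save_all_params(
        params_dict, c, k, j, bundle_size=8, step_size=step_size,
        save_dir='./snapshots', epoch=0)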
def test_graph_clear_buffer(seed):
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4])
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)

    # Network definition
    nn.set_default_context(nn.Context())
    nn.clear_parameters()
    x1 = x + 1
    x2 = x1 - 1
    with nn.parameter_scope('conv1'):
        z = PF.convolution(x2, 3, (2, 2))
        z2 = F.relu(z, inplace=True)
    with nn.parameter_scope('fc2'):
        z3 = PF.affine(z2, 5)
        z4 = PF.affine(z2, 5)
    l1 = F.softmax_cross_entropy(z3, t, 1)
    L1 = F.mean(l1)
    l2 = F.softmax_cross_entropy(z4, t, 1)
    L2 = F.mean(l2)

    # Forwardprop
    import tempfile
    import os
    tmpd = tempfile.mkdtemp()
    nn.save_parameters(os.path.join(tmpd, 'parameter.h5'))
    first = False
    for cnng in [False, True]:
        for cb in [False, True]:
            _ = nn.load_parameters(os.path.join(tmpd, 'parameter.h5'))
            for v in nn.get_parameters().values():
                v.grad.zero()
            nn.forward_all([L1, L2], clear_no_need_grad=cnng)

            # For now, the first backward cannot be
            # called with clear_buffer=True
            L1.backward(clear_buffer=False)
            L2.backward(clear_buffer=cb)
            if not first:
                first = True
                g = list(nn.get_parameters().values())[0].g.copy()
            else:
                g2 = list(nn.get_parameters().values())[0].g.copy()
                import platform
                if platform.machine() == 'ppc64le':
                    pytest.skip("This test fails on ppc64le")
                assert np.all(g == g2)
def main():
    args = get_args()
    nn.load_parameters(args.input)
    params = nn.get_parameters(grad_only=False)
    processed = False

    # Convert memory layout
    layout = get_memory_layout(params)
    if args.memory_layout is None:
        pass

    if args.affine_to_conv:
        rm_list = []
        ret = affine_to_conv(params, args.memory_layout, rm_list)
        for r in rm_list:
            print(r)
            nn.parameter.pop_parameter(r)
        if ret:
            logger.info('Converted affine to conv.')
        processed |= ret

    if args.memory_layout != layout:
        logger.info(f'Converting memory layout to {args.memory_layout}.')
        convert_memory_layout(params, args.memory_layout)
        processed |= True
    else:
        logger.info('No need to convert memory layout.')

    if args.force_4_channels:
        ret = force_4_channels(params, args.memory_layout)
        if ret:
            logger.info('Converted first conv to 4-channel input.')
        processed |= ret

    if args.force_3_channels:
        ret = force_3_channels(params, args.memory_layout)
        if ret:
            logger.info('Converted first conv to 3-channel input.')
        processed |= ret

    nn.clear_parameters()
    for key, param in params.items():
        print(key)
        print(param.shape)
        nn.parameter.set_parameter(key, param)

    if not processed:
        logger.info(
            'No change has been made for the input. Not saving a new parameter file.')
        return

    logger.info(f'Save a new parameter file at {args.output}')
    nn.save_parameters(args.output)
def save_models(epoch_num, cle_disout, fake_disout,
                losses_gen, losses_dis, losses_ae):
    # Save generator parameters
    with nn.parameter_scope("gen"):
        nn.save_parameters(os.path.join(
            args.model_save_path,
            'generator_param_{:04}.h5'.format(epoch_num + 1)))

    # Save discriminator parameters
    with nn.parameter_scope("dis"):
        nn.save_parameters(os.path.join(
            args.model_save_path,
            'discriminator_param_{:04}.h5'.format(epoch_num + 1)))

    # Save results
    np.save(os.path.join(args.model_save_path,
                         'disout_his_{:04}.npy'.format(epoch_num + 1)),
            np.array([cle_disout, fake_disout]))
    np.save(os.path.join(args.model_save_path,
                         'losses_gen_{:04}.npy'.format(epoch_num + 1)),
            np.array(losses_gen))
    np.save(os.path.join(args.model_save_path,
                         'losses_dis_{:04}.npy'.format(epoch_num + 1)),
            np.array(losses_dis))
    np.save(os.path.join(args.model_save_path,
                         'losses_ae_{:04}.npy'.format(epoch_num + 1)),
            np.array(losses_ae))
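# Because each network above is saved from inside its own parameter scope,
# the files contain scope-relative keys, so restoring them needs the same
# scopes. A restore sketch matching save_models (the function itself is
# illustrative, not from the original source):
def load_models(epoch_num):
    with nn.parameter_scope("gen"):
        nn.load_parameters(os.path.join(
            args.model_save_path,
            'generator_param_{:04}.h5'.format(epoch_num + 1)))
    with nn.parameter_scope("dis"):
        nn.load_parameters(os.path.join(
            args.model_save_path,
            'discriminator_param_{:04}.h5'.format(epoch_num + 1)))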
def train(args, epoch, max_epochs, train_graph, yolo_solver,
          prefetch_iterator, on_memory_data):
    sample_iter = dataset.listDataset(
        args.train, args,
        shuffle=True,
        train=True,
        seen=train_graph.seen,
        image_sizes=args.size_aug,
        image_size_change_freq=args.batch_size * args.accum_times * 10,
        on_memory_data=on_memory_data,
        use_cv2=not args.disable_cv2)
    batch_iter = dataset.create_batch_iter(iter(sample_iter),
                                           batch_size=args.batch_size)
    total_loss = []
    epoch_seen = 0
    tic = time.time()
    for batch_idx, ((data_tensor, target_tensor), preprocess_time) \
            in enumerate(prefetch_iterator.create(batch_iter)):
        lr = yolo_solver.get_current_lr(train_graph.seen)
        nB = data_tensor.shape[0]
        print('size={}, '.format(sample_iter.shape[0]), end='')
        stats = train_graph.forward_backward(data_tensor, target_tensor)
        print('s=%d/%d, b=%d/%d, e=%d/%d, lr %g, mIoU %.3f, nGT %d, recall %d,'
              ' proposals %d, loss: %.3f, pre: %.1f [ms], exec: %.1f [ms]'
              % (stats.seen, len(sample_iter),
                 get_current_batch(stats.seen, args), args.max_batches,
                 epoch, max_epochs, lr, stats.mIoU, stats.nGT, stats.nCorrect,
                 stats.nProposals, stats.loss, preprocess_time, stats.time))
        epoch_seen += nB
        total_loss.append(float(stats.loss))
        # Update parameters for every accum_times iterations.
        updated = yolo_solver.update_at_rate(lr)
    # Update if gradients are computed from previous update.
    yolo_solver.update_at_rate(lr, force=True)
    print()
    if (epoch + 1) % args.save_interval == 0:
        logging('save weights to %s/%06d.h5' % (args.output, epoch + 1))
        nn.save_parameters('%s/%06d.h5' % (args.output, epoch + 1))
    return np.sum(total_loss) / epoch_seen
def main():
    args = get_args()

    # Defining the network first
    x = nn.Variable((1, 3, 224, 224))
    y = darknet19.darknet19_feature(x / 255, test=True)

    # Get NNabla parameters
    params = nn.get_parameters(grad_only=False)

    # Parse Darknet weights and store them into NNabla params
    dn_weights = parser.load_weights_raw(args.input)
    cursor = 0
    for i in range(1, 19):  # 1 to 18
        print("Layer", i)
        cursor = parser.load_convolutional_and_get_next_cursor(
            dn_weights, cursor, params, 'c{}'.format(i))
    nn.save_parameters(args.output)
def train(self):
    # On-start callbacks
    for callback in self.callback_on_start:
        callback()

    # Training loop
    for e in range(self.max_epoch):
        for j in range(self.iter_per_epoch):
            i = e * self.iter_per_epoch + j
            # On-start callbacks
            for callback in self.update_callback_on_start:
                callback(i)
            # Update
            for updater in self.updater:
                updater.update(i)
            # On-finish callbacks
            for callback in self.update_callback_on_finish:
                callback(i)
        # Save parameters
        if self.model_save_path is not None:
            nn.save_parameters(
                os.path.join(self.model_save_path,
                             'params_{:06}.h5'.format(i)))
        # Evaluate
        for evaluator in self.evaluator:
            evaluator.evaluate(i)

    # On-finish callbacks
    for callback in self.callback_on_finish:
        callback()

    # Save parameters
    if self.model_save_path is not None:
        nn.save_parameters(
            os.path.join(self.model_save_path,
                         'params_{:06}.h5'.format(i)))
    # Evaluate
    for evaluator in self.evaluator:
        evaluator.evaluate(i)
def save_parameters(current_epoch, log_dir, solvers):
    training_info_yaml = os.path.join(log_dir, "training_info.yaml")

    # Save weights
    saved_parameter = os.path.join(log_dir,
                                   f"params_at_epoch_{current_epoch}.h5")
    nn.save_parameters(saved_parameter)

    # Save each solver's state
    for name, solver in solvers.items():
        saved_states = os.path.join(
            log_dir, f"state_{name}_at_epoch_{current_epoch}.h5")
        solver.save_states(saved_states)

    with open(training_info_yaml, "r", encoding="utf-8") as f:
        lines = f.readlines()[:-1]
    lines.append(f"saved_parameters: params_at_epoch_{current_epoch}.h5")

    # Update the training info .yaml
    with open(training_info_yaml, "w", encoding="utf-8") as f:
        f.writelines(lines)
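# A matching resume path (a sketch, not from the original project): restore
# the weights with `nn.load_parameters` and each solver's state with
# `load_states`, the counterpart of the `save_states` call used above.
def load_parameters(current_epoch, log_dir, solvers):
    nn.load_parameters(os.path.join(
        log_dir, f"params_at_epoch_{current_epoch}.h5"))
    for name, solver in solvers.items():
        solver.load_states(os.path.join(
            log_dir, f"state_{name}_at_epoch_{current_epoch}.h5"))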
def test_graph_clear_buffer(seed):
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4])
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)

    # Network definition
    nn.set_default_context(nn.Context())
    nn.clear_parameters()
    x1 = x + 1
    x2 = x1 - 1
    with nn.parameter_scope('conv1'):
        z = PF.convolution(x2, 3, (2, 2))
        z2 = F.relu(z, inplace=True)
    with nn.parameter_scope('fc2'):
        z3 = PF.affine(z2, 5)
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)

    # Forwardprop
    import tempfile
    import os
    tmpd = tempfile.mkdtemp()
    nn.save_parameters(os.path.join(tmpd, 'parameter.h5'))
    first = False
    for cnng in [False, True]:
        for cb in [False, True]:
            _ = nn.load_parameters(os.path.join(tmpd, 'parameter.h5'))
            for v in nn.get_parameters().values():
                v.grad.zero()
            L.forward(clear_no_need_grad=cnng)
            L.backward(clear_buffer=cb)
            if not first:
                first = True
                g = list(nn.get_parameters().values())[0].g.copy()
            else:
                g2 = list(nn.get_parameters().values())[0].g.copy()
                assert np.all(g == g2)
def main():
    """
    Main script.

    Steps:

    * Get and set context.
    * Load Dataset
    * Initialize DataIterator.
    * Create Networks
      * Net for Labeled Data
      * Net for Unlabeled Data
      * Net for Test Data
    * Create Solver.
    * Training Loop.
      * Test
      * Training
        * by Labeled Data
          * Calculate Cross Entropy Loss
        * by Unlabeled Data
          * Estimate Adversarial Direction
          * Calculate LDS Loss
    """
    args = get_args()

    # Get context.
    from nnabla.contrib.context import extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    shape_x = (1, 28, 28)
    n_h = args.n_units
    n_y = args.n_class

    # Load MNIST Dataset
    from mnist_data import MnistDataSource
    with MnistDataSource(train=True) as d:
        x_t = d.images
        t_t = d.labels
    with MnistDataSource(train=False) as d:
        x_v = d.images
        t_v = d.labels

    x_t = np.array(x_t / 256.0).astype(np.float32)
    x_t, t_t = x_t[:args.n_train], t_t[:args.n_train]
    x_v, t_v = x_v[:args.n_valid], t_v[:args.n_valid]

    # Create Semi-supervised Datasets
    x_l, t_l, x_u, _ = split_dataset(x_t, t_t, args.n_labeled, args.n_class)
    x_u = np.r_[x_l, x_u]
    x_v = np.array(x_v / 256.0).astype(np.float32)

    # Create DataIterators for datasets of labeled, unlabeled and validation
    di_l = DataIterator(args.batchsize_l, [x_l, t_l])
    di_u = DataIterator(args.batchsize_u, [x_u])
    di_v = DataIterator(args.batchsize_v, [x_v, t_v])

    # Create networks
    # feed-forward-net building function
    def forward(x, test=False):
        return mlp_net(x, n_h, n_y, test)

    # Net for learning labeled data
    xl = nn.Variable((args.batchsize_l,) + shape_x, need_grad=False)
    hl = forward(xl, test=False)
    tl = nn.Variable((args.batchsize_l, 1), need_grad=False)
    loss_l = F.mean(F.softmax_cross_entropy(hl, tl))

    # Net for learning unlabeled data
    xu = nn.Variable((args.batchsize_u,) + shape_x, need_grad=False)
    r = nn.Variable((args.batchsize_u,) + shape_x, need_grad=True)
    eps = nn.Variable((args.batchsize_u,) + shape_x, need_grad=False)
    loss_u, yu = vat(xu, r, eps, forward, distance)

    # Net for evaluating validation data
    xv = nn.Variable((args.batchsize_v,) + shape_x, need_grad=False)
    hv = forward(xv, test=True)
    tv = nn.Variable((args.batchsize_v, 1), need_grad=False)

    # Create solver
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Monitor training and validation stats.
    import nnabla.monitor as M
    monitor = M.Monitor(args.model_save_path)
    monitor_verr = M.MonitorSeries("Test error", monitor, interval=240)
    monitor_time = M.MonitorTimeElapsed("Elapsed time", monitor, interval=240)

    # Training Loop.
    t0 = time.time()
    for i in range(args.max_iter):
        # Validation Test
        if i % args.val_interval == 0:
            n_error = calc_validation_error(
                di_v, xv, tv, hv, args.val_iter)
            monitor_verr.add(i, n_error)

        #################################
        ## Training by Labeled Data #####
        #################################
        # Input minibatch of labeled data into variables
        xl.d, tl.d = di_l.next()

        # Initialize gradients
        solver.zero_grad()

        # Forward, backward and update
        loss_l.forward(clear_no_need_grad=True)
        loss_l.backward(clear_buffer=True)
        solver.weight_decay(args.weight_decay)
        solver.update()

        #################################
        ## Training by Unlabeled Data ###
        #################################
        # Input minibatch of unlabeled data into variables
        xu.d, = di_u.next()

        ##### Calculate Adversarial Noise #####
        # Sample random noise
        n = np.random.normal(size=xu.shape).astype(np.float32)

        # Normalize the noise vector and input it to the variable
        r.d = get_direction(n)

        # Set xi, the power-method scaling parameter.
        eps.data.fill(args.xi_for_vat)

        # Calculate y without noise, only once.
        yu.forward(clear_buffer=True)

        # Do power-method iterations
        for k in range(args.n_iter_for_power_method):
            # Initialize the gradient to receive the value
            r.grad.zero()

            # Forward, backward, without update
            loss_u.forward(clear_no_need_grad=True)
            loss_u.backward(clear_buffer=True)

            # Normalize the gradient vector and input it to the variable
            r.d = get_direction(r.g)

        ##### Calculate loss for unlabeled data #####
        # Clear remaining gradients
        solver.zero_grad()

        # Set epsilon, the adversarial noise scaling parameter.
        eps.data.fill(args.eps_for_vat)

        # Forward, backward and update
        loss_u.forward(clear_no_need_grad=True)
        loss_u.backward(clear_buffer=True)
        solver.weight_decay(args.weight_decay)
        solver.update()

        ##### Learning rate update #####
        if i % args.iter_per_epoch == 0:
            solver.set_learning_rate(
                solver.learning_rate() * args.learning_rate_decay)
        monitor_time.add(i)

    # Evaluate the final model by the error rate with the validation dataset
    valid_error = calc_validation_error(di_v, xv, tv, hv, args.val_iter)
    monitor_verr.add(i, valid_error)
    monitor_time.add(i)

    # Save the model.
    parameter_file = os.path.join(
        args.model_save_path, 'params_%06d.h5' % args.max_iter)
    nn.save_parameters(parameter_file)
def main():
    # Get arguments
    args = get_args()
    data_file = "https://raw.githubusercontent.com/tomsercu/lstm/master/data/ptb.train.txt"
    model_file = args.work_dir + "model.h5"

    # Load Dataset
    itow, wtoi, dataset = load_ptbset(data_file)

    # Computation environment settings
    from nnabla.contrib.context import extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    # Create data provider
    n_word = len(wtoi)
    n_dim = args.embed_dim
    batchsize = args.batchsize
    half_window = args.half_window_length
    n_negative = args.n_negative_sample

    di = DataIteratorForEmbeddingLearning(
        batchsize=batchsize,
        half_window=half_window,
        n_negative=n_negative,
        dataset=dataset)

    # Create model
    # - Real batch size including context samples and negative samples
    size = batchsize * (1 + n_negative) * (2 * (half_window - 1))

    # Model for learning
    # - input variables
    xl = nn.Variable((size,))  # variable for word
    yl = nn.Variable((size,))  # variable for context

    # Embed layers for word embedding function
    # - f_embed : word index x to get y, the n_dim vector
    # -- for each sample in a minibatch
    hx = PF.embed(xl, n_word, n_dim, name="e1")  # feature vector for word
    hy = PF.embed(yl, n_word, n_dim, name="e1")  # feature vector for context
    hl = F.sum(hx * hy, axis=1)

    # -- Approximated likelihood of context prediction
    # pos: word context, neg: negative samples
    tl = nn.Variable([size, ], need_grad=False)
    loss = F.sigmoid_cross_entropy(hl, tl)
    loss = F.mean(loss)

    # Model for test of searching similar words
    xr = nn.Variable((1,), need_grad=False)
    hr = PF.embed(xr, n_word, n_dim, name="e1")  # feature vector for test

    # Create solver
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Create monitor.
    monitor = M.Monitor(args.work_dir)
    monitor_loss = M.MonitorSeries(
        "Training loss", monitor, interval=args.monitor_interval)
    monitor_time = M.MonitorTimeElapsed(
        "Training time", monitor, interval=args.monitor_interval)

    # Do training
    max_epoch = args.max_epoch
    for epoch in range(max_epoch):

        # iteration per epoch
        for i in range(di.n_batch):

            # get minibatch
            xi, yi, ti = di.next()

            # learn
            solver.zero_grad()
            xl.d, yl.d, tl.d = xi, yi, ti
            loss.forward(clear_no_need_grad=True)
            loss.backward(clear_buffer=True)
            solver.update()

            # monitor
            itr = epoch * di.n_batch + i
            monitor_loss.add(itr, loss.d)
            monitor_time.add(itr)

    # Save model
    nn.save_parameters(model_file)

    # Evaluate by similarity
    max_check_words = args.max_check_words
    for i in range(max_check_words):

        # prediction
        xr.d = i
        hr.forward(clear_buffer=True)
        h = hr.d

        # similarity calculation
        w = nn.get_parameters()['e1/embed/W'].d
        s = np.sqrt((w * w).sum(1))
        w /= s.reshape((s.shape[0], 1))
        similarity = w.dot(h[0]) / s[i]

        # for understanding
        output_similar_words(itow, i, similarity)
def train():
    """
    Naive Multi-Device Training

    NOTE: the communicator exposes low-level interfaces

    * Parse command line arguments.
    * Specify contexts for computation.
    * Initialize DataIterator.
    * Construct computation graphs for training and one for validation.
    * Initialize solvers and set parameter variables to those.
    * Instantiate a communicator and set parameter variables.
    * Create monitor instances for saving and displaying training stats.
    * Training loop
      * Compute error rate for validation data (periodically)
      * Get a next minibatch.
      * Execute forwardprops
      * Set parameter gradients zero
      * Execute backprop.
      * In-place allreduce (THIS IS THE MAIN difference from a single
        device training)
      * Solver updates parameters by using gradients computed by backprop.
      * Compute training error
    """
    # Parse args
    args = get_args()
    n_train_samples = 50000
    bs_valid = args.batch_size

    # Create contexts
    extension_module = args.context
    if extension_module != "cuda" and \
            extension_module != "cuda.cudnn":
        raise Exception("Use `cuda` or `cuda.cudnn` extension_module.")
    n_devices = args.n_devices
    ctxs = []
    for i in range(n_devices):
        ctx = extension_context(extension_module, device_id=i)
        ctxs.append(ctx)
    ctx = ctxs[-1]

    # Create training graphs
    input_image_train = []
    preds_train = []
    losses_train = []
    test = False
    for i in range(n_devices):
        image = nn.Variable((args.batch_size, 3, 32, 32))
        label = nn.Variable((args.batch_size, 1))
        device_scope_name = "device{}".format(i)
        pred = cifar100_resnet23_prediction(
            image, ctxs[i], device_scope_name, test)
        loss = cifar100_resnet32_loss(pred, label)
        input_image_train.append({"image": image, "label": label})
        preds_train.append(pred)
        losses_train.append(loss)

    # Create validation graph
    test = True
    device_scope_name = "device{}".format(0)
    image_valid = nn.Variable((bs_valid, 3, 32, 32))
    pred_valid = cifar100_resnet23_prediction(
        image_valid, ctxs[i], device_scope_name, test)
    input_image_valid = {"image": image_valid}

    # Solvers
    solvers = []
    for i in range(n_devices):
        with nn.context_scope(ctxs[i]):
            solver = S.Adam()
            device_scope_name = "device{}".format(i)
            with nn.parameter_scope(device_scope_name):
                params = nn.get_parameters()
                solver.set_parameters(params)
            solvers.append(solver)

    # Communicator
    comm = C.DataParalellCommunicator(ctx)
    for i in range(n_devices):
        device_scope_name = "device{}".format(i)
        with nn.parameter_scope(device_scope_name):
            ctx = ctxs[i]
            params = nn.get_parameters()
            comm.add_context_and_parameters((ctx, params))
    comm.init()

    # Create threadpools with one thread each
    pools = []
    for _ in range(n_devices):
        pool = ThreadPool(processes=1)
        pools.append(pool)

    # Once forward/backward to safely secure memory
    for device_id in range(n_devices):
        data, label = \
            (np.random.randn(*input_image_train[device_id]["image"].shape),
             (np.random.rand(*input_image_train[device_id]["label"].shape)
              * 10).astype(np.int32))
        ret = pools[device_id].apply_async(
            forward_backward,
            (input_image_train[device_id]["image"], data,
             input_image_train[device_id]["label"], label,
             losses_train[device_id], solvers[device_id]))
        ret.get()
        losses_train[device_id].d  # sync to host

    # Create monitor.
    from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
    monitor = Monitor(args.monitor_path)
    monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
    monitor_err = MonitorSeries("Training error", monitor, interval=10)
    monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_verr = MonitorSeries("Test error", monitor, interval=10)

    with data_iterator_cifar100(args.batch_size, True) as tdata, \
            data_iterator_cifar100(bs_valid, False) as vdata:
        # Training loop
        for i in range(int(args.max_iter / n_devices)):
            # Validation
            if i % int(n_train_samples / args.batch_size / n_devices) == 0:
                ve = 0.
                for j in range(args.val_iter):
                    image, label = vdata.next()
                    input_image_valid["image"].d = image
                    pred_valid.forward()
                    ve += categorical_error(pred_valid.d, label)
                ve /= args.val_iter
                monitor_verr.add(i * n_devices, ve)
            if i % int(args.model_save_interval / n_devices) == 0:
                nn.save_parameters(os.path.join(
                    args.model_save_path, 'params_%06d.h5' % i))

            # Forwards/Zerograd/Backwards
            fb_results = []
            for device_id in range(n_devices):
                image, label = tdata.next()
                res = pools[device_id].apply_async(
                    forward_backward,
                    (input_image_train[device_id]["image"], image,
                     input_image_train[device_id]["label"], label,
                     losses_train[device_id], solvers[device_id]))
                fb_results.append(res)
            for device_id in range(n_devices):
                fb_results[device_id].get()

            # In-place Allreduce
            comm.allreduce()

            # Solvers update
            for device_id in range(n_devices):
                solvers[device_id].update()

            e = categorical_error(
                preds_train[-1].d, input_image_train[-1]["label"].d)
            monitor_loss.add(i * n_devices, losses_train[-1].d.copy())
            monitor_err.add(i * n_devices, e)
            monitor_time.add(i * n_devices)

    nn.save_parameters(os.path.join(
        args.model_save_path, 'params_%06d.h5' % (args.max_iter / n_devices)))
def train(args):
    """
    Main script.
    """
    # Get context.
    from nnabla.contrib.context import extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    # Create CNN network for both training and testing.
    # TRAIN

    # Fake path
    z = nn.Variable([args.batch_size, 100, 1, 1])
    fake = generator(z)
    fake.persistent = True  # Not to clear at backward
    pred_fake = discriminator(fake)
    loss_gen = F.mean(F.sigmoid_cross_entropy(
        pred_fake, F.constant(1, pred_fake.shape)))
    fake_dis = fake.unlinked()
    pred_fake_dis = discriminator(fake_dis)
    loss_dis = F.mean(F.sigmoid_cross_entropy(
        pred_fake_dis, F.constant(0, pred_fake_dis.shape)))

    # Real path
    x = nn.Variable([args.batch_size, 1, 28, 28])
    pred_real = discriminator(x)
    loss_dis += F.mean(F.sigmoid_cross_entropy(
        pred_real, F.constant(1, pred_real.shape)))

    # Create Solver.
    solver_gen = S.Adam(args.learning_rate, beta1=0.5)
    solver_dis = S.Adam(args.learning_rate, beta1=0.5)
    with nn.parameter_scope("gen"):
        solver_gen.set_parameters(nn.get_parameters())
    with nn.parameter_scope("dis"):
        solver_dis.set_parameters(nn.get_parameters())

    # Create monitor.
    import nnabla.monitor as M
    monitor = M.Monitor(args.monitor_path)
    monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10)
    monitor_loss_dis = M.MonitorSeries(
        "Discriminator loss", monitor, interval=10)
    monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100)
    monitor_fake = M.MonitorImageTile(
        "Fake images", monitor, normalize_method=lambda x: (x + 1) / 2.)

    data = data_iterator_mnist(args.batch_size, True)

    # Training loop.
    for i in range(args.max_iter):
        if i % args.model_save_interval == 0:
            with nn.parameter_scope("gen"):
                nn.save_parameters(os.path.join(
                    args.model_save_path, "generator_param_%06d.h5" % i))
            with nn.parameter_scope("dis"):
                nn.save_parameters(os.path.join(
                    args.model_save_path, "discriminator_param_%06d.h5" % i))

        # Training forward
        image, _ = data.next()
        x.d = image / 255. - 0.5  # [0, 255] to [-0.5, 0.5]
        z.d = np.random.randn(*z.shape)

        # Generator update.
        solver_gen.zero_grad()
        loss_gen.forward(clear_no_need_grad=True)
        loss_gen.backward(clear_buffer=True)
        solver_gen.weight_decay(args.weight_decay)
        solver_gen.update()
        monitor_fake.add(i, fake)
        monitor_loss_gen.add(i, loss_gen.d.copy())

        # Discriminator update.
        solver_dis.zero_grad()
        loss_dis.forward(clear_no_need_grad=True)
        loss_dis.backward(clear_buffer=True)
        solver_dis.weight_decay(args.weight_decay)
        solver_dis.update()
        monitor_loss_dis.add(i, loss_dis.d.copy())
        monitor_time.add(i)

    nnp = os.path.join(
        args.model_save_path, 'dcgan_%06d.nnp' % args.max_iter)
    runtime_contents = {
        'networks': [
            {'name': 'Generator',
             'batch_size': args.batch_size,
             'outputs': {'G': fake},
             'names': {'z': z}},
            {'name': 'Discriminator',
             'batch_size': args.batch_size,
             'outputs': {'D': pred_real},
             'names': {'x': x}}],
        'executors': [
            {'name': 'Generator',
             'network': 'Generator',
             'data': ['z'],
             'output': ['G']},
            {'name': 'Discriminator',
             'network': 'Discriminator',
             'data': ['x'],
             'output': ['D']}]}
    save.save(nnp, runtime_contents)

    from cpp_forward_check import check_cpp_forward
    check_cpp_forward(args.model_save_path, [z.d], [z], fake, nnp, "Generator")
def main(args):
    # Settings
    device_id = args.device_id
    batch_size = args.batch_size
    batch_size_eval = args.batch_size_eval
    n_l_train_data = 4000
    n_train_data = 50000
    n_cls = 10
    learning_rate = 1. * 1e-3
    n_epoch = 300
    act = F.relu
    iter_epoch = int(n_train_data / batch_size)
    n_iter = n_epoch * iter_epoch
    extension_module = args.context
    alpha = args.alpha

    # Supervised Model
    ## ERM
    batch_size, m, h, w = batch_size, 3, 32, 32
    ctx = extension_context(extension_module, device_id=device_id)
    x_l_0 = nn.Variable((batch_size, m, h, w))
    y_l_0 = nn.Variable((batch_size, 1))
    pred = cnn_model_003(ctx, x_l_0)
    loss_ce = ce_loss(ctx, pred, y_l_0)
    loss_er = er_loss(ctx, pred)
    loss_supervised = loss_ce + loss_er

    ## VRM (mixup)
    x_l_1 = nn.Variable((batch_size, m, h, w))
    y_l_1 = nn.Variable((batch_size, 1))
    coef = nn.Variable()
    coef_b = F.broadcast(coef.reshape([1] * x_l_0.ndim, unlink=True),
                         x_l_0.shape)
    x_l_m = coef_b * x_l_0 + (1 - coef_b) * x_l_1
    coef_b = F.broadcast(coef.reshape([1] * pred.ndim, unlink=True),
                         pred.shape)
    y_l_m = coef_b * F.one_hot(y_l_0, (n_cls, )) \
        + (1 - coef_b) * F.one_hot(y_l_1, (n_cls, ))
    x_l_m.need_grad, y_l_m.need_grad = False, False
    pred_m = cnn_model_003(ctx, x_l_m)
    loss_er_m = er_loss(ctx, pred_m)  # TODO: needed?
    loss_ce_m = ce_loss_soft(ctx, pred, y_l_m)
    loss_supervised_m = loss_ce_m  # + loss_er_m

    # Semi-Supervised Model
    ## ERM
    x_u0 = nn.Variable((batch_size, m, h, w))
    x_u1 = nn.Variable((batch_size, m, h, w))
    pred_x_u0 = cnn_model_003(ctx, x_u0)
    pred_x_u1 = cnn_model_003(ctx, x_u1)
    pred_x_u0.persistent, pred_x_u1.persistent = True, True
    loss_sr = sr_loss(ctx, pred_x_u0, pred_x_u1)
    loss_er0 = er_loss(ctx, pred_x_u0)
    loss_er1 = er_loss(ctx, pred_x_u1)
    loss_unsupervised = loss_sr + loss_er0 + loss_er1

    ## VRM (mixup)
    x_u2 = nn.Variable((batch_size, m, h, w))  # not to overwrite x_u1.d
    coef_u = nn.Variable()
    coef_u_b = F.broadcast(coef_u.reshape([1] * x_u0.ndim, unlink=True),
                           x_u0.shape)
    x_u_m = coef_u_b * x_u0 + (1 - coef_u_b) * x_u2
    # Unlink the forward pass but reuse the result
    pred_x_u0_ = nn.Variable(pred_x_u0.shape)
    pred_x_u1_ = nn.Variable(pred_x_u1.shape)
    pred_x_u0_.data = pred_x_u0.data
    pred_x_u1_.data = pred_x_u1.data
    coef_u_b = F.broadcast(coef_u.reshape([1] * pred_x_u0.ndim, unlink=True),
                           pred_x_u0.shape)
    y_u_m = coef_u_b * pred_x_u0_ + (1 - coef_u_b) * pred_x_u1_
    x_u_m.need_grad, y_u_m.need_grad = False, False
    pred_x_u_m = cnn_model_003(ctx, x_u_m)
    loss_er_u_m = er_loss(ctx, pred_x_u_m)  # TODO: needed?
    loss_ce_u_m = ce_loss_soft(ctx, pred_x_u_m, y_u_m)
    loss_unsupervised_m = loss_ce_u_m  # + loss_er_u_m

    # Evaluation Model
    batch_size_eval, m, h, w = batch_size, 3, 32, 32
    x_eval = nn.Variable((batch_size_eval, m, h, w))
    pred_eval = cnn_model_003(ctx, x_eval, test=True)

    # Solver
    with nn.context_scope(ctx):
        solver = S.Adam(alpha=learning_rate)
        solver.set_parameters(nn.get_parameters())

    # Dataset
    ## Separate dataset
    home = os.environ.get("HOME")
    fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    separator = Separator(n_l_train_data)
    separator.separate_then_save(fpath)

    l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
    u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")

    # Data reader
    data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
                                    batch_size=batch_size,
                                    n_cls=n_cls,
                                    da=True,
                                    shape=True)

    # Training loop
    print("# Training loop")
    epoch = 1
    st = time.time()
    acc_prev = 0.
    ve_best = 1.
    save_path_prev = ""
    for i in range(n_iter):
        # Get data and set it to the variables
        x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
        x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
        x_l_0.d, _, y_l_0.d = x_l0_data, x_l1_data, y_l_data
        x_u0.d, x_u1.d = x_u0_data, x_u1_data

        # Train
        ## Forward (supervised and its mixup)
        loss_supervised.forward(clear_no_need_grad=True)
        coef_data = np.random.beta(alpha, alpha)
        coef.d = coef_data
        x_l_1.d = np.random.permutation(x_l0_data)
        y_l_1.d = np.random.permutation(y_l_data)
        loss_supervised_m.forward(clear_no_need_grad=True)

        ## Forward (unsupervised and its mixup)
        loss_unsupervised.forward(clear_no_need_grad=True)
        coef_data = np.random.beta(alpha, alpha)
        coef_u.d = coef_data
        x_u2.d = np.random.permutation(x_u1_data)
        loss_unsupervised_m.forward(clear_no_need_grad=True)

        ## Backward
        solver.zero_grad()
        loss_supervised.backward(clear_buffer=False)
        loss_supervised_m.backward(clear_buffer=False)
        loss_unsupervised.backward(clear_buffer=False)
        loss_unsupervised_m.backward(clear_buffer=True)
        solver.update()

        # Evaluate
        if int((i + 1) % iter_epoch) == 0:
            # Get data and set it to the variables
            x_data, y_data = data_reader.get_test_batch()

            # Evaluation loop
            ve = 0.
            iter_val = 0
            for k in range(0, len(x_data), batch_size_eval):
                x_eval.d = get_test_data(x_data, k, batch_size_eval)
                label = get_test_data(y_data, k, batch_size_eval)
                pred_eval.forward(clear_buffer=True)
                ve += categorical_error(pred_eval.d, label)
                iter_val += 1
            ve /= iter_val
            msg = "Epoch:{},ElapsedTime:{},Acc:{:02f}".format(
                epoch, time.time() - st, (1. - ve) * 100)
            print(msg)
            if ve < ve_best:
                if not os.path.exists(args.model_save_path):
                    os.makedirs(args.model_save_path)
                if save_path_prev != "":
                    os.remove(save_path_prev)
                save_path = os.path.join(
                    args.model_save_path, 'params_%06d.h5' % epoch)
                nn.save_parameters(save_path)
                save_path_prev = save_path
                ve_best = ve
            st = time.time()
            epoch += 1
def train():
    """
    Main script.
    """
    args = get_args()

    # Get context.
    from nnabla.contrib.context import extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    # Dataset
    # We use Tiny ImageNet from the Stanford CS231N class.
    # https://tiny-imagenet.herokuapp.com/
    # Tiny ImageNet consists of 200 categories, each with 500 training
    # images. The image size is 64x64. To adapt ResNet to 64x64 inputs,
    # the input image size of ResNet is set to 56x56, and the stride in
    # the first conv and the first max pooling are removed.
    data = data_iterator_tiny_imagenet(args.batch_size, 'train')
    vdata = data_iterator_tiny_imagenet(args.batch_size, 'val')

    num_classes = 200
    tiny = True  # TODO: Switch ILSVRC2012 dataset and TinyImageNet.
    t_model = get_model(
        args, num_classes, test=False, tiny=tiny)
    t_model.pred.persistent = True  # Not clearing buffer of pred in backward
    v_model = get_model(
        args, num_classes, test=True, tiny=tiny)
    v_model.pred.persistent = True  # Not clearing buffer of pred in forward

    # Create Solver.
    solver = S.Momentum(args.learning_rate, 0.9)
    solver.set_parameters(nn.get_parameters())

    # Create monitor.
    import nnabla.monitor as M
    monitor = M.Monitor(args.monitor_path)
    monitor_loss = M.MonitorSeries("Training loss", monitor, interval=10)
    monitor_err = M.MonitorSeries("Training error", monitor, interval=10)
    monitor_vloss = M.MonitorSeries("Validation loss", monitor, interval=10)
    monitor_verr = M.MonitorSeries("Validation error", monitor, interval=10)
    monitor_time = M.MonitorTimeElapsed("Training time", monitor, interval=10)

    # Training loop.
    for i in range(args.max_iter):
        # Save parameters
        if i % args.model_save_interval == 0:
            nn.save_parameters(os.path.join(
                args.model_save_path, 'param_%06d.h5' % i))

        # Validation
        if i % args.val_interval == 0:
            # Clear all intermediate memory to save memory.
            # t_model.loss.clear_recursive()
            l = 0.0
            e = 0.0
            for j in range(args.val_iter):
                images, labels = vdata.next()
                v_model.image.d = images
                v_model.label.d = labels
                v_model.image.data.cast(np.uint8, ctx)
                v_model.label.data.cast(np.int32, ctx)
                v_model.loss.forward(clear_buffer=True)
                l += v_model.loss.d
                e += categorical_error(v_model.pred.d, v_model.label.d)
            monitor_vloss.add(i, l / args.val_iter)
            monitor_verr.add(i, e / args.val_iter)
            # Clear all intermediate memory to save memory.
            # v_model.loss.clear_recursive()

        # Training
        l = 0.0
        e = 0.0
        solver.zero_grad()

        # Gradient accumulation loop
        for j in range(args.accum_grad):
            images, labels = data.next()
            t_model.image.d = images
            t_model.label.d = labels
            t_model.image.data.cast(np.uint8, ctx)
            t_model.label.data.cast(np.int32, ctx)
            t_model.loss.forward(clear_no_need_grad=True)
            # Accumulating gradients
            t_model.loss.backward(clear_buffer=True)
            l += t_model.loss.d
            e += categorical_error(t_model.pred.d, t_model.label.d)
        solver.weight_decay(args.weight_decay)
        solver.update()

        monitor_loss.add(i, l / args.accum_grad)
        monitor_err.add(i, e / args.accum_grad)
        monitor_time.add(i)

        # Learning rate decay at scheduled iterations
        if i in args.learning_rate_decay_at:
            solver.set_learning_rate(solver.learning_rate() * 0.1)

    nn.save_parameters(os.path.join(args.model_save_path,
                                    'param_%06d.h5' % args.max_iter))
def train(args):
    """
    Main script.
    """
    # Get context.
    from nnabla.contrib.context import extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    # Create CNN network for both training and testing.
    margin = 1.0  # Margin for contrastive loss.

    # TRAIN
    # Create input variables.
    image0 = nn.Variable([args.batch_size, 1, 28, 28])
    image1 = nn.Variable([args.batch_size, 1, 28, 28])
    label = nn.Variable([args.batch_size])
    # Create prediction graph.
    pred = mnist_lenet_siamese(image0, image1, test=False)
    # Create loss function.
    loss = F.mean(contrastive_loss(pred, label, margin))

    # TEST
    # Create input variables.
    vimage0 = nn.Variable([args.batch_size, 1, 28, 28])
    vimage1 = nn.Variable([args.batch_size, 1, 28, 28])
    vlabel = nn.Variable([args.batch_size])
    # Create prediction graph.
    vpred = mnist_lenet_siamese(vimage0, vimage1, test=True)
    vloss = F.mean(contrastive_loss(vpred, vlabel, margin))

    # Create Solver.
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Create monitor.
    import nnabla.monitor as M
    monitor = M.Monitor(args.monitor_path)
    monitor_loss = M.MonitorSeries("Training loss", monitor, interval=10)
    monitor_time = M.MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_vloss = M.MonitorSeries("Test loss", monitor, interval=10)

    # Initialize DataIterator for MNIST.
    rng = np.random.RandomState(313)
    data = siamese_data_iterator(args.batch_size, True, rng)
    vdata = siamese_data_iterator(args.batch_size, False, rng)

    # Training loop.
    for i in range(args.max_iter):
        if i % args.val_interval == 0:
            # Validation
            ve = 0.0
            for j in range(args.val_iter):
                vimage0.d, vimage1.d, vlabel.d = vdata.next()
                vloss.forward(clear_buffer=True)
                ve += vloss.d
            monitor_vloss.add(i, ve / args.val_iter)
        if i % args.model_save_interval == 0:
            nn.save_parameters(os.path.join(
                args.model_save_path, 'params_%06d.h5' % i))

        image0.d, image1.d, label.d = data.next()
        solver.zero_grad()
        # Training forward, backward and update
        loss.forward(clear_no_need_grad=True)
        loss.backward(clear_buffer=True)
        solver.weight_decay(args.weight_decay)
        solver.update()
        monitor_loss.add(i, loss.d.copy())
        monitor_time.add(i)

    parameter_file = os.path.join(
        args.model_save_path, 'params_%06d.h5' % args.max_iter)
    nn.save_parameters(parameter_file)

    nnp_file = os.path.join(
        args.model_save_path, 'siamese_%06d.nnp' % (args.max_iter))
    runtime_contents = {
        'networks': [
            {'name': 'Validation',
             'batch_size': args.batch_size,
             'outputs': {'y': vpred},
             'names': {'x0': vimage0, 'x1': vimage1}}],
        'executors': [
            {'name': 'Runtime',
             'network': 'Validation',
             'data': ['x0', 'x1'],
             'output': ['y']}]}
    save.save(nnp_file, runtime_contents)

    from cpp_forward_check import check_cpp_forward
    check_cpp_forward(args.model_save_path, [vimage0.d, vimage1.d],
                      [vimage0, vimage1], vpred, nnp_file)
def train():
    """
    Naive Multi-Device Training

    NOTE: the communicator exposes low-level interfaces

    * Parse command line arguments.
    * Instantiate a communicator and set parameter variables.
    * Specify contexts for computation.
    * Initialize DataIterator.
    * Construct a computation graph for training and one for validation.
    * Initialize solver and set parameter variables to that.
    * Create monitor instances for saving and displaying training stats.
    * Training loop
      * Compute error rate for validation data (periodically)
      * Get a next minibatch.
      * Execute forward prop.
      * Set parameter gradients to zero.
      * Execute backprop.
      * In-place allreduce (THIS IS THE MAIN difference from single-device
        training)
      * Solver updates parameters using the gradients computed by backprop.
      * Compute training error.
    """
    # Parse args
    args = get_args()
    n_train_samples = 50000
    bs_valid = args.batch_size

    # Communicator and Context
    extension_module = "cuda.cudnn"
    ctx = extension_context(extension_module)
    comm = C.MultiProcessDataParalellCommunicator(ctx)
    comm.init()
    n_devices = comm.size
    mpi_rank = comm.rank
    device_id = mpi_rank
    ctx = extension_context(extension_module, device_id=device_id)

    # Create training graphs
    test = False
    image_train = nn.Variable((args.batch_size, 3, 32, 32))
    label_train = nn.Variable((args.batch_size, 1))
    pred_train = cifar100_resnet23_prediction(image_train, ctx, test)
    loss_train = cifar100_resnet23_loss(pred_train, label_train)
    input_image_train = {"image": image_train, "label": label_train}

    # Add parameters to communicator.
    comm.add_context_and_parameters((ctx, nn.get_parameters()))

    # Create validation graph
    test = True
    image_valid = nn.Variable((bs_valid, 3, 32, 32))
    pred_valid = cifar100_resnet23_prediction(image_valid, ctx, test)
    input_image_valid = {"image": image_valid}

    # Solvers
    solver = S.Adam()
    solver.set_parameters(nn.get_parameters())
    base_lr = args.learning_rate
    warmup_iter = int(
        1. * n_train_samples / args.batch_size / n_devices) * args.warmup_epoch
    warmup_slope = 1. * n_devices / warmup_iter

    # Create monitor
    from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
    monitor = Monitor(args.monitor_path)
    monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
    monitor_err = MonitorSeries("Training error", monitor, interval=10)
    monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_verr = MonitorSeries("Test error", monitor, interval=10)

    with data_iterator_cifar100(args.batch_size, True) as tdata, \
            data_iterator_cifar100(bs_valid, False) as vdata:
        # Training-loop
        for i in range(int(args.max_iter / n_devices)):
            # Validation
            if mpi_rank == 0:
                if i % int(n_train_samples / args.batch_size / n_devices) == 0:
                    ve = 0.
                    for j in range(args.val_iter):
                        image, label = vdata.next()
                        input_image_valid["image"].d = image
                        pred_valid.forward()
                        ve += categorical_error(pred_valid.d, label)
                    ve /= args.val_iter
                    monitor_verr.add(i * n_devices, ve)
                if i % int(args.model_save_interval / n_devices) == 0:
                    nn.save_parameters(os.path.join(
                        args.model_save_path, 'params_%06d.h5' % i))

            # Forward/Zerograd/Backward
            image, label = tdata.next()
            input_image_train["image"].d = image
            input_image_train["label"].d = label
            loss_train.forward()
            solver.zero_grad()
            loss_train.backward()

            # In-place allreduce
            comm.allreduce(division=True)

            # Solver updates parameters.
            solver.update()

            # Linear warmup from 0 to base_lr * n_devices over warmup_iter
            # iterations; afterwards the learning rate is held constant.
            if i < warmup_iter:
                lr = base_lr * warmup_slope * i
            else:
                lr = base_lr * n_devices
            solver.set_learning_rate(lr)

            if mpi_rank == 0:
                e = categorical_error(
                    pred_train.d, input_image_train["label"].d)
                monitor_loss.add(i * n_devices, loss_train.d.copy())
                monitor_err.add(i * n_devices, e)
                monitor_time.add(i * n_devices)

    if mpi_rank == 0:
        nn.save_parameters(os.path.join(
            args.model_save_path,
            'params_%06d.h5' % (args.max_iter // n_devices)))
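# What `comm.allreduce(division=True)` computes, sketched in plain numpy for
# intuition (the real call reduces each registered parameter's gradient buffer
# across devices via NCCL/MPI; the function below is illustrative only): after
# the call, every device holds the mean of the per-device gradients, so
# `solver.update()` applies the same averaged step everywhere.
import numpy as np

def allreduce_mean_sketch(per_device_grads):
    """per_device_grads: list of gradient arrays, one per device."""
    mean_grad = np.mean(per_device_grads, axis=0)
    return [mean_grad.copy() for _ in per_device_grads]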
def train():
    """
    Main script.

    Steps:

    * Parse command line arguments.
    * Specify a context for computation.
    * Initialize DataIterator for MNIST.
    * Construct a computation graph for training and validation.
    * Initialize a solver and set parameter variables to it.
    * Create monitor instances for saving and displaying training stats.
    * Training loop
      * Compute error rate for validation data (periodically)
      * Get a next minibatch.
      * Execute forwardprop on the training graph.
      * Compute training error.
      * Set parameter gradients to zero.
      * Execute backprop.
      * Solver updates parameters using the gradients computed by backprop.
    """
    args = get_args()

    # Get context.
    from nnabla.contrib.context import extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    # Create CNN network for both training and testing.
    mnist_cnn_prediction = mnist_lenet_prediction
    if args.net == 'resnet':
        mnist_cnn_prediction = mnist_resnet_prediction

    # TRAIN
    # Create input variables.
    image = nn.Variable([args.batch_size, 1, 28, 28])
    label = nn.Variable([args.batch_size, 1])
    # Create prediction graph.
    pred = mnist_cnn_prediction(image, test=False)
    pred.persistent = True
    # Create loss function.
    loss = F.mean(F.softmax_cross_entropy(pred, label))

    # TEST
    # Create input variables.
    vimage = nn.Variable([args.batch_size, 1, 28, 28])
    vlabel = nn.Variable([args.batch_size, 1])
    # Create prediction graph.
    vpred = mnist_cnn_prediction(vimage, test=True)

    # Create Solver.
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Create monitor.
    from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
    monitor = Monitor(args.monitor_path)
    monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
    monitor_err = MonitorSeries("Training error", monitor, interval=10)
    monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_verr = MonitorSeries("Test error", monitor, interval=10)

    # Initialize DataIterator for MNIST.
    data = data_iterator_mnist(args.batch_size, True)
    vdata = data_iterator_mnist(args.batch_size, False)

    # Training loop.
    for i in range(args.max_iter):
        if i % args.val_interval == 0:
            # Validation
            ve = 0.0
            for j in range(args.val_iter):
                vimage.d, vlabel.d = vdata.next()
                vpred.forward(clear_buffer=True)
                ve += categorical_error(vpred.d, vlabel.d)
            monitor_verr.add(i, ve / args.val_iter)
        if i % args.model_save_interval == 0:
            nn.save_parameters(os.path.join(
                args.model_save_path, 'params_%06d.h5' % i))
        # Training forward
        image.d, label.d = data.next()
        solver.zero_grad()
        loss.forward(clear_no_need_grad=True)
        loss.backward(clear_buffer=True)
        solver.weight_decay(args.weight_decay)
        solver.update()
        e = categorical_error(pred.d, label.d)
        monitor_loss.add(i, loss.d.copy())
        monitor_err.add(i, e)
        monitor_time.add(i)

    # Final validation after training.
    ve = 0.0
    for j in range(args.val_iter):
        vimage.d, vlabel.d = vdata.next()
        vpred.forward(clear_buffer=True)
        ve += categorical_error(vpred.d, vlabel.d)
    monitor_verr.add(i, ve / args.val_iter)

    parameter_file = os.path.join(
        args.model_save_path,
        '{}_params_{:06}.h5'.format(args.net, args.max_iter))
    nn.save_parameters(parameter_file)
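# A plausible definition of the `categorical_error` helper used throughout
# these loops (a hypothetical sketch; the repository ships its own version):
# the fraction of samples whose argmax prediction disagrees with the label.
import numpy as np

def categorical_error_sketch(pred, label):
    # pred: (batch, n_classes) scores; label: (batch, 1) integer labels.
    pred_label = pred.argmax(axis=1)
    return (pred_label != label.flat).mean()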
def main(args):
    # Settings
    device_id = args.device_id
    batch_size = 100
    batch_size_eval = 100
    n_l_train_data = 4000
    n_train_data = 50000
    n_cls = 10
    learning_rate = 1. * 1e-3
    n_epoch = 50
    act = F.relu
    iter_epoch = n_train_data // batch_size
    n_iter = n_epoch * iter_epoch
    extension_module = args.context
    n_images = args.n_images
    fname, _ = os.path.splitext(__file__)
    dpath = "./{}_images_{}".format(fname, int(time.time()))

    # Model
    batch_size, m, h, w = batch_size, 3, 32, 32
    ctx = extension_context(extension_module, device_id=device_id)
    x_u = nn.Variable((batch_size, m, h, w))
    pred = cnn_ae_model_001(ctx, x_u)
    loss_recon = recon_loss(ctx, pred, x_u)

    ## evaluate
    batch_size_eval, m, h, w = batch_size, 3, 32, 32
    x_eval = nn.Variable((batch_size_eval, m, h, w))
    pred_eval = cnn_ae_model_001(ctx, x_eval, test=True)

    # Solver
    with nn.context_scope(ctx):
        solver = S.Adam(alpha=learning_rate)
        solver.set_parameters(nn.get_parameters())

    # Dataset
    ## separate dataset
    home = os.environ.get("HOME")
    fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    separator = Separator(n_l_train_data)
    separator.separate_then_save(fpath)

    l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
    u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")

    # data reader
    data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
                                    batch_size=batch_size,
                                    n_cls=n_cls,
                                    da=True,
                                    shape=True)

    # Training loop
    print("# Training loop")
    epoch = 1
    st = time.time()
    acc_prev = 0.
    for i in range(n_iter):
        # Get data and set it to the variables
        x_u_data, _, _ = data_reader.get_u_train_batch()
        x_u.d = x_u_data

        # Train
        loss_recon.forward(clear_no_need_grad=True)
        solver.zero_grad()
        loss_recon.backward(clear_buffer=True)
        solver.update()

        # Evaluate
        if (i + 1) % iter_epoch == 0:
            # Get data and forward. The original forgot to feed the eval
            # input, so `pred_eval` reconstructed stale data.
            x_data, y_data = data_reader.get_test_batch()
            x_eval.d = x_data
            pred_eval.forward(clear_buffer=True)
            images = pred_eval.d

            # Save n images
            if not os.path.exists(dpath):
                os.makedirs(dpath)
            save_images(dpath, epoch, images[:n_images])
            fpath = os.path.join(dpath, "epoch_{:05d}.h5".format(epoch))
            nn.save_parameters(fpath)

            st = time.time()
            epoch += 1
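# One plausible definition of the `recon_loss` used above (an assumption for
# illustration, not the repository's exact code): the mean squared
# reconstruction error between the autoencoder output and its input, built
# inside the given context.
import nnabla as nn
import nnabla.functions as F

def recon_loss_sketch(ctx, pred, x):
    with nn.context_scope(ctx):
        loss = F.mean(F.squared_error(pred, x))
    return loss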
def main(args):
    # Settings
    device_id = args.device_id
    batch_size = args.batch_size
    batch_size_eval = args.batch_size_eval
    n_l_train_data = args.n_label
    n_train_data = 73257
    n_cls = 10
    learning_rate = 1. * 1e-3
    n_epoch = args.epoch
    act = F.relu
    iter_epoch = n_train_data // batch_size
    n_iter = n_epoch * iter_epoch
    extension_module = args.context

    # Model
    ## supervised
    batch_size, m, h, w = batch_size, 3, 32, 32
    ctx = extension_context(extension_module, device_id=device_id)
    x_l = nn.Variable((batch_size, m, h, w))
    y_l = nn.Variable((batch_size, 1))
    pred = cnn_model_003(ctx, x_l)
    loss_ce = ce_loss(ctx, pred, y_l)
    loss_er = er_loss(ctx, pred)
    loss_supervised = loss_ce + loss_er

    ## stochastic regularization
    x_u0 = nn.Variable((batch_size, m, h, w))
    x_u1 = nn.Variable((batch_size, m, h, w))
    pred_x_u0 = cnn_model_003(ctx, x_u0)
    pred_x_u1 = cnn_model_003(ctx, x_u1)
    loss_sr = sr_loss(ctx, pred_x_u0, pred_x_u1)
    loss_er0 = er_loss(ctx, pred_x_u0)
    loss_er1 = er_loss(ctx, pred_x_u1)
    loss_unsupervised = loss_sr + loss_er0 + loss_er1

    ## evaluate
    batch_size_eval, m, h, w = batch_size, 3, 32, 32
    x_eval = nn.Variable((batch_size_eval, m, h, w))
    pred_eval = cnn_model_003(ctx, x_eval, test=True)

    # Solver
    with nn.context_scope(ctx):
        solver = S.Adam(alpha=learning_rate)
        solver.set_parameters(nn.get_parameters())

    # Dataset
    ## separate dataset
    home = os.environ.get("HOME")
    fpath = os.path.join(home, "datasets/svhn/train.mat")
    separator = Separator(n_l_train_data)
    separator.separate_then_save(fpath)

    l_train_path = os.path.join(home, "datasets/svhn/l_train.mat")
    u_train_path = os.path.join(home, "datasets/svhn/u_train.mat")
    test_path = os.path.join(home, "datasets/svhn/test.mat")

    # data reader
    data_reader = SVHNDataReader(l_train_path, u_train_path, test_path,
                                 batch_size=batch_size,
                                 n_cls=n_cls,
                                 da=False,
                                 shape=True)

    # Training loop
    print("# Training loop")
    epoch = 1
    st = time.time()
    acc_prev = 0.
    ve_best = 1.
    save_path_prev = ""
    for i in range(n_iter):
        # Get data and set it to the variables
        x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
        x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
        x_l.d, _, y_l.d = x_l0_data, x_l1_data, y_l_data
        x_u0.d, x_u1.d = x_u0_data, x_u1_data

        # Train
        loss_supervised.forward(clear_no_need_grad=True)
        loss_unsupervised.forward(clear_no_need_grad=True)
        solver.zero_grad()
        loss_supervised.backward(clear_buffer=True)
        loss_unsupervised.backward(clear_buffer=True)
        solver.update()

        # Evaluate
        if (i + 1) % iter_epoch == 0:
            # Get data and set it to the variables
            x_data, y_data = data_reader.get_test_batch()

            # Evaluation loop
            ve = 0.
            iter_val = 0
            for k in range(0, len(x_data), batch_size_eval):
                x_eval.d = get_test_data(x_data, k, batch_size_eval)
                label = get_test_data(y_data, k, batch_size_eval)
                pred_eval.forward(clear_buffer=True)
                ve += categorical_error(pred_eval.d, label)
                iter_val += 1
            ve /= iter_val
            msg = "Epoch:{},ElapsedTime:{},Acc:{:02f}".format(
                epoch, time.time() - st, (1. - ve) * 100)
            print(msg)
            # Keep only the best checkpoint so far.
            if ve < ve_best:
                if not os.path.exists(args.model_save_path):
                    os.makedirs(args.model_save_path)
                if save_path_prev != "":
                    os.remove(save_path_prev)
                save_path = os.path.join(
                    args.model_save_path, 'params_%06d.h5' % epoch)
                nn.save_parameters(save_path)
                save_path_prev = save_path
                ve_best = ve
            st = time.time()
            epoch += 1
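# Hedged sketches of the two regularizers combined above (the repository
# defines its own `sr_loss`/`er_loss`; these only illustrate the usual
# formulations): stochastic regularization penalizes disagreement between two
# stochastic forward passes on the same unlabeled input, and entropy
# regularization pushes the softmax output toward confident predictions.
import nnabla as nn
import nnabla.functions as F

def sr_loss_sketch(ctx, pred0, pred1):
    with nn.context_scope(ctx):
        loss = F.mean(F.squared_error(F.softmax(pred0), F.softmax(pred1)))
    return loss

def er_loss_sketch(ctx, pred):
    with nn.context_scope(ctx):
        p = F.softmax(pred)
        # Mean entropy of the predictive distribution; minimizing it
        # encourages low-entropy (confident) outputs.
        loss = -F.mean(F.sum(p * F.log(p + 1e-8), axis=1))
    return loss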
def save(filename, contents, include_params=False):
    '''Save network definition, inference/training execution
    configurations etc.

    Args:
        filename (str): Filename to store information. The file
            extension is used to determine the saving file format.
            ``.nnp``: (Recommended) Creates a zip archive with nntxt (network
            definition etc.) and h5 (parameters).
            ``.nntxt``: Protobuf in text format.
            ``.protobuf``: Protobuf in binary format (unsafe in terms of
            backward compatibility).
        contents (dict): Information to store.
        include_params (bool): Includes parameters in a single file. This is
            ignored when the extension of filename is nnp.

    Example:
        The currently supported fields in ``contents`` are ``networks`` and
        ``executors``. The following example creates a two-input, two-output
        MLP, and saves the network structure and the initialized
        parameters.

        .. code-block:: python

            import nnabla as nn
            import nnabla.functions as F
            import nnabla.parametric_functions as PF

            batch_size = 16
            x0 = nn.Variable([batch_size, 100])
            x1 = nn.Variable([batch_size, 100])
            h1_0 = PF.affine(x0, 100, name='affine1_0')
            h1_1 = PF.affine(x1, 100, name='affine1_0')
            h1 = F.tanh(h1_0 + h1_1)
            h2 = F.tanh(PF.affine(h1, 50, name='affine2'))
            y0 = PF.affine(h2, 10, name='affiney_0')
            y1 = PF.affine(h2, 10, name='affiney_1')
            contents = {
                'networks': [
                    {'name': 'net1',
                     'batch_size': batch_size,
                     'outputs': {'y0': y0, 'y1': y1},
                     'names': {'x0': x0, 'x1': x1}}],
                'executors': [
                    {'name': 'runtime',
                     'network': 'net1',
                     'data': ['x0', 'x1'],
                     'output': ['y0', 'y1']}]}
            save('net.nnp', contents)
    '''
    _, ext = os.path.splitext(filename)
    if ext == '.nntxt' or ext == '.prototxt':
        logger.info("Saving {} as prototxt".format(filename))
        proto = create_proto(contents, include_params)
        with open(filename, 'w') as file:
            text_format.PrintMessage(proto, file)
    elif ext == '.protobuf':
        logger.info("Saving {} as protobuf".format(filename))
        proto = create_proto(contents, include_params)
        with open(filename, 'wb') as file:
            file.write(proto.SerializeToString())
    elif ext == '.nnp':
        logger.info("Saving {} as nnp".format(filename))
        tmpdir = tempfile.mkdtemp()
        try:
            save('{}/network.nntxt'.format(tmpdir),
                 contents, include_params=False)
            save_parameters('{}/parameter.protobuf'.format(tmpdir))
            with zipfile.ZipFile(filename, 'w') as nnp:
                nnp.write('{}/network.nntxt'.format(tmpdir), 'network.nntxt')
                nnp.write('{}/parameter.protobuf'.format(tmpdir),
                          'parameter.protobuf')
        finally:
            shutil.rmtree(tmpdir)
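# A quick round-trip check of a saved ``.nnp`` archive. This is a sketch that
# assumes the `net.nnp`/`net1` names from the docstring example above and
# nnabla's standard `NnpLoader` utility:
from nnabla.utils.nnp_graph import NnpLoader

nnp = NnpLoader('net.nnp')
net = nnp.get_network('net1', batch_size=16)
x0, x1 = net.inputs['x0'], net.inputs['x1']
y0 = net.outputs['y0']
x0.d.fill(0.0)  # Feed dummy data to both inputs.
x1.d.fill(0.0)
y0.forward()  # The loaded graph is ready for inference.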