def __init__(self, updater, stop_trigger=None, out='result', extensions=None):
    """Wire up the updater, stop trigger, reporter, timers, and extensions."""
    self.updater = updater
    self.stop_trigger = trigger_module.get_trigger(stop_trigger)
    self.observation = {}
    self.out = out

    # Observe every optimizer target (and all of its sub-links) so reported
    # values are collected under the optimizer's registered name.
    reporter = reporter_module.Reporter()
    for name, optimizer in six.iteritems(updater.get_all_optimizers()):
        reporter.add_observer(name, optimizer.target)
        reporter.add_observers(
            name, optimizer.target.namedlinks(skipself=True))
    self.reporter = reporter

    self._done = False
    self._extensions = collections.OrderedDict()
    # Wall-clock bookkeeping for elapsed-time snapshots.
    self._start_at = None
    self._snapshot_elapsed_time = 0.0
    self._final_elapsed_time = None

    updater.connect_trainer(self)
    for ext in (extensions if extensions is not None else []):
        self.extend(ext)
def __call__(self, trainer):
    """override method of extensions.Evaluator."""
    # Dedicated reporter; observer names optionally prefixed by self.name.
    reporter = reporter_module.Reporter()
    prefix = self.name + '/' if hasattr(self, 'name') else ''
    for name, target in six.iteritems(self._targets):
        observer_name = prefix + name
        reporter.add_observer(observer_name, target)
        reporter.add_observers(observer_name, target.namedlinks(skipself=True))

    with reporter:
        # Directory that receives the deconvolution images for this layer.
        self.deconv_image_dir = os.path.join(
            trainer.out, 'deconv_' + self.layer_name)
        if not os.path.exists(self.deconv_image_dir):
            os.makedirs(self.deconv_image_dir)
        result, locs, bounds = self.evaluate()
        if not os.path.exists(trainer.out):
            os.makedirs(trainer.out)

    reporter_module.report(result)
    return result
def __call__(self, trainer=None):
    """Executes the evaluator extension.

    Unlike usual extensions, this extension can be executed without passing
    a trainer object. This extension reports the performance on validation
    dataset using the :func:`~chainer.report` function. Thus, users can use
    this extension independently from any trainer by manually configuring
    a :class:`~chainer.Reporter` object.

    Args:
        trainer (~chainer.training.Trainer): Trainer object that invokes
            this extension. It can be omitted in case of calling this
            extension manually.

    Returns:
        dict: Result dictionary that contains mean statistics of values
            reported by the evaluation function.

    """
    # Collect evaluation values under a dedicated reporter, with observer
    # names optionally prefixed by this extension's name.
    reporter = reporter_module.Reporter()
    prefix = self.name + '/' if hasattr(self, 'name') else ''
    for name, target in six.iteritems(self._targets):
        observer_name = prefix + name
        reporter.add_observer(observer_name, target)
        reporter.add_observers(observer_name, target.namedlinks(skipself=True))

    with reporter:
        result = self.evaluate()

    reporter_module.report(result)
    return result
def __call__(self, trainer):
    """override method of extensions.Evaluator."""
    # Dedicated reporter; observer names optionally prefixed by self.name.
    reporter = reporter_module.Reporter()
    prefix = self.name + '/' if hasattr(self, 'name') else ''
    for name, target in six.iteritems(self._targets):
        observer_name = prefix + name
        reporter.add_observer(observer_name, target)
        reporter.add_observers(observer_name, target.namedlinks(skipself=True))

    with reporter:
        result, features = self.evaluate()
        if not os.path.exists(trainer.out):
            os.makedirs(trainer.out)
        # Persist the indices of the top-N activations for this layer, both
        # as a tab-separated text file and as a compressed npz archive.
        if self.top is not None:
            top_N_args = self.get_argmax_N(features, self.top)
            np.savetxt(
                os.path.join(trainer.out, 'top_' + self.layer_name + '.txt'),
                top_N_args, fmt='%d', delimiter='\t')
            np.savez(
                os.path.join(trainer.out, 'top_' + self.layer_name + '.npz'),
                **{self.layer_name: top_N_args})

    reporter_module.report(result)
    return result
def __init__(self, iterator, model, converter=convert.concat_examples,
             device=None, to_cpu=True):
    """Store iterators/models and register every model with a fresh reporter."""
    # Normalize both arguments to dictionaries keyed by 'main'.
    if isinstance(iterator, iterator_module.Iterator):
        iterator = {'main': iterator}
    self._iterators = iterator

    if not isinstance(model, dict):
        model = {'main': model}
    self._model = model

    self.observation = {}
    reporter = reporter_module.Reporter()
    for name, target in six.iteritems(self._model):
        reporter.add_observer(name, target)
        reporter.add_observers(name, target.namedlinks(skipself=True))
    self.reporter = reporter

    self.converter = converter
    self.device = device
    self.to_cpu = to_cpu
def setup(self):
    """Join the NCCL clique and prepare the model/reporter on this worker."""
    # The parent process sends the shared NCCL communicator id over the pipe.
    _, communicator_id = self.pipe.recv()
    self.comm = cupy.cuda.nccl.NcclCommunicator(
        self.n_devices, communicator_id, self.proc_id)
    self.model.to_gpu(self.device)
    self.reporter = reporter_module.Reporter()
    self.reporter.add_observer('main', self.model)
def setup(self):
    """Join the NCCL clique and register the model (and sub-links) as observers."""
    # The parent process sends the shared NCCL communicator id over the pipe.
    _, communicator_id = self.pipe.recv()
    self.comm = nccl.NcclCommunicator(
        self.n_devices, communicator_id, self.proc_id)
    self.model.to_gpu(self.device)
    self.reporter = reporter.Reporter()
    self.reporter.add_observer('main', self.model)
    self.reporter.add_observers(
        'main', self.model.namedlinks(skipself=True))
def run(self, prefix=''):
    """Evaluate under a dedicated reporter and return the result dictionary."""
    reporter = reporter_module.Reporter()
    for name, target in six.iteritems(self._targets):
        observer_name = prefix + name
        reporter.add_observer(observer_name, target)
        reporter.add_observers(observer_name, target.namedlinks(skipself=True))
    with reporter:
        return self.evaluate()
def __call__(self, trainer=None):
    """Run the margin evaluation deterministically and save it under trainer.out."""
    # Dedicated reporter so reported values do not leak into the trainer's one.
    reporter = reporter_module.Reporter()
    reporter.add_observer(self.name, self.target)
    # One flat `with` replaces the nested pyramid; contexts enter in the
    # same order as before.
    with reporter, \
            configuration.using_config('train', False), \
            configuration.using_config('lmt', True), \
            configuration.using_config('lmt-fc', True), \
            configuration.using_config('exact', True), \
            configuration.using_config('cudnn_deterministic', True):
        # NOTE(review): despite the trainer=None default, this line requires
        # a trainer instance — confirm callers always pass one.
        self.evaluate(os.path.join(trainer.out, 'margin.npy'))
def __init__(self, updater, stop_trigger=None, out='result'):
    """Set up the updater, stop trigger, reporter, and extension registry."""
    self.updater = updater
    self.stop_trigger = trigger_module.get_trigger(stop_trigger)
    self.observation = {}
    self.out = out

    # Observe every optimizer target (and all of its sub-links) so reported
    # values are collected under the optimizer's registered name.
    reporter = reporter_module.Reporter()
    for name, optimizer in six.iteritems(updater.get_all_optimizers()):
        reporter.add_observer(name, optimizer.target)
        reporter.add_observers(
            name, optimizer.target.namedlinks(skipself=True))
    self.reporter = reporter

    self._done = False
    self._extensions = collections.OrderedDict()
    updater.connect_trainer(self)
def __call__(self, trainer):
    """Evaluate with train-mode disabled and report the collected statistics."""
    # Dedicated reporter; observer names optionally prefixed by self.name.
    reporter = reporter_module.Reporter()
    prefix = self.name + '/' if hasattr(self, 'name') else ''
    for name, target in six.iteritems(self.targets):
        observer_name = prefix + name
        reporter.add_observer(observer_name, target)
        reporter.add_observers(observer_name, target.namedlinks(skipself=True))

    with reporter, configuration.using_config('train', False):
        result = self.evaluate(trainer)

    reporter_module.report(result)
    return result
def __call__(self, trainer):
    """override method of extensions.Evaluator."""
    # Dedicated reporter; observer names optionally prefixed by self.name.
    reporter = reporter_module.Reporter()
    prefix = self.name + '/' if hasattr(self, 'name') else ''
    for name, target in six.iteritems(self._targets):
        observer_name = prefix + name
        reporter.add_observer(observer_name, target)
        reporter.add_observers(observer_name, target.namedlinks(skipself=True))

    with reporter:
        result, predictions = self.evaluate()
        print(result)
        print(predictions)
        # Dump raw predictions next to the other training artifacts.
        self.save_predictions(os.path.join(trainer.out, 'pred.txt'),
                              predictions)

    reporter_module.report(result)
    return result
def __call__(self, trainer=None):
    # Compares the certified (LMT) perturbation bound `upper` against
    # Lipschitz-based bounds and observed adversarial perturbations, saves the
    # tuples as an .npy file, then asserts the expected inequalities hold.
    # set up a reporter
    reporter = reporter_module.Reporter()
    reporter.add_observer(self.name, self.target)
    with reporter:
        with configuration.using_config('cudnn_deterministic', True):
            with configuration.using_config('train', False):
                # LMT mode on: compute the certified upper bound.
                with configuration.using_config('lmt', True):
                    with configuration.using_config('lmt-fc', True):
                        with configuration.using_config('exact', True):
                            upper = self.calculate_upper_lipschitz()
                # LMT mode off: quantities measured on the plain network.
                # In nograd mode only the adversarial perturbation is computed.
                with configuration.using_config('lmt', False):
                    with configuration.using_config('lmt-fc', False):
                        with configuration.using_config('exact', False):
                            if not self.nograd:
                                loc = self.calculate_local_lipschitz()
                                glo = self.calculate_global_lipschitz()
                            adv = self.calculate_adversarial_perturbation()
    print('\revaluation end, saving result', flush=True)
    # Row layout: (upper, adv) in nograd mode, else (upper, glo, loc, adv).
    if self.nograd:
        values = np.array(list(zip(upper, adv)))
    else:
        values = np.array(list(zip(upper, glo, loc, adv)))
    # NOTE(review): 'inequlaity' looks like a typo in the output filename,
    # but downstream tooling may depend on it — do not rename casually.
    output_dir = self.output_dir or trainer.out
    filename = pathlib.Path(output_dir) / 'inequlaity_{0}.npy'.format(
        self.attack_name)
    np.save(str(filename), values)
    print('\rassertions start', flush=True)
    # Sanity checks: the certified bound must never exceed the Lipschitz
    # bounds or the observed adversarial perturbation.
    if self.nograd:
        for up, ad in zip(upper, adv):
            assert up <= ad
    else:
        for up, gl, lo, ad in zip(upper, glo, loc, adv):
            assert up <= gl
            assert gl <= lo
            assert up <= ad
def initialize(self, trainer):
    """Report once before training starts (model still has initial weights)."""
    initial_reporter = reporter_module.Reporter()
    # Route reported values into the trainer's observation dictionary.
    with initial_reporter.scope(trainer.observation):
        self.report(trainer)
def evaluate(self, snapshot_name=''):
    # Runs the text-detection evaluator over the whole data iterator,
    # accumulating reported metrics in a DictSummary; optionally renders and
    # saves per-sample ROI/bbox visualizations when --save_predictions is set.
    reporter = reporter_module.Reporter()
    current_device = chainer.get_device(self.args.gpu)
    summary = reporter_module.DictSummary()
    with chainer.using_device(current_device), reporter, configuration.using_config('train', False):
        for i, batch in enumerate(tqdm(self.data_iterator, total=len(self.data_loader) // self.args.batchsize)):
            observation = {}
            batch = concat_examples(batch, self.args.gpu)
            image_size = Size._make(batch['image'].shape[-2:])
            # Collect whatever the evaluator reports for this batch.
            with reporter_module.report_scope(observation):
                rois, bboxes, text_predictions, best_indices, chosen_prediction, scores = self.evaluator(return_predictions=True, **batch)
                summary.add(observation)
            if self.args.save_predictions:
                assert self.args.batchsize == 1, "if you want to save predictions, batchsize must be 1!"
                # assumes rois is a 6-D array: (batch, prediction, bbox, channel, h, w) — TODO confirm
                batch_size, num_predictions, num_bboxes, num_channels, height, width = rois.shape
                base_image = self.bbox_plotter.array_to_image(batch['image'][0, 0])
                # Render the word of the chosen prediction onto the base image.
                chosen_word = self.data_loader.decode_chars(cuda.to_cpu(text_predictions[0, chosen_prediction[0]].squeeze()))
                base_image = self.bbox_plotter.render_text(base_image, base_image, chosen_word, 0, bottom=True)
                rendered_images = [base_image]
                # Walk all predictions in lockstep: image crop, ROI, bbox,
                # decoded text and confidence score.
                iterator = zip(
                    F.separate(self.localizer.xp.stack(
                        [batch['image'][i, best_indices[i]] for i in range(self.args.batchsize)]), axis=1),
                    F.separate(rois, axis=1),
                    F.separate(bboxes, axis=1),
                    F.separate(text_predictions, axis=1),
                    F.separate(scores, axis=1)
                )
                for image, roi, bbox, prediction, score in iterator:
                    image = image.array
                    roi = roi.array
                    bbox = bbox.array
                    prediction = prediction.array
                    score = score.array
                    bbox = self.localizer.xp.reshape(bbox, (-1, 2, height, width))
                    predicted_words = self.data_loader.decode_chars(cuda.to_cpu(prediction.squeeze()))
                    # Append the score (4 decimal places) to the decoded word.
                    predicted_words = f"{predicted_words} {format(float(score[0]), '.4f')}"
                    # NOTE(review): `args` here is not `self.args` — presumably a
                    # module-level namespace; verify it is in scope at runtime.
                    if args.cut_bboxes:
                        cut_length = batch['num_words'][0] if 'num_words' in batch else len(predicted_words)
                        bbox = bbox[:cut_length, ...]
                        roi = roi[:cut_length, ...]
                    if args.render_no_boxes:
                        # Keep only the first box/ROI when box rendering is off.
                        bbox = bbox[:1]
                        roi = roi[:1]
                    rendered_images.append(
                        self.render_roi(
                            [], bbox, None, i, image, roi, predicted_words
                        )
                    )
                self.save_rois(rendered_images, i)
    self.save_eval_results(snapshot_name, summary)
# Evaluate the VAE on the test set (optionally on a bootstrap resample) and
# print the mean objective with its standard error.
# Fix: the original used Python 2 `print` statements, which are syntax errors
# under Python 3; every call below passes a single argument, so the function
# form behaves identically on both interpreters.
if args['--bootstrap']:
    print("Bootstrap resampling...")
    # Sample with replacement to build one bootstrap replicate of the test set.
    test_idx = np.random.choice(len(test), size=(len(test), ), replace=True)
    test_cur = test[test_idx, :]
    print(test_cur.shape)
else:
    test_cur = test
test_iter = chainer.iterators.SerialIterator(test_cur, batchsize,
                                             repeat=False, shuffle=False)
obs = {}
reprt = reporter.Reporter()
reprt.add_observer('main', vae)
with cupy.cuda.Device(gpu_id):
    start_time = timeit.default_timer()
    # Collect the evaluator's reported values into `obs`.
    with reprt.scope(obs):
        teval = extensions.Evaluator(test_iter, vae, device=gpu_id)
        res = teval.evaluate()
    runtime = timeit.default_timer() - start_time
    print("Evaluation took %.2fs" % runtime)
    print(res)
    # The objective is reported negated; standard error derives from the
    # reported variance over the test set.
    obj_mean = -res['main/obj']
    obj_sem = res['main/obj_var']
    obj_sem = math.sqrt(obj_sem / len(test))
    print("%.8f +/- %.8f # logp(%s) %d" % (obj_mean, obj_sem, vae_type, zcount))