def __init__(self, X, Y, layers, options):
    self.X = X
    self.Y = Y
    self.options = options
    self._validate_options()
    self.layers = layers
    self.params = self._init_params()
    self.loss = losses.get_loss_function(self.options['loss'])
def get_step_fn(model, optimizer, device, loss_name, loss_params=None, training=True):
    """Creates a step function for an Engine."""
    # Use None instead of a mutable {} default argument
    loss_fn = losses.get_loss_function(loss_name, **(loss_params or {}))

    def step_fn(engine, data_batch):
        # Inputs and sizes
        images, labels, names, _, _ = data_batch
        n_samples, n_labels = labels.size()

        # Move tensors to GPU
        images = images.to(device)
        labels = labels.to(device)

        # Enable training mode and record gradients only when training
        model.train(training)
        torch.set_grad_enabled(training)

        # Zero the parameter gradients
        optimizer.zero_grad()

        # Forward pass: outputs plus unused embedding and segments (bboxes)
        outputs, embedding_unused, segments_unused = model(images)

        # NaN guard on predictions (disabled):
        # if bool(torch.isnan(outputs).any().item()):
        #     warnings.warn("Nans found in prediction")
        #     outputs[outputs != outputs] = 0  # Set NaNs to 0

        # Compute classification loss
        loss = loss_fn(outputs, labels)

        # NaN guard on the loss (disabled):
        # if bool(torch.isnan(loss).any().item()):
        #     warnings.warn("Nans found in loss: {}".format(loss))
        #     loss[loss != loss] = 0

        batch_loss = loss.item()

        if training:
            loss.backward()
            optimizer.step()

        return batch_loss, outputs, labels

    return step_fn
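# A minimal wiring sketch, assuming `Engine` is pytorch-ignite's Engine (its
# process function takes (engine, batch), matching step_fn above). The loss
# name 'bce' and the data loader below are illustrative placeholders.
from ignite.engine import Engine

trainer = Engine(get_step_fn(model, optimizer, device, loss_name='bce', training=True))
evaluator = Engine(get_step_fn(model, optimizer, device, loss_name='bce', training=False))
# trainer.run(train_loader, max_epochs=10)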
def _init_train_components(self, reinitialise=False):
    self.val_metrics = {
        'loss': metrics.Loss(get_loss_function(self.loss_cfg)),
        'segment_metrics': SegmentationMetrics(num_classes=self.data_loaders.num_classes,
                                               threshold=self.config.binarize_threshold)
    }

    self.model_cfg.network_params.input_channels = self.data_loaders.input_channels
    self.model_cfg.network_params.num_classes = self.data_loaders.num_classes
    self.model_cfg.network_params.image_size = self.data_loaders.image_size

    self.model = self._init_model(not reinitialise)
    self.criterion = self._init_criterion(not reinitialise)
    self.optimizer = self._init_optimizer(not reinitialise)
    self.lr_scheduler = self._init_lr_scheduler(self.optimizer)

    self.trainer, self.evaluator = self._init_engines()
    self._init_handlers()
def _init_train_components(self):
    self.metrics = {
        'loss': metrics.Loss(
            GaussianVariationalInference(get_loss_function(self.train_cfg.loss_fn))),
        'segment_metrics': SegmentationMetrics(num_classes=self.data_loaders.num_classes,
                                               threshold=self.config.binarize_threshold)
    }

    self.model_cfg.network_params.input_channels = self.data_loaders.input_channels
    self.model_cfg.network_params.num_classes = self.data_loaders.num_classes

    self.model = self._init_model()
    self.optimizer = self._init_optimizer()
    self.vi = GaussianVariationalInference(self._init_criterion())
    self.lr_scheduler = self._init_lr_scheduler(self.optimizer)

    self.trainer, self.evaluator = self._init_engines()
    self._init_handlers()
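# A hedged sketch of what a GaussianVariationalInference wrapper typically
# computes: the negative ELBO, i.e. the wrapped data-fit criterion plus a KL
# penalty on the weight posterior. The class is not defined in these excerpts,
# so the name and the (outputs, labels, kl, beta) signature are assumptions.
import torch.nn as nn

class GaussianVariationalInferenceSketch(nn.Module):
    def __init__(self, loss_fn):
        super().__init__()
        self.loss_fn = loss_fn  # e.g. the criterion returned by get_loss_function

    def forward(self, outputs, labels, kl, beta=1.0):
        # negative ELBO: likelihood term plus scaled KL divergence
        return self.loss_fn(outputs, labels) + beta * kl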
    noise=args.noise, std=1.0, scaling=args.scale, leak=args.decay)
decoder_params = dict(decoder=args.decoder, device=device,
                      scaling=args.steps * args.scale)
loss_fn = losses.get_loss_function(
    loss=args.loss, verbose=args.verbose, spiking=True,
    params=dict(beta1=args.beta1, beta2=args.beta2,
                lambd1=args.lambd1, lambd2=args.lambd2,
                l1=args.l1, l2=args.l2,
                example2=args.example2, neuron2=args.neuron2, neuron1=args.neuron1,
                layers=(len(args.conv_channels) + len(args.hidden_sizes)) * 2))
net = SpikingConvolutionalAutoencoderNew(
    input_width=width, input_height=height, input_channels=channels,
    conv2d_channels=args.conv_channels, hidden_sizes=args.hidden_sizes,
    loss=loss_fn, optimizer=args.optimizer,
def _init_criterion(self, init=True):
    criterion = get_loss_function(self.loss_cfg).to(device=self.device)
    if init:
        self.main_logger.info(f'Using loss function {criterion}')
    return criterion
def compile(self, loss, optimizer, metric):
    self.loss = losses.get_loss_function(loss)
    self.optimizer = optimizer
    self.metric = metrics.get_metric_function(metric)
    self.metric_name = metric
    self.layers[-1].is_last_layer = True
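# Every snippet above resolves its criterion by name through get_loss_function.
# A minimal sketch of such a registry-style factory, assuming a plain
# name-to-class mapping (the real modules likely register more losses,
# including the spiking and variational variants used above):
import torch.nn as nn

_LOSSES = {
    'bce': nn.BCEWithLogitsLoss,
    'cross_entropy': nn.CrossEntropyLoss,
    'mse': nn.MSELoss,
}

def get_loss_function_sketch(name, **params):
    try:
        return _LOSSES[name](**params)
    except KeyError:
        raise ValueError(f'Unknown loss function: {name!r}')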