def T(self, T):
    """Setter: validates and stores `T`.

    Raises:
        e.TypeError: If `T` is not a float or integer.
        e.ValueError: If `T` lies outside [0, 1].
    """
    # Only numeric candidates are accepted.
    if not isinstance(T, (int, float)):
        raise e.TypeError('`T` should be a float or integer')
    # Value must sit inside the closed unit interval.
    if T > 1 or T < 0:
        raise e.ValueError('`T` should be between 0 and 1')
    self._T = T
def p(self, p):
    """Setter: validates and stores `p`.

    Raises:
        e.TypeError: If `p` is not a float or integer.
        e.ValueError: If `p` lies outside [0, 1].
    """
    # Only numeric candidates are accepted.
    if not isinstance(p, (int, float)):
        raise e.TypeError('`p` should be a float or integer')
    # Value must sit inside the closed unit interval.
    if p > 1 or p < 0:
        raise e.ValueError('`p` should be between 0 and 1')
    self._p = p
def steps(self, steps):
    """Setter: validates and stores `steps`.

    Raises:
        e.TypeError: If `steps` is not an integer.
        e.ValueError: If `steps` is not strictly positive.
    """
    # Must be an integer count.
    if not isinstance(steps, int):
        raise e.TypeError('`steps` should be an integer')
    # A non-positive number of steps makes no sense.
    if steps <= 0:
        raise e.ValueError('`steps` should be > 0')
    self._steps = steps
def momentum(self, momentum):
    """Setter: validates and stores `momentum`.

    Raises:
        e.TypeError: If `momentum` is not a float or integer.
        e.ValueError: If `momentum` is negative.
    """
    # Only numeric candidates are accepted.
    if not isinstance(momentum, (int, float)):
        raise e.TypeError('`momentum` should be a float or integer')
    # Negative momentum is rejected.
    if momentum < 0:
        raise e.ValueError('`momentum` should be >= 0')
    self._momentum = momentum
def n_hidden(self, n_hidden):
    """Setter: validates and stores `n_hidden`.

    Raises:
        e.TypeError: If `n_hidden` is not an integer.
        e.ValueError: If `n_hidden` is not strictly positive.
    """
    # Must be an integer count of hidden units.
    if not isinstance(n_hidden, int):
        raise e.TypeError('`n_hidden` should be an integer')
    # At least one unit is required.
    if n_hidden <= 0:
        raise e.ValueError('`n_hidden` should be > 0')
    self._n_hidden = n_hidden
def T(self, T):
    """Setter: validates and stores the per-layer `T` tuple.

    Raises:
        e.TypeError: If `T` is not a tuple.
        e.SizeError: If `T` does not hold one entry per layer.
    """
    # A tuple with one entry per layer is expected.
    if not isinstance(T, tuple):
        raise e.TypeError('`T` should be a tuple')
    # Size must agree with the number of layers.
    if len(T) != self.n_layers:
        raise e.SizeError(f'`T` should have size equal as {self.n_layers}')
    self._T = T
def decay(self, decay):
    """Setter: validates and stores `decay`.

    Raises:
        e.TypeError: If `decay` is not a float or integer.
        e.ValueError: If `decay` is negative.
    """
    # Only numeric candidates are accepted.
    if not isinstance(decay, (int, float)):
        raise e.TypeError('`decay` should be a float or integer')
    # Negative decay is rejected.
    if decay < 0:
        raise e.ValueError('`decay` should be >= 0')
    self._decay = decay
def n_channels(self, n_channels):
    """Setter: validates and stores `n_channels`.

    Raises:
        e.TypeError: If `n_channels` is not an integer.
        e.ValueError: If `n_channels` is not strictly positive.
    """
    # Must be an integer channel count.
    if not isinstance(n_channels, int):
        raise e.TypeError('`n_channels` should be an integer')
    # At least one channel is required.
    if n_channels <= 0:
        raise e.ValueError('`n_channels` should be > 0')
    self._n_channels = n_channels
def n_layers(self, n_layers):
    """Setter: validates and stores `n_layers`.

    Raises:
        e.TypeError: If `n_layers` is not an integer.
        e.ValueError: If `n_layers` is not strictly positive.
    """
    # Must be an integer layer count.
    if not isinstance(n_layers, int):
        raise e.TypeError('`n_layers` should be an integer')
    # At least one layer is required.
    if n_layers <= 0:
        raise e.ValueError('`n_layers` should be > 0')
    self._n_layers = n_layers
def n_visible(self, n_visible):
    """Setter: validates and stores `n_visible`.

    Raises:
        e.TypeError: If `n_visible` is not an integer.
        e.ValueError: If `n_visible` is not strictly positive.
    """
    # Must be an integer count of visible units.
    if not isinstance(n_visible, int):
        raise e.TypeError('`n_visible` should be an integer')
    # At least one unit is required.
    if n_visible <= 0:
        raise e.ValueError('`n_visible` should be > 0')
    self._n_visible = n_visible
def lr(self, lr):
    """Setter: validates and stores `lr`.

    Raises:
        e.TypeError: If `lr` is not a float or integer.
        e.ValueError: If `lr` is negative.
    """
    # Only numeric candidates are accepted.
    if not isinstance(lr, (int, float)):
        raise e.TypeError('`lr` should be a float or integer')
    # Negative learning rates are rejected.
    if lr < 0:
        raise e.ValueError('`lr` should be >= 0')
    self._lr = lr
def test_type_error():
    """Checks that the project's TypeError can be raised and caught."""
    err = exception.TypeError('error')
    try:
        raise err
    except exception.TypeError:
        # Expected: the custom exception is catchable by its own type.
        pass
def decay(self, decay):
    """Setter: validates and stores the per-layer `decay` tuple.

    Raises:
        e.TypeError: If `decay` is not a tuple.
        e.SizeError: If `decay` does not hold one entry per layer.
    """
    # A tuple with one entry per layer is expected.
    if not isinstance(decay, tuple):
        raise e.TypeError('`decay` should be a tuple')
    # Size must agree with the number of layers.
    if len(decay) != self.n_layers:
        raise e.SizeError(f'`decay` should have size equal as {self.n_layers}')
    self._decay = decay
def momentum(self, momentum):
    """Setter: validates and stores the per-layer `momentum` tuple.

    Raises:
        e.TypeError: If `momentum` is not a tuple.
        e.SizeError: If `momentum` does not hold one entry per layer.
    """
    # A tuple with one entry per layer is expected.
    if not isinstance(momentum, tuple):
        raise e.TypeError('`momentum` should be a tuple')
    # Size must agree with the number of layers.
    if len(momentum) != self.n_layers:
        raise e.SizeError(f'`momentum` should have size equal as {self.n_layers}')
    self._momentum = momentum
def steps(self, steps):
    """Setter: validates and stores the per-layer `steps` tuple.

    Raises:
        e.TypeError: If `steps` is not a tuple.
        e.SizeError: If `steps` does not hold one entry per layer.
    """
    # A tuple with one entry per layer is expected.
    if not isinstance(steps, tuple):
        raise e.TypeError('`steps` should be a tuple')
    # Size must agree with the number of layers.
    if len(steps) != self.n_layers:
        raise e.SizeError(f'`steps` should have size equal as {self.n_layers}')
    self._steps = steps
def lr(self, lr):
    """Setter: validates and stores the per-layer `lr` tuple.

    Raises:
        e.TypeError: If `lr` is not a tuple.
        e.SizeError: If `lr` does not hold one entry per layer.
    """
    # A tuple with one entry per layer is expected.
    if not isinstance(lr, tuple):
        raise e.TypeError('`lr` should be a tuple')
    # Size must agree with the number of layers.
    if len(lr) != self.n_layers:
        raise e.SizeError(f'`lr` should have size equal as {self.n_layers}')
    self._lr = lr
def filter_shape(self, filter_shape):
    """Setter: validates and stores the filter shape.

    Args:
        filter_shape: Candidate shape tuple; both entries must be strictly
            smaller than the corresponding `visible_shape` entries.
            NOTE(review): assumes `self.visible_shape` has at least two
            entries — confirm against the `visible_shape` setter's callers.

    Raises:
        e.TypeError: If `filter_shape` is not a tuple.
        e.SizeError: If `filter_shape` does not hold exactly two entries.
        e.ValueError: If any entry is not smaller than `visible_shape`.
    """
    if not isinstance(filter_shape, tuple):
        raise e.TypeError('`filter_shape` should be a tuple')
    # Guard the indexing below: a malformed tuple would otherwise surface
    # as a bare IndexError instead of a descriptive project exception.
    if len(filter_shape) != 2:
        raise e.SizeError('`filter_shape` should have size equal as 2')
    if (filter_shape[0] >= self.visible_shape[0]) or (
            filter_shape[1] >= self.visible_shape[1]):
        raise e.ValueError('`filter_shape` should be smaller than `visible_shape`')
    self._filter_shape = filter_shape
def optimizer(self, optimizer):
    """Setter: validates and stores `optimizer`.

    Raises:
        e.TypeError: If `optimizer` is not an accepted optimizer type.
    """
    # Only these optimizer types are accepted here.
    accepted = (opt.SGD, opt.Adam)
    if not isinstance(optimizer, accepted):
        raise e.TypeError('`optimizer` should be a valid Pytorch optimizer')
    self._optimizer = optimizer
def n_hidden(self, n_hidden):
    """Setter: validates and stores `n_hidden`.

    Args:
        n_hidden: Candidate value; must be a list or tuple.

    Raises:
        e.TypeError: If `n_hidden` is neither a list nor a tuple.
    """
    if not isinstance(n_hidden, (list, tuple)):
        # Message fixed to match the check, which also accepts lists.
        raise e.TypeError('`n_hidden` should be a list or tuple')
    self._n_hidden = n_hidden
def M(self, M):
    """Setter: validates and stores `M`.

    Raises:
        ex.TypeError: If `M` is not a PyTorch tensor.
    """
    # Only tensors are accepted.
    if not isinstance(M, torch.Tensor):
        raise ex.TypeError('`M` should be a PyTorch tensor')
    self._M = M
def visible_shape(self, visible_shape):
    """Setter: validates and stores `visible_shape`.

    Raises:
        e.TypeError: If `visible_shape` is neither a list nor a tuple.
    """
    # Both sequence flavours are accepted.
    if not isinstance(visible_shape, (list, tuple)):
        raise e.TypeError('`visible_shape` should be a list or tuple')
    self._visible_shape = visible_shape
def b(self, b):
    """Setter: validates and stores `b`.

    Raises:
        e.TypeError: If `b` is not a PyTorch parameter.
    """
    # Only learnable parameters are accepted.
    if not isinstance(b, nn.Parameter):
        raise e.TypeError('`b` should be a PyTorch parameter')
    self._b = b
def optimizer(self, optimizer):
    """Setter: validates and stores `optimizer`.

    Raises:
        e.TypeError: If `optimizer` is not an SGD optimizer.
    """
    # Only plain SGD is accepted here.
    if not isinstance(optimizer, opt.SGD):
        raise e.TypeError('`optimizer` should be a SGD')
    self._optimizer = optimizer
def a(self, a):
    """Setter: validates and stores `a`.

    Raises:
        e.TypeError: If `a` is not a PyTorch parameter.
    """
    # Only learnable parameters are accepted.
    if not isinstance(a, nn.Parameter):
        raise e.TypeError('`a` should be a PyTorch parameter')
    self._a = a
def plot(*args, labels=None, title='', subtitle='', grid=True, legend=True):
    """Plots the convergence graph of desired variables.

    Essentially, each variable is a list or numpy array with size equals
    to (epochs x 1).

    Args:
        labels (list): Labels to be applied for each plot in legend.
        title (str): The title of the plot.
        subtitle (str): The subtitle of the plot.
        grid (bool): If grid should be used or not.
        legend (bool): If legend should be displayed or not.
    """
    # One tick per epoch, starting at 1
    x_ticks = np.arange(1, len(args[0]) + 1)

    # Single-axis figure with axis labels
    _, axis = plt.subplots(figsize=(7, 5))
    axis.set(xlabel='epoch', ylabel='value')

    # Constrain ticks and limits to the epoch range
    axis.set_xticks(x_ticks)
    axis.set_xlim(xmin=1, xmax=x_ticks[-1])

    # Title on the left, subtitle on the right
    axis.set_title(title, loc='left', fontsize=14)
    axis.set_title(subtitle, loc='right', fontsize=8, color='grey')

    if grid:
        axis.grid()

    if labels:
        # User-supplied labels must be a list matching the variables
        if not isinstance(labels, list):
            raise e.TypeError('`labels` should be a list')
        if len(labels) != len(args):
            raise e.SizeError('`args` and `labels` should have the same size')
    else:
        # Fall back to generic per-variable identifiers
        labels = [f'variable_{i}' for i in range(len(args))]

    # One curve per variable
    for variable, label in zip(args, labels):
        axis.plot(x_ticks, variable, label=label)

    if legend:
        axis.legend()

    plt.show()
def data(self, data):
    """Setter: validates and stores `data`.

    Raises:
        e.TypeError: If `data` is neither a numpy array nor a tensor.
    """
    # Either container type is accepted.
    if not isinstance(data, (np.ndarray, torch.Tensor)):
        raise e.TypeError('`data` should be a numpy array or a tensor')
    self._data = data
def targets(self, targets):
    """Setter: validates and stores `targets`.

    Raises:
        e.TypeError: If `targets` is not a numpy array.
    """
    # Only numpy arrays are accepted.
    if not isinstance(targets, np.ndarray):
        raise e.TypeError('`targets` should be a numpy array')
    self._targets = targets
def transform(self, transform):
    """Setter: validates and stores `transform`.

    Raises:
        e.TypeError: If `transform` is neither callable nor None.
    """
    # Either a callable or an explicit None is accepted.
    is_valid = hasattr(transform, '__call__') or transform is None
    if not is_valid:
        raise e.TypeError('`transform` should be a callable or None')
    self._transform = transform
def models(self, models):
    """Setter: validates and stores `models`.

    Raises:
        e.TypeError: If `models` is not a list.
    """
    # Only a list container is accepted.
    if not isinstance(models, list):
        raise e.TypeError('`models` should be a list')
    self._models = models
def W(self, W):
    """Setter: validates and stores `W`.

    Raises:
        e.TypeError: If `W` is not a PyTorch parameter.
    """
    # Only learnable parameters are accepted.
    if not isinstance(W, nn.Parameter):
        raise e.TypeError('`W` should be a PyTorch parameter')
    self._W = W