Example 1
    def __init__(self, n_inputs, layersizes=None, max_iter=100,
                 learning_rate=0.05,
                 activation="ReLU", output="softmax", loss="crossentropy",
                 l2_penalty=0.0, l1_penalty=0.0,
                 dropout=0.0, input_dropout=0.0, batch_size=64,
                 momentum=0.0, fraction_validation_set=0.15,
                 convergence_iter_tol=30, early_stopping=True,
                 shuffle_data=True,
                 learning_rate_schedule="constant", learning_rate_decay=None,
                 snapshot_interval=None,
                 verbose=False, random_state=None, dtype=np.float32,
                 activationparams=None,
                 layerclass="default", logger=None,
                 output_weights=None):
        self.batch_size = batch_size
        self.max_iter = max_iter
        # we need a seed that is random even in multiple threads
        # started very close to each other in time
        if random_state is None:
            random_state = op.rand_int()

        self.n_inputs = n_inputs
        self.dropout = dropout
        self.verbose = verbose
        self.random_state = random_state
        self.l2_penalty = l2_penalty
        self.l1_penalty = l1_penalty
        self.shuffle_data = shuffle_data
        self.learning_rate = learning_rate # used for adaptive schedule
        self.momentum = momentum
        self.learning_rate_schedule = learning_rate_schedule
        self.learning_rate_decay = learning_rate_decay
        self.layersizes = layersizes
        self.input_dropout = input_dropout
        self.activation = activation.lower()
        self.loss = loss.lower()
        self.output = output.lower()
        self.dtype = dtype
        self.layerclass = layerclass
        self.logger = logger
        self.output_weights = output_weights
        self.convergence_iter_tol = convergence_iter_tol
        self.fraction_validation_set = fraction_validation_set
        self.early_stopping = early_stopping
        self.activationparams = activationparams
        self.snapshot_interval = snapshot_interval
        if learning_rate_schedule not in ('constant', 'adaptive', 'simple', 'invscale', 'linear', 'power'):
            raise ValueError("Unknown learning rate schedule.")
        self.layers = []
        if self.layersizes is not None:
            self.setup_layers(self.activationparams)

        self.ignore_last_minibatch_if_smaller = False
        op.set_seed(self.random_state)
        self.reset()
        self._epoch_callbacks = []
        self._minibatch_callbacks = []
        # use self.__class__ instead of NeuralNet in case
        # subclasses implement track_progress themselves
        self._epoch_callbacks.append(self.__class__.track_progress)
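
Usage note (a minimal sketch, not part of the original examples): the enclosing class is named NeuralNet in Example 4, so constructing a small network might look like the following. The layer sizes and seed are made-up values, and np refers to numpy as in the snippets themselves.

import numpy as np

net = NeuralNet(n_inputs=784,
                layersizes=(256, 128, 10),
                learning_rate=0.05,
                dropout=0.5,
                batch_size=64,
                early_stopping=True,
                random_state=42,
                dtype=np.float32)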
Example 2
    def reset(self, random_state=None):
        self.statistics = pd.DataFrame(dtype=np.float64,
            columns=('train_loss', 'val_loss', 'val_score', 'time'))
        self.statistics.index.name = "epoch"
        self.current_epoch = 0  # number of iterations
        self.update_count = 0
        self._best_params = None
        self._best_score_va = np.finfo(np.float32).min
        self._no_improvement_since = 0
        if random_state is not None:
            # use the newly supplied seed rather than the stored one
            self.random_state = random_state
            op.set_seed(self.random_state)
        if len(self.layers) > 0:
            self.layers[0].setup(self.n_inputs, batch_size=self.batch_size)
        for l1, l2 in zip(self.layers[:-1], self.layers[1:]):
            l2.setup(l1.size, batch_size=self.batch_size)
Example 3
    def reset(self, random_state=None):
        self.statistics = pd.DataFrame(dtype=np.float64,
                                       columns=('train_loss', 'val_loss',
                                                'val_score', 'time'))
        self.statistics.index.name = "epoch"
        self.current_epoch = 0  # number of iterations
        self.update_count = 0
        self._best_params = None
        self._best_score_va = np.finfo(np.float32).min
        self._no_improvement_since = 0
        if random_state is not None:
            # use the newly supplied seed rather than the stored one
            self.random_state = random_state
            op.set_seed(self.random_state)
        if len(self.layers) > 0:
            self.layers[0].setup(self.n_inputs, batch_size=self.batch_size)
        for l1, l2 in zip(self.layers[:-1], self.layers[1:]):
            l2.setup(l1.size, batch_size=self.batch_size)
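
A brief usage sketch for reset, assuming a fully constructed instance named net (as in the sketch after Example 1): calling it between training runs empties the statistics DataFrame, zeroes the epoch and update counters, and, when a random_state is passed, reseeds the op backend before each layer is wired to its predecessor's size.

net.reset(random_state=123)
assert net.current_epoch == 0 and net.update_count == 0
assert len(net.statistics) == 0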
Example 4
    def __init__(self,
                 n_inputs,
                 layersizes=None,
                 max_iter=100,
                 learning_rate=0.05,
                 activation="ReLU",
                 output="softmax",
                 loss="crossentropy",
                 l2_penalty=0.0,
                 l1_penalty=0.0,
                 dropout=0.0,
                 input_dropout=0.0,
                 batch_size=64,
                 momentum=0.0,
                 fraction_validation_set=0.15,
                 convergence_iter_tol=30,
                 early_stopping=True,
                 shuffle_data=True,
                 learning_rate_schedule="constant",
                 learning_rate_decay=None,
                 snapshot_interval=None,
                 verbose=False,
                 random_state=None,
                 dtype=np.float32,
                 activationparams=None,
                 layerclass="default",
                 logger=None,
                 output_weights=None):
        self.batch_size = batch_size
        self.max_iter = max_iter
        # we need a seed that is random even in multiple threads
        # started very close to each other in time
        if random_state is None:
            random_state = op.rand_int()

        self.n_inputs = n_inputs
        self.dropout = dropout
        self.verbose = verbose
        self.random_state = random_state
        self.l2_penalty = l2_penalty
        self.l1_penalty = l1_penalty
        self.shuffle_data = shuffle_data
        self.learning_rate = learning_rate  # used for adaptive schedule
        self.momentum = momentum
        self.learning_rate_schedule = learning_rate_schedule
        self.learning_rate_decay = learning_rate_decay
        self.layersizes = layersizes
        self.input_dropout = input_dropout
        self.activation = activation.lower()
        self.loss = loss.lower()
        self.output = output.lower()
        self.dtype = dtype
        self.layerclass = layerclass
        self.logger = logger
        self.output_weights = output_weights
        self.convergence_iter_tol = convergence_iter_tol
        self.fraction_validation_set = fraction_validation_set
        self.early_stopping = early_stopping
        self.activationparams = activationparams
        self.snapshot_interval = snapshot_interval
        if learning_rate_schedule not in ('constant', 'adaptive', 'simple',
                                          'invscale', 'linear', 'power'):
            raise ValueError("Unknown learning rate schedule.")
        self.layers = []
        if self.layersizes is not None:
            self.setup_layers(self.activationparams)

        op.set_seed(self.random_state)
        self.reset()
        self._epoch_callbacks = []
        self._minibatch_callbacks = []
        self._epoch_callbacks.append(NeuralNet.track_progress)
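
Since the constructor ends by appending NeuralNet.track_progress to _epoch_callbacks, additional per-epoch hooks can presumably be registered the same way. The sketch below assumes a callback is invoked with the net instance as its only argument, mirroring how the unbound track_progress method would be called; that signature is not confirmed by these examples.

def log_epoch(net):
    # hypothetical hook: print the newest row of the statistics DataFrame
    if len(net.statistics) > 0:
        print(net.statistics.tail(1))

net._epoch_callbacks.append(log_epoch)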