Example #1
    def objective_function_test(self, x, **kwargs):
        start_time = time.time()

        rng = kwargs.get("rng", None)
        self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)

        # Concatenate training and validation datasets
        if isinstance(self.train, sparse.csr_matrix) or isinstance(self.valid, sparse.csr_matrix):
            train = sparse.vstack((self.train, self.valid))
        else:
            train = np.concatenate((self.train, self.valid))

        train_targets = np.concatenate((self.train_targets, self.valid_targets))

        # Transform hyperparameters to linear scale
        C = np.exp(float(x[0]))
        gamma = np.exp(float(x[1]))

        # Train support vector machine
        clf = svm.SVC(gamma=gamma, C=C, random_state=self.rng)
        clf.fit(train, train_targets)

        # Compute test error
        y = 1 - clf.score(self.test, self.test_targets)
        c = time.time() - start_time

        return {'function_value': y, "cost": c}
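The pattern in Example #1 (log-space hyperparameters, fit on train+valid, score on test, report error and wall-clock cost) can be reproduced standalone. Below is a minimal sketch assuming scikit-learn and a toy dataset; make_classification, the split sizes, and the chosen x values are illustrative assumptions, not part of the benchmark above.

import time

import numpy as np
from sklearn import svm
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split


def evaluate_svm(x):
    # x holds natural-log values for C and gamma, as in Example #1
    start_time = time.time()

    # Toy data standing in for the benchmark's train/valid/test splits
    X, y = make_classification(n_samples=500, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=0)

    # Transform hyperparameters from log to linear scale
    C = np.exp(float(x[0]))
    gamma = np.exp(float(x[1]))

    clf = svm.SVC(C=C, gamma=gamma, random_state=0)
    clf.fit(X_train, y_train)

    # 1 - accuracy as the error, elapsed wall-clock time as the cost
    return {'function_value': 1 - clf.score(X_test, y_test),
            "cost": time.time() - start_time}


print(evaluate_svm([0.0, -3.0]))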
Example #2
    def objective_function_test(self, x, steps=1, **kwargs):

        rng = kwargs.get("rng", None)
        self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)
        # if rng was not None, set rng for lasagne
        if rng is not None:
            lasagne.random.set_rng(self.rng)

        num_epochs = int(1 + (self.max_num_epochs - 1) * steps)

        train = np.concatenate((self.train, self.valid))
        train_targets = np.concatenate(
            (self.train_targets, self.valid_targets))
        lc_curve, cost_curve, train_loss, valid_loss = \
            self.train_net(train, train_targets,
                           self.test, self.test_targets,
                           init_learning_rate=np.power(10., x[0]),
                           batch_size=int(x[1]),
                           n_units_1=int(np.power(2, x[2])),
                           n_units_2=int(np.power(2, x[3])),
                           n_units_3=int(np.power(2, x[4])),
                           num_epochs=num_epochs)
        y = lc_curve[-1]
        c = cost_curve[-1]
        return {
            'function_value': y,
            "cost": c,
            "train_loss": train_loss,
            "valid_loss": valid_loss,
            "learning_curve": lc_curve,
            "learning_curve_cost": cost_curve
        }
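In this example (and the network examples that follow), steps is a fidelity in (0, 1] that is mapped to an epoch budget. A tiny standalone sketch of that mapping; the function name is an illustrative assumption:

def steps_to_num_epochs(steps, max_num_epochs):
    # Map a fidelity steps in (0, 1] to an epoch budget in [1, max_num_epochs],
    # mirroring num_epochs = int(1 + (max_num_epochs - 1) * steps) above.
    return int(1 + (max_num_epochs - 1) * steps)


assert steps_to_num_epochs(1.0, 100) == 100
assert steps_to_num_epochs(0.01, 100) == 1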
Example #3
    def objective_function_test(self, x, steps=1, **kwargs):

        rng = kwargs.get("rng", None)
        self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)
        # if rng was not None, set rng for lasagne
        if rng is not None:
            lasagne.random.set_rng(self.rng)

        num_epochs = int(1 + (self.max_num_epochs - 1) * steps)

        train = np.concatenate((self.train, self.valid))
        train_targets = np.concatenate(
            (self.train_targets, self.valid_targets))
        lc_curve, cost_curve, train_loss, valid_loss = self.train_net(
            train,
            train_targets,
            self.test,
            self.test_targets,
            init_learning_rate=np.float32(np.power(10., x[0])),
            l2_reg=np.float32(np.power(10., x[1])),
            batch_size=np.int32(x[2]),
            gamma=np.float32(np.power(10, x[3])),
            power=np.float32(x[4]),
            momentum=np.float32(x[5]),
            n_units_1=np.int32(np.power(2, x[6])),
            n_units_2=np.int32(np.power(2, x[7])),
            dropout_rate_1=np.float32(x[8]),
            dropout_rate_2=np.float32(x[9]),
            num_epochs=np.int32(num_epochs))
        y = lc_curve[-1]
        c = cost_curve[-1]
        return {'function_value': y, "cost": c, "learning_curve": lc_curve}
Example #4
    def objective_function(self, x, dataset_fraction=1, **kwargs):
        start_time = time.time()

        rng = kwargs.get("rng", None)
        self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)

        # Shuffle training data
        shuffle = self.rng.permutation(self.train.shape[0])
        size = int(dataset_fraction * self.train.shape[0])

        # Split off the dataset subset
        train = self.train[shuffle[:size]]
        train_targets = self.train_targets[shuffle[:size]]

        # Transform hyperparameters to linear scale
        C = np.exp(float(x[0]))
        gamma = np.exp(float(x[1]))

        # Train support vector machine
        clf = svm.SVC(gamma=gamma, C=C, random_state=self.rng)
        clf.fit(train, train_targets)

        # Compute validation error
        y = 1 - clf.score(self.valid, self.valid_targets)
        c = time.time() - start_time

        return {'function_value': y, "cost": c}
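The dataset_fraction argument provides a second fidelity: only a random subset of the training data is used. A minimal standalone sketch of that subsampling step, assuming NumPy arrays X and y (all names here are illustrative):

import numpy as np


def subsample(X, y, dataset_fraction, rng):
    # Shuffle row indices, then keep the first dataset_fraction of them,
    # mirroring the permutation/slicing pattern in Example #4.
    shuffle = rng.permutation(X.shape[0])
    size = int(dataset_fraction * X.shape[0])
    return X[shuffle[:size]], y[shuffle[:size]]


rng = np.random.RandomState(1)
X = np.arange(20).reshape(10, 2)
y = np.arange(10)
X_small, y_small = subsample(X, y, dataset_fraction=0.5, rng=rng)
assert X_small.shape == (5, 2)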
Example #5
    def objective_function_test(self, x, **kwargs):

        rng = kwargs.get("rng")
        self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)
        # if rng was not None, set rng for lasagne
        if rng is not None:
            lasagne.random.set_rng(self.rng)

        train = np.concatenate((self.train, self.valid))
        train_targets = np.concatenate(
            (self.train_targets, self.valid_targets))
        lc_curve, cost_curve = \
            self._train_model(config=x, train=train,
                              train_targets=train_targets,
                              valid=self.test,
                              valid_targets=self.test_targets)
        y = lc_curve[-1]
        c = cost_curve[-1]

        return {
            'function_value': y,
            "cost": c,
            "learning_curve": lc_curve,
            "cost_curve": cost_curve
        }
Example #6
    def objective_function(self, x, dataset_fraction=1, **kwargs):

        rng = kwargs.get("rng")
        self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)
        # if rng was not None, set rng for lasagne
        if rng is not None:
            lasagne.random.set_rng(self.rng)

        # Shuffle training data
        shuffle = self.rng.permutation(self.train.shape[0])
        size = int(dataset_fraction * self.train.shape[0])

        # Split off the dataset subset
        train = self.train[shuffle[:size]]
        train_targets = self.train_targets[shuffle[:size]]

        lc_curve, cost_curve = \
            self._train_model(config=x,
                              train=train,
                              train_targets=train_targets,
                              valid=self.valid,
                              valid_targets=self.valid_targets)
        y = lc_curve[-1]
        c = cost_curve[-1]

        return {
            'function_value': y,
            "cost": c,
            "learning_curve": lc_curve,
            "cost_curve": cost_curve
        }
Example #7
    def objective_function(self, x, dataset_fraction=1, steps=1, **kwargs):

        rng = kwargs.get("rng", None)
        self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)
        # if rng was not None, set rng for lasagne
        if rng is not None:
            lasagne.random.set_rng(self.rng)

        x = np.array(x, dtype=np.float32)

        num_epochs = int(1 + (self.max_num_epochs - 1) * steps)

        # Shuffle training data
        shuffle = self.rng.permutation(self.train.shape[0])
        size = int(dataset_fraction * self.train.shape[0])

        # Split off the dataset subset
        train = self.train[shuffle[:size]]
        train_targets = self.train_targets[shuffle[:size]]

        lc_curve, cost_curve, train_loss, valid_loss = \
            self.train_net(train, train_targets,
                           self.valid, self.valid_targets,
                           init_learning_rate=np.float32(np.power(10., x[0])),
                           l2_reg=np.float32(np.power(10., x[1])),
                           batch_size=np.int32(x[2]),
                           gamma=np.float32(np.power(10, x[3])),
                           power=np.float32(x[4]),
                           momentum=np.float32(x[5]),
                           n_units_1=np.int32(np.power(2, x[6])),
                           n_units_2=np.int32(np.power(2, x[7])),
                           dropout_rate_1=np.float32(x[8]),
                           dropout_rate_2=np.float32(x[9]),
                           num_epochs=np.int32(num_epochs))

        y = lc_curve[-1]
        c = cost_curve[-1]
        return {
            'function_value': y,
            "cost": c,
            "learning_curve": lc_curve,
            "train_loss": train_loss,
            "valid_loss": valid_loss,
            "learning_curve_cost": cost_curve
        }
Example #8
    def objective_function(self, configuration, **kwargs):
        fold = kwargs['fold']
        folds = kwargs.get('folds', 10)
        cutoff = kwargs.get('cutoff', 1800)
        memory_limit = kwargs.get('memory_limit', 3072)
        subsample = kwargs.get('subsample', None)
        instance = json.dumps({'fold': fold, 'subsample': subsample})

        # TODO: for now, the seed is ignored
        rng = kwargs.get("rng")
        self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)
        if fold == folds:
            # run validation with the same memory limit and cutoff
            return self.objective_function_test(configuration,
                                                cutoff=cutoff,
                                                rng=rng,
                                                memory_limit=memory_limit)

        include, _ = self._get_include_exclude_info()
        evaluator = autosklearn.evaluation.ExecuteTaFuncWithQueue(
            backend=self.backend,
            autosklearn_seed=1,
            resampling_strategy='partial-cv',
            folds=folds,
            logger=self.logger,
            memory_limit=memory_limit,
            metric=self.metric,
            include=include)

        status, cost, runtime, additional_run_info = evaluator.run(
            config=configuration, cutoff=cutoff, instance=instance)

        return {
            'function_value': cost,
            'cost': runtime,
            'status': status,
            'additional_run_info': additional_run_info
        }
Example #9
    def objective_function(self, x, **kwargs):
        """
        Evaluates one fold of a 10-fold cross-validation

        :param x: array/Configurations
            configuration
        :param kwargs:
            fold: int in [0, 10]
                if fold == 10, return test performance
            rng: rng, int or None
                if not None overwrites current RandomState

        :return: dict
        """
        fold = int(float(kwargs["fold"]))
        assert 0 <= fold <= self.folds

        arg_rng = kwargs.get("rng", None)
        self.rng = rng_helper.get_rng(rng=arg_rng, self_rng=self.rng)

        # if arg_rng was not None, set rng for lasagne
        if arg_rng is not None:
            lasagne.random.set_rng(self.rng)

        if fold == self.folds:
            return self.objective_function_test(x, **kwargs)

        # Compute cross-validation splits
        kf = StratifiedKFold(n_splits=self.folds,
                             shuffle=True,
                             random_state=self.rng)

        # Get indices for required fold
        train_idx = None
        valid_idx = None
        for idx, split in enumerate(
                kf.split(X=self.train, y=self.train_targets)):
            if idx == fold:
                train_idx = split[0]
                valid_idx = split[1]
                break

        valid = self.train[valid_idx, :]
        valid_targets = self.train_targets[valid_idx]

        train = self.train[train_idx, :]
        train_targets = self.train_targets[train_idx]

        # Get performance
        lc_curve, cost_curve = self._train_model(config=x,
                                                 train=train,
                                                 train_targets=train_targets,
                                                 valid=valid,
                                                 valid_targets=valid_targets)
        y = lc_curve[-1]
        c = cost_curve[-1]
        return {
            'function_value': y,
            "cost": c,
            "learning_curve": lc_curve,
            "cost_curve": cost_curve
        }
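The fold-selection logic in Example #9 can also be exercised on its own. The sketch below returns the train/validation indices of one requested fold from a StratifiedKFold split; the toy labels and the fold index are illustrative assumptions.

import numpy as np
from sklearn.model_selection import StratifiedKFold


def get_fold_indices(X, y, fold, n_splits=10, rng=None):
    # Iterate over the stratified splits and return the indices of the
    # requested fold, as in Example #9.
    kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=rng)
    for idx, (train_idx, valid_idx) in enumerate(kf.split(X=X, y=y)):
        if idx == fold:
            return train_idx, valid_idx
    raise ValueError("fold must be in [0, n_splits - 1]")


X = np.random.RandomState(0).rand(100, 3)
y = np.tile([0, 1], 50)
train_idx, valid_idx = get_fold_indices(X, y, fold=3)
assert len(valid_idx) == 10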