Example #1
    def _setup(self, config):
        # controls how often workers report model performance
        self.batches_per_step = config.get("batches_per_step", 1)
        self.max_validation_steps = config.get("max_validation_steps", 5)
        # removes channels from data to increase performance
        self.use_only_first_channel = config.get("use_only_first_channel",
                                                 False)
        self.train_data = get_pinned_object(config.get("train_data"))
        self.val_data = get_pinned_object(config.get("val_data"))

        # GPU or CPU?
        use_cuda = config.get("use_gpu") and torch.cuda.is_available()
        print("CUDA:", use_cuda)
        self.device = torch.device("cuda" if use_cuda else "cpu")

        self.batch_size = config.get("batch_size", 2)

        self.verbose = config.get("verbose", False)

        # Abstract: implement self.model

        optimizer = config.get("optimizer", "Adam")
        if optimizer == "SGD":
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=config.get("lr", 0.01),
                momentum=config.get("momentum", 0.9),
            )
        elif optimizer == "Adam":
            self.optimizer = optim.Adam(self.model.parameters())
        else:
            raise NotImplementedError()
        self.validation_loss_F = F.mse_loss
        loss_name = config.get("training_loss", "mse_loss")
        self.train_loss_F = mappings.losses[loss_name]
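Example #1 only receives object-store ids through `config`. Below is a minimal driver-side sketch of how the pinned data could be produced, assuming an older Ray release that exposes `pin_in_object_store`/`get_pinned_object` in `ray.tune.util`; the tensors and config keys are illustrative.

import ray
import torch
from ray.tune.util import pin_in_object_store  # ray.tune.utils in later releases

ray.init()

# Illustrative stand-ins for the real training/validation data.
train_data = torch.randn(256, 3, 64, 64)
val_data = torch.randn(64, 3, 64, 64)

# Pin the large objects once so every trial fetches them from the shared
# object store instead of receiving a serialized copy in its config.
config = {
    "train_data": pin_in_object_store(train_data),
    "val_data": pin_in_object_store(val_data),
    "batch_size": 8,
    "optimizer": "Adam",
    "use_gpu": False,
}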
Example #2
    def _setup(self, config: dict):

        self._dataset, self._col_labels, self._y_label = (
            get_pinned_object(data_id),
            None,
            "Value",
        )

        self.rf_model = None
        self._build(config)
Example #3
            def objective(config):
                
                # get objects from central tune storage
                labeler = get_pinned_object(labeler_id)
                price_array = get_pinned_object(price_array_id)
                calculate_profit = get_pinned_object(calculate_profit_id)

                # get the labels from the specific method
                label_list = labeler(config)

                # create the array that gets passed to the profit calculator
                label_array = pd.concat(label_list, axis=0).to_numpy()
                label_array = np.expand_dims(label_array, axis=1)
                array = np.concatenate([price_array, label_array], axis=1)

                specific_profit, _ = calculate_profit(array, self.trading_fee)

                tune.report(specific_profit=specific_profit)
                time.sleep(0.1)
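Example #3 reports the computed profit back to Tune with `tune.report`. A hedged sketch of how this objective could be handed to Tune from the enclosing method, assuming a Ray release whose `tune.run` accepts `metric`/`mode`; the search-space key and sample count are placeholders for whatever the labeler actually reads from `config`.

from ray import tune

# Hypothetical launch of the objective defined above; it relies on the
# pinned ids and self.trading_fee from the enclosing scope.
analysis = tune.run(
    objective,
    config={"threshold": tune.uniform(0.0, 1.0)},
    num_samples=20,
    metric="specific_profit",
    mode="max",
)
print(analysis.best_config)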
Example #4
    def _setup(self, config):
        [X_train, X_test, y_train, y_test] = get_pinned_object(data_id)

        # cuML random forest classifier (curfc), configured from the trial's config
        self.cuml_model = curfc(n_estimators=config.get("estimators", 40),
                                max_depth=config.get("depth", 16),
                                max_features=1.0)
        self.X_cudf_train = cudf.DataFrame.from_pandas(X_train)
        self.X_cudf_test = cudf.DataFrame.from_pandas(X_test)
        self.y_cudf_train = cudf.Series(y_train.values)
        self.y_test = y_test
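Example #4 unpacks a pre-split dataset from a single pinned id. A sketch of how `data_id` could be prepared, assuming the split comes from scikit-learn and that `pin_in_object_store` is available as in the other examples; dataset shape and sizes are illustrative.

import pandas as pd
import ray
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from ray.tune.util import pin_in_object_store  # older Ray versions

ray.init()

# Illustrative tabular dataset; y is kept as a Series so y_train.values
# works in the trainable above.
X, y = make_classification(n_samples=10_000, n_features=20)
X, y = pd.DataFrame(X), pd.Series(y)

# train_test_split returns [X_train, X_test, y_train, y_test], matching
# the unpacking in _setup.
data_id = pin_in_object_store(train_test_split(X, y, test_size=0.2))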
Example #5
    def _validation_loss(self):
        self.model.eval()

        val_loss = []
        with torch.no_grad():
            count = tqdm(
                range(self.max_validation_steps),
                desc="Validation",
                disable=not self.verbose,
            )
            for data, i in zip(get_pinned_object(self.val_data), count):
                if self.use_only_first_channel:
                    data = data.narrow(1, 0, 1)
                data = data.to(self.device)
                pred = self.model(data)
                val_loss.append(float(self.validation_loss_F(pred, data)))

        return np.mean(val_loss)
Example #6
    def _train_batches(self, train_for_n_batches=1):
        losses = []
        self.model.train()

        count = tqdm(
            range(train_for_n_batches), desc="Training", disable=not self.verbose, leave=True
        )

        data_train = get_pinned_object(self.get_train_pin())
        for data, i in zip(data_train, count):
            if self.batch_size and self.batch_size < data['data'].shape[0]:
                data = data['data'][:self.batch_size].to(self.device)
            else:
                data = data['data'].to(self.device)
            self.optimizer.zero_grad()
            pred = self.model(data)
            loss = self.train_loss_F(pred, data)
            loss.backward()
            self.optimizer.step()
            losses.append(float(loss))
            
        return losses
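Examples #5 and #6 are the pieces a Trainable's step would combine. A minimal sketch of such a step under the old class API that pairs `_setup` with `_train`, assuming the attributes from Example #1 and that numpy is imported as np, as in the surrounding examples; the metric names are illustrative.

    def _train(self):
        # One Tune iteration: a few training batches, then a validation pass.
        train_losses = self._train_batches(self.batches_per_step)
        return {
            "train_loss": float(np.mean(train_losses)),
            "validation_loss": float(self._validation_loss()),
        }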
Example #7
    def _validation_loss(self):
        self.model.eval()
        
        val_loss = []
        truths = []
        with torch.no_grad():
            data_val = get_pinned_object(self.get_val_pin())
            for data, _ in zip(data_val, range(self.max_validation_steps)):
                wf = data['data'].to(self.device)
                truth = (data['MC_type'] > 0)
                pred = self.model(wf)
                val_loss.append(self.validation_loss_F(pred, wf).cpu().numpy())
                truths.append(np.array(truth))

        try:
            pred = np.hstack(np.asarray(val_loss))
            truth = np.hstack(truths)
            fpr, tpr, _ = metrics.roc_curve(truth, pred)
            auc = metrics.auc(fpr, tpr)
        except ValueError:
            # roc_curve fails when only one class is present; return a neutral
            # tuple so callers can still unpack (auc, mean_pred).
            return 0.0, 0.0
        
        return auc, np.mean(pred)
Example #8
def train(config, reporter):
    get_pinned_object(X)
    reporter(timesteps_total=100, done=True)
Example #9
def f():
    return get_pinned_object(X)
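Examples #8 and #9 show the pattern at its smallest: pin an object once on the driver, then read it back by id inside the function Tune runs. An end-to-end sketch under the same assumption that an older Ray release exposes these helpers in `ray.tune.util`; the array size and metric are illustrative.

import numpy as np
import ray
from ray import tune
from ray.tune.util import pin_in_object_store, get_pinned_object

ray.init()

# Pin a large array once; only the small id X is captured by the closure.
X = pin_in_object_store(np.random.rand(5000, 5000))

def train(config, reporter):
    data = get_pinned_object(X)  # shared, read-only view of the pinned array
    reporter(timesteps_total=data.shape[0], done=True)

tune.run(train, num_samples=1)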