Example #1
def build_model(config, device, train=True):
    net = PIXOR(config['use_bn']).to(device)
    criterion = CustomLoss(device=device, num_classes=1)
    if not train:
        return net, criterion

    optimizer = torch.optim.SGD(net.parameters(), lr=config['learning_rate'], momentum=config['momentum'])
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=config['lr_decay_every'], gamma=0.1)

    return net, criterion, optimizer, scheduler
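
A hypothetical call site for build_model above; the config keys mirror the snippet, while the actual values and the device selection are assumptions:

# Hypothetical usage sketch (config values are placeholders, not from the project).
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config = {"use_bn": True, "learning_rate": 0.01, "momentum": 0.9, "lr_decay_every": 10}

# Training mode also returns the optimizer and scheduler.
net, criterion, optimizer, scheduler = build_model(config, device, train=True)

# Evaluation mode returns only the network and the loss.
net_eval, criterion_eval = build_model(config, device, train=False)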
Example #2
def quantiles(data_name, label, tau1, tau2, model_name):

    x, y = load_data("data/" + data_name)

    mod = import_module(model_name)
    model = mod.model

    # model 1 - upper bound
    loss = CustomLoss(tau1)
    model1 = fit_n(5, model, loss.loss, x, y)
    ym1 = model1.predict(x)
    model1.save("{}_{}_{}_{}.h5".format(model_name, data_name, label, tau1))
    np.save("{}_{}_{}_y_{}".format(model_name, data_name, label, tau1), ym1)

    # model 2 - lower bound
    loss = CustomLoss(tau2)
    model2 = fit_n(5, model, loss.loss, x, y)
    ym2 = model2.predict(x)
    model2.save("{}_{}_{}_{}.h5".format(model_name, data_name, label, tau2))
    np.save("{}_{}_{}_y_{}".format(model_name, data_name, label, tau2), ym2)
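
CustomLoss itself is not shown in this example; since loss.loss is passed to a Keras-style fit helper and tau1/tau2 are quantile levels, a minimal pinball-loss sketch consistent with this usage (an assumption, not the project's actual class) might be:

# Hypothetical sketch of a quantile ("pinball") CustomLoss whose .loss method
# can be handed to a Keras-style training routine such as fit_n above.
import tensorflow as tf

class CustomLoss:
    def __init__(self, tau):
        self.tau = tau  # target quantile in (0, 1)

    def loss(self, y_true, y_pred):
        # Tilted absolute error: tau * e for under-prediction, (tau - 1) * e otherwise.
        e = y_true - y_pred
        return tf.reduce_mean(tf.maximum(self.tau * e, (self.tau - 1.0) * e))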
Example #3
    def __init__(self,
                 loss='square',
                 learning_rate=0.3,
                 n_estimators=20,
                 max_depth=6,
                 subsample=0.8,
                 colsample_bytree=0.8,
                 colsample_bylevel=0.8,
                 min_child_weight=1,
                 reg_lambda=1.0,
                 gamma=0,
                 num_thread=-1):

        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.subsample = subsample
        self.colsample_bytree = colsample_bytree
        self.colsample_bylevel = colsample_bylevel
        self.reg_lambda = reg_lambda
        self.gamma = gamma
        self.num_thread = num_thread
        self.min_child_weight = min_child_weight
        self.first_round_pred = 0.0
        self.trees = []
        self.eval_metric = None

        self._is_classifier = False

        if loss == 'logistic':
            self.loss = LogisticLoss()
        elif loss == 'square':
            self.loss = SquareLoss()
        else:
            if callable(loss):
                self.loss = CustomLoss(loss)
            else:
                raise Exception('unsupported loss function: {0}'.format(loss))
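
Neither LogisticLoss, SquareLoss nor CustomLoss is shown here; the fit method in Example #7 only requires a compute_grad_hess(y_pred, y_true) method, so a sketch consistent with that interface (an assumption, not the project's code) could look like:

# Hypothetical sketches of the loss interface assumed by this constructor.
import numpy as np

class SquareLoss:
    def compute_grad_hess(self, y_pred, y_true):
        # L = 0.5 * (y_pred - y_true) ** 2  ->  grad = y_pred - y_true, hess = 1
        return y_pred - y_true, np.ones_like(y_pred)

class CustomLoss:
    def __init__(self, grad_hess_fn):
        # Wraps a user-supplied callable returning (grad, hess) arrays.
        self.grad_hess_fn = grad_hess_fn

    def compute_grad_hess(self, y_pred, y_true):
        return self.grad_hess_fn(y_pred, y_true)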
Example #4
def build_model(config, device, train=True):
    net = PIXOR(config['geometry'], config['use_bn'])
    loss_fn = CustomLoss(device, config, num_classes=1)

    if torch.cuda.device_count() <= 1:
        config['mGPUs'] = False
    if config['mGPUs']:
        print("using multi gpu")
        net = nn.DataParallel(net)

    net = net.to(device)
    loss_fn = loss_fn.to(device)
    if not train:
        return net, loss_fn

    optimizer = torch.optim.SGD(net.parameters(),
                                lr=config['learning_rate'],
                                momentum=config['momentum'],
                                weight_decay=config['weight_decay'])
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=config['lr_decay_at'], gamma=0.1)

    return net, loss_fn, optimizer, scheduler
Example #5
def main(hyper_params=None, pretrain_full_info=False, train_regressor=False):
    # If custom hyper_params are not passed, load from hyper_params.py
    if hyper_params is None: from hyper_params import hyper_params
    else: print("Using passed hyper-parameters..")

    # Initialize a tensorboard writer
    global writer
    path = hyper_params['tensorboard_path']
    writer = SummaryWriter(path, flush_secs=20)

    # Loading data
    if pretrain_full_info == True:
        train_reader, test_reader = load_data_full_info(hyper_params)
    else:
        train_reader, test_reader, val_reader = load_data(
            hyper_params, train_regressor=train_regressor)
        hyper_params['all_ks'] = get_all(train_reader)  # For MinSup evaluation

    file_write(hyper_params,
               "\n\nSimulation run on: " + str(dt.datetime.now()) + "\n\n")
    file_write(hyper_params, "Data reading complete!")
    file_write(hyper_params,
               "Number of train batches: {:4d}".format(len(train_reader)))
    if pretrain_full_info == False:
        file_write(hyper_params,
                   "Number of val batches: {:4d}".format(len(val_reader)))
    file_write(hyper_params,
               "Number of test batches: {:4d}".format(len(test_reader)))
    if 'all_ks' in hyper_params:
        file_write(
            hyper_params,
            "MinSup estimated k: " + str(hyper_params['all_ks']) + "\n\n")

    # Creating model
    if train_regressor: model = RegressionModelCifar(hyper_params)
    else: model = ModelCifar(hyper_params)
    if is_cuda_available: model.cuda()

    # Loss function
    if pretrain_full_info: criterion = nn.CrossEntropyLoss()
    elif train_regressor: criterion = MSELoss(hyper_params)
    else: criterion = CustomLoss(hyper_params)

    # Optimizer
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=hyper_params['lr'],
                                momentum=0.9,
                                weight_decay=hyper_params['weight_decay'])

    file_write(hyper_params, str(model))
    if pretrain_full_info == True:
        file_write(hyper_params, "Pre-training model on full information..")
    file_write(hyper_params, "\nModel Built!\nStarting Training...\n")

    best_metrics_val = None
    validate_on = hyper_params[
        'validate_using']  # Estimator used to choose the best model (validation)
    if pretrain_full_info == True:
        validate_on = "Accuracy"  # Since full-information

    try:
        for epoch in range(1, hyper_params['epochs'] + 1):
            epoch_start_time = time.time()
            metrics_train, metrics_val = None, None

            if pretrain_full_info == True:
                metrics_train = train_full_info(model, criterion, optimizer,
                                                train_reader, hyper_params,
                                                epoch)
                # Note that metrics_train differs from the actual model performance,
                # because the accuracy is computed WHILE the model is being trained.
                # Re-evaluating with the model parameters kept fixed would most
                # likely give a different (better) accuracy.

                # Don't validate for logging policy. Just store the model at every epoch.
                torch.save(model.state_dict(), hyper_params['model_file'])
            else:
                metrics_train = train(model, criterion, optimizer,
                                      train_reader, hyper_params, epoch)
                # Calculating the metrics on the validation set
                metrics_val = evaluate(model,
                                       criterion,
                                       val_reader,
                                       hyper_params,
                                       eval_estimators=True,
                                       test_set=False)

                # Validate
                if best_metrics_val is None: best_metrics_val = metrics_val
                elif metrics_val[validate_on] >= best_metrics_val[validate_on]:
                    best_metrics_val = metrics_val

                # Save model if current is best epoch
                if metrics_val[validate_on] == best_metrics_val[validate_on]:
                    torch.save(model.state_dict(), hyper_params['model_file'])

            metrics_train = None  # Don't print train metrics, since already printing in tqdm bar
            log_end_epoch(hyper_params, epoch, epoch_start_time, writer,
                          metrics_train, metrics_val)

    except KeyboardInterrupt:
        print('Exiting from training early')

    # Evaluate best saved model
    if train_regressor: model = RegressionModelCifar(hyper_params)
    else: model = ModelCifar(hyper_params)
    if is_cuda_available: model.cuda()
    model.load_state_dict(torch.load(hyper_params['model_file']))
    model.eval()

    metrics_train = None
    metrics_test = evaluate(model,
                            criterion,
                            test_reader,
                            hyper_params,
                            eval_estimators=False,
                            test_set=True)

    file_write(hyper_params, "Final model performance on test-set:")
    log_end_epoch(hyper_params,
                  hyper_params['epochs'] + 1,
                  time.time(),
                  writer,
                  metrics_train,
                  metrics_test,
                  test=True)

    writer.close()

    return metrics_test
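
A hypothetical driver for this entry point; the sequencing (pre-train the logging policy first, then train on bandit feedback) is an assumption and not shown in the source:

# Hypothetical driver sketch; hyper_params falls back to hyper_params.py as in the snippet.
if __name__ == "__main__":
    main(pretrain_full_info=True)   # saves the logging-policy checkpoint at every epoch
    test_metrics = main()           # standard training, validation and test evaluation
    print(test_metrics)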
Example #6
File: train.py  Project: samux87/ruhsnet
parser.add_argument('-e', '--epochs', type=int, default=100, help='number of training epochs')
parser.add_argument('-lr', '--learning_rate', type=float, default=1e-3, help='learning rate')
args = parser.parse_args()

batch_size = args.batch_size
learning_rate = args.learning_rate
max_epochs = args.epochs

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

config_name = 'config.json'
config, _, _, _ = load_config(config_name)
train_data_loader, val_data_loader = get_data_loader(batch_size=batch_size, use_npy=config['use_npy'], frame_range=config['frame_range'])


# `net` is assumed to be constructed earlier in the file (a PIXOR-style detector already moved to `device`).
criterion = CustomLoss(device=device, num_classes=1)
optimizer = Adam(net.parameters())


def train(epoch):
    net.train()
    total_loss = 0.

    for batch_idx, (pc_feature, label_map) in enumerate(train_data_loader):
        N = pc_feature.size(0)
        pc_feature = pc_feature.to(device)
        label_map = label_map.to(device)

        pc_feature = Variable(pc_feature)
        label_map = Variable(label_map)
        predictions = net(pc_feature)
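
The excerpt ends right after the forward pass; a hedged continuation of the loop body, assuming the criterion returns a single scalar loss for (predictions, label_map), which the original file does not show, might be:

        # Hypothetical continuation, not part of the original excerpt:
        loss = criterion(predictions, label_map)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    return total_loss / len(train_data_loader)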
Example #7
class SGBModel(object):
    """
	Simple Gradient Boosting
	"""
    def __init__(self,
                 loss='square',
                 learning_rate=0.3,
                 n_estimators=20,
                 max_depth=6,
                 subsample=0.8,
                 colsample_bytree=0.8,
                 colsample_bylevel=0.8,
                 min_child_weight=1,
                 reg_lambda=1.0,
                 gamma=0,
                 num_thread=-1):

        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.subsample = subsample
        self.colsample_bytree = colsample_bytree
        self.colsample_bylevel = colsample_bylevel
        self.reg_lambda = reg_lambda
        self.gamma = gamma
        self.num_thread = num_thread
        self.min_child_weight = min_child_weight
        self.first_round_pred = 0.0
        self.trees = []
        self.eval_metric = None

        self._is_classifier = False

        if loss == 'logistic':
            self.loss = LogisticLoss()
        elif loss == 'square':
            self.loss = SquareLoss()
        else:
            if callable(loss):
                self.loss = CustomLoss(loss)
            else:
                raise Exception('unsupported loss function: {0}'.format(loss))

    def fit(self, X, y, eval_metric=None, early_stopping_rounds=None):
        self.trees = []
        self.feature_importances_ = {}
        self.eval_metric = _EVAL_METRIC[eval_metric] if eval_metric else None

        X.reset_index(drop=True, inplace=True)
        y.reset_index(drop=True, inplace=True)

        # Y stores: label, y_pred, grad, hess, sample_weight
        Y = pd.DataFrame(y.values, columns=[LABEL_COLUMN])
        Y['y_pred'] = self.first_round_pred
        Y[GRAD_COLUMN], Y[HESS_COLUMN] = self.loss.compute_grad_hess(
            Y.y_pred.values, Y.label.values)

        if self._is_classifier:
            Y['sample_weight'] = 1.0
            Y.loc[Y.label == 1, 'sample_weight'] = self.scale_pos_weight

        if self.eval_metric is not None and early_stopping_rounds is not None:
            assert early_stopping_rounds > 0
            best_val_score = -np.inf
            score_worse_round = 0
            best_round = 0

        for idx in range(self.n_estimators):
            if self._is_classifier:
                Y[GRAD_COLUMN] = Y[GRAD_COLUMN] * Y.sample_weight
                Y[HESS_COLUMN] = Y[HESS_COLUMN] * Y.sample_weight

            # subsample column and row before training the current tree
            X_sample_column = X.sample(frac=self.colsample_bytree, axis=1)
            data = pd.concat([X_sample_column, Y], axis=1)
            data = data.sample(frac=self.subsample, axis=0)

            X_feed = data[X_sample_column.columns]
            Y_feed = data[Y.columns]

            tree = Tree(max_depth=self.max_depth,
                        min_child_weight=self.min_child_weight,
                        colsample_bylevel=self.colsample_bylevel,
                        reg_lambda=self.reg_lambda,
                        gamma=self.gamma,
                        num_thread=self.num_thread)

            tree.fit(X_feed, Y_feed)

            # predict the whole train set to update the y_pred, grad and hess
            preds = tree.predict(X[X_sample_column.columns])

            Y['y_pred'] += self.learning_rate * preds
            Y[GRAD_COLUMN], Y[HESS_COLUMN] = self.loss.compute_grad_hess(
                Y.y_pred.values, Y.label.values)

            # only compute feature importance in "weight" type, xgboost support two more type "gain" and "cover"
            for feature, weight in tree.feature_importances_.items():
                if feature in self.feature_importances_:
                    self.feature_importances_[feature] += weight
                else:
                    self.feature_importances_[feature] = weight

            self.trees.append(tree)

            if self.eval_metric is None:
                print('[SGBoost] train round: {0}'.format(idx))
            else:
                cur_val_score = self._eval_score(Y.label.values,
                                                 Y.y_pred.values)
                print('[SGBoost] train round: {0}, eval score: {1}'.format(
                    idx, cur_val_score))

                if early_stopping_rounds is not None:
                    if cur_val_score > best_val_score:
                        best_val_score = cur_val_score
                        score_worse_round = 0
                        best_round = idx
                    else:
                        score_worse_round += 1

                    if score_worse_round > early_stopping_rounds:
                        print('[SGBoost] train best round: {0}, best eval score: {1}'.format(
                            best_round, best_val_score))
                        break

        return self

    def predict(self, X):
        assert len(self.trees) > 0
        # TODO: add parallel tree prediction
        # but now a daemonic process is not allowed to create child processes
        preds = np.zeros((X.shape[0], ))
        preds += self.first_round_pred
        for tree in self.trees:
            preds += self.learning_rate * tree.predict(X)
        return preds

    def _eval_score(self, y_true, y_pred):
        raise NotImplementedError()
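
A hypothetical usage sketch for the callable-loss branch of the constructor; the toy data and the (grad, hess) callable convention are assumptions, not part of the project:

# Hypothetical usage of SGBModel with a user-supplied gradient/Hessian callable.
import numpy as np
import pandas as pd

def squared_grad_hess(y_pred, y_true):
    # Same derivatives as the built-in square loss, provided as a plain callable.
    return y_pred - y_true, np.ones_like(y_pred)

X = pd.DataFrame(np.random.rand(200, 5), columns=['f%d' % i for i in range(5)])
y = pd.Series(np.random.rand(200))

model = SGBModel(loss=squared_grad_hess, n_estimators=10, max_depth=3)
model.fit(X, y)
preds = model.predict(X)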
Example #8
def get_model(model_name, scaler=1, **kwargs):

    custom_loss = CustomLoss(alpha=scaler)
    param_grid = {}

    if model_name == 'XGB':
        model = XGBClassifier(
            n_jobs=-1,  #n_estimators=50,
            random_state=100,
            colsample_bytree=0.8,
            subsample=0.8,
            importance_type='gain',
            scale_pos_weight=scaler
            #objective = custom_loss.focal_loss_boosting
            #max_delta_step=1
        )

        # tuning step suggestion: https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/
        param_grid = {
            'max_depth': [2, 3, 4],
            #'min_child_weight':[2,3,4],
            'learning_rate': [0.05, 0.1, 0.15],
            'gamma': [0, 1],
            'n_estimators': [50, 100],
            'reg_alpha': [1, 3, 5],
            'reg_lambda': [2, 4, 6]
        }

    elif model_name == 'LGB':
        params_lgb = {
            #'scale_pos_weight': scaler,
            #'objective': custom_loss.focal_loss_boosting,  # duplicate key: the 'binary' objective below takes effect
            'num_leaves': 60,
            'subsample': 0.8,  # sample rows
            'colsample_bytree': 0.8,  # sample columns
            'objective': 'binary',
            'class_weight': 'balanced',
            'importance_type': 'gain',
            'random_state': 42,
            'n_jobs': -1,
            'silent': True
        }

        model = LGBMClassifier(**params_lgb)
        param_grid = {
            'n_estimators': [50, 100, 200],
            'max_depth': [7, 8, 9],
            'learning_rate': [0.05, 0.1, 0.3],
            'min_child_samples': [20, 30, 40],
            'min_child_weight': [1.5, 2, 2.5, 3],
            'reg_alpha': [1, 3, 5],
            'reg_lambda': [2, 4, 6]
        }

        # # default
        # model = LGBMClassifier()
        # param_grid={}

    elif model_name == 'nn':
        import torch.nn as nn

        class Net(nn.Module):
            def __init__(self, in_features, num_classes, mid_features):
                super(Net, self).__init__()

                self.classifier = nn.Sequential(
                    nn.Linear(in_features, mid_features),
                    nn.BatchNorm1d(num_features=mid_features),
                    nn.LeakyReLU(0.1, inplace=True),
                    #nn.Dropout(p=0.3),
                    nn.Linear(mid_features, mid_features),
                    nn.BatchNorm1d(num_features=mid_features),
                    nn.LeakyReLU(0.1, inplace=True),
                    nn.Linear(mid_features, num_classes))

            def forward(self, x):
                x = self.classifier(x)
                return x

        # net = Net(30,256,2)
        # t = torch.randn(16, 30)
        # net(t)

        model = Net(kwargs['in_features'], kwargs['num_classes'],
                    kwargs['mid_features'])

    return model, param_grid
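
The returned param_grid suggests the caller performs a hyper-parameter search; a hypothetical scikit-learn style call site (X_train and y_train are assumed to exist) might be:

# Hypothetical usage of get_model together with the returned param_grid.
from sklearn.model_selection import GridSearchCV

model, param_grid = get_model('LGB', scaler=5)
search = GridSearchCV(model, param_grid, scoring='f1', cv=3, n_jobs=-1)
# search.fit(X_train, y_train)        # X_train / y_train are assumed to exist
# best_model = search.best_estimator_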
Example #9
                             transforms.ToTensor(),
                             transforms.Normalize([0.4589, 0.4355, 0.4032],[0.2239, 0.2186, 0.2206])])

    augs = transforms.Compose([transforms.RandomResizedCrop(300),
                               transforms.RandomRotation(20),
                               transforms.ToTensor(),
                               transforms.Normalize([0.4589, 0.4355, 0.4032],[0.2239, 0.2186, 0.2206])])

    train_set = CustomDataset(dir_csv, dir_img, transforms=augs)
    train_loader = DataLoader(train_set, batch_size=batch_size_train, shuffle=True)

    val_set = CustomDataset(dir_csv, dir_img, transforms=tr)
    val_loader = DataLoader(val_set, batch_size=batch_size_test, shuffle=False)

    model = CustomModel()
    loss_function = CustomLoss()

    model.to(device)
    print('Starting optimizer with LR={}'.format(lr))
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
   
    scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
    for epoch in range(1, num_epochs + 1):
        train(model, device, train_loader, optimizer, epoch, loss_function)
        test(model, device, val_loader, loss_function)
        scheduler.step()

    torch.save(model.state_dict(), "well_trained model.pt")


if __name__ == "__main__":
Example #10
File: main.py  Project: noveens/banditnet
def main(hyper_params=None, return_model=False):
    # If custom hyper_params are not passed, load from hyper_params.py
    if hyper_params is None: from hyper_params import hyper_params
    else: print("Using passed hyper-parameters..")

    # Initialize a tensorboard writer
    global writer
    path = hyper_params['tensorboard_path']
    writer = SummaryWriter(path)

    # Train It..
    train_reader, test_reader, val_reader = load_data(hyper_params)

    file_write(hyper_params['log_file'],
               "\n\nSimulation run on: " + str(dt.datetime.now()) + "\n\n")
    file_write(hyper_params['log_file'], "Data reading complete!")
    file_write(hyper_params['log_file'],
               "Number of train batches: {:4d}".format(len(train_reader)))
    file_write(hyper_params['log_file'],
               "Number of test batches: {:4d}".format(len(test_reader)))

    model = ModelCifar(hyper_params)
    if is_cuda_available: model.cuda()

    criterion = CustomLoss(hyper_params)

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=hyper_params['lr'],
                                momentum=0.9,
                                weight_decay=hyper_params['weight_decay'])

    file_write(hyper_params['log_file'], str(model))
    file_write(hyper_params['log_file'],
               "\nModel Built!\nStarting Training...\n")

    best_metrics_train = None
    best_metrics_test = None

    try:
        for epoch in range(1, hyper_params['epochs'] + 1):
            epoch_start_time = time.time()

            # Training for one epoch
            metrics = train(model, criterion, optimizer, train_reader,
                            hyper_params)

            string = ""
            for m in metrics:
                string += " | " + m + ' = ' + str(metrics[m])
            string += ' (TRAIN)'

            best_metrics_train = metrics

            # Calculating the metrics on the test set
            metrics = evaluate(model, criterion, test_reader, hyper_params)
            string2 = ""
            for m in metrics:
                string2 += " | " + m + ' = ' + str(metrics[m])
            string2 += ' (TEST)'

            best_metrics_test = metrics

            ss = '-' * 89
            ss += '\n| end of epoch {:3d} | time: {:5.2f}s'.format(
                epoch, (time.time() - epoch_start_time))
            ss += string
            ss += '\n'
            ss += '-' * 89
            ss += '\n| end of epoch {:3d} | time: {:5.2f}s'.format(
                epoch, (time.time() - epoch_start_time))
            ss += string2
            ss += '\n'
            ss += '-' * 89
            file_write(hyper_params['log_file'], ss)

            for metric in metrics:
                writer.add_scalar('Test_metrics/' + metric, metrics[metric],
                                  epoch - 1)

    except KeyboardInterrupt:
        print('Exiting from training early')

    writer.close()

    if return_model == True: return model
    return best_metrics_train, best_metrics_test