Example No. 1
def optimizers():
    init_plot()
    x, y = linear_dataset()
    for inx, optimizer in enumerate([SGD(), Momentum(), AdaGrad(), Adam()]):
        model = LinearModel(n_features=1) \
            .compile(loss_func=MSE(), optimizer=optimizer) \
            .fit(x, y, epoch=100, batch_size=8)
        plot_m(x, y, model, inx)
    plt.show()
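
The chained compile()/fit() calls above only work if each method returns self; the LinearModel here is project-specific. A minimal sketch of that assumed interface (class name and training loop are illustrative only):

import numpy as np

class ChainableLinearModel:
    # Hypothetical sketch: compile() and fit() return self,
    # which is what makes the method chaining in the example legal.
    def __init__(self, n_features=1):
        self.w = np.zeros(n_features)
        self.b = 0.0

    def compile(self, loss_func, optimizer):
        self.loss_func, self.optimizer = loss_func, optimizer
        return self

    def fit(self, x, y, epoch=100, batch_size=8):
        # per-batch optimizer updates elided in this sketch
        return self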
Example No. 2
def eval_params(w, bias, dr, D, det=False, args=None, intercept=True):
    # Given the model weights, this function evaluates the model
    model = LinearModel(D=D)
    model.w.weight.data = torch.FloatTensor([w])
    if intercept:
        model.w.bias.data = torch.FloatTensor([bias])
    return evaluate_model(model,
                          dr,
                          deterministic=det,
                          group_fairness_evaluation=True,
                          args=args,
                          fairness_evaluation=True)
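
The direct .data assignment above works, but the idiomatic PyTorch equivalent uses copy_() under no_grad(). A self-contained sketch with a plain nn.Linear standing in for the project-specific model:

import torch

layer = torch.nn.Linear(1, 1)                   # stand-in for model.w
with torch.no_grad():
    layer.weight.copy_(torch.tensor([[0.5]]))   # weight shape is (out, in)
    layer.bias.copy_(torch.tensor([0.1]))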
Example No. 3
def get_Model():
    Model_A = LinearModel(in_channel=28 * 28, linear_channel=[256, 64, 32, 10])
    Model_B = CnnModel(image_size=[28, 28],
                       in_channel=1,
                       hidden_channel=[32, 64, 32, 10])
    Model_C = CustomModel(image_size=[28, 28],
                          in_channel=1,
                          block1_channel=[64, 32, 64],
                          block2_channel=[64, 32, 64],
                          out_channel=10)
    Model = EnsembleModel(Model_A, Model_B, Model_C)
    return Model
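
EnsembleModel is not shown in the snippet; a common implementation for classifiers like these averages the submodels' logits. A hypothetical sketch of that pattern:

import torch
import torch.nn as nn

class AveragingEnsemble(nn.Module):
    # Hypothetical: runs each submodel on the same input and
    # averages their logits.
    def __init__(self, *models):
        super().__init__()
        self.models = nn.ModuleList(models)

    def forward(self, x):
        return torch.stack([m(x) for m in self.models]).mean(dim=0)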
Example No. 4
def build(units=(128, 32), num_inputs=8, num_actions=4, device="cpu", memory=""):
    """
    Build a new NN model.

    :param units: Units to use in each hidden layer.
    :type units: tuple
    :param num_inputs: Number of inputs expected by the first layer.
    :param num_actions: Number of actions output by the last layer.
    :param memory: If non-empty, the first layer is an RNN; valid values are "GRU" or "LSTM".
    :param device: Which device to use for PyTorch.
    :return: A new PyTorch model (nn.Module).
    """
    if memory:
        model = MemoryModel(memory, units, num_inputs, num_actions)
    else:
        model = LinearModel(units, num_inputs, num_actions)
    return model.to(device=device)
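
A usage sketch (MemoryModel and LinearModel are assumed from the surrounding project):

feedforward = build(units=(128, 32), num_inputs=8, num_actions=4)  # plain MLP
recurrent = build(memory="LSTM", device="cpu")                     # LSTM front end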
Example No. 5
def perform_model_training(train_data_df,
                           y,
                           imputed_cols,
                           M=20,
                           _type='log',
                           get_pooled_parameters=False):
    imputed_train_dfs = []
    fitted_models = []

    for m in range(M):
        imputed_train_df = mice_impute_dataframe(train_data_df)

        X = imputed_train_df[imputed_cols].to_numpy()

        if _type == 'mnlog':
            estimator = MNLogit()
        elif _type == 'cont':
            estimator = LinearModel()
        else:
            estimator = Logit()

        estimator.fit(X, y)

        imputed_train_dfs.append(imputed_train_df)
        fitted_models.append(estimator)

    pooled_lin_model = PooledLinearModelFactory.init_from_linear_models(
        fitted_models, _type=_type)

    results = init_results_struct(_type, M)

    for m in range(M):
        imputed_train_df = imputed_train_dfs[m]

        X = imputed_train_df[imputed_cols].to_numpy()

        results.add_result(m, pooled_lin_model, X, y)

    result_struct = results.to_results_struct()

    if get_pooled_parameters:
        pooled_model_parameters = _pool_model_parameters(
            fitted_models, train_data_df.shape[0], imputed_cols)

        return result_struct, pooled_lin_model, imputed_train_dfs, pooled_model_parameters
    else:
        return result_struct, pooled_lin_model, imputed_train_dfs
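
PooledLinearModelFactory is project-specific; pooling across the M imputations typically follows Rubin's rules (average the estimates, then combine within- and between-imputation variance). A rough numpy sketch under that assumption:

import numpy as np

def pool_rubin(estimates, variances):
    # estimates, variances: arrays of shape (M, p) holding each
    # imputation's coefficients and squared standard errors.
    m = estimates.shape[0]
    q_bar = estimates.mean(axis=0)        # pooled point estimate
    u_bar = variances.mean(axis=0)        # within-imputation variance
    b = estimates.var(axis=0, ddof=1)     # between-imputation variance
    total = u_bar + (1 + 1 / m) * b       # Rubin's total variance
    return q_bar, total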
Example No. 6
    global seed, dtype, oracle_size
    oracle_size = 20
    dtype = torch.float32
    device = torch.device("cuda:0")
    device2 = torch.device("cuda:1")
    seed = 2018
    np.random.seed(seed=seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    cudnn.benchmark = True
    sub_x, test_data, test_label = get_data()

    target_model = Target()
    n_sample = test_data.size(0)
    # print('Oracle model evaluation on clean data #%d:'%(n_sample))
    # target_model.eval(test_data.reshape(n_sample, 3*96*96), test_label)

    sub = LinearModel()

    substitute_model = Substitute(model=sub, device=device)

    stl10_bbox_sub(param=param, target_model=target_model, substitute_model=substitute_model,
                   x_sub=sub_x, test_data=test_data, test_label=test_label, aug_epoch=param['data_aug'],
                   samples_max=12800, n_epoch=param['nb_epochs'], fixed_lambda=param['lambda'])

    print('\n\nFinal results:')
    # target_model.eval(test_data.reshape(n_sample, 3*96*96), test_label)
    print('Substitute model evaluation on clean data #%d:' % n_sample)
    substitute_model.eval(x=test_data, y=test_label, batch_size=512)
Example No. 7
    error = pred - df[metric].values
    print(df[metric].values)
    print(pred)
    print(error)
    print(np.sqrt(np.sum(error**2) / np.sum(df.cs.values)),
          np.sum(error) / np.sum(df.cs.values))

    if metric in ['mufd', 'fof2']:
        wls_model = sm.WLS(df[metric].values - pred,
                           add_constant(pred, prepend=False), df.cs.values)
        wls_fit = wls_model.fit_regularized(alpha=np.array([1, 3]), L1_wt=0)
        coeff = wls_fit.params
        coeff[0] = coeff[0] + 1
        print(coeff)

        irimodel = LinearModel(irimodel, coeff[0], coeff[1])
        pred = irimodel.predict(df['station.longitude'].values,
                                df['station.latitude'].values)
        error = pred - df[metric].values
        print(df[metric].values)
        print(pred)
        print(error)
        print(np.sqrt(np.sum(error**2) / np.sum(df.cs.values)),
              np.sum(error) / np.sum(df.cs.values))

    gp3dmodel = GP3DModel()
    gp3dmodel.train(df, np.log(df[metric].values) - np.log(pred))

    model = ProductModel(irimodel, LogSpaceModel(gp3dmodel))
    pred = model.predict(df['station.longitude'].values,
                         df['station.latitude'].values)
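
LogSpaceModel and ProductModel are project-specific, but since the GP is trained on log(y) - log(pred), the composition above amounts to pred_total = iri_pred * exp(gp_pred). A hypothetical sketch of both wrappers:

import numpy as np

class LogSpaceModel:
    # Hypothetical: exponentiates a model trained on log-residuals.
    def __init__(self, model):
        self.model = model

    def predict(self, lon, lat):
        return np.exp(self.model.predict(lon, lat))

class ProductModel:
    # Hypothetical: multiplies the predictions of two models.
    def __init__(self, a, b):
        self.a, self.b = a, b

    def predict(self, lon, lat):
        return self.a.predict(lon, lat) * self.b.predict(lon, lat)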
Example No. 8
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=1, metavar='N',
                        help='number of epochs to train (default: 1)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    parser.add_argument('--attack', action='store_true', default=False,
                        help='attack model')
    parser.add_argument('--LP', type=str, default="l2",
                        help='Random Corruption Norm Constraint')
    parser.add_argument('--eps', type=float, default=1e-4,
                        help='Random Corruption Epsilon')
    parser.add_argument('--attack_lr', type=float, default=1e-3,
                        help='Grad based attacker learning rate')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    dataset1 = datasets.MNIST('./', train=True, download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('./', train=False,
                              transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)

    model = LinearModel().to(device)  # Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
        scheduler.step()

    if args.attack:
        print("start attack")
        attacker = GradAttacker(model.parameters(), lr=args.attack_lr, eps=args.eps, LP=args.LP)
        train(args, model, device, train_loader, optimizer=attacker, epoch='attack epoch')
        print("Accuracy After attack:")
        test(model, device, test_loader)

    if args.save_model:
        torch.save(model.state_dict(), "mnist_linear.pt")
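
Typical invocations, assuming the script is saved as mnist_linear.py (the filename is illustrative):

python mnist_linear.py --epochs 5 --save-model
python mnist_linear.py --attack --eps 1e-3 --attack_lr 1e-3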
Example No. 9
class Namespace:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)


if __name__ == "__main__":
    import pickle as pkl
    dr = YahooDataReader(None)
    dr.data = pkl.load(open("GermanCredit/german_train_rank.pkl", "rb"))
    vdr = YahooDataReader(None)
    vdr.data = pkl.load(open("GermanCredit/german_test_rank.pkl", "rb"))
    args = parse_my_args_reinforce()
    torch.set_num_threads(args.num_cores)
    args.group_feat_id = 3
    if args.model_type == "Linear":
        model = LinearModel(D=args.input_dim, clamp=args.clamp)
        print("Linear model initialized")
    else:
        model = NNModel(D=args.input_dim,
                        hidden_layer=args.hidden_layer,
                        dropout=args.dropout,
                        pooling=args.pooling,
                        clamp=args.clamp)
        print(
            "Model initialized with {} hidden layer size, Dropout={}, using {} pooling"
            .format(args.hidden_layer, args.dropout, args.pooling))

    model = demographic_parity_train(model, dr, vdr, vvector(200), args)

    results = evaluate_model(model,
                             vdr,
Example No. 10
        val_loader_100, test_loader_100 = get_val_test(cv_path_100)

        #embeddings or one hot encoded features
        with torch.no_grad():
            features_seq = torch.FloatTensor(features_seq_np)
            features_eye = torch.FloatTensor(np.eye(num_nodes))

        # set features and epochs for each model
        if model_name == 'GNN_one-hot':
            epochs = 750
            features = features_eye
            model = GCNModel(features.shape[1]).to(device)
        elif model_name == 'FFNN':
            epochs = 750
            features = features_seq
            model = LinearModel(features.shape[1]).to(device)
        elif model_name == 'GNN_ProtBert':
            epochs = 750
            features = features_seq
            model = GCNModel(features.shape[1]).to(device)
        else:
            raise ValueError('unknown model_name: {}'.format(model_name))

        features = features.to(device)
        train_edges = train_edges.to(device)

        optimizer = optim.RAdam(model.parameters(), lr=0.01)

        for epoch in range(1, epochs + 1):
            loss_train = train(train_edges, model)
            if epoch % 100 == 0:
                print('-------', 'epoch: {:04d}'.format(epoch),
                      'loss_train: {:.4f}'.format(loss_train), '-------')
Example No. 11
        X_s_ = torch.tensor(X_s_).type(torch.FloatTensor)
        X_t1_ = torch.tensor(X_t1_).type(torch.FloatTensor)
        X_t2_ = torch.tensor(X_t2_).type(torch.FloatTensor)

    if use_cuda:
        X_s, X_t1, X_t2 = X_s.cuda(), X_t1.cuda(), X_t2.cuda()
        Y_s, Y_t1, Y_t2 = Y_s.cuda(), Y_t1.cuda(), Y_t2.cuda()
        X_s_, X_t1_, X_t2_ = X_s_.cuda(), X_t1_.cuda(), X_t2_.cuda()

    all_accs = []
    maxa = 0

    for _ in range(2):
        for dr in dropouts:
            for al in alphas:
                model = LinearModel(bow_size, graph_size, dr)
                if use_cuda:
                    model = model.cuda()
                optimizer = optim.Adam(model.parameters(), lr=lr)
                for p in model.parameters():
                    p.requires_grad = True

                accs, loss = [], []
                for epoch in range(n_epochs):
                    train_model(model, optimizer, loss_class, loss_domain, X_s, X_s_, Y_s, X_t1, X_t1_, al)
                    acc, l = eval_model(model, loss_class, loss_domain, X_t2, X_t2_, Y_t2)
                    accs.append(acc)
                    loss.append(l)

                max_acc = max(accs)
                all_accs.append(max_acc)
Example No. 12
        D_in = train_loader.dataset.X.shape[1]
        H1 = D_in // 4
        H2 = H1 // 2
        D_out = 2
        f1 = MLP(D_in=D_in, H1=H1, H2=H2, D_out=D_out, dropout=dropout).cuda()

        f1_optimizer = optim.Adam(f1.parameters(),
                                  lr=learning_rate,
                                  weight_decay=5e-3)

        std = train_loader.dataset.X.std(axis=0).reshape(-1, 1)
        mean = train_loader.dataset.X.mean(axis=0).reshape(-1, 1)
        prior_info = np.concatenate((std, mean), axis=1)
        prior_info = torch.FloatTensor(prior_info).cuda()

        f2 = LinearModel(D_in=prior_info.shape[1], D_out=1).cuda()
        f2_optimizer = optim.Adam(f2.parameters(), lr=prior_learning_rate)
        APExp = egexplainer.VariableBatchExplainer(train_loader.dataset)

        losses_with_prior.append(
            train_with_learned_prior(f1, f2, f1_optimizer, f2_optimizer,
                                     CrossEntropyLoss(), train_loader,
                                     valid_loader, test_loader, patience,
                                     APExp, prior_info))

    losses_no_prior = np.array(losses_no_prior)
    losses_with_prior = np.array(losses_with_prior)

    no_prior_mean_losses.append(losses_no_prior.mean())
    with_prior_mean_losses.append(losses_with_prior.mean())
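
To judge whether the learned prior helps beyond run-to-run noise, a paired test over the per-run mean losses is a reasonable follow-up (illustrative; assumes both lists collect one value per run):

from scipy.stats import ttest_rel

t_stat, p_value = ttest_rel(no_prior_mean_losses, with_prior_mean_losses)
print('paired t-test: t={:.3f}, p={:.3f}'.format(t_stat, p_value))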