Example #1
0
# Requires numpy, torch, and the project's `load` module; `the_linear` and
# `train` are defined elsewhere in the source file.
import numpy as np
import torch
from torch.autograd import Variable


def all_of_it(sim, frac):
    use_cuda = torch.cuda.is_available()
    CUDA_GPU = 0
    # get data
    print('sim: ' + str(sim))
    print('frac: ' + str(frac))
    X, y, _, _, _, _, _ = load.get_data(sim, 0)
    (n, d) = X.shape
    X_t = Variable(torch.FloatTensor(X))
    y_t = Variable(torch.FloatTensor(y))
    if use_cuda:
        X_t = X_t.cuda(CUDA_GPU)
        y_t = y_t.cuda(CUDA_GPU)
    n_epochs = 100
    # model
    #model = nn.Sequential(nn.Linear(d,1,bias=False))
    model = the_linear(d)
    if use_cuda:
        model = model.cuda(CUDA_GPU)
    model = train(model, n_epochs, X_t, y_t)
    weights = list(model.parameters())[0]
    weights = weights.detach().cpu().numpy()
    if frac:
        np.savez_compressed('school_weights_linear_mse_sim' + str(sim) +
                            '_max1_frac',
                            w=weights)
    else:
        np.savez_compressed('school_weights_linear_mse_sim' + str(sim) +
                            '_max1',
                            w=weights)
Example #2
0
# Requires numpy, gurobipy (as gb), pdb, and time, plus the project's `load`
# module and `timeSince` helper, imported elsewhere in the source file.
def all_of_it(sim, frac, TT):
    #args = get_arguments()

    # load data
    #sim = 5
    #frac = args.frac
    print('sim=' + str(sim))
    print('frac=' + str(frac))
    print('TT=' + str(TT))
    _, _, S, X, A, A_oh, neigh = load.get_data(sim, 0)
    n = S.shape[0]
    da = A.shape[-1]
    if frac:
        DICT = np.load('school_weights_linear_mse_sim' + str(sim) +
                       '_max1_frac.npz')
    else:
        DICT = np.load('school_weights_linear_mse_sim' + str(sim) +
                       '_max1.npz')
    w = DICT['w'].T

    # get weight matrix, for x2
    w1 = w[0:da, :]
    w2 = w[da:da * 2, :]
    w3 = w[da * 2:da * 3, :]
    w4 = w[da * 3:da * 4, :]
    neigh = neigh.astype(int)
    # we just care about int_on for now
    x1 = X[:, 0]
    x3 = X[:, 2]

    A_ix = np.argmax(A, axis=1)

    bit_mask = np.zeros([2**neigh.shape[1], neigh.shape[1]])
    ints = np.arange(2**neigh.shape[1], dtype=int)  # np.int was removed in NumPy 1.24
    for i in range(neigh.shape[1]):
        bit_mask[:, i] = ints % 2
        ints //= 2
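    # bit_mask now enumerates every on/off pattern over a student's
    # neighbours: row r is the binary expansion of r, so all 2**m masks
    # over the m neighbourhood slots appear exactly once.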

    #Change me
    #scale=np.random.normal(0.5,1,n)# Junk scalar only used for stub of f
    #
    #def f(index,mask,constraint=0):
    #    #print('index=' + str(index))
    #    return scale[index]*mask.sum()
    #
    #def count_f(index,mask,constraint):
    #    #print('i=' + str(index))
    #    #print('const=' + str(constraint))
    #    return scale[index//(constraint+1)]*mask.sum()

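    # Expected outcome for student `index` when the intervention pattern over
    # their neighbourhood is `mask`, using the linear weights loaded above.
    # EY takes a discrete group index a; EY_inner takes a (fractional)
    # group-membership row and dots it with each weight block.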
    def EY(index, mask, a):
        neighS = S[index, neigh[index, :]]
        first = w1[a] * np.max(neighS * x1[neigh[index, :]])
        second = w2[a] * np.max(neighS * mask)
        third = w3[a] * x3[index]
        fourth = w4[a]
        return first + second + third + fourth

    def EY_inner(index, mask, a):
        neighS = S[index, neigh[index, :]]
        first = np.dot(a, w1) * np.max(neighS * x1[neigh[index, :]])
        second = np.dot(a, w2) * np.max(neighS * mask)
        third = np.dot(a, w3) * x3[index]
        fourth = np.dot(a, w4)
        return first + second + third + fourth

    def f(index, mask, constraint=0):
        if frac:
            return EY_inner(index, mask, A[np.newaxis, index, :])
        else:
            return EY(index, mask, A_ix[index])

    def count_f(index, mask, constraint):
        eya = EY(index, mask, constraint)
        return eya

    def get_weights(i, newf=f, constraint=0):
        weights = np.empty(bit_mask.shape[0])
        for r in range(bit_mask.shape[0]):
            weights[r] = newf(i, bit_mask[r], constraint)
        return weights
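    # get_weights evaluates newf at every one of the 2**m neighbourhood
    # patterns in bit_mask; these values become the objective coefficients
    # of the aux variables created in add_constrained_aux below.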

    #const = np.zeros((n,3))
    #for i in range(n):
    #    for a in range(3):
    #        const[i,a] = count_f(i,np.ones((neigh.shape[1],)),a)
    #
    all_times = []

    #TAUS = np.linspace(0.04, 0.16, 20)
    #TAU_DIFF = TAUS[1]-TAUS[0]
    #bb = [0.04-(a*TAU_DIFF) for a in np.arange(1,2)] #  we don't go all the way to 6 because it breaks for 2
    #bb = np.flip(bb, axis=0)
    #bb = np.array(bb)
    #cc = [0.16+(a*TAU_DIFF) for a in np.arange(1,6)]
    #cc = np.array(cc)
    #dd = [0.16+(a*TAU_DIFF) for a in np.arange(20,30)]
    #dd = [99999]
    #TAUS = [bb[0]]#np.concatenate((bb,cc))
    # NULL INTERVENTION
    def obj_temp(index, a):
        neighS = S[index, neigh[index, :]]
        first = np.dot(a, w1) * np.max(neighS * x1[neigh[index, :]])
        #second = np.dot(a,w2)*np.max(neighS*mask)
        third = np.dot(a, w3) * x3[index]
        fourth = np.dot(a, w4)
        return first + third + fourth

    def obj_counter(index, a):
        neighS = S[index, neigh[index, :]]
        first = w1[a] * np.max(neighS * x1[neigh[index, :]])
        #second = w2[a]*np.max(neighS*mask)
        third = w3[a] * x3[index]
        fourth = w4[a]
        return first + third + fourth

    obj = 0
    const = np.zeros((n, 3))
    for i in range(n):
        obj_true = obj_temp(i, A[np.newaxis, i, :])
        const[i, 0] = obj_counter(i, 0)
        const[i, 1] = obj_counter(i, 1)
        const[i, 2] = obj_counter(i, 2)
        obj += obj_true
    pdb.set_trace()

    DICT = np.load('results/TAUS_frac.npz')
    TAUS = DICT['TAUS']
    print('new')
    print('TAUS=' + str(TAUS))
    for t in range(TT):
        print('t=' + str(t))
        for Tau in TAUS:  #np.linspace(0.04, 0.16, 20):
            print('running tau=' + str(Tau))
            start = time.time()
            #Now build variables
            model = gb.Model()

            interventions = model.addVars(
                np.arange(neigh.shape[0]),
                lb=0,  #np.zeros(neigh.shape[0]),
                ub=1,  #np.ones(neigh.shape[0]),
                vtype=gb.GRB.BINARY)
            K = 25
            expr = gb.LinExpr()
            for i in range(len(interventions)):
                expr += interventions[i]
            model.addConstr(expr, gb.GRB.LESS_EQUAL, K, "k")
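            # budget constraint: at most K = 25 students may be selected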

            counter_const = 0

            def add_constrained_aux(index, tau=False):
                #init=z.copy()
                #init[-1]=1
                weights = get_weights(index)

                counter = np.empty((3, weights.shape[0]))
                counter[:] = weights[np.newaxis]
                for i in range(3):
                    counter[i] -= get_weights(index, count_f, i)
                aux = model.addVars(
                    np.arange(bit_mask.shape[0]),  #2**neigh.shape[1]),
                    lb=0,
                    ub=1,
                    obj=weights,
                    vtype=gb.GRB.CONTINUOUS)
                model.update()
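                # Linearisation: aux[i] may be 1 only when every intervention
                # bit agrees with row i of bit_mask, and aux.sum() == 1 then
                # pins aux to the indicator of the realised pattern, so the
                # objective picks up exactly weights[i].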
                for i in range(bit_mask.shape[0]):
                    for j in range(bit_mask.shape[1]):
                        if bit_mask[i, j]:
                            model.addConstr(
                                aux[i] <= interventions[neigh[index, j]])
                        else:
                            model.addConstr(
                                aux[i] <= 1 - interventions[neigh[index, j]])
                model.addConstr(aux.sum() == 1)
                if tau is not False:
                    for i in range(3):
                        model.addConstr(
                            sum(aux[r] * counter[i, r]
                                for r in range(weights.shape[0])) <= tau)
                return aux

            aux = [
                add_constrained_aux(i, tau=Tau)
                for i in range(neigh.shape[0])
            ]

            model.setObjective(model.getObjective(), gb.GRB.MAXIMIZE)
            model.optimize()
            end = timeSince(start)
            all_times.append(end)

            if model.status == gb.GRB.Status.OPTIMAL:
                sol = [interventions[i].X for i in range(len(interventions))]
                sol = np.array(sol)
                sol = np.round(sol)
                sol = sol.astype(bool)
            else:
                print('did not work')
                sol = []
            if frac:
                filename = 'results/max_fair_k' + str(K) + '_' + str(
                    Tau) + '_sim' + str(sim) + '_frac'
                timename = 'results/time_max_k' + str(K) + '_sim' + str(
                    sim) + '_t' + str(t + 1) + '_frac'
            else:
                filename = 'results/max_fair_k' + str(K) + '_' + str(
                    Tau) + '_sim' + str(sim)
                timename = 'results/time_max_k' + str(K) + '_sim' + str(
                    sim) + '_t' + str(t + 1)
            model.write(filename + '.lp')
            np.savez_compressed(filename, sol=sol)
            print('done!')
            print('A dist')
            print(np.sum(A_oh[sol, :], axis=0))
        print(all_times)
        np.savez_compressed(timename, times=all_times,
                            tau=TAUS)  #np.linspace(0.04, 0.16, 20))
Example #3
0
import tensorflow as tf
import train
import load
import convert

with tf.name_scope('inputs'):
    x_input = tf.placeholder(
        tf.float32,
        shape=[None, vc1num])  # vc1num: input feature count, defined elsewhere
    y_input = tf.placeholder(tf.float32, shape=[None, 2])
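# Note: tf.placeholder is TensorFlow 1.x API; under TensorFlow 2 this snippet
# would need `import tensorflow.compat.v1 as tf` and tf.disable_v2_behavior().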

data_dir = ''  # renamed from `dir` to avoid shadowing the builtin
train_dir = data_dir + 'train'
test_dir = data_dir + 'test'
x_train, y_train = load.get_data(train_dir, 5)
x_test, y_test = load.get_data(test_dir, 5)

train.train_neural_network(x_train,
                           y_train,
                           x_test,
                           y_test,
                           epochs=10,
                           batch_size=1,
                           learning_rate=0.3)
Example #4
0
# Same dependencies as Example #2: numpy, gurobipy (as gb), pdb, and time,
# plus the project's `load` module and `timeSince` helper.
def all_of_it(sim, TT, constraint_type):

    # 0. load data
    print('sim=' + str(sim))
    print('TT=' + str(TT))
    if constraint_type == "minority":
        _, _, S, X, A, A_oh, neigh = load.get_data(sim, 1)
    else:
        _, _, S, X, A, A_oh, neigh = load.get_data(sim, 0)
    A_ix = np.argmax(A, axis=1)

    n = S.shape[0]
    da = A.shape[-1]

    # 1. load weights of causal model
    DICT = np.load('school_weights_linear_mse_sim' + str(sim) +
                   '_max1_frac.npz')
    w = DICT['w'].T
    w1 = w[0:da, :]
    w2 = w[da:da * 2, :]
    w3 = w[da * 2:da * 3, :]
    w4 = w[da * 3:da * 4, :]
    neigh = neigh.astype(int)
    x1 = X[:, 0]
    x3 = X[:, 2]

    bit_mask = np.zeros([2**neigh.shape[1], neigh.shape[1]])
    ints = np.arange(2**neigh.shape[1], dtype=int)  # np.int was removed in NumPy 1.24
    for i in range(neigh.shape[1]):
        bit_mask[:, i] = ints % 2
        ints //= 2

    def EY_inner(index, mask, a):
        neighS = S[index, neigh[index, :]]
        first = np.dot(a, w1) * np.max(neighS * x1[neigh[index, :]])
        second = np.dot(a, w2) * np.max(neighS * mask)
        third = np.dot(a, w3) * x3[index]
        fourth = np.dot(a, w4)
        return first + second + third + fourth

    def f(index, mask, constraint=0):
        return EY_inner(index, mask, A[np.newaxis, index, :])

    def get_weights(i, newf=f, constraint=0):
        weights = np.empty(bit_mask.shape[0])
        for r in range(bit_mask.shape[0]):
            weights[r] = newf(i, bit_mask[r], constraint)
        return weights

    all_times = []

    # NULL INTERVENTION
    def obj_temp(index, a):
        neighS = S[index, neigh[index, :]]
        first = np.dot(a, w1) * np.max(neighS * x1[neigh[index, :]])
        #second = np.dot(a,w2)*np.max(neighS*mask)
        third = np.dot(a, w3) * x3[index]
        fourth = np.dot(a, w4)
        return first + third + fourth

    def obj_counter(index, a):
        neighS = S[index, neigh[index, :]]
        first = w1[a] * np.max(neighS * x1[neigh[index, :]])
        #second = w2[a]*np.max(neighS*mask)
        third = w3[a] * x3[index]
        fourth = w4[a]
        return first + third + fourth

    obj = 0
    const = np.zeros((n, 3))
    for i in range(n):
        obj_true = obj_temp(i, A[np.newaxis, i, :])
        const[i, 0] = obj_counter(i, 0)
        const[i, 1] = obj_counter(i, 1)
        const[i, 2] = obj_counter(i, 2)
        obj += obj_true
    print('null allocation')
    print('obj_true: ' + str(obj))
    print('max_const: ' + str(np.max(const)))
    if constraint_type == "minority":
        filename = 'results/null_sim' + str(sim) + '_frac_minority'
        np.savez_compressed(filename, obj=obj, const=const)
    else:
        filename = 'results/null_sim' + str(sim) + '_frac'
        np.savez_compressed(filename, obj=obj, const=const)
    pdb.set_trace()

    for t in range(TT):
        print('t=' + str(t))
        start = time.time()
        #Now build variables
        model = gb.Model()

        interventions = model.addVars(
            np.arange(neigh.shape[0]),
            lb=0,  #np.zeros(neigh.shape[0]),
            ub=1,  #np.ones(neigh.shape[0]),
            vtype=gb.GRB.BINARY)
        K = 25
        expr = gb.LinExpr()
        for i in range(n):
            expr += interventions[i]
        model.addConstr(expr, gb.GRB.LESS_EQUAL, K, "k")

        if constraint_type == "parity":
            parity0 = gb.LinExpr()
            for i in range(n):
                if A_oh[i, 0] != 0:
                    parity0 += interventions[i]
            model.addConstr(parity0, gb.GRB.EQUAL, K // 3, "parity0")
            parity1 = gb.LinExpr()
            for i in range(n):
                if A_oh[i, 1] != 0:
                    parity1 += interventions[i]
            model.addConstr(parity1, gb.GRB.EQUAL, K // 3, "parity1")
            parity2 = gb.LinExpr()
            for i in range(n):
                if A_oh[i, 2] != 0:
                    parity2 += interventions[i]
            model.addConstr(parity2, gb.GRB.EQUAL, K // 3, "parity2")
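            # demographic parity: each of the three groups receives exactly
            # K // 3 of the K interventions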

        def add_constrained_aux(index):
            weights = get_weights(index)

            ##counter=np.empty((3,weights.shape[0]))
            ##counter[:]=weights[np.newaxis]
            ##for i in range(3):
            ##    counter[i]-=get_weights(index,count_f,i)
            aux = model.addVars(
                np.arange(bit_mask.shape[0]),  #2**neigh.shape[1]),
                lb=0,
                ub=1,
                obj=weights,
                vtype=gb.GRB.CONTINUOUS)
            model.update()
            for i in range(bit_mask.shape[0]):
                for j in range(bit_mask.shape[1]):
                    if bit_mask[i, j]:
                        model.addConstr(
                            aux[i] <= interventions[neigh[index, j]])
                    else:
                        model.addConstr(
                            aux[i] <= 1 - interventions[neigh[index, j]])
            model.addConstr(aux.sum() == 1)
            ##if tau is not False:
            ##    for i in range(3):
            ##        model.addConstr(sum(aux[f]*counter[i,f] for f in range(weights.shape[0]))<=tau)
            return aux

        aux = [add_constrained_aux(i) for i in range(neigh.shape[0])]

        model.setObjective(model.getObjective(), gb.GRB.MAXIMIZE)
        model.optimize()
        end = timeSince(start)
        all_times.append(end)

        if model.status == gb.GRB.Status.OPTIMAL:
            sol = [interventions[i].X for i in range(len(interventions))]
            sol = np.array(sol)
            sol = np.round(sol)
            sol = sol.astype(bool)
        else:
            print('did not work')
            sol = []
        if constraint_type == "parity":
            filename = 'results/max_parity_k' + str(K) + '_sim' + str(
                sim) + '_frac'
            timename = 'results/time_parity_k' + str(K) + '_sim' + str(
                sim) + '_t' + str(t + 1) + '_frac'
        elif constraint_type == "minority":
            filename = 'results/max_minority_k' + str(K) + '_sim' + str(
                sim) + '_frac'
            timename = 'results/time_minority_k' + str(K) + '_sim' + str(
                sim) + '_t' + str(t + 1) + '_frac'
        else:
            print('err')
            pdb.set_trace()
        model.write(filename + '.lp')
        np.savez_compressed(filename, sol=sol)
        print('done!')
        print('A dist')
        print(np.sum(A_oh[sol, :], axis=0))
        print(all_times)
        np.savez_compressed(timename, times=all_times)
Example #5
0
File: gan.py Project: SAI990323/GAN
        return input


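# Requires torch and torch.nn (as nn); `device` and the `gen` class are
# defined in the part of gan.py truncated above. The discriminator below maps
# a 2-D sample to a real/fake probability via its Sigmoid output.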
class dis(nn.Module):
    def __init__(self):
        super(dis, self).__init__()
        self.dis = nn.Sequential(nn.Linear(2, 256), nn.ReLU(inplace=False),
                                 nn.Linear(256, 64), nn.ReLU(inplace=False),
                                 nn.Linear(64, 1), nn.Sigmoid())

    def forward(self, input):
        input = self.dis(input)
        return input


train_set, test_set = load.get_data()


def train(epoch=100, batch_size=100, input_size=2):
    GEN = gen(input_size).to(device).double()
    DIS = dis().to(device).double()
    optimizer_g = torch.optim.RMSprop(GEN.parameters(), lr=0.0001)
    optimizer_d = torch.optim.RMSprop(DIS.parameters(), lr=0.0001)
    for ep in range(epoch):
        total_gen_loss = 0
        total_dis_loss = 0
        for j in range(len(train_set) // batch_size):
            func = torch.from_numpy(train_set[j * batch_size:(j + 1) *
                                              batch_size]).double().to(device)
            input = torch.randn(batch_size, input_size).to(device).double()
            noise = GEN(input)
Example #6
0
File: finger.py Project: wolfiex/gcn
    None

# Load data
#adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(FLAGS.dataset)
########################################
'''['vec_smiles',
 'smiles',
 'finger_mqn',
 'embed_fn ',#space!!
 'finger_maccs',
 'names',
 'vec_spec',
 'fngroups']'''

import numpy as np  # used below (np.array) but missing from the snippet
import load

adj, finger = load.get_data()

y_val = np.array(finger['fngroups'])  #[[i] for i in finger['fngroups']])

#np.repeat(True,len(y_val))
from scipy import sparse
features = finger['fngroups']

#features = sparse.lil_matrix(finger['fngroups'])

# Load data
#adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(FLAGS.dataset)
features = sparse.lil_matrix(features)

y_test = y_train = y_val
train_mask = val_mask = test_mask = y_val.sum(axis=1) > -10
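# Note: for 0/1 functional-group labels, y_val.sum(axis=1) > -10 is always
# True, so the same label matrix and an all-True mask are reused for the
# train, validation, and test splits.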
Example #7
0
def main():
    # Requires: time, json, torch (with nn and optim), torchvision.models as
    # models, collections.OrderedDict, and the project's get_input_args and
    # get_data helpers, imported elsewhere in the source file.
    start_time = time()  # time the whole program

    in_arg = get_input_args()

    # Retrieve data with a single call (the original called get_data four
    # times, reloading the dataset for each return value)
    data = get_data(in_arg.data_dir)
    train_data, trainloader = data[0], data[1]
    validloader, testloader = data[2], data[3]

    # Obtain labels
    with open('cat_to_name.json', 'r') as f:
        cat_to_name = json.load(f)

    # Load pretrained backbones (all six are instantiated up front; only the
    # one named by in_arg.arch is trained below)
    resnet18 = models.resnet18(pretrained=True)
    alexnet = models.alexnet(pretrained=True)
    squeezenet = models.squeezenet1_0(pretrained=True)
    vgg16 = models.vgg16(pretrained=True)
    densenet = models.densenet161(pretrained=True)
    inception = models.inception_v3(pretrained=True)

    models_dict = {
        'resnet': resnet18,
        'alexnet': alexnet,
        'squeezenet': squeezenet,
        'vgg16': vgg16,
        'densenet': densenet,
        'inception': inception
    }

    input_dict = {
        'resnet': 2048,
        'alexnet': 9216,
        'squeezenet': 512,
        'vgg16': 25088,
        'densenet': 1024,
        'inception': 2048
    }

    def training_model(model_name, hidden_units, epoch_number, learn_rate):
        """
        Trains a neural network using a pretrained model by:
        Defining a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
        Training the classifier layers using backpropagation using the pre-trained network to get the features
        Tracking the loss and accuracy on the validation set to determine the best hyperparameters

        """
        device = torch.device('cuda' if torch.cuda.is_available()
                              and in_arg.gpu else 'cpu')
        model = models_dict[model_name]
        input_units = input_dict[model_name]

        for param in model.parameters():
            param.requires_grad = False

        model.classifier = nn.Sequential(
            OrderedDict([('fc1', nn.Linear(input_units,
                                           hidden_units,
                                           bias=True)), ('relu1', nn.ReLU()),
                         ('dropout1', nn.Dropout(p=0.5)),
                         ('fc2', nn.Linear(hidden_units, 102, bias=True)),
                         ('output', nn.LogSoftmax(dim=1))]))

        print("classifier updated")
        criterion = nn.NLLLoss()
        optimizer = optim.Adam(model.classifier.parameters(), lr=learn_rate)

        model.to(device)

        epochs = epoch_number
        steps = 0
        running_losses = 0
        testing_losses = []
        training_losses = []
        print_every = 10  #how many steps we're going to go before we print validation loss

        print("assignments complete, commencing training")

        for e in range(epochs):
            model.train()
            for inputs, labels in trainloader:
                steps += 1
                inputs, labels = inputs.to(device), labels.to(device)

                optimizer.zero_grad()
                logps = model.forward(inputs)
                loss = criterion(logps, labels)
                loss.backward()
                optimizer.step()

                running_losses += loss.item()

                if steps % print_every == 0:  # every `print_every` steps, pause training to validate
                    valid_loss = 0
                    valid_accuracy = 0
                    model.eval()
                    with torch.no_grad():
                        for inputs, labels in validloader:
                            inputs, labels = inputs.to(device), labels.to(
                                device)

                            logps = model.forward(inputs)
                            batch_loss = criterion(logps, labels)

                            valid_loss += batch_loss.item()

                            ps = torch.exp(logps)
                            top_p, top_class = ps.topk(1, dim=1)
                            equals = top_class == labels.view(*top_class.shape)

                            valid_accuracy += torch.mean(
                                equals.type(torch.FloatTensor)).item()

                    print(
                        f"Epoch {e+1}/{epochs}.. "  #Keep track of where we are
                        f"Train loss: {running_losses/print_every: .3f}.. "  #Average of trainingloss
                        f"Validation loss: {valid_loss/len(validloader): .3f}.. "
                        #Denominator: How many batches are in valid dataset
                        #Numerator: Sum of test losses across batches
                        #Result: average test loss
                        f"Validation accuracy: {valid_accuracy/len(validloader): .3f}"
                    )  #Average accuracy

                    running_losses = 0
                    model.train()

        model.class_to_idx = train_data.class_to_idx
        print("Training Complete")

        print("Testing the model")

        #Test model
        testset_loss = 0
        test_accuracy = 0
        model.eval()
        with torch.no_grad():
            for inputs, labels in testloader:
                inputs, labels = inputs.to(device), labels.to(device)

                logps = model.forward(inputs)
                testset_loss += criterion(logps, labels).item()

                ps = torch.exp(logps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)

                test_accuracy += torch.mean(equals.type(
                    torch.FloatTensor)).item()

        model.class_to_idx = train_data.class_to_idx
        print(
            f"Epoch {e+1}/{epochs}.. "  #Keep track of where we are
            f"Test loss: {testset_loss/len(testloader): .3f}.. "
            #Denominator: How many batches are in valid dataset
            #Numerator: Sum of test losses across batches
            #Result: average test loss
            f"Test accuracy: {test_accuracy/len(testloader): .3f}"
        )  #Average accuracy

        return model, model.state_dict(
        ), model.classifier, model.class_to_idx, input_units

    trained_model = training_model(in_arg.arch, in_arg.hidden_units,
                                   in_arg.epoch_number, in_arg.learn_rate)
    #Save Checkpoint
    path = in_arg.save_dir

    torch.save(
        {
            'arch': in_arg.arch,
            'input': trained_model[4],
            'output': 102,
            'epochs': in_arg.epoch_number,
            'model_state_dict': trained_model[1],
            #'optimizer_state_dict':optimizer.state_dict(),
            'classifier': trained_model[2],
            'class_to_idx': trained_model[3]
        },
        path)

    print("Checkpoint saved")

    end_time = time()

    tot_time = end_time - start_time
    print(
        "\n** Total Elapsed Runtime:",
        str(int((tot_time / 3600))) + ":" + str(int(
            (tot_time % 3600) / 60)) + ":" + str(int((tot_time % 3600) % 60)))