import argparse

import numpy as np
import torch

from models import StructuralModel

parser = argparse.ArgumentParser()
# The snippet starts mid-call and the flag's name is cut off; '--proposed'
# is a placeholder for whatever the original file actually names it.
parser.add_argument('--proposed',
                    action='store_true',
                    default=False,
                    help='use proposed method')
parser.add_argument('--N', type=int, default=10, help='num units')
parser.add_argument('--max_adaptation_steps',
                    type=int,
                    default=500,
                    help='maximum number of steps in adaptation.')
args = parser.parse_args()

# fix random seeds
torch.manual_seed(4)
np.random.seed(4)

N = args.N
model = StructuralModel(N, dtype=torch.float64)

# Two optimizers on two timescales: SGD adapts the conditional modules,
# while RMSprop updates only the structure logit model.w in the meta step.
optimizer = torch.optim.SGD(model.modules_parameters(), lr=1e-1)
meta_optimizer = torch.optim.RMSprop([model.w], lr=1e-2)

num_runs = 100
num_training = 1
num_transfer = args.max_adaptation_steps
num_gradient_steps = 2

train_batch_size = 1000
transfer_batch_size = 10

# Traces indexed by (run, training episode, adaptation step): the structure
# belief (alphas), accuracy (accs), and timing (times).
alphas = np.zeros((num_runs, num_training, num_transfer))
accs = np.zeros((num_runs, num_training, num_transfer))
times = np.zeros((num_runs, num_training, num_transfer))
# ---------------------------------------------------------------------------
# Next snippet: the same bivariate experiment with hard-coded settings (N = 20).
import numpy as np
from tqdm import tnrange
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from util import generate_data_categorical
from models import StructuralModel

N = 20
model = StructuralModel(N, dtype=torch.float64)

optimizer = torch.optim.SGD(model.modules_parameters(), lr=1e-1)
meta_optimizer = torch.optim.RMSprop([model.w], lr=1e-2)

num_runs = 1  # 10
num_training = 1  # 100
num_transfer = 1000
num_gradient_steps = 2
batch_size = 100

train_batch_size = 1000
transfer_batch_size = 10

alphas = np.zeros((num_runs, num_training, num_transfer))

for j in range(num_runs):
    model.w.data.zero_()
    for i in tnrange(num_training, leave=False):
        # Step 1: Sample a joint distribution before intervention
        pi_A_1 = np.random.dirichlet(np.ones(N))
        pi_B_A = np.random.dirichlet(np.ones(N), size=N)
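
# ---------------------------------------------------------------------------
# util.py is not included in these snippets, so here is a minimal sketch of
# what the two-variable form of generate_data_categorical plausibly does,
# judging only from how it is called above (ancestral sampling: A ~ pi_A,
# then B ~ pi_B_A[A]). Treat the return format as an assumption, not the
# project's actual implementation.
import numpy as np

def generate_data_categorical_sketch(num_samples, pi_A, pi_B_A):
    """Draw A ~ Categorical(pi_A), then B ~ Categorical(pi_B_A[A])."""
    N = pi_A.shape[0]
    A = np.random.choice(N, size=num_samples, p=pi_A)
    B = np.array([np.random.choice(N, p=pi_B_A[a]) for a in A])
    return np.stack([A, B], axis=1)  # (num_samples, 2) integer pairs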
# ---------------------------------------------------------------------------
# Next snippet: compares the causal and non-causal models episode by episode.
import torch
import tqdm
import matplotlib.pyplot as plt
import numpy as np

from util import generate_data_categorical, plot_loss
from models import StructuralModel

N = 10
model = StructuralModel(N, dtype=torch.float64)

num_episodes = 1000
batch_size = 100  # 1
num_test = 10000
num_training = 10  # 100
num_transfers = 10  # 100

optimizer = torch.optim.SGD(model.modules_parameters(), lr=1.)

losses = np.zeros((2, num_training, num_transfers, num_episodes))
'''
Experiment plan (a sketch of these steps follows this docstring):
  1. Initialize the causal and non-causal models
  2. Randomize P(A) and P(B|A)
  3. Generate data from P(A) and P(B|A):
       generate_data_categorical(num_test, pi_A_1, pi_B_A)
  4. Train the causal and non-causal models on it
  5. Intervene: replace P(A) with P2(A)
  6. Regenerate data from P2(A) and the unchanged P(B|A)
  7. Meta-learn on the new data
'''
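
# A minimal sketch of the seven steps above, using only the API visible in
# these snippets: StructuralModel exposes modules_parameters(), a structure
# logit w, and compute_losses(x) returning per-hypothesis log-likelihoods
# (two in the bivariate case). The mixture-style meta-objective below is
# illustrative, not the repository's exact implementation.
def meta_learning_sketch(model, N, num_train=1000, num_adapt=10):
    optimizer = torch.optim.SGD(model.modules_parameters(), lr=1e-1)
    meta_optimizer = torch.optim.RMSprop([model.w], lr=1e-2)

    # Steps 2-4: sample a joint distribution and fit both hypotheses to it.
    pi_A_1 = np.random.dirichlet(np.ones(N))
    pi_B_A = np.random.dirichlet(np.ones(N), size=N)
    x_train = generate_data_categorical(num_train, pi_A_1, pi_B_A)
    model.zero_grad()
    loss = sum(-torch.mean(l) for l in model.compute_losses(x_train))
    loss.backward()
    optimizer.step()

    # Steps 5-6: intervene on the cause only (new P(A), same P(B|A)).
    pi_A_2 = np.random.dirichlet(np.ones(N))
    x_transfer = generate_data_categorical(num_adapt, pi_A_2, pi_B_A)

    # Step 7: reward whichever hypothesis explains the post-intervention
    # data better, with sigmoid(w) as the belief that A -> B is correct.
    model.zero_grad()
    log_liks = [torch.mean(l) for l in model.compute_losses(x_transfer)]
    alpha = torch.sigmoid(model.w)
    meta_loss = -(alpha * log_liks[0] + (1.0 - alpha) * log_liks[1])
    meta_loss.backward()
    meta_optimizer.step()
    return torch.sigmoid(model.w).item()

# e.g. belief = meta_learning_sketch(model, N) after the setup above.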

# Everything from here to the end of this snippet is an earlier version of
# the script, disabled by wrapping it in a triple-quoted string.
'''
import torch
import matplotlib.pyplot as plt
from tqdm import tnrange, tqdm_notebook

from causal_meta.utils.data_utils import generate_data_categorical
from models import StructuralModel

parser = argparse.ArgumentParser(description='Compositional Instructions.')
parser.add_argument('--N', type=int, default=10, help='num units')
args = parser.parse_args()

# fix data
np.random.seed(4)

N = args.N
model = StructuralModel(N, dtype=torch.float64)

num_episodes = 100
batch_size = 100 # 1
num_test = 10000
num_training = 10 # 100
num_transfers = 10 # 100

optimizer = torch.optim.SGD(model.modules_parameters(), lr=1.)

losses = np.zeros((2, num_training, num_transfers, num_episodes))

for k in tnrange(num_training):
    pi_A_1 = np.random.dirichlet(np.ones(N))
    pi_B_A = np.random.dirichlet(np.ones(N), size=N)
    for j in tnrange(num_transfers, leave=False):
        # (the remainder of this disabled block is truncated in the snippet)
'''
# ---------------------------------------------------------------------------
# Next snippet: a three-variable chain A -> B -> C (note pi_C_B below).
import torch
import tqdm
import matplotlib.pyplot as plt
import numpy as np

from util import generate_data_categorical, plot_loss
from models import StructuralModel

N = 10
model = StructuralModel(N, dtype=torch.float64)

num_episodes = 150
batch_size = 100  # 1
num_test = 5000
num_training = 3  # 10  # 100
num_transfers = 5  # 100

optimizer = torch.optim.SGD(model.modules_parameters(), lr=0.1)

losses = np.zeros((model.n_models, num_training, num_transfers, num_episodes))

for k in tqdm.trange(num_training):
    pi_A_1 = np.random.dirichlet(np.ones(N))
    pi_B_A = np.random.dirichlet(np.ones(N), size=N)
    pi_C_B = np.random.dirichlet(np.ones(N), size=N)

    for w in range(5000):
        x_transfer = generate_data_categorical(batch_size, pi_A_1, pi_B_A,
                                               pi_C_B)
        model.zero_grad()
        loss = sum([-torch.mean(l) for l in model.compute_losses(x_transfer)])
        # The snippet is truncated here; the usual completion is a gradient
        # step on the summed negative log-likelihood:
        loss.backward()
        optimizer.step()

# ---------------------------------------------------------------------------
# Next snippet: two StructuralModel instances trained side by side.
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import tqdm

from util import generate_data_categorical, plot_loss
from models import StructuralModel

N = 5
num_episodes = 200
batch_size = 50 # 1
num_test = batch_size
num_training = 1 # 100
num_transfers = 10 # 100

model = StructuralModel(N, batch_size, dtype=torch.float64)
model1 = StructuralModel(N, batch_size, dtype=torch.float64)
optimizer = torch.optim.SGD(model.modules_parameters(), lr=0.1)
optimizer1 = torch.optim.SGD(model1.modules_parameters(), lr=0.1)
losses = np.zeros((2, num_training, num_transfers, num_episodes))

# Loss traces for the three hypotheses compared in this snippet (names taken
# from the dict keys): marginal-conditional (causal factorization), joint,
# and non-causal.
trainingLoss = {
    'marginalConditional': [],
    'joint': [],
    'nonCausal': [],
}
for k in tqdm.trange(num_training):
    pi_A_1 = np.random.dirichlet(np.ones(N))
    pi_B_A = np.random.dirichlet(np.ones(N), size=N)
    # Joint P(A = a, B = b) = P(B = b | A = a) * P(A = a): scale each row of
    # pi_B_A by the matching entry of pi_A_1 (broadcast over rows, not columns).
    pi_all = pi_B_A * pi_A_1[:, None]
    for w in range(int(1E3)):
        pass  # the loop body is truncated in this snippet
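    # Illustrative sanity check (not in the original snippet): a valid joint
    # sums to 1, and its row sums recover the marginal P(A).
    assert np.isclose(pi_all.sum(), 1.0)
    assert np.allclose(pi_all.sum(axis=1), pi_A_1)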

# ---------------------------------------------------------------------------
# Example 7: map Cart3D aerodynamic loads onto a Nastran structural model.
# The snippet omits its imports. The stdlib ones are added below; Cart3DReader,
# AeroModel, StructuralModel, LoadMapping, and log are provided by pyNastran's
# cart3d-to-Nastran load-mapping application, so their exact import paths are
# not reproduced here.
import os
import sys
from time import time

from pyNastran.bdf.bdf import BDF
def run_map_loads(cart3dGeom='Components.i.triq',
                  bdfModel='fem.bdf',
                  bdfModelOut='fem.loads.out'):
    assert os.path.exists(bdfModel), "%r doesn't exist" % bdfModel

    t0 = time()
    mesh = Cart3DReader()
    half_model = cart3dGeom + '_half'

    result_names = ['Cp', 'rho', 'rhoU', 'rhoV', 'rhoW', 'E']
    (nodes, elements, regions,
     loads) = mesh.read_cart3d(cart3dGeom, result_names=result_names)
    #Cp = loads['Cp']
    (nodes, elements, regions, loads) = mesh.make_half_model(nodes,
                                                             elements,
                                                             regions,
                                                             loads,
                                                             axis='y')

    Cp = loads['Cp']
    #(nodes, elements, regions, Cp) = mesh.renumber_mesh(nodes, elements, regions, Cp)
    mesh.write_cart3d(half_model, nodes, elements, regions, loads)

    Mach = 0.825
    pInf = 499.3  # psf, alt=35k (per Schaufele p. 11)
    pInf = pInf / 144.  # convert psf -> psi
    # dynamic pressure for compressible flow: q = (gamma/2) * p * M^2, gamma = 1.4
    qInf = 1.4 / 2. * pInf * Mach**2.
    aeroModel = AeroModel(nodes, elements, Cp, pInf, qInf)
    log.info("elements[1] = %s" % elements[1])
    del elements, nodes, Cp

    fem = BDF(debug=True, log=log)
    fem.read_bdf(bdfModel)
    sys.stdout.flush()

    propertyRegions = [
        1, 1101, 1501, 1601, 1701, 1801, 1901, 2101, 2501, 2601, 2701, 2801,
        2901, 10103, 10201, 10203, 10301, 10401, 10501, 10601, 10701, 10801,
        10901, 20103, 20203, 20301, 20401, 20501, 20601, 20701, 20801, 20901,
        701512, 801812
    ]

    # 1 inboard
    # 1000s upper - lower inboard
    # 2000s lower - lower inboard
    # big - fin

    structuralModel = StructuralModel(fem, propertyRegions)

    mapper = LoadMapping(aeroModel, structuralModel)
    t1 = time()
    mapper.set_flight_condition(pInf, qInf)
    mapper.setOutput(bdffile=bdfModelOut, loadCase=1)
    log.info("setup time = %g sec; %g min" % (t1 - t0, (t1 - t0) / 60.))

    mapper.build_mapping_matrix(debug=False)
    t2 = time()
    log.info("mapping matrix time = %g sec; %g min" % (t2 - t1,
                                                       (t2 - t1) / 60.))

    mapper.mapLoads()
    t3 = time()
    log.info("map loads time = %g sec" % (t3 - t2))
    log.info("total time = %g min" % ((t3 - t0) / 60.))