Example #1
def execute_main(o, arguments):
    """ the main method (callable internally (pseudo_main(args)) or from command line) """

    # Orbital Elements
    planet_elements = Orb_Kepler(o.a_pl, o.e_pl, o.i_pl, o.M_pl, o.argw_pl,
                                 o.node_pl)
    binary_elements = Orb_Kepler(o.a_bin, o.e_bin, o.i_bin, o.M_bin,
                                 o.argw_bin, o.node_bin)

    # Star Masses
    mass_one = o.mass_bin * (1 - o.u_bin)
    mass_two = o.mass_bin * o.u_bin

    star_masses = quantities.AdaptingVectorQuantity()
    star_masses.append(mass_one)
    star_masses.append(mass_two)

    print(star_masses, o.a_pl)

    mkdir(o.dir)

    run_simulation(planet_elements,
                   binary_elements,
                   star_masses,
                   o.eta,
                   o.sim_time,
                   o.n_steps,
                   o.dir,
                   o.fout,
                   file_redir=o.file_redir)
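
None of these snippets define the mkdir helper they call. A minimal sketch consistent with how it is used here (create the directory tree, tolerate a directory that already exists, and, as the misc.mkdir calls in Example #3 suggest, return the path) could be:

import os

def mkdir(path):
    """Create `path` (including parents) if missing and return it."""
    os.makedirs(path, exist_ok=True)
    return path
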
Example #2
def experiments(N: int, lat_dim: int, exp_folder: str):
    exp_folder = os.path.join(curr_folder, exp_folder)
    mkdir(exp_folder)
    train_gamma_matrix, train_alpha_matrix, train_beta_matrix = [], [], []
    test_gamma_matrix, test_alpha_matrix, test_beta_matrix = [], [], []

    for i in range(N):
        one_vae_experiment(
            os.path.join(exp_folder, str(i)),
            lat_dim,
            train_gamma_matrix,
            train_alpha_matrix,
            train_beta_matrix,
            test_gamma_matrix,
            test_alpha_matrix,
            test_beta_matrix,
        )

    plot_rrh_matrices(
        train_gamma_matrix,
        train_alpha_matrix,
        train_beta_matrix,
        exp_folder,
        "het_train",
    )
    plot_rrh_matrices(
        test_gamma_matrix,
        test_alpha_matrix,
        test_beta_matrix,
        exp_folder,
        "het_test",
    )
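
Note that each per-run folder, os.path.join(exp_folder, str(i)), is created inside one_vae_experiment itself (Example #6), which calls the same mkdir helper before training.
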
Example #3
    def __init__(self, sess):
        self.sess = sess
        self.szs = config.SizeContainer()
        self.config = config.default_config
        self.batch_size = self.config.batch_size
        self.num_classes = self.config.num_classes
        self.conditioned = self.config.conditioned
        self.batch_norm = self.config.batch_norm
        self.is_training = self.config.is_training

        self.log_dir = misc.mkdir('outputs/logs/')
        self.ckpt_dir = misc.mkdir('outputs/ckpts/')
        self.dump_dir = misc.mkdir('outputs/dump/{}'.format(
            'train' if self.is_training else 'deploy'))

        self.image_aerial_holder = tf.placeholder(
            tf.float32, [self.batch_size, None, None, self.szs.C_src])
        self.image_ground_holder = tf.placeholder(
            tf.float32, [self.batch_size, None, None, self.szs.C_tar])
        self.label_ground_holder = tf.placeholder(
            tf.int32, [self.batch_size, None, None])

        self.build_model([
            self.image_aerial_holder, self.image_ground_holder,
            self.label_ground_holder
        ], self.is_training)
Example #4
    def __init__(self, input_filepath, tmpdir=None):
        if tmpdir is None:
            # Default temp directory uses Windows-style separators
            self.tmpdir = os.path.join(os.getcwd(), 'tmp\\split\\')
        else:
            self.tmpdir = tmpdir
        mkdir(resource_path(self.tmpdir))
        self.xml_tree = docx2xmltree(input_filepath)
        self.xml_body = self.xml_tree[0]
Example #5
    def act_auto(self):
        self.refresh_project_members()
        self.progressBar.setValue(0)
        mkdir(self.processed_dir)
        count = len(self.sections)
        for index, section in enumerate(self.sections):
            fillin = FillIn(os.path.join(self.split_dir, section + '.docx'))
            fillin.process(os.path.join(self.processed_dir, section + '.docx'))
            self.progressBar.setValue(int((index + 1) * 100 / count))
Example #6
def one_vae_experiment(
    exp_folder: str,
    lat_dim: int,
    train_gamma_matrix,
    train_alpha_matrix,
    train_beta_matrix,
    test_gamma_matrix,
    test_alpha_matrix,
    test_beta_matrix,
):

    mkdir(exp_folder)
    vae, optimizer = new_vae(device, lat_dim=lat_dim)

    (
        vae,
        optimizer,
        train_losses,
        test_train_losses,
        test_eval_losses,
    ) = train_vae(
        vae,
        cnn,
        optimizer,
        device,
        train_dataloader,
        test_dataloader,
        evaluate=True,
    )

    torch.save(vae.state_dict(), os.path.join(exp_folder, "vae.pth"))
    torch.save(optimizer.state_dict(), os.path.join(exp_folder, "adam.pth"))
    with open(os.path.join(exp_folder, "vae.txt"), "w+") as f:
        f.write(str(vae))

    plot_loss(train_losses, test_train_losses, test_eval_losses, exp_folder)

    # Compute RRH --------------------------------------------------------------

    # Test-set RRH goes into the test matrices, train-set RRH into the train matrices
    vae.eval()
    gammas, alphas, betas = calculate_rrh(vae, cnn, device, test_X, test_y)
    test_gamma_matrix.append(gammas)
    test_alpha_matrix.append(alphas)
    test_beta_matrix.append(betas)

    vae.train()
    gammas, alphas, betas = calculate_rrh(vae, cnn, device, train_X, train_y)
    train_gamma_matrix.append(gammas)
    train_alpha_matrix.append(alphas)
    train_beta_matrix.append(betas)
Example #7
def create_and_train_cnn(
    device, train_dataloader, test_dataloader, save_folder,
):
    cnn = ConvolutionalNeuralNet().to(device)
    optimizer = torch.optim.Adadelta(cnn.parameters(), lr=LR)

    scheduler = StepLR(optimizer, step_size=1, gamma=GAMMA)
    for epoch in range(1, CNN_EPOCH + 1):
        train_cnn(cnn, optimizer, device, train_dataloader)
        # test_cnn(cnn, device, test_dataloader, train=True)
        test_cnn(cnn, device, test_dataloader)
        scheduler.step()

    date = datetime.date.today().strftime("%m-%d")
    model_name = date + "_epoch=" + str(CNN_EPOCH) + ".pth"
    optimizer_name = date + "_adadelta" + ".pth"
    mkdir(save_folder)
    torch.save(cnn.state_dict(), os.path.join(save_folder, model_name))
    torch.save(optimizer.state_dict(), os.path.join(save_folder, optimizer_name))
    return cnn
Example #8
import odl
from odl.contrib import fom
from odl.solvers import CallbackPrintIteration, CallbackPrintTiming

import misc

#%% set parameters and create folder structure
filename = 'ml'

nepoch = 30
nepoch_target = 5000
datasets = ['fdg', 'amyloid10min']

tol_step = 1e-6
rho = 0.999

folder_norms = '{}/norms'.format(folder_out)
misc.mkdir(folder_norms)

for dataset in datasets:

    if dataset == 'amyloid10min':
        folder_data = folder_data_amyloid
        planes = None
        data_suffix = 'rings0-64_span1_time3000-3600'
        clim = [0, 1]  # set colour limit for plots

    elif dataset == 'fdg':
        folder_data = folder_data_fdg
        planes = [85, 90, 46]
        data_suffix = 'rings0-64_span1'
        clim = [0, 10]  # set colour limit for plots
Example #9
    def act_split(self):
        mkdir(self.split_dir)
        split = Split(input_filepath=self.filepath_extract)
        self.sections = split.process(self.progressBar)
        self.combobox.addItems(self.sections)
Example #10
import os

from misc import mkdir

output_filename = 'output.docx'
output_filepath = os.path.join(os.getcwd(), output_filename)

tmp_dir1 = os.path.join(os.getcwd(), 'tmp\\split')
tmp_dir2 = os.path.join(os.getcwd(), 'tmp\\split-processed')

if __name__ == "__main__":
    #
    extract = Extract(input_filepath)
    extract.process()  # output_filepath=extract_filepath
    db['project_info'].set_db(extract.extract_project_infos())
    #
    mkdir(tmp_dir1)
    split = Split(input_filepath=extract_filepath)
    sections = split.process()
    #
    db['finance'].filtering(need_years=3)
    db['human'].select_people(name_list=['总经理姓名', '联系人姓名', '项目经理人姓名'])
    db['projects_done'].filtering(project_types=['水利'], need_years=3)
    db['projects_being'].filtering(project_types=['水利'])
    #
    mkdir(tmp_dir2)
    for section in sections:
        fillin = FillIn(os.path.join(tmp_dir1, section + '.docx'))
        fillin.process(os.path.join(tmp_dir2, section + '.docx'))
    #
    merge = Merge(tmpdir=tmp_dir2, section_names=sections)
    merge.process(output_filepath)