Example #1
def training_procedure(params):
    """Trains over the MNIST standard spilt (50K/10K/10K) 
    Saves the best model on validation set
    Evaluates over test set every epoch for plots"""

    train_loader, test_loader = datas.load_data(params)

    N, K = params['N_K']
    direct_vae = Direct_VAE(params)

    print('hyperparameters: ', params)

    train_results, test_results = [], []

    print('len(train_loader)', len(train_loader))
    print('len(test_loader)', len(test_loader))
    for epoch in range(params['num_epochs']):
        epoch_results = [0, 0]
        train_nll = direct_vae.train(train_loader)
        train_results.append(train_nll)
        epoch_results[0] = train_nll

        test_nll = direct_vae.evaluate(test_loader)
        test_results.append(test_nll)

        epoch_results[1] = test_nll
        if params['print_result']:
            print_results(epoch_results, epoch, params['num_epochs'])

    return train_results, test_results
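
A minimal invocation sketch; only the keys this snippet actually reads are shown, and Direct_VAE or datas.load_data may well require more:

# Hypothetical params; values are illustrative, not from the original project.
params = {
    'N_K': (20, 10),       # N categorical variables with K categories each
    'num_epochs': 100,
    'print_result': True,
}
train_nlls, test_nlls = training_procedure(params)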
Example #2
    def __init__(self, params):
        self.N, self.K = params['N_K']  # multinomial with K modes
        self.D = params['gumbels']  # average of D Gumbel variables
        self.gaussian_dim = params['gaussian_dimension']
        self.params = params
        self.num_epochs = params['num_epochs']
        self.epoch_semi_sup = params['supervised_epochs']
        self.num_epochs += self.epoch_semi_sup
        batch_size = params['batch_size']

        params['dataset'] = 'mnist'
        self.train_loader, self.valid_loader, self.test_loader = datas.load_data(
            params)
        if self.epoch_semi_sup > 0:
            train_ds, _ = datas.get_pytorch_mnist_datasets()
            balanced_ds = datas.get_balanced_dataset(
                train_ds, params['num_labeled_data'])
            self.train_loader_balanced = torch.utils.data.DataLoader(
                dataset=balanced_ds, batch_size=batch_size, shuffle=True)
        H = 400
        gibbs = Gibbs_Encoder(h_dim=H, N=self.N, K=self.K, D=self.D)
        self.vae = VAE(gibbs, h_dim=H, N=self.N, K=self.K, D=self.D)
        print(self.vae)
        print('number of parameters: ',
              sum(param.numel() for param in self.vae.parameters()))
        print(params)
        if torch.cuda.is_available():
            self.vae.cuda()
        # self.vae.load_state_dict(torch.load('vae_multi_gauss_new.pkl',
        #                                     map_location=lambda storage, loc: storage))
        self.optimizer = torch.optim.Adam(self.vae.parameters(),
                                          lr=params['learning_rate'])
        self.print_every = params['print_every']
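
The commented-out load_state_dict line above suggests checkpoints are occasionally restored by hand. A minimal sketch of that restore, assuming 'vae_multi_gauss_new.pkl' holds a state dict saved with torch.save:

# Hypothetical restore helper; the path comes from the commented-out line above.
import torch

def restore_vae(vae, path='vae_multi_gauss_new.pkl'):
    # map_location='cpu' lets a GPU-saved checkpoint load on a CPU-only machine
    vae.load_state_dict(torch.load(path, map_location='cpu'))
    if torch.cuda.is_available():
        vae.cuda()
    return vae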
Example #3
def check_running_time(params):
    torch.manual_seed(params['random_seed'])
    params['batch_size'] = 1
    train_loader, test_loader = datas.load_data(params)
    print('len(train_loader)', len(train_loader))
    print('len(test_loader)', len(test_loader))
    print('hyperparameters: ', params)
    time_to_plot = []
    nk = [(i, 2) for i in range(5, 16)]

    for n_k in nk:
        params['N_K'] = n_k
        direct_vae = Direct_VAE(params)
        elapsed = direct_vae.train(train_loader, return_time=True)
        time_to_plot.append((n_k[0], elapsed))
        print(n_k, elapsed)
    return time_to_plot
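
The returned (N, time) pairs are meant to be plotted; a quick sketch with matplotlib, which is an assumed dependency, not one this snippet imports:

import matplotlib.pyplot as plt  # assumed dependency

timings = check_running_time(params)
ns, seconds = zip(*timings)
plt.plot(ns, seconds, marker='o')
plt.xlabel('N (categorical variables, K = 2)')
plt.ylabel('time reported by direct_vae.train')
plt.show()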
Example #4
File: DVAE.py  Project: siamakz/doc2hash
def training_procedure(params):
    """trains over the MNIST standard spilt (50K/10K/10K) or omniglot
    saves the best model on validation set
    evaluates over test set every epoch just for plots"""

    torch.manual_seed(params['random_seed'])

    train_loader, valid_loader, test_loader = datas.load_data(params)

    N, K = params['N_K']
    direct_vae = Direct_VAE(params)

    best_state_dicts = None
    print('hyperparameters: ', params)

    train_results, valid_results, test_results = [], [], []
    best_valid, best_test_nll = float('Inf'), float('Inf')

    for epoch in range(params['num_epochs']):
        epoch_results = [0, 0, 0]
        train_nll = direct_vae.train(train_loader)
        train_results.append(train_nll)
        epoch_results[0] = train_nll

        valid_nll = direct_vae.evaluate(valid_loader)
        valid_results.append(valid_nll)
        epoch_results[1] = valid_nll

        test_nll = direct_vae.evaluate(test_loader)
        test_results.append(test_nll)
        epoch_results[2] = test_nll

        if params['print_result']:
            print_results(epoch_results, epoch, params['num_epochs'])

        if valid_nll < best_valid:
            best_valid = valid_nll
            best_test_nll = test_nll
            best_state_dicts = (direct_vae.encoder.state_dict(),
                                direct_vae.decoder.state_dict())

    return train_results, test_results, best_test_nll, best_state_dicts, params.copy()
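
The tuple of state dicts returned here is what one would persist; a minimal sketch with torch.save, where the file names are placeholders:

# Hypothetical persistence step; file names are placeholders.
results = training_procedure(params)
train_nlls, test_nlls, best_test_nll, best_state_dicts, used_params = results
if best_state_dicts is not None:
    encoder_state, decoder_state = best_state_dicts
    torch.save(encoder_state, 'best_encoder.pt')
    torch.save(decoder_state, 'best_decoder.pt')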
Example #5
def main():

    f = open("ranges_blank.json", "r")
    ranges = json.loads(f.read())
    f.close()

    excel_filename = "xlfile.xlsx"
    xl_sheetname = "Export Worksheet"

    ranges = datas.load_data(excel_filename, xl_sheetname,
                             ranges, flags=False, pared_down=True)

    json_ranges = json.dumps(ranges)
    try:
        with open("ref_ranges_new.json", "w") as out_file:
            out_file.write(json_ranges)
        print("Changes saved")
    except OSError as err:
        print("Something broke, go tell Jeffrey:", err)
Example #6
File: GSM.py  Project: siamakz/doc2hash
def training_procedure(params):
    torch.manual_seed(params['random_seed'])

    train_loader, valid_loader, test_loader = datas.load_data(params)
    N, K = params['N_K']
    num_epochs = params['num_epochs']
    gsm_vae = GSM_VAE(params)
    best_state_dict = None
    print('hyperparameters: ', params)

    train_results, valid_results, test_results = [], [], []
    best_valid, best_test_nll = float('Inf'), float('Inf')

    for epoch in range(num_epochs):
        epoch_results = [0, 0, 0]
        train_nll = gsm_vae.train(train_loader)
        train_results.append(train_nll)
        epoch_results[0] = train_nll

        valid_nll = gsm_vae.evaluate(valid_loader)
        valid_results.append(valid_nll)
        epoch_results[1] = valid_nll

        test_nll = gsm_vae.evaluate(test_loader)
        test_results.append(test_nll)
        epoch_results[2] = test_nll

        if params['print_result']:
            print_results(epoch_results, epoch, params['num_epochs'])
        
        if valid_nll < best_valid:
            best_valid = valid_nll
            best_test_nll = test_nll
            best_state_dict = gsm_vae.vae.state_dict()

    return train_results, test_results, best_test_nll, best_state_dict, params.copy()
Example #7
def main():

    flags_list = [
        ' ', 'OK', ' ', ' ', 'No Range', 'Flagged', 'OK', 'OK', 'OK', 'OK',
        'No Range', 'OK', ' ', 'Flagged', ' ', 'No Range', ' ', ' ', 'OK',
        'OK', 'No Range', 'OK', 'OK', 'OK', 'OK', 'OK', 'OK', 'OK', 'OK', 'OK',
        ' ', 'OK', 'OK', 'OK', 'OK', 'No Range', 'No Range', 'No Range', 'OK',
        'OK', 'No Range', 'No Range', 'No Range', 'No Range', 'OK', 'Flagged',
        'No Range'
    ]

    test_type = "Flag LOW"

    order = datas.get_elements_order()

    output_file = "ranges_output.xlsx"

    wb = openpyxl.Workbook()
    ws = wb.active
    ws.title = "Reference Ranges"

    title_font = Font(size=16, bold=True)
    species_font = Font(size=14, bold=True)
    tissue_font = Font(size=12, bold=True)

    ws.merge_cells("A1:G1")
    ws['A1'] = "LabVantage - Checking Status of Reference Ranges"
    ws['A1'].font = title_font

    base_row = 3

    ranges = datas.load_data()

    for species in ranges:
        ws['A' + str(base_row)] = species
        ws['A' + str(base_row)].font = species_font

        base_row += 1

        for type in ranges[species]:
            ws['A' + str(base_row)] = type.title()
            ws['A' + str(base_row)].font = tissue_font
            base_row += 1

            tissue_headers = False
            serum_headers = False
            if type in ["liver", "kidney"]:
                if not tissue_headers:
                    write_tissue_headers(ws, base_row)
                    base_row += 1
                    write_tissue_element_names(ws, base_row, order)
                    tissue_headers = True
                    base_row += 1
                write_tissue_row(ws, flags_list, base_row)
                base_row += 1
            else:
                if not serum_headers:
                    write_serum_headers(ws, base_row)
                    base_row += 1
                    write_serum_element_names(ws, base_row, order)
                    serum_headers = True
                    base_row += 1
                write_serum_row(ws, flags_list, base_row)
                base_row += 1

            base_row += 2

    for i, flag in enumerate(flags_list):
        column = get_column_letter(i + 1)
        ws[column + str(base_row)] = order[i]
        ws[column + str(base_row + 1)] = flag

    dims = {}
    for row in ws.rows:
        for cell in row:
            if cell.value:
                dims[cell.column_letter] = max(
                    (dims.get(cell.column_letter, 0), len(str(cell.value))))
    for col, value in dims.items():
        ws.column_dimensions[col].width = value * 1.4
    ws.column_dimensions['A'].width = 20

    wb.save(output_file)

    # TODO: Thursday ================
    # Clean up/comment function to change specifications.
    # Set up entering data into data entry screen
    # Output organizing
    # Loop it all!

    # Shut out the lights and turn the heat down on your way out.
    print("done")
Example #8
import os
import datas

import sys

import scipy.stats as stats
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

params = {'binarize': True,
          'dataset': 'mnist',
          'random_seed': 777,
          'split_valid': False,
          'batch_size': 100}
batch_size = params['batch_size']
train_loader, test_loader = datas.load_data(params)

slim = tf.contrib.slim
Bernoulli = tf.contrib.distributions.Bernoulli

#%%
def bernoulli_loglikelihood(b, log_alpha):
    # log p(b | log_alpha) for a Bernoulli parameterized by logits log_alpha,
    # using log(sigmoid(x)) = -softplus(-x) and log(1 - sigmoid(x)) = -x - softplus(-x):
    return b * (-tf.nn.softplus(-log_alpha)) + (1 - b) * (-log_alpha - tf.nn.softplus(-log_alpha))

def lrelu(x, alpha=0.1):
    # leaky ReLU: identity on positives, slope alpha on negatives
    return tf.nn.relu(x) - alpha * tf.nn.relu(-x)



def encoder(x, b_dim, reuse=False):
    with tf.variable_scope("encoder", reuse=reuse):
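        # NOTE: the original listing is truncated here. What follows is a
        # plausible sketch of a slim-based Bernoulli encoder (two hidden
        # layers producing b_dim logits), NOT the project's actual code.
        h = slim.fully_connected(x, 200, activation_fn=lrelu)
        h = slim.fully_connected(h, 200, activation_fn=lrelu)
        log_alpha = slim.fully_connected(h, b_dim, activation_fn=None)
    return log_alpha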
Example #9
def main():

    order = datas.get_elements_order()

    excel_filename = "xlfile.xlsx"
    xl_sheetname = "Export Worksheet"

    ranges = {}

    ranges = datas.load_data(filename=excel_filename,
                             sheetname=xl_sheetname,
                             species_dict=ranges,
                             flags=True)

    print("Data Loaded OK")

    # Set up the web driver using Chrome since LV8 only really works with Chrome (boo).
    # Make this a global variable to make code less ornery.

    driver = Chrome()
    driver.maximize_window()

    # Set a default wait of 3 seconds for an element to load before the script bails.
    driver.implicitly_wait(3)

    # Open LabVantage login page and make sure it exists based on the page title.
    driver.get("http://sapphire.lsd.uoguelph.ca:8080/labservices/logon.jsp")
    assert "LabVantage Logon" in driver.title

    # Call the login function. See lv.py for clarification.
    # Result should be a successful login to LV8.
    login(driver)

    # # # # # # # # # # # # # # # # # # # # # # # # # #
    # User should now be logged in, interaction with LabVantage goes below
    # # # # # # # # # # # # # # # # # # # # # # # # # #

    # Load the submission then add a new specification to it.
    bring_up_submission(driver, "18-074980")

    wb, ws = outputs.create_workbook()

    title_font = Font(size=20, bold=True)
    species_font = Font(size=16, bold=True)
    type_font = Font(size=14, bold=True)

    ws.merge_cells("A1:G1")
    ws['A1'] = "LabVantage - Checking Status of Reference Ranges"
    ws['A1'].font = title_font

    ws['A2'] = "Three rows of tests for each sample type - Below, Within Ranges, Above Range."

    base_row = 4

    for species in ranges:
        print("Beginning", species)
        ws['A' + str(base_row)] = species
        ws['A' + str(base_row)].font = species_font

        base_row += 1

        for type in ranges[species]:
            print("\tProcessing", type)
            ws['A' + str(base_row)] = type
            ws['A' + str(base_row)].font = type_font

            base_row += 1

            # Add the new specification
            select_top_sample(driver)

            element = random.choice(list(ranges[species][type]))
            spec_version_id = ranges[species][type][element]["spec_version_id"]
            clear_specifications_and_add(driver, species, type,
                                         spec_version_id)

            # Program gets stuck here a lot. Loop until it works!!
            in_data_entry = False
            while not in_data_entry:
                main_window = driver.current_window_handle
                # First, try to enter the data entry screen.
                try:
                    enter_data_entry(driver)
                    in_data_entry = True
                # If this fails, try again.
                except Exception:
                    # If this try succeeds, great, enter data entry.
                    # If not, try closing the specs window again first.
                    try:
                        print("\t\tRetrying to exit specs window.")
                        driver.switch_to.window(main_window)
                        driver.switch_to.default_content()
                        # find_elements (plural) returns the list of iframes we index into
                        dlg_frames = driver.find_elements_by_tag_name("iframe")
                        driver.switch_to.frame(dlg_frames[3])
                        exit_specifications_window(driver)
                    except Exception:
                        pass

            tissue_headers = False
            serum_headers = False

            test_types = ["flag_low_value", "flag_ok_value", "flag_high_value"]
            for test_type in test_types:

                input_string = datas.get_input_string(ranges, species, type,
                                                      test_type)
                clear_inputs_and_paste_new(driver)
                flags_list = check_data_flags(driver)

                if type in ["liver", "kidney"]:
                    if not tissue_headers:
                        outputs.write_tissue_headers(ws, base_row)
                        base_row += 1
                        outputs.write_tissue_element_names(ws, base_row, order)
                        tissue_headers = True
                        base_row += 1
                    outputs.write_tissue_row(ws, flags_list, base_row,
                                             input_string)
                    base_row += 1
                else:
                    if not serum_headers:
                        outputs.write_serum_headers(ws, base_row)
                        base_row += 1
                        outputs.write_serum_element_names(ws, base_row, order)
                        serum_headers = True
                        base_row += 1
                    outputs.write_serum_row(ws, flags_list, base_row,
                                            input_string)
                    base_row += 1

            exit_data_entry(driver)
            outputs.autoresize_columns(ws)
            outputs.save_workbook(wb)
            base_row += 2

    outputs.autoresize_columns(ws)

    outputs.save_workbook(wb)

    time.sleep(5)

    # # # # # #
    # Interactions in LabVantage should all be above this, teardown only past this point.
    # # # # # #

    # Teardown.

    logout(driver)
    driver.close()
    driver.quit()

    # Shut out the lights and turn the heat down on your way out.
    print("done")