Example #1
                def fitness(*params):
                    # Preprocessing parameters for different experiments
                    if experiment == "bayesianCNN":
                        es = EarlyStopping(
                            monitor='val_loss',
                            patience=Config.get("ESValPatience"),
                            min_delta=Config.get("ESValMinDelta"),
                            baseline=Config.get("ESValThresholdBaseline"))

                        model, hist = training_func(root_logger, X_train_int,
                                                    y_train_int, X_val, y_val,
                                                    es, *params[0])

                    else:
                        hidden_layers_comb = get_hidden_layers_combinations(
                            BOconfig["hiddenLayers"], 3,
                            BOconfig["allowFirstLevelZero"])

                        model, hist = training_func(root_logger, X_train_int,
                                                    y_train_int, X_val, y_val,
                                                    features_size,
                                                    hidden_layers_comb,
                                                    *params[0])

                    del model
                    K.clear_session()
                    val_auprc = hist.history['val_auprc'][-1]
                    root_logger.debug(
                        "BAYESIAN OPTIMIZER - Validation auprc: {}".format(
                            val_auprc))
                    return -val_auprc
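The closure returns -val_auprc so that a minimizer maximizes AUPRC. A self-contained sketch of how such a function is typically handed to skopt's gp_minimize (the toy objective and search space below are illustrative, not taken from the original config):

from skopt import gp_minimize
from skopt.space import Real, Integer

def fitness(*params):
    # gp_minimize calls fitness(x) with x a list, so params[0] is [lr, units]
    lr, units = params[0]
    return (lr - 0.01) ** 2 + (units - 64) ** 2  # toy objective to minimize

space = [Real(1e-4, 1e-1, name="learning_rate"),
         Integer(16, 256, name="units")]
result = gp_minimize(func=fitness, dimensions=space, n_calls=12, random_state=42)
print(result.x, result.fun)  # best parameters and lowest objective found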
Example #2
def train_fixed_cnn(root_logger, X_train_int, y_train_int, X_val, y_val, type):
    model = fixed_cnn(type)

    #nadam_opt = Nadam(lr=0.002, beta_1=0.9, beta_2=0.999)
    nadam_opt = Nadam(lr=Config.get("learningRate"),
                      beta_1=Config.get("nadamBeta1"),
                      beta_2=Config.get("nadamBeta2"))

    model.compile(loss='binary_crossentropy',
                  optimizer=nadam_opt,
                  metrics=[auprc, auroc])

    es = EarlyStopping(monitor='val_loss', patience=Config.get("ESPatience"), min_delta=Config.get("ESMinDelta"))

    validation_set = None
    if X_val is not None and y_val is not None:
        y_val = encoding_labels(y_val)
        validation_set = (X_val, y_val)

    y_train_int = encoding_labels(y_train_int)
    history = model.fit(x=X_train_int,
                        y=y_train_int,
                        validation_data=validation_set,
                        epochs=Config.get('epochs'),
                        batch_size=Config.get("batchSize"),
                        callbacks=[es],
                        verbose=Config.get("kerasVerbosity"))

    return model, history
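The auprc and auroc metrics used in model.compile are imported from elsewhere in these projects and never shown. If you need stand-ins, tf.keras ships an AUC metric that can plausibly back both names (the originals, written for standalone Keras, may have used custom tensor functions instead):

import tensorflow as tf

# Assumed stand-ins for the auprc/auroc names used in model.compile(...)
auprc = tf.keras.metrics.AUC(curve='PR', name='auprc')
auroc = tf.keras.metrics.AUC(curve='ROC', name='auroc')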
Example #3
def sendToPostEndpoint(fileName):
    directory = Config.paths('DIRECTORY_TO_MONITOR')
    user = Config.dev('USERNAME')
    password = Config.dev('PASSWORD')
    url = 'http://localhost:8080/postRule'
    # use a context manager so the file handle is closed after the upload
    with open(directory + fileName, 'rb') as file:
        files = {'file': file}
        r = requests.post(url, files=files, auth=(user, password))
    print("uploaded: " + fileName + "-- response from server: " + str(r))
Example #4
def main():

    #print (Config.paths('DIRECTORY_TO_MONITOR'))
    #print (Config.dev('USERNAME'))
    #print (Config.dev('PASSWORD'))

    print("starting reading of directory:" +
          Config.paths('DIRECTORY_TO_MONITOR'))
    #initial scan of the local directory
    files = os.listdir(Config.paths('DIRECTORY_TO_MONITOR'))
    initialRulesProcessing(files)

    #initiate watcher class instance and update database as changes are made
    w = Watcher()
    w.run()
Example #5
    def fitness_mlp(kernel_space_1, units_2, kernel_space_2, dense_1, dense_2):
        es = EarlyStopping(monitor='val_loss',
                           patience=Config.get("ESValPatience"),
                           min_delta=Config.get("ESValMinDelta"),
                           baseline=0.2)

        model, hist = train_bayesian_cnn(X_train_int, y_train_int,
                                         (X_val, y_val), es, kernel_space_1,
                                         units_2, kernel_space_2, dense_1,
                                         dense_2)

        val_auprc = hist.history['val_auprc'][-1]
        print()
        print("Validation Loss: {}".format(val_auprc))
        print()
        return -val_auprc
Example #6
	def __init__(self, file_path):
		"""Initializes the provisioning handler
		
		Arguments:
			file_path {string} -- path to your configuration file
		"""
		#Logging
		logging.basicConfig(level=logging.ERROR)
		self.logger = logging.getLogger(__name__)
		
		#Load configuration settings from config.ini
		config = Config(file_path)
		self.config_parameters = config.get_section('SETTINGS')
		self.secure_cert_path = self.config_parameters['SECURE_CERT_PATH']
		self.iot_endpoint = self.config_parameters['IOT_ENDPOINT']	
		self.template_name = self.config_parameters['PRODUCTION_TEMPLATE']
		self.rotation_template = self.config_parameters['CERT_ROTATION_TEMPLATE']
		self.claim_cert = self.config_parameters['CLAIM_CERT']
		self.secure_key = self.config_parameters['SECURE_KEY']
		self.root_cert = self.config_parameters['ROOT_CERT']
	
		# Sample Provisioning Template requests a serial number as a 
		# seed to generate Thing names in IoTCore. Simulating here.
		self.unique_id = "1234567-abcde-fghij-klmno-1234567abc-TLS350" 
		self.unique_id = str(int(round(time.time() * 1000)))

		# ------------------------------------------------------------------------------
		#  -- PROVISIONING HOOKS EXAMPLE --
		# Provisioning Hooks are a powerful feature for fleet provisioning. Most of the
		# heavy lifting is performed within the cloud lambda. However, you can send
		# device attributes to be validated by the lambda. An example is shown in the line
		# below (.hasValidAccount could be checked in the cloud against a database). 
		# Alternatively, a serial number, geo-location, or any attribute could be sent.
		# 
		# -- Note: This attribute is passed up as part of the register_thing method and
		# will be validated in your lambda's event data.
		# ------------------------------------------------------------------------------

		self.primary_MQTTClient = AWSIoTMQTTClient(self.unique_id)
		self.test_MQTTClient = AWSIoTMQTTClient(self.unique_id)
		self.primary_MQTTClient.onMessage = self.on_message_callback
		self.callback_returned = False
		self.message_payload = {}
		self.isRotation = False
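The hooks comment above describes passing attributes such as .hasValidAccount through register_thing. A hedged sketch of the RegisterThing payload a fleet-provisioning flow publishes (field values illustrative; SerialNumber depends on the template):

import json

def build_register_payload(ownership_token, serial_number):
    # "parameters" is what the pre-provisioning hook lambda receives in its
    # event data; hasValidAccount mirrors the attribute named in the comment
    return json.dumps({
        "certificateOwnershipToken": ownership_token,
        "parameters": {
            "SerialNumber": serial_number,
            "hasValidAccount": "true"
        }
    })

# published to: $aws/provisioning-templates/<template_name>/provision/json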
Example #7
def train_bayesian_cnn(root_logger, X_train_int, y_train_int, X_val, y_val, es,
                       ks1, u2, ks2, d1, d2):
    root_logger.debug("Kernel space 1: {}".format(ks1))
    root_logger.debug("Units 2: {}".format(u2))
    root_logger.debug("Kernel space 2: {}".format(ks2))
    root_logger.debug("Dense 1: {}".format(d1))
    root_logger.debug("Dense 2: {}".format(d2))

    model = bayesian_cnn(kernel_size_1=ks1,
                         units_2=u2,
                         kernel_size_2=ks2,
                         dense_units_1=d1,
                         dense_units_2=d2)

    parallel_model = multi_gpu_model(model, gpus=4)

    nadam_opt = Nadam(lr=0.002, beta_1=0.9, beta_2=0.999)

    parallel_model.compile(loss='binary_crossentropy',
                           optimizer=nadam_opt,
                           metrics=[auprc, auroc])

    # Building dataset
    validation_set = None
    if X_val is not None and y_val is not None:
        y_val = encoding_labels(y_val)
        validation_set = (X_val, y_val)

    y_train_int = encoding_labels(y_train_int)
    history = parallel_model.fit(x=X_train_int,
                                 y=y_train_int,
                                 validation_data=validation_set,
                                 epochs=Config.get('epochs'),
                                 batch_size=Config.get("batchSize"),
                                 callbacks=[es],
                                 verbose=Config.get("kerasVerbosity"))

    return parallel_model, history
Example #8
def upload():

    name = request.values['name']
    data = request.values['data']

    if name and data:
        path = Path(Config.files("download_path"))

        with open(path / name, 'w') as file:
            file.write(data)

        return "File created !"

    else:
        return "Parameters missing or wrong."
Example #9
def get_logger(exp):
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    format_string = "%(asctime)s — %(message)s"
    log_format = logging.Formatter(format_string)
    # Creating and adding the console handler
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(log_format)
    logger.addHandler(console_handler)
    # Creating and adding the file handler
    file_handler = logging.FileHandler(
        "{}/{}_{}.log".format(Config.get("logDir"), time.strftime("%Y%m%d-%H%M%S"), exp), mode='a')
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)

    return logger
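One caveat: every call to get_logger attaches fresh handlers to the same module-level logger, so a second call duplicates each log line. A thin guard (hypothetical name get_logger_once) avoids that:

import logging

def get_logger_once(exp):
    logger = logging.getLogger(__name__)
    if logger.handlers:   # handlers already attached by an earlier call
        return logger
    return get_logger(exp)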
Example #10
def train_bayesian_mlp(root_logger, X_train_int, y_train_int, X_val, y_val, features_size,
                       hidden_layers_comb, learning_rate, num_hidden_layer, hidden_layer_choice):
    if num_hidden_layer > 0:
        hidden_layer_choice = int(hidden_layer_choice * len(hidden_layers_comb[num_hidden_layer])
                                  / len(hidden_layers_comb[-1]))
        hidden_layer_configuration = hidden_layers_comb[num_hidden_layer][hidden_layer_choice]
    else:
        hidden_layer_configuration = []

    root_logger.debug("Training with parameters: ")
    root_logger.debug('Learning rate: {}'.format(learning_rate))
    root_logger.debug('number of hidden layers: {}'.format(num_hidden_layer))
    root_logger.debug('hidden layers configuration: {}'.format(hidden_layer_configuration))

    model = bayesian_mlp(features_size, hidden_layer_configuration)
    parallel_model = multi_gpu_model(model, gpus=4)

    sgd_opt = SGD(lr=learning_rate,
                  decay=Config.get('decay'),
                  momentum=Config.get('momentum'),
                  nesterov=Config.get('nesterov'))

    parallel_model.compile(loss='binary_crossentropy',
                           optimizer=sgd_opt,
                           metrics=[auprc, auroc])

    es = EarlyStopping(monitor='val_loss', patience=Config.get("ESTestPatience"), min_delta=Config.get("ESTestMinDelta"))

    validation_set = None
    if X_val is not None and y_val is not None:
        y_val = encoding_labels(y_val)
        validation_set = (X_val, y_val)

    y_train_int = encoding_labels(y_train_int)
    history = parallel_model.fit(x=X_train_int,
                                 y=y_train_int,
                                 validation_data=validation_set,
                                 epochs=Config.get('epochs'),
                                 batch_size=Config.get("batchSize"),
                                 callbacks=[es],
                                 verbose=Config.get("kerasVerbosity"), workers=Config.get("fitWorkers"))

    return parallel_model, history
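The hidden_layer_choice rescaling at the top of train_bayesian_mlp is easy to misread: the optimizer samples an index over the largest combination list, and the code shrinks it proportionally to fit the shorter list for the chosen depth. A toy check with illustrative shapes:

# hidden_layers_comb[d] holds the configurations for d hidden layers;
# the last entry is the largest, so the optimizer samples over its range
hidden_layers_comb = [[], [(16,)] * 4, [(16, 8)] * 10, [(16, 8, 4)] * 20]
choice, num_hidden_layer = 15, 1
scaled = int(choice * len(hidden_layers_comb[num_hidden_layer])
             / len(hidden_layers_comb[-1]))
print(scaled)  # int(15 * 4 / 20) = 3, a valid index into the 4-entry list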
Example #11
class Watcher:
    #DIRECTORY_TO_WATCH = "/Users/xai/rules/"
    directory = Config.paths('DIRECTORY_TO_MONITOR')

    def __init__(self):
        self.observer = Observer()

    def run(self):
        event_handler = Handler()
        self.observer.schedule(event_handler, self.directory, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(10)
        except KeyboardInterrupt:
            self.observer.stop()
            print("Watcher stopped")
        except Exception:
            self.observer.stop()
            print("Error in running program")

        self.observer.join()
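Handler is not defined in this snippet; with the watchdog library it would typically subclass FileSystemEventHandler, along these lines (the on_created body is an assumption):

from watchdog.events import FileSystemEventHandler

class Handler(FileSystemEventHandler):
    def on_created(self, event):
        # fires for every new entry under the monitored directory
        if not event.is_directory:
            print("new file: " + event.src_path)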
Example #12
def main():
    # initialize core objects
    scraper = Scraper(Config.praw('CLIENT_ID'), Config.praw('CLIENT_SECRET'),
                      Config.praw('USER_AGENT'), Config.praw('USERNAME'),
                      Config.praw('PASSWORD'))
    parser = Parser()
    notifier = Notifier(Config.twilio('ACCOUNT_SID'),
                        Config.twilio('AUTH_TOKEN'))

    # initialize time for loop
    startTime = time.time()

    while True:
        try:
            # grab last 100 new mechmarket posts
            posts = scraper.grabNewPosts('mechmarket', 100)

            # loop through posts
            for post in posts:
                have = parser.parseHave(post.title)
                # does this need the or? or is the search case insensitive?
                if parser.keywordSearch('milkshake',
                                        have) or parser.keywordSearch(
                                            'Milkshake', have):
                    # notify if we found it
                    notify(notifier, post,
                           f'Milkshake found '\
                           f'{post.title} '\
                           f'{post.url} ')
            # sleep for 60 seconds
            time.sleep(60.0 - ((time.time() - startTime) % 60.0))
            print(
                f'Starting new loop at {time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())}'
            )
        except Exception:
            # disable during testing
            # notifier.sendMessage('8042100901','17572092974','Something broke and the bot has stopped')
            break
Example #13
def fixed_cnn_exp(gene, mode):
    X, y, features_size = filter_by_tasks(*import_sequence_dataset(
        "data", gene),
                                          Config.get("task"),
                                          perc=Config.get("samplePerc"))

    metrics = {'losses': [], 'auprc': [], 'auroc': []}
    for ext_holdout in range(Config.get("nExternalHoldout")):
        print()
        print("{}/{} EXTERNAL HOLDOUTS".format(ext_holdout,
                                               Config.get("nExternalHoldout")))
        print()
        X_train, X_test, y_train, y_test = split(X,
                                                 y,
                                                 random_state=42,
                                                 proportions=None,
                                                 mode=mode)

        # Internal holdouts
        X_train_int, X_val, y_train_int, y_val = split(X_train,
                                                       y_train,
                                                       random_state=42,
                                                       proportions=None,
                                                       mode=mode)

        model, _ = train_fixed_cnn(X_train_int, y_train_int, (X_val, y_val),
                                   Config.get("type"))

        #external holdout
        model, history = train_fixed_cnn(X_train_int, y_train_int, None,
                                         Config.get("type"))

        eval_score = model.evaluate(X_test, y_test)

        print("Metrics names: ", model.metrics_names)
        print("Final Scores: ", eval_score)
        metrics['losses'].append(eval_score[0])
        metrics['auprc'].append(eval_score[1])
        metrics['auroc'].append(eval_score[2])

    return metrics
Example #14
import shutil
import time
import numpy as np
from config_loader import Config
from src.bayesian_cnn_exp import bayesian_cnn_exp
from src.bayesian_mlp_exp import bayesian_mlp_exp
from src.fixed_cnn_exp import fixed_cnn_exp
from src.utilities import build_log_filename

exp = {"bMLP": bayesian_mlp_exp, "fixedCNN": fixed_cnn_exp, "bayesianCNN": bayesian_cnn_exp}

gene = Config.get("gene")
mode = Config.get("mode")
experiment = Config.get("experiment")

f = exp[experiment]
metrics = f(gene, mode)


#Saving results metrics
np.save(build_log_filename(), metrics)
# copying the configuration json file with experiments details
dest = "experiment_configurations/{}_{}_{}_experiment_configuration.json".format(
    time.strftime("%Y%m%d-%H%M%S"), gene, mode
)
shutil.copy("experiment_configurations/experiment.json", dest)
Example #15
import numpy as np
from keras.callbacks import EarlyStopping
from skopt import gp_minimize
from skopt.callbacks import DeltaYStopper

from config_loader import Config
from keras import backend as K
from src.dataset_utils import get_data, split, encoding_labels, filter_by_tasks
from src.logging_utils import save_metrics, copy_experiment_configuration, get_logger
from src.models import get_training_function
from src.utilities import get_parameters_space, get_hidden_layers_combinations

if __name__ == "__main__":
    experiment = Config.get("experiment")

    root_logger = get_logger(experiment)

    genes = Config.get("gene")
    modes = Config.get("mode")
    tasks = Config.get("task")

    for gene in genes:
        root_logger.debug("IMPORTING DATA")
        X, y = get_data(experiment, "data", gene, Config.get("samplePerc"))
        features_size = len(X[0])

        for task in tasks:
            for mode in modes:

                root_logger.debug(
                    "EXPERIMENT: {}, GENE: {}, MODE: {}\nTASK: {}".format(
                        experiment, gene, mode, task))
Example #16
def adminhome():
    cur = cnxn.cursor()
    result = cur.execute("Select * from Things")
    rows = result.fetchall()
    data = []
    for row in rows:
        data.append(list(row))
    
    return jsonify({'result':data}), status.HTTP_200_OK

######### Provisioning certificate #####################

#Set Config path
CONFIG_PATH = 'config.ini'

config = Config(CONFIG_PATH)
config_parameters = config.get_section('SETTINGS')
secure_cert_path = config_parameters['SECURE_CERT_PATH']
bootstrap_cert = config_parameters['CLAIM_CERT']


def callback(payload):
    print(payload)

card = ""
@app.route('/provisioning-cert',methods=['POST'])
def run_provisioning(isRotation=False):
    
    provisioner = ProvisioningHandler(CONFIG_PATH)

    if isRotation:
Example #17
def get_parameters_space(hidden_layers_comb):
    params = [conf_to_params(conf) for conf in Config.get("bayesianOpt")["hyperparameters"]]
    max_choice = len(hidden_layers_comb[-1]) - 1  # avoid shadowing the built-in max()
    params.append(Integer(0, max_choice, name="hidden_layer_choice"))

    return params
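conf_to_params is never shown; given the JSON-driven hyperparameter list, it plausibly maps each entry to an skopt dimension, for example (the entry shape is an assumption):

from skopt.space import Real, Integer

def conf_to_params(conf):
    # assumed entry shape: {"name": "learning_rate", "type": "real",
    #                       "low": 1e-4, "high": 1e-1}
    kind = {"real": Real, "integer": Integer}[conf["type"]]
    return kind(conf["low"], conf["high"], name=conf["name"])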
Example #18
def bayesian_mlp_exp(gene, mode):
    BOconfig = Config.get("bayesianOpt")

    hidden_layers_comb = get_hidden_layers_combinations(
        BOconfig["hiddenLayers"], 3, BOconfig["allowFirstLevelZero"])
    mlp_parameters_space = get_parameters_space(hidden_layers_comb)

    @use_named_args(mlp_parameters_space)
    def fitness_mlp(learning_rate, num_hidden_layer, hidden_layer_choice):

        model, hist = train_bayesian_mlp(X_train_int, y_train_int, (X_val, y_val), features_size,
                                         learning_rate, num_hidden_layer, hidden_layer_choice, hidden_layers_comb)

        val_auprc = hist.history['val_auprc'][-1]
        print()
        print("Validation Loss: {}".format(val_auprc))
        print()
        return -val_auprc

    print()
    print("Importing Epigenetic data...")
    print()
    X, y, features_size = filter_by_tasks(*import_epigenetic_dataset("data", gene), Config.get("task"),
                                          perc=Config.get("samplePerc"))

    print("Datasets length: {}, {}".format(len(X), len(y)))
    print("Features sizes: {}".format(features_size))

    metrics = {'losses': [], 'auprc': [], 'auroc': []}
    delta_stopper = DeltaYStopper(n_best=BOconfig["n_best"], delta=BOconfig["delta"])

    for ext_holdout in range(Config.get("nExternalHoldout")):
        print()
        print("{}/{} EXTERNAL HOLDOUTS".format(ext_holdout, Config.get("nExternalHoldout")))
        print()
        X_train, X_test, y_train, y_test = split(X, y, random_state=42, proportions=None, mode=mode)

        # Internal holdouts
        X_train_int, X_val, y_train_int, y_val = split(X_train, y_train, random_state=42, proportions=None, mode=mode)

        print("Searching Parameters...")
        print()

        min_res = gp_minimize(func=fitness_mlp,
                              dimensions=mlp_parameters_space,
                              acq_func=BOconfig["acq_function"],
                              callback=[delta_stopper],
                              n_calls=BOconfig["nBayesianOptCall"])

        print()
        print("Training with best parameters found: {}".format(min_res.x))
        print()
        print(X_train)

        model, _ = train_bayesian_mlp(X_train, y_train, None, features_size,
                                      min_res.x[0], min_res.x[1], min_res.x[2], hidden_layers_comb)

        eval_score = model.evaluate(X_test, y_test)
        #K.clear_session()

        print("Metrics names: ", model.metrics_names)
        print("Final Scores: ", eval_score)
        metrics['losses'].append(eval_score[0])
        metrics['auprc'].append(eval_score[1])
        metrics['auroc'].append(eval_score[2])

    return metrics
Example #19
        return "No file or directory"


@app.route('/upload', methods=['POST'])
def upload():

    name = request.values['name']
    data = request.values['data']

    if name and data:
        path = Path(Config.files("download_path"))

        with open(path / name, 'w') as file:
            file.write(data)

        return "File created !"

    else:
        return "Parameters missing or wrong."


@app.route('/', methods=['GET', 'POST'])
def hello():
    return "Hello, world !"


if __name__ == "__main__":
    host = Config.server("host")
    port = Config.server("port")
    app.run(debug=True, host=host, port=port)
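Since the /upload route reads request.values, it accepts plain form fields; a quick client-side check (host and port are illustrative, Config.server supplies the real ones):

import requests

r = requests.post("http://localhost:5000/upload",
                  data={"name": "test.txt", "data": "hello world"})
print(r.text)  # "File created !" on success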
Example #20
def train():
    # Load the config variables
    cfg = Config(FLAGS.config)
    steps_per_phase = cfg.get('steps_per_phase')
    add_steps_exp = cfg.get('add_steps_exp')
    min_steps = cfg.get('min_steps')
    dg_steps = cfg.get('dg_steps')
    checkpoints_folder = cfg.get('checkpoints_folder')
    path_input_train = cfg.get('path_input_train')
    path_input_val = cfg.get('path_input_val')
    path_input_test = cfg.get('path_input_test')
    input_resolution = cfg.get('input_resolution')
    batch_size = cfg.get('batch_size')
    learning_rate = cfg.get('learning_rate')
    latent_input_dim = cfg.get('latent_input_dim')
    model_name = cfg.get('model_name')
    seed = cfg.get('seed')
    n_critic = cfg.get('n_critic')
    version = cfg.get('version')
    inference_steps = cfg.get('inference_steps')
    is_training = cfg.get('is_training')
    dg_freq = cfg.get('dg_freq')
    sum_freq = cfg.get('sum_freq')
    snap_freq = cfg.get('snap_freq')
    is_cpu = cfg.get('is_cpu')
    save_model_freq = cfg.get('save_model_freq')
    is_testing = cfg.get('is_testing')
    dataset_name = cfg.get("dataset_name")
    growth_ip_steps = cfg.get("act_ip_steps")

    # Define training steps and images per phase
    kimg_per_phase = steps_per_phase * batch_size * n_critic / 1000.0
    num_phases = 2 * (int(np.log2(input_resolution)) - 2) + 1
    training_steps = int(num_phases * steps_per_phase) + 1 + add_steps_exp
    if training_steps < min_steps:
        training_steps = min_steps

    # When training is continued model_name is non-empty
    load_model = model_name is not None and model_name != ""
    if not load_model:
        model_basename = cfg.get('model_basename')
        model_name = construct_model_name(model_basename, version)

    # Define checkpoint folder
    checkpoint_folder = os.path.join(checkpoints_folder, model_name)
    if not os.path.exists(checkpoint_folder):
        os.makedirs(checkpoint_folder)

    # For traceability of experiments, copy the config file
    shutil.copyfile(
        FLAGS.config,
        os.path.join(checkpoint_folder,
                     "{}__{}.ini".format(model_name, FLAGS.section)))

    # One training step
    #---------------------------------------------------------------
    def training_step(sess, step, model, n_critic, d_train, sum_freq, g_train,
                      gan_summary, snap_freq, snap_summary, dg_freq,
                      current_to_tmp_op, iter_val_init_op, dg_steps,
                      d_train_worst, iter_test_init_op, minmax, g_train_worst,
                      maxmin, iter_train_init_op):

        # Train discriminator for n_critic
        for critic_iter in range(n_critic):
            results = sess.run(d_train)

        # Train generator and add summaries every sum_freq steps
        if step % sum_freq == 0 and step > 0:
            results, summary1 = sess.run([g_train, gan_summary])
            train_writer.add_summary(summary1, step)
            train_writer.flush()
        else:
            results = sess.run(g_train)

        # Change to EMA variables when adding snapshot summary
        # (for inference we take exp. running avg. of weights)
        if step % snap_freq == 0 and step > 0:
            sess.run(model.to_testing())
            summary2 = sess.run(snap_summary)
            train_writer.add_summary(summary2, step)
            train_writer.flush()
            sess.run(model.to_training())

        duality_gap = None
        maxmin_value = None
        minmax_value = None

        # Compute duality gap at dg_freq
        if step % dg_freq == 0 and step > 0:

            # Assign current weights to g_worst, d_worst
            sess.run(current_to_tmp_op)

            # Train d_worst on validation data
            sess.run(iter_val_init_op)
            for i in range(0, dg_steps):
                _ = sess.run(d_train_worst)

            # Compute minmax on test data
            sess.run(iter_test_init_op)
            minmax_value = sess.run(minmax)

            # Train g_worst on validation data
            sess.run(iter_val_init_op)
            for j in range(0, dg_steps):
                _ = sess.run(g_train_worst)

            # Compute maxmin on test data
            sess.run(iter_test_init_op)
            maxmin_value = sess.run(maxmin)

            # Compute DG
            duality_gap = minmax_value - maxmin_value

            # Add to summaries
            summary3 = tf.Summary()
            summary3.value.add(tag="minmax", simple_value=minmax_value)
            summary3.value.add(tag="maxmin", simple_value=maxmin_value)
            summary3.value.add(tag="duality_gap", simple_value=duality_gap)
            train_writer.add_summary(summary3, step)
            train_writer.flush()

            # Print in log file
            logging.info('-----------Step %d:-------------' % step)
            logging.info('  Time: {}'.format(
                datetime.now().strftime('%b-%d-%I%M%p-%G')))
            logging.info('  Duality Gap  : {}'.format(duality_gap))

            # Reinit train OP's
            sess.run(iter_train_init_op)
        return duality_gap, maxmin_value, minmax_value

    # To undo dynamic weight scaling at transition
    #---------------------------------------------------------------
    def get_wscale(shape, gain=np.sqrt(2), fan_in=None):
        if fan_in is None: fan_in = np.prod(shape[:-1])
        std = gain / np.sqrt(fan_in)  # He init
        return std

    # Built graph
    #---------------------------------------------------------------
    graph = tf.Graph()
    with graph.as_default():

        # Get datasets, iterators and initializers (train/val/test)
        #---------------------------------------------------------------
        dataset_train = Dataset(path_input_train,
                                batch_size=batch_size,
                                new_size=input_resolution,
                                what_dataset=dataset_name).get_dataset()
        dataset_val = Dataset(path_input_val,
                              batch_size=batch_size,
                              new_size=input_resolution,
                              what_dataset=dataset_name).get_dataset()
        dataset_test = Dataset(path_input_test,
                               batch_size=batch_size,
                               new_size=input_resolution,
                               what_dataset=dataset_name).get_dataset()
        data_iterator = tf.data.Iterator.from_structure(
            dataset_train.output_types, dataset_train.output_shapes)
        iter_train_init_op = data_iterator.make_initializer(dataset_train)
        iter_val_init_op = data_iterator.make_initializer(dataset_val)
        iter_test_init_op = data_iterator.make_initializer(dataset_test)

        # Create model
        #---------------------------------------------------------------
        model = GAN_DG(iterator=data_iterator,
                       batch_size=batch_size,
                       input_resolution=input_resolution,
                       latent_input_dim=latent_input_dim,
                       learning_rate=learning_rate,
                       is_cpu=is_cpu)

        # Get training OPs
        #---------------------------------------------------------------
        d_train, d_opt_reset, g_train, g_opt_reset, g_summary = model.gan()
        current_to_tmp_op = model.assign_vars()
        d_train_worst, minmax = model.minmax()
        g_train_worst, maxmin = model.maxmin()

        # Check snapshots during morphing
        #---------------------------------------------------------------
        im_fk = model.get_fake_images(g_var_scope="generator")
        snap_summary = tf.summary.image("snapshot_fake",
                                        tf.transpose(im_fk, [0, 2, 3, 1]),
                                        max_outputs=2)
        snap_summary_1 = tf.summary.image("snapshot_after_morph",
                                          tf.transpose(im_fk, [0, 2, 3, 1]),
                                          max_outputs=4)
        snap_summary_2 = tf.summary.image("snapshot_before_morph",
                                          tf.transpose(im_fk, [0, 2, 3, 1]),
                                          max_outputs=4)

        # Create lod placeholder, assign lod values (lod_in_g = lod_in_d), add to summary
        #---------------------------------------------------------------
        lod_in_g = tf.placeholder(tf.float32, name='lod_in_g', shape=[])
        lod_in_d = tf.placeholder(tf.float32, name='lod_in_d', shape=[])
        lod_in_im = tf.placeholder(tf.float32, name='lod_in_im', shape=[])
        lod_in_grow = tf.placeholder(tf.float32, name='lod_in_grow', shape=[])

        lod_image_a = [
            v for v in tf.global_variables() if v.name == "lod_image:0"
        ][0]
        lod_g_a = [
            v for v in tf.global_variables() if v.name == "generator/lod_g:0"
        ][0]
        lod_d_a = [
            v for v in tf.global_variables()
            if v.name == "discriminator/lod_d:0"
        ][0]
        lod_gtmp_a = [
            v for v in tf.global_variables()
            if v.name == "worst_calc_g/genTMP/lod_g:0"
        ][0]
        lod_dtmp_a = [
            v for v in tf.global_variables()
            if v.name == "worst_calc_d/discTMP/lod_d:0"
        ][0]

        lod_grow_g_a = [
            v for v in tf.global_variables()
            if v.name == "generator/lod_grow:0"
        ][0]
        lod_grow_d_a = [
            v for v in tf.global_variables()
            if v.name == "discriminator/lod_grow:0"
        ][0]
        lod_grow_gtmp_a = [
            v for v in tf.global_variables()
            if v.name == "worst_calc_g/genTMP/lod_grow:0"
        ][0]
        lod_grow_dtmp_a = [
            v for v in tf.global_variables()
            if v.name == "worst_calc_d/discTMP/lod_grow:0"
        ][0]

        lod_assign_ops = [
            tf.assign(lod_g_a, lod_in_g),
            tf.assign(lod_d_a, lod_in_d),
            tf.assign(lod_gtmp_a, lod_in_g),
            tf.assign(lod_dtmp_a, lod_in_d)
        ]

        lod_grow_assign_ops = [
            tf.assign(lod_grow_g_a, lod_in_grow),
            tf.assign(lod_grow_d_a, lod_in_grow),
            tf.assign(lod_grow_gtmp_a, lod_in_grow),
            tf.assign(lod_grow_dtmp_a, lod_in_grow)
        ]

        gan_summary = tf.summary.merge([
            g_summary,
            tf.summary.scalar('lod_in_gen', lod_g_a),
            tf.summary.scalar('lod_in_disc', lod_d_a),
            tf.summary.scalar('lod_in_grow', lod_grow_g_a)
        ])
        #---------------------------------------------------------------

        # Morphing Vars and OPs (Reference at the bottom), only supports up to resolution 128
        #----------------------------------------------------------------------------------
        tvars_d = [
            var for var in tf.trainable_variables()
            if "discriminator" in var.name
        ]
        tvars_g = [
            var for var in tf.trainable_variables() if "generator" in var.name
        ]
        to_rgb_names = [
            "ToRGB_lod" + num for num in ["5", "4", "3", "2", "1", "0"]
        ]
        fr_rgb_names = [
            "FromRGB_lod" + num for num in ["5", "4", "3", "2", "1", "0"]
        ]
        layer_names = ["8x8", "16x16", "32x32", "64x64", "128x128"]
        conv_names_g = ["Conv0", "Conv1"]
        conv_names_d = ["Conv0", "Conv1"]
        weight_names = ["weight", "bias"]

        all_rgb_vars_g = []
        all_rgb_plh_g = []
        all_rgb_asgn_ops_g = []
        all_rgb_vars_d = []
        all_rgb_plh_d = []
        all_rgb_asgn_ops_d = []
        for rgb_g, rgb_d in zip(to_rgb_names, fr_rgb_names):
            layer_asgn_ops_g = []
            layer_plh_list_g = []
            layer_var_names_g = []
            layer_asgn_ops_d = []
            layer_plh_list_d = []
            layer_var_names_d = []
            for weight in weight_names:
                var_name_g = rgb_g + "/" + weight + ":0"
                var_name_d = rgb_d + "/" + weight + ":0"
                pl_rgb_g = "pl_gen_" + rgb_g + "_" + weight
                pl_rgb_d = "pl_dis_" + rgb_d + "_" + weight
                tvar_g = [var for var in tvars_g if var_name_g in var.name][0]
                tvar_d = [var for var in tvars_d if var_name_d in var.name][0]
                pl_g = tf.placeholder(tf.float32,
                                      name=pl_rgb_g,
                                      shape=tvar_g.shape)
                pl_d = tf.placeholder(tf.float32,
                                      name=pl_rgb_d,
                                      shape=tvar_d.shape)
                asgn_op_g = tf.assign(tvar_g, pl_g)
                asgn_op_d = tf.assign(tvar_d, pl_d)

                layer_var_names_g.append(tvar_g)
                layer_plh_list_g.append(pl_g)
                layer_asgn_ops_g.append(asgn_op_g)
                layer_var_names_d.append(tvar_d)
                layer_plh_list_d.append(pl_d)
                layer_asgn_ops_d.append(asgn_op_d)

            all_rgb_vars_g.append(layer_var_names_g)
            all_rgb_plh_g.append(layer_plh_list_g)
            all_rgb_asgn_ops_g.append(layer_asgn_ops_g)
            all_rgb_vars_d.append(layer_var_names_d)
            all_rgb_plh_d.append(layer_plh_list_d)
            all_rgb_asgn_ops_d.append(layer_asgn_ops_d)

        all_var_names_g = []
        all_plh_list_g = []
        all_asgn_ops_g = []
        all_var_names_d = []
        all_plh_list_d = []
        all_asgn_ops_d = []

        for layer in layer_names:
            layer_asgn_ops_g = []
            layer_plh_list_g = []
            layer_var_names_g = []
            layer_asgn_ops_d = []
            layer_plh_list_d = []
            layer_var_names_d = []
            for conv_g, conv_d in zip(conv_names_g, conv_names_d):
                for weight in weight_names:
                    var_name_g = "/" + layer + "/" + conv_g + "/" + weight + ":0"
                    var_name_d = "/" + layer + "/" + conv_d + "/" + weight + ":0"

                    pl_name_g = "pl_gen_" + layer + "_" + conv_g + "_" + weight
                    pl_name_d = "pl_dis_" + layer + "_" + conv_d + "_" + weight

                    tvar_g = [
                        var for var in tvars_g if var_name_g in var.name
                    ][0]
                    tvar_d = [
                        var for var in tvars_d if var_name_d in var.name
                    ][0]

                    pl_g = tf.placeholder(tf.float32,
                                          name=pl_name_g,
                                          shape=tvar_g.shape)
                    pl_d = tf.placeholder(tf.float32,
                                          name=pl_name_d,
                                          shape=tvar_d.shape)
                    asgn_op_g = tf.assign(tvar_g, pl_g)
                    asgn_op_d = tf.assign(tvar_d, pl_d)

                    layer_var_names_g.append(tvar_g)
                    layer_plh_list_g.append(pl_g)
                    layer_asgn_ops_g.append(asgn_op_g)
                    layer_var_names_d.append(tvar_d)
                    layer_plh_list_d.append(pl_d)
                    layer_asgn_ops_d.append(asgn_op_d)

            all_var_names_g.append(layer_var_names_g)
            all_plh_list_g.append(layer_plh_list_g)
            all_asgn_ops_g.append(layer_asgn_ops_g)
            all_var_names_d.append(layer_var_names_d)
            all_plh_list_d.append(layer_plh_list_d)
            all_asgn_ops_d.append(layer_asgn_ops_d)

        # Reference:
        # 1.) RGBs:
        # w,b = all_RGB[i] = i'th layer RGBs (4x4 [0]->128x128 [5])
        # 2.) Convs:
        # Gen: w1, b1, w2, b2 = all_gen[i] = i'th layer Conv0_up and Conv1 weights and biases
        # Disc: w1, b1, w2, b2 = all_disc[i] = i'th layer Conv0, Conv1_down weights and biases
        #----------------------------------------------------------------------------------

    #----------------------------------------------------------------------------------
    #SESSION
    #----------------------------------------------------------------------------------
    with tf.Session(graph=graph) as sess:

        # Load model or initialise vars/lod
        if load_model:
            step = restore_model(checkpoint_folder, sess)
        else:
            sess.run(tf.global_variables_initializer())
            step = 0

        # Saver, summary_op & writer, coordinator, Training Schedule
        TS = TrainingSchedule(final_resolution=input_resolution,
                              lod_training_kimg=kimg_per_phase,
                              lod_transition_kimg=kimg_per_phase)

        lod_step_g = step
        lod_step_d = step
        lod_step_grow = step
        lod_val_g = np.floor(TS.get_lod(lod_step_g * batch_size * n_critic))
        lod_val_d = np.floor(TS.get_lod(lod_step_d * batch_size * n_critic))
        lod_val_grow = lod_val_g
        lod_new_grow = lod_val_grow
        lod_grow_bool = 0

        # Define saver, writer, etc
        saver = tf.train.Saver(max_to_keep=15)
        train_writer = tf.summary.FileWriter(checkpoint_folder, graph)
        coord = tf.train.Coordinator()

        # Ensure train init and assign current lod_vals
        sess.run(iter_train_init_op)
        sess.run([lod_assign_ops],
                 feed_dict={
                     lod_in_g: lod_val_g,
                     lod_in_d: lod_val_d
                 })
        sess.run([lod_grow_assign_ops], feed_dict={lod_in_grow: lod_val_grow})

        # --------------------------------------------------
        # Inference (generate and save images), either final model
        # --------------------------------------------------
        if not is_training:

            if is_testing:
                all_out = []
                sess.run(model.to_testing())
                for i in range(inference_steps):
                    if i % 50 == 0:
                        logging.info('Current inference Step: %d' % i)
                    outputs = sess.run(
                        [model.get_fake_images(g_var_scope="generator")])
                    all_out.append(outputs)
                    if i % 100 == 0 and i > 0:
                        np.save(checkpoint_folder + '/inf_' + str(i), all_out)
                        all_out = []
                sess.run(model.to_training())
                np.save(checkpoint_folder + '/inf_' + str(i), all_out)
                raise ValueError('Inference done.')
        # --------------------------------------------------

        # EMA initializer to_training
        sess.run(model.to_training())

        # Train
        while step < training_steps and not coord.should_stop():
            try:

                dg_val, maxmin_val, minmax_val = training_step(
                    sess, step, model, n_critic, d_train, sum_freq, g_train,
                    gan_summary, snap_freq, snap_summary, dg_freq,
                    current_to_tmp_op, iter_val_init_op, dg_steps,
                    d_train_worst, iter_test_init_op, minmax, g_train_worst,
                    maxmin, iter_train_init_op)

                old_step = step
                lod_step_g = step
                lod_step_d = step

                # Update lod
                lod_new_g = np.floor(
                    TS.get_lod(lod_step_g * batch_size * n_critic))
                lod_new_d = np.floor(
                    TS.get_lod(lod_step_d * batch_size * n_critic))

                # Growth LOD helper to determine when to morph and to ip non-lins
                if step % steps_per_phase == 0 and step > 0 and step % (
                        2 * steps_per_phase) != 0:
                    lod_grow_bool = 1
                if lod_grow_bool == 1:
                    if lod_new_grow == np.floor(lod_new_grow):
                        lod_new_grow = lod_new_grow - 1.0 / growth_ip_steps
                    else:
                        lod_new_grow = np.maximum(
                            lod_new_grow - 1.0 / growth_ip_steps,
                            np.floor(lod_new_grow))
                    if lod_new_grow % np.floor(lod_new_grow) < 1e-3:
                        lod_new_grow = np.floor(lod_new_grow)
                        lod_grow_bool = 0
                    if lod_new_grow < 0.0:
                        lod_new_grow = 0.0

                #----------------------------------------------------------------------------------
                # MORPHISM
                #----------------------------------------------------------------------------------
                # Reference:
                # GAN Filters:   (K,K,Cin,Cout)
                # Morph Filters: (Cout,Cin,K,K)

                if lod_new_g != lod_val_g or lod_new_d != lod_val_d:
                    # Snaps before Morphing
                    sum_be_morph = sess.run(snap_summary_2)
                    train_writer.add_summary(sum_be_morph, step)
                    train_writer.flush()

                    # Find morph layer, and get all weight variables/references
                    ci = int(4.0 - lod_new_g)  #0,1,2,3,4
                    w_torgb_old, b_torgb_old = sess.run(all_rgb_vars_g[ci])
                    w_fromrgb_old, b_fromrgb_old = sess.run(all_rgb_vars_d[ci])
                    w_torgb_new, b_torgb_new = sess.run(all_rgb_vars_g[ci + 1])
                    w_fromrgb_new, b_fromrgb_new = sess.run(all_rgb_vars_d[ci +
                                                                           1])
                    wtorgb_shape = w_torgb_new.shape
                    btorgb_shape = b_torgb_new.shape
                    wfromrgb_shape = w_fromrgb_new.shape
                    bfromrgb_shape = b_fromrgb_new.shape

                    w1_g_shape = all_var_names_g[ci][0].shape
                    b1_g_shape = all_var_names_g[ci][1].shape
                    w2_g_shape = all_var_names_g[ci][2].shape
                    b2_g_shape = all_var_names_g[ci][3].shape
                    w1_d_shape = all_var_names_d[ci][0].shape
                    b1_d_shape = all_var_names_d[ci][1].shape
                    w2_d_shape = all_var_names_d[ci][2].shape
                    b2_d_shape = all_var_names_d[ci][3].shape

                    # ----------------------Generator ---------------------
                    # Case 1: No halving of convolutional channels:
                    if w1_g_shape == w2_g_shape:
                        G = np.transpose(
                            w_torgb_old,
                            [3, 2, 0, 1])  #(K,K,Cin,Cout)->(Cout,Cin,K,K)
                        Cout = w1_g_shape[3]
                        F1, F3 = decomp_filters_lsq_iter(
                            G, Cout, 3, 1, iters=100)  # morph 1 step
                        F2, F3 = decomp_filters_lsq_iter(
                            F3, Cout, 3, 1, iters=100)  # morph 2 step

                        ## Identity Filter
                        #F1     = np.zeros(w1_g_shape)
                        #for i in range(0,w1_g_shape[2]):
                        #    F1[:,:,i,i] = np.array([[0,0,0],[0,1.0,0],[0,0,0]])

                        w1_g = np.transpose(F1, [2, 3, 1, 0])
                        w1_g = w1_g / get_wscale(
                            w1_g.shape)  #Undo wscaling effect
                        b1_g = np.zeros(shape=b1_g_shape)
                        w2_g = np.transpose(
                            F2, [2, 3, 1, 0])  #(Cout,Cin,K,K)->(K,K,Cin,Cout)
                        w2_g = w2_g / get_wscale(
                            w2_g.shape)  #Undo wscaling effect
                        b2_g = np.zeros(shape=b2_g_shape)
                        wrgb_g = np.transpose(
                            F3, [2, 3, 1, 0]
                        )  #(Cout,Cin,K,K)->(K,K,Cin,Cout) || Here no wscale undo because same as previous toRGB
                        brgb_g = b_torgb_old

                        assert w1_g.shape == w1_g_shape and w2_g.shape == w2_g_shape
                        assert wrgb_g.shape == wtorgb_shape and brgb_g.shape == btorgb_shape

                    # Case 2: Halving of convolutional channels:
                    else:
                        G = np.transpose(
                            w_torgb_old,
                            [3, 2, 0, 1])  #(K,K,Cin,Cout)->(Cout,Cin,K,K)
                        Cout = w1_g_shape[3]
                        F1, F3 = decomp_filters_lsq_iter(G,
                                                         Cout,
                                                         3,
                                                         1,
                                                         iters=100)

                        ## Identity Filter
                        #F2     = np.zeros(w2_g_shape)
                        #for i in range(0,w2_g_shape[2]):
                        #    F2[:,:,i,i] = np.array([[0,0,0],[0,1.0,0],[0,0,0]])

                        F2, F3 = decomp_filters_lsq_iter(F3,
                                                         Cout,
                                                         3,
                                                         1,
                                                         iters=100)

                        w1_g = np.transpose(
                            F1, [2, 3, 1, 0])  #(Cout,Cin,K,K)->(K,K,Cin,Cout)
                        w1_g = w1_g / get_wscale(w1_g.shape)  #Undo wscale
                        b1_g = np.zeros(shape=b1_g_shape)
                        w2_g = np.transpose(F2, [2, 3, 1, 0])
                        w2_g = w2_g / get_wscale(w2_g.shape)  #Undo wscale
                        b2_g = np.zeros(
                            shape=b2_g_shape)  #(Cout,Cin,K,K)->(K,K,Cin,Cout)
                        wrgb_g = np.transpose(F3, [2, 3, 1, 0])
                        wrgb_g = wrgb_g / get_wscale(
                            wrgb_g.shape, gain=1) * get_wscale(
                                w_torgb_old.shape, gain=1)
                        brgb_g = b_torgb_old

                        logging.info('w2_is %s, w2_should %s', w2_g.shape,
                                     w2_g_shape)
                        logging.info('w1_is %s, w1_should %s', w1_g.shape,
                                     w1_g_shape)
                        logging.info('wrgb_is %s, wrgb_should %s',
                                     wrgb_g.shape, wtorgb_shape)
                        logging.info('brgb_is %s, brgb_should %s',
                                     brgb_g.shape, btorgb_shape)
                        assert w1_g.shape == w1_g_shape and w2_g.shape == w2_g_shape
                        assert wrgb_g.shape == wtorgb_shape and brgb_g.shape == btorgb_shape

                    # Assign new Weights:
                    sess.run(
                        [
                            all_rgb_asgn_ops_g[ci + 1][0],
                            all_rgb_asgn_ops_g[ci + 1][1]
                        ],
                        feed_dict={
                            all_rgb_plh_g[ci + 1][0]: wrgb_g,
                            all_rgb_plh_g[ci + 1][1]: brgb_g
                        })

                    sess.run(
                        [
                            all_asgn_ops_g[ci][0], all_asgn_ops_g[ci][1],
                            all_asgn_ops_g[ci][2], all_asgn_ops_g[ci][3]
                        ],
                        feed_dict={
                            all_plh_list_g[ci][0]: w1_g,
                            all_plh_list_g[ci][1]: b1_g,
                            all_plh_list_g[ci][2]: w2_g,
                            all_plh_list_g[ci][3]: b2_g
                        })
                    # -----------------------------------------------------

                    # ----------------- Discriminator ---------------------
                    # Case 1: No Doubling of convolutional channels:
                    if w1_d_shape == w2_d_shape:
                        G = np.transpose(
                            w_fromrgb_old,
                            [3, 2, 0, 1])  #(K,K,Cin,Cout)->(Cout,Cin,K,K)
                        Cout = w2_d_shape[2]
                        F1, F3 = decomp_filters_lsq_iter(G,
                                                         Cout,
                                                         1,
                                                         3,
                                                         iters=100)
                        F1, F2 = decomp_filters_lsq_iter(F1,
                                                         Cout,
                                                         1,
                                                         3,
                                                         iters=100)

                        # Identity filter (no longer)
                        #F3     = np.zeros(w2_d_shape)
                        #for i in range(0,w2_d_shape[2]):
                        #    F3[:,:,i,i] = np.array([[0,0,0],[0,1.0,0],[0,0,0]])

                        wrgb_d = np.transpose(
                            F1, [2, 3, 1, 0]
                        )  #(Cout,Cin,K,K)->(K,K,Cin,Cout), here no undo-wscale: Same Dim as fromRGB_old
                        brgb_d = np.zeros(bfromrgb_shape)
                        w1_d = np.transpose(
                            F2, [2, 3, 1, 0])  #(Cout,Cin,K,K)->(K,K,Cin,Cout)
                        w1_d = w1_d / get_wscale(w1_d.shape)  #Undo wscale
                        b1_d = np.zeros(shape=b1_d_shape)
                        w2_d = np.transpose(F3, [2, 3, 1, 0])
                        w2_d = w2_d / get_wscale(w2_d.shape)  #Undo wscale
                        b2_d = b_fromrgb_old
                        assert w1_d.shape == w1_d_shape and w2_d.shape == w2_d_shape
                        assert wrgb_d.shape == wfromrgb_shape and brgb_d.shape == bfromrgb_shape

                    # Case 2: Doubling of convolutional channels:
                    else:
                        G = np.transpose(
                            w_fromrgb_old,
                            [3, 2, 0, 1])  #(K,K,Cin,Cout)->(Cout,Cin,K,K)
                        Cout = w2_d_shape[2]
                        F1, F3 = decomp_filters_lsq_iter(G,
                                                         Cout,
                                                         1,
                                                         3,
                                                         iters=100)
                        F1, F2 = decomp_filters_lsq_iter(F1,
                                                         Cout,
                                                         1,
                                                         3,
                                                         iters=100)

                        #F2     = np.zeros(w1_d_shape)
                        #for i in range(0,w1_d_shape[2]):
                        #    F2[:,:,i,i] = np.array([[0,0,0],[0,1.0,0],[0,0,0]])

                        wrgb_d = np.transpose(
                            F1, [2, 3, 1, 0])  #(Cout,Cin,K,K)->(K,K,Cin,Cout)
                        wrgb_d = wrgb_d / get_wscale(
                            wrgb_d.shape, gain=1) * get_wscale(
                                w_fromrgb_old.shape,
                                gain=1)  #Same wscale as old
                        brgb_d = np.zeros(bfromrgb_shape)
                        w1_d = np.transpose(F2, [2, 3, 1, 0])
                        w1_d = w1_d / get_wscale(w1_d.shape)
                        b1_d = np.zeros(shape=b1_d_shape)
                        w2_d = np.transpose(
                            F3, [2, 3, 1, 0])  #(Cout,Cin,K,K)->(K,K,Cin,Cout)
                        w2_d = w2_d / get_wscale(w2_d.shape)
                        b2_d = b_fromrgb_old
                        logging.info('w2_is %s, w2_should %s', w2_d.shape,
                                     w2_d_shape)
                        logging.info('w1_is %s, w1_should %s', w1_d.shape,
                                     w1_d_shape)
                        logging.info('wrgb_is %s, wrgb_should %s',
                                     wrgb_d.shape, wfromrgb_shape)
                        logging.info('brgb_is %s, brgb_should %s',
                                     brgb_d.shape, bfromrgb_shape)
                        assert w1_d.shape == w1_d_shape and w2_d.shape == w2_d_shape
                        assert wrgb_d.shape == wfromrgb_shape and brgb_d.shape == bfromrgb_shape
                    # -----------------------------------------------------
                    # Assign new weights:
                    sess.run(
                        [
                            all_rgb_asgn_ops_d[ci + 1][0],
                            all_rgb_asgn_ops_d[ci + 1][1]
                        ],
                        feed_dict={
                            all_rgb_plh_d[ci + 1][0]: wrgb_d,
                            all_rgb_plh_d[ci + 1][1]: brgb_d
                        })

                    sess.run(
                        [
                            all_asgn_ops_d[ci][0], all_asgn_ops_d[ci][1],
                            all_asgn_ops_d[ci][2], all_asgn_ops_d[ci][3]
                        ],
                        feed_dict={
                            all_plh_list_d[ci][0]: w1_d,
                            all_plh_list_d[ci][1]: b1_d,
                            all_plh_list_d[ci][2]: w2_d,
                            all_plh_list_d[ci][3]: b2_d
                        })
                    # -----------------------------------------------------
                    # Assign new lod's
                    sess.run([lod_assign_ops],
                             feed_dict={
                                 lod_in_g: lod_new_g,
                                 lod_in_d: lod_new_d
                             })
                    sess.run([g_opt_reset, d_opt_reset])

                    # Summary after morphing
                    sum_be_morph_3 = sess.run(snap_summary_1)
                    train_writer.add_summary(sum_be_morph_3, step)
                    train_writer.flush()

                if lod_new_grow != lod_val_grow:
                    sess.run([lod_grow_assign_ops],
                             feed_dict={lod_in_grow: lod_new_grow})

                lod_val_g = lod_new_g
                lod_val_d = lod_new_d
                lod_val_grow = lod_new_grow

                # Saver
                if step > 0 and step % save_model_freq == 0:
                    save_path = saver.save(sess,
                                           checkpoint_folder + "/model.ckpt",
                                           global_step=step)
                    logging.info("Model saved in file: {}".format(save_path))

                if step == old_step:
                    step += 1
            except KeyboardInterrupt:
                logging.info('Interrupted')
                coord.request_stop()
            except Exception as e:
                logging.warning(
                    "Unforeseen error at step {}. Requesting stop...".format(
                        step))
                coord.request_stop(e)

        save_path = saver.save(sess,
                               checkpoint_folder + "/model.ckpt",
                               global_step=step)

        logging.info("Model saved in file: %s" % save_path)
Example #21
def main(stdscr):

  # if config.ini exists, continue
  #  else create it and exit
  if not os.path.exists(config):
    with open(config, 'w') as cfgFile:
      cfgFile.write(configContents)
    print("created config file")
    leave()
  
  # get settings from file
  try:
    com = 'COM' + Config.gen('COM')
    baudrate = int(Config.gen('BAUD'))
  except Exception:
    print("invalid inputs")
    leave()
  
  # name dataFiles after date and time or user chosen
  dataPath = Config.gen('FOLDER')
  if Config.gen('FILENAMEDATE')[0] == 'y':
    tm = time.localtime()
    dataFile = f'{tm[7]:03}{tm[3]:02}{tm[4]:02}{tm[5]:02}'
  else:
    dataFile = input("file to store data: ")
    while dataFile == "":
      dataFile = input("file to store data: ")
    while os.path.exists(dataFile):
      if input("overwrite existing file? (y for yes): ") == "y":
         break
      else:
         dataFile = input("file to store data: ")
         while dataFile == "":
           dataFile = input("file to store data: ")

  dataPath = dataPath + '/' +  dataFile

  # from here on we want a nicer output
  stdscr.clear()
  # open serial port
  ser.baudrate = baudrate
  ser.port = com
  ser.timeout = 2
  cnt = 0
  while not ser.is_open:
    try:
      ser.open()
    except Exception:
      stdscr.clear()
      stdscr.addstr(0, 0, 'opening port: ' + com + ' at ' + str(baudrate) + " try: " + str(cnt))
      stdscr.refresh()
      cnt += 1

  # collect and log data
  numMismatch = 0
  maxMismatch = 5

  allKeys = list(sensors.keys())
  buf = bytearray()
  stdscr.nodelay(1) # make getch() non-blocking
  while True:
    # rewrite of ser.readline()
    #   used since readline() is slow
    try:
      buf = buf + ser.read(max(1, ser.in_waiting))
      i = buf.find(b'\n')
      if i >= 0:
          line = buf[:i+1]
          buf = buf[i+1:]
      else:
          continue
    except Exception as e:
      stdscr.clear()
      stdscr.addstr(0, 0, 'error')
      stdscr.refresh()
      continue

    if not line:  # line is bytes; comparing against '' would never match
      stdscr.clear()
      stdscr.addstr(0, 0, 'empty buffer')
      stdscr.refresh()
      continue

    vals = line.split(b':')
    if len(vals) != len(allKeys):
      numMismatch += 1
      if numMismatch >= maxMismatch:
        numMismatch = 0
        stdscr.clear()
        stdscr.addstr(0, 0, 'mismatch in sensor number')
        stdscr.addstr(1, 0, 'press enter to continue')
        stdscr.refresh()
        stdscr.nodelay(0)
        stdscr.getkey()
        stdscr.nodelay(1)
        continue
      else:
        continue

    numMismatch = 0 # if we get here, no consecutive mismatch
    output = ''
    for k in range(len(allKeys)):
      try:
        sensors[allKeys[k]].append(float(vals[k]))
      except ValueError:
        sensors[allKeys[k]].append(vals[k])
    for k in sensors:
      try:
        output = output + k + ": " + str(float(vals[allKeys.index(k)])) + '\n'
      except ValueError:
        output = output + k + ": " + str(vals[allKeys.index(k)]) + '\n'
    output = output + 'saving to: ' + dataPath + '\n'
    output = output + 'len of TPS: ' + str(len(sensors['TPS'])) + '\n'
    output = output + 'shift-s to save and exit'
    stdscr.clear()
    stdscr.addstr(output)
    stdscr.refresh()

    if stdscr.getch() == ord('S'):
      break

  with open(dataPath, 'wb') as df:
    pickle.dump(sensors, df) # save file
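Reading the logged data back is the mirror image of the final pickle.dump (the path below stands in for whatever dataPath held at save time):

import pickle

data_path = "data/run1"  # illustrative; matches dataPath used at save time
with open(data_path, 'rb') as df:
    sensors = pickle.load(df)
print(len(sensors['TPS']), "TPS samples")  # 'TPS' is one of the keys shown above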
Example #22
def bayesian_cnn_exp(gene, mode):
    BOconfig = Config.get("bayesianOpt")
    mlp_parameters_space = [
        conf_to_params(conf)
        for conf in Config.get("bayesianOpt")["hyperparameters"]
    ]

    @use_named_args(mlp_parameters_space)
    def fitness_mlp(kernel_space_1, units_2, kernel_space_2, dense_1, dense_2):
        es = EarlyStopping(monitor='val_loss',
                           patience=Config.get("ESValPatience"),
                           min_delta=Config.get("ESValMinDelta"),
                           baseline=0.2)

        model, hist = train_bayesian_cnn(X_train_int, y_train_int,
                                         (X_val, y_val), es, kernel_space_1,
                                         units_2, kernel_space_2, dense_1,
                                         dense_2)

        val_auprc = hist.history['val_auprc'][-1]
        print()
        print("Validation Loss: {}".format(val_auprc))
        print()
        return -val_auprc

    print()
    print("Importing Epigenetic data...")
    print()
    X, y, features_size = filter_by_tasks(*import_sequence_dataset(
        "data", gene),
                                          Config.get("task"),
                                          perc=Config.get("samplePerc"))

    print("Datasets length: {}, {}".format(len(X), len(y)))
    print("Features sizes: {}".format(features_size))

    metrics = {'losses': [], 'auprc': [], 'auroc': []}
    delta_stopper = DeltaYStopper(n_best=BOconfig["n_best"],
                                  delta=BOconfig["delta"])

    for ext_holdout in range(Config.get("nExternalHoldout")):
        print()
        print("{}/{} EXTERNAL HOLDOUTS".format(ext_holdout,
                                               Config.get("nExternalHoldout")))
        print()
        X_train, X_test, y_train, y_test = split(X,
                                                 y,
                                                 random_state=42,
                                                 proportions=None,
                                                 mode=mode)

        # Internal holdouts
        X_train_int, X_val, y_train_int, y_val = split(X_train,
                                                       y_train,
                                                       random_state=42,
                                                       proportions=None,
                                                       mode=mode)

        print("Searching Parameters...")
        print()

        min_res = gp_minimize(func=fitness_mlp,
                              dimensions=mlp_parameters_space,
                              acq_func=BOconfig["acq_function"],
                              callback=[delta_stopper],
                              n_calls=BOconfig["nBayesianOptCall"])

        print()
        print("Training with best parameters found: {}".format(min_res.x))
        print()
        print(X_train)

        es = EarlyStopping(monitor='val_loss',
                           patience=Config.get("ESPatience"),
                           min_delta=Config.get("ESMinDelta"))
        model, _ = train_bayesian_cnn(X_train, y_train, None, es, min_res.x[0],
                                      min_res.x[1], min_res.x[2], min_res.x[3],
                                      min_res.x[4])

        eval_score = model.evaluate(X_test, y_test)
        # K.clear_session()

        print("Metrics names: ", model.metrics_names)
        print("Final Scores: ", eval_score)
        metrics['losses'].append(eval_score[0])
        metrics['auprc'].append(eval_score[1])
        metrics['auroc'].append(eval_score[2])

    return metrics