def main_cgega_iterative_training(dataset,
                                  subject,
                                  model,
                                  params1,
                                  params2,
                                  exp,
                                  eval_set,
                                  ph,
                                  save_iter=False):
    """Run the iterative CG-EGA training pipeline for one subject.

    Preprocesses the subject's data, runs the progressive-improvement-of-
    clinical-acceptability (PICA) training loop, post-processes the outputs
    and saves the results.

    :param dataset: name of the benchmark dataset
    :param subject: subject identifier within the dataset
    :param model: name of the model (resolved through locate_model)
    :param params1: name of the first parameter set (resolved via locate_params)
    :param params2: name of the second parameter set (resolved via locate_params)
    :param exp: experiment name, used in the weights/results path
    :param eval_set: evaluation split passed to the PICA loop — presumably
        "valid" or "test"; confirm against progressive_improvement_clinical_acceptability
    :param ph: prediction horizon in minutes
    :param save_iter: if True, also save the per-iteration validation results
    """
    printd(dataset, subject, model, params1, params2, exp, eval_set, ph)

    # retrieve model's parameters
    params1 = locate_params(params1)
    params2 = locate_params(params2)
    model_class = locate_model(model)

    # scale variables in minutes to the benchmark sampling frequency
    ph_f = ph // cs.freq
    hist_f = params1["hist"] // cs.freq
    day_len_f = cs.day_len // cs.freq
    freq_ds = misc.datasets.datasets[dataset]["glucose_freq"]
    """ PREPROCESSING """
    train, valid, test, scalers = preprocessing(dataset, subject, ph_f, hist_f,
                                                day_len_f)
    """ MODEL TRAINING """
    # renamed from `dir`/`file` to avoid shadowing the builtins
    weights_dir = join(cs.path, "processing", "models", "weights", "cg_ega")
    weights_file = join(weights_dir, exp,
                       model_class.__name__ + "_" + dataset + subject)

    results_test, results_valid_iter = progressive_improvement_clinical_acceptability(
        subject, model_class, params1, params2, ph, freq_ds, train, valid,
        test, scalers, weights_file, eval_set)

    results_test = postprocessing(results_test, scalers, dataset)
    results_valid_iter = postprocessing_all_iter(results_valid_iter, scalers,
                                                 dataset)

    ResultsSubject(model,
                   exp,
                   ph,
                   dataset,
                   subject,
                   params=[params1, params2],
                   results=results_test).save_raw_results()
    if save_iter:
        ResultsSubjectPICA(model,
                           exp,
                           ph,
                           dataset,
                           subject,
                           params=[params1, params2],
                           results=results_valid_iter).save_raw_results()
# Example 2
def process_main_args(args_):
    """Dispatch a transfer-learning run according to ``args_.tl_mode``.

    Resolves the model class and its parameters from their names, optionally
    redirects stdout to a log file, then runs the pipeline stage(s) matching
    the requested transfer-learning mode.

    :param args_: parsed command-line arguments; expected attributes include
        model, params, log, source_dataset, target_dataset, target_subject,
        tl_mode, weights, eval_mode, exp, plot, params_ft, save
    """
    model = locate_model(args_.model)
    params = locate_params(args_.params)

    # redirect the logs to a file if specified
    if args_.log is not None:
        log_file = args_.log
        log_path = os.path.join(path, "logs", log_file)
        sys.stdout = open(log_path, "w")

    # BUG FIX: the original trailing comma made sbj_msg an accidental tuple
    # ("<src>_2_<tgt>", " <subject>"); build a single message string instead.
    sbj_msg = args_.source_dataset + "_2_" + args_.target_dataset + " " + args_.target_subject
    if args_.tl_mode == "source_training":
        printd("source_training", sbj_msg)
        main_source_training(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params,
                             args_.weights, args_.eval_mode)
    elif args_.tl_mode == "source_training_test":
        printd("source_training_test", sbj_msg)
        main_source_training(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params,
                             args_.weights, args_.eval_mode)
        main_target_global(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params,
                           args_.weights, args_.eval_mode, args_.exp, args_.plot)
    elif args_.tl_mode == "end_to_end_0":
        printd("end_to_end_0", sbj_msg)
        end_to_end(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params,
                   args_.weights, args_.eval_mode, args_.exp, args_.plot)
    elif args_.tl_mode == "target_training":
        printd("target_training", sbj_msg)
        main_target_training(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params,
                             args_.eval_mode, args_.exp, args_.plot)
    elif args_.tl_mode == "target_global":
        printd("target_global", sbj_msg)
        main_target_global(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params,
                           args_.weights, args_.eval_mode, args_.exp, args_.plot)
    elif args_.tl_mode == "target_finetuning":
        printd("target_finetuning", sbj_msg)
        main_target_finetuning(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params,
                               args_.weights, args_.eval_mode, args_.exp, args_.plot)
    elif args_.tl_mode == "end_to_end" and args_.params_ft is not None:
        printd("end_to_end", sbj_msg)

        # fine-tuning uses a dedicated parameter set
        params_ft = locate_params(args_.params_ft)

        main_source_training(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params,
                             args_.weights, args_.eval_mode)
        main_target_global(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params_ft,
                           args_.weights, args_.eval_mode, args_.exp, args_.plot)
        main_target_finetuning(args_.source_dataset, args_.target_dataset, args_.target_subject, model, params_ft,
                               args_.weights, args_.eval_mode, args_.exp, args_.plot, args_.save)
# Example 3
 def __init__(self, dataset, ph, hist, experiment, params):
     """Store the experiment configuration, with horizons scaled from
     minutes to numbers of samples at the benchmark frequency."""
     self.dataset = dataset
     # convert minute-based durations to sample counts
     self.ph, self.hist = ph // cs.freq, hist // cs.freq
     self.exp = experiment
     # resolve the named parameter set into a concrete dict
     self.params = locate_params(params)
     # per-subject split containers, filled later
     self.train, self.valid = {}, {}
     self.test, self.scalers = {}, {}
def main_standard(dataset, subject, model, params, exp, eval_set, ph):
    """Standard pipeline: preprocess one subject, train a pcLSTM-style model,
    post-process the predictions and save the raw results."""
    printd(dataset, subject, model, params, exp, eval_set, ph)

    # resolve the model class and its hyper-parameters from their names
    model_class = locate_model(model)
    params = locate_params(params)

    # express minute-based durations as sample counts at the benchmark frequency
    ph_f = ph // cs.freq
    hist_f = params["hist"] // cs.freq
    day_len_f = cs.day_len // cs.freq

    """ PREPROCESSING """
    train, valid, test, scalers = preprocessing(
        dataset, subject, ph_f, hist_f, day_len_f)

    """ MODEL TRAINING """
    raw_results = make_predictions_pclstm(
        subject, model_class, params, ph_f, train, valid, test, scalers,
        mode=eval_set)

    """ POST-PROCESSING """
    raw_results = postprocessing(raw_results, scalers, dataset)

    """ EVALUATION """
    ResultsSubject(model, exp, ph, dataset, subject, params=params,
                   results=raw_results).save_raw_results()
# Example 5
def study(dataset, model, params, mode, ph, all_feat, patients, combs):
    """Feature-combination ablation study.

    For every patient, preprocess the full feature set once, then for each
    feature combination in `combs` train the model under 5 fixed seeds and
    print per-seed mean/std results. The empty combination is saved under
    the "reference" directory (glucose-only baseline).

    :param dataset: name of the dataset
    :param model: name of the model (resolved through locate_model)
    :param params: name of the parameter set (resolved through locate_params)
    :param mode: evaluation mode forwarded to make_predictions / ResultsSubject
    :param ph: prediction horizon in minutes
    :param all_feat: list of all available features
    :param patients: iterable of patient identifiers
    :param combs: iterable of feature combinations (each a list of features)
    """
    # retrieve model's parameters
    params = locate_params(params)
    model_class = locate_model(model)

    # scale variables in minutes to the benchmark sampling frequency
    ph_f = ph // cs.freq
    hist_f = params["hist"] // cs.freq
    day_len_f = cs.day_len // cs.freq

    # full processing
    for i in patients:
        # renamed from `dir` to avoid shadowing the builtin
        patient_dir = os.path.join(cs.path, "study", dataset, model, mode,
                                   "patient " + str(i))
        """ PREPROCESSING ALL FEATURES"""
        printd("Preprocessing patient " + str(i))
        data = preprocessing_full(dataset, str(i), ph_f, hist_f, day_len_f,
                                  all_feat)

        for ele in combs:
            printd("Preprocessing patient", str(i),
                   "with features glucose " + " + ".join(ele))
            train, valid, test, scalers = preprocessing_select(
                data, dataset, day_len_f, all_feat, ele)

            # dedupe the previously duplicated if/else path building:
            # empty combination -> glucose-only "reference" baseline
            comb_name = " + ".join(ele) if ele else "reference"

            for j in range(5):
                torch.manual_seed(j)
                """ MODEL TRAINING & TUNING """
                weights_file = os.path.join(patient_dir, comb_name,
                                            "seed " + str(j), "weights",
                                            "weights")
                raw_results = make_predictions(str(i),
                                               model_class,
                                               params,
                                               ph_f,
                                               train,
                                               valid,
                                               test,
                                               mode=mode,
                                               save_model_file=weights_file)
                """ POST-PROCESSING """
                raw_results = postprocessing(raw_results, scalers, dataset)
                """ EVALUATION """
                file_save = os.path.join(comb_name, "seed " + str(j))
                results = ResultsSubject(model,
                                         file_save,
                                         ph,
                                         dataset,
                                         str(i),
                                         params=params,
                                         results=raw_results,
                                         study=True,
                                         mode=mode)
                printd(results.compute_mean_std_results())
# Example 6
# File: main.py  Project: dotXem/GLYFE
def main(dataset, subject, model, params, exp, mode, log, ph, plot):
    """Benchmark entry point: preprocess, optionally tune hyper-parameters,
    train, post-process and print the subject's results."""
    printd(dataset, subject, model, params, exp, mode, log, ph, plot)

    # resolve the (optional) search description, the parameter set and the
    # model class from their names
    search = locate_search(params)
    params = locate_params(params)
    model_class = locate_model(model)

    # express minute-based durations as sample counts at the benchmark frequency
    ph_f, hist_f = ph // cs.freq, params["hist"] // cs.freq
    day_len_f = cs.day_len // cs.freq

    """ PREPROCESSING """
    train, valid, test, scalers = preprocessing(dataset, subject, ph_f,
                                                hist_f, day_len_f)

    """ MODEL TRAINING & TUNING """
    if search:
        params = find_best_hyperparameters(subject, model_class, params,
                                           search, ph_f, train, valid, test)
    raw_results = make_predictions(subject, model_class, params, ph_f, train,
                                   valid, test, mode=mode)

    """ POST-PROCESSING """
    raw_results = postprocessing(raw_results, scalers, dataset)

    """ EVALUATION """
    results = ResultsSubject(model, exp, ph, dataset, subject, params=params,
                             results=raw_results)
    printd(results.compute_results())
    if plot:
        results.plot(0)
# Example 7
    def __init__(self, source_dataset, target_dataset, target_subject,
                 exp_name, model_name, params):
        """Set up a multi-source transfer experiment for one target subject:
        resolve the model and parameters, scale the horizons, and build the
        source train/valid/test splits."""
        self.source_dataset = source_dataset
        self.target_dataset = target_dataset
        self.target_subject = target_subject
        self.exp_name = exp_name
        self.model_name = model_name

        # resolve the model class and its hyper-parameters from their names
        self.Model = locate_model(model_name)
        self.params = locate_params(params)

        # horizons expressed as sample counts at the benchmark frequency
        self.ph = misc.constants.ph_f
        self.hist = self.params["hist"] // misc.constants.freq
        self.day_len = misc.constants.day_len_f

        # build the multi-source splits; the returned scalers are discarded
        self.train, self.valid, self.test, _ = preprocessing_source_multi(
            self.source_dataset, self.target_dataset, self.target_subject,
            self.ph, self.hist, self.day_len)
    def __init__(self, source_ds, target_ds, weights_name, model_name, params_name):
        """
        Object used to analyze the features output by saved models
        pre-transfer (models trained on the multiple sources, before
        finetuning). Can be used to plot and save t-SNE representations,
        and to compute the local domain perplexity metric.
        :param source_ds: source dataset (e.g., "idiab", "ohio", "idiab+t1dms", etc.)
        :param target_ds: target dataset (i.e., "idiab" or "ohio")
        :param weights_name: name of the weights
        :param model_name: name of the model
        :param params_name: name of the used parameters
        """
        self.source_dataset = source_ds
        self.target_dataset = target_ds
        self.exp_name = weights_name
        self.model_name = model_name

        # resolve the model class, its parameters and the target subjects
        self.Model = locate_model(model_name)
        self.params = locate_params(params_name)
        self.target_subjects = misc.datasets.datasets[self.target_dataset]["subjects"]

        # horizons expressed as sample counts at the benchmark frequency
        self.ph = misc.constants.ph_f
        self.hist = self.params["hist"] // misc.constants.freq
        self.day_len = misc.constants.day_len_f
# Example 9
def main(dataset,
         subject,
         model,
         params,
         exp,
         mode,
         log,
         ph,
         plot,
         save=False):
    """Benchmark entry point with optional hyper-parameter search, optional
    model-weight saving, and elapsed-time reporting.

    :param dataset: name of the dataset
    :param subject: subject identifier within the dataset
    :param model: name of the model (resolved through locate_model)
    :param params: name of the parameter set; may also describe a search space
    :param exp: experiment name used by ResultsSubject
    :param mode: evaluation mode forwarded to make_predictions
    :param log: log destination (printed only; not otherwise used here)
    :param ph: prediction horizon in minutes
    :param plot: if True, plot the first day of results
    :param save: if True, save the trained model weights under the experiment
    """
    printd(dataset, subject, model, params, exp, mode, log, ph, plot)

    # retrieve model's parameters
    search = locate_search(params)
    params = locate_params(params)
    model_class = locate_model(model)

    # scale variables in minutes to the benchmark sampling frequency
    ph_f = ph // cs.freq
    hist_f = params["hist"] // cs.freq
    day_len_f = cs.day_len // cs.freq
    """ PREPROCESSING """
    train, valid, test, scalers = preprocessing(dataset, subject, ph_f, hist_f,
                                                day_len_f)
    start = time.time()
    """ MODEL TRAINING & TUNING """
    if search:
        params = find_best_hyperparameters(subject, model_class, params,
                                           search, ph_f, train, valid, test)

    if save:
        # renamed from `dir`/`file` to avoid shadowing the builtins
        weights_dir = os.path.join(cs.path, "processing", "models", "weights",
                                   model_class.__name__, exp)
        save_file = os.path.join(
            weights_dir, model_class.__name__ + "_" + dataset + subject)
    else:
        save_file = None

    raw_results = make_predictions(subject,
                                   model_class,
                                   params,
                                   ph_f,
                                   train,
                                   valid,
                                   test,
                                   mode=mode,
                                   save_model_file=save_file)
    """ POST-PROCESSING """
    raw_results = postprocessing(raw_results, scalers, dataset)
    """ EVALUATION """
    results = ResultsSubject(model,
                             exp,
                             ph,
                             dataset,
                             subject,
                             params=params,
                             results=raw_results)
    printd(results.compute_mean_std_results())
    end = time.time()
    printd("Time elapsed : " + str(end - start) + " seconds")
    if plot:
        results.plot(0)