Example No. 1
def run_ma(self):
    result = None
    # dispatch on type; build an R object, then run the analysis
    if self.data_type == "binary":
        # note that this call creates a tmp object in R called
        # tmp_obj (though you can pass in whatever var name
        # you'd like)
        meta_py_r.ma_dataset_to_simple_binary_robj(self.model)
        result = meta_py_r.run_binary_ma(self.current_method, self.current_param_vals)
    elif self.data_type == "continuous":
        meta_py_r.ma_dataset_to_simple_continuous_robj(self.model)
        result = meta_py_r.run_continuous_ma(self.current_method, self.current_param_vals)
    self.parent().analysis(result)
    self.accept()
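The dispatch-on-type logic above can equivalently be written as a small lookup table, which keeps each converter/runner pairing in one place. A minimal sketch reusing the meta_py_r calls shown in the example (the table itself is illustrative, not part of the source):

    def run_ma(self):
        # map each data type to its (R-converter, analysis-runner) pair
        dispatch = {
            "binary": (meta_py_r.ma_dataset_to_simple_binary_robj,
                       meta_py_r.run_binary_ma),
            "continuous": (meta_py_r.ma_dataset_to_simple_continuous_robj,
                           meta_py_r.run_continuous_ma),
        }
        result = None
        if self.data_type in dispatch:
            to_robj, run_analysis = dispatch[self.data_type]
            to_robj(self.model)  # creates the tmp object on the R side
            result = run_analysis(self.current_method, self.current_param_vals)
        self.parent().analysis(result)
        self.accept()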
Example No. 2
def populate_cbo_box(self):
    # we first build an R object from the current data and pass it off to the
    # R side to check the feasibility of the methods over the current data --
    # i.e., we do not display methods that cannot be performed over the
    # current data.
    tmp_obj_name = "tmp_obj"
    if self.data_type == "binary":
        meta_py_r.ma_dataset_to_simple_binary_robj(self.model, var_name=tmp_obj_name)
    elif self.data_type == "continuous":
        meta_py_r.ma_dataset_to_simple_continuous_robj(self.model, var_name=tmp_obj_name)

    available_methods = meta_py_r.get_available_methods(for_data_type=self.data_type, data_obj_name=tmp_obj_name)
    print "\n\navailable %s methods: %s" % (self.data_type, ", ".join(available_methods))
    for method in available_methods:
        self.method_cbo_box.addItem(method)
    self.current_method = self.method_cbo_box.currentText()
    self.setup_params()
    self.parameter_grp_box.setTitle(self.current_method)
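Note that self.current_method is captured only once here, from the combo box's initial selection. A minimal sketch of keeping it in sync when the user changes the selection, assuming the standard Qt currentIndexChanged signal (the slot name is illustrative, not from the source):

    # in populate_cbo_box, after the items have been added:
    self.method_cbo_box.currentIndexChanged.connect(self._on_method_changed)

    def _on_method_changed(self, index):
        # refresh the current method and its parameter UI to match the selection
        self.current_method = str(self.method_cbo_box.itemText(index))
        self.setup_params()
        self.parameter_grp_box.setTitle(self.current_method)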
Example No. 3
    def run_meta_reg(self):
        at_least_one_study_does_not_have_vals = False
        cov_d = {}
        selected_covariates = []
        for cov, chk_box in self.covs_and_check_boxes:
            if chk_box.isChecked():
                selected_covariates.append(cov)

            # collect this covariate's values keyed by study id; studies that
            # lack a value for any *selected* covariate are excluded below
            cov_d[cov.name] = \
                self.model.dataset.get_values_for_cov(cov.name, ids_for_keys=True)

        current_effect = self.model.current_effect
        if self.is_diagnostic:
            if self.dor_radio.isChecked():
                current_effect = "DOR"
            elif self.sensitivity_radio.isChecked():
                current_effect = "Sens"
            else:
                current_effect = "Spec"

        studies = []
        for study in self.model.get_studies(only_if_included=True):
            has_covs = [
                study.id in cov_d[selected_cov.name]
                for selected_cov in selected_covariates
            ]
            #if study != '' and study.id in cov_d and cov_d[study.id] is not None:
            #    studies.append(study)
            if all(has_covs):
                studies.append(study)
            else:
                at_least_one_study_does_not_have_vals = True

        if self.is_diagnostic:
            meta_py_r.ma_dataset_to_simple_diagnostic_robj(self.model,\
                                                    metric=current_effect,
                                                    covs_to_include=selected_covariates,
                                                    studies=studies)
        elif self.model.get_current_outcome_type() == "continuous":
            meta_py_r.ma_dataset_to_simple_continuous_robj(self.model,\
                                                    covs_to_include=selected_covariates,
                                                    studies=studies)
        else:
            meta_py_r.ma_dataset_to_simple_binary_robj(self.model, include_raw_data=False,\
                                                    covs_to_include=selected_covariates,
                                                    studies=studies)

        # fixed or random effects meta-regression?
        fixed_effects = False
        if self.fixed_effects_radio.isChecked():
            fixed_effects = True

        if at_least_one_study_does_not_have_vals:
            # TODO: this run_with_missing stuff needs to be finished i.e.
            # actually remove the affected studies. Currently just throws an
            # error
            run_with_missing = QMessageBox.warning(
                self, "Missing covariate value(s)",
                "Some studies do not have values for the covariate(s) you have selected. Do you want me to run the regression without them (i.e., drop studies with missing values)?",
                QMessageBox.Yes | QMessageBox.No)

            if run_with_missing == QMessageBox.No:
                self.accept()
                return

        result = meta_py_r.run_meta_regression(
            self.model.dataset,
            studies,
            selected_covariates,
            current_effect,
            fixed_effects=fixed_effects,
            conf_level=self.model.get_global_conf_level())
        if isinstance(result, str):
            # then there was an error!
            QMessageBox.critical(self,
                                "Whoops.",
                                "Sorry, there was an error performing the regression.\n%s" % \
                                result)
        else:
            self.parent().analysis(result)
            self.accept()
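The study-filtering step above keeps only studies that have a value for every selected covariate; anything else trips the missing-values warning. The same check in isolation, with made-up covariate data keyed by study id:

    # hypothetical data: covariate name -> {study_id: value}
    cov_d = {"age":  {1: 34.0, 2: 41.5},
             "dose": {1: 10.0, 2: 20.0, 3: 15.0}}
    selected = ["age", "dose"]
    study_ids = [1, 2, 3]

    complete = [sid for sid in study_ids
                if all(sid in cov_d[name] for name in selected)]
    # complete == [1, 2]; study 3 is dropped because it has no "age" value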
Example No. 4
    def populate_cbo_box(self, cbo_box=None, param_box=None):
        # if no combo box is passed in, use the default 'method_cbo_box'
        if cbo_box is None:
            cbo_box = self.method_cbo_box
            param_box = self.parameter_grp_box

        # we first build an R object from the current data and pass it off to the
        # R side to check the feasibility of the methods over the current data --
        # i.e., we do not display methods that cannot be performed over the
        # current data.
        tmp_obj_name = "tmp_obj" 
        if self.data_type == "binary":
            meta_py_r.ma_dataset_to_simple_binary_robj(self.model, var_name=tmp_obj_name)
        elif self.data_type == "continuous":
            meta_py_r.ma_dataset_to_simple_continuous_robj(self.model, var_name=tmp_obj_name)
        elif self.data_type == "diagnostic":
            meta_py_r.ma_dataset_to_simple_diagnostic_robj(self.model, var_name=tmp_obj_name)

        
        self.available_method_d = None
        ###
        # in the case of diagnostic data, the is.feasible methods also need to know
        # which metric the analysis is to be run over. here we check this. note that
        # we're setting params for multiple metrics (e.g., sens./spec.) but the is.feasible
        # method only wants *one* metric. thus we arbitrarily select one or the other --
        # this tacitly assumes that the *same* methods are feasible for, say, sens.
        # as for spec. this is a reasonable assumption in all cases of which I'm aware,
        # but a more conservative/correct thing to do would be to pass the *most restrictive*
        # metric to the get_available_methods routine
        ###
        metric = self.model.current_effect 
        if self.data_type == "diagnostic":
            if self.meta_f_str is None:
                metric = "Sens" if self.sens_spec else "DOR"
            else:
                metric = "Sens"

        
        self.available_method_d = meta_py_r.get_available_methods(for_data_type=self.data_type,\
                                         data_obj_name=tmp_obj_name, metric=metric)

        print "\n\navailable %s methods: %s" % (self.data_type, ", ".join(self.available_method_d.keys()))
        
        
        #print("----------------------------------\nAvailable methods dictionary:",self.available_method_d)

        # issue #110 -- this is NOT a general/good/flexible solution
        # -- we sort in reverse here because this puts the .random
        # methods first; otherwise, R provides the functions in
        # alphabetical (ascending) order by default.
        method_names = self.available_method_d.keys()
       

        ###
        # removing bivariate (sens/spec) methods when it makes no sense
        # @TODO handle this better
        if self.data_type == "diagnostic":
            biv_ml_name = "Bivariate (Maximum Likelihood)"
            for biv_method in (biv_ml_name, "HSROC"):
                if metric != "Sens" and biv_method in method_names or\
                         self.meta_f_str is not None or\
                         not ("sens" in self.diag_metrics and "spec" in self.diag_metrics):
                    method_names.remove(biv_method)
            # Fix for issue # 175            
            if set(["lr","dor"]) <= set(self.diag_metrics):    
                try:
                    method_names.remove('Diagnostic Fixed-Effect Peto')
                    #QMessageBox.warning(self.parent(), "whoops", "removed peto")
                except:
                    print("Couldn't remove 'Diagnostic Fixed-Effect Peto' for some reason... don't know why")
        
        method_names.sort(reverse=True)

        ###
        # default to bivariate method for diagnostic
        if self.data_type == "diagnostic" and biv_ml_name in method_names:
            method_names.remove(biv_ml_name)
            method_names.insert(0, biv_ml_name)
 
        for method in method_names:
            cbo_box.addItem(method)
        self.current_method = self.available_method_d[str(cbo_box.currentText())]
        self.setup_params()
        param_box.setTitle(self.current_method)
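The ordering above (reverse alphabetical so the .random variants come first, then the bivariate maximum-likelihood method forced to the top for diagnostic data) can be seen in isolation. A small sketch with made-up method names:

    method_names = ["diagnostic.fixed", "diagnostic.random",
                    "Bivariate (Maximum Likelihood)", "HSROC"]
    method_names.sort(reverse=True)      # *.random now precedes *.fixed
    biv_ml_name = "Bivariate (Maximum Likelihood)"
    if biv_ml_name in method_names:      # default to the bivariate method
        method_names.remove(biv_ml_name)
        method_names.insert(0, biv_ml_name)
    # -> ['Bivariate (Maximum Likelihood)', 'diagnostic.random',
    #     'diagnostic.fixed', 'HSROC']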
Example No. 5
    def run_ma(self):
        ###
        # first, let's fire up a progress bar
        bar = MetaProgress(self)
        bar.show()
        result = None
        
        # this method is defined statically, below
        add_plot_params(self)

        # also add the metric to the parameters
        # -- this is for scaling

        if self.data_type != "diagnostic":
            self.current_param_vals["measure"] = self.model.current_effect
        
        # dispatch on type; build an R object, then run the analysis
        if self.data_type == "binary":
            # note that this call creates a tmp object in R called
            # tmp_obj (though you can pass in whatever var name
            # you'd like)
            meta_py_r.ma_dataset_to_simple_binary_robj(self.model)
            if self.meta_f_str is None:
                result = meta_py_r.run_binary_ma(self.current_method, self.current_param_vals)
            else:
                result = meta_py_r.run_meta_method(self.meta_f_str, self.current_method, self.current_param_vals)
        elif self.data_type == "continuous":
            meta_py_r.ma_dataset_to_simple_continuous_robj(self.model)
            if self.meta_f_str is None:
                # run standard meta-analysis
                result = meta_py_r.run_continuous_ma(self.current_method, self.current_param_vals)
            else:
                # get meta!
                result = meta_py_r.run_meta_method(self.meta_f_str, self.current_method, self.current_param_vals)
        elif self.data_type == "diagnostic":
            # add the current metrics (e.g., PLR, etc.) to the method/params
            # dictionary
            self.add_cur_analysis_details()
        
            method_names, list_of_param_vals = [], []

            if len(self.diag_metrics_to_analysis_details) == 0:
                self.add_cur_analysis_details()


            ordered_metrics = ["Sens", "Spec", "NLR", "PLR", "DOR"]
            for diag_metric in \
                  [metric for metric in ordered_metrics \
                    if metric in self.diag_metrics_to_analysis_details]:
                # pull out the method and parameters object specified for this
                # metric.
                method, param_vals = self.diag_metrics_to_analysis_details[diag_metric]
                param_vals = copy.deepcopy(param_vals)

                
                # update the forest plot path
                split_fp_path = self.current_param_vals["fp_outpath"].split(".")
                new_str = split_fp_path[0] if len(split_fp_path) == 1 else \
                          ".".join(split_fp_path[:-1])
                new_str = new_str + "_%s" % diag_metric.lower() + ".png"
                param_vals["fp_outpath"] = new_str

                # update the metric 
                param_vals["measure"] = diag_metric
                
                method_names.append(method)
                list_of_param_vals.append(param_vals)
            
            # create the DiagnosticData object on the R side -- this is going 
            # to be the same for all analyses
            meta_py_r.ma_dataset_to_simple_diagnostic_robj(self.model)

            if self.meta_f_str is None:
                # regular meta-analysis
                try:
                    result = meta_py_r.run_diagnostic_multi(method_names, list_of_param_vals)
                except Exception, e:
                    error_message = \
                        "sorry, something has gone wrong with your analysis. here is a stack trace that probably won't be terribly useful.\n %s"  \
                                            % e
                
                    QMessageBox.critical(self, "analysis failed", error_message)
                    bar.hide()
                    # reset R's working directory
                    meta_py_r.reset_Rs_working_dir()
                    self.accept()
                    return

            else:
                # in the case of diagnostic, we pass in lists
                # of param values to the meta_method 
                result = meta_py_r.run_meta_method_diag(\
                                self.meta_f_str, method_names, list_of_param_vals)

        # dismiss the progress bar and hand the result off to the
        # parent widget, as in the binary/continuous cases
        bar.hide()
        self.parent().analysis(result)
        self.accept()
    def run_meta_reg(self):
        at_least_one_study_does_not_have_vals = False
        cov_d = {}
        selected_covariates = []
        for cov, chk_box in self.covs_and_check_boxes:
            if chk_box.isChecked():
                selected_covariates.append(cov)

            # collect this covariate's values keyed by study id; studies that
            # lack a value for any *selected* covariate are excluded below
            cov_d[cov.name] = \
                self.model.dataset.get_values_for_cov(cov.name, ids_for_keys=True)

        current_effect = self.model.current_effect
        if self.is_diagnostic:
            if self.dor_radio.isChecked():
                current_effect = "DOR"
            elif self.sensitivity_radio.isChecked():
                current_effect = "Sens"
            else:
                current_effect = "Spec"

        studies = []
        for study in self.model.get_studies(only_if_included=True):
            has_covs = [study.id in cov_d[selected_cov.name] for selected_cov in selected_covariates]
            #if study != '' and study.id in cov_d and cov_d[study.id] is not None:
            #    studies.append(study)
            if all(has_covs):
                studies.append(study)
            else:
                at_least_one_study_does_not_have_vals = True

        if self.is_diagnostic:
            meta_py_r.ma_dataset_to_simple_diagnostic_robj(self.model,\
                                                    metric=current_effect,
                                                    covs_to_include=selected_covariates,
                                                    studies=studies)    
        elif self.model.get_current_outcome_type() == "continuous":
            meta_py_r.ma_dataset_to_simple_continuous_robj(self.model,\
                                                    covs_to_include=selected_covariates,
                                                    studies=studies) 
        else:
            meta_py_r.ma_dataset_to_simple_binary_robj(self.model, include_raw_data=False,\
                                                    covs_to_include=selected_covariates,
                                                    studies=studies)

    
        # fixed or random effects meta-regression?
        fixed_effects = False
        if self.fixed_effects_radio.isChecked():
            fixed_effects = True
    

        if at_least_one_study_does_not_have_vals:
            # TODO: this run_with_missing stuff needs to be finished i.e.
            # actually remove the affected studies. Currently just throws an
            # error
            run_with_missing = QMessageBox.warning(self,
                        "Missing covariate value(s)",
                        "Some studies do not have values for the covariate(s) you have selected. Do you want me to run the regression without them (i.e., drop studies with missing values)?",
                        QMessageBox.Yes | QMessageBox.No)
        
            if run_with_missing == QMessageBox.No:
                self.accept()
                return


        result = meta_py_r.run_meta_regression(self.model.dataset, studies,
                                               selected_covariates,
                                               current_effect,
                                               fixed_effects=fixed_effects,
                                               conf_level=self.model.get_global_conf_level())
        if isinstance(result, str):
            # then there was an error!
            QMessageBox.critical(self,
                                "Whoops.",
                                "Sorry, there was an error performing the regression.\n%s" % \
                                result)
        else:
            self.parent().analysis(result)
            self.accept()