Code example #1
def _real_main():
    utils.insure_filesystem()
    utils.setup_logging()
    logging.info(
        f"Starting UtopiaForReddit version {variables.version_human_friendly}")
    utils.setup_caching()
    logging.info("Loading config and saving defaults if needed.")
    variables.config = config.get_config().load().save_defaults()
    logging.info("Starting ui framework")
    loop = asyncio.get_event_loop()
    loop.set_exception_handler(global_exception_handler)
    app = WxAsyncApp(loop=loop)
    loop.set_debug(True)
    am = account_manager.AccountManager(True)
    # The account manager is either shown or passed through. In either case,
    # the show call is made in the account manager's create method, and the
    # main UI is shown afterwards.
    # check for updates
    StartCoroutine(updater.check_for_updates(), am)
    # Show program tips
    tips.show_tips(None, False)
    loop.run_until_complete(app.MainLoop())
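
The pattern above relies on wxasync to drive wxPython from an asyncio event loop: WxAsyncApp replaces the blocking wx.App.MainLoop, and StartCoroutine ties a coroutine's lifetime to a window. A stripped-down sketch of that skeleton (illustrative only, not part of UtopiaForReddit):

import asyncio
import wx
from wxasync import WxAsyncApp, StartCoroutine

async def tick(frame):
    # Illustrative background task whose lifetime is bound to the frame.
    while True:
        frame.SetTitle("still running")
        await asyncio.sleep(1)

loop = asyncio.get_event_loop()
app = WxAsyncApp(loop=loop)
frame = wx.Frame(None, title="wxasync demo")
frame.Show()
StartCoroutine(tick(frame), frame)
loop.run_until_complete(app.MainLoop())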
Code example #2
def main():
    from core.folders import folders
    log_dir_name = folders.experiments
    # Ensure the folder exists
    if os.path.exists(folders.log):
        shutil.rmtree(folders.log)
    Path(log_dir_name).mkdir(parents=True, exist_ok=True)

    # Setup logging
    log_to = f"{log_dir_name}/logs.txt"
    us.setup_logging(log_to)
    print("Logging results to " + log_to)

    Config.EXECTIME = 0

    config = cfg.BeamNGConfig()
    problem = BeamNGProblem.BeamNGProblem(config)

    map_E = MapElitesBNG(config.Feature_Combination, problem, log_dir_name,
                         int(config.run_id), True)
    map_E.run()
    map_E.print_config(log_dir_name)
Code example #3
File: dcl.py Project: shuber2/pyfsa
def main(argv=None):
    if argv is None:
        argv = sys.argv

    # Setup vanilla CLI parsing and add custom arg(s).
    parser = utils.setup_cli_parsing()
    parser.add_option("",
                      "--codewords",
                      help="number of codewords.",
                      default=50,
                      type="int")
    (options, args) = parser.parse_args()

    # Setup logging
    utils.setup_logging(options)
    logger = logging.getLogger()

    # Read graph file list and label file list
    graph_file_list = utils.read_graph_file_list(options)
    if options.globalLabelFile is not None:
        label_file_list = [options.globalLabelFile] * len(graph_file_list)
    else:
        label_file_list = utils.read_label_file_list(options,
                                                     graph_file_list)

    # Read class info and grouping info
    class_info = utils.read_class_info(options)
    group_info = utils.read_group_info(options)

    assert (group_info.shape[0] ==
            len(class_info) ==
            len(graph_file_list) ==
            len(label_file_list))

    # Zip lists together
    data = zip(graph_file_list,
               label_file_list,
               class_info)

    # Run fine-structure analysis
    fsa_res = fsa.run_fsa(data,
                          options.radii,
                          options.recompute,
                          options.writeAs,
                          options.skip,
                          options.omitDegenerate)
    data_mat = fsa_res['data_mat']
    data_idx = fsa_res['data_idx']

    # Create cross-validation folds
    # NOTE: random_state=0 makes the splits reproducible across runs
    n_graphs = len(class_info)
    cv = ShuffleSplit(n_graphs,
                      n_iter=options.cvRuns,
                      test_size=0.2,
                      random_state=0)

    # Try inplace feature normalization
    if options.normalize:
        logger.info("Running feature normalization ...")
        scaler = preprocessing.StandardScaler(copy=False)
        scaler.fit_transform(fsa_res['data_mat'])

    scores = []
    todisk = []
    for cv_id, (trn, tst) in enumerate(cv):

        # Compose training data
        pos = []
        for i in trn:
            tmp = np.where(data_idx==i)[0]
            pos.extend(list(tmp))
        np_pos = np.array(pos)

        # Learn a codebook from training data
        codebook = fsa.learn_codebook(data_mat[np_pos,:],
                                      options.codewords,
                                      options.seed)

        # Compute BoW histograms for training data
        bow_trn_mat = np.zeros((len(trn), options.codewords))
        for cnt, i in enumerate(trn):
            np_pos = np.where(data_idx==i)[0]
            bow_trn_mat[cnt,:] = np.asarray(fsa.bow(data_mat[np_pos,:],
                                                    codebook))

        # Cross-validate (5-fold) SVM classifier and parameters
        param_selection = [{'kernel': ['rbf'],
                            'gamma': np.logspace(-6,2,10),
                            'C': [1, 10, 100, 1000]},
                           {'kernel': ['linear'],
                            'C': [1, 10, 100, 1000]}]
        clf = GridSearchCV(svm.SVC(C=1), param_selection, cv=5)
        clf.fit(bow_trn_mat, np.asarray(class_info)[trn])

        # Compute BoW histograms for testing data
        bow_tst_mat = np.zeros((len(tst), options.codewords))
        for cnt,i in enumerate(tst):
            pos =  np.where(data_idx==i)[0]
            bow_tst_mat[cnt,:] = fsa.bow(data_mat[pos,:], codebook)


        yhat = clf.predict(bow_tst_mat)
        gold = np.asarray(class_info)[tst]    

        print "yhat : ", yhat
        print "gold : ", gold

        tmp = {"yhat" : list(yhat), 
               "gold" : list(gold)}
        todisk.append(tmp)

        # Score the classifier
        score = clf.score(bow_tst_mat, np.asarray(class_info)[tst])
        scores.append(score)
        logger.info("Score (%.2d): %.2f" % (cv_id,100*score))

    json_file = "%s.json" % options.writeAs
    with open(json_file, 'w') as outfile:
        json.dump(todisk, outfile)

    utils.show_summary(scores)
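
For reference, the bag-of-words step above (fsa.learn_codebook / fsa.bow) amounts to assigning each per-node feature row to its nearest codeword and counting assignments per graph. A toy numpy version of that idea, not the actual pyfsa implementation:

import numpy as np

def bow_histogram(features, codebook):
    # features: (n_samples, n_dims); codebook: (n_codewords, n_dims).
    # Assign every row to its nearest codeword (Euclidean) and count.
    dists = np.linalg.norm(features[:, None, :] - codebook[None, :, :], axis=2)
    assignments = dists.argmin(axis=1)
    hist = np.bincount(assignments, minlength=len(codebook)).astype(float)
    # Normalize so graphs with different node counts stay comparable.
    return hist / max(hist.sum(), 1.0)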
Code example #4
File: mapcl.py Project: AorticaCorp/TubeTK
def main(argv=None):
    if argv is None:
        argv = sys.argv

    # Setup vanilla CLI parsing and add custom arg(s).
    parser = utils.setup_cli_parsing()
    parser.add_option("",
                      "--mixComp",
                      help="number of GMM components.",
                      default=3,
                      type="int")
    (options, args) = parser.parse_args()

    # Setup logging
    utils.setup_logging(options)
    logger = logging.getLogger()

    # Read graph file list and label file list
    graph_file_list = utils.read_graph_file_list(options)
    label_file_list = utils.read_label_file_list(options, graph_file_list)

    # Read class info and grouping info
    class_info = utils.read_class_info(options)
    group_info = utils.read_group_info(options)

    assert (group_info.shape[0] ==
            len(class_info) ==
            len(graph_file_list) ==
            len(label_file_list))

    # Zip lists together
    data = zip(graph_file_list,
               label_file_list,
               class_info)

    # Run fine-structure analysis
    fsa_res = fsa.run_fsa(data,
                          options.radii,
                          options.recompute,
                          options.writeAs,
                          options.skip,
                          options.omitDegenerate)
    data_mat = fsa_res['data_mat']
    data_idx = fsa_res['data_idx']

    # Create cross-validation folds (20% testing)
    n_graphs = len(class_info)
    cv = ShuffleSplit(n_graphs,
                      n_iter=options.cvRuns,
                      test_size=0.2,
                      random_state=0)

    # Our unique class labels
    label_set = np.unique(class_info)

    if options.normalize:
        logger.info("Running feature normalization ...")
        scaler = preprocessing.StandardScaler(copy=False)
        scaler.fit_transform(fsa_res['data_mat'])

    scores = []
    for cv_id, (trn, tst) in enumerate(cv):

        models = []
        for l in label_set:
            l_idx = np.where(class_info == l)[0]
            l_idx = np.asarray(l_idx).ravel()
            l_trn = np.intersect1d(l_idx, trn)

            pos = []
            for i in l_trn:
                tmp = np.where(fsa_res['data_idx']==i)[0]
                pos.extend(list(tmp))

            np_pos = np.asarray(pos)
            gmm_model = fsa.estimate_gm(data_mat[np_pos,:], options.mixComp)
            models.append(gmm_model)

        predict = []
        for i in tst:
            pos = np.where(data_idx==i)[0]
            map_idx = fsa.pp_gmm(data_mat[pos,:], models, argmax=True)
            predict.append(label_set[map_idx])

        # Score the MAP classifier
        truth = [class_info[i] for i in tst]
        score = accuracy_score(truth, predict)

        print "yhat :", predict
        print "gold :", truth

        logger.info("Score (%.2d): %.2f" % (cv_id, 100*score))
        scores.append(score)

    utils.show_summary(scores)
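
The classification here is a MAP decision over per-class Gaussian mixtures: one GMM is fit per label (fsa.estimate_gm), and fsa.pp_gmm picks the model under which the test graph's features are most likely. A minimal sketch of the same idea using scikit-learn's GaussianMixture, assuming uniform class priors (not the fsa implementation):

import numpy as np
from sklearn.mixture import GaussianMixture

def map_classify(train_by_class, test_features, n_components=3):
    # train_by_class: dict mapping label -> (n_samples, n_dims) training matrix.
    labels = sorted(train_by_class)
    models = {l: GaussianMixture(n_components=n_components, random_state=0)
                 .fit(train_by_class[l])
              for l in labels}
    # Total log-likelihood of the test rows under each class model.
    scores = [models[l].score_samples(test_features).sum() for l in labels]
    return labels[int(np.argmax(scores))]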
Code example #5
def get_fetcher(conf_filepath):
    with open(conf_filepath) as f:
        fetcher_conf = yaml.safe_load(f)
        module_path = fetcher_conf.get('cloud_fetcher')
        if not module_path:
            logging.error(f'Fetcher module: {module_path} can not be imported')
            return
        del fetcher_conf['cloud_fetcher']
        mod_path, _, class_name = module_path.rpartition('.')
        mod = import_module(mod_path)
        fetcher = getattr(mod, class_name)
        return fetcher(**fetcher_conf)


if __name__ == '__main__':
    setup_logging()

    global_conf = os.getenv('GLOBAL_CONF', './config/global.yml')
    fetchers_conf_files = get_modules_conf_files(global_conf)
    if fetchers_conf_files is None:
        exit(1)

    fetchers = []
    for fetcher_conf_file in fetchers_conf_files:
        fc = get_fetcher(fetcher_conf_file)
        if fc:
            fetchers.append(fc)
    logging.info(f'fetcher_coros: {fetchers}')

    loop = asyncio.get_event_loop()
    loop.run_until_complete(
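
For context, get_fetcher expects each fetcher YAML file to hold a cloud_fetcher key with a dotted path to the fetcher class; every remaining key is passed to that class as a constructor keyword argument. The runnable sketch below shows the same import-and-instantiate mechanism using a standard-library class, since the real fetcher modules are not part of this excerpt:

from importlib import import_module

# Stand-in for a parsed fetcher config; 'collections.OrderedDict' is used only
# so the example runs without the project's fetcher modules.
example_conf = {"cloud_fetcher": "collections.OrderedDict", "one": 1}
module_path = example_conf.pop("cloud_fetcher")
mod_path, _, class_name = module_path.rpartition(".")
cls = getattr(import_module(mod_path), class_name)
instance = cls(**example_conf)  # equivalent to OrderedDict(one=1)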
Code example #6
File: mapcl.py Project: zlinzju/ITKTubeTK
def main(argv=None):
    if argv is None:
        argv = sys.argv

    # Setup vanilla CLI parsing and add custom arg(s).
    parser = utils.setup_cli_parsing()
    parser.add_option("",
                      "--mixComp",
                      help="number of GMM components.",
                      default=3,
                      type="int")
    (options, args) = parser.parse_args()

    # Setup logging
    utils.setup_logging(options)
    logger = logging.getLogger()

    # Read graph file list and label file list
    graph_file_list = utils.read_graph_file_list(options)
    label_file_list = utils.read_label_file_list(options, graph_file_list)

    # Read class info and grouping info
    class_info = utils.read_class_info(options)
    group_info = utils.read_group_info(options)

    assert (group_info.shape[0] == len(class_info) == len(graph_file_list) ==
            len(label_file_list))

    # Zip lists together
    data = zip(graph_file_list, label_file_list, class_info)

    # Run fine-structure analysis
    fsa_res = fsa.run_fsa(data, options.radii, options.recompute,
                          options.writeAs, options.skip,
                          options.omitDegenerate)
    data_mat = fsa_res['data_mat']
    data_idx = fsa_res['data_idx']

    # Create cross-validation folds (20% testing)
    n_graphs = len(class_info)
    cv = ShuffleSplit(n_graphs,
                      n_iter=options.cvRuns,
                      test_size=0.2,
                      random_state=0)

    # Our unique class labels
    label_set = np.unique(class_info)

    if options.normalize:
        logger.info("Running feature normalization ...")
        scaler = preprocessing.StandardScaler(copy=False)
        scaler.fit_transform(fsa_res['data_mat'])

    scores = []
    for cv_id, (trn, tst) in enumerate(cv):

        models = []
        for l in label_set:
            l_idx = np.where(class_info == l)[0]
            l_idx = np.asarray(l_idx).ravel()
            l_trn = np.intersect1d(l_idx, trn)

            pos = []
            for i in l_trn:
                tmp = np.where(fsa_res['data_idx'] == i)[0]
                pos.extend(list(tmp))

            np_pos = np.asarray(pos)
            gmm_model = fsa.estimate_gm(data_mat[np_pos, :], options.mixComp)
            models.append(gmm_model)

        predict = []
        for i in tst:
            pos = np.where(data_idx == i)[0]
            map_idx = fsa.pp_gmm(data_mat[pos, :], models, argmax=True)
            predict.append(label_set[map_idx])

        # Score the MAP classifier
        truth = [class_info[i] for i in tst]
        score = accuracy_score(truth, predict)

        print "yhat :", predict
        print "gold :", truth

        logger.info("Score (%.2d): %.2f" % (cv_id, 100 * score))
        scores.append(score)

    utils.show_summary(scores)
Code example #7
def main(argv=None):
    if argv is None:
        argv = sys.argv

    # Setup vanilla CLI parsing and add custom arg(s).
    parser = utils.setup_cli_parsing()
    parser.add_option("",
                      "--codewords",
                      help="number of codewords.",
                      default=50,
                      type="int")
    (options, args) = parser.parse_args()

    # Setup logging
    utils.setup_logging(options)
    logger = logging.getLogger()

    # Read graph file list and label file list
    graph_file_list = utils.read_graph_file_list(options)
    if options.globalLabelFile is not None:
        label_file_list = [options.globalLabelFile] * len(graph_file_list)
    else:
        label_file_list = utils.read_label_file_list(options,
                                                     graph_file_list)

    # Read class info and grouping info
    class_info = utils.read_class_info(options)
    group_info = utils.read_group_info(options)

    assert (group_info.shape[0] ==
            len(class_info) ==
            len(graph_file_list) ==
            len(label_file_list))

    # Zip lists together
    data = zip(graph_file_list,
               label_file_list,
               class_info)

    # Run fine-structure analysis
    fsa_res = fsa.run_fsa(data,
                          options.radii,
                          options.recompute,
                          options.writeAs,
                          options.skip,
                          options.omitDegenerate)
    data_mat = fsa_res['data_mat']
    data_idx = fsa_res['data_idx']

    # Create cross-validation folds
    n_graphs = len(class_info)
    cv = ShuffleSplit(n_graphs,
                      n_iter=options.cvRuns,
                      test_size=0.2,
                      random_state=0)

    # Try inplace feature normalization
    if options.normalize:
        logger.info("Running feature normalization ...")
        scaler = preprocessing.StandardScaler(copy=False)
        scaler.fit_transform(fsa_res['data_mat'])

    scores = []
    for cv_id, (trn, tst) in enumerate(cv):

        # Compose training data
        pos = []
        for i in trn:
            tmp = np.where(data_idx==i)[0]
            pos.extend(list(tmp))
        np_pos = np.array(pos)

        # Learn a codebook from training data
        codebook = fsa.learn_codebook(data_mat[np_pos,:],
                                      options.codewords,
                                      options.seed)

        # Compute BoW histograms for training data
        bow_trn_mat = np.zeros((len(trn), options.codewords))
        for cnt, i in enumerate(trn):
            np_pos = np.where(data_idx==i)[0]
            bow_trn_mat[cnt,:] = np.asarray(fsa.bow(data_mat[np_pos,:],
                                                    codebook))

        # Cross-validate (5-fold) SVM classifier and parameters
        param_selection = [{'kernel': ['rbf'],
                            'gamma': np.logspace(-6,2,10),
                            'C': [1, 10, 100, 1000]},
                           {'kernel': ['linear'],
                            'C': [1, 10, 100, 1000]}]
        clf = GridSearchCV(svm.SVC(C=1), param_selection, cv=5)
        clf.fit(bow_trn_mat, np.asarray(class_info)[trn])

        # Compute BoW histograms for testing data
        bow_tst_mat = np.zeros((len(tst), options.codewords))
        for cnt,i in enumerate(tst):
            pos =  np.where(data_idx==i)[0]
            bow_tst_mat[cnt,:] = fsa.bow(data_mat[pos,:], codebook)

        print "yhat : ", clf.predict(bow_tst_mat)
        print "gold : ", np.asarray(class_info)[tst]

        # Score the classifier
        score = clf.score(bow_tst_mat, np.asarray(class_info)[tst])
        scores.append(score)

        logger.info("Score (%.2d): %.2f" % (cv_id,100*score))

    utils.show_summary(scores)