Example #1
            dataset_fields = pd.get_fields_structure(dataset, None)
            models_or_ensembles = ensemble_ids if ensemble_ids != [] else models
            resume = evaluate(
                models_or_ensembles,
                [dataset],
                api,
                args,
                resume,
                fields=fields,
                dataset_fields=dataset_fields,
                session_file=session_file,
                path=path,
                log=log,
                labels=labels,
                all_labels=all_labels,
                objective_field=args.objective_field,
            )

    # If cross_validation_rate is > 0, create remote evaluations and save
    # results in json and human-readable format. Then average the results to
    # issue a cross_validation measure set.
    if args.cross_validation_rate > 0:
        args.sample_rate = 1 - args.cross_validation_rate
        cross_validate(models, dataset, fields, api, args, resume,
                       session_file=session_file, path=path, log=log)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
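Note: the cross-validation branch above ties the training sample size to the held-out fraction. A minimal sketch of that arithmetic, using a hypothetical stand-in for bigmler's parsed options (only the two fields this branch touches are modeled):

from argparse import Namespace

# Hypothetical stand-in for bigmler's parsed args; not the real option object.
args = Namespace(cross_validation_rate=0.2, sample_rate=1.0)

if args.cross_validation_rate > 0:
    # Each cross-validation model trains on the complement of the
    # held-out fraction: a 0.2 rate leaves 80% of the rows.
    args.sample_rate = 1 - args.cross_validation_rate

print(args.sample_rate)  # 0.8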
Example #2
            models_or_ensembles = (ensemble_ids if ensemble_ids != []
                                   else models)
            resume = evaluate(models_or_ensembles, [dataset], output, api,
                              args, resume, name=name, description=description,
                              fields=fields, dataset_fields=dataset_fields,
                              fields_map=fields_map,
                              session_file=session_file, path=path,
                              log=log, labels=labels, all_labels=all_labels,
                              objective_field=objective_field)

    # If cross_validation_rate is > 0, create remote evaluations and save
    # results in json and human-readable format. Then average the results to
    # issue a cross_validation measure set.
    if args.cross_validation_rate > 0:
        args.sample_rate = 1 - args.cross_validation_rate
        cross_validate(models, dataset, fields, api, args, resume,
                       name=name, description=description,
                       fields_map=fields_map, session_file=session_file,
                       path=path, log=log)

    # Workaround to restore windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, u" "), "utf-8") + u"\n")
    else:
        message = (u"\nGenerated files:\n\n" +
                   u.print_tree(path, u" ") + u"\n")
    u.log_message(message, log_file=session_file, console=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
Example #3
    # If cross_validation_rate is > 0, create remote evaluations and save
    # results in json and human-readable format. Then average the results to
    # issue a cross_validation measure set.
    if args.cross_validation_rate > 0:
        args.sample_rate = 1 - args.cross_validation_rate
        if args.number_of_evaluations > 0:
            number_of_evaluations = args.number_of_evaluations
        else:
            number_of_evaluations = int(MONTECARLO_FACTOR *
                                        args.cross_validation_rate)
        cross_validate(models,
                       dataset,
                       number_of_evaluations,
                       name,
                       description,
                       fields,
                       fields_map,
                       api,
                       args,
                       resume,
                       session_file=session_file,
                       path=path,
                       log=log)

    # Workaround to restore windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file, console=args.verbosity)
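Note: when --number-of-evaluations is not set, Examples 3 and 6 derive the evaluation count from MONTECARLO_FACTOR. A sketch of that selection logic, assuming an illustrative factor of 200 (the real constant is defined in the bigmler source):

MONTECARLO_FACTOR = 200  # assumed value for illustration only; see bigmler

def pick_number_of_evaluations(number_of_evaluations, cross_validation_rate):
    """Mirrors the branch in Examples 3 and 6: honor an explicit
    --number-of-evaluations, otherwise scale with the held-out rate."""
    if number_of_evaluations > 0:
        return number_of_evaluations
    return int(MONTECARLO_FACTOR * cross_validation_rate)

print(pick_number_of_evaluations(0, 0.1))  # 20 under the assumed factor
print(pick_number_of_evaluations(5, 0.1))  # 5: the explicit setting wins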
Example #4
                          log=log,
                          labels=labels,
                          all_labels=all_labels,
                          objective_field=objective_field)

    # If cross_validation_rate is > 0, create remote evaluations and save
    # results in json and human-readable format. Then average the results to
    # issue a cross_validation measure set.
    if args.cross_validation_rate > 0:
        args.sample_rate = 1 - args.cross_validation_rate
        cross_validate(models,
                       dataset,
                       fields,
                       api,
                       args,
                       resume,
                       name=name,
                       description=description,
                       fields_map=fields_map,
                       session_file=session_file,
                       path=path,
                       log=log)

    # Workaround to restore windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = (u"\nGenerated files:\n\n" +
                   u.print_tree(path, " ") + u"\n")
    u.log_message(message, log_file=session_file, console=args.verbosity)
Example #5
            if args.test_split > 0 or args.has_test_datasets_:
                dataset = test_dataset
            dataset = u.check_resource(dataset, api=api,
                                       query_string=r.ALL_FIELDS_QS)
            dataset_fields = pd.get_fields_structure(dataset, None)
            models_or_ensembles = (ensemble_ids if ensemble_ids != []
                                   else models)
            resume = evaluate(models_or_ensembles, [dataset], api,
                              args, resume,
                              fields=fields, dataset_fields=dataset_fields,
                              session_file=session_file, path=path,
                              log=log, labels=labels, all_labels=all_labels,
                              objective_field=args.objective_field)

    # If cross_validation_rate is > 0, create remote evaluations and save
    # results in json and human-readable format. Then average the results to
    # issue a cross_validation measure set.
    if args.cross_validation_rate > 0:
        args.sample_rate = 1 - args.cross_validation_rate
        cross_validate(models, dataset, fields, api, args, resume,
                       session_file=session_file,
                       path=path, log=log)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
Example #6
        resume = evaluate(model, dataset, name, description, fields,
                          fields_map, output, api, args, resume,
                          session_file=session_file, path=path, log=log)

    # If cross_validation_rate is > 0, create remote evaluations and save
    # results in json and human-readable format. Then average the results to
    # issue a cross_validation measure set.
    if args.cross_validation_rate > 0:
        args.sample_rate = 1 - args.cross_validation_rate
        if args.number_of_evaluations > 0:
            number_of_evaluations = args.number_of_evaluations
        else:
            number_of_evaluations = int(MONTECARLO_FACTOR *
                                        args.cross_validation_rate)
        cross_validate(models, dataset, number_of_evaluations, name,
                       description, fields, fields_map, api, args, resume,
                       session_file=session_file, path=path, log=log)

    # Workaround to restore windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file, console=args.verbosity)
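Note: all six snippets are Python 2 code; in the win32 branch, the console locale is switched to code page 850 before printing, and unicode(..., "utf-8") decodes the byte string returned by u.print_tree. Under Python 3, where str is already Unicode, the message assembly reduces to a sketch like this (not bigmler's actual Python 3 code; the tree-text argument stands in for u.print_tree's return value):

import sys

def generated_files_message(tree_text):
    """Python 3 equivalent of the message assembly above: str is
    already Unicode, so no unicode()/cp850 re-decoding is needed."""
    return "\nGenerated files:\n\n" + tree_text + "\n"

# tree_text would come from a helper such as u.print_tree(path, " ")
sys.stdout.write(generated_files_message("output/\n  evaluation.json"))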