Example 1
def update_time_series(time_series,
                       time_series_args,
                       args,
                       api=None,
                       path=None,
                       session_file=None):
    """Updates time-series properties

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Updating time-series. %s\n" % get_url(time_series))
    log_message(message, log_file=session_file, console=args.verbosity)
    time_series = api.update_time_series(time_series, \
        time_series_args)
    check_resource_error(
        time_series,
        "Failed to update time-series: %s" % time_series['resource'])
    time_series = check_resource(time_series,
                                 api.get_time_series,
                                 query_string=FIELDS_QS,
                                 raise_on_error=True)
    if is_shared(time_series):
        message = dated("Shared time-series link. %s\n" %
                        get_url(time_series, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, time_series)

    return time_series
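
All of the update_* helpers in this listing are driven the same way. A minimal usage sketch, assuming BigML credentials in the environment and an args object that carries the verbosity and reports flags read above (the time-series id is a placeholder):

# Hypothetical driver for update_time_series above; not part of the source.
from argparse import Namespace

import bigml.api

args = Namespace(verbosity=1, reports=None)    # only the flags read above
api = bigml.api.BigML()                        # credentials from environment
updated = update_time_series(
    "timeseries/000000000000000000000000",     # placeholder resource id
    {"name": "renamed time series"},           # properties to update
    args,
    api=api)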
Example 2
def get_samples(sample_ids,
                args,
                api=None,
                session_file=None,
                query_string=''):
    """Retrieves remote samples in its actual status

    """
    if api is None:
        api = bigml.api.BigML()
    sample_id = ""
    samples = sample_ids
    sample_id = sample_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("sample", len(sample_ids)), get_url(sample_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one sample to predict at present
    try:
        sample = api.get_sample(sample_ids[0], query_string=query_string)
        check_resource_error(
            sample, "Failed to get sample: %s" % sample['resource'])
        sample = check_resource(sample,
                                api=api,
                                query_string=query_string,
                                raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished sample: %s" % str(exception))
Example 3
def create_evaluation(model_or_ensemble, dataset, evaluation_args, args,
                      api=None,
                      path=None, session_file=None, log=None, seed=SEED):
    """Create evaluation

       ``model_or_ensemble``: resource object or id for the model or ensemble
                              that should be evaluated
       ``dataset``: dataset object or id to evaluate with
       ``evaluation_args``: arguments for the ``create_evaluation`` call
       ``args``: input values for bigmler flags
       ``api``: api to remote objects in BigML
       ``path``: directory to store the BigMLer generated files in
       ``session_file``: file to store the messages of that session
       ``log``: user provided log file
       ``seed``: seed for the dataset sampling (when needed)

    """
    if api is None:
        api = bigml.api.BigML()
    if args.cross_validation_rate > 0:
        evaluation_args.update(seed=seed)
    message = dated("Creating evaluation.\n")
    log_message(message, log_file=session_file,
                console=args.verbosity)
    evaluation = api.create_evaluation(model_or_ensemble, dataset,
                                       evaluation_args)
    log_created_resources("evaluation", path,
                          bigml.api.get_evaluation_id(evaluation))
    check_resource_error(evaluation, "Failed to create evaluation: ")
    log_message("%s\n" % evaluation['resource'], log_file=log)

    return evaluation
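
create_evaluation shows the create/log/check shape that nearly every create_* helper in this listing follows: fire the asynchronous create call, record the resource id, then poll until BigML reports the resource as FINISHED. A condensed sketch of that shape, written here for illustration (create_and_wait is not a function from the source, and the real helpers also bail out on error states):

# Generic create-and-wait pattern behind these helpers (sketch only).
import time

import bigml.api

def create_and_wait(create_fn, get_fn, *create_args):
    resource = create_fn(*create_args)              # asynchronous create call
    while bigml.api.get_status(resource)['code'] != bigml.api.FINISHED:
        time.sleep(2)                               # poll at a gentle pace
        resource = get_fn(resource['resource'])     # refresh by resource id
    return resource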
Example 4
def update_deepnet(deepnet,
                   deepnet_args,
                   args,
                   api=None,
                   path=None,
                   session_file=None):
    """Updates deepnet properties

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Updating deepnet. %s\n" % get_url(deepnet))
    log_message(message, log_file=session_file, console=args.verbosity)
    deepnet = api.update_deepnet(deepnet, deepnet_args)
    check_resource_error(deepnet,
                         "Failed to update deepnet: %s" % deepnet['resource'])
    deepnet = check_resource(deepnet,
                             api.get_deepnet,
                             query_string=FIELDS_QS,
                             raise_on_error=True)
    if is_shared(deepnet):
        message = dated("Shared deepnet link. %s\n" %
                        get_url(deepnet, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, deepnet)

    return deepnet
Example 5
def update_topic_model(topic_model,
                       topic_model_args,
                       args,
                       api=None,
                       path=None,
                       session_file=None):
    """Updates topic model properties

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Updating topic model. %s\n" % get_url(topic_model))
    log_message(message, log_file=session_file, console=args.verbosity)
    topic_model = api.update_topic_model(topic_model, \
        topic_model_args)
    check_resource_error(
        topic_model,
        "Failed to update topic model: %s" % topic_model['resource'])
    topic_model = check_resource(topic_model,
                                 api.get_topic_model,
                                 query_string=FIELDS_QS,
                                 raise_on_error=True)
    if is_shared(topic_model):
        message = dated("Shared topic model link. %s\n" %
                        get_url(topic_model, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, topic_model)

    return topic_model
Example 6
def update_anomaly(anomaly,
                   anomaly_args,
                   args,
                   api=None,
                   path=None,
                   session_file=None):
    """Updates anomaly properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating anomaly detector. %s\n" % get_url(anomaly))
    log_message(message, log_file=session_file, console=args.verbosity)
    anomaly = api.update_anomaly(anomaly, anomaly_args)
    check_resource_error(anomaly,
                         "Failed to update anomaly: %s" % anomaly['resource'])
    anomaly = check_resource(anomaly,
                             api.get_anomaly,
                             query_string=FIELDS_QS,
                             raise_on_error=True)
    if is_shared(anomaly):
        message = dated("Shared anomaly link. %s\n" %
                        get_url(anomaly, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, anomaly)

    return anomaly
Example 7
def update_sample(sample,
                  sample_args,
                  args,
                  api=None,
                  path=None,
                  session_file=None):
    """Updates sample properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating sample. %s\n" % get_url(sample))
    log_message(message, log_file=session_file, console=args.verbosity)
    sample = api.update_sample(sample, sample_args)
    check_resource_error(sample,
                         "Failed to update sample: %s" % sample['resource'])
    sample = check_resource(sample, api.get_sample, raise_on_error=True)
    if is_shared(sample):
        message = dated("Shared sample link. %s\n" %
                        get_url(sample, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, sample)

    return sample
Example 8
def update_logistic_regression(logistic_regression,
                               logistic_regression_args,
                               args,
                               api=None,
                               path=None,
                               session_file=None):
    """Updates logistic regression properties

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Updating logistic regression. %s\n" %
                    get_url(logistic_regression))
    log_message(message, log_file=session_file, console=args.verbosity)
    logistic_regression = api.update_logistic_regression(logistic_regression, \
        logistic_regression_args)
    check_resource_error(
        logistic_regression, "Failed to update logistic regression: %s" %
        logistic_regression['resource'])
    logistic_regression = check_resource(logistic_regression,
                                         api.get_logistic_regression,
                                         query_string=FIELDS_QS,
                                         raise_on_error=True)
    if is_shared(logistic_regression):
        message = dated("Shared logistic regression link. %s\n" %
                        get_url(logistic_regression, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, logistic_regression)

    return logistic_regression
Example 9
def remote_predict_models(models, test_reader, prediction_file, api, args,
                          resume=False, output_path=None,
                          session_file=None, log=None, exclude=None):
    """Retrieve predictions remotely, combine them and save predictions to file

    """
    predictions_files = []
    prediction_args = {
        "tags": args.tag
    }
    test_set_header = test_reader.has_headers()
    if output_path is None:
        output_path = u.check_dir(prediction_file)
    message_logged = False

    raw_input_data_list = []
    for input_data in test_reader:
        raw_input_data_list.append(input_data)
    single_model = len(models) == 1
    if single_model:
        prediction_file = UnicodeWriter(prediction_file).open_writer()
    for model in models:
        model = bigml.api.get_model_id(model)
        predictions_file = get_predictions_file_name(model,
                                                     output_path)
        predictions_files.append(predictions_file)
        if (not resume or
                not c.checkpoint(c.are_predictions_created, predictions_file,
                                 test_reader.number_of_tests(),
                                 debug=args.debug)[0]):
            if not message_logged:
                message = u.dated("Creating remote predictions.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            message_logged = True
            with UnicodeWriter(predictions_file) as predictions_file:
                for input_data in raw_input_data_list:
                    input_data_dict = test_reader.dict(input_data)
                    prediction = api.create_prediction(model, input_data_dict,
                                                       by_name=test_set_header,
                                                       wait_time=0,
                                                       args=prediction_args)
                    u.check_resource_error(prediction,
                                           "Failed to create prediction: ")
                    u.log_message("%s\n" % prediction['resource'],
                                  log_file=log)
                    prediction_row = prediction_to_row(prediction)
                    predictions_file.writerow(prediction_row)
                    if single_model:
                        write_prediction(prediction_row[0:2], prediction_file,
                                         args.prediction_info,
                                         input_data, exclude)
    if single_model:
        prediction_file.close_writer()
    else:
        combine_votes(predictions_files,
                      Model(models[0]).to_prediction,
                      prediction_file, args.method,
                      args.prediction_info, raw_input_data_list, exclude)
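
With several models, the per-model prediction files are merged by combine_votes using the chosen method; PLURALITY_CODE is a simple majority vote. A toy illustration of plurality voting (my own sketch, not BigMLer's combine_votes):

# Toy plurality combiner; combine_votes is the real implementation.
from collections import Counter

def plurality(votes):
    """Return the most frequent prediction among the models' votes."""
    return Counter(votes).most_common(1)[0][0]

print(plurality(["Iris-setosa", "Iris-setosa", "Iris-versicolor"]))
# -> Iris-setosa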
Example 10
def create_models(dataset,
                  model_ids,
                  model_args,
                  args,
                  api=None,
                  path=None,
                  session_file=None,
                  log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()

    models = model_ids[:]
    existing_models = len(models)

    last_model = None
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file, console=args.verbosity)

        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(models[i - 1],
                                                   api.get_model,
                                                   query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)

            model = api.create_model(dataset, model_args)
            log_message("%s\n" % model['resource'], log_file=log)
            model_ids.append(model['resource'])
            models.append(model)
            log_created_resources("models",
                                  path,
                                  bigml.api.get_model_id(model),
                                  open_mode='a')
            check_resource_error(
                model, "Failed to create model %s:" % model['resource'])
        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model,
                                           api.get_model,
                                           query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" % get_url(model))
            log_message(message, log_file=session_file, console=args.verbosity)
Example 11
def remote_predict(models,
                   test_reader,
                   prediction_file,
                   api,
                   resume=False,
                   verbosity=True,
                   output_path=None,
                   method=PLURALITY_CODE,
                   tags="",
                   session_file=None,
                   log=None,
                   debug=False,
                   prediction_info=None):
    """Retrieve predictions remotely, combine them and save predictions to file

    """

    predictions_files = []
    prediction_args = {"tags": tags}
    test_set_header = test_reader.has_headers()
    if output_path is None:
        output_path = u.check_dir(prediction_file)
    message_logged = False
    raw_input_data_list = []
    for model in models:
        model = bigml.api.get_model_id(model)
        predictions_file = get_predictions_file_name(model, output_path)
        predictions_files.append(predictions_file)
        if (not resume or not c.checkpoint(c.are_predictions_created,
                                           predictions_file,
                                           test_reader.number_of_tests(),
                                           debug=debug)):
            if not message_logged:
                message = u.dated("Creating remote predictions.")
                u.log_message(message,
                              log_file=session_file,
                              console=verbosity)
            message_logged = True

            predictions_file = csv.writer(open(predictions_file, 'w'),
                                          lineterminator="\n")

            for input_data in test_reader:
                raw_input_data_list.append(input_data)
                input_data_dict = test_reader.dict(input_data)
                prediction = api.create_prediction(model,
                                                   input_data_dict,
                                                   by_name=test_set_header,
                                                   wait_time=0,
                                                   args=prediction_args)
                u.check_resource_error(prediction,
                                       "Failed to create prediction: ")
                u.log_message("%s\n" % prediction['resource'], log_file=log)
                prediction_row = prediction_to_row(prediction)
                predictions_file.writerow(prediction_row)
    combine_votes(predictions_files,
                  Model(models[0]).to_prediction, prediction_file, method,
                  prediction_info, raw_input_data_list)
Example 12
def update_source(source, source_args, args, api=None, session_file=None):
    """Updates source properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating source. %s\n" % get_url(source))
    log_message(message, log_file=session_file, console=args.verbosity)
    source = api.update_source(source, source_args)
    check_resource_error(source, "Failed to update source: ")
    source = check_resource(source, api.get_source)
    return source
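
update_source passes source_args straight through to the API, so any updatable source property can go in it. A hedged example of a call (the source id and name are placeholders):

# Hypothetical update_source call; the id and name are placeholders.
from argparse import Namespace

args = Namespace(verbosity=1)
source = update_source("source/000000000000000000000000",
                       {"name": "renamed source"},
                       args)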
Example 13
def create_models(dataset, model_ids, model_args,
                  args, api=None, path=None,
                  session_file=None, log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()

    models = model_ids[:]
    existing_models = len(models)

    last_model = None
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(
                        models[i - 1], api.get_model, query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)

            model = api.create_model(dataset, model_args)
            log_message("%s\n" % model['resource'], log_file=log)
            model_ids.append(model['resource'])
            models.append(model)
            log_created_resources("models", path,
                                  bigml.api.get_model_id(model), open_mode='a')
            check_resource_error(model, "Failed to create model %s:" %
                                 model['resource'])
        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" %
                            get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
Example 14
def remote_predict(models, test_reader, prediction_file, api,
                   resume=False,
                   verbosity=True, output_path=None,
                   method=PLURALITY_CODE, tags="",
                   session_file=None, log=None, debug=False,
                   prediction_info=None):
    """Retrieve predictions remotely, combine them and save predictions to file

    """

    predictions_files = []
    prediction_args = {
        "tags": tags
    }
    test_set_header = test_reader.has_headers()
    if output_path is None:
        output_path = u.check_dir(prediction_file)
    message_logged = False
    raw_input_data_list = []
    for model in models:
        model = bigml.api.get_model_id(model)
        predictions_file = get_predictions_file_name(model,
                                                     output_path)
        predictions_files.append(predictions_file)
        if (not resume or
            not c.checkpoint(c.are_predictions_created, predictions_file,
                             test_reader.number_of_tests(), debug=debug)):
            if not message_logged:
                message = u.dated("Creating remote predictions.")
                u.log_message(message, log_file=session_file,
                              console=verbosity)
            message_logged = True

            predictions_file = csv.writer(open(predictions_file, 'w'),
                                          lineterminator="\n")

            for input_data in test_reader:
                raw_input_data_list.append(input_data)
                input_data_dict = test_reader.dict(input_data)
                prediction = api.create_prediction(model, input_data_dict,
                                                   by_name=test_set_header,
                                                   wait_time=0,
                                                   args=prediction_args)
                u.check_resource_error(prediction,
                                       "Failed to create prediction: ")
                u.log_message("%s\n" % prediction['resource'], log_file=log)
                prediction_row = prediction_to_row(prediction)
                predictions_file.writerow(prediction_row)
    combine_votes(predictions_files,
                  Model(models[0]).to_prediction,
                  prediction_file, method,
                  prediction_info, raw_input_data_list)
Example 15
def get_ensemble(ensemble, api=None, verbosity=True, session_file=None):
    """Retrieves remote ensemble in its actual status

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(ensemble, str)
            or bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving ensemble. %s\n" % get_url(ensemble))
        log_message(message, log_file=session_file, console=verbosity)
        ensemble = check_resource(ensemble, api.get_ensemble)
        check_resource_error(ensemble, "Failed to get ensemble: ")
    return ensemble
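
The isinstance check makes get_ensemble cheap to call defensively: a resource dict that is already FINISHED comes back unchanged, while an id string (or an unfinished dict) triggers a fetch-and-wait. For instance (the id below is a placeholder):

# Only the first call hits the API; the second is a no-op (sketch).
ensemble = get_ensemble("ensemble/000000000000000000000000")  # placeholder id
ensemble = get_ensemble(ensemble)  # already finished: returned as-is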
Example 16
def get_dataset(dataset, api=None, verbosity=True, session_file=None):
    """Retrieves the dataset in its actual state

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(dataset, str)
            or bigml.api.get_status(dataset)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving dataset. %s\n" % get_url(dataset))
        log_message(message, log_file=session_file, console=verbosity)
        dataset = check_resource(dataset, api.get_dataset)
        check_resource_error(dataset, "Failed to get dataset: ")
    return dataset
Example 17
def update_dataset(dataset, dataset_args, verbosity,
                   api=None, session_file=None):
    """Updates dataset properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating dataset. %s\n" %
                    get_url(dataset))
    log_message(message, log_file=session_file,
                console=verbosity)
    dataset = api.update_dataset(dataset, dataset_args)
    check_resource_error(dataset, "Failed to update dataset: ")
    return dataset
Example 18
def get_dataset(dataset, api=None, verbosity=True, session_file=None):
    """Retrieves the dataset in its actual state

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(dataset, str) or
            bigml.api.get_status(dataset)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving dataset. %s\n" %
                        get_url(dataset))
        log_message(message, log_file=session_file,
                    console=verbosity)
        dataset = check_resource(dataset, api.get_dataset)
        check_resource_error(dataset, "Failed to get dataset: ")
    return dataset
Example 19
def update_project(project_args, args, api=None, session_file=None, log=None):
    """Updates project properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating project attributes.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    project = api.update_project(args.project_id, project_args)
    check_resource_error(project,
                         "Failed to update project: %s" % project['resource'])
    message = dated("Project \"%s\" has been updated.\n" % project['resource'])
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % args.project_id, log_file=log)
    return project
Example 20
def get_ensemble(ensemble, api=None, verbosity=True, session_file=None):
    """Retrieves remote ensemble in its actual status

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(ensemble, str) or
            bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving ensemble. %s\n" %
                        get_url(ensemble))
        log_message(message, log_file=session_file,
                    console=verbosity)
        ensemble = check_resource(ensemble, api.get_ensemble)
        check_resource_error(ensemble, "Failed to get ensemble: ")
    return ensemble
Example 21
def create_evaluations(model_ids,
                       dataset,
                       evaluation_args,
                       args,
                       api=None,
                       path=None,
                       session_file=None,
                       log=None,
                       existing_evaluations=0):
    """Create evaluations for a list of models

       ``model_ids``: list of model ids to create an evaluation of
       ``dataset``: dataset object or id to evaluate with
       ``evaluation_args``: arguments for the ``create_evaluation`` call
       ``args``: input values for bigmler flags
       ``api``: api to remote objects in BigML
       ``path``: directory to store the BigMLer generated files in
       ``session_file``: file to store the messages of that session
       ``log``: user provided log file
       ``existing_evaluations``: number of evaluations already created
    """
    evaluations = []
    if api is None:
        api = bigml.api.BigML()
    number_of_evaluations = len(model_ids)
    message = dated("Creating evaluations.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    for i in range(0, number_of_evaluations):
        model = model_ids[i]

        if i % args.max_parallel_evaluations == 0 and i > 0:
            try:
                evaluations[i - 1] = check_resource(evaluations[i - 1],
                                                    api.get_evaluation)
            except ValueError as exception:
                sys.exit("Failed to get a finished evaluation: %s" %
                         str(exception))
        if args.cross_validation_rate > 0:
            new_seed = get_basic_seed(i + existing_evaluations)
            evaluation_args.update(seed=new_seed)
        evaluation = api.create_evaluation(model, dataset, evaluation_args)
        log_created_resources("evaluations",
                              path,
                              bigml.api.get_evaluation_id(evaluation),
                              open_mode='a')
        check_resource_error(evaluation, "Failed to create evaluation: ")
        evaluations.append(evaluation)
        log_message("%s\n" % evaluation['resource'], log_file=log)
Example 22
def create_library(source_code,
                   library_args,
                   args,
                   api=None,
                   path=None,
                   session_file=None,
                   log=None):
    """Creates remote library

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Creating library \"%s\".\n" % library_args["name"])
    log_message(message, log_file=session_file, console=args.verbosity)
    library = api.create_library(source_code, library_args)
    log_created_resources("library",
                          path,
                          bigml.api.get_library_id(library),
                          mode='a')
    library_id = check_resource_error(library, "Failed to create library: ")
    try:
        library = check_resource(library, api.get_library, raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a compiled library: %s" % str(exception))
Example 23
def create_dataset(source_or_dataset,
                   dataset_args,
                   args,
                   api=None,
                   path=None,
                   session_file=None,
                   log=None,
                   dataset_type=None):
    """Creates remote dataset from source, dataset or datasets list

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating dataset.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    dataset = api.create_dataset(source_or_dataset, dataset_args)
    suffix = "_" + dataset_type if dataset_type else ""
    log_created_resources("dataset%s" % suffix,
                          path,
                          bigml.api.get_dataset_id(dataset),
                          open_mode='a')
    dataset_id = check_resource_error(dataset, "Failed to create dataset: ")
    try:
        dataset = check_resource(dataset,
                                 api.get_dataset,
                                 query_string=ALL_FIELDS_QS)
    except ValueError as exception:
        sys.exit("Failed to get a finished dataset: %s" % str(exception))
Example 24
def create_forecast(time_series,
                    input_data,
                    forecast_args,
                    args,
                    api=None,
                    session_file=None,
                    path=None,
                    log=None):
    """Creates remote forecast

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Creating remote forecast.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    forecast = api.create_forecast(time_series,
                                   input_data,
                                   forecast_args,
                                   retries=None)
    log_created_resources("forecast",
                          path,
                          bigml.api.get_forecast_id(forecast),
                          mode='a')
    forecast_id = check_resource_error(forecast, "Failed to create forecast: ")
    try:
        forecast = check_resource(forecast,
                                  api.get_forecast,
                                  raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished forecast: %s" % str(exception))
Example 25
def create_external_connector(external_connector_args,
                              args,
                              api=None,
                              session_file=None,
                              path=None,
                              log=None):
    """Creates remote external connector

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating external connector.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    external_connector = api.create_external_connector( \
        args.connection_info, external_connector_args)
    log_created_resources( \
        "external_connector", path,
        bigml.api.get_external_connector_id(external_connector),
        mode='a')
    external_connector_id = check_resource_error( \
        external_connector,
        "Failed to create external connector: ")
    try:
        external_connector = check_resource( \
            external_connector, api=api, raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished external connector: %s" % \
            str(exception))
Example 26
def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1,
                     api=None, path=None, session_file=None, log=None):
    """Create ensembles from input data

    """

    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    existing_ensembles = len(ensembles)
    models = []
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_ensembles:]
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress, args.max_parallel_ensembles,
                                     api, "ensemble",
                                     wait_step=args.number_of_models)

            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]

            if args.dataset_off and args.evaluate:
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_ensembles]
                ensemble = api.create_ensemble(multi_dataset,
                                               ensemble_args,
                                               retries=None)
            else:
                ensemble = api.create_ensemble(datasets, ensemble_args,
                                               retries=None)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s\n" %
                            get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)

    return ensembles, ensemble_ids, models, model_ids
Example 27
def create_batch_prediction(model_or_ensemble, test_dataset,
                            batch_prediction_args, verbosity,
                            api=None, session_file=None,
                            path=None, log=None):
    """Creates remote batch_prediction

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch prediction.\n")
    log_message(message, log_file=session_file, console=verbosity)
    batch_prediction = api.create_batch_prediction(model_or_ensemble,
                                                   test_dataset,
                                                   batch_prediction_args)
    log_created_resources("batch_prediction", path,
                          bigml.api.get_batch_prediction_id(batch_prediction),
                          open_mode='a')
    batch_prediction_id = check_resource_error(
        batch_prediction, "Failed to create batch prediction: ")
    try:
        batch_prediction = check_resource(batch_prediction,
                                          api.get_batch_prediction)
    except ValueError as exception:
        sys.exit("Failed to get a finished batch prediction: %s"
                 % str(exception))
Example 28
def create_source(data_set,
                  source_args,
                  args,
                  api=None,
                  path=None,
                  session_file=None,
                  log=None,
                  source_type=None):
    """Creates remote source

    """
    if api is None:
        api = bigml.api.BigML()
    suffix = "" if source_type is None else "%s " % source_type
    message = dated("Creating %ssource.\n" % suffix)
    log_message(message, log_file=session_file, console=args.verbosity)
    check_fields_struct(source_args, "source")
    source = api.create_source(data_set,
                               source_args,
                               progress_bar=args.progress_bar)
    if path is not None:
        suffix = "_" + source_type if source_type else ""
        log_created_resources("source%s" % suffix,
                              path,
                              source['resource'],
                              mode='ab',
                              comment=(u"%s\n" % source['object']['name']))
    source_id = check_resource_error(source, "Failed to create source: ")
    try:
        source = check_resource(source,
                                api.get_source,
                                query_string=ALL_FIELDS_QS,
                                raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished source: %s" % str(exception))
Example 29
def publish_dataset(dataset, args, api=None, session_file=None):
    """Publishes dataset and sets its price (if any)

    """
    if api is None:
        api = bigml.api.BigML()
    public_dataset = {"private": False}
    if args.dataset_price:
        public_dataset.update(price=args.dataset_price)
    message = dated("Updating dataset. %s\n" % get_url(dataset))
    log_message(message, log_file=session_file, console=args.verbosity)
    dataset = api.update_dataset(dataset, public_dataset)
    check_resource_error(dataset, "Failed to update dataset: ")
    return dataset
Example 30
def create_batch_prediction(model_or_ensemble,
                            test_dataset,
                            batch_prediction_args,
                            args,
                            api=None,
                            session_file=None,
                            path=None,
                            log=None):
    """Creates remote batch_prediction

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Creating batch prediction.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_prediction = api.create_batch_prediction(model_or_ensemble,
                                                   test_dataset,
                                                   batch_prediction_args,
                                                   retries=None)
    log_created_resources("batch_prediction",
                          path,
                          bigml.api.get_batch_prediction_id(batch_prediction),
                          mode='a')
    batch_prediction_id = check_resource_error(
        batch_prediction, "Failed to create batch prediction: ")
    try:
        batch_prediction = check_resource(batch_prediction,
                                          api.get_batch_prediction,
                                          raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished batch prediction: %s" %
                 str(exception))
Example 31
def create_execution(execution_args,
                     args,
                     api=None,
                     path=None,
                     session_file=None,
                     log=None):
    """Creates remote execution

    """
    message = dated("Creating execution.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    scripts = args.script_ids if args.script_ids else args.script
    execution = api.create_execution(scripts, execution_args)
    log_created_resources("execution",
                          path,
                          bigml.api.get_execution_id(execution),
                          mode='a')
    execution_id = check_resource_error(execution,
                                        "Failed to create execution: ")
    try:
        execution = check_resource(execution,
                                   api.get_execution,
                                   raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished execution: %s" % str(exception))
Example 32
def create_samples(datasets,
                   sample_ids,
                   sample_args,
                   args,
                   api=None,
                   path=None,
                   session_file=None,
                   log=None):
    """Create remote samples

    """
    if api is None:
        api = bigml.api.BigML()

    samples = sample_ids[:]
    existing_samples = len(samples)
    sample_args_list = []
    datasets = datasets[existing_samples:]
    # if resuming and all samples were created, there will be no datasets left
    if datasets:
        if isinstance(sample_args, list):
            sample_args_list = sample_args

        # Only one sample per command, at present
        number_of_samples = 1
        max_parallel_samples = 1
        message = dated("Creating %s.\n" % plural("sample", number_of_samples))
        log_message(message, log_file=session_file, console=args.verbosity)

        inprogress = []
        for i in range(0, number_of_samples):
            wait_for_available_tasks(inprogress, max_parallel_samples, api,
                                     "sample")
            if sample_args_list:
                sample_args = sample_args_list[i]

            sample = api.create_sample(datasets[i], sample_args, retries=None)
            sample_id = check_resource_error(sample,
                                             "Failed to create sample: ")
            log_message("%s\n" % sample_id, log_file=log)
            sample_ids.append(sample_id)
            inprogress.append(sample_id)
            samples.append(sample)
            log_created_resources("samples", path, sample_id, mode='a')

        if args.verbosity:
            if bigml.api.get_status(sample)['code'] != bigml.api.FINISHED:
                try:
                    sample = check_resource(sample,
                                            api.get_sample,
                                            raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished sample: %s" %
                             str(exception))
                samples[0] = sample
            message = dated("Sample created: %s\n" % get_url(sample))
            log_message(message, log_file=session_file, console=args.verbosity)
            if args.reports:
                report(args.reports, path, sample)
Example 33
def create_models(dataset, model_ids, model_args,
                  args, api=None, path=None,
                  session_file=None, log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()

    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if isinstance(model_args, list):
        model_args_list = model_args
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        single_model = args.number_of_models == 1 and existing_models == 0
        # if there's more than one model the first one must contain
        # the entire field structure to be used as reference.
        query_string = (FIELDS_QS if single_model
                        else ALL_FIELDS_QS)
        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(
                        models[i - 1], api.get_model,
                        query_string=query_string)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
            if model_args_list:
                model_args = model_args_list[i]
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)
            model = api.create_model(dataset, model_args)
            model_id = check_resource_error(model, "Failed to create model: ")
            log_message("%s\n" % model_id, log_file=log)
            model_ids.append(model_id)
            models.append(model)
            log_created_resources("models", path, model_id, open_mode='a')

        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=query_string)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" %
                            get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
Example 34
def create_ensemble(dataset, ensemble_args, args, api=None, path=None,
                    session_file=None, log=None):
    """Create ensemble from input data

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating ensemble.\n")
    log_message(message, log_file=session_file,
                console=args.verbosity)
    ensemble = api.create_ensemble(dataset, ensemble_args)
    log_created_resources("ensemble", path,
                          bigml.api.get_ensemble_id(ensemble))
    check_resource_error(ensemble, "Failed to create ensemble: ")
    log_message("%s\n" % ensemble['resource'], log_file=log)

    return ensemble
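
ensemble_args is handed directly to api.create_ensemble, so standard ensemble creation options apply. A hedged example, assuming an existing dataset (the id is a placeholder) and that number_of_models is an accepted creation argument:

# Hypothetical create_ensemble call; the dataset id is a placeholder.
from argparse import Namespace

args = Namespace(verbosity=1)
ensemble = create_ensemble("dataset/000000000000000000000000",
                           {"number_of_models": 10},
                           args)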
Example 35
def create_clusters(datasets, cluster_ids, cluster_args,
                    args, api=None, path=None,
                    session_file=None, log=None):
    """Create remote clusters

    """
    if api is None:
        api = bigml.api.BigML()

    clusters = cluster_ids[:]
    existing_clusters = len(clusters)
    cluster_args_list = []
    datasets = datasets[existing_clusters:]
    # if resuming and all clusters were created, there will be no datasets left
    if datasets:
        if isinstance(cluster_args, list):
            cluster_args_list = cluster_args

        # Only one cluster per command, at present
        number_of_clusters = 1
        message = dated("Creating %s.\n" %
                        plural("cluster", number_of_clusters))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_clusters):
            wait_for_available_tasks(inprogress, args.max_parallel_clusters,
                                     api, "cluster")
            if cluster_args_list:
                cluster_args = cluster_args_list[i]

            cluster = api.create_cluster(datasets, cluster_args, retries=None)
            cluster_id = check_resource_error(cluster,
                                              "Failed to create cluster: ")
            log_message("%s\n" % cluster_id, log_file=log)
            cluster_ids.append(cluster_id)
            inprogress.append(cluster_id)
            clusters.append(cluster)
            log_created_resources("clusters", path, cluster_id, mode='a')

        if args.verbosity:
            if bigml.api.get_status(cluster)['code'] != bigml.api.FINISHED:
                try:
                    cluster = check_resource(cluster, api.get_cluster,
                                             query_string=query_string,
                                             raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished cluster: %s" %
                             str(exception))
                clusters[0] = cluster
            message = dated("Cluster created: %s\n" %
                            get_url(cluster))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, cluster)
Example 36
def create_dataset(source_or_dataset, dataset_args, args, api=None, path=None,
                   session_file=None, log=None, dataset_type=None):
    """Creates remote dataset

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating dataset.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    dataset = api.create_dataset(source_or_dataset, dataset_args)
    suffix = "_" + dataset_type if dataset_type else ""
    log_created_resources("dataset%s" % suffix, path,
                          bigml.api.get_dataset_id(dataset))
    check_resource_error(dataset, "Failed to create dataset: ")
    try:
        dataset = check_resource(dataset, api.get_dataset)
    except ValueError as exception:
        sys.exit("Failed to get a finished dataset: %s" % str(exception))
Example 37
def update_source_fields(source, updated_values, fields, api=None,
                         verbosity=True, session_file=None):
    """Update remote source with new fields values

    """
    if api is None:
        api = bigml.api.BigML()
    update_fields = {}
    for (column, value) in updated_values.items():
        update_fields.update({
            fields.field_id(column): value})
    message = dated("Updating source. %s\n" %
                    get_url(source))
    log_message(message, log_file=session_file,
                console=verbosity)
    source = api.update_source(source, {"fields": update_fields})
    check_resource_error(source, "Failed to update source: ")
    return source
Example 38
def remote_predict_ensemble(ensemble_id,
                            test_reader,
                            prediction_file,
                            api,
                            resume=False,
                            verbosity=True,
                            output_path=None,
                            method=PLURALITY_CODE,
                            tags="",
                            session_file=None,
                            log=None,
                            debug=False,
                            prediction_info=None):
    """Retrieve predictions remotely and save predictions to file

    """

    prediction_args = {"tags": tags, "combiner": method}
    test_set_header = test_reader.has_headers()
    if output_path is None:
        output_path = u.check_dir(prediction_file)

    if (not resume or not c.checkpoint(c.are_predictions_created,
                                       prediction_file,
                                       test_reader.number_of_tests(),
                                       debug=debug)):
        message = u.dated("Creating remote predictions.")
        u.log_message(message, log_file=session_file, console=verbosity)

        predictions_file = csv.writer(open(prediction_file, 'w'),
                                      lineterminator="\n")
        for input_data in test_reader:
            input_data_dict = test_reader.dict(input_data)
            prediction = api.create_prediction(ensemble_id,
                                               input_data_dict,
                                               by_name=test_set_header,
                                               wait_time=0,
                                               args=prediction_args)
            prediction = u.check_resource(prediction, api.get_prediction)
            u.check_resource_error(prediction, "Failed to create prediction: ")
            u.log_message("%s\n" % prediction['resource'], log_file=log)
            prediction_row = prediction_to_row(prediction, prediction_info)
            write_prediction(prediction_row, predictions_file, prediction_info,
                             input_data)
Example 39
def create_fusion(models,
                  fusion,
                  fusion_args,
                  args,
                  api=None,
                  path=None,
                  session_file=None,
                  log=None):
    """Create remote fusion

    """
    if api is None:
        api = bigml.api.BigML()

    fusions = []
    fusion_ids = []
    if fusion is not None:
        fusions = [fusion]
        fusion_ids = [fusion]
    # if resuming and all fusions were created
    if models:

        # Only one fusion per command, at present
        message = dated("Creating fusion.\n")
        log_message(message, log_file=session_file, console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        wait_for_available_tasks(inprogress, args.max_parallel_fusions, api,
                                 "fusion")

        fusion = api.create_fusion(models, fusion_args, retries=None)
        fusion_id = check_resource_error( \
            fusion,
            "Failed to create fusion: ")
        log_message("%s\n" % fusion_id, log_file=log)
        fusion_ids.append(fusion_id)
        inprogress.append(fusion_id)
        fusions.append(fusion)
        log_created_resources("fusions", path, fusion_id, mode='a')

        if args.verbosity:
            if bigml.api.get_status(fusion)['code'] != bigml.api.FINISHED:
                try:
                    fusion = check_resource( \
                        fusion, api.get_fusion,
                        query_string=query_string,
                        raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished fusion: %s" %
                             str(exception))
                fusions[0] = fusion
            message = dated("Fusion created: %s\n" % get_url(fusion))
            log_message(message, log_file=session_file, console=args.verbosity)
            if args.reports:
                report(args.reports, path, fusion)
Example 40
def update_source_fields(source,
                         updated_values,
                         fields,
                         api=None,
                         verbosity=True,
                         session_file=None):
    """Update remote source with new fields values

    """
    if api is None:
        api = bigml.api.BigML()
    update_fields = {}
    for (column, value) in updated_values.items():
        update_fields.update({fields.field_id(column): value})
    message = dated("Updating source. %s\n" % get_url(source))
    log_message(message, log_file=session_file, console=verbosity)
    source = api.update_source(source, {"fields": update_fields})
    check_resource_error(source, "Failed to update source: ")
    return source
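
updated_values maps a column (whatever fields.field_id understands, typically an index or a field name) to the new attributes for that field. A sketch of the expected shape, assuming fields is a bigml.fields.Fields instance built from the source (the attribute values are illustrative):

# Illustrative updated_values payload for update_source_fields above.
updated_values = {
    0: {"name": "sepal length"},     # rename column 0
    4: {"optype": "categorical"},    # force column 4 to categorical
}
source = update_source_fields(source, updated_values, fields)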
Example 41
def publish_dataset(dataset, args, api=None, session_file=None):
    """Publishes dataset and sets its price (if any)

    """
    if api is None:
        api = bigml.api.BigML()
    public_dataset = {"private": False}
    if args.dataset_price:
        public_dataset.update(price=args.dataset_price)
    dataset = update_dataset(dataset,
                             public_dataset,
                             args,
                             api=api,
                             session_file=session_file)
    check_resource_error(dataset, "Failed to update dataset: ")
    dataset = check_resource(dataset,
                             api.get_dataset,
                             query_string=ALL_FIELDS_QS)
    return dataset
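The update payload built here is just {"private": False} plus an optional price. A sketch with hypothetical flag values:

import bigml.api
from argparse import Namespace

api = bigml.api.BigML()
args = Namespace(dataset_price=1.0, verbosity=1, reports=None)
dataset = api.get_dataset("dataset/5af06df94e17277501000001")  # made-up id
# update_dataset above receives {"private": False, "price": 1.0}
dataset = publish_dataset(dataset, args, api=api)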
Example n. 42
def create_evaluations(model_ids, dataset, evaluation_args, args, api=None,
                       path=None, session_file=None, log=None,
                       existing_evaluations=0):
    """Create evaluations for a list of models

       ``model_ids``: list of model ids to create an evaluation of
       ``dataset``: dataset object or id to evaluate with
       ``evaluation_args``: arguments for the ``create_evaluation`` call
       ``args``: input values for bigmler flags
       ``api``: api to remote objects in BigML
       ``path``: directory to store the BigMLer generated files in
       ``session_file``: file to store the messages of that session
       ``log``: user provided log file
       ``existing_evaluations``: evaluations found when attempting resume
    """
    evaluations = []
    if api is None:
        api = bigml.api.BigML()
    number_of_evaluations = len(model_ids)
    message = dated("Creating evaluations.\n")
    log_message(message, log_file=session_file,
                console=args.verbosity)
    for i in range(0, number_of_evaluations):
        model = model_ids[i]

        if i % args.max_parallel_evaluations == 0 and i > 0:
            try:
                evaluations[i - 1] = check_resource(
                    evaluations[i - 1], api.get_evaluation)
            except ValueError, exception:
                sys.exit("Failed to get a finished evaluation: %s" %
                         str(exception))
        if args.cross_validation_rate > 0:
            new_seed = get_basic_seed(i + existing_evaluations)
            evaluation_args.update(seed=new_seed)
        evaluation = api.create_evaluation(model, dataset, evaluation_args)
        log_created_resources("evaluations", path,
                              bigml.api.get_evaluation_id(evaluation),
                              open_mode='a')
        check_resource_error(evaluation, "Failed to create evaluation: ")
        evaluations.append(evaluation)
        log_message("%s\n" % evaluation['resource'], log_file=log)
Example n. 43
def create_ensembles(datasets,
                     ensemble_ids,
                     ensemble_args,
                     args,
                     number_of_ensembles=1,
                     api=None,
                     path=None,
                     session_file=None,
                     log=None):
    """Create ensembles from input data

    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file, console=args.verbosity)
        query_string = ALL_FIELDS_QS
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_ensembles,
                                     api.get_ensemble,
                                     "ensemble",
                                     query_string=query_string,
                                     wait_step=args.number_of_models)

            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            ensemble = api.create_ensemble(datasets, ensemble_args)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles",
                                  path,
                                  ensemble_id,
                                  open_mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s.\n" % get_url(ensemble))
            log_message(message, log_file=session_file, console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)

    return ensembles, ensemble_ids, models, model_ids
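Resume semantics in a sketch: ids recovered from a previous session are passed in as `ensemble_ids`, and `number_of_ensembles` is lowered to the remainder (all ids and flag values are made up).

import bigml.api
from argparse import Namespace

api = bigml.api.BigML()
args = Namespace(verbosity=1, max_parallel_ensembles=1,
                 number_of_models=10, reports=None)
recovered = ["ensemble/5af06df94e17277501000020"]  # from a prior session
total_wanted = 3
ensembles, ensemble_ids, models, model_ids = create_ensembles(
    ["dataset/5af06df94e17277501000001"], recovered,
    {"number_of_models": 10}, args,
    number_of_ensembles=total_wanted - len(recovered), api=api,
    path="./storage")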
Example n. 44
def publish_dataset(dataset, args, api=None, session_file=None):
    """Publishes dataset and sets its price (if any)

    """
    if api is None:
        api = bigml.api.BigML()
    public_dataset = {"private": False}
    if args.dataset_price:
        public_dataset.update(price=args.dataset_price)
    message = dated("Updating dataset. %s\n" %
                    get_url(dataset))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    dataset = api.update_dataset(dataset, public_dataset)
    check_resource_error(dataset, "Failed to update dataset: ")
    return dataset
Example n. 45
def remote_predict_ensemble(ensemble_id, test_reader, prediction_file, api,
                            resume=False,
                            verbosity=True, output_path=None,
                            method=PLURALITY_CODE, tags="",
                            session_file=None, log=None, debug=False,
                            prediction_info=None):
    """Retrieve predictions remotely and save predictions to file

    """

    prediction_args = {
        "tags": tags,
        "combiner": method
    }
    test_set_header = test_reader.has_headers()
    if output_path is None:
        output_path = u.check_dir(prediction_file)

    if (not resume or
        not c.checkpoint(c.are_predictions_created, prediction_file,
                         test_reader.number_of_tests(), debug=debug)):
        message = u.dated("Creating remote predictions.")
        u.log_message(message, log_file=session_file,
                      console=verbosity)

        predictions_file = csv.writer(open(prediction_file, 'w', 0),
                                      lineterminator="\n")
        for input_data in test_reader:
            input_data_dict = test_reader.dict(input_data)
            prediction = api.create_prediction(ensemble_id, input_data_dict,
                                               by_name=test_set_header,
                                               wait_time=0,
                                               args=prediction_args)
            prediction = u.check_resource(prediction,
                                          api.get_prediction)
            u.check_resource_error(prediction,
                                   "Failed to create prediction: ")
            u.log_message("%s\n" % prediction['resource'], log_file=log)
            prediction_row = prediction_to_row(prediction, prediction_info)
            write_prediction(prediction_row, predictions_file,
                             prediction_info, input_data)
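`method` defaults to `PLURALITY_CODE`, one of the combiner codes the BigML API documents for ensemble predictions (0 plurality, 1 confidence weighted, 2 probability weighted, 3 threshold), so the default payload presumably amounts to:

# combiner codes per the BigML API docs (PLURALITY_CODE is assumed
# to equal 0)
PLURALITY, CONFIDENCE_W, PROBABILITY_W, THRESHOLD = 0, 1, 2, 3
prediction_args = {"tags": "bigmler", "combiner": PLURALITY}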
Example n. 46
def update_dataset(dataset, dataset_args, args,
                   api=None, path=None, session_file=None):
    """Updates dataset properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating dataset. %s\n" %
                    get_url(dataset))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    dataset = api.update_dataset(dataset, dataset_args)
    if is_shared(dataset):
        message = dated("Shared dataset link. %s\n" %
                        get_url(dataset, shared=True))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, dataset)
    check_resource_error(dataset, "Failed to update dataset: ")
    return dataset
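A hypothetical call exercising the shared-link branch by toggling the `shared` flag (dataset id made up; `args` holds only the attributes the body reads):

import bigml.api
from argparse import Namespace

api = bigml.api.BigML()
args = Namespace(verbosity=1, reports=None)
dataset = api.get_dataset("dataset/5af06df94e17277501000001")  # made-up id
dataset = update_dataset(dataset, {"shared": True}, args, api=api,
                         path="./storage")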
Example n. 47
def update_model(model, model_args, args,
                 api=None, path=None, session_file=None):
    """Updates model properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating model. %s\n" %
                    get_url(model))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    model = api.update_model(model, model_args)
    check_resource_error(model, "Failed to update model: %s"
                         % model['resource'])
    if is_shared(model):
        message = dated("Shared model link. %s\n" %
                        get_url(model, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, model)

    return model
Example n. 48
def update_evaluation(evaluation, evaluation_args, args,
                      api=None, path=None, session_file=None):
    """Updates evaluation properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating evaluation. %s\n" %
                    get_url(evaluation))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    evaluation = api.update_evaluation(evaluation, evaluation_args)
    check_resource_error(evaluation, "Failed to update evaluation: %s"
                         % evaluation['resource'])
    if is_shared(evaluation):
        message = dated("Shared evaluation link. %s\n" %
                        get_url(evaluation, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, evaluation)

    return evaluation
Example n. 49
def remote_predict_ensemble(ensemble_id, test_reader, prediction_file, api,
                            args, resume=False, output_path=None,
                            session_file=None, log=None, exclude=None):
    """Retrieve predictions remotely and save predictions to file

    """
    prediction_args = {
        "tags": args.tag,
        "combiner": args.method
    }
    test_set_header = test_reader.has_headers()
    if output_path is None:
        output_path = u.check_dir(prediction_file)

    if (not resume or not c.checkpoint(
            c.are_predictions_created, prediction_file,
            test_reader.number_of_tests(), debug=args.debug)[0]):
        message = u.dated("Creating remote predictions.")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)

        with UnicodeWriter(prediction_file) as predictions_file:
            for input_data in test_reader:
                input_data_dict = test_reader.dict(input_data)
                prediction = api.create_prediction(ensemble_id,
                                                   input_data_dict,
                                                   by_name=test_set_header,
                                                   wait_time=0,
                                                   args=prediction_args)
                prediction = u.check_resource(prediction,
                                              api.get_prediction)
                u.check_resource_error(prediction,
                                       "Failed to create prediction: ")
                u.log_message("%s\n" % prediction['resource'], log_file=log)
                prediction_row = prediction_to_row(prediction,
                                                   args.prediction_info)
                write_prediction(prediction_row, predictions_file,
                                 args.prediction_info, input_data, exclude)
Example n. 50
def create_evaluations(model_ids, datasets, evaluation_args, args, api=None,
                       path=None, session_file=None, log=None,
                       existing_evaluations=0):
    """Create evaluations for a list of models

       ``model_ids``: list of model ids to create an evaluation of
       ``datasets``: dataset objects or ids to evaluate with
       ``evaluation_args``: arguments for the ``create_evaluation`` call
       ``args``: input values for bigmler flags
       ``api``: api to remote objects in BigML
       ``path``: directory to store the BigMLer generated files in
       ``session_file``: file to store the messages of that session
       ``log``: user provided log file
       ``existing_evaluations``: evaluations found when attempting resume
    """
    evaluations = []
    dataset = datasets[0]
    evaluation_args_list = []
    if isinstance(evaluation_args, list):
        evaluation_args_list = evaluation_args
    if api is None:
        api = bigml.api.BigML()
    remaining_ids = model_ids[existing_evaluations:]
    number_of_evaluations = len(remaining_ids)
    message = dated("Creating evaluations.\n")
    log_message(message, log_file=session_file,
                console=args.verbosity)

    inprogress = []
    for i in range(0, number_of_evaluations):
        model = remaining_ids[i]
        wait_for_available_tasks(inprogress, args.max_parallel_evaluations,
                                 api.get_evaluation, "evaluation")

        if evaluation_args_list != []:
            evaluation_args = evaluation_args_list[i]
        if args.cross_validation_rate > 0:
            new_seed = get_basic_seed(i + existing_evaluations)
            evaluation_args.update(seed=new_seed)
        evaluation = api.create_evaluation(model, dataset, evaluation_args)
        evaluation_id = check_resource_error(evaluation,
                                             "Failed to create evaluation: ")
        inprogress.append(evaluation_id)
        log_created_resources("evaluations", path, evaluation_id,
                              open_mode='a')
        evaluations.append(evaluation)
        log_message("%s\n" % evaluation['resource'], log_file=log)

    return evaluations
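`evaluation_args` may be a single dict or a per-model list; a sketch of the list form with made-up ids (note `args` also needs `max_parallel_evaluations` and `cross_validation_rate` here):

import bigml.api
from argparse import Namespace

api = bigml.api.BigML()
args = Namespace(verbosity=1, max_parallel_evaluations=2,
                 cross_validation_rate=0)
model_ids = ["model/5af06df94e17277501000010",   # made-up ids
             "model/5af06df94e17277501000011"]
eval_args_list = [{"name": "eval for %s" % model_id}
                  for model_id in model_ids]
evaluations = create_evaluations(model_ids,
                                 ["dataset/5af06df94e17277501000001"],
                                 eval_args_list, args, api=api,
                                 path="./storage")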
Example n. 51
def publish_model(model, args, api=None, session_file=None):
    """Update model with publish info

    """
    if api is None:
        api = bigml.api.BigML()
    public_model = {}
    if args.black_box:
        public_model = {"private": False}
    if args.white_box:
        public_model = {"private": False, "white_box": True}
        if args.model_price:
            public_model.update(price=args.model_price)
        if args.cpp:
            public_model.update(credits_per_prediction=args.cpp)
    if public_model:
        message = dated("Updating model. %s\n" %
                        get_url(model))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        model = api.update_model(model, public_model)
        check_resource_error(model, "Failed to update model %s: " %
                             model['resource'])
    return model
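The flag combinations map to three publication payloads; a sketch with hypothetical values (only a white-box model carries a price and credits per prediction):

import bigml.api
from argparse import Namespace

api = bigml.api.BigML()
# black box -> {"private": False}; white box with pricing ->
# {"private": False, "white_box": True, "price": 5.0,
#  "credits_per_prediction": 0.1}
white_box = Namespace(black_box=False, white_box=True,
                      model_price=5.0, cpp=0.1, verbosity=1)
model = api.get_model("model/5af06df94e17277501000010")  # made-up id
model = publish_model(model, white_box, api=api)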
Example n. 52
def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1,
                     api=None, path=None, session_file=None, log=None):
    """Create ensembles from input data

    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = ALL_FIELDS_QS
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress, args.max_parallel_ensembles,
                                     api.get_ensemble, "ensemble",
                                     query_string=query_string,
                                     wait_step=args.number_of_models)

            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            ensemble = api.create_ensemble(datasets, ensemble_args)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  open_mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s.\n" %
                            get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)

    return ensembles, ensemble_ids, models, model_ids
Example n. 53
def create_ensembles(dataset, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1,
                     api=None, path=None, session_file=None, log=None):
    """Create ensembles from input data

    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = ALL_FIELDS_QS
        for i in range(0, number_of_ensembles):
            if i % args.max_parallel_ensembles == 0 and i > 0:
                try:
                    ensembles[i - 1] = check_resource(
                        ensembles[i - 1], api.get_ensemble,
                        query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished ensemble: %s" %
                             str(exception))
            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            ensemble = api.create_ensemble(dataset, ensemble_args)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  open_mode='a')

        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s.\n" %
                            get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
Example n. 54
def create_models(datasets, model_ids, model_args,
                  args, api=None, path=None,
                  session_file=None, log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()

    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_models:]
    # if resuming and all models were created, there will be no datasets left
    if datasets:
        dataset = datasets[0]
        if isinstance(model_args, list):
            model_args_list = model_args
        if args.number_of_models > 0:
            message = dated("Creating %s.\n" %
                            plural("model", args.number_of_models))
            log_message(message, log_file=session_file,
                        console=args.verbosity)

            single_model = args.number_of_models == 1 and existing_models == 0
            # if there's more than one model the first one must contain
            # the entire field structure to be used as reference.
            query_string = (FIELDS_QS if single_model
                            else ALL_FIELDS_QS)
            inprogress = []
            for i in range(0, args.number_of_models):
                wait_for_available_tasks(inprogress, args.max_parallel_models,
                                         api.get_model, "model",
                                         query_string=query_string)
                if model_args_list:
                    model_args = model_args_list[i]
                if args.cross_validation_rate > 0:
                    new_seed = get_basic_seed(i + existing_models)
                    model_args.update(seed=new_seed)
                # one model per dataset (--max-categories or single model)
                if (args.max_categories > 0 or
                        (args.test_datasets and args.evaluate)):
                    dataset = datasets[i]
                    model = api.create_model(dataset, model_args)
                elif args.dataset_off and args.evaluate:
                    multi_dataset = args.test_dataset_ids[:]
                    del multi_dataset[i + existing_models]
                    model = api.create_model(multi_dataset, model_args)
                else:
                    model = api.create_model(datasets, model_args)
                model_id = check_resource_error(model,
                                                "Failed to create model: ")
                log_message("%s\n" % model_id, log_file=log)
                model_ids.append(model_id)
                inprogress.append(model_id)
                models.append(model)
                log_created_resources("models", path, model_id, open_mode='a')

            if args.number_of_models < 2 and args.verbosity:
                if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                    try:
                        model = check_resource(model, api.get_model,
                                               query_string=query_string)
                    except ValueError, exception:
                        sys.exit("Failed to get a finished model: %s" %
                                 str(exception))
                    models[0] = model
                message = dated("Model created: %s.\n" %
                                get_url(model))
                log_message(message, log_file=session_file,
                            console=args.verbosity)
                if args.reports:
                    report(args.reports, path, model)
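In the `--dataset-off` branch, model `i` is built on every test dataset except the `i`-th, so each model can later be evaluated on the dataset it never saw. The step in isolation, with placeholder ids:

test_dataset_ids = ["dataset/aaa", "dataset/bbb", "dataset/ccc"]
i = 1
multi_dataset = test_dataset_ids[:]
del multi_dataset[i]
# model 1 is created from ["dataset/aaa", "dataset/ccc"] and can be
# evaluated on "dataset/bbb"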
Example n. 55
def create_source(data_set, source_args, args, api=None, path=None,
                  session_file=None, log=None, source_type=None):
    """Creates remote source

    """
    if api is None:
        api = bigml.api.BigML()
    suffix = "" if source_type is None else "%s " % source_type
    message = dated("Creating %ssource.\n" % suffix)
    log_message(message, log_file=session_file, console=args.verbosity)

    source = api.create_source(data_set, source_args,
                               progress_bar=args.progress_bar)
    if path is not None:
        try:
            suffix = "_" + source_type if source_type else ""
            with open("%s/source%s" % (path, suffix), 'w', 0) as source_file:
                source_file.write("%s\n" % source['resource'])
                source_file.write("%s\n" % source['object']['name'])
        except IOError, exc:
            sys.exit("%s: Failed to write %s/source" % (str(exc), path))
    source_id = check_resource_error(source, "Failed to create source: ")
    try:
        source = check_resource(source, api.get_source,
                                query_string=ALL_FIELDS_QS)
    except ValueError, exception:
        sys.exit("Failed to get a finished source: %s" % str(exception))
    message = dated("Source created: %s\n" % get_url(source))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % source_id, log_file=log)

    return source
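A hypothetical call creating a source from a local CSV, using the signature reconstructed above (paths and flag values are made up):

from argparse import Namespace

args = Namespace(verbosity=1, progress_bar=False)
source = create_source("./data/iris.csv", {"name": "iris source"}, args,
                       path="./storage", session_file="session_log.txt",
                       log="log.txt")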


def data_to_source(training_set, test_set,
                   training_set_header, test_set_header, args):
    """Extracts the flags info to create a source object
Example n. 56
def remote_predict(model, test_dataset, batch_prediction_args, args,
                   api, resume, prediction_file=None, session_file=None,
                   path=None, log=None):
    """Computes a prediction for each entry in the `test_set`.

    Predictions are computed remotely using the batch predictions call.
    """
    if args.ensemble is not None and not args.dataset_off:
        model_or_ensemble = args.ensemble
    elif args.dataset_off:
        if hasattr(args, "ensemble_ids_") and args.ensemble_ids_:
            models = args.ensemble_ids_
        else:
            models = args.model_ids_
        test_datasets = args.test_dataset_ids
    else:
        model_or_ensemble = bigml.api.get_model_id(model)
    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        if not args.dataset_off:
            batch_prediction = create_batch_prediction(
                model_or_ensemble, test_dataset, batch_prediction_args,
                args, api, session_file=session_file, path=path, log=log)
        else:
            batch_predictions = []
            for index, test_dataset_n in enumerate(test_datasets):
                batch_predictions.append(create_batch_prediction( \
                    models[index], test_dataset_n, batch_prediction_args,
                    args, api, session_file=session_file, path=path, log=log))
    if not args.no_csv and not args.dataset_off:
        file_name = api.download_batch_prediction(batch_prediction,
                                                  prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset and not args.dataset_off:
        batch_prediction = bigml.api.check_resource(batch_prediction, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset",
                                    path, new_dataset, mode='a')
    elif args.to_dataset and args.dataset_off:
        predictions_datasets = []
        for batch_prediction in batch_predictions:
            batch_prediction = bigml.api.check_resource(batch_prediction,
                                                        api=api)
            new_dataset = bigml.api.get_dataset_id(
                batch_prediction['object']['output_dataset_resource'])
            if new_dataset is not None:
                predictions_datasets.append(new_dataset)
                message = u.dated("Batch prediction dataset created: %s\n"
                                  % u.get_url(new_dataset))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                u.log_created_resources("batch_prediction_dataset",
                                        path, new_dataset, mode='a')
        multi_dataset = api.create_dataset(predictions_datasets)
        log_created_resources("dataset_pred", path,
                              bigml.api.get_dataset_id(multi_dataset),
                              mode='a')
        dataset_id = check_resource_error(multi_dataset,
                                          "Failed to create dataset: ")
        try:
            multi_dataset = api.check_resource(multi_dataset)
        except ValueError, exception:
            sys.exit("Failed to get a finished dataset: %s" % str(exception))
        message = dated("Predictions dataset created: %s\n" %
                        get_url(multi_dataset))
        log_message(message, log_file=session_file, console=args.verbosity)
        log_message("%s\n" % dataset_id, log_file=log)
        if not args.no_csv:
            file_name = api.download_dataset(dataset_id, prediction_file)
            if file_name is None:
                sys.exit("Failed downloading CSV.")
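For reference, the download step used in both branches can also be issued directly; `filename` names the CSV to write and the batch prediction id is made up:

import bigml.api

api = bigml.api.BigML()
api.download_batch_prediction("batchprediction/5af06df94e17277501000030",
                              filename="./storage/predictions.csv")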