Example No. 1
def update_anomaly(anomaly,
                   anomaly_args,
                   args,
                   api=None,
                   path=None,
                   session_file=None):
    """Updates anomaly properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating anomaly detector. %s\n" % get_url(anomaly))
    log_message(message, log_file=session_file, console=args.verbosity)
    anomaly = api.update_anomaly(anomaly, anomaly_args)
    check_resource_error(anomaly,
                         "Failed to update anomaly: %s" % anomaly['resource'])
    anomaly = check_resource(anomaly,
                             api.get_anomaly,
                             query_string=FIELDS_QS,
                             raise_on_error=True)
    if is_shared(anomaly):
        message = dated("Shared anomaly link. %s\n" %
                        get_url(anomaly, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, anomaly)

    return anomaly
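A minimal, hypothetical driver for the helper above (it assumes the bigmler utilities used inside update_anomaly are importable in the same module; the anomaly id and the Namespace fields are placeholders, not bigmler's real CLI options):

from argparse import Namespace

import bigml.api

# `args` only needs the attributes update_anomaly actually reads
args = Namespace(verbosity=1, reports=None)
api = bigml.api.BigML()  # credentials come from BIGML_USERNAME / BIGML_API_KEY
anomaly = update_anomaly("anomaly/0123456789abcdef01234567",  # placeholder id
                         {"name": "my anomaly detector"},
                         args, api=api)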
Example No. 2
def update_logistic_regression(logistic_regression,
                               logistic_regression_args,
                               args,
                               api=None,
                               path=None,
                               session_file=None):
    """Updates logistic regression properties

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Updating logistic regression. %s\n" %
                    get_url(logistic_regression))
    log_message(message, log_file=session_file, console=args.verbosity)
    logistic_regression = api.update_logistic_regression(
        logistic_regression, logistic_regression_args)
    check_resource_error(
        logistic_regression, "Failed to update logistic regression: %s" %
        logistic_regression['resource'])
    logistic_regression = check_resource(logistic_regression,
                                         api.get_logistic_regression,
                                         query_string=FIELDS_QS,
                                         raise_on_error=True)
    if is_shared(logistic_regression):
        message = dated("Shared logistic regression link. %s\n" %
                        get_url(logistic_regression, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, logistic_regression)

    return logistic_regression
Example No. 3
def update_deepnet(deepnet,
                   deepnet_args,
                   args,
                   api=None,
                   path=None,
                   session_file=None):
    """Updates deepnet properties

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Updating deepnet. %s\n" % get_url(deepnet))
    log_message(message, log_file=session_file, console=args.verbosity)
    deepnet = api.update_deepnet(deepnet, deepnet_args)
    check_resource_error(deepnet,
                         "Failed to update deepnet: %s" % deepnet['resource'])
    deepnet = check_resource(deepnet,
                             api.get_deepnet,
                             query_string=FIELDS_QS,
                             raise_on_error=True)
    if is_shared(deepnet):
        message = dated("Shared deepnet link. %s\n" %
                        get_url(deepnet, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, deepnet)

    return deepnet
Example No. 4
def update_sample(sample,
                  sample_args,
                  args,
                  api=None,
                  path=None,
                  session_file=None):
    """Updates sample properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating sample. %s\n" % get_url(sample))
    log_message(message, log_file=session_file, console=args.verbosity)
    sample = api.update_sample(sample, sample_args)
    check_resource_error(sample,
                         "Failed to update sample: %s" % sample['resource'])
    sample = check_resource(sample, api.get_sample, raise_on_error=True)
    if is_shared(sample):
        message = dated("Shared sample link. %s\n" %
                        get_url(sample, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, sample)

    return sample
Example No. 5
def update_topic_model(topic_model,
                       topic_model_args,
                       args,
                       api=None,
                       path=None,
                       session_file=None):
    """Updates topic model properties

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Updating topic model. %s\n" % get_url(topic_model))
    log_message(message, log_file=session_file, console=args.verbosity)
    topic_model = api.update_topic_model(topic_model, topic_model_args)
    check_resource_error(
        topic_model,
        "Failed to update topic model: %s" % topic_model['resource'])
    topic_model = check_resource(topic_model,
                                 api.get_topic_model,
                                 query_string=FIELDS_QS,
                                 raise_on_error=True)
    if is_shared(topic_model):
        message = dated("Shared topic model link. %s\n" %
                        get_url(topic_model, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, topic_model)

    return topic_model
Example No. 6
def update_time_series(time_series,
                       time_series_args,
                       args,
                       api=None,
                       path=None,
                       session_file=None):
    """Updates time-series properties

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Updating time-series. %s\n" % get_url(time_series))
    log_message(message, log_file=session_file, console=args.verbosity)
    time_series = api.update_time_series(time_series, time_series_args)
    check_resource_error(
        time_series,
        "Failed to update time-series: %s" % time_series['resource'])
    time_series = check_resource(time_series,
                                 api.get_time_series,
                                 query_string=FIELDS_QS,
                                 raise_on_error=True)
    if is_shared(time_series):
        message = dated("Shared time-series link. %s\n" %
                        get_url(time_series, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, time_series)

    return time_series
Example No. 7
def get_time_series(time_series_ids, args, api=None, session_file=None):
    """Retrieves remote time-series in its actual status

    """
    if api is None:
        api = bigml.api.BigML()

    time_series_id = ""
    time_series_set = time_series_ids
    time_series_id = time_series_ids[0]
    message = dated(
        "Retrieving %s. %s\n" %
        (plural("time-series", len(time_series_ids)), get_url(time_series_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one time-series to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        time_series = check_resource(time_series_ids[0],
                                     api.get_time_series,
                                     query_string=query_string,
                                     raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished time-series: %s" %
                 str(exception))
    # as in the other get_* helpers: swap the finished resource into the list
    time_series_set[0] = time_series
    return time_series_set, time_series_ids
Example No. 8
def add_gazibit_links(resource, output_dir=None, shared=False):
    """ Adds the link to the resource in the corresponding section of the
        report template

    """
    try:
        gazibit_tmp = GAZIBIT_SHARED if shared else GAZIBIT_PRIVATE
        path = check_dir(os.path.join(output_dir,
                                      REPORTS_DIR,
                                      os.path.basename(gazibit_tmp)))
        input_file = os.path.join(path, os.path.basename(gazibit_tmp))
        output_file = tempfile.NamedTemporaryFile(
            mode="w", dir=output_dir, delete=False)

        if not os.path.isfile(input_file):
            shutil.copyfile(gazibit_tmp, input_file)
        with open(input_file, "r") as report_template:
            with output_file as report_output:
                content = report_template.read()
                resource_type = bigml.api.get_resource_type(resource)
                resource_type = resource_type.upper()
                url_template = URL_TEMPLATE % resource_type
                content = content.replace(url_template,
                                          get_url(resource, shared=shared))
                section_template = SECTION_START % resource_type
                content = content.replace(section_template, "")
                section_template = SECTION_END % resource_type
                content = content.replace(section_template, "")
                report_output.write(content)
        os.remove(input_file)
        os.rename(output_file.name, input_file)
    except IOError as exc:
        os.remove(output_file.name)
        sys.exit("Failed to generate the gazibit output report. %s" % str(exc))
Example No. 9
def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1,
                     api=None, path=None, session_file=None, log=None):
    """Create ensembles from input data

    """

    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    existing_ensembles = len(ensembles)
    models = []  # must exist at the final return even if nothing is created
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_ensembles:]
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress, args.max_parallel_ensembles,
                                     api, "ensemble",
                                     wait_step=args.number_of_models)

            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]

            if args.dataset_off and args.evaluate:
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_ensembles]
                ensemble = api.create_ensemble(multi_dataset,
                                               ensemble_args,
                                               retries=None)
            else:
                ensemble = api.create_ensemble(datasets, ensemble_args,
                                               retries=None)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s\n" %
                            get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)

    return ensembles, ensemble_ids, models, model_ids
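The dataset_off branch above implements a leave-one-out scheme: ensemble i is trained on every dataset except the i-th, which is reserved to evaluate it. The slicing on its own, ignoring the existing_ensembles offset (placeholder ids):

datasets = ["dataset/aaa111", "dataset/bbb222", "dataset/ccc333"]
for i in range(len(datasets)):
    training = datasets[:]   # copy before mutating
    del training[i]          # drop the held-out dataset
    held_out = datasets[i]   # later used to evaluate ensemble i
    print("%s evaluated on %s" % (training, held_out))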
Example No. 10
def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in its actual status

    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    single_model = len(model_ids) == 1
    if single_model:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                # if there's more than one model the first one must contain
                # the entire field structure to be used as reference.
                query_string = (ALL_FIELDS_QS if not single_model
                                and (len(models) == 0 or args.multi_label)
                                else FIELDS_QS)
                model = check_resource(model, api.get_model,
                                       query_string=query_string)
            except ValueError as exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
    # hand the retrieved models back together with their ids
    return models, model_ids
Example No. 11
def publish_dataset(dataset, args, api=None, session_file=None):
    """Publishes dataset and sets its price (if any)

    """
    if api is None:
        api = bigml.api.BigML()
    public_dataset = {"private": False}
    if args.dataset_price:
        public_dataset.update(price=args.dataset_price)
    message = dated("Updating dataset. %s\n" % get_url(dataset))
    log_message(message, log_file=session_file, console=args.verbosity)
    dataset = api.update_dataset(dataset, public_dataset)
    check_resource_error(dataset, "Failed to update dataset: ")
    return dataset
Example No. 12
def remote_anomaly_score(anomaly, test_dataset, batch_anomaly_score_args, args,
                         api, resume, prediction_file=None, session_file=None,
                         path=None, log=None):
    """Computes an anomaly score for each entry in the `test_set`.

       Predictions are computed remotely using the batch anomaly score call.
    """

    anomaly_id = bigml.api.get_anomaly_id(anomaly)
    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Batch anomaly score not found. Resuming.\n")
        resume, batch_anomaly_score = c.checkpoint(
            c.is_batch_anomaly_score_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)

    if not resume:
        batch_anomaly_score = create_batch_anomaly_score(
            anomaly_id, test_dataset, batch_anomaly_score_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        api.download_batch_anomaly_score(batch_anomaly_score, prediction_file)
    if args.to_dataset:
        batch_anomaly_score = bigml.api.check_resource(batch_anomaly_score,
                                                       api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_anomaly_score['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch anomaly score dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_anomaly_score_dataset",
                                    path, new_dataset, open_mode='a')
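Stripped of bigmler's checkpointing, the same flow with the plain bindings is three calls: create the batch score (optionally materializing it as a dataset), wait, and download (ids and filename are placeholders):

from bigml.api import BigML, check_resource

api = BigML()
batch = api.create_batch_anomaly_score(
    "anomaly/0123456789abcdef01234567",
    "dataset/0123456789abcdef01234567",
    {"output_dataset": True})  # also keep the scores as a new dataset
batch = check_resource(batch, api.get_batch_anomaly_score,
                       raise_on_error=True)
api.download_batch_anomaly_score(batch, "anomaly_scores.csv")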
Example No. 13
def get_samples(sample_ids,
                args,
                api=None,
                session_file=None,
                query_string=''):
    """Retrieves remote samples in its actual status

    """
    if api is None:
        api = bigml.api.BigML()
    sample_id = ""
    samples = sample_ids
    sample_id = sample_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("sample", len(sample_ids)), get_url(sample_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one sample to predict at present
    try:
        sample = api.get_sample(sample_ids[0], query_string=query_string)
        check_resource_error(
            sample, "Failed to get sample: %s" % sample['resource'])
        sample = check_resource(sample,
                                api=api,
                                query_string=query_string,
                                raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished sample: %s" % str(exception))
    # as in the other get_* helpers: swap the finished resource into the list
    samples[0] = sample
    return samples, sample_ids
Example No. 14
def remote_centroid(cluster, test_dataset, batch_centroid_args, args,
                    api, resume, prediction_file=None, session_file=None,
                    path=None, log=None):
    """Computes a centroid for each entry in the `test_set`.

       Predictions are computed remotely using the batch centroid call.
    """

    cluster_id = bigml.api.get_cluster_id(cluster)
    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Batch centroid not found. Resuming.\n")
        resume, batch_centroid = c.checkpoint(
            c.is_batch_centroid_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_centroid = create_batch_centroid(
            cluster_id, test_dataset, batch_centroid_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        api.download_batch_centroid(batch_centroid, prediction_file)
    if args.to_dataset:
        batch_centroid = bigml.api.check_resource(batch_centroid, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_centroid['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch centroid dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_centroid_dataset",
                                    path, new_dataset, mode='a')
Example No. 15
def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in its actual status

    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    if len(model_ids) == 1:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                model = check_resource(model, api.get_model,
                                       query_string=FIELDS_QS)
            except ValueError as exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
    # hand the retrieved models back together with their ids
    return models, model_ids
Example No. 16
def get_logistic_regressions(logistic_regression_ids,
                             args,
                             api=None,
                             session_file=None):
    """Retrieves remote logistic regression in its actual status

    """
    if api is None:
        api = bigml.api.BigML()

    logistic_regression_id = ""
    logistic_regressions = logistic_regression_ids
    logistic_regression_id = logistic_regression_ids[0]
    message = dated(
        "Retrieving %s. %s\n" %
        (plural("logistic regression", len(logistic_regression_ids)),
         get_url(logistic_regression_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one logistic regression to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        logistic_regression = check_resource(logistic_regression_ids[0],
                                             api.get_logistic_regression,
                                             query_string=query_string,
                                             raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished logistic regression: %s" %
                 str(exception))
    # as in the other get_* helpers: swap the finished resource into the list
    logistic_regressions[0] = logistic_regression
    return logistic_regressions, logistic_regression_ids
Example No. 17
def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in its actual status

    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    single_model = len(model_ids) == 1
    if single_model:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                # if there's more than one model the first one must contain
                # the entire field structure to be used as reference.
                query_string = (
                    ALL_FIELDS_QS if (
                        (not single_model and (not models or
                                               args.multi_label)) or
                        not args.test_header)
                    else FIELDS_QS)
                model = check_resource(model, api.get_model,
                                       query_string=query_string,
                                       raise_on_error=True)
            except Exception as exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
    # hand the retrieved models back together with their ids
    return models, model_ids
Example No. 18
def remote_predict(model, test_dataset, batch_prediction_args, args,
                   api, resume, prediction_file=None, session_file=None,
                   path=None, log=None):
    """Computes a prediction for each entry in the `test_set`.

       Predictions are computed remotely using the batch predictions call.
    """

    if args.ensemble is not None:
        model_or_ensemble = args.ensemble
    else:
        model_or_ensemble = bigml.api.get_model_id(model)
    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_prediction = create_batch_prediction(
            model_or_ensemble, test_dataset, batch_prediction_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        api.download_batch_prediction(batch_prediction, prediction_file)
    if args.to_dataset:
        batch_prediction = bigml.api.check_resource(batch_prediction, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset",
                                    path, new_dataset, mode='a')
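Without resume handling, the remote prediction flow reduces to the same three binding calls; a sketch with placeholder ids (an ensemble id works where the model id goes):

from bigml.api import BigML, check_resource

api = BigML()
batch = api.create_batch_prediction("model/0123456789abcdef01234567",
                                    "dataset/0123456789abcdef01234567",
                                    {"output_dataset": True})
batch = check_resource(batch, api.get_batch_prediction, raise_on_error=True)
api.download_batch_prediction(batch, "predictions.csv")
# with output_dataset requested, the new dataset id is recorded here:
output_id = batch['object']['output_dataset_resource']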
Example No. 19
def remote_centroid(cluster, test_dataset, batch_centroid_args, args,
                    api, resume, prediction_file=None, session_file=None,
                    path=None, log=None):
    """Computes a centroid for each entry in the `test_set`.

       Predictions are computed remotely using the batch centroid call.
    """

    cluster_id = bigml.api.get_cluster_id(cluster)
    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Batch centroid not found. Resuming.\n")
        resume, batch_centroid = c.checkpoint(
            c.is_batch_centroid_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_centroid = create_batch_centroid(
            cluster_id, test_dataset, batch_centroid_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        file_name = api.download_batch_centroid(batch_centroid,
                                                prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        batch_centroid = bigml.api.check_resource(batch_centroid, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_centroid['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch centroid dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_centroid_dataset",
                                    path, new_dataset, mode='a')
Example No. 20
def create_samples(datasets,
                   sample_ids,
                   sample_args,
                   args,
                   api=None,
                   path=None,
                   session_file=None,
                   log=None):
    """Create remote samples

    """
    if api is None:
        api = bigml.api.BigML()

    samples = sample_ids[:]
    existing_samples = len(samples)
    sample_args_list = []
    datasets = datasets[existing_samples:]
    # if resuming and all samples were created, there will be no datasets left
    if datasets:
        if isinstance(sample_args, list):
            sample_args_list = sample_args

        # Only one sample per command, at present
        number_of_samples = 1
        max_parallel_samples = 1
        message = dated("Creating %s.\n" % plural("sample", number_of_samples))
        log_message(message, log_file=session_file, console=args.verbosity)

        inprogress = []
        for i in range(0, number_of_samples):
            wait_for_available_tasks(inprogress, max_parallel_samples, api,
                                     "sample")
            if sample_args_list:
                sample_args = sample_args_list[i]

            sample = api.create_sample(datasets[i], sample_args, retries=None)
            sample_id = check_resource_error(sample,
                                             "Failed to create sample: ")
            log_message("%s\n" % sample_id, log_file=log)
            sample_ids.append(sample_id)
            inprogress.append(sample_id)
            samples.append(sample)
            log_created_resources("samples", path, sample_id, mode='a')

        if args.verbosity:
            if bigml.api.get_status(sample)['code'] != bigml.api.FINISHED:
                try:
                    sample = check_resource(sample,
                                            api.get_sample,
                                            raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished sample: %s" %
                             str(exception))
                samples[0] = sample
            message = dated("Sample created: %s\n" % get_url(sample))
            log_message(message, log_file=session_file, console=args.verbosity)
            if args.reports:
                report(args.reports, path, sample)

    # as in create_ensembles above: hand back the new resources and their ids
    return samples, sample_ids
Example No. 21
def create_models(dataset, model_ids, model_args,
                  args, api=None, path=None,
                  session_file=None, log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()

    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if isinstance(model_args, list):
        model_args_list = model_args
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        single_model = args.number_of_models == 1 and existing_models == 0
        # if there's more than one model the first one must contain
        # the entire field structure to be used as reference.
        query_string = (FIELDS_QS if single_model
                        else ALL_FIELDS_QS)
        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(
                        models[i - 1], api.get_model,
                        query_string=query_string)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
            if model_args_list:
                model_args = model_args_list[i]
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)
            model = api.create_model(dataset, model_args)
            model_id = check_resource_error(model, "Failed to create model: ")
            log_message("%s\n" % model_id, log_file=log)
            model_ids.append(model_id)
            models.append(model)
            log_created_resources("models", path, model_id, open_mode='a')

        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=query_string)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" %
                            get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)

    # as in create_ensembles above: hand back the new resources and their ids
    return models, model_ids
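The cross-validation branch gives every model a distinct but reproducible sampling seed (get_basic_seed is bigmler-internal). With the plain bindings the same idea is one argument per model; the seed strings and sample rate below are illustrative:

from bigml.api import BigML

api = BigML()
dataset = "dataset/0123456789abcdef01234567"  # placeholder id
for i in range(3):
    # any stable per-index string makes the sampling reproducible
    api.create_model(dataset, {"seed": "cross-validation %s" % i,
                               "sample_rate": 0.8})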
Example No. 22
def create_models(dataset,
                  model_ids,
                  model_args,
                  args,
                  api=None,
                  path=None,
                  session_file=None,
                  log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()

    models = model_ids[:]
    existing_models = len(models)

    last_model = None
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file, console=args.verbosity)

        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(models[i - 1],
                                                   api.get_model,
                                                   query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)

            model = api.create_model(dataset, model_args)
            log_message("%s\n" % model['resource'], log_file=log)
            model_ids.append(model['resource'])
            models.append(model)
            log_created_resources("models",
                                  path,
                                  bigml.api.get_model_id(model),
                                  open_mode='a')
            check_resource_error(
                model, "Failed to create model %s:" % model['resource'])
        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model,
                                           api.get_model,
                                           query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" % get_url(model))
            log_message(message, log_file=session_file, console=args.verbosity)
Example No. 23
def create_clusters(datasets, cluster_ids, cluster_args,
                    args, api=None, path=None,
                    session_file=None, log=None):
    """Create remote clusters

    """
    if api is None:
        api = bigml.api.BigML()

    clusters = cluster_ids[:]
    existing_clusters = len(clusters)
    cluster_args_list = []
    datasets = datasets[existing_clusters:]
    # if resuming and all clusters were created, there will be no datasets left
    if datasets:
        if isinstance(cluster_args, list):
            cluster_args_list = cluster_args

        # Only one cluster per command, at present
        number_of_clusters = 1
        message = dated("Creating %s.\n" %
                        plural("cluster", number_of_clusters))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_clusters):
            wait_for_available_tasks(inprogress, args.max_parallel_clusters,
                                     api, "cluster")
            if cluster_args_list:
                cluster_args = cluster_args_list[i]

            cluster = api.create_cluster(datasets, cluster_args, retries=None)
            cluster_id = check_resource_error(cluster,
                                              "Failed to create cluster: ")
            log_message("%s\n" % cluster_id, log_file=log)
            cluster_ids.append(cluster_id)
            inprogress.append(cluster_id)
            clusters.append(cluster)
            log_created_resources("clusters", path, cluster_id, mode='a')

        if args.verbosity:
            if bigml.api.get_status(cluster)['code'] != bigml.api.FINISHED:
                try:
                    cluster = check_resource(cluster, api.get_cluster,
                                             query_string=query_string,
                                             raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished cluster: %s" %
                             str(exception))
                clusters[0] = cluster
            message = dated("Cluster created: %s\n" %
                            get_url(cluster))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, cluster)

    # as in create_ensembles above: hand back the new resources and their ids
    return clusters, cluster_ids
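wait_for_available_tasks (bigmler-internal) is what keeps the number of in-flight creations bounded. A generic sketch of that throttling idea; the helper below is illustrative, not bigmler's actual implementation:

import time

import bigml.api

def wait_for_slot(inprogress_ids, max_parallel, get_method, wait_seconds=5):
    """Block until fewer than max_parallel resources are still building."""
    while len(inprogress_ids) >= max_parallel:
        for resource_id in inprogress_ids[:]:
            status = bigml.api.get_status(get_method(resource_id))
            if status['code'] in (bigml.api.FINISHED, bigml.api.FAULTY):
                inprogress_ids.remove(resource_id)  # slot freed
        if len(inprogress_ids) >= max_parallel:
            time.sleep(wait_seconds)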
Example No. 24
def remote_anomaly_score(anomaly,
                         test_dataset,
                         batch_anomaly_score_args,
                         args,
                         api,
                         resume,
                         prediction_file=None,
                         session_file=None,
                         path=None,
                         log=None):
    """Computes an anomaly score for each entry in the `test_set`.

       Predictions are computed remotely using the batch anomaly score call.
    """

    anomaly_id = bigml.api.get_anomaly_id(anomaly)
    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Batch anomaly score not found. Resuming.\n")
        resume, batch_anomaly_score = c.checkpoint(
            c.is_batch_anomaly_score_created,
            path,
            debug=args.debug,
            message=message,
            log_file=session_file,
            console=args.verbosity)

    if not resume:
        batch_anomaly_score = create_batch_anomaly_score(
            anomaly_id,
            test_dataset,
            batch_anomaly_score_args,
            args,
            api,
            session_file=session_file,
            path=path,
            log=log)
    if not args.no_csv:
        file_name = api.download_batch_anomaly_score(batch_anomaly_score,
                                                     prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")

    if args.to_dataset:
        batch_anomaly_score = bigml.api.check_resource(batch_anomaly_score,
                                                       api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_anomaly_score['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch anomaly score dataset created: %s\n" %
                              u.get_url(new_dataset))
            u.log_message(message,
                          log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_anomaly_score_dataset",
                                    path,
                                    new_dataset,
                                    mode='a')
Example No. 25
def remote_prediction(model,
                      test_dataset,
                      batch_prediction_args,
                      args,
                      api,
                      resume,
                      prediction_file=None,
                      session_file=None,
                      path=None,
                      log=None):
    """Computes a prediction for each entry in the `test_set`.

       Predictions are computed remotely using the batch prediction call.
    """

    model_id = bigml.api.get_resource_id(model)
    batch_prediction_args.update({"probability": True, "confidence": False})

    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(c.is_batch_prediction_created,
                                                path,
                                                debug=args.debug,
                                                message=message,
                                                log_file=session_file,
                                                console=args.verbosity)
    if not resume:
        batch_prediction = create_batch_prediction(model_id,
                                                   test_dataset,
                                                   batch_prediction_args,
                                                   args,
                                                   api,
                                                   session_file=session_file,
                                                   path=path,
                                                   log=log)
    if not args.no_csv:
        file_name = api.download_batch_prediction(batch_prediction,
                                                  prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        batch_prediction = bigml.api.check_resource(batch_prediction, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n" %
                              u.get_url(new_dataset))
            u.log_message(message,
                          log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset",
                                    path,
                                    new_dataset,
                                    mode='a')
Example No. 26
def create_fusion(models,
                  fusion,
                  fusion_args,
                  args,
                  api=None,
                  path=None,
                  session_file=None,
                  log=None):
    """Create remote fusion

    """
    if api is None:
        api = bigml.api.BigML()

    fusions = []
    fusion_ids = []
    if fusion is not None:
        fusions = [fusion]
        fusion_ids = [fusion]
    # if resuming and all fusions were created
    if models:

        # Only one fusion per command, at present
        message = dated("Creating fusion.\n")
        log_message(message, log_file=session_file, console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        wait_for_available_tasks(inprogress, args.max_parallel_fusions, api,
                                 "fusion")

        fusion = api.create_fusion(models, fusion_args, retries=None)
        fusion_id = check_resource_error(fusion, "Failed to create fusion: ")
        log_message("%s\n" % fusion_id, log_file=log)
        fusion_ids.append(fusion_id)
        inprogress.append(fusion_id)
        fusions.append(fusion)
        log_created_resources("fusions", path, fusion_id, mode='a')

        if args.verbosity:
            if bigml.api.get_status(fusion)['code'] != bigml.api.FINISHED:
                try:
                    fusion = check_resource(fusion, api.get_fusion,
                                            query_string=query_string,
                                            raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished fusion: %s" %
                             str(exception))
                fusions[0] = fusion
            message = dated("Fusion created: %s\n" % get_url(fusion))
            log_message(message, log_file=session_file, console=args.verbosity)
            if args.reports:
                report(args.reports, path, fusion)
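With the plain bindings, a fusion is one call over a list of supervised model ids, plus the usual wait (ids are placeholders):

from bigml.api import BigML, check_resource

api = BigML()
fusion = api.create_fusion(["model/0123456789abcdef01234567",
                            "model/0123456789abcdef01234568"])
fusion = check_resource(fusion, api.get_fusion, raise_on_error=True)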
Example No. 27
def create_ensembles(datasets,
                     ensemble_ids,
                     ensemble_args,
                     args,
                     number_of_ensembles=1,
                     api=None,
                     path=None,
                     session_file=None,
                     log=None):
    """Create ensembles from input data

    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    models = []  # must exist at the final return even if nothing is created
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file, console=args.verbosity)
        query_string = ALL_FIELDS_QS
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_ensembles,
                                     api.get_ensemble,
                                     "ensemble",
                                     query_string=query_string,
                                     wait_step=args.number_of_models)

            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            ensemble = api.create_ensemble(datasets, ensemble_args)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles",
                                  path,
                                  ensemble_id,
                                  open_mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s.\n" % get_url(ensemble))
            log_message(message, log_file=session_file, console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)

    return ensembles, ensemble_ids, models, model_ids
Example No. 28
def update_dataset(dataset, dataset_args, args,
                   api=None, path=None, session_file=None):
    """Updates dataset properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating dataset. %s\n" %
                    get_url(dataset))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    dataset = api.update_dataset(dataset, dataset_args)
    if is_shared(dataset):
        message = dated("Shared dataset link. %s\n" %
                        get_url(dataset, shared=True))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, dataset)
    check_resource_error(dataset, "Failed to update dataset: ")
    return dataset
Example No. 29
def get_evaluation(evaluation, api=None, verbosity=True, session_file=None):
    """Retrieves evaluation in its actual state

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Retrieving evaluation. %s\n" % get_url(evaluation))
    log_message(message, log_file=session_file, console=verbosity)
    try:
        evaluation = check_resource(evaluation, api.get_evaluation)
    except ValueError as exception:
        sys.exit("Failed to get a finished evaluation: %s" % str(exception))
    return evaluation
Example No. 30
def update_source(source, source_args, args, api=None, session_file=None):
    """Updates source properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating source. %s\n" % get_url(source))
    log_message(message, log_file=session_file, console=args.verbosity)
    source = api.update_source(source, source_args)
    check_resource_error(source, "Failed to update source: ")
    source = check_resource(source, api.get_source)
    return source
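The same update-then-refetch round trip with the plain bindings (the source id and name are placeholders):

from bigml.api import BigML, check_resource

api = BigML()
source = api.update_source("source/0123456789abcdef01234567",
                           {"name": "renamed source"})
source = check_resource(source, api.get_source, raise_on_error=True)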
Example No. 31
def update_evaluation(evaluation, evaluation_args, args,
                      api=None, path=None, session_file=None):
    """Updates evaluation properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating evaluation. %s\n" %
                    get_url(evaluation))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    evaluation = api.update_evaluation(evaluation, evaluation_args)
    check_resource_error(evaluation, "Failed to update evaluation: %s"
                         % evaluation['resource'])
    if is_shared(evaluation):
        message = dated("Shared evaluation link. %s\n" %
                        get_url(evaluation, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, evaluation)

    return evaluation
Example No. 32
def update_pca(pca, pca_args, args, api=None, path=None, session_file=None):
    """Updates pca properties

    """
    if api is None:
        api = bigml.api.BigML()

    message = dated("Updating PCA. %s\n" % get_url(pca))
    log_message(message, log_file=session_file, console=args.verbosity)
    pca = api.update_pca(pca, pca_args)
    check_resource_error(pca, "Failed to update PCA: %s" % pca['resource'])
    pca = check_resource(pca,
                         api.get_pca,
                         query_string=FIELDS_QS,
                         raise_on_error=True)
    if is_shared(pca):
        message = dated("Shared PCA link. %s\n" % get_url(pca, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, pca)

    return pca
Exemplo n.º 38
0
def update_model(model, model_args, args,
                 api=None, path=None, session_file=None):
    """Updates model properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating model. %s\n" %
                    get_url(model))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    model = api.update_model(model, model_args)
    check_resource_error(model, "Failed to update model: %s"
                         % model['resource'])
    if is_shared(model):
        message = dated("Shared model link. %s\n" %
                        get_url(model, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, model)

    return model
Exemplo n.º 39
0
def get_dataset(dataset, api=None, verbosity=True, session_file=None):
    """Retrieves the dataset in its actual state

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(dataset, basestring)
            or bigml.api.get_status(dataset)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving dataset. %s\n" % get_url(dataset))
        log_message(message, log_file=session_file, console=verbosity)
        dataset = check_resource(dataset, api.get_dataset)
        check_resource_error(dataset, "Failed to get dataset: ")
    return dataset
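
The isinstance check above lets callers pass either a bare resource id or a previously retrieved resource document; a short sketch of both call forms (the id is hypothetical):

import bigml.api

api = bigml.api.BigML()
# passing an id string always triggers a retrieval
dataset = get_dataset("dataset/5f0c0aa20af5e845dc000001", api=api)
# passing the resource dict skips the retrieval once it is FINISHED
dataset = get_dataset(dataset, api=api)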
Exemplo n.º 40
0
def get_evaluation(evaluation, api=None, verbosity=True, session_file=None):
    """Retrieves evaluation in its actual state

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Retrieving evaluation. %s\n" %
                    get_url(evaluation))
    log_message(message, log_file=session_file, console=verbosity)
    try:
        evaluation = check_resource(evaluation, api.get_evaluation)
    except ValueError, exception:
        sys.exit("Failed to get a finished evaluation: %s" % str(exception))
    return evaluation
Exemplo n.º 41
0
def get_ensemble(ensemble, api=None, verbosity=True, session_file=None):
    """Retrieves remote ensemble in its actual status

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(ensemble, basestring)
            or bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving ensemble. %s\n" % get_url(ensemble))
        log_message(message, log_file=session_file, console=verbosity)
        ensemble = check_resource(ensemble, api.get_ensemble)
        check_resource_error(ensemble, "Failed to get ensemble: ")
    return ensemble
Exemplo n.º 42
0
def get_source(source, api=None, verbosity=True, session_file=None):
    """Retrieves the source in its actual state and its field info

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(source, basestring)
            or bigml.api.get_status(source)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving source. %s\n" % get_url(source))
        log_message(message, log_file=session_file, console=verbosity)
        try:
            source = check_resource(source, api.get_source)
        except ValueError, exception:
            sys.exit("Failed to get a finished source: %s" % str(exception))
    return source
Exemplo n.º 43
0
def update_dataset(dataset, dataset_args, verbosity,
                   api=None, session_file=None):
    """Updates dataset properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating dataset. %s\n" %
                    get_url(dataset))
    log_message(message, log_file=session_file,
                console=verbosity)
    dataset = api.update_dataset(dataset, dataset_args)
    check_resource_error(dataset, "Failed to update dataset: ")
    return dataset
Exemplo n.º 44
0
def get_dataset(dataset, api=None, verbosity=True, session_file=None):
    """Retrieves the dataset in its actual state

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(dataset, basestring) or
            bigml.api.get_status(dataset)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving dataset. %s\n" %
                        get_url(dataset))
        log_message(message, log_file=session_file,
                    console=verbosity)
        dataset = check_resource(dataset, api.get_dataset)
        check_resource_error(dataset, "Failed to get dataset: ")
    return dataset
Exemplo n.º 45
0
def get_ensemble(ensemble, api=None, verbosity=True, session_file=None):
    """Retrieves remote ensemble in its actual status

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(ensemble, basestring) or
            bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving ensemble. %s\n" %
                        get_url(ensemble))
        log_message(message, log_file=session_file,
                    console=verbosity)
        ensemble = check_resource(ensemble, api.get_ensemble)
        check_resource_error(ensemble, "Failed to get ensemble: ")
    return ensemble
Exemplo n.º 46
0
def update_model(model,
                 model_args,
                 args,
                 api=None,
                 path=None,
                 session_file=None):
    """Updates model properties

    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating model. %s\n" % get_url(model))
    log_message(message, log_file=session_file, console=args.verbosity)
    model = api.update_model(model, model_args)
    check_resource_error(model,
                         "Failed to update model: %s" % model['resource'])
    if is_shared(model):
        message = dated("Shared model link. %s\n" %
                        get_url(model, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, model)

    return model
Exemplo n.º 47
0
def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1,
                     api=None, path=None, session_file=None, log=None):
    """Create ensembles from input data

    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = ALL_FIELDS_QS
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress, args.max_parallel_ensembles,
                                     api.get_ensemble, "ensemble",
                                     query_string=query_string,
                                     wait_step=args.number_of_models)

            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            ensemble = api.create_ensemble(datasets, ensemble_args)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  open_mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s.\n" %
                            get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)

    return ensembles, ensemble_ids, models, model_ids
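
`wait_for_available_tasks` throttles creation so that at most `max_parallel_ensembles` builds run at once. Its implementation is not shown in these examples; a minimal sketch of the idea (an illustration under that assumption, not BigMLer's actual code):

import time

from bigml.api import FINISHED, get_status

def wait_for_slot(inprogress, max_parallel, get_resource, wait_step=5):
    """Illustrative throttle: blocks while `max_parallel` resources are
       still being built, dropping finished ones from `inprogress`.

    """
    while len(inprogress) >= max_parallel:
        for resource_id in inprogress[:]:
            if get_status(get_resource(resource_id))['code'] == FINISHED:
                inprogress.remove(resource_id)
        if len(inprogress) >= max_parallel:
            time.sleep(wait_step)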
Exemplo n.º 48
0
def get_source(source, api=None, verbosity=True,
               session_file=None):
    """Retrieves the source in its actual state and its field info

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(source, basestring) or
            bigml.api.get_status(source)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving source. %s\n" %
                        get_url(source))
        log_message(message, log_file=session_file,
                    console=verbosity)
        try:
            source = check_resource(source, api.get_source)
        except ValueError, exception:
            sys.exit("Failed to get a finished source: %s" % str(exception))
    return source
Exemplo n.º 49
0
def create_ensembles(dataset, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1,
                     api=None, path=None, session_file=None, log=None):
    """Create ensembles from input data

    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = ALL_FIELDS_QS
        for i in range(0, number_of_ensembles):
            if i % args.max_parallel_ensembles == 0 and i > 0:
                try:
                    ensembles[i - 1] = check_resource(
                        ensembles[i - 1], api.get_ensemble,
                        query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished ensemble: %s" %
                             str(exception))
            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            ensemble = api.create_ensemble(dataset, ensemble_args)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  open_mode='a')

        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s.\n" %
                            get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)

    return ensembles, ensemble_ids, models, model_ids
Exemplo n.º 50
0
def update_source_fields(source, updated_values, fields, api=None,
                         verbosity=True, session_file=None):
    """Update remote source with new fields values

    """
    if api is None:
        api = bigml.api.BigML()
    update_fields = {}
    for (column, value) in updated_values.iteritems():
        update_fields.update({
            fields.field_id(column): value})
    message = dated("Updating source. %s\n" %
                    get_url(source))
    log_message(message, log_file=session_file,
                console=verbosity)
    source = api.update_source(source, {"fields": update_fields})
    check_resource_error(source, "Failed to update source: ")
    return source
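
A usage sketch for the helper above: `updated_values` maps source columns to new field attributes and `fields` translates column indexes into field ids. The source id and the attribute values are made up for illustration:

import bigml.api
from bigml.fields import Fields

api = bigml.api.BigML()
source = api.get_source("source/5f0c0aa20af5e845dc000002")  # hypothetical id
fields = Fields(source['object']['fields'])
updated_values = {0: {"name": "sepal length"},
                  2: {"optype": "categorical"}}
source = update_source_fields(source, updated_values, fields, api=api)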
Exemplo n.º 51
0
def remote_prediction(model, test_dataset,
                      batch_prediction_args, args,
                      api, resume, prediction_file=None, session_file=None,
                      path=None, log=None):
    """Computes a prediction for each entry in the `test_set`.

       Predictions are computed remotely using the batch prediction call.
    """

    model_id = bigml.api.get_resource_id(model)
    batch_prediction_args.update({"probability": True, "confidence": False})

    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_prediction = create_batch_prediction(
            model_id, test_dataset, batch_prediction_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        file_name = api.download_batch_prediction(batch_prediction,
                                                  prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        batch_prediction = bigml.api.check_resource(batch_prediction, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset",
                                    path, new_dataset, mode='a')
Exemplo n.º 52
0
def publish_model(model, args, api=None, session_file=None):
    """Update model with publish info

    """
    if api is None:
        api = bigml.api.BigML()
    public_model = {}
    if args.black_box:
        public_model = {"private": False}
    if args.white_box:
        public_model = {"private": False, "white_box": True}
        if args.model_price:
            public_model.update(price=args.model_price)
        if args.cpp:
            public_model.update(credits_per_prediction=args.cpp)
    if public_model:
        message = dated("Updating model. %s\n" %
                        get_url(model))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        model = api.update_model(model, public_model)
        check_resource_error(model, "Failed to update model %s: " %
                             model['resource'])
    return model
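
A sketch of the flag combinations the helper reacts to; the namespace attributes mirror the BigMLer CLI flags used above and the model id is hypothetical:

from argparse import Namespace

import bigml.api

api = bigml.api.BigML()
model = api.get_model("model/5f0c0aa20af5e845dc000003")  # hypothetical id
# white-box publication with a price and credits per prediction
args = Namespace(black_box=False, white_box=True,
                 model_price=10, cpp=0.1, verbosity=1)
model = publish_model(model, args, api=api)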
Exemplo n.º 53
0
def create_evaluations(model_ids, datasets, evaluation_args, args, api=None,
                       path=None, session_file=None, log=None,
                       existing_evaluations=0):
    """Create evaluations for a list of models

       ``model_ids``: list of model ids to create an evaluation of
       ``datasets``: dataset objects or ids to evaluate with
       ``evaluation_args``: arguments for the ``create_evaluation`` call
       ``args``: input values for bigmler flags
       ``api``: API connection to the remote objects in BigML
       ``path``: directory to store the BigMLer generated files in
       ``session_file``: file to store the messages of that session
       ``log``: user provided log file
       ``existing_evaluations``: evaluations found when attempting resume
    """
    evaluations = []
    dataset = datasets[0]
    evaluation_args_list = []
    if isinstance(evaluation_args, list):
        evaluation_args_list = evaluation_args
    if api is None:
        api = bigml.api.BigML()
    remaining_ids = model_ids[existing_evaluations:]
    number_of_evaluations = len(remaining_ids)
    message = dated("Creating evaluations.\n")
    log_message(message, log_file=session_file,
                console=args.verbosity)

    inprogress = []
    for i in range(0, number_of_evaluations):
        model = remaining_ids[i]
        wait_for_available_tasks(inprogress, args.max_parallel_evaluations,
                                 api.get_evaluation, "evaluation")

        if evaluation_args_list:
            evaluation_args = evaluation_args_list[i]
        if args.cross_validation_rate > 0:
            new_seed = get_basic_seed(i + existing_evaluations)
            evaluation_args.update(seed=new_seed)
        evaluation = api.create_evaluation(model, dataset, evaluation_args)
        evaluation_id = check_resource_error(evaluation,
                                             "Failed to create evaluation: ")
        inprogress.append(evaluation_id)
        log_created_resources("evaluations", path, evaluation_id,
                              open_mode='a')
        evaluations.append(evaluation)
        log_message("%s\n" % evaluation['resource'], log_file=log)

    if (args.number_of_evaluations < 2 and len(evaluations) == 1
            and args.verbosity):
        evaluation = evaluations[0]
        if bigml.api.get_status(evaluation)['code'] != bigml.api.FINISHED:
            try:
                evaluation = check_resource(evaluation, api.get_evaluation)
            except ValueError, exception:
                sys.exit("Failed to get a finished evaluation: %s" %
                         str(exception))
            evaluations[0] = evaluation
        message = dated("Evaluation created: %s.\n" %
                        get_url(evaluation))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, evaluation)

    return evaluations
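
A sketch of a cross-validation style call to the helper above; the ids and flag values are assumptions, and a positive `cross_validation_rate` makes each evaluation receive its own deterministic seed via `get_basic_seed`:

from argparse import Namespace

import bigml.api

api = bigml.api.BigML()
args = Namespace(max_parallel_evaluations=2, cross_validation_rate=0.1,
                 number_of_evaluations=3, verbosity=1, reports=None)
model_ids = ["model/5f0c0aa20af5e845dc00000%s" % i for i in range(3)]
datasets = ["dataset/5f0c0aa20af5e845dc000010"]  # hypothetical ids
evaluations = create_evaluations(
    model_ids, datasets, {"sample_rate": 0.8, "out_of_bag": True},
    args, api=api)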
Exemplo n.º 54
0
def remote_predict(model, test_dataset, batch_prediction_args, args,
                   api, resume, prediction_file=None, session_file=None,
                   path=None, log=None):
    """Computes a prediction for each entry in the `test_set`.

    Predictions are computed remotely using the batch predictions call.
    """
    if args.ensemble is not None and not args.dataset_off:
        model_or_ensemble = args.ensemble
    elif args.dataset_off:
        if hasattr(args, "ensemble_ids_") and args.ensemble_ids_:
            models = args.ensemble_ids_
        else:
            models = args.model_ids_
        test_datasets = args.test_dataset_ids
    else:
        model_or_ensemble = bigml.api.get_model_id(model)
    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        if not args.dataset_off:
            batch_prediction = create_batch_prediction(
                model_or_ensemble, test_dataset, batch_prediction_args,
                args, api, session_file=session_file, path=path, log=log)
        else:
            batch_predictions = []
            for index, test_dataset_n in enumerate(test_datasets):
                batch_predictions.append(create_batch_prediction( \
                    models[index], test_dataset_n, batch_prediction_args,
                    args, api, session_file=session_file, path=path, log=log))
    if not args.no_csv and not args.dataset_off:
        file_name = api.download_batch_prediction(batch_prediction,
                                                  prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset and not args.dataset_off:
        batch_prediction = bigml.api.check_resource(batch_prediction, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset",
                                    path, new_dataset, mode='a')
    elif args.to_dataset and args.dataset_off:
        predictions_datasets = []
        for batch_prediction in batch_predictions:
            batch_prediction = bigml.api.check_resource(batch_prediction,
                                                        api=api)
            new_dataset = bigml.api.get_dataset_id(
                batch_prediction['object']['output_dataset_resource'])
            if new_dataset is not None:
                predictions_datasets.append(new_dataset)
                message = u.dated("Batch prediction dataset created: %s\n"
                                  % u.get_url(new_dataset))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                u.log_created_resources("batch_prediction_dataset",
                                        path, new_dataset, mode='a')
        multi_dataset = api.create_dataset(predictions_datasets)
        log_created_resources("dataset_pred", path,
                              bigml.api.get_dataset_id(multi_dataset),
                              mode='a')
        dataset_id = check_resource_error(multi_dataset,
                                          "Failed to create dataset: ")
        try:
            multi_dataset = api.check_resource(multi_dataset)
        except ValueError, exception:
            sys.exit("Failed to get a finished dataset: %s" % str(exception))
        message = dated("Predictions dataset created: %s\n" %
                        get_url(multi_dataset))
        log_message(message, log_file=session_file, console=args.verbosity)
        log_message("%s\n" % dataset_id, log_file=log)
        if not args.no_csv:
            file_name = api.download_dataset(dataset_id, prediction_file)
            if file_name is None:
                sys.exit("Failed downloading CSV.")
Exemplo n.º 55
0
def compute_output(api, args, training_set, test_set=None, output=None,
                   objective_field=None,
                   description=None,
                   field_attributes=None,
                   types=None,
                   dataset_fields=None,
                   model_fields=None,
                   name=None, training_set_header=True,
                   test_set_header=True, model_ids=None,
                   votes_files=None, resume=False, fields_map=None):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, open the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs is set, the log files are cleared
        if args.clear_logs:
            try:
                open(log, 'w', 0).close()
            except IOError:
                pass

    if (training_set or (args.evaluate and test_set)):
        if resume:
            resume, args.source = u.checkpoint(u.is_source_created, path,
                                               bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Source not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

    # If no previous source, dataset or model is provided, we create a new
    # one. Also, if --evaluate and test data are provided, we create a new
    # dataset to test with.
    data_set = None
    if (training_set and not args.source and not args.dataset and
            not args.model and not args.models):
        data_set = training_set
        data_set_header = training_set_header
    elif (args.evaluate and test_set and not args.source):
        data_set = test_set
        data_set_header = test_set_header

    if data_set is not None:

        source_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag,
            "source_parser": {"header": data_set_header}}
        message = u.dated("Creating source.\n")
        u.log_message(message, log_file=session_file, console=args.verbosity)
        source = api.create_source(data_set, source_args,
                                   progress_bar=args.progress_bar)
        source = api.check_resource(source, api.get_source)
        message = u.dated("Source created: %s\n" % u.get_url(source, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        u.log_message("%s\n" % source['resource'], log_file=log)

        fields = Fields(source['object']['fields'],
                        source['object']['source_parser']['missing_tokens'],
                        source['object']['source_parser']['locale'])
        source_file = open(path + '/source', 'w', 0)
        source_file.write("%s\n" % source['resource'])
        source_file.write("%s\n" % source['object']['name'])
        source_file.flush()
        source_file.close()

    # If a source is provided, we retrieve it.
    elif args.source:
        message = u.dated("Retrieving source. %s\n" %
                          u.get_url(args.source, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        source = api.get_source(args.source)

    # If we already have a source, we check that it is finished and extract
    # the fields, updating them if needed.
    if source:
        if source['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.check_resource(source, api.get_source)
        csv_properties = {'missing_tokens':
                          source['object']['source_parser']['missing_tokens'],
                          'data_locale':
                          source['object']['source_parser']['locale']}

        fields = Fields(source['object']['fields'], **csv_properties)
        update_fields = {}
        if field_attributes:
            for (column, value) in field_attributes.iteritems():
                update_fields.update({
                    fields.field_id(column): value})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

        update_fields = {}
        if types:
            for (column, value) in types.iteritems():
                update_fields.update({
                    fields.field_id(column): {'optype': value}})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

    if (training_set or args.source or (args.evaluate and test_set)):
        if resume:
            resume, args.dataset = u.checkpoint(u.is_dataset_created, path,
                                                bigml.api,
                                                debug=args.debug)
            if not resume:
                message = u.dated("Dataset not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
    # If we have a source but no dataset or model has been provided, we
    # create a new dataset unless the no_dataset option is set. We also
    # create one if evaluate is set and test_set has been provided.
    if ((source and not args.dataset and not args.model and not model_ids and
            not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
        dataset_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag
        }

        if args.json_filter:
            dataset_args.update(json_filter=args.json_filter)
        elif args.lisp_filter:
            dataset_args.update(lisp_filter=args.lisp_filter)

        input_fields = []
        if dataset_fields:
            # avoid shadowing the `name` parameter used later for resources
            for field_name in dataset_fields:
                input_fields.append(fields.field_id(field_name))
            dataset_args.update(input_fields=input_fields)
        message = u.dated("Creating dataset.\n")
        u.log_message(message, log_file=session_file, console=args.verbosity)
        dataset = api.create_dataset(source, dataset_args)
        dataset = api.check_resource(dataset, api.get_dataset)
        message = u.dated("Dataset created: %s\n" % u.get_url(dataset, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        u.log_message("%s\n" % dataset['resource'], log_file=log)
        dataset_file = open(path + '/dataset', 'w', 0)
        dataset_file.write("%s\n" % dataset['resource'])
        dataset_file.flush()
        dataset_file.close()

    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        message = u.dated("Retrieving dataset. %s\n" %
                          u.get_url(args.dataset, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        dataset = api.get_dataset(args.dataset)

    # If we already have a dataset, we check its status and get the fields if
    # we don't have them yet.
    if dataset:
        if dataset['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving dataset. %s\n" %
                              u.get_url(dataset, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            dataset = api.check_resource(dataset, api.get_dataset)
        if not csv_properties:
            csv_properties = {'data_locale':
                              dataset['object']['locale']}
        if args.public_dataset:
            if not description:
                raise Exception("You should provide a description to publish.")
            public_dataset = {"private": False}
            if args.dataset_price:
                message = u.dated("Updating dataset. %s\n" %
                                  u.get_url(dataset, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_dataset.update(price=args.dataset_price)
            message = u.dated("Updating dataset. %s\n" %
                              u.get_url(dataset, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            dataset = api.update_dataset(dataset, public_dataset)
        fields = Fields(dataset['object']['fields'], **csv_properties)

    # If we have a dataset but not a model, we create the model unless the
    # no_model flag is set.
    if (dataset and not args.model and not model_ids and not args.no_model):
        model_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag
        }
        if objective_field is not None:
            model_args.update({"objective_field":
                               fields.field_id(objective_field)})
        # If the evaluate flag is on, we choose deterministic sampling with
        # 80% of the data to create the model
        if args.evaluate:
            if args.sample_rate == 1:
                args.sample_rate = EVALUATE_SAMPLE_RATE
            seed = SEED
            model_args.update(seed=seed)

        input_fields = []
        if model_fields:
            for field_name in model_fields:
                input_fields.append(fields.field_id(field_name))
            model_args.update(input_fields=input_fields)

        if args.pruning and args.pruning != 'smart':
            model_args.update(stat_pruning=(args.pruning == 'statistical'))

        model_args.update(sample_rate=args.sample_rate,
                          replacement=args.replacement,
                          randomize=args.randomize)
        model_ids = []
        models = []
        if resume:
            resume, model_ids = u.checkpoint(u.are_models_created, path,
                                             args.number_of_models,
                                             bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Found %s models out of %s. Resuming.\n" %
                                  (len(model_ids),
                                   args.number_of_models))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            models = model_ids
            args.number_of_models -= len(model_ids)

        model_file = open(path + '/models', 'w', 0)
        for model_id in model_ids:
            model_file.write("%s\n" % model_id)
        last_model = None
        if args.number_of_models > 0:
            message = u.dated("Creating %s.\n" %
                              u.plural("model", args.number_of_models))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            for i in range(1, args.number_of_models + 1):
                if i > args.max_parallel_models:
                    api.check_resource(last_model, api.get_model)
                model = api.create_model(dataset, model_args)
                u.log_message("%s\n" % model['resource'], log_file=log)
                last_model = model
                model_ids.append(model['resource'])
                models.append(model)
                model_file.write("%s\n" % model['resource'])
                model_file.flush()
            if args.number_of_models < 2 and args.verbosity:
                if model['object']['status']['code'] != bigml.api.FINISHED:
                    model = api.check_resource(model, api.get_model)
                    models[0] = model
                message = u.dated("Model created: %s.\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        model_file.close()

    # If a model is provided, we retrieve it.
    elif args.model:
        message = u.dated("Retrieving model. %s\n" %
                          u.get_url(args.model, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        model = api.get_model(args.model)

    elif args.models or args.model_tag:
        models = model_ids[:]

    if model_ids and test_set and not args.evaluate:
        model_id = ""
        if len(model_ids) == 1:
            model_id = model_ids[0]
        message = u.dated("Retrieving %s. %s\n" %
                          (u.plural("model", len(model_ids)),
                           u.get_url(model_id, api)))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        if len(model_ids) < args.max_batch_models:
            models = []
            for model in model_ids:
                model = api.check_resource(model, api.get_model)
                models.append(model)
            model = models[0]
        else:
            model = api.check_resource(model_ids[0], api.get_model)
            models[0] = model

    # We check that the model is finished and get the fields if we haven't
    # got them yet.
    if model and not args.evaluate and (test_set or args.black_box
                                        or args.white_box):
        if model['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving model. %s\n" %
                              u.get_url(model, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            model = api.check_resource(model, api.get_model)
        if args.black_box:
            if not description:
                raise Exception("You should provide a description to publish.")
            model = api.update_model(model, {"private": False})
        if args.white_box:
            if not description:
                raise Exception("You should provide a description to publish.")
            public_model = {"private": False, "white_box": True}
            if args.model_price:
                message = u.dated("Updating model. %s\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_model.update(price=args.model_price)
            if args.cpp:
                message = u.dated("Updating model. %s\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_model.update(credits_per_prediction=args.cpp)
            model = api.update_model(model, public_model)
        if not csv_properties:
            csv_properties = {'data_locale':
                              model['object']['locale']}
        csv_properties.update(verbose=True)
        if args.user_locale:
            csv_properties.update(data_locale=args.user_locale)

        fields = Fields(model['object']['model']['fields'], **csv_properties)

    if model and not models:
        models = [model]

    if models and test_set and not args.evaluate:
        objective_field = models[0]['object']['objective_fields']
        if isinstance(objective_field, list):
            objective_field = objective_field[0]
        predict(test_set, test_set_header, models, fields, output,
                objective_field, args.remote, api, log,
                args.max_batch_models, args.method, resume, args.tag,
                args.verbosity, session_file, args.debug)

    # When the combine_votes flag is used, retrieve the prediction files
    # saved in the comma-separated list of directories and combine them
    # (the model id recovery regex is illustrated after this example)
    if votes_files:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', votes_files[0]).replace("_", "/")
        model = api.check_resource(model_id, api.get_model)
        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.combine_votes(votes_files, local_model.to_prediction,
                        output, args.method)

    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        if resume:
            resume, evaluation = u.checkpoint(u.is_evaluation_created, path,
                                              bigml.api,
                                              debug=args.debug)
            if not resume:
                message = u.dated("Evaluation not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        if not resume:
            evaluation_file = open(path + '/evaluation', 'w', 0)
            evaluation_args = {
                "name": name,
                "description": description,
                "tags": args.tag
            }
            if fields_map is not None:
                update_map = {}
                for (dataset_column, model_column) in fields_map.iteritems():
                    update_map.update({
                        fields.field_id(dataset_column):
                        fields.field_id(model_column)})
                evaluation_args.update({"fields_map": update_map})
            if not ((args.dataset or args.test_set)
                    and (args.model or args.models or args.model_tag)):
                evaluation_args.update(out_of_bag=True, seed=SEED,
                                       sample_rate=args.sample_rate)
            message = u.dated("Creating evaluation.\n")
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            evaluation = api.create_evaluation(model, dataset, evaluation_args)
            u.log_message("%s\n" % evaluation['resource'], log_file=log)
            evaluation_file.write("%s\n" % evaluation['resource'])
            evaluation_file.flush()
            evaluation_file.close()
        message = u.dated("Retrieving evaluation. %s\n" %
                          u.get_url(evaluation, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        evaluation = api.check_resource(evaluation, api.get_evaluation)
        evaluation_json = open(output + '.json', 'w', 0)
        evaluation_json.write(json.dumps(evaluation['object']['result']))
        evaluation_json.flush()
        evaluation_json.close()
        evaluation_txt = open(output + '.txt', 'w', 0)
        api.pprint(evaluation['object']['result'],
                   evaluation_txt)
        evaluation_txt.flush()
        evaluation_txt.close()

    # Workaround to restore the Windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file, console=args.verbosity)
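
For reference, the votes-file regex inside `compute_output` rebuilds a model id from a predictions file name; a quick check of the substitution with a made-up file name:

import re

votes_file = "dir1/model_0123456789abcdef01234567__predictions.csv"
model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                  r'\1', votes_file).replace("_", "/")
print(model_id)  # prints: model/0123456789abcdef01234567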
Exemplo n.º 56
0
    log_message(message, log_file=session_file, console=args.verbosity)
    source = api.create_source(data_set, source_args,
                               progress_bar=args.progress_bar)
    if path is not None:
        try:
            with open(path + '/source', 'w', 0) as source_file:
                source_file.write("%s\n" % source['resource'])
                source_file.write("%s\n" % source['object']['name'])
        except IOError, exc:
            raise IOError("%s: Failed to write %s/source" % (str(exc), path))
    check_resource_error(source, "Failed to create source: ")
    try:
        source = check_resource(source, api.get_source)
    except ValueError, exception:
        sys.exit("Failed to get a finished source: %s" % str(exception))
    message = dated("Source created: %s\n" % get_url(source))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % source['resource'], log_file=log)

    return source


def data_to_source(training_set, test_set,
                   training_set_header, test_set_header, args):
    """Extracts the flags info to create a source object

    """
    data_set = None
    data_set_header = None
    if (training_set and not args.source and not args.dataset and
            not args.model and not args.models):
        data_set = training_set
        data_set_header = training_set_header
    elif (args.evaluate and test_set and not args.source):
        data_set = test_set
        data_set_header = test_set_header

    return data_set, data_set_header
Exemplo n.º 57
0
def create_models(datasets, model_ids, model_args,
                  args, api=None, path=None,
                  session_file=None, log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()

    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_models:]
    # if resuming and all models were created, there will be no datasets left
    if datasets:
        dataset = datasets[0]
        if isinstance(model_args, list):
            model_args_list = model_args
        if args.number_of_models > 0:
            message = dated("Creating %s.\n" %
                            plural("model", args.number_of_models))
            log_message(message, log_file=session_file,
                        console=args.verbosity)

            single_model = args.number_of_models == 1 and existing_models == 0
            # if there's more than one model, the first one must contain
            # the entire field structure to be used as a reference.
            query_string = (FIELDS_QS if single_model
                            else ALL_FIELDS_QS)
            inprogress = []
            for i in range(0, args.number_of_models):
                wait_for_available_tasks(inprogress, args.max_parallel_models,
                                         api.get_model, "model",
                                         query_string=query_string)
                if model_args_list:
                    model_args = model_args_list[i]
                if args.cross_validation_rate > 0:
                    new_seed = get_basic_seed(i + existing_models)
                    model_args.update(seed=new_seed)
                # one model per dataset (--max-categories or single model)
                if (args.max_categories > 0 or
                        (args.test_datasets and args.evaluate)):
                    dataset = datasets[i]
                    model = api.create_model(dataset, model_args)
                elif args.dataset_off and args.evaluate:
                    multi_dataset = args.test_dataset_ids[:]
                    del multi_dataset[i + existing_models]
                    model = api.create_model(multi_dataset, model_args)
                else:
                    model = api.create_model(datasets, model_args)
                model_id = check_resource_error(model,
                                                "Failed to create model: ")
                log_message("%s\n" % model_id, log_file=log)
                model_ids.append(model_id)
                inprogress.append(model_id)
                models.append(model)
                log_created_resources("models", path, model_id, open_mode='a')

            if args.number_of_models < 2 and args.verbosity:
                if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                    try:
                        model = check_resource(model, api.get_model,
                                               query_string=query_string)
                    except ValueError, exception:
                        sys.exit("Failed to get a finished model: %s" %
                                 str(exception))
                    models[0] = model
                message = dated("Model created: %s.\n" %
                                get_url(model))
                log_message(message, log_file=session_file,
                            console=args.verbosity)
                if args.reports:
                    report(args.reports, path, model)

    return models, model_ids
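
A single-model usage sketch for `create_models`; the dataset id and the flags namespace are assumptions that mirror BigMLer defaults:

from argparse import Namespace

import bigml.api

api = bigml.api.BigML()
args = Namespace(number_of_models=1, max_parallel_models=1,
                 cross_validation_rate=0, multi_label=False,
                 dataset_off=False, evaluate=False, max_categories=None,
                 test_datasets=None, verbosity=1, reports=None)
datasets = ["dataset/5f0c0aa20af5e845dc000010"]  # hypothetical id
models, model_ids = create_models(datasets, [], {"name": "my model"},
                                  args, api=api)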
Exemplo n.º 58
0
                               progress_bar=args.progress_bar)
    if path is not None:
        try:
            suffix = "_" + source_type if source_type else ""
            with open("%s/source%s" % (path, suffix), 'w', 0) as source_file:
                source_file.write("%s\n" % source['resource'])
                source_file.write("%s\n" % source['object']['name'])
        except IOError, exc:
            sys.exit("%s: Failed to write %s/source" % (str(exc), path))
    source_id = check_resource_error(source, "Failed to create source: ")
    try:
        source = check_resource(source, api.get_source,
                                query_string=ALL_FIELDS_QS)
    except ValueError, exception:
        sys.exit("Failed to get a finished source: %s" % str(exception))
    message = dated("Source created: %s\n" % get_url(source))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % source_id, log_file=log)

    return source


def data_to_source(training_set, test_set,
                   training_set_header, test_set_header, args):
    """Extracts the flags info to create a source object

    """
    data_set = None
    data_set_header = None
    if (training_set and not args.source and not args.dataset and
            not args.model and not args.models):