Example No. 1
def get_source_info(api, args, resume, csv_properties, session_file, path,
                    log):
    """Creating or retrieving the source and related information

    """
    source = None
    fields = None
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file, csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api,
            args,
            resume,
            csv_properties=csv_properties,
            session_file=session_file,
            path=path,
            log=log)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    return source, resume, csv_properties, fields
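A minimal usage sketch for the function above. It assumes the bigmler module context (the u and ps helpers imported there) is importable, that BigML credentials are available in the environment, and that source.json is a source previously exported by bigmler; the namespace attributes mirror the ones the function reads, and the concrete values are placeholders.

import argparse
import bigml.api

# Hypothetical call; only the local-JSON branch runs because source_file is set.
api = bigml.api.BigML()   # credentials are taken from the environment
args = argparse.Namespace(source_file="source.json", export_fields=None)
source, resume, csv_properties, fields = get_source_info(
    api, args, resume=False, csv_properties={},
    session_file="bigmler_sessions", path=".", log=None)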
Example No. 2
def get_source_info(api, args, resume,
                    csv_properties, session_file, path, log):
    """Creating or retrieving the source and related information

    """
    source = None
    fields = None
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties,
            session_file=session_file, path=path, log=log)
    return source, resume, csv_properties, fields
Example No. 3
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None
    other_label = OTHER
    ensemble_ids = []
    multi_label_data = None
    multi_label_fields = []
    # local_ensemble = None
    test_dataset = None
    datasets = None

    # variables from command-line options
    resume = args.resume_
    model_ids = args.model_ids_
    output = args.predictions
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs is set, the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = None if args.labels is None else [label.strip() for label in args.labels.split(args.args_separator)]
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path, labels=labels, session_file=session_file
        )
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({"objective_field": args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(args.source_file, csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api,
            args,
            resume,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log,
        )
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field, labels, all_labels, multi_label_fields) = l.multi_label_sync(
            args.objective_field, labels, multi_label_data, fields, multi_label_fields
        )

    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(args.dataset_file, csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source,
            api,
            args,
            resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log,
        )
    if datasets:
        dataset = datasets[0]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume, session_file=session_file, path=path)

        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset into a training and a test dataset
    # according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume, multi_label_data=multi_label_data, session_file=session_file, path=path, log=log
        )
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(dataset, args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset,
                    distribution,
                    fields,
                    args,
                    api,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log,
                    other_label=other_label,
                )
        else:
            sys.exit(
                "The provided objective field is not categorical nor "
                "a full terms only text field. "
                "Only these fields can be used with"
                "  --max-categories"
            )

    # If multi-dataset flag is on, generate a new dataset from the given
    # list of datasets
    if args.multi_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields, session_file=session_file, path=path, log=log
        )
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. The same
    # happens if the --to-dataset flag is used to clone or sample the
    # original dataset
    if (
        args.new_fields
        or (args.sample_rate != 1 and args.no_model)
        or (args.lisp_filter or args.json_filter)
        and not has_source(args)
    ):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields, session_file=session_file, path=path, log=log
        )
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({"objective_field": args.objective_name_, "objective_field_present": True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field, labels, all_labels, multi_label_fields) = l.multi_label_sync(
            args.objective_field, labels, multi_label_data, fields, multi_label_fields
        )

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, "max_categories", args.max_categories)
        other_label = get_metadata(dataset, "other_label", other_label)
    if args.model_file:
        # model is retrieved from the contents of the given local JSON file
        model, csv_properties, fields = u.read_local_resource(args.model_file, csv_properties=csv_properties)
        models = [model]
        model_ids = [model["resource"]]
        ensemble_ids = []
    elif args.ensemble_file:
        # model is retrieved from the contents of the given local JSON file
        ensemble, csv_properties, fields = u.read_local_resource(args.ensemble_file, csv_properties=csv_properties)
        model_ids = ensemble["object"]["models"][:]
        ensemble_ids = [ensemble["resource"]]
        models = model_ids[:]
        model = retrieve_resource(bigml.api.BigML(storage="./storage"), models[0], query_string=r.ALL_FIELDS_QS)
        models[0] = model
    else:
        # model is retrieved from the remote object
        models, model_ids, ensemble_ids, resume = pm.models_processing(
            datasets,
            models,
            model_ids,
            api,
            args,
            resume,
            fields=fields,
            session_file=session_file,
            path=path,
            log=log,
            labels=labels,
            multi_label_data=multi_label_data,
            other_label=other_label,
        )

    if models:
        model = models[0]
        single_model = len(models) == 1
    # If multi-label flag is set and no training_set was provided, label
    # info is extracted from the user_metadata. If models belong to an
    # ensemble, the ensemble must be retrieved to get the user_metadata.
    if model and args.multi_label and multi_label_data is None:
        if len(ensemble_ids) > 0 and isinstance(ensemble_ids[0], dict):
            resource = ensemble_ids[0]
        elif belongs_to_ensemble(model):
            ensemble_id = get_ensemble_id(model)
            resource = r.get_ensemble(ensemble_id, api=api, verbosity=args.verbosity, session_file=session_file)
        else:
            resource = model
        multi_label_data = l.get_multi_label_data(resource)

    # We update the model's public state if needed
    if model:
        if isinstance(model, basestring) or bigml.api.get_status(model)["code"] != bigml.api.FINISHED:
            if not args.evaluate and not a.has_train(args):
                query_string = MINIMUM_MODEL
            elif not args.test_header:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = "%s;%s" % (r.ALL_FIELDS_QS, r.FIELDS_QS)
            model = u.check_resource(model, api.get_model, query_string=query_string)
            models[0] = model
        if args.black_box or args.white_box or (args.shared_flag and r.shared_changed(args.shared, model)):
            model_args = {}
            if args.shared_flag and r.shared_changed(args.shared, model):
                model_args.update(shared=args.shared)
            if args.black_box or args.white_box:
                model_args.update(r.set_publish_model_args(args))
            if model_args:
                model = r.update_model(model, model_args, args, api=api, path=path, session_file=session_file)
                models[0] = model

    # We get the fields of the model if we haven't got
    # them yet and need them
    if model and not args.evaluate and args.test_set:
        # If more than one model, use the full field structure
        if not single_model and not args.multi_label and belongs_to_ensemble(model):
            if len(ensemble_ids) > 0:
                ensemble_id = ensemble_ids[0]
            else:
                ensemble_id = get_ensemble_id(model)
        fields = pm.get_model_fields(
            model, csv_properties, args, single_model=single_model, multi_label_data=multi_label_data
        )
        # Free memory after getting fields
        # local_ensemble = None
        gc.collect()

    # Fills in all_labels from user_metadata
    if args.multi_label and not all_labels:
        (args.objective_field, labels, all_labels, multi_label_fields) = l.multi_label_sync(
            args.objective_field, labels, multi_label_data, fields, multi_label_fields
        )
    if model:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(model, "max_categories", args.max_categories)
        other_label = get_metadata(model, "other_label", other_label)
    # If predicting
    if models and (a.has_test(args) or (test_dataset and args.remote)) and not args.evaluate:
        models_per_label = 1
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        if args.multi_label:
            # When prediction starts from existing models, the
            # multi_label_fields can be retrieved from the user_metadata
            # in the models
            if args.multi_label_fields is None and multi_label_fields:
                multi_label_field_names = [field[1] for field in multi_label_fields]
                args.multi_label_fields = ",".join(multi_label_field_names)
            test_set = ps.multi_label_expansion(
                args.test_set, args.test_header, args, path, labels=labels, session_file=session_file, input_flag=True
            )[0]
            test_set_header = True

        # Remote predictions: predictions are computed as batch predictions
        # on bigml.com, except when the --no-batch flag is set or when
        # multi-label or max-categories are used
        if (
            args.remote
            and not args.no_batch
            and not args.multi_label
            and not args.method in [THRESHOLD_CODE, COMBINATION]
        ):
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, session_file=session_file, path=path, log=log
                )

                (test_source, resume, csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args, resume, session_file=session_file, path=path, log=log
                )
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            csv_properties.update(objective_field=None, objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset, csv_properties)

            batch_prediction_args = r.set_batch_prediction_args(args, fields=fields, dataset_fields=test_fields)

            remote_predict(
                model,
                test_dataset,
                batch_prediction_args,
                args,
                api,
                resume,
                prediction_file=output,
                session_file=session_file,
                path=path,
                log=log,
            )
        else:
            models_per_label = args.number_of_models
            if args.multi_label and len(ensemble_ids) > 0 and args.number_of_models == 1:
                # use case where ensembles are read from a file
                models_per_label = len(models) / len(ensemble_ids)
            predict(
                models,
                fields,
                args,
                api=api,
                log=log,
                resume=resume,
                session_file=session_file,
                labels=labels,
                models_per_label=models_per_label,
                other_label=other_label,
                multi_label_data=multi_label_data,
            )

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if args.votes_files_:
        model_id = re.sub(r".*(model_[a-f0-9]{24})__predictions\.csv$", r"\1", args.votes_files_[0]).replace("_", "/")
        try:
            model = u.check_resource(model_id, api.get_model)
        except ValueError, exception:
            sys.exit("Failed to get model %s: %s" % (model_id, str(exception)))

        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file, console=args.verbosity)

        combine_votes(args.votes_files_, local_model.to_prediction, output, method=args.method)
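The --combine-votes branch above rebuilds a model id from the name of a per-model predictions file before combining the stored votes. A standalone sketch of that conversion (the 24-character hex id is made up):

import re

votes_file = "dir1/model_0123456789abcdef01234567__predictions.csv"
model_id = re.sub(r".*(model_[a-f0-9]{24})__predictions\.csv$",
                  r"\1", votes_file).replace("_", "/")
print(model_id)   # model/0123456789abcdef01234567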
Example No. 4
def compute_output(api, args, training_set, test_set=None, output=None,
                   objective_field=None,
                   description=None,
                   field_attributes=None,
                   types=None,
                   dataset_fields=None,
                   model_fields=None,
                   name=None, training_set_header=True,
                   test_set_header=True, model_ids=None,
                   votes_files=None, resume=False, fields_map=None,
                   test_field_attributes=None,
                   test_types=None):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None
    other_label = OTHER
    ensemble_ids = []
    multi_label_data = None
    multi_label_fields = []
    local_ensemble = None

    # It is compulsory to have a description to publish either datasets or
    # models
    if (not description and
            (args.black_box or args.white_box or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --max-categories, it is compulsory to specify also the
    # objective_field
    if args.max_categories > 0 and objective_field is None:
        sys.exit("When --max-categories is used, you must also provide the"
                 " --objective field name or column number")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs is set, the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (map(str.strip, args.labels.split(','))
              if args.labels is not None else None)
    if labels is not None:
        labels = sorted([label.decode("utf-8") for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and training_set is not None:
        (training_set, multi_label_data) = ps.multi_label_expansion(
            training_set, training_set_header, objective_field, args, path,
            labels=labels, session_file=session_file)
        training_set_header = True
        objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels

    source, resume, csv_properties, fields = ps.source_processing(
        training_set, test_set, training_set_header, test_set_header,
        api, args, resume, name=name, description=description,
        csv_properties=csv_properties, field_attributes=field_attributes,
        types=types, multi_label_data=multi_label_data,
        session_file=session_file, path=path, log=log)
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (objective_field, labels,
            all_labels, multi_label_fields) = l.multi_label_sync(
                objective_field, labels, multi_label_data, fields,
                multi_label_fields)
    datasets, resume, csv_properties, fields = pd.dataset_processing(
        source, training_set, test_set, fields, objective_field,
        api, args, resume, name=name, description=description,
        dataset_fields=dataset_fields, multi_label_data=multi_label_data,
        csv_properties=csv_properties,
        session_file=session_file, path=path, log=log)
    if datasets:
        dataset = datasets[0]

    # If test_split is used, split the dataset into a training and a test
    # dataset according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume, name=name, description=description,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        objective_id = fields.field_id(fields.objective_field)
        if pd.check_max_categories(fields.fields[objective_id]):
            distribution = pd.get_categories_distribution(dataset,
                                                          objective_id)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file, path=path, log=log,
                    other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     "  --max-categories")

    # If multi-dataset flag is on, generate a new dataset from the given
    # list of datasets
    if args.multi_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, name=name,
            description=description, fields=fields,
            dataset_fields=dataset_fields, objective_field=objective_field,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure
    if args.new_fields:
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, name=name,
            description=description, fields=fields,
            dataset_fields=dataset_fields, objective_field=objective_field,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (objective_field, labels,
            all_labels, multi_label_fields) = l.multi_label_sync(
                objective_field, labels, multi_label_data,
                fields, multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label',
                                   other_label)
    models, model_ids, ensemble_ids, resume = pm.models_processing(
        datasets, models, model_ids,
        objective_field, fields, api, args, resume,
        name=name, description=description, model_fields=model_fields,
        session_file=session_file, path=path, log=log, labels=labels,
        multi_label_data=multi_label_data, other_label=other_label)
    if models:
        model = models[0]
        single_model = len(models) == 1
    # If multi-label flag is set and no training_set was provided, label
    # info is extracted from the user_metadata. If models belong to an
    # ensemble, the ensemble must be retrieved to get the user_metadata.
    if model and args.multi_label and multi_label_data is None:
        if len(ensemble_ids) > 0 and isinstance(ensemble_ids[0], dict):
            resource = ensemble_ids[0]
        elif belongs_to_ensemble(model):
            ensemble_id = get_ensemble_id(model)
            resource = r.get_ensemble(ensemble_id, api=api,
                                      verbosity=args.verbosity,
                                      session_file=session_file)
        else:
            resource = model
        multi_label_data = l.get_multi_label_data(resource)

    # We update the model's public state if needed
    if model:
        if isinstance(model, basestring):
            if not args.evaluate:
                query_string = MINIMUM_MODEL
            else:
                query_string = r.FIELDS_QS
            model = u.check_resource(model, api.get_model,
                                     query_string=query_string)
        if (args.black_box or args.white_box or
                (args.shared_flag and r.shared_changed(args.shared, model))):
            model_args = {}
            if args.shared_flag and r.shared_changed(args.shared, model):
                model_args.update(shared=args.shared)
            if args.black_box or args.white_box:
                model_args.update(r.set_publish_model_args(args))
            if model_args:
                model = r.update_model(model, model_args, args,
                                       api=api, path=path,
                                       session_file=session_file)
                models[0] = model

    # We get the fields of the model if we haven't got
    # them yet and need them
    if model and not args.evaluate and test_set:
        # If more than one model, use the full field structure
        if (not single_model and not args.multi_label and
                belongs_to_ensemble(model)):
            if len(ensemble_ids) > 0:
                ensemble_id = ensemble_ids[0]
            else:
                ensemble_id = get_ensemble_id(model)
            local_ensemble = Ensemble(ensemble_id, api=api)
        fields, objective_field = pm.get_model_fields(
            model, csv_properties, args, single_model=single_model,
            multi_label_data=multi_label_data, local_ensemble=local_ensemble)

    # Fills in all_labels from user_metadata
    if args.multi_label and not all_labels:
        (objective_field, labels,
            all_labels, multi_label_fields) = l.multi_label_sync(
                objective_field, labels, multi_label_data,
                fields, multi_label_fields)
    if model:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(model, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(model, 'other_label',
                                   other_label)
    # If predicting
    if models and has_test(args) and not args.evaluate:
        models_per_label = 1
        test_dataset = None

        if args.multi_label:
            # When prediction starts from existing models, the
            # multi_label_fields can be retrieved from the user_metadata
            # in the models
            if args.multi_label_fields is None and multi_label_fields:
                multi_label_field_names = [field[1] for field
                                           in multi_label_fields]
                args.multi_label_fields = ",".join(multi_label_field_names)
            test_set = ps.multi_label_expansion(
                test_set, test_set_header, objective_field, args, path,
                labels=labels, session_file=session_file, input_flag=True)[0]
            test_set_header = True

        # Remote predictions: predictions are computed as batch predictions
        # on bigml.com, except when the --no-batch flag is set or when
        # multi-label or max-categories are used
        if (args.remote and not args.no_batch and not args.multi_label
                and not args.method in [THRESHOLD_CODE, COMBINATION]):
            # create test source from file
            test_name = "%s - test" % name
            if args.test_source is None:
                (test_source, resume,
                    csv_properties, test_fields) = ps.test_source_processing(
                        test_set, test_set_header,
                        api, args, resume, name=test_name,
                        description=description,
                        field_attributes=test_field_attributes,
                        types=test_types,
                        session_file=session_file, path=path, log=log)
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id,
                                                 api.get_source)
            if args.test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(test_name,
                                                        description, args)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(args.test_dataset)
                test_dataset = api.check_resource(test_dataset_id,
                                                  api.get_dataset)

            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)

            batch_prediction_args = r.set_batch_prediction_args(
                name, description, args, fields=fields,
                dataset_fields=test_fields, fields_map=fields_map)

            remote_predict(model, test_dataset, batch_prediction_args, args,
                           api, resume, prediction_file=output,
                           session_file=session_file, path=path, log=log)
        else:
            models_per_label = args.number_of_models
            if (args.multi_label and len(ensemble_ids) > 0
                    and args.number_of_models == 1):
                # use case where ensembles are read from a file
                models_per_label = len(models) / len(ensemble_ids)
            predict(test_set, test_set_header, models, fields, output,
                    objective_field, args, api=api, log=log,
                    resume=resume, session_file=session_file, labels=labels,
                    models_per_label=models_per_label, other_label=other_label,
                    multi_label_data=multi_label_data)

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if votes_files:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', votes_files[0]).replace("_", "/")
        try:
            model = u.check_resource(model_id, api.get_model)
        except ValueError, exception:
            sys.exit("Failed to get model %s: %s" % (model_id, str(exception)))

        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        combine_votes(votes_files, local_model.to_prediction,
                      output, args.method)
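Both variants of compute_output start by deriving a working directory from the output file and appending a fixed session log name to it. A simplified, standard-library-only sketch of that setup (the SESSIONS_LOG value and the directory-creating behaviour of u.check_dir are assumptions):

import os

SESSIONS_LOG = "bigmler_sessions"   # assumed constant value

def check_dir(path):
    """Stand-in for u.check_dir: ensure the directory of `path` exists."""
    directory = os.path.dirname(path) or "."
    if not os.path.exists(directory):
        os.makedirs(directory)
    return directory

output = "my_dir/predictions.csv"
path = check_dir(output)
session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
print(session_file)   # my_dir/bigmler_sessions on POSIX systems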
Example No. 5
def compute_output(api, args):
    """ Creates a dataset using the `training_set`.

    """

    source = None
    dataset = None
    fields = None
    other_label = OTHER
    multi_label_data = None
    multi_label_fields = []
    datasets = None

    # variables from command-line options
    resume = args.resume_
    output = args.output
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)
    path = u.check_dir(output)

    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs is set, the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else [
        label.strip() for label in args.labels.split(args.args_separator)
    ])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set,
            args.train_header,
            args,
            path,
            labels=labels,
            session_file=session_file)
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file, csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api,
            args,
            resume,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log)
    if source is not None:
        args.source = bigml.api.get_source_id(source)
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field, labels,
                                                  multi_label_data, fields,
                                                  multi_label_fields)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file, csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source,
            api,
            args,
            resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log)

    if datasets:
        dataset = datasets[-1]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset,
                                       api,
                                       args,
                                       resume,
                                       session_file=session_file,
                                       path=path)

        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset into a training and a test
    # dataset according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset,
            api,
            args,
            resume,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(
                dataset, args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset,
                    distribution,
                    fields,
                    args,
                    api,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log,
                    other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     "  --max-categories")

    # If any of the transformations is applied,
    # generate a new dataset from the given list of datasets
    if args.new_dataset:
        dataset, resume = pd.create_new_dataset(datasets,
                                                api,
                                                args,
                                                resume,
                                                fields=fields,
                                                session_file=session_file,
                                                path=path,
                                                log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. The same
    # happens if the --to-dataset flag is used to clone or sample the
    # original dataset
    if args.new_fields or args.sample_rate != 1 or \
            (args.lisp_filter or args.json_filter) and not has_source(args):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(dataset,
                                                api,
                                                args,
                                                resume,
                                                fields=fields,
                                                session_file=session_file,
                                                path=path,
                                                log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({
            'objective_field': args.objective_name_,
            'objective_field_present': True
        })
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field, labels,
                                                  multi_label_data, fields,
                                                  multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label', other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    u.print_generated_files(path,
                            log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
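The --labels preprocessing used above just splits the option value on the configured separator, strips whitespace and sorts the result. In isolation (assuming the default comma separator):

raw_labels = "comedy, drama ,thriller"
args_separator = ","   # assumed default value of --args-separator
labels = sorted(label.strip() for label in raw_labels.split(args_separator))
print(labels)   # ['comedy', 'drama', 'thriller']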
Example No. 6
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None
    other_label = OTHER
    ensemble_ids = []
    multi_label_data = None
    multi_label_fields = []
    local_ensemble = None
    test_dataset = None
    datasets = None

    # variables from command-line options
    resume = args.resume_
    model_ids = args.model_ids_
    output = args.predictions
    dataset_fields = args.dataset_fields_

    # It is compulsory to have a description to publish either datasets or
    # models
    if (not args.description_ and
            (args.black_box or args.white_box or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --max-categories, it is compulsory to specify also the
    # objective_field
    if args.max_categories > 0 and args.objective_field is None:
        sys.exit("When --max-categories is used, you must also provide the"
                 " --objective field name or column number")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs is set, the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else
              [label.strip() for label in
               args.labels.split(args.args_separator)])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file)
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties, multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)

    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if datasets:
        dataset = datasets[0]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

    # If test_split is used, split the dataset into a training and a test
    # dataset according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        try:
            objective_id = fields.field_id(fields.objective_field)
        except ValueError, exc:
            sys.exit(exc)
        if pd.check_max_categories(fields.fields[objective_id]):
            distribution = pd.get_categories_distribution(dataset,
                                                          objective_id)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file, path=path, log=log,
                    other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     "  --max-categories")
Example No. 7
def compute_output(api,
                   args,
                   training_set,
                   test_set=None,
                   output=None,
                   objective_field=None,
                   description=None,
                   field_attributes=None,
                   types=None,
                   dataset_fields=None,
                   model_fields=None,
                   name=None,
                   training_set_header=True,
                   test_set_header=True,
                   model_ids=None,
                   votes_files=None,
                   resume=False,
                   fields_map=None,
                   test_field_attributes=None,
                   test_types=None):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None
    other_label = OTHER
    ensemble_ids = []
    multi_label_data = None
    multi_label_fields = []
    local_ensemble = None

    # It is compulsory to have a description to publish either datasets or
    # models
    if (not description
            and (args.black_box or args.white_box or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --max-categories, it is compulsory to specify also the
    # objective_field
    if args.max_categories > 0 and objective_field is None:
        sys.exit("When --max-categories is used, you must also provide the"
                 " --objective field name or column number")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, open the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs is set, the log files are cleared
        if args.clear_logs:
            try:
                open(log, 'w', 0).close()
            except IOError:
                pass

    # labels to be used in multi-label expansion
    labels = (map(str.strip, args.labels.split(','))
              if args.labels is not None else None)
    if labels is not None:
        labels = sorted([label.decode("utf-8") for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and training_set is not None:
        (training_set, multi_label_data) = ps.multi_label_expansion(
            training_set,
            training_set_header,
            objective_field,
            args,
            path,
            labels=labels,
            session_file=session_file)
        training_set_header = True
        objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels

    source, resume, csv_properties, fields = ps.source_processing(
        training_set,
        test_set,
        training_set_header,
        test_set_header,
        api,
        args,
        resume,
        name=name,
        description=description,
        csv_properties=csv_properties,
        field_attributes=field_attributes,
        types=types,
        multi_label_data=multi_label_data,
        session_file=session_file,
        path=path,
        log=log)
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(objective_field, labels,
                                                  multi_label_data, fields,
                                                  multi_label_fields)

    datasets, resume, csv_properties, fields = pd.dataset_processing(
        source,
        training_set,
        test_set,
        fields,
        objective_field,
        api,
        args,
        resume,
        name=name,
        description=description,
        dataset_fields=dataset_fields,
        multi_label_data=multi_label_data,
        csv_properties=csv_properties,
        session_file=session_file,
        path=path,
        log=log)

    if datasets:
        dataset = datasets[0]

    # If test_split is used, split the dataset into a training and a test
    # dataset according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset,
            api,
            args,
            resume,
            name=name,
            description=description,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        objective_id = fields.field_id(fields.objective_field)
        if pd.check_max_categories(fields.fields[objective_id]):
            distribution = pd.get_categories_distribution(
                dataset, objective_id)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset,
                    distribution,
                    fields,
                    args,
                    api,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log,
                    other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     "  --max-categories")

    # If multi-dataset flag is on, generate a new dataset from the given
    # list of datasets
    if args.multi_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets,
            api,
            args,
            resume,
            name=name,
            description=description,
            fields=fields,
            dataset_fields=dataset_fields,
            objective_field=objective_field,
            session_file=session_file,
            path=path,
            log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure
    if args.new_fields:
        dataset, resume = pd.create_new_dataset(
            dataset,
            api,
            args,
            resume,
            name=name,
            description=description,
            fields=fields,
            dataset_fields=dataset_fields,
            objective_field=objective_field,
            session_file=session_file,
            path=path,
            log=log)
        datasets[0] = dataset
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(objective_field, labels,
                                                  multi_label_data, fields,
                                                  multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label', other_label)

    models, model_ids, ensemble_ids, resume = pm.models_processing(
        datasets,
        models,
        model_ids,
        objective_field,
        fields,
        api,
        args,
        resume,
        name=name,
        description=description,
        model_fields=model_fields,
        session_file=session_file,
        path=path,
        log=log,
        labels=labels,
        multi_label_data=multi_label_data,
        other_label=other_label)
    if models:
        model = models[0]
        single_model = len(models) == 1

    # If multi-label flag is set and no training_set was provided, label
    # info is extracted from the user_metadata. If models belong to an
    # ensemble, the ensemble must be retrieved to get the user_metadata.
    if model and args.multi_label and multi_label_data is None:
        if len(ensemble_ids) > 0 and isinstance(ensemble_ids[0], dict):
            resource = ensemble_ids[0]
        elif belongs_to_ensemble(model):
            ensemble_id = get_ensemble_id(model)
            resource = r.get_ensemble(ensemble_id,
                                      api=api,
                                      verbosity=args.verbosity,
                                      session_file=session_file)
        else:
            resource = model
        multi_label_data = l.get_multi_label_data(resource)

    # We update the model's public state if needed
    if model:
        if isinstance(model, basestring):
            if not args.evaluate:
                query_string = MINIMUM_MODEL
            else:
                query_string = r.FIELDS_QS
            model = u.check_resource(model, api.get_model,
                                     query_string=query_string)
        if (args.black_box or args.white_box
                or r.shared_changed(args.shared, model)):
            model_args = {}
            if r.shared_changed(args.shared, model):
                model_args.update(shared=args.shared)
            if args.black_box or args.white_box:
                model_args.update(r.set_publish_model_args(args))
            if model_args:
                model = r.update_model(model,
                                       model_args,
                                       args,
                                       api=api,
                                       path=path,
                                       session_file=session_file)
                models[0] = model

    # We get the fields of the model if we haven't got
    # them yet and need them
    if model and not args.evaluate and test_set:
        # If more than one model, use the full field structure
        if (not single_model and not args.multi_label
                and belongs_to_ensemble(model)):
            if len(ensemble_ids) > 0:
                ensemble_id = ensemble_ids[0]
            else:
                ensemble_id = get_ensemble_id(model)
            local_ensemble = Ensemble(ensemble_id, api=api)
        fields, objective_field = pm.get_model_fields(
            model,
            csv_properties,
            args,
            single_model=single_model,
            multi_label_data=multi_label_data,
            local_ensemble=local_ensemble)

    # Fills in all_labels from user_metadata
    if args.multi_label and not all_labels:
        (objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(objective_field, labels,
                                                  multi_label_data, fields,
                                                  multi_label_fields)
    if model:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(model, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(model, 'other_label', other_label)
    # If predicting
    if models and has_test(args) and not args.evaluate:
        models_per_label = 1
        test_dataset = None

        if args.multi_label:
            # When prediction starts from existing models, the
            # multi_label_fields can be retrieved from the user_metadata
            # in the models
            if args.multi_label_fields is None and multi_label_fields:
                multi_label_field_names = [
                    field[1] for field in multi_label_fields
                ]
                args.multi_label_fields = ",".join(multi_label_field_names)
            test_set = ps.multi_label_expansion(test_set,
                                                test_set_header,
                                                objective_field,
                                                args,
                                                path,
                                                labels=labels,
                                                session_file=session_file,
                                                input_flag=True)[0]
            test_set_header = True

        # Remote predictions: predictions are computed as batch predictions
        # on bigml.com, except when the --no-batch flag is set or when
        # multi-label or max-categories are used
        if (args.remote and not args.no_batch and not args.multi_label
                and args.method not in [THRESHOLD_CODE, COMBINATION]):
            # create test source from file
            test_name = "%s - test" % name
            if args.test_source is None:
                (test_source, resume, csv_properties,
                 test_fields) = ps.test_source_processing(
                     test_set,
                     test_set_header,
                     api,
                     args,
                     resume,
                     name=test_name,
                     description=description,
                     field_attributes=test_field_attributes,
                     types=test_types,
                     session_file=session_file,
                     path=path,
                     log=log)
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id,
                                                 api.get_source)
            if args.test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(
                    test_name, description, args)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source,
                    "test",
                    dataset_args,
                    api,
                    args,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(args.test_dataset)
                test_dataset = api.check_resource(test_dataset_id,
                                                  api.get_dataset)

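            # The test fields structure is built with the objective field
            # cleared, since the test data is not required to include the
            # objective column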
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)

            batch_prediction_args = r.set_batch_prediction_args(
                name,
                description,
                args,
                fields=fields,
                dataset_fields=test_fields,
                fields_map=fields_map)

            remote_predict(model,
                           test_dataset,
                           batch_prediction_args,
                           args,
                           api,
                           resume,
                           prediction_file=output,
                           session_file=session_file,
                           path=path,
                           log=log)
        else:
            models_per_label = args.number_of_models
            if (args.multi_label and len(ensemble_ids) > 0
                    and args.number_of_models == 1):
                # use case where ensembles are read from a file
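                # integer division (Python 2), assuming every per-label
                # ensemble holds the same number of models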
                models_per_label = len(models) / len(ensemble_ids)
            predict(test_set,
                    test_set_header,
                    models,
                    fields,
                    output,
                    objective_field,
                    args,
                    api=api,
                    log=log,
                    resume=resume,
                    session_file=session_file,
                    labels=labels,
                    models_per_label=models_per_label,
                    other_label=other_label,
                    multi_label_data=multi_label_data)

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if votes_files:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$', r'\1',
                          votes_files[0]).replace("_", "/")
        try:
            model = u.check_resource(model_id, api.get_model)
        except ValueError as exception:
            sys.exit("Failed to get model %s: %s" % (model_id, str(exception)))

        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file, console=args.verbosity)
        combine_votes(votes_files, local_model.to_prediction, output,
                      args.method)
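
# A minimal standalone sketch (not part of bigmler) of the filename-to-id
# mapping used above when combining votes: a per-model predictions file name
# such as "model_0123456789abcdef01234567__predictions.csv" is reduced to the
# BigML resource id "model/0123456789abcdef01234567". The file name used here
# is hypothetical.
import re

votes_file = "dir1/model_0123456789abcdef01234567__predictions.csv"
model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$', r'\1',
                  votes_file).replace("_", "/")
assert model_id == "model/0123456789abcdef01234567"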
Example No. 8
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None
    other_label = OTHER
    ensemble_ids = []
    multi_label_data = None
    multi_label_fields = []
    # local_ensemble = None
    test_dataset = None
    datasets = None

    # variables from command-line options
    resume = args.resume_
    model_ids = args.model_ids_
    output = args.predictions
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else
              [label.strip() for label in
               args.labels.split(args.args_separator)])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file)
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties, multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if datasets:
        dataset = datasets[0]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset into a training and a test
    # dataset according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and whether a
    # max_categories limit has been set for its categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(dataset,
                                                          args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file, path=path, log=log,
                    other_label=other_label)
        else:
            sys.exit("The provided objective field is neither categorical "
                     "nor a full terms only text field. Only these fields "
                     "can be used with --max-categories")

    # If multi-dataset flag is on, generate a new dataset from the given
    # list of datasets
    if args.multi_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
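    # Note: "and" binds tighter than "or", so the filter clause below only
    # applies when no source is given, i.e. it reads as
    # (args.lisp_filter or args.json_filter) and not has_source(args)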
    if args.new_fields or (args.sample_rate != 1 and args.no_model) or \
            (args.lisp_filter or args.json_filter) and not has_source(args):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({'objective_field': args.objective_name_,
                               'objective_field_present': True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields, multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label',
                                   other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.model_file:
        # model is retrieved from the contents of the given local JSON file
        model, csv_properties, fields = u.read_local_resource(
            args.model_file,
            csv_properties=csv_properties)
        models = [model]
        model_ids = [model['resource']]
        ensemble_ids = []
    elif args.ensemble_file:
        # model is retrieved from the contents of the given local JSON file
        ensemble, csv_properties, fields = u.read_local_resource(
            args.ensemble_file,
            csv_properties=csv_properties)
        model_ids = ensemble['object']['models'][:]
        ensemble_ids = [ensemble['resource']]
        models = model_ids[:]
        model = retrieve_resource(bigml.api.BigML(storage='./storage'),
                                  models[0],
                                  query_string=r.ALL_FIELDS_QS)
        models[0] = model
    else:
        # model is retrieved from the remote object
        models, model_ids, ensemble_ids, resume = pm.models_processing(
            datasets, models, model_ids,
            api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log, labels=labels,
            multi_label_data=multi_label_data, other_label=other_label)

    if models:
        model = models[0]
        single_model = len(models) == 1
    # If multi-label flag is set and no training_set was provided, label
    # info is extracted from the user_metadata. If models belong to an
    # ensemble, the ensemble must be retrieved to get the user_metadata.
    if model and args.multi_label and multi_label_data is None:
        if len(ensemble_ids) > 0 and isinstance(ensemble_ids[0], dict):
            resource = ensemble_ids[0]
        elif belongs_to_ensemble(model):
            ensemble_id = get_ensemble_id(model)
            resource = r.get_ensemble(ensemble_id, api=api,
                                      verbosity=args.verbosity,
                                      session_file=session_file)
        else:
            resource = model
        multi_label_data = l.get_multi_label_data(resource)

    # We update the model's public state if needed
    if model:
        if (isinstance(model, basestring) or
                bigml.api.get_status(model)['code'] != bigml.api.FINISHED):
            if not args.evaluate and not a.has_train(args) and \
                    not a.has_test(args):
                query_string = MINIMUM_MODEL
            elif not args.test_header:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = "%s;%s" % (r.ALL_FIELDS_QS, r.FIELDS_QS)
            model = u.check_resource(model, api.get_model,
                                     query_string=query_string)
            models[0] = model
        if (args.black_box or args.white_box or
                (args.shared_flag and r.shared_changed(args.shared, model))):
            model_args = {}
            if args.shared_flag and r.shared_changed(args.shared, model):
                model_args.update(shared=args.shared)
            if args.black_box or args.white_box:
                model_args.update(r.set_publish_model_args(args))
            if model_args:
                model = r.update_model(model, model_args, args,
                                       api=api, path=path,
                                       session_file=session_file)
                models[0] = model

    # We get the fields of the model if we haven't got
    # them yet and need them
    if model and not args.evaluate and (a.has_test(args) or
                                        args.export_fields):
        # If more than one model, use the full field structure
        if (not single_model and not args.multi_label and
                belongs_to_ensemble(model)):
            if len(ensemble_ids) > 0:
                ensemble_id = ensemble_ids[0]
                args.ensemble_ids_ = ensemble_ids
            else:
                ensemble_id = get_ensemble_id(model)
        fields = pm.get_model_fields(
            model, csv_properties, args, single_model=single_model,
            multi_label_data=multi_label_data)
        # Free memory after getting fields
        # local_ensemble = None
        gc.collect()

    # Fills in all_labels from user_metadata
    if args.multi_label and not all_labels:
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field, labels,
                                                  multi_label_data, fields,
                                                  multi_label_fields)
    if model:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(model, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(model, 'other_label',
                                   other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    # If predicting
    if (models and (a.has_test(args) or (test_dataset and args.remote))
            and not args.evaluate):
        models_per_label = 1
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        if args.multi_label:
            # When prediction starts from existing models, the
            # multi_label_fields can be retrieved from the user_metadata
            # in the models
            if args.multi_label_fields is None and multi_label_fields:
                multi_label_field_names = [field[1] for field
                                           in multi_label_fields]
                args.multi_label_fields = ",".join(multi_label_field_names)
            test_set = ps.multi_label_expansion(
                args.test_set, args.test_header, args, path,
                labels=labels, session_file=session_file, input_flag=True)[0]
            test_set_header = True

        # Remote predictions: predictions are computed as batch predictions
        # on bigml.com, except when the --no-batch flag is set or when
        # multi-label or max-categories are used
        if (args.remote and not args.no_batch and not args.multi_label
                and args.method not in [THRESHOLD_CODE, COMBINATION]):
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, session_file=session_file,
                    path=path, log=log)

                (test_source, resume, csv_properties,
                 test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)

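            # When the batch prediction is also turned into a dataset, the
            # predicted column is renamed so that it does not clash with an
            # objective field of the same name in the test fields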
            if args.to_dataset and args.dataset_off:
                model = api.check_resource(model['resource'],
                                           query_string=r.ALL_FIELDS_QS)
                model_fields = Fields(model)
                objective_field_name = model_fields.field_name(
                    model_fields.objective_field)
                if objective_field_name in test_fields.fields_by_name.keys():
                    args.prediction_name = "%s (predicted)" % \
                        objective_field_name
            batch_prediction_args = r.set_batch_prediction_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_predict(model, test_dataset, batch_prediction_args, args,
                           api, resume, prediction_file=output,
                           session_file=session_file, path=path, log=log)
        else:
            models_per_label = args.number_of_models
            if (args.multi_label and len(ensemble_ids) > 0
                    and args.number_of_models == 1):
                # use case where ensembles are read from a file
                models_per_label = len(models) / len(ensemble_ids)
            predict(models, fields, args, api=api, log=log,
                    resume=resume, session_file=session_file, labels=labels,
                    models_per_label=models_per_label, other_label=other_label,
                    multi_label_data=multi_label_data)

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if args.votes_files_:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', args.votes_files_[0]).replace("_", "/")
        try:
            model = u.check_resource(model_id, api.get_model)
        except ValueError as exception:
            sys.exit("Failed to get model %s: %s" % (model_id, str(exception)))

        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)

        combine_votes(args.votes_files_, local_model.to_prediction,
                      output, method=args.method)
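
# A minimal standalone sketch (not part of bigmler) of the multi-label
# label-list parsing used in compute_output above: the --labels option is
# split on the args separator, stripped and sorted. The sample values used
# here are hypothetical.
labels_option = "comedy, drama ,thriller"
args_separator = ","
labels = (None if labels_option is None else
          [label.strip() for label in labels_option.split(args_separator)])
if labels is not None:
    labels = sorted(labels)
assert labels == ["comedy", "drama", "thriller"]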
Example No. 9
def compute_output(api, args):
    """ Creates a dataset using the `training_set`.

    """
    source = None
    dataset = None
    fields = None
    other_label = OTHER
    multi_label_data = None
    multi_label_fields = []
    datasets = None

    # variables from command-line options
    resume = args.resume_
    output = args.output
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)
    path = u.check_dir(output)

    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else
              [label.strip() for label in
               args.labels.split(args.args_separator)])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file)
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties, multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if source is not None:
        args.source = bigml.api.get_source_id(source)
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)

    if datasets:
        dataset = datasets[-1]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset into a training and a test
    # dataset according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and whether a
    # max_categories limit has been set for its categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(dataset,
                                                          args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file, path=path, log=log,
                    other_label=other_label)
        else:
            sys.exit("The provided objective field is neither categorical "
                     "nor a full terms only text field. Only these fields "
                     "can be used with --max-categories")

    # If any of the transformations is applied,
    # generate a new dataset from the given list of datasets
    if args.new_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
    if args.new_fields or args.sample_rate != 1 or \
            (args.lisp_filter or args.json_filter) and not has_source(args):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({'objective_field': args.objective_name_,
                               'objective_field_present': True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields, multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label',
                                   other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
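
# A minimal standalone sketch (not part of bigmler) of the "other" label
# fallback used in the --max-categories branch above: when the objective
# field has more categories than --max-categories allows, the remaining ones
# are grouped under a label that must not collide with an existing category.
# create_other_label below is a hypothetical stand-in for
# pd.create_other_label.
def create_other_label(categories, label):
    """Returns a label for the grouped remaining categories, decorated until
       it no longer collides with an existing category name.
    """
    while label in categories:
        label = "*%s*" % label
    return label

distribution = [("sports", 120), ("news", 90), ("other", 40), ("misc", 10)]
categories = [element[0] for element in distribution]
assert create_other_label(categories, "other") == "*other*"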