Example #1
def get_delete_list(args, api, query_list):
    """Building the list of resources to be deleted by adding the tag
       filtering user options to the
       previous ones for all the filtered resource types.

    """
    resource_selectors = filtered_selectors(args, api)
    delete_list = []

    if resource_selectors:
        for selector, api_call, filter_linked in resource_selectors:
            query_value = args.all_tag
            type_query_list = query_list[:]
            if args.all_tag or selector:
                if selector:
                    query_value = selector
                type_query_list.append("tags__in=%s" % query_value)
            if type_query_list and filter_linked:
                type_query_list.append(filter_linked)
            if type_query_list:
                status_code = STATUS_CODES[args.status]
                delete_list.extend(u.list_ids(api_call,
                                              ";".join(type_query_list),
                                              status_code=status_code))
    return delete_list
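
For context, the query string handed to each u.list_ids call is simply the
accumulated filters joined with semicolons. A minimal, self-contained sketch
of that composition (the filter and tag values here are illustrative, not
BigMLer's):

query_list = ["created__lt=2014-01-01"]    # shared filters, illustrative
selector = "my_tag"                        # stands in for a per-type tag option
type_query_list = query_list[:]            # copy so each type starts fresh
type_query_list.append("tags__in=%s" % selector)
print(";".join(type_query_list))
# created__lt=2014-01-01;tags__in=my_tag
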
Example #2
def get_delete_list(args, api, query_list):
    """Building the list of resources to be deleted by adding the tag
       filtering user options to the
       previous ones for all the filtered resource types.

    """
    resource_selectors = filtered_selectors(args, api)
    delete_list = []

    if resource_selectors:
        for selector, api_call, filter_linked in resource_selectors:
            query_value = args.all_tag
            type_query_list = query_list[:]
            if args.all_tag or selector:
                if selector:
                    query_value = selector
                type_query_list.append("tags__in=%s" % query_value)
            if type_query_list and filter_linked:
                type_query_list.append(filter_linked)
            if type_query_list:
                status_code = STATUS_CODES[args.status]
                delete_list.extend(
                    u.list_ids(api_call,
                               ";".join(type_query_list),
                               status_code=status_code))
    return delete_list
Example #3
def transform_dataset_options(command_args, api):
    """Retrieves the dataset ids from the different input options

    """
    try:
        dataset_ids = None
        command_args.dataset_ids = []
        # Parses dataset/id if provided.
        if command_args.datasets:
            dataset_ids = u.read_datasets(command_args.datasets)
            if len(dataset_ids) == 1:
                command_args.dataset = dataset_ids[0]
            command_args.dataset_ids = dataset_ids
    except Exception:
        pass

    # Reading test dataset ids is delayed until the moment of use, to ensure
    # that newly generated resource files can be used there too
    command_args.test_dataset_ids = []

    try:
        # Retrieve dataset/ids if provided.
        if command_args.dataset_tag:
            dataset_ids = (dataset_ids or []) + u.list_ids(
                api.list_datasets,
                "tags__in=%s" % command_args.dataset_tag)
            if len(dataset_ids) == 1:
                command_args.dataset = dataset_ids[0]
            command_args.dataset_ids = dataset_ids
    except Exception:
        pass
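
Note that list.extend mutates its list in place and returns None, which is
why the tag branch above builds a new list with (dataset_ids or []) +
u.list_ids(...) rather than assigning the result of extend. A two-line
demonstration:

ids = ["dataset/1"]
print(ids.extend(["dataset/2"]))  # None: extend mutates in place, returns nothing
print(ids)                        # ['dataset/1', 'dataset/2']
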
Example #4
def transform_dataset_options(command_args, api):
    """Retrieves the dataset ids from the different input options

    """
    dataset_ids = None
    command_args.dataset_ids = []
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_ids = u.read_datasets(command_args.datasets)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Reading test dataset ids is delayed until the moment of use, to ensure
    # that newly generated resource files can be used there too
    command_args.test_dataset_ids = []

    # Retrieve dataset/ids if provided.
    if command_args.dataset_tag:
        dataset_ids = (dataset_ids or []) + u.list_ids(
            api.list_datasets,
            "tags__in=%s" % command_args.dataset_tag)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids
Example #5
def delete_resources(command_args, api):
    """Deletes the resources selected by the user given options

    """
    if command_args.predictions is None:
        path = a.NOW
    else:
        path = u.check_dir(command_args.predictions)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    message = u.dated("Retrieving objects to delete.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    delete_list = []
    if command_args.delete_list:
        delete_list = [item.strip() for item in
                       command_args.delete_list.split(',')]
    if command_args.delete_file:
        if not os.path.exists(command_args.delete_file):
            sys.exit("File %s not found" % command_args.delete_file)
        delete_list.extend([line.strip() for line
                            in open(command_args.delete_file, "r")])

    resource_selectors = [
        (command_args.source_tag, api.list_sources),
        (command_args.dataset_tag, api.list_datasets),
        (command_args.model_tag, api.list_models),
        (command_args.prediction_tag, api.list_predictions),
        (command_args.evaluation_tag, api.list_evaluations),
        (command_args.ensemble_tag, api.list_ensembles),
        (command_args.batch_prediction_tag, api.list_batch_predictions)]

    for selector, api_call in resource_selectors:
        query_string = None
        if command_args.all_tag:
            query_string = "tags__in=%s" % command_args.all_tag
        elif selector:
            query_string = "tags__in=%s" % selector
        if query_string:
            delete_list.extend(u.list_ids(api_call, query_string))

    message = u.dated("Deleting objects.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    message = "\n".join(delete_list)
    u.log_message(message, log_file=session_file)
    u.delete(api, delete_list)
    if sys.platform == "win32" and sys.stdout.isatty():
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
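
This example targets Python 2 (map returns a list there, and unicode exists).
A hedged Python 3 sketch of the same delete-list assembly, with invented
helper and parameter names:

def build_delete_list(id_csv=None, id_file=None):
    # Assemble ids from a comma-separated string and/or a file of ids,
    # stripping whitespace and closing the file handle.
    delete_list = []
    if id_csv:
        delete_list = [item.strip() for item in id_csv.split(",")]
    if id_file:
        with open(id_file) as handle:
            delete_list.extend(line.strip() for line in handle)
    return delete_list

print(build_delete_list(id_csv="source/1, model/2"))
# ['source/1', 'model/2']

The with block also closes the file handle, which the original leaves to the
garbage collector.
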
Example #6
def transform_dataset_options(command_args, api):
    """Retrieves the dataset ids from the different input options

    """
    command_args.dataset_ids = []
    command_args.test_dataset_ids = []

    try:
        dataset_ids = None
        # Parses dataset/id if provided.
        if command_args.datasets:
            dataset_ids = u.read_datasets(command_args.datasets)
            if dataset_ids:
                command_args.dataset = dataset_ids[-1]
            command_args.dataset_ids = dataset_ids
    except Exception:
        pass

    # Reading test dataset ids is delayed until the moment of use, to ensure
    # that newly generated resource files can be used there too
    try:
        # Retrieve dataset/ids if provided.
        if command_args.dataset_tag:
            dataset_ids = (dataset_ids or []) + u.list_ids(
                api.list_datasets,
                "tags__in=%s" % command_args.dataset_tag)
            if dataset_ids:
                command_args.dataset = dataset_ids[-1]
            command_args.dataset_ids = dataset_ids
    except Exception:
        pass

    # If datasets_json is set, read its contents into the dataset ids
    try:
        if hasattr(command_args, 'datasets_json') \
                and command_args.datasets_json:
            command_args.dataset_ids = json.loads(command_args.datasets_json)
            if command_args.dataset_ids:
                command_args.dataset = command_args.dataset_ids[-1]
    except AttributeError:
        pass
Example #7
def transform_dataset_options(command_args, api):
    """Retrieves the dataset ids from the different input options

    """
    command_args.dataset_ids = []
    command_args.test_dataset_ids = []

    try:
        dataset_ids = None
        # Parses dataset/id if provided.
        if command_args.datasets:
            dataset_ids = u.read_datasets(command_args.datasets)
            if len(dataset_ids) > 0:
                command_args.dataset = dataset_ids[-1]
            command_args.dataset_ids = dataset_ids
    except Exception:
        pass

    # Reading test dataset ids is delayed until the moment of use, to ensure
    # that newly generated resource files can be used there too
    try:
        # Retrieve dataset/ids if provided.
        if command_args.dataset_tag:
            dataset_ids = (dataset_ids or []) + u.list_ids(
                api.list_datasets,
                "tags__in=%s" % command_args.dataset_tag)
            if len(dataset_ids) > 0:
                command_args.dataset = dataset_ids[-1]
            command_args.dataset_ids = dataset_ids
    except Exception:
        pass

    # If datasets_json is set, read its contents into the dataset ids
    try:
        if hasattr(command_args, 'datasets_json') \
                and command_args.datasets_json:
            command_args.dataset_ids = json.loads(command_args.datasets_json)
            if len(command_args.dataset_ids) > 0:
                command_args.dataset = command_args.dataset_ids[-1]
    except AttributeError:
        pass
Example #8
def transform_args(command_args, flags, api, user_defaults):
    """Transforms the formatted argument strings into structured arguments

    """
    # Parses attributes in json format if provided
    command_args.json_args = {}

    json_attribute_options = {
        'source': command_args.source_attributes,
        'dataset': command_args.dataset_attributes,
        'model': command_args.model_attributes,
        'ensemble': command_args.ensemble_attributes,
        'evaluation': command_args.evaluation_attributes,
        'batch_prediction': command_args.batch_prediction_attributes}

    for resource_type, attributes_file in json_attribute_options.items():
        if attributes_file is not None:
            command_args.json_args[resource_type] = u.read_json(
                attributes_file)
        else:
            command_args.json_args[resource_type] = {}

    # Parses dataset generators in json format if provided
    if command_args.new_fields:
        json_generators = u.read_json(command_args.new_fields)
        command_args.dataset_json_generators = json_generators
    else:
        command_args.dataset_json_generators = {}

    # Parses multi-dataset attributes in json such as field maps
    if command_args.multi_dataset_attributes:
        multi_dataset_json = u.read_json(command_args.multi_dataset_attributes)
        command_args.multi_dataset_json = multi_dataset_json
    else:
        command_args.multi_dataset_json = {}

    dataset_ids = None
    command_args.dataset_ids = []
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_ids = u.read_datasets(command_args.datasets)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    test_dataset_ids = None
    command_args.test_dataset_ids = []
    # Parses dataset/id if provided.
    if command_args.test_datasets:
        test_dataset_ids = u.read_datasets(command_args.test_datasets)
        command_args.test_dataset_ids = test_dataset_ids

    # Retrieve dataset/ids if provided.
    if command_args.dataset_tag:
        dataset_ids = (dataset_ids or []) + u.list_ids(
            api.list_datasets,
            "tags__in=%s" % command_args.dataset_tag)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    if (command_args.method and command_args.method != COMBINATION_LABEL and
            not (command_args.method in COMBINATION_WEIGHTS.keys())):
        command_args.method = 0
    else:
        combiner_methods = dict([[value, key]
                                for key, value in COMBINER_MAP.items()])
        combiner_methods[COMBINATION_LABEL] = COMBINATION
        command_args.method = combiner_methods.get(command_args.method, 0)

    # Checks missing_strategy
    if (command_args.missing_strategy and
            not (command_args.missing_strategy in MISSING_STRATEGIES.keys())):
        command_args.missing_strategy = 0
    else:
        command_args.missing_strategy = MISSING_STRATEGIES.get(
            command_args.missing_strategy, 0)

    # Adds replacement=True if creating ensemble and nothing is specified
    if (command_args.number_of_models > 1 and
            not command_args.replacement and
            not '--no-replacement' in flags and
            not 'replacement' in user_defaults and
            not '--no-randomize' in flags and
            not 'randomize' in user_defaults and
            not '--sample-rate' in flags and
            not 'sample_rate' in user_defaults):
        command_args.replacement = True

    # Old value for --prediction-info='full data' maps to 'full'
    if command_args.prediction_info == 'full data':
        print "WARNING: 'full data' is a deprecated value. Use 'full' instead"
        command_args.prediction_info = FULL_FORMAT

    # Parses class, weight pairs for objective weight
    if command_args.objective_weights:
        objective_weights = (
            u.read_objective_weights(command_args.objective_weights))
        command_args.objective_weights_json = objective_weights

    command_args.multi_label_fields_list = []
    if command_args.multi_label_fields is not None:
        multi_label_fields = command_args.multi_label_fields.strip()
        command_args.multi_label_fields_list = multi_label_fields.split(
            command_args.args_separator)

    # Sets shared_flag if --shared or --unshared has been used
    if '--shared' in flags or '--unshared' in flags:
        command_args.shared_flag = True
    else:
        command_args.shared_flag = False
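
The combined-votes check inverts COMBINER_MAP so that user-facing labels
resolve to numeric codes, falling back to 0 for unknown values. A
self-contained sketch with illustrative map contents (the real constants
live in BigMLer):

COMBINER_MAP = {0: "plurality", 1: "confidence weighted"}   # illustrative contents
COMBINATION_LABEL, COMBINATION = "combined", 2              # illustrative constants
combiner_methods = dict([[value, key] for key, value in COMBINER_MAP.items()])
combiner_methods[COMBINATION_LABEL] = COMBINATION
print(combiner_methods.get("confidence weighted", 0))  # 1
print(combiner_methods.get("no such label", 0))        # 0, the fallback
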
Example #9
def transform_args(command_args, flags, api, user_defaults):
    """Transforms the formatted argument strings into structured arguments

    """
    # Parses attributes in json format if provided
    command_args.json_args = {}

    for resource_type in RESOURCE_TYPES:
        attributes_file = getattr(command_args,
                                  "%s_attributes" % resource_type, None)
        if attributes_file is not None:
            command_args.json_args[resource_type] = u.read_json(
                attributes_file)
        else:
            command_args.json_args[resource_type] = {}

    # Parses dataset generators in json format if provided
    if command_args.new_fields:
        json_generators = u.read_json(command_args.new_fields)
        command_args.dataset_json_generators = json_generators
    else:
        command_args.dataset_json_generators = {}

    # Parses multi-dataset attributes in json such as field maps
    if command_args.multi_dataset_attributes:
        multi_dataset_json = u.read_json(command_args.multi_dataset_attributes)
        command_args.multi_dataset_json = multi_dataset_json
    else:
        command_args.multi_dataset_json = {}

    dataset_ids = None
    command_args.dataset_ids = []
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_ids = u.read_datasets(command_args.datasets)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Reading test dataset ids is delayed until the moment of use, to ensure
    # that newly generated resource files can be used there too
    command_args.test_dataset_ids = []

    # Retrieve dataset/ids if provided.
    if command_args.dataset_tag:
        dataset_ids = (dataset_ids or []) + u.list_ids(
            api.list_datasets,
            "tags__in=%s" % command_args.dataset_tag)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    try:
        if (command_args.method and command_args.method != COMBINATION_LABEL
                and not (command_args.method in COMBINATION_WEIGHTS.keys())):
            command_args.method = 0
        else:
            combiner_methods = dict(
                [[value, key] for key, value in COMBINER_MAP.items()])
            combiner_methods[COMBINATION_LABEL] = COMBINATION
            command_args.method = combiner_methods.get(command_args.method, 0)
    except AttributeError:
        pass

    # Checks missing_strategy
    try:
        if (command_args.missing_strategy and
                not (command_args.missing_strategy in
                     MISSING_STRATEGIES.keys())):
            command_args.missing_strategy = 0
        else:
            command_args.missing_strategy = MISSING_STRATEGIES.get(
                command_args.missing_strategy, 0)
    except AttributeError:
        pass

    # Adds replacement=True if creating ensemble and nothing is specified
    try:
        if (command_args.number_of_models > 1 and
                not command_args.replacement and
                not '--no-replacement' in flags and
                not 'replacement' in user_defaults and
                not '--no-randomize' in flags and
                not 'randomize' in user_defaults and
                not '--sample-rate' in flags and
                not 'sample_rate' in user_defaults):
            command_args.replacement = True
    except AttributeError:
        pass
    try:
        # Old value for --prediction-info='full data' maps to 'full'
        if command_args.prediction_info == 'full data':
            print ("WARNING: 'full data' is a deprecated value. Use"
                   " 'full' instead")
            command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass

    # Parses class, weight pairs for objective weight
    try:
        if command_args.objective_weights:
            objective_weights = (
                u.read_objective_weights(command_args.objective_weights))
            command_args.objective_weights_json = objective_weights
    except AttributeError:
        pass

    try:
        command_args.multi_label_fields_list = []
        if command_args.multi_label_fields is not None:
            multi_label_fields = command_args.multi_label_fields.strip()
            command_args.multi_label_fields_list = multi_label_fields.split(
                command_args.args_separator)
    except AttributeError:
        pass

    # Sets shared_flag if --shared or --unshared has been used
    if '--shared' in flags or '--unshared' in flags:
        command_args.shared_flag = True
    else:
        command_args.shared_flag = False


    # Set remote on if scoring a training dataset in bigmler anomaly
    try:
        if command_args.score:
            command_args.remote = True
            if not "--prediction-info" in flags:
                command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass


    command_args.has_models_ = (
        (hasattr(command_args, 'model') and command_args.model) or
        (hasattr(command_args, 'models') and command_args.models) or
        (hasattr(command_args, 'ensemble') and command_args.ensemble) or
        (hasattr(command_args, 'ensembles') and command_args.ensembles) or
        (hasattr(command_args, 'cluster') and command_args.cluster) or
        (hasattr(command_args, 'clusters') and command_args.clusters) or
        (hasattr(command_args, 'model_tag') and command_args.model_tag) or
        (hasattr(command_args, 'anomaly') and command_args.anomaly) or
        (hasattr(command_args, 'anomalies') and command_args.anomalies) or
        (hasattr(command_args, 'ensemble_tag')
         and command_args.ensemble_tag) or
        (hasattr(command_args, 'cluster_tag') and command_args.cluster_tag) or
        (hasattr(command_args, 'anomaly_tag') and command_args.anomaly_tag))

    command_args.has_datasets_ = (
        (hasattr(command_args, 'dataset') and command_args.dataset) or
        (hasattr(command_args, 'datasets') and command_args.datasets) or
        (hasattr(command_args, 'dataset_tag') and command_args.dataset_tag))


    command_args.has_test_datasets_ = (
        (hasattr(command_args, 'test_dataset') and
         command_args.test_dataset) or
        (hasattr(command_args, 'test_datasets') and
         command_args.test_datasets) or
        (hasattr(command_args, 'test_dataset_tag') and
         command_args.test_dataset_tag))
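
The has_models_ and has_datasets_ flags repeat the hasattr(x, 'attr') and
x.attr idiom; getattr with a default expresses the same truth test in a
single call. A minimal equivalence check:

class Args(object):
    pass

args = Args()
args.model = "model/123"
print(hasattr(args, "model") and args.model)   # 'model/123'
print(getattr(args, "model", None))            # 'model/123'
print(bool(getattr(args, "ensemble", None)))   # False: attribute missing
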
Example #10
def get_output_args(api, command_args, resume):
    """Returns the output args needed for the main bigmler computation process

    """
    try:
        if command_args.train_stdin:
            if command_args.test_stdin:
                sys.exit("The standard input can't be used both for training "
                         "and testing. Choose one of them")
            command_args.training_set = StringIO(sys.stdin.read())
        elif command_args.test_stdin:
            command_args.test_set = StringIO(sys.stdin.read())
    except AttributeError:
        pass

    try:
        if command_args.objective_field:
            objective = command_args.objective_field
            try:
                command_args.objective_field = int(objective)
            except ValueError:
                if not command_args.train_header:
                    sys.exit("The %s has been set as objective field but"
                             " the file has not been marked as containing"
                             " headers.\nPlease set the --train-header flag if"
                             " the file has headers or use a column number"
                             " to set the objective field." % objective)
    except AttributeError:
        pass

    command_args.resume_ = resume

    # Reads description if provided.
    try:
        if command_args.description:
            description_arg = u.read_description(command_args.description)
            command_args.description_ = description_arg
        else:
            command_args.description_ = DEFAULT_DESCRIPTION
    except AttributeError:
        pass

    # Parses fields if provided.
    try:
        if command_args.field_attributes:
            field_attributes_arg = (u.read_field_attributes(
                command_args.field_attributes))
            command_args.field_attributes_ = field_attributes_arg
        else:
            command_args.field_attributes_ = []
    except AttributeError:
        pass
    try:
        if command_args.test_field_attributes:
            field_attributes_arg = (u.read_field_attributes(
                command_args.test_field_attributes))
            command_args.test_field_attributes_ = field_attributes_arg
        else:
            command_args.test_field_attributes_ = []
    except AttributeError:
        pass

    # Parses types if provided.
    try:
        if command_args.types:
            types_arg = u.read_types(command_args.types)
            command_args.types_ = types_arg
        else:
            command_args.types_ = None
        if command_args.test_types:
            types_arg = u.read_types(command_args.test_types)
            command_args.test_types_ = types_arg
        else:
            command_args.test_types_ = None
    except AttributeError:
        pass

    # Parses dataset fields if provided.
    try:
        if command_args.dataset_fields:
            dataset_fields_arg = [
                field.strip() for field in command_args.dataset_fields.split(
                    command_args.args_separator)
            ]
            command_args.dataset_fields_ = dataset_fields_arg
        else:
            command_args.dataset_fields_ = []
    except AttributeError:
        pass

    # Parses model input fields if provided.
    try:
        if command_args.model_fields:
            model_fields_arg = [
                field.strip() for field in command_args.model_fields.split(
                    command_args.args_separator)
            ]
            command_args.model_fields_ = model_fields_arg
        else:
            command_args.model_fields_ = []
    except AttributeError:
        pass

    # Parses cluster input fields if provided.
    try:
        if command_args.cluster_fields:
            cluster_fields_arg = [
                field.strip() for field in command_args.cluster_fields.split(
                    command_args.args_separator)
            ]
            command_args.cluster_fields_ = cluster_fields_arg
        else:
            command_args.cluster_fields_ = []
    except AttributeError:
        pass

    # Parses anomaly input fields if provided.
    try:
        if command_args.anomaly_fields:
            anomaly_fields_arg = [
                field.strip() for field in command_args.anomaly_fields.split(
                    command_args.args_separator)
            ]
            command_args.anomaly_fields_ = anomaly_fields_arg
        else:
            command_args.anomaly_fields_ = []
    except AttributeError:
        pass

    model_ids = []
    try:
        # Parses model/ids if provided.
        if command_args.models:
            model_ids = u.read_resources(command_args.models)
        command_args.model_ids_ = model_ids
    except AttributeError:
        pass

    # Retrieve model/ids if provided.
    try:
        if command_args.model_tag:
            model_ids = (model_ids + u.list_ids(
                api.list_models, "tags__in=%s" % command_args.model_tag))
        command_args.model_ids_ = model_ids
    except AttributeError:
        pass

    # Reads votes files in the provided directories.
    try:
        if command_args.votes_dirs:
            dirs = [
                directory.strip() for directory in
                command_args.votes_dirs.split(command_args.args_separator)
            ]
            votes_path = os.path.dirname(command_args.predictions)
            votes_files = u.read_votes_files(dirs, votes_path)
            command_args.votes_files_ = votes_files
        else:
            command_args.votes_files_ = []
    except AttributeError:
        pass

    # Parses fields map if provided.
    try:
        if command_args.fields_map:
            fields_map_arg = u.read_fields_map(command_args.fields_map)
            command_args.fields_map_ = fields_map_arg
        else:
            command_args.fields_map_ = None
    except AttributeError:
        pass

    cluster_ids = []
    try:
        # Parses cluster/ids if provided.
        if command_args.clusters:
            cluster_ids = u.read_resources(command_args.clusters)
        command_args.cluster_ids_ = cluster_ids
    except AttributeError:
        pass

    # Retrieve cluster/ids if provided.
    try:
        if command_args.cluster_tag:
            cluster_ids = (cluster_ids + u.list_ids(
                api.list_clusters, "tags__in=%s" % command_args.cluster_tag))
        command_args.cluster_ids_ = cluster_ids
    except AttributeError:
        pass

    # Parses cluster names to generate datasets if provided
    try:
        if command_args.cluster_datasets:
            cluster_datasets_arg = [
                dataset.strip()
                for dataset in command_args.cluster_datasets.split(
                    command_args.args_separator)
            ]
            command_args.cluster_datasets_ = cluster_datasets_arg
        else:
            command_args.cluster_datasets_ = []
    except AttributeError:
        pass

    # Parses cluster names to generate models if provided
    try:
        if command_args.cluster_models:
            cluster_models_arg = [
                model.strip() for model in command_args.cluster_models.split(
                    command_args.args_separator)
            ]
            command_args.cluster_models_ = cluster_models_arg
        else:
            command_args.cluster_models_ = []
    except AttributeError:
        pass

    anomaly_ids = []
    try:
        # Parses anomaly/ids if provided.
        if command_args.anomalies:
            anomaly_ids = u.read_resources(command_args.anomalies)
        command_args.anomaly_ids_ = anomaly_ids
    except AttributeError:
        pass

    # Retrieve anomaly/ids if provided.
    try:
        if command_args.anomaly_tag:
            anomaly_ids = (anomaly_ids + u.list_ids(
                api.list_anomalies, "tags__in=%s" % command_args.anomaly_tag))
        command_args.anomaly_ids_ = anomaly_ids
    except AttributeError:
        pass

    sample_ids = []
    try:
        # Parses sample/ids if provided.
        if command_args.samples:
            sample_ids = u.read_resources(command_args.samples)
        command_args.sample_ids_ = sample_ids
    except AttributeError:
        pass

    # Retrieve sample/ids if provided.
    try:
        if command_args.sample_tag:
            sample_ids = (sample_ids + u.list_ids(
                api.list_samples, "tags__in=%s" % command_args.sample_tag))
        command_args.sample_ids_ = sample_ids
    except AttributeError:
        pass

    # Parses sample row fields
    try:
        if command_args.row_fields:
            row_fields_arg = [
                field.strip() for field in command_args.row_fields.split(
                    command_args.args_separator)
            ]
            command_args.row_fields_ = row_fields_arg
        else:
            command_args.row_fields_ = []
    except AttributeError:
        pass

    # Parses sample stat_fields
    try:
        if command_args.stat_fields:
            stat_fields_arg = [
                field.strip() for field in command_args.stat_fields.split(
                    command_args.args_separator)
            ]
            command_args.stat_fields_ = stat_fields_arg
        else:
            command_args.stat_fields_ = []
    except AttributeError:
        pass

    return {"api": api, "args": command_args}
Example #11
def delete_resources(command_args, api):
    """Deletes the resources selected by the user given options

    """
    if command_args.predictions is None:
        path = a.NOW
    else:
        path = u.check_dir(command_args.predictions)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    message = u.dated("Retrieving objects to delete.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    delete_list = []
    if command_args.delete_list:
        delete_list = [item.strip() for item in
                       command_args.delete_list.split(',')]
    if command_args.delete_file:
        if not os.path.exists(command_args.delete_file):
            sys.exit("File %s not found" % command_args.delete_file)
        delete_list.extend([line.strip() for line
                            in open(command_args.delete_file, "r")])

    resource_selectors = [
        ("source", command_args.source_tag, api.list_sources),
        ("dataset", command_args.dataset_tag, api.list_datasets),
        ("model", command_args.model_tag, api.list_models),
        ("prediction", command_args.prediction_tag, api.list_predictions),
        ("ensemble", command_args.ensemble_tag, api.list_ensembles),
        ("evaluation", command_args.evaluation_tag, api.list_evaluations),
        ("batchprediction", command_args.batch_prediction_tag,
         api.list_batch_predictions)]

    query_string = None
    if command_args.older_than:
        date_str = get_date(command_args.older_than, api)
        if date_str:
            query_string = "created__lt=%s" % date_str
        else:
            sys.exit("The --older-than and --newer-than flags only accept "
                     "integers (number of days), dates in YYYY-MM-DD format "
                     " and resource ids. Please, double-check your input.")

    if command_args.newer_than:
        date_str = get_date(command_args.newer_than, api)
        if date_str:
            if query_string is None:
                query_string = ""
            else:
                query_string += ";"
            query_string += "created__gt=%s" % date_str
        else:
            sys.exit("The --older-than and --newer-than flags only accept "
                     "integers (number of days), dates in YYYY-MM-DD format "
                     " and resource ids. Please, double-check your input.")

    if (any([selector[1] is not None for selector in resource_selectors]) or
            command_args.all_tag):
        if query_string is None:
            query_string = ""
        else:
            query_string += ";"
        query_value = command_args.all_tag
        for label, selector, api_call in resource_selectors:
            combined_query = query_string
            if not query_value and selector:
                query_value = selector
            if command_args.all_tag or selector:
                combined_query += "tags__in=%s" % query_value
                if label == "model":
                    # avoid ensemble's models
                    combined_query += ";ensemble=false"
                delete_list.extend(u.list_ids(api_call, combined_query))
    else:
        if query_string:
            for label, selector, api_call in resource_selectors:
                combined_query = query_string
                if label == "model":
                    # avoid ensemble's models
                    combined_query += ";ensemble=false"
                delete_list.extend(u.list_ids(api_call, combined_query))

    message = u.dated("Deleting objects.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    message = "\n".join(delete_list)
    u.log_message(message, log_file=session_file)
    u.delete(api, delete_list)
    if sys.platform == "win32" and sys.stdout.isatty():
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
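
The --older-than and --newer-than handling composes created__lt and
created__gt filters into one semicolon-joined query string. A toy
composition with illustrative dates:

older_than, newer_than = "2014-06-01", "2014-01-01"  # illustrative date strings
query_string = None
if older_than:
    query_string = "created__lt=%s" % older_than
if newer_than:
    query_string = ("" if query_string is None else query_string + ";")
    query_string += "created__gt=%s" % newer_than
print(query_string)
# created__lt=2014-06-01;created__gt=2014-01-01
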
Example #12
def get_output_args(api, train_stdin, test_stdin, command_args, resume):
    """Returns the output args needed for the main bigmler computation process

    """
    if train_stdin:
        if test_stdin:
            sys.exit("The standard input can't be used both for training and"
                     " testing. Choose one of them")
        command_args.training_set = StringIO.StringIO(sys.stdin.read())
    elif test_stdin:
        command_args.test_set = StringIO.StringIO(sys.stdin.read())

    if command_args.objective_field:
        objective = command_args.objective_field
        try:
            command_args.objective_field = int(objective)
        except ValueError:
            if not command_args.train_header:
                sys.exit("The %s has been set as objective field but"
                         " the file has not been marked as containing"
                         " headers.\nPlease set the --train-header flag if"
                         " the file has headers or use a column number"
                         " to set the objective field." % objective)

    output_args = {
        "api": api,
        "training_set": command_args.training_set,
        "test_set": command_args.test_set,
        "output": command_args.predictions,
        "objective_field": command_args.objective_field,
        "name": command_args.name,
        "training_set_header": command_args.train_header,
        "test_set_header": command_args.test_header,
        "args": command_args,
        "resume": resume,
    }

    # Reads description if provided.
    if command_args.description:
        description_arg = u.read_description(command_args.description)
        output_args.update(description=description_arg)
    else:
        output_args.update(description="Created using BigMLer")

    # Parses fields if provided.
    if command_args.field_attributes:
        field_attributes_arg = (u.read_field_attributes(
            command_args.field_attributes))
        output_args.update(field_attributes=field_attributes_arg)
    if command_args.test_field_attributes:
        field_attributes_arg = (u.read_field_attributes(
            command_args.test_field_attributes))
        output_args.update(test_field_attributes=field_attributes_arg)

    # Parses types if provided.
    if command_args.types:
        types_arg = u.read_types(command_args.types)
        output_args.update(types=types_arg)
    if command_args.test_types:
        types_arg = u.read_types(command_args.test_types)
        output_args.update(test_types=types_arg)

    # Parses dataset fields if provided.
    if command_args.dataset_fields:
        dataset_fields_arg = map(str.strip,
                                 command_args.dataset_fields.split(','))
        output_args.update(dataset_fields=dataset_fields_arg)

    # Parses model input fields if provided.
    if command_args.model_fields:
        model_fields_arg = map(str.strip, command_args.model_fields.split(','))
        output_args.update(model_fields=model_fields_arg)

    model_ids = []
    # Parses model/ids if provided.
    if command_args.models:
        model_ids = u.read_resources(command_args.models)
        output_args.update(model_ids=model_ids)

    # Retrieve model/ids if provided.
    if command_args.model_tag:
        model_ids = (model_ids + u.list_ids(
            api.list_models, "tags__in=%s" % command_args.model_tag))
        output_args.update(model_ids=model_ids)

    # Reads votes files in the provided directories.
    if command_args.votes_dirs:
        dirs = map(str.strip, command_args.votes_dirs.split(','))
        votes_path = os.path.dirname(command_args.predictions)
        votes_files = u.read_votes_files(dirs, votes_path)
        output_args.update(votes_files=votes_files)

    # Parses fields map if provided.
    if command_args.fields_map:
        fields_map_arg = u.read_fields_map(command_args.fields_map)
        output_args.update(fields_map=fields_map_arg)

    return output_args
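
Wrapping sys.stdin.read() in a StringIO object gives the rest of the
pipeline a file-like training or test source. A small demonstration that
needs no real standard input (it uses Python 3's io.StringIO, whereas the
example above imports Python 2's StringIO module):

from io import StringIO

training_set = StringIO("a,b\n1,2\n")   # stands in for StringIO(sys.stdin.read())
print(training_set.readline().strip())  # a,b
print(training_set.read())              # 1,2  (the remaining rows)
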
Example #13
def transform_args(command_args, flags, api):
    """Transforms the formatted argument strings into structured arguments

    """
    attribute_args(command_args)

    # Parses dataset generators in json format if provided
    try:
        if command_args.new_fields:
            json_generators = u.read_json(command_args.new_fields)
            command_args.dataset_json_generators = json_generators
        else:
            command_args.dataset_json_generators = {}
    except AttributeError:
        pass

    # Parses multi-dataset attributes in json such as field maps
    try:
        if command_args.multi_dataset_attributes:
            multi_dataset_json = u.read_json(
                command_args.multi_dataset_attributes)
            command_args.multi_dataset_json = multi_dataset_json
        else:
            command_args.multi_dataset_json = {}
    except AttributeError:
        pass

    transform_dataset_options(command_args, api)

    script_ids = None
    command_args.script_ids = []
    # Parses script/id if provided.
    try:
        if command_args.scripts:
            script_ids = u.read_resources(command_args.scripts)
            if len(script_ids) == 1:
                command_args.script = script_ids[0]
            command_args.script_ids = script_ids
    except AttributeError:
        pass

    # Retrieve script/ids if provided.
    try:
        if command_args.script_tag:
            script_ids = (script_ids or []) + u.list_ids(
                api.list_scripts,
                "tags__in=%s" % command_args.script_tag)
            if len(script_ids) == 1:
                command_args.script = script_ids[0]
            command_args.script_ids = script_ids
    except AttributeError:
        pass

    # Reads a json filter if provided.
    try:
        if command_args.json_filter:
            json_filter = u.read_json_filter(command_args.json_filter)
            command_args.json_filter = json_filter
    except AttributeError:
        pass

    # Reads a lisp filter if provided.
    try:
        if command_args.lisp_filter:
            lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
            command_args.lisp_filter = lisp_filter
    except AttributeError:
        pass

    # Adds default tags unless it is requested not to do so.
    try:
        if command_args.no_tag:
            command_args.tag.append('BigMLer')
            command_args.tag.append('BigMLer_%s' % NOW)
    except AttributeError:
        pass

    # Checks combined votes method
    try:
        if (command_args.method and command_args.method != COMBINATION_LABEL
                and not command_args.method in COMBINATION_WEIGHTS.keys()):
            command_args.method = 0
        else:
            combiner_methods = dict(
                [[value, key] for key, value in COMBINER_MAP.items()])
            combiner_methods[COMBINATION_LABEL] = COMBINATION
            command_args.method = combiner_methods.get(command_args.method, 0)
    except AttributeError:
        pass

    # Checks missing_strategy
    try:
        if (command_args.missing_strategy and
                not (command_args.missing_strategy in
                     MISSING_STRATEGIES.keys())):
            command_args.missing_strategy = 0
        else:
            command_args.missing_strategy = MISSING_STRATEGIES.get(
                command_args.missing_strategy, 0)
    except AttributeError:
        pass

    try:
        # Old value for --prediction-info='full data' maps to 'full'
        if command_args.prediction_info == 'full data':
            print ("WARNING: 'full data' is a deprecated value. Use"
                   " 'full' instead")
            command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass

    # Parses class, weight pairs for objective weight
    try:
        if command_args.objective_weights:
            objective_weights = (
                u.read_objective_weights(command_args.objective_weights))
            command_args.objective_weights_json = objective_weights
    except AttributeError:
        pass

    try:
        command_args.multi_label_fields_list = []
        if command_args.multi_label_fields is not None:
            multi_label_fields = command_args.multi_label_fields.strip()
            command_args.multi_label_fields_list = multi_label_fields.split(
                command_args.args_separator)
    except AttributeError:
        pass

    # Sets shared_flag if --shared or --unshared has been used
    command_args.shared_flag = '--shared' in flags or '--unshared' in flags

    # Set remote on if scoring a training dataset in bigmler anomaly
    try:
        if command_args.score:
            command_args.remote = True
            if not "--prediction-info" in flags:
                command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass

    command_args.has_supervised_ = (
        (hasattr(command_args, 'model') and command_args.model) or
        (hasattr(command_args, 'models') and command_args.models) or
        (hasattr(command_args, 'ensemble') and command_args.ensemble) or
        (hasattr(command_args, 'ensembles') and command_args.ensembles) or
        (hasattr(command_args, 'model_tag') and command_args.model_tag) or
        (hasattr(command_args, 'logistic_regression') and
         command_args.logistic_regression) or
        (hasattr(command_args, 'logistic_regressions') and
         command_args.logistic_regressions) or
        (hasattr(command_args, 'logistic_regression_tag') and
         command_args.logistic_regression_tag) or
        (hasattr(command_args, 'deepnet') and
         command_args.deepnet) or
        (hasattr(command_args, 'deepnets') and
         command_args.deepnets) or
        (hasattr(command_args, 'deepnet_tag') and
         command_args.deepnet_tag) or
        (hasattr(command_args, 'ensemble_tag')
         and command_args.ensemble_tag))

    command_args.has_models_ = (
        command_args.has_supervised_ or
        (hasattr(command_args, 'cluster') and command_args.cluster) or
        (hasattr(command_args, 'clusters') and command_args.clusters) or
        (hasattr(command_args, 'anomaly') and command_args.anomaly) or
        (hasattr(command_args, 'anomalies') and command_args.anomalies) or
        (hasattr(command_args, 'cluster_tag') and command_args.cluster_tag) or
        (hasattr(command_args, 'anomaly_tag') and command_args.anomaly_tag))

    command_args.has_datasets_ = (
        (hasattr(command_args, 'dataset') and command_args.dataset) or
        (hasattr(command_args, 'datasets') and command_args.datasets) or
        (hasattr(command_args, 'dataset_tag') and command_args.dataset_tag))


    command_args.has_test_datasets_ = (
        (hasattr(command_args, 'test_dataset') and
         command_args.test_dataset) or
        (hasattr(command_args, 'test_datasets') and
         command_args.test_datasets) or
        (hasattr(command_args, 'test_dataset_tag') and
         command_args.test_dataset_tag))
Example #14
def get_output_args(api, command_args, resume):
    """Returns the output args needed for the main bigmler computation process

    """
    try:
        if command_args.train_stdin:
            if command_args.test_stdin:
                sys.exit("The standard input can't be used both for training "
                         "and testing. Choose one of them")
            command_args.training_set = StringIO(sys.stdin.read())
        elif command_args.test_stdin:
            command_args.test_set = StringIO(sys.stdin.read())
    except AttributeError:
        pass

    try:
        if command_args.objective_field:
            objective = command_args.objective_field
            try:
                command_args.objective_field = int(objective)
            except ValueError:
                if not command_args.train_header:
                    sys.exit("The %s has been set as objective field but"
                             " the file has not been marked as containing"
                             " headers.\nPlease set the --train-header flag if"
                             " the file has headers or use a column number"
                             " to set the objective field." % objective)
    except AttributeError:
        pass

    command_args.resume_ = resume
    command_args.predictions = command_args.output

    # Reads description if provided.
    try:
        if command_args.description:
            description_arg = u.read_description(command_args.description)
            command_args.description_ = description_arg
        else:
            command_args.description_ = DEFAULT_DESCRIPTION
    except AttributeError:
        pass

    # Parses fields if provided.
    try:
        if command_args.field_attributes:
            field_attributes_arg = (
                u.read_field_attributes(command_args.field_attributes))
            command_args.field_attributes_ = field_attributes_arg
        else:
            command_args.field_attributes_ = []
    except AttributeError:
        pass
    try:
        if command_args.test_field_attributes:
            field_attributes_arg = (
                u.read_field_attributes(command_args.test_field_attributes))
            command_args.test_field_attributes_ = field_attributes_arg
        else:
            command_args.test_field_attributes_ = []
    except AttributeError:
        pass

    # Parses types if provided.
    try:
        if command_args.types:
            types_arg = u.read_types(command_args.types)
            command_args.types_ = types_arg
        else:
            command_args.types_ = None
        if command_args.test_types:
            types_arg = u.read_types(command_args.test_types)
            command_args.test_types_ = types_arg
        else:
            command_args.test_types_ = None
    except AttributeError:
        pass


    # Parses dataset fields if provided.
    try:
        if command_args.dataset_fields:
            dataset_fields_arg = [
                field.strip() for field in command_args.dataset_fields.split(
                    command_args.args_separator)]
            command_args.dataset_fields_ = dataset_fields_arg
        else:
            command_args.dataset_fields_ = []
    except AttributeError:
        pass

    # Parses model input fields if provided.
    try:
        if command_args.model_fields:
            model_fields_arg = [
                field.strip() for field in command_args.model_fields.split(
                    command_args.args_separator)]
            command_args.model_fields_ = model_fields_arg
        else:
            command_args.model_fields_ = []
    except AttributeError:
        pass

    # Parses cluster input fields if provided.
    try:
        if command_args.cluster_fields:
            cluster_fields_arg = [
                field.strip() for field in command_args.cluster_fields.split(
                    command_args.args_separator)]
            command_args.cluster_fields_ = cluster_fields_arg
        else:
            command_args.cluster_fields_ = []
    except AttributeError:
        pass


    # Parses association input fields if provided.
    try:
        if command_args.association_fields:
            association_fields_arg = [
                field.strip() for field in
                command_args.association_fields.split(
                    command_args.args_separator)]
            command_args.association_fields_ = association_fields_arg
        else:
            command_args.association_fields_ = []
    except AttributeError:
        pass

    # Parses anomaly input fields if provided.
    try:
        if command_args.anomaly_fields:
            anomaly_fields_arg = [
                field.strip() for field in command_args.anomaly_fields.split(
                    command_args.args_separator)]
            command_args.anomaly_fields_ = anomaly_fields_arg
        else:
            command_args.anomaly_fields_ = []
    except AttributeError:
        pass

    # Parses logistic regression input fields if provided.
    try:
        if command_args.logistic_fields:
            logistic_fields_arg = [
                field.strip() for field in command_args.logistic_fields.split(
                    command_args.args_separator)]
            command_args.logistic_fields_ = logistic_fields_arg
        else:
            command_args.logistic_fields_ = []
    except AttributeError:
        pass


    # Parses deepnet input fields if provided.
    try:
        if command_args.deepnet_fields:
            deepnet_fields_arg = [
                field.strip() for field in command_args.deepnet_fields.split(
                    command_args.args_separator)]
            command_args.deepnet_fields_ = deepnet_fields_arg
        else:
            command_args.deepnet_fields_ = []
    except AttributeError:
        pass

    # Parses topic model fields if provided.
    try:
        if command_args.topic_fields:
            topic_fields_arg = [
                field.strip() for field in command_args.topic_fields.split(
                    command_args.args_separator)]
            command_args.topic_model_fields_ = topic_fields_arg
        else:
            command_args.topic_model_fields_ = []
    except AttributeError:
        pass

    # Parses field_codings for deepnet
    try:
        if command_args.field_codings:
            command_args.field_codings_ = u.read_json(
                command_args.field_codings)
        else:
            command_args.field_codings_ = []
    except AttributeError:
        pass

    # Parses imports for scripts and libraries.
    try:
        if command_args.imports:
            imports_arg = [
                field.strip() for field in command_args.imports.split(
                    command_args.args_separator)]
            command_args.imports_ = imports_arg
        else:
            command_args.imports_ = []
    except AttributeError:
        pass

    # Parses objective fields for time-series.
    try:
        if command_args.objectives:
            objective_fields_arg = [
                field.strip() for field in command_args.objectives.split(
                    command_args.args_separator)]
            command_args.objective_fields_ = objective_fields_arg
        else:
            command_args.objective_fields_ = []
    except AttributeError:
        pass

    # Parses range.
    try:
        if command_args.range:
            range_arg = [
                value.strip() for value in command_args.range.split(
                    command_args.args_separator)]
            command_args.range_ = range_arg
        else:
            command_args.range_ = []
    except AttributeError:
        pass

    # Parses parameters for scripts.
    try:
        if command_args.declare_inputs:
            command_args.parameters_ = u.read_json(command_args.declare_inputs)
        else:
            command_args.parameters_ = []
    except AttributeError:
        pass

    # Parses creation_defaults for executions.
    try:
        if command_args.creation_defaults:
            command_args.creation_defaults_ = u.read_json(
                command_args.creation_defaults)
        else:
            command_args.creation_defaults_ = {}
    except AttributeError:
        pass

    # Parses arguments for executions.
    try:
        if command_args.inputs:
            command_args.arguments_ = u.read_json(command_args.inputs)
        else:
            command_args.arguments_ = []
    except AttributeError:
        pass

    # Parses input maps for executions.
    try:
        if command_args.input_maps:
            command_args.input_maps_ = u.read_json(command_args.input_maps)
        else:
            command_args.input_maps_ = []
    except AttributeError:
        pass

    # Parses outputs for executions.
    try:
        if command_args.outputs:
            command_args.outputs_ = u.read_json(command_args.outputs)
        else:
            command_args.outputs_ = []
    except AttributeError:
        pass

    # Parses outputs for scripts.
    try:
        if command_args.declare_outputs:
            command_args.declare_outputs_ = \
                u.read_json(command_args.declare_outputs)
        else:
            command_args.declare_outputs_ = []
    except AttributeError:
        pass

    model_ids = []
    try:
        # Parses model/ids if provided.
        if command_args.models:
            model_ids = u.read_resources(command_args.models)
        command_args.model_ids_ = model_ids
    except AttributeError:
        pass

    # Retrieve model/ids if provided.
    try:
        if command_args.model_tag:
            model_ids = (model_ids +
                         u.list_ids(api.list_models,
                                    "tags__in=%s" % command_args.model_tag))
        command_args.model_ids_ = model_ids
    except AttributeError:
        pass

    # Reads votes files in the provided directories.
    try:
        if command_args.votes_dirs:
            dirs = [
                directory.strip() for directory in
                command_args.votes_dirs.split(
                    command_args.args_separator)]
            votes_path = os.path.dirname(command_args.predictions)
            votes_files = u.read_votes_files(dirs, votes_path)
            command_args.votes_files_ = votes_files
        else:
            command_args.votes_files_ = []
    except AttributeError:
        pass

    # Parses fields map if provided.
    try:
        if command_args.fields_map:
            fields_map_arg = u.read_fields_map(command_args.fields_map)
            command_args.fields_map_ = fields_map_arg
        else:
            command_args.fields_map_ = None
    except AttributeError:
        pass

    cluster_ids = []
    try:
        # Parses cluster/ids if provided.
        if command_args.clusters:
            cluster_ids = u.read_resources(command_args.clusters)
        command_args.cluster_ids_ = cluster_ids
    except AttributeError:
        pass

    # Retrieve cluster/ids if provided.
    try:
        if command_args.cluster_tag:
            cluster_ids = (cluster_ids +
                           u.list_ids(api.list_clusters,
                                      "tags__in=%s" %
                                      command_args.cluster_tag))
        command_args.cluster_ids_ = cluster_ids
    except AttributeError:
        pass

    association_ids = []
    try:
        # Parses association/ids if provided.
        if command_args.associations:
            association_ids = u.read_resources(command_args.associations)
        command_args.association_ids_ = association_ids
    except AttributeError:
        pass

    # Retrieve association/ids if provided.
    try:
        if command_args.association_tag:
            association_ids = (association_ids +
                               u.list_ids(api.list_associations,
                                          "tags__in=%s" %
                                          command_args.association_tag))
        command_args.association_ids_ = association_ids
    except AttributeError:
        pass

    logistic_regression_ids = []
    try:
        # Parses logisticregression/ids if provided.
        if command_args.logistic_regressions:
            logistic_regression_ids = u.read_resources(
                command_args.logistic_regressions)
        command_args.logistic_regression_ids_ = logistic_regression_ids
    except AttributeError:
        pass

    # Retrieve logisticregression/ids if provided.
    try:
        if command_args.logistic_regression_tag:
            logistic_regression_ids = (
                logistic_regression_ids +
                u.list_ids(api.list_logistic_regressions,
                           "tags__in=%s" %
                           command_args.logistic_regression_tag))
        command_args.logistic_regression_ids_ = logistic_regression_ids
    except AttributeError:
        pass

    deepnet_ids = []
    try:
        # Parses deepnet/ids if provided.
        if command_args.deepnets:
            deepnet_ids = u.read_resources(command_args.deepnets)
        command_args.deepnet_ids_ = deepnet_ids
    except AttributeError:
        pass

    # Retrieve deepnet/ids if provided.
    try:
        if command_args.deepnet_tag:
            deepnet_ids = (deepnet_ids +
                           u.list_ids(api.list_deepnets,
                                      "tags__in=%s" % command_args.deepnet_tag))
        command_args.deepnet_ids_ = deepnet_ids
    except AttributeError:
        pass

    topic_model_ids = []
    try:
        # Parses topicmodel/ids if provided.
        if command_args.topic_models:
            topic_model_ids = u.read_resources(command_args.topic_models)
        command_args.topic_model_ids_ = topic_model_ids
    except AttributeError:
        pass

    # Retrieve topicmodel/ids if provided.
    try:
        if command_args.topic_model_tag:
            topic_model_ids = (topic_model_ids +
                               u.list_ids(api.list_topic_models,
                                          "tags__in=%s" %
                                          command_args.topic_model_tag))
        command_args.topic_model_ids_ = topic_model_ids
    except AttributeError:
        pass
    time_series_ids = []
    try:
        # Parses timeseries/ids if provided.
        if command_args.time_series_set:
            time_series_ids = u.read_resources(command_args.time_series)
        command_args.time_series_ids_ = time_series_ids
    except AttributeError:
        pass

    # Retrieve timeseries/ids if provided.
    try:
        if command_args.time_series_tag:
            time_series_ids = (time_series_ids +
                               u.list_ids(api.list_time_series,
                                          "tags__in=%s" %
                                          command_args.time_series_tag))
        command_args.time_series_ids_ = time_series_ids
    except AttributeError:
        pass

    # Parses cluster names to generate datasets if provided
    try:
        if command_args.cluster_datasets:
            cluster_datasets_arg = [
                dataset.strip() for dataset in
                command_args.cluster_datasets.split(
                    command_args.args_separator)]
            command_args.cluster_datasets_ = cluster_datasets_arg
        else:
            command_args.cluster_datasets_ = []
    except AttributeError:
        pass

    # Parses cluster names to generate models if provided
    try:
        if command_args.cluster_models:
            cluster_models_arg = [
                model.strip() for model in
                command_args.cluster_models.split(
                    command_args.args_separator)]
            command_args.cluster_models_ = cluster_models_arg
        else:
            command_args.cluster_models_ = []
    except AttributeError:
        pass

    # Parses summary_fields to exclude from the clustering algorithm
    try:
        if command_args.summary_fields:
            summary_fields_arg = [
                field.strip() for field in
                command_args.summary_fields.split(
                    command_args.args_separator)]
            command_args.summary_fields_ = summary_fields_arg
        else:
            command_args.summary_fields_ = []
    except AttributeError:
        pass

    anomaly_ids = []
    try:
        # Parses anomaly/ids if provided.
        if command_args.anomalies:
            anomaly_ids = u.read_resources(command_args.anomalies)
        command_args.anomaly_ids_ = anomaly_ids
    except AttributeError:
        pass

    # Retrieve anomaly/ids if provided.
    try:
        if command_args.anomaly_tag:
            anomaly_ids = (anomaly_ids +
                           u.list_ids(api.list_anomalies,
                                      "tags__in=%s" %
                                      command_args.anomaly_tag))
        command_args.anomaly_ids_ = anomaly_ids
    except AttributeError:
        pass

    sample_ids = []
    try:
        # Parses sample/ids if provided.
        if command_args.samples:
            sample_ids = u.read_resources(command_args.samples)
        command_args.sample_ids_ = sample_ids
    except AttributeError:
        pass

    # Retrieve sample/ids if provided.
    try:
        if command_args.sample_tag:
            sample_ids = (
                sample_ids + u.list_ids(api.list_samples,
                                        "tags__in=%s" %
                                        command_args.sample_tag))
        command_args.sample_ids_ = sample_ids
    except AttributeError:
        pass

    # Parses sample row fields
    try:
        if command_args.row_fields:
            row_fields_arg = [field.strip() for field in
                              command_args.row_fields.split(
                                  command_args.args_separator)]
            command_args.row_fields_ = row_fields_arg
        else:
            command_args.row_fields_ = []
    except AttributeError:
        pass

    # Parses sample stat_fields
    try:
        if command_args.stat_fields:
            stat_fields_arg = [field.strip() for field in
                               command_args.stat_fields.split(
                                   command_args.args_separator)]
            command_args.stat_fields_ = stat_fields_arg
        else:
            command_args.stat_fields_ = []
    except AttributeError:
        pass

    # if boosting arguments are used, set on boosting
    try:
        if command_args.iterations or command_args.learning_rate \
                or command_args.early_holdout:
            command_args.boosting = True
    except AttributeError:
        pass

    # Extracts the imports from the JSON metadata file
    try:
        if command_args.embedded_imports:
            command_args.embedded_imports_ = u.read_resources(
                command_args.embedded_imports)
        else:
            command_args.embedded_imports_ = []
    except AttributeError:
        pass

    # Parses hidden_layers for deepnets.
    try:
        if command_args.hidden_layers:
            command_args.hidden_layers_ = u.read_json(
                command_args.hidden_layers)
        else:
            command_args.hidden_layers_ = []
    except AttributeError:
        pass

    # Parses operating_point for predictions.
    try:
        if command_args.operating_point:
            command_args.operating_point_ = u.read_json(
                command_args.operating_point)
        else:
            command_args.operating_point_ = []
    except AttributeError:
        pass

    return {"api": api, "args": command_args}
Exemplo n.º 15
0
def models_processing(datasets,
                      models,
                      model_ids,
                      api,
                      args,
                      resume,
                      fields=None,
                      session_file=None,
                      path=None,
                      log=None,
                      labels=None,
                      multi_label_data=None,
                      other_label=None):
    """Creates or retrieves models from the input data

    """
    ensemble_ids = []

    # If we have a dataset but not a model, we create the model if the
    # no_model flag hasn't been set.
    if datasets and not (args.has_models_ or args.no_model):
        dataset = datasets[0]
        model_ids = []
        models = []
        if args.multi_label:
            # If --number-of-models is not set or is 1, and no boosting
            # options are on, create one model per label. Otherwise,
            # create one ensemble per label with the required number
            # of models.
            if args.number_of_models < 2 and not args.boosting:
                models, model_ids, resume = model_per_label(
                    labels,
                    datasets,
                    api,
                    args,
                    resume,
                    fields=fields,
                    multi_label_data=multi_label_data,
                    session_file=session_file,
                    path=path,
                    log=log)
            else:
                (ensembles, ensemble_ids, models, model_ids,
                 resume) = ensemble_per_label(
                     labels,
                     dataset,
                     api,
                     args,
                     resume,
                     fields=fields,
                     multi_label_data=multi_label_data,
                     session_file=session_file,
                     path=path,
                     log=log)

        elif args.number_of_models > 1 or args.boosting:
            ensembles = []
            # Ensembles of models
            (ensembles, ensemble_ids, models, model_ids,
             resume) = ensemble_processing(datasets,
                                           api,
                                           args,
                                           resume,
                                           fields=fields,
                                           session_file=session_file,
                                           path=path,
                                           log=log)
            ensemble = ensembles[0]
            args.ensemble = bigml.api.get_ensemble_id(ensemble)

        else:
            # Set of partial datasets created setting args.max_categories
            if len(datasets) > 1 and args.max_categories:
                args.number_of_models = len(datasets)
            if ((args.test_datasets and args.evaluate)
                    or (args.datasets and args.evaluate and args.dataset_off)):
                args.number_of_models = len(args.dataset_ids)
            # Cross-validation case: we create 2 * n models to be validated
            # holding out an n% of data
            if args.cross_validation_rate > 0:
                if args.number_of_evaluations > 0:
                    args.number_of_models = args.number_of_evaluations
                else:
                    args.number_of_models = int(MONTECARLO_FACTOR *
                                                args.cross_validation_rate)
            if resume:
                resume, model_ids = c.checkpoint(c.are_models_created,
                                                 path,
                                                 args.number_of_models,
                                                 debug=args.debug)
                if not resume:
                    message = u.dated(
                        "Found %s models out of %s. Resuming.\n" %
                        (len(model_ids), args.number_of_models))
                    u.log_message(message,
                                  log_file=session_file,
                                  console=args.verbosity)

                models = model_ids
                args.number_of_models -= len(model_ids)
            model_args = r.set_model_args(args,
                                          fields=fields,
                                          objective_id=args.objective_id_,
                                          model_fields=args.model_fields_,
                                          other_label=other_label)
            models, model_ids = r.create_models(datasets, models, model_args,
                                                args, api, path, session_file,
                                                log)
    # If a model is provided, we use it.
    elif args.model:
        model_ids = [args.model]
        models = model_ids[:]

    elif args.models or args.model_tag:
        models = model_ids[:]

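    # If a single ensemble is provided, it is tracked in ensemble_ids
    # and, unless evaluating, expanded into its member model ids.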
    if args.ensemble:
        if args.ensemble not in ensemble_ids:
            ensemble_ids.append(args.ensemble)
        if not args.evaluate:
            ensemble = r.get_ensemble(args.ensemble, api, args.verbosity,
                                      session_file)
            model_ids = ensemble['object']['models']
            models = model_ids[:]

    if args.ensembles or args.ensemble_tag:
        model_ids = []
        ensemble_ids = []
        # Retrieves ensemble/ids from the tag query or the provided list.
        if args.ensemble_tag:
            ensemble_ids = (ensemble_ids + u.list_ids(
                api.list_ensembles, "tags__in=%s" % args.ensemble_tag))
        else:
            ensemble_ids = u.read_resources(args.ensembles)
        for ensemble_id in ensemble_ids:
            ensemble = r.get_ensemble(ensemble_id, api)
            if args.ensemble is None:
                args.ensemble = ensemble_id
            model_ids.extend(ensemble['object']['models'])
        models = model_ids[:]

    # If we are going to predict we must retrieve the models
    if model_ids and args.test_set and not args.evaluate:
        models, model_ids = r.get_models(models, args, api, session_file)

    return models, model_ids, ensemble_ids, resume
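
The ensemble branches above reduce to expanding each referenced ensemble into its member model ids. A standalone sketch of that expansion, assuming the bigml Python bindings; the ensemble id is a placeholder:

from bigml.api import BigML

api = BigML()
model_ids = []
# Each ensemble resource lists its member models under object/models.
for ensemble_id in ["ensemble/5081d067035d076151000000"]:
    ensemble = api.get_ensemble(ensemble_id)
    model_ids.extend(ensemble['object']['models'])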
Exemplo n.º 16
0
def main(args=sys.argv[1:]):
    """Main process

    """
    for i in range(0, len(args)):
        if args[i].startswith("--"):
            args[i] = args[i].replace("_", "-")
    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        for log_file in LOG_FILES:
            try:
                open(log_file, 'w', 0).close()
            except IOError:
                pass
    literal_args = args[:]
    for i in range(0, len(args)):
        if ' ' in args[i]:
            literal_args[i] = '"%s"' % args[i]
    message = "bigmler %s\n" % " ".join(literal_args)

    # Resume calls are not logged
    if not "--resume" in args:
        with open(COMMAND_LOG, "a", 0) as command_log:
            command_log.write(message)
        resume = False

    parser = create_parser(defaults=get_user_defaults(), constants={'NOW': NOW,
                           'MAX_MODELS': MAX_MODELS, 'PLURALITY': PLURALITY})

    # Parses command line arguments.
    command_args = parser.parse_args(args)

    default_output = ('evaluation' if command_args.evaluate
                      else 'predictions.csv')
    if command_args.resume:
        debug = command_args.debug
        command = u.get_log_reversed(COMMAND_LOG,
                                     command_args.stack_level)
        args = shlex.split(command)[1:]
        output_dir = u.get_log_reversed(DIRS_LOG,
                                        command_args.stack_level)
        defaults_file = "%s%s%s" % (output_dir, os.sep, DEFAULTS_FILE)
        parser = create_parser(defaults=get_user_defaults(defaults_file),
                               constants={'NOW': NOW, 'MAX_MODELS': MAX_MODELS,
                                          'PLURALITY': PLURALITY})
        command_args = parser.parse_args(args)
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (output_dir, os.sep,
                                         default_output))
        # Logs the issued command and the resumed command
        session_file = "%s%s%s" % (output_dir, os.sep, SESSIONS_LOG)
        u.log_message(message, log_file=session_file)
        message = "\nResuming command:\n%s\n\n" % command
        u.log_message(message, log_file=session_file, console=True)
        try:
            defaults_handler = open(defaults_file, 'r')
            contents = defaults_handler.read()
            message = "\nUsing the following defaults:\n%s\n\n" % contents
            u.log_message(message, log_file=session_file, console=True)
            defaults_handler.close()
        except IOError:
            pass

        resume = True
    else:
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep,
                                         default_output))
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep,
                                         command_args.predictions))
        directory = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (directory, os.sep, SESSIONS_LOG)
        u.log_message(message + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, 'r')
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open("%s%s%s" % (directory, os.sep, DEFAULTS_FILE),
                                 'w', 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        with open(DIRS_LOG, "a", 0) as directory_log:
            directory_log.write("%s\n" % os.path.abspath(directory))

    if resume and debug:
        command_args.debug = True

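    # Builds the BigML API connection from the credentials and flags
    # given in the command options.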
    api_command_args = {
        'username': command_args.username,
        'api_key': command_args.api_key,
        'dev_mode': command_args.dev_mode,
        'debug': command_args.debug}

    api = bigml.api.BigML(**api_command_args)

    if (command_args.evaluate
        and not (command_args.training_set or command_args.source
                 or command_args.dataset)
        and not (command_args.test_set and (command_args.model or
                 command_args.models or command_args.model_tag))):
        parser.error("Evaluation wrong syntax.\n"
                     "\nTry for instance:\n\nbigmler --train data/iris.csv"
                     " --evaluate\nbigmler --model "
                     "model/5081d067035d076151000011 --dataset "
                     "dataset/5081d067035d076151003423 --evaluate")

    if command_args.objective_field:
        objective = command_args.objective_field
        try:
            command_args.objective_field = int(objective)
        except ValueError:
            pass

    output_args = {
        "api": api,
        "training_set": command_args.training_set,
        "test_set": command_args.test_set,
        "output": command_args.predictions,
        "objective_field": command_args.objective_field,
        "name": command_args.name,
        "training_set_header": command_args.train_header,
        "test_set_header": command_args.test_header,
        "args": command_args,
        "resume": resume,
    }

    # Reads description if provided.
    if command_args.description:
        description_arg = u.read_description(command_args.description)
        output_args.update(description=description_arg)
    else:
        output_args.update(description="Created using BigMLer")

    # Parses fields if provided.
    if command_args.field_attributes:
        field_attributes_arg = (
            u.read_field_attributes(command_args.field_attributes))
        output_args.update(field_attributes=field_attributes_arg)

    # Parses types if provided.
    if command_args.types:
        types_arg = u.read_types(command_args.types)
        output_args.update(types=types_arg)

    # Parses dataset fields if provided.
    if command_args.dataset_fields:
        dataset_fields_arg = map(lambda x: x.strip(),
                                 command_args.dataset_fields.split(','))
        output_args.update(dataset_fields=dataset_fields_arg)

    # Parses model input fields if provided.
    if command_args.model_fields:
        model_fields_arg = map(lambda x: x.strip(),
                               command_args.model_fields.split(','))
        output_args.update(model_fields=model_fields_arg)

    model_ids = []
    # Parses model/ids if provided.
    if command_args.models:
        model_ids = u.read_models(command_args.models)
        output_args.update(model_ids=model_ids)

    dataset_id = None
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_id = u.read_dataset(command_args.datasets)
        command_args.dataset = dataset_id

    # Retrieve model/ids if provided.
    if command_args.model_tag:
        model_ids = (model_ids +
                     u.list_ids(api.list_models,
                                "tags__in=%s" % command_args.model_tag))
        output_args.update(model_ids=model_ids)

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    if (command_args.method and
            command_args.method not in COMBINATION_WEIGHTS):
        command_args.method = 0
    else:
        combiner_methods = dict([[value, key]
                                for key, value in COMBINER_MAP.items()])
        command_args.method = combiner_methods.get(command_args.method, 0)

    # Reads votes files in the provided directories.
    if command_args.votes_dirs:
        dirs = map(lambda x: x.strip(), command_args.votes_dirs.split(','))
        votes_path = os.path.dirname(command_args.predictions)
        votes_files = u.read_votes_files(dirs, votes_path)
        output_args.update(votes_files=votes_files)

    # Parses fields map if provided.
    if command_args.fields_map:
        fields_map_arg = u.read_fields_map(command_args.fields_map)
        output_args.update(fields_map=fields_map_arg)

    # Parses resources ids if provided.
    if command_args.delete:
        if command_args.predictions is None:
            path = NOW
        else:
            path = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
        message = u.dated("Retrieving objects to delete.\n")
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
        delete_list = []
        if command_args.delete_list:
            delete_list = map(lambda x: x.strip(),
                              command_args.delete_list.split(','))
        if command_args.delete_file:
            if not os.path.exists(command_args.delete_file):
                raise Exception("File %s not found" % command_args.delete_file)
            delete_list.extend([line for line
                                in open(command_args.delete_file, "r")])
        if command_args.all_tag:
            query_string = "tags__in=%s" % command_args.all_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
            delete_list.extend(u.list_ids(api.list_models, query_string))
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        # Retrieve sources/ids if provided
        if command_args.source_tag:
            query_string = "tags__in=%s" % command_args.source_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
        # Retrieve datasets/ids if provided
        if command_args.dataset_tag:
            query_string = "tags__in=%s" % command_args.dataset_tag
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
        # Retrieve model/ids if provided
        if command_args.model_tag:
            query_string = "tags__in=%s" % command_args.model_tag
            delete_list.extend(u.list_ids(api.list_models, query_string))
        # Retrieve prediction/ids if provided
        if command_args.prediction_tag:
            query_string = "tags__in=%s" % command_args.prediction_tag
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
        # Retrieve evaluation/ids if provided
        if command_args.evaluation_tag:
            query_string = "tags__in=%s" % command_args.evaluation_tag
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        message = u.dated("Deleting objects.\n")
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
        message = "\n".join(delete_list)
        u.log_message(message, log_file=session_file)
        u.delete(api, delete_list)
        if sys.platform == "win32" and sys.stdout.isatty():
            message = (u"\nGenerated files:\n\n" +
                       unicode(u.print_tree(path, " "), "utf-8") + u"\n")
        else:
            message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
    elif (command_args.training_set or command_args.test_set
          or command_args.source or command_args.dataset
          or command_args.datasets or command_args.votes_dirs):
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Exemplo n.º 17
0
def transform_args(command_args, flags, api):
    """Transforms the formatted argument strings into structured arguments

    """
    attribute_args(command_args)

    # Parses dataset generators in json format if provided
    try:
        if command_args.new_fields:
            json_generators = u.read_json(command_args.new_fields)
            command_args.dataset_json_generators = json_generators
        else:
            command_args.dataset_json_generators = {}
    except AttributeError:
        pass

    # Parses multi-dataset attributes in json such as field maps
    try:
        if command_args.multi_dataset_attributes:
            multi_dataset_json = u.read_json(
                command_args.multi_dataset_attributes)
            command_args.multi_dataset_json = multi_dataset_json
        else:
            command_args.multi_dataset_json = {}
    except AttributeError:
        pass

    transform_dataset_options(command_args, api)

    script_ids = None
    command_args.script_ids = []
    # Parses script/id if provided.
    try:
        if command_args.scripts:
            script_ids = u.read_resources(command_args.scripts)
            if len(script_ids) == 1:
                command_args.script = script_ids[0]
            command_args.script_ids = script_ids
    except AttributeError:
        pass

    # Retrieve script/ids if provided.
    try:
        if command_args.script_tag:
            script_ids = ((script_ids or []) +
                          u.list_ids(api.list_scripts,
                                     "tags__in=%s" % command_args.script_tag))
            if len(script_ids) == 1:
                command_args.script = script_ids[0]
            command_args.script_ids = script_ids
    except AttributeError:
        pass

    # Reads a json filter if provided.
    try:
        if command_args.json_filter:
            json_filter = u.read_json_filter(command_args.json_filter)
            command_args.json_filter = json_filter
    except AttributeError:
        pass

    # Reads a lisp filter if provided.
    try:
        if command_args.lisp_filter:
            lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
            command_args.lisp_filter = lisp_filter
    except AttributeError:
        pass

    # Adds default tags unless it is requested not to do so.
    try:
        if command_args.no_tag:
            command_args.tag.append('BigMLer')
            command_args.tag.append('BigMLer_%s' % NOW)
    except AttributeError:
        pass

    # Checks combined votes method
    try:
        if (command_args.method and command_args.method != COMBINATION_LABEL
                and command_args.method not in COMBINATION_WEIGHTS):
            command_args.method = 0
        else:
            combiner_methods = dict([[value, key]
                                     for key, value in COMBINER_MAP.items()])
            combiner_methods[COMBINATION_LABEL] = COMBINATION
            command_args.method = combiner_methods.get(command_args.method, 0)
    except AttributeError:
        pass

    # Checks missing_strategy
    try:
        if (command_args.missing_strategy and
                command_args.missing_strategy not in MISSING_STRATEGIES):
            command_args.missing_strategy = 0
        else:
            command_args.missing_strategy = MISSING_STRATEGIES.get(
                command_args.missing_strategy, 0)
    except AttributeError:
        pass

    try:
        # Old value for --prediction-info='full data' maps to 'full'
        if command_args.prediction_info == 'full data':
            print("WARNING: 'full data' is a deprecated value. Use"
                  " 'full' instead")
            command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass

    # Parses class, weight pairs for objective weight
    try:
        if command_args.objective_weights:
            objective_weights = (u.read_objective_weights(
                command_args.objective_weights))
            command_args.objective_weights_json = objective_weights
    except AttributeError:
        pass

    try:
        command_args.multi_label_fields_list = []
        if command_args.multi_label_fields is not None:
            multi_label_fields = command_args.multi_label_fields.strip()
            command_args.multi_label_fields_list = multi_label_fields.split(
                command_args.args_separator)
    except AttributeError:
        pass

    # Sets shared_flag if --shared or --unshared has been used
    command_args.shared_flag = '--shared' in flags or '--unshared' in flags

    # Sets remote on if scoring a training dataset in bigmler anomaly
    try:
        if command_args.score:
            command_args.remote = True
            if "--prediction-info" not in flags:
                command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass

    command_args.has_supervised_ = (
        (hasattr(command_args, 'model') and command_args.model)
        or (hasattr(command_args, 'models') and command_args.models)
        or (hasattr(command_args, 'ensemble') and command_args.ensemble)
        or (hasattr(command_args, 'ensembles') and command_args.ensembles)
        or (hasattr(command_args, 'model_tag') and command_args.model_tag)
        or (hasattr(command_args, 'logistic_regression')
            and command_args.logistic_regression)
        or (hasattr(command_args, 'logistic_regressions')
            and command_args.logistic_regressions)
        or (hasattr(command_args, 'logistic_regression_tag')
            and command_args.logistic_regression_tag)
        or (hasattr(command_args, 'deepnet') and command_args.deepnet)
        or (hasattr(command_args, 'deepnets') and command_args.deepnets) or
        (hasattr(command_args, 'deepnet_tag') and command_args.deepnet_tag) or
        (hasattr(command_args, 'ensemble_tag') and command_args.ensemble_tag))

    command_args.has_models_ = (
        command_args.has_supervised_
        or (hasattr(command_args, 'cluster') and command_args.cluster)
        or (hasattr(command_args, 'clusters') and command_args.clusters)
        or (hasattr(command_args, 'anomaly') and command_args.anomaly)
        or (hasattr(command_args, 'anomalies') and command_args.anomalies)
        or (hasattr(command_args, 'cluster_tag') and command_args.cluster_tag)
        or (hasattr(command_args, 'anomaly_tag') and command_args.anomaly_tag))

    command_args.has_datasets_ = (
        (hasattr(command_args, 'dataset') and command_args.dataset)
        or (hasattr(command_args, 'datasets') and command_args.datasets)
        or (hasattr(command_args, 'dataset_ids') and command_args.dataset_ids)
        or (hasattr(command_args, 'dataset_tag') and command_args.dataset_tag))

    command_args.has_test_datasets_ = (
        (hasattr(command_args, 'test_dataset') and command_args.test_dataset)
        or
        (hasattr(command_args, 'test_datasets') and command_args.test_datasets)
        or (hasattr(command_args, 'test_dataset_tag')
            and command_args.test_dataset_tag))

    command_args.new_dataset = (
        (hasattr(command_args, 'datasets_json') and command_args.datasets_json)
        or
        (hasattr(command_args, 'multi_dataset') and command_args.multi_dataset)
        or (hasattr(command_args, 'juxtapose') and command_args.juxtapose)
        or (hasattr(command_args, 'sql_query') and command_args.sql_query)
        or (hasattr(command_args, 'sql_output_fields')
            and command_args.sql_output_fields)
        or (hasattr(command_args, 'json_query') and command_args.json_query))
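
The has_*_ flags computed above all ask the same question: is any of a group of optional attributes present and truthy? A hypothetical one-liner that would express the same checks (not BigMLer code):

def any_set(command_args, *names):
    # True if any of the named attributes exists and is truthy.
    return any(getattr(command_args, name, None) for name in names)

# e.g. command_args.has_datasets_ = any_set(
#          command_args, 'dataset', 'datasets', 'dataset_ids', 'dataset_tag')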
Exemplo n.º 18
0
def transform_args(command_args, flags, api, user_defaults):
    """Transforms the formatted argument strings into structured arguments

    """
    # Parses attributes in json format if provided
    command_args.json_args = {}

    json_attribute_options = {
        'source': command_args.source_attributes,
        'dataset': command_args.dataset_attributes,
        'model': command_args.model_attributes,
        'ensemble': command_args.ensemble_attributes,
        'evaluation': command_args.evaluation_attributes,
        'batch_prediction': command_args.batch_prediction_attributes
    }

    for resource_type, attributes_file in json_attribute_options.items():
        if attributes_file is not None:
            command_args.json_args[resource_type] = u.read_json(
                attributes_file)
        else:
            command_args.json_args[resource_type] = {}

    # Parses dataset generators in json format if provided
    if command_args.new_fields:
        json_generators = u.read_json(command_args.new_fields)
        command_args.dataset_json_generators = json_generators
    else:
        command_args.dataset_json_generators = {}

    # Parses multi-dataset attributes in json such as field maps
    if command_args.multi_dataset_attributes:
        multi_dataset_json = u.read_json(command_args.multi_dataset_attributes)
        command_args.multi_dataset_json = multi_dataset_json
    else:
        command_args.multi_dataset_json = {}

    dataset_ids = None
    command_args.dataset_ids = []
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_ids = u.read_datasets(command_args.datasets)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Retrieve dataset/ids if provided.
    if command_args.dataset_tag:
        dataset_ids = ((dataset_ids or []) +
                       u.list_ids(api.list_datasets,
                                  "tags__in=%s" % command_args.dataset_tag))
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    if (command_args.method and command_args.method != COMBINATION_LABEL
            and command_args.method not in COMBINATION_WEIGHTS):
        command_args.method = 0
    else:
        combiner_methods = dict([[value, key]
                                 for key, value in COMBINER_MAP.items()])
        combiner_methods[COMBINATION_LABEL] = COMBINATION
        command_args.method = combiner_methods.get(command_args.method, 0)

    # Checks missing_strategy
    if (command_args.missing_strategy and
            command_args.missing_strategy not in MISSING_STRATEGIES):
        command_args.missing_strategy = 0
    else:
        command_args.missing_strategy = MISSING_STRATEGIES.get(
            command_args.missing_strategy, 0)

    # Adds replacement=True if creating ensemble and nothing is specified
    if (command_args.number_of_models > 1 and not command_args.replacement
            and '--no-replacement' not in flags
            and 'replacement' not in user_defaults
            and '--no-randomize' not in flags
            and 'randomize' not in user_defaults
            and '--sample-rate' not in flags
            and 'sample_rate' not in user_defaults):
        command_args.replacement = True

    # Old value for --prediction-info='full data' maps to 'full'
    if command_args.prediction_info == 'full data':
        print "WARNING: 'full data' is a deprecated value. Use 'full' instead"
        command_args.prediction_info = FULL_FORMAT

    # Parses class, weight pairs for objective weight
    if command_args.objective_weights:
        objective_weights = (u.read_objective_weights(
            command_args.objective_weights))
        command_args.objective_weights_json = objective_weights

    command_args.multi_label_fields_list = []
    if command_args.multi_label_fields is not None:
        multi_label_fields = command_args.multi_label_fields.strip()
        command_args.multi_label_fields_list = multi_label_fields.split(',')
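
The combined-votes lookup above inverts COMBINER_MAP so the label given on the command line maps back to its numeric code, with 0 as the fallback. A sketch with assumed map contents (the real constants live in BigMLer):

COMBINER_MAP = {0: 'plurality', 1: 'confidence weighted'}  # assumed values
combiner_methods = {value: key for key, value in COMBINER_MAP.items()}
print(combiner_methods.get('confidence weighted', 0))  # -> 1
print(combiner_methods.get('unknown method', 0))       # -> 0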
Exemplo n.º 19
0
def get_output_args(api, command_args, resume):
    """Returns the output args needed for the main bigmler computation process

    """
    try:
        if command_args.train_stdin:
            if command_args.test_stdin:
                sys.exit("The standard input can't be used both for training "
                         "and testing. Choose one of them")
            command_args.training_set = StringIO(sys.stdin.read())
        elif command_args.test_stdin:
            command_args.test_set = StringIO(sys.stdin.read())
    except AttributeError:
        pass

    try:
        if command_args.objective_field:
            objective = command_args.objective_field
            try:
                command_args.objective_field = int(objective)
            except ValueError:
                if not command_args.train_header:
                    sys.exit("The %s has been set as objective field but"
                             " the file has not been marked as containing"
                             " headers.\nPlease set the --train-header flag if"
                             " the file has headers or use a column number"
                             " to set the objective field." % objective)
    except AttributeError:
        pass

    command_args.resume_ = resume
    command_args.predictions = command_args.output
    command_args.projections = command_args.output

    # Reads description if provided.
    try:
        if command_args.description:
            description_arg = u.read_description(command_args.description)
            command_args.description_ = description_arg
        else:
            command_args.description_ = DEFAULT_DESCRIPTION
    except AttributeError:
        pass

    # Parses fields if provided.
    try:
        if command_args.field_attributes:
            field_attributes_arg = (u.read_field_attributes(
                command_args.field_attributes))
            command_args.field_attributes_ = field_attributes_arg
        else:
            command_args.field_attributes_ = []
    except AttributeError:
        pass
    try:
        if command_args.test_field_attributes:
            field_attributes_arg = (u.read_field_attributes(
                command_args.test_field_attributes))
            command_args.test_field_attributes_ = field_attributes_arg
        else:
            command_args.test_field_attributes_ = []
    except AttributeError:
        pass

    # Parses types if provided.
    try:
        if command_args.types:
            types_arg = u.read_types(command_args.types)
            command_args.types_ = types_arg
        else:
            command_args.types_ = None
        if command_args.test_types:
            types_arg = u.read_types(command_args.test_types)
            command_args.test_types_ = types_arg
        else:
            command_args.test_types_ = None
    except AttributeError:
        pass

    # Parses dataset fields if provided.
    try:
        if command_args.dataset_fields:
            dataset_fields_arg = [
                field.strip() for field in command_args.dataset_fields.split(
                    command_args.args_separator)
            ]
            command_args.dataset_fields_ = dataset_fields_arg
        else:
            command_args.dataset_fields_ = []
    except AttributeError:
        pass

    # Parses model input fields if provided.
    try:
        if command_args.model_fields:
            model_fields_arg = [
                field.strip() for field in command_args.model_fields.split(
                    command_args.args_separator)
            ]
            command_args.model_fields_ = model_fields_arg
        else:
            command_args.model_fields_ = []
    except AttributeError:
        pass

    # Parses cluster input fields if provided.
    try:
        if command_args.cluster_fields:
            cluster_fields_arg = [
                field.strip() for field in command_args.cluster_fields.split(
                    command_args.args_separator)
            ]
            command_args.cluster_fields_ = cluster_fields_arg
        else:
            command_args.cluster_fields_ = []
    except AttributeError:
        pass

    # Parses association input fields if provided.
    try:
        if command_args.association_fields:
            association_fields_arg = [
                field.strip() for field in
                command_args.association_fields.split(
                    command_args.args_separator)]
            command_args.association_fields_ = association_fields_arg
        else:
            command_args.association_fields_ = []
    except AttributeError:
        pass

    # Parses anomaly input fields if provided.
    try:
        if command_args.anomaly_fields:
            anomaly_fields_arg = [
                field.strip() for field in command_args.anomaly_fields.split(
                    command_args.args_separator)
            ]
            command_args.anomaly_fields_ = anomaly_fields_arg
        else:
            command_args.anomaly_fields_ = []
    except AttributeError:
        pass

    # Parses logistic regression input fields if provided.
    try:
        if command_args.logistic_fields:
            logistic_fields_arg = [
                field.strip() for field in command_args.logistic_fields.split(
                    command_args.args_separator)
            ]
            command_args.logistic_fields_ = logistic_fields_arg
        else:
            command_args.logistic_fields_ = []
    except AttributeError:
        pass

    # Parses linear regression input fields if provided.
    try:
        if command_args.linear_fields:
            linear_fields_arg = [
                field.strip() for field in command_args.linear_fields.split(
                    command_args.args_separator)
            ]
            command_args.linear_fields_ = linear_fields_arg
        else:
            command_args.linear_fields_ = []
    except AttributeError:
        pass

    # Parses deepnet input fields if provided.
    try:
        if command_args.deepnet_fields:
            deepnet_fields_arg = [
                field.strip() for field in command_args.deepnet_fields.split(
                    command_args.args_separator)
            ]
            command_args.deepnet_fields_ = deepnet_fields_arg
        else:
            command_args.deepnet_fields_ = []
    except AttributeError:
        pass

    # Parses topic model fields if provided.
    try:
        if command_args.topic_fields:
            topic_fields_arg = [
                field.strip() for field in command_args.topic_fields.split(
                    command_args.args_separator)
            ]
            command_args.topic_model_fields_ = topic_fields_arg
        else:
            command_args.topic_model_fields_ = []
    except AttributeError:
        pass

    # Parses pca fields if provided.
    try:
        if command_args.pca_fields:
            pca_fields_arg = [
                field.strip() for field in command_args.pca_fields.split(
                    command_args.args_separator)
            ]
            command_args.pca_fields_ = pca_fields_arg
        else:
            command_args.pca_fields_ = []
    except AttributeError:
        pass

    # Parses field_codings for deepnet
    try:
        if command_args.field_codings:
            command_args.field_codings_ = u.read_json(
                command_args.field_codings)
        else:
            command_args.field_codings_ = []
    except AttributeError:
        pass

    # Parses imports for scripts and libraries.
    try:
        if command_args.imports:
            imports_arg = [
                field.strip() for field in command_args.imports.split(
                    command_args.args_separator)
            ]
            command_args.imports_ = imports_arg
        else:
            command_args.imports_ = []
    except AttributeError:
        pass

    # Parses objective fields for time-series.
    try:
        if command_args.objectives:
            objective_fields_arg = [
                field.strip() for field in command_args.objectives.split(
                    command_args.args_separator)
            ]
            command_args.objective_fields_ = objective_fields_arg
        else:
            command_args.objective_fields_ = []
    except AttributeError:
        pass

    # Parses range.
    try:
        if command_args.range:
            range_arg = [
                value.strip() for value in command_args.range.split(
                    command_args.args_separator)
            ]
            command_args.range_ = range_arg
        else:
            command_args.range_ = []
    except AttributeError:
        pass

    # Parses parameters for scripts.
    try:
        if command_args.declare_inputs:
            command_args.parameters_ = u.read_json(command_args.declare_inputs)
        else:
            command_args.parameters_ = []
    except AttributeError:
        pass

    # Parses creation_defaults for executions.
    try:
        if command_args.creation_defaults:
            command_args.creation_defaults_ = u.read_json(
                command_args.creation_defaults)
        else:
            command_args.creation_defaults_ = {}
    except AttributeError:
        pass

    # Parses arguments for executions.
    try:
        if command_args.inputs:
            command_args.arguments_ = u.read_json(command_args.inputs)
        else:
            command_args.arguments_ = []
    except AttributeError:
        pass

    # Parses input maps for executions.
    try:
        if command_args.input_maps:
            command_args.input_maps_ = u.read_json(command_args.input_maps)
        else:
            command_args.input_maps_ = []
    except AttributeError:
        pass

    # Parses outputs for executions.
    try:
        if command_args.outputs:
            command_args.outputs_ = u.read_json(command_args.outputs)
        else:
            command_args.outputs_ = []
    except AttributeError:
        pass

    # Parses outputs for scripts.
    try:
        if command_args.declare_outputs:
            command_args.declare_outputs_ = \
                u.read_json(command_args.declare_outputs)
        else:
            command_args.declare_outputs_ = []
    except AttributeError:
        pass

    model_ids = []
    try:
        # Parses model/ids if provided.
        if command_args.models:
            model_ids = u.read_resources(command_args.models)
        command_args.model_ids_ = model_ids
    except AttributeError:
        pass

    # Retrieve model/ids if provided.
    try:
        if command_args.model_tag:
            model_ids = (model_ids + u.list_ids(
                api.list_models, "tags__in=%s" % command_args.model_tag))
        command_args.model_ids_ = model_ids
    except AttributeError:
        pass

    # Reads votes files in the provided directories.
    try:
        if command_args.votes_dirs:
            dirs = [
                directory.strip() for directory in
                command_args.votes_dirs.split(command_args.args_separator)
            ]
            votes_path = os.path.dirname(command_args.predictions)
            votes_files = u.read_votes_files(dirs, votes_path)
            command_args.votes_files_ = votes_files
        else:
            command_args.votes_files_ = []
    except AttributeError:
        pass

    # Parses fields map if provided.
    try:
        if command_args.fields_map:
            fields_map_arg = u.read_fields_map(command_args.fields_map)
            command_args.fields_map_ = fields_map_arg
        else:
            command_args.fields_map_ = None
    except AttributeError:
        pass

    cluster_ids = []
    try:
        # Parses cluster/ids if provided.
        if command_args.clusters:
            cluster_ids = u.read_resources(command_args.clusters)
        command_args.cluster_ids_ = cluster_ids
    except AttributeError:
        pass

    # Retrieve cluster/ids if provided.
    try:
        if command_args.cluster_tag:
            cluster_ids = (cluster_ids + u.list_ids(
                api.list_clusters, "tags__in=%s" % command_args.cluster_tag))
        command_args.cluster_ids_ = cluster_ids
    except AttributeError:
        pass

    association_ids = []
    try:
        # Parses association/ids if provided.
        if command_args.associations:
            association_ids = u.read_resources(command_args.associations)
        command_args.association_ids_ = association_ids
    except AttributeError:
        pass

    # Retrieve association/ids if provided.
    try:
        if command_args.association_tag:
            association_ids = (
                association_ids +
                u.list_ids(api.list_associations,
                           "tags__in=%s" % command_args.association_tag))
        command_args.association_ids_ = association_ids
    except AttributeError:
        pass

    logistic_regression_ids = []
    try:
        # Parses logisticregression/ids if provided.
        if command_args.logistic_regressions:
            logistic_regression_ids = u.read_resources(
                command_args.logistic_regressions)
        command_args.logistic_regression_ids_ = logistic_regression_ids
    except AttributeError:
        pass

    # Retrieve logisticregression/ids if provided.
    try:
        if command_args.logistic_regression_tag:
            logistic_regression_ids = (
                logistic_regression_ids +
                u.list_ids(api.list_logistic_regressions,
                           "tags__in=%s" %
                           command_args.logistic_regression_tag))
        command_args.logistic_regression_ids_ = logistic_regression_ids
    except AttributeError:
        pass

    linear_regression_ids = []
    try:
        # Parses linearregression/ids if provided.
        if command_args.linear_regressions:
            linear_regression_ids = u.read_resources(
                command_args.linear_regressions)
        command_args.linear_regression_ids_ = linear_regression_ids
    except AttributeError:
        pass

    # Retrieve linearregression/ids if provided.
    try:
        if command_args.linear_regression_tag:
            linear_regression_ids = (
                linear_regression_ids +
                u.list_ids(api.list_linear_regressions,
                           "tags__in=%s" %
                           command_args.linear_regression_tag))
        command_args.linear_regression_ids_ = linear_regression_ids
    except AttributeError:
        pass

    deepnet_ids = []
    try:
        # Parses deepnet/ids if provided.
        if command_args.deepnets:
            deepnet_ids = u.read_resources(command_args.deepnets)
        command_args.deepnet_ids_ = deepnet_ids
    except AttributeError:
        pass

    # Retrieve deepnet/ids if provided.
    try:
        if command_args.deepnet_tag:
            deepnet_ids = (deepnet_ids +
                           u.list_ids(api.list_deepnets,
                                      "tags__in=%s" % command_args.deepnet_tag))
        command_args.deepnet_ids_ = deepnet_ids
    except AttributeError:
        pass

    topic_model_ids = []
    try:
        # Parses topicmodel/ids if provided.
        if command_args.topic_models:
            topic_model_ids = u.read_resources(command_args.topic_models)
        command_args.topic_model_ids_ = topic_model_ids
    except AttributeError:
        pass

    # Retrieve topicmodel/ids if provided.
    try:
        if command_args.topic_model_tag:
            topic_model_ids = (
                topic_model_ids +
                u.list_ids(api.list_topic_models,
                           "tags__in=%s" % command_args.topic_model_tag))
        command_args.topic_model_ids_ = topic_model_ids
    except AttributeError:
        pass

    time_series_ids = []
    try:
        # Parses timeseries/ids if provided.
        if command_args.time_series_set:
            time_series_ids = u.read_resources(command_args.time_series)
        command_args.time_series_ids_ = time_series_ids
    except AttributeError:
        pass

    # Retrieve timeseries/ids if provided.
    try:
        if command_args.time_series_tag:
            time_series_ids = (
                time_series_ids +
                u.list_ids(api.list_time_series,
                           "tags__in=%s" % command_args.time_series_tag))
        command_args.time_series_ids_ = time_series_ids
    except AttributeError:
        pass

    pca_ids = []
    try:
        # Parses pca/ids if provided.
        if command_args.pcas:
            pca_ids = u.read_resources(command_args.pcas)
        command_args.pca_ids_ = pca_ids
    except AttributeError:
        pass

    # Retrieve pca/ids if provided.
    try:
        if command_args.pca_tag:
            pca_ids = (pca_ids + u.list_ids(
                api.list_pcas, "tags__in=%s" % command_args.pca_tag))
        command_args.pca_ids_ = pca_ids
    except AttributeError:
        pass

    # Parses models list for fusions if provided.
    try:
        if command_args.fusion_models:
            fusion_models_arg = [
                model.strip() for model in command_args.fusion_models.split(
                    command_args.args_separator)
            ]
            command_args.fusion_models_ = fusion_models_arg
        else:
            command_args.fusion_models_ = []
    except AttributeError:
        pass

    # Parses the models list for fusions from a JSON file if provided.
    if not has_value(command_args, "fusion_models_"):
        try:
            if command_args.fusion_models_file:
                fusion_models_arg = u.read_json(
                    command_args.fusion_models_file)
                command_args.fusion_models_ = fusion_models_arg
            else:
                command_args.fusion_models_ = []
        except AttributeError:
            pass

    fusion_ids = []
    try:
        # Parses fusion/ids if provided.
        if command_args.fusions:
            fusion_ids = u.read_resources(command_args.fusions)
        command_args.fusion_ids_ = fusion_ids
    except AttributeError:
        pass

    # Retrieve fusion/ids if provided.
    try:
        if command_args.fusion_tag:
            fusion_ids = (fusion_ids + u.list_ids(
                api.list_fusions, "tags__in=%s" % command_args.fusion_tag))
        command_args.fusion_ids_ = fusion_ids
    except AttributeError:
        pass

    # Parses cluster names to generate datasets if provided
    try:
        if command_args.cluster_datasets:
            cluster_datasets_arg = [
                dataset.strip()
                for dataset in command_args.cluster_datasets.split(
                    command_args.args_separator)
            ]
            command_args.cluster_datasets_ = cluster_datasets_arg
        else:
            command_args.cluster_datasets_ = []
    except AttributeError:
        pass

    # Parses cluster names to generate models if provided
    try:
        if command_args.cluster_models:
            cluster_models_arg = [
                model.strip() for model in command_args.cluster_models.split(
                    command_args.args_separator)
            ]
            command_args.cluster_models_ = cluster_models_arg
        else:
            command_args.cluster_models_ = []
    except AttributeError:
        pass

    # Parses summary_fields to exclude from the clustering algorithm
    try:
        if command_args.summary_fields:
            summary_fields_arg = [
                field.strip() for field in command_args.summary_fields.split(
                    command_args.args_separator)
            ]
            command_args.summary_fields_ = summary_fields_arg
        else:
            command_args.summary_fields_ = []
    except AttributeError:
        pass

    anomaly_ids = []
    try:
        # Parses anomaly/ids if provided.
        if command_args.anomalies:
            anomaly_ids = u.read_resources(command_args.anomalies)
        command_args.anomaly_ids_ = anomaly_ids
    except AttributeError:
        pass

    # Retrieve anomaly/ids if provided.
    try:
        if command_args.anomaly_tag:
            anomaly_ids = (anomaly_ids + u.list_ids(
                api.list_anomalies, "tags__in=%s" % command_args.anomaly_tag))
        command_args.anomaly_ids_ = anomaly_ids
    except AttributeError:
        pass

    sample_ids = []
    try:
        # Parses sample/ids if provided.
        if command_args.samples:
            sample_ids = u.read_resources(command_args.samples)
        command_args.sample_ids_ = sample_ids
    except AttributeError:
        pass

    # Retrieve sample/ids if provided.
    try:
        if command_args.sample_tag:
            sample_ids = (sample_ids + u.list_ids(
                api.list_samples, "tags__in=%s" % command_args.sample_tag))
        command_args.sample_ids_ = sample_ids
    except AttributeError:
        pass

    # Parses sample row fields
    try:
        if command_args.row_fields:
            row_fields_arg = [
                field.strip() for field in command_args.row_fields.split(
                    command_args.args_separator)
            ]
            command_args.row_fields_ = row_fields_arg
        else:
            command_args.row_fields_ = []
    except AttributeError:
        pass

    # Parses sample stat_fields
    try:
        if command_args.stat_fields:
            stat_fields_arg = [
                field.strip() for field in command_args.stat_fields.split(
                    command_args.args_separator)
            ]
            command_args.stat_fields_ = stat_fields_arg
        else:
            command_args.stat_fields_ = []
    except AttributeError:
        pass

    # if boosting arguments are used, set on boosting
    try:
        if command_args.iterations or command_args.learning_rate \
                or command_args.early_holdout:
            command_args.boosting = True
    except AttributeError:
        pass

    # Extracts the imports from the JSON metadata file
    try:
        if command_args.embedded_imports:
            command_args.embedded_imports_ = u.read_resources(
                command_args.embedded_imports)
        else:
            command_args.embedded_imports_ = []
    except AttributeError:
        pass

    # Parses hidden_layers for deepnets.
    try:
        if command_args.hidden_layers:
            command_args.hidden_layers_ = u.read_json(
                command_args.hidden_layers)
        else:
            command_args.hidden_layers_ = []
    except AttributeError:
        pass

    # Parses operating_point for predictions.
    try:
        if command_args.operating_point:
            command_args.operating_point_ = u.read_json(
                command_args.operating_point)
        else:
            command_args.operating_point_ = []
    except AttributeError:
        pass

    # Parses the json_query
    try:
        if command_args.json_query:
            command_args.json_query_ = u.read_json(command_args.json_query)
        else:
            command_args.json_query_ = None
    except AttributeError:
        pass

    # Parses the models_file
    try:
        if command_args.models_file:
            command_args.models_file_ = u.read_json(command_args.models_file)
        else:
            command_args.models_file_ = None
    except AttributeError:
        pass

    # Parses the sql_output_fields
    try:
        if command_args.sql_output_fields:
            command_args.sql_output_fields_ = u.read_json(
                command_args.sql_output_fields)
        else:
            command_args.sql_output_fields_ = None
    except AttributeError:
        pass

    # Parses connection info for external connectors
    try:
        if command_args.connection_json:
            command_args.connection_json_ = u.read_json(
                command_args.connection_json)
        else:
            command_args.connection_json_ = {}
    except AttributeError:
        pass

    return {"api": api, "args": command_args}
Example No. 20
def get_output_args(api, command_args, resume):
    """Returns the output args needed for the main bigmler computation process

    """
    try:
        if command_args.train_stdin:
            if command_args.test_stdin:
                sys.exit("The standard input can't be used both for training "
                         "and testing. Choose one of them")
            command_args.training_set = StringIO(sys.stdin.read())
        elif command_args.test_stdin:
            command_args.test_set = StringIO(sys.stdin.read())
    except AttributeError:
        pass

    try:
        if command_args.objective_field:
            objective = command_args.objective_field
            try:
                command_args.objective_field = int(objective)
            except ValueError:
                if not command_args.train_header:
                    sys.exit("The %s has been set as objective field but"
                             " the file has not been marked as containing"
                             " headers.\nPlease set the --train-header flag if"
                             " the file has headers or use a column number"
                             " to set the objective field." % objective)
    except AttributeError:
        pass

    command_args.resume_ = resume

    # Reads description if provided.
    try:
        if command_args.description:
            description_arg = u.read_description(command_args.description)
            command_args.description_ = description_arg
        else:
            command_args.description_ = DEFAULT_DESCRIPTION
    except AttributeError:
        pass

    # Parses fields if provided.
    try:
        if command_args.field_attributes:
            field_attributes_arg = (
                u.read_field_attributes(command_args.field_attributes))
            command_args.field_attributes_ = field_attributes_arg
        else:
            command_args.field_attributes_ = []
    except AttributeError:
        pass
    try:
        if command_args.test_field_attributes:
            field_attributes_arg = (
                u.read_field_attributes(command_args.test_field_attributes))
            command_args.test_field_attributes_ = field_attributes_arg
        else:
            command_args.test_field_attributes_ = []
    except AttributeError:
        pass

    # Parses types if provided.
    try:
        if command_args.types:
            types_arg = u.read_types(command_args.types)
            command_args.types_ = types_arg
        else:
            command_args.types_ = None
        if command_args.test_types:
            types_arg = u.read_types(command_args.test_types)
            command_args.test_types_ = types_arg
        else:
            command_args.test_types_ = None
    except AttributeError:
        pass


    # Parses dataset fields if provided.
    try:
        if command_args.dataset_fields:
            dataset_fields_arg = [
                field.strip() for field in command_args.dataset_fields.split(
                    command_args.args_separator)]
            command_args.dataset_fields_ = dataset_fields_arg
        else:
            command_args.dataset_fields_ = []
    except AttributeError:
        pass

    # Parses model input fields if provided.
    try:
        if command_args.model_fields:
            model_fields_arg = [
                field.strip() for field in command_args.model_fields.split(
                    command_args.args_separator)]
            command_args.model_fields_ = model_fields_arg
        else:
            command_args.model_fields_ = []
    except AttributeError:
        pass

    # Parses cluster input fields if provided.
    try:
        if command_args.cluster_fields:
            cluster_fields_arg = [
                field.strip() for field in command_args.cluster_fields.split(
                    command_args.args_separator)]
            command_args.cluster_fields_ = cluster_fields_arg
        else:
            command_args.cluster_fields_ = []
    except AttributeError:
        pass


    # Parses association input fields if provided.
    try:
        if command_args.association_fields:
            association_fields_arg = [
                field.strip() for field in
                command_args.association_fields.split(
                    command_args.args_separator)]
            command_args.association_fields_ = association_fields_arg
        else:
            command_args.association_fields_ = []
    except AttributeError:
        pass

    # Parses anomaly input fields if provided.
    try:
        if command_args.anomaly_fields:
            anomaly_fields_arg = [
                field.strip() for field in command_args.anomaly_fields.split(
                    command_args.args_separator)]
            command_args.anomaly_fields_ = anomaly_fields_arg
        else:
            command_args.anomaly_fields_ = []
    except AttributeError:
        pass

    # Parses logistic regression input fields if provided.
    try:
        if command_args.logistic_fields:
            logistic_fields_arg = [
                field.strip() for field in command_args.logistic_fields.split(
                    command_args.args_separator)]
            command_args.logistic_fields_ = logistic_fields_arg
        else:
            command_args.logistic_fields_ = []
    except AttributeError:
        pass

    model_ids = []
    try:
        # Parses model/ids if provided.
        if command_args.models:
            model_ids = u.read_resources(command_args.models)
        command_args.model_ids_ = model_ids
    except AttributeError:
        pass

    # Retrieve model/ids if provided.
    try:
        if command_args.model_tag:
            model_ids = (model_ids +
                         u.list_ids(api.list_models,
                                    "tags__in=%s" % command_args.model_tag))
        command_args.model_ids_ = model_ids
    except AttributeError:
        pass

    # Reads votes files in the provided directories.
    try:
        if command_args.votes_dirs:
            dirs = [
                directory.strip() for directory in
                command_args.votes_dirs.split(
                    command_args.args_separator)]
            votes_path = os.path.dirname(command_args.predictions)
            votes_files = u.read_votes_files(dirs, votes_path)
            command_args.votes_files_ = votes_files
        else:
            command_args.votes_files_ = []
    except AttributeError:
        pass

    # Parses fields map if provided.
    try:
        if command_args.fields_map:
            fields_map_arg = u.read_fields_map(command_args.fields_map)
            command_args.fields_map_ = fields_map_arg
        else:
            command_args.fields_map_ = None
    except AttributeError:
        pass

    cluster_ids = []
    try:
        # Parses cluster/ids if provided.
        if command_args.clusters:
            cluster_ids = u.read_resources(command_args.clusters)
        command_args.cluster_ids_ = cluster_ids
    except AttributeError:
        pass

    # Retrieve cluster/ids if provided.
    try:
        if command_args.cluster_tag:
            cluster_ids = (cluster_ids +
                           u.list_ids(api.list_clusters,
                                      "tags__in=%s" %
                                      command_args.cluster_tag))
        command_args.cluster_ids_ = cluster_ids
    except AttributeError:
        pass

    association_ids = []
    try:
        # Parses association/ids if provided.
        if command_args.associations:
            association_ids = u.read_resources(command_args.associations)
        command_args.association_ids_ = association_ids
    except AttributeError:
        pass

    # Retrieve association/ids if provided.
    try:
        if command_args.association_tag:
            association_ids = (association_ids +
                               u.list_ids(api.list_associations,
                                          "tags__in=%s" %
                                          command_args.association_tag))
        command_args.association_ids_ = association_ids
    except AttributeError:
        pass

    logistic_regression_ids = []
    try:
        # Parses logisticregression/ids if provided.
        if command_args.logistic_regressions:
            logistic_regression_ids = u.read_resources(
                command_args.logistic_regressions)
        command_args.logistic_regression_ids_ = logistic_regression_ids
    except AttributeError:
        pass

    # Retrieve logisticregression/ids if provided.
    try:
        if command_args.logistic_tag:
            logistic_regression_ids = (
                logistic_regression_ids +
                u.list_ids(api.list_logistic_regressions,
                           "tags__in=%s" % command_args.logistic_tag))
        command_args.logistic_regression_ids_ = logistic_regression_ids
    except AttributeError:
        pass

    # Parses cluster names to generate datasets if provided
    try:
        if command_args.cluster_datasets:
            cluster_datasets_arg = [
                dataset.strip() for dataset in
                command_args.cluster_datasets.split(
                    command_args.args_separator)]
            command_args.cluster_datasets_ = cluster_datasets_arg
        else:
            command_args.cluster_datasets_ = []
    except AttributeError:
        pass

    # Parses cluster names to generate models if provided
    try:
        if command_args.cluster_models:
            cluster_models_arg = [
                model.strip() for model in
                command_args.cluster_models.split(
                    command_args.args_separator)]
            command_args.cluster_models_ = cluster_models_arg
        else:
            command_args.cluster_models_ = []
    except AttributeError:
        pass

    # Parses summary_fields to exclude from the clustering algorithm
    try:
        if command_args.summary_fields:
            summary_fields_arg = [
                field.strip() for field in
                command_args.summary_fields.split(
                    command_args.args_separator)]
            command_args.summary_fields_ = summary_fields_arg
        else:
            command_args.summary_fields_ = []
    except AttributeError:
        pass

    anomaly_ids = []
    try:
        # Parses anomaly/ids if provided.
        if command_args.anomalies:
            anomaly_ids = u.read_resources(command_args.anomalies)
        command_args.anomaly_ids_ = anomaly_ids
    except AttributeError:
        pass

    # Retrieve anomaly/ids if provided.
    try:
        if command_args.anomaly_tag:
            anomaly_ids = (anomaly_ids +
                           u.list_ids(api.list_anomalies,
                                      "tags__in=%s" %
                                      command_args.anomaly_tag))
        command_args.anomaly_ids_ = anomaly_ids
    except AttributeError:
        pass

    sample_ids = []
    try:
        # Parses sample/ids if provided.
        if command_args.samples:
            sample_ids = u.read_resources(command_args.samples)
        command_args.sample_ids_ = sample_ids
    except AttributeError:
        pass

    # Retrieve sample/ids if provided.
    try:
        if command_args.sample_tag:
            sample_ids = (
                sample_ids + u.list_ids(api.list_samples,
                                        "tags__in=%s" %
                                        command_args.sample_tag))
        command_args.sample_ids_ = sample_ids
    except AttributeError:
        pass

    # Parses sample row fields
    try:
        if command_args.row_fields:
            row_fields_arg = [field.strip() for field in
                              command_args.row_fields.split(
                                  command_args.args_separator)]
            command_args.row_fields_ = row_fields_arg
        else:
            command_args.row_fields_ = []
    except AttributeError:
        pass

    # Parses sample stat_fields
    try:
        if command_args.stat_fields:
            stat_fields_arg = [field.strip() for field in
                               command_args.stat_fields.split(
                                   command_args.args_separator)]
            command_args.stat_fields_ = stat_fields_arg
        else:
            command_args.stat_fields_ = []
    except AttributeError:
        pass

    return {"api": api, "args": command_args}
Example No. 21
def main(args=sys.argv[1:]):
    """Main process

    """
    train_stdin = False
    for i in range(0, len(args)):
        if args[i].startswith("--"):
            args[i] = args[i].replace("_", "-")
            if (args[i] == '--train'
                    and (i == len(args) - 1 or args[i + 1].startswith("--"))):
                train_stdin = True

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        for log_file in LOG_FILES:
            try:
                open(log_file, 'w', 0).close()
            except IOError:
                pass
    literal_args = args[:]
    for i in range(0, len(args)):
        if ' ' in args[i]:
            literal_args[i] = '"%s"' % args[i]
    message = "bigmler %s\n" % " ".join(literal_args)

    # Resume calls are not logged
    if not "--resume" in args:
        with open(COMMAND_LOG, "a", 0) as command_log:
            command_log.write(message)
        resume = False

    parser = create_parser(defaults=get_user_defaults(),
                           constants={
                               'NOW': NOW,
                               'MAX_MODELS': MAX_MODELS,
                               'PLURALITY': PLURALITY
                           })

    # Parses command line arguments.
    command_args = parser.parse_args(args)

    if command_args.cross_validation_rate > 0 and (command_args.test_set
                                                   or command_args.evaluate
                                                   or command_args.model
                                                   or command_args.models
                                                   or command_args.model_tag):
        parser.error("Non compatible flags: --cross-validation-rate"
                     " cannot be used with --evaluate, --model,"
                     " --models or --model-tag. Usage:\n\n"
                     "bigmler --train data/iris.csv "
                     "--cross-validation-rate 0.1")

    default_output = ('evaluation'
                      if command_args.evaluate else 'predictions.csv')
    if command_args.resume:
        debug = command_args.debug
        command = u.get_log_reversed(COMMAND_LOG, command_args.stack_level)
        args = shlex.split(command)[1:]
        try:
            position = args.index("--train")
            if (position == (len(args) - 1)
                    or args[position + 1].startswith("--")):
                train_stdin = True
        except ValueError:
            pass
        output_dir = u.get_log_reversed(DIRS_LOG, command_args.stack_level)
        defaults_file = "%s%s%s" % (output_dir, os.sep, DEFAULTS_FILE)
        parser = create_parser(defaults=get_user_defaults(defaults_file),
                               constants={
                                   'NOW': NOW,
                                   'MAX_MODELS': MAX_MODELS,
                                   'PLURALITY': PLURALITY
                               })
        command_args = parser.parse_args(args)
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (output_dir, os.sep, default_output))
        # Logs the issued command and the resumed command
        session_file = "%s%s%s" % (output_dir, os.sep, SESSIONS_LOG)
        u.log_message(message, log_file=session_file)
        message = "\nResuming command:\n%s\n\n" % command
        u.log_message(message, log_file=session_file, console=True)
        try:
            defaults_handler = open(defaults_file, 'r')
            contents = defaults_handler.read()
            message = "\nUsing the following defaults:\n%s\n\n" % contents
            u.log_message(message, log_file=session_file, console=True)
            defaults_handler.close()
        except IOError:
            pass

        resume = True
    else:
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep, default_output))
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = (
                "%s%s%s" % (NOW, os.sep, command_args.predictions))
        directory = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (directory, os.sep, SESSIONS_LOG)
        u.log_message(message + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, 'r')
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open("%s%s%s" % (directory, os.sep, DEFAULTS_FILE),
                                 'w', 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        with open(DIRS_LOG, "a", 0) as directory_log:
            directory_log.write("%s\n" % os.path.abspath(directory))

    if resume and debug:
        command_args.debug = True

    if train_stdin:
        command_args.training_set = StringIO.StringIO(sys.stdin.read())

    api_command_args = {
        'username': command_args.username,
        'api_key': command_args.api_key,
        'dev_mode': command_args.dev_mode,
        'debug': command_args.debug
    }

    if command_args.store:
        api_command_args.update({'storage': u.check_dir(session_file)})

    api = bigml.api.BigML(**api_command_args)

    if (command_args.evaluate
            and not (command_args.training_set or command_args.source
                     or command_args.dataset)
            and not (command_args.test_set and
                     (command_args.model or command_args.models
                      or command_args.model_tag or command_args.ensemble))):
        parser.error("Evaluation wrong syntax.\n"
                     "\nTry for instance:\n\nbigmler --train data/iris.csv"
                     " --evaluate\nbigmler --model "
                     "model/5081d067035d076151000011 --dataset "
                     "dataset/5081d067035d076151003423 --evaluate\n"
                     "bigmler --ensemble ensemble/5081d067035d076151003443"
                     " --evaluate")

    if command_args.objective_field:
        objective = command_args.objective_field
        try:
            command_args.objective_field = int(objective)
        except ValueError:
            pass

    output_args = {
        "api": api,
        "training_set": command_args.training_set,
        "test_set": command_args.test_set,
        "output": command_args.predictions,
        "objective_field": command_args.objective_field,
        "name": command_args.name,
        "training_set_header": command_args.train_header,
        "test_set_header": command_args.test_header,
        "args": command_args,
        "resume": resume,
    }

    # Reads description if provided.
    if command_args.description:
        description_arg = u.read_description(command_args.description)
        output_args.update(description=description_arg)
    else:
        output_args.update(description="Created using BigMLer")

    # Parses fields if provided.
    if command_args.field_attributes:
        field_attributes_arg = (u.read_field_attributes(
            command_args.field_attributes))
        output_args.update(field_attributes=field_attributes_arg)

    # Parses types if provided.
    if command_args.types:
        types_arg = u.read_types(command_args.types)
        output_args.update(types=types_arg)

    # Parses dataset fields if provided.
    if command_args.dataset_fields:
        dataset_fields_arg = map(lambda x: x.strip(),
                                 command_args.dataset_fields.split(','))
        output_args.update(dataset_fields=dataset_fields_arg)

    # Parses model input fields if provided.
    if command_args.model_fields:
        model_fields_arg = map(lambda x: x.strip(),
                               command_args.model_fields.split(','))
        output_args.update(model_fields=model_fields_arg)

    model_ids = []
    # Parses model/ids if provided.
    if command_args.models:
        model_ids = u.read_models(command_args.models)
        output_args.update(model_ids=model_ids)

    dataset_id = None
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_id = u.read_dataset(command_args.datasets)
        command_args.dataset = dataset_id

    # Retrieve model/ids if provided.
    if command_args.model_tag:
        model_ids = (model_ids + u.list_ids(
            api.list_models, "tags__in=%s" % command_args.model_tag))
        output_args.update(model_ids=model_ids)

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    if (command_args.method
            and command_args.method not in COMBINATION_WEIGHTS):
        command_args.method = 0
    else:
        combiner_methods = dict([[value, key]
                                 for key, value in COMBINER_MAP.items()])
        command_args.method = combiner_methods.get(command_args.method, 0)

    # Reads votes files in the provided directories.
    if command_args.votes_dirs:
        dirs = map(lambda x: x.strip(), command_args.votes_dirs.split(','))
        votes_path = os.path.dirname(command_args.predictions)
        votes_files = u.read_votes_files(dirs, votes_path)
        output_args.update(votes_files=votes_files)

    # Parses fields map if provided.
    if command_args.fields_map:
        fields_map_arg = u.read_fields_map(command_args.fields_map)
        output_args.update(fields_map=fields_map_arg)

    # Parses resources ids if provided.
    if command_args.delete:
        if command_args.predictions is None:
            path = NOW
        else:
            path = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
        message = u.dated("Retrieving objects to delete.\n")
        u.log_message(message,
                      log_file=session_file,
                      console=command_args.verbosity)
        delete_list = []
        if command_args.delete_list:
            delete_list = map(lambda x: x.strip(),
                              command_args.delete_list.split(','))
        if command_args.delete_file:
            if not os.path.exists(command_args.delete_file):
                raise Exception("File %s not found" % command_args.delete_file)
            delete_list.extend(
                [line for line in open(command_args.delete_file, "r")])
        if command_args.all_tag:
            query_string = "tags__in=%s" % command_args.all_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
            delete_list.extend(u.list_ids(api.list_models, query_string))
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        # Retrieve sources/ids if provided
        if command_args.source_tag:
            query_string = "tags__in=%s" % command_args.source_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
        # Retrieve datasets/ids if provided
        if command_args.dataset_tag:
            query_string = "tags__in=%s" % command_args.dataset_tag
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
        # Retrieve model/ids if provided
        if command_args.model_tag:
            query_string = "tags__in=%s" % command_args.model_tag
            delete_list.extend(u.list_ids(api.list_models, query_string))
        # Retrieve prediction/ids if provided
        if command_args.prediction_tag:
            query_string = "tags__in=%s" % command_args.prediction_tag
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
        # Retrieve evaluation/ids if provided
        if command_args.evaluation_tag:
            query_string = "tags__in=%s" % command_args.evaluation_tag
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        # Retrieve ensembles/ids if provided
        if command_args.ensemble_tag:
            query_string = "tags__in=%s" % command_args.ensemble_tag
            delete_list.extend(u.list_ids(api.list_ensembles, query_string))
        message = u.dated("Deleting objects.\n")
        u.log_message(message,
                      log_file=session_file,
                      console=command_args.verbosity)
        message = "\n".join(delete_list)
        u.log_message(message, log_file=session_file)
        u.delete(api, delete_list)
        if sys.platform == "win32" and sys.stdout.isatty():
            message = (u"\nGenerated files:\n\n" +
                       unicode(u.print_tree(path, " "), "utf-8") + u"\n")
        else:
            message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
        u.log_message(message,
                      log_file=session_file,
                      console=command_args.verbosity)
    elif (command_args.training_set or command_args.test_set
          or command_args.source or command_args.dataset
          or command_args.datasets or command_args.votes_dirs):
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example No. 22
def delete_resources(command_args, api):
    """Deletes the resources selected by the user given options

    """
    if command_args.predictions is None:
        path = a.NOW
    else:
        path = u.check_dir(command_args.predictions)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    message = u.dated("Retrieving objects to delete.\n")
    u.log_message(message,
                  log_file=session_file,
                  console=command_args.verbosity)
    delete_list = []
    if command_args.delete_list:
        delete_list = map(str.strip, command_args.delete_list.split(','))
    if command_args.delete_file:
        if not os.path.exists(command_args.delete_file):
            sys.exit("File %s not found" % command_args.delete_file)
        delete_list.extend(
            [line for line in open(command_args.delete_file, "r")])

    resource_selectors = [(command_args.source_tag, api.list_sources),
                          (command_args.dataset_tag, api.list_datasets),
                          (command_args.model_tag, api.list_models),
                          (command_args.prediction_tag, api.list_predictions),
                          (command_args.evaluation_tag, api.list_evaluations),
                          (command_args.ensemble_tag, api.list_ensembles),
                          (command_args.batch_prediction_tag,
                           api.list_batch_predictions)]

    query_string = None
    if command_args.older_than:
        date_str = get_date(command_args.older_than, api)
        if date_str:
            query_string = "created__lt=%s" % date_str
        else:
            sys.exit("The --older-than and --newer-than flags only accept "
                     "integers (number of days), dates in YYYY-MM-DD format "
                     " and resource ids. Please, double-check your input.")

    if command_args.newer_than:
        date_str = get_date(command_args.newer_than, api)
        if date_str:
            if query_string is None:
                query_string = ""
            else:
                query_string += ";"
            query_string += "created__gt=%s" % date_str
        else:
            sys.exit("The --older-than and --newer-than flags only accept "
                     "integers (number of days), dates in YYYY-MM-DD format "
                     " and resource ids. Please, double-check your input.")

    if (any([selector[0] is not None for selector in resource_selectors])
            or command_args.all_tag):
        if query_string is None:
            query_string = ""
        else:
            query_string += ";"
        for selector, api_call in resource_selectors:
            combined_query = query_string
            if command_args.all_tag:
                combined_query += "tags__in=%s" % command_args.all_tag
                delete_list.extend(u.list_ids(api_call, combined_query))
            elif selector:
                combined_query += "tags__in=%s" % selector
                delete_list.extend(u.list_ids(api_call, combined_query))
    else:
        if query_string:
            for selector, api_call in resource_selectors:
                delete_list.extend(u.list_ids(api_call, query_string))

    message = u.dated("Deleting objects.\n")
    u.log_message(message,
                  log_file=session_file,
                  console=command_args.verbosity)
    message = "\n".join(delete_list)
    u.log_message(message, log_file=session_file)
    u.delete(api, delete_list)
    if sys.platform == "win32" and sys.stdout.isatty():
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message,
                  log_file=session_file,
                  console=command_args.verbosity)
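
The --older-than and --newer-than filters compose into a single ;-joined query string before any tag filter is appended. A sketch of that composition with hypothetical timestamps:

older_than = "2023-06-01T00:00:00"   # as resolved from --older-than
newer_than = "2023-01-01T00:00:00"   # as resolved from --newer-than

query_string = None
if older_than:
    query_string = "created__lt=%s" % older_than
if newer_than:
    # Join the two date filters with ";" when both are present.
    query_string = "" if query_string is None else query_string + ";"
    query_string += "created__gt=%s" % newer_than
print(query_string)
# created__lt=2023-06-01T00:00:00;created__gt=2023-01-01T00:00:00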
Example No. 23
def models_processing(datasets, models, model_ids, objective_field, fields,
                      api, args, resume,
                      name=None, description=None, model_fields=None,
                      session_file=None, path=None,
                      log=None, labels=None, multi_label_data=None,
                      other_label=None):
    """Creates or retrieves models from the input data

    """
    ensemble_ids = []

    # If we have a dataset but not a model, we create the model if the
    # no_model flag hasn't been set.
    if datasets and not (has_models(args) or args.no_model):
        dataset = datasets[0]
        model_ids = []
        models = []
        if args.multi_label:
            # If --number-of-models is not set or is 1, create one model per
            # label. Otherwise, create one ensemble per label with the required
            # number of models
            if args.number_of_models < 2:
                models, model_ids, resume = model_per_label(
                    labels, datasets, fields,
                    objective_field, api, args, resume, name, description,
                    model_fields, multi_label_data, session_file, path, log)
            else:
                (ensembles, ensemble_ids,
                 models, model_ids, resume) = ensemble_per_label(
                     labels, dataset, fields,
                     objective_field, api, args, resume, name, description,
                     model_fields, multi_label_data, session_file, path, log)

        elif args.number_of_models > 1:
            ensembles = []
            # Ensemble of models
            (ensembles, ensemble_ids,
             models, model_ids, resume) = ensemble_processing(
                 datasets, objective_field, fields, api, args, resume,
                 name=name, description=description, model_fields=model_fields,
                 session_file=session_file, path=path, log=log)
            ensemble = ensembles[0]
            args.ensemble = bigml.api.get_ensemble_id(ensemble)

        else:
            # Set of partial datasets created setting args.max_categories
            if len(datasets) > 1 and args.max_categories:
                args.number_of_models = len(datasets)
            # Cross-validation case: we create 2 * n models to be validated
            # holding out n% of the data
            if args.cross_validation_rate > 0:
                if args.number_of_evaluations > 0:
                    args.number_of_models = args.number_of_evaluations
                else:
                    args.number_of_models = int(MONTECARLO_FACTOR *
                                                args.cross_validation_rate)
            if resume:
                resume, model_ids = c.checkpoint(
                    c.are_models_created, path, args.number_of_models,
                    debug=args.debug)
                if not resume:
                    message = u.dated("Found %s models out of %s. Resuming.\n"
                                      % (len(model_ids),
                                        args.number_of_models))
                    u.log_message(message, log_file=session_file,
                                  console=args.verbosity)

                models = model_ids
                args.number_of_models -= len(model_ids)
            if args.max_categories > 0:
                objective_field = None

            model_args = r.set_model_args(name, description, args,
                                          objective_field, fields,
                                          model_fields, other_label)
            models, model_ids = r.create_models(datasets, models,
                                                model_args, args, api,
                                                path, session_file, log)
    # If a model is provided, we use it.
    elif args.model:
        model_ids = [args.model]
        models = model_ids[:]

    elif args.models or args.model_tag:
        models = model_ids[:]

    if args.ensemble:
        ensemble = r.get_ensemble(args.ensemble, api, args.verbosity,
                                  session_file)
        ensemble_ids = [ensemble]
        model_ids = ensemble['object']['models']

        models = model_ids[:]

    if args.ensembles or args.ensemble_tag:
        model_ids = []
        ensemble_ids = []
        # Retrieves ensemble ids from the tag or from the ensembles option.
        if args.ensemble_tag:
            ensemble_ids = (ensemble_ids +
                            u.list_ids(api.list_ensembles,
                                       "tags__in=%s" % args.ensemble_tag))
        else:
            ensemble_ids = u.read_resources(args.ensembles)
        for ensemble_id in ensemble_ids:
            ensemble = r.get_ensemble(ensemble_id, api)
            if args.ensemble is None:
                args.ensemble = ensemble_id
            model_ids.extend(ensemble['object']['models'])
        models = model_ids[:]

    # If we are going to predict we must retrieve the models
    if model_ids and args.test_set and not args.evaluate:
        models, model_ids = r.get_models(models, args, api, session_file)

    return models, model_ids, ensemble_ids, resume
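
The cross-validation branch sizes the model set from the hold-out rate; the "2 * n models" comment implies MONTECARLO_FACTOR is 200, although the real constant lives in bigmler's own modules. A sketch of the arithmetic under that assumption:

MONTECARLO_FACTOR = 200      # assumed value, consistent with "2 * n models"
cross_validation_rate = 0.1  # hold out 10% of the data
number_of_evaluations = 0    # --number-of-evaluations not set

if number_of_evaluations > 0:
    number_of_models = number_of_evaluations
else:
    number_of_models = int(MONTECARLO_FACTOR * cross_validation_rate)
print(number_of_models)  # 20, i.e. 2 * 10 for a 10% hold-out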
Example No. 24
def get_output_args(api, train_stdin, test_stdin, command_args, resume):
    """Returns the output args needed for the main bigmler computation process

    """
    if train_stdin:
        if test_stdin:
            sys.exit("The standard input can't be used both for training and"
                     " testing. Choose one of them")
        command_args.training_set = StringIO.StringIO(sys.stdin.read())
    elif test_stdin:
        command_args.test_set = StringIO.StringIO(sys.stdin.read())

    if command_args.objective_field:
        objective = command_args.objective_field
        try:
            command_args.objective_field = int(objective)
        except ValueError:
            if not command_args.train_header:
                sys.exit("The %s has been set as objective field but"
                         " the file has not been marked as containing"
                         " headers.\nPlease set the --train-header flag if"
                         " the file has headers or use a column number"
                         " to set the objective field." % objective)

    output_args = {
        "api": api,
        "training_set": command_args.training_set,
        "test_set": command_args.test_set,
        "output": command_args.predictions,
        "objective_field": command_args.objective_field,
        "name": command_args.name,
        "training_set_header": command_args.train_header,
        "test_set_header": command_args.test_header,
        "args": command_args,
        "resume": resume,
    }

    # Reads description if provided.
    if command_args.description:
        description_arg = u.read_description(command_args.description)
        output_args.update(description=description_arg)
    else:
        output_args.update(description="Created using BigMLer")

    # Parses fields if provided.
    if command_args.field_attributes:
        field_attributes_arg = (
            u.read_field_attributes(command_args.field_attributes))
        output_args.update(field_attributes=field_attributes_arg)
    if command_args.test_field_attributes:
        field_attributes_arg = (
            u.read_field_attributes(command_args.test_field_attributes))
        output_args.update(test_field_attributes=field_attributes_arg)

    # Parses types if provided.
    if command_args.types:
        types_arg = u.read_types(command_args.types)
        output_args.update(types=types_arg)
    if command_args.test_types:
        types_arg = u.read_types(command_args.test_types)
        output_args.update(test_types=types_arg)

    # Parses dataset fields if provided.
    if command_args.dataset_fields:
        dataset_fields_arg = map(str.strip,
                                 command_args.dataset_fields.split(
                                     command_args.args_separator))
        output_args.update(dataset_fields=dataset_fields_arg)

    # Parses model input fields if provided.
    if command_args.model_fields:
        model_fields_arg = map(lambda x: x.strip(),
                               command_args.model_fields.split(
                                   command_args.args_separator))
        output_args.update(model_fields=model_fields_arg)

    model_ids = []
    # Parses model/ids if provided.
    if command_args.models:
        model_ids = u.read_resources(command_args.models)
        output_args.update(model_ids=model_ids)

    # Retrieve model/ids if provided.
    if command_args.model_tag:
        model_ids = (model_ids +
                     u.list_ids(api.list_models,
                                "tags__in=%s" % command_args.model_tag))
        output_args.update(model_ids=model_ids)

    # Reads votes files in the provided directories.
    if command_args.votes_dirs:
        dirs = map(str.strip, command_args.votes_dirs.split(
            command_args.args_separator))
        votes_path = os.path.dirname(command_args.predictions)
        votes_files = u.read_votes_files(dirs, votes_path)
        output_args.update(votes_files=votes_files)

    # Parses fields map if provided.
    if command_args.fields_map:
        fields_map_arg = u.read_fields_map(command_args.fields_map)
        output_args.update(fields_map=fields_map_arg)

    return output_args
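
Buffering stdin into a StringIO object lets the rest of the pipeline treat it as a reusable file handle. A sketch using io.StringIO (the Python 3 counterpart of the StringIO.StringIO calls above) on canned data instead of sys.stdin:

import io

# Stand-in for StringIO.StringIO(sys.stdin.read()) in the train_stdin branch.
training_set = io.StringIO(u"sepal length,species\n5.1,setosa\n")

print(training_set.readline().strip())  # header row
print(training_set.readline().strip())  # first data row
training_set.seek(0)                    # the buffer can be re-read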
Example No. 25
def transform_args(command_args, flags, api, user_defaults):
    """Transforms the formatted argument strings into structured arguments

    """
    # Parses attributes in json format if provided
    command_args.json_args = {}

    for resource_type in RESOURCE_TYPES:
        attributes_file = getattr(command_args,
                                  "%s_attributes" % resource_type, None)
        if attributes_file is not None:
            command_args.json_args[resource_type] = u.read_json(
                attributes_file)
        else:
            command_args.json_args[resource_type] = {}

    # Parses dataset generators in json format if provided
    if command_args.new_fields:
        json_generators = u.read_json(command_args.new_fields)
        command_args.dataset_json_generators = json_generators
    else:
        command_args.dataset_json_generators = {}

    # Parses multi-dataset attributes in json such as field maps
    if command_args.multi_dataset_attributes:
        multi_dataset_json = u.read_json(command_args.multi_dataset_attributes)
        command_args.multi_dataset_json = multi_dataset_json
    else:
        command_args.multi_dataset_json = {}

    dataset_ids = None
    command_args.dataset_ids = []
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_ids = u.read_datasets(command_args.datasets)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Reading test dataset ids is delayed until the moment of use to ensure
    # that newly generated resource files can be used there too
    command_args.test_dataset_ids = []

    # Retrieve dataset/ids if provided.
    if command_args.dataset_tag:
        if dataset_ids is None:
            dataset_ids = []
        dataset_ids.extend(
            u.list_ids(api.list_datasets,
                       "tags__in=%s" % command_args.dataset_tag))
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    try:
        if (command_args.method and command_args.method != COMBINATION_LABEL
                and command_args.method not in COMBINATION_WEIGHTS):
            command_args.method = 0
        else:
            combiner_methods = dict([[value, key]
                                     for key, value in COMBINER_MAP.items()])
            combiner_methods[COMBINATION_LABEL] = COMBINATION
            command_args.method = combiner_methods.get(command_args.method, 0)
    except AttributeError:
        pass

    # Checks missing_strategy
    try:
        if (command_args.missing_strategy and
                command_args.missing_strategy not in MISSING_STRATEGIES):
            command_args.missing_strategy = 0
        else:
            command_args.missing_strategy = MISSING_STRATEGIES.get(
                command_args.missing_strategy, 0)
    except AttributeError:
        pass

    # Adds replacement=True if creating ensemble and nothing is specified
    try:
        if (command_args.number_of_models > 1 and not command_args.replacement
                and '--no-replacement' not in flags
                and 'replacement' not in user_defaults
                and '--no-randomize' not in flags
                and 'randomize' not in user_defaults
                and '--sample-rate' not in flags
                and 'sample_rate' not in user_defaults):
            command_args.replacement = True
    except AttributeError:
        pass
    try:
        # Old value for --prediction-info='full data' maps to 'full'
        if command_args.prediction_info == 'full data':
            print("WARNING: 'full data' is a deprecated value. Use"
                  " 'full' instead")
            command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass

    # Parses class, weight pairs for objective weight
    try:
        if command_args.objective_weights:
            objective_weights = (u.read_objective_weights(
                command_args.objective_weights))
            command_args.objective_weights_json = objective_weights
    except AttributeError:
        pass

    try:
        command_args.multi_label_fields_list = []
        if command_args.multi_label_fields is not None:
            multi_label_fields = command_args.multi_label_fields.strip()
            command_args.multi_label_fields_list = multi_label_fields.split(
                command_args.args_separator)
    except AttributeError:
        pass

    # Sets shared_flag if --shared or --unshared has been used
    command_args.shared_flag = ('--shared' in flags
                                or '--unshared' in flags)

    # Sets remote on if scoring a training dataset in bigmler anomaly
    try:
        if command_args.score:
            command_args.remote = True
            if "--prediction-info" not in flags:
                command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass

    command_args.has_models_ = (
        (hasattr(command_args, 'model') and command_args.model)
        or (hasattr(command_args, 'models') and command_args.models)
        or (hasattr(command_args, 'ensemble') and command_args.ensemble)
        or (hasattr(command_args, 'ensembles') and command_args.ensembles)
        or (hasattr(command_args, 'cluster') and command_args.cluster)
        or (hasattr(command_args, 'clusters') and command_args.clusters)
        or (hasattr(command_args, 'model_tag') and command_args.model_tag)
        or (hasattr(command_args, 'anomaly') and command_args.anomaly)
        or (hasattr(command_args, 'anomalies') and command_args.anomalies)
        or (hasattr(command_args, 'ensemble_tag')
            and command_args.ensemble_tag)
        or (hasattr(command_args, 'cluster_tag') and command_args.cluster_tag)
        or (hasattr(command_args, 'anomaly_tag')
            and command_args.anomaly_tag))

    command_args.has_datasets_ = (
        (hasattr(command_args, 'dataset') and command_args.dataset)
        or (hasattr(command_args, 'datasets') and command_args.datasets)
        or (hasattr(command_args, 'dataset_tag') and command_args.dataset_tag))

    command_args.has_test_datasets_ = (
        (hasattr(command_args, 'test_dataset') and command_args.test_dataset)
        or
        (hasattr(command_args, 'test_datasets') and command_args.test_datasets)
        or (hasattr(command_args, 'test_dataset_tag')
            and command_args.test_dataset_tag))
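
The has_models_ expression repeats a hasattr-and-truthy test per option; the same check can be folded into a getattr loop. A sketch over a hypothetical namespace:

class CommandArgs(object):
    """Hypothetical stand-in for the parsed argument namespace."""
    model = None
    models = "models.txt"  # only this option is set

command_args = CommandArgs()
MODEL_ATTRS = ("model", "models", "ensemble", "ensembles", "model_tag")

# getattr(obj, attr, None) covers both the hasattr test and the value test.
has_models = any(getattr(command_args, attr, None) for attr in MODEL_ATTRS)
print(has_models)  # True, because models is set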
Example No. 26
def main(args=sys.argv[1:]):
    """Main process

    """
    train_stdin = False
    test_stdin = False
    flags = []
    for i in range(0, len(args)):
        if args[i].startswith("--"):
            flag = args[i]
            # syntax --flag=value
            if "=" in flag:
                flag = args[i][0: flag.index("=")]
            flag = flag.replace("_", "-")
            flags.append(flag)
            if (flag == '--train' and
                    (i == len(args) - 1 or args[i + 1].startswith("--"))):
                train_stdin = True
            elif (flag == '--test' and
                    (i == len(args) - 1 or args[i + 1].startswith("--"))):
                test_stdin = True

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        for log_file in LOG_FILES:
            try:
                open(log_file, 'w', 0).close()
            except IOError:
                pass
    literal_args = args[:]
    for i in range(0, len(args)):
        # quoting literals with blanks: 'petal length'
        if ' ' in args[i]:
            prefix = ""
            literal = args[i]
            # literals with blanks after "+" or "-": +'petal length'
            if args[i][0] in r.ADD_REMOVE_PREFIX:
                prefix = args[i][0]
                literal = args[i][1:]
            literal_args[i] = '%s"%s"' % (prefix, literal)
    message = "bigmler %s\n" % " ".join(literal_args)

    # Resume calls are not logged
    if not "--resume" in args:
        with open(COMMAND_LOG, "a", 0) as command_log:
            command_log.write(message)
        resume = False
    user_defaults = get_user_defaults()
    parser = create_parser(defaults=user_defaults,
                           constants={'NOW': NOW,
                                      'MAX_MODELS': MAX_MODELS,
                                      'PLURALITY': PLURALITY})

    # Parses command line arguments.
    command_args = parser.parse_args(args)

    if command_args.cross_validation_rate > 0 and (
            non_compatible(command_args, '--cross-validation-rate')):
        parser.error("Non compatible flags: --cross-validation-rate"
                     " cannot be used with --evaluate, --model,"
                     " --models or --model-tag. Usage:\n\n"
                     "bigmler --train data/iris.csv "
                     "--cross-validation-rate 0.1")

    if train_stdin and command_args.multi_label:
        parser.error("Reading multi-label training sets from stream "
                     "is not yet available.")

    if test_stdin and command_args.resume:
        parser.error("Can't resume when using stream reading test sets.")

    default_output = ('evaluation' if command_args.evaluate
                      else 'predictions.csv')
    if command_args.resume:
        debug = command_args.debug
        command = u.get_log_reversed(COMMAND_LOG,
                                     command_args.stack_level)
        args = shlex.split(command)[1:]
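        # Recovers the options of the resumed command by re-parsing the
        # last command logged in COMMAND_LOG, re-checking whether training
        # or test data came from the standard input.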
        try:
            position = args.index("--train")
            train_stdin = (position == (len(args) - 1) or
                           args[position + 1].startswith("--"))
        except ValueError:
            pass
        try:
            position = args.index("--test")
            test_stdin = (position == (len(args) - 1) or
                          args[position + 1].startswith("--"))
        except ValueError:
            pass
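        # DIRS_LOG keeps one output directory per issued command, so the
        # directory of the resumed command is recovered at the same stack
        # level.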
        output_dir = u.get_log_reversed(DIRS_LOG,
                                        command_args.stack_level)
        defaults_file = "%s%s%s" % (output_dir, os.sep, DEFAULTS_FILE)
        user_defaults = get_user_defaults(defaults_file)
        parser = create_parser(defaults=user_defaults,
                               constants={'NOW': NOW,
                                          'MAX_MODELS': MAX_MODELS,
                                          'PLURALITY': PLURALITY})
        command_args = parser.parse_args(args)
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (output_dir, os.sep,
                                         default_output))
        # Logs the issued command and the resumed command
        session_file = "%s%s%s" % (output_dir, os.sep, SESSIONS_LOG)
        u.log_message(message, log_file=session_file)
        message = "\nResuming command:\n%s\n\n" % command
        u.log_message(message, log_file=session_file, console=True)
        try:
            with open(defaults_file, 'r') as defaults_handler:
                contents = defaults_handler.read()
            message = "\nUsing the following defaults:\n%s\n\n" % contents
            u.log_message(message, log_file=session_file, console=True)
        except IOError:
            pass

        resume = True
    else:
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep,
                                         default_output))
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep,
                                         command_args.predictions))
        directory = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (directory, os.sep, SESSIONS_LOG)
        u.log_message(message + "\n", log_file=session_file)
        try:
            with open(DEFAULTS_FILE, 'r') as defaults_file:
                contents = defaults_file.read()
            with open("%s%s%s" % (directory, os.sep, DEFAULTS_FILE),
                      'w', 0) as defaults_copy:
                defaults_copy.write(contents)
        except IOError:
            pass
        with open(DIRS_LOG, "a", 0) as directory_log:
            directory_log.write("%s\n" % os.path.abspath(directory))

    if resume and debug:
        command_args.debug = True

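    # The standard input is read once and wrapped in a file-like object,
    # since downstream code expects file-like training and test sets.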
    if train_stdin:
        if test_stdin:
            sys.exit("The standard input can't be used both for training and"
                     " testing. Choose one of them")
        command_args.training_set = StringIO.StringIO(sys.stdin.read())
    elif test_stdin:
        command_args.test_set = StringIO.StringIO(sys.stdin.read())

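    # Connection options for the BigML API client. With --store, the
    # client's storage option points to the output directory, where the
    # remote resources are expected to be saved as well.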
    api_command_args = {
        'username': command_args.username,
        'api_key': command_args.api_key,
        'dev_mode': command_args.dev_mode,
        'debug': command_args.debug}

    if command_args.store:
        api_command_args.update({'storage': u.check_dir(session_file)})

    api = bigml.api.BigML(**api_command_args)

    if (command_args.evaluate
        and not (command_args.training_set or command_args.source
                 or command_args.dataset)
        and not ((command_args.test_set or command_args.test_split) and
                 (command_args.model or
                  command_args.models or command_args.model_tag or
                  command_args.ensemble or command_args.ensembles or
                  command_args.ensemble_tag))):
        parser.error("Evaluation wrong syntax.\n"
                     "\nTry for instance:\n\nbigmler --train data/iris.csv"
                     " --evaluate\nbigmler --model "
                     "model/5081d067035d076151000011 --dataset "
                     "dataset/5081d067035d076151003423 --evaluate\n"
                     "bigmler --ensemble ensemble/5081d067035d076151003443"
                     " --dataset "
                     "dataset/5081d067035d076151003423 --evaluate")

    if command_args.objective_field:
        objective = command_args.objective_field
        try:
            command_args.objective_field = int(objective)
        except ValueError:
            if not command_args.train_header:
                sys.exit("The %s has been set as objective field but"
                         " the file has not been marked as containing"
                         " headers.\nPlease set the --train-header flag if"
                         " the file has headers or use a column number"
                         " to set the objective field." % objective)

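    # These arguments are eventually handed to compute_output as keyword
    # arguments, extended below with the parsed user options.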
    output_args = {
        "api": api,
        "training_set": command_args.training_set,
        "test_set": command_args.test_set,
        "output": command_args.predictions,
        "objective_field": command_args.objective_field,
        "name": command_args.name,
        "training_set_header": command_args.train_header,
        "test_set_header": command_args.test_header,
        "args": command_args,
        "resume": resume,
    }

    # Reads description if provided.
    if command_args.description:
        description_arg = u.read_description(command_args.description)
        output_args.update(description=description_arg)
    else:
        output_args.update(description="Created using BigMLer")

    # Parses fields if provided.
    if command_args.field_attributes:
        field_attributes_arg = (
            u.read_field_attributes(command_args.field_attributes))
        output_args.update(field_attributes=field_attributes_arg)

    # Parses types if provided.
    if command_args.types:
        types_arg = u.read_types(command_args.types)
        output_args.update(types=types_arg)

    # Parses dataset fields if provided.
    if command_args.dataset_fields:
        dataset_fields_arg = map(str.strip,
                                 command_args.dataset_fields.split(','))
        output_args.update(dataset_fields=dataset_fields_arg)

    # Parses model input fields if provided.
    if command_args.model_fields:
        model_fields_arg = map(str.strip,
                               command_args.model_fields.split(','))
        output_args.update(model_fields=model_fields_arg)

    model_ids = []
    # Parses model/ids if provided.
    if command_args.models:
        model_ids = u.read_resources(command_args.models)
        output_args.update(model_ids=model_ids)

    dataset_id = None
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_id = u.read_dataset(command_args.datasets)
        command_args.dataset = dataset_id

    # Retrieve model/ids if provided.
    if command_args.model_tag:
        model_ids = (model_ids +
                     u.list_ids(api.list_models,
                                "tags__in=%s" % command_args.model_tag))
        output_args.update(model_ids=model_ids)

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so (note that
    # command_args.no_tag is expected to hold a falsy value only when
    # tagging has been disabled).
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
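    # COMBINER_MAP is inverted (name -> code) so that the user-facing
    # method name can be translated to its numeric code; unknown methods
    # fall back to 0 (plurality).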
    if (command_args.method and
            command_args.method not in COMBINATION_WEIGHTS):
        command_args.method = 0
    else:
        combiner_methods = dict((value, key)
                                for key, value in COMBINER_MAP.items())
        command_args.method = combiner_methods.get(command_args.method, 0)

    # Adds replacement=True when creating an ensemble and no sampling
    # options are specified.
    if (command_args.number_of_models > 1 and
            not command_args.replacement and
            '--no-replacement' not in flags and
            'replacement' not in user_defaults and
            '--no-randomize' not in flags and
            'randomize' not in user_defaults and
            '--sample-rate' not in flags and
            'sample_rate' not in user_defaults):
        command_args.replacement = True

    # Reads votes files in the provided directories.
    if command_args.votes_dirs:
        dirs = map(str.strip, command_args.votes_dirs.split(','))
        votes_path = os.path.dirname(command_args.predictions)
        votes_files = u.read_votes_files(dirs, votes_path)
        output_args.update(votes_files=votes_files)

    # Parses fields map if provided.
    if command_args.fields_map:
        fields_map_arg = u.read_fields_map(command_args.fields_map)
        output_args.update(fields_map=fields_map_arg)

    # Old value for --prediction-info='full data' maps to 'full'
    if command_args.prediction_info == 'full data':
        print "WARNING: 'full data' is a deprecated value. Use 'full' instead"
        command_args.prediction_info = FULL_FORMAT

    # Builds the list of resources to be deleted, if requested.
    if command_args.delete:
        if command_args.predictions is None:
            path = NOW
        else:
            path = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
        message = u.dated("Retrieving objects to delete.\n")
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
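        # The deletion list aggregates ids given inline, ids read from a
        # file, and ids retrieved by tag queries.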
        delete_list = []
        if command_args.delete_list:
            delete_list = map(str.strip,
                              command_args.delete_list.split(','))
        if command_args.delete_file:
            if not os.path.exists(command_args.delete_file):
                sys.exit("File %s not found" % command_args.delete_file)
            delete_list.extend([line.strip() for line
                                in open(command_args.delete_file, "r")])
        if command_args.all_tag:
            query_string = "tags__in=%s" % command_args.all_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
            delete_list.extend(u.list_ids(api.list_models, query_string))
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        # Retrieves ids by tag for each resource type, if provided.
        tag_api_calls = [(command_args.source_tag, api.list_sources),
                         (command_args.dataset_tag, api.list_datasets),
                         (command_args.model_tag, api.list_models),
                         (command_args.prediction_tag, api.list_predictions),
                         (command_args.evaluation_tag, api.list_evaluations),
                         (command_args.ensemble_tag, api.list_ensembles)]
        for tag, api_call in tag_api_calls:
            if tag:
                delete_list.extend(
                    u.list_ids(api_call, "tags__in=%s" % tag))
        message = u.dated("Deleting objects.\n")
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
        message = "\n".join(delete_list)
        u.log_message(message, log_file=session_file)
        u.delete(api, delete_list)
        if sys.platform == "win32" and sys.stdout.isatty():
            message = (u"\nGenerated files:\n\n" +
                       unicode(u.print_tree(path, " "), "utf-8") + u"\n")
        else:
            message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
    elif (command_args.training_set or command_args.test_set
          or command_args.source or command_args.dataset
          or command_args.datasets or command_args.votes_dirs):
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)