Example 1
# Assumed imports, following BigMLer's module layout (adjust to your version):
import sys
import os

import bigmler.utils as u                 # directory checks, resource helpers
import bigmler.resources as r             # create/update/publish wrappers
import bigmler.pre_model_steps as pms     # shared source/dataset pre-steps
import bigmler.processing.samples as psa  # sample-specific processing

from bigmler.dispatcher import SESSIONS_LOG, clear_log_files
from bigmler.reports import clear_reports, upload_reports
# `sample_file` (used below) is provided by the same sample subpackage.


def compute_output(api, args):
    """ Creates a sample based on a `train_set`, source or dataset.

    """

    samples = None
    # initialize to avoid a NameError when no sample is retrieved below
    sample = None
    # variables from command-line options
    resume = args.resume_
    sample_ids = args.sample_ids_
    output = args.predictions
    # there's only one sample to be generated at present
    args.max_parallel_clusters = 1
    # sample cannot be published yet.
    args.public_sample = False

    # It is compulsory to have a description to publish either datasets or
    # samples
    if (not args.description_ and (args.public_sample or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-sample step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-sample step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(api, args, resume, source,
                                              csv_properties, fields,
                                              session_file, path, log)
    (_, datasets, _, resume, csv_properties, fields) = dataset_properties
    if args.sample_file:
        # sample is retrieved from the contents of the given local JSON file
        sample, csv_properties, fields = u.read_local_resource(
            args.sample_file, csv_properties=csv_properties)
        samples = [sample]
        sample_ids = [sample['resource']]
    else:
        # sample is retrieved from the remote object
        samples, sample_ids, resume = psa.samples_processing(
            datasets,
            samples,
            sample_ids,
            api,
            args,
            resume,
            session_file=session_file,
            path=path,
            log=log)
        if samples:
            sample = samples[0]

    # We update the sample's public state if needed
    if sample:
        if isinstance(sample, str):  # `basestring` in the original Python 2 code
            # resolve the sample id string into the finished sample resource
            sample = u.check_resource(sample, api.get_sample)
        samples[0] = sample
        if (args.public_sample or
                (args.shared_flag and r.shared_changed(args.shared, sample))):
            sample_args = {}
            if args.shared_flag and r.shared_changed(args.shared, sample):
                sample_args.update(shared=args.shared)
            if args.public_sample:
                sample_args.update(r.set_publish_sample_args(args))
            if sample_args:
                sample = r.update_sample(sample,
                                         sample_args,
                                         args,
                                         api=api,
                                         path=path,
                                         session_file=session_file)
                samples[0] = sample

    # We get the fields of the sample if we haven't got
    # them yet and need them
    if sample and psa.needs_sample_fields(args):
        fields = psa.get_sample_fields(sample, csv_properties, args)

    sample_file(samples[0],
                fields,
                args,
                api,
                path=path,
                session_file=session_file)

    u.print_generated_files(path,
                            log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
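
A minimal driver sketch (hypothetical): BigMLer's own CLI builds `args` from parsed command-line options, and the helper modules read more attributes than compute_output touches directly, so this illustrates the call shape rather than a complete run. The `BigML` client is the official bigml Python bindings; every attribute value below is an assumption.

from argparse import Namespace
from bigml.api import BigML   # official BigML Python bindings

api = BigML()  # credentials from BIGML_USERNAME / BIGML_API_KEY env variables

# Only the attributes compute_output reads directly; every value is assumed.
args = Namespace(
    resume_=False, sample_ids_=[], predictions="out/predictions.csv",
    description_="demo sample", public_dataset=False, new_fields=None,
    dataset=None, sample_file=None, log_file=None, shared_flag=False,
    shared=False, export_fields=None, reports=None, upload=False,
    verbosity=1)

compute_output(api, args)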
Example 2

(The same dispatcher with reflowed call sites; it additionally exports the field structure to a CSV when args.export_fields is set.)
# Same assumed imports as in Example 1.
def compute_output(api, args):
    """ Creates a sample based on a `train_set`, source or dataset.

    """

    samples = None
    # initialize to avoid a NameError when no sample is retrieved below
    sample = None
    # variables from command-line options
    resume = args.resume_
    sample_ids = args.sample_ids_
    output = args.predictions
    # there's only one sample to be generated at present
    args.max_parallel_clusters = 1
    # sample cannot be published yet.
    args.public_sample = False

    # It is compulsory to have a description to publish either datasets or
    # samples
    if (not args.description_ and (args.public_sample or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-sample step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-sample step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, _, resume,
     csv_properties, fields) = dataset_properties
    if args.sample_file:
        # sample is retrieved from the contents of the given local JSON file
        sample, csv_properties, fields = u.read_local_resource(
            args.sample_file,
            csv_properties=csv_properties)
        samples = [sample]
        sample_ids = [sample['resource']]
    else:
        # sample is retrieved from the remote object
        samples, sample_ids, resume = psa.samples_processing(
            datasets, samples, sample_ids, api, args, resume,
            session_file=session_file, path=path, log=log)
        if samples:
            sample = samples[0]

    # We update the sample's public state if needed
    if sample:
        if isinstance(sample, str):  # `basestring` in the original Python 2 code
            # resolve the sample id string into the finished sample resource
            sample = u.check_resource(sample, api.get_sample)
        samples[0] = sample
        if (args.public_sample or
                (args.shared_flag and r.shared_changed(args.shared, sample))):
            sample_args = {}
            if args.shared_flag and r.shared_changed(args.shared, sample):
                sample_args.update(shared=args.shared)
            if args.public_sample:
                sample_args.update(r.set_publish_sample_args(args))
            if sample_args:
                sample = r.update_sample(sample, sample_args, args,
                                         api=api, path=path,
                                         session_file=session_file)
                samples[0] = sample

    # We get the fields of the sample if we haven't got
    # them yet and need them
    if sample and psa.needs_sample_fields(args):
        fields = psa.get_sample_fields(sample, csv_properties, args)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    sample_file(samples[0], fields, args, api, path=path,
                session_file=session_file)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
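
For reference, the u.check_resource(sample, api.get_sample) step in both listings is the usual wait-until-finished pattern. A rough equivalent with the plain bigml bindings, where api.ok is the bindings' standard polling helper and the sample id is a hypothetical placeholder:

from bigml.api import BigML

api = BigML()
sample = api.get_sample("sample/53739b98d994972da7001de4")  # hypothetical id
api.ok(sample)  # polls in place until the resource reaches the FINISHED state
print(sample["object"]["name"])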