Example #1
def main(argv):
    output_dir = os.path.join("datasets", "tfrecords")

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Get all possible datasets we can generate
    adaptation_problems = datasets.names()

    # Save tfrecord files for each of the adaptation problems
    if FLAGS.parallel:
        # TensorFlow would fail because every parallel process tries to grab
        # ~90% of the GPU memory, so run these jobs on the CPU instead.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""

        if FLAGS.jobs == 0:
            cores = None
        else:
            cores = FLAGS.jobs

        run_job_pool(save_dataset,
                     [(d, output_dir) for d in adaptation_problems],
                     cores=cores)
    else:
        for dataset_name in adaptation_problems:
            save_dataset(dataset_name, output_dir)
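Every example on this page passes run_job_pool a function, a list of argument tuples, and an optional cores count. Its implementation is not part of this listing; the minimal sketch below assumes it is a thin starmap-style wrapper around multiprocessing.Pool, with cores=None meaning "use all available cores" (as the jobs == 0 branches suggest).

import multiprocessing

def run_job_pool(func, argument_list, cores=None):
    """ Sketch only: assumes run_job_pool wraps multiprocessing.Pool.
    Runs func(*args) for every tuple in argument_list in parallel;
    cores=None lets multiprocessing pick one worker per CPU core. """
    with multiprocessing.Pool(processes=cores) as pool:
        # starmap unpacks each tuple into positional arguments for func
        return pool.starmap(func, argument_list)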
Example #2
def main(argv):
    # Only list one direction since the other direction uses the same data
    adaptation_problems = [
        ("mnist", "usps"),
        ("svhn", "mnist"),
        ("svhn2", "mnist2"),
        ("mnist", "mnistm"),
        ("synnumbers", "svhn"),
        ("synsigns", "gtsrb"),
        # All combinations of these, so just make one file for each
        ("office_amazon", None),
        ("office_dslr", None),
        ("office_webcam", None),
    ]

    # Save tfrecord files for each of the adaptation problems
    if FLAGS.parallel:
        # TensorFlow would fail because every parallel process tries to grab
        # ~90% of the GPU memory, so run these jobs on the CPU instead.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""

        if FLAGS.jobs == 0:
            cores = None
        else:
            cores = FLAGS.jobs

        run_job_pool(save_adaptation, adaptation_problems, cores=cores)
    else:
        for source, target in adaptation_problems:
            save_adaptation(source, target)
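Both snippets above read module-level FLAGS (FLAGS.parallel, FLAGS.jobs) whose definitions are outside the excerpt. A plausible wiring with absl-py is sketched below; the defaults and help strings are assumptions, not taken from the original project.

from absl import app, flags

FLAGS = flags.FLAGS

# Hypothetical flag definitions matching how FLAGS is used in the examples
flags.DEFINE_boolean("parallel", True, "Generate the tfrecord files with a process pool")
flags.DEFINE_integer("jobs", 0, "Number of worker processes; 0 means use all cores")

if __name__ == "__main__":
    app.run(main)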
Example #3
def main(argv):
    # If there is a single GPU, split its memory between the jobs. With
    # multiple GPUs, each GPU has its own memory, so don't divide it up.
    #
    # With multiple GPUs the jobs are split per GPU rather than by the "jobs"
    # argument, so ignore that argument and just set jobs to the GPU count.
    if FLAGS.gpus == 1:
        jobs = FLAGS.jobs
        gpumem = FLAGS.gpumem / jobs
        multi_gpu = False
    else:
        jobs = FLAGS.gpus
        gpumem = FLAGS.gpumem
        multi_gpu = True

    # Find models in the model/log directories
    models_to_evaluate = get_models_to_evaluate()

    # Run in parallel
    commands = []

    for model_params in models_to_evaluate:
        commands.append((*model_params, gpumem, multi_gpu))

    if jobs == 1:  # Run serially to ease debugging; output prints even if a job errors
        process_results = []

        for c in commands:
            process_results.append(process_model(*c))
    else:
        process_results = run_job_pool(process_model, commands, cores=jobs)

    # Save results
    save_results(process_results, FLAGS.output_file)
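The per-job gpumem value implies that each worker limits how much GPU memory TensorFlow may allocate, but process_model itself is not shown here. The sketch below shows one common way to apply such a cap in TensorFlow 2; treating gpumem as a limit in megabytes is an assumption.

import tensorflow as tf

def limit_gpu_memory(gpumem_mb):
    """ Sketch: cap TensorFlow's GPU memory use for this process.
    Assumes gpumem_mb is a limit in megabytes. """
    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        tf.config.set_logical_device_configuration(
            gpus[0],
            [tf.config.LogicalDeviceConfiguration(memory_limit=gpumem_mb)])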
Example #4
def main(argv):
    # If there is a single GPU, split its memory between the jobs. With
    # multiple GPUs, each GPU has its own memory, so don't divide it up.
    #
    # With multiple GPUs the jobs are split per GPU rather than by the "jobs"
    # argument, so ignore that argument and just set jobs to the GPU count.
    if FLAGS.gpus == 1:
        jobs = FLAGS.jobs
        gpumem = FLAGS.gpumem / jobs
        multi_gpu = False
    else:
        jobs = FLAGS.gpus
        gpumem = FLAGS.gpumem
        multi_gpu = True

    # Find models in the model/log directories
    models_to_evaluate = get_models_to_evaluate()

    # Run in parallel
    commands = []

    for model_params in models_to_evaluate:
        commands.append((*model_params, gpumem, multi_gpu))

    # Also prints which models we load
    print("Log Dir,Source,Target,Model,Method,Best Step,Accuracy at Step")
    results = run_job_pool(process_model, commands, cores=jobs)

    # Print results, averages, etc.
    print_results(results)
Example #5
def all_stats(files, source_feature_subset, target_feature_subset):
    """ Process all files, but since we may have many, many thousands, do it
    with multiple cores by default """
    if FLAGS.jobs == 1:
        results = []

        for name, filename in files:
            results.append(
                _all_stats(name, filename, source_feature_subset,
                           target_feature_subset))
    else:
        commands = []

        for name, filename in files:
            commands.append(
                (name, filename, source_feature_subset, target_feature_subset))

        jobs = FLAGS.jobs if FLAGS.jobs != 0 else None
        results = run_job_pool(_all_stats, commands, cores=jobs)

    # Remove empty dictionaries (the "no data" cases)
    results = [r for r in results if r != {}]

    # Sort by name
    results.sort(key=lambda x: x["name"])

    return results
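For reference, a call to all_stats might look like the following; the results directory, the *.yaml pattern, and passing None for both feature subsets are placeholders rather than details from the original code.

import pathlib

# Placeholder file list: (name, filename) pairs, as all_stats expects
files = [(p.stem, str(p)) for p in sorted(pathlib.Path("results").glob("*.yaml"))]
results = all_stats(files, source_feature_subset=None, target_feature_subset=None)

for r in results:
    print(r["name"])  # each result is a dict that at least contains "name"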