Code Example #1
async def handle_list_datasets():
    """
    Handles GET requests to /datasets.

    Returns
    -------
    str
    """
    return list_datasets()
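The snippet above returns whatever the project's list_datasets() helper produces, but it does not show which framework routes requests to it. As a sketch only, here is one way such a handler could be wired up, assuming aiohttp; the framework choice, the request parameter, and the stub list_datasets are all assumptions, not part of the original:

from aiohttp import web

def list_datasets():
    # Stand-in for the project's helper; dataset names taken from the
    # examples below
    return ["ucihar", "cwru"]

async def handle_list_datasets(request):
    # aiohttp handlers receive a request argument, which the original omits;
    # json_response assumes the helper returns a JSON-serializable value
    return web.json_response(list_datasets())

app = web.Application()
app.add_routes([web.get("/datasets", handle_list_datasets)])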
Code Example #2
def main(argv):
    counts = collections.defaultdict(dict)

    for dataset_name in datasets.list_datasets():
        # Note: test=False so we only look at the training samples, which is what
        # we will vary in the vary-amount-of-target-data experiments
        for user in datasets.get_dataset_users(dataset_name):
            sources, _ = load_da(dataset_name, str(user), "", test=False)
            train_count = count_training_samples(sources)
            counts[dataset_name][user] = train_count

    print_dictionary(counts, "dataset_target_training_sample_counts")
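This main(argv) entry point, like the others below, appears to follow the absl convention used in Examples #5 and #7: the script is launched through absl's app.run, which parses the defined flags and then calls main with the remaining arguments. A minimal sketch:

from absl import app

if __name__ == "__main__":
    # absl parses the command-line flags, then invokes main(argv)
    app.run(main)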
Code Example #3
def main(argv):
    # Don't bother using the GPU for this
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    for dataset_name in datasets.list_datasets():
        for user in datasets.get_dataset_users(dataset_name):
            # Note: test=False so we only look at the training samples, where
            # train=80% of training set, test=20% of training set, i.e. the
            # validation set
            test = False

            sources, _ = load_da(dataset_name, str(user), "", test=test)
            assert len(sources) == 1
            dataset = sources[0]
            print_stats(dataset_name + "_" + str(user), dataset, test=test)
Code Example #4
def main(argv):
    # Don't bother using the GPU for this
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    for dataset_name in datasets.list_datasets():
        user_source_pairs = []

        for user in datasets.get_dataset_users(dataset_name):
            # Note: test=False so we only look at the training samples, where
            # train=80% of training set, test=20% of training set, i.e. the
            # validation set
            sources, _ = load_da(dataset_name, str(user), "", test=False)

            # We load them one at a time
            assert len(sources) == 1
            source = sources[0]

            user_source_pairs.append((user, source))

        print_class_balances(dataset_name, user_source_pairs)
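print_class_balances is project code that is not shown here. For illustration only, a per-user class balance could be computed along these lines; the train_labels attribute is an assumption about the dataset interface, not the project's actual API:

import collections

def print_class_balances(dataset_name, user_source_pairs):
    # Sketch: assumes each source exposes its training labels as an
    # iterable of integer class IDs via a train_labels attribute
    for user, source in user_source_pairs:
        counts = collections.Counter(int(y) for y in source.train_labels)
        total = sum(counts.values())
        balance = {label: count / total for label, count in sorted(counts.items())}
        print(dataset_name, user, balance)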
Code Example #5
File: draft.py Project: blindsubmissiondts/DTS
from absl import app
from absl import flags

import models
import methods
import file_utils
import load_datasets

from datasets import datasets
from metrics import Metrics
from checkpoints import CheckpointManager
from gpu_memory import set_gpu_memory

print(methods.list_methods())
print(models.list_models())
print(datasets.list_datasets())
FLAGS = flags.FLAGS

flags.DEFINE_string("modeldir", "example-models", "Directory for saving model files")
flags.DEFINE_string("logdir", "example-logs", "Directory for saving log files")
flags.DEFINE_enum("method", "yndaws", methods.list_methods(), "What method of domain adaptation to perform (or none)")
flags.DEFINE_enum("model", "ynfcn", models.list_models(), "What model to use (note: ignored for vrada/rdann methods)")
flags.DEFINE_enum("dataset", "ucihar", datasets.list_datasets(), "What dataset to use (e.g. \"ucihar\")")
flags.DEFINE_string("sources", "14", "Which source domains to use (e.g. \"1,2,3\")")
flags.DEFINE_string("target", "19", "What target domain to use (e.g. \"4\", can be blank for no target)")
flags.DEFINE_string("uid", "0", "A unique ID saved in the log/model folder names to avoid conflicts")
flags.DEFINE_integer("ensemble", 1, "Number of models in the ensemble, 1 = no ensemble")
flags.DEFINE_integer("steps", 30000, "Number of training steps to run")
flags.DEFINE_float("gpumem", 2000, "GPU memory to let TensorFlow use, in MiB (0 for all)")
flags.DEFINE_integer("model_steps", 0, "Save the model every so many steps (0 for only when log_val_steps)")
flags.DEFINE_integer("log_train_steps", 500, "Log training information every so many steps (0 for never)")
Code Example #6
    pairs = []

    for source_user, target_user in combinations:
        assert source_user != target_user
        pairs.append((dataset_name, str(source_user), str(target_user)))

    return pairs


if __name__ == "__main__":
    # Sources-target pairs for training
    pairs = []
    uids = []

    for name in datasets.list_datasets():
        # Tune on "watch_noother" not "watch"
        if name == "watch":
            continue

        users = datasets.get_dataset_users(name)

        # Since the sources-target pair isn't stored in the filename anymore
        # (too long), we would run into folder-name conflicts if we didn't
        # append a unique ID to each sources-target pair
        uid = 0

        # Make this repeatable
        random.seed(42)

    # Allows extra max_users for some datasets without changing uids
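The snippet is cut off here, but the surrounding code shows the intent: draw source-target combinations per dataset, with a fixed seed so the pairing (and therefore each pair's uid) is repeatable across runs. For illustration only, repeatable sampling might look like this; max_users and the pairing logic are assumptions:

import random

def sample_pairs(users, max_users):
    # The fixed seed makes the sampled combinations repeatable across
    # runs, so the uid assigned to each pair stays stable
    random.seed(42)
    combinations = []
    for target_user in users[:max_users]:
        source_user = random.choice([u for u in users if u != target_user])
        combinations.append((source_user, target_user))
    return combinations

print(sample_pairs(list(range(1, 6)), max_users=3))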
Code Example #7
File: main.py Project: blindsubmissiondts/DTS
from absl import app
from absl import flags

import models
import methods
import file_utils
import load_datasets

from datasets import datasets
from metrics import Metrics
from checkpoints import CheckpointManager
from gpu_memory import set_gpu_memory

print(methods.list_methods())
print(models.list_models())
print(datasets.list_datasets())
FLAGS = flags.FLAGS

flags.DEFINE_string("modeldir", "example-models",
                    "Directory for saving model files")
flags.DEFINE_string("logdir", "example-logs", "Directory for saving log files")
flags.DEFINE_enum("method", "yndaws", methods.list_methods(),
                  "What method of domain adaptation to perform (or none)")
flags.DEFINE_enum("model", "ynfcn", models.list_models(),
                  "What model to use (note: ignored for vrada/rdann methods)")
flags.DEFINE_enum("dataset", "ucihar", datasets.list_datasets(),
                  "What dataset to use (e.g. \"ucihar\")")
flags.DEFINE_string("sources", "14",
                    "Which source domains to use (e.g. \"1,2,3\")")
flags.DEFINE_string(
    "target", "19",
    "What target domain to use (e.g. \"4\", can be blank for no target)")
Code Example #8
import load_datasets

from datasets import datasets
from metrics import Metrics
from checkpoints import CheckpointManager
from gpu_memory import set_gpu_memory


FLAGS = flags.FLAGS

flags.DEFINE_string("modeldir", "models", "Directory for saving model files")
flags.DEFINE_string("logdir", "logs", "Directory for saving log files")
flags.DEFINE_enum("method", "daws", methods.list_methods(), "What method of domain adaptation to perform (or none)")
flags.DEFINE_enum("model", "fcn", models.list_models(), "What model to use (note: ignored for vrada/rdann methods)")
# flags.DEFINE_enum("dataset", "ucihar", datasets.list_datasets(), "What dataset to use (e.g. \"ucihar\")")
flags.DEFINE_enum("dataset", "cwru", datasets.list_datasets(), "What dataset to use (e.g. \"ucihar\")")
flags.DEFINE_string("sources", "1", "Which source domains to use (e.g. \"1,2,3\")")
flags.DEFINE_string("target", "2", "What target domain to use (e.g. \"4\", can be blank for no target)")
flags.DEFINE_string("uid", "1", "A unique ID saved in the log/model folder names to avoid conflicts")
flags.DEFINE_integer("ensemble", 1, "Number of models in the ensemble, 1 = no ensemble")
flags.DEFINE_integer("steps", 500000, "Number of training steps to run")
flags.DEFINE_float("gpumem", 0, "GPU memory to let TensorFlow use, in MiB (0 for all)")
flags.DEFINE_integer("model_steps", 0, "Save the model every so many steps (0 for only when log_val_steps)")
flags.DEFINE_integer("log_train_steps", 500, "Log training information every so many steps (0 for never)")
flags.DEFINE_integer("log_val_steps", 500, "Log validation information every so many steps (also saves model, 0 for only at end)")
flags.DEFINE_integer("log_plots_steps", 0, "Log plots every so many steps (0 for never)")
flags.DEFINE_boolean("test", False, "Use real test set for evaluation rather than validation set")
flags.DEFINE_boolean("subdir", True, "Save models/logs in subdirectory of prefix")
flags.DEFINE_boolean("debug", False, "Start new log/model/images rather than continuing from previous run")
flags.DEFINE_boolean("time_training", False, "Print how long each step takes, instead of every 100 steps")
flags.DEFINE_boolean("moving_average", False, "Whether to use an exponential moving average of the weights rather than the weights directly (requires tensorflow_addons)")