Example #1
async def create_table_if_not_exists(conn: Connection, schema: str,
                                     table: str) -> None:
    table_exists_bool = await table_exists(conn, schema, table)
    if not table_exists_bool:
        for table_name, ddl in table_ddl(table, schema).items():
            logger.info(f"creating table {table_name}")
            await conn.execute(ddl)
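The helper above relies on a table_exists function that is not part of the snippet. A minimal sketch of one possible implementation using an information_schema lookup with asyncpg (an illustration of the assumed helper, not the original code):

from asyncpg import Connection

async def table_exists(conn: Connection, schema: str, table: str) -> bool:
    # Look the table up in the information_schema catalog; any matching row
    # means the table already exists.
    row = await conn.fetchrow(
        "SELECT 1 FROM information_schema.tables "
        "WHERE table_schema = $1 AND table_name = $2",
        schema, table)
    return row is not None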
Example #2
    def _lost_connection(self, endpoint):
        player = self._room.get_player_with_endpoint(endpoint)
        if player:
            player.set_endpoint(None)
            logger.info("Player '{}' disconnected".format(
                player.get_character()))
            self._log_players()
Example #3
def write_dataset(filepath_groups, variable, experiment_setup, requests):
    """Write data to training, validation, testing .h5 files.

    Args:
        filepath_groups (list): list of (filepaths, group) tuples associating
            files with a data group
        variable (str): the machine learning variable, X or Y, to be written
        experiment_setup (dict): maps the variable to its list of data groups
        requests (dict): requests describing what to read from the files and
            store with the variable
    """
    for filepaths, group in filepath_groups:
        logger.info(f"Writing {variable} to the {group} set...")
        h5_filename = args.output_path + "/" + group + ".h5"
        with h5py.File(h5_filename, "a") as h5_file:
            dataset = read_h5(filepaths, requests)
            for filename in dataset.keys():
                temp_dataset = None
                for j, data_group in enumerate(experiment_setup[variable]):
                    if temp_dataset is None:
                        temp_dataset = dataset[filename][data_group]
                    else:
                        temp_dataset = np.append(temp_dataset,
                                                 dataset[filename][data_group],
                                                 axis=1)
                try:
                    h5_file.create_dataset(filename + "/" + variable,
                                           data=temp_dataset)
                except KeyError:
                    logger.info(f"{filename} does not contain {data_group}")
Example #4
async def create_tables(connection: Connection, table_name: str, schema: str, live=False) -> None:
    for table_name, ddl in table_ddl(table_name, schema).items():
        logger.info(f"creating table {table_name}")
        if live:
            await connection.execute(ddl)
        else:
            logger.info(ddl)
Example #5
async def create_schema(connection: Connection, schema: str, live=False) -> None:
    schema_create = f"CREATE SCHEMA IF NOT EXISTS {schema}"
    logger.info(f"creating schema {schema}")
    if live:
        await connection.execute(schema_create)
    else:
        logger.info(schema_create)
Example #6
async def drop_schema(connection: Connection, schema: str, live=False, destroy=False) -> None:
    schema_drop = f"DROP SCHEMA IF EXISTS {schema} CASCADE"
    logger.warning(f"dropping schema {schema}")
    if live and destroy:
        await connection.execute(schema_drop)
    else:
        logger.info(schema_drop)
Example #7
    def _register_player(self, character, endpoint):
        if 1 > len(character) or -1 == string.ascii_uppercase.find(character):
            logger.warning(
                "Login attempt with invalid character: {}".format(character))
            return Message.LoginStatus.INVALID_CHARACTER

        status = self._room.add_player(character, endpoint)

        if Room.ADDITION_SUCCESSFUL == status:
            logger.info(
                "Player '{}' registered successfully".format(character))
            return Message.LoginStatus.LOGGED

        elif Room.ADDITION_REUSE == status:
            logger.info("Player '{}' reconnected".format(character))
            return Message.LoginStatus.RECONNECTION

        elif Room.ADDITION_ERR_COMPLETE == status:
            logger.debug("Player '{}' tried to register: room complete".format(
                character))
            return Message.LoginStatus.ROOM_COMPLETED

        elif Room.ADDITION_ERR_ALREADY_EXISTS == status:
            logger.debug(
                "Player '{}' tried to register: already exists".format(
                    character))
            return Message.LoginStatus.ALREADY_EXISTS
Example #8
async def insert_many(conn: Connection, schema: str, table: str, columns: list,
                      values: Iterable[list]) -> None:
    num_columns = len(columns)
    place_holders = ",".join(f"${i}" for i in range(1, num_columns + 1))
    columns_str = ",".join(columns)
    insert_query = f"INSERT INTO {schema}.{table} ({columns_str}) VALUES ({place_holders}) ON CONFLICT DO NOTHING"
    logger.info(f"executing query {insert_query}")
    logger.info(values)
    await conn.executemany(insert_query, values)
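For context, a short usage sketch for insert_many; the DSN and the monitoring.checks table are invented for illustration, and insert_many is assumed to be importable from the module above:

import asyncio
import asyncpg

async def demo() -> None:
    conn = await asyncpg.connect("postgresql://localhost/demo")
    try:
        # insert_many is the helper shown above (import path depends on your
        # project). With columns ["url", "status_code"] it builds:
        #   INSERT INTO monitoring.checks (url,status_code)
        #   VALUES ($1,$2) ON CONFLICT DO NOTHING
        await insert_many(conn, "monitoring", "checks",
                          ["url", "status_code"],
                          [["https://example.com", 200],
                           ["https://example.org", 503]])
    finally:
        await conn.close()

asyncio.run(demo())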
Example #9
    def post(self, request) -> HttpResponse:
        """
        Handle post request to write chunks from incoming ResumableJS requests

        If one of the requests notices all the chunks have been fully written
        it will write the full file to
            <self.FILE_UPLOAD_DIR>/<user.plant.name_key>/<uploaded_file_name>
        """
        total_chunks = int(request.POST.get('resumableTotalChunks'))
        chunk_num = int(request.POST.get('resumableChunkNumber', 1))
        uploaded_file_name = request.POST.get('resumableFilename')
        resumable_identifier = request.POST.get('resumableIdentifier', 'error')
        chunk_data = request.FILES['file']
        user = request.user
        plant_name_key = user.plant.name_key

        # Get - tmp file dir path for the ResumableJS upload chunks
        #     - target file path
        target_file_path = self.target_file_path(plant_name_key,
                                                 uploaded_file_name)
        chunk_dir = self.FILE_UPLOAD_DIR / plant_name_key / resumable_identifier

        # Save this chunk data using a lock file
        self._save_chunk_data(uploaded_file_name, chunk_num, chunk_data,
                              chunk_dir)

        # Check whether all chunk files have been created across all
        # ResumableJS requests
        all_chunk_paths = self._all_chunk_paths(uploaded_file_name, chunk_dir,
                                                total_chunks)

        all_chunks_exists = all([p.exists() for p in all_chunk_paths])
        if all_chunks_exists:

            # Make sure all files are finished writing, but do not wait forever
            tried = 0
            while self._all_chunks_not_written(chunk_dir, total_chunks):
                tried += 1
                if tried >= 5:
                    error_msg = f'Error uploading files with temp_dir: {chunk_dir!r}'
                    logger.error(f'[{self.LOG_PREFIX}] {error_msg}')
                    return HttpResponseServerError(error_msg)
                time.sleep(1)

            # If all chunks are written, create the full file and remove the
            # chunk dir/files
            self._create_full_file_from_chunks(target_file_path,
                                               all_chunk_paths, chunk_dir)
            logger.info(
                f'[{self.LOG_PREFIX}] User {user!r} successfully created '
                f'file {target_file_path}')

        return HttpResponse(status=200)
Example #10
    def connect(self, ip, port):
        try:
            connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            connection.connect((ip, port))
            connection.setblocking(False)
            self._selector.register(connection, selectors.EVENT_READ,
                                    self.Operation.READ)

            logger.info("New connection to {}:{}".format(ip, port))
            return connection

        except OSError as error:
            logger.critical("Can not connect to {}:{}, error: {}".format(
                ip, port, error.errno))
            return None
Example #11
    def __init__(self, players, points, arena_size, seed):
        PackageQueue.__init__(self)
        self._active = True
        self._room = Room(players, points)
        self._arena_size = arena_size
        self._seed = seed

        self._arena = None
        self._arena_enabled = False

        self._last_frame_time_stamp = 0
        self._last_waiting_time = 0

        logger.info("Required players: {} - Points to win: {}".format(
            players, points))
Example #12
def solve():
    longest = (0, [])

    for a in range(1, 10):
        for b in range(a + 1, 10):
            for c in range(b + 1, 10):
                for d in range(c + 1, 10):
                    digits = [a, b, c, d]
                    length = consecutive_positive_integers(targets(digits))

                    logger.info(f"{digits}: {length}")

                    if length > longest[0]:
                        longest = (length, digits)

    return join_digits(longest[1])
Example #13
def solve(limit=13):
    odd_periods = 0

    for n in range(2, limit + 1):
        a0, *period = period_of_root(n)

        if len(period) % 2 != 0:
            odd_periods += 1

        if period:
            logger.info(
                f"√{n} = [{a0}; {tuple(period)}], period={len(period)}")
        else:
            logger.debug(f"√{n} = {sqrt(n)}")

    return odd_periods
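The snippet assumes a period_of_root helper that is not shown. A minimal sketch of the standard continued-fraction expansion of √n it presumably performs, returning [a0, a1, ..., ar] where the terms after a0 form the repeating period (empty for perfect squares):

from math import isqrt

def period_of_root(n: int) -> list:
    a0 = isqrt(n)
    if a0 * a0 == n:
        # Perfect square: no periodic part.
        return [a0]
    terms = [a0]
    m, d, a = 0, 1, a0
    # The period of sqrt(n) always ends with the term 2 * a0.
    while a != 2 * a0:
        m = d * a - m
        d = (n - m * m) // d
        a = (a0 + m) // d
        terms.append(a)
    return terms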
Example #14
    def listen(self, port):
        try:
            server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server_socket.setblocking(False)
            server_socket.bind(("0.0.0.0", port))
            server_socket.listen()
            self._selector.register(server_socket, selectors.EVENT_READ,
                                    self.Operation.ACCEPT)

            logger.info("Listening on port: {}".format(port))
            return server_socket

        except OSError as error:
            logger.critical(
                "Problem initializing the server on port {}, error: {}".format(
                    port, error.errno))
            if (98 == error.errno):
                logger.critical("Port {} is already in use".format(port))
            return None
Example #15
    def new_arena(self):
        pre_time_stamp = time.time()
        seed = self._seed if "" != self._seed else ServerManager.compute_random_seed(
            RANDOM_SEED_SIZE)
        logger.info("Load arena - size: {}, seed: {}".format(
            self._arena_size, seed))

        self._arena = Arena(self._arena_size, seed)

        position_list = self._arena.compute_player_origins(
            self._room.get_size())

        for i, player in enumerate(self._room.get_player_list()):
            control = self._arena.create_player(player.get_character(),
                                                position_list[i])
            player.set_control(control)

        post_time_stamp = time.time()
        logger.info("Load arena - done! {0:.2f}s".format(post_time_stamp -
                                                         pre_time_stamp))

        self._server_signal(ServerSignal.ARENA_CREATED_SIGNAL, 0)
Example #16
async def main(db_config: dict, kafka_config: dict, buffer_limit: int = 100):
    pool = await init_pool(db_config['conn_string'])
    consumer = await kafka_consumer(kafka_config)
    async with pool.acquire() as connection:
        await create_table_if_not_exists(connection, db_config['schema'],
                                         db_config['table'])
        try:
            buffer = []
            async for message in consumer:
                logger.info(f"received message : {message}")
                value = message.value
                value['kafka_partition_offset_id'] = kafka_meta_id(message)
                buffer.append(value)
                if len(buffer) >= buffer_limit:
                    logger.info(
                        "Buffer is full. Going to attempt insert into table")
                    await insert_into_db(connection, db_config['schema'],
                                         db_config['table'], buffer)
                    await consumer.commit()
                    buffer.clear()
        finally:
            await consumer.stop()
    await pool.close()
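The kafka_consumer helper is not shown; a sketch of what it could look like with aiokafka, assuming JSON-encoded message values, manual offset commits, and these particular config keys (all assumptions, not the original implementation):

import json
from aiokafka import AIOKafkaConsumer

async def kafka_consumer(kafka_config: dict) -> AIOKafkaConsumer:
    consumer = AIOKafkaConsumer(
        kafka_config['topic_name'],
        bootstrap_servers=kafka_config['bootstrap_servers'],
        group_id=kafka_config['group_id'],
        # Offsets are committed manually after a successful DB insert.
        enable_auto_commit=False,
        value_deserializer=lambda raw: json.loads(raw.decode('utf-8')))
    await consumer.start()
    return consumer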
Example #17
async def main(db_config: dict, live=False, destroy=False) -> None:
    if destroy:
        logger.warning(
            "!!! Running in destroy mode: this will destroy the existing schema !!! "
            "The script will wait for 5 seconds so you can reevaluate the decision.")
        await asyncio.sleep(5)
    if live:
        logger.warning("Running in live mode. This will create tables and schemas")
    else:
        logger.info("Running in dry run mode. Just the DDLs will be printed")
    connection = None
    try:
        connection = await connect(db_config['conn_string'])
        async with connection.transaction():
            if destroy:
                logger.warning("destroying the existing schema")
            await drop_schema(connection, db_config['schema'], live, destroy)
            await create_schema(connection, db_config['schema'], live)
            await create_tables(connection, db_config['table'], db_config['schema'], live)

    except DuplicateTableError as e:
        logger.error("Always run the fixtures on a clean database. We don't support incremental migrations yet!",
                     exc_info=e)
    finally:
        if connection is not None:
            await connection.close()
Example #18
    def _create_full_file_from_chunks(self, target_file_path: Path,
                                      all_chunk_paths: List[Path],
                                      chunk_dir: Path):
        """
        Once all the chunk data has been written create the full file
        with the aggregated chunks
        """
        # Make sure some other chunk didn't trigger file reconstruction
        if target_file_path.exists():
            logger.debug(
                f'[{self.LOG_PREFIX}] File {target_file_path!r} exists already. Overwriting..'
            )
            target_file_path.unlink()

        # Save file from all uploaded chunk data
        with open(target_file_path, "ab") as fp:
            for p in all_chunk_paths:
                with open(p, 'rb') as stored_chunk_file:
                    fp.write(stored_chunk_file.read())
            logger.info(
                f'[{self.LOG_PREFIX}] File saved to {target_file_path!r}')

        # Remove the chunk dir and all files in it
        shutil.rmtree(chunk_dir)
Example #19
    async def execute(self, client: HttpClientLike) -> CheckResult:
        results = dict()
        logger.info(f"GET url: {self.url}")
        response = await client.get(self.url, retries=3)
        if self.regex_checks is not None:
            logger.info(f"executing extra regex checks for url: {self.url}")
            for name, check_func in self.regex_checks.items():
                logger.info(
                    f"executing regex check : {name} for url: {self.url}")
                results[name] = check_func(response.body)
        return CheckResult(self.url, response.code, response.time, results,
                           response.error)
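The regex_checks attribute is expected to map a check name to a callable that receives the response body; a small illustrative mapping (the names and patterns are invented):

import re

regex_checks = {
    # Each callable gets the response body and its result is stored under
    # the check's name in the results dict.
    "has_title": lambda body: re.search(r"<title>.*?</title>", body) is not None,
    "no_server_error": lambda body: re.search(r"Internal Server Error", body) is None,
}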
Example #20
async def schedule(client: HttpClientLike, kafka_config: dict,
                   kafka_p: AIOKafkaProducer, check: Check, repeat_in_s: int):
    logger.info(
        f"scheduling observer for {check.url} every {repeat_in_s} seconds")
    while True:
        result = await check.execute(client)
        logger.info(
            f"Results for url: {check.url} - results:{result.to_json()}")
        await kafka_send(kafka_p, kafka_config['topic_name'],
                         bytes(result.to_json(), 'utf-8'))
        await asyncio.sleep(repeat_in_s)
        logger.info(
            f"repeating check for {check.url} after {repeat_in_s} seconds")
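Several such observer loops would typically be launched together; a minimal sketch using asyncio.gather, where run_observers, the checks list, and the 30-second default interval are illustrative assumptions (schedule is the coroutine defined above):

import asyncio

async def run_observers(client, kafka_config, kafka_p, checks, repeat_in_s=30):
    # One schedule() loop per check; gather keeps them running concurrently
    # (each loop repeats forever).
    await asyncio.gather(*(
        schedule(client, kafka_config, kafka_p, check, repeat_in_s)
        for check in checks))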
Example #21
def fit(models, optims, epochs, dataloaders, training_criterion,
        validation_criteria, schedulers, device, model_file_path,
        teacher_forcing_ratio=0.0, use_attention=False,
        norm_quaternions=False, schedule_rate=1.0):
    """Fit a seq2seq model to data, logging training and validation loss.

    Args:
        models (tuple): tuple containing the encoder and decoder
        optims (tuple): tuple containing the encoder and decoder optimizers for
            training
        epochs (int): number of epochs to train for
        dataloaders (tuple): tuple containing the training dataloader and val
            dataloader.
        training_criterion (nn.Module): criterion for backpropagation during
            training
        validation_criteria (list): list of criteria for validation
        schedulers (list): list of schedulers to control learning rate for
            optimizers
        device (torch.device): device to place data on
        model_file_path (str): where to save the model when validation loss
            reaches new minimum
        teacher_forcing_ratio (float, optional): percent of the time to use
            teacher forcing in the decoder. Defaults to 0.0.
        use_attention (bool, optional): whether decoder uses attention or not.
            Defaults to False.
        norm_quaternions (bool, optional): whether quaternions should be
            normalized after they are output from the decoder.
            Defaults to False.
        schedule_rate (float, optional): rate to increase or decrease teacher
            forcing ratio. Defaults to 1.0.
    """

    train_dataloader, val_dataloader = dataloaders

    min_val_loss = math.inf
    for epoch in range(epochs):
        losses = []
        total_time = 0

        logger.info(f"Epoch {epoch+1} / {epochs}")

        for index, data in enumerate(train_dataloader, 0):
            with Timer() as timer:
                loss = loss_batch(data, models,
                                  optims, training_criterion, device,
                                  use_attention=use_attention,
                                  norm_quaternions=norm_quaternions)

                losses.append(loss)
            total_time += timer.interval
            if index % (len(train_dataloader) // 10) == 0:
                logger.info((f"Total time elapsed: {total_time} - "
                             f"Batch Number: {index} / {len(train_dataloader)}"
                             f" - Training loss: {loss}"
                             ))
        val_loss = []
        for validation_criterion in validation_criteria:
            with torch.no_grad():
                val_losses = [loss_batch(data, models,
                                         None, validation_criterion, device,
                                         use_attention=use_attention,
                                         norm_quaternions=norm_quaternions)
                              for _, data in enumerate(val_dataloader, 0)]

            val_loss.append(np.sum(val_losses) / len(val_losses))

        loss = np.sum(losses) / len(losses)

        for scheduler in schedulers:
            scheduler.step()

        val_loss_strs = ", ".join(map(str, val_loss))
        logger.info(f"Training Loss: {loss} - Val Loss: {val_loss_strs}")

        teacher_forcing_ratio *= schedule_rate
        if val_loss[0] < min_val_loss:
            min_val_loss = val_loss[0]
            logger.info(f"Saving model to {model_file_path}")
            torch.save({
                "encoder_state_dict": models[0].state_dict(),
                "decoder_state_dict": models[1].state_dict(),
                "optimizerA_state_dict": optims[0].state_dict(),
                "optimizerB_state_dict": optims[1].state_dict(),
            }, model_file_path)
Example #22
    parser.add_argument("--num-layers",
                        help="number of layers in Transformer Encoder")

    args = parser.parse_args()

    if args.data_path_parent is None:
        parser.print_help()

    return args


if __name__ == "__main__":
    args = parse_args()

    for arg in vars(args):
        logger.info(f"{arg} - {getattr(args, arg)}")

    logger.info("Starting Transformer testing...")

    logger.info(f"Device count: {str(torch.cuda.device_count())}")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info("Testing on {}...".format(device))
    seq_length = int(args.seq_length) // int(args.downsample)

    data_paths = [
        args.data_path_parent + "/" + name
        for name in os.listdir(args.data_path_parent)
        if os.path.isdir(args.data_path_parent + "/" + name)
    ]
Example #23
from common.logging import logger

limit = 100
count = 0

for b in range(1, limit):
    for e in range(1, limit):
        n = b ** e

        if len(str(n)) == e:
            count += 1
            logger.info(f"{count}: {b}^{e} = {n}")

print(count)
Example #24
    parser.add_argument("--dropout", help="dropout percentage in Transformer")
    parser.add_argument("--num-layers", help="number of layers in Transformer")

    args = parser.parse_args()

    if args.data_path is None:
        parser.print_help()

    return args


if __name__ == "__main__":
    args = parse_args()

    for arg in vars(args):
        logger.info(f"{arg} - {getattr(args, arg)}")

    logger.info("Starting Transformer training...")

    logger.info(f"Device count: {torch.cuda.device_count()}")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"Training on {device}...")
    seq_length = int(args.seq_length) // int(args.downsample)

    assert seq_length % int(args.in_out_ratio) == 0

    lr = float(args.learning_rate)

    normalize = True
    train_dataloader, norm_data = load_dataloader(args, "training", normalize)
Example #25
                        default=False,
                        action="store_true")
    parser.add_argument("--attention",
                        help="use decoder with specified attention",
                        default="general")

    args = parser.parse_args()

    return args


if __name__ == "__main__":
    args = parse_args()

    for arg in vars(args):
        logger.info("{} - {}".format(arg, getattr(args, arg)))

    logger.info("Starting seq2seq model testing...")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    seq_length = int(args.seq_length)
    stride = int(args.stride)
    batch_size = int(args.batch_size)

    data_paths = [
        args.data_path_parent + "/" + name
        for name in os.listdir(args.data_path_parent)
        if os.path.isdir(args.data_path_parent + "/" + name)
    ]
Example #26
    def _log_players(self):
        connected_players = self._room.get_character_list_with_endpoints()
        logger.info("Logged players: {} - Connected players: {}".format(
            self._room.get_character_list(), connected_players))
Example #27
    def __init__(self, players, points, arena_size, seed):
        logger.info("Server version: {}".format(version.CURRENT))
        self._server_manager = ServerManager(players, points, arena_size, seed)
Example #28
def fit(model,
        optimizer,
        scheduler,
        epochs,
        dataloaders,
        training_criterion,
        validation_criteria,
        device,
        model_file_path,
        full_transformer=False,
        min_val_loss=math.inf):
    """Fit a Transformer model to data, logging training and validation loss.

    Args:
        model (nn.Module): the model to train
        optimizer (torch.optim.Optimizer): the optimizer for training
        scheduler (torch.optim.lr_scheduler._LRScheduler): scheduler to
            control the learning rate for the optimizer
        epochs (int): number of epochs to train for
        dataloaders (tuple): tuple containing the training dataloader and val
            dataloader.
        training_criterion (nn.Module): criterion for backpropagation during
            training
        validation_criteria (list): list of criteria for validation
        device (torch.device): device to place data on
        model_file_path (str): where to save the model when validation loss
            reaches new minimum
        full_transformer (bool): whether the model is a full transformer and
            needs to run inference for evaluation
        min_val_loss (float, optional): minimum validation loss seen so far.
            Defaults to math.inf.

    Returns:
        float: minimum validation loss reached during training
    """
    train_dataloader, val_dataloader = dataloaders
    total_time = 0

    for epoch in range(epochs):
        losses = 0
        logger.info("Epoch {}".format(epoch + 1))
        avg_loss = 0
        for index, data in enumerate(train_dataloader, 0):
            with Timer() as timer:
                loss = loss_batch(model,
                                  optimizer,
                                  data,
                                  training_criterion,
                                  device,
                                  full_transformer=full_transformer)
            losses += loss
            avg_loss += loss
            total_time += timer.interval
            if index % (len(train_dataloader) // 10) == 0 and index != 0:
                avg_training_loss = avg_loss / (len(train_dataloader) // 10)
                logger.info((f"Total time elapsed: {total_time}"
                             " - "
                             f"Batch number: {index} / {len(train_dataloader)}"
                             " - "
                             f"Training loss: {avg_training_loss}"
                             " - "
                             f"LR: {optimizer.param_groups[0]['lr']}"))
                avg_loss = 0

        val_loss = []
        for validation_criterion in validation_criteria:
            with torch.no_grad():
                val_losses = [
                    loss_batch(model,
                               None,
                               data,
                               validation_criterion,
                               device,
                               full_transformer=full_transformer)
                    for _, data in enumerate(val_dataloader, 0)
                ]

            val_loss.append(np.sum(val_losses) / len(val_losses))

        loss = losses / len(train_dataloader)

        scheduler.step()
        val_loss_str = ", ".join(map(str, val_loss))
        logger.info(f"Epoch {epoch+1} - "
                    f"Training Loss: {loss} - "
                    f"Val Loss: {val_loss_str}")

        if full_transformer:
            inference_loss = []
            for validation_criterion in validation_criteria:
                with torch.no_grad():
                    inference_losses = [
                        inference(model, data, validation_criterion, device)
                        for _, data in enumerate(val_dataloader, 0)
                    ]
                inference_loss.append(
                    np.sum(inference_losses) / len(inference_losses))
            inference_loss_str = ", ".join(map(str, inference_loss))
            logger.info(f"Inference Loss: {inference_loss_str}")

        if val_loss[0] < min_val_loss:
            min_val_loss = val_loss[0]
            logger.info(f"Saving model to {model_file_path}")
            torch.save(
                {
                    "model_state_dict": model.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                }, model_file_path)

    return min_val_loss
Example #29
                        help="will use decoder with given attention method",
                        default="general")

    args = parser.parse_args()

    if args.data_path is None:
        parser.print_help()

    return args


if __name__ == "__main__":
    args = parse_args()

    for arg in vars(args):
        logger.info(f"{arg} - {getattr(args, arg)}")

    logger.info("Starting seq2seq model training...")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    seq_length = int(args.seq_length)
    stride = int(args.stride)
    lr = float(args.learning_rate)

    assert seq_length % int(args.in_out_ratio) == 0

    normalize = True
    train_dataloader, norm_data = load_dataloader(args, "training",
                                                  normalize, norm_data=None)
    val_dataloader, _ = load_dataloader(args, "validation",
Example #30
def parse_args():
    """Parse arguments for module.

    Returns:
        argparse.Namespace: contains accessible arguments passed in to module
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("--training",
                        help=("participants for training, space separated; "
                              "e.g., W1 W2"))
    parser.add_argument("--validation",
                        help=("participants for validation, space separated; "
                              "e.g., P1 P2"))
    parser.add_argument("--testing",
                        help=("participants for testing, space separated; "
                              "e.g., P4 P5"))
    parser.add_argument("-f",
                        "--data-path",
                        help="path to h5 files for reading data")
    parser.add_argument("-o",
                        "--output-path",
                        help=("path to directory to save h5 files for "
                              "training, validation, and testing"))
    parser.add_argument("-x",
                        "--task-input",
                        help=("input type; "
                              "e.g., orientation, relativePosition, "
                              "or jointAngle"))
    parser.add_argument("--input-label-request",
                        help=("input label requests, space separated; "
                              "e.g., all or Pelvis RightForearm"))
    parser.add_argument("-y",
                        "--task-output",
                        help="output type; e.g., orientation or jointAngle")
    parser.add_argument("--output-label-request",
                        help=("output label requests, space separated; "
                              "e.g., all or jRightElbow"))
    parser.add_argument("--aux-task-output",
                        help=("auxiliary task output in addition "
                              "to regular task output"))
    parser.add_argument("--aux-output-label-request",
                        help="aux output label requests, space separated")

    args = parser.parse_args()

    if None in [args.training, args.validation, args.testing]:
        logger.info(("Participant numbers for training, validation, "
                     "or testing dataset were not provided."))
        parser.print_help()
        sys.exit()

    if None in [args.data_path, args.output_path]:
        logger.error("Data path or output path were not provided.")
        parser.print_help()
        sys.exit()

    if None in [args.task_input, args.input_label_request, args.task_output]:
        logger.error(("Task input and label requests "
                      "or task output were not given."))
        parser.print_help()
        sys.exit()

    if args.output_label_request is None:
        if args.task_input == args.task_output:
            logger.info("Will create h5 files with input data only.")
        else:
            logger.error("Label output requests were not given for the task.")
            parser.print_help()
            sys.exit()

    if args.aux_task_output == args.task_output:
        logger.error("Auxiliary task should not be the same as the main task.")
        parser.print_help()
        sys.exit()

    if (args.aux_task_output is not None
            and args.aux_output_label_request is None):
        logger.error("Need auxiliary output labels if using aux output task")
        parser.print_help()
        sys.exit()

    if args.task_input == args.task_output:
        if args.output_label_request is None:
            logger.info(("Will create h5 files with only input "
                         "data for self-supervision tasks..."))
        else:
            logger.info("Will create h5 files with input and output data.")

    return args