Example #1
def main():
    global logger, stop_event, test_stats_collector
    logger = ConsoleLogger('io_stress').logger
    data_queue = multiprocessing.Manager().Queue()
    stop_event = multiprocessing.Event()
    file_name = "io_test_huge_file.bin"
    chunks_number = 100000000
    test_stats_collector = TestStatsCollector(print_chunks_stats)
    mode = 'a+'

    args = get_args()
    dir_name = args.test_dir
    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'IO_STRESS',
                      logger=logger,
                      sudo=True,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warning(
            "VIP range is bad or None. Falling back to mounting storage server IP"
        )
        mounter.mount()
    logger.info("Done mounting. Starting workload ...")
    dir_path = os.path.join(mounter.get_random_mountpoint(), dir_name)
    try:
        os.mkdir(dir_path)
    except FileExistsError:
        pass
    futures = []
    test_stats_collector.start()
    cores = multiprocessing.cpu_count()
    with ProcessPoolExecutor() as executor:
        for _ in range(10):
            futures.append(
                executor.submit(data_chunks_generator_worker, data_queue,
                                chunks_number, TB1))
        for i in range(cores - 10):
            futures.append(
                executor.submit(singe_file_random_writes_worker,
                                mounter.get_random_mountpoint(), dir_name,
                                f'{file_name}-{i}', data_queue, mode))
    futures_validator(futures, logger)
    for i in range(cores - 10):
        logger.info(f"Test completed. Deleting the HUGE file {file_name}-{i}")
        os.remove(
            os.path.join(mounter.get_random_mountpoint(), dir_name,
                         f'{file_name}-{i}'))
    test_stats_collector.cancel()
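
Every example in this section finishes by calling futures_validator(), which is not defined here. A minimal reconstruction, assuming the helper only blocks on each future and surfaces worker exceptions; the call sites pass a logger, a bool, or nothing as the second argument, so the real signature likely differs:

def futures_validator(futures, logger=None):
    # Hypothetical sketch, not the actual helper from this codebase.
    for future in futures:
        try:
            future.result()  # blocks and re-raises any worker exception
        except Exception as e:
            if hasattr(logger, "error"):
                logger.error(f"Worker failed: {e}")
            raise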
Example #2
def main():
    global logger
    logger = ConsoleLogger('bmp_split_stress').logger
    q1 = Queue(maxsize=10)
    num_threads = 100
    num_files = 100000

    args = get_args()
    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'OPEN_CREATE_STRESS',
                      logger=logger,
                      nodes=0,
                      domains=0,
                      sudo=True,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warning(
            "VIP range is bad or None. Falling back to mounting storage server IP"
        )
        mounter.mount()
    for i in range(num_threads):
        path = mounter.get_random_mountpoint()
        worker = Thread(target=open_file_for_n_sec, args=(q1, 1, path))
        worker.daemon = True
        worker.start()

    for i in range(num_files):
        q1.put('t%d' % i)

    time.sleep(2)
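
The open_file_for_n_sec() worker target is not shown. A plausible sketch, assuming each thread takes file names off the queue and briefly holds each file open under its mount point (the real worker may do more):

import os
import time

def open_file_for_n_sec(q, n_sec, path):
    # Hypothetical sketch: create the named file, keep it open for n_sec,
    # then pick up the next name. The threads are daemons, so they exit
    # with the main process.
    while True:
        name = q.get()
        with open(os.path.join(path, name), 'w'):
            time.sleep(n_sec)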
Example #3
def main():
    global logger, stop_event
    logger = ConsoleLogger('bmp_split_stress').logger
    stop_event = Event()
    dirs_queue = queue.Queue()

    args = get_args()
    test_dir = args.test_dir
    dirs_num = args.dirs_num // 16 + args.dirs_num % 16
    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'BM_SPLIT_DIR',
                      logger=logger,
                      nodes=0,
                      domains=0,
                      sudo=True,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warning(
            "VIP range is bad or None. Falling back to mounting storage server IP"
        )
        mounter.mount()
    mount_point = mounter.get_random_mountpoint()
    os.mkdir(os.path.join(mount_point, test_dir))
    logger.info("Test directory {} created on {}".format(test_dir, mount_point))
    futures = []
    logger.info("Going to produce {} Directories per thread".format(dirs_num))
    with ThreadPoolExecutor() as executor:
        for _ in range(16):
            futures.append(
                executor.submit(dir_producer_worker, mounter, test_dir,
                                dirs_num))
    futures_validator(futures)
    cycle = args.files_num
    for i in range(cycle):
        futures = []
        stop_event = Event()
        logger.info("Starting Directories Scan cycle {}".format(i))
        with ThreadPoolExecutor() as executor:
            futures.append(
                executor.submit(dir_scanner_worker, mounter, test_dir,
                                dirs_queue))
            for _ in range(16):
                futures.append(
                    executor.submit(files_producer_worker, dirs_queue))
        futures_validator(futures)
    logger.info("### Workload is Done. Come back tomorrow.")
Example #4
import argparse
import sys
import errno
import redis
import config
from multiprocessing import Event
from multiprocessing import Process
from config.redis_config import redis_config
from logger.pubsub_logger import SUBLogger
from logger.server_logger import ConsoleLogger
from server.async_controller import Controller
from tree import dirtree
from utils import ssh_utils
from utils.shell_utils import ShellUtils

stop_event = Event()
logger = ConsoleLogger(__name__).logger


def get_args():
    """
    Supports the command-line arguments listed below.
    """

    parser = argparse.ArgumentParser(
        description='vfs_stress Server runner')
    parser.add_argument('cluster', type=str, help='File server name or IP')
    parser.add_argument('-c', '--clients', type=str, nargs='+', help="Space separated list of clients")
    parser.add_argument('-e', '--export', type=str, default="/", help="NFS export name")
    parser.add_argument('--start_vip', type=str, help="Start VIP address range")
    parser.add_argument('--end_vip', type=str, help="End VIP address range")
    parser.add_argument('--tenants', action="store_true", help="Enable MultiTenancy")
    return parser.parse_args()
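
Assuming get_args() ends by returning parser.parse_args() as above, a hypothetical invocation (script name, host names, and addresses are made up) would look like:

# python vfs_stress_server.py vast-cluster -c client1 client2 \
#     -e /export --start_vip 198.51.100.1 --end_vip 198.51.100.8 --tenants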
Example #5
def main():
    global logger, stop_event
    logger = ConsoleLogger('bmp_split_stress').logger
    stop_event = Event()
    dirs_queue = queue.Queue()
    dirs_to_delete = queue.Queue()

    args = get_args()
    stopper_thread = Timer(60 * args.duration, workload_stopper)
    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'BM_SPLIT_DIR',
                      logger=logger,
                      sudo=True,
                      timeout=int(args.timeout * 0.1),
                      retrans=args.retrans,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warning(
            "VIP range is bad or None. Falling back to mounting storage server IP"
        )
        mounter.mount()
    logger.info("Done mounting. Starting workload ...")
    futures = []
    stopper_thread.start()
    with ThreadPoolExecutor() as executor:
        for _ in range(64):
            futures.append(
                executor.submit(dir_producer_worker, mounter, dirs_queue,
                                dirs_to_delete))
        for _ in range(64):
            futures.append(executor.submit(files_producer_worker, dirs_queue))
    futures_validator(futures)
    logger.info("#### Totally created >>> Directories: {} Files: {}".format(
        dirs_counter, files_counter))
    logger.info("#### Deleting Workload DataSet ####")
    stop_event = Event()
    futures = []
    with ThreadPoolExecutor() as executor:
        for _ in range(64):
            futures.append(executor.submit(dirs_delete_worker, dirs_to_delete))
    futures_validator(futures)
    logger.info("#### Totally deleted directories: {}".format(delete_counter))
    logger.info("Workload is Done. Come back tomorrow.")
Example #6
def main():
    global logger, stop_event
    logger = ConsoleLogger('msrsync_sim').logger
    stats_collector = StatsCollector(print_stats_worker)
    stop_event = Event()
    args = get_args()
    test_dir = args.test_dir
    files_num = args.files_num

    logger.info("Initialising DataSet ...")
    init_data_array()

    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'MSRSYNC_SIM',
                      logger=logger,
                      nodes=0,
                      domains=0,
                      sudo=True,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warn("VIP range is bad or None. Falling back to single mount")
        mounter.mount()
    mount_point = mounter.get_random_mountpoint()
    try:
        os.mkdir(os.path.join(mount_point, test_dir))
    except FileExistsError as e:
        logger.warning(f"{e}")
    logger.info(f"Test directory {test_dir} created on {mount_point}")

    futures = []
    logger.info(f"Going to produce {files_num * 100} files")
    with ThreadPoolExecutor() as executor:
        for _ in range(100):
            futures.append(
                executor.submit(files_producer_worker,
                                mounter.get_random_mountpoint(), test_dir,
                                files_num))
    futures_validator(futures, True)

    logger.info("Done writing dataset, verifying...")
    scandir_iterator = os.scandir(os.path.join(mount_point, test_dir))
    for file_entry in scandir_iterator:
        file_name = file_entry.name
        stored_checksum, stored_length = file_name.split('_')
        if int(stored_length) != os.stat(file_entry.path).st_size:
            raise RuntimeError(
                f"File {file_entry.path} length mismatch!"
                f" {int(stored_length)} != {os.stat(file_entry.path).st_size}")
        with open(file_entry.path, 'rb') as f:
            buf = f.read()
            checksum = hashlib.md5(buf).hexdigest()
            if stored_checksum != checksum:
                raise RuntimeError(f"File {file_entry.path} checksum mismatch!"
                                   f" {stored_checksum} != {checksum}")

    logger.info("### Workload is Done. Come back tomorrow.")
Example #7
def main():
    global logger, stop_event
    logger = ConsoleLogger('NLM_STRESS').logger
    stats_collector = StatsCollector(print_stats_worker)
    stop_event = multiprocessing.Event()
    writer_worker = None
    reader_worker = None
    args = get_args()
    test_dir = args.test_dir
    file_name = args.file_name
    file_size = args.size
    locking_processes = args.processes
    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'NLM_STRESS',
                      logger=logger,
                      nodes=0,
                      domains=0,
                      sudo=True,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warn("VIP range is bad or None. Falling back to single mount")
        mounter.mount()
    test_dir_path = os.path.join(mounter.get_random_mountpoint(), test_dir)
    try:
        os.mkdir(test_dir_path)
    except FileExistsError as e:
        logger.warn("{}".format(e))
    logger.info(f"Test directory created on {test_dir_path}")
    test_file_path = os.path.join(test_dir_path, file_name)
    if not os.path.exists(test_file_path):
        with open(test_file_path, "w+b") as fh:
            fh.write(os.urandom(file_size))
    logger.info(f"Test file created on {test_file_path}")
    futures = []
    stats_collector.start()
    if args.withio:
        writer_worker = threading.Thread(target=file_writer_worker,
                                         args=(mounter.mount_points, test_dir,
                                               file_name, file_size))
        reader_worker = threading.Thread(target=file_reader_worker,
                                         args=(mounter.mount_points, test_dir,
                                               file_name, file_size))
        writer_worker.start()
        reader_worker.start()
    logger.info(f"Going to fork {locking_processes} locking processes")
    with ProcessPoolExecutor(locking_processes) as executor:
        for _ in range(locking_processes):
            futures.append(
                executor.submit(file_locker_worker, mounter.mount_points,
                                test_dir, file_name))
    futures_validator(futures)
    if writer_worker is not None:
        writer_worker.join()
    if reader_worker is not None:
        reader_worker.join()
    stats_collector.cancel()
    logger.info("### Workload is Done. Come back tomorrow.")
Example #8
def main():
    global logger, stop_event
    logger = ConsoleLogger('mass_reader').logger
    stop_event = Event()
    files_queue = queue.Queue()

    args = get_args()
    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'MASSREAD',
                      logger=logger,
                      nodes=0,
                      domains=0,
                      sudo=True,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warning(
            "VIP range is bad or None. Falling back to mounting storage server IP"
        )
        mounter.mount()

    futures = []
    mount_points = mounter.mount_points
    scanning_threads = MAX_SCANNING_THREADS if not args.snapshots else MAX_SCANNING_THREADS // 2
    stats_collector = TestStatsCollector(print_stats, args=[
        files_queue,
    ])
    stats_collector.start()
    logger.info("Workers ThreadPool started")
    with ThreadPoolExecutor() as executor:
        futures.append(
            executor.submit(dir_scanner,
                            files_queue,
                            mount_points,
                            args.test_dir,
                            0,
                            read_snapshots=args.snapshots,
                            max_scanning_threads=scanning_threads))
        for _ in range(64):
            futures.append(
                executor.submit(reader_worker, files_queue, args.skipread))
    for future in futures:
        try:
            logger.info(
                "{}".format("Job Done OK" if not future.result() else ""))
        except Exception as e:
            logger.error(f"ThreadPool raised exception {e}")
            raise
    stats_collector.cancel()
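
reader_worker() is not defined in this section. A sketch, assuming each worker drains paths from the shared queue and either reads every file end to end or, with --skipread, only stats it (the idle-queue exit condition is a guess):

import os
import queue

def reader_worker(files_queue, skip_read=False):
    # Hypothetical sketch: consume file paths produced by dir_scanner.
    while not stop_event.is_set():
        try:
            path = files_queue.get(timeout=10)
        except queue.Empty:
            return  # assume an idle queue means the scanner is done
        if skip_read:
            os.stat(path)
        else:
            with open(path, 'rb') as f:
                while f.read(1024 * 1024):
                    pass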
Example #9
def main():
    global logger, stop_event, threads_count
    logger = ConsoleLogger('md_massdel').logger
    stop_event = Event()
    files_queue = queue.Queue()

    args = get_args()
    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'MASSDEL_RENAME',
                      logger=logger,
                      nodes=0,
                      domains=0,
                      sudo=True,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warning(
            "VIP range is bad or None. Falling back to mounting storage server IP"
        )
        mounter.mount()

    futures = []
    mount_points = mounter.mount_points
    stats_collector = TestStatsCollector(print_stats, args=[
        files_queue,
    ])
    stats_collector.start()
    logger.info("Workers ThreadPool started")
    with ThreadPoolExecutor() as executor:
        futures.append(
            executor.submit(dir_scanner, files_queue, mount_points,
                            args.test_dir, 0))
        if args.action == "all" or args.action == "rename":
            for _ in range(64):
                futures.append(executor.submit(rename_worker, files_queue))
        if args.action == "all" or args.action == "delete":
            for _ in range(64):
                futures.append(executor.submit(delete_worker, files_queue))
    for future in futures:
        try:
            logger.info(f'{"Job Done OK" if not future.result() else ""}')
        except Exception as e:
            logger.error(f"ThreadPool raised exception: {e}")
            raise
    stats_collector.cancel()
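
Sketches of the two undefined workers, under the same queue-draining assumptions as the reader sketch above (the rename scheme and race handling are guesses):

import os
import queue
import uuid

def rename_worker(files_queue):
    # Hypothetical sketch: give every scanned file a fresh unique name.
    while not stop_event.is_set():
        try:
            path = files_queue.get(timeout=10)
        except queue.Empty:
            return
        os.rename(path, f"{path}-{uuid.uuid4().hex}")

def delete_worker(files_queue):
    # Hypothetical sketch: unlink scanned files, tolerating races with the
    # rename workers when --action all runs both pools at once.
    while not stop_event.is_set():
        try:
            path = files_queue.get(timeout=10)
        except queue.Empty:
            return
        try:
            os.remove(path)
        except FileNotFoundError:
            pass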
Example #10
def main():
    global logger, stop_event
    logger = ConsoleLogger('patterns_sim').logger
    stop_event = Event()
    args = get_args()
    test_dir = args.test_dir

    logger.info("Initialising DataSet ...")
    init_data_array()

    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'PATTERNS',
                      logger=logger,
                      nodes=0,
                      domains=0,
                      sudo=True,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warn("VIP range is bad or None. Falling back to single mount")
        mounter.mount()
    mount_point = mounter.get_random_mountpoint()
    try:
        os.mkdir(os.path.join(mount_point, test_dir))
    except FileExistsError as e:
        logger.warning(f"{e}")
    logger.info(f"Test directory {test_dir} created on {mount_point}")

    file_name = f"hhmi_tstfile-{uuid.uuid4()}"
    with open(os.path.join(mount_point, test_dir, file_name), "wb") as f, \
            open(os.path.join('/vast', file_name), "wb") as f2:
        for i in range(1024 * 128):
            if round(random.random() * 10) % 2:
                buf_size = KB1 * 4 - 2
                hole_size = 2
            else:
                buf_size = KB1 * 4 - 1
                hole_size = 1
            skip_size = hole_size + random.choice([
                0, KB1 * 4, KB1 * 8, KB1 * 16, KB1 * 32, KB1 * 64, KB1 * 128,
                KB1 * 256, KB1 * 512, KB1 * 1024
            ])
            buf = get_random_buf(buf_size)
            f.write(buf)
            f2.write(buf)
            offset = f.tell()
            logger.debug(
                f"Going to write buf_size={buf_size} at offset={offset}")
            f.seek(offset + skip_size)
            f2.seek(offset + skip_size)
            logger.debug(f"Offset after seek={f.tell()}")

    logger.info("Comparing VAST vs Local client before fsync:")
    with open(os.path.join(mount_point, test_dir, file_name), "rb") as f:
        vast_checksum = hashlib.md5()
        for chunk in iter(lambda: f.read(MB1), b""):
            vast_checksum.update(chunk)
        vast_checksum = vast_checksum.hexdigest()
    with open(os.path.join('/vast', file_name), "rb") as f:
        local_checksum = hashlib.md5()
        for chunk in iter(lambda: f.read(MB1), b""):
            local_checksum.update(chunk)
        local_checksum = local_checksum.hexdigest()
    logger.info(
        f"Local checksum={local_checksum}, Vast checksum={vast_checksum}")
    assert vast_checksum == local_checksum
    os.remove(os.path.join('/vast', file_name))
    logger.info("### Workload is Done. Come back tomorrow.")
Example #11
def main():
    global logger, stop_event
    logger = ConsoleLogger('bmp_split_stress').logger
    stats_collector = StatsCollector(print_stats_worker)
    stop_event = Event()
    dirs_queue = queue.Queue()
    args = get_args()
    test_dir = args.test_dir
    dirs_num = args.dirs_num
    files_num = args.files_num
    logger.info("Mounting work path...")
    mounter = Mounter(args.cluster,
                      args.export,
                      'nfs3',
                      'BM_SPLIT_DIR',
                      logger=logger,
                      nodes=0,
                      domains=0,
                      sudo=True,
                      start_vip=args.start_vip,
                      end_vip=args.end_vip)
    try:
        mounter.mount_all_vips()
    except AttributeError:
        logger.warn("VIP range is bad or None. Falling back to single mount")
        mounter.mount()
    for i in range(dirs_num):
        dirs_queue.put(str(i))
    mount_point = mounter.get_random_mountpoint()
    try:
        os.mkdir(os.path.join(mount_point, test_dir))
    except FileExistsError as e:
        logger.warning("{}".format(e))
    logger.info("Test directory {} created on {}".format(test_dir, mount_point))
    stats_collector.start()

    try:
        futures = []
        logger.info("Going to produce {} Directories".format(dirs_num))
        with ThreadPoolExecutor() as executor:
            for _ in range(16):
                futures.append(
                    executor.submit(dir_producer_worker, mounter, test_dir,
                                    dirs_num, dirs_queue))
        futures_validator(futures, True)

        futures = []
        for i in range(dirs_num):
            dirs_queue.put(str(i))
        stop_event = Event()
        logger.info("Starting Producing Files...")
        with ThreadPoolExecutor() as executor:
            for _ in range(args.threads):
                futures.append(
                    executor.submit(files_producer_worker, mounter, dirs_queue,
                                    test_dir, files_num))
        futures_validator(futures)
        logger.info("### Workload is Done. Come back tomorrow.")
    finally:
        stats_collector.cancel()
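
Finally, the StatsCollector and TestStatsCollector classes used across these examples expose a Timer-like start()/cancel() pair and accept a callback plus optional args. A plausible reconstruction as a repeating threading.Timer (the 60-second interval is an assumption):

import threading

class StatsCollector(threading.Timer):
    # Hypothetical sketch: invoke the stats-printing callback every
    # `interval` seconds until cancel() is called.
    def __init__(self, function, interval=60, args=None):
        super().__init__(interval, function, args=args or [])

    def run(self):
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)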