Example #1
def create_ccw_addr_disk(params):
    """
    Create one ccw address disk

    :param params: dict of test parameters
    """
    type_name = params.get("type_name")
    disk_device = params.get("device_type")
    device_target = params.get("target_dev")
    device_bus = params.get("target_bus")
    device_format = params.get("target_format")
    source_file_path = params.get("virt_disk_device_source")
    disk_src_dict = {"attrs": {"file": source_file_path}}
    addr_str = params.get("addr_attrs")

    if source_file_path:
        libvirt.create_local_disk("file", source_file_path, 1, device_format)
    ccw_addr_disk = libvirt_disk.create_primitive_disk_xml(
        type_name, disk_device, device_target, device_bus, device_format,
        disk_src_dict, None)
    if addr_str:
        addr_dict = eval(addr_str)
        ccw_addr_disk.address = ccw_addr_disk.new_disk_address(
            **{"attrs": addr_dict})
    logging.debug("create_ccw_addr_disk xml: %s", ccw_addr_disk)
    return ccw_addr_disk
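
A minimal usage sketch with hypothetical parameter values; addr_attrs is the eval()-ed dict string consumed above, and its keys follow the cssid/ssid/devno attributes of a libvirt ccw address element:

# Hypothetical values for illustration only.
params = {
    "type_name": "file",
    "device_type": "disk",
    "target_dev": "vdb",
    "target_bus": "virtio",
    "target_format": "qcow2",
    "virt_disk_device_source": "/var/lib/libvirt/images/ccw_test.qcow2",
    "addr_attrs": "{'type': 'ccw', 'cssid': '0xfe', 'ssid': '0x0', 'devno': '0x1111'}",
}
ccw_addr_disk = create_ccw_addr_disk(params)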
Example #2
def create_same_pci_slot_disk(params):
    """
    Create a disk that reuses an existing PCI slot

    :param params: dict of test parameters
    """
    type_name = params.get("type_name")
    disk_device = params.get("device_type")
    device_target = params.get("target_dev")
    device_bus = params.get("target_bus")
    device_format = params.get("target_format")
    source_file_path = params.get("virt_disk_device_source")
    disk_src_dict = {"attrs": {"file": source_file_path}}
    vm_name = params.get("main_vm")

    if source_file_path:
        libvirt.create_local_disk("file", source_file_path, 1, device_format)
    pci_slot_addr_disk = libvirt_disk.create_primitive_disk_xml(
        type_name, disk_device, device_target, device_bus, device_format,
        disk_src_dict, None)

    # Get the existing vda disk address
    addr = vm_xml.VMXML.get_disk_address(vm_name, 'vda')
    if not addr:
        raise Exception("Failed to get vda disk address")

    pci_addr_dict = tr_pci_address_to_dict(addr)
    pci_slot_addr_disk.address = pci_slot_addr_disk.new_disk_address(
        **{"attrs": pci_addr_dict})
    logging.debug("create_pci_slot_addr_disk xml: %s", pci_slot_addr_disk)
    return pci_slot_addr_disk
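
The helper tr_pci_address_to_dict is referenced above but not part of this listing. A hypothetical reconstruction, assuming get_disk_address returns a "pci:0000:00:05.0"-style string:

def tr_pci_address_to_dict(addr):
    """
    Hypothetical helper: translate a PCI address string into the attrs
    dict that new_disk_address() expects.
    """
    # "pci:0000:00:05.0" -> domain "0000", bus "00", slot "05", function "0"
    domain, bus, slot_function = addr.split(':')[1:]
    slot, function = slot_function.split('.')
    return {"type": "pci",
            "domain": "0x%s" % domain,
            "bus": "0x%s" % bus,
            "slot": "0x%s" % slot,
            "function": "0x%s" % function}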
Example #3
    def setup_file_backend_env(params):
        """
        Setup file backend test environment

        :param params: dict of test parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        type_name = params.get("virt_disk_device_type")
        disk_device = params.get("virt_disk_device")
        device_target = params.get("virt_disk_device_target")
        device_bus = params.get("virt_disk_device_bus")
        device_format = params.get("virt_disk_device_format")
        blockcopy_image_name = params.get("blockcopy_image_name")
        emulated_size = int(params.get("emulated_size", "2"))

        libvirt.create_local_disk("file", blockcopy_image_name, emulated_size,
                                  "qcow2")

        disk_src_dict = {"attrs": {"file": blockcopy_image_name}}

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("guest xml after undefined and recreated:\n%s",
                      file_disk)
        return file_disk
Example #4
    def setup_file_env(params):
        """
        Setup file test environment

        :param params: dict of test parameters
        """
        # additional_disk being False means no additional disk needs to be created
        if additional_disk is False:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        backstore_image_target_path = params.get("backstore_image_name")
        tmp_blkpull_path.append(backstore_image_target_path)
        libvirt.create_local_disk("file", backstore_image_target_path, "1",
                                  "qcow2")
        backing_chain_list.append(backstore_image_target_path)

        disk_src_dict = {"attrs": {"file": backstore_image_target_path}}

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("disk xml is:\n%s" % file_disk)
        # Sync VM xml.
        vmxml.add_device(file_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)
Example #5
def create_vhostuser_disk(params):
    """
    Create one vhost-user disk

    :param params: dict of test parameters
    """
    type_name = params.get("type_name")
    disk_device = params.get("device_type")
    device_target = params.get("target_dev")
    device_bus = params.get("target_bus")
    device_format = params.get("target_format")
    queues = params.get("queues", "1")
    sock_path = params.get("source_file")
    disk_src_dict = {"attrs": {"type": "unix",
                     "path": sock_path}}
    vhostuser_disk = libvirt_disk.create_primitive_disk_xml(
        type_name, disk_device,
        device_target, device_bus,
        device_format, disk_src_dict, None)
    vhostuser_disk.snapshot = "no"
    device_model = params.get("model")
    if device_model:
        vhostuser_disk.model = device_model
    driver_dict = {"name": "qemu", "type": device_format, "queues": int(queues)}
    packed = params.get('packed')
    ats = params.get('ats')
    if packed:
        driver_dict.update({'packed': packed})
    if ats:
        driver_dict.update({'ats': ats})
    vhostuser_disk.driver = driver_dict
    return vhostuser_disk
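
A minimal usage sketch with hypothetical parameter values; the vhost-user socket is assumed to be served already by a backend such as qemu-storage-daemon:

# Hypothetical values for illustration only.
params = {
    "type_name": "vhostuser",
    "device_type": "disk",
    "target_dev": "vdb",
    "target_bus": "virtio",
    "target_format": "raw",
    "queues": "4",
    "source_file": "/tmp/vhost-user-blk.sock",
    "packed": "on",
}
vhostuser_disk = create_vhostuser_disk(params)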
Example #6
def create_vhostuser_disk(params):
    """
    Create one vhost-user disk

    :param params: dict of test parameters
    """
    type_name = params.get("type_name")
    disk_device = params.get("device_type")
    device_target = params.get("target_dev")
    device_bus = params.get("target_bus")
    device_format = params.get("target_format")
    queues = params.get("queues", "1")
    sock_path = params.get("source_file")
    disk_src_dict = {"attrs": {"type": "unix", "path": sock_path}}
    vhostuser_disk = libvirt_disk.create_primitive_disk_xml(
        type_name, disk_device, device_target, device_bus, device_format,
        disk_src_dict, None)
    vhostuser_disk.snapshot = "no"
    driver_dict = {
        "name": "qemu",
        "type": device_format,
        "queues": int(queues)
    }
    vhostuser_disk.driver = driver_dict
    return vhostuser_disk
Example #7
    def setup_nbd_env(params):
        """
        Setup nbd test environment

        :param params: dict of test parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get server hostname.
        hostname = process.run('hostname',
                               ignore_status=False,
                               shell=True,
                               verbose=True).stdout_text.strip()
        # Setup backend storage
        nbd_server_host = hostname
        nbd_server_port = params.get("nbd_server_port", "10001")
        image_path = params.get("emulated_image",
                                "/var/lib/libvirt/images/nbdtest.img")
        enable_ga_agent = "yes" == params.get("enable_ga_agent", "no")

        # Create NbdExport object
        nbd = NbdExport(image_path,
                        image_format=device_format,
                        port=nbd_server_port)
        nbd.start_nbd_server()

        # Prepare disk source xml
        source_attrs_dict = {"protocol": "nbd", "tls": "%s" % "no"}

        disk_src_dict = {}
        disk_src_dict.update({"attrs": source_attrs_dict})
        disk_src_dict.update(
            {"hosts": [{
                "name": nbd_server_host,
                "port": nbd_server_port
            }]})

        network_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)

        logging.debug("disk xml is:\n%s" % network_disk)
        # Sync VM xml.
        vmxml.add_device(network_disk)
        vmxml.sync()
        if enable_ga_agent:
            vm.prepare_guest_agent()
            vm.destroy(gracefully=False)
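
The NBD server started above needs a matching teardown. A cleanup sketch, assuming NbdExport also exposes a cleanup() method and that the same image/port parameters are still available:

# Cleanup sketch for the matching teardown path (assumed API).
try:
    nbd = NbdExport(image_path, image_format=device_format,
                    port=nbd_server_port)
    nbd.cleanup()
except Exception as nbd_ex:
    logging.error("Clean up nbd failed: %s", str(nbd_ex))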
Example #8
def create_customized_disk(params):
    """
    Create one customized disk with related attributes

    :param params: dict of test parameters
    """
    type_name = params.get("type_name")
    disk_device = params.get("device_type")
    device_target = params.get("target_dev")
    device_bus = params.get("target_bus")
    device_format = params.get("target_format")
    source_file_path = params.get("virt_disk_device_source")
    source_dict = {}

    if source_file_path:
        cmd = "qemu-img create -f %s %s %s %s" % (
            device_format, "-o preallocation=full", source_file_path, "100M")
        process.run(cmd, shell=True, ignore_status=True)
        cleanup_files.append(source_file_path)
        if 'block' in type_name:
            source_dict.update({"dev": source_file_path})
        else:
            source_dict.update({"file": source_file_path})

    disk_src_dict = {"attrs": source_dict}

    customized_disk = libvirt_disk.create_primitive_disk_xml(
        type_name, disk_device, device_target, device_bus, device_format,
        disk_src_dict, None)

    # The slice size could be obtained with 'du -b source_file_path', but that is not necessary here
    disk_slice_attrs = params.get('disk_slice_attrs')
    if disk_slice_attrs:
        disk_source = customized_disk.source
        disk_source.slices = customized_disk.new_slices(
            **eval(disk_slice_attrs))
        customized_disk.source = disk_source

    additional_driver_attrs = params.get("additional_driver_attrs")
    if additional_driver_attrs:
        customized_disk.driver = dict(customized_disk.driver,
                                      **eval(additional_driver_attrs))

    if disk_device == "cdrom":
        customized_disk.readonly = True
    LOG.debug("create customized xml: %s", customized_disk)
    return customized_disk
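
The two string parameters eval()-ed above are dict literals. Hypothetical example values, with key names assumed to mirror the libvirt <slices> and <driver> disk attributes:

# Hypothetical values for illustration only.
params["disk_slice_attrs"] = ("{'slice_type': 'storage', "
                              "'slice_offset': '0', "
                              "'slice_size': '104857600'}")
params["additional_driver_attrs"] = "{'detect_zeroes': 'on'}"
customized_disk = create_customized_disk(params)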
Example #9
def create_customized_disk(params):
    """
    Create one customized disk with related attributes

    :param params: dict of test parameters
    """
    type_name = params.get("type_name")
    disk_device = params.get("device_type")
    device_target = params.get("target_dev")
    device_bus = params.get("target_bus")
    device_format = params.get("target_format")
    source_file_path = params.get("virt_disk_device_source")
    source_dict = {}
    if source_file_path:
        if 'block' in type_name:
            source_dict.update({"dev": source_file_path})
        else:
            source_dict.update({"file": source_file_path})
    startup_policy = params.get("startupPolicy")
    if startup_policy:
        source_dict.update({"startupPolicy": startup_policy})
    disk_src_dict = {"attrs": source_dict}

    addr_str = params.get("addr_attrs")

    customized_disk = libvirt_disk.create_primitive_disk_xml(
        type_name, disk_device, device_target, device_bus, device_format,
        disk_src_dict, None)
    if addr_str:
        addr_dict = eval(addr_str)
        customized_disk.address = customized_disk.new_disk_address(
            **{"attrs": addr_dict})
    target_tray = params.get("tray")
    if target_tray:
        customized_disk.target = dict(customized_disk.target,
                                      **{'tray': target_tray})
    copy_on_read = params.get("copy_on_read")
    if copy_on_read:
        customized_disk.driver = dict(customized_disk.driver,
                                      **{'copy_on_read': copy_on_read})
    LOG.debug("create customized xml: %s", customized_disk)
    return customized_disk
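
A minimal usage sketch exercising the optional knobs above with hypothetical values; startupPolicy and tray map directly onto the corresponding libvirt disk attributes:

# Hypothetical values for illustration only.
params = {
    "type_name": "file",
    "device_type": "cdrom",
    "target_dev": "sdc",
    "target_bus": "sata",
    "target_format": "raw",
    "virt_disk_device_source": "/var/lib/libvirt/images/test.iso",
    "startupPolicy": "optional",
    "tray": "open",
}
cdrom_disk = create_customized_disk(params)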
Example #10
    def setup_iscsi_block_env(params):
        """
        Setup iscsi as block test environment

        :param params: dict of test parameters
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        emulated_size = params.get("emulated_size", "10G")
        chap_user = params.get("iscsi_user")
        chap_passwd = params.get("iscsi_password")
        auth_sec_usage_type = params.get("secret_usage_type")
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            chap_passwd.encode(encoding)).decode(encoding)

        device_source = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=True,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip="127.0.0.1")

        auth_sec_uuid = libvirt_ceph_utils._create_secret(
            auth_sec_usage_type, secret_string)
        disk_auth_dict = {
            "auth_user": chap_user,
            "secret_type": auth_sec_usage_type,
            "secret_uuid": auth_sec_uuid
        }

        disk_src_dict = {'attrs': {'dev': device_source}}
        iscsi_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, disk_auth_dict)
        # Add disk xml.
        logging.debug("disk xml is:\n%s" % iscsi_disk)
        # Sync VM xml.
        vmxml.add_device(iscsi_disk)
        vmxml.sync()
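
The iSCSI target and the libvirt secret created above need matching teardown; a minimal cleanup sketch reusing the same utilities, assuming auth_sec_uuid was saved from setup:

# Cleanup sketch for the matching teardown path.
libvirt.setup_or_cleanup_iscsi(is_setup=False)
if auth_sec_uuid:
    virsh.secret_undefine(auth_sec_uuid, ignore_status=True)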
Example #11
    def setup_block_env(params):
        """
        Setup block test environment

        :param params: dict of test parameters
        """
        if additional_disk is False:
            return
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
        disk_src_dict = {'attrs': {'dev': device_source}}
        backing_chain_list.append(device_source)

        file_disk = libvirt_disk.create_primitive_disk_xml(
            type_name, disk_device, device_target, device_bus, device_format,
            disk_src_dict, None)
        logging.debug("disk xml is:\n%s" % file_disk)
        # Sync VM xml.
        vmxml.add_device(file_disk)
        vmxml.sync()
        _generate_backstore_attribute(params)
Example #12
def run(test, params, env):
    """
    Test <transient/> disks.

    1. Prepare test environment, destroy VMs.
    2. Perform 'qemu-img create' operation.
    3. Edit disks xml and start the domains.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """
    def check_transient_disk_keyword(vm_names):
        """
        Check VM disk with TRANSIENT keyword.

        :param vm_names: VM names list.
        """
        logging.info("Checking disk with transient keyword...")

        output0 = ""
        output1 = ""
        for i in range(2):
            ret = virsh.dumpxml(vm_names[i], ignore_status=False)

            cmd = ("echo \"%s\" | grep '<source file=.*TRANSIENT.*/>'" %
                   ret.stdout_text)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Check transient disk on %s failed" % vm_names[i])
            if i == 0:
                output0 = astring.to_text(
                    process.system_output(cmd, ignore_status=False,
                                          shell=True))
            else:
                output1 = astring.to_text(
                    process.system_output(cmd, ignore_status=False,
                                          shell=True))
        if output0 == output1:
            test.fail("Two vms have the same transient disk source %s"
                      % output0)

    def check_share_transient_disk(vms_list):
        """
        Check share base image of <transient/> disks.

        :param vms_list: VM object list.
        """
        logging.info("Checking share base image of transient disk...")

        try:
            test_str = "teststring"
            sha_cmd = ("sha1sum /dev/%s" % disk_target)
            cmd = ("fdisk -l /dev/%s && mkfs.ext4 -F /dev/%s && mount /dev/%s"
                   " /mnt && echo '%s' > /mnt/test && umount /mnt" %
                   (disk_target, disk_target, disk_target, test_str))

            # check on vm0.
            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
            s, o = session0.cmd_status_output(cmd)
            logging.debug("session in vm0 exit %s; output: %s", s, o)
            if s:
                session0.close()
                test.fail("Shared disk on vm0 doesn't work well")

            vm0_disk_sha1 = session0.cmd_output(sha_cmd)
            session0.close()
            vms_list[0]['vm'].destroy(gracefully=False)

            # check on vm1.
            session = vms_list[1]['vm'].wait_for_login(timeout=10)
            vm1_disk_sha1 = session.cmd_output(sha_cmd)
            if vm0_disk_sha1 == vm1_disk_sha1:
                session.close()
                test.fail(
                    "Can still find the file created in vm0's transient disk")

            s, o = session.cmd_status_output(cmd)
            logging.debug("session in vm1 exit %s; output: %s", s, o)
            if s:
                session.close()
                test.fail("Shared disk on vm1 doesn't work well")
            session.close()
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            test.error("Test transient disk shareable: login failed")

    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        test.cancel("No multi vms provided.")

    # Disk specific attributes.
    disk_bus = params.get("virt_disk_bus", "virtio")
    disk_target = params.get("virt_disk_target", "vdb")
    disk_type = params.get("virt_disk_type", "file")
    disk_device = params.get("virt_disk_device", "disk")
    disk_format = params.get("virt_disk_format", "qcow2")
    target_format = params.get("virt_target_format", "qcow2")
    hotplug = "yes" == params.get("virt_disk_vms_hotplug", "no")
    status_error = params.get("status_error").split()
    sharebacking = params.get("share_transient").split()
    on_reboot_destroy = "yes" == params.get("on_reboot_destroy", "no")
    disk_source_path = data_dir.get_data_dir()
    disk_path = ""

    libvirt_version.is_libvirt_feature_supported(params)

    # Backup vm xml files.
    vms_backup = []
    # We just use 2 VMs for testing.
    for i in range(2):
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i])
        vms_backup.append(vmxml_backup)
    # Initialize VM list
    vms_list = []
    try:
        # Create disk images if needed.
        disks = []
        image_size = params.get("image_size", "1G")
        disk_path = "%s/test.%s" % (disk_source_path, disk_format)
        disk_source = libvirt.create_local_disk("file",
                                                disk_path,
                                                image_size,
                                                disk_format=disk_format)
        disk_src_dict = {"attrs": {"file": disk_path}}
        disks.append({"format": disk_format, "source": disk_source})

        # Compose the new domain xml
        for i in range(2):
            vm = env.get_vm(vm_names[i])
            # Destroy domain first.
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Configure vm disk options and define vm
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_names[i])
            disk_xml = libvirt_disk.create_primitive_disk_xml(
                disk_type, disk_device, disk_target, disk_bus, target_format,
                disk_src_dict, None)

            if sharebacking[i] == "yes":
                disk_xml.sharebacking = "yes"
                if on_reboot_destroy:
                    vmxml.on_reboot = "destroy"
            else:
                disk_xml.transient = "yes"

            logging.debug("The disk xml is: %s" % disk_xml.xmltreefile)

            if not hotplug:
                # If we are not testing hotplug,
                # add disks to domain xml and sync.
                vmxml.add_device(disk_xml)
                logging.debug("vm xml is {}".format(vmxml))
                vmxml.sync()
            vms_list.append({
                "name": vm_names[i],
                "vm": vm,
                "status": "yes" == status_error[i],
                "disk": disk_xml
            })
            logging.debug("vms_list %s" % vms_list)

        for i in range(len(vms_list)):
            try:
                # Try to start the domain.
                vms_list[i]['vm'].start()
                # Check if VM is started as expected.
                if not vms_list[i]['status']:
                    test.fail('VM started unexpectedly.')

                session = vms_list[i]['vm'].wait_for_login()
                # if we are testing hotplug, it need to start domain and
                # then run virsh attach-device command.
                if hotplug:
                    vms_list[i]['disk'].xmltreefile.write()
                    result = virsh.attach_device(vms_list[i]['name'],
                                                 vms_list[i]['disk'].xml,
                                                 debug=True).exit_status
                    os.remove(vms_list[i]['disk'].xml)

                    # Check if the return code of attach-device
                    # command is as expected.
                    if 0 != result and vms_list[i]['status']:
                        test.fail('Failed to hotplug disk device')
                    elif 0 == result and not vms_list[i]['status']:
                        test.fail('Hotplugged disk device unexpectedly.')

                if i == 1:
                    check_transient_disk_keyword(vm_names)
                    check_share_transient_disk(vms_list)

                session.close()
            except virt_vm.VMStartError as start_error:
                if vms_list[i]['status']:
                    test.fail("VM failed to start."
                              "Error: %s" % str(start_error))
    finally:
        # Stop VMs.
        for i in range(len(vms_list)):
            if vms_list[i]['vm'].is_alive():
                vms_list[i]['vm'].destroy(gracefully=False)

        # Recover VMs.
        for vmxml_backup in vms_backup:
            vmxml_backup.sync()

        # Remove disks.
        for img in disks:
            if "source" in img:
                os.remove(img["source"])
Example #13
def create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True):
    """
    Set up or clean up a ceph-backed vm disk with the given parameters

    :param vm: the vm object
    :param params: dict with the disk xml configuration
    :param is_setup: True to set up, False to clean up
    """
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
    logging.debug("original xml is: %s", vmxml)

    # Device related configurations
    device_format = params.get("virt_disk_device_format", "raw")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdb")
    hotplug = "yes" == params.get("virt_disk_device_hotplug", "no")
    keep_raw_image_as = "yes" == params.get("keep_raw_image_as", "no")

    # Ceph related configurations
    ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
    ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
    ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
    ceph_client_name = params.get("ceph_client_name")
    ceph_client_key = params.get("ceph_client_key")
    ceph_auth_user = params.get("ceph_auth_user")
    ceph_auth_key = params.get("ceph_auth_key")
    auth_sec_usage_type = params.get("ceph_auth_sec_usage_type", "ceph")
    storage_size = params.get("storage_size", "1G")
    img_file = params.get("ceph_image_file")
    attach_option = params.get("virt_device_attach_option", "--live")
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    key_opt = ""
    is_local_img_file = img_file is None
    rbd_key_file = None

    # Track the ceph config file path so we know whether to delete it at the end of the test
    ceph_cfg = ""
    disk_auth_dict = None
    auth_sec_uuid = None
    names = ceph_disk_name.split('/')
    pool_name = names[0]
    image_name = names[1]
    if not utils_package.package_install(["ceph-common"]):
        raise exceptions.TestError("Failed to install ceph-common")

    # Create config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(ceph_mon_ip)
    # If auth is enabled, prepare a local keyring file
    if ceph_client_name and ceph_client_key:
        with open(key_file, 'w') as f:
            f.write("[%s]\n\tkey = %s\n" %
                    (ceph_client_name, ceph_client_key))
        key_opt = "--keyring %s" % key_file
        rbd_key_file = key_file
    if is_setup:
        # If auth is enabled, prepare the disk auth dict
        if ceph_client_name and ceph_client_key:
            auth_sec_uuid = _create_secret(auth_sec_usage_type, ceph_auth_key)
            disk_auth_dict = {"auth_user": ceph_auth_user,
                              "secret_type": auth_sec_usage_type,
                              "secret_uuid": auth_sec_uuid}
        # Clean up the image file if it exists
        ceph.rbd_image_rm(ceph_mon_ip, pool_name,
                          image_name, keyfile=rbd_key_file)

        # Create the image file if it does not exist
        _create_image(device_format, img_file, vm.name, storage_size,
                      ceph_disk_name, ceph_mon_ip, key_opt,
                      ceph_auth_user, ceph_auth_key)

        # Disk related config
        disk_src_dict = {"attrs": {"protocol": "rbd",
                                   "name": ceph_disk_name},
                         "hosts":  [{"name": ceph_mon_ip,
                                     "port": ceph_host_port}]}
        # Create network disk
        disk_xml = libvirt_disk.create_primitive_disk_xml("network", device, device_target, device_bus,
                                                          device_format, disk_src_dict, disk_auth_dict)
        if not keep_raw_image_as:
            if hotplug:
                virsh.attach_device(vm.name, disk_xml.xml,
                                    flagstr=attach_option, ignore_status=False, debug=True)
            else:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
                vmxml.add_device(disk_xml)
                vmxml.sync()
    else:
        ceph.rbd_image_rm(ceph_mon_ip, pool_name,
                          image_name, keyfile=rbd_key_file)
        # Remove ceph config and key file if created.
        for file_path in [ceph_cfg, key_file]:
            if os.path.exists(file_path):
                os.remove(file_path)
        if is_local_img_file and img_file and os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid, ignore_status=True)
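
A minimal usage sketch showing both directions of the helper, with vm and params prepared by the caller:

# Setup: create the rbd image and attach the ceph-backed disk.
create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True)
# ... exercise the disk ...
# Teardown: remove the rbd image, secret, and config/key files.
create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=False)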