Example #1
def setup_tftp_root(tftp_rootdir):
    """
    [DESTRUCTIVELY!!!] Sets up a TFTP root to work

    It will wipe anything in some parts of there with 'rsync --delete'
    """
    def _rsync_files(dest, files):
        try:
            commonl.makedirs_p(dest, 0o0775)
            cmdline = ["rsync", "-a", "--delete"] + files + [dest]
            subprocess.check_output(cmdline,
                                    shell=False,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            logging.error("PXE setup: root::%s: can't copy files %s "
                          " (do they exist?)\n%s" %
                          (dest, " ".join(files), e.output))
            raise

    # TFTP setup
    commonl.makedirs_p(os.path.join(tftp_rootdir, "pxelinux.cfg"), 0o0775)
    if 'root' in architectures:
        arch_data = architectures['root']
        _rsync_files(tftp_rootdir, arch_data['copy_files'])
    for arch_name, arch_data in architectures.iteritems():
        if arch_name == 'root':  # skip, we handled it ...
            continue  # ... differently
        tftp_arch_dir = os.path.join(tftp_rootdir, arch_name)
        _rsync_files(tftp_arch_dir, arch_data['copy_files'])
        # We always use the same configuration; because the rsync
        # above might remove the symlink, we re-create it
        # We use a relative symlink so in.tftpd doesn't nix it
        commonl.symlink_f("../pxelinux.cfg",
                          os.path.join(tftp_arch_dir, "pxelinux.cfg"))
Example #2
 def poll(self, testcase, run_name, buffers_poll):
     target = self.target
     # we name the screenshots after the poll_name, as we'll share
     # them among multiple expectations
     buffers_poll.setdefault('screenshot_count', 0)
     buffers_poll.setdefault('screenshots', [])
     dirname = os.path.join(testcase.tmpdir,
                            'expect-buffer-poll-%s' % self.poll_name)
     commonl.makedirs_p(dirname)
     filename = os.path.join(
         dirname,
         '.'.join([
             'screenshot',
             run_name,
             self.poll_name,
             # FIXME: replace number with datestamp? ideally from server?
             '%02d' % buffers_poll['screenshot_count'],
             'png'
         ]))
     target.capture.get(self.capturer, filename)
     buffers_poll['screenshot_count'] += 1
     buffers_poll['screenshots'].append(filename)
     target.report_info('captured screenshot from %s to %s' %
                        (self.capturer, filename),
                        dlevel=2)
Example #3
 def _capture_path(target):
     # FIXME: Ideally we'd include the ALLOCID here, so we could
     # keep data after release for future reference?
     capture_path = os.path.join(target.state_dir, "capture")
     # just make sure it always exists
     commonl.makedirs_p(capture_path)
     return capture_path
Example #4
    def _mkreport_junit(self, _tc, kws, header, output, tag_info,
                        reproduction):

        for hook in self.junit_hooks:
            hook(self, _tc, kws, output)
        jtc = junit_xml.TestCase(self.junit_name % kws,
                                 classname=self.junit_classname % kws,
                                 elapsed_sec=_tc.ts_end - _tc.ts_start,
                                 stdout=header + tag_info + reproduction,
                                 stderr=None)

        # FIXME: nail down the exception
        # <error/failure/blockage/skipped> or cause; put that only in
        # the message and let's put the whole output always in
        # stdout, with the rest of the info on stderr
        msg_tag = kws['msg_tag']
        if msg_tag == "FAIL":
            jtc.add_failure_info(message="Failed", output=output)
        elif msg_tag == "ERRR":
            jtc.add_error_info(message="Error", output=output)
        elif msg_tag == "BLCK":
            jtc.add_error_info(message="Infrastructure", output=output)
        elif msg_tag == "SKIP":
            if self.junit_report_skip:
                jtc.add_skipped_info(message="Skipped", output=output)
            else:
                jtc.add_skipped_info(message="Skipped")
                jtc.stdout = None
                jtc.stderr = None
        elif msg_tag == "PASS":
            if self.junit_report_pass:
                jtc.stderr = output
            elif self.junit_report_pass == None:
                # we don't want *anything*
                jtc.stderr = None
                jtc.stdout = None
            else:  # False
                jtc.stderr = "<inclusion of output disabled by " \
                             "configuration setting of " \
                             "tcfl.report.junit_report_pass>"
                jtc.stdout = "<inclusion of output disabled by " \
                             "configuration setting of " \
                             "tcfl.report.junit_report_pass>"

        # Write the JUNIT to a pickle file, as we'll join it later
        # with the rest in _finalize. We can't put it in a
        # global because this testcase might be running in a separate
        # thread or process.  later == when the global testcase
        # reporter (tcfl.tc.tc_global) emits a COMPLETION message,
        # then we call _finalize()
        domain = commonl.file_name_make_safe(self.junit_domain % kws)
        # use the core keywords, so it is not modified
        tc_hash = _tc.kws['tc_hash']
        # Note we store it in the common
        pathname = os.path.join(tcfl.tc.tc_c.tmpdir, "junit", domain)
        commonl.makedirs_p(pathname)
        with open(os.path.join(pathname, tc_hash + ".pickle"), "wb") as picklef:
            cPickle.dump(jtc, picklef, protocol=2)
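
The _finalize() step referenced in the comment is outside this excerpt; here is a sketch of how the per-testcase pickles could be joined into a single JUnit file, assuming the junit_xml package used above (the function and suite names here are made up):

import glob
import os
import cPickle
import junit_xml

def _finalize_sketch(domain_path, output_filename):
    # sketch: load every per-testcase pickle written above and emit
    # one JUnit XML test suite containing all of them
    test_cases = []
    for pickle_name in glob.glob(os.path.join(domain_path, "*.pickle")):
        with open(pickle_name, "rb") as picklef:
            test_cases.append(cPickle.load(picklef))
    suite = junit_xml.TestSuite("tcf", test_cases)
    with open(output_filename, "w") as f:
        f.write(junit_xml.TestSuite.to_xml_string([suite]))
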
Example #5
def request(groups, user, obo_user,
            priority = None, preempt = False,
            queue = False, reason = None):
    """
    :param dict groups: dictionary keyed by group name of lists of
      target names

    """
    assert isinstance(groups, dict)
    for group_name, target_list in groups.items():
        assert isinstance(group_name, basestring)
        assert isinstance(target_list, list)
        # FIXME: verify not empty
        for target_name in target_list:
            assert isinstance(target_name, basestring)
            assert target_is_valid(target_name)

    user = user._get_current_object()
    obo_user = obo_user._get_current_object()
    assert isinstance(user, ttbl.user_control.User), \
        "user is %s (%s)" % (user, type(user))
    assert isinstance(obo_user, ttbl.user_control.User)

    if priority != None:
        assert priority > 0
        # FIXME: verify calling user has this priority
    else:
        priority = 500 # DEFAULT FROM USER

    assert isinstance(preempt, bool)
    assert isinstance(queue, bool)
    assert reason == None or isinstance(reason, basestring)

    allocationid = commonl.mkid(obo_user.get_id() + str(time.time()))

    dirname = os.path.join(path, allocationid)
    commonl.makedirs_p(dirname + "/guests")
    commonl.makedirs_p(dirname + "/groups")
    alloc = one_c(allocationid, dirname)

    alloc.set("user", obo_user.get_id())
    alloc.set("creator", user.get_id())
    alloc.timestamp()
    for group_name, target_list in groups.items():
        # FIXME: this is severely limited in size; we need a normal
        # file to set this info, with one target per file
        alloc.set("groups/" + group_name, " ".join(target_list))

    result = {
        # { 'busy', 'queued', 'allocated', 'rejected' },
        "state": 'rejected',
        "allocationid": allocationid,
        #"allocationid": None,    # if queued; derived from OWNER's cookie
        # "not allowed on TARGETNAMEs"
        # "targets TARGETNAMEs are busy"
        "message": "not implemented yet"
    }
    return result
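
commonl.mkid above derives the allocation ID from the requesting user plus a timestamp. A minimal sketch of such a helper, assuming it just hashes its input into a short, filesystem-safe string (the real implementation may use a different hash or length):

import hashlib

def mkid_sketch(something, length=10):
    # sketch: derive a stable identifier by hashing the input; the
    # result is safe to use as a directory name, as request() does
    return hashlib.sha256(something).hexdigest()[:length]
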
Example #6
    def eval_00_fsdb_create(self):

        self.fsdb_dir = os.path.join(self.tmpdir, "db")
        commonl.makedirs_p(self.fsdb_dir)
        fsdb = ttbl.fsdb_symlink_c(self.fsdb_dir)
        self.fsdb = fsdb

        l = os.listdir(self.fsdb_dir)
        if l:
            raise tcfl.tc.failed_e(
                "fsdb database directory not empty",
                dict(listdir = l))
Example #7
 def post_file(self, target, _who, args, files, user_path):
     # we can only upload to the user's storage path, never to
     # paths_allowed -> hence why we always prefix it.
     file_path = self.arg_get(args, 'file_path', str)
     file_path_final, rw = self._validate_file_path(target, file_path,
                                                    user_path)
     if not rw:
         raise PermissionError(f"{file_path}: is a read only location")
     file_object = files['file']
     file_object.save(file_path_final)
     commonl.makedirs_p(user_path)
     target.log.debug("%s: saved" % file_path_final)
     return dict()
Example #8
 def post_file(self, target, _who, args, files, user_path):
     # we can only upload to the user's storage path, never to
     # paths_allowed -> hence why we always prefix it.
     file_path = self.arg_get(args, 'file_path', basestring)
     if os.path.isabs(file_path):
         raise RuntimeError(
             "%s: trying to upload a file to an area that is not allowed"
             % file_path)
     file_object = files['file']
     file_path_final = self._validate_file_path(file_path, user_path)
     commonl.makedirs_p(user_path)
     file_object.save(file_path_final)
     target.log.debug("%s: saved" % file_path_final)
     return dict()
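
Neither variant shows _validate_file_path(); a plausible sketch of the containment check it implies (not the project's actual code, and leaving out the read/write flag the first variant also returns) resolves the path under the user's storage area and rejects anything that escapes it:

import os

def _validate_file_path_sketch(file_path, user_path):
    # sketch: anchor the requested path under user_path and refuse
    # escapes (e.g. via ../ components or symlinks)
    final = os.path.realpath(os.path.join(user_path, file_path))
    prefix = os.path.realpath(user_path)
    if final != prefix and not final.startswith(prefix + os.sep):
        raise RuntimeError(
            "%s: trying to access a file outside the user storage area"
            % file_path)
    return final
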
Example #9
File: dhcp.py Project: intel/tcf
    def power_on_do(self, target):
        """
        Start DHCPd servers on the network interface
        described by `target`
        """
        if self.target == None:
            self.target = target
        else:
            assert self.target == target
        # FIXME: detect @target is an ipv4 capable network, fail otherwise
        self._init_for_process(target)
        # Create runtime directories where we place everything based
        # on the information in pxe_architectures
        shutil.rmtree(self.state_dir, ignore_errors = True)
        os.makedirs(self.state_dir)

        # TFTP setup
        commonl.makedirs_p(os.path.join(tftp_dir, tftp_prefix,
                                        "pxelinux.cfg"),
                           0o0775)
        for arch_name, arch_data in pxe_architectures.iteritems():
            tftp_arch_dir = os.path.join(tftp_dir, tftp_prefix, arch_name)
            commonl.makedirs_p(tftp_arch_dir, 0o0775)
            cmdline = [ "rsync", "-a", "--delete" ] \
                + arch_data['copy_files'] + [ tftp_arch_dir ]
            subprocess.call(cmdline, shell = False, stderr = subprocess.STDOUT)
            # We always use the same configuration; because the rsync
            # above might remove the symlink, we re-create it
            # We use a relative symlink so in.tftpd doesn't nix it
            commonl.symlink_f("../pxelinux.cfg",
                              os.path.join(tftp_arch_dir, "pxelinux.cfg"))

        # We set the parameters in a dictionary so we can use it to
        # format strings
        # FUGLY; relies on ttbl.conf_00_lib.vlan_pci renaming the
        # network interfaces like this.
        self._params['if_name'] = "b" + target.id

        # FIXME: if we get the parameters from the network here, we
        # have target -- so we don't need to set them on init
        with open(os.path.join(self.state_dir, "dhcpd.conf"), "wb") as f:
            self._dhcp_conf_write(f)

        # FIXME: before start, filter out leases file, anything in the
        # leases dhcpd.leases file that has a "binding state active"
        # shall be kept ONLY if we still have that client in the
        # configuration...or sth like that.
        # FIXME: rm old leases file, overwrite with filtered one

        self._dhcpd_start()
Example #10
 def __init__(self, userid, fail_if_new=False, roles=None):
     path = self.create_filename(userid)
     self.userid = userid
     if not os.path.isdir(path) and fail_if_new == False:
         commonl.rm_f(path)  # cleanup, just in case
         commonl.makedirs_p(path)
     try:
         self.fsdb = ttbl.fsdb_symlink_c(path)
     except (AssertionError, ttbl.fsdb_c.exception):
         if fail_if_new:
             raise self.user_not_existant_e("%s: no such user" % userid)
         raise  # without an fsdb we can't do anything below
     self.fsdb.set('userid', userid)
     if roles:
         assert isinstance(roles, list)
         for role in roles:
             self.role_add(role)
Example #11
    def on(self, target, _component):
        ic = target  # Note the rename (target -> ic)

        # Create records for each target that we know will connect to
        # this interconnect, place them in the directory TARGET/dnsmasq.hosts
        dirname = os.path.join(ic.state_dir, "dnsmasq.hosts")
        shutil.rmtree(dirname, ignore_errors=True)
        commonl.makedirs_p(dirname)

        # Create an A record for the network, needed for --auth-server
        with open(os.path.join(dirname, ic.id), "w+") as f:
            f.write("%s\t%s\n" % (ic.tags['ipv4_addr'], ic.id))

        # Find the targets that connect to this interconnect
        # FIXME: parallelize for many
        for target in ttbl.config.targets.values():
            interconnects = target.tags.get('interconnects', {})
            # iterate interconnects this thing connects to
            for interconnect_id, interconnect in interconnects.iteritems():
                if interconnect_id != ic.id:
                    continue
                addrs = []
                if 'ipv4_addr' in interconnect:
                    addrs.append(interconnect['ipv4_addr'])
                if 'ipv6_addr' in interconnect:
                    addrs.append(interconnect['ipv6_addr'])
                if addrs:
                    # Create a record for each target that will connect to
                    # this interconnect
                    with open(os.path.join(dirname, target.id), "w+") as f:
                        for addr in addrs:
                            f.write("%s\t%s.%s %s\n" %
                                    (addr, target.id, ic.id, target.id))

        # note the rename we did target -> ic
        ttbl.power.daemon_c.on(self, ic, _component)
Example #12
def pre_tftp_pos_setup(target):
    pos_mode = target.fsdb.get("pos_mode")
    # we always run, as we have set the RPI3 to always depend on
    # network boot to control it

    assert 'raspberry_serial_number' in target.tags, \
        "%s: configuration error: target configured to pre-power" \
        " up with ttbl.raspberry.pre_tftp_pos_setup() but no" \
        " raspberry_serial_number tag specified" % target.id

    # We only care if mode is set to pxe or local -- local makes us
    # tell the thing to go boot local disk
    # if none, we assume go local
    if pos_mode != "pxe" and pos_mode != "local":
        pos_mode = "local"

    boot_ic = target.tags.get('pos_boot_interconnect', None)
    if boot_ic == None:
        raise RuntimeError("CONFIG ERROR: no 'pos_boot_interconnect'"
                           " tag defined, can't boot off network")
    if not boot_ic in target.tags['interconnects']:
        raise RuntimeError("CONFIG ERROR: this target does not belong to"
                           " the boot interconnect '%s' defined in tag "
                           "'pos_boot_interconnect'" % boot_ic)

    # we need the interconnect object to get some values
    ic = ttbl.test_target.get(boot_ic)

    raspberry_serial_number = target.tags['raspberry_serial_number']

    # rsync the bootloader to TFTPROOT/SERIALNUMBER/. -- this way we
    # just override only what needs overriding
    #
    # then we will configure the bootmode in there.
    # HACK, this should be an internal tag of the ic
    tftp_dirname = os.path.join(ic.state_dir, "tftp.root", raspberry_serial_number)
    commonl.makedirs_p(tftp_dirname)
    cmdline = [ "rsync", "-a", "--delete",
                bootloader_path + "/.", tftp_dirname ]
    subprocess.check_output(cmdline, shell = False, stderr = subprocess.STDOUT)

    # now generate the cmdline we want to send and put it in
    # TFTPROOT/SERIALNUMBER/cmdline.txt.
    #
    # For POS boot, we NFS root it to whatever is in tcf-live -- the
    # root-path is given by DHCP (see dnsmasq.py/dhcp.py, look for
    # root-path) from the {pos_nfs_path,pos_nfs_root,pos_image}
    # keywords.
    #
    # For local boot, we take the default from the bootloader
    if pos_mode == "pxe":
        with open(os.path.join(tftp_dirname, "cmdline.txt"), "w") as f:
            f.write(
                "console=serial0,115200 console=tty1"
                " rootwait quiet splash plymouth.ignore-serial-consoles"
                " ip=dhcp"
                " root=/dev/nfs"		# we are NFS rooted
                # no exotic storage options
                " ro"				# we are read only
                #" plymouth.enable=0 "		# No installer to run
                # kernel, be quiet to avoid your messages polluting the serial
                # terminal
                #" loglevel=2"
                " netconsole=@/eth0,[email protected]/")
    else:
        with open(os.path.join(tftp_dirname, "cmdline.txt"), "w") as f:
            f.write(
                "console=serial0,115200"
                " console=tty1"
                " root=/dev/mmcblk0p2"
                " rootfstype=ext4"
                " elevator=deadline"
                " fsck.repair=yes"
                " rootwait"
                " quiet"
                #" init=/usr/lib/raspi-config/init_resize.sh"
                " splash"
                " plymouth.ignore-serial-consoles")
Example #13
    def eval(self):

        fsdb_dir = os.path.join(self.tmpdir, "db")
        commonl.makedirs_p(fsdb_dir)
        fsdb = ttbl.fsdb_symlink_c(fsdb_dir)
        self.fsdb = fsdb

        l = os.listdir(fsdb_dir)
        if l:
            raise tcfl.tc.failed_e(
                "fsdb database directory not empty",
                dict(listdir = l))

        db = {
            "name ascii" : "string value",
            "name ascii" : "string value",
            # use weird names that would not be welcome in a file,
            # make sure they are encoded -- / and : catch both Win and
            # Linux platforms
            "name :/1" : "string value",
            "name :/2" : "string value",
            "name :/3" : "string value",
            "name :/4" : "string value",
            "name weird /:" : True,
            "name weird /: 2" : False,
            "name ñá %% int" : 2,
            "name ñá %% float" : 3.0
        }
        expected_len = 0
        for name, value in db.items():
            fsdb.set(name, value)
            self.report_pass("key name %s set" % name)
            expected_len += 1
            l = os.listdir(fsdb_dir)
            if len(l) != expected_len:
                raise tcfl.tc.failed_e(
                    "%s: number of files (%d) does not match records (%d)"
                    % (fsdb_dir, len(l), expected_len),
                    dict(listdir = l))
            self.report_pass("%s: number of files (%d) matches records (%d)"
                             % (fsdb_dir, len(l), expected_len))
        self.report_pass("we were able to create records, including"
                         " those with non-filename compatible")

        # verify the entries in the directory match what we have:
        # take the list of files we created for each field, get an
        # unquoted list and compare against the list of keys we fed
        # and the list of keys FSDB reports.
        # Make it all a set() so they are ordered the same.
        l_raw = set(os.listdir(fsdb_dir))
        l_unquoted = set([ urllib.unquote(i) for i in l_raw ])
        keys_db = set(db.keys())
        keys_fsdb = set(fsdb.keys())
        if l_unquoted != keys_db:
            raise tcfl.tc.failed_e(
                "%s: files don't match files"
                % (fsdb_dir),
                dict(listdir_unquoted = l_unquoted,
                     listdir_raw = l_raw,
                     keys_fsdb = keys_fsdb,
                     keys_db = keys_db))

        if keys_fsdb != keys_db:
            raise tcfl.tc.failed_e(
                "%s: keys don't match DB keys"
                % (fsdb_dir),
                dict(listdir_unquoted = l_unquoted,
                     listdir_raw = l_raw,
                     keys_fsdb = keys_fsdb,
                     keys_db = keys_db))
        self.report_pass("keys() match files and db keys")

        for name, value in db.items():
            value_fsdb = fsdb.get(name)
            if value_fsdb != value:
                raise tcfl.tc.failed_e(
                    "%s: key '%s' value from fsdb '%s' does not match"
                    " what we set ('%s')" % (fsdb_dir, name, value, value_fsdb))
        self.report_pass("values set check")

        d = fsdb.get_as_dict()
        if d != db:
            raise tcfl.tc.failed_e(
                "%s: get_as_dict() doesn't match db" % (
                    fsdb_dir), dict(get_as_dict = d, db = db))
        self.report_pass("get_as_dict() returns same as db")

        l = fsdb.get_as_slist()
        for k, v in l:
            if k not in db:
                raise tcfl.tc.failed_e(
                    "%s: get_as_slist() reported field %s doesn't match db" % (
                        fsdb_dir, k), dict(get_as_slist = l, db = db))
            if db[k] != v:
                raise tcfl.tc.failed_e(
                    "%s: get_as_slist() reported field %s, value %s, doesn't match db" % (
                        fsdb_dir, k, v), dict(get_as_slist = l, db = db))
        self.report_pass("get_as_slist() returns same as db")
Example #14
    def on(self, target, _component):
        ic = target  # Note the rename (target -> ic)

        # Create records for each target that we know will connect to
        # this interconnect, place them in the directory TARGET/dnsmasq.hosts
        dirname = os.path.join(ic.state_dir, "dnsmasq.hosts")
        shutil.rmtree(dirname, ignore_errors=True)
        commonl.makedirs_p(dirname)
        tftp_dirname = os.path.join(ic.state_dir, "tftp.root")
        shutil.rmtree(tftp_dirname, ignore_errors=True)
        commonl.makedirs_p(tftp_dirname, 0o0775)
        ttbl.pxe.setup_tftp_root(tftp_dirname)  # creates the dir

        # Find the targets that connect to this interconnect and
        # collect their IPv4/6/MAC addresses to create the record and
        # DHCP info; in theory we wouldn't need to create the host
        # info, as the DHCP host info would do it--doesn't hurt
        # FIXME: parallelize for many
        dhcp_hosts = collections.defaultdict(dict)
        for target in ttbl.config.targets.values():
            interconnects = target.tags.get('interconnects', {})
            # iterate interconnects this thing connects to
            for interconnect_id, interconnect in interconnects.iteritems():
                if interconnect_id != ic.id:
                    continue
                addrs = []
                mac_addr = interconnect.get('mac_addr', None)
                if mac_addr:
                    dhcp_hosts[target]['mac_addr'] = mac_addr
                ipv4_addr = interconnect.get('ipv4_addr', None)
                if ipv4_addr:
                    dhcp_hosts[target]['ipv4_addr'] = ipv4_addr
                    addrs.append(ipv4_addr)
                ipv6_addr = interconnect.get('ipv6_addr', None)
                if ipv6_addr:
                    dhcp_hosts[target]['ipv6_addr'] = ipv6_addr
                    addrs.append(ipv6_addr)
                if addrs:
                    # Create a file for each target that will connect to
                    # this interconnect
                    with open(os.path.join(dirname, target.id), "w+") as f:
                        for addr in addrs:
                            f.write("%s\t%s %s.%s\n" %
                                    (addr, target.id, target.id, ic.id))
        # Create a configuration file
        #
        # configl has all the options with template values which we
        # expand later.
        with open(os.path.join(ic.state_dir, "dnsmasq.conf"), "w+") as f:

            configl = [
                "no-hosts",  # only files in...
                "hostsdir=%(path)s/dnsmasq.hosts",  # ..this dir
                # we are defining a domain .NETWORKNAME
                "domain=%(id)s",
                "local=/%(id)s/",
                # serve only on the in the interface for this network;
                # listen-address not needed since we specify
                # interface--having a hard time making listen-address
                # only work anyway
                # FIXME: hardcoded to knowing the network interface
                #        name is called bTARGET
                "interface=b%(id)s",
                # need to use this so we only bind to our
                # interface and we can run multiple dnsmasqs that coexist
                # with whichever others are in the system
                "bind-interfaces",
                "except-interface=lo",
                # if a plain name (w/o domain name) is not found in the
                # local database, do not forward it upstream
                "domain-needed",
                # Needs an A record "%(ipv4_addr)s %(id)s", created in on()
                # DISABLED: unknown why, this messes up resolution of
                # plain names
                # auth-server=%(id)s,b%(id)s",
                "auth-zone=%(id)s,b%(id)s",
                "dhcp-authoritative",
                # Enable TFTP server to STATEDIR/tftp.root
                "enable-tftp",
                "tftp-root=%(path)s/tftp.root",
                # all files TFTP is to send have to be owned by the
                # user running it (the same one running this daemon)
                "tftp-secure",
            ]

            # Add stuff based on having ipv4/6 support
            #
            # dhcp-range activates the DHCP server
            # host-record creates a record for the host that
            # represents the domain zone; but not sure it is working
            # all right.
            addrs = []
            ic_ipv4_addr = ic.kws.get('ipv4_addr', None)
            if ic_ipv4_addr:
                addrs.append(ic_ipv4_addr)
                # IPv4 server address so we can do auth-server
                configl.append("host-record=%(id)s,%(ipv4_addr)s")
                # we let DNSMASQ figure out the range from the
                # configuration of the network interface and we only
                # allow (static) the ones set below with dhcp-host
                configl.append("dhcp-range=%(ipv4_addr)s,static")

            ic_ipv6_addr = ic.kws.get('ipv6_addr', None)
            if ic_ipv6_addr:
                addrs.append(ic_ipv6_addr)
                # IPv6 server address so we can do auth-server
                configl.append("host-record=%(id)s,[%(ipv6_addr)s]")
                # FIXME: while this is working, it is still not giving
                # the IPv6 address we hardcoded in the doc :/
                ipv6_prefix_len = ic.kws['ipv6_prefix_len']
                network = ipaddress.IPv6Network(unicode(ic_ipv6_addr + "/" +
                                                        str(ipv6_prefix_len)),
                                                strict=False)
                configl.append(
                    "dhcp-range=%s,%s,%s" %
                    (ic_ipv6_addr, network.broadcast_address, ipv6_prefix_len))

            # Create an A record for the server / domain; this is a
            # separate file in DIRNAME/dnsmasq.hosts/NAME
            if addrs:
                configl.append("listen-address=" + ",".join(addrs))
                with open(os.path.join(dirname, ic.id), "w+") as hf:
                    for addr in addrs:
                        hf.write("%s\t%s\n" % (addr, ic.id))

            for config in configl:
                f.write(config % ic.kws + "\n")

            # For each target we know can connect, create a dhcp-host entry
            for target, data in dhcp_hosts.iteritems():
                infol = [
                    # we set a tag after the host name to match a
                    # host-specific dhcp-option line to it
                    "set:" + target.id,
                    data['mac_addr']
                ]
                if 'ipv4_addr' in data:
                    infol.append(data['ipv4_addr'])
                if 'ipv6_addr' in data:
                    # IPv6 addr in [ADDR] format, per man page
                    infol.append("[" + data['ipv6_addr'] + "]")
                infol.append(target.id)
                infol.append("infinite")
                f.write("dhcp-host=" + ",".join(infol) + "\n")
                # next fields can be in the target or fall back to the
                # values from the interconnect
                kws = target.kws
                bsp = None  # so the 'if bsp:' check below is defined
                bsps = target.tags.get('bsps', {}).keys()
                if bsps:
                    # take the first BSP in sort order...yeah, not a
                    # good plan
                    bsp = sorted(bsps)[0]
                    kws['bsp'] = bsp
                ttbl.pxe.tag_get_from_ic_target(kws, 'pos_http_url_prefix', ic,
                                                target)
                ttbl.pxe.tag_get_from_ic_target(kws, 'pos_nfs_server', ic,
                                                target)
                ttbl.pxe.tag_get_from_ic_target(kws, 'pos_nfs_path', ic,
                                                target)

                f.write(
                    "dhcp-option=tag:%(id)s,option:root-path,%(pos_nfs_server)s:%(pos_nfs_path)s,udp,soft,nfsvers=3\n"
                    % kws)

                # If the target declares a BSP (at this point of the
                # game, it should), figure out which architecture is
                # so we can point it to the right file.
                if bsp:
                    # try ARCH or efi-ARCH
                    # override with anything the target declares in config
                    arch = None
                    arch_name = None
                    boot_filename = None
                    if 'pos_tftp_boot_filename' in target.tags:
                        boot_filename = target.tags['pos_tftp_boot_filename']
                    elif bsp in ttbl.pxe.architectures:
                        arch = ttbl.pxe.architectures[bsp]
                        arch_name = bsp
                        boot_filename = arch_name + "/" + arch.get(
                            'boot_filename', None)
                    elif "efi-" + bsp in ttbl.pxe.architectures:
                        arch_name = "efi-" + bsp
                        arch = ttbl.pxe.architectures[arch_name]
                        boot_filename = arch_name + "/" + arch.get(
                            'boot_filename', None)
                    if boot_filename:
                        f.write("dhcp-option=tag:%(id)s," % kws +
                                "option:bootfile-name," + boot_filename + "\n")
                    else:
                        raise RuntimeError(
                            "%s: TFTP/PXE boot mode selected, but no boot"
                            " filename can be guessed for arch/BSP %s/%s;"
                            " declare tag pos_tftp_boot_filename?" %
                            (target.id, arch_name, bsp))
                    if ic_ipv4_addr:
                        f.write("dhcp-option=tag:%(id)s," % kws +
                                "option:tftp-server," + ic_ipv4_addr + "\n")
                    if ic_ipv6_addr:
                        f.write("dhcp-option=tag:%(id)s," % kws +
                                "option:tftp-server," + ic_ipv6_addr + "\n")

        # note the rename we did target -> ic
        ttbl.power.daemon_c.on(self, ic, _component)
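
To make the templating concrete: for a hypothetical interconnect with id "nwa" and ipv4_addr "192.168.97.1" (and no IPv6), the expansion of configl above would produce a dnsmasq.conf along these lines (<state_dir> stands for the expansion of %(path)s):

# no-hosts
# hostsdir=<state_dir>/dnsmasq.hosts
# domain=nwa
# local=/nwa/
# interface=bnwa
# bind-interfaces
# except-interface=lo
# domain-needed
# auth-zone=nwa,bnwa
# dhcp-authoritative
# enable-tftp
# tftp-root=<state_dir>/tftp.root
# tftp-secure
# host-record=nwa,192.168.97.1
# dhcp-range=192.168.97.1,static
# listen-address=192.168.97.1
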
Example #15
def init():
    commonl.makedirs_p(path)
Example #16
    def _mkreport(self, msg_tag, code, _tc, message):
        """
        Generate a failure report

        """
        # FIXME: initialize this in the core, so it shows in test_dump_kws*.py
        kws = commonl.dict_missing_c(_tc.kws)
        kws['msg_tag'] = msg_tag
        kws['result'] = tcfl.tc.valid_results.get(
            msg_tag, ( None, "BUG-RESULT-%s" % msg_tag))[0]
        kws['result_past'] = tcfl.tc.valid_results.get(
            msg_tag, ( None, "BUG-RESULT-%s" % msg_tag))[1]
        kws['message'] = message
        tfids = []
        for target_want_name, target in _tc.targets.iteritems():
            if len(target.rt.get('bsp_models', {})) > 1:
                tfids.append(
                    '(' + target.fullid
                    + ' and bsp_model == "%s")' % target.bsp_model)
            else:
                tfids.append(target.fullid)
        if tfids:
            kws['t_option'] = " -t '" + " or ".join(tfids) + "'"
        else:
            kws['t_option'] = ""

        # tcfl.config.VARNAME -> tcfl_config_VARNAME
        # this makes it easy to publish configuration items into the
        # tcfl.config space that then can be used in templates. It's a
        # hack, but makes configuration later on way easier
        tcfl_config = sys.modules['tcfl.config']
        for symbol in dir(tcfl_config):
            value = getattr(tcfl_config, symbol)
            if symbol.startswith("__"):
                continue
            elif callable(value):
                continue
            elif any([ isinstance(value, i)
                       for i in (list, dict, tuple, basestring, int)]):
                kws['tcfl_config_%s' % symbol] = value
            else:
                pass

        kws['targets'] = []
        for target_want_name, target in _tc.targets.iteritems():
            entry = {}
            entry['want_name'] = target_want_name
            entry['fullid'] = target.fullid
            entry['type'] = _tc.type_map.get(target.type, target.type)
            kws['targets'].append(entry)
        kws['tags'] = {}
        for tag in _tc._tags:
            (value, origin) = _tc.tag_get(tag, None, None)
            kws['tags'][tag] = dict(value = value, origin = origin)
        kws['count'] = 1
        kws['count_passed'] = 1 if msg_tag == 'PASS' else 0
        kws['count_failed'] = 1 if msg_tag == 'FAIL' else 0
        kws['count_errored'] = 1 if msg_tag == 'ERRR' else 0
        kws['count_skipped'] = 1 if msg_tag == 'SKIP' else 0
        kws['count_blocked'] = 1 if msg_tag == 'BLCK' else 0

        kws['ts_start'] = _tc.ts_end
        kws['ts_start_h'] = time.ctime(_tc.ts_end)
        kws['ts_end'] = _tc.ts_end
        kws['ts_end_h'] = time.ctime(_tc.ts_end)
        kws['duration_s'] = _tc.ts_end - _tc.ts_start

        for hook in self.hooks:
            hook(self, _tc, kws)

        # Write to report file
        # FIXME: consider compiling the template as we'll keep reusing it
        template_path = [ i for i in reversed(tcfl.config.path) ] \
                        + [ tcfl.config.share_path ]
        j2_env = jinja2.Environment(
            loader = jinja2.FileSystemLoader(template_path))
        j2_env.filters['xml_escape'] = jinja2_xml_escape
        for entry_name, template_entry in self.templates.iteritems():
            template_name = template_entry['name']
            if message.startswith("COMPLETION failed") \
               and not template_entry.get('report_fail', True):
                _tc.log.info("%s|%s: reporting failed disabled"
                             % (entry_name, template_name))
                continue
            elif message.startswith("COMPLETION error") \
               and not template_entry.get('report_error', True):
                _tc.log.info("%s|%s: reporting errors disabled"
                             % (entry_name, template_name))
                continue
            elif message.startswith("COMPLETION skipped") \
               and not template_entry.get('report_skip', False):
                _tc.log.info("%s|%s: reporting skips disabled"
                             % (entry_name, template_name))
                continue
            elif message.startswith("COMPLETION blocked") \
               and not template_entry.get('report_block', True):
                _tc.log.info("%s|%s: reporting blockages disabled"
                             % (entry_name, template_name))
                continue
            elif message.startswith("COMPLETION passed") \
               and not template_entry.get('report_pass', False):
                _tc.log.info("%s|%s: reporting pass disabled"
                             % (entry_name, template_name))
                continue
            else:
                # reporting enabled for this message type; fall
                # through and generate the report
                pass

            # Need to do this every time so the iterator is reset
            kws['log'] = self._log_iterator(code)

            template = j2_env.get_template(template_name)
            file_name = template_entry['output_file_name'] % kws
            if not os.path.isabs(file_name):
                file_name = os.path.join(self.log_dir, file_name)
            # the template might specify a new directory path that
            # still does not exist
            commonl.makedirs_p(os.path.dirname(file_name), 0o750)
            with codecs.open(file_name, "w", encoding = 'utf-8',
                             errors = 'ignore') as fo:
                for text in template.generate(**kws):
                    fo.write(text)
Example #17
    def _mkreport(self, msg_tag, code, _tc, message):
        """
        Generate a failure report

        """
        # FIXME: initialize this in the core, so it shows in test_dump_kws*.py
        kws = commonl.dict_missing_c(_tc.kws)
        kws['msg_tag'] = msg_tag
        kws['result'] = tcfl.tc.valid_results.get(
            msg_tag, (None, "BUG-RESULT-%s" % msg_tag))[0]
        kws['result_past'] = tcfl.tc.valid_results.get(
            msg_tag, (None, "BUG-RESULT-%s" % msg_tag))[1]
        kws['message'] = message
        tfids = []
        for target_want_name, target in _tc.targets.iteritems():
            if len(target.rt.get('bsp_models', {})) > 1:
                tfids.append('(' + target.fullid +
                             ' and bsp_model == "%s")' % target.bsp_model)
            else:
                tfids.append(target.fullid)
        if tfids:
            kws['t_option'] = " -t '" + " or ".join(tfids) + "'"
        else:
            kws['t_option'] = ""

        # tcfl.config.VARNAME -> tcfl_config_VARNAME
        # this makes it easy to publish configuration items into the
        # tcfl.config space that then can be used in templates. It's a
        # hack, but makes configuration later on way easier
        tcfl_config = sys.modules['tcfl.config']
        for symbol in dir(tcfl_config):
            value = getattr(tcfl_config, symbol)
            if symbol.startswith("__"):
                continue
            elif callable(value):
                continue
            elif any([
                    isinstance(value, i)
                    for i in (list, dict, tuple, basestring, int)
            ]):
                kws['tcfl_config_%s' % symbol] = value
            else:
                pass

        kws['targets'] = []
        for target_want_name, target in _tc.targets.iteritems():
            entry = {}
            entry['want_name'] = target_want_name
            entry['fullid'] = target.fullid
            entry['type'] = _tc.type_map.get(target.type, target.type)
            kws['targets'].append(entry)
        kws['tags'] = {}
        for tag in _tc._tags:
            (value, origin) = _tc.tag_get(tag, None, None)
            kws['tags'][tag] = dict(value=value, origin=origin)
        kws['count'] = 1
        kws['count_passed'] = 1 if msg_tag == 'PASS' else 0
        kws['count_failed'] = 1 if msg_tag == 'FAIL' else 0
        kws['count_errored'] = 1 if msg_tag == 'ERRR' else 0
        kws['count_skipped'] = 1 if msg_tag == 'SKIP' else 0
        kws['count_blocked'] = 1 if msg_tag == 'BLCK' else 0

        kws['ts_start'] = _tc.ts_end
        kws['ts_start_h'] = time.ctime(_tc.ts_end)
        kws['ts_end'] = _tc.ts_end
        kws['ts_end_h'] = time.ctime(_tc.ts_end)
        kws['duration_s'] = _tc.ts_end - _tc.ts_start

        for hook in self.hooks:
            hook(self, _tc, kws)

        # Write to report file
        # FIXME: consider compiling the template as we'll keep reusing it
        template_path = [ i for i in reversed(tcfl.config.path) ] \
                        + [ tcfl.config.share_path ]
        j2_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(template_path))
        j2_env.filters['xml_escape'] = jinja2_xml_escape
        for entry_name, template_entry in self.templates.iteritems():
            template_name = template_entry['name']
            if message.startswith("COMPLETION failed") \
               and not template_entry.get('report_fail', True):
                _tc.log.info("%s|%s: reporting failed disabled" %
                             (entry_name, template_name))
                continue
            elif message.startswith("COMPLETION error") \
               and not template_entry.get('report_error', True):
                _tc.log.info("%s|%s: reporting errors disabled" %
                             (entry_name, template_name))
                continue
            elif message.startswith("COMPLETION skipped") \
               and not template_entry.get('report_skip', False):
                _tc.log.info("%s|%s: reporting skips disabled" %
                             (entry_name, template_name))
                continue
            elif message.startswith("COMPLETION blocked") \
               and not template_entry.get('report_block', True):
                _tc.log.info("%s|%s: reporting blockages disabled" %
                             (entry_name, template_name))
                continue
            elif message.startswith("COMPLETION passed") \
               and not template_entry.get('report_pass', False):
                _tc.log.info("%s|%s: reporting pass disabled" %
                             (entry_name, template_name))
                continue
            else:
                # reporting enabled for this message type; fall
                # through and generate the report
                pass

            # Need to do this every time so the iterator is reset
            kws['log'] = self._log_iterator(code)

            template = j2_env.get_template(template_name)
            file_name = template_entry['output_file_name'] % kws
            if not os.path.isabs(file_name):
                file_name = os.path.join(self.log_dir, file_name)
            # the template might specify a new directory path that
            # still does not exist
            commonl.makedirs_p(os.path.dirname(file_name), 0o750)
            with codecs.open(file_name,
                             "w",
                             encoding='utf-8',
                             errors='replace') as fo:
                for text in template.generate(**kws):
                    fo.write(text)
Example #18
    def __init__(self,
                 config_text=None,
                 config_files=None,
                 use_ssl=False,
                 tmpdir=None,
                 keep_temp=True,
                 errors_ignore=None,
                 warnings_ignore=None,
                 aka=None,
                 local_auth=True):

        # Force all assertions, when running like this, to fail the TC
        tcfl.tc.tc_c.exception_to_result[AssertionError] = tcfl.tc.failed_e

        # If no aka is defined, we make one out of the place when this
        # object is being created, so it is always the same *and* thus
        # the report hashes are always identical with each run
        if aka == None:
            self.aka = "ttbd-" + commonl.mkid(commonl.origin_get(2), 4)
        else:
            self.aka = aka
        if config_files == None:
            config_files = []
        self.keep_temp = keep_temp
        self.port = commonl.tcp_port_assigner()
        self.use_ssl = use_ssl
        if use_ssl == True:
            self.url = "https://localhost:%d" % self.port
            ssl_context = ""
        else:
            self.url = "http://localhost:%d" % self.port
            ssl_context = "--no-ssl"
        self.url_spec = "fullid:'^%s'" % self.aka
        if tmpdir:
            self.tmpdir = tmpdir
        else:
            # default to place the server's dir in the tempdir for
            # testcases
            self.tmpdir = os.path.join(tcfl.tc.tc_c.tmpdir, "server", self.aka)
        shutil.rmtree(self.tmpdir, ignore_errors=True)
        commonl.makedirs_p(self.tmpdir)

        self.etc_dir = os.path.join(self.tmpdir, "etc")
        self.files_dir = os.path.join(self.tmpdir, "files")
        self.lib_dir = os.path.join(self.tmpdir, "lib")
        self.state_dir = os.path.join(self.tmpdir, "state")
        os.mkdir(self.etc_dir)
        os.mkdir(self.files_dir)
        os.mkdir(self.lib_dir)
        os.mkdir(self.state_dir)
        self.stdout = self.tmpdir + "/stdout"
        self.stderr = self.tmpdir + "/stderr"

        for fn in config_files:
            shutil.copy(fn, self.etc_dir)

        with open(os.path.join(self.etc_dir, "conf_00_base.py"), "w") as cfgf:
            cfgf.write(r"""
import ttbl.config
ttbl.config.processes = 2
host = '127.0.0.1'
""")
            # We don't define here the port, so we see it in the
            # command line
            if config_text:
                cfgf.write(config_text)

        self.srcdir = os.path.realpath(
            os.path.join(os.path.dirname(__file__), ".."))
        self.cmdline = [
            "stdbuf",
            "-o0",
            "-e0",
            # This allows us to default to the source location, when
            # running from source, or the installed one when running
            # from the system
            os.environ.get("TTBD_PATH", self.srcdir + "/ttbd/ttbd"),
            "--port",
            "%d" % self.port,
            ssl_context,
            "-vvvvv",
            "--files-path",
            self.files_dir,
            "--state-path",
            self.state_dir,
            "--config-path",
            "",  # This empty one is to clear them all
            "--config-path",
            self.etc_dir
        ]
        self.local_auth = local_auth
        if local_auth:
            self.cmdline.append("--local-auth")
        self.p = None
        #: Exclude these regexes / strings from triggering an error
        #: message check
        self.errors_ignore = [] if errors_ignore == None else errors_ignore

        #: Exclude these regexes / strings from triggering a warning
        #: message check
        self.warnings_ignore = [re.compile('daemon lacks CAP_NET_ADMIN')]
        if warnings_ignore:
            self.warnings_ignore += warnings_ignore

        def _preexec_fn():
            stdout_fd = os.open(
                self.stdout,
                # O_CREAT: Always a new file, so
                # we can check for errors and not
                # get confused with previous runs
                os.O_WRONLY | os.O_EXCL | os.O_CREAT,
                0o0644)
            stderr_fd = os.open(
                self.stderr,
                # O_CREAT: Always a new file, so
                # we can check for errors and not
                # get confused with previous runs
                os.O_WRONLY | os.O_EXCL | os.O_CREAT,
                0o0644)
            os.dup2(stdout_fd, 1)
            os.dup2(stderr_fd, 2)

        logging.info("Launching: %s", " ".join(self.cmdline))
        self.p = subprocess.Popen(self.cmdline,
                                  shell=False,
                                  cwd=self.tmpdir,
                                  close_fds=True,
                                  preexec_fn=_preexec_fn,
                                  bufsize=0)
        try:
            self._check_if_alive()
        finally:
            self.check_log_for_issues()
        # if we call self.terminate() from __del__, the garbage
        # collector has started to wipe things, so we can't use, ie:
        # open() to check the log file
        atexit.register(self.terminate)
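
Assuming this __init__ belongs to a test-server launcher class -- call it ttbd_server_c here, a hypothetical name, as the class itself is outside this excerpt -- a testcase would typically instantiate it once at module scope so all tests share the daemon:

# hypothetical usage; ttbd_server_c stands in for whatever class
# this __init__ belongs to
ttbd = ttbd_server_c(config_text = """
ttbl.config.target_add(ttbl.test_target("t0"))
""")

class _test(tcfl.tc.tc_c):
    def eval(self):
        # the launcher exposes .url / .url_spec so clients can find
        # the private server instance
        self.report_info("server running at %s" % ttbd.url)
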
Example #19
    def build_00(self, ic, target):
        if not 'LK_BUILDDIR' in os.environ:
            raise tcfl.tc.skip_e(
                "please export env LK_BUILDDIR pointing to path of "
                "configured, built or ready-to-build linux kernel tree")
        builddir = os.environ["LK_BUILDDIR"]
        rootdir = os.environ.get("LK_ROOTDIR", self.tmpdir + "/root")

        # update the build
        #
        ## $ make -C BUILDDIR all
        ## ...
        #
        target.report_pass("re/building kernel in %s" % builddir, dlevel=-1)
        output = subprocess.check_output("${MAKE:-make} -C %s all" % builddir,
                                         shell=True,
                                         stderr=subprocess.STDOUT)
        target.report_pass("re/built kernel in %s" % builddir,
                           dict(output=output),
                           alevel=0,
                           dlevel=-2)

        target.report_pass("installing kernel to %s" % rootdir, dlevel=-1)
        # will run to install the kernel to our fake root dir
        #
        ## $ make INSTALLKERNEL=/dev/null \
        ##       INSTALL_PATH=ROOTDIR/boot INSTALL_MOD_PATH=ROOTDIR \
        ##       install modules_install
        ## sh PATH/linux.git/arch/x86/boot/install.sh 4.19.5 arch/x86/boot/bzImage \
        ##    System.map "../root-linux/boot"
        ## Cannot find LILO.
        ## INSTALL arch/x86/crypto/blowfish-x86_64.ko
        ## INSTALL arch/x86/crypto/cast5-avx-x86_64.ko
        ## INSTALL arch/x86/crypto/cast6-avx-x86_64.ko
        ## INSTALL arch/x86/crypto/des3_ede-x86_64.ko
        ## INSTALL arch/x86/crypto/sha1-mb/sha1-mb.ko
        ## ...
        #
        # note that:
        #
        # - INSTALLKERNEL: short-circuit the kernel installer; not
        #   needed, since we won't boot it on the machine doing the
        #   building
        #
        # - LILO will not be found; we don't care -- we only want the
        #   files in rootdir/
        commonl.makedirs_p(rootdir + "/boot")
        output = subprocess.check_output(
            "${MAKE:-make} -C %s INSTALLKERNEL=ignoreme"
            " INSTALL_PATH=%s/boot INSTALL_MOD_PATH=%s"
            " install modules_install" % (builddir, rootdir, rootdir),
            shell=True,
            stderr=subprocess.STDOUT)
        target.report_pass("installed kernel to %s" % rootdir,
                           dict(output=output),
                           dlevel=-2)

        target.report_pass("stripping debugging info")
        subprocess.check_output(
            "find %s -iname \*.ko | xargs strip --strip-debug" % rootdir,
            shell=True,
            stderr=subprocess.STDOUT)
        target.report_pass("stripped debugging info", dlevel=-1)
Example #20
#! /usr/bin/python2
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

import os

import commonl
import ttbl.store
import ttbl.config

target = ttbl.test_target("t0")
ttbl.config.target_add(target)  # store interface added automatically
ttbl.store.paths_allowed["/path1"] = os.path.join(target.state_dir,
                                                  "test_path1")
ttbl.store.paths_allowed["/path2"] = os.path.join(target.state_dir,
                                                  "test_path2")
ttbl.store.paths_allowed["/path3"] = os.path.join(target.state_dir,
                                                  "test_path3")
ttbl.store.paths_allowed["/path4"] = os.path.join(target.state_dir,
                                                  "test_path4")
commonl.makedirs_p(ttbl.store.paths_allowed["/path2"])
commonl.makedirs_p(ttbl.store.paths_allowed["/path3"])
commonl.makedirs_p(os.path.join(ttbl.store.paths_allowed["/path4"], "subdir1"))
commonl.makedirs_p(os.path.join(ttbl.store.paths_allowed["/path4"], "subdir2"))
commonl.makedirs_p(os.path.join(ttbl.store.paths_allowed["/path4"], "subdir3"))
with open(os.path.join(ttbl.store.paths_allowed["/path2"], 'fileA'),
          "w") as wf:
    wf.write("This is a test")
Example #21
    def start(self, target, capturer, path):
        commonl.makedirs_p(path)
        stream_filename = "%s%s" % (capturer, self.extension)
        log_filename = "%s.log" % capturer
        pidfile = "%s/capture-%s.pid" % (target.state_dir, capturer)

        kws = target.kws_collect(self)
        kws['output_file_name'] = os.path.join(path, stream_filename)  # LEGACY
        kws['stream_filename'] = os.path.join(path, stream_filename)  # LEGACY
        kws['_impl.stream_filename'] = os.path.join(path, stream_filename)
        kws['_impl.log_filename'] = os.path.join(path, log_filename)
        kws['_impl.capturer'] = capturer
        kws['_impl.timestamp'] = str(datetime.datetime.utcnow())

        with open(kws['_impl.log_filename'], "w+") as logf:
            logf.write(
                commonl.kws_expand(
                    """\
INFO: ttbd running generic_stream capture for '%(_impl.capturer)s' at %(_impl.timestamp)s
INFO: log_file (this file): %(_impl.log_filename)s
INFO: stream_file: %(_impl.stream_filename)s
""", kws))
            try:
                for command in self.pre_commands:
                    # yup, run with shell -- this is not a user level
                    # command, the configurator has full control
                    pre_command = commonl.kws_expand(command, kws)
                    logf.write("INFO: calling pre-command: %s\n" % pre_command)
                    logf.flush()
                    subprocess.check_call(pre_command,
                                          shell=True,
                                          close_fds=True,
                                          cwd="/tmp",
                                          stdout=logf,
                                          stderr=subprocess.STDOUT)
                cmdline = []
                for i in self.cmdline:
                    cmdline.append(commonl.kws_expand(i, kws))
                target.log.info("%s: stream command: %s" %
                                (capturer, " ".join(cmdline)))
                logf.write("INFO: calling commandline: %s\n" %
                           " ".join(cmdline))
                logf.flush()
                p = subprocess.Popen(cmdline,
                                     cwd="/tmp",
                                     shell=False,
                                     close_fds=True,
                                     stdout=logf,
                                     stderr=subprocess.STDOUT)
                target.log.info("%s: generic streaming started" % capturer)
                time.sleep(1)  # let it settle or fail
                if p.poll() != None:
                    logf.close()
                    return False, {"log": log_filename}

            except subprocess.CalledProcessError as e:
                target.log.error(
                    "%s: capturing of '%s' with '%s' failed: (%d) %s" %
                    (capturer, self.name, " ".join(
                        e.cmd), e.returncode, e.output))
                logf.write("ERROR: capture failed\n")
                raise

        with open(pidfile, "w+") as pidf:
            pidf.write("%s" % p.pid)
        ttbl.daemon_pid_add(p.pid)

        return True, {"default": stream_filename, "log": log_filename}
Example #22
    def _setup_maybe(self, target, cert_path, cert_client_path):
        if os.path.isdir(cert_path) and os.path.isdir(cert_client_path):
            return
        # not initialized or inconsistent state, just wipe it all
        self._release_hook(target, True)
        try:
            commonl.makedirs_p(cert_path)
            commonl.makedirs_p(cert_client_path)
            # FIXME: do from python-openssl?

            # Create a Certificate authority for signing
            #
            # The duration is irrelevant since all these certificates will
            # be killed when the target is released

            allocid = target.fsdb.get("_alloc.id", "UNKNOWN")
            subprocess.run(
                f"openssl req -nodes -newkey rsa:{self.key_size}"
                f" -keyform PEM -keyout ca.key"
                f" -subj /C=LC/ST=Local/L=Local/O=TCF-Signing-Authority-{target.id}-{allocid}/CN=TTBD"
                f" -x509 -days 1000 -outform PEM -out ca.cert".split(),
                check=True,
                cwd=cert_path,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            target.log.debug(
                f"created target's certificate authority in {cert_path}")

            # Now create a server key
            subprocess.run(
                f"openssl genrsa -out server.key {self.key_size}".split(),
                stdin=None,
                timeout=5,
                capture_output=True,
                cwd=cert_path,
                check=True)
            target.log.debug("created target's server key")

            subprocess.run(
                f"openssl req -new -key server.key -out server.req -sha256"
                f" -subj /C=LC/ST=Local/L=Local/O=TCF-Signing-Authority-{target.id}-{allocid}/CN=TTBD"
                .split(),
                check=True,
                cwd=cert_path,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            target.log.debug("created target's server req")

            subprocess.run(
                f"openssl x509 -req -in server.req -CA ca.cert -CAkey ca.key"
                f" -set_serial 100 -extensions server -days 1460 -outform PEM"
                f" -out server.cert -sha256".split(),
                check=True,
                cwd=cert_path,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            target.log.debug("created target's server cert")
        except subprocess.CalledProcessError as e:
            target.log.error(f"command {' '.join(e.cmd)} failed: " +
                             e.output.decode('ascii'))
            raise
        except:
            # wipe the dir on any error, to avoid having half
            # initialized state
            self._release_hook(target, True)
            raise
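
As a sanity check of the chain built above (a sketch, not part of the original method), the freshly signed server certificate can be verified against the CA with a standard openssl subcommand, run from the same cert_path working directory:

# verify that server.cert chains to the CA generated above
subprocess.run("openssl verify -CAfile ca.cert server.cert".split(),
               check=True, cwd=cert_path,
               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)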