Example #1
    def setUp(self):

        # create temporary directory for the source files:
        self.sourcedir = tempfile.mkdtemp(prefix='sources')
        self.linkdir = tempfile.mkdtemp(prefix='links')

        os.chdir(self.sourcedir)
        logging.info("\nTestReplacingLinkSourceAndTarget: sourcedir: " + self.sourcedir +
                     " and linkdir: " + self.linkdir)

        # create set of test files:
        self.create_source_file(self.SOURCEFILE1)
        self.create_source_file(self.SOURCEFILE2)
        self.create_source_file(self.SOURCEFILE3)
        self.create_source_file(self.SOURCEFILE4)

        # create symbolic links:
        self.create_link_file(self.SOURCEFILE1, self.LINKFILE1)
        self.create_link_file(self.SOURCEFILE2, self.LINKFILE2)
        self.create_link_file(self.SOURCEFILE3, self.LINKFILE3)
        self.create_link_file(self.SOURCEFILE4, self.LINKFILE4)

        if platform.system() != 'Windows':
            os.sync()
Example #2
def delete_item(item_path):
	failed = 0
	while os.path.exists(item_path):
		try:
			shutil.rmtree(item_path)
		except Exception as e:
			print("TryDelete failed!")
			print("Item: ", item_path)
			print("Stat: ", os.stat(item_path))
			failed += 1
			if failed > 20:
				print("Deletion failed!")
				return

			# You need an explicit sync call or the load_zips call can sometimes miss the new files.
			# Yes, this was actually an issue.
			os.sync()
			time.sleep(0.1 * failed)

			# Force the destructors to run in case we have the handle open still, somehow.
			gc.collect()

			os.stat(item_path)
			if not os.path.exists(item_path):
				return
Example #3
File: sdperf2.py Project: nwgat/pyboard
def doio(blks) :

    os.remove(file)
    os.sync()
    print ("remove done, blocks",blks)

    t= t2.counter()
    f = open(file,'wb')
    for i in range(blks):
        f.write(buf)
    f.close()
    t= t2.counter() - t
    print ("all write",t,"us")

    t= t2.counter()
    f = open(file,'rb')
    for i in range(blks) :
        a=f.read(512)
    f.close()
    t= t2.counter() - t
    print ("all read",t,"us")
    # verify last block
    errs=0
    for i in range(len(a)):
        if a[i] != i%256 :
            errs += 1
    print ("errs",errs)
    return
Example #4
File: wifi.py Project: Jonty/Mk3-Firmware
def choose_wifi(dialog_title='TiLDA'):
    with dialogs.WaitingMessage(text='Scanning for networks...', title=dialog_title):
        visible_aps = nic().list_aps()
        visible_aps.sort(key=lambda x:x['rssi'], reverse=True)
        visible_ap_names = []
        # We'll get one result for each AP, so filter dupes
        for ap in visible_aps:
            if ap['ssid'] not in visible_ap_names:
                visible_ap_names.append(ap['ssid'])
        visible_aps = None

    ssid = dialogs.prompt_option(
        visible_ap_names,
        text='Choose wifi network',
        title=dialog_title
    )
    key = dialogs.prompt_text("Enter wifi key (blank if none)", width = 310, height = 220)
    if ssid:
        with open("wifi.json", "wt") as file:
            if key:
                conn_details = {"ssid": ssid, "pw": key}
            else:
                conn_details = {"ssid": ssid}

            file.write(json.dumps(conn_details))
        os.sync()
        # We can't connect after scanning for some bizarre reason, so we reset instead
        pyb.hard_reset()
Example #5
File: base.py Project: agangidi53/openbmc
 def sync():
     """Sync and drop kernel caches"""
     log.debug("Syncing and dropping kernel caches""")
     KernelDropCaches.drop()
     os.sync()
     # Wait a bit for all the dirty blocks to be written onto disk
     time.sleep(3)
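
The KernelDropCaches helper above belongs to the surrounding test framework and is not shown here. As a rough sketch only (requires root), dropping the kernel caches typically amounts to syncing first and then writing to /proc/sys/vm/drop_caches, much as Example #13 below does:

import os

def drop_kernel_caches():
    # Flush dirty pages first so dropping the caches cannot discard data
    # that has not reached the disk yet.
    os.sync()
    # '3' frees the pagecache plus dentries and inodes; root is required.
    with open('/proc/sys/vm/drop_caches', 'w') as fh:
        fh.write('3\n')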
Example #6
    def refresh_token(self):
        LOG.debug('Refreshing token')
        if identity_lock.acquire(blocking=False):
            try:
                data = self.send({
                    "path": "auth/token",
                    "headers": {
                        "Authorization": "Bearer " + self.identity.refresh
                    }
                })
                IdentityManager.save(data, lock=False)
                LOG.debug('Saved credentials')
            except HTTPError as e:
                if e.response.status_code == 401:
                    LOG.error('Could not refresh token, invalid refresh code.')
                else:
                    raise

            finally:
                identity_lock.release()
        else:  # Someone is updating the identity, wait for release
            with identity_lock:
                LOG.debug('Refresh is already in progress, waiting until done')
                time.sleep(1.2)
                os.sync()
                self.identity = IdentityManager.load(lock=False)
                LOG.debug('new credentials loaded')
Example #7
	def download_to(self, target, timeout=90):
		start_time = time.time()
		if not self.socket:
			raise OSError("Invalid response socket state. Has the content already been consumed?")
		try:
			if "Content-Length" in self.headers:
				remaining = int(self.headers["Content-Length"])
			elif "content-length" in self.headers:
				remaining = int(self.headers["content-length"])
			else:
				raise Exception("No Content-Length")

			with open(target, 'wb') as f:
				f.write(self.content_so_far)
				remaining -= len(self.content_so_far)
				del self.content_so_far
				while remaining > 0:
					buf = self.socket.recv(BUFFER_SIZE)
					f.write(buf)
					remaining -= len(buf)

					if (time.time() - start_time) > timeout:
						raise Exception("HTTP request timeout")

				f.flush()
			os.sync()

		finally:
			self.close()
Example #8
 def _sync(self):
     LOG.debug(_("Flush file system buffers"))
     if hasattr(os, 'sync'):
         os.sync()
     else:
         import ctypes
         libc = ctypes.CDLL("libc.so.6")
         libc.sync()
Example #9
 def flush(self):
     """Writes changes to flash"""
     if self.dirty:
         with open(self.filename, "wt") as file:
             file.write(json.dumps(self.data))
             file.flush()
         os.sync()
         self.dirty = False
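
For comparison, a minimal sketch of the same write-then-sync pattern that also fsync()s the file descriptor before the global sync; save_json_durably and its arguments are hypothetical, not part of the project above:

import json
import os

def save_json_durably(path, data):
    # Hypothetical helper illustrating an explicit durability sequence.
    with open(path, "wt") as fh:
        fh.write(json.dumps(data))
        fh.flush()
        os.fsync(fh.fileno())  # push this file's blocks to stable storage
    os.sync()  # then flush any remaining dirty buffers system-wide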
Example #10
File: iosync.py Project: vpino/kds
def sync():
    try:
        getattr(os, 'sync')
    except Exception:
        import sh
        os.sync = sh.sync
    finally:
        os.sync()
Example #11
File: base.py Project: 01org/luv-yocto
 def sync():
     """Sync and drop kernel caches"""
     runCmd2('bitbake -m', ignore_status=True)
     log.debug("Syncing and dropping kernel caches""")
     KernelDropCaches.drop()
     os.sync()
     # Wait a bit for all the dirty blocks to be written onto disk
     time.sleep(3)
Example #12
    def inner(*args):

        self = args[0]
        path = args[1]

        overlay_path = path+".empty"
        mount_path = path+".mount"
        work_path = path+".work"

        print(_("Sanitary umount, to ensure that there aren't old mounting points"))

        self.run_external_program("umount {:s}".format(mount_path))
        print(_("Mounting overlayfs for {:s} at {:s}, using {:s} for new files").format(path,mount_path,overlay_path))

        # remove data inside destination path
        shutil.rmtree(overlay_path, ignore_errors=True)
        shutil.rmtree(mount_path, ignore_errors=True)
        shutil.rmtree(work_path, ignore_errors=True)
        try:
            os.mkdir(overlay_path)
        except:
            pass
        try:
            os.mkdir(mount_path)
        except:
            pass
        try:
            os.mkdir(work_path)
        except:
            pass

        if (0 != self.run_external_program('mount -t overlay -o rw,lowerdir="{:s}",upperdir="{:s}",workdir="{:s}" overlay "{:s}"'.format(path,overlay_path,work_path,mount_path))):
            return True # error!!!

        args2 = []
        for a in range(len(args)):
            if a == 1:
                args2.append(mount_path)
            else:
                args2.append(args[a])

        try:
            retval = func(*args2)
        except:
            retval = True

        self.run_external_program("umount {:s}".format(mount_path))
        if (not retval):
            print(_("Mixing file systems"))
            self.merge_overlay(path,overlay_path)

        shutil.rmtree(overlay_path, ignore_errors=True)
        shutil.rmtree(mount_path, ignore_errors=True)
        shutil.rmtree(work_path, ignore_errors=True)
        os.sync() # sync disks

        return retval
Example #13
File: benchmark.py Project: sahib/rmlint
def flush_fs_caches():
    os.sync()
    try:
        with open('/proc/sys/vm/drop_caches', 'w') as handle:
            handle.write('3\n')
        print('-- Flushed fs cache.')
    except IOError:
        print('!! You need to be root to flush the fs cache.')
        sys.exit(-1)
Example #14
    def run(self, distro, image):
        # from now on we always cleanup ourselves after finishing
        signal.signal(signal.SIGINT, sig_handler)

        setup_device(image)
        wipe_device()

        self.create_partitions()
        self.mount_partitions()

        d = next(d for d in self.DISTROS if d.name == distro)

        # set rootfs location and start installing
        d.rootfs = self.part_mount['root']

        log.info('Updating distro database')
        d.update_database()

        log.info('Bootstrap distro \'{}\''.format(d.long_name))
        d.bootstrap()

        log.info('Mounting kernel partitions')
        self.mount_kernel_partitions()

        log.info('Bootstrap distro (phase 2) \'{}\''.format(d.long_name))
        d.bootstrap_phase2()

        log.info('Setting up locale')
        d.setup_locale()

        log.info('Installing bootloader')
        d.install_bootloader()

        log.info('Installing kernel')
        d.install_kernel()

        log.info('Customizing image')
        d.customize_image()

        log.info('Setting up network')
        d.setup_network()

        # finish installation
        log.info('Finishing installation')
        os.sync()

        uid = os.getenv('SUDO_UID', '')
        guid = os.getenv('SUDO_GUID', '')
        if os.path.isfile(image) and uid != '':
            uid = int(uid)
            if guid == '':
                guid = None
            else:
                guid = int(guid)
            shutil.chown(image, uid, guid)

        return 0
Example #15
def sync_os():
    try:
        if hasattr(os, 'sync'):
            os.sync()
        else:
            import ctypes
            libc = ctypes.CDLL("libc.so.6")
            libc.sync()
    except:
        check_call(['sync'], shell=True)
Example #16
 def unmount(self, button=None):
     folder = self.win.choose_folder.get_file()
     try:
         if folder is not None and folder.has_prefix(Gio.File.new_for_path("/media")):
             mount_point = folder.find_enclosing_mount().get_default_location().get_path()
             subprocess.check_call(["umount", mount_point])
     except (subprocess.CalledProcessError, Error) as ex: # pylint: disable=E0712
         self.show_notification(
             "Speicherkarte konnte nicht sicher entfernt werden", "", "dialog-error")
         print(ex, file=sys.stderr)
     finally:
         os.sync()
Example #17
 def check_files(fn1, fn2):
     ld = listdir('/fram')
     if len(ld) > 2:
         for f in ld:
             #ignore config file
             if f !='config':
                 fullname = '/fram/' + f
                 if fullname != fn1 and fullname != fn2:
                     # file not current so move
                     cp(fullname, '/sd/logs/')
                     remove(fullname)
                     sync()
Example #18
def check_environment():
    from mod.settings import (LV2_PEDALBOARDS_DIR,
                              DEFAULT_PEDALBOARD, DEFAULT_PEDALBOARD_COPY,
                              DATA_DIR, DOWNLOAD_TMP_DIR, KEYS_PATH,
                              BANKS_JSON_FILE, FAVORITES_JSON_FILE,
                              UPDATE_CC_FIRMWARE_FILE, UPDATE_MOD_OS_FILE,
                              CAPTURE_PATH, PLAYBACK_PATH)

    # create temp dirs
    if not os.path.exists(DOWNLOAD_TMP_DIR):
        os.makedirs(DOWNLOAD_TMP_DIR)

    # remove temp files
    for path in (CAPTURE_PATH, PLAYBACK_PATH, UPDATE_CC_FIRMWARE_FILE):
        if os.path.exists(path):
            os.remove(path)

    # check RW access
    if os.path.exists(DATA_DIR):
        if not os.access(DATA_DIR, os.W_OK):
            print("ERROR: No write access to data dir '%s'" % DATA_DIR)
            return False
    else:
        try:
            os.makedirs(DATA_DIR)
        except OSError:
            print("ERROR: Cannot create data dir '%s'" % DATA_DIR)
            return False

    # create needed dirs and files
    if not os.path.exists(KEYS_PATH):
        os.makedirs(KEYS_PATH)

    if not os.path.exists(LV2_PEDALBOARDS_DIR):
        os.makedirs(LV2_PEDALBOARDS_DIR)

    if os.path.exists(DEFAULT_PEDALBOARD_COPY) and not os.path.exists(DEFAULT_PEDALBOARD):
        shutil.copytree(DEFAULT_PEDALBOARD_COPY, DEFAULT_PEDALBOARD)

    if not os.path.exists(BANKS_JSON_FILE):
        with open(BANKS_JSON_FILE, 'w') as fh:
            fh.write("[]")

    if not os.path.exists(FAVORITES_JSON_FILE):
        with open(FAVORITES_JSON_FILE, 'w') as fh:
            fh.write("[]")

    # remove previous update file
    if os.path.exists(UPDATE_MOD_OS_FILE):
        os.remove(UPDATE_MOD_OS_FILE)
        os.sync()

    return True
Example #19
File: usb.py Project: mbusb/multibootusb
 def __exit__(self, type_, value, traceback_):
     if not self.is_relevant:
         return
     os.sync()     # This should not be strictly necessary
     time.sleep(1)  # Yikes, mount always fails without this sleep().
     try:
         mount_point = UDISKS.mount(self.usb_disk)
         config.add_remounted(self.usb_disk)
         self.exit_callback(details(self.usb_disk))
     except dbus.exceptions.DBusException as e:
         raise MountError(e)
     gen.log("Mounted %s" % (self.usb_disk))
Example #20
File: cms.py Project: wtpayne/hiai
def register(cfg):
    """
    Register build outputs with the local Configuration Management System.

    """
    build_id           = cfg['build_id']
    dirpath_branch_cms = cfg['paths']['dirpath_branch_cms']
    dirpath_build_cms  = os.path.join(dirpath_branch_cms, build_id)

    da.util.ensure_dir_exists(dirpath_build_cms)
    os.sync()

    # Archive results.
    archive_plan = {
        'index':  'copy',
        'log':    'copy',
        '1report': 'copy',
    }
    dirpath_branch_tmp = cfg['paths']['dirpath_branch_tmp']
    for name in sorted(os.listdir(dirpath_branch_tmp)):

        if name not in archive_plan:
            continue

        action = archive_plan[name]

        if action == 'copy':
            dirpath_src = os.path.join(dirpath_branch_tmp, name)
            dirpath_dst = os.path.join(dirpath_build_cms,  name)
            if os.path.isdir(dirpath_src):
                shutil.copytree(dirpath_src, dirpath_dst)
            else:
                shutil.copyfile(dirpath_src, dirpath_dst)

    # Set the expiration date on the CMS.
    build_time = cfg['timestamp']['datetime_utc']
    expiration = datetime.timedelta(
                    days = cfg['options']['cms_expiration_days'])

    da.expiration.set_expiration_date(
                    dirpath_build_cms    = dirpath_build_cms,
                    identifier           = 'cms',
                    current_time         = build_time,
                    timedelta_expiration = expiration)

    # Delete expired builds.
    if cfg['options']['enable_cms_delete_old_builds']:
        rootpath_cms = cfg['paths']['rootpath_cms']
        for dirpath_prev_build in _gen_all_previous_builds(rootpath_cms):
            if da.expiration.has_expired(
                                    dirpath_build_cms = dirpath_prev_build,
                                    time_now          = build_time):
                shutil.rmtree(dirpath_prev_build)
Example #21
File: base.py Project: rvykydal/anaconda
    def write(self):
        """ Write the bootloader configuration and install the bootloader. """
        if self.skip_bootloader:
            return

        if self.update_only:
            self.update()
            return

        self.write_config()
        os.sync()
        self.stage2_device.format.sync(root=util.getTargetPhysicalRoot())
        self.install()
Example #22
File: util.py Project: resin-os/resinhup
def safeFileCopy(src, dst, sync=True):
    # src must be a file
    if (not os.path.isfile(src)) and (not os.path.islink(src)):
        log.error("safeFileCopy: Can't copy source as " + src + " is not a handled file.")
        return False

    # Make sure dst is either non-existent or a file (which we overwrite)
    if os.path.exists(dst):
        if os.path.isfile(dst):
            log.warning("safeFileCopy: Destination file " + dst + " already exists. Will overwrite.")
        elif os.path.isdir(dst):
            log.error("safeFileCopy: Destination target " + dst + " is a directory.")
            return False
        else:
            log.error("safeFileCopy: Destination target " + dst + " is unknown.")
            return False

    # Copy file to dst.tmp
    if not os.path.isdir(os.path.dirname(dst)):
        try:
            os.makedirs(os.path.dirname(dst))
        except:
            log.error("safeFileCopy: Failed to create directories structure for destination " + dst + ".")
            return False
    if os.path.islink(src):
        linkto = os.readlink(src)
        os.symlink(linkto, dst + ".tmp")
    else:
        with open(src, 'rb') as srcfd, open(dst + ".tmp", "wb") as dsttmpfd:
            try:
                shutil.copyfileobj(srcfd, dsttmpfd)
            except Exception as s:
                log.error("safeFileCopy: Failed to copy " + src + " to " + dst + ".tmp .")
                log.error(str(s))
                return False
            shutil.copymode(src, dst + ".tmp")
            if sync:
                os.fsync(dsttmpfd)

    # Rename and sync filesystem to disk
    os.rename(dst + ".tmp", dst)
    if sync:
        # Make sure the write operation is durable - avoid data loss
        dirfd = os.open(os.path.dirname(dst), os.O_DIRECTORY)
        os.fsync(dirfd)
        os.close(dirfd)

        os.sync()

    return True
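
A hypothetical call site for safeFileCopy(); the paths below are made up for illustration:

# Copy a file durably and confirm the result; paths are illustrative only.
if safeFileCopy("/mnt/data/config.json", "/mnt/backup/config.json", sync=True):
    print("copy completed and flushed to disk")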
Example #23
File: efi.py Project: rvykydal/anaconda
    def write(self):
        """ Write the bootloader configuration and install the bootloader. """
        if self.skip_bootloader:  # pylint: disable=no-member
            return

        if self.update_only:  # pylint: disable=no-member
            self.update()
            return

        try:
            os.sync()
            self.stage2_device.format.sync(root=util.getTargetPhysicalRoot()) # pylint: disable=no-member
            self.install()
        finally:
            self.write_config()  # pylint: disable=no-member
Example #24
def Show_Pass():
	global PPID
	global DEBUG_MODE
	global ROOT_DIR
	global LOG_DIR

	now = datetime.datetime.now()
	print_green(PASS_BANNER)
	Log("PASS")

	shutil.move(os.path.join(ROOT_DIR, "%s.txt"%PPID), os.path.join(LOG_DIR, "%s_PASS.txt"%(PPID)))
	os.sync()

	print_green("Press Power Button to Shutdown UUT!!")
	Unmount_Folder()
Example #25
    def setUp(self):

        # create temporary directory:
        self.tempdir = tempfile.mkdtemp()
        os.chdir(self.tempdir)
        print("\nTestFileWithoutTags: temporary directory: " + self.tempdir)

        # create set of test files:
        self.create_tmp_file(self.testfilename)

        # double-check set-up:
        self.assertTrue(self.file_exists(self.testfilename))

        if platform.system() != 'Windows':
            os.sync()
Example #26
File: usb.py Project: mbusb/multibootusb
 def __enter__(self):
     if not self.is_relevant:
         return
     self.assert_no_access()
     try:
         gen.log("Unmounting %s" % self.usb_disk)
         os.sync() # This is needed because UDISK.unmount() can timeout.
         UDISKS.unmount(self.usb_disk)
     except dbus.exceptions.DBusException as e:
         gen.log("Unmount of %s has failed." % self.usb_disk)
         # This may get the partition mounted. Don't call!
         # self.exit_callback(details(self.usb_disk))
         raise UnmountError(e)
     gen.log("Unmounted %s" % self.usb_disk)
     return self
Example #27
    def generate(self,path):
        """ Ensures that the base system, to create a CHROOT environment, exists """

        # Create all, first, in a temporal folder
        tmp_path = path+".tmp"

        shutil.rmtree(tmp_path, ignore_errors=True)

        os.makedirs(tmp_path)
        if self.distro_type == "debian":
            server = "http://http.debian.net/debian/"
        else:
            server = "http://archive.ubuntu.com/ubuntu/"
        command = "debootstrap --variant=buildd --arch {:s} {:s} {:s} {:s}".format(self.architecture,self.distro_name,tmp_path,server)


        if (0 != self.run_external_program(command)):
            return True # error!!!

        f = open(os.path.join(tmp_path,"etc","apt","sources.list"),"w")
        if (self.distro_type == "debian"):
            # Add contrib and non-free to the list of packages sources if DEBIAN
            f.write("deb http://ftp.debian.org/debian/ {:s} main contrib non-free\n".format(self.distro_name))
        else:
            # Add restricted, universe and multiverse if UBUNTU
            f.write("deb http://archive.ubuntu.com/ubuntu/ {:s} main restricted universe multiverse\n".format(self.distro_name))
        f.close()

        command = 'apt clean'
        if (0 != self.run_chroot(tmp_path,command)):
            return True # error!!!

        command = 'apt update'
        if (0 != self.run_chroot(tmp_path,command)):
            return True # error!!!

        command = 'apt install meson ninja-build -y'
        if (0 != self.run_chroot(tmp_path,command)):
            return True # error!!!

        os.sync()
        os.rename(tmp_path,path) # rename the folder to the definitive name
        os.sync()

        shutil.rmtree(tmp_path, ignore_errors=True)

        return False # no error
Example #28
def choose_wifi(dialog_title='TiLDA'):
    filtered_aps = []
    with dialogs.WaitingMessage(text='Scanning for networks...', title=dialog_title):
        visible_aps = nic().list_aps()
        visible_aps.sort(key=lambda x:x['rssi'], reverse=True)
        # We'll get one result for each AP, so filter dupes
        for ap in visible_aps:
            title = ap['ssid']
            security = get_security_level(ap)
            if security:
                title = title + ' (%s)' % security
            ap = {
                'title': title,
                'ssid': ap['ssid'],
                'security': security,
            }
            if ap['ssid'] not in [ a['ssid'] for a in filtered_aps ]:
                filtered_aps.append(ap)
        del visible_aps

    ap = dialogs.prompt_option(
        filtered_aps,
        text='Choose wifi network',
        title=dialog_title
    )
    if ap:
        key = None
        if ap['security'] != 0:
            # Backward compat
            if ap['security'] == None:
                ap['security'] = 'wifi'

            key = dialogs.prompt_text(
                "Enter %s key" % ap['security'],
                width = 310,
                height = 220
            )
        with open("wifi.json", "wt") as file:
            if key:
                conn_details = {"ssid": ap['ssid'], "pw": key}
            else:
                conn_details = {"ssid": ap['ssid']}

            file.write(json.dumps(conn_details))
        os.sync()
        # We can't connect after scanning for some bizarre reason, so we reset instead
        pyb.hard_reset()
Example #29
def test_etag_sync(tmpdir):
    fpath = os.path.join(str(tmpdir), 'foo')

    file_ = open(fpath, 'w')
    file_.write('foo')
    file_.close()
    os.sync()

    old_etag = vdir.get_etag_from_file(fpath)

    file_ = open(fpath, 'w')
    file_.write('foo')
    file_.close()

    new_etag = vdir.get_etag_from_file(fpath)

    assert old_etag != new_etag
Example #30
    def save_off(self, announce=True, reply=print):
        """Turn off automatic world saves, then force-save once.

        Optional arguments:
        announce -- Whether to announce in-game that saves are being disabled.
        reply -- This function is called with human-readable progress updates. Defaults to the built-in print function.
        """
        if self.status():
            reply('Minecraft is running... suspending saves')
            if announce:
                self.say('Server backup starting. Server going readonly...')
            self.command('save-off')
            self.command('save-all')
            time.sleep(10)
            os.sync()
        else:
            reply('Minecraft is not running. Not suspending saves.')
Example #31
def create_ps3(dest, game_id, game_title, icon0, pic0, pic1, cue_files, cu2_files, img_files, mem_cards, aea_files, magic_word, resolution):
    print('Create PS3 PKG for', game_title) if verbose else None

    p = popstation()
    p.verbose = verbose
    p.game_id = game_id
    p.game_title = game_title
    #p.icon0 = icon0
    #p.pic1 = pic1
    p.complevel = 0
    p.magic_word = magic_word
    if len(aea_files):
        p.aea = aea_files
    
    for i in range(len(img_files)):
        f = img_files[i]
        toc = None
        #toc = p.get_toc_from_ccd(f)  # ps3 do not like these tocs
        if not toc:
            print('Need to create a TOC') if verbose else None
            toc = get_toc_from_cu2(cu2_files[i])

        print('Add image', f) if verbose else None
        p.add_img((f, toc))

    # create directory structure
    f = game_id
    print('GameID', f)
    try:
        os.mkdir(f)
    except:
        True

    sfo = {
        'ANALOG_MODE': {
            'data_fmt': 1028,
            'data': 1},
        'ATTRIBUTE': {
            'data_fmt': 1028,
            'data': 2},
        'BOOTABLE': {
            'data_fmt': 1028,
            'data': 1},
        'CATEGORY': {
            'data_fmt': 516,
            'data_max_len': 4,
            'data': '1P'},
        'PARENTAL_LEVEL': {
            'data_fmt': 1028,
            'data': 3},
        'PS3_SYSTEM_VER': {
            'data_fmt': 516,
            'data_max_len': 8,
            'data': '01.7000'},
        'RESOLUTION': {
            'data_fmt': 1028,
            'data': resolution},
        'SOUND_FORMAT': {
            'data_fmt': 1028,
            'data': 1},
        'TITLE': {
            'data_fmt': 516,
            'data_max_len': 128,
            'data': game_title},
        'TITLE_ID': {
            'data_fmt': 516,
            'data_max_len': 16,
            'data': game_id},
        'VERSION': {
            'data_fmt': 516,
            'data_max_len': 8,
            'data': '01.00'}
        }
    with open(f + '/PARAM.SFO', 'wb') as of:
        of.write(GenerateSFO(sfo))
        temp_files.append(f + '/PARAM.SFO')

    image = icon0.resize((320, 176), Image.BILINEAR)
    i = io.BytesIO()
    image.save(f + '/ICON0.PNG', format='PNG')
    temp_files.append(f + '/ICON0.PNG')
    
    image = pic0.resize((1000, 560), Image.NEAREST)
    i = io.BytesIO()
    image.save(f + '/PIC0.PNG', format='PNG')
    temp_files.append(f + '/PIC0.PNG')
    
    image = pic1.resize((1920, 1080), Image.NEAREST)
    i = io.BytesIO()
    image.save(f + '/PIC1.PNG', format='PNG')
    temp_files.append(f + '/PIC1.PNG')
    
    image = pic1.resize((310, 250), Image.NEAREST)
    i = io.BytesIO()
    image.save(f + '/PIC2.PNG', format='PNG')
    temp_files.append(f + '/PIC2.PNG')
    
    with open('PS3LOGO.DAT', 'rb') as i:
        with open(f + '/PS3LOGO.DAT', 'wb') as o:
            o.write(i.read())
            temp_files.append(f + '/PS3LOGO.DAT')

    f = game_id + '/USRDIR'
    try:
        os.mkdir(f)
    except:
        True

    _cfg = bytes([
        0x1c, 0x00, 0x00, 0x00, 0x50, 0x53, 0x31, 0x45,
        0x6d, 0x75, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
        0x46, 0x69, 0x6c, 0x65, 0x00, 0xe3, 0xb7, 0xeb,
        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
        0xbb, 0xfa, 0xe2, 0x1b, 0x10, 0x00, 0x00, 0x00,
        0x64, 0x69, 0x73, 0x63, 0x5f, 0x6e, 0x6f, 0x00,
        0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x93, 0xd1, 0x5b, 0xf8
    ])
    with open(f + '/CONFIG', 'wb') as o:
        o.write(_cfg)
        temp_files.append(f + '/CONFIG')

        
    f = game_id + '/USRDIR/CONTENT'
    try:
        os.mkdir(f)
    except:
        True

    p.eboot = game_id + '/USRDIR/CONTENT/EBOOT.PBP'
    p.iso_bin_dat = game_id + '/USRDIR/ISO.BIN.DAT'
    try:
        os.unlink(p.iso_bin_dat)
    except:
        True
    print('Create EBOOT.PBP at', p.eboot)
    p.create_pbp()
    temp_files.append(p.eboot)
    temp_files.append(p.iso_bin_dat)
    try:
        os.sync()
    except:
        True

    # sign the ISO.BIN.DAT
    print('Signing', p.iso_bin_dat)
    subprocess.call(['python3', './sign3.py', p.iso_bin_dat])

    #
    # USRDIR/SAVEDATA
    #
    f = game_id + '/USRDIR/SAVEDATA'
    try:
        os.mkdir(f)
    except:
        True
    image = icon0.resize((80,80), Image.BILINEAR)
    i = io.BytesIO()
    image.save(f + '/ICON0.PNG', format='PNG')
    temp_files.append(f + '/ICON0.PNG')    

    if len(mem_cards) < 1:
        create_blank_mc(f + '/SCEVMC0.VMP')
    if len(mem_cards) < 2:
        create_blank_mc(f + '/SCEVMC1.VMP')
    idx = 0
    for mc in mem_cards:
        mf = f + ('/SCEVMC%d.VMP' % idx)
        with open(mf, 'wb') as of:
            print('Installing MemoryCard as', mf)
            of.write(encode_vmp(mc))
        idx = idx + 1 
    temp_files.append(f + '/SCEVMC0.VMP')
    temp_files.append(f + '/SCEVMC1.VMP')

    sfo = {
        'CATEGORY': {
            'data_fmt': 516,
            'data_max_len': 4,
            'data': 'MS'},
        'PARENTAL_LEVEL': {
            'data_fmt': 1028,
            'data': 1},
        'SAVEDATA_DETAIL': {
            'data_fmt': 516,
            'data_max_len': 4,
            'data': ''},
        'SAVEDATA_DIRECTORY': {
            'data_fmt': 516,
            'data_max_len': 4,
            'data': game_id},
        'SAVEDATA_FILE_LIST': {
            'data_fmt': 4,
            'data_max_len': 3168,
            'data': str(bytes(3168))},
        'SAVEDATA_TITLE': {
            'data_fmt': 516,
            'data_max_len': 128,
            'data': ''},
        'TITLE': {
            'data_fmt': 516,
            'data_max_len': 128,
            'data': game_title},
        'SAVEDATA_PARAMS': {
            'data_fmt': 4,
            'data_max_len': 128,
            'data': str(b"A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xda\xdaC4\x1br\xc2\xede\xa1/k'D\xc6\x11(\xcf\xc8\xb7(\xb8tG+*f\x85L\nm\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8a\xfa,\xa1\xe7+mA\xc5m.\x9a\xba\xbct\xb0")}
    }
    with open(f + '/PARAM.SFO', 'wb') as of:
        of.write(GenerateSFO(sfo))
        temp_files.append(f + '/PARAM.SFO')

    #
    # Create ISO.BIN.EDAT
    #
    print('Create ISO.BIN.EDAT')
    pack('%s/USRDIR/ISO.BIN.DAT' % game_id,
         '%s/USRDIR/ISO.BIN.EDAT' % game_id,
         'UP9000-%s_00-0000000000000001' % game_id)
    temp_files.append('%s/USRDIR/ISO.BIN.EDAT' % game_id)

    #
    # Create PS3 PKG
    #
    print('Create PKG')
    subprocess.call(['python3',
                     'PSL1GHT/tools/ps3py/pkg.py',
                     '-c', 'UP9000-%s_00-0000000000000001' % game_id,
                     game_id, dest])
    temp_files.append(game_id + '/USRDIR/CONTENT')
    temp_files.append(game_id + '/USRDIR/SAVEDATA')
    temp_files.append(game_id + '/USRDIR')
    temp_files.append(game_id)
    print('Finished.', dest, 'created')
Example #32
 def _set_jupyter_password(self, password):
     self._get_db().put('DEVICE_JUPYTER_PASSWORD', password)
     os.sync()
Example #33
def manager_thread() -> None:

    if EON:
        Process(name="autoshutdownd",
                target=launcher,
                args=("selfdrive.autoshutdownd", "autoshutdownd")).start()
        system("am startservice com.neokii.optool/.MainService")

    Process(name="road_speed_limiter",
            target=launcher,
            args=("selfdrive.road_speed_limiter",
                  "road_speed_limiter")).start()
    cloudlog.bind(daemon="manager")
    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})

    params = Params()

    ignore: List[str] = []
    if params.get("DongleId",
                  encoding='utf8') in (None, UNREGISTERED_DONGLE_ID):
        ignore += ["manage_athenad", "uploader"]
    if os.getenv("NOBOARD") is not None:
        ignore.append("pandad")
    ignore += [x for x in os.getenv("BLOCK", "").split(",") if len(x) > 0]

    ensure_running(managed_processes.values(), started=False, not_run=ignore)

    started_prev = False
    sm = messaging.SubMaster(['deviceState'])
    pm = messaging.PubMaster(['managerState'])

    while True:
        sm.update()
        not_run = ignore[:]

        started = sm['deviceState'].started
        driverview = params.get_bool("IsDriverViewEnabled")
        ensure_running(managed_processes.values(), started, driverview,
                       not_run)

        # trigger an update after going offroad
        if started_prev and not started and 'updated' in managed_processes:
            os.sync()
            managed_processes['updated'].signal(signal.SIGHUP)

        started_prev = started

        running = ' '.join(
            "%s%s\u001b[0m" %
            ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
            for p in managed_processes.values() if p.proc)
        print(running)
        cloudlog.debug(running)

        # send managerState
        msg = messaging.new_message('managerState')
        msg.managerState.processes = [
            p.get_process_state_msg() for p in managed_processes.values()
        ]
        pm.send('managerState', msg)

        # Exit main loop when uninstall/shutdown/reboot is needed
        shutdown = False
        for param in ("DoUninstall", "DoShutdown", "DoReboot"):
            if params.get_bool(param):
                shutdown = True
                params.put("LastManagerExitReason", param)
                cloudlog.warning(f"Shutting down manager - {param} set")

        if shutdown:
            break
Example #34
NEW_SERIAL_NUMBER = "SB-R4"

from opentrons import robot
import os

robot.comment(f"Setting serial number to {NEW_SERIAL_NUMBER}.")

if not robot.is_simulating():
    with open("/var/serial", "w") as serial_number_file:
        serial_number_file.write(NEW_SERIAL_NUMBER + "\n")
    with open("/etc/machine-info", "w") as serial_number_file:
        serial_number_file.write(f"DEPLOYMENT=production\nPRETTY_HOSTNAME={NEW_SERIAL_NUMBER}\n")
    with open("/etc/hostname", "w") as serial_number_file:
        serial_number_file.write(NEW_SERIAL_NUMBER + "\n")

    os.sync()

    robot.comment("Done.")
Example #35
    def executeCommands(self, command):
        '''
        This is where we will actually be executing the commands

        :param command: the command we want to execute
        :return: Nothing
        '''

        logging.debug("Execute Command: %s", command)
        usb = USB()
        if command == 'remove_usb':
            logging.debug("In remove usb page")
            if usb.isUsbPresent():  # check to see if usb is inserted
                logging.debug("USB still present")
                self.display.showRemoveUsbPage(
                )  # tell them to remove it if so
                self.display.pageStack = 'removeUsb'  # let handleButtonPress know to repeat
                self.command_to_reference = 'remove_usb'  # let executeCommands know what we want
            else:  # if they were good and removed USB
                logging.debug("USB removed")
                self.display.pageStack = 'success'  # let out handleButtonPress know
                self.display.showSuccessPage()  # display our success page

        if command == 'copy_from_usb':

            logging.debug("copy from USB")
            x = ord("a")
            while (not usb.isUsbPresent('/dev/sd' + chr(x) + "1")
                   and x < ord("k")):  # check to see if usb is inserted
                x += 1
            if x == ord("k"):
                self.display.showNoUsbPage(
                )  # if not, alert as this is required
                self.display.pageStack = 'error'
                return  # cycle back to menu
            dev = '/dev/sd' + chr(x) + '1'
            with open('/usr/local/connectbox/PauseMount', 'w') as fp:
                fp.write(" ")
            time.sleep(2)
            self.pageStack = 'wait'  # Dont allow the display to turn off
            self.display.showWaitPage("Checking Space")
            logging.debug("Using location " + dev + " as media copy location")
            if usb.getMount(dev) == '/media/usb0':
                logging.debug(
                    "Moving /media/usb0 to /media/usb11  be able to copy")
                if not os.path.exists(
                        '/media/usb11'
                ):  # check that usb11 exsists to be able to move the mount
                    os.mkdir('/media/usb11')  # make the directory
                if not usb.moveMount(
                        usb.getDev(dev), dev,
                        '/media/usb11'):  # see if our remount was successful
                    self.display.showErrorPage(
                        "Moving Mount")  # if not generate error page and exit
                    self.display.pageStack = 'error'
                    try:
                        os.remove('/usr/local/connectbox/PauseMount')
                    except:
                        pass
                    return
            logging.debug("Preparing to check space of source " +
                          (usb.getMount(dev)))
            (d, s) = usb.checkSpace(usb.getMount(
                dev))  # verify that source is smaller than destination
            logging.debug("space checked source : " + str(s) +
                          ", destination : " + str(d) + " device " + dev)
            if s > d:
                logging.debug(
                    "There is not enough space we will call an error on " +
                    dev)
                self.display.showNoSpacePage(
                    1, dev)  # if not, alert as this is a problem
                self.display.pageStack = 'error'
                if usb.getMount(dev) == '/media/usb11':
                    logging.debug(
                        "since we moved the moount we want /media/usb0 back")
                    usb.moveMount(dev, '/media/usb11', '/media/usb0')
                    try:
                        os.remove('/media/usb11')
                    except:
                        pass
                try:
                    os.remove('/usr/local/connectbox/PauseMount')
                except:
                    pass
                return
            a = usb.getMount(dev)
            logging.debug("starting to do the copy with device " + a)
            self.display.showWaitPage("Copying Files")
            if not usb.copyFiles(a):  # see if we copied successfully
                logging.debug("failed the copy. display an error page")
                self.display.showErrorPage(
                    "Failed Copy")  # if not generate error page and exit
                self.display.pageStack = 'error'
                try:
                    os.remove('/usr/local/connectbox/PauseMount')
                except:
                    pass
                return
            logging.debug("Finished all usb keys")
            logging.debug("Ok now we want to remove all the usb keys")
            curDev = '/dev/sda1'
            x = ord('a')
            while (not usb.isUsbPresent(curDev)) and x < ord("k"):
                logging.debug("is key " + curDev + " present? " +
                              str(usb.isUsbPresent(curDev)))
                x += 1
                curDev = '/dev/sd' + chr(x) + '1'

            while usb.isUsbPresent(curDev) and x < ord("k"):
                self.display.showRemoveUsbPage()  #show the remove usb page
                self.display.pageStack = 'removeUsb'  #show we removed the usb key
                self.command_to_reference = 'remove_usb'
                time.sleep(1)  #Wait a second for the removal
                while (not usb.isUsbPresent(curDev)) and x < ord("k"):
                    x += 1  # lets look at the next one
                    curDev = '/dev/sd' + chr(x) + '1'  #create the next curdev
            # We finished the umounts
            self.display.pageStack = 'success'
            self.display.showSuccessPage()
            logging.debug("Success page now deleting the PauseMount file")
            try:
                os.remove('/usr/local/connectbox/PauseMount')
            except:
                pass
            self.display.pageStack = 'success'  # if the usb was removed
            self.display.showSuccessPage()  # display success page
            os.sync()
            return

        elif command == 'erase_folder':
            file_exists = False  # in regards to README.txt file
            if usb.isUsbPresent():
                self.display.pageStack = 'error'
                self.display.showRemoveUsbPage()
                return
            if os.path.isfile('/media/usb0/README.txt'
                              ):  # keep the default README if possible
                file_exists = True
                subprocess.call(
                    ['cp', '/media/usb0/README.txt', '/tmp/README.txt'])
                logging.debug("README.txt moved")
            for file_object in os.listdir('/media/usb0'):
                file_object_path = os.path.join('/media/usb0', file_object)
                if os.path.isfile(file_object_path):
                    os.unlink(file_object_path)
                else:
                    shutil.rmtree(file_object_path)
            logging.debug("FILES NUKED!!!")
            if file_exists:
                subprocess.call(
                    ['mv', '/tmp/README.txt',
                     '/media/usb0/README.txt'])  # move back
                logging.debug("README.txt returned")
            logging.debug("Life is good!")
            self.display.pageStack = 'success'
            self.display.showSuccessPage()

        elif command == 'copy_to_usb':
            logging.debug("got to copy to usb code")
            self.display.showConfirmPage()  #We found at least one key
            x = ord('a')
            dev = '/dev/sd' + chr(x) + '1'
            self.display.showInsertUsbPage()  #tell them to insert new keys
            while (not usb.isUsbPresent(dev)) and x < ord('k'):
                x += 1
                dev = '/dev/sd' + chr(x) + '1'
                if x == ord('k'):
                    x = ord('a')
                    dev = '/dev/sd' + chr(x) + '1'
                if x == ord('k'):
                    x = ord('a')

            self.display.pageStack = 'confirm'
            self.display.showConfirmPage()
            time.sleep(1)
            with open('/usr/local/connectbox/PauseMount', 'w') as fp:
                pass
            fp.close()
            time.sleep(2)

            self.display.pageStack = 'wait'
            self.display.showWaitPage("Checking Sizes")

            logging.debug("we have found at least one usb to copy to: " + dev)
            x = ord('a')
            dev = '/dev/sd' + chr(x) + '1'
            y = 0
            logging.debug("were ready to start size check")

            while x < ord('k'):
                if usb.getMount(
                        dev
                ) == '/media/usb0':  # if the key is mounted on '/media/usb0' then we have to move it.
                    logging.debug(
                        "Moving /media/usb0 to /media/usb11 be able to copy")
                    y += 1
                    if not os.path.exists(
                            '/media/usb11'
                    ):  # check that usb11 exsists to be able to move the mount
                        os.mkdir('/media/usb11')  # make the directory
                    if not usb.moveMount(
                            dev, '/media/usb0', '/media/usb11'
                    ):  # see if our remount was successful
                        self.display.showErrorPage(
                            "Moving Mount"
                        )  # if not generate error page and exit
                        self.display.pageStack = 'error'
                        try:
                            os.remove('/usr/local/connectbox/PauseMount')
                        except:
                            pass
                        return
                if usb.getMount(dev) != "": y += 1
                x += 1
                dev = '/dev/sd' + chr(x) + '1'

            x = ord('a')
            dev = '/dev/sd' + chr(x) + '1'
            while x < ord(
                    'k'
            ) and y > 0:  #While we know we have a usb key lets check the sizes
                if usb.getMount(dev) != "":
                    zz = usb.getMount(dev)
                    logging.debug(
                        "getting the size for source /media/usb0 and destination "
                        + zz)
                    (d, s) = usb.checkSpace(
                        '/media/usb0',
                        zz)  # verify that source is smaller than destination
                    logging.debug("Space of Destination  is : " + str(d) +
                                  " , Source: " + str(s) + " at: " + dev)
                    if d < s:  #if destination free is less than source we don't have enough space
                        logging.info("source exceeds destination at" + zz)
                        y -= 1
                        while usb.isUsbPresent(dev):
                            logging.info(
                                "we found we don't have enough sapce on usb key "
                                + dev)
                            self.display.showNoSpacePage(
                                2, dev)  #alert that there is a problem
                            self.display.pageStack = 'remove_usb'  #remove this usb
                            self.command_to_reference = 'remove_usb'  #let execute commands know what we want
                            time.sleep(1)  #wait a second
                        usb.unmount(zz)  #Make sure we unmount that device.
                        usb.unmount(dev)  #Make sure we unmount the mount point
                        if zz[len(zz) -
                              1] != '0':  # as long as its not /media/usb0
                            os.system(
                                'rm -r ' + zz
                            )  #Make sure we remove that directory since PauseMount is set
                    else:
                        logging.debug(
                            "Space of Desitinationis ok for source to copy to "
                            + zz)
                else:  #we have a key but it is not mounted
                    if usb.isUsbPresent(
                            dev):  #Hmm USB is present but not mounted.
                        z = ord(dev[len(dev) - 2]) - ord(
                            'a'
                        )  #get the base number of the /dev/sdX1 device that it should be not the ordinate
                        if z == 0:
                            z = ord('1')  #I don't want to mount as usb0
                        else:
                            z += ord('0')
                        while usb.isUsbPresent('/media/usb' +
                                               chr(z)) and z < ord(':'):
                            z += 1  #Find a mount that isn't there
                        if z < ord(':'):
                            os.system('mkdir /media/usb' +
                                      chr(z))  #Make the directory
                            if (not usb.mount(dev, '/media/usb' + chr(z))):
                                self.display.showErrorPage(
                                    "Directory Creation")
                                self.display.pageStack = 'error'
                                try:
                                    os.remove(
                                        '/usr/local/connectbox/PauseMount')
                                except:
                                    pass
                                return
                            x -= 1  #decrement so we can recheck this mount
                        else:
                            self.disiplay.showErrorPage("")
                            self.display.pageStack = 'error'
                            try:
                                os.remove('/usr/local/connectbox/PauseMount')
                            except:
                                pass
                            return
                x += 1
                dev = '/dev/sd' + chr(x) + '1'
            logging.info(
                "we passed size checks so we are going on to copy now")

            # we think we have keys to work with if we go forward from here.
            l = []
            y = [0, 0, 0]
            self.display.showWaitPage("Copying Now")
            x = ord('a')
            dev = '/dev/sd' + chr(x) + "1"
            logging.info("Ready to start the copies")
            while x < ord('k'):
                if usb.isUsbPresent(dev):  #find the first usb key
                    y[0] = subprocess.Popen(("/usr/bin/cp -r /media/usb0/* " +
                                             usb.getMount(dev) + "/"),
                                            shell=True)
                    y[1] = usb.getMount(dev) + "/"
                    y[2] = y[0].pid + 1
                    l.append(y)
                    logging.info("started copy from /media/usb0/* to " + y[1] +
                                 " as pid: " + str(y[2]))
                x += 1
                dev = '/dev/sd' + chr(x) + '1'

#            x = 0
#            y = [0,0,0]
#            while y in l:
#                if y[0] == 0 or x ==1:
#                    self.display.showErrorPage("Failed on /media/usb"+y[1])     # if not generate error page and exit
#                    self.display.pageStack = 'error'
#                   logging.info("ok we failed to copy to "+y[1]+" at mount point "+(usb.getMount("/media/usb"+y[1])))
#                    x =1
#                    try:
#                        os.kill(y[2],-9)
#                    except:
#                        pass
#                    if usb.isUsbPresent('/media/usb11') and (usb.getDev('/media/usb11')+"/") == y[1]:
#                        os.command('unmount '+usb.getDev('/media/usb11'))
#                        os.command('unmount /media/usb11')
#                        os.command('rmdir -f /media/usb11')
#                        logging.debug("we failed on the move of /media/usb11 -> /media/usb0")
#                        self.disiplay.showErrorPage("Failed on /media/usb11 remount")
#                        self.display.pageStack = 'error'
#                    y[1] = ""                                        #null this event
#            if x ==1:
#                try: os.remove('/usr/local/connectbox/PauseMount')
#                except:
#                    pass
#                return

# Ok we started all the copyies now we need to check for closure of the copy
            logging.info("Starting end of copy testing, lenth of l is " +
                         str(len(l)) + "value of: " + str(l))
            y = [0, 0, 0]
            yy = 0
            x = 0
            w = subprocess.Popen(("/usr/bin/ps -ae | /usr/bin/grep cp"),
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 universal_newlines=True)
            wo, errs = w.communicate()
            w.kill()
            logging.info("ps is showing " + wo)
            logging.info("l is showing" + str(l))
            while x == 0:
                w = subprocess.Popen(("/usr/bin/ps -ae | /usr/bin/grep cp"),
                                     shell=True,
                                     stdout=subprocess.PIPE,
                                     universal_newlines=True)
                wo, errs = w.communicate()
                w.kill()
                while yy < len(l):
                    y = l[yy]
                    logging.info("y values are: " + str(y[0]) + ", " +
                                 str(y[1]) + ", " + str(y[2]))
                    if y[2] != 0:
                        logging.info("testing for " + str(y[2]))
                        if (str(y[2]) not in wo) or (((wo.split(str(
                                y[2]))[1]).split("<")[1]).split(">")[0]
                                                     == 'defunct'):
                            logging.info(
                                "We finished the copy from /media/usb0 to " +
                                y[1])
                            try:
                                os.kill(y[2], -9)
                                os.kill(y[2] - 1, -9)
                            except:
                                pass
                            y[2] = 0
                            y[1] = ""
                            y[0] = 0
                            l[yy] = y
                            x = 1
                        else:
                            x = 0
                            logging.info("testing failed")
                    yy += 1
                yy = 0
                if x == 1:
                    while y in l:
                        if y[2] != 0:
                            x = 0
                logging.info("looping on x=" + str(x))
            os.sync()
            logging.info("Ok now we want to remove all the usb keys")
            curDev = '/dev/sda1'
            x = ord('a')
            while (not usb.isUsbPresent(curDev)) and x < ord("k"):
                logging.debug("is key " + curDev + " present? " +
                              str(usb.isUsbPresent(curDev)))
                x += 1
                curDev = '/dev/sd' + chr(x) + '1'

            while usb.isUsbPresent(curDev) and x < ord("k"):
                self.display.showRemoveUsbPage()  #show the remove usb page
                self.display.pageStack = 'removeUsb'  #show we removed the usb key
                self.command_to_reference = 'remove_usb'
                time.sleep(3)  #Wait a second for the removal
                while (not usb.isUsbPresent(curDev)) and x < ord("k"):
                    x += 1  # lets look at the next one
                    curDev = '/dev/sd' + chr(x) + '1'  #create the next curdev
            # We finished the umounts
            self.display.pageStack = 'success'
            self.display.showSuccessPage()
            logging.debug("Success page now deleting the PauseMount file")
            try:
                os.remove('/usr/local/connectbox/PauseMount')
            except:
                pass
            self.display.pageStack = 'success'  # if the usb was removed
            self.display.showSuccessPage()  # display success page
            os.sync()
            return
Example #36
def get_model_file(name, tag=None, root=os.path.join("~", ".ncnn", "models")):
    r"""Return location for the pretrained on local file system.

    This function will download from online model zoo when model cannot be found or has mismatch.
    The root directory will be created if it doesn't exist.

    Parameters
    ----------
    name : str
        Name of the model.
    root : str, default '~/.ncnn/models'
        Location for keeping the model parameters.

    Returns
    -------
    file_path
        Path to the requested pretrained model file.
    """
    if "NCNN_HOME" in os.environ:
        root = os.path.join(os.environ["NCNN_HOME"], "models")

    use_tag = isinstance(tag, str)
    if use_tag:
        file_name = "{name}-{short_hash}".format(name=name, short_hash=tag)
    else:
        file_name = "{name}".format(name=name)

    root = os.path.expanduser(root)
    params_path = os.path.join(root, file_name)
    lockfile = os.path.join(root, file_name + ".lock")
    if use_tag:
        sha1_hash = tag
    else:
        sha1_hash = _model_sha1[name]

    if not os.path.exists(root):
        os.makedirs(root)

    with portalocker.Lock(
        lockfile, timeout=int(os.environ.get("NCNN_MODEL_LOCK_TIMEOUT", 300))
    ):
        if os.path.exists(params_path):
            if check_sha1(params_path, sha1_hash):
                return params_path
            else:
                logging.warning(
                    "Hash mismatch in the content of model file '%s' detected. "
                    "Downloading again.",
                    params_path,
                )
        else:
            logging.info("Model file not found. Downloading.")

        zip_file_path = os.path.join(root, file_name)
        if file_name in _split_model_bins:
            file_name_parts = [
                "%s.part%02d" % (file_name, i + 1)
                for i in range(_split_model_bins[file_name])
            ]
            for file_name_part in file_name_parts:
                file_path = os.path.join(root, file_name_part)
                repo_url = os.environ.get("NCNN_REPO", github_repo_url)
                if repo_url[-1] != "/":
                    repo_url = repo_url + "/"
                download(
                    _url_format.format(repo_url=repo_url, file_name=file_name_part),
                    path=file_path,
                    overwrite=True,
                )

            merge_file(root, file_name_parts, zip_file_path)
        else:
            repo_url = os.environ.get("NCNN_REPO", github_repo_url)
            if repo_url[-1] != "/":
                repo_url = repo_url + "/"
            download(
                _url_format.format(repo_url=repo_url, file_name=file_name),
                path=zip_file_path,
                overwrite=True,
            )
        if zip_file_path.endswith(".zip"):
            with zipfile.ZipFile(zip_file_path) as zf:
                zf.extractall(root)
            os.remove(zip_file_path)
        # Make sure we write the model file on networked filesystems
        try:
            os.sync()
        except AttributeError:
            pass
        if check_sha1(params_path, sha1_hash):
            return params_path
        else:
            raise ValueError("Downloaded file has different hash. Please try again.")
Example #37
0
File: model_store.py Project: zyg11/pyncnn
def get_model_file(name, tag=None, root=os.path.join('~', '.ncnn', 'models')):
    r"""Return location for the pretrained on local file system.

    This function will download from online model zoo when model cannot be found or has mismatch.
    The root directory will be created if it doesn't exist.

    Parameters
    ----------
    name : str
        Name of the model.
    tag : str, optional
        Expected SHA1 hash of the model file; also used as the file-name suffix.
    root : str, default '~/.ncnn/models'
        Location for keeping the model parameters.

    Returns
    -------
    file_path
        Path to the requested pretrained model file.
    """
    if 'NCNN_HOME' in os.environ:
        root = os.path.join(os.environ['NCNN_HOME'], 'models')

    use_tag = isinstance(tag, str)
    if use_tag:
        file_name = '{name}-{short_hash}'.format(name=name, short_hash=tag)
    else:
        file_name = '{name}'.format(name=name)

    root = os.path.expanduser(root)
    params_path = os.path.join(root, file_name)
    lockfile = os.path.join(root, file_name + '.lock')
    if use_tag:
        sha1_hash = tag
    else:
        sha1_hash = _model_sha1[name]

    if not os.path.exists(root):
        os.makedirs(root)

    with portalocker.Lock(lockfile,
                          timeout=int(
                              os.environ.get('NCNN_MODEL_LOCK_TIMEOUT', 300))):
        if os.path.exists(params_path):
            if check_sha1(params_path, sha1_hash):
                return params_path
            else:
                logging.warning(
                    "Hash mismatch in the content of model file '%s' detected. "
                    "Downloading again.", params_path)
        else:
            logging.info('Model file not found. Downloading.')

        zip_file_path = os.path.join(root, file_name)
        repo_url = os.environ.get('NCNN_REPO', apache_repo_url)
        if repo_url[-1] != '/':
            repo_url = repo_url + '/'
        download(_url_format.format(repo_url=repo_url, file_name=file_name),
                 path=zip_file_path,
                 overwrite=True)
        if zip_file_path.endswith(".zip"):
            with zipfile.ZipFile(zip_file_path) as zf:
                zf.extractall(root)
            os.remove(zip_file_path)
        # Make sure we write the model file on networked filesystems
        try:
            os.sync()
        except AttributeError:
            pass
        if check_sha1(params_path, sha1_hash):
            return params_path
        else:
            raise ValueError(
                'Downloaded file has different hash. Please try again.')
Example #38
0
def update_mbusb_cfg_file(iso_link, usb_uuid, usb_mount, distro):
    """
    Update the main multibootusb syslinux.cfg file after a distro is installed.
    :return:
    """
    if platform.system() == 'Linux':
        os.sync()
    log('Updating multibootusb config file...')
    sys_cfg_file = os.path.join(usb_mount, "multibootusb", "syslinux.cfg")
    install_dir = os.path.join(usb_mount, "multibootusb",
                               iso_basename(iso_link))
    if os.path.exists(sys_cfg_file):

        if distro == "hbcd":
            if os.path.exists(
                    os.path.join(usb_mount, "multibootusb", "menu.lst")):
                _config_file = os.path.join(usb_mount, "multibootusb",
                                            "menu.lst")
                # Substitute on the file contents, not the path, and only
                # truncate the file once the new text is ready.
                with open(_config_file) as config_file:
                    content = config_file.read()
                string = re.sub(
                    r'/HBCD',
                    '/multibootusb/' + iso_basename(iso_link) + '/HBCD',
                    content)
                with open(_config_file, "w") as config_file:
                    config_file.write(string)
            with open(sys_cfg_file, "a") as f:
                f.write("#start " + iso_basename(config.image_path) + "\n")
                f.write("LABEL " + iso_basename(config.image_path) + "\n")
                f.write("MENU LABEL " + iso_basename(config.image_path) + "\n")
                f.write("BOOT " + '/multibootusb/' + iso_basename(iso_link) +
                        '/' + isolinux_bin_dir(iso_link).replace("\\", "/") +
                        '/' + distro + '.bs' + "\n")
                f.write("#end " + iso_basename(config.image_path) + "\n")
        elif distro == "Windows":
            if os.path.exists(sys_cfg_file):
                config_file = open(sys_cfg_file, "a")
                config_file.write("#start " + iso_basename(iso_link) + "\n")
                config_file.write("LABEL " + iso_basename(iso_link) + "\n")
                config_file.write("MENU LABEL " + iso_basename(iso_link) +
                                  "\n")
                config_file.write("KERNEL chain.c32 hd0 1 ntldr=/bootmgr" +
                                  "\n")
                config_file.write("#end " + iso_basename(iso_link) + "\n")
                config_file.close()
        elif distro == 'f4ubcd':
            if os.path.exists(sys_cfg_file):
                config_file = open(sys_cfg_file, "a")
                config_file.write("#start " + iso_basename(iso_link) + "\n")
                config_file.write("LABEL " + iso_basename(iso_link) + "\n")
                config_file.write("MENU LABEL " + iso_basename(iso_link) +
                                  "\n")
                config_file.write("KERNEL grub.exe" + "\n")
                config_file.write('APPEND --config-file=/multibootusb/' +
                                  iso_basename(config.image_path) +
                                  '/menu.lst' + "\n")
                config_file.write("#end " + iso_basename(iso_link) + "\n")
                config_file.close()
        elif distro == 'kaspersky':
            if os.path.exists(sys_cfg_file):
                config_file = open(sys_cfg_file, "a")
                config_file.write("#start " + iso_basename(iso_link) + "\n")
                config_file.write("LABEL " + iso_basename(iso_link) + "\n")
                config_file.write("MENU LABEL " + iso_basename(iso_link) +
                                  "\n")
                config_file.write("CONFIG " + '/multibootusb/' +
                                  iso_basename(config.image_path) +
                                  '/kaspersky.cfg' + "\n")
                config_file.write("#end " + iso_basename(iso_link) + "\n")
                config_file.close()
        elif distro == 'grub4dos':
            update_menu_lst()
        elif distro == 'grub4dos_iso':
            update_grub4dos_iso_menu()
        else:
            config_file = open(sys_cfg_file, "a")
            config_file.write("#start " + iso_basename(iso_link) + "\n")
            config_file.write("LABEL " + iso_basename(iso_link) + "\n")
            config_file.write("MENU LABEL " + iso_basename(iso_link) + "\n")
            if distro == "salix-live":
                if os.path.exists(
                        os.path.join(config.usb_mount, 'multibootusb',
                                     iso_basename(iso_link), 'boot',
                                     'grub2-linux.img')):
                    config_file.write("LINUX " + '/multibootusb/' +
                                      iso_basename(iso_link) +
                                      '/boot/grub2-linux.img' + "\n")
                else:
                    config_file.write(
                        "BOOT " + '/multibootusb/' + iso_basename(iso_link) +
                        '/' + isolinux_bin_dir(iso_link).replace("\\", "/") +
                        '/' + distro + '.bs' + "\n")
            elif distro == "pclinuxos":
                config_file.write("kernel " + '/multibootusb/' +
                                  iso_basename(iso_link) +
                                  '/isolinux/vmlinuz' + "\n")
                config_file.write(
                    "append livecd=livecd root=/dev/rd/3 acpi=on vga=788 keyb=us vmalloc=256M nokmsboot "
                    "fromusb root=UUID=" + usb_uuid +
                    " bootfromiso=/multibootusb/" + iso_basename(iso_link) +
                    "/" + iso_name(iso_link) + " initrd=/multibootusb/" +
                    iso_basename(iso_link) + '/isolinux/initrd.gz' + "\n")
            elif distro == "memtest":
                config_file.write("kernel " + '/multibootusb/' +
                                  iso_basename(iso_link) +
                                  '/BOOT/MEMTEST.IMG\n')

            elif distro == "sgrubd2" or config.distro == 'raw_iso':
                config_file.write("LINUX memdisk\n")
                config_file.write("INITRD " + "/multibootusb/" +
                                  iso_basename(iso_link) + '/' +
                                  iso_name(iso_link) + '\n')
                config_file.write("APPEND iso\n")

            elif distro == 'ReactOS':
                config_file.write("COM32 mboot.c32" + '\n')
                config_file.write("APPEND /loader/setupldr.sys" + '\n')
            elif distro == 'pc-unlocker':
                config_file.write("kernel ../ldntldr" + '\n')
                config_file.write("append initrd=../ntldr" + '\n')
            elif distro == 'pc-tool':
                config_file.write(
                    menus.pc_tool_config(syslinux=True, grub=False))
            elif distro == 'grub2only':
                config_file.write(menus.grub2only())
            elif distro == 'memdisk_iso':
                print(menus.memdisk_iso_cfg(syslinux=True, grub=False))
                config_file.write(
                    menus.memdisk_iso_cfg(syslinux=True, grub=False))
            elif distro == 'memdisk_img':
                config_file.write(
                    menus.memdisk_img_cfg(syslinux=True, grub=False))
            else:
                if isolinux_bin_exist(config.image_path) is True:
                    if distro == "generic":
                        distro_syslinux_install_dir = isolinux_bin_dir(
                            iso_link)
                        if isolinux_bin_dir(iso_link) != "/":
                            distro_sys_install_bs = os.path.join(
                                usb_mount, isolinux_bin_dir(
                                    iso_link)) + '/' + distro + '.bs'
                        else:
                            distro_sys_install_bs = '/' + distro + '.bs'
                    else:
                        distro_syslinux_install_dir = install_dir
                        distro_syslinux_install_dir = distro_syslinux_install_dir.replace(
                            usb_mount, '')
                        distro_sys_install_bs = distro_syslinux_install_dir + '/' + isolinux_bin_dir(
                            iso_link) + '/' + distro + '.bs'

                    distro_sys_install_bs = "/" + distro_sys_install_bs.replace(
                        "\\", "/")  # Windows path issue.

                    if config.syslinux_version == '3':
                        config_file.write(
                            "CONFIG /multibootusb/" + iso_basename(iso_link) +
                            '/' +
                            isolinux_bin_dir(iso_link).replace("\\", "/") +
                            '/isolinux.cfg\n')
                        config_file.write(
                            "APPEND /multibootusb/" + iso_basename(iso_link) +
                            '/' +
                            isolinux_bin_dir(iso_link).replace("\\", "/") +
                            '\n')
                        config_file.write(
                            "# Delete or comment above two lines using # and remove # from below line if "
                            "you get not a COM module error.\n")
                        config_file.write(
                            "#BOOT " +
                            distro_sys_install_bs.replace("//", "/") + "\n")
                    else:
                        config_file.write(
                            "BOOT " +
                            distro_sys_install_bs.replace("//", "/") + "\n")

            config_file.write("#end " + iso_basename(iso_link) + "\n")
            config_file.close()
            # Update extlinux.cfg file by copying updated syslinux.cfg
            shutil.copy(
                os.path.join(usb_mount, 'multibootusb', 'syslinux.cfg'),
                os.path.join(usb_mount, 'multibootusb', 'extlinux.cfg'))
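For reference, the entry this function appends to syslinux.cfg for an ordinary distro has roughly the following shape (the ISO basename and isolinux directory below are illustrative):

#start debian-11-netinst
LABEL debian-11-netinst
MENU LABEL debian-11-netinst
BOOT /multibootusb/debian-11-netinst/isolinux/debian.bs
#end debian-11-netinst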
Example #39
0
    def createPortable(self, mutex):
        staggingDir = os.path.join(self.installDir, 'stagging')

        if not os.path.exists(staggingDir):
            os.makedirs(staggingDir)

        try:
            shutil.copytree(self.appInstallPath,
                            os.path.join(staggingDir, self.programName + '.app'),
                            True)
        except:
            pass

        imageSize = self.dirSize(staggingDir)
        tmpDmg = os.path.join(self.installDir, self.programName + '_tmp.dmg')
        volumeName = "{}-portable-{}".format(self.programName,
                                    self.programVersion)

        process = subprocess.Popen(['hdiutil', 'create',
                                    '-srcfolder', staggingDir,
                                    '-volname', volumeName,
                                    '-fs', 'HFS+',
                                    '-fsargs', '-c c=64,a=16,e=16',
                                    '-format', 'UDRW',
                                    '-size', str(math.ceil(imageSize * 1.1)),
                                    tmpDmg],
                                   stdout=subprocess.PIPE)
        process.communicate()

        process = subprocess.Popen(['hdiutil',
                                    'attach',
                                    '-readwrite',
                                    '-noverify',
                                    tmpDmg],
                                   stdout=subprocess.PIPE)
        stdout, stderr = process.communicate()
        device = ''

        for line in stdout.split(b'\n'):
            line = line.strip()

            if len(line) < 1:
                continue

            dev = line.split()

            if len(dev) > 2:
                device = dev[0].decode(sys.getdefaultencoding())

                break

        time.sleep(2)
        volumePath = os.path.join('/Volumes', volumeName)
        volumeIcon = os.path.join(volumePath, '.VolumeIcon.icns')
        self.copy(os.path.join(self.rootDir, 'StandAlone/share/icons/webcamoid.icns'),
                  volumeIcon)

        process = subprocess.Popen(['SetFile',
                                    '-c', 'icnC',
                                    volumeIcon],
                                   stdout=subprocess.PIPE)
        process.communicate()

        process = subprocess.Popen(['SetFile',
                                    '-a', 'C',
                                    volumePath],
                                   stdout=subprocess.PIPE)
        process.communicate()

        appsShortcut = os.path.join(volumePath, 'Applications')

        if not os.path.exists(appsShortcut):
            os.symlink('/Applications', appsShortcut)

        os.sync()

        process = subprocess.Popen(['hdiutil',
                                    'detach',
                                    device],
                                   stdout=subprocess.PIPE)
        process.communicate()

        packagePath = \
            os.path.join(self.pkgsDir,
                         '{}-portable-{}-{}.dmg'.format(self.programName,
                                                        self.programVersion,
                                                        platform.machine()))

        if not os.path.exists(self.pkgsDir):
            os.makedirs(self.pkgsDir)

        if os.path.exists(packagePath):
            os.remove(packagePath)

        process = subprocess.Popen(['hdiutil',
                                    'convert',
                                    tmpDmg,
                                    '-format', 'UDZO',
                                    '-imagekey', 'zlib-level=9',
                                    '-o', packagePath],
                                   stdout=subprocess.PIPE)
        process.communicate()

        mutex.acquire()
        print('Created portable package:')
        self.printPackageInfo(packagePath)
        mutex.release()
Example #40
0
 def download(self):
     self._s3c.download_from_s3(self.s3_bucket, self.s3_key,
                                self.local_file())
     os.sync()  # ensure flush to disk
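os.sync() asks the kernel to flush every filesystem buffer on the machine. When only the one freshly downloaded file needs to reach disk, a narrower sketch (standard library only; the path handling is illustrative) is to fsync that file's own descriptor:

import os

def write_durably(path, data):
    # Write data and push it all the way to the storage device before returning.
    with open(path, 'wb') as f:
        f.write(data)
        f.flush()             # flush Python's userspace buffer into the OS
        os.fsync(f.fileno())  # flush the OS page cache for this file to the device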
Example #41
0
 def _sync_local_filesystem(self):
     if hasattr(os, "sync"):
         os.sync()
Example #42
0
def install_patch():
    """
    Function to certain distros which uses makeboot.sh script for making bootable usb disk.
    This is required to make sure that same version (32/64 bit) of modules present is the isolinux directory
    :return:
    """
    if config.distro == 'debian':
        if platform.system(
        ) == 'Linux':  # Need to syn under Linux. Otherwise, USB disk becomes random read only.
            os.sync()
        iso_cfg_ext_dir = os.path.join(multibootusb_host_dir(),
                                       "iso_cfg_ext_dir")
        isolinux_path = os.path.join(iso_cfg_ext_dir,
                                     isolinux_bin_path(config.image_path))
        iso_linux_bin_dir = isolinux_bin_dir(config.image_path)
        config.syslinux_version = isolinux_version(isolinux_path)
        iso_file_list = iso.iso_file_list(config.image_path)
        os.path.join(config.usb_mount, "multibootusb",
                     iso_basename(config.image_path),
                     isolinux_bin_dir(config.image_path))
        if any("makeboot.sh" in s.lower() for s in iso_file_list):
            for module in os.listdir(
                    os.path.join(config.usb_mount, "multibootusb",
                                 iso_basename(config.image_path),
                                 isolinux_bin_dir(config.image_path))):
                if module.endswith(".c32"):
                    if os.path.exists(
                            os.path.join(config.usb_mount, "multibootusb",
                                         iso_basename(config.image_path),
                                         isolinux_bin_dir(config.image_path),
                                         module)):
                        try:
                            os.remove(
                                os.path.join(
                                    config.usb_mount, "multibootusb",
                                    iso_basename(config.image_path),
                                    isolinux_bin_dir(config.image_path),
                                    module))
                            log("Copying " + module)
                            log((resource_path(
                                os.path.join(multibootusb_host_dir(),
                                             "syslinux", "modules",
                                             config.syslinux_version, module)),
                                 os.path.join(
                                     config.usb_mount, "multibootusb",
                                     iso_basename(config.image_path),
                                     isolinux_bin_dir(config.image_path),
                                     module)))
                            shutil.copy(
                                resource_path(
                                    os.path.join(multibootusb_host_dir(),
                                                 "syslinux", "modules",
                                                 config.syslinux_version,
                                                 module)),
                                os.path.join(
                                    config.usb_mount, "multibootusb",
                                    iso_basename(config.image_path),
                                    isolinux_bin_dir(config.image_path),
                                    module))
                        except Exception as err:
                            log(err)
                            log("Could not copy " + module)
        else:
            log('Patch not required...')
Example #43
0
File: manager.py Project: sarvex/openpilot
def manager_thread():
    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})

    # save boot log
    subprocess.call("./bootlog",
                    cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))

    params = Params()

    ignore = []
    if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID:
        ignore += ["manage_athenad", "uploader"]
    if os.getenv("NOBOARD") is not None:
        ignore.append("pandad")
    if os.getenv("BLOCK") is not None:
        ignore += os.getenv("BLOCK").split(",")

    ensure_running(managed_processes.values(), started=False, not_run=ignore)

    started_prev = False
    sm = messaging.SubMaster(['deviceState'])
    pm = messaging.PubMaster(['managerState'])

    while True:
        sm.update()
        not_run = ignore[:]

        if sm['deviceState'].freeSpacePercent < 5:
            not_run.append("loggerd")

        started = sm['deviceState'].started
        driverview = params.get_bool("IsDriverViewEnabled")
        ensure_running(managed_processes.values(), started, driverview,
                       not_run)

        # trigger an update after going offroad
        if started_prev and not started and 'updated' in managed_processes:
            os.sync()
            managed_processes['updated'].signal(signal.SIGHUP)

        started_prev = started

        running_list = [
            "%s%s\u001b[0m" %
            ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
            for p in managed_processes.values() if p.proc
        ]
        cloudlog.debug(' '.join(running_list))

        # send managerState
        msg = messaging.new_message('managerState')
        msg.managerState.processes = [
            p.get_process_state_msg() for p in managed_processes.values()
        ]
        pm.send('managerState', msg)

        # TODO: let UI handle this
        # Exit main loop when uninstall is needed
        if params.get_bool("DoUninstall"):
            break
Example #44
0
    #         util.upload_forecast_batch(conn, json_io_dict_batch, forecast_filename_batch, project_name, model_name, timezero_date_batch, overwrite = over_write)
    #     except Exception as ex:
    #         return ex
    return "Pass"


# Example Run: python3 ./code/zoltar_scripts/upload_covid19_forecasts_to_zoltar.py
if __name__ == '__main__':
    list_of_model_directories = os.listdir('./data-processed/')
    output_errors = {}
    for directory in list_of_model_directories:
        if "." in directory:
            continue
        output = upload_covid_all_forecasts(
            './data-processed/' + directory + '/', directory)
        if output != "Pass":
            output_errors[directory] = output

    with open('./code/zoltar_scripts/validated_file_db.json', 'w') as fw:
        json.dump(db, fw, indent=4)

    # List all files that did not get uploaded and their errors
    if len(output_errors) > 0:
        for directory, errors in output_errors.items():
            print("\n* ERROR IN '", directory, "'")
            print(errors)
        os.sync()  # make sure we flush before exiting
        sys.exit("\n ERRORS FOUND EXITING BUILD...")
    else:
        print("✓ no errors")
Example #45
0
def train(variable_value_dict=None):

    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')
    assert not (FLAGS.is_training and FLAGS.cost_saturation)

    #Chong added
    print_training_parameters()

    #Chong added
    #get the initial value dict, will be used by assign_from_feed_dict function
    if variable_value_dict is None:
        if not mvm.is_empty():
            #need to change to feed_dict of masking variables?
            raise NotImplementedError
            variable_value_dict = mvm.get_variable_name_to_initial_value_dict()

    #Chong added
    #if user didn't request to resume an ongoing training stored in train_dir
    if FLAGS.resume_training_from_train_dir is not True or FLAGS.K_heuristic is not None:
        #clear the training folder
        import shutil
        shutil.rmtree(FLAGS.train_dir, ignore_errors=True)
        os.sync()

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        ######################
        # Config model_deploy#
        ######################
        deploy_config = model_deploy.DeploymentConfig(
            num_clones=FLAGS.num_clones,
            clone_on_cpu=FLAGS.clone_on_cpu,
            replica_id=FLAGS.task,
            num_replicas=FLAGS.worker_replicas,
            num_ps_tasks=FLAGS.num_ps_tasks)

        # Create global_step
        with tf.device(deploy_config.variables_device()):
            global_step = tf.train.get_or_create_global_step()

        ######################
        # Select the dataset #
        ######################
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                              FLAGS.dataset_split_name,
                                              FLAGS.dataset_dir)

        ####################
        # Select the network #
        ####################
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            weight_decay=FLAGS.weight_decay,
            is_training=True)

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name, is_training=True)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        with tf.device(deploy_config.inputs_device()):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                shuffle=True,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * FLAGS.batch_size,
                common_queue_min=10 * FLAGS.batch_size)
            [image, label] = provider.get(['image', 'label'])
            label -= FLAGS.labels_offset

            train_image_size = FLAGS.train_image_size or network_fn.default_image_size

            image = image_preprocessing_fn(image, train_image_size,
                                           train_image_size)

            images, labels = tf.train.batch(
                [image, label],
                batch_size=FLAGS.batch_size,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=5 * FLAGS.batch_size)

            labels = slim.one_hot_encoding(
                labels, dataset.num_classes - FLAGS.labels_offset)
            batch_queue = slim.prefetch_queue.prefetch_queue(
                [images, labels], capacity=2 * deploy_config.num_clones)

        ####################
        # Define the model #
        ####################
        def clone_fn(batch_queue):
            """Allows data parallelism by creating multiple clones of network_fn."""
            with tf.device(deploy_config.inputs_device()):
                images, labels = batch_queue.dequeue()
            logits, end_points = network_fn(images)

            #############################
            # Specify the loss function #
            #############################
            if 'AuxLogits' in end_points:
                tf.losses.softmax_cross_entropy(
                    logits=end_points['AuxLogits'],
                    onehot_labels=labels,
                    label_smoothing=FLAGS.label_smoothing,
                    weights=0.4,
                    scope='aux_loss')
            tf.losses.softmax_cross_entropy(
                logits=logits,
                onehot_labels=labels,
                label_smoothing=FLAGS.label_smoothing,
                weights=1.0)
            return end_points

        # Gather initial summaries.
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        clones = model_deploy.create_clones(deploy_config, clone_fn,
                                            [batch_queue])
        first_clone_scope = deploy_config.clone_scope(0)
        # Gather update_ops from the first clone. These contain, for example,
        # the updates for the batch_norm variables created by network_fn.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                       first_clone_scope)

        # Add summaries for end_points.
        end_points = clones[0].outputs
        for end_point in end_points:
            x = end_points[end_point]
            #summaries.add(tf.summary.histogram('activations/' + end_point, x))
            #summaries.add(tf.summary.scalar('sparsity/' + end_point, tf.nn.zero_fraction(x)))

        # Add summaries for losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
            summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))

        # Add summaries for variables.
        #for variable in slim.get_model_variables():
        #summaries.add(tf.summary.histogram(variable.op.name, variable))

        #################################
        # Configure the moving averages #
        #################################
        if FLAGS.moving_average_decay:
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
        else:
            moving_average_variables, variable_averages = None, None

        #########################################
        # Configure the optimization procedure. #
        #########################################
        with tf.device(deploy_config.optimizer_device()):
            learning_rate = _configure_learning_rate(dataset.num_samples,
                                                     global_step)
            optimizer = _configure_optimizer(
                learning_rate=(FLAGS.learning_rate if FLAGS.optimizer ==
                               'yellowfin' else learning_rate))
            summaries.add(tf.summary.scalar('learning_rate', learning_rate))

        if FLAGS.sync_replicas:
            # If sync_replicas is enabled, the averaging will be done in the chief
            # queue runner.
            optimizer = tf.train.SyncReplicasOptimizer(
                opt=optimizer,
                replicas_to_aggregate=FLAGS.replicas_to_aggregate,
                variable_averages=variable_averages,
                variables_to_average=moving_average_variables,
                replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
                total_num_replicas=FLAGS.worker_replicas)
        elif FLAGS.moving_average_decay:
            # Update ops executed locally by trainer.
            update_ops.append(
                variable_averages.apply(moving_average_variables))

        # Variables to train.
        variables_to_train = _get_variables_to_train()

        #  and returns a train_tensor and summary_op
        total_loss, clones_gradients = model_deploy.optimize_clones(
            clones, optimizer, var_list=variables_to_train)
        # Add total_loss to summary.
        summaries.add(tf.summary.scalar('total_loss', total_loss))

        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(clones_gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)

        update_op = tf.group(*update_ops)
        with tf.control_dependencies([update_op]):
            train_tensor = tf.identity(total_loss, name='train_op')

        # Add the summaries from the first clone. These contain the summaries
        # created by model_fn and either optimize_clones() or _gather_clone_loss().
        summaries |= set(
            tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))

        # Merge all summaries together.
        summary_op = tf.summary.merge(list(summaries), name='summary_op')

        #session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
        session_config = tf.ConfigProto(allow_soft_placement=True)

        #set saver parameters
        saver = tf_saver.Saver(max_to_keep=2, write_version=2)

        ###########################
        # Kicks off the training. #
        ###########################
        slim.learning.train(
            #my_slim_learning.train(
            train_tensor,
            logdir=FLAGS.train_dir,
            master=FLAGS.master,
            is_chief=(FLAGS.task == 0),
            #Chong added
            #init_op = assign_ops,
            #init_feed_dict = assign_feed_dict,
            saver=saver,
            #Chong added end
            init_fn=_get_init_fn(variable_value_dict),
            summary_op=summary_op,
            number_of_steps=FLAGS.max_number_of_steps,
            log_every_n_steps=FLAGS.log_every_n_steps,
            save_summaries_secs=FLAGS.save_summaries_secs,
            save_interval_secs=FLAGS.save_interval_secs,
            session_config=session_config,
            sync_optimizer=optimizer if FLAGS.sync_replicas else None)
Example #46
0
def set_up(ctx):
    cfg = ctx.config()
    lower_mntroot = cfg.lower_mntroot()
    lowerdir = cfg.lowerdir()
    lowerimg = cfg.lowerimg()
    testdir = cfg.testdir()

    os.sync()

    if cfg.testing_none():
        try:
            while system("grep -q 'lower_layer " + cfg.union_mntroot() +
                         "' /proc/mounts" + " && umount " +
                         cfg.union_mntroot()):
                pass
        except RuntimeError:
            pass

        try:
            while system("grep -q 'lower_layer " + lower_mntroot +
                         "' /proc/mounts" + " && umount " + lower_mntroot):
                pass
        except RuntimeError:
            pass

    if cfg.testing_overlayfs():
        try:
            while system("grep -q 'overlay " + cfg.union_mntroot() +
                         "' /proc/mounts" + " && umount " +
                         cfg.union_mntroot()):
                pass
        except RuntimeError:
            pass

        try:
            while system("grep -q 'lower_layer " + cfg.base_mntroot() +
                         "' /proc/mounts" + " && umount " +
                         cfg.base_mntroot()):
                pass
        except RuntimeError:
            pass

        try:
            while system("grep -q 'lower_layer " + lower_mntroot +
                         "' /proc/mounts" + " && umount " + lower_mntroot):
                pass
        except RuntimeError:
            pass

        try:
            # grep filter to catch <lower|upper|N>_layer, in case upper and lower are on same fs
            # and in case different layers are on different fs
            while system("grep -q '_layer " + cfg.upper_mntroot() +
                         "/' /proc/mounts" + " && umount " +
                         cfg.upper_mntroot() + "/* 2>/dev/null"):
                pass
        except RuntimeError:
            pass

        try:
            # grep filter to catch <low|upp>er_layer, in case upper and lower are on same fs
            while system("grep -q 'er_layer " + cfg.upper_mntroot() +
                         "' /proc/mounts" + " && umount " +
                         cfg.upper_mntroot()):
                pass
        except RuntimeError:
            pass

    if cfg.is_samefs() and cfg.testing_overlayfs():
        # Create base fs for both lower and upper
        base_mntroot = cfg.base_mntroot()
        system("mount " + base_mntroot + " 2>/dev/null"
               " || mount -t tmpfs lower_layer " + base_mntroot)
        system("mount --make-private " + base_mntroot)
        try:
            os.mkdir(base_mntroot + lower_mntroot)
        except OSError:
            pass
        system("mount -o bind " + base_mntroot + lower_mntroot + " " +
               lower_mntroot)
    else:
        # Create a lower layer to union over
        system("mount " + lower_mntroot + " 2>/dev/null"
               " || mount -t tmpfs lower_layer " + lower_mntroot)

    # Systemd has weird ideas about things
    system("mount --make-private " + lower_mntroot)

    #
    # Create a few test files we can use in the lower layer
    #
    try:
        os.mkdir(lowerdir)
    except OSError:
        system("rm -rf " + lowerdir)
        os.mkdir(lowerdir)

    pieces = testdir.split("/")
    del pieces[0]
    path = ""
    for i in pieces:
        path += "/" + i
        ctx.record_file(path, "d")
    ctx.set_cwd(testdir)

    for i in range(100, 130):
        si = str(i)

        # Under the test directory, we create a bunch of regular files
        # containing data called foo100 to foo129:
        create_file(lowerdir + "/foo" + si, ":xxx:yyy:zzz")
        rec = ctx.record_file("foo" + si, "r")

        # Then we create a bunch of direct symlinks to those files
        to = "../a/foo" + si
        os.symlink(to, lowerdir + "/direct_sym" + si)
        rec = ctx.record_file("direct_sym" + si, "s", to, rec)

        # Then we create a bunch of indirect symlinks to those files
        to = "direct_sym" + si
        os.symlink(to, lowerdir + "/indirect_sym" + si)
        ctx.record_file("indirect_sym" + si, "s", to, rec)

        # Then we create a bunch of symlinks that don't point to extant files
        to = "no_foo" + si
        os.symlink(to, lowerdir + "/pointless" + si)
        rec = ctx.record_file("no_foo" + si, None)
        ctx.record_file("pointless" + si, "s", to, rec)

        # We create a bunch of directories, each with an empty file
        # and a populated subdir
        os.mkdir(lowerdir + "/dir" + si)
        rec = ctx.record_file("dir" + si, "d")
        create_file(lowerdir + "/dir" + si + "/a", "")
        ctx.record_file("dir" + si + "/a", "f")

        os.mkdir(lowerdir + "/dir" + si + "/pop")
        ctx.record_file("dir" + si + "/pop", "d")
        create_file(lowerdir + "/dir" + si + "/pop/b", ":aaa:bbb:ccc")
        ctx.record_file("dir" + si + "/pop/b", "f")
        os.mkdir(lowerdir + "/dir" + si + "/pop/c")
        ctx.record_file("dir" + si + "/pop/c", "d")

        # And add direct and indirect symlinks to those
        to = "../a/dir" + si
        os.symlink(to, lowerdir + "/direct_dir_sym" + si)
        rec = ctx.record_file("direct_dir_sym" + si, "s", to, rec)
        #ctx.record_file("direct_dir_sym" + si + "/a", "f")

        to = "direct_dir_sym" + si
        os.symlink(to, lowerdir + "/indirect_dir_sym" + si)
        ctx.record_file("indirect_dir_sym" + si, "s", to, rec)
        #ctx.record_file("indirect_dir_sym" + si + "/a", "f")

        # And a bunch of empty directories
        os.mkdir(lowerdir + "/empty" + si)
        ctx.record_file("empty" + si, "d")

        # Everything above is then owned by the bin user
        for f in ["foo", "direct_sym", "indirect_sym", "pointless"]:
            os.lchown(lowerdir + "/" + f + si, 1, 1)

        # Create some root-owned regular files also
        create_file(lowerdir + "/rootfile" + si, ":xxx:yyy:zzz")
        ctx.record_file("rootfile" + si, "r")

        # Non-existent dir
        ctx.record_file("no_dir" + si, None)

    if cfg.is_squashfs():
        system("mksquashfs " + lowerdir + " " + lowerimg +
               " -keep-as-directory > /dev/null")
        system("mount -o loop,ro " + lowerimg + " " + lower_mntroot)
        system("mount --make-private " + lower_mntroot)
    else:
        # The mount has to be read-only for us to make use of it
        system("mount -o remount,ro " + lower_mntroot)
    ctx.note_lower_fs(lowerdir)
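For context, the union that the tests later build on top of this prepared lower layer is an overlayfs mount of roughly this shape (a sketch with illustrative paths, reusing the same system() helper as the fixture above):

system("mount -t overlay overlay"
       " -o lowerdir=/lower,upperdir=/upper/rw,workdir=/upper/work"
       " /mnt/union")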
Example #47
0
def manager_thread():

    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})

    # save boot log
    subprocess.call("./bootlog",
                    cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))

    # start daemon processes
    for p in daemon_processes:
        start_daemon_process(p)

    # start persistent processes
    for p in persistent_processes:
        start_managed_process(p)

    # start offroad
    if EON:
        pm_apply_packages('enable')
        start_offroad()

    if os.getenv("NOBOARD") is not None:
        del managed_processes["pandad"]

    if os.getenv("BLOCK") is not None:
        for k in os.getenv("BLOCK").split(","):
            del managed_processes[k]

    started_prev = False
    logger_dead = False
    params = Params()
    thermal_sock = messaging.sub_sock('thermal')
    pm = messaging.PubMaster(['managerState'])

    while 1:
        msg = messaging.recv_sock(thermal_sock, wait=True)

        if msg.thermal.freeSpacePercent < 0.05:
            logger_dead = True

        if msg.thermal.started:
            for p in car_started_processes:
                if p == "loggerd" and logger_dead:
                    kill_managed_process(p)
                else:
                    start_managed_process(p)
        else:
            logger_dead = False
            driver_view = params.get("IsDriverViewEnabled") == b"1"

            # TODO: refactor how manager manages processes
            for p in reversed(car_started_processes):
                if p not in driver_view_processes or not driver_view:
                    kill_managed_process(p)

            for p in driver_view_processes:
                if driver_view:
                    start_managed_process(p)
                else:
                    kill_managed_process(p)

            # trigger an update after going offroad
            if started_prev:
                os.sync()
                send_managed_process_signal("updated", signal.SIGHUP)

        started_prev = msg.thermal.started

        # check the status of all processes, did any of them die?
        running_list = [
            "%s%s\u001b[0m" %
            ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p)
            for p in running
        ]
        cloudlog.debug(' '.join(running_list))

        # send managerState
        states = []
        for p in managed_processes:
            state = log.ManagerState.ProcessState.new_message()
            state.name = p
            if p in running:
                state.running = running[p].is_alive()
                state.pid = running[p].pid
                state.exitCode = running[p].exitcode or 0
            states.append(state)
        msg = messaging.new_message('managerState')
        msg.managerState.processes = states
        pm.send('managerState', msg)

        # Exit main loop when uninstall is needed
        if params.get("DoUninstall", encoding='utf8') == "1":
            break
Example #48
0
 def sync(self) -> None:
     os.sync()
Example #49
0
    def resize(self, new_size):
        logging.info('Start resizing image \'' + self.name + '\'')

        # Image must be unmounted to resize it
        if self.is_mounted:
            raise ValueError('Cannot resize a mounted livenet image')

        # Check livenet size
        if not isinstance(
                new_size,
                str) or int(new_size) < LivenetImage.MIN_LIVENET_SIZE or int(
                    new_size) > LivenetImage.MAX_LIVENET_SIZE:
            raise ValueError('Invalid livenet size')

        # Create useful directories for the resize
        logging.debug('Executing \'mkdir -p ' + self.MOUNT_DIRECTORY +
                      'mnt_copy\'')
        os.makedirs(self.MOUNT_DIRECTORY + 'mnt_copy')

        logging.debug('Executing \'mkdir -p ' + self.MOUNT_DIRECTORY + 'mnt\'')
        os.makedirs(self.MOUNT_DIRECTORY + 'mnt')

        logging.debug('Executing \'mkdir -p ' + self.WORKING_DIRECTORY +
                      'current\'')
        os.makedirs(self.WORKING_DIRECTORY + 'current')

        logging.debug('Executing \'mkdir -p ' + self.WORKING_DIRECTORY +
                      'copy/squashfs-root/LiveOS/\'')
        os.makedirs(self.WORKING_DIRECTORY + 'copy/squashfs-root/LiveOS/')

        # Create a new rootfs image with the new size
        logging.debug('Executing \'dd if=/dev/zero of=' +
                      self.WORKING_DIRECTORY +
                      'copy/squashfs-root/LiveOS/rootfs.img bs=1M count=' +
                      new_size + '\'')
        os.system('dd if=/dev/zero of=' + self.WORKING_DIRECTORY +
                  'copy/squashfs-root/LiveOS/rootfs.img bs=1M count=' +
                  new_size)

        # Format the fresh rootfs.img into an xfs system
        logging.debug('Executing \'mkfs.xfs ' + self.WORKING_DIRECTORY +
                      'copy/squashfs-root/LiveOS/rootfs.img\'')
        os.system('mkfs.xfs ' + self.WORKING_DIRECTORY +
                  'copy/squashfs-root/LiveOS/rootfs.img')

        # Mount the new rootfs.img on its mount directory
        logging.debug('Executing \'mount ' + self.WORKING_DIRECTORY +
                      'copy/squashfs-root/LiveOS/rootfs.img ' +
                      self.MOUNT_DIRECTORY + 'mnt_copy/\'')
        os.system('mount ' + self.WORKING_DIRECTORY +
                  'copy/squashfs-root/LiveOS/rootfs.img ' +
                  self.MOUNT_DIRECTORY + 'mnt_copy/')

        # Unsquash current image
        logging.debug('Executing \'unsquashfs -d ' + self.WORKING_DIRECTORY +
                      'current/squashfs-root ' + self.IMAGE_DIRECTORY +
                      'squashfs.img\'')
        os.system('unsquashfs -d ' + self.WORKING_DIRECTORY +
                  'current/squashfs-root ' + self.IMAGE_DIRECTORY +
                  'squashfs.img')

        # Mount the current rootfs.img on its mount directory
        logging.debug('Executing \'mount ' + self.WORKING_DIRECTORY +
                      'current/squashfs-root/LiveOS/rootfs.img ' +
                      self.MOUNT_DIRECTORY + 'mnt\'')
        os.system('mount ' + self.WORKING_DIRECTORY +
                  'current/squashfs-root/LiveOS/rootfs.img ' +
                  self.MOUNT_DIRECTORY + 'mnt')

        # Create image.xfsdump from current image
        logging.debug('Executing \'xfsdump -l 0 -L ' + self.name +
                      ' -M media -f ' + self.WORKING_DIRECTORY +
                      '/current/image.xfsdump ' + self.MOUNT_DIRECTORY +
                      'mnt\'')
        os.system('xfsdump -l 0 -L ' + self.name + ' -M media -f ' +
                  self.WORKING_DIRECTORY + '/current/image.xfsdump ' +
                  self.MOUNT_DIRECTORY + 'mnt')

        # Restore into the newly sized rootfs.img mount
        logging.debug('Executing \'xfsrestore -f ' + self.WORKING_DIRECTORY +
                      'current/image.xfsdump ' + self.MOUNT_DIRECTORY +
                      'mnt_copy\'')
        os.system('xfsrestore -f ' + self.WORKING_DIRECTORY +
                  'current/image.xfsdump ' + self.MOUNT_DIRECTORY + 'mnt_copy')
        os.sync()

        # Umount mnt and mnt_copy
        logging.debug('Executing \'umount ' + self.MOUNT_DIRECTORY + '*\'')
        os.system('umount ' + self.MOUNT_DIRECTORY + '*')

        logging.debug('Executing \'rm -rf ' + self.MOUNT_DIRECTORY + '\'')
        shutil.rmtree(self.MOUNT_DIRECTORY)

        # Remove old squashfs
        logging.debug('Executing \'rm -f ' + self.IMAGE_DIRECTORY +
                      'squashfs.img\'')
        os.remove(self.IMAGE_DIRECTORY + 'squashfs.img')

        # Generate the new squashfs
        logging.debug('Executing \'mksquashfs ' + self.WORKING_DIRECTORY +
                      'copy/squashfs-root/ ' + self.IMAGE_DIRECTORY +
                      'squashfs.img\'')
        os.system('mksquashfs ' + self.WORKING_DIRECTORY +
                  'copy/squashfs-root/ ' + self.IMAGE_DIRECTORY +
                  'squashfs.img')

        logging.debug('Executing \'rm -rf ' + self.WORKING_DIRECTORY + '\'')
        shutil.rmtree(self.WORKING_DIRECTORY)

        # Update image size attribute value
        self.livenet_size = new_size
        self.register_image()

        logging.info('Image has been resized to ' + new_size + ' MB')
Example #50
0
File: app.py Project: jimc13/Mk4-Apps-1
def write_launch_file(app, file = "once.txt"):
    with open(file, "wt") as file:
        file.write(app)
        file.flush()
    os.sync()
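A usage sketch (the app path is illustrative); on MicroPython ports that provide os.sync(), the call above is what commits the new launch file to flash before the device resets:

write_launch_file("my_app/main.py")  # run this app once on the next boot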
Example #51
0
def os_sync():
    """Ensure flush to disk"""
    if not ON_WINDOWS:
        os.sync()
Example #52
0
 def uninstall(self):
     Path("/data/__system_reset__").touch()
     os.sync()
     self.reboot()
Example #53
0
    def install_vyos(self, payload):
        try:
            install_config = load(payload)
        except Exception as err:
            logger.error("Unable to load YAML file: {}".format(err))
            return
        try:
            # enable output to the stderr
            from logging import StreamHandler
            logger.addHandler(StreamHandler())

            # define all variables
            vyos_version = get_version()
            if vyos_version.startswith('1.2'):
                vyos_generation = '1.2'
                rootfspath = '/lib/live/mount/medium/live/filesystem.squashfs'
            else:
                vyos_generation = 'other'
                rootfspath = '/usr/lib/live/mount/medium/live/filesystem.squashfs'
            install_drive = install_config['install_drive']
            partition_size = install_config.get('partition_size', '')
            root_drive = '/mnt/wroot'
            root_read = '/mnt/squashfs'
            root_install = '/mnt/inst_root'
            dir_rw = '{}/boot/{}/rw'.format(root_drive, vyos_version)
            dir_work = '{}/boot/{}/work'.format(root_drive, vyos_version)

            # find and prepare drive and partition
            if install_drive == 'auto':
                regex_lsblk = re.compile(r'^(?P<dev_name>\w+) +(?P<dev_size>\d+) +(?P<dev_type>\w+)( +(?P<part_type>[\w-]+))?$')
                drive_list = self.run_command('lsblk --bytes --nodeps --list --noheadings --output KNAME,SIZE,TYPE')
                for device_line in drive_list.splitlines():
                    found = regex_lsblk.search(device_line)
                    if found:
                        if int(found.group('dev_size')) > 2000000000 and found.group('dev_type') == 'disk':
                            # check if a device is not mounted
                            regex_mount = re.compile(r'^(?P<dev_name>\w+) +(?P<mount_point>/.*)$', re.MULTILINE)
                            drive_details = self.run_command('lsblk --list --noheadings --output KNAME,MOUNTPOINT /dev/{}'.format(found.group('dev_name')))
                            mount_details = regex_mount.search(drive_details)
                            if mount_details:
                                logger.debug("Skipping {}, {} is mounted on {}".format(found.group('dev_name'), mount_details.group('dev_name'), mount_details.group('mount_point')))
                            else:
                                install_drive = '/dev/{}'.format(found.group('dev_name'))
                                break
            if install_drive == 'auto':
                logger.error("No suitable drive found for installation")
                return
            logger.debug("Installing to drive: {}".format(install_drive))

            # Detect system type
            if Path('/sys/firmware/efi/').exists():
                sys_type = 'EFI'
                grub_efi = '--force-extra-removable --efi-directory=/boot/efi --bootloader-id=VyOS --no-uefi-secure-boot '
            else:
                sys_type = 'Non-EFI'
                grub_efi = ''
            logger.debug("Detected {} system".format(sys_type))

            # Create partitions
            logger.debug('Clearing current partitions table on {}'.format(install_drive))
            with open(install_drive, 'w+b') as drive:
                drive.seek(0)
                drive.write(b'0' * 17408)
                drive.seek(-17408, 2)
                drive.write(b'0' * 17408)
            sync()
            self.run_command('partprobe {}'.format(install_drive))

            if vyos_generation == '1.2':
                if sys_type == 'EFI':
                    logger.debug('Creating EFI-compatible partitions table on {}'.format(install_drive))
                    self.run_command('sgdisk -n 1:1M:100M {}'.format(install_drive))
                    self.run_command('sgdisk -t 1:EF00 {}'.format(install_drive))
                    self.run_command('sgdisk -n 2::{} {}'.format(partition_size, install_drive))
                else:
                    logger.debug('Creating BIOS-compatible partitions table on {}'.format(install_drive))
                    disk_parts = '3,{},L,*\n'.format(partition_size)
                    self.run_command('sfdisk -q {}'.format(install_drive), disk_parts)
            else:
                if sys_type == 'EFI':
                    logger.debug('Creating EFI-compatible partitions table on {}'.format(install_drive))
                    disk_parts = 'label: gpt\n,100M,U,*\n,{},L'.format(partition_size)
                else:
                    logger.debug('Creating BIOS-compatible partitions table on {}'.format(install_drive))
                    disk_parts = 'label: dos\n,{},L,*\n'.format(partition_size)
                self.run_command('sfdisk -q -w always -W always {}'.format(install_drive), disk_parts)
            # update partitions in kernel
            self.run_command('partprobe {}'.format(install_drive))

            partitions_list = self.run_command('fdisk -l {}'.format(install_drive))
            regex_fdisk = re.compile(r'^(?P<dev_name>/dev/\w+) +(\* +)?(\d+ +)+([\d\.]+\D) +(\d+ +)?(?P<part_type>\w+).*$')
            for partition_line in partitions_list.splitlines():
                found = regex_fdisk.search(partition_line)
                if found:
                    if found.group('part_type') == 'Linux':
                        root_partition = '{}'.format(found.group('dev_name'))
                        logger.debug("Using partition for root: {}".format(root_partition))
                        self.run_command('mkfs -t ext4 -L persistence {}'.format(root_partition))
                    if found.group('part_type') == 'EFI':
                        efi_partition = '{}'.format(found.group('dev_name'))
                        logger.debug("Using partition for EFI: {}".format(efi_partition))
                        self.run_command('mkfs -t fat -n EFI {}'.format(efi_partition))

            # creating directories
            for dir in [root_drive, root_read, root_install]:
                dirpath = Path(dir)
                logger.debug("Creating directory: {}".format(dir))
                dirpath.mkdir(mode=0o755, parents=True)
            # mounting root drive
            logger.debug("Mounting root drive: {}".format(root_drive))
            self.run_command('mount {} {}'.format(root_partition, root_drive))
            for dir in [dir_rw, dir_work]:
                dirpath = Path(dir)
                logger.debug("Creating directory: {}".format(dir))
                dirpath.mkdir(mode=0o755, parents=True)
            if sys_type == 'EFI':
                Path('/boot/efi').mkdir(mode=0o755, parents=True)
                self.run_command('mount {} {}'.format(efi_partition, '/boot/efi'))
            # copy rootfs
            logger.debug("Copying rootfs: {}/boot/{}/{}.squashfs".format(root_drive, vyos_version, vyos_version))
            self.run_command('cp -p {} {}/boot/{}/{}.squashfs'.format(rootfspath, root_drive, vyos_version, vyos_version))
            # get list of other files for boot and copy to the installation boot directory
            boot_files = self.run_command('find /boot -maxdepth 1 -type f -o -type l')
            for file in boot_files.splitlines():
                logger.debug("Copying file: {}".format(file))
                self.run_command('cp -dp {} {}/boot/{}/'.format(file, root_drive, vyos_version))
            # write persistence.conf
            logger.debug("Writing {}/persistence.conf".format(root_drive))
            self.write_file('{}/persistence.conf'.format(root_drive), '/ union\n')
            # mount new rootfs
            logger.debug("Mounting read-only rootfs: {}".format(root_read))
            self.run_command('mount -o loop,ro -t squashfs {}/boot/{}/{}.squashfs {}'.format(root_drive, vyos_version, vyos_version, root_read))
            logger.debug("Mounting overlay rootfs: {}".format(root_install))
            self.run_command('mount -t overlay -o noatime,upperdir={},lowerdir={},workdir={} overlay {}'.format(dir_rw, root_read, dir_work, root_install))
            # copy configuration
            logger.debug("Copying configuration to: {}/opt/vyatta/etc/config/config.boot".format(root_install))
            self.run_command('cp -p /opt/vyatta/etc/config/config.boot {}/opt/vyatta/etc/config/config.boot'.format(root_install))
            logger.debug("Copying .vyatta_config to: {}/opt/vyatta/etc/config/.vyatta_config".format(root_install))
            self.run_command('cp -p /opt/vyatta/etc/config/.vyatta_config {}/opt/vyatta/etc/config/.vyatta_config'.format(root_install))
            # install grub
            logger.debug("Installing GRUB to {}".format(install_drive))
            self.run_command('grub-install --no-floppy --recheck --root-directory={} {}{}'.format(root_drive, grub_efi, install_drive))
            # configure GRUB
            logger.debug("Configuring GRUB")
            self.run_command("/opt/vyatta/sbin/vyatta-grub-setup -u {} {} '' {}".format(vyos_version, root_partition, root_drive))
            # disable cloud-init if required
            if install_config.get('cloud_init_disable', False) is True:
                logger.info("Disabling Cloud-init")
                self.write_file('{}/etc/cloud/cloud-init.disabled'.format(root_install), 'Disabled by VyOS installer (requested by user)\n')
            # unmount all fs
            for dir in [root_install, root_read, root_drive]:
                logger.debug("Unmounting: {}".format(dir))
                self.run_command('umount {}'.format(dir))
            # reboot the system if this was requested by config
            if install_config.get('after_install', None) == 'reboot':
                logger.info("Rebooting host")
                self.run_command('systemctl reboot', disconnect=True)
            if install_config.get('after_install', None) == 'poweroff':
                logger.info("Powering off host")
                self.run_command('systemctl poweroff', disconnect=True)

        except Exception as err:
            logger.error("Unable to install VyOS: {}".format(err))
            return
예제 #54
0
def main():
    if len(sys.argv) != 3:
        print("Please specify a serial port and filename")
        exit(1)
    serial_port = sys.argv[1]
    filename = sys.argv[2]
    print("Watching {filename} and uploading board at {serial_port}".format(
        filename=filename, serial_port=serial_port))
    with serial.Serial(serial_port, 115200, timeout=1) as ser:
        ser_text = io.TextIOWrapper(ser, newline='\n')
        continue_reading = True
        assume_in_prompt = False
        while continue_reading:
            line = ser_text.readline().strip()
            print(">>>> {}".format(line))
            #if 'Hit any key to stop autoboot:' in line:
            if 'FEC [PRIME], usb_ether' in line:
                print("Writing control characters")
                for i in range(5):
                    ser.write(b'\r\n')
                    time.sleep(0.05)
                assume_in_prompt = True
            if '=>' in line or assume_in_prompt:
                print("found prompt")
                ser.write(b'\r\n')

                premount_paths = [
                    partition.mountpoint
                    for partition in psutil.disk_partitions()
                ]

                print("Entering USB Mass Storage Mode")
                print(ser_text.readline())
                ser.write(b'ums 0 mmc 0\r\n')
                print(ser_text.readline())
                # Find USB Device
                target_partition = None

                for _ in range(3):
                    postmount_paths = [
                        partition.mountpoint
                        for partition in psutil.disk_partitions()
                    ]
                    # mount points that appeared after entering UMS mode
                    new_paths = list(
                        set(postmount_paths) - set(premount_paths))

                    for partition in new_paths:
                        for node in os.listdir(partition):
                            if node.endswith('-m4.dtb'):
                                target_partition = partition
                                break
                        if target_partition is not None:
                            break
                    if target_partition is not None:
                        break

                    time.sleep(500)  # Wait for udev to mount this thing

                if target_partition is None:
                    print("Unable to find a target partition")
                    exit(1)
                print("Copying artifact to target partition {}".format(
                    target_partition))

                # Copy files
                basename = os.path.basename(filename)
                shutil.copy(filename, os.path.join(target_partition, basename))
                os.sync()
                os.sync()

                print("Copy complete; exiting UMS")
                ser.write(b'\x03')  # CTRL+C

                print("Requesting system boot...")
                ser.write(b'boot\r\n')
                # stop the read loop; the 'with' block closes the port on exit
                continue_reading = False
예제 #55
0
def _set_labs_auth_code(self, auth_code):
    self._get_db().put('DEVICE_LABS_AUTH_CODE', auth_code)
    os.sync()
예제 #56
0
def manager_thread():

    shutdownd = Process(name="shutdownd",
                        target=launcher,
                        args=("selfdrive.shutdownd", ))
    shutdownd.start()

    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})

    # save boot log
    subprocess.call(["./loggerd", "--bootlog"],
                    cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))

    # start daemon processes
    for p in daemon_processes:
        start_daemon_process(p)

    # start persistent processes
    for p in persistent_processes:
        start_managed_process(p)

    # start offroad
    if EON:
        pm_apply_packages('enable')
        start_offroad()

    if os.getenv("NOBOARD") is None:
        start_managed_process("pandad")

    if os.getenv("BLOCK") is not None:
        for k in os.getenv("BLOCK").split(","):
            del managed_processes[k]

    started_prev = False
    logger_dead = False
    params = Params()
    thermal_sock = messaging.sub_sock('thermal')

    while 1:
        msg = messaging.recv_sock(thermal_sock, wait=True)

        if msg.thermal.freeSpace < 0.05:
            logger_dead = True

        if msg.thermal.started:
            for p in car_started_processes:
                if p == "loggerd" and logger_dead:
                    kill_managed_process(p)
                else:
                    start_managed_process(p)
        else:
            logger_dead = False
            driver_view = params.get("IsDriverViewEnabled") == b"1"

            # TODO: refactor how manager manages processes
            for p in reversed(car_started_processes):
                if p not in driver_view_processes or not driver_view:
                    kill_managed_process(p)

            for p in driver_view_processes:
                if driver_view:
                    start_managed_process(p)
                else:
                    kill_managed_process(p)

            # trigger an update after going offroad
            if started_prev:
                os.sync()
                send_managed_process_signal("updated", signal.SIGHUP)

        started_prev = msg.thermal.started

        # check the status of all processes, did any of them die?
        running_list = [
            "%s%s\u001b[0m" %
            ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p)
            for p in running
        ]
        cloudlog.debug(' '.join(running_list))

        # Exit main loop when uninstall is needed
        if params.get("DoUninstall", encoding='utf8') == "1":
            break
예제 #57
0
def install_distro():
    """
    Install selected ISO to USB disk.
    :return:
    """
    usb_mount = config.usb_mount
    install_dir = os.path.join(config.usb_mount, "multibootusb",
                               iso_basename(config.image_path))
    _iso_file_list = iso.iso_file_list(config.image_path)

    if not os.path.exists(os.path.join(usb_mount, "multibootusb")):
        log("Copying multibootusb directory to " + usb_mount)
        shutil.copytree(
            resource_path(os.path.join("data", "tools", "multibootusb")),
            os.path.join(config.usb_mount, "multibootusb"))

    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
        with open(os.path.join(install_dir, "multibootusb.cfg"), "w") as f:
            f.write(config.distro)
        with open(os.path.join(install_dir, "iso_file_list.cfg"), 'w') as f:
            for file_path in _iso_file_list:
                f.write(file_path + "\n")
    log("Installing " + iso_name(config.image_path) + " on " + install_dir)

    if config.distro == "opensuse":
        iso.iso_extract_file(config.image_path, install_dir, 'boot')
        config.status_text = "Copying ISO..."
        if platform.system() == "Windows":
            subprocess.call(
                ["xcopy", config.image_path, usb_mount], shell=True
            )  # Have to use xcopy as python file copy is dead slow.
        elif platform.system() == "Linux":
            log("Copying " + config.image_path + " to " + usb_mount)
            shutil.copy(config.image_path, usb_mount)
    elif config.distro == "Windows" or config.distro == "alpine" or config.distro == 'pc-unlocker'\
            or config.distro == 'pc-tool' or config.distro == 'grub2only':
        log("Extracting iso to " + usb_mount)
        iso_extract_full(config.image_path, usb_mount)
    elif config.distro == "trinity-rescue":
        iso.iso_extract_file(config.image_path, usb_mount, '*trk3')
    elif config.distro == "ipfire":
        iso.iso_extract_file(config.image_path, usb_mount, '*.tlz')
        iso.iso_extract_file(config.image_path, usb_mount, 'distro.img')
        iso.iso_extract_file(config.image_path, install_dir, 'boot')
    elif config.distro == "zenwalk":
        config.status_text = "Copying ISO..."
        iso.iso_extract_file(config.image_path, install_dir, "kernel")
        copy_iso(config.image_path, install_dir)
    elif config.distro == "salix-live":
        # iso.iso_extract_file(config.image_path, install_dir, "boot")
        iso.iso_extract_file(config.image_path, install_dir, '*syslinux')
        iso.iso_extract_file(config.image_path, install_dir, '*menus')
        iso.iso_extract_file(config.image_path, install_dir, '*vmlinuz')
        iso.iso_extract_file(config.image_path, install_dir, '*initrd*')
        iso.iso_extract_file(config.image_path, usb_mount, '*modules')
        iso.iso_extract_file(config.image_path, usb_mount, '*packages')
        iso.iso_extract_file(config.image_path, usb_mount, '*optional')
        iso.iso_extract_file(config.image_path, usb_mount, '*liveboot')
        #iso.iso_extract_full(config.image_path, usb_mount)
        config.status_text = "Copying ISO..."
        copy_iso(config.image_path, install_dir)
    elif config.distro == 'sgrubd2':
        copy_iso(config.image_path, install_dir)
    elif config.distro == 'alt-linux':
        iso.iso_extract_file(config.image_path, install_dir, '-xr!*rescue')
        iso.iso_extract_file(config.image_path, config.usb_mount, 'rescue')
    elif config.distro == "generic":
        #with open(os.path.join(install_dir, "generic.cfg"), "w") as f:
        #    f.write(os.path.join(isolinux_bin_dir(config.image_path), "generic") + ".bs")
        iso_extract_full(config.image_path, usb_mount)
    elif config.distro == 'grub4dos':
        iso_extract_full(config.image_path, usb_mount)
    elif config.distro == 'ReactOS':
        iso_extract_full(config.image_path, usb_mount)
    elif config.distro == 'grub4dos_iso' or config.distro == 'raw_iso':
        copy_iso(config.image_path, install_dir)
    else:
        iso.iso_extract_full(config.image_path, install_dir)

    if platform.system() == 'Linux':
        log('ISO extracted successfully. Sync is in progress...')
        os.sync()

    if config.persistence != 0:
        log('Creating persistence...')
        config.status_text = 'Creating persistence...'
        persistence.create_persistence()

    install_patch()
예제 #58
0
def init_overlay() -> None:

    overlay_init_file = Path(os.path.join(BASEDIR, ".overlay_init"))

    # Re-create the overlay if BASEDIR/.git has changed since we created the overlay
    if overlay_init_file.is_file():
        git_dir_path = os.path.join(BASEDIR, ".git")
        new_files = run(
            ["find", git_dir_path, "-newer",
             str(overlay_init_file)])
        if not len(new_files.splitlines()):
            # A valid overlay already exists
            return
        else:
            cloudlog.info(".git directory changed, recreating overlay")

    cloudlog.info("preparing new safe staging area")

    params = Params()
    params.put_bool("UpdateAvailable", False)
    set_consistent_flag(False)
    dismount_overlay()
    if TICI:
        run(["sudo", "rm", "-rf", STAGING_ROOT])
    if os.path.isdir(STAGING_ROOT):
        shutil.rmtree(STAGING_ROOT)

    for dirname in [
            STAGING_ROOT, OVERLAY_UPPER, OVERLAY_METADATA, OVERLAY_MERGED
    ]:
        os.mkdir(dirname, 0o755)

    if os.lstat(BASEDIR).st_dev != os.lstat(OVERLAY_MERGED).st_dev:
        raise RuntimeError(
            "base and overlay merge directories are on different filesystems; not valid for overlay FS!"
        )

    # Leave a timestamped canary in BASEDIR to check at startup. The device clock
    # should be correct by the time we get here. If the init file disappears, or
    # critical mtimes in BASEDIR are newer than .overlay_init, continue.sh can
    # assume that BASEDIR has been used for local development or otherwise modified,
    # and skips the update activation attempt.
    consistent_file = Path(os.path.join(BASEDIR, ".overlay_consistent"))
    if consistent_file.is_file():
        consistent_file.unlink()
    overlay_init_file.touch()

    os.sync()
    overlay_opts = f"lowerdir={BASEDIR},upperdir={OVERLAY_UPPER},workdir={OVERLAY_METADATA}"

    mount_cmd = [
        "mount", "-t", "overlay", "-o", overlay_opts, "none", OVERLAY_MERGED
    ]
    if TICI:
        run(["sudo"] + mount_cmd)
        run(["sudo", "chmod", "755", os.path.join(OVERLAY_METADATA, "work")])
    else:
        run(mount_cmd)

    git_diff = run(["git", "diff"], OVERLAY_MERGED, low_priority=True)
    params.put("GitDiff", git_diff)
    cloudlog.info(f"git diff output:\n{git_diff}")
예제 #59
0
def create_ps2(dest, game_id, game_title, icon0, pic1, cue_files, cu2_files, img_files):
    print('Create PS2 VCD for', game_title) if verbose else None
    print('Install VCD in', dest + '/POPS')

    try:
        os.stat(dest + '/POPS')
    except:
        raise Exception('No POPS directory found')
    try:
        os.stat(dest + '/ART')
    except:
        raise Exception('No ART directory found')
        
    p = popstation()
    p.verbose = verbose
    p.game_id = game_id
    p.game_title = game_title

    discs_txt = None
    vmcdir_txt = None
    if len(img_files) > 1:
        for i in range(4):
            pp = game_id[:4] + '_' + game_id[4:7] + '.' + game_id[7:9] + '.' + game_title
            pp = pp + '_CD%d.VCD\n' % (i + 1)
            if not vmcdir_txt:
                vmcdir_txt = pp[:-5] + '\n'
            if i >= len(img_files):
                pp = '\n'
            if not discs_txt:
                discs_txt = pp
            else:
                discs_txt = discs_txt + pp

    for i in range(len(img_files)):
        f = img_files[i]
        toc = p.get_toc_from_ccd(f)
        if not toc:
            print('Need to create a TOC') if verbose else None
            toc = get_toc_from_cu2(cu2_files[i])

        print('Add image', f) if verbose else None
        p.add_img((f, toc))

        print('GameID', game_id, game_title) if verbose else None
        pp = dest + '/POPS/' + game_id[:4] + '_' + game_id[4:7] + '.' + game_id[7:9] + '.' + game_title
        if len(img_files) > 1:
            pp = pp + '_CD%d' % (i + 1)
        try:
            os.mkdir(pp)
        except:
            pass
        p.vcd = pp + '.VCD'
        print('Create VCD at', p.vcd) if verbose else None
        p.create_vcd()
        try:
            os.sync()
        except:
            pass

        if discs_txt:
            with open(pp + '/DISCS.TXT', 'w') as f:
                f.write(discs_txt)
        if vmcdir_txt:
            with open(pp + '/VMCDIR.TXT', 'w') as f:
                f.write(vmcdir_txt)


        if i == 0:
            create_blank_mc(pp + '/SLOT0.VMC')
            create_blank_mc(pp + '/SLOT1.VMC')
            
    pp = dest + '/ART/'
    f = pp + game_id[0:4] + '_' + game_id[4:7] + '.' + game_id[7:9] + '_COV.jpg'
    image = icon0.resize((200, 200))
    image = image.convert('RGB')
    image.save(f, format='JPEG', quality=100, subsampling=0)
    f = pp + game_id[0:4] + '_' + game_id[4:7] + '.' + game_id[7:9] + '_BG.jpg'
    image = pic1.resize((640, 480))
    image = image.convert('RGB')
    image.save(f, format='JPEG', quality=100, subsampling=0)
예제 #60
0
def callPlot():
    global ycIpFid, ycIpNam, ycOpFid
    global spc2Fid, spc3Fid, vbl2Fid, vbl3Fid
    global Va, Aa, Ka, Ra, Fa  # Approach  parms
    global Vc, Hc, Kc, Rc  # Cruise    parms
    global Cw, Iw, Aw, Ww, Pw, Lf, Df, Lr, Dr  # Wing/Ailr parms
    global Ch, Ih, Ah, Wh, Ph, Lh, Dh  # Hstab     parms
    global Cv, Iv, Av, Wv, Pv, Lv, Dv  # Vstab     parms
    global Mp, Rp, Ap, Np, Xp, Ip, Op, Vp, Cp, Tp  # Prop      parms
    global Mb, Xb, Yb, Zb  # Ballast   parms
    global Hy, Vy  # Solver    parms
    #
    # Versions in Yasim configuration strings, OrderedDict
    ## single plot
    versDict = OrderedDict([
        ('YASIM_VERSION_CURRENT', '-vCurr'),
    ])
    ## all versions
    ##versDict =             OrderedDict([ ('YASIM_VERSION_ORIGINAL', '-vOrig'), \
    ##                                     ('YASIM_VERSION_32',       '-v32'),   \
    ##                                     ('YASIM_VERSION_CURRENT',  '-vCurr'), \
    ##                                     ('2017.2',                 '-v2017-2') ])
    ## Iterate through each version in dictionary
    for versKywd in versDict.keys():
        versSfix = versDict[versKywd]
        vcfgFid = ycIpNam + versSfix + '.xml'
        vdatFid = ycIpNam + '-dat' + versSfix + '.txt'
        ##
        ## # open yasim config file, apply version string
        ## Tix tix for python 3
        if (pythVers < 3):
            vcfgHndl = open(vcfgFid, 'w', 0)
        else:
            vcfgHndl = open(vcfgFid, 'w')
        # Phase 3 write auto file via yconfig template and subsVbls from Tix
        with open(ycOpFid, 'r') as ycOpHndl:
            # step each line in template yasim config file
            for line in ycOpHndl:
                if '<airplane mass="' in line:
                    # make an index list of double quotes in the line
                    sepsList = []
                    sepsChar = '"'
                    lastIndx = 0
                    while (lastIndx > -1):
                        sepsIndx = line.find(sepsChar, (lastIndx + 1))
                        if (sepsIndx > 0):
                            sepsList.append(sepsIndx)
                        lastIndx = sepsIndx
                    #
                    if ('version=' in line):
                        linePart = line[:line.find('version=')]
                        sepsIndx = line.find('version=')
                        sepsIndx = line.find('"', (sepsIndx + 1))
                        sepsIndx = line.find('"', (sepsIndx + 1))
                        linePart = linePart + 'version="' + versKywd
                        linePart = linePart + line[sepsIndx:]
                        line = linePart
                    else:
                        # Use index list to split line into text and numbers
                        lineMass = line[0:(sepsList[1] + 1)]
                        line = lineMass + 'version="' + versKywd + '">'
                # Write unchanged/modified line into versioned xml
                vcfgHndl.write(line)
        # close and sync files
        vcfgHndl.flush()
        os.fsync(vcfgHndl.fileno())
        vcfgHndl.close()
        #
        os.sync()
        ycOpHndl.close()  # already closed by its 'with' block; harmless no-op
        ##
        ## GNUPlot needs specification files: auto create 2 vble gnuplot spec file
        ##ab apltHndl  = open(vbl2Fid, 'w', 0)
        apltHndl = open(vbl2Fid, 'w')
        #create common annotation test parsed / menu-altered values, big version: all menu parms
        commNota = ' set title "' + ycIpNam + versSfix + 'Parms:\\nAp:' + str(Va)  \
          + ' ' + str(Aa) + ' ' + str(Ka) + ' ' + str(Ra) + ' ' + str(Fa) +'\\n'   \
          + ' Cr:'  + str(Vc) + ' ' + str(Hc) + ' ' + str(Kc) + ' '                \
          + str(Rc) + '\\nWi:' + str(Cw) + ' ' + str(Iw) + ' ' + str(Aw)           \
          + ' ' + str(Ww) + ' ' + str(Pw) + ' ' + str(Lf) + ' ' + str(Df)          \
          + ' ' + str(Lr) + ' ' + str(Dr)                                          \
          + '\\nHs:' + str(Ch) + ' ' + str(Ih) + ' ' + str(Ah)                     \
          + ' ' + str(Wh) + ' ' + str(Ph) + ' '   + str(Lh) + ' ' + str(Dh)        \
          + '\\nVs:' + str(Cv) + ' ' + str(Iv) + ' ' + str(Av)                     \
          + ' ' + str(Wv) + ' ' + str(Pv) + ' '   + str(Lv) + ' ' + str(Dv)        \
          + 'Ys:'+ str(Vy) + ' ' + str(Hy) + '" \n'
        # uncomment a line below to have gnuplot show shortened legend
        #commNota = ' set title "yasiVers.py ' + ycIpNam + versSfix + ' : ' + str(Vy) + 'kTAS at ' + str(Hy) + 'ft" \n'
        #commNota = ' set title "' + ycIpNam + ' ' + versSfix + ' : ' + str(Vy) + 'kTAS at ' + str(Hy) + 'ft" \n'
        commNota = ' set title "' + ycIpNam + versSfix + ' Va:' + str(
            Va) + ' Aa:' + str(Aa) + ' Vc:' + str(Vc) + ' Cw:' + str(
                Cw) + ' Aw:' + str(Aw) + ' Ww:' + str(Ww) + '"\n'
        with open(spc2Fid, 'r') as tplt:
            plotFlag = 0
            for line in tplt:
                # set flag near end when 'plot' string is found
                if (' plot ' in line): plotFlag = 1
                # find the title line in template config
                if ('set title' in line):
                    line = commNota
                if (plotFlag < 1):
                    # Write line into auto.xml
                    apltHndl.write(line)
            # At EOF append plot lines with proper data file name
#     line = ' plot "' + vdatFid +'" every ::2        using '            \
#          + '1:2 with lines title \'lift\', \\\n'
#     apltHndl.write(line)
#     line = '      "' + vdatFid +'" every ::2        using '            \
#          + '1:3 with lines title \'drag\''
#     apltHndl.write(line)
### ab 2020Jan23  L/D only
            line = ' plot "' + vdatFid +'" every ::2        using '            \
                 + '1:4 with lines title \'LvsD\''
            apltHndl.write(line)
            apltHndl.close()
            tplt.close()  # redundant inside the 'with' block, but harmless
        #
        ## auto create 3 vble gnuplot config file
        apltHndl = open(vbl3Fid, 'w')
        with open(spc3Fid, 'r') as tplt:
            plotFlag = 0
            for line in tplt:
                # set flag near end when 'plot' string is found
                if (' plot ' in line): plotFlag = 1
                # find the title line in template config
                if ('set title' in line):
                    line = commNota
                if (plotFlag < 1):
                    # Write line into auto.xml
                    apltHndl.write(line)
            # At EOF append plot lines with proper data file name
            line = ' plot "' + vdatFid +'" every ::2        using '             \
                 + '1:2 with lines title \'lift\', \\\n'
            apltHndl.write(line)
            line = '      "' + vdatFid +'" every ::2        using '             \
                 + '1:3 with lines title \'drag\', \\\n'
            apltHndl.write(line)
            line = '      "' + vdatFid +'" every ::2        using '             \
               + '1:4 with lines title \'L-D\' \n'
            apltHndl.write(line)
            apltHndl.close()
            tplt.close()
        ## python3 Need to open/close file to flush it ?
        vcfgHndl = open(vcfgFid, 'r')
        vcfgHndl.close()
        ##
        # run yasim external process to show console output
        command_line = 'yasim ' + vcfgFid + ' -a ' + str(Hy) + ' -s ' + str(Vy)
        command_line = 'yasim ' + vcfgFid
        args = shlex.split(command_line)
        p = subprocess.Popen(args)
        #p.communicate()
        #p.wait()
        #
        # run yasim external process for auto dataset, name used in .p spec
        ###ab smite yDatHndl = open(yDatFid, 'w')
        ###ab smite command_line = 'yasim ' + vcfgFid + ' -g -a ' + str(Hy) + ' -s ' + str(Vy)
        ###ab smite args = shlex.split(command_line)
        ###ab smite p = subprocess.run(args, stdout=yDatHndl)
        ###ab smite yDatHndl.close
        ###ab smite p.wait()
        #
        ##
        # run yasim external process for saved dataset file
        vDatHndl = open(vdatFid, 'w')
        command_line = 'yasim ' + vcfgFid + ' -g -a ' + str(Hy) + ' -s ' + str(
            Vy)
        args = shlex.split(command_line)
        p = subprocess.run(args, stdout=vDatHndl)
        vDatHndl.close()
        # subprocess.run() blocks until yasim completes
        ##
        ## Uncomment for separate Lift, Drag curves
        # run gnuplot with 2 vble command file to plot dataset
        #apltHndl  = open(vbl2Fid, 'w')
        #apltHndl.close()
        #command_line = "gnuplot -p " + vbl2Fid
        #args = shlex.split(command_line)
        #DEVNULL = open(os.devnull, 'wb')
        #p = subprocess.run(args, stdout=DEVNULL, stderr=DEVNULL)
        #p.communicate()
        #DEVNULL.close()
        #
        # run gnuplot with 3 vble command file to plot dataset
        # open/close read-only to verify the spec file (opening with 'w' would truncate it)
        apltHndl = open(vbl3Fid, 'r')
        apltHndl.close()
        command_line = "gnuplot -p " + vbl3Fid
        args = shlex.split(command_line)
        DEVNULL = open(os.devnull, 'wb')
        p = subprocess.run(args, stdout=DEVNULL, stderr=DEVNULL)
        #p.communicate()
        DEVNULL.close()