def _cache_pip_packages(progress, priority=Priority.NONE): """ Downloads all updatable python modules and caches them in pip's internal pacakge cache. """ # Urgent updates don't do PIP updates if priority == Priority.URGENT: return phase_name = 'downloading-pip-pkgs' progress.start(phase_name) ensure_dir(PIP_CACHE_DIR) packages = read_file_contents_as_lines(PIP_PACKAGES_LIST) progress.init_steps(phase_name, len(packages)) for pkg in packages: progress.next_step(phase_name, "Downloading {}".format(pkg)) # The `--no-install` parameter has been deprecated in pip. However, the # version of pip in wheezy doesn't yet support the new approach which # is supposed to provide the same behaviour. args = "install --upgrade --download '{}' '{}'".format(PIP_CACHE_DIR, pkg) success = run_pip_command(args) # TODO: abort the install? if not success: msg = "Downloading the '{}' pip package failed.".format(pkg) logger.error(msg)
def change_overclock_value(config, board_name=None):
    """
    Apply the clocking values of the given overclock profile to the boot
    config and persist the chosen profile in the settings.

    Args:
        config: name of the overclock profile to apply
        board_name: optional board identifier; current board when omitted
    """
    board = get_board_props(board_name=board_name)

    if not board:
        logger.error("Could not get overclocking settings for board")
        return

    try:
        values = board.CLOCKING['values'][config]
    except KeyError:
        logger.error(
            "kano-settings: set_overclock: SetOverclock: set_overclock(): " \
            "was called with an invalid overclock setting={}"
            .format(config)
        )
        return

    logger.info(
        u"set_overclock / apply_changes: " \
        "config:{} arm_freq:{arm_freq} " \
        "core_freq:{core_freq} " \
        "sdram_freq:{sdram_freq} " \
        "over_voltage:{over_voltage}"
        .format(config, **values)
    )

    # Write every clocking value into the boot config.
    for key in values:
        set_config_value(key, values[key])

    # Remember the selected profile.
    set_setting('Overclocking', config)
def set_ssh_enabled(enabled, with_logging=True):
    """
    Sets the Dropbear SSH client between disabled and enabled.

    Requires sudo.

    Args:
        enabled - bool to enable & start or disabled & stop the SSH service
        with_logging - bool to control whether or not the operation logs messages

    Returns:
        successful - bool whether the operation succeeded or not
    """
    if enabled:
        rc = os.system("systemctl enable dropbear.service")
        if not rc:
            # Only start the service if enabling it succeeded.
            rc = os.system("systemctl start dropbear.service")
        if with_logging:
            if rc == 0:
                logger.info('set_ssh_enabled: Enabled and started Dropbear SSH')
            else:
                logger.error('set_ssh_enabled: Failed to enable and start Dropbear SSH')
    else:
        rc = os.system("systemctl disable dropbear.service")
        if not rc:
            # Only stop the service if disabling it succeeded.
            rc = os.system("systemctl stop dropbear.service")
        if with_logging:
            if rc == 0:
                logger.info('set_ssh_enabled: Disabled and stopped Dropbear SSH')
            else:
                logger.error('set_ssh_enabled: Failed to disable and stop Dropbear SSH')

    return rc == 0
def get_tracker_events(old_only=False):
    """
    Read the events log and return a dictionary with all of them.

    :param old_only: Don't return events from the current boot.
    :type old_only: boolean
    :returns: A dictionary suitable to be sent to the tracker endpoint.
    :rtype: dict
    """
    data = {'events': []}

    try:
        rf = open_locked(tracker_events_file, 'r')
    except IOError as e:
        logger.error("Error opening the tracker events file {}".format(e))
    else:
        with rf:
            for event_line in rf.readlines():
                try:
                    event = json.loads(event_line)
                except:
                    logger.warn("Found a corrupted event, skipping.")
                    # BUGFIX: without this `continue`, a corrupted first line
                    # raised NameError (`event` unbound) and a corrupted later
                    # line re-validated the previous event, duplicating it.
                    continue

                # Only report events from previous boots (different token).
                if _validate_event(event) and event['token'] != TOKEN:
                    data['events'].append(event)

    return data
def add_item(self, item):
    """
    Register an item in this category, keyed by its id, and link the item
    back to the category. Falsy items are rejected with an error log.
    """
    if not item:
        logger.error("Item {} can't be added to Category {}".format(
            item, self))
        return

    self._items[item.get_id()] = item
    item.set_category(self)
def run(args):
    """
    Resolve and download the share named by the first CLI argument.

    Shows an hourglass cursor while working. Returns the downloaded share
    data with the share id appended, or None when anything fails.
    """
    try:
        share_id = args[0]
    except Exception:
        return

    # Start an hourglass with an unspecified app,
    # remove it as soon as we have a download resolution
    hourglass_start("")

    success, text, share = get_share_by_id(share_id)
    if not success:
        logger.error("Error downloading share: {}".format(text))
        hourglass_end()
        return

    success, data = download_share(share)
    if not success:
        logger.error("Could not download share, error: {}".format(data))
        hourglass_end()
        return

    data.append(share_id)
    hourglass_end()

    return data
def enable_system_recovery_flow():
    """Configure the system to start in recovery mode on next bootup.

    This sets up a few things:
      1. Replaces the normal bootup splash animation with another for recovery.
      2. Configures LightDM autologin for multi-user systems since the Updater
         runs under the user.

    Returns:
        bool - Whether the operation was successful
    """

    # BUGFIX: log message typo 'stategy' -> 'strategy'.
    logger.debug('Configuring recovery strategy for next boot')

    successful = True

    # Set the recovery bootup splash and replace normal bootup one.
    set_splash_interrupted()

    # Configure the system to autologin for multi-user systems. This is due
    # to the Updater process running under the user.
    try:
        user = get_user_unsudoed()

        if user:
            # TODO: Create a single function for these in kano_init.
            enable_console_autologin(user)
            set_ldm_autologin(user)
            enable_ldm_autostart()
        else:
            successful = False
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate.
        logger.error('Could not configure autologin for update recovery!')
        successful = False

    return successful
def clear_tracker_events(old_only=True):
    """ Truncate the events file, removing all the cached data.

        :param old_only: Don't remove data from the current boot.
        :type old_only: boolean
    """
    try:
        rf = open_locked(tracker_events_file, "r")
    except IOError as e:
        logger.error('Error opening tracking events file {}'.format(e))
        return

    with rf:
        # Keep only the events generated during the current boot
        # (identified by the matching token).
        events = []
        for event_line in rf.readlines():
            try:
                event = json.loads(event_line)
                if 'token' in event and event['token'] == TOKEN:
                    events.append(event_line)
            except:
                logger.warn("Found a corrupted event, skipping.")

        with open(tracker_events_file, "w") as wf:
            wf.writelines(events)

        if 'SUDO_USER' in os.environ:
            chown_path(tracker_events_file)
def generate_tracker_token():
    """ Generating the token is a simple md5hash of the current time.

        The token is saved to the `tracker_token_file`.

        :returns: The token.
        :rtype: str
    """
    token = hashlib.md5(str(time.time())).hexdigest()

    ensure_dir(tracker_dir)

    try:
        token_f = open_locked(tracker_token_file, "w")
    except IOError as e:
        logger.error(
            'Error opening tracker token file (generate) {}'.format(e))
    else:
        with token_f:
            token_f.write(token)
        if 'SUDO_USER' in os.environ:
            chown_path(tracker_token_file)

    # Make sure that the events file exist
    try:
        events_f = open(tracker_events_file, 'a')
    except IOError as e:
        logger.error('Error opening tracker events file {}'.format(e))
    else:
        events_f.close()
        if 'SUDO_USER' in os.environ:
            chown_path(tracker_events_file)

    return token
def track_data(name, data):
    """ Track arbitrary data.

        Calling this function will generate a data tracking event.

        :param name: The identifier of the data.
        :type name: str
        :param data: Arbitrary data, must be compatible with JSON.
        :type data: dict, list, str, int, float, None
    """
    # Wrap the payload in the standard event envelope.
    event = {
        "type": "data",
        "time": int(time.time()),
        "timezone_offset": get_utc_offset(),
        "os_version": OS_VERSION,
        "cpu_id": CPU_ID,
        "token": TOKEN,
        "name": str(name),
        "data": data
    }

    try:
        af = open_locked(tracker_events_file, "a")
    except IOError as e:
        logger.error('Error opening tracker events file {}'.format(e))
        return

    with af:
        af.write(json.dumps(event) + "\n")

    if 'SUDO_USER' in os.environ:
        chown_path(tracker_events_file)
def get_tracker_events(old_only=False):
    """
    Read the events log and return a dictionary with all of them.

    :param old_only: Don't return events from the current boot.
    :type old_only: boolean
    :returns: A dictionary suitable to be sent to the tracker endpoint.
    :rtype: dict
    """
    data = {'events': []}

    try:
        rf = open_locked(tracker_events_file, "r")
    except IOError as e:
        logger.error('Error opening the tracker events file {}'.format(e))
    else:
        with rf:
            for event_line in rf.readlines():
                try:
                    event = json.loads(event_line)
                except:
                    logger.warn("Found a corrupted event, skipping.")
                    # BUGFIX: without this `continue`, a corrupted first line
                    # raised NameError (`event` unbound) and a corrupted later
                    # line re-validated the previous event, duplicating it.
                    continue

                # Only report events from previous boots (different token).
                if _validate_event(event) and event['token'] != TOKEN:
                    data['events'].append(event)

    return data
def track_data_and_sync(event_name, event_data):
    """Create a tracking event with data and upload it to the servers.

    This function also appends a uuid to the event data such that these
    immediate events can be grouped more easily. See
    :func:`kano_profile.tracker.tracking_uuids.get_tracking_uuid`.

    Note:
        This is a slow function, requires a network connection and the user
        being logged into Kano World.

    Args:
        event_name (str): See :func:`kano_profile.tracker.track_data`
        event_data (dict): See :func:`kano_profile.tracker.track_data`
    """
    try:
        # Imported lazily to avoid a hard dependency on kano_profile.
        from kano_profile.tracker import track_data
        from kano_profile.tracker.tracking_uuids import get_tracking_uuid

        # NOTE: this mutates the caller's event_data dict by adding 'uuid'.
        event_data['uuid'] = get_tracking_uuid(TRACKING_UUID_KEY)
        track_data(event_name, event_data)

        rc = os.system('kano-sync --skip-kdesk --upload-tracking-data --silent')
        logger.debug(
            'track_data_and_sync: {} {} and sync rc {}'
            .format(event_name, event_data, rc)
        )
    except:
        # Best-effort tracking: swallow and log any failure.
        logger.error('Unexpected error:\n{}'.format(traceback.format_exc()))
def set_login_data(id, username, email, token):
    """ Reverse Engineered Wrapper: Simulates a "payload" JSON dictionary
    using the login and register data from the WEB API (Qt Interface).
    Invoked from kano-webengine.

    Args:
        id (str): numerical ID of the user
        username (str): user name
        email (str): user email
        token (str): a unique login/registration token

    Exception: Error if unable to login

    Returns:
        bool: True for successful login [through the kano-login backend]
    """
    payload = {
        'success': True,  # data are only sent from the WEB API on successful login
        'session': {
            'token': token,
            'user': {
                'id': id,
                'email': email,
                'username': username
            },
        },
    }

    try:
        return login_register_data(payload)
    except Exception as e:
        logger.error("Error with data from the WEB API: {}".format(e))
def get_ascii_art(name):
    """ Load an ASCII art file.

        Args:
            name (str) - the name of the asset file

        Returns:
            ascii_art (str) - the ASCII art asset as a block of text
    """
    asset_path = get_path_to_file_in_system(name)

    try:
        with open(asset_path) as f:
            return f.read()
    except (IOError, OSError) as e:
        logger.error('Could not load file {} - [{}]'.format(asset_path, e))
    except Exception as e:
        logger.error('Unexpected error while loading the ascii art'
                     ' - [{}]'.format(e))

    # Fall back to the plain asset name when the file cannot be read.
    return name
def _get_co_assets():
    """
    Index local 'make-art-assets' content-object tarballs by apply order.

    Each valid content object holds exactly two files: an apply-order index
    file and the asset payload, in either order.

    Returns:
        dict - {apply_order (int): payload_path (str)}
    """
    from kano_content.api import ContentManager
    cm = ContentManager.from_local()

    co_index = {}

    for co in cm.list_local_objects(spec='make-art-assets'):
        co_files = co.get_data('').get_content()
        if len(co_files) != 2:
            logger.warning(
                'Count of files other than 2 in co[{}], skipping'.format(
                    co.get_data('').get_dir()
                )
            )
            continue

        first, second = co_files

        # One of the two files is the index, the other is the payload.
        index_no = _get_co_index_apply_order(first)
        if index_no is not None:
            co_index[index_no] = second
            continue

        index_no = _get_co_index_apply_order(second)
        if index_no is not None:
            co_index[index_no] = first
        else:
            err_msg = 'None of the files contained in co have apply index'
            logger.error(err_msg)

    return co_index
def _add_led_speaker_checkbox(self):
    """
    Add the LED ring CPU animation checkbox when an LED Speaker or Pi Hat
    is detected on a board fast enough to drive the animation.
    """
    self.cpu_monitor_checkbox = Gtk.CheckButton()

    led_speaker_found = False
    pi_hat_found = False

    try:
        from kano_peripherals.speaker_leds.driver.high_level import \
            get_speakerleds_interface
        from kano_peripherals.pi_hat.driver.high_level import \
            get_pihat_interface

        speaker_led_api = get_speakerleds_interface(retry_count=0)
        if speaker_led_api:  # can be None
            led_speaker_found = speaker_led_api.detect()

        pi_hat_api = get_pihat_interface(retry_count=0)
        if pi_hat_api:  # can be None
            pi_hat_found = pi_hat_api.detect()
    except Exception as e:
        logger.error("Something unexpected occured in _add_led_speaker_checkbox"
                     " - [{}]".format(e))

    peripheral_found = led_speaker_found or pi_hat_found
    if has_min_performance(RPI_2_B_SCORE) and peripheral_found:
        self.buttons.append(self.cpu_monitor_checkbox)
        self.label_button_and_pack(
            self.cpu_monitor_checkbox,
            _("Enable LED ring CPU Animation"),
            ''
        )
def parseRawData(rawdata):
    """
    Parse the text output of `iwlist` into a dict of per-cell data.

    Pre-condition: rawdata is not empty.
    """
    # Group the output lines under the "Cell " header that starts each network.
    cellDataL = []
    for line in rawdata.split("\n"):
        try:
            # skip empty lines
            if not line.strip():
                continue

            if line.lstrip().startswith("Cell "):
                # log.debug("parseRawData: new cell")
                cellDataL.append([])

            if cellDataL and line:
                cellDataL[-1].append(line)
        except Exception as e:
            logger.error("unexpected error occurred while looping rawdatas {}".format(rawdata), exception=e)

    # Data is separated by cells, now we'll parse each cell's data
    parsedCellData = {}
    for cell_lines in cellDataL:
        try:
            cellNumber, cellData = parseCellData("\n".join(cell_lines))
            parsedCellData[cellNumber] = cellData
        except Exception as e:
            logger.error("unexpected error occurred while parsing cellDataL {}".format(rawdata), exception=e)

    return parsedCellData
def get_board_props(board_name=None):
    """
    Load the board-specific configuration module for the given model.

    Args:
        board_name - optional board model; detected when omitted

    Returns:
        module with CLOCKING and DEFAULT_CONFIG attributes, or None when
        the board module is missing or incomplete
    """
    if not board_name:
        board_name = get_rpi_model()

    cpu_profile = get_board_property(board_name, 'cpu_profile') \
        or RPI_1_CPU_PROFILE

    # Normalise the profile name into a python module name.
    board_module = re.sub(r'[-/ ]', '_', cpu_profile).lower()

    try:
        board = importlib.import_module(
            '{}.{}'.format(__name__, board_module)
        )
    except ImportError:
        logger.error('Board not found')
        return None

    # Reject board modules that are missing required data.
    for prop in ('CLOCKING', 'DEFAULT_CONFIG'):
        if not hasattr(board, prop):
            logger.error('No {} data in board config'
                         .format(prop.replace('_', ' ').lower()))
            return None

    # TODO: Validate board info
    return board
def get_status():
    """
    Query `tvservice --status` and parse it into a display status dict.

    Returns:
        dict with keys group, mode, full_range, overscan, resolution, hz;
        None (implicitly) when the display group cannot be determined.
    """
    status_str, _, _ = run_cmd(
        '{tvservice} --status'.format(tvservice=tvservice_path))

    status = {}
    if 'DMT' in status_str:
        status['group'] = 'DMT'
    elif 'CEA' in status_str:
        status['group'] = 'CEA'
    else:
        logger.error("status parsing error")
        return

    # The mode number sits between the first pair of parentheses.
    status['mode'] = int(status_str.split('(')[1].split(')')[0].strip())
    status['full_range'] = 'RGB full' in status_str

    # Overscan counts as enabled unless it is explicitly fully disabled.
    overscan_disabled = (
        get_screen_value('disable_overscan') == 1 and
        get_screen_value('overscan_top') == 0 and
        get_screen_value('overscan_bottom') == 0 and
        get_screen_value('overscan_left') == 0 and
        get_screen_value('overscan_right') == 0
    )
    status['overscan'] = not overscan_disabled

    res, hz = status_str.split(',')[1].split('@')
    status['resolution'] = res.strip()
    status['hz'] = float(hz.strip()[:-2])

    return status
def session_end(session_file):
    """
    Mark a tracking session file as finished and record its elapsed time.
    """
    if not os.path.exists(session_file):
        logger.warn("Someone removed the tracker file, the runtime of this "
                    "app will not be logged")
        return

    try:
        rf = open_locked(session_file, 'r')
    except IOError as e:
        logger.error("Error opening the tracker session file {}".format(e))
        return

    with rf:
        data = json.load(rf)

        # Compute the run time and flag the session as finished.
        data['elapsed'] = int(time.time()) - data['started']
        data['finished'] = True

        try:
            wf = open(session_file, 'w')
        except IOError as e:
            logger.error(
                "Error opening the tracker session file {}".format(e))
        else:
            with wf:
                json.dump(data, wf)

        if 'SUDO_USER' in os.environ:
            chown_path(session_file)
def _apply_co_packages(dest_dir):
    """
    Extract every content-object asset tarball into dest_dir.

    Tarballs are applied in ascending apply-order so later ones can
    overwrite files from earlier ones.

    Args:
        dest_dir (str): directory the tarball contents are extracted into
    """
    import tarfile

    co_index = _get_co_assets()

    # Apply in ascending apply-order (Python 2 iterkeys).
    for order in sorted(co_index.iterkeys()):
        tar_file = co_index[order]

        # First try to open the file
        try:
            tarball = tarfile.open(tar_file)
        except (IOError, OSError) as exc:
            err_msg = "Couldn't open file '{}', [{}]".format(tar_file, exc)
            logger.error(err_msg)
            continue
        except tarfile.ReadError as exc:
            err_msg = 'Error parsing tarfile "{}", [{}]'.format(tar_file, exc)
            logger.error(err_msg)
            continue
        else:
            # Now try to extract the files one by one
            with tarball:
                for tarred_file in tarball:
                    try:
                        tarball.extract(tarred_file, path=dest_dir)
                    except IOError as exc:
                        # This is to guard against weird tar behaviour when
                        # trying to ovewrite symlinks
                        bad_filename = os.path.join(dest_dir, tarred_file.name)

                        if os.path.islink(bad_filename):
                            logger.debug(
                                'Remove link and ovewrite "{}"'.format(
                                    bad_filename)
                            )
                            os.remove(os.path.join(dest_dir, tarred_file.name))
                            tarball.extract(tarred_file, path=dest_dir)
def pause_tracking_session(session):
    '''
    Close session and make a note of the session if it is open so that it
    can be resumed.
    '''
    if not session.is_open():
        return

    # Record the session so it can be resumed later.
    try:
        sessions_f = open_locked(PAUSED_SESSIONS_FILE, 'a')
    except IOError as err:
        logger.error('Error opening the paused sessions file: {}'.format(err))
    else:
        with sessions_f:
            sessions_f.write('{}\n'.format(session.dumps()))

    session_end(session.path)

    # Archive the session file under a timestamped name so it is no longer
    # picked up as a live session.
    closed_session = TrackingSession(name=session.name, pid=999999)
    archived_path = '-{}'.format(time.time()).join(
        os.path.splitext(closed_session.path)
    )
    shutil.move(session.path, archived_path)
def track_data(name, data):
    """ Track arbitrary data.

        Calling this function will generate a data tracking event.

        :param name: The identifier of the data.
        :type name: str
        :param data: Arbitrary data, must be compatible with JSON.
        :type data: dict, list, str, int, float, None
    """
    # Assemble the event envelope with the boot token and system metadata.
    event = {
        'type': 'data',
        'time': int(time.time()),
        'timezone_offset': get_utc_offset(),
        'os_version': OS_VERSION,
        'cpu_id': CPU_ID,
        'token': TOKEN,
        'language': LANGUAGE,
        'name': str(name),
        'data': data
    }

    try:
        af = open_locked(tracker_events_file, 'a')
    except IOError as e:
        logger.error("Error opening tracker events file {}".format(e))
        return

    with af:
        af.write(json.dumps(event) + "\n")

    if 'SUDO_USER' in os.environ:
        chown_path(tracker_events_file)
def session_log(name, started, length):
    """ Log a session that was tracked outside of the tracker.

        :param name: The identifier of the session.
        :type name: str
        :param started: When was the session started (UTC unix timestamp).
        :type started: int
        :param length: Length of the session in seconds.
        :type length: int
    """
    try:
        af = open_locked(tracker_events_file, 'a')
    except IOError as e:
        logger.error("Error while opening events file: {}".format(e))
    else:
        with af:
            # Normalise to ints and wrap in a standard session event record.
            session = {
                'name': name,
                'started': int(started),
                'elapsed': int(length)
            }
            event = get_session_event(session)
            af.write(json.dumps(event) + "\n")

        if 'SUDO_USER' in os.environ:
            chown_path(tracker_events_file)
def get_volume():
    """
    Read the current ALSA mixer volume via `amixer`.

    Returns:
        tuple - (percent (int), millibel (int)); defaults to (100, 400)
        for any field that cannot be parsed from the output.
    """
    from kano.logging import logger

    percent = 100
    millibel = 400

    cmd = "amixer | grep %"
    o, _, _ = run_cmd(cmd)
    o = o.strip().split(' ')

    try:
        millibel = int(o[2])
    except Exception:
        # BUGFIX: log messages said 'asmixer' instead of 'amixer'; also
        # dropped the dead `pass` statements after logging.
        msg = 'amixer format bad for millibel, o: {}'.format(o)
        logger.error(msg)

    try:
        percent = int(o[3].translate(None, '[]%'))
    except Exception:
        msg = 'amixer format bad for percent, o: {}'.format(o)
        logger.error(msg)

    # logger.debug('percent: {}, millibel: {}'.format(percent, millibel))
    return percent, millibel
def set_ssh_enabled(enabled, with_logging=True):
    """
    Sets the Dropbear SSH client between disabled and enabled.

    Requires sudo.

    Args:
        enabled - bool to enable & start or disabled & stop the SSH service
        with_logging - bool to control whether or not the operation logs messages

    Returns:
        successful - bool whether the operation succeeded or not
    """
    if enabled:
        # `or` short-circuits: start only runs when enable returned 0.
        rc = os.system("systemctl enable dropbear.service") or \
            os.system("systemctl start dropbear.service")
        if with_logging:
            if rc == 0:
                logger.info('set_ssh_enabled: Enabled and started Dropbear SSH')
            else:
                logger.error(
                    'set_ssh_enabled: Failed to enable and start Dropbear SSH')
    else:
        rc = os.system("systemctl disable dropbear.service") or \
            os.system("systemctl stop dropbear.service")
        if with_logging:
            if rc == 0:
                logger.info('set_ssh_enabled: Disabled and stopped Dropbear SSH')
            else:
                logger.error(
                    'set_ssh_enabled: Failed to disable and stop Dropbear SSH')

    return rc == 0
def get_status():
    """
    Parse `tvservice -s` output into a display status dictionary.

    Returns:
        dict with keys group, mode, full_range, overscan, resolution, hz;
        None (implicitly) when the display group cannot be determined.
    """
    status_str, _, _ = run_cmd(tvservice_path + ' -s')

    status = dict()
    for group in ('DMT', 'CEA'):
        if group in status_str:
            status['group'] = group
            break
    else:
        logger.error('status parsing error')
        return

    # The mode number sits between the first pair of parentheses.
    status['mode'] = int(status_str.split('(')[1].split(')')[0].strip())
    status['full_range'] = 'RGB full' in status_str

    # Overscan counts as enabled unless it is explicitly fully disabled.
    overscan_disabled = (
        get_config_value('disable_overscan') == 1 and
        get_config_value('overscan_top') == 0 and
        get_config_value('overscan_bottom') == 0 and
        get_config_value('overscan_left') == 0 and
        get_config_value('overscan_right') == 0
    )
    status['overscan'] = not overscan_disabled

    res, hz = status_str.split(',')[1].split('@')
    status['resolution'] = res.strip()
    status['hz'] = float(hz.strip()[:-2])

    return status
def remove_priority(self, priority):
    """ Remove a lock with a given priority.

        This should be slightly faster than calling remove().

        Args:
            priority - priority of the lock to be removed.

        Returns:
            True or False if the operation was successful.
    """
    # Normalise the priority into a valid index into self.locks.
    priority = self._standardise_priority(priority)

    successful = True

    try:
        if self.locks[priority] is not None:
            self.num_locks -= 1
            self.locks[priority] = None

            # If the removed lock was the top priority, scan downwards for
            # the next occupied slot to become the new top priority
            # (falls to index 0 when no slot is occupied).
            if priority == self.top_priority:
                for index in xrange(self.top_priority, -1, -1):
                    self.top_priority = index
                    if self.locks[index] is not None:
                        break
        else:
            # Nothing was locked at that priority.
            successful = False
    except IndexError:
        logger.error(
            'There was an unintentional IndexError in remove_priority() with'
            ' priority [{}] and locks [{}]. Check the code!'.format(
                priority, self.locks))
        successful = False

    return successful
def _add_led_speaker_checkbox(self):
    """
    Add the 'Enable LED ring CPU Animation' checkbox to the dialog.

    The checkbox is only shown when an LED Speaker or Pi Hat is detected
    and the board is fast enough (at least RPI_2_B_SCORE performance).
    """
    self.cpu_monitor_checkbox = Gtk.CheckButton()

    is_led_speaker_plugged = False
    is_pi_hat_plugged = False

    try:
        # Imported lazily so the settings UI still loads when the
        # kano_peripherals package is unavailable.
        from kano_peripherals.speaker_leds.driver.high_level import \
            get_speakerleds_interface
        from kano_peripherals.pi_hat.driver.high_level import \
            get_pihat_interface

        speaker_led_api = get_speakerleds_interface(retry_count=0)
        if speaker_led_api:  # can be None
            is_led_speaker_plugged = speaker_led_api.detect()

        pi_hat_api = get_pihat_interface(retry_count=0)
        if pi_hat_api:  # can be None
            is_pi_hat_plugged = pi_hat_api.detect()
    except Exception as e:
        logger.error(
            "Something unexpected occured in _add_led_speaker_checkbox"
            " - [{}]".format(e))

    if has_min_performance(RPI_2_B_SCORE) and (is_led_speaker_plugged or is_pi_hat_plugged):
        self.buttons.append(self.cpu_monitor_checkbox)
        self.label_button_and_pack(self.cpu_monitor_checkbox,
                                   _("Enable LED ring CPU Animation"),
                                   '')
def search_youtube_by_keyword(keyword=None, popular=False, max_results=10,
                              start_index=1, parent_control=False):
    """
    Query the YouTube gdata API for videos matching a keyword.

    Updates the module-level last_search_count with the total hit count.
    Returns the list of entries, or None when the request fails; returns
    None implicitly when the response has no entries.
    """
    url = 'http://gdata.youtube.com/feeds/api/videos'
    params = {
        'v': 2,
        'vq': keyword,
        'racy': 'exclude',
        'alt': 'json',
        'max-results': max_results,
        'start-index': start_index,
        'orderby': 'viewCount' if popular else 'relevance'
    }

    if parent_control is True:
        params['safeSearch'] = 'strict'

    success, error, data = requests_get_json(url, params=params)
    if not success:
        logger.error('Searching YouTube by keyword failed: ' + error)
        return None

    if 'feed' in data and 'entry' in data['feed']:
        # Remember the total hit count for pagination.
        global last_search_count
        last_search_count = data['feed']['openSearch$totalResults']['$t']
        return data['feed']['entry']
def touch(path, times=None):
    """Set the access and modified times of the file specified by path.

    The function calls :func:`.ensure_dir` beforehand for you. This is
    essentially a simple wrapper for :func:`os.utime`.

    Args:
        path (str): Path to the file create/modify
        times (tuple): See :func:`os.utime`

    Returns:
        bool: Whether the operation was successful or not
    """
    try:
        ensure_dir(os.path.dirname(path))
        # Opening in append mode creates the file when it doesn't exist.
        with open(path, 'a'):
            os.utime(path, times)
        return True
    except (IOError, OSError) as error:
        from kano.logging import logger
        logger.error(
            "Could not touch {} due to permission/IO - {}"
            .format(path, error)
        )
        return False
def install_urgent(progress, status):
    """
    Install an urgent hotfix and record which packages were updated.

    Args:
        progress - provider that phases are reported to
        status - updater status object; is_urgent is cleared on completion

    Returns:
        bool - always True
    """
    progress.split(
        Phase(
            'installing-urgent',
            _("Installing Hotfix"),
            100,
            is_main=True
        )
    )

    logger.info("Installing urgent hotfix")
    apt_handle = AptWrapper.get_instance()

    # Capture the package list before installing so it can be tracked after.
    packages_to_update = apt_handle.packages_to_be_upgraded()
    progress.start('installing-urgent')
    install_deb_packages(progress, priority=Priority.URGENT)
    status.is_urgent = False

    try:
        # Imported lazily to avoid a hard dependency on kano_profile.
        from kano_profile.tracker import track_data
        track_data('updated_hotfix', {
            'packages': packages_to_update
        })
        logger.info("Tracking Data: '{}'".format(packages_to_update))
    except ImportError as imp_exc:
        logger.error("Couldn't track hotfix installation, failed to import "
                     "tracking module", exception=imp_exc)
    except Exception:
        # Tracking is best-effort; never let it fail the hotfix install.
        pass

    return True
def update(self, progress, sources_list=None):
    """
    Refresh the apt sources and re-open the package cache.

    Args:
        progress - provider that phases and progress are reported to
        sources_list - optional path to an alternative sources.list

    NOTE(review): on a fetch failure the phase is failed but execution
    still continues into the cache re-open — presumably intentional so a
    partial fetch still refreshes the cache; confirm with callers.
    """
    src_list = aptsources.sourceslist.SourcesList()

    # Count enabled, valid sources (components + the source entry itself)
    # so the download progress total is accurate.
    src_count = 0
    for src in src_list.list:
        if not src.disabled and not src.invalid:
            src_count += len(src.comps) + 1

    # Phase names are namespaced under the currently running phase.
    updating_sources = "{}-updating-apt-sources".format(
        progress.get_current_phase().name)
    cache_init = "{}-apt-cache-init".format(
        progress.get_current_phase().name)

    progress.split(
        Phase(updating_sources, _("Updating apt sources")),
        Phase(cache_init, _("Initialising apt cache"))
    )

    progress.start(updating_sources)
    apt_progress = AptDownloadProgress(progress, src_count)

    try:
        self._cache.update(fetch_progress=apt_progress,
                           sources_list=sources_list)
    except apt.cache.FetchFailedException:
        err_msg = N_("Failed to update sources")
        logger.error(err_msg)
        progress.fail(_(err_msg))

    progress.start(cache_init)
    ops = [("reading-package-lists", _("Reading package lists")),
           ("building-dependency-tree", _("Building dependency tree")),
           ("reading-state-information", _("Reading state information")),
           ("building-data-structures", _("Building data structures"))]
    op_progress = AptOpProgress(progress, ops)
    self._cache.open(op_progress)
def fix_broken(self, progress):
    """
    Repair a dirty dpkg journal and fix broken package dependencies.

    Args:
        progress - provider that phases are reported to
    """
    progress.split(
        Phase('dpkg-clean', _("Cleaning dpkg journal")),
        Phase('fix-broken', _("Fixing broken packages"))
    )

    # A dirty journal means dpkg was interrupted; finish its configuration
    # run and reload the cache before attempting anything else.
    if self._cache.dpkg_journal_dirty:
        progress.start('dpkg-clean')
        logger.info("Cleaning dpkg journal")
        run_cmd_log("dpkg --configure -a")

        self._cache.clear()
        self._cache.open()

    progress.start('fix-broken')

    # Naughty but don't want to re-initialise
    if self._cache._depcache.broken_count:
        try:
            self._cache._depcache.fix_broken()
        except SystemError as e:
            logger.error('Error attempting to fix broken pkgs', exception=e)

        self._cache.clear()
        self._cache.open()
def layer(self, character_name):
    """
    Return the layer registered for the given character.

    Logs an error and returns None when the character has no layer.
    """
    try:
        return self._layers[character_name]
    except KeyError:
        logger.error("Character '{}' not in character layers".format(
            character_name))
        return None
def is_power_hat_plugged(with_dbus=True, retry_count=5):
    """Check if the Kano PowerHat board is plugged in.

    NOTE: Running this function with_dbus=False must be done when the daemon is
    certainly not running. Otherwise, bad things might happen.

    Args:
        with_dbus (bool): Whether to run the detection through the central dbus
            kano-boards-daemon, or bypass to use the underlying library
        retry_count: See :func:`~kano_peripherals.ck2_pro_hat.driver.high_level.get_ck2_pro_hat_interface`

    Returns:
        bool: Whether the PowerHat is plugged in or not
    """
    is_plugged = False

    try:
        if with_dbus:
            ck2prohat_iface = get_ck2_pro_hat_interface(
                retry_count=retry_count)
            is_plugged = bool(ck2prohat_iface and ck2prohat_iface.detect())
        else:
            is_plugged = CK2ProHatService.quick_detect()
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit are
        # not swallowed by the detection fallback.
        logger.error('Unexpected error occured:\n{}'.format(
            traceback.format_exc()))

    return is_plugged
def set_gfx_driver(enabled):
    """
    Toggle the vc4-kms-v3d OpenGL driver on or off in the boot config.

    When enabling, the fbturbo X11 config is backed up and removed; when
    disabling, the backup is restored if the config is missing.

    Args:
        enabled (bool): True to enable the GLX driver, False to disable
    """
    import errno

    if enabled:
        set_config_value('dtoverlay', 'vc4-kms-v3d')
        try:
            try:
                os.makedirs(os.path.dirname(fpturbo_conf_backup_path))
            except OSError as e:
                # BUGFIX: compare the portable errno code instead of the
                # locale-dependent strerror text ("File exists").
                if e.errno != errno.EEXIST:
                    raise
            shutil.copyfile(fpturbo_conf_path, fpturbo_conf_backup_path)
            os.remove(fpturbo_conf_path)
        except Exception as e:
            logger.error("Error restoring fpturbo_config", exception=e)
    else:
        set_config_value('dtoverlay', None)
        if not os.path.exists(fpturbo_conf_path):
            try:
                shutil.copyfile(fpturbo_conf_backup_path, fpturbo_conf_path)
            except Exception as e:
                logger.error("Error restoring fpturbo_config", exception=e)

    end_config_transaction()
    set_setting('Use_GLX', enabled)
def launch(app_name):
    """
    Launch the given app if it is installed, logging a failure otherwise.

    Does nothing when app_name is None.
    """
    if app_name is None:
        return

    try:
        if check_installed(app_name):
            launch_project(app_name, '', '')
    except ValueError:
        logger.error("Failed to launch app '{}'".format(str(app_name)))
def _add_boot_config_options(self, extra_config):
    """
    Helper function to add a block of options (text) to the boot/config.txt

    Args:
        extra_config - unindented multiline (or not) str as it would go
            into the .txt
    """
    config_path = '/boot/config.txt'

    use_transactions = False
    # if we can't use transactions, fall back to editting the file directly
    tmp_path = config_path

    try:
        try:
            # append uart config to config.txt
            from kano_settings.boot_config import _trans, \
                end_config_transaction
            use_transactions = True
            tmp_path = '/tmp/config.tmp'
            # Work on a temporary copy of the transactional config.
            _trans().copy_to(tmp_path)
        except ImportError:
            # kano_settings unavailable: edit /boot/config.txt in place.
            pass

        with open(tmp_path, 'a') as tmp_config:
            tmp_config.write(extra_config)

        if use_transactions:
            # Push the edited copy back and commit the transaction.
            _trans().copy_from(tmp_path)
            end_config_transaction()
    except:
        logger.error("failed to update config")
def set_avatar(subcat, item, sync=False):
    """ Set the avatar in the local profile structure and optionally sync

    :param subcat: first field to be used (usually character namespace)
    :param item: Dict with the mapping from category to item
    :param sync: (Optional) sync to World
    :returns: True iff an update has happened internally, False iff the
              internal structures were up to date, None if there is a
              version issue
    :rtype: None or Boolean
    """
    profile = load_profile()

    # Only version-2 profiles support this avatar structure.
    if 'version' not in profile or profile['version'] != 2:
        return

    if not isinstance(item, dict):
        logger.error(
            "Incompatible form of item for this version of the API")
        return None

    # Check whether we are updating this value and if so, recreate the assets
    needs_update = True
    if 'avatar' in profile:
        char_ex, items_ex = profile['avatar']
        needs_update = not (char_ex == subcat and items_ex == item)

    if needs_update:
        # Update the profile structure
        profile['avatar'] = [subcat, item]
        save_profile(profile, skip_kdesk_refresh=True)
        if sync:
            sync_profile()

    return needs_update
def clear_tracker_events(old_only=True):
    """ Truncate the events file, removing all the cached data.

        :param old_only: Don't remove data from the current boot.
        :type old_only: boolean
    """
    try:
        rf = open_locked(tracker_events_file, 'r')
    except IOError as e:
        logger.error("Error opening tracking events file {}".format(e))
    else:
        with rf:
            # Keep only the events from the current boot (matching token).
            kept_lines = []
            for line in rf.readlines():
                try:
                    parsed = json.loads(line)
                    if 'token' in parsed and parsed['token'] == TOKEN:
                        kept_lines.append(line)
                except:
                    logger.warn("Found a corrupted event, skipping.")

            with open(tracker_events_file, 'w') as wf:
                for line in kept_lines:
                    wf.write(line)

            if 'SUDO_USER' in os.environ:
                chown_path(tracker_events_file)
def env_select(self, env_name):
    """ Set an environment for the background.

        If the environment given is not unlocked a different unlocked one
        is selected in random

        :param env_name: Environment name
        :returns: True iff the environment exists (is available)
        :rtype: Boolean
    """
    env_inst = self._sel_char_layer().item(env_name)

    # The item only counts as an environment when it belongs to the
    # environment category of the selected character layer.
    if env_inst.category().get_id() == self.env_label:
        if not env_inst.is_unlocked():
            logger.warn(
                "Environment {} is locked, replacing with random".format(
                    env_name))
            # Select randomly among the unlocked environments
            self._sel_env = random.choice(
                [env for env in env_inst.category().items()
                 if env.is_unlocked()])
        else:
            self._sel_env = env_inst

        logger.debug(
            "Selected Environment: {}".format(self._sel_env.get_id()))
        return True
    else:
        logger.error(
            "Environment {} is not in the available env list".format(
                env_name))
        return False
def main():
    """
    Send the Safe Mode boot logs to Kano once, after a settling delay.

    Returns:
        int - 10 when run as root, 20 when sending fails, None otherwise
    """
    # Ensure script is not run as root.
    if os.getuid() == 0:
        return 10

    if _logs_already_sent() or not os.path.exists(LOGS_PATH):
        return

    # Service is running too early in user space. Give the system some
    # time to settle.
    time.sleep(30)

    out, err, rc = run_cmd(
        'kano-feedback-cli'
        ' --title "Kano OS: Safe Mode Boot Logs"'
        ' --description "Kano OS booted into Safe Mode. Debugging logs attached."'
        ' --send --logs {path}'
        ' --flag {flag}'
        ' >{output} 2>&1'.format(path=LOGS_PATH,
                                 flag=LOGS_FLAG_PATH,
                                 output=TMP_LOGS_OUTPUT_PATH))

    if rc != 0:
        logger.error('Could not send logs, kano-feedback-cli rc {}'.format(rc))
        return 20
def merge_conf_files(conf_base, conf_added):
    """
    Merge conf_added into conf_base in place.

    Lists are concatenated, dicts updated and new categories copied over.

    Returns True on success, None when either configuration is invalid,
    when a category's types mismatch, or when a type cannot be handled.
    """
    if not is_valid_configuration(conf_base) or \
            not is_valid_configuration(conf_added):
        return None

    for cat in conf_added.iterkeys():
        if cat not in conf_base:
            # Unknown category: copy it across wholesale.
            conf_base[cat] = conf_added[cat]
            continue

        if type(conf_base[cat]) != type(conf_added[cat]):
            logger.error(
                "base and auxiliary configuration types mismatch")
            return None

        if type(conf_base[cat]) == list:
            conf_base[cat] += conf_added[cat]
        elif type(conf_base[cat]) == dict:
            conf_base[cat].update(conf_added[cat])
        else:
            logger.warning(
                "Can't handle type {}".format(conf_base[cat]))
            return None

    return True
def play_sound(audio_file, background=False):
    """
    Play an audio file with aplay (raw PCM formats) or omxplayer (others).

    Args:
        audio_file (str): path to the audio file to play
        background (bool): when True, play asynchronously and report success

    Returns:
        bool: whether playback succeeded (always True for background play)
    """
    from kano.logging import logger

    # Check if file exists
    if not os.path.isfile(audio_file):
        logger.error('audio file not found: {}'.format(audio_file))
        return False

    _, extension = os.path.splitext(audio_file)

    # BUGFIX: os.path.splitext keeps the leading dot ('.wav'), so the old
    # comparison against ['wav', 'voc', ...] never matched and every file
    # was routed through omxplayer instead of aplay.
    if extension in ['.wav', '.voc', '.raw', '.au']:
        cmd = 'aplay -q {}'.format(audio_file)
    else:
        volume_percent, _ = get_volume()
        volume_str = '--vol {}'.format(
            percent_to_millibel(volume_percent, raspberry_mod=True))
        cmd = 'omxplayer -o both {volume} {link}'.format(
            volume=volume_str, link=audio_file)

    logger.debug('cmd: {}'.format(cmd))

    if background:
        run_bg(cmd)
        rc = 0
    else:
        _, _, rc = run_cmd_log(cmd)

    return rc == 0
def get_board_props(board_name=None):
    """
    Import and return the configuration module for a Raspberry Pi board.

    Args:
        board_name - optional board model name; detected when omitted

    Returns:
        module with CLOCKING and DEFAULT_CONFIG attributes, or None when
        the board module is missing or incomplete
    """
    board_name = board_name or get_rpi_model()

    cpu_profile = get_board_property(board_name, 'cpu_profile')
    if not cpu_profile:
        cpu_profile = RPI_1_CPU_PROFILE

    # Normalise the profile name into a python module name.
    module_name = re.sub(r'[-/ ]', '_', cpu_profile).lower()

    try:
        board_conf = importlib.import_module(
            '{}.{}'.format(__name__, module_name))
    except ImportError:
        logger.error('Board not found')
        return None

    # Reject board modules that are missing required data.
    for prop in ['CLOCKING', 'DEFAULT_CONFIG']:
        if not hasattr(board_conf, prop):
            logger.error('No {} data in board config'.format(
                prop.replace('_', ' ').lower()))
            return None

    # TODO: Validate board info
    return board_conf
def parseRawData(rawdata):
    """
    Split raw `iwlist` scan output into per-cell chunks and parse each.

    Pre-condition: rawdata is not empty.
    """
    cells = []

    # Group non-empty lines under the most recent "Cell " header.
    for raw_line in rawdata.split("\n"):
        try:
            # skip empty lines
            if not raw_line.strip():
                continue

            if raw_line.lstrip().startswith("Cell "):
                # log.debug("parseRawData: new cell")
                cells.append([])

            if len(cells) > 0 and len(raw_line) > 0:
                cells[-1].append(raw_line)
        except Exception as e:
            logger.error('unexpected error occurred while looping rawdatas {}'
                         .format(rawdata), exception=e)

    # Parse each grouped cell chunk individually.
    parsed = {}
    for cell_chunk in cells:
        try:
            number, cell_data = parseCellData("\n".join(cell_chunk))
            parsed[number] = cell_data
        except Exception as e:
            logger.error('unexpected error occurred while parsing cellDataL {}'
                         .format(rawdata), exception=e)

    return parsed