def recursively_copy(src, dst):
    """Copy the contents of directory *src* into directory *dst*.

    Both paths must be existing directories. Returns True on success,
    False on a bad argument or a permission/IO failure (which is logged).
    """
    src_dir = os.path.abspath(src)
    dest_dir = os.path.abspath(dst)

    # Both endpoints must already exist and be directories.
    if not os.path.isdir(src_dir) or not os.path.isdir(dest_dir):
        from kano.logging import logger
        logger.warn(
            "Can't copy '{}' contents into '{}', one of them is not a dir"
            .format(src_dir, dest_dir)
        )
        return False

    try:
        for walk_root, walk_dirs, walk_files in os.walk(src_dir):
            # Mirror the current directory level under the destination.
            target_root = os.path.join(
                dest_dir, os.path.relpath(walk_root, src_dir)
            )

            for sub_dir in walk_dirs:
                os.mkdir(os.path.join(target_root, sub_dir))

            # Copy every regular file at this level.
            for f_name in walk_files:
                shutil.copy(
                    os.path.join(walk_root, f_name),
                    os.path.join(target_root, f_name)
                )
    except (IOError, OSError) as copy_err:
        from kano.logging import logger
        logger.warn(
            "Can't copy '{}' contents into '{}', due to permission/IO - {}"
            .format(src_dir, dest_dir, copy_err)
        )
        return False

    return True
def _start_service(self, service_object_path):
    """
    Helper to start a D-Bus service based on its object_path.
    The implementation is specific to this class service base classes.

    Returns:
        successful - bool whether or not the operation succeeded
    """
    # Unknown object paths cannot be mapped to a service class.
    if service_object_path not in self.services:
        logger.error('ServiceManager: _start_service: No entry for {} in'
                     ' self.services!'.format(service_object_path))
        return False

    try:
        Service = self.services[service_object_path]

        # Pass the KanoHatLeds object to the PiHatService since reinstantiating the
        # object clashes with the audio module.
        if service_object_path == PI_HAT_OBJECT_PATH:
            service_instance = Service(self.bus_name, self.pi_hat_lib)
        else:
            service_instance = Service(self.bus_name)

        # Keep a reference so the service can be looked up / stopped later.
        self.running_services[service_object_path] = service_instance
    except dbus.exceptions.NameExistsException as e:
        # The bus name is taken: most likely a second daemon instance.
        logger.warn(
            'Could not reserve the SystemBus name, most likely another instance'
            ' of kano-boards-daemon already exists.\n{}'.format(e))
        return False
    except Exception as e:
        # Catch-all boundary: log the full traceback, report failure.
        logger.error(
            'Unexpected error when starting the services.\n{}'.format(
                traceback.format_exc()))
        return False

    return True
def clear_tracker_events(old_only=True):
    """ Truncate the events file, removing all the cached data.

        :param old_only: Don't remove data from the current boot.
        :type old_only: boolean
    """
    try:
        rf = open_locked(tracker_events_file, "r")
    except IOError as e:
        logger.error('Error opening tracking events file {}'.format(e))
    else:
        with rf:
            events = []
            for event_line in rf.readlines():
                try:
                    event = json.loads(event_line)

                    # BUGFIX: honour old_only — previously current-boot
                    # events (marked with this boot's TOKEN) were always
                    # preserved even when old_only was False.
                    if old_only and 'token' in event and \
                            event['token'] == TOKEN:
                        events.append(event_line)
                except (TypeError, ValueError):
                    # Narrowed from a bare except: only parse/shape
                    # failures indicate a corrupted line.
                    logger.warn("Found a corrupted event, skipping.")

            # Rewrite the file keeping only the preserved events.
            with open(tracker_events_file, "w") as wf:
                for event_line in events:
                    wf.write(event_line)

        # Restore ownership of the file when running under sudo.
        if 'SUDO_USER' in os.environ:
            chown_path(tracker_events_file)
def load(self):
    """Restore the updater status fields from the JSON status file.

    Reads self._status_file and populates the private state attributes.
    If any expected key is missing (corrupted file), the file is
    re-initialised via save() and the in-memory state is left untouched.
    """
    logger.debug("Loading status instance from file")

    with open(self._status_file, 'r') as status_file:
        try:
            data = json.load(status_file)
            # File format sanity check: Try to access the expected keys
            data['state']
            data['last_update']
            data['last_check']
            data['last_check_urgent']
            data['first_boot_countdown']
            data['is_urgent']
            data['is_scheduled']
            data['is_shutdown']
        except Exception:
            # Initialise the file again if it is corrupted
            logger.warn("The status file was corrupted.")
            self.save()
            return

        self._state = data['state']
        self._last_update = data['last_update']
        self._last_check = data['last_check']
        # 'ind_pkg' is optional for backwards compatibility with
        # status files written by older updater versions.
        self._updatable_independent_packages = data.get('ind_pkg',[])
        self._last_check_urgent = data['last_check_urgent']
        self._first_boot_countdown = data['first_boot_countdown']
        # Boolean flags are persisted on disk as 0/1 integers.
        self._is_urgent = (data['is_urgent'] == 1)
        self._is_scheduled = (data['is_scheduled'] == 1)
        self._is_shutdown = (data['is_shutdown'] == 1)

        # Optional key, also persisted as 0/1.
        if 'notifications_muted' in data:
            self._notifications_muted = (data['notifications_muted'] == 1)
def get_tracker_events(old_only=False):
    """ Read the events log and return a dictionary with all of them.

        :param old_only: Don't return events from the current boot.
        :type old_only: boolean
        :returns: A dictionary suitable to be sent to the tracker endpoint.
        :rtype: dict
    """
    data = {'events': []}

    try:
        rf = open_locked(tracker_events_file, 'r')
    except IOError as e:
        logger.error("Error opening the tracker events file {}".format(e))
    else:
        with rf:
            for event_line in rf.readlines():
                try:
                    event = json.loads(event_line)
                except ValueError:
                    # BUGFIX: without this 'continue' a corrupted line
                    # raised NameError on the first iteration, or
                    # re-validated (and duplicated) the previous line's
                    # event on later iterations.
                    logger.warn("Found a corrupted event, skipping.")
                    continue

                # Only forward valid events from previous boots; the
                # current boot is identified by TOKEN.
                if _validate_event(event) and event['token'] != TOKEN:
                    data['events'].append(event)

    return data
def set_state_writable(self):
    """Raise the config transaction to state 2 (writable).

    Creates a temporary copy of config.txt (or of the default config
    when the real one is missing) and points self.temp_config at it so
    that modifications are staged without touching the live file.
    """
    # State 0 -> 1 first: the transaction must be locked before it can
    # become writable.
    if self.state == 0:
        self.raise_state_to_locked()

    if self.state == 1:
        # Stage modifications in a temp file alongside the real config.
        temp = tempfile.NamedTemporaryFile(mode='w', delete=False,
                                           prefix="config_tmp_",
                                           dir=self.dir)
        self.temp_path = temp.name
        logger.info(
            "Enable modifications in config transaction: {}".format(
                self.temp_path))
        temp.close()

        if os.path.exists(self.path):
            # copy2 preserves the original file's metadata.
            shutil.copy2(self.path, self.temp_path)
        else:
            logger.warn(
                "Could not make a copy of config.txt, using default")
            shutil.copy2(default_config_path, self.temp_path)

        # create temporary
        self.temp_config = BootConfig(self.temp_path)
        self.state = 2
def _evaluate_rules(self, rules):
    """ Evaluates the rules and returns whether the badge has been unlocked

    :param rules: The category whose z-index will be returned
    :returns: True or False if the badge has been achieved. If the rules
              are malformed it returns None
    :rtype: Boolean or NoneType
    """
    warn_template = "Malformed badge rules, missing '{}' - [{}]"

    # Both keys are mandatory for any rule evaluation.
    missing = [key for key in ('operation', 'targets') if key not in rules]
    if missing:
        logger.warn(warn_template.format(missing[0], rules))
        return None

    op = rules['operation']

    if op == 'each_greater':
        return self._are_each_greater(rules['targets'])

    if op == 'sum_greater':
        if 'value' not in rules:
            logger.warn(warn_template.format('value', rules))
            return None
        return self._is_sum_greater(rules['targets'], rules['value'])

    # Unknown operation: treat the rules as malformed.
    return None
def check_corrupt(self):
    # Quick check for corruption in config file.
    # Check that is has at least some expected data
    if not os.path.exists(self.path):
        return True

    try:
        lines = read_file_contents_as_lines(self.path)
    except:
        return True

    # The file is healthy only when every marker parameter appears
    # somewhere in its contents.
    must_contain = set(['dtparam'])
    remaining = set(must_contain)

    for line in lines:
        remaining = set(m for m in remaining if m not in line)
        if not remaining:
            return False

    logger.warn(
        'Parameters {} not found in config.txt, assuming corrupt'
        .format(must_contain)
    )
    return True
def get_avatar_circ_image_path():
    """ Returns a full path to the file that contains the asset that is
    being used as the icon stamp on the desktop.
    To do this, assuming the version of the profile structure is correct,
    it looks into the default avatar asset folder for a file with the
    suffix '_circ_ring.png'. In case of error the string that is returned
    is empty
    :returns: Path to circular image to be used as icon stamp
    :rtype: string
    """
    profile = load_profile()

    if 'version' not in profile or profile['version'] == 1:
        logger.error("Version field not existent or is less than 2")
        return ''

    if profile['version'] == 2:
        asset_dir = AVATAR_DEFAULT_LOC
        matches = [
            fname for fname in list_dir(asset_dir)
            if fname.endswith('_circ_ring.png')
        ]

        if not matches:
            logger.error("Couldn't find a file with the appropriate suffix")
            return ''

        if len(matches) > 1:
            # Return the first one but inform about the existance of multiple
            logger.warn(
                "There are more than one files with appropriate suffix")

        return os.path.join(asset_dir, matches[0])

    logger.error("Unknown profile version: {}".format(profile['version']))
    return ''
def clear_tracker_events(old_only=True):
    """ Truncate the events file, removing all the cached data.

        :param old_only: Don't remove data from the current boot.
        :type old_only: boolean
    """
    try:
        rf = open_locked(tracker_events_file, 'r')
    except IOError as e:
        logger.error("Error opening tracking events file {}".format(e))
    else:
        with rf:
            events = []
            for event_line in rf.readlines():
                try:
                    event = json.loads(event_line)

                    # BUGFIX: honour old_only — previously events from
                    # the current boot (token == TOKEN) were kept even
                    # when old_only was False, i.e. the parameter was
                    # silently ignored.
                    if old_only and 'token' in event and \
                            event['token'] == TOKEN:
                        events.append(event_line)
                except (TypeError, ValueError):
                    # Narrowed from a bare except to parse/shape errors.
                    logger.warn("Found a corrupted event, skipping.")

            # Rewrite the file with only the preserved events.
            with open(tracker_events_file, 'w') as wf:
                for event_line in events:
                    wf.write(event_line)

        # Hand ownership back to the invoking user when run via sudo.
        if 'SUDO_USER' in os.environ:
            chown_path(tracker_events_file)
def set_to_HDMI(HDMI):
    """Route audio output through HDMI or the analogue jack.

    :param HDMI: True for HDMI audio, False for analogue. Forced to
        False when the board does not support HDMI audio.
    """
    # Fall back to analogue if HDMI audio is unsupported on this device.
    if not hdmi_supported:
        HDMI = False

    # 1 analog
    # 2 hdmi

    # These are the changes we'll apply if they have changed from what they were
    if HDMI:
        amixer_cmd = amixer_set_cmd.format(value=hdmi_value)
        set_config_value("hdmi_ignore_edid_audio", None)
        set_config_value("hdmi_drive", 2)
        config = "HDMI"
    else:
        amixer_cmd = amixer_set_cmd.format(value=analogue_value)
        set_config_value("hdmi_ignore_edid_audio", 1)
        set_config_value("hdmi_drive", None)
        config = "Analogue"

    # Set audio path in amixer
    o, e, rc = run_cmd(amixer_cmd)
    if rc:
        logger.warn("error from amixer: {} {} {}".format(o, e, rc))

    # trigger alsa-utils to store the path in /var/lib/alsa/asound.state
    o, e, rc = run_cmd(store_cmd)
    if rc:
        logger.warn("error from alsa-utils: {} {} {}".format(o, e, rc))

    # Persist the chosen route in the kano settings store.
    set_setting('Audio', config)
def get_paused_sessions():
    """Return every session stored in the paused-sessions file.

    Unparseable lines are logged and skipped; a missing or unreadable
    file yields an empty list.
    """
    if not os.path.exists(PAUSED_SESSIONS_FILE):
        return []

    try:
        sessions_f = open_locked(PAUSED_SESSIONS_FILE, 'r')
    except IOError as err:
        logger.error('Error opening the paused sessions file: {}'.format(err))
        return []

    with sessions_f:
        restored = []
        for line in sessions_f:
            if not line:
                continue
            try:
                restored.append(TrackingSession.loads(line))
            except TypeError:
                logger.warn(
                    'Failed to process session: {}'.format(line))
        return restored
def load(self):
    """Restore the updater state fields from the JSON status file.

    When any expected key is absent the file is considered corrupted
    and is re-created via save(), leaving the in-memory state as-is.
    """
    logger.debug("Loading status instance from file")

    required_keys = (
        'state', 'last_update', 'last_check', 'last_check_urgent',
        'first_boot_countdown', 'is_urgent', 'is_scheduled', 'is_shutdown'
    )

    with open(self._status_file, 'r') as status_file:
        try:
            data = json.load(status_file)
            # File format sanity check: Try to access the expected keys
            for key in required_keys:
                data[key]
        except Exception:
            # Initialise the file again if it is corrupted
            logger.warn("The status file was corrupted.")
            self.save()
            return

        self._state = data['state']
        self._last_update = data['last_update']
        self._last_check = data['last_check']
        # Optional key kept for backwards compatibility.
        self._updatable_independent_packages = data.get('ind_pkg', [])
        self._last_check_urgent = data['last_check_urgent']
        self._first_boot_countdown = data['first_boot_countdown']
        # Flags are persisted as 0/1 integers.
        self._is_urgent = (data['is_urgent'] == 1)
        self._is_scheduled = (data['is_scheduled'] == 1)
        self._is_shutdown = (data['is_shutdown'] == 1)

        if 'notifications_muted' in data:
            self._notifications_muted = (data['notifications_muted'] == 1)
def check_internet():
    """Return True when an internet connection is available.

    When no connection is detected, opens the kano-settings network
    screen so the user can connect, then re-checks.
    """
    if is_internet():
        return True

    logger.warn("No internet connection detected")
    # Screen 12 of kano-settings is the connectivity configuration UI.
    os.system("kano-settings 12")
    return is_internet()
def env_select(self, env_name):
    """ Set an environment for the background. If the environment
    given is not unlocked a different unlocked one is selected in
    random

    :param env_name: Environment name
    :returns: True iff the environment exists (is available)
    :rtype: Boolean
    """
    env_inst = self._sel_char_layer().item(env_name)

    # Sanity check: the looked-up item must belong to the environment
    # category, otherwise the name did not refer to an environment.
    if env_inst.category().get_id() == self.env_label:
        if not env_inst.is_unlocked():
            logger.warn(
                "Environment {} is locked, replacing with random".format(
                    env_name))
            # Select randomly among the unlocked environments
            self._sel_env = random.choice(
                [env for env in env_inst.category().items()
                 if env.is_unlocked()])
        else:
            self._sel_env = env_inst
        logger.debug(
            "Selected Environment: {}".format(self._sel_env.get_id()))
        return True
    else:
        logger.error(
            "Environment {} is not in the available env list".format(
                env_name))
        return False
def _write_char_log_file(self, fname):
    """ Creates a file that includes the avatar configuration used when the
    rest of the assets where created as a label. The purpose of this file
    is to avoid the time consuming process of recreating assets that are
    already present and updated.

    :param fname: filename for the configuration file to be created
    :returns: False iff some error occurs
    :rtype: Boolean
    """
    # A character and an environment must both be selected before the
    # configuration can be serialised.
    if self._sel_char is None:
        logger.warn(
            "Character not selected, will abandon writing log file")
        return False

    if self._sel_env is None:
        logger.warn(
            "Environment not selected, will abandon writing log file")
        return False

    obj_av = {}
    # ensure that environments is not present in this dict
    items = self.selected_items_per_cat()
    items.pop(self.env_label, None)
    obj_av['avatar'] = [self._sel_char.get_id(), items]
    obj_av['environment'] = self._sel_env.get_id()
    obj_av['date_created'] = get_date_now()

    # BUGFIX: an IOError while opening/writing used to propagate even
    # though the docstring promises False on error; report it instead.
    try:
        with open(fname, 'w') as fp:
            dump(obj_av, fp)
    except (IOError, OSError) as exc:
        logger.warn(
            "Failed to write avatar log file '{}' - {}".format(fname, exc))
        return False

    return True
def get_tracker_events(old_only=False):
    """ Read the events log and return a dictionary with all of them.

        :param old_only: Don't return events from the current boot.
        :type old_only: boolean
        :returns: A dictionary suitable to be sent to the tracker endpoint.
        :rtype: dict
    """
    data = {'events': []}

    try:
        rf = open_locked(tracker_events_file, "r")
    except IOError as e:
        logger.error('Error opening the tracker events file {}'.format(e))
    else:
        with rf:
            for event_line in rf.readlines():
                try:
                    event = json.loads(event_line)
                except ValueError:
                    # BUGFIX: skip to the next line — previously a
                    # corrupted first line caused a NameError and a
                    # later corrupted line re-validated the previous
                    # event, duplicating it.
                    logger.warn("Found a corrupted event, skipping.")
                    continue

                # Only include valid events from previous boots; the
                # current boot is marked with TOKEN.
                if _validate_event(event) and event['token'] != TOKEN:
                    data['events'].append(event)

    return data
def get_avatar_circ_image_path():
    """ Returns a full path to the file that contains the asset that is
    being used as the icon stamp on the desktop.
    To do this, assuming the version of the profile structure is correct,
    it looks into the default avatar asset folder for a file with the
    suffix '_circ_ring.png'. In case of error the string that is returned
    is empty
    :returns: Path to circular image to be used as icon stamp
    :rtype: string
    """
    profile = load_profile()
    version = profile['version'] if 'version' in profile else None

    # Version 1 profiles (or unversioned ones) have no circular asset.
    if version is None or version == 1:
        logger.error("Version field not existent or is less than 2")
        return ''

    if version != 2:
        logger.error(
            'Unknown profile version: {}'.format(profile['version'])
        )
        return ''

    candidates = [
        entry for entry in list_dir(AVATAR_DEFAULT_LOC)
        if entry.endswith('_circ_ring.png')
    ]

    if not candidates:
        logger.error("Couldn't find a file with the appropriate suffix")
        return ''

    if len(candidates) > 1:
        # Return the first one but inform about the existance of multiple
        logger.warn(
            "There are more than one files with appropriate suffix"
        )

    return os.path.join(AVATAR_DEFAULT_LOC, candidates[0])
def _locking_thread(self):
    """
    Check if the locking processes are still alive.

    This method is run in a separate thread with GObject. If any process
    that locked the API has died, it automatically unlocks its priority
    level. It keeps executing as long as there are locks.
    """
    for priority in xrange(len(self.locks)):
        lock_data = self.locks.get(priority)
        if lock_data is not None:
            try:
                # Signal 0 performs error checking only: it raises
                # OSError when the PID no longer exists.
                os.kill(lock_data['PID'], 0)
            except OSError:
                # the current locking process has died
                logger.warn(
                    '[{}] with PID [{}] and priority [{}] died and forgot'
                    ' to unlock the LED Speaker API. Unlocking.'.format(
                        lock_data['cmd'], lock_data['PID'], priority))
                self.locks.remove_priority(priority)
            except Exception as e:
                # Defensive catch-all so the watcher thread survives.
                logger.warn(
                    'Something unexpected occurred in _locking_thread'
                    ' - [{}]'.format(e))

    # while there are still locks active, keep calling this function indefinitely
    return not self.locks.is_empty()
def _evaluate_rules(self, rules):
    """ Evaluates the rules and returns whether the badge has been unlocked

    :param rules: The category whose z-index will be returned
    :returns: True or False if the badge has been achieved. If the rules
              are malformed it returns None
    :rtype: Boolean or NoneType
    """
    warn_template = "Malformed badge rules, missing '{}' - [{}]"

    for required in ('operation', 'targets'):
        if required not in rules:
            logger.warn(warn_template.format(required, rules))
            return None

    operation = rules['operation']
    targets = rules['targets']

    if operation == 'each_greater':
        return self._are_each_greater(targets)

    if operation == 'sum_greater':
        if 'value' in rules:
            return self._is_sum_greater(targets, rules['value'])
        logger.warn(warn_template.format('value', rules))
        return None

    # Unrecognised operation: treat as malformed rules.
    return None
def session_end(session_file):
    """Mark a tracking session as finished.

    Re-reads the session file under a lock, records the elapsed run
    time and the finished flag, then writes the data back in place.
    If the file has disappeared the runtime is simply not logged.
    """
    if not os.path.exists(session_file):
        msg = "Someone removed the tracker file, the runtime of this " \
              "app will not be logged"
        logger.warn(msg)
        return

    try:
        rf = open_locked(session_file, 'r')
    except IOError as e:
        logger.error("Error opening the tracker session file {}".format(e))
    else:
        with rf:
            data = json.load(rf)

            # Elapsed time is measured from the 'started' timestamp
            # recorded when the session began.
            data['elapsed'] = int(time.time()) - data['started']
            data['finished'] = True

            try:
                wf = open(session_file, 'w')
            except IOError as e:
                logger.error(
                    "Error opening the tracker session file {}".format(e))
            else:
                with wf:
                    json.dump(data, wf)

        # Hand ownership back to the invoking user when run via sudo.
        if 'SUDO_USER' in os.environ:
            chown_path(session_file)
def set_hostname_postinst():
    # when running as post install, get the existing first user and set as host name
    new_hostname = get_first_username()
    if new_hostname is None:
        logger.warn("No first user")
        return
    set_hostname(new_hostname)
def _emergency_exit_cb(self, widget, data=None):
    """Count emergency-button presses; quit the flow once the
    EMERGENCY_EXIT_CLICKS threshold is reached."""
    self._emergency_counter += 1
    logger.warn(
        "Emergency button pressed {}x".format(self._emergency_counter))

    if self._emergency_counter >= self.EMERGENCY_EXIT_CLICKS:
        logger.warn("Emergency exiting the init flow")
        # Mark the flow complete before tearing down the GTK main loop.
        self._ctl.complete()
        Gtk.main_quit()
def state(self, value):
    """Set the updater state, rejecting values outside _valid_states.

    :raises UpdaterStatusError: when value is not a recognised state.
    """
    if value not in self._valid_states:
        msg = "'{}' is not a valid state".format(value)
        logger.warn(msg)
        raise UpdaterStatusError(msg)

    logger.info("Setting the status' state to: {}".format(value))
    self._state = value
def add_item(self, cat_name, item_obj):
    """Attach item_obj to the category named cat_name, if present.

    A missing category is logged and the item is skipped.
    """
    cat = self.category(cat_name)
    if not cat:
        logger.warn(
            "Category '{}' not available in [{}], skipping '{}'".format(
                cat_name, self, item_obj))
        return
    cat.add_item(item_obj)
def install_ind_package(progress, package): status = UpdaterStatus.get_instance() # install an independent package. previous_state = status.state # Allow installing only if the updater is in certain safe states. if status.state not in [UpdaterStatus.NO_UPDATES, UpdaterStatus.UPDATES_AVAILABLE, UpdaterStatus.UPDATES_INSTALLED]: msg = "The install is already running" logger.warn(msg) progress.abort(msg) return False if package not in status.updatable_independent_packages: msg = "tried to install non-independent package {} using update_ind_pkg".format(package) logger.warn(msg) progress.abort(msg) return False status.state = UpdaterStatus.INSTALLING_INDEPENDENT status.save() update_sources_phase = 'updating-sources' installing_idp_phase = 'installing-idp-package' progress.split( Phase( update_sources_phase, _("Updating apt sources"), 10 ), Phase( installing_idp_phase, _("Installing independent package"), 90 ) ) apt_handle = AptWrapper.get_instance() progress.start(update_sources_phase) apt_handle.update(progress) progress.start(installing_idp_phase) apt_handle.upgrade(package, progress) status.state = previous_state status.last_update = int(time.time()) # always check independent packages as NONE as urgent updates to # these packages are dealt with by the main updater status.updatable_independent_packages = get_ind_packages(Priority.NONE) status.is_scheduled = False status.save() progress.finish(_("Update completed")) return True
def create_user(username):
    """ Create and initialise an account for a new user.

    The user will be added to several default groups, including kanousers.
    This function requires root permissions to run properly.
    Will rase in case of an error.

    :param username: The name of the new user
    :type name: str
    """
    if user_exists(username):
        raise UserError(
            _("The user '{string_username}' already exists").format(
                string_username=username))

    home = "/home/{}".format(username)
    home_old = '/home/' + username + '-old'
    if os.path.exists(home):
        # BUGFIX: .format() previously bound only to the second string
        # literal of the concatenation, so the '{}' for the username was
        # never substituted and the home_old argument was dropped.
        msg = ("The home directory for the new user '{}' was there already, "
               "moving it to {}").format(username, home_old)
        logger.warn(msg)
        shutil.move(home, home_old)

    # The umask force is used to blind the actual /home/username
    # folder from other users
    umask_override = '0077'
    cmd = "useradd -u {} -m -K UMASK={} -s /bin/bash {}".format(
        get_next_uid(), umask_override, username)
    _, _, rv = run_cmd_log(cmd)

    if rv != 0:
        msg = N_("Unable to create new user, useradd failed.")
        logger.error(msg)
        raise UserError(_(msg))

    cmd = "echo '{}:{}' | chpasswd".format(username, DEFAULT_USER_PASSWORD)
    _, _, rv = run_cmd_log(cmd)

    if rv != 0:
        # Roll back the partially-created account before raising.
        delete_user(username)
        msg = N_("Unable to change the new user's password, chpasswd failed.")
        logger.error(msg)
        raise UserError(_(msg))

    # Make sure the kanousers group exists
    if not group_exists('kanousers'):
        _, _, rv = run_cmd_log('groupadd kanousers -f')
        if rv != 0:
            msg = N_("Unable to create the kanousers group, groupadd failed.")
            raise UserError(_(msg))

    # Add the new user to all necessary groups
    cmd = "usermod -G '{}' {}".format(DEFAULT_USER_GROUPS, username)
    _, _, rv = run_cmd_log(cmd)
    if rv != 0:
        # Non-fatal: the account exists, but record that group setup failed.
        logger.error(
            "Unable to add {} to the default groups.".format(username))
def _replace_locked(item):
    """Return item, or a random unlocked item from the same category
    when the given one is still locked."""
    if item.is_unlocked():
        return item

    logger.warn(
        "Item {} is locked, replacing with random from " \
        "its category".format(item))
    unlocked = [
        obj for obj in item.category().items() if obj.is_unlocked()
    ]
    return random.choice(unlocked)
def migrate_repository(apt_file, old_repo, new_repo):
    """Point apt_file at new_repo instead of old_repo and refresh apt.

    A failed rewrite is logged and leaves the apt cache untouched.
    """
    try:
        sed(old_repo, new_repo, apt_file, use_regexp=False)
    except IOError as exc:
        logger.warn("Changing repository URL failed ({})".format(exc))
        return

    # TODO: track progress of this
    apt_handle.clear_cache()
    apt_handle.update(DummyProgress())
def discover_devices():
    """Put the bluetooth adaptor into device discovery mode.

    No-op (with a warning) when bluetooth is not available.
    """
    if not is_bluetooth_available():
        logger.warn("No bluetooth available")
        return

    try:
        # Consistency fix: resolve the adaptor interface lazily, the same
        # way stop_discovering_devices() does, rather than using the
        # module-level ADAPTOR_IFACE which may have been created before
        # DBus was ready.
        get_adaptor_iface().StartDiscovery()
    except dbus.DBusException as e:
        logger.error("Error entering bluetooth discovery mode. " \
                     "This is likely because DBus isn't ready", exception=e)
def load(self):
    """Restore stage and username from the locked status file.

    Re-initialises the file via save() when it cannot be parsed or is
    missing the expected keys.
    """
    with open_locked(self._status_file, 'r', timeout=1.0) as status_file:
        try:
            data = json.load(status_file)
            self._stage = data['stage']
            self._username = data['username']
        except (ValueError, KeyError, TypeError):
            # Narrowed from a bare except so that KeyboardInterrupt and
            # SystemExit are no longer swallowed; these three cover a
            # bad JSON document, missing keys and a non-dict payload.
            # Initialise the file again if it is corrupted
            logger.warn("The status file was corrupted.")
            self.save()
            return
def expand_partition(partition):
    '''
    Expands the given partition to the maximum available size.

    Args:
        partition (dict): Partition to expand. Must be of the form of
            :const:`kano_updater.expand_fs.schemas.DISK_SCHEMA`

    Returns:
        int: Success code for the operation as defined by members of
            :class:`kano_updater.expand_fs.return_codes.RC`
    '''

    # Validate the input shape before touching any disk state.
    try:
        jsonschema.validate(partition, PARTITION_SCHEMA)
    except jsonschema.ValidationError:
        logger.error(
            'Partiton supplied for expand does not match schema.\n'
            'Expected: {expected}\n'
            'Got: {got}\n'
            .format(expected=PARTITION_SCHEMA, got=partition)
        )
        return RC.E_INVALID_PARTITION_FORMAT

    partition_number = get_partition_number(partition['node'])

    if partition_number < 0:
        logger.error('Could not determine extended partition number')
        return RC.E_PARTITION_NUMBER_NOT_FOUND

    # TODO: Check that the extended and root partitions lie at the end of the
    # partition table

    # Run parted command first as a script and if that fails due to it asking
    # a question, revert to command which auto-answers yes
    cmd = (
        "parted {disk} --script unit % resizepart {partition} 100 || "
        "parted {disk} ---pretend-input-tty unit % resizepart {partition} "
        "Yes 100"
        .format(
            disk=DISK,
            partition=partition_number,
        )
    )
    out, err, rc = run_cmd(cmd)

    # A non-zero exit from both parted invocations means the resize failed;
    # keep the full output in the logs for diagnosis.
    if rc != 0:
        logger.error('Partition expand command failed: {cmd}'.format(cmd=cmd))
        logger.warn('Parted stdout: {out}'.format(out=out))
        logger.warn('Parted stderr: {err}'.format(err=err))
        return RC.E_PARTITION_EXPAND_FAILED

    return RC.SUCCESS
def stop_discovering_devices():
    """Take the bluetooth adaptor out of device discovery mode.

    No-op (with a warning) when bluetooth is not available.
    """
    if not is_bluetooth_available():
        logger.warn("No bluetooth available")
        return

    try:
        get_adaptor_iface().StopDiscovery()
    except dbus.DBusException as e:
        logger.error("Error exiting bluetooth discovery mode. " \
                     "This is likely because DBus isn't ready", exception=e)
def get_path_owner(path):
    """Return the username owning *path*, or '' when it cannot be read."""
    try:
        return pwd.getpwuid(os.stat(path).st_uid).pw_name
    except (IOError, OSError) as exc_err:
        from kano.logging import logger
        logger.warn(
            "Can't get path owner on {} due to permission/IO issues - {}".
            format(path, exc_err))
        return ''
def main():
    """Apply the safe-mode config.txt settings (requires root).

    Returns 10 when not running as root; backs up the current config
    before overwriting it.
    """
    # Editing config.txt needs root privileges.
    if os.getuid() != 0:
        return 10

    logger.warn("Applying Safe Mode config.txt settings")

    # Keep a backup of the current config before changing it.
    config_copy_to(CONFIG_TXT_BACKUP_PATH)
    set_safe_mode_config()
    end_config_transaction()
def get_path_owner(path):
    """Return the username that owns *path*; empty string on failure."""
    owner = ''
    try:
        stat_info = os.stat(path)
        owner = pwd.getpwuid(stat_info.st_uid).pw_name
    except (IOError, OSError) as exc_err:
        # Import locally so the failure path is the only one that needs
        # the kano logging package.
        from kano.logging import logger
        logger.warn(
            "Can't get path owner on {} due to permission/IO issues - {}"
            .format(path, exc_err)
        )
    return owner
def _should_skip_init_flow(self):
    """Return True when init.conf explicitly asks to skip the init flow.

    A missing, unreadable or malformed configuration means "do not skip".
    """
    if not os.path.exists(self.INIT_CONF):
        return False

    with open(self.INIT_CONF, 'r') as f:
        try:
            init_conf = json.load(f)
            return ('kano_init_flow' in init_conf and
                    'skip' in init_conf['kano_init_flow'] and
                    init_conf['kano_init_flow']['skip'])
        except (ValueError, TypeError, KeyError):
            # Narrowed from a bare except so real errors (e.g.
            # KeyboardInterrupt) are no longer hidden; these cover bad
            # JSON and unexpected document shapes.
            logger.warn('Failed to parse init.conf')

    return False
def remove_user_files(files):
    """Delete each filename listed in *files* from every user's home.

    Failures to delete are logged and do not stop the sweep.
    """
    logger.info("utils / remove_user_files files:{}".format(files))

    for entry in os.listdir("/home/"):
        if not os.path.isdir("/home/{}/".format(entry)):
            continue
        for f in files:
            file_path = "/home/{}/{}".format(entry, f)
            if os.path.exists(file_path):
                logger.info("trying to delete file: {}".format(file_path))
                try:
                    os.remove(file_path)
                except:
                    logger.warn("could not delete file: {}".format(file_path))
def f_retry(*args, **kwargs):
    """Call f, retrying on ExceptionToCheck with exponential backoff.

    Retries up to tries-1 times, sleeping mdelay seconds between
    attempts and multiplying the delay by backoff after each failure.
    """
    mtries, mdelay = tries, delay
    while mtries > 1:
        try:
            heartbeat()
            return f(*args, **kwargs)
        except ExceptionToCheck as err:
            # BUGFIX: use the 'as' form (valid on Python 2.6+ and 3.x)
            # instead of the legacy 'except E, err' which is a
            # SyntaxError on Python 3.
            logger.warn("{}, Retrying in {} seconds...".format(
                str(err), mdelay))
            time.sleep(mdelay)
            mtries -= 1
            mdelay *= backoff
def _populate_object_structures(self, conf_data):
    """ Populates internal structures related to items

    :param conf_data: YAML format configuration structure read from file
    """
    for raw_obj in conf_data[self.objects_label]:
        obj, char, cat = AvatarAccessory.from_data(raw_obj)
        char_layer = self.layer(char)
        if not char_layer:
            # The item references a character layer we don't have.
            logger.warn(
                "Character layer '{}' missing, skipping item " \
                "'{}'".format(char, obj.get_id()))
            continue
        char_layer.add_item(cat, obj)
def parse_youtube_entries(entries):
    """Convert raw YouTube (API v2) feed entries into plain dicts.

    :param entries: list of entry dicts from the YouTube feed
    :returns: list of dicts with author/title/description/thumbnails/
        duration/viewcount per entry
    """
    # Start from a clean temporary directory.
    if os.path.exists(tmp_dir):
        rmtree(tmp_dir)
    os.makedirs(tmp_dir)

    my_entries = list()

    for e in entries:
        # BUGFIX: default both thumbnails so an entry without a 120x90
        # or 480x360 thumbnail no longer raises NameError (first entry)
        # or silently reuses the previous entry's URLs.
        thumbnail = None
        bigthumb = None

        # Small thumbnail
        for thumb in e['media$group']['media$thumbnail']:
            if thumb['width'] == 120 and thumb['height'] == 90:
                thumbnail = thumb['url']
                break

        # Big thumbnail
        for thumb in e['media$group']['media$thumbnail']:
            if thumb['width'] == 480 and thumb['height'] == 360:
                bigthumb = thumb['url']
                break

        author = e['author'][0]['name']['$t'].encode('utf-8')
        title = e['title']['$t'].encode('utf-8')
        description = e['media$group']['media$description']['$t'].encode('utf-8')
        video_url = e['media$group']['media$content'][0]['url']
        duration = e['media$group']['media$content'][0]['duration']
        # Floor division keeps the Python 2 integer semantics on Python 3.
        duration_min = duration // 60
        duration_sec = duration % 60

        # On youtube version 2, eventually the viewCount key is not returned
        try:
            viewcount = int(e['yt$statistics']['viewCount'])
        except Exception:
            viewcount = 0
            logger.warn('Viewcount data couldn\'t be retrieved')

        entry_data = {
            'author': author,
            'title': title,
            'description': description,
            'video_url': video_url,
            'duration': duration,
            'duration_min': duration_min,
            'duration_sec': duration_sec,
            'viewcount': viewcount,
            'thumbnail': thumbnail,
            'big_thumb': bigthumb
        }

        my_entries.append(entry_data)

    return my_entries
def _load_system_modules(self):
    """Discover and instantiate quest modules from QUESTS_LOAD_PATHS.

    Each subdirectory of a load path is expected to contain a quest.py
    exposing an init() function that returns the quest class.
    """
    for load_path in QUESTS_LOAD_PATHS:
        if not os.path.exists(load_path):
            logger.warn("'{}' not found".format(load_path))
            continue

        for entry in os.listdir(load_path):
            full_path = os.path.join(load_path, entry, 'quest.py')

            # BUGFIX: the module name is the subdirectory name itself.
            # os.path.basename(os.path.dirname(entry)) always produced
            # '' because listdir() returns bare names with no dirname.
            modname = entry

            if os.path.isfile(full_path):
                qmod = imp.load_source(modname, full_path)
                q_class = qmod.init()
                q = q_class(self)
                # Avoid registering the same quest twice.
                if not self._quest_exists(q):
                    self._quests.append(q)
def set_safeboot_mode():
    """Write a conservative display configuration for safe boot.

    Forces HDMI hotplug with signal boost, DMT group mode 16 (a basic
    resolution most monitors accept — see the RPi config.txt docs) and
    zeroes out all overscan values.
    """
    logger.warn("Safe boot requested")

    for key, value in (
            ('hdmi_force_hotplug', 1),
            ('config_hdmi_boost', 4),
            ('hdmi_group', 2),
            ('hdmi_mode', 16),
            ('disable_overscan', 1),
            ('overscan_left', 0),
            ('overscan_right', 0),
            ('overscan_top', 0),
            ('overscan_bottom', 0)):
        set_screen_value(key, value)
def set_safeboot_mode():
    """Apply a fail-safe display configuration to config.txt.

    Forces HDMI hotplug/boost, DMT group mode 16 and disables overscan
    so that virtually any monitor can show the boot screen.
    """
    logger.warn("Safe boot requested")

    safe_values = [
        ("hdmi_force_hotplug", 1),
        ("config_hdmi_boost", 4),
        ("hdmi_group", 2),
        ("hdmi_mode", 16),
        ("disable_overscan", 1),
        ("overscan_left", 0),
        ("overscan_right", 0),
        ("overscan_top", 0),
        ("overscan_bottom", 0),
    ]
    # Apply the settings in the same order as before.
    for key, value in safe_values:
        set_config_value(key, value)