def _reboot_alert_cb(self, alert, response_id): self.remove_alert(alert) if response_id is Gtk.ResponseType.OK: try: utils.reboot() except Exception as e: _logger.error('Cannot reboot: %s' % e)
def _check_cpu_load(self): #self._log.info('Entering _check_cpu_load()') load = self._get_5_min_cpu_load() if load > self._cpu_load_max: self._log.info('CPU 5 minute load average is %f ' % load) self._log.info('CPU load is excessive, rebooting the system') utils.reboot() self._stop_event.set()
def reboot(): version = utils.getSetting('dVersion') utils.setSetting('cVersion', version) utils.setSetting('dVersion', '0.0.0') cmd = 'recoveryflash /recovery/update.zip' utils.reboot(cmd)
def factory(): try: set_alt_rootfs(True) mount_recovery() open("/mnt/var/ftk/recovery", 'a').close() umount_recovery() reboot() except StandardError as err: panic("", err)
def reboot(): version = utils.getSetting('dVersion') utils.setSetting('cVersion', version) utils.setSetting('dVersion', '0.0.0') cmd = 'recoveryflash' utils.reboot(cmd)
def restart_services(services): for service in services: if service == 'monit': try: subprocess.call(['monit', 'reload']) except subprocess.SubprocessError: raise RestartServicesError(f'monit reload') if service == 'reboot': utils.reboot() try: subprocess.call(['systemctl', 'restart', service]) except subprocess.SubprocessError: raise RestartServicesError(service)
def take_pictures(n=3, sleep_time=3): try: camera_config = config['camera'] light_only_for_pictures = config['light_only_for_pictures'] with PiCamera() as camera: try: configure_camera(camera, camera_config) if light_only_for_pictures: switch_light("on") if config['annotate_config_to_pictures']: annotate_picture(camera, camera_config) camera.start_preview(fullscreen=False, window=tuple(camera_config['preview']['window'])) for i in range(1, n+1): time.sleep(sleep_time if i == 0 else 1) fn = f'{PICTURES_PATH}/{datetime.now().strftime("%Y-%m-%d %H-%M-%S")}.jpg' camera.capture(fn, quality=camera_config['quality']) logging.info(f'Saving picture{" " + str(i) if n>1 else ""} to {fn}') except: logging.error(sys.exc_info()[1], exc_info=sys.exc_info()) finally: camera.stop_preview() if light_only_for_pictures: switch_light("off") sync_all_files(config['cloud']) except picamera.exc.PiCameraMMALError: logging.error(sys.exc_info()[1], exc_info=sys.exc_info()) reboot('Suspecting PiCamera out of resources => rebooting') except: logging.error(sys.exc_info()[1], exc_info=sys.exc_info())
def _check_comm(self): """Reboot if there hasn't been a RUDICS connection for an extended period or if we have been connected for an unbelievably long time. """ if self._comm_state is 'init': #self._log.debug('comm_state is init') self._timer = time.time() self._comm_state = 'starting_up' return elif self._comm_state is 'starting_up': #self._log.debug('comm_state is starting_up') if self._connected(): #self._log.info('Connected to RUDICS server') self._timer = time.time() self._comm_state = 'connected' return if utils.get_et_secs( self._timer) > super_config.comm_max_init_time: self._log.error('Rebooting - no initial RUDICS connection') utils.reboot() return elif self._comm_state is 'connected': #self._log.debug('comm_state is connected') if not self._connected(): #self._log.info('Disconnected from RUDICS server') self._timer = time.time() self._comm_state = 'disconnected' return if utils.get_et_secs(self._timer) > super_config.comm_max_up_time: self._log.error('Rebooting - exceeded max RUDICS connect time') self._rebooting = True utils.reboot() return elif self._comm_state is 'disconnected': #self._log.debug('comm_state is disconnected') if self._connected(): #self._log.info('Connected to RUDICS server') self._timer = time.time() self._comm_state = 'connected' return if utils.get_et_secs( self._timer) > super_config.comm_max_down_time: self._log.error( 'Rebooting - exceeded max RUDICS disconnect time') self._rebooting = True utils.reboot() return else: self._log.error( 'Unknown state value in CommWatchdog._check_comm()')
def normal(): try: set_alt_rootfs(False) reboot() except StandardError as err: panic("", err)
def recovery(): try: set_alt_rootfs(True) reboot() except StandardError as err: panic("", err)
def reboot(): utils.reboot() return redirect("/")
except ApplyConfigError: post_error_status('config') print('------------------------------------------') print('--------------Apply Crontab---------------') try: apply_crontab(remoteConfig, cronFile) except ApplyCrontabError: post_error_status('crontab') print('------------------------------------------') print('----------------Save Config---------------') try: save_config(remoteConfig, envFile) except SaveConfigError: post_error_status('config') print('------------------------------------------') postStatus = args['post_status'] if postStatus: print('------------Post Service Status-----------') try: post_service_status() except PostServiceStatusError: post_error_status('status') print('------------------------------------------') rebootArg = args['reboot'] if rebootArg: reboot()
def parse(self, message): print '[self.] received: {}'.format(message) black_list = [] try: # if 'learnwav' in message or 'respondwav_single' in message or 'respondwav_sentence' in message: # _, filename = message.split() # if filename in black_list: # print 'SKIPPING BAD FILE {}'.format(filename) # return if message == 'dream': self.state['memoryRecording'] = False self.state['autorespond_sentence'] = False self.state['ambientSound'] = False self.state['autolearn'] = False self.state['autorespond_single'] = False self.state['_audioLearningStatus'] = False self.state['record'] = False self.publisher.send_json(self.state) self.event.send_json({'dream': True}) if message == 'reboot': utils.reboot() if message == 'appendCurrentSettings': self.association.send_pyobj([message]) self.association.recv_pyobj() if 'getCurrentSettings' in message: msg, value = message.split() self.association.send_pyobj([msg, value]) self.association.recv_pyobj() if 'i_am_speaking' in message: _, value = message.split() self.state['i_am_speaking'] = value in ['True', '1'] if 'enable_say_something' in message: _, value = message.split() self.state['enable_say_something'] = value in ['True', '1'] if 'last_segment_ids' in message: the_ids = message[17:] self.event.send_json({'last_segment_ids': loads(the_ids) }) if 'last_most_significant_audio_id' in message: audio_id = message[31:] self.event.send_json({'last_most_significant_audio_id': audio_id }) if message == 'clear play_events': self.event.send_json({'clear play_events' : 'True'}) if 'calculate_cochlear' in message: _, wav_file = message.split() t0 = time.time() try: brain.cochlear(utils.wait_for_wav(wav_file), stride=IO.NAP_STRIDE, rate=IO.NAP_RATE) except: print 'SHOULD {} BE BLACKLISTED?'.format(wav_file) black_list.append(wav_file) print 'Calculating cochlear neural activation patterns took {} seconds'.format(time.time() - t0) if message == 'evolve': self.state['memoryRecording'] = False self.state['autorespond_sentence'] = False self.state['autolearn'] = False self.state['autorespond_single'] = False self.state['_audioLearningStatus'] = False self.state['record'] = False self.publisher.send_json(self.state) self.association.send_pyobj(['evolve']) self.association.recv_pyobj() if 'register' in message and 'BRAIN' in message: _, name, free = message.split() self.state['brains'][name] = int(free) if 'fullscreen' in message: _, value = message.split() self.event.send_json({ 'fullscreen': value in ['True', '1'] }) if 'display2' in message: _, value = message.split() self.event.send_json({ 'display2': value in ['True', '1'] }) if message == 'startrec': self.state['record'] = True if message == 'stoprec': self.state['record'] = False if 'facerecognition' in message: _, value = message.split() self.state['facerecognition'] = value in ['True', '1'] if 'print_me' in message: self.event.send_json({ 'print_me': message[7:] }) if 'play_id' in message: self.event.send_json({ 'play_id': message[8:] }) if 'testSentence' in message: self.event.send_json({ 'testSentence': message[13:] }) if 'assoc_setPlotting' in message: self.event.send_json({ 'assoc_setPlotting': message[18:] in ['True', '1'] }) if 'assoc_setParam' in message: self.event.send_json({ 'assoc_setParam': message[15:] }) if 'respond_setParam' in message: self.event.send_json({ 'respond_setParam': message[17:] }) if 'memoryRecording' in message: self.state['memoryRecording'] = message[16:] in ['True', '1'] if '_audioLearningStatus' in message: self.state['_audioLearningStatus'] = message[21:] in ['True', '1'] if 'roboActive' in message: self.state['roboActive'] = int(message[11:]) if 'ambientSound' in message: self.state['ambientSound'] = int(message[13:]) if 'decrement' in message: _, name = message.split() self.state['brains'][name] -= 1 print '{} has now {} available slots'.format(name, self.state['brains'][name]) if 'learnwav' in message: _, filename = message.split() self.event.send_json({ 'learn': True, 'filename': filename }) if 'respondwav_single' in message: _, filename = message.split() self.event.send_json({ 'respond_single': True, 'filename': filename }) if 'respondwav_sentence' in message: _, filename = message.split() self.event.send_json({ 'respond_sentence': True, 'filename': filename }) if 'play_sentence' in message: print 'playSentence', message sentence = message[13:] self.event.send_json({ 'play_sentence':True, 'sentence': sentence }) if 'rhyme' in message: _, value = message.split() self.event.send_json({'rhyme': value == 'True'}) if 'urge_to_say_something' in message: _, value = message.split() self.event.send_json({'urge_to_say_something': value}) if 'autolearn' in message: self.state['autolearn'] = message[10:] in ['True', '1'] if 'autorespond_single' in message: self.state['autorespond_single'] = message[19:] in ['True', '1'] if 'autorespond_sentence' in message: self.state['autorespond_sentence'] = message[21:] in ['True', '1'] if 'inputLevel' in message: self.event.send_json({ 'inputLevel': message[11:] }) if 'calibrateEq' in message: self.event.send_json({ 'calibrateEq': True }) if 'calibrateAudio' in message: latency_ok = False try: lat = open('roundtrip_latency.txt', 'r') latency = float(lat.readline()) self.event.send_json({ 'setLatency': latency }) latency_ok = True except Exception, e: print 'Something went wrong when reading latency from file.', e self.event.send_json({ 'calibrateAudio': True }) if latency_ok: self.event.send_json({ 'calibrateNoiseFloor': True }) if 'calibrateAudio memoryRecording' in message: self.state['memoryRecording'] = True if 'csinstr' in message: self.event.send_json({ 'csinstr': message[8:] }) if 'selfDucking' in message: self.event.send_json({ 'selfDucking': message[12:] }) if 'zerochannels' in message: self.event.send_json({ 'zerochannels': message[13:] }) if 'playfile' in message: self.event.send_json({ 'playfile': message[9:] }) if 'selfvoice' in message: self.event.send_json({ 'selfvoice': message[10:] }) if 'save' in message: self.event.send_json({ 'save': utils.brain_name() if len(message) == 4 else message[5:] }) if 'load' in message: if len(message) == 4: brain_name = utils.find_last_valid_brain() else: _, brain_name = message.split() if brain_name: self.event.send_json({ 'load': brain_name }) self.publisher.send_json(self.state)
def set_pa_portal_api_key(): cmd = '<operations><request><license><api-key><set><key>' + keys_license.pa_portal_api_key + '</key></set></api-key></license></request></operations></request>' utils.xml_to_dictionary(utils.palo_alto_api_call(device, cmd, **creditials)) def deactive_license(): cmd = '<request><license><deactivate><VM-Capacity><mode>auto</mode></VM-Capacity></deactivate></license></request>' utils.xml_to_dictionary(utils.palo_alto_api_call(device, cmd, **creditials)) time.sleep(10) def set_auth_key(): ''' This command reboots the device and returns garbage. ''' cmd = '<request><license><fetch><auth-code>' + keys_license.auth_code + '</auth-code></fetch></license></request>' utils.palo_alto_api_call(device, cmd, **creditials) time.sleep(10) for device in devices: set_pa_portal_api_key() deactive_license() utils.reboot() utils.wait_for_shutdown() utils.check_if_device_booted() set_auth_key() # This works even tho API errors # <request><license><fetch><auth-code>V3039712</auth-code></fetch></license></request>