def set_config_from_onboarding(config_settings: OnboardingConfig):
    """Apply the user's onboarding answers to the Onionr configuration.

    Maps onboarding fields (threat model, theme, plugin choices) onto
    config keys and saves the file, marking onboarding as completed.
    """
    get = _get_val_or_none  # tolerant getter: missing fields read as None
    config.reload()  # start from the on-disk configuration
    # Being a state target, or declining network contribution, raises
    # the security level to 1.
    if get(config_settings, 'stateTarget') or not get(config_settings, 'networkContrib'):
        config.set('general.security_level', 1)
    # A local threat model is strictest: max level and no LAN transport.
    if get(config_settings, 'localThreat'):
        config.set('general.security_level', 3)
        config.set('transports.lan', False)
    # Light theme is the default; dark only when explicitly requested.
    config.set('ui.theme', 'light')
    if get(config_settings, 'useDark'):
        config.set('ui.theme', 'dark')
    disabled = config.get('plugins.disabled', [])
    # 'flow' is disabled when circles were declined or any elevated
    # security level is in effect.
    if not get(config_settings, 'circles') or \
            config.get('general.security_level') > 0:
        disabled.append('flow')
    if not get(config_settings, 'mail'):
        disabled.append('pms')  # 'pms' is the mail plugin
    config.set('plugins.disabled', disabled)
    config.set('general.store_plaintext_blocks',
               get(config_settings, 'plainContrib'))
    # Persist everything and flag onboarding as done.
    config.set('onboarding.done', True, savefile=True)
def test_toggle_bootstrap(self):
    """The togglebootstrap CLI command flips general.use_bootstrap_list."""
    # The bootstrap list is expected to start out enabled.
    self.assertTrue(config.get('general.use_bootstrap_list'))
    argv = ["onionr.py", "togglebootstrap"]
    with patch.object(sys, 'argv', argv):
        parser.register()
    # Re-read the config from disk and confirm the flag was toggled off.
    config.reload()
    self.assertFalse(config.get('general.use_bootstrap_list'))
def kill_daemon():
    """Shutdown the Onionr daemon (communicator)."""
    config.reload()
    logger.warn('Stopping the running daemon...', timestamp=False,
                terminal=True)
    # On platforms where we can, fork out to prevent locking
    try:
        pid = os.fork()
        if pid != 0:
            return  # parent returns immediately; the child does the shutdown
    except (AttributeError, OSError):
        # os.fork unavailable (e.g. Windows) or failed; continue inline
        pass
    events.event('daemon_stop')
    net = NetController(config.get('client.port', 59496))
    try:
        # Ask the client API to shut down cleanly; don't wait forever.
        spawn(
            localcommand.local_command,
            '/shutdownclean'
        ).get(timeout=5)
    except sqlite3.OperationalError:
        pass  # DB may already be locked/closed while shutting down
    net.killTor()
def delkeyword(message):
    """Remove the given stop words and report the database state to the chat."""
    global last_message
    last_message = ''
    commands.delstopwords(message)
    config.reload()
    chat_id = message.chat.id
    bot.send_message(chat_id, 'Указанные слова удалены')
    bot.send_message(chat_id, f'Текущее состояние базы данных: {commands.showinfo()}')
def on_processblocks(api, data=None):
    """Notify the user when a decrypted private-message block arrives."""
    # Only private-message blocks are of interest here.
    if data['type'] != 'pm':
        return
    notification_func = notifier.notify
    data['block'].decrypt()
    metadata = data['block'].bmetadata
    signer = bytesconverter.bytes_to_str(data['block'].signer)
    user = contactmanager.ContactManager(signer, saveUser=False)
    name = user.get_info("name")
    # Prefer the contact's saved name; otherwise show a short key prefix.
    if name != 'anonymous' and name != None:
        signer = name.title()
    else:
        signer = signer[:5]
    if data['block'].decrypted:
        config.reload()
        if config.get('mail.notificationSound', True):
            notification_func = notifier.notification_with_sound
        if config.get('mail.notificationSetting', True):
            # Optionally suppress notifications from non-friends.
            if not config.get('mail.strangersNotification', True):
                if not user.isFriend():
                    return
            notification_func(title="Onionr Mail - New Message",
                              message="From: %s\n\nSubject: %s" %
                              (signer, metadata['subject']))
def reload(request):
    """
    Reloads the configuration file; interface to config.reload.
    """
    if request.method != "POST":
        return _methodNotAllowed()
    options = _validateOptions(request, {})
    if type(options) is str:
        return _response(options)  # validation error message
    user = userauth.authenticateRequest(request)
    if type(user) is str:
        return _response(user)  # authentication error message
    elif user == None:
        return _unauthorized()
    elif not user.isSuperuser:
        return _forbidden()
    try:
        # NOTE(review): if ezid.pause itself raises, oldValue is unbound
        # when the finally clause runs — confirm pause cannot raise here.
        oldValue = ezid.pause(True)
        # Wait for the system to become quiescent.
        while True:
            if len(ezid.getStatus()[0]) == 0:
                break
            time.sleep(1)
        config.reload()
    finally:
        ezid.pause(oldValue)
    return _response("success: configuration file reloaded and caches emptied")
def testConfig(self):
    """Exercise basic config set/get/unset/save/reload round-trips.

    Fix: the original compared ints with ``is`` (``config.get(...) is
    1337``) — identity comparison against a literal is implementation-
    dependent and a SyntaxWarning on modern CPython; use ``==``/``!=``.
    """
    logger.debug('-'*26 + '\n')
    logger.info('Running simple configuration test...')
    import config
    config.check()
    config.reload()
    configdata = str(config.get_config())
    # A value that was set must be immediately readable.
    config.set('testval', 1337)
    if config.get('testval', None) != 1337:
        self.assertTrue(False)
    # Setting a key with no value clears it.
    config.set('testval')
    if config.get('testval', None) is not None:
        self.assertTrue(False)
    # Saving and reloading must not alter the stored configuration.
    config.save()
    config.reload()
    if not str(config.get_config()) == configdata:
        self.assertTrue(False)
    self.assertTrue(True)
def test_clearnet_tor_request(testmanager):
    """Ensure that Tor cannot request clearnet address.

    Does not run if Tor is being reused
    """
    config.reload()
    leak_result = ""
    if config.get('tor.use_existing_tor', False):
        logger.warn(
            "Can't ensure Tor reqs to clearnet won't happen when reusing Tor")
        return
    socks_port = localcommand.local_command('/gettorsocks')
    # Don't worry, this request isn't meant to go through,
    # but if it did it would be through Tor
    try:
        leak_result: str = do_get_request('https://example.com/notvalidpage',
                                          port=socks_port,
                                          ignoreAPI=True).lower()
    except AttributeError:
        # do_get_request returned a non-string (likely False): no response
        leak_result = ""
    except Exception as e:
        logger.warn(str(e))
    try:
        # Seeing the page body means the clearnet request succeeded: a leak.
        if 'example' in leak_result:
            logger.error('Tor was able to request a clearnet site')
            raise ValueError('Tor was able to request a clearnet site')
    except TypeError:
        pass
def setup_config():
    """Load (creating if needed) the config file and apply logging settings.

    Also generates any missing client secrets and ports, persisting them
    to disk immediately.

    Fixes: the verbosity table shadowed the builtin ``map``; replaced
    ``not x in`` and ``type(x) is type(None)`` with the idiomatic
    ``x not in`` / ``x is None``.
    """
    if not os.path.exists(config._configfile):
        # No config file yet: seed it from the bundled default config.
        conf_data = readstatic.read_static('default_config.json', ret_bin=False)
        config.set_config(json.loads(conf_data))
        config.save()
    config.reload()

    # Build the logger settings bitmask from config flags.
    settings = 0b000
    if config.get('log.console.color', True):
        settings = settings | USE_ANSI
    if config.get('log.console.output', True):
        settings = settings | OUTPUT_TO_CONSOLE
    if config.get('log.file.output', True):
        settings = settings | OUTPUT_TO_FILE
    set_settings(settings)

    verbosity = str(config.get('log.verbosity', 'default')).lower().strip()
    if verbosity not in ('default', 'null', 'none', 'nil'):
        # Accept both numeric level strings and human-friendly aliases.
        level_map = {
            str(LEVEL_DEBUG): LEVEL_DEBUG,
            'verbose': LEVEL_DEBUG,
            'debug': LEVEL_DEBUG,
            str(LEVEL_INFO): LEVEL_INFO,
            'info': LEVEL_INFO,
            'information': LEVEL_INFO,
            str(LEVEL_WARN): LEVEL_WARN,
            'warn': LEVEL_WARN,
            'warning': LEVEL_WARN,
            'warnings': LEVEL_WARN,
            str(LEVEL_ERROR): LEVEL_ERROR,
            'err': LEVEL_ERROR,
            'error': LEVEL_ERROR,
            'errors': LEVEL_ERROR,
            str(LEVEL_FATAL): LEVEL_FATAL,
            'fatal': LEVEL_FATAL,
            str(LEVEL_IMPORTANT): LEVEL_IMPORTANT,
            'silent': LEVEL_IMPORTANT,
            'quiet': LEVEL_IMPORTANT,
            'important': LEVEL_IMPORTANT
        }
        if verbosity in level_map:
            set_level(level_map[verbosity])
        else:
            logger.warn('Verbosity level %s is not valid, using default verbosity.'
                        % verbosity)

    # Generate any missing client secrets/ports, saving immediately.
    if config.get('client.webpassword') is None:
        config.set('client.webpassword',
                   base64.b16encode(os.urandom(32)).decode('utf-8'),
                   savefile=True)
    if config.get('client.client.port') is None:
        randomPort = netcontroller.get_open_port()
        config.set('client.client.port', randomPort, savefile=True)
    if config.get('client.public.port') is None:
        randomPort = netcontroller.get_open_port()
        config.set('client.public.port', randomPort, savefile=True)
    if config.get('client.api_version') is None:
        config.set('client.api_version', onionrvalues.API_VERSION,
                   savefile=True)
def local_command(command, data='', silent=True, post=False,
                  post_data=None, max_wait=20, is_json=False):
    """Send a command to the local http API server, securely.

    Intended for local clients, DO NOT USE for remote peers.

    Returns the response body text, or False when the API host is
    unreachable or the request fails.

    Fix: ``post_data={}`` was a mutable default argument shared across
    calls; it now defaults to None and is created per call.
    """
    if post_data is None:
        post_data = {}
    hostname = get_hostname()
    # if the api host cannot be reached, return False
    if not hostname:
        return False
    if data != '':
        data = '&data=' + urllib.parse.quote_plus(data)
    payload = 'http://%s/%s%s' % (hostname, command, data)
    if not config.get('client.webpassword'):
        # Token may have been written to disk after we loaded the config.
        config.reload()
    headers = {
        'token': config.get('client.webpassword'),
        'Connection': 'close'
    }
    try:
        if post:
            if is_json:
                ret_data = requests.post(payload, json=post_data,
                                         headers=headers,
                                         timeout=(max_wait, max_wait)).text
            else:
                ret_data = requests.post(payload, data=post_data,
                                         headers=headers,
                                         timeout=(max_wait, max_wait)).text
        else:
            ret_data = requests.get(payload, headers=headers,
                                    timeout=(max_wait, max_wait)).text
    except Exception as error:
        if not silent:
            logger.error('Failed to make local request (command: %s):%s' %
                         (command, error), terminal=True)
        ret_data = False
    return ret_data
def mainLoop():
    """Drive the periodic host-scanning schedule in fixed time slots."""
    global running
    global prev_hosts
    global next_hosts
    now = int(time.time())
    # Align the schedule to TIME_SLOT boundaries.
    prev_slot = now - (now % TIME_SLOT)
    next_slot = prev_slot + TIME_SLOT
    # Shortly before a slot ends, "poke" hosts that have gone quiet.
    poke_time = next_slot - REMAINING_BEFORE_POKE
    poke_started = False
    while running:
        now = int(time.time())
        if now >= next_slot:
            # Remove old jobs
            while not scanner_msgqueue.empty():
                try:
                    scanner_msgqueue.get(False)
                except QueueEmpty:
                    break
            insertHostsDataPoint(prev_slot)
            # Rotate the host maps and start a fresh slot.
            prev_hosts = next_hosts
            next_hosts = {}
            prev_slot = now - (now % TIME_SLOT)
            next_slot = prev_slot + TIME_SLOT
            poke_time = next_slot - REMAINING_BEFORE_POKE
            poke_started = False
        elif not poke_started and now >= poke_time:
            config.reload()
            if config.getPeriodicDiscoveryEnabled():
                scanner_msgqueue.put("net_scan")
                log.debug("Peridoc ARP scan queued")
            else:
                # Probe only hosts seen last slot but not yet this slot.
                for host in prev_hosts:
                    try:
                        h = next_hosts[host]
                    except KeyError:
                        # Only poke when enough slot time remains.
                        max_time = next_slot - now - 1
                        if (max_time >= 5) and (config.getDeviceProbeEnabled(prev_hosts[host].mac)):
                            host = prev_hosts[host].ip
                            scanner_msgqueue.put(host)
                            log.debug("Host " + host + " ARP scan queued")
            poke_started = True
        for messages in manager.getMessages():
            for message in messages:
                handleHost(message.mac, message.ip, message.seen_tstamp,
                           message.host_name)
        now = int(time.time())
        if now < next_slot:
            # TODO use blocking queue wait instead
            seconds = min(next_slot, now + MESSAGE_CHECK_INTERVAL) - now
            time.sleep(seconds)
def get_enabled_plugins():
    '''
        Returns a list of the enabled plugins
    '''
    check()
    config.reload()
    enabled = config.get('plugins.enabled', list())
    return list(enabled)
def reload_():
    """Reload config and log shoulder counts from the DB and the cache."""
    assert django.conf.settings.configured
    config.reload()
    db_count = ezidapp.models.Shoulder.objects.filter(
        active=True, manager='ezid').count()
    # noinspection PyProtectedMember
    cache_count = len(ezidapp.models.shoulder._shoulders)
    log.debug('reloaded(): db_shoulders={} cache_shoulders={}'.format(
        db_count,
        cache_count,
    ))
def get_enabled_plugins():
    '''
        Returns a list of the enabled plugins
    '''
    check()
    config.reload()
    plugin_conf = config.get('plugins')
    return plugin_conf['enabled']
def save_model(self, request, obj, form, change):
    """Persist an admin edit of the server config object and apply it live.

    Only editing an existing object is supported (change must be True).
    """
    assert change
    obj.save()
    if "alertMessage" in form.changed_data:
        # Push the new alert banner to the running UI immediately.
        import ui_common
        ui_common.alertMessage = obj.alertMessage
    if obj.secretKey == "":
        # An emptied secret key is the trigger to reload the server config.
        import config
        config.reload()
        django.contrib.messages.success(request, "Server reloaded.")
    return obj
def reloadConfig(self):
    """Reload configuration if necessary.

    Fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to ``except Exception``.
    """
    if not config.updated():
        return
    try:
        config.reload()
        log.msg('reloaded configuration')
    except Exception:
        log.err()
        log.msg('failed to reload configuration')
def localCommand(self, command):
    '''
        Send a command to the local http API server, securely. Intended
        for local clients, DO NOT USE for remote peers.

        Fix: the host file was opened inline and never closed (leaked
        file handle); it is now read via a context manager.
    '''
    config.reload()
    # TODO: URL encode parameters, just as an extra measure. May not be
    # needed, but should be added regardless.
    with open('data/host.txt', 'r') as host_file:
        host = host_file.read()
    requests.get('http://' + host + ':' +
                 str(config.get('client')['port']) +
                 '/client/?action=' + command + '&token=' +
                 str(config.get('client')['client_hmac']))
    return
def load_plugin_blueprints(flaskapp, blueprint: str = 'flask_blueprint'):
    """Iterate enabled plugins and load any http endpoints they have"""
    config.reload()
    disabled = config.get('plugins.disabled')
    for plugin_name in onionrplugins.get_enabled_plugins():
        if plugin_name in disabled:
            continue
        module = onionrplugins.get_plugin(plugin_name)
        # Plugins without the named blueprint attribute are skipped.
        try:
            flaskapp.register_blueprint(getattr(module, blueprint))
        except AttributeError:
            pass
def add_bridges(torrc: str) -> str:
    """Configure tor to use a bridge using Onionr config keys."""
    config.reload()
    if config.get('tor.use_bridge', False) is True:
        bridge = config.get('tor.bridge_ip', None)
        if bridge is not None:
            # A blank fingerprint is allowed on purpose.
            fingerprint = config.get('tor.bridge_fingerprint', '')
            bridge_lines = '\nUseBridges 1\nBridge %s %s\n' % (bridge, fingerprint)
            torrc = torrc + bridge_lines
        if not bridge:
            logger.error(
                'Bridge was enabled but not specified in config, '
                + 'this probably won\'t work',
                terminal=True)
    return torrc
def configure(self):
    '''
        Displays something from the configuration file, or sets it
    '''
    argc = len(sys.argv)
    if argc >= 4:
        # Set: <key> <value>
        config.reload()
        config.set(sys.argv[2], sys.argv[3], True)
        logger.debug('Configuration file updated.')
    elif argc >= 3:
        # Get: <key>
        config.reload()
        current = str(config.get(sys.argv[2],
                                 logger.colors.fg.red + 'Not set.'))
        logger.info(logger.colors.bold + sys.argv[2] + ': '
                    + logger.colors.reset + current)
    else:
        usage = sys.argv[0] + ' ' + sys.argv[1]
        logger.info(logger.colors.bold + 'Get a value: '
                    + logger.colors.reset + usage + ' <key>')
        logger.info(logger.colors.bold + 'Set a value: '
                    + logger.colors.reset + usage + ' <key> <value>')
def SvcDoRun(self):
    """Windows-service entry point: start the agent's SSH server reactor."""
    log.startLogging(open(r'c:\sepiida\sepiida-agent.log', 'w'))
    # Silence twisted's debug logging by replacing it with a no-op.
    def noop(s):
        pass
    log.debug = noop
    config.reload()
    p = portal.Portal(sshserver.ExampleRealm())
    # Authenticate clients against the on-disk authorized_keys file.
    p.registerChecker(sshserver.InMemoryPublicKeyChecker(
        os.path.join(r'c:\sepiida', 'authorized_keys.txt')))
    privKeyFn = os.path.join(r'c:\sepiida', 'ssh_host_rsa_key')
    pubKeyFn = privKeyFn + '.pub'
    factory = sshserver.ExampleFactory(privKeyFn, pubKeyFn)
    factory.portal = p
    reactor.listenTCP(22, factory)
    # Signal handlers must stay off: a service does not run on the
    # main thread, where twisted would try to install them.
    reactor.run(installSignalHandlers=0)
def get_client_API_server():
    """Return the client API server address as 'host:port'.

    Raises FileNotFoundError if the private API host file is absent.

    Fix: ``raise FileNotFoundError`` raised a brand-new, message-less
    exception, discarding the original filename and context; a bare
    ``raise`` re-raises the caught exception intact.
    """
    config.reload()
    getconf = lambda: config.get('client.client.port')
    port = getconf()
    if port is None:
        # The port may not have been written yet; re-read the config once.
        config.reload()
        port = getconf()
    try:
        with open(filepaths.private_API_host_file, 'r') as host:
            hostname = host.read()
    except FileNotFoundError:
        raise
    return '%s:%s' % (hostname, port)
def check():
    '''
        Checks to make sure files exist
    '''
    config.reload()
    if not config.is_set('plugins'):
        logger.debug('Generating plugin config data...')
        config.set('plugins', {'enabled': []}, True)
    plugin_parent_dir = os.path.dirname(get_plugins_folder())
    if not os.path.exists(plugin_parent_dir):
        logger.debug('Generating plugin data folder...')
        os.makedirs(plugin_parent_dir)
    return
def connectionMade(self): config.reload() # Send hello (info) message self._sendInfo(hello=True) self.update_loop = task.LoopingCall(self._sendUpdatedUserInfo) self.update_info_loop = task.LoopingCall(self._sendInfo) def startLooping(): # Check periodically if anything has changed, and send updated # information if necessary self.update_loop.start(8.0, True) # Send info, e.g. uptime and load every 10 seconds self.update_info_loop.start(10.0, False) # To avoid PotentialZombieWarning due to the reactor not running, # only start looping after the reactor has been started. reactor.callWhenRunning(startLooping)
async def reload(self, ctx, *, module: str):
    """Reload a bot module by name (superusers only).

    Fix: the admin branch compared ``module == admin`` where ``admin``
    was an undefined name (NameError at runtime); it must be the string
    literal ``'admin'``.
    """
    if ctx.message.author.id in config.superusers:
        if module == 'webserver':
            # The webserver must be stopped before its module is swapped.
            await config.modules['webserver'].stop()
            webserver = importlib.reload(config.modules[module])
            config.modules['webserver'] = webserver
            await webserver.start()
            await ctx.channel.send("reloaded `" + module + "` succesfully!")
        elif module == 'config':
            config.reload()
            await ctx.channel.send("reloaded `" + module + "` succesfully!")
        elif module == 'logic':
            bot.remove_cog('Logic')
            logic = importlib.reload(config.modules['logic'])
            gameclass = logic.Logic(bot)
            bot.add_cog(gameclass)
        elif module == 'admin':
            bot.remove_cog('Admin')
            admin = importlib.reload(config.modules['admin'])
            adminclass = admin.Admin(bot)
            bot.add_cog(adminclass)
        elif module in config.modules and config.modules[module]:
            # Generic cog: remove, reload, re-add by capitalized class name.
            try:
                bot.remove_cog(module.capitalize())
                loadedmodule = importlib.reload(config.modules[module])
                bot.add_cog(
                    getattr(loadedmodule, module.capitalize())(bot))
                await ctx.channel.send("reloaded `" + module + "` succesfully!")
            except AttributeError:
                importlib.reload(config.modules[module])
                await ctx.channel.send("reloaded `" + module + "` succesfully?")
            except Exception as e:
                print(e)
                await ctx.channel.send("reloading `" + module + "` failed.")
        else:
            await ctx.channel.send('`' + module + "` isn't available for reloading.")
    else:
        await emformat.genericmsg(ctx.channel,
                                  "this command is restricted.",
                                  "error", "reload")
def localCommand(self, command, silent=True):
    '''
        Send a command to the local http API server, securely. Intended
        for local clients, DO NOT USE for remote peers.

        Fix: the host file was opened inline and never closed (leaked
        file handle); it is now read via a context manager.
    '''
    config.reload()
    self.getTimeBypassToken()
    # TODO: URL encode parameters, just as an extra measure. May not be
    # needed, but should be added regardless.
    try:
        with open('data/host.txt', 'r') as host_file:
            host = host_file.read()
        retData = requests.get('http://' + host + ':' +
                               str(config.get('client')['port']) +
                               '/client/?action=' + command + '&token=' +
                               str(config.get('client')['client_hmac']) +
                               '&timingToken=' + self.timingToken).text
    except Exception as error:
        if not silent:
            logger.error('Failed to make local request (command: %s).'
                         % command, error=error)
        retData = False
    return retData
def check():
    '''
        Checks to make sure files exist
    '''
    config.reload()
    if not config.is_set('plugins'):
        logger.debug('Generating plugin config data...')
        config.set('plugins', {'enabled': []}, True)
    if not os.path.exists(os.path.dirname(get_plugins_folder())):
        logger.debug('Generating plugin data folder...')
        os.makedirs(os.path.dirname(get_plugins_folder()))
    # Bootstrap a minimal 'test' plugin that answers the standard events.
    if not exists('test'):
        os.makedirs(get_plugins_folder('test'))
        with open(get_plugins_folder('test') + '/main.py', 'a') as main:
            main.write(
                "print('Running')\n\ndef on_test(onionr = None, data = None):\n print('received test event!')\n return True\n\ndef on_start(onionr = None, data = None):\n print('start event called')\n\ndef on_stop(onionr = None, data = None):\n print('stop event called')\n\ndef on_enable(onionr = None, data = None):\n print('enable event called')\n\ndef on_disable(onionr = None, data = None):\n print('disable event called')\n"
            )
        enable('test')
    return
it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. ''' import multiprocessing, nacl.encoding, nacl.hash, nacl.utils, time, math, threading, binascii, sys, json import core, onionrutils, config, logger, onionrblockapi config.reload() def getDifficultyModifier(coreOrUtilsInst=None): '''Accepts a core or utils instance returns the difficulty modifier for block storage based on a variety of factors, currently only disk use. ''' classInst = coreOrUtilsInst retData = 0 if isinstance(classInst, core.Core): useFunc = classInst._utils.storageCounter.getPercent elif isinstance(classInst, onionrutils.OnionrUtils): useFunc = classInst.storageCounter.getPercent else: useFunc = core.Core()._utils.storageCounter.getPercent
def mainLoop():
    """Drive the scan/poke schedule and serve device queries over the pipe."""
    global running
    global seen_hosts
    now = int(time.time())
    # Align the schedule to fixed TIME_SLOT boundaries.
    prev_slot = now - (now % TIME_SLOT)
    next_slot = prev_slot + TIME_SLOT
    # Shortly before a slot ends, "poke" hosts that are about to go idle.
    poke_time = next_slot - REMAINING_BEFORE_POKE
    poke_started = False
    while running:
        now = int(time.time())
        if now >= next_slot:
            # Remove old jobs
            if scanner_msgqueue:
                while not scanner_msgqueue.empty():
                    try:
                        scanner_msgqueue.get(False)
                    except QueueEmpty:
                        break
            insertHostsDataPoint(prev_slot, now)
            # Start a fresh slot.
            prev_slot = now - (now % TIME_SLOT)
            next_slot = prev_slot + TIME_SLOT
            poke_time = next_slot - REMAINING_BEFORE_POKE
            poke_started = False
        elif not poke_started and now >= poke_time:
            config.reload()
            if config.getPeriodicDiscoveryEnabled():
                if scanner_msgqueue:
                    scanner_msgqueue.put("net_scan")
                    log.debug("Peridoc ARP scan queued")
            else:
                # Only poke when enough time remains in the slot.
                max_time = next_slot - now - 1
                if (max_time >= 5) and scanner_msgqueue:
                    for h in seen_hosts.values():
                        # Probe hosts about to be marked idle, if allowed.
                        if h.isBecomingIdle(
                                now) and config.getDeviceProbeEnabled(h.mac):
                            scanner_msgqueue.put(h.ip)
                            log.debug("Host " + h.ip + " ARP scan queued")
            poke_started = True
        processDevicesUpdates()
        now = int(time.time())
        if now < next_slot:
            seconds = min(next_slot, now + MESSAGE_CHECK_INTERVAL) - now
            # Sleep, but wake early if a web request arrives on the pipe.
            has_msg = web_msgqueue[1].poll(timeout=seconds)
            if has_msg:
                msg = web_msgqueue[1].recv()
                if msg == "get_seen_devices":
                    processDevicesUpdates()
                    web_msgqueue[1].send(pickle.dumps(seen_hosts))
def main():
    """Archive today's databox JSON files into a per-day zip (Python 2)."""
    # Progress tick on stderr for each run.
    sys.stderr.write('.')
    sys.stderr.flush()
    ts = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone('UTC'))
    for_date = ts.strftime('%Y-%m-%d')
    # One zip archive per UTC day.
    zip_file = os.path.join(settings.DATABOX_OUT, '{0}.zip'.format(for_date, ))
    if not os.path.isfile(zip_file):
        print >> sys.stderr, 'create zip file'
        # Create an empty archive so it can be opened in append mode below.
        zipfile.ZipFile(zip_file, 'w').close()
    with zipfile.ZipFile(zip_file, 'a') as zip_handle:
        # Collect today's JSON files, bounded by FETCH_COUNT per pass.
        json_files = util.walk_databox(
            settings.DATABOX_IN,
            max_count=settings.FETCH_COUNT,
            file_type='.json',
            contains=for_date
        )
        for json_file in json_files:
            print >> sys.stderr, json_file
            zip_handle.write(
                json_file,
                os.path.basename(json_file),
                zipfile.ZIP_DEFLATED
            )
            # NOTE(review): removes the source file via a shell 'rm';
            # os.remove would avoid shell quoting issues — confirm intent.
            os.system('rm %s' % (json_file, ))

if __name__ == "__main__":
    config.load()
    # Re-read the config and re-run once per FETCH_INTERVAL tick.
    for _ in pass_gen(settings.FETCH_INTERVAL):
        config.reload()
        main()
def setup_config(dataDir, o_inst=None):
    """Ensure the data dir and config file exist, then apply log settings.

    Returns True if the data directory already existed, False otherwise.
    When o_inst (an Onionr instance) is given, its development-mode flag
    and the log level are set from config.
    """
    data_exists = os.path.exists(dataDir)
    if not data_exists:
        os.mkdir(dataDir)
    config.reload()
    if not os.path.exists(config._configfile):
        if os.path.exists('static-data/default_config.json'):
            # this is the default config, it will be overwritten if a
            # config file already exists. Else, it saves it
            with open('static-data/default_config.json', 'r') as configReadIn:
                config.set_config(json.loads(configReadIn.read()))
        else:
            # the default config file doesn't exist, try hardcoded config
            logger.warn(
                'Default configuration file does not exist, switching to hardcoded fallback configuration!'
            )
            config.set_config({
                'dev_mode': True,
                'log': {
                    'file': {
                        'output': True,
                        'path': dataDir + 'output.log'
                    },
                    'console': {
                        'output': True,
                        'color': True
                    }
                }
            })
        config.save()
    # Build the logger settings bitmask from config flags.
    settings = 0b000
    if config.get('log.console.color', True):
        settings = settings | logger.USE_ANSI
    if config.get('log.console.output', True):
        settings = settings | logger.OUTPUT_TO_CONSOLE
    if config.get('log.file.output', True):
        settings = settings | logger.OUTPUT_TO_FILE
    logger.set_settings(settings)
    if not o_inst is None:
        # Dev mode forces debug-level logging on the Onionr instance.
        if str(config.get('general.dev_mode', True)).lower() == 'true':
            o_inst._developmentMode = True
            logger.set_level(logger.LEVEL_DEBUG)
        else:
            o_inst._developmentMode = False
            logger.set_level(logger.LEVEL_INFO)
    verbosity = str(config.get('log.verbosity', 'default')).lower().strip()
    if not verbosity in ['default', 'null', 'none', 'nil']:
        # Accept numeric level strings and human-friendly aliases.
        map = {
            str(logger.LEVEL_DEBUG): logger.LEVEL_DEBUG,
            'verbose': logger.LEVEL_DEBUG,
            'debug': logger.LEVEL_DEBUG,
            str(logger.LEVEL_INFO): logger.LEVEL_INFO,
            'info': logger.LEVEL_INFO,
            'information': logger.LEVEL_INFO,
            str(logger.LEVEL_WARN): logger.LEVEL_WARN,
            'warn': logger.LEVEL_WARN,
            'warning': logger.LEVEL_WARN,
            'warnings': logger.LEVEL_WARN,
            str(logger.LEVEL_ERROR): logger.LEVEL_ERROR,
            'err': logger.LEVEL_ERROR,
            'error': logger.LEVEL_ERROR,
            'errors': logger.LEVEL_ERROR,
            str(logger.LEVEL_FATAL): logger.LEVEL_FATAL,
            'fatal': logger.LEVEL_FATAL,
            str(logger.LEVEL_IMPORTANT): logger.LEVEL_IMPORTANT,
            'silent': logger.LEVEL_IMPORTANT,
            'quiet': logger.LEVEL_IMPORTANT,
            'important': logger.LEVEL_IMPORTANT
        }
        if verbosity in map:
            logger.set_level(map[verbosity])
        else:
            logger.warn(
                'Verbosity level %s is not valid, using default verbosity.'
                % verbosity)
    return data_exists
def __init__(self):
    '''
        Main Onionr class. This is for the CLI program, and does not handle much of the logic.
        In general, external programs and plugins should not use this class.
    '''
    try:
        os.chdir(sys.path[0])
    except FileNotFoundError:
        pass
    # Load global configuration data
    data_exists = os.path.exists('data/')
    if not data_exists:
        os.mkdir('data/')
    if os.path.exists('static-data/default_config.json'):
        # this is the default config, it will be overwritten if a config
        # file already exists. Else, it saves it
        config.set_config(
            json.loads(open('static-data/default_config.json').read())
        )
    else:
        # the default config file doesn't exist, try hardcoded config
        config.set_config({
            'devmode': True,
            'log': {
                'file': {
                    'output': True,
                    'path': 'data/output.log'
                },
                'console': {
                    'output': True,
                    'color': True
                }
            }
        })
    if not data_exists:
        config.save()
    config.reload()  # this will read the configuration file into memory
    # Build the logger settings bitmask from config flags.
    settings = 0b000
    if config.get('log', {'console': {'color': True}})['console']['color']:
        settings = settings | logger.USE_ANSI
    if config.get('log', {'console': {
            'output': True
            }})['console']['output']:
        settings = settings | logger.OUTPUT_TO_CONSOLE
    if config.get('log', {'file': {'output': True}})['file']['output']:
        settings = settings | logger.OUTPUT_TO_FILE
    logger.set_file(
        config.get('log', {'file': {
            'path': 'data/output.log'
        }})['file']['path'])
    logger.set_settings(settings)
    # Dev mode forces debug-level logging.
    if str(config.get('devmode', True)).lower() == 'true':
        self._developmentMode = True
        logger.set_level(logger.LEVEL_DEBUG)
    else:
        self._developmentMode = False
        logger.set_level(logger.LEVEL_INFO)
    self.onionrCore = core.Core()
    self.onionrUtils = OnionrUtils(self.onionrCore)
    # Handle commands
    self.debug = False  # Whole application debugging
    if os.path.exists('data-encrypted.dat'):
        # Encrypted data dir present: prompt until decryption succeeds.
        while True:
            print('Enter password to decrypt:')
            password = getpass.getpass()
            result = self.onionrCore.dataDirDecrypt(password)
            if os.path.exists('data/'):
                break
            else:
                logger.error('Failed to decrypt: ' + result[1],
                             timestamp=False)
    else:
        # If data folder does not exist
        if not data_exists:
            if not os.path.exists('data/blocks/'):
                os.mkdir('data/blocks/')
    # Copy default plugins into plugins folder
    if not os.path.exists(plugins.get_plugins_folder()):
        if os.path.exists('static-data/default-plugins/'):
            names = [
                f for f in os.listdir("static-data/default-plugins/")
                if not os.path.isfile(f)
            ]
            shutil.copytree('static-data/default-plugins/',
                            plugins.get_plugins_folder())
            # Enable plugins
            for name in names:
                if not name in plugins.get_enabled_plugins():
                    plugins.enable(name, self)
    # Each enabled plugin gets a data folder; disable plugins whose
    # folder cannot be created.
    for name in plugins.get_enabled_plugins():
        if not os.path.exists(plugins.get_plugin_data_folder(name)):
            try:
                os.mkdir(plugins.get_plugin_data_folder(name))
            except:
                plugins.disable(name, onionr=self, stop_event=False)
    if not os.path.exists(self.onionrCore.peerDB):
        self.onionrCore.createPeerDB()
        pass
    if not os.path.exists(self.onionrCore.addressDB):
        self.onionrCore.createAddressDB()
    # Get configuration
    if not data_exists:
        # Generate default config
        # Hostname should only be set if different from 127.x.x.x.
        # Important for DNS rebinding attack prevention.
        if self.debug:
            randomPort = 8080
        else:
            # Pick a free port for the client API.
            while True:
                randomPort = random.randint(1024, 65535)
                if self.onionrUtils.checkPort(randomPort):
                    break
        config.set(
            'client', {
                'participate': 'true',
                'client_hmac': base64.b16encode(
                    os.urandom(32)).decode('utf-8'),
                'port': randomPort,
                'api_version': API_VERSION
            }, True)
    # Map CLI command names (and aliases) to their handlers.
    self.cmds = {
        '': self.showHelpSuggestion,
        'help': self.showHelp,
        'version': self.version,
        'config': self.configure,
        'start': self.start,
        'stop': self.killDaemon,
        'status': self.showStats,
        'statistics': self.showStats,
        'stats': self.showStats,
        'enable-plugin': self.enablePlugin,
        'enplugin': self.enablePlugin,
        'enableplugin': self.enablePlugin,
        'enmod': self.enablePlugin,
        'disable-plugin': self.disablePlugin,
        'displugin': self.disablePlugin,
        'disableplugin': self.disablePlugin,
        'dismod': self.disablePlugin,
        'reload-plugin': self.reloadPlugin,
        'reloadplugin': self.reloadPlugin,
        'reload-plugins': self.reloadPlugin,
        'reloadplugins': self.reloadPlugin,
        'create-plugin': self.createPlugin,
        'createplugin': self.createPlugin,
        'plugin-create': self.createPlugin,
        'listkeys': self.listKeys,
        'list-keys': self.listKeys,
        'addmsg': self.addMessage,
        'addmessage': self.addMessage,
        'add-msg': self.addMessage,
        'add-message': self.addMessage,
        'pm': self.sendEncrypt,
        'getpms': self.getPMs,
        'get-pms': self.getPMs,
        'addpeer': self.addPeer,
        'add-peer': self.addPeer,
        'add-address': self.addAddress,
        'add-addr': self.addAddress,
        'addaddr': self.addAddress,
        'addaddress': self.addAddress,
        'addfile': self.addFile,
        'importblocks': self.onionrUtils.importNewBlocks,
        'introduce': self.onionrCore.introduceNode,
        'connect': self.addAddress
    }
    # Help text shown for each primary command.
    self.cmdhelp = {
        'help': 'Displays this Onionr help menu',
        'version': 'Displays the Onionr version',
        'config': 'Configures something and adds it to the file',
        'start': 'Starts the Onionr daemon',
        'stop': 'Stops the Onionr daemon',
        'stats': 'Displays node statistics',
        'enable-plugin': 'Enables and starts a plugin',
        'disable-plugin': 'Disables and stops a plugin',
        'reload-plugin': 'Reloads a plugin',
        'create-plugin': 'Creates directory structure for a plugin',
        'add-peer': 'Adds a peer to database',
        'list-peers': 'Displays a list of peers',
        'add-msg': 'Broadcasts a message to the Onionr network',
        'pm': 'Adds a private message to block',
        'get-pms': 'Shows private messages sent to you',
        'addfile': 'Create an Onionr block from a file',
        'importblocks': 'import blocks from the disk (Onionr is transport-agnostic!)',
        'introduce': 'Introduce your node to the public Onionr network',
    }
    # initialize plugins
    events.event('init', onionr=self, threaded=False)
    command = ''
    try:
        command = sys.argv[1].lower()
    except IndexError:
        command = ''
    finally:
        self.execute(command)
    # Outside development mode, re-encrypt the data directory on exit.
    if not self._developmentMode:
        encryptionPassword = self.onionrUtils.getPassword(
            'Enter password to encrypt directory: ')
        self.onionrCore.dataDirEncrypt(encryptionPassword)
        shutil.rmtree('data/')
    return
def showinfo():
    """Return a human-readable summary of the keyword database state."""
    config.reload()
    keywords = config.KEYWORDS
    stopwords = config.STOPWORDS
    admins = config.ADMINS
    return f'Ключевые слова: {keywords}\n\nСТОП-слова: {stopwords}\n\nАдмины: {admins}'
def task(self, msg_queue, cp_eventsqueue, config_changeev, passive_mode):
    """Main packet-capture / ARP-spoofing loop for the captive portal.

    Runs until isRunning() returns False. In passive mode no NAT is set
    up and no spoofing cleanup is performed on exit.
    """
    self.msg_queue = msg_queue
    self.cp_eventsqueue = cp_eventsqueue
    self.passive_mode = passive_mode
    self.config_changeev = config_changeev
    self.macs_to_spoof = {}  # MAC -> {"last_seen": ts, "ip": ip}
    self.ip_to_mac = {}
    # Acquire capabilities to capture packets
    acquire_capabilities()
    # TODO make interface configurable
    handle = pkt_reader.open_capture_dev(self.options["interface"], 1000,
                                         "broadcast or arp", False)
    self.gateway_mac = pkt_reader.get_gateway_mac(handle)
    self.iface_ip = pkt_reader.get_iface_ip(handle)
    self.iface_mac = pkt_reader.get_iface_mac(handle)
    self.lan_network = pkt_reader.get_lan_network(handle)
    self.handle = handle
    if (not self.passive_mode):
        print("[NET:%s] [IP: %s] [MAC: %s] Gateway %s (%s)" %
              (self.lan_network, self.iface_ip, self.iface_mac,
               self.gateway_mac, pkt_reader.get_gateway_ip(handle)))
        self.setupCaptiveNat()
    self.reloadExceptions()
    last_request_spoof = 0
    while self.isRunning():
        info = pkt_reader.read_packet_info(handle)
        now = time.time()
        # Check for captive portal events
        while cp_eventsqueue[1].poll():
            (msg_type, ip) = cp_eventsqueue[1].recv()
            if (msg_type == "auth_ok"):
                # A device was successfully authenticated
                mac = self.ip_to_mac.get(ip)
                if not mac:
                    print("Warning: unknown device with IP: " + ip)
                else:
                    # Verify that the device has actually a captive_portal logic
                    policy = getDevicePolicy(mac)
                    if policy == "captive_portal":
                        print("Whitelisting device [MAC=%s][IP=%s]" % (mac, ip))
                        self.whitelisted_devices[mac] = True
                        # Spoof the device back to the original gateway
                        pkt_reader.arp_rearp(handle, mac, ip)
                        self.macs_to_spoof.pop(mac, None)
        # Check for config change events
        if self.config_changeev.is_set():
            config.reload()
            self.reloadExceptions()
            self.config_changeev.clear()
        if info:
            name = info.get("name")
            mac = info["mac"]
            ip = info["ip"]
            self.handleHost(mac, ip, name, now)
            #print(info)
            if self.shouldSpoof(mac, ip):
                if (info.get("proto") == "ARP_REQ"):
                    # Immediately spoof the reply
                    pkt_reader.arp_rep_spoof(handle, mac, ip)
                self.macs_to_spoof[mac] = {"last_seen": now, "ip": ip}
                self.ip_to_mac[ip] = mac
        # Periodically re-spoof active devices and drop idle ones.
        if ((now - last_request_spoof) >= SPOOFING_TIMEOUT):
            idle_macs = []
            for mac, mac_info in self.macs_to_spoof.items():
                if ((now - mac_info["last_seen"]) < SPOOFED_MAC_IDLE_TIMEOUT):
                    pkt_reader.arp_req_spoof(handle, mac, mac_info["ip"])
                else:
                    idle_macs.append(mac)
            for mac in idle_macs:
                self.macs_to_spoof.pop(mac, None)
            last_request_spoof = now
    # Spoof the devices back to the original gateway
    for mac, mac_info in self.macs_to_spoof.items():
        pkt_reader.arp_rearp(handle, mac, mac_info["ip"])
    if not self.passive_mode:
        self.termCaptiveNat()
    pkt_reader.close_capture_dev(handle)