def neighbor_info(self):
    """Log in to the switch over telnet and return its CDP neighbor table.

    Returns the neighbor listing as a newline-joined string whose first
    entry is 'cdp', or the string 'unknown' when the switch times out or
    is not ready yet.
    """
    try:
        # Short 3s timeout: neighbor info is best-effort, fail fast.
        tc = telnetlib.Telnet(self.address, timeout=3)
        # Drive the login prompts with the configured init credentials.
        self._write(tc, None, [b'\r\n[Uu]sername: '])
        self._write(tc, config.get('liscain', 'liscain_init_username'), [b'\r\n[Pp]assword: '])
        self._write(tc, config.get('liscain', 'liscain_init_password'))
        # Disable paging so the whole table arrives in one response.
        self._write(tc, 'terminal length 0')
        nbr_info = ['cdp']
        neigh_info_started = False
        # Skip banner output until the 'Device ID' header line, then keep
        # every following line; the last split element is dropped
        # (presumably the trailing prompt — TODO confirm).
        for line in self._write(tc, 'show cdp neigh').split('\n')[:-1]:
            line = line.strip()
            if 'Device ID' in line:
                neigh_info_started = True
            if neigh_info_started:
                nbr_info.append(line)
        return '\n'.join(nbr_info)
    except socket.timeout:
        self._logger.info('timeout getting neighbor info')
        return 'unknown'
    except EOFError:
        self._logger.info('switch not ready while getting neighbor info')
        return 'unknown'
async def _get_photo(client: TelegramClient, entity: Union[types.User, types.Channel, types.Chat]):
    """Ensure *entity*'s avatar is cached on disk and return its filename.

    Returns None when the entity has no photo (or an empty placeholder
    photo).  The actual download runs as a fire-and-forget task, so the
    filename is returned before the file necessarily exists.
    """
    if not entity.photo or isinstance(
            entity.photo, (types.UserProfilePhotoEmpty, types.ChatPhotoEmpty)):
        return None
    # Chat/user photos wrap the downloadable file reference in .photo_small.
    photo = entity.photo.photo_small\
        if isinstance(entity.photo, (types.ChatPhoto, types.UserProfilePhoto))\
        else entity.photo
    # The name embeds an md5 of the Telegram file location, so a changed
    # avatar yields a different cache filename.
    filename = '{0}.{1}.jpg'.format(
        entity.id,
        md5('{e.dc_id}.{e.volume_id}.{e.local_id}.{e.secret}'.format(e=photo).encode('utf-8'))
        .hexdigest()
    )
    filepath = os.path.join(config.get('paths', 'avatars'), filename)
    if not os.path.exists(filepath):
        # if file was updated, remove old one
        for f in glob(os.path.join(config.get('paths', 'avatars'), str(entity.id) + '*.jpg')):
            os.unlink(f)
        # Fire-and-forget download; failures are routed to the shared
        # task-exception handler instead of being silently dropped.
        asyncio.ensure_future(
            client.download_profile_photo(entity, file=filepath, download_big=False)
        ).add_done_callback(catch_task_exception)
    return filename
async def get_client(vk_user_id: int, event_loop=None) -> TelegramClient:
    """Return a connected, authorized TelegramClient for *vk_user_id*.

    Clients are cached in ``context.clients``; on a cache miss a new client
    is built from the account's phone number, connected, and cached.

    :param vk_user_id: VK user id (coerced to int)
    :param event_loop: optional asyncio loop; defaults to the running loop
    :raises errors.UnknownUser: unknown vk_user_id or missing phone number
    :raises errors.UnauthorizedException: Telegram session not authorized
    """
    # Fix: return annotation was Awaitable[TelegramClient]; an `async def`
    # already wraps its return value, so the declared type is the awaited one.
    vk_user_id = int(vk_user_id)
    client: TelegramClient = context.clients.get(vk_user_id)
    if client:
        return client
    phone_number = context.accounts.get(vk_user_id, {}).get('phone_number')
    if not phone_number:
        _logger.debug('User id: %r, Accounts: %s', vk_user_id, context.accounts)
        raise errors.UnknownUser('Unknown vk_user_id or empty phone_number')
    if event_loop is None:
        event_loop = asyncio.get_event_loop()
    _logger.debug('Creating client for VK user %d', vk_user_id)
    # Session file is keyed by the normalized phone number.
    context.clients[vk_user_id] = client = TelegramClient(
        os.path.join(config.get('paths', 'sessions'), parse_phone(phone_number)),
        config.get('telegram', 'api_id'),
        config.get('telegram', 'api_hash'),
        loop=event_loop)
    try:
        await client.connect()
        if not await client.is_user_authorized():
            raise errors.UnauthorizedException(
                f'VK user_id {vk_user_id} is not authorized in Telegram')
        return client
    except BaseException:
        # Fix: was a bare `except:`. Also drop the dead client from the
        # cache; previously the broken client stayed cached and every later
        # call returned a disconnected/unauthorized instance.
        context.clients.pop(vk_user_id, None)
        await client.disconnect()
        raise
def connect(self, site):
    """Open a ZeroWebSocket to the configured server for *site*.

    Looks up the site's wrapper key from the data directory, then builds
    the endpoint from the server.address/server.port/server.secure config
    values (defaulting to 127.0.0.1:43110, insecure).
    """
    wrapper_key = Site.getWrapperkey(self.getDataDirectory(), site)
    host = config.get("server.address", "127.0.0.1")
    port = config.get("server.port", "43110")
    use_tls = config.get("server.secure", False)
    endpoint = "%s:%s" % (host, port)
    return ZeroWebSocket(wrapper_key, endpoint, use_tls)
def actionConfigGet(self, name):
    """
    Get config value
    Usage:
        config get <name>    Print config variable <name>. <name> can be dot-separated.
    """
    # Python 2 print statement; writes the raw config value to stdout.
    print config.get(name)
def autoadopt(self, device):
    """Automatically adopt *device* via its DHCP option-82 association.

    Looks up the Option82Info row matching the device's MAC, checks the
    device's software version against the configured whitelist prefixes,
    loads the pre-generated config file for the associated switch name and
    enqueues a DeviceConfigurationTask.  Logs and returns early on any
    missing association, version mismatch or missing config file.
    """
    # NOTE(review): ready_devices is assigned but never used here.
    ready_devices = None
    association = None
    with sql_ses() as ses:
        try:
            association = ses.query(Option82Info).filter(
                Option82Info.downstream_switch_mac == device.mac_address).one()
        except sqlalchemy.orm.exc.NoResultFound:
            association = None
    if association is None:
        self._logger.info('opt82/%s: could not find association for %s', device.identifier, device.address)
        return
    autoconf_path = config.get('liscain', 'autoconf_path')
    whitelisted_prefixes = config.get('liscain', 'autoconf_version_whitelist_prefix')
    # No whitelist configured means every version is acceptable.
    version_ok = False
    if whitelisted_prefixes is None:
        version_ok = True
    else:
        whitelisted_prefixes = whitelisted_prefixes.split(',')
        for whitelisted_prefix in whitelisted_prefixes:
            if device.version.startswith(whitelisted_prefix):
                version_ok = True
                break
    switch_name = association.downstream_switch_name
    if not version_ok:
        self._logger.info(
            'opt82/%s (%s @ %s) does not meet autoconf criteria (version)',
            device.identifier, switch_name, device.address)
        return
    # Config file is expected at <autoconf_path>/<switch_name>.cfg.
    config_path = '{}/{}.cfg'.format(autoconf_path, switch_name)
    self._logger.info('opt82/%s: trying autoadopt for %s', device.identifier, switch_name)
    switch_config = None
    try:
        with open(config_path) as fp:
            switch_config = fp.read()
    except FileNotFoundError:
        self._logger.error(
            'opt82/%s: failed to open %s for switch autoconfiguration',
            device.identifier, config_path)
        return
    try:
        self._commander.enqueue(
            device,
            tasks.DeviceConfigurationTask(device, identity=switch_name, configuration=switch_config),
        )
    except BaseException as e:
        # Enqueue failures are logged but never propagate (best-effort).
        self._logger.error(e)
def emit_base_config(self):
    """Render the Cisco base configuration template for this device.

    Fills baseconfig/cisco.cfg with this switch's identifier and the
    configured adopt DN / init credentials, and returns the rendered
    text wrapped in a StringIO (TFTP-servable file object).
    """
    with open('baseconfig/cisco.cfg') as fp:
        conf = fp.read().format(
            liscain_hostname=self.identifier,
            liscain_adopt_dn=config.get('liscain', 'liscain_adopt_dn'),
            liscain_init_username=config.get('liscain', 'liscain_init_username'),
            liscain_init_password=config.get('liscain', 'liscain_init_password'),
        )
    return StringIO(conf)
def register_device():
    """Register this host as a bunq device-server and persist the result.

    By default all IPs ('*') are permitted; when the 'single_ip' config
    flag is set, only the host's current public IP is registered.
    """
    permitted_ips = ['*']
    if config.get("single_ip"):
        permitted_ips = [network.get_public_ip()]
    log.info("Registering permitted IPs {}".format(",".join(permitted_ips)))
    method = "v1/device-server"
    data = {
        "description": "bunq2ynab on " + network.get_hostname(),
        "secret": config.get("api_token"),
        "permitted_ips": permitted_ips
    }
    post(method, data)
    # Remember registration so later runs can skip this step.
    state.set("device_registered", "True")
def serve_file(name: str, **kwargs) -> StringIO:
    """TFTP read handler: serve the base config to booting switches.

    Only requests for 'network-confg' are honored; the requesting switch is
    looked up (or created) by an identifier derived from its IP address,
    an initialization task is enqueued, and the switch's base config is
    returned.  All other filenames get an empty response.
    """
    global commander
    global option82_controller
    remote_address: str = kwargs['raddress']
    # Stable identifier derived from the peer IP, e.g. 'lc-a000001'.
    remote_id: str = 'lc-{:02x}'.format(
        int(ipaddress.ip_address(remote_address)))
    if name in ['network-confg']:
        device = None
        with lib.db.sql_ses() as ses:
            try:
                # Reuse a known, not-yet-configured device record.
                device = ses.query(CiscoIOS).filter(
                    and_(CiscoIOS.identifier == remote_id,
                         CiscoIOS.state != SwitchState.CONFIGURED)).one()
            except sqlalchemy.orm.exc.NoResultFound:
                # First contact: create and persist a fresh device record.
                device = CiscoIOS()
                device.initialize(identifier=remote_id, address=remote_address)
                ses.add(device)
                ses.commit()
                ses.refresh(device)
            try:
                task = tasks.DeviceInitializationTask(device)
                # Optionally chain option-82 auto-adoption once READY.
                if config.get('liscain', 'autoconf_enabled') == 'yes':
                    task.hook(SwitchState.READY, option82_controller.autoadopt)
                commander.enqueue(device, task)
            except KeyError as e:
                logger.error('init/%s: %s', remote_id, e)
            return device.emit_base_config()
    else:
        logger.debug('%s requested %s, ignoring', remote_id, name)
        return StringIO()
def __init__(self, **kwargs):
    """Initialize the plugin: config section, redis, frame handlers and
    sprite/debug helpers.

    Expects 'game' and 'input_controller' entries in **kwargs.
    """
    super().__init__(**kwargs)
    self.game = kwargs["game"]
    # Per-plugin config section keyed by the concrete class name.
    self.config = config.get(f"{self.__class__.__name__}Plugin")
    self.redis_client = StrictRedis(**config["redis"])
    self.input_controller = kwargs["input_controller"]
    self.machine_learning_models = dict()
    # Frame-handler mode name -> bound handler method.
    self.frame_handlers = dict(
        NOOP=self.handle_noop,
        COLLECT_FRAMES=self.handle_collect_frames,
        COLLECT_FRAMES_FOR_CONTEXT=self.handle_collect_frames_for_context,
        COLLECT_CHARACTERS=self.handle_collect_characters)
    # One-time setup hooks for handlers that need it.
    self.frame_handler_setups = dict(
        COLLECT_FRAMES_FOR_CONTEXT=self.setup_collect_frames_for_context)
    self.frame_handler_setup_performed = False
    self.visual_debugger = VisualDebugger()
    self.game_frame_buffer = GameFrameBuffer(
        size=self.config.get("game_frame_buffer_size", 5))
    self.game_context = None
    self.sprite_identifier = SpriteIdentifier()
    self._register_sprites()
    self.flag = None
    self.uuid = str(uuid.uuid4())
    self.started_at = datetime.now()
def get(self): code = self.request.args.get('code') # final stage if code: return self._final_stage(code) elif self.request.args.get('error'): self.logger.info('Error response from VK: %s', self.request.args) raise HTTP_403( self.request.args.get('error_description'), self.request.args.get('error_reason', self.request.args.get('error'))) # first stage client_id = config.get('vk', 'client_id') return { 'auth_url': 'https://oauth.vk.com/authorize?display=page&v=5.92' '&scope=wall,photos,video,pages,docs,groups,offline' '&redirect_uri=https://api.vk.com/blank.html' '&response_type=code&revoke=1' f'&client_id={client_id}' }
def _set_photo_path(self, item):
    """Prefix a stored photo filename with the configured avatars path.

    Items without a photo are returned untouched; the item dict is
    modified in place and also returned for chaining.
    """
    photo = item.get('photo')
    if photo:
        avatars_path = config.get('telegram', 'avatars_path')
        item['photo'] = avatars_path + '/' + photo
    return item
def actionInstanceShutdown(self, force=False, signal=None):
    """
    Shutdown ZeroNet instance
    Usage:
        instance shutdown            Call ZeroWebSocket for shutdown
        instance shutdown --force    Kill ZeroNet process
        instance shutdown <signal>   Send signal to ZeroNet process
    """
    if not force and signal is None:
        # Graceful path: ask the running instance to shut itself down.
        try:
            with self.connect(config.get("homepage", Addresses.ZeroHello)) as ws:
                try:
                    ws.send("serverShutdown")
                except ZeroWebSocket.Error as e:
                    # Socket errors are ignored: the instance may already
                    # be going down.
                    pass
        except KeyError as e:
            sys.stderr.write("Could not get wrapper key of ZeroHello. Try 'instance shutdown --force'.\n")
            return 1
    else:
        # Forced path: signal the process directly (SIGINT by default).
        if signal is None:
            signal = os_signal.SIGINT
        pid = Instance.getPid(self.getDataDirectory())
        if pid is None:
            sys.stderr.write("Could not find ZeroNet process.\n")
            return 1
        os.kill(pid, signal)
def getCurrentAccount(self):
    """Return the current account address.

    When no 'account.current' value is configured, fall back to the first
    user found in the data directory and persist that choice back into
    the config.
    """
    current = config.get("account.current", None)
    if current is not None:
        return current
    current = User.getUsers(self.getDataDirectory())[0]
    config.set("account.current", current)
    return current
def autoadopt_mapping_listener(self, zmq_context):
    """Blocking loop: receive option-82 mapping messages over ZeroMQ.

    Binds a PULL socket to the configured endpoint and forwards every
    JSON message to self._handle_message.  Runs forever; intended to be
    started in a daemon thread.
    """
    zmq_socket = zmq_context.socket(zmq.PULL)
    zmq_socket.bind(config.get('liscain', 'opt82_zmq_listener'))
    while True:
        try:
            msg = zmq_socket.recv_json()
            self._handle_message(msg)
        except zmq.Again:
            # Nothing available right now; just loop and try again.
            pass
def create_token(payload):
    """Sign *payload* as an HS256 JWT and return the encoded token.

    The payload dict is mutated in place: an 'exp' claim is added using
    the configured 'jwt' timeout (default 180 minutes), and a 'roles'
    entry, when present, is moved into the 'aud' claim.
    """
    timeout_minutes = config.getint('jwt', 'timeout', fallback=180)
    payload['exp'] = datetime.utcnow() + timedelta(minutes=timeout_minutes)
    if 'roles' in payload:
        payload['aud'] = payload.pop('roles')
    secret = config.get('jwt', 'secret')
    return encode(payload, secret, algorithm='HS256')
def get_private_key():
    """Return the persistent RSA private key, generating one on first use.

    Loads the PEM-encoded key from state when present; otherwise creates
    a new 2048-bit RSA key, stores its PEM alongside the API token it was
    generated for, and returns the key object.
    """
    pem_str = state.get("private_key")
    if pem_str:
        return crypto.load_privatekey(crypto.FILETYPE_PEM, pem_str)
    log.info("Generating new private key...")
    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, 2048)
    pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, key)
    state.set("private_key", pem.decode("utf-8"))
    # Record which API token this key belongs to, so a token change can be
    # detected later.
    state.set("private_key_for_api_token", config.get("api_token"))
    return key
def main():
    """Entry point: start TFTP and option-82 listeners, then serve commands.

    Initializes the database, runs the TFTP server and the option-82
    mapping listener in daemon threads, and handles command requests
    forever on a ZeroMQ REP socket.
    """
    global option82_controller
    lib.db.initialize(config.get('liscain', 'database'))
    tftp_task: threading.Thread = threading.Thread(target=tftp_server, daemon=True)
    tftp_task.start()
    zmq_context: zmq.Context = zmq.Context(10)
    zmq_sock: zmq.socket = zmq_context.socket(zmq.REP)
    zmq_sock.bind(config.get('liscain', 'command_socket'))
    option82_controller_autoadopt: threading.Thread = threading.Thread(
        target=option82_controller.autoadopt_mapping_listener, args=(zmq_context, ), daemon=True)
    option82_controller_autoadopt.start()
    # REP socket: strictly alternate recv/send, one reply per request.
    while True:
        msg: dict = zmq_sock.recv_json()
        zmq_sock.send_json(handle_msg(msg))
def __init__(self, **kwargs):
    """Initialize the plugin with its config section and frame handlers.

    Expects 'game' and 'input_controller' entries in **kwargs.
    """
    super().__init__(**kwargs)
    self.game = kwargs["game"]
    # Per-plugin config section keyed by the concrete class name.
    self.config = config.get(f"{self.__class__.__name__}Plugin")
    self.input_controller = kwargs["input_controller"]
    self.machine_learning_models = dict()
    # Frame-handler mode name -> bound handler method.
    self.frame_handlers = dict(NOOP=self.handle_noop,
                               COLLECT_FRAMES=self.handle_collect_frames)
def buildWeek(file_name, dates, calweek, year):
    """
    Simple copy of Lars function with few edits
    This function will be called from the menu
    :param file_name: Name of the .tex file
    :param dates: Dates as list of date objects
    :param calweek: Calender week as int/string
    :param year: Year as int/str
    :return:
    """
    data = _buildHeader(dates, calweek, year)
    for entry in dates:
        data.append('\\hline\n')
        data.append(
            entry.strftime("%A") + ', ' + entry.strftime("%d.%m.%Y") + '\n')
        data.append('&\n')
        # put stuff for day-templates here
        # Each weekday has its own template file, e.g. ./templates/Monday.txt.
        with open('./templates/' + entry.strftime("%A") + '.txt', 'r') as template:
            content = template.readlines()
            for line in content:
                data.append(line)
        data.append('\\\\\n')
    # rest of day can be appended after the loop:
    data.append('\\hline\n')
    data.append('\\end{tabular}\n')
    data.append('\\vfill\n')
    # Signature line: trainee name plus instructor title/name from config.
    data.append('\\unterschriften {' + config.get("name") + " " + config.get("last_name") +
                '}{Auszubildender}{' + config.get("instructor_title") + " " +
                config.get("instructor_name") + " " + config.get("instructor_last_name") +
                '}{Ausbilder}\n')
    data.append('\\end{document}\n')
    # Write the data we have to the file
    _writeToFile(file_name, data)
def initial_setup(self) -> bool:
    """Perform first-contact initialization of the switch over telnet.

    Logs in with the configured init credentials, reads MAC/PID/version,
    generates SSH keys and sets SDM preferences.  Retries up to 10 times
    on timeout or EOF; returns True on success, False after exhausting
    all retries.
    """
    retry_max = 10
    for retry in range(1, retry_max + 1):
        try:
            tc = telnetlib.Telnet(self.address, timeout=10)
            # Drive the login prompts with the configured init credentials.
            self._write(tc, None, [b'\r\n[Uu]sername: '])
            self._write(tc, config.get('liscain', 'liscain_init_username'), [b'\r\n[Pp]assword: '])
            self._write(tc, config.get('liscain', 'liscain_init_password'))
            self._logger.debug('logged in')
            # Disable paging, then harvest device facts.
            self._write(tc, 'terminal length 0')
            self._read_mac(tc)
            self._read_pid(tc)
            self._read_version(tc)
            self._logger.info('generating ssh keys...')
            self._write(tc, 'configure terminal')
            self._write(tc, 'ip ssh rsa keypair-name ssh')
            # RSA keygen is slow on small switches; allow up to 2 minutes.
            self._write(
                tc, 'crypto key generate rsa general-keys label ssh mod 2048', timeout=120)
            self._write(tc, 'sdm prefer dual-ipv4-and-ipv6 default', timeout=10)
            self._write(tc, 'sdm prefer dual-ipv4-and-ipv6 vlan', timeout=10)
            self._write(tc, 'end')
            self._write(tc, 'exit')
            self._logger.debug('logged out')
            self._logger.info('successfully initialized switch')
            return True
        except socket.timeout:
            self._logger.info('timeout, retry %i/%i', retry, retry_max)
            continue
        except EOFError:
            # Switch is still booting; give it time before the next try.
            self._logger.info('switch not ready, wait 10s (retry %i/%i)', retry, retry_max)
            time.sleep(10)
    self._logger.error('failed initial setup')
    return False
def configure(self, switch_config):
    """Upload *switch_config* to the switch and reload it.

    Writes the configuration to flash via tclsh (line by line), copies it
    over startup-config and issues a reload, declining the save-prompt.
    Returns True on success (an EOF during reload counts as success,
    since the switch drops the connection when rebooting), False on
    timeout.
    """
    try:
        tc = telnetlib.Telnet(self.address, timeout=10)
        self._write(tc, None, [b'\r\n[Uu]sername: '])
        self._write(tc, config.get('liscain', 'liscain_init_username'), [b'\r\n[Pp]assword: '])
        self._write(tc, config.get('liscain', 'liscain_init_password'))
        self._logger.debug('[configure] logged in, begin configure')
        self._write(tc, 'terminal length 0')
        # Use the IOS tcl shell to write the config file to flash.
        self._write(tc, 'tclsh')
        tclsh_exp = [b'\\+>']
        self._write(tc, 'puts [open "flash:liscain.config.in" w+] {', tclsh_exp, newline='\r')
        for config_line in switch_config.split('\n'):
            config_line = config_line.strip()
            self._write(tc, config_line, tclsh_exp, newline='\r')
        self._write(tc, '}')
        self._write(tc, 'exit')
        self._write(tc, 'write')
        self._write(tc, 'copy flash:liscain.config.in startup-config', [b'\r\n'])
        # Answer the destination-filename prompt of the copy command.
        self._write(tc, 'startup-config')
        try:
            # 'reload' may first ask to save ('yes/no'), then to confirm.
            prompt = self._write(tc, 'reload', [b'yes/no', b'confirm'])
            if 'yes/no' in prompt:
                time.sleep(1)
                # Decline saving the running config; startup-config was
                # already written above.
                self._write(tc, 'no', [b'confirm'])
                time.sleep(1)
            # Empty line confirms the reload.
            self._write(tc, '')
        except socket.timeout:
            pass
        self._logger.debug('[configure] completed')
        return True
    except socket.timeout:
        self._logger.error('[configure] timed out')
        return False
    except EOFError:
        # Connection dropped by the rebooting switch: treated as success.
        return True
def connect(db, ro=False):
    """Return a cached sqlite3 connection for logical database *db*.

    Connections are cached per (db, mode).  A read-only request reuses an
    already-open read-write connection for the same db.  On first connect
    the expected tables are checked and, in read-write mode, created if
    missing; a read-only connect to an uninitialized db raises.

    :param db: logical database name (key into __tables and paths config)
    :param ro: open the database read-only when True
    """
    global __connections
    # Bug fix: this previously read `k = db + 'R' if ro else 'W'`, which the
    # conditional-expression precedence parses as `(db + 'R') if ro else 'W'`
    # — every read-write connection was cached under the single key 'W',
    # colliding across databases and never matching the `db + 'W'` lookup
    # below.  Parenthesize so the key is always db-specific.
    k = db + ('R' if ro else 'W')
    if k in __connections:
        return __connections[k]
    if ro and db + 'W' in __connections:
        # A read-write connection already exists; reads can use it too.
        return __connections[db + 'W']
    _logger.debug('Creating connection: %s', db)
    __connections[k] = sqlite3.connect(
        'file:' + os.path.abspath(config.get('paths', 'db_' + db, fallback='./main')) +
        ('?mode=ro' if ro else ''),
        check_same_thread=False, uri=True)
    __connections[k].row_factory = sqlite3.Row
    # NOTE(review): config.get may return the *string* 'False', which is
    # truthy — confirm these debug flags are parsed to booleans upstream.
    if config.get('globals', 'debug', fallback=False) and\
            config.get('globals', 'debug_sql', fallback=False):
        _logger.debug('Enabling SQLite query trace')
        __connections[k].set_trace_callback(_logger.debug)
    cursor = __connections[k].cursor()
    # Count how many of the expected tables already exist.
    cursor.execute(
        "select count(*) from sqlite_master where type='table' AND name IN ('{}')"
        .format("', '".join(__tables[db].keys())))
    cnt = cursor.fetchone()[0]
    if cnt < len(__tables[db]):
        if ro:
            raise Exception(f'DB {db} not ready')
        for name, definition in __tables[db].items():
            _logger.debug('Creating table `%s`', name)
            cursor.execute(f'create table {name} {definition}')
        cursor.connection.commit()
    cursor.close()
    return __connections[k]
def logit(parameters):
    """Append a report row (name, email, datetime, attachment) to reports.csv.

    Mutates *parameters* by stamping the current datetime.  Creates the
    CSV file (including its directory and header row) on first use, then
    appends one row built from the header keys.
    """
    header = ['name', 'email', 'datetime', 'attachment']
    parameters['datetime'] = str(datetime.now())
    file_path = os.path.join(APP_DIR, config.get('app-settings', 'attachments'), 'reports.csv')
    if not os.path.exists(file_path):
        create_dir_if_not_exists(file_path)
        with open(file_path, 'w') as wp:
            # Bug fix: a plain csv.writer(wp) was created and immediately
            # overwritten by the configured one; build it once.
            writer = csv.writer(wp, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            writer.writerow(header)
    row_data = [parameters.get(key) for key in header]
    with open(file_path, 'a') as log:
        writer = csv.writer(log, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(row_data)
def change_identity(self, identity):
    """Rename the switch (IOS hostname) over telnet, then delegate upward.

    On telnet timeout or EOF the in-memory identifier is rolled back to
    its previous value and False is returned; on success the result of
    the base-class change_identity is returned.
    """
    old_identity = self.identifier
    try:
        tc = telnetlib.Telnet(self.address, timeout=10)
        self._write(tc, None, [b'\r\n[Uu]sername: '])
        self._write(tc, config.get('liscain', 'liscain_init_username'), [b'\r\n[Pp]assword: '])
        self._write(tc, config.get('liscain', 'liscain_init_password'))
        self._logger.debug('[change_identity] logged in')
        self._write(tc, 'terminal length 0')
        self._write(tc, 'configure terminal')
        # Update local state before pushing the new hostname.
        self.identifier = identity
        self._write(tc, 'hostname {}'.format(identity))
        self._write(tc, 'end')
        self._write(tc, 'exit')
        self._logger.debug('[change_identity] logged out')
        return super().change_identity(identity)
    except socket.timeout:
        self.identifier = old_identity
        return False
    except EOFError:
        self.identifier = old_identity
        return False
def main():
    """CLI entry point: dispatch device / opt82 subcommands over ZeroMQ.

    Connects a REQ socket to the configured command endpoint and executes
    whichever actions the parsed arguments (init_args/args) request.
    """
    zmq_context = zmq.Context()
    zmq_sock = zmq_context.socket(zmq.REQ)
    zmq_sock.connect(config.get('liscain', 'command_socket'))
    if init_args.mode == 'device':
        if args.list:
            list_devices(zmq_sock)
        if args.reinit_by_id is not None:
            reinit(zmq_sock, args.reinit_by_id)
        if args.neighbor_info_by_id is not None:
            get_neigh_info(zmq_sock, args.neighbor_info_by_id)
        if args.delete_by_id is not None:
            delete_device(zmq_sock, args.delete_by_id)
        if args.adopt_by_id is not None:
            if args.identity is None:
                print('identity is required when adopting')
                return
            adopt_device(zmq_sock, args.adopt_by_id, args.identity, 'config/{}.cfg'.format(args.identity))
        if args.adopt_by_mac is not None:
            if args.identity is None:
                print('identity is required when adopting')
                return
            devices = get_devices(zmq_sock)
            mac_matches = []
            # Strip separators so any MAC notation matches (aa:bb vs aabb).
            simplemac = args.adopt_by_mac.replace(':', '')
            for device in devices:
                # Only devices awaiting (re)configuration are candidates.
                if device['state'] not in ['READY', 'CONFIGURE_FAILED']:
                    continue
                if simplemac in device['mac_address'].replace(':', ''):
                    mac_matches.append(device['id'])
            # Require exactly one match to avoid adopting the wrong switch.
            if len(mac_matches) == 1:
                adopt_device(zmq_sock, mac_matches[0], args.identity, 'config/{}.cfg'.format(args.identity))
            elif len(mac_matches) > 1:
                print('error: multiple mac_address matches')
            else:
                print('error: no mac_address matches')
    if init_args.mode == 'opt82':
        if args.list:
            opt82_list(zmq_sock)
        if args.delete_by_id:
            opt82_delete_by_id(zmq_sock, args.delete_by_id)
        if args.set:
            if args.upstream_mac is None or args.upstream_port is None:
                print('error: upstream mac and port are required when setting option82 info')
                return
            opt82_set_info(zmq_sock, args.upstream_mac, args.upstream_port, args.downstream_name)
def get_statistics(): list_date_time = [] file_path = os.path.join(APP_DIR, config.get('app-settings', 'attachments'), 'reports.csv') if not os.path.exists(file_path): print "It seems no certificates generated yet, try sometimes later." with open(file_path, 'rb') as csvfile: reader = csv.reader(csvfile, delimiter=',', quotechar='|') header = reader.next() for line in reader: list_date_time.append(datetime.strptime(line[2], "%Y-%m-%d %X.%f")) print "\nHey I am printing the statistics...\n", "=-=" * 16 _now = datetime.now() print "Last Hour\t Count\t:%d" % (len(filter(lambda d: 1 if d > (_now - timedelta(seconds=60 * 60)) else 0, list_date_time))) print "Last Day \t Count\t:%d" % (len(filter(lambda d: 1 if d > (_now - timedelta(days=1)) else 0, list_date_time))) print "Last Week\t Count\t:%d" % (len(filter(lambda d: 1 if d > (_now - timedelta(days=7)) else 0, list_date_time)))
def write_text_to_image(text):
    """Render *text* onto the certificate template and save it as a PDF.

    Font, color and position come from the config; the output file is
    named after the text with spaces replaced by underscores.  Returns
    the path of the generated PDF.
    """
    img = Image.open(config.get('user-settings', 'certificate'))
    draw = ImageDraw.Draw(img)
    # NOTE(review): eval() on config values executes arbitrary code if the
    # config file is not fully trusted; ast.literal_eval would be safer.
    font = ImageFont.truetype(config.get('app-settings', 'font-family'),
                              eval(config.get('app-settings', 'font-size')))
    font_colour = eval(config.get('app-settings', 'font-colour'))
    xy_points = eval(config.get('user-settings', 'xy_cordinates'))
    draw.text(xy_points, text, font_colour, font=font)
    out_put_path = os.path.join(APP_DIR, config.get('app-settings', 'attachments'),
                                "%s.pdf" % ("_".join(text.split())))
    create_dir_if_not_exists(out_put_path)
    img.save(out_put_path, "PDF", Quality=100)
    return out_put_path
def setup_callback(): global serversocket, callback_ip, callback_port, local_port, portmap_port # Don't try to map ports if we have a public IP callback_ip = callback_port = None using_portmap = False local_ip = network.get_local_ip() if not network.is_private_ip(local_ip): log.info("Host has a public IP...") callback_ip = local_ip elif config.get("port"): log.info("Host has a private IP, port specified, configure forward " + "manually...") callback_ip = network.get_public_ip() else: log.info("Host has a private IP, trying upnp port mapping...") network.portmap_setup() network.portmap_search() callback_ip = network.get_public_ip() using_portmap = True if not callback_ip: log.error("No public IP found, not registering callback.") return if not serversocket: serversocket, local_port = bind_port() log.info("Listening on port {0}...".format(local_port)) serversocket.listen(5) # max incoming calls queued if not using_portmap: callback_port = local_port else: portmap_port = network.portmap_add(portmap_port, local_port) if not portmap_port: log.error("Failed to map port, not registering callback.") return callback_port = portmap_port sync_obj.populate() for acc in sync_obj.get_bunq_accounts(): url = "https://{}:{}/bunq2ynab-autosync".format( callback_ip, callback_port) bunq_api.add_callback(acc["bunq_user_id"], acc["bunq_account_id"], "bunq2ynab-autosync", url)
def bind_port():
    """Bind a listening TCP socket and return (socket, port).

    When a port is configured, bind to it directly.  Otherwise walk up to
    128 candidate ports from network.next_port(), skipping ports already
    in use; raise when no free port is found.
    """
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    configured = config.get("port")
    if configured:
        serversocket.bind(('0.0.0.0', int(configured)))
        return serversocket, int(configured)
    port = None
    for _attempt in range(128):
        port = network.next_port(port)
        try:
            serversocket.bind(('0.0.0.0', port))
        except OSError as err:
            if err.errno != errno.EADDRINUSE:
                raise
            log.warning("Port {0} is in use, trying next...".format(port))
            continue
        return serversocket, port
    raise Exception("No free port found")
def main():
    """CLI entry point for the certificate mailer (Python 2).

    With --admin, print generation statistics; otherwise treat argv[1] as
    the recipient email and the remaining args as the full name, generate
    the certificate image, mail it, and log the send.
    """
    try:
        if sys.argv[1] == '--admin':
            print 'loading statistics...'
            get_statistics()
            return
        parameters = {'name': " ".join(sys.argv[2:]),
                      'email': sys.argv[1]
                      }
    except IndexError:
        # Not enough arguments: show usage and quit.
        print "USAGE : python certify <email_id> <full_name_with_space>"
        print "ADMIN USAGE : python certify --admin"
        return
    print "\nPlease wait, sending mail to %(email)s for %(name)s certificate..." % (parameters)
    heading = config.get('email-sender', 'subject')
    attachments = [write_text_to_image(parameters['name'])]
    send_email(parameters.get('email'), heading, open('data/emailer.html').read(), attachments, parameters)
    parameters['attachment'] = attachments[0]
    # Record the send in reports.csv for the statistics view.
    logit(parameters)
def __init__(self, **kwargs):
    """Initialize the game wrapper: window/platform state, frame limiter,
    sprites and redis connection.

    Optional kwargs: 'platform' and 'window_name'.
    """
    super().__init__(**kwargs)
    # Per-game config section keyed by the concrete class name.
    self.config = config.get(f"{self.__class__.__name__}Plugin")
    self.platform = kwargs.get("platform")
    self.window_id = None
    self.window_name = kwargs.get("window_name")
    self.window_geometry = None
    self.is_launched = False
    self.frame_grabber_process = None
    # Throttle frame grabbing to the configured FPS (default 4).
    self.game_frame_limiter = GameFrameLimiter(fps=self.config.get("fps", 4))
    self.sprites = self._discover_sprites()
    self.redis_client = StrictRedis(**config["redis"])
    self.kwargs = kwargs
def get_classifier_scaler(scenario_name = "FTC"): scenario = _scenarios[scenario_name] # Set up classifier classifier = 0 if scenario['classifier'] == 'rf': classifier = RandomForest() print 'Using RANDOM FOREST' elif scenario['classifier'] == 'svm': classifier = sklearn_SVC() print 'Using SVM' print 'Loading model from "{}"'.format(scenario['model']) classifier.load_model(scenario['model']) # Standardize data points if necessary scaler = None if 'scaled' in scenario['model']: scaler = pickle.load(open(config.get('datasets', 'contagio_scaler'))) print 'Using scaler' return classifier, scaler
def __init__(self, **kwargs):
    """Initialize the plugin: config section, redis, frame handlers and
    sprite/debug helpers.

    Expects 'game' and 'input_controller' entries in **kwargs.
    """
    super().__init__(**kwargs)
    self.game = kwargs["game"]
    # Per-plugin config section keyed by the concrete class name.
    self.config = config.get(f"{self.__class__.__name__}Plugin")
    self.redis_client = StrictRedis(**config["redis"])
    self.input_controller = kwargs["input_controller"]
    self.machine_learning_models = dict()
    # Frame-handler mode name -> bound handler method.
    self.frame_handlers = dict(
        NOOP=self.handle_noop,
        COLLECT_FRAMES=self.handle_collect_frames,
        COLLECT_FRAMES_FOR_CONTEXT=self.handle_collect_frames_for_context,
        COLLECT_CHARACTERS=self.handle_collect_characters
    )
    # One-time setup hooks for handlers that need it.
    self.frame_handler_setups = dict(
        COLLECT_FRAMES_FOR_CONTEXT=self.setup_collect_frames_for_context
    )
    self.frame_handler_setup_performed = False
    self.visual_debugger = VisualDebugger()
    self.game_frame_buffer = GameFrameBuffer(size=self.config.get("game_frame_buffer_size", 5))
    self.game_context = None
    self.sprite_identifier = SpriteIdentifier()
    self._register_sprites()
    self.flag = None
    self.uuid = str(uuid.uuid4())
    self.started_at = datetime.now()
import sys, os _current_dir = os.path.abspath(os.path.dirname(__file__)) PROJECT_ROOT = os.path.normpath(os.path.join(_current_dir, "..")) sys.path.append(PROJECT_ROOT) from lib.config import config mimicus_dir = config.get('pdfrate', 'mimicus_dir') import_path = os.path.join(mimicus_dir, 'reproduction') sys.path.append(import_path) from common import * from common import _scenarios from mimicus.tools.featureedit import _pdfrate_feature_names as feats import numpy as np def _pdfrate_wrapper(ntuple): ''' A helper function to parallelize calls to gdkde(). ''' try: return pdfrate_once(*ntuple) except Exception as e: return e def pdfrate_once(classifier, scaler, file_path): pdf_feats = FeatureEdit(file_path) pdf_feats = pdf_feats.retrieve_feature_vector_numpy() if scaler:
:copyright: (c) 2014- by Internet by Design Ltd :license: GPL v3, see LICENSE for more details. """ from git import Repo import json import os import requests import shutil import socket import sys from lib import state from lib.config import config root = config.get("general", "path") """ Pull in list of modules from the config server """ payload = {'format': 'json', 'server__name': socket.gethostname()} # Generate payload for request r = json.loads(requests.get("http://" + config.get("general", "confighost") + "/api/v1/module/", params=payload).text) """ Loop through returned results and grab core module data """ modules = {} for module in r['objects']: # Loop through modules the server says we should have payload = {'format': 'json', 'name': module['name']} moduleData = json.loads( requests.get("http://" + config.get("general", "confighost") + "/api/v1/module_list/", params=payload).text)[ 'objects'][0] # Pull config options and build a modules list modules[module['id']] = {"name": module['name'], "package": moduleData["package"], "config": module["config"]} """ Get list of currently installed modules so we can remove ones that have been removed """
from SocketServer import ThreadingMixIn from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler import threading import time import pickle import os import sys # Make sure the working directory in the src. _current_dir = os.path.abspath(os.path.dirname(__file__)) PROJECT_ROOT = os.path.normpath(os.path.join(_current_dir, "..")) sys.path.append(PROJECT_ROOT) from lib.config import config HOST = config.get('detector_agent', 'host') PORT = int(config.get('detector_agent', 'port')) from lib.common import hash_file # Import local classifiers. from classifiers.pdfrate_wrapper import pdfrate from classifiers.hidost_wrapper import hidost from classifiers.bundle_wrapper import hidost_pdfrate, hidost_pdfrate_sigmoid import sklearn print sklearn.__version__ # Import remote classifiers. from classifiers.cuckoo_wrapper import cuckoo from classifiers.wepawet_wrapper import wepawet
--events=/tmp/events.csv --imps=/tmp/impressions.csv') parser.add_argument("--app_name", help='Spark Application name') parser.add_argument("--events", help='Event CSV file location') parser.add_argument("--imps", help='Impression SCV file location') return parser.parse_args() if __name__ == "__main__": # Processing arguments, assigning values or default values from configuration args = create_argument_parser() if args.app_name: app_name = args.app_name else: app_name = config.get('app', 'spark_app_name') if args.events: event_file = args.events else: event_file = config.get('input', 'event_file') if args.imps: impression_file = args.imps else: impression_file = config.get('input', 'impression_file') count_of_events_path = config.get('output', 'count_of_events_path') count_of_users_path = config.get('output', 'count_of_users_path') # SparkConfig with master='local' is used here
import os from argparse import ArgumentParser import pickle import sys import hashlib _current_dir = os.path.abspath(os.path.dirname(__file__)) PROJECT_ROOT = os.path.normpath(os.path.join(_current_dir, "..")) sys.path.append(PROJECT_ROOT) from lib.config import config exec_dir = config.get('hidost', 'exec_dir') model_path = config.get('hidost', 'model_path') feats_path = config.get('hidost', 'feats_path') # temp files cache_dir = config.get('hidost', 'cache_dir') if config.has_option('hidost', 'sk_learn_location'): sk_learn_location = config.get('hidost', 'sk_learn_location') sys.path.insert(1, sk_learn_location) from sklearn import datasets pdf2paths_cmd = os.path.join(exec_dir, "pdf2paths") feat_extract_cmd = os.path.join(exec_dir, "feat-extract") empty_file_list = os.path.join(cache_dir, "empty.list") def hash_str(string): return hashlib.sha1(string).hexdigest() def hash_file(filepath): sha1 = hashlib.sha1()
import os import requests import json import sys import hashlib import time import re _current_dir = os.path.abspath(os.path.dirname(__file__)) PROJECT_ROOT = os.path.normpath(os.path.join(_current_dir, "..")) sys.path.append(PROJECT_ROOT) from lib.config import config HOST = config.get('cuckoo', 'host') PORT = int(config.get('cuckoo', 'port')) TIMEOUT = int(config.get('cuckoo', 'timeout')) from lib.common import * logger = logging.getLogger('gp.cuckoo') def check_reported(file_path): sha1 = hash_file(file_path) REST_URL = "http://%s:%d/tasks/check_reported/%s" % (HOST, PORT, sha1) request = requests.get(REST_URL) if request.status_code == 200: json_decoder = json.JSONDecoder() if request.text: r = json_decoder.decode(request.text) return r def submit(file_path, public_name = None, timeout = None, cache=False):
from pymongo import MongoClient import pickle import os import sys _current_dir = os.path.abspath(os.path.dirname(__file__)) PROJECT_ROOT = os.path.normpath(os.path.join(_current_dir, "..")) sys.path.append(PROJECT_ROOT) from lib.config import config ADDR = config.get("detector_agent", "mongodb_uri") DB_NAME = config.get("detector_agent", "db_name") client = MongoClient(ADDR) db = client[DB_NAME] def load_pickle_to_mongodb(pickle_file_name, col): cache = pickle.load(open(pickle_file_name)) records = [] for sha1, result in cache.iteritems(): record = {"sha1": sha1, "result": result} records.append(record) print "inserting" col.insert(records) col.create_index("sha1") def dump_cuckoo_sigs(): db = client.cuckoo
import time import os import sys import xml.etree.ElementTree as ET _current_dir = os.path.abspath(os.path.dirname(__file__)) import_path = os.path.join(_current_dir, './wepawet') sys.path.append(import_path) from submit_to_wepawet import wepawet_submit_file, AnalysisOptions, wepawet_query _current_dir = os.path.abspath(os.path.dirname(__file__)) PROJECT_ROOT = os.path.normpath(os.path.join(_current_dir, "..")) sys.path.append(PROJECT_ROOT) from lib.config import config user = config.get('wepawet', 'username') passwd = config.get('wepawet', 'password') from lib.common import * logger = logging.getLogger('gp.wepawet') def submit_file(file_path): #resource = "./variants/test1.pdf" analysis_opts = AnalysisOptions() submit_status_xml = wepawet_submit_file(file_path, analysis_opts, user, passwd) #print submit_status_xml submit_status = ET.fromstring(submit_status_xml) task_id = None if submit_status.attrib['state'] == 'ok':