def __init__(self, host_name, port_number):
    """
    Instantiate object with the following parameters:

    host_name       ElasticSearch host name
    port_number     ElasticSearch API port number
    """
    self.es_connection = Connection(host_name, port_number)
def execute_job(job):
    Connection.Instance().set_access_token_secret(job.access_token, job.access_secret)
    if not Connection.Instance().job_exists(job):
        initialize_job(job)
    print("{} started !".format(job.name))
    collection_job = Connection.Instance().jobs_db[job.name]
    current_number_of_users = collection_job.count()
    while current_number_of_users < job.user_limit:
        next_user = job.crawling_strategy(collection_job)
        print("Fetching followers of {}...".format(next_user["screen_name"]))
        process_user(next_user, job, collection_job)
        current_number_of_users = collection_job.count()
    else:
        print("!!!JOB FINISHED!!!\nuser limit : {}, number of collected users : {}"
              .format(job.user_limit, current_number_of_users))
def get_server_conf(core_ip, source_id, args):
    # establish a synchronous connection to the server
    conn = Connection(core_ip)

    # fetch config from server
    server_config = conn.fetch_config()

    # pull out the configs relevant to this client
    server_conf = {
        'videocaps': server_config['mix']['videocaps'],
        'audiocaps': server_config['mix']['audiocaps']
    }

    if source_id is not None:
        # get conf from server for this source
        d = server_config[source_id]
        if args.debug:
            pprint(d)

        # stomp all over command line values
        # this is backwards: command line should override conf file.
        for k in d:
            if args.debug:
                print('--{}="{}"'.format(k, d[k]))
            # python argparse converts a-b to a_b, so we will too.
            args.__setattr__(k.replace("-", "_"), d[k])

    return server_conf, args
def run(self):
    Connection.Instance().set_access_token_secret(self.job.access_token, self.job.access_secret)
    if not Connection.Instance().job_exists(self.job):
        initialize_job(self.job)
    print("{} started !".format(self.job.name))
    collection_job = Connection.Instance().jobs_db[self.job.name]
    current_number_of_users = collection_job.count()
    while current_number_of_users < self.job.user_limit and not self.exit.is_set():
        print("Heartbeat from job {}, access token = {}, access_secret = {}".format(
            self.job.name, self.job.access_token, self.job.access_secret))
        next_user = self.job.crawling_strategy(collection_job)
        print("Fetching followers of {}...".format(next_user["screen_name"]))
        process_user(next_user, self.job, collection_job)
        current_number_of_users = collection_job.count()
    else:
        print("!!!JOB FINISHED!!!\nuser limit : {}, number of collected users : {}"
              .format(self.job.user_limit, current_number_of_users))
class Aliases(object):
    """
    This class contains all the methods related to ElasticSearch alias management.
    """

    def __init__(self, host_name, port_number):
        """
        Instantiate object with the following parameters:

        host_name       ElasticSearch host name
        port_number     ElasticSearch API port number
        """
        self.es_connection = Connection(host_name, port_number)

    def create_alias(self, alias_name, index_name):
        """
        Create an alias to the specified index.

        alias_name      The alias name to create
        index_name      The index the alias points to
        """
        es = self.es_connection.get_connection()
        result = es.indices.put_alias(name=alias_name, index=index_name, ignore=400)
        # Display error if there is one
        acknowledge_result(result)

    def delete_alias(self, alias_name, index_name):
        """
        Delete the specified alias from ElasticSearch.

        alias_name      The alias name to delete
        index_name      The index that the alias points to
        """
        es = self.es_connection.get_connection()
        result = es.indices.delete_alias(index=index_name, name=alias_name)
        # Display error if there is one
        acknowledge_result(result)

    def list_alias(self, index_name):
        """
        List the aliases defined on the ElasticSearch cluster.

        index_name      Name of index to list aliases (default is _all)
        """
        es = self.es_connection.get_connection()
        if not index_name:
            result = es.indices.get_aliases()
        else:
            result = es.indices.get_aliases(index=index_name)
        # Print an error if one occurred
        acknowledge_result(result)

    def show_alias(self, alias_name):
        """
        Show the details about the specified alias.

        alias_name      The name of the alias to show
        """
        print "Not implemented."
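# A minimal usage sketch for the Aliases class above. It assumes the Connection
# and acknowledge_result helpers it depends on are importable; the host name,
# port, and alias/index names are illustrative assumptions, not values taken
# from the original code.
aliases = Aliases("localhost", 9200)                  # assumed ES host/port
aliases.create_alias("logs-current", "logs-2015-01")  # assumed alias/index names
aliases.list_alias("logs-2015-01")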
def get_test_result_set():
    """
    temp added for testing
    :return:
    """
    connection = Connection("dynamodb", "us-west-1")
    storage_obj = DynamoDB(connection=connection.new_connection())
    storage_obj.set_storage_set_name("cs-file-metadata")
    storage_set = storage_obj.get_storage_set()
    result_set = storage_obj.list(storage_set) if storage_set else None
    return result_set
def __init__(self):
    self.prev_videoMode = "2D"
    self.prev_compState = None
    self.initialized = False
    self.player = XBMCPlayer()
    self.player.register_observer(self)
    self.monitor = XBMCMonitor()
    self.monitor.register_observer(self)
    self.connection = Connection()
    self.updateSettings()
    self.daemon()
def mock_conn(request, mocker):
    obj = Namespace(conn1=Connection(), conn2=Connection(), conn3=Connection())
    obj.conn1._encode = lambda **kwargs: CONN_ONE_ENCODE
    obj.conn1._decode = lambda **kwargs: CONN_ONE_DECODE
    obj.conn2._encode = lambda **kwargs: CONN_TWO_ENCODE
    obj.conn2._decode = lambda **kwargs: CONN_TWO_DECODE
    obj.conn3._encode = lambda **kwargs: CONN_THR_ENCODE
    obj.conn3._decode = lambda **kwargs: CONN_THR_DECODE
    conns = [obj.conn1, obj.conn2, obj.conn3]
    methods = ['_encode', '_decode', 'do_encode', 'do_decode', 'attach']
    for conn in conns:
        for method in methods:
            mocker.spy(conn, method)
    return obj
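# Sketch of a test that could consume the mock_conn fixture above (assuming it
# is registered with @pytest.fixture). CONN_ONE_ENCODE is the constant the
# fixture already references; the test name and assertions are illustrative.
def test_conn1_encode_is_spied(mock_conn):
    assert mock_conn.conn1._encode() == CONN_ONE_ENCODE
    assert mock_conn.conn1._encode.call_count == 1  # spy installed by mocker.spy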
def listen_new_connection(self):
    """
    Listen for new connections and create the supervisor if it is not already created
    :return: void
    """
    conn, client_address = self.__socket.accept()  # conn variable is a Socket
    connection = Connection(conn, (client_address[0], self.__socket_port), self.__event, self.__db_file)
    connection.start()
    if not self.__supervisor:
        self.__supervisor = SupervisorThread(self.__event)
        self.__supervisor.start()
def __init__(self, config, passphrase=None):
    Connection.__init__(self)
    if not isinstance(config, Config):
        raise ValueError('Config must be an instance of Config()')
    public_key = private_key = None
    if config.public_key:
        with open(config.public_key, 'rb') as file:
            public_key = file.read()
    if config.private_key:
        with open(config.private_key, 'rb') as file:
            private_key = file.read()
    self.aes_dir = config.aes_dir
    self.rsa = lib_rsa.Rsa(private_key=private_key, public_key=public_key, passphrase=passphrase)
    self.aes, self.aes_filename = self.__get_aes_info()
def process_user(user, job, collection_job):
    result = get_followers_page_and_next_cursor(user["screen_name"], user["last_cursor"])
    if result:
        page, next_cursor = result
    else:
        print("...Account unauthorized, skipping")
        collection_job.update_one({"id": user["id"]}, {"$set": {"authorized": False}})
        return

    # find user IDs that are not currently in the database and fetch their profiles
    try:
        q = Queue("default", connection=Connection.Instance().redis_server)
        ret = q.enqueue(save_new_users, args=(page, job.name))
    except ModuleNotFoundError as e:
        print(e)

    collection_job.update({"id": user["id"]}, {
        "$addToSet": {"follower_ids": {"$each": page}},
        "$set": {"finished": next_cursor == 0, "last_cursor": next_cursor},
    })
class Cluster(object):
    """
    This class provides access to ElasticSearch cluster management features.
    """

    def __init__(self, host_name, port_number):
        """
        Instantiate object with the following parameters:

        host_name       ElasticSearch host name
        port_number     ElasticSearch API port number
        """
        self.es_connection = Connection(host_name, port_number)

    def cluster_health(self, index_name):
        """
        Display basic cluster health information, or if an index is specified, of that index.

        index_name      Index to get health status on
        """
        es = self.es_connection.get_connection()
        if index_name == "_all":
            result = es.cluster.health()
        else:
            result = es.cluster.health(index=index_name)
        # Print an error if one occurred
        acknowledge_result(result)
def get_result_set(region, bucket, table, base):
    """get result set from storage object
    """
    connection = Connection(base, region)
    base = base.lower()
    storage_obj = None
    if base == "s3":
        storage_obj = S3(connection=connection.new_connection())
        storage_obj.set_storage_set_name(bucket)
    elif base == "dynamodb":
        storage_obj = DynamoDB(connection=connection.new_connection())
        storage_obj.set_storage_set_name(table)
    storage_set = storage_obj.get_storage_set()
    result_set = storage_obj.list(storage_set) if storage_set else None
    return result_set
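# Hedged usage sketch for get_result_set() above; the region, bucket, and
# table names are illustrative assumptions rather than values from the source.
result_set = get_result_set("us-west-1", "my-bucket", "my-table", "s3")
if result_set:
    for item in result_set:
        print(item)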
def processChangelog(basepath="~/.jot/jot.changelog"):
    """Look for foreign changelogs and update and delete them"""
    from lib.connection import Connection

    path = matchPath(basepath, mustExist=False) + ".*"
    logfiles = glob.glob(path)

    def writeQuery(db, log):
        """Executes the query and records the transaction"""
        if db.matchIdentifier(log.identifier, quiet=True) is None:
            if log.values is not None:
                db.rawQuery(log.query, log.values, commit=False)
            else:
                db.rawQuery(log.query, commit=False)
            transaction = 'insert into transactions(hash,ts) values("%s","%s")' % (log.identifier, time.time())
            db.rawQuery(transaction, commit=False)
            return True
        return False

    if len(logfiles) > 0:
        print "Found new changelog. Updating local database..."
        db = Connection(table="transactions", verbose=False)
        changesCount = 0
        for logfile in logfiles:
            fp = open(logfile, "rb")
            while True:
                try:
                    log = pickle.load(fp)
                    if writeQuery(db, log):
                        changesCount += 1
                except EOFError:
                    break
            fp.close()
            os.remove(logfile)
        db.commit()
        if changesCount > 0:
            print "Done."
            print decorate("OKGREEN", str(changesCount) + " changes were made.")
        else:
            print "Nothing to update."
    return True
def createFile(self):
    # initialize connection to database
    connection = Connection(self.database_config)
    self.cursor = connection.cursor()

    # prepare stat views
    self.createSQLView()
    records = self.getTraffic()

    delimiter = self.csv_config['delimiter'][1:-1]
    quotechar = self.csv_config['quotechar'][1:-1]
    charset = self.csv_config['file_charset'][1:-1]

    file = open(self.getPathFilename(), 'wb')
    writer = csv.writer(file, delimiter=delimiter, quotechar=quotechar, quoting=csv.QUOTE_ALL)
    for record in records:
        writer.writerow((record[0], unicode(record[1], 'utf-8').encode(charset), record[2], record[3]))
    file.close()
def __init__(self, args):
    """
    Instantiate with an argument parser.
    """
    self.object = args.object
    self.action = args.action
    self.target = args.target
    self.target_index = args.target_index
    self.host = args.host
    self.port = args.port
    self.shards = args.shards
    self.replicas = args.replicas
    self.es_connection = Connection(self.host, self.port)
async def handle_session(self):
    while True:
        try:
            print('Enter command name:')
            action = sys.stdin.readline().strip()
            if action == 'exit':
                print('Exiting')
                return
            elif action == 'help':
                print('Available commands:\nhelp\nexit')
                for command in self.type_to_callback:
                    print(command)
                continue
            if action not in self.type_to_callback:
                print('Bad command')
                continue
            request = {'type': action}
            if action not in self.no_auth_check_handlers:
                username = self.context.get('username')
                auth_token = self.context.get('auth_token')
                if not username or not auth_token:
                    print('You should sign in first')
                    continue
                request['auth'] = {'username': username, 'auth_token': auth_token}
            for param in self.type_to_params.get(action, ()):
                print('Enter {}:'.format(param))
                request[param] = sys.stdin.readline().strip()
            reader, writer = await asyncio.open_connection(self.server_host, self.server_port)
            conn = Connection(reader, writer)
            await conn.write(request)
            response = await conn.read()
            await conn.close()
            if response.get('code') == 200:
                self.type_to_callback.get(action)(request, response.get('data', {}))
            else:
                print('Error occurred: {}'.format(response.get('data')))
        except KeyboardInterrupt:
            raise
        except Exception as e:
            logging.exception('Exception occurred: {}'.format(e))
            # traceback.print_exc(file=open('client.log', 'a'))
            print('Something went wrong')
class Tcpconnection(Connection):
    def __init__(self, module, (dst, dport), (src, sport)):
        Connection.__init__(self, module, (dst, dport), (src, sport))
        self.outqueue = PriorityQueue()
        self.inqueue = PriorityQueue()
        self.outseq = 0      # last ACKed sequence number
        self.outseq_max = 0  # for detection of out of order packets
        # minimum sequence number that is allowed to get appended
        # i.e. packets that got inserted, ack'd AND retransmitted must not
        # be appended to the {incoming,outgoing} buffer again
        self.outseq_min = 0
        self.inseq_min = 0
        self.inseq = 0       # last ACKed sequence number
        self.inseq_max = 0   # for detection of out of order packets
async def handle(self, reader, writer):
    try:
        addr = writer.get_extra_info('peername')
        logging.info('Incoming request from {}'.format(addr))
        conn = Connection(reader, writer)
        request = await conn.read()
        response = Handler(self.auth_handler).handle_request(request)
        await conn.write(response)
        await conn.close()
    except KeyboardInterrupt:
        raise
    except Exception as e:
        logging.exception('Unhandled exception occurred: {}'.format(e))
class Irc(object):
    """Handles a client connection to the IRC protocol"""

    def __init__(self, server, publisher, flood_prevention=1):
        self.server = server
        self.publisher = publisher
        self.flood_prevention = flood_prevention
        self._conn = Connection(server['host'], server['port'],
                                server['ssl'], server['timeout'])  # Internal connection

        # Timer to prevent flooding
        self.timer = Event()
        self.timer.set()

        # The canonical channels of IRC to subscribe / publish
        # Receives input to send to irc server
        self.sender = Queue()
        # Receives output to publish
        self.receiver = Queue()

        # Subscribe my output to receive data from connection
        self.publisher.subscribe(self.receiver, self._conn.receiver, Msg.from_msg)
        # Subscribe connection to send data from my input
        self.publisher.subscribe(self._conn.sender, self.sender, self._prevent_flood)

    @property
    def connected(self):
        return self._conn.connected

    def connect(self):
        if self.connected:
            return True
        self._conn.connect()
        if self.connected:
            return True
        else:
            return self._conn.state

    def disconnect(self):
        self._conn.disconnect()
        return self.connected

    def kill(self):
        """Completely terminate the irc connection"""
        self.publisher.unsubscribe(self.receiver, self._conn.receiver)
        self.publisher.unsubscribe(self._conn.sender, self.sender)
        self._conn.kill()

    def _prevent_flood(self, msg):
        """Used to prevent sending messages extremely quickly"""
        if self.flood_prevention > 0 and msg.cmd != 'PONG':
            self.timer.wait()
            self.timer.clear()
            gevent.spawn_later(self.flood_prevention, self.timer.set)
        return str(msg)
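# Hedged usage sketch for the Irc class above. The server dict keys mirror
# those read in __init__; the concrete values and the publisher object are
# assumptions for illustration only.
server = {'host': 'irc.example.net', 'port': 6667, 'ssl': False, 'timeout': 30}
irc = Irc(server, publisher, flood_prevention=1)
if irc.connect() is True:
    print('connected to {}'.format(server['host']))
else:
    irc.kill()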
def main():
    parser = argparse.ArgumentParser(description='Generic script to export '
                                     'from POS database or stored pos files.'
                                     ' See README.md or docs/ for details.')
    parser.add_argument('-d', '--date', metavar='date', help='Date to run as mm-dd-yyyy')
    args = parser.parse_args()

    if args.date:
        run_date = datetime.datetime.strptime(args.date, '%m-%d-%Y').date()
    else:
        run_date = datetime.date.fromordinal(datetime.date.today().toordinal() - 1)

    config = configparser.ConfigParser()
    config.read('settings\\settings.ini')

    output_filename = '{}{}.{}'.format(config['General']['OutputFileName'],
                                       run_date.strftime('%Y%m%d'),
                                       config['General']['OutputFileExtension'])

    pos_type = config['General']['POS']
    pos = PointOfSale.pos_class(pos_type, config[pos_type])
    pos.set_run_date(run_date)
    pos.gather_pos_data()
    pos.format_data()
    pos.write_file(output_filename)
    pos.clean_up()

    connection_type = config['General']['Connection']
    conn_handler = Connection.connection_class(connection_type, config[connection_type])
    conn_handler.connect()
    conn_handler.change_remote_directory(config[connection_type]['RemotePath'])
    conn_handler.upload_file(output_filename)
    conn_handler.close()

    os.replace(output_filename, config['General']['ArchiveFolder'] + output_filename)
def initialize_job(job):
    print("Initializing job : {}".format(job.name))
    user_profiles = get_user_profiles_single_request(job.seed_list)

    # Determine features for each profile
    for profile in user_profiles:
        profile["features"] = {func.__name__: func(profile) for func in job.classifiers}

    db = Connection.Instance().jobs_db
    collection_job = db[job.name]
    collection_job.create_index("id", unique=True)
    collection_job.insert_many(user_profiles)
def __init__(self, server, publisher, flood_prevention=1):
    self.server = server
    self.publisher = publisher
    self.flood_prevention = flood_prevention
    self._conn = Connection(server['host'], server['port'],
                            server['ssl'], server['timeout'])  # Internal connection

    # Timer to prevent flooding
    self.timer = Event()
    self.timer.set()

    # The canonical channels of IRC to subscribe / publish
    # Receives input to send to irc server
    self.sender = Queue()
    # Receives output to publish
    self.receiver = Queue()

    # Subscribe my output to receive data from connection
    self.publisher.subscribe(self.receiver, self._conn.receiver, Msg.from_msg)
    # Subscribe connection to send data from my input
    self.publisher.subscribe(self._conn.sender, self.sender, self._prevent_flood)
class Mapping(object):
    """
    Provide functions for managing index mappings on an ElasticSearch cluster.
    """

    def __init__(self, host_name, port_number):
        """
        Instantiate object with the following parameters:

        host_name       ElasticSearch host name
        port_number     ElasticSearch API port number
        """
        self.es_connection = Connection(host_name, port_number)

    def list_mapping(self, index_name):
        """
        Show the mappings for a specified index.

        index_name      Index to display mappings.
        """
        es = self.es_connection.get_connection()
        result = es.indices.get_mapping(index=index_name)
        # Display error if there is one
        acknowledge_result(result)
def main(self):
    while True:
        logging.debug("next iteration of loading thread")
        source = Connection.loadSource("www.mujkaktus.cz", "/novinky")
        if source is not None:
            soup = BeautifulSoup(source, "html.parser")
            news = [(article.h3.get_text(), article.p.get_text())
                    for article in soup.find_all("div", class_="journal-content-article")]
        else:
            news = []
        if len(news):
            header, paragraph = news[0]
            loadedArticle = header + " — " + paragraph
            logging.debug('loaded article: "' + loadedArticle + '"')
            if self.article.new(loadedArticle):
                article = self.article.add(loadedArticle)
                for subscriber in self.subscribers.all():
                    deliver = self.postman.add(article, subscriber.id)
                    if deliver is not None:
                        self.bot.sendMessage(int(subscriber.telegram_id), loadedArticle)
        time.sleep(LOADING_TIME)
def send_backend_data(sim: ubirch.SimProtocol, modem: Modem, conn: Connection,
                      api_function, uuid, data) -> (int, bytes):
    MAX_MODEM_RESETS = 1  # number of retries with modem reset before giving up
    MAX_RECONNECTS = 1    # number of retries with reconnect before trying a modem reset

    for reset_attempts in range(MAX_MODEM_RESETS + 1):
        # check if this is a retry for reset_attempts
        if reset_attempts > 0:
            log.debug("retrying with modem reset")
            sim.deinit()
            modem.reset()
            sim.init()
            conn.ensure_connection()

        # try to send multiple times (with reconnect)
        try:
            for send_attempts in range(MAX_RECONNECTS + 1):
                # check if this is a retry for send_attempts
                if send_attempts > 0:
                    log.debug("retrying with disconnect/reconnect")
                    conn.disconnect()
                    conn.ensure_connection()
                try:
                    log.info("sending...")
                    return api_function(uuid, data)
                except Exception as e:
                    log.debug("sending failed: {}".format(e))
                    # (continues to top of send_attempts loop)
            else:
                # all send attempts used up
                raise Exception("all send attempts failed")
        except Exception as e:
            log.debug(repr(e))
            # (continues to top of reset_attempts loop)
    else:
        # all modem resets used up
        raise Exception("could not establish connection to backend")
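# Sketch of how a caller might invoke send_backend_data() above. The sim, modem,
# conn, and api objects plus device_uuid/message are assumed to exist already;
# api.send_data is a placeholder name for the backend call, not a confirmed API.
try:
    status, content = send_backend_data(sim, modem, conn, api.send_data, device_uuid, message)
    log.info("backend responded with status {}".format(status))
except Exception as e:
    log.error("giving up after all retries: {}".format(e))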
parser.add_argument("-c", "--threadcount", type=str, help="thread count", default="1", required=False)
parser.add_argument('--dryrun', action='store_true')
parser.add_argument('--test', help="use test set", action='store_true')

ts = time.time()
LOG_PATH = "/tmp/case_log_%s.log" % datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')

# logging
logging.config.fileConfig('logging.ini', disable_existing_loggers=False,
                          defaults={'logfilename': LOG_PATH})
logger = logging.getLogger(__name__)

# FIXME - file logging
fh = logging.FileHandler(LOG_PATH)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)

# parse args
args = parser.parse_args()
db_table = args.table
s3_bucket = args.bucket
dryrun = args.dryrun
test = args.test

db_connection = Connection("dynamodb", args.region)
db = DynamoDB(connection=db_connection.new_connection())

s3_connection = Connection("s3", args.region)
s3 = S3(connection=s3_connection.new_connection())

main(args.bucket, args.table, args.threadcount)

sns_connection = Connection("sns", args.region)
sns = SNS(connection=sns_connection.new_connection())
sns.send("CS-team", "Done", "meh :-D")
def __init__(self):
    Connection.__init__(self)
"cisco": { "platform": "ios", }, "arista": {"platform": "eos"}, "juniper": {"platform": "junos"}, } defaults_dict = { "username": "******", "password": "******", "data": {"key3": "value3"}, } from lib.connection import Connection from lib.constant import SCRAPLI command = "show version" import datetime a = datetime.datetime.now() conn = Connection(SCRAPLI) conn.open(hosts_dict, groups_dict, defaults_dict) result_dt = conn.send_command(command) print(result_dt["cisco3"].result) conn.close() b = datetime.datetime.now() c = b - a print("Executation time", SCRAPLI, c.seconds)
def query_device(self, data):
    conn = Connection()
    serial_num = conn.query_devices(inspector.receiveDevices)
    conn.reset()
    self._deviceRow.setDevice(serial_num)

    processes = conn.get_remote_process(serial_num, inspector.echoData)
    conn.reset()
    opend_unix_socks = conn.get_remote_unix_socks(serial_num, inspector.echoData)
    conn.reset()

    remote_browsers = inspector.parseSocksAndProc(processes, opend_unix_socks)
    for browser in remote_browsers:
        browser.query_pages(serial_num, conn)
        browser.dump(logger.Logger(logger.Logger.VERBOSE))
        conn.reset()
    self._pageRow.setPackages(remote_browsers)
        usocks.append(words[7])

    lines = proc.split('\n')
    lines = lines[1:]
    for line in lines:
        if line == '':
            break
        words = line.split()
        for i, pid in enumerate(pids):
            if (pid == "is_chrome" and words[8] == "com.android.chrome") or pid == words[1]:
                remote_browsers.append(Browser(words[1], words[8], usocks[i][1:], pid == "is_chrome"))
    return remote_browsers


if __name__ == '__main__':
    conn = Connection(log)
    serial_num = conn.query_devices(receiveDevices)
    log.v("serial: " + serial_num)
    conn.reset()

    processes = conn.get_remote_process(serial_num, echoData)
    conn.reset()
    opend_unix_socks = conn.get_remote_unix_socks(serial_num, echoData)
    conn.reset()

    remote_browsers = parseSocksAndProc(processes, opend_unix_socks)
    if len(remote_browsers) == 0:
        log.i("no remote browsers detected")
        exit(0)
class Indices(object):
    """
    This class contains all the methods related to ElasticSearch index management.
    """

    def __init__(self, host_name, port_number):
        """
        Instantiate object with the following parameters:

        host_name       ElasticSearch host name
        port_number     ElasticSearch API port number
        """
        self.es_connection = Connection(host_name, port_number)

    def create_index(self, index_name, shards, replicas):
        """
        Create an ElasticSearch index

        index_name      Name of index to be created
        shards          Number of shards for index
        replicas        Number of replicas for index
        """
        es = self.es_connection.get_connection()
        result = es.indices.create(
            index=index_name,
            body={
                'settings': {
                    'number_of_shards': shards,
                    'number_of_replicas': replicas
                }
            },
            # Do not generate an error if index exists
            ignore=400
        )
        # Display error if there is one
        acknowledge_result(result)

    def delete_index(self, index_name):
        """
        Delete an ElasticSearch index

        index_name      Name of index to be deleted
        """
        es = self.es_connection.get_connection()
        result = es.indices.delete(index=index_name)
        # Display error if there is one
        acknowledge_result(result)

    def open_index(self, index_name):
        """
        Open a closed index in the ElasticSearch cluster

        index_name      Name of index to be opened
        """
        es = self.es_connection.get_connection()
        result = es.indices.open(index=index_name)
        # Display error if there is one
        acknowledge_result(result)

    def close_index(self, index_name):
        """
        Close an index on the ElasticSearch cluster

        index_name      Name of index to be closed
        """
        es = self.es_connection.get_connection()
        result = es.indices.close(index=index_name)
        # Print an error if one occurred
        acknowledge_result(result)

    def flush_index(self, index_name):
        """
        Flush all of the documents out of the target index

        index_name      Name of index to be flushed
        """
        es = self.es_connection.get_connection()
        result = es.indices.flush(index=index_name)
        # Print an error if one occurred
        acknowledge_result(result)

    def list_index(self, index_name):
        """
        Display a list of indices in the ElasticSearch cluster.

        index_name      Name of index to list (default is _all)
        """
        es = self.es_connection.get_connection()
        result = es.indices.get_settings(index=index_name)
        # Print an error if one occurred
        acknowledge_result(result)

    def show_stats(self, index_name):
        """
        Display performance metrics for specified index.
        """
        es = self.es_connection.get_connection()
        result = es.indices.stats(index_name)
        # Print an error if one occurred
        acknowledge_result(result)
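# Minimal usage sketch for the Indices class above; the host, port, index name,
# and shard/replica counts are illustrative assumptions, not source values.
indices = Indices("localhost", 9200)
indices.create_index("articles", shards=5, replicas=1)
indices.list_index("articles")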
hosts_dict = {"cisco3": {"hostname": "100.100.0.10"}}
defaults_dict = {
    "username": "******",
    "password": "******",
    "port": 22,
    "platform": "cisco_ios"
}

from lib.connection import Connection
from lib.constant import NETMIKO
from lib.constant import SCRAPLI

command = "show version"

import timeit

# conn = Connection(NETMIKO)
# conn.open(hosts=hosts_dict, defaults=defaults_dict)
# result_dt = conn.send_command(command)
# print(result_dt["cisco3"].result)
# conn.close()

conn1 = Connection(NETMIKO)
conn1.open(hosts=hosts_dict, defaults=defaults_dict)
result_dt = conn1.send_command(command)
print(result_dt["cisco3"].result)
conn1.close()
def main():
    HOST, PORT, REVERSE, PUB_KEY_FILE, PRV_KEY_FILE, KN_HOSTS_FILE = read_conf()

    # If an address and/or port is passed as an argument, override it
    addr, port = read_args()
    if addr:
        HOST = addr
    if port:
        PORT = port

    # If known_hosts exists, read it and turn it into a list
    if os.path.isfile(KN_HOSTS_FILE):
        with open(KN_HOSTS_FILE, "rb") as f:
            known_hosts = f.read()
        known_hosts = known_hosts.split("\n")
    else:
        known_hosts = False

    # Create the socket
    conn = Connection(HOST, PORT)
    try:
        if REVERSE == '1':
            conn.listen(timeout=10)
        else:
            conn.connect()

        keyring = Keyring()

        # Read the public/private key files
        with open(PRV_KEY_FILE, 'rb') as f:
            prv_key = f.read()
        with open(PUB_KEY_FILE, 'rb') as f:
            pub_key = f.read()

        # Send our public key to the server
        conn.send(pub_key)
        # Receive the server's public key
        srv_pub_key = conn.receive()
        if srv_pub_key == ':ERR:':
            print '{}[!]{} ERROR: El servidor no reconoce tu clave pública.'.format(color.RED, color.END)
            sys.exit(1)

        # Compare the received public key with the ones stored in known_hosts
        if (not known_hosts) or (srv_pub_key not in known_hosts):
            add_srv_to_known_hosts = raw_input(
                "{}[!]{} WARNING: La clave pública de este servidor no se encuentra almacenada:\n{} \nSi lo desea, puede añadirla [y/n] >>> "
                .format(color.YELLOW, color.END, srv_pub_key))
            if add_srv_to_known_hosts.lower() == "y":
                with open(KN_HOSTS_FILE, "ab") as f:
                    f.write(srv_pub_key + "\n")

        # Send the authentication signature
        signature = keyring.sign(prv_key)
        conn.send(signature)
        srv_signature = conn.receive()

        # If an authentication error is received, report it and exit
        if srv_signature == ':ERR:':
            print '{}[!]{} ERROR: La autenticación ha fallado'.format(color.RED, color.END)
            sys.exit(1)

        # If authentication succeeds, verify the server's signature
        print '{}[+]{} Cliente autenticado correctamente'.format(color.GREEN, color.END)
        sign_valid = keyring.verify(srv_pub_key, srv_signature)
        if sign_valid:
            conn.send(':OK:')
            print '{}[+]{} Servidor autenticado correctamente'.format(color.GREEN, color.END)
        else:
            conn.send(':ERR:')
            print '{}[!]{} ERROR: La autenticación ha fallado'.format(color.RED, color.END)
            sys.exit(1)

        # Synchronization mark
        sync = conn.receive()

        # Session key exchange via PKI
        session_key = keyring.genSessionKey()
        session_key_crypted = keyring.cipherRSA(session_key, srv_pub_key)
        conn.send(session_key_crypted)
        print '{}[+]{} Intercambiando clave de sesión...'.format(color.BLUE, color.END)

        # Once the session key is established and exchanged, attach the keyring to the connection
        keyring.session_key = session_key
        conn.keyring = keyring

        shell = Shell(conn)
        shell.start()
    except conn.timeout:
        print '\n{}[!]{} El servidor está desconectado.\n'.format(color.RED, color.END)
"--base", type=str, help="s3 | dynamodb", default="s3", required=False) # logging logging.config.fileConfig('logging.ini', disable_existing_loggers=False, defaults={'logfilename': LOG_PATH}) logger = logging.getLogger(__name__) # FIXME - file logging fh = logging.FileHandler(LOG_PATH) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) logger.addHandler(fh) # parse args args = parser.parse_args() db_connection = Connection("dynamodb", args.region) db_table = args.table db = DynamoDB(connection=db_connection.new_connection()) s3_connection = Connection("s3", args.region) s3_bucket = args.bucket s3 = S3(connection=s3_connection.new_connection()) if db and s3: main(args.region, args.bucket, args.table, args.base, args.threadcount) else: raise ImportError("No DB connection")
def test_interface_functions(self):
    conn = Connection()
    conn._encode()
    conn._decode()
def connect(self, next_neuron, weight=0):
    Connection(self, next_neuron, weight)
    return self
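# Illustrative sketch of the fluent style enabled by connect() above: because it
# returns self, calls can be chained. The Neuron class name and the weights are
# assumptions, not taken from the original code.
n1, n2, n3 = Neuron(), Neuron(), Neuron()
n1.connect(n2, weight=0.5).connect(n3, weight=-0.25)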