def run(self):
    # database.lock(self.data['SOURCEIP'])
    varBinds, err = snmp.get(self.conn, snmp.sysObjectID)
    if err:
        database.release(self.data['SOURCEIP'])
        return False
    else:
        sprint("Start Thread for device", self.data['SOURCEIP'],
               varBinds[0][1].prettyPrint())
        # Dispatch on the enterprise number in sysObjectID
        # (1.3.6.1.4.1.<enterprise>): 9 is Cisco, 890 is ZyXEL.
        if varBinds[0][1][6] == 9:
            backup_class = BackupDeviceCISCO
        elif varBinds[0][1][6] == 890:
            backup_class = BackupDeviceZyXEL
        else:
            eprint("Unknown device vendor", self.data['SOURCEIP'],
                   varBinds[0][1].prettyPrint())
            database.update_vendor_oid(self.data['SOURCEIP'],
                                       str(varBinds[0][1].prettyPrint()))
            database.release(self.data['SOURCEIP'])
            return False
        with backup_class(varBinds, self.data, self.conn) as backup:
            if backup.run():
                if backup.save() and config.compare:
                    backup.compare()
                database.clear(self.data['SOURCEIP'])
            else:
                eprint("Backup", self.data['SOURCEIP'], "error")
                database.release(self.data['SOURCEIP'])
def init(self, msg):
    sprint('Initialise BACKUP instance', msg)
    if not os.path.exists('/usr/local/share/snmp/mibs'):
        os.mkdir('/usr/local/share/snmp/mibs')
    if not os.path.exists('/usr/local/share/snmp/pymibs'):
        os.mkdir('/usr/local/share/snmp/pymibs')
    database.clear_locks()
    return True
def setup(self, data):
    for obj, idx, value in data:
        sprint("def setup", obj, idx, value)
        ot = ObjectType(ObjectIdentity(self.mib, obj, idx), value)
        result, err = snmp.set(self.conn, ot)
        if err:
            return None
    return True
def get_values(conn, obj):
    varBinds, err = snmp.get(conn, obj)
    if err:
        return {}
    sprint(len(varBinds))
    # Collect the returned varbinds into a dict keyed by OID; the original
    # built `values` but returned a fresh empty dict.
    values = {}
    for varBind in varBinds:
        name, value = varBind
        sprint(varBind, 'Name:', name.items(), 'Val:', value.prettyPrint())
        values[str(name)] = value
    return values
async def message_handler(self, event):
    """Callback method for received events.NewMessage"""
    if event.text:
        # Check whether the required AES key is already present.
        aes_shared_key = None
        for dlg in Dialog.select():
            if dlg.dialog_id == event.sender_id:
                # Found an AES key already shared with this sender.
                aes_shared_key = dlg.aes_shared_key
                break
        if aes_shared_key is None:
            # Fetch the sender's public key and derive a shared AES key
            # via ECDH + HKDF.
            peer_pub_key = get_public_key(event.sender_id)
            shared_key = my_ecdh_private_key.exchange(ec.ECDH(), peer_pub_key)
            aes_shared_key = HKDF(
                algorithm=hashes.SHA256(),
                length=32,
                salt=None,
                info=None,
                backend=default_backend(),
            ).derive(shared_key)
            peer = Dialog(dialog_id=event.sender_id, aes_shared_key=aes_shared_key)
            peer.save(force_insert=True)

        # Decrypt the message and print it.
        b64_enc_text_bytes = event.text.encode("utf-8")
        encr_msg_bytes = base64.b64decode(b64_enc_text_bytes)
        init_vector = encr_msg_bytes[:16]
        aes = Cipher(
            algorithms.AES(aes_shared_key),
            modes.CBC(init_vector),
            backend=default_backend(),
        )
        decryptor = aes.decryptor()
        temp_bytes = decryptor.update(encr_msg_bytes[16:]) + decryptor.finalize()
        unpadder = padding.PKCS7(128).unpadder()
        temp_bytes = unpadder.update(temp_bytes) + unpadder.finalize()
        event.text = temp_bytes.decode("utf-8")

    chat = await event.get_chat()
    if event.is_group:
        sprint(
            '<< {} @ {} sent "{}"'.format(
                get_display_name(await event.get_sender()),
                get_display_name(chat),
                event.text,
            )
        )
    else:
        sprint('<< {} sent "{}"'.format(get_display_name(chat), event.text))
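# The ECDH + HKDF key-agreement block above is repeated verbatim in run()
# below. A minimal sketch of a shared helper, assuming the same module-level
# Dialog model, get_public_key(), and my_ecdh_private_key used above (the
# helper name itself is hypothetical, not from the source):
def _get_or_derive_aes_key(peer_id):
    # Reuse a previously derived key if one is stored for this peer.
    for dlg in Dialog.select():
        if dlg.dialog_id == peer_id:
            return dlg.aes_shared_key
    # Otherwise run ECDH against the peer's public key and stretch the
    # shared secret into a 256-bit AES key with HKDF-SHA256.
    peer_pub_key = get_public_key(peer_id)
    shared_key = my_ecdh_private_key.exchange(ec.ECDH(), peer_pub_key)
    aes_shared_key = HKDF(
        algorithm=hashes.SHA256(),
        length=32,
        salt=None,
        info=None,
        backend=default_backend(),
    ).derive(shared_key)
    Dialog(dialog_id=peer_id, aes_shared_key=aes_shared_key).save(force_insert=True)
    return aes_shared_key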
def get_neighbors(ip, lvl, prn):
    conn = snmp.ConnectionData(ip)
    obj = ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysName', 0))
    varBinds, err = snmp.get(conn, obj)
    if err:
        sys.exit(1)
    sprint('Scan:', ip, varBinds[0][1].prettyPrint())
    sprint('Get:', ip, 'NeighborIpAddress', '...')
    mtxrNeighborIpAddress = get_values(
        conn,
        ObjectType(ObjectIdentity('MIKROTIK-MIB', 'mtxrNeighborIpAddress', 5)))
def build_logs(self, resp: t.Iterator, image_tag: str) -> None:
    """
    Stream build logs to stderr.

    Args:
        resp (:obj:`t.Iterator`):
            blocking generator from docker.api.build
        image_tag (:obj:`str`):
            given model server tag, e.g. bento-server:0.13.0-python3.8-debian-runtime

    Raises:
        docker.errors.BuildError: When an error occurs during the build
            process, usually because the generated Dockerfile is incorrect.
    """
    last_event: str = ""
    image_id: str = ""
    output: str = ""
    logs: t.List = []
    built_regex = re.compile(r"(^Successfully built |sha256:)([0-9a-f]+)$")
    try:
        while True:
            try:
                # Output logs to stdout:
                # https://docker-py.readthedocs.io/en/stable/user_guides/multiplex.html
                output = next(resp).decode("utf-8")
                json_output: t.Dict = json.loads(output.strip("\r\n"))
                # Output to stderr when running in docker.
                if "stream" in json_output:
                    sprint(json_output["stream"])
                    matched = built_regex.search(json_output["stream"])
                    if matched:
                        image_id = matched.group(2)
                    last_event = json_output["stream"]
                    logs.append(json_output)
            except StopIteration:
                log.info(f"Successfully built {image_tag}.")
                break
            except ValueError:
                log.error(f"Errors while building image:\n{output}")
        if image_id:
            self._push_context[image_tag] = docker_client.images.get(image_id)
        else:
            raise BuildError(last_event or "Unknown", logs)
    except BuildError as e:
        log.error(f"Failed to build {image_tag} :\n{e.msg}")
        for line in e.build_log:
            if "stream" in line:
                sprint(line["stream"].strip())
        log.fatal("ABORTING due to failure!")
def push_logs(resp: t.Iterator, image_id: str) -> None:
    """
    Stream push logs to stderr.

    Args:
        resp (:obj:`t.Iterator`):
            blocking generator from docker.api.push(stream=True, decode=True)
        image_id (:obj:`str`):
            id of docker.Images; the reference to our built image.

    Raises:
        docker.errors.APIError: Error during the push process. This could be
            a timeout, urllib3.exceptions, etc.
    """
    try:
        while True:
            try:
                data = next(resp)
                status = data.get("status")
                if "id" not in data:
                    sprint(status)
                else:
                    _id = data.get("id")
                    if "exists" in status:
                        log.warning(f"{_id}: {status}")
                        continue
                    elif "Pushed" in status:
                        log.debug(f"{status} {_id}")
                    else:
                        progress = data.get("progress")
                        sprint(f"{status} {_id}: {progress}")
            except StopIteration:
                log.info(f"Successfully pushed {docker_client.images.get(image_id)}.")
                break
    except APIError as e:
        log.error(f"Errors during `docker push`: {e.response}")
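# A hedged usage sketch for push_logs(); the repository name and tag are
# hypothetical, and `docker_client` is assumed to be the module-level
# docker.DockerClient used elsewhere in this file:
def _example_push(image_id: str) -> None:
    # images.push(stream=True, decode=True) yields status dicts, which is
    # exactly the shape push_logs() consumes.
    resp = docker_client.images.push(
        "example/bento-server", tag="latest", stream=True, decode=True)
    push_logs(resp, image_id)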
def do_compare(msg, opt=[]):
    l0ip = get_l0ip(msg['SOURCEIP'])
    diff_folder = folder.format(syslog=msg, l0ip=l0ip)
    diff_path = get_diff_path(msg, config_folder=diff_folder, l0ip=l0ip)
    file_first, file_last = get_first_last(diff_folder)
    if file_last is None:
        return False
    elif same(file_first, file_last, opt):
        if os.path.exists(diff_path):
            os.remove(diff_path)
        return False
    diff_cmd = _diff_out + opt + [file_first, file_last]
    sprint("Generate difference", diff_path)
    with open(diff_path, 'w') as df:
        diff = Popen(diff_cmd, stdout=df)
        diff.communicate()
    # GNU diff exits 0 when the files are identical, 1 when they differ,
    # and >1 on trouble.
    if diff.returncode > 1:
        raise DiffProcedureError(' '.join(diff_cmd))
    # Fix ownership and permissions on the generated diff.
    os.chmod(diff_path, 0o664)
    os.chown(diff_path, -1, 4)
    return True
def allresponse(p):
    result = []
    candidates = candvector()
    with open(r'../../data/response8462.csv', 'w') as file:
        # Enumerate every 4-tuple of candidate strategies and record the
        # average payoff of each against p.
        for q1 in candidates:
            for q2 in candidates:
                for q3 in candidates:
                    for q4 in candidates:
                        q = [q1, q2, q3, q4]
                        payoff = avePayoff_cal(p, q)
                        record = str(q) + ':' + str(payoff[1]) + '\n'
                        # print(record)
                        file.write(record)
    return result
def do_save(src, msg, opt):
    l0ip = get_l0ip(msg['SOURCEIP'])
    dst_folder = get_config_folder(msg, l0ip=l0ip)
    if not os.path.exists(dst_folder):
        os.makedirs(dst_folder)
        os.chmod(dst_folder, 0o775)
        os.chown(dst_folder, -1, 4)
    dst = get_config_path(msg, config_folder=dst_folder, l0ip=l0ip)
    if no_duplicates:
        file_first, file_last = get_first_last(dst_folder)
        if file_first is not None:
            if file_last is None:
                file_test = file_first
            else:
                file_test = file_last
            if same(file_test, src, opt):
                sprint("backup", msg['SOURCEIP'], "complete, configs are same")
                return False
    sprint("backup", msg['SOURCEIP'], 'complete', dst)
    copyfile(src, dst)
    os.chmod(dst, 0o664)
    os.chown(dst, -1, 4)
    return True
def wait(self, obj, idx):
    ot = ObjectType(ObjectIdentity(self.mib, obj, idx))
    cs, err = snmp.get(self.conn, ot)
    if err:
        return False
    sprint("init", self.data['SOURCEIP'], self.mib, cs[0].prettyPrint())
    # Poll until the device leaves the "waiting" state or half the save
    # timeout elapses.
    i = server.save_timeout // 2
    while i > 0 and cs[0][1] == self.cs_waiting and not err:
        time.sleep(1)
        i -= 1
        cs, err = snmp.get(self.conn, ot)
        sprint("wait", i, self.data['SOURCEIP'], self.mib, cs[0].prettyPrint())
    if err:
        return False
    if i <= 0:
        eprint("Download config", self.data['SOURCEIP'], self.mib,
               'did not start in timeout!')
    # Poll until the transfer finishes or the full save timeout elapses.
    i = server.save_timeout
    while i > 0 and cs[0][1] == self.cs_running and not err:
        time.sleep(1)
        i -= 1
        cs, err = snmp.get(self.conn, ot)
        sprint("downloading", i, self.data['SOURCEIP'], self.mib,
               cs[0].prettyPrint())
    if err:
        return False
    if i <= 0:
        eprint("Download config", self.data['SOURCEIP'], self.mib, 'timeout!')
        return False
    elif cs[0][1] == self.cs_successful:
        if os.path.getsize(self.srv_path) <= 0:
            eprint("Downloaded config", self.data['SOURCEIP'], self.mib,
                   'size zero!')
            return False
        return True
    else:
        eprint("Save config", self.data['SOURCEIP'], self.mib,
               'exit with', cs[0][1].prettyPrint())
        return False
def build_discriminator(discriminator_name, dis_input, opt):
    sprint('Building discriminator described in {}'.format(discriminator_name))
    discriminator = getattr(fancy_net_lib, discriminator_name)(dis_input, opt)
    return discriminator
async def run(self):
    """Main loop of the TelegramClient, will wait for user action"""
    self.add_event_handler(self.message_handler, events.NewMessage(incoming=True))

    # Enter a while loop to chat as long as the user wants
    while True:
        dialog_count = 15
        dialogs = await self.get_dialogs(limit=dialog_count)

        i = None
        while i is None:
            print_title("Dialogs window")

            # Display them so the user can choose
            for i, dialog in enumerate(dialogs, start=1):
                sprint("{}. {}".format(i, get_display_name(dialog.entity)))

            # Let the user decide who they want to talk to
            print()
            print("> Who do you want to send messages to?")
            print("> Available commands:")
            print("  !q: Quits the dialogs window and exits.")
            print("  !l: Logs out, terminating this session.")
            print()
            i = await async_input("Enter dialog ID or a command: ")
            if i == "!q":
                return
            if i == "!l":
                await self.log_out()
                return

            try:
                i = int(i if i else 0) - 1
                # Ensure it is inside the bounds, otherwise retry
                if not 0 <= i < dialog_count:
                    i = None
            except ValueError:
                i = None

        # Retrieve the selected user (or chat, or channel)
        entity = dialogs[i].entity

        # Show some information
        print_title('Chat with "{}"'.format(get_display_name(entity)))
        print("Available commands:")
        print("  !q: Quits the current chat.")
        print("  !Q: Quits the current chat and exits.")
        print()

        # And start a while loop to chat
        while True:
            msg = await async_input("Enter a message: ")
            # Quit
            if msg == "!q":
                break
            if msg == "!Q":
                return

            # Send chat message (if any)
            if msg:
                # If the receiver's AES key is not present, fetch their
                # public key from the server and derive an AES key.
                print("SENDING MESSAGE TO ENTITY: ", entity.id)
                aes_shared_key = None
                for dlg in Dialog.select():
                    if dlg.dialog_id == entity.id:
                        # Found an entry with the shared AES key.
                        aes_shared_key = dlg.aes_shared_key
                        break
                if aes_shared_key is None:
                    # Get the public key.
                    peer_pub_key = get_public_key(entity.id)
                    shared_key = my_ecdh_private_key.exchange(
                        ec.ECDH(), peer_pub_key
                    )
                    aes_shared_key = HKDF(
                        algorithm=hashes.SHA256(),
                        length=32,
                        salt=None,
                        info=None,
                        backend=default_backend(),
                    ).derive(shared_key)
                    peer = Dialog(
                        dialog_id=entity.id, aes_shared_key=aes_shared_key
                    )
                    peer.save(force_insert=True)

                # Encrypt with AES-CBC, prepend the IV, and base64-encode.
                init_vector = token_bytes(16)
                aes = Cipher(
                    algorithms.AES(aes_shared_key),
                    modes.CBC(init_vector),
                    backend=default_backend(),
                )
                encryptor = aes.encryptor()
                padder = padding.PKCS7(128).padder()
                padded_data = padder.update(msg.encode("utf-8")) + padder.finalize()
                enc_msg_bytes = encryptor.update(padded_data) + encryptor.finalize()
                enc_msg_bytes = init_vector + enc_msg_bytes
                b64_enc_txt = base64.b64encode(enc_msg_bytes).decode("utf-8")
                await self.send_message(entity, b64_enc_txt, link_preview=False)
def run(self, args):
    sprint("add", args['SOURCEIP'], "to queue")
    # TODO: add hits to table to count tries
    BackupDevice(args).start()
    return True
def deinit(self):
    sprint('DeInitialise BACKUP instance')
    database.clear_locks()
    if database.save_on_exit:
        database.save()
    return True
def main():
    if len(sys.argv) < 2:
        sprint('Not enough arguments, must have model name')
        exit(-1)
    model_name = sys.argv[1]

    sprint('Setting up workspace')
    setup_workspace()

    sprint('Loading dataset')
    input_shape, XT_nd, XTest_nd, XT_dg, XTest_dg = dataset_definition()
    data_collection = (XT_nd, XTest_nd, XT_dg, XTest_dg)

    sprint('Building GAN')
    gan, gen_fw, gen_bw, dis_fw, dis_bw = cycle_gan_definition(input_shape)
    nets = [gan, gen_fw, gen_bw, dis_fw, dis_bw]

    sprint('Pretraining discriminator')
    pretrain_discriminator(XT_nd, XT_dg, gen_fw, gen_bw, dis_fw, dis_bw)

    sprint('Training....')
    train(model_name,
          data_collection,
          nets,
          nb_epoch=1000 * 100, plt_frq=25, BATCH_SIZE=16)
def __init__(self):
    sprint('Create BACKUP instance')
    self.thread = BackupThread()
def train(model_name,
          data_collection,
          nets,
          nb_epoch=5000, plt_frq=25, BATCH_SIZE=32,
          losses=None):
    # Use a fresh dict per call; a mutable default argument would be shared
    # across calls and keep accumulating history.
    if losses is None:
        losses = {"fw_d_l": [], "fw_d_a": [],
                  "bw_d_l": [], "bw_d_a": [],
                  "g_fw_id": [], "g_fw_recon": [],
                  "g_bw_id": [], "g_bw_recon": [],
                  "g_loss": []}

    sprint('Preparing output path', level=1)
    # os.mkdir(build_model_path(model_name, 'output'))
    parent_path, start_string = build_model_path(model_name, 'output')
    setup_workspace(parent_path)

    XT_nd, XTest_nd, XT_dg, XTest_dg = data_collection
    gan, gen_fw, gen_bw, dis_fw, dis_bw = nets

    for e in tqdm(range(nb_epoch)):
        # Select batch
        nd_batch = XT_nd[np.random.randint(0, XT_nd.shape[0], size=BATCH_SIZE), :, :, :]
        dg_batch = XT_dg[np.random.randint(0, XT_dg.shape[0], size=BATCH_SIZE), :, :, :]

        # Generate images
        fw_generated = gen_fw.predict(dg_batch)
        bw_generated = gen_bw.predict(nd_batch)

        # Prepare training 'output'
        fw_X = np.concatenate((nd_batch, fw_generated))
        fw_y = np.zeros([2 * BATCH_SIZE, 2])
        fw_y[0:BATCH_SIZE, 1] = 1
        fw_y[BATCH_SIZE:, 0] = 1

        bw_X = np.concatenate((dg_batch, bw_generated))
        bw_y = np.zeros([2 * BATCH_SIZE, 2])
        bw_y[0:BATCH_SIZE, 1] = 1
        bw_y[BATCH_SIZE:, 0] = 1

        make_trainable(dis_fw, True)
        fw_d_loss = dis_fw.train_on_batch(fw_X, fw_y)
        losses["fw_d_l"].append(fw_d_loss[0])
        losses["fw_d_a"].append(fw_d_loss[1])

        make_trainable(dis_bw, True)
        bw_d_loss = dis_bw.train_on_batch(bw_X, bw_y)
        losses["bw_d_l"].append(bw_d_loss[0])
        losses["bw_d_a"].append(bw_d_loss[1])

        # Train the combined generators.
        # Remember that the CycleGAN model computes losses as follows:
        # gan = Model(inputs=[image_fw, image_bw],
        #             outputs=[dis_result_fw, dis_result_bw,
        #                      same_fw, same_bw,
        #                      recovered_fw, recovered_bw])
        # gan.compile(loss=['binary_crossentropy', 'binary_crossentropy',
        #                   'mae', 'mae',
        #                   'mae', 'mae'],
        #             loss_weights=[1, 1,
        #                           identity_loss, identity_loss,
        #                           consistency_loss, consistency_loss],
        #             optimizer=optimizer)
        nd_batch_2 = XT_nd[np.random.randint(0, XT_nd.shape[0], size=BATCH_SIZE), :, :, :]
        dg_batch_2 = XT_dg[np.random.randint(0, XT_dg.shape[0], size=BATCH_SIZE), :, :, :]
        y2 = np.zeros([BATCH_SIZE, 2])
        y2[:, 1] = 1

        # make_trainable(dis_fw, False)
        # make_trainable(dis_bw, False)
        g_loss = gan.train_on_batch(
            [dg_batch_2, nd_batch_2],
            [y2, y2,
             dg_batch_2, nd_batch_2,
             dg_batch_2, nd_batch_2])
        losses["g_loss"].append(g_loss[0])
        losses["g_fw_id"].append(g_loss[3])
        losses["g_bw_id"].append(g_loss[4])
        losses["g_fw_recon"].append(g_loss[5])
        losses["g_bw_recon"].append(g_loss[6])

        # Update plots
        if e % plt_frq == plt_frq - 1:
            plot_training_stats(
                losses,
                gen_figname(e, start_string, 'loss', parent_path=parent_path))
            plot_gen(
                gen_fw, gen_bw, XTest_dg,
                gen_figname(e, start_string, 'gen_fw', parent_path=parent_path))
            plot_gen(
                gen_bw, gen_fw, XTest_nd,
                gen_figname(e, start_string, 'gen_bw', parent_path=parent_path))

    return losses
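# make_trainable() is called in train() but not defined in this section. A
# minimal sketch of the usual Keras helper, assuming `net` is a Keras model
# (this sketch is an assumption, not code from the source):
def make_trainable(net, val):
    # Toggle training of the whole model and each of its layers, so the
    # discriminator can be frozen while the combined GAN is trained.
    net.trainable = val
    for layer in net.layers:
        layer.trainable = val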
def build_generator(generator_name, gen_input):
    sprint('Building generator described in {}'.format(generator_name))
    generator = getattr(fancy_net_lib, generator_name)(gen_input)
    return generator