def main(args):
    """
    Command-line entry point: build the initial sigma and tau permutations
    and run the chosen cipher direction over the input and output files.
    """
    if args.random:
        # generate random permutations for the initial sigma, tau
        sigma = Perm.random(string.ascii_uppercase)
        tau = Perm.random(string.ascii_uppercase)
    else:
        sigma, tau = map(permutation_from_key, args.keys)
    if args.verbose:
        print("sigma = {}".format(sigma))
        print("tau = {}".format(tau))
    with args.in_file, args.out_file:
        if args.encrypt:
            if args.verbose:
                print("Enciphering...")
            process_func = autoperm_encipher
        else:
            if args.verbose:
                print("Deciphering...")
            process_func = autoperm_decipher
        if args.preserve:
            process_func.preserve(args.in_file, args.out_file, sigma, tau)
        else:
            process_func.strip(args.in_file, args.out_file, sigma, tau,
                               block=args.block, width=args.width,
                               compare=args.compare,
                               lowercase=args.lowercase)
def permutation_from_key(key):
    """
    Generate a low-level permutation from a key consisting of letters, by
    removing repeated letters and filling in the rest of the alphabet going
    from the last letter. E.g. "linustorvalds" as key becomes

        ABCDEFGHIJKLMNOPQRSTUVWXYZ
        LINUSTORVADEFGHJKMPQWXYZBC

    This method is /not/ completely standard. Wikipedia would have you
    believe that you should just chug along with the rest of the alphabet
    from the first letter, but this bleeds huge amounts of information into
    your permutation, as xyz will often map to xyz, whereas here they're
    basically randomly offset. (Wikipedia's example sneakily has a z in the
    key so you don't notice this.)

    This function generously strips any punctuation and makes the string
    uppercase, so should be fairly robust on any input.
    """
    mapping = {}
    alphabet = set(string.ascii_uppercase)
    from_iterable = iter(string.ascii_uppercase)
    # use an OrderedDict so as to retain compatibility with the 3.6 spec
    key_unique = "".join(collections.OrderedDict.fromkeys(strip_punc(key)))
    # in case of an empty key (although that's not a good idea)
    k = 'A'
    for k, a in zip(key_unique, from_iterable):
        mapping[a] = k
        alphabet.remove(k)
    alphabet = sorted(alphabet)
    # resume in the leftover alphabet at the first letter following the
    # last key letter used
    start_index = 0
    while start_index < len(alphabet) and alphabet[start_index] < k:
        start_index += 1
    for ind, k in enumerate(from_iterable):
        mapping[k] = alphabet[(start_index + ind) % len(alphabet)]
    return Perm(mapping)
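# Example (a sketch, not part of the module): sanity-check the
# "linustorvalds" mapping from the docstring above. Assumes Perm supports
# letter lookup with perm[letter], as the cipher functions in this repo do.
def _demo_permutation_from_key():
    key_perm = permutation_from_key("linustorvalds")
    image = "".join(key_perm[c] for c in string.ascii_uppercase)
    assert image == "LINUSTORVADEFGHJKMPQWXYZBC"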
@classmethod
def cyclic(cls, n):
    """
    Returns the cyclic group of order n.
    """
    return cls({Perm(n)(tuple((j + i) % n for i in range(n)))
                for j in range(n)})
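# Quick sketch of cyclic() (assuming the enclosing class is named Group, as
# the commented-out line in dihedral() below suggests): C_6 consists of
# exactly six shift permutations, including the identity at j = 0.
def _demo_cyclic():
    c6 = Group.cyclic(6)
    assert len(c6.perms) == 6
    assert Perm(6)(tuple(range(6))) in c6.perms  # the identity shift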
class SubstitutionHillClimber(HillClimber):
    __slots__ = "key",

    def initialise_state(self):
        # start from the frequency-analysis guess: match the most common
        # ciphertext letters to the most common English letters
        frequencies = collections.Counter(self.text)
        self.key = Perm({
            a: b for (a, _), (b, _) in zip(
                frequencies.most_common(),
                collections.Counter(ENGLISH_FREQUENCIES).most_common())
        })

    def format_state(self):
        print("key:\n{}".format(self.key.inverse().table_format()))
        print("plaintext:\n{}...".format(
            "".join(substitution.func(self.text, self.key))[:1000]))

    def modify_state(self):
        # try all other permutations, randomly ordered. The shuffling here
        # doesn't take place in a bottleneck, and it hopefully prevents the
        # search path from becoming too homogeneous.
        yield from (
            self.key * p
            for p in random.sample(MOD_PERMUTATIONS,
                                   k=len(MOD_PERMUTATIONS)))

    def get_score(self, state):
        return quadgram_score.no_strip(substitution.func(self.text, state))

    def set_state(self, state):
        self.key = state

    def get_state(self):
        return self.key
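# Minimal usage sketch for SubstitutionHillClimber, using only the methods
# shown above (the actual climbing loop lives in the HillClimber base class
# and isn't reproduced here; higher quadgram scores are assumed to be
# better, as usual for log-probability fitness):
def _demo_substitution_climber(ciphertext):
    climber = SubstitutionHillClimber(ciphertext)
    candidate = next(iter(climber.modify_state()))
    if climber.get_score(candidate) > climber.best_score:
        climber.set_state(candidate)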
def modify_state(self):
    # try a random state
    yield tuple(Perm.random(string.ascii_uppercase) for _ in range(2))
    # try modifying just one of sigma or tau
    yield from (
        (self.sigma * p, self.tau)
        for p in random.sample(MOD_PERMUTATIONS, k=len(MOD_PERMUTATIONS)))
    yield from (
        (self.sigma, self.tau * p)
        for p in random.sample(MOD_PERMUTATIONS, k=len(MOD_PERMUTATIONS)))
def fft(f, ferrers):
    r'''
    Compute Clausen's FFT.
    Ref: 'FAST FOURIER TRANSFORMS FOR SYMMETRIC GROUPS: THEORY AND
    IMPLEMENTATION', by MICHAEL CLAUSEN AND ULRICH BAUM
    http://www.ams.org/journals/mcom/1993-61-204/S0025-5718-1993-1192969-X/S0025-5718-1993-1192969-X.pdf

    f: function from S_n to \mathbb{R}
    ferrers: a FerrersDiagram object (indicates which irrep to compute the
        transform over)

    Returns a matrix of size d x d, where d is the number of standard
    tableaux of the FerrersDiagram shape.

    Note that the specific permutation group S_n is given by the size of
    the Ferrers diagram.
    '''
    if ferrers.size == 1:
        return np.eye(1) * f(Perm([(1, )]))
    n = ferrers.size
    d_branches = ferrers.branch_down()
    tabs = ferrers.tableaux
    d_lambda = len(tabs)
    f_hat = np.zeros((d_lambda, d_lambda))
    # iterate over cosets of S_{n-1} in S_n, with representatives
    # cyc = (i, i+1, ..., n)
    for i in range(1, n + 1):
        cyc = Perm([tuple(j for j in range(i, n + 1))])
        # assume that the input function is a function on Perm objects
        f_i = lambda pi: f(cyc * pi)
        # irrep function (aka yor) requires the 2nd argument to be a list
        # of tuples
        rho_i = irrep(ferrers, cyc)
        idx = 0  # used to figure out where the direct sum should add things
        res = np.zeros(f_hat.shape)
        # direct sum of recursive transforms over the branched-down shapes
        # (the restriction of the irrep to S_{n-1})
        for lambda_minus in d_branches:
            fft_fi = fft(f_i, lambda_minus)
            d = fft_fi.shape[0]
            res[idx:idx + d, idx:idx + d] += fft_fi
            idx += d
        f_hat += rho_i.dot(res)
    return f_hat
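# Lightweight correctness probe for fft() (a sketch: the FerrersDiagram
# constructor from a partition is an assumption -- adapt to the real
# interface). The constant function 1 on S_n lives entirely in the trivial
# representation, so its transform in any nontrivial irrep, such as the one
# indexed by the partition 3 = 2 + 1, must be the zero matrix.
import numpy as np

def _demo_fft_ones():
    shape = FerrersDiagram((2, 1))  # hypothetical constructor
    f_hat = fft(lambda pi: 1.0, shape)
    assert np.allclose(f_hat, 0.0)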
@classmethod
def dihedral(cls, n):
    """
    Returns the dihedral group of order 2n.
    """
    # if n <= 2:
    #     return Group.cyclic(n) ** 2
    rots = cls.cyclic(n).perms
    refls = {Perm(n)(tuple((j - i) % n for i in range(n)))
             for j in range(n)}
    return cls(rots | refls)
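# Sketch check of the construction above: for n >= 3 the n rotations and
# n reflections are distinct, so dihedral(n) has exactly 2n elements and
# contains all of cyclic(n) (again assuming the class is named Group).
def _demo_dihedral():
    d4 = Group.dihedral(4)
    assert len(d4.perms) == 8
    assert Group.cyclic(4).perms <= d4.perms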
def autoperm_encipher(plaintext, sigma, tau):
    """
    Encrypt: apply sigma to the first letter of each digraph and tau to the
    second, then compose both keys with the transposition swapping the two
    plaintext letters.
    """
    for a, b in chunk(plaintext, 2):
        if b is None:
            # odd-length input: the final lone letter goes through sigma
            yield sigma[a]
        else:
            yield from (sigma[a], tau[b])
            transposition = Perm.from_cycle([a, b])
            sigma *= transposition
            tau *= transposition
def autoperm_decipher(ciphertext, sigma, tau):
    """
    Decrypt: invert sigma and tau, then mirror the key evolution of
    autoperm_encipher using the recovered plaintext digraphs.
    """
    sigma_inverse = sigma.inverse()
    tau_inverse = tau.inverse()
    for a, b in chunk(ciphertext, 2):
        if b is None:
            yield sigma_inverse[a]
        else:
            a_plain = sigma_inverse[a]
            b_plain = tau_inverse[b]
            yield from (a_plain, b_plain)
            # (sigma * t)^-1 == t^-1 * sigma^-1, and a transposition is
            # its own inverse
            transposition = Perm.from_cycle([a_plain, b_plain])
            sigma_inverse = transposition * sigma_inverse
            tau_inverse = transposition * tau_inverse
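# Round-trip sketch for the two generators above. The .strip/.preserve
# wrappers used by main() are attached elsewhere in the repo; here the raw
# generators are consumed directly. Assumes Perm.random as used by main().
def _demo_autoperm_round_trip():
    sigma = Perm.random(string.ascii_uppercase)
    tau = Perm.random(string.ascii_uppercase)
    ciphertext = "".join(autoperm_encipher("ATTACKATDAWN", sigma, tau))
    recovered = "".join(autoperm_decipher(ciphertext, sigma, tau))
    assert recovered == "ATTACKATDAWN"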
@classmethod
def generate(cls, n, elements, limit=math.inf):
    """
    Generate more elements from some generating elements.

    Returns None if the group grows past `limit` elements.
    """
    i = 1
    Id = Perm(n)()
    G = cls({Id})
    to_multiply = [Id]
    new = []
    while len(to_multiply) != 0:
        for a in to_multiply:
            for b in elements:
                c = a * b
                if c not in G.perms:
                    i += 1
                    if i > limit:
                        return None
                    G.perms.add(c)
                    new.append(c)
        to_multiply = new
        new = []
    return G
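# Sketch: a transposition and a 3-cycle generate all of S_3, so generate()
# should reproduce symmetric() exactly (uses the Perm(n) image-tuple
# calling convention seen in the other constructors).
def _demo_generate():
    swap = Perm(3)((1, 0, 2))    # transposition (0 1)
    rotate = Perm(3)((1, 2, 0))  # 3-cycle (0 1 2)
    generated = Group.generate(3, [swap, rotate])
    assert generated.perms == Group.symmetric(3).perms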
def __init__(self, network_type, spp, renders_dir, spp_additional=None,
             mapmode=None, uniform=False, dataset_filter=None,
             dualmode=False):
    self.is_cuda = True  # torch.cuda.is_available()
    self.criterion = self.right_type(nn.L1Loss(size_average=True))
    self.criterion_mse = self.right_type(nn.MSELoss(size_average=True))
    self.criterion_l1 = self.right_type(nn.L1Loss(size_average=True))
    self.loss_relative_L1 = self.right_type(special_losses.RelativeL1())
    self.loss_edge_loss = self.right_type(special_losses.EdgeLoss())
    self.loss_relative_edge_loss = self.right_type(
        special_losses.RelativeEdgeLoss())
    self.sc_log1p3 = self.right_type(RenderSimulator.SC_log1p3())
    self.mapmode = mapmode
    self.uniform = uniform
    self.dataset_filter = dataset_filter
    self.optimizer = None
    self.out_dir = "."
    self.dualmode = dualmode
    self.writer = SummaryWriter()
    self.writer_count = 0
    self.tensorboard_every = 10000
    self.tensorboard_graph_every = 20
    self.monitor_writer = special_losses.MonitorWriter(
        self.writer, self.tensorboard_graph_every, self.tensorboard_every,
        self)
    self.loss_combo = special_losses.LossCombo(
        self.monitor_writer,
        ["relative_l1", self.loss_relative_L1, 1],
        ["relative_edge_loss", self.loss_relative_edge_loss, 1],
    )
    self.spp_criterion = self.criterion_mse
    self.running_loss = 0.0
    self.running_loss_count = 0.0
    self.print_error = 5
    self.print_weights = 0
    self.batch_size = 1 if config.config.small_gpu else 6
    self.loss_graph = []
    self.epoch = 0
    self.apply_dir = None
    # guard against the default spp_additional=None (assumed default of 0;
    # the original called float() unconditionally, which crashes on None)
    self.spp_additional = (float(spp_additional)
                           if spp_additional is not None else 0.0)
    self.trainloader = None
    self.testloader = None
    self.prenormalized = False
    self.gamma = 1.0
    self.mode = None
    self.model_loaded = False
    self.colorchannels = 3
    self.output_target_buffers = [
        Buffer(Perm("color", 0, "COLOR"), "color_in")
    ]
    self.output_target_buffers2 = {}
    self.show_img = True
    self.vis = True
    self.generate_gt = False
    self.nodes_real = None
    self.no_gt = False
    self.crop_network = True
    self.model_save_path = "cur_model.model"
    network_type = network_type.split(":")
    self.network_type_opt = set(network_type)
    self.spp = spp
    self.renders_dir = renders_dir
    self.opt_passspp = True
    self.opt_passspp_mult = 1 if self.take_type_opt("passspp") else 0
    assert self.opt_passspp_mult == 0  # we don't do it anymore
    self.opt_mixset = self.take_type_opt("mixset")
    self.opt_sppopt = self.take_type_opt("sppopt")
    self.prenormalized = True
    self.spp_base = 1
    if '' in self.network_type_opt:
        self.network_type_opt.remove('')
    assert len(self.network_type_opt) == 0
    self.network_single_prefilter = False
    normn = "color_in_log"
    bc = [
        Buffer(Perm("color", i, "COLOR"), normalizer_name=normn)
        for i in range(self.colorchannels)
    ]
    color_out = [
        Buffer(Perm("color", i, "COLOR_CBF"), "color_in")
        for i in range(self.colorchannels)
    ]
    for i in range(self.colorchannels):
        self.output_target_buffers2["color" + str(i)] = bc[i]
    data_in = []
    for i in range(3):
        data_in.append(Buffer(Perm("color", i, "TEXTURE"), "texture"))
    data_in.extend([
        Buffer("_feature_NORM_1_X", "norm1"),
        Buffer("_feature_NORM_1_Y", "norm1"),
        Buffer("_feature_NORM_1_Z", "norm1"),
        Buffer("_feature_DEPTH_1", "depth"),
    ])
    data_in.extend([
        Buffer("_feature_SHADOW_1_Y", "shadow"),
    ])
    color_layers = [
        self.output_target_buffers2[key]
        for key in sorted(self.output_target_buffers2.keys())
    ]
    data_in = color_layers + data_in
    data_out = []
    data_out.extend(color_layers)
    self.networks = {}
    data_out = color_layers
    self.forward_pass = self.forward_pass_adapt2
    denoise_net = Denoising.NetBig(len(data_in), 3,
                                   passspp=self.opt_passspp)
    self.networks["denoise"] = Denoising.NetDaptor(
        self, self.right_type(denoise_net), data_in, data_out)
    rnet = RenderSimulator.NetRenderMulti3()
    self.networks["selector"] = Denoising.NetDaptor(
        self, self.right_type(rnet), data_in, data_out)
    spp_map = nn.Sequential(
        Denoising.NetBig(11, 1),
        Denoising.NetMapBound(scale=(self.spp_additional + self.spp_base),
                              bias=0.0))
    self.networks["spp_map"] = Denoising.NetDaptor(
        self, self.right_type(spp_map), data_in, data_out)
    self.data_layer = DataLayer(
        all_fast_buffers2=self.get_all_fast_buffers(),
        all_gt_buffers2=self.get_all_gt_buffers(),
        permutator_group2=permutator_group,
        normalizers2=normalizers,
        output_target_buffers=self.output_target_buffers2)
    self.gglue = Gglue(self.networks, {})
    self.super_dataset = None
    self.subset_dataset = None
    self.subset_dataset_loader = None
    self.active_nodes = None
def initialise_state(self):
    self.sigma = Perm.random(string.ascii_uppercase)
    self.tau = Perm.random(string.ascii_uppercase)
@classmethod
def symmetric(cls, n):
    """
    Returns the symmetric group S_n.
    """
    return cls(set(map(Perm(n), itertools.permutations(range(n)))))
import abc
import collections
import itertools
import math
import random
import string
import sys
import time

from perm import Perm
from substitution import substitution
from quadgram_metric import quadgram_score
from metric import ENGLISH_FREQUENCIES

# all transpositions of two letters: the neighbourhood used to perturb keys
MOD_PERMUTATIONS = [
    Perm.from_cycle(transp)
    for transp in itertools.combinations(string.ascii_uppercase, 2)
]


class HillClimber(abc.ABC):
    """
    Class keeping track of the various bits of state needed to climb hills
    """
    __slots__ = "text", "best_score", "total_keys_tried", "update_interval"

    def __init__(self, text, update_interval=1000):
        self.text = text
        self.update_interval = update_interval
        self.initialise_state()
        self.best_score = self.get_score(self.get_state())
        self.total_keys_tried = 0
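# For scale (a sketch): MOD_PERMUTATIONS holds one transposition per
# unordered pair of letters, so the hill climbers above explore
# C(26, 2) = 325 single-swap neighbours of the current key.
def _demo_mod_permutations():
    assert len(MOD_PERMUTATIONS) == 26 * 25 // 2  # 325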