Ejemplo n.º 1
0
    def nondiagonal_block_mp(self, workers, labEnc_Xi, enc_mask_A):
        """Compute the masked off-diagonal block for the merged matrix.

        Performs the matrix multiplication Enc(Xj^T * Xi) against every
        previously registered dataset using the worker pool, then removes
        the mask by homomorphically adding enc_mask_A.

        :param workers: multiprocessing pool used to parallelize dot products
        :param labEnc_Xi: labeled-encrypted data matrix of the new DataOwner
        :param enc_mask_A: encrypted mask matrix (B' in the paper) added row-wise
        :return: masked off-diagonal block as a list of rows
        """
        new_block = None
        # Matrix multiplication(labEnc_Xj, labEnc_Xi)
        t_labEnc_Xi = transpose(labEnc_Xi)
        num_dimension = len(t_labEnc_Xi)

        for labEnc_Xj in self.list_labEnc_Xj:
            t_labEnc_Xj = transpose(labEnc_Xj)
            splitedmatrice = []
            num_chunks = 1
            for vector1 in t_labEnc_Xj:
                for vector2 in t_labEnc_Xi:
                    # Split each column pair into ~10 chunks so the pool can
                    # process them in parallel.  max(1, ...) avoids a zero
                    # step (range() raises ValueError) for vectors shorter
                    # than 10 elements.
                    interval = max(1, len(vector1) // 10)
                    offsets = range(0, len(vector1), interval)
                    num_chunks = len(offsets)
                    for i in offsets:
                        splitedmatrice.append(
                            [vector1[i:i + interval], vector2[i:i + interval]])
            temp = workers.map(self.dotproduct, splitedmatrice)
            # Reassemble: each consecutive group of num_chunks partial
            # products is one full dot product.  (The original hard-coded 10
            # here, which mis-grouped results whenever the vector length was
            # not an exact multiple of 10.)
            subnew_block = []
            for i in range(0, len(temp), num_chunks):
                subnew_block.append(sum(temp[i:i + num_chunks]))
            subnew_block = list(zip(*[iter(subnew_block)]*num_dimension)) # reshape
            if new_block is None:
                new_block = subnew_block
            else:
                new_block += subnew_block

        # remove mask(sum(Enc(b*b'))) by homomorphic element-wise addition
        new_block = [list(map(add, enc_value, enc_mask)) for enc_value, enc_mask in zip(new_block, enc_mask_A)]

        return new_block
Ejemplo n.º 2
0
def invMixColumns(state: bytearray, rotate=False) -> bytearray:
    """Apply the AES InvMixColumns step to *state* in place and return it.

    Each state column is multiplied in GF(2^8) by the fixed inverse mix
    matrix.  When *rotate* is set, the state is transposed before and after
    so the same code works on the alternative layout.
    """
    # rows of the inverse MixColumns matrix, in output-row order
    inv_matrix = (
        (0x0e, 0x0b, 0x0d, 0x09),
        (0x09, 0x0e, 0x0b, 0x0d),
        (0x0d, 0x09, 0x0e, 0x0b),
        (0x0b, 0x0d, 0x09, 0x0e),
    )
    if rotate:
        state = transpose(state)
    for col in range(Nb):
        # read the whole column before writing any of it back
        column = [matrix_element(state, col, row) for row in range(4)]
        for row, coeff_row in enumerate(inv_matrix):
            acc = 0
            for coeff, value in zip(coeff_row, column):
                acc ^= ffMultiply(coeff, value)
            state[matrix_index(col, row)] = acc
    if rotate:
        state = transpose(state)
    return state
Ejemplo n.º 3
0
def shiftRows(state: bytearray, rotate=False) -> bytearray:
    """Apply the AES ShiftRows step: row r is rotated left by r positions.

    With *rotate* set, the state is transposed before and after so the same
    code works on the alternative layout.  Mutates and returns *state*.
    """
    if rotate:
        state = transpose(state)
    for row in range(Nb):
        start = row * Nb
        line = state[start:start + Nb]
        # one slice-rotation instead of `row` single pop/append steps
        state[start:start + Nb] = line[row:] + line[:row]
    if rotate:
        state = transpose(state)
    return state
Ejemplo n.º 4
0
def invShiftRows(state: bytearray, rotate=False) -> bytearray:
    """Inverse ShiftRows: row r is rotated RIGHT by r positions.

    With *rotate* set, the state is transposed before and after so the same
    code works on the alternative layout.  Mutates and returns *state*.
    """
    if rotate:
        state = transpose(state)
    for row in range(Nb):
        start = row * Nb
        line = state[start:start + Nb]
        # rotating right by `row` equals rotating left by Nb - row
        cut = (Nb - row) % Nb
        state[start:start + Nb] = line[cut:] + line[:cut]
    if rotate:
        state = transpose(state)
    return state
Ejemplo n.º 5
0
def main():
    """
    main function to prepare data for Tiramisu algorithm

    Parses CLI arguments, creates the output directory layout, then runs
    the combine / mask-generation / augmentation / split pipeline.
    """
    parser = argparse.ArgumentParser(
        description='reads image sets and augments the data for Tiramisu',
        prog='data_gen.py <args>')

    # Required arguments
    parser.add_argument("-i",
                        "--input",
                        required=True,
                        help="Path to image sets")
    parser.add_argument("-o",
                        "--output",
                        required=True,
                        help="Path to save test and train files")

    # Optional arguments
    parser.add_argument("-r",
                        "--ratio",
                        type=float,
                        default=0.2,
                        help="validation set ratio")

    args = vars(parser.parse_args())

    # Creating required directories.  exist_ok=True avoids the
    # check-then-create race of the former os.path.exists/os.makedirs pairs.
    for subdir in ('train/data', 'validate/data', 'train/masks',
                   'validate/masks', 'test/data'):
        os.makedirs(os.path.join(args['output'], subdir), exist_ok=True)

    print("Creating an image per video...")
    combine(args['input'], args['output'])

    print("Generating a mask per video...")
    json_to_mask(args['input'], args['output'])

    print("augmenting the dataset...")
    slicer(args['output'])
    rotate(args['output'])
    transpose(args['output'])

    # Splitting the dataset into training and validation set
    split(args['output'], args['ratio'])
Ejemplo n.º 6
0
    def compute_merged_mask(self, enc_seed_i, num_instances, num_features):
        """
        Protocol-Vertical Step1(set up)
        - receive upk_i[Enc(seed_i)], label information[num_instances, num_features] from DataOweners
        - compute B'=Enc(sum(b_i*b_j), c'=Enc(sum(bi*b_0))

        :param enc_seed_i : upk_i
        :param num_instances, num_features : label information
        :return enc_mask_A, enc_mask_b: B', c' denoted in the paper
        """
        # recover the DataOwner's PRF seed with the master secret key
        seed_i = self.msk.decrypt(enc_seed_i)

        # Expand the seed into a pseudo-random mask matrix of shape
        # (num_instances, num_features); cell (h, w) uses counter
        # h + w * num_instances so every cell gets a distinct PRF input.
        mask_X_i = [[
            PRF(seed_i, h_index + w_index * num_instances, self.mpk.n)
            for w_index in range(num_features)
        ] for h_index in range(num_instances)]

        # compute B'[i,j] = Enc(mask_X_j^T * mask_X_i) against every
        # previously registered owner's mask
        enc_mask_A = None
        t_mask_X_i = transpose(mask_X_i)
        if len(self.list_mask_X) != 0:
            for mask_X_j in self.list_mask_X:
                submask = [[
                    self.mpk.encrypt(
                        space_mapping(sum(map(operator.mul, vector1, vector2)),
                                      self.mpk.n)) for vector1 in t_mask_X_i
                ] for vector2 in transpose(mask_X_j)]

                if enc_mask_A is None:
                    enc_mask_A = submask
                else:
                    # NOTE(review): list += concatenates rows, stacking the
                    # sub-blocks vertically — confirm this matches how the
                    # caller consumes B'
                    enc_mask_A += submask

        self.list_mask_X.append(mask_X_i)

        # compute c'[i]: the very first owner's first mask column doubles as
        # the label mask; every later owner gets Enc(mask_X_i^T * mask_Y)
        enc_mask_b = None
        if self.mask_Y is None:
            #self.mask_Y = [PRF(seed_i, index, self.mpk.n) for index in range(num_instances)]
            self.mask_Y = t_mask_X_i[0]
        else:
            enc_mask_b = [
                self.mpk.encrypt(
                    space_mapping(sum(map(operator.mul, vector1, self.mask_Y)),
                                  self.mpk.n)) for vector1 in t_mask_X_i
            ]

        return enc_mask_A, enc_mask_b
Ejemplo n.º 7
0
def c19():
    """Cryptopals challenge 19: break many CTR ciphertexts that reused one
    nonce by attacking each keystream byte position as single-byte XOR."""
    # base64-encoded ciphertexts, all encrypted under the same key+nonce
    b64_cipher_texts = [
        b'SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==',
        b'Q29taW5nIHdpdGggdml2aWQgZmFjZXM=',
        b'RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==',
        b'RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=',
        b'SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk',
        b'T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==',
        b'T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=',
        b'UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==',
        b'QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=',
        b'T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl',
        b'VG8gcGxlYXNlIGEgY29tcGFuaW9u',
        b'QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==',
        b'QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=',
        b'QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==',
        b'QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=',
        b'QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=',
        b'VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==',
        b'SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==',
        b'SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==',
        b'VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==',
        b'V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==',
        b'V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==',
        b'U2hlIHJvZGUgdG8gaGFycmllcnM/',
        b'VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=',
        b'QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=',
        b'VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=',
        b'V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=',
        b'SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==',
        b'U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==',
        b'U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=',
        b'VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==',
        b'QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu',
        b'SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=',
        b'VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs',
        b'WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=',
        b'SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0',
        b'SW4gdGhlIGNhc3VhbCBjb21lZHk7',
        b'SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=',
        b'VHJhbnNmb3JtZWQgdXR0ZXJseTo=',
        b'QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=',
    ]

    block_size = 16
    random_key = get_random_bytes(block_size)
    reused_nonce = 0

    # NOTE(review): this helper is defined but never called below —
    # presumably the challenge's encryption oracle, kept for reference
    def c19_cryptor(plain_text):
        return aes128_ctr_encode(random_key, reused_nonce, plain_text)

    cipher_texts = [b64decode(s) for s in b64_cipher_texts]
    # truncate every ciphertext to the shortest length so columns line up
    repeat_length = min(map(len, cipher_texts))
    chunks = [ct[0:repeat_length] for ct in cipher_texts]
    # transpose: column i collects every byte XORed with keystream byte i
    chunks = transpose(chunks)
    keystream = bytes(map(lambda t: t[1], map(c4_best_single_byte_xor,
                                              chunks)))
    for ct in cipher_texts:
        ctt = ct[0:repeat_length]
        print(xor_buf(ctt, keystream))
Ejemplo n.º 8
0
def get_public_key(pub_key):
    """Read a public key file and return (A, n, k, q), or None if missing.

    Apparent file layout (confirm against the writer): line 0 is a header,
    lines 1-3 hold the integers n, k, q, and every remaining line is one
    row of the key matrix, which is transposed on load.
    """
    if not os.path.isfile(pub_key):
        print("There are no public key available")
        return None

    with open(pub_key, 'r') as f:
        lines = f.readlines()
    # (the redundant f.close() inside the with-block was removed — the
    # context manager already closes the file)

    n = int(lines[1])
    k = int(lines[2])
    q = int(lines[3])

    # parse every remaining line as one matrix row
    rows = [get_vector(line) for line in lines[4:]]
    A = u.transpose(rows)

    return A, n, k, q
Ejemplo n.º 9
0
def encrypt(m, A, q, n, k):
    """Encrypt message vector m under public matrix A (LWE-style scheme).

    :param m: message entries (length k) scaled by (q-1)/2
    :param A: public key matrix
    :param q: modulus
    :param n: length of the random combining vector r
    :param k: message length
    :return: ciphertext vector c, reduced into the centred interval around 0
    """
    # random binary vector used to combine rows of A
    r = u.random_bit_vector(n)
    tA = u.transpose(A)

    # NOTE(review): true division — on Python 3 this makes coeff (and the
    # appended beta entries) floats; if integer ciphertexts are intended
    # this should be (q - 1) // 2 — confirm against the decryption side
    coeff = (q - 1) / 2

    alpha = (u.matrix_vector(tA, r, q))

    # beta = n zeros followed by the k scaled message entries
    beta = [0 for i in range(n)]

    for i in range(k):
        mi = coeff * m[i]
        beta.append(mi)

    c = []

    # add alpha and beta component-wise, reducing each entry into the
    # centred interval (-(q-1)/2, (q-1)/2]
    for i in range(len(alpha)):
        ti = alpha[i] + beta[i]
        ti %= q
        if ti > ((q - 1) / 2):
            ti -= q
            #print("Correction")
        if ti < -((q - 1) / 2):
            ti += q
            #print("Correction")
        c.append(ti)

    #print("Encryption completed")
    return c
Ejemplo n.º 10
0
Archivo: game.py Proyecto: ericyd/2048
 def group(self, grouping, array=None):
     """Split *array* (defaults to the board) into rows of four cells;
     for 'columns' grouping, transpose the rows into columns."""
     cells = self.board if array is None else array
     # group into a nested list, 4 items per row
     rows = grouper(cells, 4)
     if grouping == 'columns':
         rows = transpose(*rows)
     return rows
Ejemplo n.º 11
0
Archivo: game.py Proyecto: ericyd/2048
 def ungroup(self, grouping, array=None):
     """Flatten a grouped board (defaults to self.board) back into a single
     list of cells, undoing a 'columns' transpose first when needed."""
     cells = self.board if array is None else array
     grouped = cells[:]
     if grouping == 'columns':
         grouped = transpose(*grouped)
     # flatten - credit: https://stackoverflow.com/a/952952/3991555
     return flatten(grouped)
Ejemplo n.º 12
0
def train(args, builder, params):
    """Train the stack-LSTM on randomly generated copy-style sequences.

    :param args: namespace with learning_rate, clip_threshold, iterations,
        batch_group_size, training_length_range, source_alphabet_size,
        batch_size
    :param builder: model builder providing initial_state(batch_size)
    :param params: dynet parameter collection to optimize
    """
    trainer = dynet.RMSPropTrainer(params, args.learning_rate)
    trainer.set_clip_threshold(args.clip_threshold)
    for group_no in range(args.iterations):
        print('batch group #%d...' % (group_no + 1))
        batch_group_loss = 0.0
        for batch_no in range(args.batch_group_size):
            # Sample a new batch of training data
            length = random.randint(*args.training_length_range)
            batch = [
                random_sequence(length, args.source_alphabet_size)
                for i in range(args.batch_size)
            ]
            # Arrange the input and output halves of the sequences into batches
            # of individual symbols
            input_sequence_batch = transpose(s.input_sequence() for s in batch)
            output_sequence_batch = transpose(s.output_sequence()
                                              for s in batch)
            # Start building the computation graph for this batch
            dynet.renew_cg()
            state = builder.initial_state(args.batch_size)
            # Feed everything up to the separator symbol into the model; ignore
            # outputs
            for symbol_batch in input_sequence_batch:
                index_batch = [input_symbol_to_index(s) for s in symbol_batch]
                state = state.next(index_batch, StackLSTMBuilder.INPUT_MODE)
            # Feed the rest of the sequence into the model and sum up the loss
            # over the predicted symbols
            symbol_losses = []
            for symbol_batch in output_sequence_batch:
                index_batch = [output_symbol_to_index(s) for s in symbol_batch]
                symbol_loss = dynet.pickneglogsoftmax_batch(
                    state.output(), index_batch)
                symbol_losses.append(symbol_loss)
                state = state.next(index_batch, StackLSTMBuilder.OUTPUT_MODE)
            loss = dynet.sum_batches(dynet.esum(symbol_losses))
            # Forward pass
            loss_value = loss.value()
            batch_group_loss += loss_value
            # Backprop
            loss.backward()
            # Update parameters
            trainer.update()
        # average per-sequence loss across the whole batch group
        avg_loss = batch_group_loss / (args.batch_size * args.batch_group_size)
        print('  average loss: %0.2f' % avg_loss)
Ejemplo n.º 13
0
def shift(text, amount):
    """Upper-case *text* and Caesar-shift every letter by *amount*.

    The shifted alphabet is built with slicing instead of a char-by-char
    concatenation loop; `amount % 26` keeps negative or oversized shifts
    correct (same indices as the old `(n + amount) % 26` lookup).
    """
    alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    offset = amount % len(alpha)
    shifted = alpha[offset:] + alpha[:offset]
    tbl = dict(zip(alpha, shifted))
    # transpose() here is the project's substitution helper — it maps the
    # text through tbl (presumably character-wise; confirm its contract)
    return transpose(text.upper(), tbl)
Ejemplo n.º 14
0
	def move(self, direction):
		"""Slide and merge the board in *direction* ('Left'/'Right'/'Up'/
		'Down'), spawn a new tile, and return True when the board changed.
		Returns False for impossible moves and for unknown directions
		(which previously fell through and returned None).
		"""
		def move_row_left(row):
			def tighten(row):
				# push all non-zero tiles to the left, pad with zeros
				new_row = [i for i in row if i != 0]
				new_row += [0 for i in range(len(row) - len(new_row))]
				return new_row

			def merge(row):
				# merge equal neighbours left-to-right; each merge scores
				# the value of the merged tile
				pair = False
				new_row = []
				for i in range(len(row)):
					if pair:
						new_row.append(2*row[i])
						self.score += 2*row[i]
						pair = False
					else:
						if i + 1 < len(row) and row[i] == row[i + 1]:
							pair = True
							new_row.append(0)
						else:
							new_row.append(row[i])
				assert len(new_row) == len(row)
				return new_row

			return tighten(merge(tighten(row)))

		# every direction is expressed as 'Left' under a board symmetry
		moves = {}
		moves['Left'] = lambda field:					\
			[move_row_left(row) for row in field]
		moves['Right'] = lambda field:					\
			invert(moves['Left'](invert(field)))
		moves['Up'] = lambda field:						\
			transpose(moves['Left'](transpose(field)))
		moves['Down'] = lambda field:					\
			transpose(moves['Right'](transpose(field)))

		if direction in moves:
			if self.move_is_possible(direction):
				self.field = moves[direction](self.field)
				self.spawn()
				return True
			return False
		# fix: unknown directions used to fall off the end and return None
		return False
Ejemplo n.º 15
0
def plot_stats(stats, save_to):
    """Plot per-player rank distributions as stacked bars and save the figure.

    :param stats: iterable of (name, mean, sdev, rank) tuples, one per player
    :param save_to: path passed to save_figure
    """
    # normalise ranks: each rank position sums to 1 across players
    all_ranks = [rank for (_, _, _, rank) in stats]
    all_ranks = util.transpose(all_ranks)
    all_ranks = [[x / float(sum(row)) for x in row] for row in all_ranks]
    all_ranks = util.transpose(all_ranks)

    # objectify stats
    player_stats = collections.defaultdict(lambda: util.MutableNamedTuple())
    for (i, (name, mean, sdev, _)) in enumerate(stats):
        player_stats[name].mean = mean
        player_stats[name].sdev = sdev
        player_stats[name].ranks = all_ranks[i]

    # setup plot and maximize
    colormap = create_colormap(player_stats.keys())
    create_figure()

    # plot ranks as one stacked bar per rank position
    # (xrange: this module targets Python 2)
    width = 1.0
    ax1 = plt.subplot2grid((5, 1), (0, 0), rowspan=4)
    xs = xrange(1, len(all_ranks[0]) + 1)
    bottom = [0] * len(xs)
    for name, player_info in player_stats.items():
        ys = player_info.ranks
        ax1.bar(xs, ys, bottom=bottom, color=colormap[name], width=width)
        bottom = util.vector_sum(ys, bottom)
    plt.xticks([x + width / 2.0 for x in xs], map(str, xs))
    plt.xlim(min(xs), max(xs))
    plt.ylim(0, 1)
    plt.xlabel('Rank')
    plt.ylabel('Probability')

    # create legend with mean/sdev per player
    ax2 = plt.subplot2grid((5, 1), (4, 0))
    label_fun = (lambda player_type, player_info:
                 u'%s (μ=%.2f, σ=%.2f)'
                 % (player_type, player_info.mean, player_info.sdev))
    create_legend(ax2, player_stats, colormap, label_fun)

    # save graph
    save_figure(save_to)
Ejemplo n.º 16
0
    def kd_to_pt_score(kd):
        """Return (score, plain_text) for one candidate key size.

        kd is a tuple whose first element is the key size to try; the
        cipher_text under attack comes from the enclosing scope.
        """
        key_size = kd[0]

        # cut the ciphertext into key_size blocks and transpose so every
        # resulting chunk was XORed with the same key byte
        chunks = chunk(cipher_text, key_size)
        chunks = transpose(chunks)
        # best single-byte-XOR guess per column yields one key byte each
        key = bytes(map(lambda t: t[1], map(c4_best_single_byte_xor, chunks)))

        plain_text = decrypt_xor(key, cipher_text)

        score = english_score(plain_text)
        return (score, plain_text)
Ejemplo n.º 17
0
def reset():
    """Reset the game state globals and (re)build the scrolling level."""
    global level, cur_screen_pos, bird_pos_x, bird_pos_y, bird_vel,\
        score, game_over, bird_anim_frame, show_readme, lead_in

    cur_screen_pos = 0
    bird_pos_x = BIRD_X_POS
    bird_pos_y = round(PIPE_HEIGHT / 2)
    bird_vel = 0
    score = 0
    game_over = False
    bird_anim_frame = 0
    lead_in = START_SPACE

    # make first pipe have a gap in the middle
    # NOTE(review): on Python 3, / gives a float and random.randint would
    # reject float bounds — this module appears to target Python 2
    middle = (PIPE_HEIGHT - PIPE_GAP) / 2
    first_pipe_top = random.randint(middle - 3, middle + 3)

    # level is a list of screen columns: leading empty space, the fixed
    # first pipe, then NUM_PIPES randomly-gapped pipes
    level = ([empty] * START_SPACE +
             pipe_and_space(first_pipe_top) +
             [el for _ in range(NUM_PIPES) for el in pipe_and_space()])
    # append the ground art row underneath every column
    level = transpose(transpose(level) + art.ground(len(level)))

    if first_run:
        # have to add empty space to the bottom of readme
        fillup = len(level[0]) - len(art.readme)

        whitespace = [' ' * len(art.readme[0])]
        readme = art.readme + whitespace * fillup

        whitespace = [' ' * len(art.logo[0])]
        logo = (whitespace * 20 +
                art.logo +
                whitespace * (SCREEN_HEIGHT - 20 - len(art.logo)))

        # prepend the readme and logo screens before the playable level
        level = (transpose(readme) +
                 [' ' * SCREEN_HEIGHT] * 5 +
                 transpose(logo) +
                 level)
        bird_pos_x += SCREEN_WIDTH + len(logo[0]) + 7
        lead_in += len(logo[0]) + len(readme[0]) + 5
Ejemplo n.º 18
0
def mixColumns(state: bytearray, rotate=False) -> bytearray:
    """Apply the AES MixColumns step to a copy of *state* and return it.

    Unlike the inverse variant, the input state is left untouched.  With
    *rotate* set, the copy is transposed before and after the transform.
    """
    out = state.copy()
    if rotate:
        out = transpose(out)
    for col in range(Nb):
        # read the whole column before writing any of it back
        a0, a1, a2, a3 = (matrix_element(out, col, row) for row in range(4))
        mixed = (
            ffMultiply(0x02, a0) ^ ffMultiply(0x03, a1) ^ a2 ^ a3,
            a0 ^ ffMultiply(0x02, a1) ^ ffMultiply(0x03, a2) ^ a3,
            a0 ^ a1 ^ ffMultiply(0x02, a2) ^ ffMultiply(0x03, a3),
            ffMultiply(0x03, a0) ^ a1 ^ a2 ^ ffMultiply(0x02, a3),
        )
        for row, value in enumerate(mixed):
            out[matrix_index(col, row)] = value
    if rotate:
        out = transpose(out)
    return out
Ejemplo n.º 19
0
    def protocol_ridge_step1(self):
        """
        Protocol-ridge_version1 Step1(data masking)
        - sample a random matrix(R) and a random vector(r)
        - mask a merged dataset(A, b) with R, r

        :return enc_C: Enc(C) = Enc(A*R)
        :return enc_d: Enc(d) = Enc(b + Ar)
        """
        num_dimension = len(self.merged_enc_A)

        # sample a random matrix(R) and a random vector(r)
        Range = self.mpk.n-1
        MaxInt = self.mpk.max_int
        R = [[( random.randrange(Range) - MaxInt ) for _ in range(num_dimension)] for _ in range(num_dimension)]

        # check that R is invertible. ([det(A)] is non-zero <=> A is invertible)
        # if R is not invertible, random-sample again until R is invertible.
        det_R = compute_det(R, self.mpk.n)
        while(det_R == 0.0):
            R = [[( random.randrange(Range) - MaxInt ) for _ in range(num_dimension)] for _ in range(num_dimension)]
            det_R = compute_det(R, self.mpk.n)

        r = [(int)( random.randrange(Range) - MaxInt ) for _ in range(num_dimension)]

        # store R, r in the Object for step3
        self.R = R
        self.r = r
        self.det_R = det_R

        # masking C = A*R with multi-processing: pair every row of Enc(A)
        # with every column of R so each dot product runs independently
        splitedAR = []
        R_trans = transpose(self.R)
        for i in range(num_dimension):
            for j in range(num_dimension):
                splitedAR.append([self.merged_enc_A[i], R_trans[j]])

        splitedbA = list(zip(self.merged_enc_b, self.merged_enc_A))
        with multi.Pool(processes = multi.cpu_count()) as workers:#multi processing
            # masking C = A*R with multi-processing
            enc_C = workers.map(self.dotproduct, splitedAR)
            # masking d = b + A*r
            enc_d = workers.map(self.compute_enc_d, splitedbA)
        # NOTE(review): the with-block already terminates the pool on exit,
        # so these extra close()/terminate() calls look redundant — confirm
        workers.close()
        workers.terminate()

        enc_C = list(zip(*[iter(enc_C)]*num_dimension)) # reshape

        return enc_C, enc_d
Ejemplo n.º 20
0
	def move_is_possible(self, direction):
		"""Return True if sliding the board in *direction* would change it;
		unknown directions return False."""
		def row_is_left_movable(row):
			# a row can move left when a zero sits before a tile, or when
			# two equal neighbouring tiles could merge
			for left, right in zip(row, row[1:]):
				if left == 0 and right != 0:
					return True
				if left != 0 and left == right:
					return True
			return False

		# every direction reduces to the 'Left' check under a board symmetry
		checks = {
			'Left': lambda field: any(row_is_left_movable(r) for r in field),
			'Right': lambda field: checks['Left'](invert(field)),
			'Up': lambda field: checks['Left'](transpose(field)),
			'Down': lambda field: checks['Right'](transpose(field)),
		}

		if direction not in checks:
			return False
		return checks[direction](self.field)
Ejemplo n.º 21
0
    def nondiagonal_block(self, labEnc_Xi, enc_mask_A):
        """Compute the off-diagonal block Enc(Xj^T * Xi) against every stored
        dataset, then remove the mask by adding enc_mask_A element-wise."""
        t_Xi = transpose(labEnc_Xi)
        block = None
        # matrix multiplication, one sub-block per previously merged Xj
        for labEnc_Xj in self.list_labEnc_Xj:
            sub_block = [
                [sum(map(mul, col_i, col_j)) for col_i in t_Xi]
                for col_j in transpose(labEnc_Xj)
            ]
            block = sub_block if block is None else block + sub_block

        # the mask must match the computed block dimensions exactly
        assert len(block) == len(enc_mask_A)
        assert len(block[-1]) == len(enc_mask_A[-1])

        # remove mask(sum(Enc(b*b'))): element-wise homomorphic addition
        return [list(map(add, row, mask_row))
                for row, mask_row in zip(block, enc_mask_A)]
Ejemplo n.º 22
0
    def merge_data(self, enc_Ai, enc_bi, labEnc_Xi, labEnc_Y, enc_mask_A, enc_mask_b, lamda, magnitude):
        """Merge one DataOwner's encrypted data into the growing block matrix.

        The first owner's data initializes merged_enc_A/merged_enc_b; every
        later owner's data is attached as a new diagonal block plus masked
        off-diagonal blocks (computed with a multiprocessing pool).

        :param enc_Ai: encrypted diagonal-block contribution
        :param enc_bi: encrypted label vector (first owner only, may be None later)
        :param labEnc_Xi: labeled-encrypted data matrix of this owner
        :param labEnc_Y: labeled-encrypted labels, or None
        :param enc_mask_A, enc_mask_b: encrypted masks (None for the first owner)
        :param lamda: ridge regularization strength
        :param magnitude: fixed-point scaling factor (lamda is encoded as
            int(lamda * magnitude**2))
        """
        encoded_lamda = (int)(lamda * (magnitude**2))

        # merged Enc(A)
        if len(self.merged_enc_A) == 0:    # if a MLE gets data from the first DOwner
            assert enc_mask_A is None and enc_mask_b is None
            assert enc_bi is not None
            self.merged_enc_A = self.diagonal_block(enc_Ai, encoded_lamda)
            self.merged_enc_b = enc_bi
        else:

            '''
            # single process
            new_diagonal_block = self.diagonal_block(enc_Ai, encoded_lamda)
            new_nondiagonal_block = self.nondiagonal_block(labEnc_Xi, enc_mask_A)
            '''
            # multi processing
            # parallization with multi-processing to improve the performance
            new_diagonal_block = self.diagonal_block(enc_Ai, encoded_lamda)
            with multi.Pool(processes = multi.cpu_count()) as workers:#multi processing
                new_nondiagonal_block = self.nondiagonal_block_mp(workers, labEnc_Xi, enc_mask_A)
                new_enc_bi = workers.map(self.compute_new_enc_bi, list(zip(enc_mask_b, transpose(labEnc_Xi))))
            # NOTE(review): the with-block already terminates the pool; these
            # extra calls look redundant — confirm safe to drop
            workers.close()
            workers.terminate()

            # add blocks to right side
            for index, new_subline in enumerate(new_nondiagonal_block):
                self.merged_enc_A[index] += new_subline

            # add blocks to bottom side
            t_newNonDiagonalBlock = transpose(new_nondiagonal_block)

            for h_index in range(len(enc_Ai)):
                self.merged_enc_A.append(t_newNonDiagonalBlock[h_index] + new_diagonal_block[h_index])

            self.merged_enc_b += new_enc_bi


        # add Enc(Xi), Enc(Yi) on the list
        self.list_labEnc_Xj.append(labEnc_Xi)
        if labEnc_Y is not None:
            self.labEnc_Y = labEnc_Y
Ejemplo n.º 23
0
                whitespace * (SCREEN_HEIGHT - 20 - len(art.logo)))

        level = (transpose(readme) +
                 [' ' * SCREEN_HEIGHT] * 5 +
                 transpose(logo) +
                 level)
        bird_pos_x += SCREEN_WIDTH + len(logo[0]) + 7
        lead_in += len(logo[0]) + len(readme[0]) + 5


# build the initial level and game-state globals
reset()


# show first frame and wait for keypress
screen_slice = level[cur_screen_pos:cur_screen_pos + SCREEN_WIDTH]
# level stores columns; transpose back to rows for display
screen = transpose(screen_slice)
data = '\r\n'.join(''.join(row) for row in screen)
set_text(text_area, data)
# busy-wait for the first keypress (Python 2 print statement)
while not keycheck():
    print 'waiting'
    pass


while True:  # game loop
    key_pressed = keycheck()

    if key_pressed:
        if game_over:
            first_run = False
            reset()
        else:
Ejemplo n.º 24
0
def pipe_and_space(top_height=None):
    return transpose(art.pipes(PIPE_GAP, PIPE_HEIGHT, top_height)) \
        + [empty] * PIPE_SPACING
# Build watermarked images plus boolean masks marking watermark pixels and
# text pixels for every input image.
wm_mask_list = []
text_mask_list = []
for i in range(len(input_img)):
    text_img = show(input_img[i])
    text_mask = get_text_mask(np.array(text_img))  # text mask (bool)
    rgb_img = Image.new(mode="RGB", size=text_img.size, color=(255, 255, 255))
    # vertical offset compensating the watermark's ~10 degree slant
    p = -int(wm_img.size[0] * np.tan(10 * np.pi / 180))
    right_shift = 10
    # align the watermark with the detected text position when available
    xp = pos[i][0][0] + right_shift if len(pos[i]) != 0 else right_shift
    # xp = 0
    rgb_img.paste(wm_img, box=(xp, p))  # paste the watermark first
    wm_mask = (np.array(rgb_img.convert('L')) != 255)  # watermark mask (bool)
    rgb_img.paste(text_img, mask=cvt2Image(text_mask))  # then paste the text on top

    wm0_img_list.append(rgb_img)
    wm_mask_list.append(transpose(wm_mask))
    text_mask_list.append(transpose(text_mask))
wm_mask = np.asarray(wm_mask_list)
text_mask = np.asarray(text_mask_list)

batch_size = 100
clip_min, clip_max = 0.0, 1.0

# 大数据集查看
record_text = []
wm0_img = pred_img = np.asarray(
    [cvt2raw(np.array(img.convert('L'))) / 255 for img in wm0_img_list])
batch_iter = len(input_img) // batch_size
batch_iter = batch_iter if len(input_img) % batch_size == 0 else batch_iter + 1
for batch_i in range(batch_iter):
    start = batch_size * batch_i
Ejemplo n.º 26
0
def score(num):
    """Render *num* as art by stitching the digit glyphs side by side."""
    columns = []
    for digit_char in str(num):
        # each glyph contributes its columns in order
        columns.extend(transpose(digits[int(digit_char)]))
    return transpose(columns)
Ejemplo n.º 27
0
def ground(length):
    """Return ground art tiled (and trimmed) to exactly *length* columns."""
    one = transpose(ground_segment)
    # Integer division: `/` was Python-2 floor division here; `//` keeps the
    # same result on Python 2 and avoids `list * float` (TypeError) on
    # Python 3.  The +1 over-tiles so the trim below always has enough.
    many = one * (length // len(one) + 1)
    assert len(many) >= length
    return transpose(many[:length])
Ejemplo n.º 28
0
    def protocol_ridge_step1(self):
        """
        Protocol-ridge Step1(data masking)
        - sample a random matrix(R) and a random vector(r)
        - mask a merged dataset(A, b) with R, r

        :return enc_C: Enc(C) = Enc(A*R)
        :return enc_d: Enc(d) = Enc(b + Ar)
        """
        num_dimension = len(self.merged_enc_A)

        # sample a random matrix(R) and a random vector(r)
        Range = self.pk.n - 1
        MaxInt = self.pk.max_int
        R = [[(random.randrange(Range) - MaxInt) for _ in range(num_dimension)]
             for _ in range(num_dimension)]

        # check that R is invertible. ([det(A)] is non-zero <=> A is invertible)
        # if R is not invertible, random-sample again until R is invertible.
        det_R = compute_det(R, self.pk.n)
        while (det_R == 0.0):
            R = [[(random.randrange(Range) - MaxInt)
                  for _ in range(num_dimension)] for _ in range(num_dimension)]
            det_R = compute_det(R, self.pk.n)

        r = [(int)(random.randrange(Range) - MaxInt)
             for _ in range(num_dimension)]

        # store R, r (and det R) for the later unmasking step
        self.R = R
        self.r = r
        self.det_R = det_R

        # Matrix multiplication with multi proccessing: pair every row of
        # Enc(A) with every column of R so each dot product runs independently.
        splitedAR = []
        R_trans = transpose(self.R)
        for i in range(num_dimension):
            for j in range(num_dimension):
                splitedAR.append([self.merged_enc_A[i], R_trans[j]])

        splitedbA = list(zip(self.merged_enc_b, self.merged_enc_A))

        with multi.Pool(
                processes=multi.cpu_count()) as pool:  #multi processing
            # masking C = A*R with multi-processing
            enc_C = pool.map(self.compute_enc_C, splitedAR)

            # masking d = b + A*r
            enc_d = pool.map(self.compute_enc_d, splitedbA)
        # NOTE(review): the with-block already terminates the pool on exit;
        # these extra close()/terminate() calls look redundant — confirm
        pool.close()
        pool.terminate()
        enc_C = list(zip(*[iter(enc_C)] * num_dimension))  # reshape
        # (alternative chunked implementation kept below, disabled)
        '''
        # Matrix multiplication with multi proccessing.
        interval = num_dimension//5
        self.interval = interval
        A_splited = [self.merged_enc_A[i:i + interval] for i in range(0, num_dimension, interval)]
        R_trans = transpose(R)
        R_t_splited = [R_trans[i:i + interval] for i in range(0, num_dimension, interval)]
        splitedAR = []
        for A_i in A_splited:
            for R_t_i in R_t_splited:
                splitedAR.append([A_i, transpose(R_t_i)])

        splitedbA = list(zip(self.merged_enc_b, self.merged_enc_A))

        with multi.Pool(processes = multi.cpu_count()) as pool: #multi processing
            # masking C = A*R with multi-processing
            temp_enc_C = pool.map(self.compute_enc_C, splitedAR)
            enc_C = []
            for i in range(5): #reshape
                enc_C += [list(chain.from_iterable(items)) for items in zip(*temp_enc_C[i:i+5])]

            # masking d = b + A*r
            enc_d = pool.map(self.compute_enc_d, splitedbA)
        pool.close()
        pool.terminate()
        '''

        return enc_C, enc_d
Ejemplo n.º 29
0
def test_transpose():
    """transpose() turns row chunks into column chunks, keeping leftovers."""
    assert transpose([b'123', b'456']) == [b'14', b'25', b'36']
    # ragged input: the short chunk only contributes to the first column
    assert transpose([b'123', b'456', b'7']) == [b'147', b'25', b'36']