Example #1
    def __init__(self):
        total_data = 2**8

        for i in range(total_data):
            binary_number = extend_to_bits(to_binary(i))

            random_number = to_binaryC2(random.randint(-(2**31), (2**31) - 1))
            self.data[binary_number] = random_number
Example #2
    def __init__(self):
        total_registers = 2**5

        for i in range(total_registers):
            binary_number = to_binary(i)
            if len(binary_number) < 5:
                zero_fill = 5 - len(binary_number)
                binary_number = "{}{}".format("0" * zero_fill, binary_number)

            if i == 8:
                self.data[binary_number] = extend_to_bits(to_binary(16))
            else:
                self.data[binary_number] = False
Example #3
def test():
    p = creds._pub[1]
    print(p)
    msg = utils.point_to_string(p)
    print(msg)
    msg = utils.to_binary(msg)

    enc = creds.encrypt(msg)
    enc = utils.to_binary(enc)
    enc = bytes.decode(enc)

    msg = creds.decrypt(enc)
    print(msg)
    p = utils.string_to_point(msg)
    print(p)
Example #4
def excess(integer):
    # excess-127 (biased) representation: add the bias before building the 8-bit string
    integer += 127

    number = to_binary(integer)[::-1]
    while len(number) < 8:
        number = "0" + number

    return number
Example #5
    def makeOr(self, a, b):

        a = int(a, 2)
        b = int(b, 2)

        result = to_binary((a | b))

        return extend_to_bits(result)
Example #6
def load_sequens_data(fname_seq):
    seq_dict = {'id': [], 'prop': [], 'seq': [], 'seq_bin': []}
    for rec in SeqIO.parse(fname_seq, 'fasta'):
        seq_dict['id'].append(rec.id)
        seq_dict['prop'].append(float(rec.description.split()[-1]))
        seq_dict['seq'].append(rec.seq)
        seq_dict['seq_bin'].append(to_binary(rec.seq))
    return seq_dict
Example #7
def to_signed_binary(integer, sign):
  number = to_binary(integer)

  if len(number) >= 8:
    number = number[:8]
    return number[::-1]

  while len(number) < 7:
    number += '0'
  return sign + number[::-1]
Example #8
def run():
    """
    Solution: Pretty straightforward search.
    """
    N = 1000000

    total = 0
    for n in xrange(1, N):
        if not is_palindrome(n):
            continue

        if is_palindrome(to_binary(n)):
            total += n
    return total
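The search above assumes two helpers that are not shown in the excerpt. A minimal sketch of plausible implementations, inferred only from how they are called here (these are assumptions, not the original project's code):

def to_binary(n):
    # assumed behaviour: binary digits of n without the "0b" prefix
    return bin(n)[2:]

def is_palindrome(value):
    # assumed behaviour: accepts an int or a string and compares the string form with its reverse
    s = str(value)
    return s == s[::-1]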
Example #9
    def __init__(self, dataset, data_dir, list_IDs, batch_size=1, dim=(32,32,32),
                 shuffle=True, transform=None, verbose=0, prows=None):

        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.shuffle = shuffle
        self.on_epoch_end()
        self.data = dataset
        self.transform = transform
        self.verbose = verbose
        self.X_gene = to_binary(self.data)
        self.X_chem = smiles_to_onehot(self.data)
        self.Y = self.data['LN_IC50'].tolist()
Example #10
def classify_rnn1(model, text: List[str]) -> str:
	word2index = joblib.load('./word2index')
	tag2index = joblib.load('./tag2index')

	t = preprocessing(text)

	new_sent = [word2index[word] for word in t]
	max_len = 5423
	x = pad_sequences(truncating='post', maxlen=max_len, sequences=[new_sent],
					  padding='post', value=0)

	cats = model.predict(x)

	pred = to_binary(cats)
	pred2 = to_tags(pred, tag2index)

	return pred2
Example #11
def next_ip(broadcast):
    tinit = ""
    for x in broadcast:
        tinit = tinit + x

    tdec = to_decimal(int(tinit))
    tbin = binary_fixed(to_binary(tdec + 1), 32)

    tinit = ""
    tmp = ""
    for i in range(0, len(tbin)):
        if (i % 8) == 0 and i != 0:
            tinit = tinit + str(to_decimal(int(tmp))) + "."
            tmp = ""
        tmp = tmp + tbin[i]
    tinit = tinit + str(to_decimal(int(tmp)))

    return tinit
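next_ip also relies on helpers defined elsewhere in that project. A rough sketch consistent with the calls above, where to_decimal re-reads an integer's decimal digits as binary and binary_fixed zero-pads to a fixed width (both are assumptions, not the original code):

def to_decimal(binary_digits):
    # assumed behaviour: interpret the digits of the integer as binary, e.g. 11000000 -> 192
    return int(str(binary_digits), 2)

def binary_fixed(binary_string, width):
    # assumed behaviour: left-pad the binary string with zeros up to the requested width
    return binary_string.zfill(width)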
Example #12
def align(img):
    """
        Aligns the text in the input image by computing the minimum-area rectangle that encloses it.
        Note that the input must be a binary image!
    """
    coords = np.column_stack(np.where(img > 0))

    neg_angle = cv2.minAreaRect(coords)[-1]
    angle = -neg_angle
    if angle > 45:
        angle -= 90

    h, w = img.shape
    rot_matrix = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)
    rotated = cv2.warpAffine(img,
                             rot_matrix, (w, h),
                             flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_REPLICATE,
                             borderValue=0)
    rotated = utils.to_binary(rotated)

    return rotated
Example #13
]

if race:
    race_list = list(set(x['race']))
    columns += [(x['race'] == n) for n in race_list]
    cnames += ['Race=%s' % r for r in race_list]

columns.append(x['two_year_recid'])
cnames.append('recidivate-within-two-years')

print 'write categorical dataset', fout
y = tb.tabarray(columns=columns, names=cnames)
y.saveSV(fout)

print 'write binary dataset', bout
b = utils.to_binary(y)
b.saveSV(bout)

print 'permute and partition dataset'
split_ind = np.array_split(np.random.permutation(len(y)), num_folds)
print 'number of folds:', num_folds
print 'train size:', [
    sum([len(split_ind[i]) for i in range(num_folds) if i != j])
    for j in range(num_folds)
]
print 'test size:', [
    sum([len(split_ind[i]) for i in range(num_folds) if i == j])
    for j in range(num_folds)
]

num_rules = np.zeros(num_folds, int)
Example #14
    def train(self, args):
        # Data from domain A and B, and mixed dataset for partial and full models.
        dataA = glob("./{}/train/*.*".format(self.dataset_A_dir))
        dataB = glob("./{}/train/*.*".format(self.dataset_B_dir))

        if args.continue_train:
            if self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint):
                print(" [*] Load checkpoint succeeded!")
            else:
                print(" [!] Load checkpoint failed.")

        counter = 1
        start_time = time.time()
        d_loss_list = []
        g_loss_list = []
        cycle_loss_list = []

        print("Training start...")

        for epoch in range(args.epoch):

            # Shuffle training data
            np.random.shuffle(dataA)
            np.random.shuffle(dataB)

            # Get the proper number of batches
            batch_idxs = min(len(dataA), len(dataB)) // self.batch_size

            # learning rate starts to decay when reaching the threshold
            self.lr = (
                self.lr
                if epoch < args.epoch_step
                else self.lr * (args.epoch - epoch) / (args.epoch - args.epoch_step)
            )

            for idx in range(batch_idxs):

                # To feed real_data
                batch_files = list(
                    zip(
                        dataA[idx * self.batch_size : (idx + 1) * self.batch_size],
                        dataB[idx * self.batch_size : (idx + 1) * self.batch_size],
                    )
                )
                batch_samples = [
                    load_npy_data(batch_file) for batch_file in batch_files
                ]
                batch_samples = np.array(batch_samples).astype(
                    np.float32
                )  # batch_size * 64 * 84 * 2
                real_A, real_B = batch_samples[:, :, :, 0], batch_samples[:, :, :, 1]
                real_A = tf.expand_dims(real_A, -1)
                real_B = tf.expand_dims(real_B, -1)

                # generate gaussian noise for robustness improvement
                gaussian_noise = np.abs(
                    np.random.normal(
                        0,
                        self.sigma_d,
                        [
                            self.batch_size,
                            self.time_step,
                            self.pitch_range,
                            self.input_c_dim,
                        ],
                    )
                ).astype(np.float32)

                with tf.GradientTape(persistent=True) as gen_tape, tf.GradientTape(
                    persistent=True
                ) as disc_tape:

                    fake_B = self.generator_A2B(real_A, training=True)
                    cycle_A = self.generator_B2A(fake_B, training=True)

                    fake_A = self.generator_B2A(real_B, training=True)
                    cycle_B = self.generator_A2B(fake_A, training=True)

                    [fake_A_sample, fake_B_sample] = self.pool([fake_A, fake_B])

                    DA_real = self.discriminator_A(
                        real_A + gaussian_noise, training=True
                    )
                    DB_real = self.discriminator_B(
                        real_B + gaussian_noise, training=True
                    )

                    DA_fake = self.discriminator_A(
                        fake_A + gaussian_noise, training=True
                    )
                    DB_fake = self.discriminator_B(
                        fake_B + gaussian_noise, training=True
                    )

                    DA_fake_sample = self.discriminator_A(
                        fake_A_sample + gaussian_noise, training=True
                    )
                    DB_fake_sample = self.discriminator_B(
                        fake_B_sample + gaussian_noise, training=True
                    )

                    # Generator loss
                    cycle_loss = self.L1_lambda * (
                        abs_criterion(real_A, cycle_A) + abs_criterion(real_B, cycle_B)
                    )
                    g_A2B_loss = (
                        self.criterionGAN(DB_fake, tf.ones_like(DB_fake)) + cycle_loss
                    )
                    g_B2A_loss = (
                        self.criterionGAN(DA_fake, tf.ones_like(DA_fake)) + cycle_loss
                    )
                    g_loss = g_A2B_loss + g_B2A_loss - cycle_loss

                    # Discriminator loss
                    d_A_loss_real = self.criterionGAN(DA_real, tf.ones_like(DA_real))
                    d_A_loss_fake = self.criterionGAN(
                        DA_fake_sample, tf.zeros_like(DA_fake_sample)
                    )
                    d_A_loss = (d_A_loss_real + d_A_loss_fake) / 2
                    d_B_loss_real = self.criterionGAN(DB_real, tf.ones_like(DB_real))
                    d_B_loss_fake = self.criterionGAN(
                        DB_fake_sample, tf.zeros_like(DB_fake_sample)
                    )
                    d_B_loss = (d_B_loss_real + d_B_loss_fake) / 2
                    d_loss = d_A_loss + d_B_loss

                # Calculate the gradients for generator and discriminator
                generator_A2B_gradients = gen_tape.gradient(
                    target=g_A2B_loss, sources=self.generator_A2B.trainable_variables,
                )
                generator_B2A_gradients = gen_tape.gradient(
                    target=g_B2A_loss, sources=self.generator_B2A.trainable_variables,
                )

                discriminator_A_gradients = disc_tape.gradient(
                    target=d_A_loss, sources=self.discriminator_A.trainable_variables,
                )
                discriminator_B_gradients = disc_tape.gradient(
                    target=d_B_loss, sources=self.discriminator_B.trainable_variables,
                )

                # Apply the gradients to the optimizer
                self.GA2B_optimizer.apply_gradients(
                    zip(
                        generator_A2B_gradients, self.generator_A2B.trainable_variables,
                    )
                )
                self.GB2A_optimizer.apply_gradients(
                    zip(
                        generator_B2A_gradients, self.generator_B2A.trainable_variables,
                    )
                )

                self.DA_optimizer.apply_gradients(
                    zip(
                        discriminator_A_gradients,
                        self.discriminator_A.trainable_variables,
                    )
                )
                self.DB_optimizer.apply_gradients(
                    zip(
                        discriminator_B_gradients,
                        self.discriminator_B.trainable_variables,
                    )
                )

                print(
                    "================================================================="
                )
                print(
                    (
                        "Epoch: [%2d] [%4d/%4d] time: %4.4f D_loss: %6.2f, G_loss: %6.2f, cycle_loss: %6.2f"
                        % (
                            epoch,
                            idx,
                            batch_idxs,
                            time.time() - start_time,
                            d_loss,
                            g_loss,
                            cycle_loss,
                        )
                    )
                )
                # ADDED
                d_loss_list.append(d_loss)
                g_loss_list.append(g_loss)
                cycle_loss_list.append(cycle_loss)

                counter += 1

                # generate samples during training to track the learning process
                if np.mod(counter, args.print_freq) == 1:
                    sample_dir = os.path.join(
                        self.sample_dir,
                        "{}2{}_{}_{}_{}".format(
                            self.dataset_A_dir,
                            self.dataset_B_dir,
                            self.now_datetime,
                            self.model,
                            self.sigma_d,
                        ),
                    )
                    if not os.path.exists(sample_dir):
                        os.makedirs(sample_dir)

                    # to binary, 0 denotes note off, 1 denotes note on
                    samples = [
                        to_binary(real_A, 0.5),
                        to_binary(fake_B, 0.5),
                        to_binary(cycle_A, 0.5),
                        to_binary(real_B, 0.5),
                        to_binary(fake_A, 0.5),
                        to_binary(cycle_B, 0.5),
                    ]

                    self.sample_model(
                        samples=samples, sample_dir=sample_dir, epoch=epoch, idx=idx
                    )

                if np.mod(counter, args.save_freq) == 1:
                    self.checkpoint_manager.save(counter)

        pickle_loss_list(d_loss_list, self.d_loss_path)
        pickle_loss_list(g_loss_list, self.g_loss_path)
        pickle_loss_list(cycle_loss_list, self.cycle_loss_path)
Example #15
    random.shuffle(AI2)
    random.shuffle(COMMONCORE)
    random.shuffle(ILLINOIS)
    random.shuffle(MAWPS)
    random.shuffle(GENERATED)

    # AI2 testing data
    test_pre_ai2 = convert_to(AI2_TEST, "prefix")
    test_pos_ai2 = convert_to(AI2_TEST, "postfix")
    if KEEP_INFIX_PARENTHESIS:
        test_inf_ai2 = remove_variables(AI2_TEST)
        test_inf_ai2 = test_inf_ai2[:len(test_pos_ai2)]
    else:
        test_inf_ai2 = convert_to(AI2_TEST, "infix")

    to_binary(os.path.join(DIR_PATH, "../test_ai_prefix.p"),
              test_pre_ai2)
    to_binary(os.path.join(DIR_PATH, "../test_ai_postfix.p"),
              test_pos_ai2)
    to_binary(os.path.join(DIR_PATH, "../test_ai_infix.p"),
              test_inf_ai2)

    # AI2 training data
    pre_ai2 = convert_to(AI2, "prefix")
    pos_ai2 = convert_to(AI2, "postfix")
    if KEEP_INFIX_PARENTHESIS:
        inf_ai2 = remove_variables(AI2)
        inf_ai2 = inf_ai2[:len(pos_ai2)]
    else:
        inf_ai2 = convert_to(AI2, "infix")

    if MAKE_IND_SETS:
Example #16
def calculate_address(pcs, init, size, ipsize):
    ipinit = init
    pcs.sort(reverse=True)

    for ip in pcs:
        ip_part = ipinit.split('.')
        tmp_size = len(to_binary(ip))

        if ip == tmp_size:
            tmp_size = tmp_size - 1

        tm = ipsize - tmp_size

        ip_a = []
        m_a = []
        n_a = []

        # ip bits
        # print(ip_part)
        for ipp in ip_part:
            tb = binary_fixed(to_binary(int(ipp)), size)
            # print(tb)
            for i in range(0, size):
                ip_a.append(tb[i])
        # mac bits
        for i in range(0, ipsize):
            if (i < tm):
                m_a.append(1)
            else:
                m_a.append(0)

        # na bits
        for i in range(0, ipsize):
            n_a.append(int(ip_a[i]) and m_a[i])

        # ba bits
        b_a = ip_a.copy()
        for i in range(tm, ipsize):
            b_a[i] = 1

        # to decimal
        sip_a = []
        sm_a = []
        sn_a = []
        sb_a = []

        sip = ""
        sm = ""
        sn = ""
        sb = ""

        for i in range(0, ipsize):
            if (i % size) == 0 and i != 0:
                sip_a.append(sip)
                sip = ""
                sm_a.append(sm)
                sm = ""
                sn_a.append(sn)
                sn = ""
                sb_a.append(sb)
                sb = ""
            sip = sip + str(ip_a[i])
            sm = sm + str(m_a[i])
            sn = sn + str(n_a[i])
            sb = sb + str(b_a[i])

        sip_a.append(sip)
        sm_a.append(sm)
        sn_a.append(sn)
        sb_a.append(sb)

        print_ips(sip_a, sm_a, sn_a, sb_a, 13, size + 2, ipsize)
        ipinit = next_ip(sb_a)
Example #17
    PROBLEM_LIST = AI2 + COMMONCORE + ILLINOIS + MAWPS + GENERATED

    # Randomize
    random.shuffle(PROBLEM_LIST)

    # AI2 testing data
    test_pre_ai2 = convert_to(AI2_TEST, "prefix")
    test_pos_ai2 = convert_to(AI2_TEST, "postfix")
    if KEEP_INFIX_PARENTHESIS:
        test_inf_ai2 = remove_variables(AI2_TEST)
        test_inf_ai2 = test_inf_ai2[:len(test_pos_ai2)]
    else:
        test_inf_ai2 = convert_to(AI2_TEST, "infix")

    to_binary(os.path.join(DIR_PATH, "../test_ai_prefix.p"), test_pre_ai2)
    to_binary(os.path.join(DIR_PATH, "../test_ai_postfix.p"), test_pos_ai2)
    to_binary(os.path.join(DIR_PATH, "../test_ai_infix.p"), test_inf_ai2)

    # AI2 training data
    pre_ai2 = convert_to(AI2, "prefix")
    pos_ai2 = convert_to(AI2, "postfix")
    if KEEP_INFIX_PARENTHESIS:
        inf_ai2 = remove_variables(AI2)
        inf_ai2 = inf_ai2[:len(pos_ai2)]
    else:
        inf_ai2 = convert_to(AI2, "infix")

    if MAKE_IND_SETS:
        to_binary(os.path.join(DIR_PATH, "../train_ai_prefix.p"), pre_ai2)
        to_binary(os.path.join(DIR_PATH, "../train_ai_postfix.p"), pos_ai2)
Example #18
from os import path
import utils
import NeuralNetworks

##############################################################################

FILE_R = "text2.txt"
FILE_W = "resultText2"
MODEL_FILENAME = "rnn_model.h5"
DATA_FILENAME = "data2"
COUNT_CLASS = 2
BATCH = 1
LENGTH_SEQ = 20
COUNT_PARAMETERS = 6

##############################################################################

all_x, all_y, all_z = utils.unpack_train_data(DATA_FILENAME)
x, z = utils.prepare_real_data(all_x, all_z, LENGTH_SEQ)

if path.exists(MODEL_FILENAME):
    nn = NeuralNetworks.load_model(MODEL_FILENAME)
    y = utils.to_binary(nn.predict(x, BATCH))
    utils.markup_text(FILE_W, FILE_R, y, z, LENGTH_SEQ)
else:
    print("not found model nn")
    exit()
Example #19
    def predict(self, x, batch_size=32):
        return utils.to_binary(self.model.predict(x, batch_size=batch_size))
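In this example utils.to_binary apparently thresholds the model's probability outputs into hard 0/1 labels. A minimal sketch under that assumption (the 0.5 threshold is a guess, not taken from the project):

import numpy as np

def to_binary(predictions, threshold=0.5):
    # assumed behaviour: map predicted probabilities to hard 0/1 labels
    return (np.asarray(predictions) >= threshold).astype(int)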
Example #20
def main():
    running = True
    while running:
        mode = input('Host? ').lower()
        if mode == 'y' or mode == 'yes' or mode == 'host':
            print('Local pub_key: ',
                  utils.to_binary(ecc.encoding.enc_point(creds._pub[1])))
            TCP_IP = my_addr
            TCP_PORT = 4444
            BUFFER_SIZE = 2048  # Normally 1024, but we want fast response
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind((TCP_IP, TCP_PORT))
            s.listen(1)
            conn, addr = s.accept()
            print('Connection address:', addr)
            while 1:
                data = conn.recv(BUFFER_SIZE)
                if not data: break
                data_obj = fancy_shit(data)
                if 'mcv_seif' in data_obj:
                    msg = creds.decrypt(bytes.decode(data_obj['_pub']))
                    print(msg)
                    other_creds = Key((521, utils.string_to_point(msg)))
                    ipmsg = creds.decrypt(bytes.decode(data_obj['ip_msg']))
                    print(other_creds.validate())
                    if other_creds.verify(data_obj['ip_msg'],
                                          bytes.decode(data_obj['ip_sig'])):
                        print('Successful connection!!!!!')
                        conn.close()
                        break
                print("(host) received data")
                conn.send(data)
            conn.close()
            s.close()
            print('(host) socket closed')
        else:
            TCP_IP = input('ip_addr: ')
            TCP_PORT = 4444
            BUFFER_SIZE = 2048

            other_creds = Key((
                521,
                ecc.encoding.dec_point(
                    'áØÄ\x87ª`ÝÓ\x92\x8c\x9eh";ûX\x1cFÈ#Qn\x00î&7Lýùäj\ræ\x9c\x07{ºÃ\xadöâ\x10b,zta"\x18«q\x8b±\x00\x02Ucé(7\x8ax\tqtoM-,N\x0b\x04ÖÂÿAø\nWÕçH`[]3úJÕsaÓi~ä\x17·\xa0\x1e\x0c\x18¶Gb\x08}MÝ\x13l\x04\x8aÄåaËÞo\x9f¹\x95L\x7f?yÀr»\x1c\x8d'
                )))

            if not other_creds.validate():
                print('Invalid host key')
                break

            enc_pub = other_creds.encrypt(
                utils.to_binary(utils.point_to_string(creds._pub[1])))
            signed_ip = creds.sign(utils.to_binary(my_addr))
            enc_ip = other_creds.encrypt(utils.to_binary(my_addr))

            MESSAGE = utils.to_binary(
                '"mcv_seif":1,"_pub":{},"ip_sig":{},"ip_msg":{}'.format(
                    enc_pub, signed_ip, enc_ip))
            print(fancy_shit(MESSAGE))

            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((TCP_IP, TCP_PORT))
            s.send(MESSAGE)
            data = s.recv(BUFFER_SIZE)
            s.close()
            print('(client) socket closed')
Example #21
cost = W1 = W2 = W3 = b1 = b2 = b3 = None
for i in range(epochs):
    cost, W1, W2, W3, b1, b2, b3 = train(X_train, y_train, 0.1)
    if i % 100 == 0:
        print('Cost on epoch %i: %s' %(i, cost))

predictor = define_predict(W1, b1, W2, b2, W3, b3)

#PREDICTING THE OUTPUT
y_train_hat = predictor(X_train)
y_dev_hat = predictor(X_dev)
y_test_hat = predictor(X_test)

#CONVERTING TO BINARY OUTPUT
from utils import to_binary
y_train_hat = to_binary(y_train_hat)
y_dev_hat   = to_binary(y_dev_hat)
y_test_hat  = to_binary(y_test_hat)

#CHECKING ACCURACY
from sklearn.metrics import accuracy_score
accuracy_score(y_train[0], y_train_hat[0], normalize=True)

#CHECKING RECALL
from sklearn.metrics import recall_score
recall_score(y_train[0], y_train_hat[0])

#CHECKING F1 SCORE
from sklearn.metrics import f1_score
f1_score(y_train[0], y_train_hat[0])