Example #1
def main():
    if args.train:
        for t in range(model.checkpoint, args.num_epochs):
            if t + 1 <= args.num_epochs_all_nodes:
                train(t + 1, get_batches(data_train_all_nodes,
                                         args.batch_size), 'train')
            else:
                train(t + 1, get_batches(data_train, args.batch_size), 'train')
            train(t + 1, dev_batches, 'dev')
            train(t + 1, test_batches, 'test')
    elif args.oracle:
        oracle(args, model, ptb, data_test, 'test')
    else:
        if args.robust:
            for i in range(args.num_epochs):
                eps_scheduler.step_epoch(verbose=False)
            res = []
            for i in range(1, args.budget + 1):
                logger.info('budget {}'.format(i))
                ptb.budget = i
                acc_rob = train(None, test_batches, 'test')
                res.append(acc_rob)
            logger.info('Verification results:')
            for i, acc_rob in enumerate(res):
                logger.info('budget {} acc_rob {:.3f}'.format(i + 1, acc_rob))
            logger.info(res)
        else:
            train(None, test_batches, 'test')
Example #2
def break_ecb(data):
    # Byte-at-a-time ECB decryption: shorten the controlled input by one byte,
    # then brute-force the byte value that makes the oracle output match.
    known_bytes = ""
    for j in range(1, len(data) + 1):
        seen = oracle(data[:-j])
        for i in range(256):
            payload = data[:-j] + known_bytes + chr(i)
            if oracle(payload)[:len(data)] == seen[:len(data)]:
                known_bytes += chr(i)
                break
    print(known_bytes)
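The snippet assumes an `oracle` that ECB-encrypts attacker-controlled input followed by a fixed secret under a constant key. A minimal sketch of such an oracle, assuming the `cryptography` package and illustrative `_KEY`/`_SECRET` values (none of this is from the original):

import os
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

_KEY = os.urandom(16)        # fixed for the run, unknown to the attacker
_SECRET = b"attack at dawn"  # the bytes break_ecb tries to recover

def oracle(attacker_input):
    # ECB-encrypt attacker_input || _SECRET with PKCS#7 padding.
    if isinstance(attacker_input, str):
        attacker_input = attacker_input.encode('latin-1')
    padder = padding.PKCS7(128).padder()
    plaintext = padder.update(attacker_input + _SECRET) + padder.finalize()
    encryptor = Cipher(algorithms.AES(_KEY), modes.ECB()).encryptor()
    return encryptor.update(plaintext) + encryptor.finalize()

With an oracle of this shape, `data` would be a filler string at least one block long, and the secret accumulates in `known_bytes`.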
Example #3
def label_data(self, data):
    X = []
    Y = []
    for s in data:
        f = Formula(s)
        tt = TruthTable(f)
        oracle(tt)
        for entry in tt.table:
            w = {e.v: e.b for e in entry}
            X.append(self.featurize(f, w))
            Y.append(tt.table[entry])
    return X, Y
Example #4
def train_substitute(oracle_model, dataset, test_dataset, device, MAX_RHO, LAMBDA, EPOCHS):

	if oracle_model is not None: oracle_model = oracle_model.to(device)

	n_classes = 81
	input_shape = list(dataset[0][0].shape)

	conv = [input_shape[0],4,8,16,32]
	fc = [512,256,128]

	model = None
	for rho in range(MAX_RHO):

		input_shape = list(dataset[0][0].shape)

		dummy_labels = None

		if oracle_model is not None:
			dummy_labels = oracle(oracle_model, dataset, device)
		else:
			dummy_labels = oracle_obj(dataset, device)

		dummy_dataset = create_dataset(dataset, dummy_labels)

		model = Classifier(input_shape, conv, fc, n_classes).to(device)
		criterion = nn.CrossEntropyLoss().to(device)
		optimizer = optim.Adagrad(model.parameters(), lr=0.01)

		train(model, EPOCHS, dummy_dataset, test_dataset, criterion, optimizer, device)
		print("Rho: %d"%(rho))
		print("Dataset Size: %d"%(len(dataset)))

		dataset = augment_dataset(model, dummy_dataset, LAMBDA, device)
	
	return model
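The `oracle` labeling helper called in the loop is not shown; a plausible sketch, assuming it queries the black-box model for one hard label per `(input, label)` pair in `dataset`:

import torch

def oracle(oracle_model, dataset, device):
    # Query the black-box model for a hard label on every sample.
    oracle_model.eval()
    labels = []
    with torch.no_grad():
        for x, _ in dataset:
            logits = oracle_model(x.unsqueeze(0).to(device))
            labels.append(int(logits.argmax(dim=1)))
    return labels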
Example #5
def query_oracle(fake, realblock):

    # build a string containing the fake ciphertext block followed by the real
    # ciphertext block: this is C1'C2, a string of 32 characters
    totalciphertext = ''.join(fake) + realblock

    return oracle(totalciphertext)
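A hedged usage sketch (not part of the original): sweep the last controlled byte, written as two hex characters, until the oracle accepts the padding; `realblock` is assumed to hold the 32-hex-character target block.

import itertools

realblock = '5f8043943189c3a3c3e6bd0d2237f73f'  # illustrative target block
fake = list('00' * 16)  # 16 controlled bytes as 32 hex characters
for hi, lo in itertools.product('0123456789abcdef', repeat=2):
    fake[-2], fake[-1] = hi, lo
    if query_oracle(fake, realblock):  # padding accepted
        break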
Example #6
File: lab.py Project: juleari/crypto
def get_block(pos, dec, a):
    for hh in range(255, 0, -1):
        decrypted = get_beat(pos, dec, a, hh)
        if oracle(decrypted):
            return (get_block((pos + 1) % 16, dec, [hh] + a) if pos
                    else messagefrombytes([hh] + a))
    return messagefrombytes(a)
Example #7
def step_size(self, x):
    step_size_map = {
        "INV_SQ_ROOT": self.inv_sq_root_step,
        "LOG": self.log_step,
        "GEOMETRIC": self.geometric_step,
        "CONSTANT": self.constant_step,
        "LIPSCHITZ": self.lipschitz_step
    }
    if self.step_function == "LIPSCHITZ":
        self.oracle = oracle(self.oracle_params)
        step = step_size_map[self.step_function](x)
    else:
        step = step_size_map[self.step_function]()
    self.iteration += 1
    return step
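The mapped step functions are defined elsewhere in the class; purely as an illustration, two of them might look like the following, assuming the object tracks `self.iteration` and an initial magnitude `self.initial_step` (both assumptions, not from the original):

import math

def inv_sq_root_step(self):
    # Step size decaying as 1/sqrt(t), a standard schedule.
    return self.initial_step / math.sqrt(self.iteration + 1)

def constant_step(self):
    return self.initial_step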
Example #8
File: app.py Project: bored-user/oracle
def main() -> int:
    args = parse_args()
    args.path += '/' if args.path[-1] != '/' else ''
    size = utils.estimate_size(args.size[0], args.size[1], args.path, args.black_white)
    ammount = utils.ammount_of_images(args.size[0] * args.size[1], args.black_white)

    if not os.path.isdir(args.path):
        os.makedirs(args.path)

    if args.estimate: return print(f'{ammount} images\n{size}') or 0
    if args.estimate_size: return print(size) or 0
    if args.estimate_ammount: return print(f'{ammount} images') or 0

    for img in oracle.oracle(args.size[0], args.size[1], args.black_white):
        img.save(f'{args.path}{uuid.uuid4()}.jpg')
    return 0
Example #9
def optimal_noise_language(language,
                           train_file,
                           model_file,
                           output_dir,
                           eval_method='oracle',
                           noise_method='m',
                           weight_sim_1best=0.5,
                           weight_sim_between=0.5,
                           noise_start=0,
                           noise_end=2,
                           noise_step=0.05):
    if (eval_method not in [
            'mst', 'oracle', 'unsupervised_min_sim_between_max_sim_to_1best',
            'unsupervised_min_sim_between_distinct_trees_max_sim_to_1best'
    ]):
        raise Exception('evaluation method ' + eval_method +
                        ' is not implemented')

    results_of_different_noises_path = os.path.join(output_dir,
                                                    'optimal_noises')
    print(results_of_different_noises_path)
    if (noise_method == 'm'):
        mu = '1'
    else:
        mu = '0'

    tmp_train_file = tempfile.NamedTemporaryFile()
    convertconll2liang(train_file, tmp_train_file.name)

    if (eval_method in [
            'unsupervised_min_sim_between_max_sim_to_1best',
            'unsupervised_min_sim_between_distinct_trees_max_sim_to_1best'
    ]):  # create the 1-best liang result for dev only once (not perturbed)
        tmp_out_file = tempfile.NamedTemporaryFile()
        pru = ParserRunUnit(language=language,
                            input_file=tmp_train_file.name,
                            model=model_file,
                            res_output=tmp_out_file.name)

        pru.parse_no_words()  # in practice - parse with words - model path
        # TODO: fix the file during conversion and not before
        fix_liang_file(tmp_out_file.name)
        convertliang2conll(tmp_out_file.name,
                           os.path.join(output_dir, "liang_1_best_dev_res"))
        log.info(
            '1best liang tree was created for unsupervised noise learning for language: '
            + language)

    for noise in np.arange(noise_start, noise_end, noise_step):
        specific_noise_dir = os.path.join(output_dir,
                                          "std_{0}_mu_{1}".format(noise, mu))

        create_dir(specific_noise_dir)

        output_file_path = os.path.basename(train_file)
        output_file_path = os.path.join(specific_noise_dir, output_file_path)

        for k in range(10):
            tmp_out_file = tempfile.NamedTemporaryFile()
            pru = ParserRunUnit(language=language,
                                input_file=tmp_train_file.name,
                                model=model_file,
                                res_output=tmp_out_file.name,
                                noise=True,
                                noise_method=noise_method,
                                mu=mu,
                                sigma=str(noise))

            pru.parse_no_words()
            # TODO: fix the file during conversion and not before
            fix_liang_file(tmp_out_file.name)
            convertliang2conll(tmp_out_file.name,
                               output_file_path + "_" + str(k))

        if (eval_method == 'oracle'):
            eval_score, best_list = oracle(specific_noise_dir, train_file)
        elif (eval_method == 'mst'):
            eval_score = mst_wrapper.mst_wrapper(input_dir=specific_noise_dir,
                                                 gold_file=train_file,
                                                 output_dir=specific_noise_dir)
        elif (eval_method == 'unsupervised_min_sim_between_max_sim_to_1best'):
            eval_score = min_sim_between_max_sim_to_1best(
                input_dir=specific_noise_dir,
                liang_1best_file=os.path.join(output_dir,
                                              "liang_1_best_dev_res"),
                weight_sim_1best=weight_sim_1best,
                weight_sim_between=weight_sim_between)

        elif (eval_method ==
              'unsupervised_min_sim_between_distinct_trees_max_sim_to_1best'):
            eval_score = min_sim_between_distinct_trees_max_sim_to_1best(
                input_dir=specific_noise_dir,
                liang_1best_file=os.path.join(output_dir,
                                              "liang_1_best_dev_res"),
                weight_sim_1best=weight_sim_1best,
                weight_sim_between=weight_sim_between)

        with open(results_of_different_noises_path, 'a') as res:
            res.write(str(noise) + " " + str(eval_score) + "\n")
        shutil.rmtree(specific_noise_dir)
Example #10
optimizer_types = ["NORMALIZE", "NONNORMALIZE", "ADAGRAD"]

main_params, optimization_params, step_params, oracle_params = parse_args()

params = {
    'MAIN': main_params,
    'OPTIMIZATION': optimization_params,
    'STEP': step_params,
    'ORACLE': oracle_params
}

num_dimensions = main_params["DIMENSIONS"]
initializations = main_params["INITIALIZATIONS"]
initialization_magnitude = main_params["INITIALIZATION_MAGNITUDE"]

oracle = oracle(oracle_params)

depth = CONSTANTS.depth
sweep = CONSTANTS.sweep
interval_shrinkage = CONSTANTS.interval_shrinkage

# Step size parameter search
if main_params["SEARCH"]:
    step_params["OPTIMAL"] = False
    optimal_step_sizes = []

    # Iterate over optimizer types
    for optimizer_type in optimizer_types:
        start_step = CONSTANTS.start_step
        end_step = CONSTANTS.end_step
Example #11
__TODO_4__ = 0
__TODO_5__ = 0

# This is the ciphertext block that we want to break
Y_hex = '5f8043943189c3a3c3e6bd0d2237f73f'
Y = bytes.fromhex(Y_hex)

# We always send a message of 2 blocks to the oracle:
# - the first block is a block R that we control
# - the second block is the ciphertext block Y we want to break

# We change the last byte of the controlled block R until we get correct padding
# (the other bytes of the controlled block can be anything, for instance, all bytes can be 0)

R = __TODO_1__  # recall that the AES block size is 16 bytes
while oracle(__TODO_2__) == PADDING_ERROR:
    r = (R[-1] + 1).to_bytes(1, 'big')  # increment the value of the last byte of R
    R = R[:-1] + r  # and put it back into the controlled block R

# At this point, we know that the padding was correct when sending in R+Y
# However, we don't know the length of the resulting padding...
# So we determine the padding length here

for i in range(16):  # change every byte in R until we get a padding error again
    S = R[:__TODO_3__] + b'\xFF' + R[__TODO_4__:]  # set the i-th byte of R to FF
    if oracle(
            S + Y
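For local testing, a padding oracle consistent with the comments above can be sketched as follows, treating the first 16 bytes of the input as the controlled block R (used as the IV) and the rest as the target block Y; the `cryptography` package and `_KEY` are illustrative assumptions, not part of the exercise:

import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

PADDING_ERROR = 'padding error'
_KEY = os.urandom(16)  # stand-in for the server's secret key

def oracle(two_blocks):
    # Decrypt R || Y with R as the IV and report PKCS#7 padding validity.
    iv, ct = two_blocks[:16], two_blocks[16:]
    decryptor = Cipher(algorithms.AES(_KEY), modes.CBC(iv)).decryptor()
    pt = decryptor.update(ct) + decryptor.finalize()
    n = pt[-1]
    if n < 1 or n > 16 or pt[-n:] != bytes([n]) * n:
        return PADDING_ERROR
    return 'ok'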
Example #12
def main(args):
    fileNames = os.listdir(trainTextDir)
    fileNames = [i for i in fileNames if (i.endswith('.json'))]
    for fileName in fileNames:
        with open(os.path.join(trainTextDir, fileName)) as text_json:
            text_data = json.load(text_json)
            text_data = tx.filterGarbage(text_data)
            tx.calculateAngles(text_data)
            tx.calculateCenterPoints(text_data)
            text_lines = tx.createLines(text_data)
            with open(
                    os.path.join(trainLabelsDir,
                                 fileName.split('_')[0] +
                                 '_labels.json')) as ground_truth_json:
                truth = json.load(ground_truth_json)
                truth = tx.removeSwedishLetters(truth)
                receipt = rc.Receipt(fileName, text_lines, truth)
                receipts.append(receipt)

    f = open('./data/test/test.txt', "r")
    for line in f:
        testFilePaths.append(line[:-1])
    test_reciepts = []
    for receipt in receipts:
        if receipt.path in testFilePaths:
            test_reciepts.append(receipt)

    if args[1] == 'plot_bert':
        d1 = pd.DataFrame(
            {
                'train synthetic 10000': plot.train_10000_v2,
                'validation synthetic 10000': plot.val_10000_v2
            },
            index=range(1, 31))
        d2 = pd.DataFrame(
            {
                'train synthetic 1000': plot.train_1000,
                'validation synthetic 1000': plot.val_1000
            },
            index=range(1, 31))
        d3 = pd.DataFrame(
            {
                'train real data': plot.train,
                'validation real data': plot.val
            },
            index=range(1, 31))
        data = pd.concat([d1, d2, d3], axis=1)
        sns.set_style("darkgrid")
        ax = sns.lineplot(data=data)
        ax.set(xlabel='epoch', ylabel='loss')
        plt.show()

    if args[1] == 'plot_lstm':
        def read_losses(path):
            with open(path, 'r') as f:
                return [float(line[:-1]) for line in f]

        train_loss = read_losses(
            '/Users/markolazic/Desktop/sroie-task3/data/trainLoss.txt')
        val_loss = read_losses(
            '/Users/markolazic/Desktop/sroie-task3/data/valLoss.txt')
        train_loss1000 = read_losses(
            '/Users/markolazic/Desktop/sroie-task3/data/trainLoss1000.txt')
        val_loss1000 = read_losses(
            '/Users/markolazic/Desktop/sroie-task3/data/valLoss1000.txt')
        train_loss10000 = read_losses(
            '/Users/markolazic/Desktop/sroie-task3/data/trainLoss10000.txt')
        val_loss10000 = read_losses(
            '/Users/markolazic/Desktop/sroie-task3/data/valLoss10000.txt')

        d1 = pd.DataFrame(
            {
                'train synthetic 10000': train_loss10000,
                'validation synthetic 10000': val_loss10000
            },
            index=range(1, 2001))
        d2 = pd.DataFrame(
            {
                'train synthetic 1000': train_loss1000,
                'validation synthetic 1000': val_loss1000
            },
            index=range(1, 2001))
        d3 = pd.DataFrame(
            {
                'train real data': train_loss,
                'validation real data': val_loss
            },
            index=range(1, 2001))
        data = pd.concat([d1, d2, d3], axis=1)
        data = data.rolling(100).mean()
        sns.set_style("darkgrid")
        ax = sns.lineplot(data=data)
        ax.set(xlabel='epoch', ylabel='loss')
        plt.show()

    if args[1] == 'create_data_statistics':
        stats = util.create_data_statistics(receipts, 'vendor')
        for k, v in sorted(stats.items(),
                           reverse=True,
                           key=lambda item: item[1]):
            print(k, '---', v)

    if args[1] == 'generate_gcn_data':
        test_data_dict = {}
        train_data_dict = {}
        for i, receipt in enumerate(receipts):
            if receipt.path in testFilePaths:
                test_data_dict[i] = data_gen.generateWordClasses(receipt)
            else:
                train_data_dict[i] = data_gen.generateWordClasses(
                    receipt, correcting=False)

        gcn.create(receipts, testFilePaths)

    if args[1] == 'create_result':
        path = './data/results/10000_synt'
        test_dict_path = os.path.join(path, 'res_dict.pth')
        res_dict = torch.load(test_dict_path)
        result = list(res_dict.items())
        res_list = []
        for i, (_, (labels, words)) in enumerate(result):
            res = extract(labels, words)
            res_list.append(res)
        calculateMetrics(test_reciepts, res_list, writeToFile=True, path=path)

    if args[1] == 'generate_word_data':
        generateSynthetic = False
        if len(args) > 2 and util.isInt(args[2]):
            generateSynthetic = True
            number = int(args[2])
        train_data_dict = {}
        test_data_dict = {}
        for i, receipt in enumerate(receipts):
            if receipt.path in testFilePaths:
                test_data_dict[i] = data_gen.generateWordClasses(receipt)
            else:
                train_data_dict[i] = data_gen.generateWordClasses(
                    receipt, correcting=False)
        train_receipts = []
        for r in receipts:
            if r.path not in testFilePaths:
                train_receipts.append(r)
        if generateSynthetic:
            synthetic = generateSintheticData(train_receipts, number)
            for i, (words, labels) in enumerate(synthetic):
                train_data_dict[i + len(receipts)] = (words, labels)
        '''
        vocab = data_gen.createVocabulary(receipts + synthetic)
        f=open('./data/synt_vocab.txt',"w+")
        for w in vocab:
            f.write(w + '\n')
        f.write('[UNK]' + '\n')
        f.write('[CLS]' + '\n')
        f.write('[SEP]' + '\n')
        f.write('[MASK]' + '\n')
        f.close()
        '''
        torch.save(train_data_dict, "./data/synt_10000_train_data_dict.pth")
        torch.save(test_data_dict, "./data/synt_test_data_dict.pth")

    if args[1] == 'oracle':
        for i, receipt in enumerate(test_reciepts):
            _ = data_gen.generateWordClasses(receipt)
        oracle(test_reciepts)

    if args[1] == 'rule_based':
        for receipt in test_reciepts:
            predict(receipt)
        calculateRuleBasedAccuracy(test_reciepts)

    if args[1] == 'create_lstm_result':
        result_jsons = os.listdir(lstmResultDir)
        result_jsons = [i for i in result_jsons if (i.endswith('.json'))]
        result_jsons.sort(key=lambda r: int(r.split('.')[0]))
        results_dicts = []
        for fileName in result_jsons:
            with open(os.path.join(lstmResultDir, fileName)) as text_json:
                text_data = json.load(text_json)
                results_dicts += [text_data]
        calculateLSTMaccuracy(test_reciepts, results_dicts)

    elif args[1] == 'create_char_data':
        generateSynthetic = True
        number = 10000
        train_data_dict = {}
        test_data_dict = {}
        for i, receipt in enumerate(receipts):
            if receipt.path in testFilePaths:
                test_data_dict[i] = data_gen.generateCharClasses(
                    receipt, includeProducts=True)
            else:
                train_data_dict[i] = data_gen.generateCharClasses(
                    receipt, includeProducts=True)
        if generateSynthetic:
            VOCAB = ascii_uppercase + digits + punctuation + " \t\n"
            for r in receipts:
                data_gen.generateWordClasses(r)
            synthetic = generateSintheticData(receipts, number)
            for i, (words, labels) in enumerate(synthetic):
                t_new_words = ''
                t_new_labels = []
                for w, l in zip(words, labels):
                    t_new_words += w.upper() + ' '
                    t_new_labels += [util.getClassInt(l)
                                     for _ in range(len(w))] + [0]
                new_words = ''
                new_labels = []
                for index in range(len(t_new_words)):
                    if t_new_words[index] in VOCAB:
                        new_words += t_new_words[index]
                        new_labels.append(t_new_labels[index])
                new_words = new_words[0:-1]
                new_labels = new_labels[0:-1]
                for j in range(1, len(new_words) - 1):
                    if new_labels[j] == 0 and new_labels[j - 1] == new_labels[j + 1]:
                        new_labels[j] = new_labels[j - 1]
                train_data_dict[len(receipts) + i] = (new_words, new_labels)
        print(train_data_dict)
        torch.save(
            train_data_dict,
            "/Users/markolazic/Desktop/sroie-task3/data/train_char_data_prod_synt10000.pth"
        )
        torch.save(
            test_data_dict,
            "/Users/markolazic/Desktop/sroie-task3/data/test_char_data_prod_synt10000.pth"
        )
Example #13
def generate_plots(params, values, plot_type="Loss", optimizer_types=None):
    # Retrieve all params
    optimization_params = params['OPTIMIZATION']
    step_params = params['STEP']
    oracle_params = params['ORACLE']
    main_params = params['MAIN']

    function = string_format(oracle_params['FUNCTION'])
    step_size = string_format(step_params['STEP_FUNCTION'])
    num_dimensions = main_params['DIMENSIONS']
    iterations = optimization_params['ITERATIONS']
    initialization_magnitude = main_params['INITIALIZATION_MAGNITUDE']
    nu = optimization_params['NU']
    queries = optimization_params['QUERIES']
    gradient_type = string_format(optimization_params['GRADIENT_TYPE'])
    function_param = oracle_params['FUNCTION_PARAM']
    condition_num = oracle_params['CONDITION_NUM']
    initial_step_magnitude = step_params['INITIAL_STEP_MAGNITUDE']

    # Begin file name
    file_name = "{} Function ".format(function)
    plot_title = "{} Function ".format(function)
    file_path = "./"

    # If creating surface plot or contour
    if plot_type == "Surface" or plot_type == "Contour":
        function_oracle = oracle(oracle_params)
        optimizer_type = string_format(params["OPTIMIZER_TYPE"])
        if step_size == "LIPSCHITZ" and optimizer_type != "NONNORMALIZE":
            step_size = "INV_SQ_ROOT"
        file_name += "{} ".format(optimizer_type)
        plot_title += "{} ".format(optimizer_type)
        assert (num_dimensions == 2 and values.shape[0] == 2), \
            "Dimensions is not equal to 2, surface plot not available."

    # Logic for creating surface plot
    if plot_type == "Surface":

        # Begin plotting
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        max_x = initialization_magnitude * 1.1
        plt.ylim(bottom=-max_x)
        plt.ylim(top=max_x)
        plt.xlim(left=-max_x)
        plt.xlim(right=max_x)

        # Calculate grid values
        x = np.linspace(-max_x - 0.01, max_x + 0.01, 200)
        y = np.linspace(-max_x - 0.01, max_x + 0.01, 200)
        X, Y = np.meshgrid(x, y)
        evaluated_grid = np.array([
            function_oracle.query_function(np.array([[x], [y]]))
            for x, y in zip(np.ravel(X), np.ravel(Y))
        ])
        Z = evaluated_grid.reshape(X.shape)

        # Evaluate function at given values
        evaluated_points = np.array([
            function_oracle.query_function(np.array([[x], [y]]))
            for x, y in values.T
        ])
        surf = ax.plot_surface(X,
                               Y,
                               Z,
                               cmap=cm.coolwarm,
                               linewidth=0,
                               antialiased=False,
                               alpha=0.4)
        lines = ax.scatter(values[0, :],
                           values[1, :],
                           evaluated_points,
                           cmap='plasma',
                           c=np.arange(0, values.shape[1]))

        ax.zaxis.set_major_locator(LinearLocator(10))
        ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
        plt.xlabel('X')
        plt.ylabel('Y')

    # Logic for creating contour plot
    elif plot_type == "Contour":

        minimum = function_oracle.query_function(np.array([[0], [0]])) - 1
        fig, ax = plt.subplots()
        max_x = initialization_magnitude * 1.1

        # Calculate grid values
        x = np.linspace(-max_x - 0.01, max_x + 0.01, 200)
        y = np.linspace(-max_x - 0.01, max_x + 0.01, 200)
        X, Y = np.meshgrid(x, y)
        evaluated = np.array([
            function_oracle.query_function(np.array([[x], [y]]))
            for x, y in zip(np.ravel(X), np.ravel(Y))
        ]) - minimum
        Z = evaluated.reshape(X.shape)

        levels = np.array([
            function_oracle.query_function(np.array([[x], [x]]))
            for x in np.linspace(0.1, max_x, 20)
        ]) - minimum

        ax.contourf(X,
                    Y,
                    Z,
                    levels,
                    cmap="viridis",
                    norm=LogNorm(),
                    extend='both')
        ax.scatter(values[0, :],
                   values[1, :],
                   cmap='plasma',
                   s=1.4,
                   alpha=1,
                   c=np.arange(0, values.shape[1]))

        for i in range(1, values.shape[1]):
            ax.annotate('', xy=(values[0, i], values[1, i]), xytext=(values[0, i - 1], values[1, i - 1]),
                        arrowprops={'arrowstyle': '-', 'color': 'k', \
                                    'lw': 1, 'alpha': 0.5},
                        va='center', ha='center')

        plt.xlabel('X')
        plt.ylabel('Y')
        plt.ylim(bottom=-max_x)
        plt.ylim(top=max_x)
        plt.xlim(left=-max_x)
        plt.xlim(right=max_x)

    else:

        average_values = np.median(values, axis=0).squeeze()
        for i in range(average_values.shape[0]):
            sd = np.sqrt(
                np.var(values[:, i, :], axis=0).squeeze() / values.shape[0])
            smooth_sd = gaussian_filter(sd, sigma=2)
            plt.plot(average_values[i, :])
            below = average_values[i, :] - smooth_sd * 1.96
            above = average_values[i, :] + smooth_sd * 1.96
            plt.fill_between(range(average_values.shape[1]),
                             below,
                             above,
                             alpha=.2)

        plt.xlabel('Iterations')
        plt.ylabel(plot_type)
        plt.ylim(top=average_values[0, 0] * 1.1)
        if plot_type == "Distance from Origin":
            plt.ylim(bottom=0)
        else:
            min_value = np.min(average_values)
            if min_value < 0:
                min_plot = min_value * 1.1
            else:
                min_plot = min_value * 0.9
            plt.ylim(bottom=min_plot)

        plt.legend(optimizer_types, loc='upper right', prop={'size': 6})

    # Add rest of file name
    file_name += "{} Step Sizes {} Gradients dims {} iters {} mag {} nu {} queries {} param {} cond num {}". \
        format(step_size, gradient_type, num_dimensions, iterations, initialization_magnitude,
               nu, queries, function_param, condition_num)
    # Update file path and plot title
    if step_params["OPTIMAL"]:
        plot_title += 'Convergence with {} Gradients \n {} Optimal Step Sizes Param {}'. \
            format(gradient_type, step_size, function_param)
        file_path += "Optimal/"
    else:
        file_name += " step mag {}".format(initial_step_magnitude)
        plot_title += 'Convergence with {} Gradients \n {} Step Sizes Param {} Init Step Mag {}'. \
            format(gradient_type, step_size,  function_param, initial_step_magnitude)
    file_path += plot_type.replace(" ", "_") + "_Plots/"

    plt.title(plot_title)

    plt.savefig(file_path + file_name + ".png", dpi=400)
    plt.clf()
Example #14
from copy import copy

from formula import Formula
from neural_net import NeuralNet

NUM_TEST_FORMULAS = 100

nn = NeuralNet()
nn.train()
testFormulas = FormulaSource()
testFormulas.gen_data(NUM_TEST_FORMULAS)
numCorrect = 0
numTotal = 0
nnC = 0
for f in testFormulas.data:
    t = TruthTable(Formula(f))

    oracle(t)
    oracleT = copy(t.table)

    baseline(t)
    baseT = copy(t.table)

    nn.solve_table(t)
    nnT = copy(t.table)
    for k in oracleT:
        numTotal += 1
        if oracleT[k] == baseT[k]:
            numCorrect += 1
        if oracleT[k] == nnT[k]:
            nnC += 1

print("Baseline: {}/{} correct".format(numCorrect, numTotal),
Example #15
except IndexError:
    pass
except TypeError:
    print("Period should be an integer")
    sys.exit(1)

# Initialise input, output and result registers
input_reg = QuantumRegister(n, "input")
output_reg = QuantumRegister(n, "output")
result = ClassicalRegister(n)

# Initialize circuit
circuit = QuantumCircuit(input_reg, output_reg, result)

# Generate oracle
(oracle_gate, period) = oracle(n, period)
print(f"Secret period - {period}")

# Generate circuit for Quantum Fourier Transform
qft_gate = QFT(num_qubits=n, do_swaps=False,
               inverse=True).to_gate(label=f"QFT_{n}")

# Apply n Hadamard gates to input bits
circuit.h(input_reg)

# Apply Oracle on input bits
circuit.append(oracle_gate, [*input_reg, *output_reg])

# Perform QFT
circuit.append(qft_gate, input_reg)
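The snippet stops after appending the QFT; a hedged continuation sketch (the Aer import path varies across Qiskit versions) would measure the input register and sample the circuit:

from qiskit import transpile
from qiskit_aer import AerSimulator

# Measure the input register into the classical result register and sample.
circuit.measure(input_reg, result)
backend = AerSimulator()
job = backend.run(transpile(circuit, backend), shots=1024)
print(job.result().get_counts())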
Example #16
def close(self):
    db = oracle()
    db.closeConnection(self.connect)
Example #17
File: tests.py Project: gluker/ljparser
from oracle import search_in_url as oracle

test_cases = [
    'http://rusisrael.livejournal.com/283608.html',
    'http://rusisrael.livejournal.com/4366076.html',
    'http://potrebitel-il.livejournal.com/22412050.html',
    'http://dolboeb.livejournal.com/2858081.html',
    'http://dolboeb.livejournal.com/2868126.html',
    'http://tourism-il.livejournal.com/1003133.html',
    'http://rabota-il.livejournal.com/9281433.html',
    'http://rabota-il.livejournal.com/9069326.html',
    'http://potrebitel-il.livejournal.com/22338502.html',
    'http://rabota-il.livejournal.com/712789.html',
    'http://ladies-il.livejournal.com/7098073.html',
    ]

for case in test_cases:
    print("Trying {}".format(case))
    try:
        stable = oracle(case)
    except Exception as ex:
        print("Oracle failed on {} with {}".format(case, ex))
        break
    try:
        testing = parse_comments(case)
    except Exception as ex:
        print("Test failed on {} with {}".format(case, ex))
        break
    assert testing == stable, "Results don't match"

Example #18
def __init__(self):
    db = oracle()
    self.connect = db.connection()
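Examples #16 and #18 pair a `connection()` call with a later `closeConnection()`; a small context-manager sketch over the same assumed `oracle` wrapper ties the two together so the connection is always released:

from contextlib import contextmanager

@contextmanager
def oracle_connection():
    # Open a connection through the assumed oracle wrapper, yield it,
    # and close it even if the body raises.
    db = oracle()
    conn = db.connection()
    try:
        yield conn
    finally:
        db.closeConnection(conn)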