Example #1
    def update_properties(self):
        """
        Updates the EJBCA properties files.
        :return: None
        """
        file_web = self.get_web_prop_file()
        file_ins = self.get_install_prop_file()

        prop_web = util.merge(self.WEB_PROPERTIES, self.web_props)
        prop_ins = util.merge(self.INSTALL_PROPERTIES, self.install_props)

        prop_hdr = '#\n'
        prop_hdr += '# Config file generated: %s\n' % (
            datetime.now().strftime("%Y-%m-%d %H:%M"))
        prop_hdr += '#\n'

        file_web_hnd = None
        file_ins_hnd = None
        try:
            file_web_hnd, file_web_backup = util.safe_create_with_backup(
                file_web, 'w', 0o644)
            file_ins_hnd, file_ins_backup = util.safe_create_with_backup(
                file_ins, 'w', 0o644)

            file_web_hnd.write(prop_hdr + self.properties_to_string(prop_web) +
                               "\n\n")
            file_ins_hnd.write(prop_hdr + self.properties_to_string(prop_ins) +
                               "\n\n")
        finally:
            if file_web_hnd is not None:
                file_web_hnd.close()
            if file_ins_hnd is not None:
                file_ins_hnd.close()
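
Here util.merge presumably overlays the instance overrides onto the class-level defaults. A minimal sketch of that assumed behavior (not this project's actual helper):

def merge(defaults, overrides):
    # Shallow dict merge: start from the defaults, let overrides win.
    merged = dict(defaults)
    merged.update(overrides or {})
    return merged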
Example #2
    def build_vocab(self, file_contents, write=True):
        if self.split_kind == "bpe" and self.split_file:
            self.token_counts = self.build_bpe_full_file(file_contents)
            top_words = sorted(self.token_counts.items(),
                               key=lambda i: i[1],
                               reverse=True)
            top_words = [t[0] for t in top_words]  # if t[1] >= self.vocab_cutoff]
        elif self.split_kind == "chars":
            top_words = [chr(c) for c in range(33, 127)]
            self.token_counts = {c: 1000 for c in top_words}
        else:
            self.token_counts = {}
            self.w2i = None
            for file in file_contents:
                for label in re.split(r'\s+', file):
                    subtokens = ([label] if self.split_kind != "heuristic"
                                 else util.split_subtokens(label))
                    for sub in subtokens:
                        util.merge(self.token_counts, sub, 1)

            # Ensure some key tokens make it into the vocabulary
            if "<unk>" not in self.token_counts:
                self.token_counts["<unk>"] = max(
                    self.vocab_cutoff,
                    sum([
                        c for c in self.token_counts.values()
                        if c < self.vocab_cutoff
                    ]))
            for ix, s in enumerate(self.special_tokens):
                if s not in self.token_counts or self.split_kind == "bpe":
                    self.token_counts[s] = int(1e9) + ix

            if self.split_kind == "bpe":
                print("Computing BPE on", len(self.token_counts), "tokens")
                self.token_counts = self.build_bpe(self.token_counts)
                top_words = sorted(self.token_counts.items(),
                                   key=lambda i: i[1],
                                   reverse=True)
                top_words = [t[0] for t in top_words]  # if t[1] >= self.vocab_cutoff]
            else:
                # Sort and discard tokens too infrequent to keep
                top_words = sorted(self.token_counts.items(),
                                   key=lambda t: t[1],
                                   reverse=True)
                top_words = [
                    t[0] for t in top_words if t[1] >= self.vocab_cutoff
                ]

        # Build the vocabulary
        self.w2i = {w: i for i, w in enumerate(top_words)}
        self.i2w = {i: w for w, i in self.w2i.items()}

        if write:
            self.save_vocab(self.out_vocab_path)
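
The util.merge(self.token_counts, sub, 1) call above has a counter-style signature rather than a dict-merge one. A minimal sketch of what this project's variant presumably does (assumed, not taken from its util module):

def merge(counts, key, amount):
    # Treat the dict as a counter: add `amount` to the tally for `key`.
    counts[key] = counts.get(key, 0) + amount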
Example #3
    def merge(self, cond, c1, c2):
        self.globals = util.merge(cond, c1.globals, c2.globals)
        self.locals = util.merge(cond, c1.locals, c2.locals)
        self.references = util.merge(cond, c1.references, c2.references)
        self.accessed = c1.accessed.union(c2.accessed)

        assert self.path_condition == c1.path_condition
        assert self.path_condition == c2.path_condition

        assert self.side_conditions == c1.side_conditions
        assert self.side_conditions == c2.side_conditions
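
In this symbolic-execution context, merge(cond, d1, d2) presumably builds a per-key guarded choice between the two branch states. A hedged sketch; `ite` is a stand-in for whatever if-then-else constructor (e.g. a solver's If) the project actually uses:

def ite(cond, x, y):
    # Concrete fallback; a symbolic engine would emit an If-term here.
    return x if cond else y

def merge(cond, d1, d2):
    # For every key seen in either branch, keep the value selected by `cond`.
    return {k: ite(cond, d1.get(k), d2.get(k)) for k in set(d1) | set(d2)}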
Example #4
def stitch_blocks(blocks, model, size):
    # save original size
    orig_size = blocks[0].shape

    # cast to uint8 if necessary
    # in either case scale down images
    if blocks[0].dtype == 'uint16':
        A, B, C, D = map(lambda x: resize(x, size),
                         map(uint16_to_uint8, blocks))
    else:
        A, B, C, D = map(lambda x: resize(x, size), blocks)

    start = time()
    # get transforms
    t_AB = est_transform(A, B, model, orig_size)
    t_CD = est_transform(C, D, model, orig_size)

    # for the vertical component take the mean of AC and BD
    t_AC = est_transform(A, C, model, orig_size)
    t_BD = est_transform(B, D, model, orig_size)
    t_v = (t_AC + t_BD) / 2

    # use original images from here on
    A, B, C, D = blocks

    # translation net
    if t_AB.size == 2:
        # stitch
        im_ab = merge(A, B, *t_AB)
        im_cd = merge(C, D, *t_CD)
        final = merge(im_ab, im_cd, *t_v)

        # add theta component to transform
        t_AB = t_AB.tolist() + [0]
        t_CD = t_CD.tolist() + [0]
        t_v = t_v.tolist() + [0]
        return (final, [t_AB, t_CD, t_v], time() - start)

    # regular hnet
    shape = A.shape
    h_AB = points_to_affine(shape, t_AB)
    h_CD = points_to_affine(shape, t_CD)
    h_AC = points_to_affine(shape, t_AC)
    h_BD = points_to_affine(shape, t_BD)
    h_v = (h_AC + h_BD) / 2

    # stitch
    im_ab, t1 = warp_merge(A, B, h_AB)
    im_cd, t2 = warp_merge(C, D, h_CD)
    final, t3 = warp_merge(im_ab, im_cd, h_v)

    # affine transforms kept as np arrays
    return (final, [t1, t2, t3], time() - start)
Example #5
def merge_sort(arr):
    l = len(arr)
    # merge subarrays of sizes doubling from 1 to array length
    cur_size = 1
    while cur_size < l:
        lo = 0
        while lo < l - 1:  # for every nonoverlapping window of current size
            mid = lo + cur_size
            hi = min(mid + cur_size, l)  # not going out of bounds
            merge(arr, lo, mid, hi)
            lo += 2 * cur_size  # advance by full size
        cur_size *= 2  # double the current size
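
The in-place merge(arr, lo, mid, hi) helper that this bottom-up sort (and the recursive merge_sort_helper in Example #45) relies on is not shown; a standard implementation consistent with those call sites:

def merge(arr, lo, mid, hi):
    # Merge the two sorted runs arr[lo:mid] and arr[mid:hi] in place.
    left, right = arr[lo:mid], arr[mid:hi]
    i = j = 0
    for k in range(lo, hi):
        if j >= len(right) or (i < len(left) and left[i] <= right[j]):
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1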
Example #6
def get_train_dataset(args,
                      target_data_dir,
                      target_dataset,
                      tokenizer,
                      split_name,
                      source_data_dir=None,
                      source_dataset=None):
    dataset_dict_source = None
    dataset_dict_target = None
    data_encodings_source = None
    source_dataset_name = 'individual'
    target_dataset_name = 'individual'
    if source_data_dir is not None and source_dataset is not None:
        datasets = source_dataset.split(',')
        label = 0
        for dataset in datasets:
            source_dataset_name += f'_{dataset}'
            dataset_dict_curr = util.read_squad(f'{source_data_dir}/{dataset}',
                                                label=label)
            dataset_dict_source = util.merge(dataset_dict_source,
                                             dataset_dict_curr)
            label += 1
        data_encodings_source = read_and_process(args, tokenizer,
                                                 dataset_dict_source,
                                                 source_data_dir,
                                                 source_dataset_name,
                                                 split_name)
    label = 3
    datasets = target_dataset.split(',')
    for dataset in datasets:
        target_dataset_name += f'_{dataset}'
        # dataset_dict_curr = util.read_squad(f'{target_data_dir}/{dataset}', label=1)
        dataset_dict_curr = xuran_perform_eda.perform_eda(
            f'{target_data_dir}/{dataset}',
            dataset,
            train_fraction=1,
            label=label)
        dataset_dict_target = util.merge(dataset_dict_target,
                                         dataset_dict_curr)
        label += 1
    data_encodings_target = read_and_process(args, tokenizer,
                                             dataset_dict_target,
                                             target_data_dir,
                                             target_dataset_name, split_name)
    dataset_dict = util.merge(dataset_dict_source, dataset_dict_target)
    data_encodings = util.merge(data_encodings_source, data_encodings_target)
    return util.QADomainDataset(data_encodings,
                                train=(split_name == 'train')), dataset_dict
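
The util.merge used by these dataset loaders (here and in Examples #20, #22, #35 and #37) evidently tolerates a None accumulator on the first pass and then concatenates per-key lists. A sketch under that assumption:

def merge(acc, curr):
    # First chunk: nothing to merge with yet.
    if acc is None:
        return curr
    # Afterwards, extend each per-key list (SQuAD-style dicts of lists).
    return {key: acc[key] + curr[key] for key in acc}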
Example #7
def sobel_filter(image):
    kernelx = np.array(([-1, 0, 1], [-2, 0, 2], [-1, 0, 1]))
    kernely = np.array(([-1, -2, -1], [0, 0, 0], [1, 2, 1]))

    if len(image.shape) == 2:
        gx = conv(image, kernelx)
        gy = conv(image, kernely)

        output = abs(gx) + abs(gy)  # np.sqrt(gx ** 2 + gy ** 2) slower
        output[np.where(output > MAX_PIXEL)] = MAX_PIXEL

        return output.astype(np.uint8)
    else:
        r, g, b = util.split(image)
        rx, ry = conv(r, kernelx), conv(r, kernely)
        gx, gy = conv(g, kernelx), conv(g, kernely)
        bx, by = conv(b, kernelx), conv(b, kernely)

        R = abs(rx) + abs(ry)
        G = abs(gx) + abs(gy)
        B = abs(bx) + abs(by)

        output = util.merge(R, G, B)
        output[np.where(output > MAX_PIXEL)] = MAX_PIXEL

        return output.astype(np.uint8)
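
util.split and util.merge here (shared with the channel-wise filters in Examples #15-#17, #21, #27, #29 and #39) look like simple per-channel helpers. A minimal NumPy sketch of the assumed pair:

import numpy as np

def split(image):
    # Separate an (H, W, 3) image into its three channel planes.
    return image[..., 0], image[..., 1], image[..., 2]

def merge(r, g, b):
    # Reassemble three channel planes into an (H, W, 3) image.
    return np.stack([r, g, b], axis=-1)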
Example #8
def test_merge_into():
    dst = {'k4': {'v41': 'vv41'}}
    src1 = {'k1': 'v1'}
    src2 = {'k2': 'v2'}
    src3 = {'k3': {'v3': 'vv3'}}
    src4 = {'k4': {'v4': 'vv4'}}
    result = merge_into(dst, src1, src2, src3, src4)
    assert result is dst
    assert result['k1'] == 'v1'
    assert result['k2'] == 'v2'
    assert isinstance(result['k3'], dict)
    assert result['k3']['v3'] == 'vv3'
    assert result['k4']['v41'] == 'vv41'
    assert result['k4']['v4'] == 'vv4'

    result = merge(src1, src2, src3)
    assert result is not dst
    assert result is not src1
    assert result is not src2
    assert result is not src3
    assert result['k1'] == 'v1'
    assert result['k2'] == 'v2'
    assert isinstance(result['k3'], dict)
    assert result['k3']['v3'] == 'vv3'
    assert 'k1' in src1
    assert 'k1' not in src2
    assert 'k1' not in src3
    assert 'k2' not in src1
    assert 'k2' in src2
    assert 'k2' not in src3
    assert 'k3' not in src1
    assert 'k3' not in src2
    assert 'k3' in src3
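
The assertions above pin the semantics down fairly tightly: merge_into recursively folds its sources into dst and returns it, while merge builds a fresh dict and leaves every source untouched. One implementation consistent with the test (a sketch, not necessarily the project's):

def merge_into(dst, *srcs):
    for src in srcs:
        for key, value in src.items():
            if isinstance(value, dict) and isinstance(dst.get(key), dict):
                merge_into(dst[key], value)  # recurse into nested dicts
            else:
                dst[key] = value
    return dst

def merge(*srcs):
    # Non-mutating variant: fold everything into a fresh dict.
    return merge_into({}, *srcs)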
Example #9
async def run(memory, phases):
    async with trio.open_nursery() as nursery:
        # set up channels
        a_send, a_recv = trio.open_memory_channel(0)
        b_send, b_recv = trio.open_memory_channel(0)
        # need a relay channel so we can tell when a needs input
        r_send, r_recv = trio.open_memory_channel(0)
        nursery.start_soon(intcode.process, memory, a_recv, r_send)
        c_send, c_recv = trio.open_memory_channel(0)
        nursery.start_soon(intcode.process, memory, b_recv, c_send, False)
        d_send, d_recv = trio.open_memory_channel(0)
        nursery.start_soon(intcode.process, memory, c_recv, d_send, False)
        e_send, e_recv = trio.open_memory_channel(0)
        nursery.start_soon(intcode.process, memory, d_recv, e_send, False)
        f_send, f_recv = trio.open_memory_channel(0)
        nursery.start_soon(intcode.process, memory, e_recv, f_send, False)
        # input phases
        await a_send.send(phases[0])
        await b_send.send(phases[1])
        await c_send.send(phases[2])
        await d_send.send(phases[3])
        await e_send.send(phases[4])

        sig = 0
        async with a_send, f_recv:
            async for out, ch in merge(nursery, r_recv, f_recv):
                if ch == r_recv:
                    if out == intcode.Command.INPUT:
                        await a_send.send(sig)
                    else:
                        await b_send.send(out)
                else:
                    sig = out

            return sig
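
merge(nursery, *channels) here is an async fan-in: it yields (item, source_channel) pairs as soon as any input produces one, which is how the loop distinguishes relay output from the final amplifier. A plausible trio sketch (assumed, not from this solution's code):

import trio

async def merge(nursery, *channels):
    send, recv = trio.open_memory_channel(0)

    async def pump(channel, send_clone):
        # Forward every item, tagged with the channel it came from.
        async with send_clone:
            async for item in channel:
                await send_clone.send((item, channel))

    async with send:
        for channel in channels:
            nursery.start_soon(pump, channel, send.clone())
    async for pair in recv:  # ends once every pump has closed its clone
        yield pair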
Example #10
def get_all():
    dataset = pd.read_csv("data\\Students_Performance.csv")
    dataset = pd.get_dummies(dataset,
                             columns=[
                                 "gender", "race/ethnicity",
                                 "parental level of education", "lunch",
                                 "test preparation course"
                             ],
                             prefix=[
                                 "gender", "race/ethnicity", "parental",
                                 "lunch", "preparation"
                             ])
    #dataset['final_score'] = dataset['math score'] + dataset['reading score'] + dataset['writing score']
    #dataset['final_score'] = dataset.final_score.map(lambda x: ceil(x))
    #dataset['pass'] = dataset.final_score.map(lambda x: 1 if x/3 > 70 else 0)
    #dataset['index'] = range(0, len(dataset))

    #del dataset['final_score']
    #del dataset['math score']
    del dataset['reading score']
    del dataset['writing score']

    #dataset_good = dataset[dataset['pass'] == 1]
    #dataset_bad = dataset[dataset['pass'] == 0]
    dataset_good = dataset[dataset['math score'] > 80]
    dataset_bad = dataset[dataset['math score'] <= 80]
    #del dataset_good['math score']
    #del dataset_bad['math score']

    print(dataset_good.columns.tolist())
    print(len(dataset_good.columns.tolist()))
    both_sides = util.merge(dataset_good, dataset_bad, 18)

    return dataset, both_sides
Example #12
def remove_singleton_indices(dag):
    """ Remove unnecessary index operations from a dag

    Merges two operations that look like this:
    {(out,) : {'fn': op,  'args': args},
     out    : {'fn': index, 'args': ((out,), 0)}}
    into just one that looks like this:
    {out    : {'fn': op,  'args': args}}
    i.e. it unpacks singleton tuples.

    Reverses:
        insert_single_indices
    """

    # dict of shortcuts
    quick_outs = {out: dag[(out,)] for out in dag if (out,) in dag}

    def badkey(out):
        return (out in quick_outs
             or (    isinstance(out, tuple)
                 and len(out) == 1
                 and out[0] in quick_outs))

    clean_dag = {out: dag[out] for out in dag if not badkey(out)}

    return merge(clean_dag, quick_outs)
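
A tiny illustration of the docstring's transformation; merge here is presumably a toolz-style dict union, and op/index below are arbitrary placeholders for this module's callables:

op, index = max, min  # placeholders just for the illustration

dag = {('a',): {'fn': op, 'args': (1, 2)},
       'a': {'fn': index, 'args': (('a',), 0)}}
assert remove_singleton_indices(dag) == {'a': {'fn': op, 'args': (1, 2)}}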
Example #13
    def __getResponse(self):
        inp_stream = list()
        timeout = time() + 0.1
        while self.inWaiting() == 0:
            now = time()
            if now > timeout and self.inWaiting() == 0:
                return False
        timeout = 0
        while self.read() != '~':
            if timeout == 100:
                return False
            timeout += 1

        inp_stream.append(0x7e)
        MSB = ord(self.read())
        LSB = ord(self.read())

        length = merge(MSB, LSB)
        inp_stream.append(MSB)
        inp_stream.append(LSB)
        for i in xrange(length + 1):
            inp_stream.append(ord(self.read()))

        #debug
        #print "Xbee.__getResponse() = {}".format(inp_stream)

        return inp_stream
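
In the XBee examples, merge(MSB, LSB) evidently packs the two frame-length bytes into one 16-bit value. The conventional implementation (assumed here):

def merge(msb, lsb):
    # Combine high and low bytes into a 16-bit length.
    return (msb << 8) | lsb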
Example #14
    def train(self, config):
        if config.is_train:
            input_setup(self.sess, config)
        else:
            nx, ny = input_setup(self.sess, config)  # patch grid size used when merging

        if config.is_train:
            data_path = os.path.join("./", config.checkpoint_dir, "train.h5")
        else:
            data_path = os.path.join("./", config.checkpoint_dir, "test.h5")

        train_data, train_label = read_data(data_path)

        self.train_op = tf.train.GradientDescentOptimizer(
            config.learning_rate).minimize(self.loss)

        tf.global_variables_initializer().run()

        counter = 0  # step counter used for logging and checkpointing
        start_time = time.time()
        # load an existing checkpoint if one is available
        if self.load(config.checkpoint_dir):
            print("[*] Load SUCCESS")
        else:
            print("[!] Load Failed")
        if config.is_train:
            print("Train....")
            batch_index = len(train_data) // config.batch_size
            for ep in range(config.epoch):
                for idx in range(batch_index):
                    batch_images = train_data[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]
                    batch_labels = train_label[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                    _, err = self.sess.run([self.train_op, self.loss], {
                        self.images: batch_images,
                        self.labels: batch_labels
                    })
                    counter += 1

                    if counter % 10 == 0:
                        print(
                            "Epoch: %2d,step: %2d,time: %4.4f,loss: %.8f" %
                            ((ep + 1), counter, time.time() - start_time, err))
                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)
        else:
            print("Test...")
            result = self.pred.eval({
                self.images: train_data,
                self.labels: train_label
            })
            result = merge(result, [nx, ny])
            result = result.squeeze()  # squeeze(): drop singleton dimensions
            image_path = os.path.join(os.getcwd(), config.sample_dir,
                                      "text_image.png")
            imsave(image_path, result)
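
merge(result, [nx, ny]) stitches the predicted patches back into an nx-by-ny grid, a common helper in SRCNN-style code. A sketch of the assumed layout (the real helper may differ):

import numpy as np

def merge(images, size):
    # images: (N, h, w, c) patches; size: [rows, cols] of the patch grid.
    h, w = images.shape[1], images.shape[2]
    out = np.zeros((h * size[0], w * size[1], images.shape[3]))
    for idx, image in enumerate(images):
        i = idx % size[1]   # column
        j = idx // size[1]  # row
        out[j * h:j * h + h, i * w:i * w + w, :] = image
    return out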
Example #15
def contraharmonic_mean_filter_rgb(image, filter_size, Q=1):
    r, g, b = util.split(image)
    R = contraharmonic_mean_filter(r, filter_size, Q)
    G = contraharmonic_mean_filter(g, filter_size, Q)
    B = contraharmonic_mean_filter(b, filter_size, Q)
    output = util.merge(R, G, B)

    return output.astype(np.uint8)
Example #16
def harmonic_mean_filter_rgb(image, filter_size):
    r, g, b = util.split(image)
    R = harmonic_mean_filter(r, filter_size)
    G = harmonic_mean_filter(g, filter_size)
    B = harmonic_mean_filter(b, filter_size)
    output = util.merge(R, G, B)

    return output.astype(np.uint8)
Example #17
def median_filter_rgb(image, filter_size):
    r, g, b = util.split(image)
    R = median_filter(r, filter_size)
    G = median_filter(g, filter_size)
    B = median_filter(b, filter_size)
    output = util.merge(R, G, B)

    return output.astype(np.uint8)
Example #18
    def test03PopulateProjects(self):
        """002-03 Populate test projects"""
        r = self.client.post('/project/',
                             merge(self.PUBLIC_PROJECT_DATA,
                                   self.PUBLIC_PROJECT_VOLATILE,
                                   account=self.account.id,
                                   regions=[self.AWS_REGION]))
        self.assertCode(r, 201)
        self.assertIsNotNone(self.project_public)

        r = self.client.post('/project/',
                             merge(self.VPC_PROJECT_DATA,
                                   self.VPC_PROJECT_VOLATILE,
                                   account=self.account.id,
                                   regions=[self.AWS_REGION]))

        self.assertCode(r, 201)
        self.assertIsNotNone(self.project_vpc)
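
These API tests (and Examples #24-#25) call merge with positional dicts plus keyword overrides. A sketch consistent with that calling convention:

def merge(*dicts, **overrides):
    # Later positional dicts win over earlier ones; keywords win over all.
    merged = {}
    for d in dicts:
        merged.update(d)
    merged.update(overrides)
    return merged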
Example #19
    def preprocessTests(self):
        for name in self.tests:
            self.tests[name] = util.merge(self.default_test, self.tests[name])

        for name in self.tests:
            test = self.tests[name]
            if "base" in test:
                print("Derive %s from %s" % (name, test["base"]))
                self.tests[name] = util.merge(
                    self.tests[self.tests[name]["base"]], self.tests[name])
                print(self.tests[self.tests[name]["base"]])

            if test["reference"] == None:
                raise RuntimeError(
                    "Reference genome path must be defined for data set")

            if not os.path.exists(test["reference"]):
                if os.path.exists(self.config["reference_directory"] + "/" +
                                  test["reference"]):
                    test["reference"] = os.path.abspath(
                        self.config["reference_directory"] + "/" +
                        test["reference"])
            else:
                test["reference"] = os.path.abspath(test["reference"])

            if not os.path.exists(test["reference"]):
                raise RuntimeError("Reference '%s' not found for test '%s'" %
                                   (test["reference"], name))

            if test["title"] == None:
                test["title"] = name

            if test["order"] == None:
                test["order"] = name

            if test["simulator"] == None:
                if test["platform"] == "ion_torrent":
                    test["simulator"] = "dwgsim"
                else:
                    test["simulator"] = "mason"

            test["name"] = name
            test["dir"] = os.path.abspath(self.config["test_directory"] + "/" +
                                          name)
Example #20
def get_dataset(args, datasets, data_dir, tokenizer, split_name):
    datasets = datasets.split(',')
    dataset_dict = None
    dataset_name = ''
    for dataset in datasets:
        dataset_name += f'_{dataset}'
        dataset_dict_curr = util.read_squad(f'{data_dir}/{dataset}')
        dataset_dict = util.merge(dataset_dict, dataset_dict_curr)
    data_encodings = read_and_process(args, tokenizer, dataset_dict, data_dir, dataset_name, split_name)
    return util.QADataset(data_encodings, train=(split_name == 'train')), dataset_dict
Example #21
def conv_filter(image, kernel):
    if len(image.shape) == 2:
        return conv(image, kernel)
    else:
        r, g, b = util.split(image)
        R = conv(r, kernel)
        G = conv(g, kernel)
        B = conv(b, kernel)
        output = util.merge(R, G, B)

        return output.astype(np.uint8)
Example #22
def get_dataset_eda_revised(args, datasets, data_dir, tokenizer, split_name, train_fraction):
    datasets = datasets.split(',')
    dataset_dict = None
    dataset_name = ''
    for dataset in datasets:
        dataset_name += f'_{dataset}'
        # dataset_dict_curr = util.read_squad(f'{data_dir}/{dataset}')
        dataset_dict_curr = xuran_perform_eda.perform_eda(f'{data_dir}/{dataset}', dataset, train_fraction)
        dataset_dict = util.merge(dataset_dict, dataset_dict_curr)
    data_encodings = read_and_process(args, tokenizer, dataset_dict, data_dir, dataset_name, split_name)
    return util.QADataset(data_encodings, train=(split_name == 'train')), dataset_dict
Example #23
    def load_tests(self, conf):
        tests = {}

        for name, info in conf.iteritems():
            if name in ['output_dir', 'env', 'before', 'after']:
                continue

            info = merge(info, conf)
            tests[name] = Test(name, info)

        return tests
Example #24
    def user_data(self, data=None, user=None):
        "Fill in the user data in the template data."
        # Avoid the mutable-default pitfall; note the incoming `user`
        # argument is unused and is overwritten by the lookup below.
        if data is None:
            data = {}
        user = users.get_current_user()
        if not user:
            return data
        user_data = {
            'email': user,
            'admin': users.is_current_user_admin(),
            'logout': users.create_logout_url("/"),
        }
        return util.merge(data, user=user_data)
Example #25
    def test02PopulateAccount(self):
        """002-02 Populate test account"""
        r = self.client.post('/account/',
                             merge(self.ACCOUNT_DATA,
                                   domain=self.domain.id,
                                   access_key=self.AWS_ACCESS_KEY_ID,
                                   secret_key=self.AWS_SECRET_ACCESS_KEY))

        self.assertCode(r, 201)
        self.assertIsNotNone(self.account)

        r = self.client.post('/account/',
                             merge(self.DUMMY_ACCOUNT_DATA,
                                   domain=self.dummy_domain.id,
                                   access_key='4298374984',
                                   secret_key='5432543534',
                                   active=False))

        self.assertCode(r, 201)
        self.assertIsNotNone(self.dummy_account)
Example #26
def get_all():
    dataset = prepare_titanic_data("data\\Titanic.csv")

    dataset_good = dataset[dataset['survived'] == 1]
    dataset_bad = dataset[dataset['survived'] == 0]
    del dataset_good['survived']
    del dataset_bad['survived']

    both_sides = util.merge(dataset_good, dataset_bad, 9)
    #print(both_sides.columns.tolist())

    return dataset, both_sides
Example #27
def gaussian_filter(image, filter_size, sigma):
    kernel = gaussian_kernel(filter_size, sigma)
    if len(image.shape) == 2:
        return conv(image, kernel)
    else:
        r, g, b = util.split(image)
        R = conv(r, kernel)
        G = conv(g, kernel)
        B = conv(b, kernel)
        output = util.merge(R, G, B)

        return output.astype(np.uint8)
Example #28
	def train(self,config):

		if config.train:
			input_setup(self.sess,config)
			data = os.path.join(os.getcwd(),'checkpoint\\train.h5')
			print(data)
			 
		else:
			x,y = input_setup(self.sess,config)
			data = os.path.join(os.getcwd(),'checkpoint\\test.h5')
				
		train_data,train_label = read_data(data)	
		#print(train_label.shape)

		self.optimizer = tf.train.AdamOptimizer(config.learning_rate).minimize(self.loss)

		self.sess.run(tf.global_variables_initializer())

		counter = 0

		if self.load(self.chkpt):
			print('Load Success')
		else:
			print('Load fail')

		if config.train:		
			for i in range(config.epoch):
				batch_idx = len(train_data) // config.batch_size
				for idx in range(0, batch_idx):
					batch_data = train_data[idx * config.batch_size:(idx + 1) * config.batch_size]
					batch_label = train_label[idx * config.batch_size:(idx + 1) * config.batch_size]
					#print(train_data.shape)

					counter += 1

					_, out = self.sess.run([self.optimizer, self.loss], feed_dict={self.images: batch_data, self.labels: batch_label})

					if counter % 10 == 0:
						print("epoch:%d, step:%d, loss:%.8f" % (i, counter, out))

					if counter % 100 == 0:
						print(os.path.join(self.chkpt, "model_save"))
						self.saver.save(self.sess, os.path.join(self.chkpt, "model_save"), global_step=counter)

		else:
			out = self.pred.eval({self.images:train_data,self.labels:train_label })

			image = merge(out, [x,y], config.channel)
			
			image_path = os.path.join(os.getcwd(), config.result_dir)
			image_path = os.path.join(image_path, "test_image.png")
			#Image.open(image).show()
			save_img(image,image_path,config)
Example #29
def mean_filter(image, filter_size):
    kernel = np.ones((filter_size, filter_size)) * (1.0 / (filter_size**2))
    if len(image.shape) == 2:
        return conv(image, kernel)
    else:
        r, g, b = util.split(image)
        R = conv(r, kernel)
        G = conv(g, kernel)
        B = conv(b, kernel)
        output = util.merge(R, G, B)

        return output.astype(np.uint8)
Example #30
def find_scenarios(
        scenario_pattern: str) -> Sequence[Tuple[str, dict, dict, dict]]:
    scenarios: MutableSequence[Tuple[str, dict, dict, dict]] = []
    for dir_name, subdir_list, file_list in os.walk('./tests/scenarios'):
        for file_name in file_list:
            full_file_name = os.path.join(dir_name, file_name)
            if not re.match(scenario_pattern, full_file_name):
                continue

            file_description = full_file_name[0:-5]
            try:
                with open(full_file_name, 'r') as f:
                    if file_name.endswith('.yaml'):
                        content: dict = yaml.safe_load(f)
                    elif file_name.endswith('.json'):
                        content: dict = json.loads(f.read())
                    else:
                        raise Exception(
                            f"unsupported scenarios file: {full_file_name}")

                default_resource: dict = content.get("default_resource", {})
                default_expected: dict = content.get("default_expected", {})
                file_mock: dict = content.get("mock", {})
                for scenario in content["scenarios"]:
                    file_mock_copy: dict = deepcopy(file_mock)
                    scenarios.append(
                        (file_description + '_' + scenario.get("description", "unknown"),
                         util.merge(file_mock_copy, scenario["mock"]) if "mock" in scenario else file_mock_copy,
                         util.merge(deepcopy(default_resource), scenario.get("resource", {})),
                         util.merge(deepcopy(default_expected), scenario["expected"])))
            except Exception as e:
                raise Exception(
                    f"failed creating scenario from '{full_file_name}'") from e
    return scenarios
Example #31
    def _do_expt(self, pack):
        src = pack.src
        value = pack.key
        row = self.expt.get(src)
        # print('expt %d %f' % (src, value))
        if row is None:
            self.expt[src] = [value]
        else:
            self.expt[src].append(value)
        # update server clocks
        self.clocks = util.merge(self.clocks, pack.vc)
        print('server merge %s' % str(self.clocks.inner))
        self.scan_query()
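
Since pack.vc looks like a vector clock, util.merge here is presumably the conventional element-wise join (per-node maximum). A sketch over plain dicts (names assumed; this project's clocks wrap an .inner mapping):

def merge(vc1, vc2):
    # Vector-clock join: take the per-node maximum of the two clocks.
    nodes = set(vc1) | set(vc2)
    return {n: max(vc1.get(n, 0), vc2.get(n, 0)) for n in nodes}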
Example #32
def insert_single_indices(dag):
    """ Add in all index operations from tuples, even singletons

    Reverses:
        remove_singleton_indices
    """

    return merge(*[{out: dag[out]}
                   if isinstance(out, tuple) or dag[out]['fn'] == index
                   else {(out,): dag[out],
                         out: {'fn': index, 'args': ((out,), 0)}}
                   for out in dag])
Example #33
def get_all():
    dataset = prepare_credit_data("data\\UCI_Credit_Card.csv")

    dataset_good = dataset[dataset['default.payment.next.month'] == 0]
    dataset_bad = dataset[dataset['default.payment.next.month'] == 1]
    del dataset_good['default.payment.next.month']
    del dataset_bad['default.payment.next.month']

    #print(dataset_good.columns.tolist())
    #print(len(dataset_good.columns.tolist()))
    both_sides = util.merge(dataset_good, dataset_bad, 23)

    return dataset, both_sides
Example #34
def warp_merge(im1, im2, h):
    '''
    Grab the translation and rotation components and apply them.
    This is more reliable than warpAffine because no parts of the image are lost
    '''
    x, y = h[0:2, 2]
    # x = x/128 * im1.shape[1]
    # y = y/128 * im1.shape[0]
    th1 = math.atan(-h[0, 1] / h[0, 0]) * (180 / math.pi)  # atan(-b/a)
    th2 = math.atan(h[1, 0] / h[1, 1]) * (180 / math.pi)  # atan(c/d)
    th = (th1 + th2) / 2
    warped = rotate_bound(im2, th)
    return merge(im1, warped, x, y), [x, y, th]
Example #35
def get_dataset(args, dataset, data_dir, tokenizer, split_name, dataset_idx=None):
    if type(dataset) is list:
        output_name = ''
        dataset_dict = None
        for dataset_name in dataset:
            output_name += f'_{dataset_name}'
            dataset_dict_curr = util.read_squad(f'{data_dir}/{dataset_name}')
            dataset_dict = util.merge(dataset_dict, dataset_dict_curr)
    else:
        output_name = f'_{dataset}'
        dataset_dict = util.read_squad(f'{data_dir}/{dataset}')
    data_encodings = read_and_process(args, tokenizer, dataset_dict, data_dir, output_name, split_name, dataset_idx)
    return util.QADataset(data_encodings, train=(split_name == 'train')), dataset_dict
Example #37
def get_dataset(args, datasets, data_dir, tokenizer, split_name):
    datasets = datasets.split(',')
    dataset_dict = None
    dataset_name = 'individual'
    label = 3 if 'val' in split_name else 0
    for dataset in datasets:
        dataset_name += f'_{dataset}'
        dataset_dict_curr = util.read_squad(f'{data_dir}/{dataset}',
                                            label=label)
        dataset_dict = util.merge(dataset_dict, dataset_dict_curr)
        label += 1
    data_encodings = read_and_process(args, tokenizer, dataset_dict, data_dir,
                                      dataset_name, split_name)
    return util.QADomainDataset(data_encodings,
                                train=(split_name == 'train')), dataset_dict
Example #38
        def f(s):
            args2 = reify(args, s)
            subsets = [self.index[key] for key in enumerate(args)
                       if key in self.index]
            if subsets:  # we are able to reduce the pool early
                facts = intersection(*sorted(subsets, key=len))
            else:
                facts = self.facts
            varinds = [i for i, arg in enumerate(args2) if isvar(arg)]
            valinds = [i for i, arg in enumerate(args2) if not isvar(arg)]
            vars = index(args2, varinds)
            vals = index(args2, valinds)
            assert not any(var in s for var in vars)

            # Iterate the reduced pool computed above, not the full fact set.
            return (merge(dict(zip(vars, index(fact, varinds))), s)
                    for fact in facts if vals == index(fact, valinds))
Example #39
def laplacian_filter(image, diagonal=True):
    if diagonal:
        kernel = np.array(([-1, -1, -1], [-1, 8, -1], [-1, -1, -1]))
    else:
        kernel = np.array(([0, 1, 0], [1, -4, 1], [0, 1, 0]))

    if len(image.shape) == 2:
        return conv(image, kernel)
    else:
        r, g, b = util.split(image)
        R = conv(r, kernel)
        G = conv(g, kernel)
        B = conv(b, kernel)
        output = util.merge(R, G, B)

        return output.astype(np.uint8)
Example #40
def generate(
        corrections_map,
        db_path,
        raf_path,
        fontconfig_path):
    # Generate new contents
    league = league_sql.generate(db_path, corrections_map)
    league = util.merge(
        league,
        league_raf.generate(
            league,
            raf_path,
            fontconfig_path,
            corrections_map,
        ))
    league = league_ability.fix(league)
    return league
Example #41
    def __parceFrame(self, stream):
        inp = stream
        if not stream:
            return False
        inp.pop(0)
        if len(inp) > 0:
            MSB = inp.pop(0)
            LSB = inp.pop(0)
            length = merge(MSB, LSB)
        else:
            return False

        self.__inpFrame['FrameType'] = inp.pop(0)

        if self.__inpFrame['FrameType'] == 0x90:
            self.__inpFrame['InputAdress64'] = inp[:8]
            self.__inpFrame['InputAdress16'] = inp[8:10]
            self.__inpFrame['Status'] = inp[10]
            self.__inpFrame['Payload'] = inp[11:-1]
            self.__availableFrame = True

            #debug
            #print "Xbee.__parceFrame() = FrameType {}".format(hex(self.__inpFrame['FrameType']))

            return True

        elif self.__inpFrame['FrameType'] == 0x8B:
            self.__inpFrame['FrameId'] = inp.pop(0)
            self.__inpFrame['InputAdress16'] = inp[0:2]
            self.__inpFrame['TransmitRetry'] = inp.pop(2)
            self.__inpFrame['Status'] = inp.pop(2)
            self.__inpFrame['DiscoverStatus'] = inp.pop(2)
            if self.__inpFrame['Status'] == 0x00:

                #debug
                #print "Xbee.__parceFrame() = FrameType {}".format(hex(self.__inpFrame['FrameType']))

                return True
            else:
                print "Status : {}".format(self.__inpFrame['Status'])
                print "DiscoverStatus : {}".format(
                    self.__inpFrame['DiscoverStatus'])
                return False
        else:
            return False
Example #43
def tuple_dag_to_index_dag(tdag):
    """ Convert a tuple-dag into an index dag

    Inserts index operations for all outputs. As a result each pseudo-job has
    only one output, either a tuple or a single variable. It is now easy to
    back-traverse the graph

    Reverses:
        remove_index_entries
    """


    gets = {out:
            {'fn' : index,
             'args' : (outs, i)}
            for outs in tdag if isinstance(outs, tuple)
            for i, out in enumerate(outs)}

    return merge(tdag, gets)
Example #44
		self.P = util.numpartitions(self.digits)

		#if debug: print("Frontend connected: N=" + str(self.N) + ", K=" + str(self.K))
	
		for line in lines:
			if len(line) > 0:
				node, index = line.split(",")
				index = int(index)
				self.nodes[index] = node
				
		#if debug: print("Nodes: " + str(self.nodes))
	
peer = peerutil.getpeer("frontend", frontend)

try:
	server = httputil.createserver(
		peer.port(),
		util.merge({
			"get": get,
			"set": set,
		}, peer.gethandlers()))
except socket.error:
	print("Could not bind on port: " + str(peer.port()))
else:
	print("Frontend node serving on port: " + str(peer.port()))
	peer.addself()
	server.serve_forever()

Example #45
def merge_sort_helper(arr, lo, hi):
    if hi - lo > 1:  # a window of size one is already sorted
        mid = (lo + hi) // 2
        merge_sort_helper(arr, lo, mid)
        merge_sort_helper(arr, mid, hi)
        merge(arr, lo, mid, hi)
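
This is the top-down counterpart of Example #5's bottom-up sort, reusing the same in-place merge helper sketched there. A typical entry point (name hypothetical):

def merge_sort_topdown(arr):
    # Sort the whole array by recursively splitting [0, len(arr)).
    merge_sort_helper(arr, 0, len(arr))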