Example #1
    def WH(self, dW):
        """
        Constructs a fractional Brownian motion process.
        """
        Y1 = np.zeros((self.N, 1 + self.s))  # Exact integrals
        Y2 = np.zeros((self.N, 1 + self.s))  # Riemann sums

        # Construct Y1 through exact integral
        for i in np.arange(1, 1 + self.s, 1):
            Y1[:, i] += dW[:, i - 1, 1]

        # Construct arrays for convolution
        G = np.zeros(1 + self.s)  # Gamma
        for k in np.arange(2, 1 + self.s, 1):
            G[k] = utils.g(utils.b(k, self.a) / self.n, self.a)
            # G[k] = g2(b(k, self.a)/self.n, self.a, self.T)

        X = dW[:, :, 0]  # Xi

        # Initialise convolution result, GX
        GX = np.zeros((self.N, len(X[0, :]) + len(G) - 1))

        # Compute convolution, FFT not used for small n
        # Possible to compute for all paths in C-layer?
        for i in range(self.N):
            GX[i, :] = np.convolve(G, X[i, :])

        # Extract appropriate part of convolution
        Y2 = GX[:, :1 + self.s]

        # Finally construct and return the full process
        WH = np.sqrt(2 * self.a + 1) * (Y1 + Y2)
        # WH = (Y1 + Y2)
        return WH
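
For intuition, here is a standalone sketch of the convolution step above; the kernel values, shapes, and the assumption that dW[:, :, 0] carries s increments per path are illustrative only, not taken from the original class.

import numpy as np

# Hypothetical stand-ins for the class attributes and utils calls.
N, s = 2, 5                          # paths, time steps (assumed)
rng = np.random.default_rng(0)

G = np.zeros(1 + s)                  # Gamma kernel, G[0] = G[1] = 0 as above
G[2:] = 0.3 ** np.arange(2, 1 + s)   # placeholder for utils.g(utils.b(k, a) / n, a)
X = rng.standard_normal((N, s))      # placeholder for dW[:, :, 0]

# Same per-path convolution and truncation as in WH
GX = np.zeros((N, X.shape[1] + len(G) - 1))
for i in range(N):
    GX[i, :] = np.convolve(G, X[i, :])

Y2 = GX[:, :1 + s]
print(Y2.shape)                      # (2, 6)
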
Example #2
def pack_dataset():

    N = SEG_SIDE_LENGTH
    BS = 8

    weight = np.array([1 << ofs for ofs in range(BS - 1, -1, -1)],
                      dtype=np.uint8)
    # Pad the flattened bit plane up to a byte boundary; the outer "% BS"
    # guards the case where N * N is already a multiple of BS.
    pad = np.zeros((BS - (N * N % BS)) % BS, dtype=np.uint8)
    ignored = get_trash_segs_md5()

    with bz2.open(DATASET_CUSTOM_FILE, "wb") as fp:
        for label in tqdm(sorted(os.listdir(DECODED_DATASET_DIR)),
                          "pack dataset"):
            subdir = os.path.join(DECODED_DATASET_DIR, label)
            y = b(label[0])
            for filename in sorted(os.listdir(subdir)):
                path = os.path.join(subdir, filename)
                im = cv2.imread(path).astype(np.uint8)

                hs = md5(im.tobytes())
                if hs in ignored:
                    continue

                # Grey-average the channels and threshold at 128 -> 0/1 bits
                X = np.array((im.sum(axis=2) // 3) >> 7, dtype=np.uint8)
                # Pack each group of 8 bits into one byte (MSB first)
                X = np.hstack([X.flatten(), pad]).reshape(-1, BS)
                X = (X * weight).sum(axis=1).astype(np.uint8)
                X = bytes(X)

                fp.write(X + y)
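
A possible inverse for inspection, assuming each record is ceil(N * N / 8) packed bytes followed by the single label byte written above; the record layout is inferred, not stated in the original.

import bz2
import numpy as np

def unpack_dataset(path, n):
    """Hypothetical inverse of pack_dataset: yields (image, label) pairs."""
    rec_len = (n * n + 7) // 8 + 1          # packed pixels + 1 label byte (assumed)
    with bz2.open(path, "rb") as fp:
        while True:
            rec = fp.read(rec_len)
            if len(rec) < rec_len:
                break
            bits = np.unpackbits(np.frombuffer(rec[:-1], dtype=np.uint8))
            yield bits[:n * n].reshape(n, n), rec[-1:]   # 0/1 pixels, label byte
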
Example #3
def compress_dataset():

    N = SEG_SIDE_LENGTH
    BS = 8

    weight = np.array([1 << ofs for ofs in range(BS - 1, -1, -1)], dtype=np.uint8)
    # Pad the flattened bit plane up to a byte boundary; the outer "% BS"
    # guards the case where N * N is already a multiple of BS.
    pad = np.zeros((BS - (N * N % BS)) % BS, dtype=np.uint8)

    with bz2.open(DATASET_FILE, "wb") as fp:
        for filename in tqdm(sorted(os.listdir(SEG_DATASET_DIR)), "encode dataset"):
            path = os.path.join(SEG_DATASET_DIR, filename)
            im = cv2.imread(path)
            
            # Grey-average the channels and threshold at 128 -> 0/1 bits
            X = np.array((im.sum(axis=2) // 3) >> 7, dtype=np.uint8)
            # Pack each group of 8 bits into one byte (MSB first)
            X = np.hstack([X.flatten(), pad]).reshape(-1, BS)
            X = (X * weight).sum(axis=1).astype(np.uint8)
            X = bytes(X)
            y = b(filename[0])
            fp.write(X + y)
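
Side note: the weight vector is MSB-first, so the manual dot product above is equivalent to NumPy's built-in bit packing; a quick self-contained check:

import numpy as np

BS = 8
weight = np.array([1 << ofs for ofs in range(BS - 1, -1, -1)], dtype=np.uint8)

bits = np.random.randint(0, 2, size=(4, BS)).astype(np.uint8)
manual = (bits * weight).sum(axis=1).astype(np.uint8)
builtin = np.packbits(bits, axis=1).ravel()   # packbits is MSB-first by default
assert np.array_equal(manual, builtin)
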
Example #4
def _merge_prefix(deque, size):
    """Replace the first entries in a deque of strings with a single
    string of up to size bytes.

    >>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
    >>> _merge_prefix(d, 5); print(d)
    deque(['abcde', 'fghi', 'j'])

    Strings will be split as necessary to reach the desired size.
    >>> _merge_prefix(d, 7); print(d)
    deque(['abcdefg', 'hi', 'j'])

    >>> _merge_prefix(d, 3); print(d)
    deque(['abc', 'defg', 'hi', 'j'])

    >>> _merge_prefix(d, 100); print(d)
    deque(['abcdefghij'])
    """
    if len(deque) == 1 and len(deque[0]) <= size:
        return
    prefix = []
    remaining = size
    while deque and remaining > 0:
        chunk = deque.popleft()
        if len(chunk) > remaining:
            deque.appendleft(chunk[remaining:])
            chunk = chunk[:remaining]
        prefix.append(chunk)
        remaining -= len(chunk)
    # This data structure normally just contains byte strings, but
    # the unittest gets messy if it doesn't use the default str() type,
    # so do the merge based on the type of data that's actually present.
    if prefix:
        deque.appendleft(type(prefix[0])().join(prefix))
    if not deque:
        deque.appendleft(b(""))
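
Several examples here rely on a b() helper that is never shown (Example #5 imports it from utils). A plausible minimal definition, purely an assumption modeled on common str/bytes compatibility shims, mapping over lists because Example #5 passes one:

def b(value, encoding="utf-8"):
    """Hypothetical bytes-coercion helper: str -> bytes, bytes unchanged,
    lists mapped element-wise. Not the actual utils.b implementation."""
    if isinstance(value, (list, tuple)):
        return [b(v, encoding) for v in value]
    if isinstance(value, bytes):
        return value
    return value.encode(encoding)
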
Example #5
import unittest
import os
os.environ['DATA_DIR'] = 'db-test'
os.environ['DB_DIR'] = 'db-test'
from utils import b

print(b([os.environ['DB_DIR'], b'index.db']))

INDEX_DB = b'/'.join(b([os.environ['DB_DIR'], b'index.db']))

if os.path.exists(INDEX_DB):
    os.remove(INDEX_DB)

from worker import db_conn, prepare_db, update_db_index, get_metric_props_as_dict, find_metrics


class TestMetricDatabase(unittest.TestCase):
    def setUp(self):
        prepare_db()

    def test_create_select_metric(self):
        import time
        update_db_index('customer_12',
                        'foo.bar.baz',
                        time.time(),
                        8.2,
                        '',
                        metric_props={
                            'host': 'foobarhost',
                            'test': True,
                            'hostid': 123,
Example #6
    def _consume(self, loc):
        if loc == 0:
            return b("")
        _merge_prefix(self._read_buffer, loc)
        self._read_buffer_size -= loc
        return self._read_buffer.popleft()
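
To see how this cooperates with _merge_prefix from Example #4, a stripped-down standalone version (the surrounding stream class and its buffer bookkeeping are assumed):

import collections

def consume(read_buffer, loc):
    """Standalone sketch of _consume: take exactly loc bytes off the front."""
    if loc == 0:
        return b""
    _merge_prefix(read_buffer, loc)   # coalesce the first loc bytes into one chunk
    return read_buffer.popleft()

buf = collections.deque([b"abc", b"defg", b"hi"])
print(consume(buf, 5))   # b'abcde'
print(buf)               # deque([b'fg', b'hi'])
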
Example #7
def prepare_dirs(metric_name, customer=b'customer_1'):
    # Map a dotted metric name to nested directories, e.g. foo.bar.baz -> foo/bar/baz
    path = b'/'.join([DATA_DIR, customer, b(metric_name).replace(b'.', b'/')])
    os.makedirs(path, exist_ok=True)  # exist_ok=True makes a separate exists() check unnecessary
    return path
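
A usage sketch; the DATA_DIR value and the b() helper (as sketched after Example #4) are assumptions, and the resulting layout is inferred from the join:

DATA_DIR = b"data"   # assumed for the demo

path = prepare_dirs("foo.bar.baz", customer=b"customer_12")
print(path)          # b'data/customer_12/foo/bar/baz' (directories now exist)
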