Esempio n. 1
0
def sequential(size):
    """Sequential reference implementation of blocked matrix multiplication.

    Creates two ``size`` x ``size`` matrices, performs an initial skew
    alignment (Cannon-style — TODO confirm against the parallel variant),
    then runs ``size`` multiply-and-shift rounds, saving every intermediate
    result via SaveMatrix and printing the elapsed time.
    """
    matrix1, matrix2 = create_2matrix(size)
    save_matrix = SaveMatrix(matrix1, matrix2)

    start_time = time.time()
    # Initial alignment of the matrices: row i of matrix1 is rotated left
    # by i positions; column i of matrix2 is rotated up by i positions.
    for i in range(1, size):
        matrix1[i] = matrix1[i][i:] + matrix1[i][:i]

        # n is column i of matrix2 after rotating it up by i rows.
        n = [matrix2[(j + i) % size][i] for j in range(0, size)]
        for j in range(0, size):
            matrix2[j][i] = n[j]

    result_matrix = multiply(matrix1, matrix2, size)
    save_matrix.add_to_c_sequential(result_matrix, True)

    # Remaining size-1 rounds: shift each row of matrix1 left by one,
    # shift matrix2 up by one row, then accumulate the partial product.
    for _ in range(1, size):
        for i in range(0, size):
            matrix1[i] = matrix1[i][1:] + matrix1[i][:1]
        matrix2 = matrix2[1:] + matrix2[:1]
        add_and_multiply(result_matrix, matrix1, matrix2, size)
        save_matrix.add_to_c_sequential(result_matrix)

    end_time = time.time()
    print_result(result_matrix, end_time - start_time)

    save_matrix.to_json_file()
Esempio n. 2
0
def main(args):
    """Exercise the mono API end to end: init, login, document CRUD, permissions."""
    base = util.get_value(args.aws, 'monoUrl')

    endpoints = [base + 'api/values', base + 'api/init/users']
    if not args.aws:
        # Table creation is only needed when not running on AWS.
        endpoints = [base + 'api/init/table'] + endpoints

    print('Init database and get default')
    for url in endpoints:
        print(f'Method GET endpoint {url}')
        util.print_result(requests.get(url))

    auth = util.login(base, 'admin', 'demo')

    print('Get my data')
    util.print_result(requests.get(base + 'api/auth/me', headers=auth))

    get_all_documents(base, auth)

    doc = add_document(base, auth, "Invoice to be removed")
    remove_document(base, auth, doc['id'])

    doc = add_document(base, auth, "Invoice to be tested")
    get_all_documents(base, auth)

    get_permissions(base, auth)
    get_permission_jwt(base, auth, doc['id'])

    remove_all_documents(base, auth)
Esempio n. 3
0
def test_feature(returns, returns_lag, rollmean, rollmean_lag, output_file=None):
    """Print stats for the feature series, build a dataset from them,
    run the learning test and report/return the outcome."""
    util.print_stats(returns, returns_lag, rollmean, rollmean_lag, file=output_file)

    dataset = feature_generation.create_dataset(
        returns, returns_lag, rollmean, rollmean_lag)

    # `step` is taken from module scope; min_size is fixed at 1000 samples.
    outcome = learning.test(dataset, min_size=1000, step=step)
    util.print_result(outcome, file=output_file)

    return outcome
Esempio n. 4
0
def main(args):
    """Initialise the backing table (non-AWS only) and the default users."""
    base = util.get_value(args.aws, 'monoUrl')

    def init(url, banner):
        # One announced GET against an init endpoint.
        print(banner)
        print(f'Method GET endpoint {url}')
        return requests.get(url)

    if not args.aws:
        # The table only has to be created explicitly outside AWS.
        init(base + 'api/init/table', 'Init database')

    util.print_result(init(base + 'api/init/users', 'Init users'))
Esempio n. 5
0
def delete_file(base, auth, file_name):
    """Delete a stored file; file_name is '<doc_id>/<area>/<file>'."""
    segments = file_name.split('/')
    url = (base +
           f'api/files/document/{segments[0]}/area/{segments[1]}/file/{segments[2]}')
    print('Delete file url %s' % url)
    return util.print_result(requests.delete(url, headers=auth))
Esempio n. 6
0
def get_jwt(base, auth, id):
    """Fetch a permissions JWT for document `id` and wrap it as a Bearer header."""
    print('Get JWT')
    url = base + f'api/auth/permissions-jwt/{id}'
    print(url)
    token = util.print_result(requests.get(url, headers=auth))['jwt']
    return {'Authorization': f'Bearer {token}'}
Esempio n. 7
0
def rdt_send():
    """UDP reliable-transfer sender.

    Reads CLI arguments (host, port, file, window size N, MSS), announces
    the transfer, splits the file into pre-built packets in the global
    ``data_buffer``, then starts the sender and ACK handlers, joins them,
    and prints timing statistics.
    """

    hostname = sys.argv[1]
    portnumber = int(sys.argv[2])
    file = sys.argv[3]
    N = int(sys.argv[4])    # sender window size
    MSS = int(sys.argv[5])  # maximum segment size in bytes

    # Announce transfer parameters to the receiver before sending data.
    client_greeting(portnumber, file, hostname, MSS, N)

    global data_buffer
    global total_no_packets

    start = time.time()

    IPclient = ''
    client_port_no = 4445
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client.bind((IPclient, client_port_no))

    FILE = open(file, 'rb')
    file_data = FILE.read(MSS)
    seq = 1
    # Pre-build every packet: 4-byte sequence number + 2-byte checksum +
    # 2-byte packet-type id + payload, queued into the shared data_buffer.
    while file_data:
        # Decodes with replacement to compute the checksum over text, then
        # re-encodes with 'ignore' — NOTE(review): this may drop bytes of
        # binary files; presumably acceptable for this assignment (confirm).
        file_content = str(file_data, 'UTF-8', errors='replace')
        cs = checksum_calculation(file_content)
        checksum = struct.pack('=H', cs)
        seq_no = struct.pack('=L', seq)
        data = file_content.encode('ISO-8859-1', 'ignore')
        initial_packet = struct.pack('=h', PACKET_ID)
        cur_pkt = seq_no + checksum + initial_packet + data
        data_buffer.append(cur_pkt)
        file_data = FILE.read(MSS)
        seq += 1
    total_no_packets = len(data_buffer)

    # ackclass/serverclass are joined below, so they are presumably
    # thread-like workers sharing the same socket — confirm their types.
    ACKs = ackclass(client)
    transmitted_data = serverclass(portnumber, hostname, client, file, MSS, N)
    transmitted_data.join()
    ACKs.join()
    end = time.time()
    client.close()
    print_result(hostname, portnumber, N, MSS, end, start)
    FILE.close()
Esempio n. 8
0
def get_presigned_url(base, auth, doc_id, area, f):
    """POST the file's metadata and return the presigned-upload response payload."""
    url = base + f'api/files/document/{doc_id}/area/{area}'
    print('Fetch Presigned url %s' % url)
    payload = {
        'fileName': f['name'],
        'contentType': f['content_type'],
    }
    return util.print_result(requests.post(url, json=payload, headers=auth))
Esempio n. 9
0
def get_file(base, auth, file_name):
    """Resolve a presigned download URL for file_name ('<doc>/<area>/<file>'),
    download the content and save it under the file segment's name."""
    segments = file_name.split('/')
    url = (base +
           f'api/files/document/{segments[0]}/area/{segments[1]}/file/{segments[2]}')
    print(f'Getting presigned load url {url}')
    meta = util.print_result(requests.get(url=url, headers=auth))
    print(meta)
    presigned_url = meta['url']
    print(f'File is available at {presigned_url}')
    print(f'Downloading and saving to {segments[2]}')
    download = requests.get(presigned_url)
    with open(segments[2], 'wb') as out:
        out.write(download.content)
Esempio n. 10
0
def sda():
    """Flask form handler dispatching simple data-analysis operations.

    Reads 'func' (the operation) and 'source' (which textarea(s) to use)
    from the posted form, runs the operation, and returns its textual
    report as a JSON string.

    NOTE(review): the 'compare' and 'diff' branches use data1/data2, which
    are only bound when source == 'Data1,Data2'; the client presumably
    always pairs those funcs with that source — confirm.
    """
    func = request.form.get('func')
    source = request.form.get('source')
    # Load the requested input(s) split into lines; reject empty input.
    if source == 'Data1' or source == 'Data2':
        if source == 'Data1':
            data = request.form.get('data1').splitlines()
        else:
            data = request.form.get('data2').splitlines()
        if data == []:
            return jsonify(f'{source} is empty.\nPlease enter something...')
    elif source == 'Data1,Data2':
        data1 = request.form.get('data1').splitlines()
        data2 = request.form.get('data2').splitlines()
        if data1 == []:
            return jsonify('Data1 is empty.\nPlease enter something...')
        elif data2 == []:
            return jsonify('Data2 is empty.\nPlease enter something...')
    result = []
    if func == 'chk_duplicates':
        # Report each duplicated value and how often it appears.
        start_time = time()
        r1 = chk_duplicates(data)
        elapsed_time = time()-start_time
        if r1 == []:
            result.append(f'{source} contains no duplicate values.\n')
            result.append(f'Duration for process: {elapsed_time:.3f} sec.')
        else:
            result += print_result([f'{i[0]}\t\tappears {i[1]} time(s)' for i in r1],
                                   f'Duplicate values found in {source}', elapsed_time=elapsed_time)
    elif func == 'rm_duplicates':
        # Deduplicate (set() — original order is not preserved) and precheck.
        result = precheck(list(set(data)))
    elif func == 'chk_consecutive':
        # Verify the data forms a run of consecutive natural numbers;
        # chk_consecutive presumably raises ValueError on non-numeric input.
        try:
            start_time = time()
            r1 = chk_consecutive(data)
            tmp = chk_duplicates(data)
            elapsed_time = time()-start_time
        except ValueError:
            result.append(
                f'[Error]{source} contains non-numeric value. Please check and try again!')
        else:
            if tmp != []:
                result.append(
                    f'[Warning]Duplicate values found in {source}.\nYou can "Check Duplicates ({source})" to check it.\n')
            if r1 == []:
                result.append(
                    f'{source} contains consecutive natural numbers.\n')
                result.append(f'Duration for process: {elapsed_time:.3f} sec.')
            else:
                result += print_result(r1, f'{source} is not consecutive',
                                       'The following numbers are missing:', elapsed_time=elapsed_time)
    elif func == 'compare':
        # Compare the two inputs, either for common values or differences.
        mode = request.form.get('mode')
        ignore_duplicates = request.form.get('ignore_duplicates')
        if ignore_duplicates == 'true':
            data1 = list(set(data1))
            data2 = list(set(data2))
        if mode == 'comm':
            start_time = time()
            r1 = compare(data1, data2, mode='comm')
            elapsed_time = time()-start_time
            if r1 == []:
                result.append('Two data contain no common values.\n')
                result.append(f'Duration for process: {elapsed_time:.3f} sec.')
            else:
                result += print_result(r1, 'Common values found between two data.',
                                       elapsed_time=elapsed_time)
        elif mode == 'diff':
            # r1: values only in Data1; r2: values only in Data2.
            start_time = time()
            r1 = compare(data1, data2)
            r2 = compare(data2, data1)
            elapsed_time = time()-start_time
            if r1 + r2 == []:
                result.append('Data1 is same as Data2.\n')
                result.append(f'Duration for process: {elapsed_time:.3f} sec.')
            elif r1 == []:
                result.append('Data2 completely contains Data1.\n')
                result += print_result(r2, 'Data2 is more than Data1',
                                       elapsed_time=elapsed_time)
            elif r2 == []:
                result.append('Data1 completely contains Data2.\n')
                result += print_result(r1, 'Data1 is more than Data2',
                                       elapsed_time=elapsed_time)
            else:
                result.append('Two files have inconsistent content.\n')
                result += print_result(r1, 'Data1 is more than Data2')
                result.append('')
                result += print_result(r2, 'Data2 is more than Data1',
                                       elapsed_time=elapsed_time)
    elif func == 'diff':
        result = diff(data1, data2)
    output = '\n'.join(result)
    return jsonify(output)
Esempio n. 11
0
def get_permissions(base, auth):
    """Fetch the authenticated user's permission set."""
    print('Get permissions')
    return util.print_result(
        requests.get(base + 'api/auth/permissions', headers=auth))
Esempio n. 12
0
import cv2 as cv
import numpy as np
from pathlib import Path
import natsort
import util

# Collect every bitmap in out/ in natural (path-aware) sort order.
IMG_Path = Path("out")
IMG_File = natsort.natsorted(list(IMG_Path.glob("*.bmp")), alg=natsort.PATH)
IMG_Str = []
for i in IMG_File:
    IMG_Str.append(str(i))

# For each output image: score it against its ground truth, then rebuild a
# filled polygon mask from its external contours and score that as well.
for j in range(len(IMG_Str)):
    img = cv.imread(IMG_Str[j], cv.IMREAD_UNCHANGED)
    # Ground-truth files are presumably numbered starting at 8 — confirm
    # this matches the contents of out/.
    util.print_result(img, cv.imread("gt/" + str(8 + j) + "_gt.bmp"))

    gray = img[:, :, 0]
    src_gray = gray.copy()
    # NOTE(review): the 3-value findContours return is the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy).
    _, contours, _ = cv.findContours(src_gray, cv.RETR_EXTERNAL,
                                     cv.CHAIN_APPROX_SIMPLE)
    rlt = np.zeros((1500, 1500, 3), dtype=np.uint8)
    for k in range(len(contours)):
        cnt = contours[k]
        # Simplify each contour with a 10px tolerance, outline it in red on
        # the source and fill it white on the result mask.
        approx = cv.approxPolyDP(cnt, 10, True)
        cv.polylines(img, [approx], True, (0, 0, 255), 2)
        cv.drawContours(rlt, [approx],
                        -1, (255, 255, 255),
                        thickness=cv.FILLED)

    util.print_result(rlt, cv.imread("gt/" + str(8 + j) + "_gt.bmp"))
Esempio n. 13
0
                mtx3 = [matrix[r][j * p_size:j * p_size + p_size] for r in range(i * p_size, i * p_size + p_size)]
                data = [mtx1, mtx2, mtx3]
                # data = [mtx1, mtx2, [[0] * p_size for i in range(p_size)]]
                comm.send(data, dest=dest, tag=dest)

        for i in range(p_sqrt):
            for j in range(p_sqrt):
                dest = p_sqrt * i + j + 1
                mtx = comm.recv(source=dest, tag=dest)
                for k in range(p_size):
                    # matrix[i * p_size + k][j * p_size: j * p_size + p_size] = mtx[k]
                    matrix[i * p_size + k][j * p_size: j * p_size + p_size] = mtx[m_size-1][k]
                save_matrix.add_to_c_parallel(mtx, dest)

        end_time = time.time()
        print_result(matrix, end_time - start_time)
        save_matrix.to_json_file()
    else:
        data = comm.recv(source=0, tag=rank)
        r_one = rank - 1
        r_size = rank - p_sqrt
        x = rank + 1
        y = rank + p_sqrt
        p_sqrt_2 = p_sqrt ** 2
        dest1 = r_one if r_one % p_sqrt != 0 else r_one + p_sqrt
        dest2 = r_size if r_size > 0 else p_sqrt_2 + r_size
        source1 = x if rank % p_sqrt != 0 else x - p_sqrt
        source2 = y if y <= p_sqrt_2 else y - p_sqrt_2
        all_iter = []
        for t in range(m_size):
            add_and_multiply(data[2], data[0], data[1], len(data[0]))
Esempio n. 14
0
        'tfidf': _tfidf,
        'inverted': _inverted_index,
        'did_name': _doc_id_name_index
    }


print('Loading...')
index = load_index()
print('Ready...(type exit to terminate)')

# Interactive loop: rank documents by TF-IDF similarity to each query
# until the user types 'exit'.
while True:
    query = input('what is the query?')
    # query = 'patient ARDS'

    if query == 'exit':
        break

    print('...')
    normalize_query: list[str] = preprocessing.query(query)
    tf_idf_ranker_q = ranker.Ranker(index['inverted'], index['did_name'])
    _tfidf_query = tf_idf_ranker_q.tfidf_query(normalize_query)

    # (doc_id, similarity) pairs of the most relevant documents.
    document_results: list[tuple[int, float]] = ranker.top_10_relevant_documents(
        index['tfidf'], _tfidf_query)
    document_results = [{
        'document_name': index['did_name'][d_id[0]],
        'similarity_score': d_id[1]
    } for d_id in document_results]
    print('Matching documents for the query - ', query)
    util.print_result(document_results, util.get_corpus_dir_path_from_args())
Esempio n. 15
0
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd

import sys

from util import np, bench_python, bench_dex, print_result

# Benchmark element-wise addition and polynomial evaluation against Dex.
(n,) = map(int, sys.argv[1:])
xs = np.ones((n, ), dtype=np.float64)
ys = np.ones((n, ), dtype=np.float64)

add_op = lambda: xs + ys
# Writing the polynomial with repeated multiplication beats using **.
poly_op = lambda: 4 * xs * xs * xs * xs + 3 * xs * xs * xs + 2 * xs * xs + xs

np_add_time, add_loops = bench_python(add_op)
np_poly_time, poly_loops = bench_python(poly_op)
dex_add_time, dex_poly_time = bench_dex('pointwise.dx', n=n, loops=add_loops)
print_result('1D addition', np_add_time, dex_add_time, add_loops)
print_result('Polynomial evaluation', np_poly_time, dex_poly_time, poly_loops)
Esempio n. 16
0
def remove_document(base, auth, id):
    """Delete the document identified by `id` and return the parsed response."""
    print('Remove document "%s"' % id)
    return util.print_result(
        requests.delete(base + f'api/documents/{id}', headers=auth))
Esempio n. 17
0
def headers(base, auth):
    """GET the dev endpoint that echoes back the request headers."""
    endpoint = base + 'api/dev/headers'
    print('Fetch headers %s' % endpoint)
    return util.print_result(requests.get(endpoint, headers=auth))
Esempio n. 18
0
def get_all_documents(base, auth):
    """List every document visible to the authenticated user."""
    print('Get all existing documents')
    endpoint = base + 'api/documents'
    print(endpoint)
    return util.print_result(requests.get(endpoint, headers=auth))
Esempio n. 19
0
def main(args):
    """End-to-end demo of the file-service (AMIS) API.

    Logs in, picks or creates a document, obtains a document-scoped JWT,
    uploads two files via presigned URLs, then (unless args.nodelete)
    downloads and deletes them again.
    """
    base = util.get_value(args.aws, 'monoUrl')

    if not args.aws:
        # Table creation is only needed for the local (non-AWS) setup.
        gets = ['api/init/table']
        #
        print('Init database and get default')
        for url in gets:
            print(f'Method GET endpoint {url}')
            response = requests.get(base + url)
            util.print_result(response)
    #
    auth, user_id = util.login(base, 'admin', 'demo')
    #
    docs = demo_mono.get_all_documents(base, auth)
    if len(docs) == 0:
        print('No documents. Creating one.')
        doc = demo_mono.add_document(base, auth, "Some demo document")
    else:
        print('There are documents. Selecting first')
        doc = docs[0]
    print('Active document "' + doc['name'] + '"')
    doc_id = doc['id']
    # Document-scoped JWT used for all file-service calls below.
    jwt_auth = get_jwt(base, auth, doc_id)
    #
    base_amis = util.get_value(args.aws, 'amisUrl')
    # headers(base_amis, jwt_auth)
    # decoded(base_amis, jwt_auth)
    if not args.aws:
        # Naturally, in AWS we can't list all the buckets that are there. That would not make any sense. So this is only for minio.
        bucket(base_amis, jwt_auth)
    #
    get_files(base_amis, jwt_auth, doc_id)
    #
    permissions = demo_mono.get_permissions(base, auth)
    area = get_first_area_with_add(permissions)
    print(f'Add files to area {area}')
    files = [{
        'name': 'temp.txt',
        'content_type': 'text/plain; charset=UTF-8'
    }, {
        'name': 'img.jpg',
        'content_type': 'image/jpeg'
    }]
    for f in files:
        # Two-step upload: fetch a presigned URL, then PUT the payload to it.
        presign_data = get_presigned_url(base_amis, jwt_auth, doc_id, area, f)
        upload_file_to_presigned_url(presign_data['url'], presign_data['key'],
                                     f, user_id)
    files = get_files(base_amis, jwt_auth, doc_id)

    if args.nodelete:
        print('Not fetching and deleting files. Look from browser. Exiting.')
        exit()

    # Download and delete each uploaded file, refreshing the listing in between.
    get_file(base_amis, jwt_auth, files[0]['path'])
    delete_file(base_amis, jwt_auth, files[0]['path'])

    files = get_files(base_amis, jwt_auth, doc_id)
    get_file(base_amis, jwt_auth, files[0]['path'])

    delete_file(base_amis, jwt_auth, files[0]['path'])
    files = get_files(base_amis, jwt_auth, doc_id)
Esempio n. 20
0
def bucket(base, auth):
    """GET the dev endpoint that lists storage buckets."""
    endpoint = base + 'api/dev/buckets'
    print('Test bucket %s' % endpoint)
    return util.print_result(requests.get(endpoint, headers=auth))
Esempio n. 21
0
def get_files(base, auth, doc_id):
    """List the files attached to document `doc_id`."""
    endpoint = base + f'api/files/document/{doc_id}'
    print('Get files of document %s' % endpoint)
    return util.print_result(requests.get(endpoint, headers=auth))
Esempio n. 22
0
def decoded(base, auth):
    """GET the dev endpoint that returns the JWT decoded from the auth header."""
    endpoint = base + 'api/dev/decoded-jwt'
    print('Test decoding jwt from header %s' % endpoint)
    return util.print_result(requests.get(endpoint, headers=auth))
Esempio n. 23
0
def add_document(base, auth, name):
    """Create a document called `name` and return the parsed response."""
    print('Add document "%s"' % name)
    resp = requests.post(base + 'api/documents',
                         json={"name": name},
                         headers=auth)
    return util.print_result(resp)
    def train(self, load_weight=False, print_every=500):
        """Train the gender-translation CycleGAN.

        Alternates updates of the two discriminators (D_M, D_F) and the two
        generators (G_MF: male->female, G_FM: female->male) over every batch
        of self.dloader for self.num_epoch epochs, saving weights and the
        epoch log after each epoch.

        Args:
            load_weight: resume from the epoch recorded by util.read_log().
            print_every: report progress every `print_every` iterations.

        Fix vs. original: the guards used `x is not 0`, an *identity* test
        against the literal 0. torch.sum returns a tensor, never the int 0,
        so every guard was unconditionally True; they now compare by value.
        """
        MALE, FEMALE = 0, 1
        FAKE, REAL = 0, 1
        LAST_EPOCH = 0

        # If pretrained weights exist, load them and get last epoch and iteration
        if load_weight is True:
            LAST_EPOCH = int(util.read_log())
            self.load_pretrained(LAST_EPOCH)

        for epoch in range(LAST_EPOCH, self.num_epoch):
            self.set_optimizer(epoch)

            for iters, (image, label) in enumerate(self.dloader):
                # If a batch has only female or male images not mixed, just discard that batch.
                # It seems that it makes results even worse.

                if torch.sum(label == 0) == 0 or torch.sum(label == 1) == 0:
                    continue

                male_num = torch.sum(label == 0)
                female_num = torch.sum(label == 1)

                image, label = image.type(self.dtype), label.type(self.ltype)
                male_img, female_img = util.gender_divider(image, label)

                image, label = Variable(image), Variable(label)
                male_img, female_img = Variable(male_img), Variable(female_img)
                """
                1. Train D_M (Discriminator for male)

                Step 1. Hope D_M(male_img) == 1
                Step 2. Hope D_M(fake_img) == 0
                Step 3. Minimize classification loss
                """
                # D_M(male_img) == 1
                real_loss, fake_loss = 0, 0
                # Value comparison (was `is not 0`, always True for tensors).
                if male_num != 0:
                    real_score, _ = self.D_M(male_img)
                    real_label = util.make_label(real_score.size(),
                                                 label=REAL,
                                                 noisy=True).type(self.dtype)
                    real_label = Variable(real_label)

                    # Loss for real male image
                    real_loss = self.criterion_gan(real_score, real_label)

                # Hope D_M(fake_img) == 0
                if female_num != 0:
                    fake_img = self.G_FM(female_img)
                    fake_score, _ = self.D_M(fake_img)
                    fake_label = util.make_label(fake_score.size(),
                                                 label=FAKE,
                                                 noisy=False).type(self.dtype)
                    fake_label = Variable(fake_label)

                    # Loss for fake male image
                    fake_loss = self.criterion_gan(fake_score, fake_label)

                # Minimize classification loss
                _, gender_score = self.D_M(image)
                cls_loss = self.criterion_cls(gender_score, label)

                # Final D_M loss (multitask learning)
                D_loss = real_loss + fake_loss + cls_loss

                # Update
                self.all_optim_zero_grad()
                D_loss.backward()
                self.optim_D_M.step()
                """
                2. Train D_F (Discriminator for female)

                Step 1. Hope D_F(female_img) == 1
                Step 2. Hope D_F(fake_img) == 0
                Step 3. Minimize classification loss
                """
                # Hope D_F(female_img) == 1
                real_loss, fake_loss = 0, 0
                if female_num != 0:
                    real_score, _ = self.D_F(female_img)
                    real_label = util.make_label(real_score.size(),
                                                 label=REAL,
                                                 noisy=True).type(self.dtype)
                    real_label = Variable(real_label)

                    # Loss for real female image
                    real_loss = self.criterion_gan(real_score, real_label)

                # Hope D_F(fake_img) == 0
                if male_num != 0:
                    fake_img = self.G_MF(male_img)
                    fake_score, _ = self.D_F(fake_img)
                    fake_label = util.make_label(fake_score.size(),
                                                 label=FAKE,
                                                 noisy=False).type(self.dtype)
                    fake_label = Variable(fake_label)

                    # Loss for fake female image
                    fake_loss = self.criterion_gan(fake_score, fake_label)

                # Minimize classification loss
                _, gender_score = self.D_F(image)
                cls_loss = self.criterion_cls(gender_score, label)

                # Final D_F loss
                D_loss = real_loss + fake_loss + cls_loss

                # Get classification accuracy

                accuracy = util.get_cls_accuracy(gender_score.data, label.data)

                # Update
                self.all_optim_zero_grad()
                D_loss.backward()
                self.optim_D_F.step()
                """
                3. Training G_MF, G_FM with process of
                   <1> Male(Real image) -> <2> Female(Fake image) -> <3> Male(Cycle)

                Step 1. Hope D_F(<2>) == 1
                Step 2. Hope <2> to be classified as female
                Step 3. Hope <1> == <3>
                """
                if male_num != 0:
                    fake_img = self.G_MF(male_img)
                    fake_score, gender_score = self.D_F(fake_img)

                    # Hope D_F(<2>) == 1
                    real_label = util.make_label(fake_score.size(),
                                                 label=REAL,
                                                 noisy=False).type(self.dtype)
                    real_label = Variable(real_label)
                    gan_loss = self.criterion_gan(fake_score, real_label)

                    # Hope <2> to be classified as female
                    female_label = util.make_label(gender_score.size(0),
                                                   label=FEMALE,
                                                   noisy=False).type(
                                                       self.ltype)
                    female_label = Variable(female_label)
                    cls_loss = self.cls_lambda * self.criterion_cls(
                        gender_score, female_label)

                    # Hope <1> == <3>
                    cycle_img = self.G_FM(fake_img)
                    cyc_loss = self.cyc_lambda * self.criterion_cyc(
                        cycle_img, male_img)

                    # Final loss
                    G_loss = gan_loss + cls_loss + cyc_loss

                    # Update
                    self.all_optim_zero_grad()
                    G_loss.backward()
                    self.optim_G_MF.step()
                    self.optim_G_FM.step()
                """
                4. Training G_MF, G_FM with process of
                   <1> Female(Real image) -> <2> Male(Fake image) -> <3> Female(Cycle)

                Step 1. Hope D_M(<2>) == 1
                Step 2. Hope <2> to be classified as male
                Step 3. Hope <1> == <3>
                """
                if female_num != 0:
                    fake_img = self.G_FM(female_img)
                    fake_score, gender_score = self.D_M(fake_img)

                    # Hope D_M(<2>) == 1
                    real_label = util.make_label(fake_score.size(),
                                                 label=REAL,
                                                 noisy=False).type(self.dtype)
                    real_label = Variable(real_label)
                    gan_loss = self.criterion_gan(fake_score, real_label)

                    # Hope <2> to be classified as male
                    male_label = util.make_label(gender_score.size(0),
                                                 label=MALE,
                                                 noisy=False).type(self.ltype)
                    male_label = Variable(male_label)
                    cls_loss = self.cls_lambda * self.criterion_cls(
                        gender_score, male_label)

                    # Hope <1> == <3>
                    cycle_img = self.G_MF(fake_img)
                    cyc_loss = self.cyc_lambda * self.criterion_cyc(
                        cycle_img, female_img)

                    # Final loss
                    G_loss = gan_loss + cls_loss + cyc_loss

                    # Update
                    self.all_optim_zero_grad()
                    G_loss.backward()
                    self.optim_G_MF.step()
                    self.optim_G_FM.step()

                if iters % print_every == 0:
                    util.print_result(epoch, iters, accuracy, D_loss, G_loss,
                                      gan_loss, cyc_loss, cls_loss)

            # Save parameters
            util.save_weight(self.D_M,
                             self.D_F,
                             self.G_MF,
                             self.G_FM,
                             num_epoch=epoch)
            util.write_log(epoch)
Esempio n. 25
0
def main():
    """Upload an audio file to the Dolby Media API and print its analysis.

    Flow: request a pre-signed upload URL, PUT the file there, start an
    analyze job, then poll every 5 seconds until the job reports Success.

    Raises:
        SystemExit: on any network failure (wraps the requests exception).
    """

    # define any environment variables
    API_KEY, AUDIO_FILE = util.get_env_vars()

    # define endpoints to post to
    media_input_url = 'https://api.dolby.com/media/input'
    analyze_url = 'https://api.dolby.com/media/analyze'

    # define media location to upload file to
    media_location = util.generate_media_location(AUDIO_FILE)

    # define header that will always be used
    headers = {
        'x-api-key': API_KEY,
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }

    # get pre-signed url for uploading file
    body = {
        'url': media_location,
    }

    try:
        response = requests.post(
            media_input_url,
            json=body,
            headers=headers,
        )
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    # upload file; the context manager closes the handle even on failure
    # (the original leaked the open file object)
    try:
        with open(AUDIO_FILE, 'rb') as audio_file:
            response = requests.put(
                response.json().get('url'),
                data=audio_file,
            )
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    # request file analysis
    body = {
        'input': media_location,
    }

    try:
        response = requests.post(
            analyze_url,
            json=body,
            headers=headers,
        )
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)

    # get and print analysis
    # retry after 5 seconds if analysis is not yet complete
    job_id_url = f"{analyze_url}?job_id={response.json().get('job_id')}"

    while True:
        try:
            response = requests.get(
                job_id_url,
                headers=headers,
            )
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)

        if response.json().get('status') == 'Success':
            util.print_result(response.json().get('result'))
            break
        else:
            time.sleep(5)
Esempio n. 26
0
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd

import sys

from util import np, bench_python, bench_dex, print_result

# Benchmark a dense matrix product in NumPy against the Dex implementation.
n, k, m = (int(arg) for arg in sys.argv[1:])
lhs = np.random.randn(n, k).astype(np.float64)
rhs = np.random.randn(k, m).astype(np.float64)

numpy_time, loops = bench_python(lambda: lhs.dot(rhs))
dex_time, = bench_dex('matmul.dx', n=n, k=k, m=m, loops=loops)
print_result('Matrix multiplication', numpy_time, dex_time, loops)
Esempio n. 27
0
def get_permission_jwt(base, auth, id):
    """Fetch the permissions JWT payload for document `id`."""
    print('Get permission jwt')
    return util.print_result(
        requests.get(base + 'api/auth/permissions-jwt/' + id,
                     headers=auth))
Esempio n. 28
0
import numpy as np
import cv2 as cv
import util

# Load the segmentation output and its ground truth, and score them.
img=cv.imread("9.bmp")
a=cv.imread("gt/9_gt.bmp")
util.print_result(img,a)
gray=img[:,:,0]
src_gray=gray.copy()
# NOTE(review): the 3-value findContours return is the OpenCV 3.x API;
# OpenCV 4.x returns only (contours, hierarchy).
_, contours, _ = cv.findContours(src_gray, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# for cnt in contours:
# Fill the external contours in-place on src_gray to get a solid mask,
# replicate it across all three channels, and score the result.
src=np.zeros((1500,1500,3),dtype=np.uint8)
cv.drawContours(src_gray,contours,-1,(255),thickness=cv.FILLED)
src[:,:,0]=src_gray
src[:,:,1]=src_gray
src[:,:,2]=src_gray
util.print_result(src,cv.imread("gt/8_gt.bmp"))
# Re-extract contours from the filled mask and drop blobs smaller than
# 25x25 px as noise.
_, contours, _ = cv.findContours(src_gray, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
print("Original Contour Number: %d" % len(contours))
refine_contours = []
for cnt in contours:
    # print(cnt.size)
    area = abs(cv.contourArea(cnt))
    if area > 25 * 25:
        refine_contours.append(cnt)
print("Reduced: %d" % len(refine_contours))
rlt=np.zeros((1500,1500,3),dtype=np.uint8)
cv.drawContours(rlt,refine_contours,-1,(255,255,255),thickness=cv.FILLED)
# util.print_result(rlt,cv.imread("gt/8_gt.bmp"))
cv.imwrite("out/noise_9.bmp",rlt)