def test_external_delegate_options_memory_import(delegate_dir,
                                                 test_data_folder):
    # create armnn delegate with memory-import option
    armnn_delegate = tflite.load_delegate(delegate_dir,
                                          options={
                                              'backends': 'CpuAcc,CpuRef',
                                              'memory-import': '1'
                                          })

    model_file_name = 'fallback_model.tflite'

    tensor_shape = [1, 2, 2, 1]

    input0 = np.array([1, 2, 3, 4], dtype=np.uint8).reshape(tensor_shape)
    input1 = np.array([2, 2, 3, 4], dtype=np.uint8).reshape(tensor_shape)
    inputs = [input0, input0, input1]
    expected_output = np.array([1, 2, 2, 2],
                               dtype=np.uint8).reshape(tensor_shape)

    # run the inference
    armnn_outputs = run_inference(test_data_folder, model_file_name, inputs,
                                  [armnn_delegate])

    # check results
    compare_outputs(armnn_outputs, [expected_output])
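
# The delegate tests in this file rely on run_inference and compare_outputs
# helpers that are not shown here. Below is a minimal sketch of what they
# might look like, assuming the module already imports numpy as np and
# tflite_runtime.interpreter as tflite; the bodies are an assumption inferred
# from how the helpers are called, not the original test harness.
import os

def run_inference(test_data_folder, model_file_name, inputs, delegates):
    # Load the model with the given external delegates and run one inference.
    # Assumes the input order matches the model's input tensor order.
    interpreter = tflite.Interpreter(
        model_path=os.path.join(test_data_folder, model_file_name),
        experimental_delegates=delegates)
    interpreter.allocate_tensors()
    for detail, data in zip(interpreter.get_input_details(), inputs):
        interpreter.set_tensor(detail['index'], data)
    interpreter.invoke()
    return [interpreter.get_tensor(detail['index'])
            for detail in interpreter.get_output_details()]

def compare_outputs(outputs, expected_outputs):
    # Element-wise comparison of actual vs. expected output tensors.
    for actual, expected in zip(outputs, expected_outputs):
        np.testing.assert_allclose(actual, expected, rtol=1e-5, atol=1e-5)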

def test_external_delegate_options_fp32_to_fp16(capfd, delegate_dir,
                                                test_data_folder):
    # create armnn delegate with reduce-fp32-to-fp16 option
    armnn_delegate = tflite.load_delegate(delegate_dir,
                                          options={
                                              'backends': 'CpuRef',
                                              'debug-data': '1',
                                              'reduce-fp32-to-fp16': '1'
                                          })

    model_file_name = 'fp32_model.tflite'

    tensor_shape = [1, 2, 2, 1]

    input0 = np.array([1, 2, 3, 4], dtype=np.float32).reshape(tensor_shape)
    input1 = np.array([2, 2, 3, 4], dtype=np.float32).reshape(tensor_shape)
    inputs = [input0, input0, input1]
    expected_output = np.array([1, 2, 2, 2],
                               dtype=np.float32).reshape(tensor_shape)

    # run the inference
    armnn_outputs = run_inference(test_data_folder, model_file_name, inputs,
                                  [armnn_delegate])

    # check results
    compare_outputs(armnn_outputs, [expected_output])

    captured = capfd.readouterr()
    assert 'convert_fp32_to_fp16' in captured.out
    assert 'convert_fp16_to_fp32' in captured.out

def test_external_delegate_options_fp32_to_bf16(capfd, delegate_dir,
                                                test_data_folder):
    # create armnn delegate with reduce-fp32-to-bf16 option
    armnn_delegate = tflite.load_delegate(delegate_dir,
                                          options={
                                              'backends': 'CpuRef',
                                              'debug-data': '1',
                                              'reduce-fp32-to-bf16': '1'
                                          })

    model_file_name = 'conv2d.tflite'

    inputShape = [1, 5, 5, 1]
    outputShape = [1, 3, 3, 1]

    inputValues = [
        1, 5, 2, 3, 5, 8, 7, 3, 6, 3, 3, 3, 9, 1, 9, 4, 1, 8, 1, 3, 6, 8, 1, 9,
        2
    ]

    expectedResult = [28, 38, 29, 96, 104, 53, 31, 55, 24]

    input = np.array(inputValues, dtype=np.float32).reshape(inputShape)
    expected_output = np.array(expectedResult,
                               dtype=np.float32).reshape(outputShape)

    # run the inference
    armnn_outputs = run_inference(test_data_folder, model_file_name, [input],
                                  [armnn_delegate])

    # check results
    compare_outputs(armnn_outputs, [expected_output])

    captured = capfd.readouterr()
    assert 'convert_fp32_to_bf16' in captured.out

def test_external_delegate_gpu_fastmath(delegate_dir, test_data_folder):
    # create armnn delegate with enable-fast-math
    # fast-math is only enabled on Conv2d layer, so use conv2d model.
    armnn_delegate = tflite.load_delegate(delegate_dir,
                                          options={
                                              'backends': 'GpuAcc',
                                              'enable-fast-math': '1',
                                              "logging-severity": "info"
                                          })

    model_file_name = 'conv2d.tflite'

    inputShape = [1, 5, 5, 1]
    outputShape = [1, 3, 3, 1]

    inputValues = [
        1, 5, 2, 3, 5, 8, 7, 3, 6, 3, 3, 3, 9, 1, 9, 4, 1, 8, 1, 3, 6, 8, 1, 9,
        2
    ]

    expectedResult = [28, 38, 29, 96, 104, 53, 31, 55, 24]

    input = np.array(inputValues, dtype=np.float32).reshape(inputShape)
    expected_output = np.array(expectedResult,
                               dtype=np.float32).reshape(outputShape)

    # run the inference
    armnn_outputs = run_inference(test_data_folder, model_file_name, [input],
                                  [armnn_delegate])

    # check results
    compare_outputs(armnn_outputs, [expected_output])
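
# GpuAcc tests such as the one above only work on machines with a usable GPU
# backend. A minimal sketch of a skip guard, assuming tflite.load_delegate
# raises ValueError when the delegate cannot be created; the helper name and
# the skip condition are assumptions, not part of the original suite.
import pytest

def load_gpu_delegate_or_skip(delegate_dir, options=None):
    options = options or {'backends': 'GpuAcc'}
    try:
        return tflite.load_delegate(delegate_dir, options=options)
    except ValueError as err:
        pytest.skip('GpuAcc delegate could not be loaded: %s' % err)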
Example no. 5
def evaluate(vocab, vqa, data_loader, criterion, epoch, args):
    """Calculates vqg average loss on data_loader.

    Args:
        vocab: questions and answers vocabulary.
        vqa: visual question answering model.
        data_loader: Iterator for the data.
        criterion: The criterion function used to evaluate the loss.
        epoch: The current epoch number.
        args: Parsed command-line arguments (argparse.Namespace).

    Returns:
        A float value of average loss.
    """
    gts, gens, qs = [], [], []
    vqa.eval()
    total_loss = 0.0
    total_correct = 0.0
    iterations = 0
    total_steps = len(data_loader)
    if args.eval_steps is not None:
        total_steps = min(len(data_loader), args.eval_steps)
    start_time = time.time()
    for i, (feats, questions, categories) in enumerate(data_loader):

        # Set mini-batch dataset.
        if torch.cuda.is_available():
            feats = feats.cuda()
            questions = questions.cuda()
            categories = categories.cuda()
        qlengths = process_lengths(questions)

        # Forward.
        outputs = vqa(feats, questions, qlengths)
        loss = criterion(outputs, categories)
        preds = outputs.max(1)[1]

        # Backprop and optimize.
        total_loss += loss.item()
        total_correct += accuracy(preds, categories)
        iterations += 1

        # Quit after eval_steps.
        if args.eval_steps is not None and i >= args.eval_steps:
            break
        q, gen, gt = parse_outputs(preds, questions, categories, vocab)
        gts.extend(gt)
        gens.extend(gen)
        qs.extend(q)

        # Print logs.
        if i % args.log_step == 0:
            delta_time = time.time() - start_time
            start_time = time.time()
            logging.info('Time: %.4f, Step [%d/%d], '
                         'Avg Loss: %.4f, Avg Acc: %.4f' %
                         (delta_time, i, total_steps, total_loss / iterations,
                          total_correct / iterations))
    # Compare model reconstruction to target
    compare_outputs(gens, qs, gts, logging)
    return total_loss / iterations
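
# A minimal sketch of how evaluate() might be driven from a training loop,
# assuming torch and logging are already imported in this module; the
# train_one_epoch helper and the args fields (num_epochs, model_path) are
# hypothetical names used for illustration only.
def run_training(vocab, vqa, train_loader, val_loader, criterion,
                 optimizer, args):
    best_loss = float('inf')
    for epoch in range(args.num_epochs):
        # train_one_epoch is a hypothetical helper standing in for the
        # project's actual training step.
        train_one_epoch(vqa, train_loader, criterion, optimizer, epoch, args)
        val_loss = evaluate(vocab, vqa, val_loader, criterion, epoch, args)
        logging.info('Epoch [%d/%d], validation loss: %.4f',
                     epoch + 1, args.num_epochs, val_loss)
        if val_loss < best_loss:
            best_loss = val_loss
            torch.save(vqa.state_dict(), args.model_path)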
Example no. 6
def main():
    reference_id = open("input/reference.fasta",
                        "r").readline().split(' ')[0][1:]  # NC_045512.2

    cleanUpOutputDir(folder='output')

    # # ClustalJ = JSON file from the Clustal alignment; MuscleJ = JSON file from the Muscle alignment
    # # Iran
    # ClustalJ = runClustal('analysis/iran-ref.txt', reference_id, 3)
    # MuscleJ = runMuscle('analysis/muscle-I20200523-084930-0610-44096621-p1m.clw', reference_id, 3)
    # diff = utils.compare_outputs(clustal='output/'+ ClustalJ, muscle='output/'+ MuscleJ)
    # utils.saveCompareFile("differences.txt", "Iran", diff)

    # # Israel
    # ClustalJ = runClustal('analysis/israel-ref.txt', reference_id, 4)
    # MuscleJ = runMuscle('analysis/muscle-I20200523-085708-0753-28920419-p1m.clw', reference_id, 4)
    # diff = utils.compare_outputs(clustal='output/'+ ClustalJ, muscle='output/'+ MuscleJ)
    # utils.saveCompareFile("differences.txt", "Israel ", diff)

    # # GISAID only
    # ClustalJ = runClustal('analysis/GISAID-all.txt', reference_id, 7)
    # MuscleJ = runMuscle('analysis/muscle-I20200523-090216-0023-41230765-p1m.clw', reference_id, 7)
    # diff = utils.compare_outputs(clustal='output/'+ ClustalJ, muscle='output/'+ MuscleJ)
    # utils.saveCompareFile("differences.txt", "GISAID ", diff)

    # # NCBI only
    # ClustalJ = runClustal('analysis/all.txt', reference_id, 3)
    # MuscleJ = runMuscle('analysis/muscle-I20200512-170208-0225-69454386-p1m.clw', reference_id, 8)
    # diff = utils.compare_outputs(clustal='output/'+ ClustalJ, muscle='output/'+ MuscleJ)
    # utils.saveCompareFile("differences.txt", "ncbi ", diff)

    # # All sequences
    # ClustalJ = runClustal('analysis/global.txt', reference_id, 14)
    # MuscleJ = runMuscle('analysis/muscle-I20200523-090837-0910-95910164-p1m.clw', reference_id, 15)
    # diff = utils.compare_outputs(clustal='output/'+ ClustalJ, muscle='output/'+ MuscleJ)
    # utils.saveCompareFile("differences.txt","Global ", diff)

    # Compare global alignments
    print('[...]  Parsing ClustalW analysis...')
    ClustalJ = runClustal('analysis/clustal-global.clustal_num', reference_id,
                          14)
    print('[DONE] Parsing ClustalW analysis')

    print('[...]  Parsing Muscle analysis...')
    MuscleJ = runMuscle('analysis/muscle-global.clw', reference_id, 14)
    print('[DONE] Parsing Muscle analysis ')

    print('[...]  Comparing alignments...')
    diff = utils.compare_outputs(clustal='output/' + ClustalJ,
                                 muscle='output/' + MuscleJ,
                                 path='output')
    print('[DONE] Comparing alignments')
    # utils.saveCompareFile("differences.txt","Global ", diff)

    analysis(ClustalJ)
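
# Conventional entry-point guard; whether the original script includes one is
# not shown above, so treat this as an assumption.
if __name__ == '__main__':
    main()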

def test_external_delegate_cpu_options(capfd, delegate_dir, test_data_folder):
    # create armnn delegate with enable-fast-math and number-of-threads options
    # fast-math is only enabled on Conv2d layer, so use conv2d model.
    armnn_delegate = tflite.load_delegate(delegate_dir,
                                          options={
                                              'backends': 'CpuAcc',
                                              'enable-fast-math': '1',
                                              'number-of-threads': '4',
                                              "logging-severity": "info"
                                          })

    model_file_name = 'conv2d.tflite'

    inputShape = [1, 5, 5, 1]
    outputShape = [1, 3, 3, 1]

    inputValues = [
        1, 5, 2, 3, 5, 8, 7, 3, 6, 3, 3, 3, 9, 1, 9, 4, 1, 8, 1, 3, 6, 8, 1, 9,
        2
    ]

    expectedResult = [28, 38, 29, 96, 104, 53, 31, 55, 24]

    input = np.array(inputValues, dtype=np.float32).reshape(inputShape)
    expected_output = np.array(expectedResult,
                               dtype=np.float32).reshape(outputShape)

    # run the inference
    armnn_outputs = run_inference(test_data_folder, model_file_name, [input],
                                  [armnn_delegate])

    # check results
    compare_outputs(armnn_outputs, [expected_output])

    captured = capfd.readouterr()
    assert 'Set CPPScheduler to Linear mode, with 4 threads to use' in captured.out
Example no. 8
def test_codegen(compiler_path, cool_file):
    compare_outputs(compiler_path, tests_dir + cool_file,
                    tests_dir + cool_file[:-3] + '_input.txt',
                    tests_dir + cool_file[:-3] + '_output.txt')

def test_initial_gen(compiler_path, cool_file):
    compare_outputs(compiler_path, tests_dir + cool_file,
                    tests_dir + cool_file[:-3] + '_input.txt',
                    tests_dir + cool_file[:-3] + '_output.txt', timeout=2)

# if __name__ == "__main__":
#     pytest.main(["-m", "initialgen"])
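
# A minimal sketch of a compare_outputs harness consistent with the calls in
# test_codegen/test_initial_gen above: compile the .cl file, run the produced
# program on the given input, and diff its stdout against the expected output.
# The '.mips' artifact name and the use of spim are assumptions about a typical
# COOL course setup, not this project's actual helper.
import subprocess

def compare_outputs(compiler_path, cool_file, input_file, expected_file,
                    timeout=10):
    # Compile the COOL source (assumed to emit <name>.mips next to it).
    subprocess.run([compiler_path, cool_file], check=True, timeout=timeout)
    with open(input_file) as fin, open(expected_file) as fexp:
        run = subprocess.run(['spim', '-file', cool_file[:-3] + '.mips'],
                             stdin=fin, capture_output=True, text=True,
                             timeout=timeout)
        assert run.stdout == fexp.read()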
Example no. 10
                    c1tm = executer.run_cpp_bin(code1_bin, tc_out, code1_out)
                elif ".py" in code1_file:
                    c1tm = executer.run_py_code(code1_fio, tc_out, code1_out)
                print("Code1 executed in %.5f sec" % c1tm)
                utils.copy_file_to_folder_group(i, code1_out)
                stats.append({'code1_time': c1tm})

            if code2_file is not None:
                if ".cpp" in code2_file:
                    c2tm = executer.run_cpp_bin(code2_bin, tc_out, code2_out)
                elif ".py" in code2_file:
                    c2tm = executer.run_py_code(code2_fio, tc_out, code2_out)
                print("Code2 executed in %.5f sec" % c2tm)
                utils.copy_file_to_folder_group(i, code2_out)

                diffs = utils.compare_outputs(code1_out, code2_out, result)
                if diffs == 0:
                    print("Success : both outputs are same")
                elif diffs == -1:
                    print("Failure : invalid output generated")
                else:
                    print("Failure : output different at %d positions" % diffs)

                if diffs != -1:
                    utils.copy_file_to_folder_group(i, result)

                stats[-1].update({'code2_time': c2tm, 'diff': diffs})

            print()

    except KeyboardInterrupt as _:
Example no. 11
                foldr = utils.copy_to_grp(i, tc1_out, **grp_args)
                created_folders.add(foldr)

                stats.append({'code1_time': c1tm})

            if code2_file is not None:
                if ".cpp" in code2_file or ".c" in code2_file:
                    c2tm = executer.run_c_cpp_bin(code2_bin, ttc_out, tc2_out)
                elif ".py" in code2_file:
                    c2tm = executer.run_py_code(code2_fio, ttc_out, tc2_out)
                print("Code2 executed in %.5f sec" % c2tm)

                foldr = utils.copy_to_grp(i, tc2_out, **grp_args)
                created_folders.add(foldr)

                diffs = utils.compare_outputs(tc1_out, tc2_out, tresult)
                if diffs == 0:
                    print("Success : both outputs are same")
                elif diffs == -1:
                    print("Failure : invalid output generated")
                else:
                    print("Failure : output different at %d positions" % diffs)

                if diffs != -1:
                    foldr = utils.copy_to_grp(i, tresult, **grp_args)
                    created_folders.add(foldr)

                stats[-1].update({'code2_time': c2tm, 'diff': diffs})

            print()
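
# A minimal sketch of the utils.compare_outputs(file_a, file_b, result_path)
# helper consistent with how it is used in the two fragments above: return 0
# when the two output files match, -1 when an output cannot be read, and
# otherwise the number of differing positions, writing a diff report to
# result_path. The exact implementation is an assumption.
def compare_outputs(file_a, file_b, result_path):
    try:
        with open(file_a) as fa, open(file_b) as fb:
            lines_a = fa.read().splitlines()
            lines_b = fb.read().splitlines()
    except OSError:
        return -1
    diffs = 0
    with open(result_path, 'w') as report:
        for pos, (la, lb) in enumerate(zip(lines_a, lines_b)):
            if la.strip() != lb.strip():
                diffs += 1
                report.write('line %d: %r != %r\n' % (pos + 1, la, lb))
        # Any extra trailing lines in one file also count as differences.
        diffs += abs(len(lines_a) - len(lines_b))
    return diffs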