Example #1
0
def runValidation(modelName, net, params):
    '''Feed the validation images into the model one at a time and return
    a list of [image filename, predictions] entries.

    For the classification models (MobileNet / ResNet50) the predictions
    come straight from net.predict(); for SSD-MobileNet the runtime writes
    per-image detections to a CSV file which is parsed afterwards.
    '''
    # Results accumulate here as [filename, predictions] pairs
    results = []
    imageFiles = getter.apiGetValidationInputs(modelName, cache=True)
    total = len(imageFiles)
    if modelName in (models.MOBILENET, models.RESNET50):
        log.info('Running prediction...')
        bar = Bar('Running prediction...', max=total)
        for imageFile in imageFiles:
            # predict() returns [filename, pred_1, ..., pred_k]
            single_result = net.predict([imageFile], 1)
            results.append([single_result[0], single_result[1:]])
            bar.next()
        bar.finish()
    elif modelName == models.SSDMOBILENET:
        from . import parse_ssd_output
        bar = Bar('Running prediction...', max=total)
        for imageFile in imageFiles:
            # Detection output is written to disk by the workload; the
            # in-memory return value is not needed here.
            net.predict([imageFile], 1)
            bar.next()
        bar.finish()

        # Parse the CSV detection output the workload produced
        output_path = os.path.join(paths.TARGETS, "openvino_ubuntu",
                                   "workloads", "ssdmobilenet",
                                   "ssd_detection_output.csv")
        results = parse_ssd_output.parse_ssd_detection(output_path)

    return results
Example #2
0
def runValidation(modelName, net, params):
    '''Run every validation image through the model at batch size 1 and
    return a list of [image filename, top prediction] pairs.'''
    imageFiles = getter.apiGetValidationInputs(modelName)
    # Results accumulate here as [filename, prediction] pairs
    results = []
    log.info('Running prediction...')
    total = len(imageFiles)
    for i, imageFile in enumerate(imageFiles):
        data = preprocessor.apiProcess(modelName, imageFile)
        res = net.predict(imageFile, data, 5, 0, 1)
        log.debug("%d predictions left" % (total - (i + 1)))
        # Only the top prediction is kept for the report
        results.append([imageFile, res['predictions'][0]])
    # Release the network's native resources before returning
    net.destructor()
    return results
Example #3
0
def runValidation(modelName, net, params):
    '''Process all of the validation inputs from the apiGet* function.

    This is a reference example. Batch size can be set to whatever is
    optimal. The important things to note are: [a] inputs come from the
    getter, and [b] the results are returned in the form of a list of
    lists defined in the report.py comments, depending on the
    workload/model.

    Note: This function returns its value upstream to the harness through
    the apiRun() API function. It is only an example, apiRun() can be
    implemented however a developer chooses.'''
    imageFiles = getter.apiGetValidationInputs(modelName)
    imageData = []  # current (partial) batch of preprocessed inputs
    # Results accumulate here as [filename, prediction] pairs
    results = []
    t = len(imageFiles)
    log.info('Running prediction...')
    B = 1  # batch size; resnet is now batch 1?
    m = 0  # index of the next filename to pair with a prediction
    for i in range(t):
        # Select the preprocessing path by requested precision.
        if params[const.PRECISION] == const.FP32:
            data = preprocessor.apiProcess(modelName, imageFiles[i])
        elif params[const.PRECISION] == const.INT8:
            data = preprocessor.apiProcess_int8(modelName, imageFiles[i])
        else:
            # Previously an unrecognized precision silently reused stale
            # data (or raised NameError on the first image); fail loudly.
            raise ValueError('Unsupported precision: %s'
                             % params[const.PRECISION])
        imageData.append(data)
        # Flush a full batch, or the final (possibly partial) batch
        if len(imageData) == B or (i + 1) == t:
            res = net.predict(imageData, params)
            log.debug("%d predictions left" % (t - (i + 1)))
            imageData = []
            for pred in res['predictions']:
                results.append([imageFiles[m], pred])
                m += 1
    # For debugging: dump the raw results to a temp JSON file
    tmpdir = tempfile.mkdtemp()
    outputFn = os.path.join(tmpdir, 'outputs.json')
    with open(outputFn, 'w') as fp:
        json.dump(results, fp)
    log.debug('Validation output file is {}'.format(outputFn))
    return results
Example #4
0
def run(modelName, modelFileName, params):
    '''Run inference by shelling out to the per-model C++ executable.

    Writes the input filenames and params to a temp JSON file, invokes
    <modelName>.exe with the input/output paths, and parses the JSON
    results file the executable produces.

    Returns the 'predictions' list in accuracy mode, the 'times' list
    otherwise, or None when the subprocess exits non-zero.'''
    # Prepend the backend-specific library dirs; preserve any existing
    # LD_LIBRARY_PATH instead of clobbering it (resolves the old TODO).
    accDir = 'GpuAcc' if params[const.HARDWARE] == 'gpu' else 'CpuAcc'
    libPath = commonDir + ':' + os.path.join(commonDir, accDir)
    existing = os.environ.get('LD_LIBRARY_PATH')
    os.environ['LD_LIBRARY_PATH'] = (libPath + ':' + existing
                                     if existing else libPath)
    tmpdir = tempfile.mkdtemp()
    inputFn = os.path.join(tmpdir, 'inputs.json')
    outputFn = os.path.join(tmpdir, 'outputs.json')
    if params[const.MODE] == const.ACCURACY:
        imageFileNames = getter.apiGetValidationInputs(modelName, cache=True)
    else:
        imageFileNames = getter.apiGetTestInputs(modelName,
                                                 params[const.BATCH],
                                                 cache=True)
    cxxParams = {
        # Note: 'images' can be up to 5,000 filenames
        'images': imageFileNames,
        'model': os.path.join(paths.MODELS, 'tensorflow', modelName,
                              'frozen_graph.pb'),
        'params': params
    }
    with open(inputFn, 'w') as fp:
        json.dump(cxxParams, fp)
    exeCmd = os.path.join(commonDir, '..', modelName, modelName + '.exe')
    cmd = [exeCmd, inputFn, outputFn]
    log.info('Running prediction...')
    log.debug(cmd)
    ret = subprocess.call(cmd)
    if ret:
        log.error('Inference failed')
        return None
    log.info('Loading results file %s' % outputFn)
    with open(outputFn) as fp:
        returnData = json.load(fp)
    if params[const.MODE] == const.ACCURACY:
        return returnData['predictions']
    else:
        return returnData['times']