import sys

import cv2
import numpy as np
from rknn.api import RKNN

if __name__ == '__main__':

    # --- Part 1: custom-op demo -------------------------------------------
    rknn = RKNN(verbose=False)

    # Register the custom ResizeArea op before loading a graph that uses it.
    rknn.register_op('./resize_area/ResizeArea.rknnop')

    # Convert the TensorFlow frozen graph; the input is a 32x32x3 image.
    rknn.load_tensorflow(tf_pb='./resize_area_test.pb',
                         inputs=['input'],
                         outputs=['resize_area_0'],
                         input_size_list=[[32, 32, 3]])
    rknn.build(do_quantization=False)
    # rknn.export_rknn('./resize_area.rknn')

    # rknn.load_rknn('./resize_area.rknn')

    rknn.init_runtime()

    img = cv2.imread('./dog_32x32.jpg')

    outs = rknn.inference(inputs=[img])

    # ResizeArea upscales 32x32 -> 64x64; save the result for inspection.
    out_img = outs[0].astype('uint8')
    out_img = np.reshape(out_img, (64, 64, 3))
    cv2.imwrite('./out.jpg', out_img)

    # --- Part 2: precompile an existing RKNN model for the NPU ------------
    # usage: script <original.rknn> <precompiled.rknn>
    orig_rknn = sys.argv[1]
    hw_rknn = sys.argv[2]

    # Create RKNN object
    rknn = RKNN()

    # Load rknn model
    print('--> Loading RKNN model')
    ret = rknn.load_rknn(orig_rknn)
    if ret != 0:
        print('Load RKNN model failed!')
        exit(ret)
    print('done')

    # Init runtime environment
    print('--> Init runtime environment')

    # Note: you must set rknn2precompile=True when call rknn.init_runtime()
    #       RK3399Pro with android system does not support this function.
    ret = rknn.init_runtime(target='rk1808', rknn2precompile=True)
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Check the export result instead of silently discarding it.
    ret = rknn.export_rknn_precompile_model(hw_rknn)
    if ret != 0:
        print('Export precompiled model failed!')
        exit(ret)

    rknn.release()

# Example #3
# 0
    # NOTE(review): fragment — the enclosing scope (model load/build above
    # this point) is not visible here; `rknn`, `IMG_PATH` and `show_outputs`
    # are presumably defined earlier in the file — verify.
    print('--> Export RKNN model')
    ret = rknn.export_rknn('./xception.rknn')
    if ret != 0:
        print('Export xception.rknn failed!')
        exit(ret)
    print('done')

    # ret = rknn.load_rknn('./xception.rknn')

    # Set inputs
    img = cv2.imread(IMG_PATH)
    # cv2 loads BGR; convert to RGB for the network.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    # ret = rknn.init_runtime(target='rk1808')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[img])
    show_outputs(outputs)
    print('done')

    rknn.release()

def run_ssd(img_path, priorbox_path):
    """Convert the pedestrian MobileNet-SSD caffe model to RKNN, run it on
    one image, and display boxes whose confidence exceeds 0.9.

    Args:
        img_path: image file to run detection on.
        priorbox_path: text file of comma-separated floats; the last line
            read supplies 2 * outlen values (prior boxes then variances).
    """
    # caffe_proto="./MobileNetSSD_deploy.prototxt"
    caffe_proto = "./MobileNetSSD_deploy_truncated.prototxt"
    caffe_weight = "./MobileNetSSD_deploy10695.caffemodel"

    rknn_model = "./pedestrian_ssd.rknn"

    # caffe2rknn is presumably defined elsewhere in this file — verify.
    caffe2rknn(caffe_proto, caffe_weight, rknn_model)

    print("run ssd")
    rknn = RKNN(verbose=True)
    # Check return codes instead of silently discarding them.
    ret = rknn.load_rknn(path=rknn_model)
    if ret != 0:
        print("load rknn model failed")
        return
    ret = rknn.init_runtime()
    # ret = rknn.init_runtime(target='rk1808', device_id='012345789AB')
    if ret != 0:
        print("init runtime failed")
        return

    img = cv2.imread(img_path)
    img = cv2.resize(img, (300, 300))
    print("shape:", img.shape)
    outlen = 7668  # change to your model

    # Each line overwrites the previous one, so effectively the last line
    # of the file is used (original behaviour preserved).
    priorbox = []
    with open(priorbox_path) as f:
        for line in f:
            arr = line.strip().split(",")
            priorbox = list(map(float, arr))
    priorbox = np.reshape(np.array(priorbox), (2, outlen))

    outputs = rknn.inference(inputs=[img])  # ,data_format="nchw",data_type="float32"

    print("pb:", priorbox.shape, priorbox)
    print("loc:", outputs[0].shape, outputs[0])
    print("conf:", outputs[1].shape, outputs[1])

    NUM_RESULTS = outlen // 4
    NUM_CLASSES = 2
    box_priors = priorbox[0].reshape((NUM_RESULTS, 4))
    box_var = priorbox[1].reshape((NUM_RESULTS, 4))
    loc = outputs[0].reshape((NUM_RESULTS, 4))
    conf = outputs[1].reshape((NUM_RESULTS, NUM_CLASSES))

    # Two-class softmax over the raw logits (vectorized; equivalent to the
    # per-row [x/(x+y), y/(x+y)] computation).
    e = np.exp(conf)
    conf = e / e.sum(axis=1, keepdims=True)

    # Post Process: decode each box against its prior with per-coordinate
    # variances (standard SSD decoding), scale to the 300x300 input.
    input_size = 300
    for i in range(NUM_RESULTS):
        pb = box_priors[i]
        lc = loc[i]
        var = box_var[i]

        pb_w = pb[2] - pb[0]
        pb_h = pb[3] - pb[1]
        pb_cx = (pb[0] + pb[2]) * 0.5
        pb_cy = (pb[1] + pb[3]) * 0.5

        bbox_cx = var[0] * lc[0] * pb_w + pb_cx
        bbox_cy = var[1] * lc[1] * pb_h + pb_cy
        # np.exp instead of math.exp: `math` was never imported in this file.
        bbox_w = np.exp(var[2] * lc[2]) * pb_w
        bbox_h = np.exp(var[3] * lc[3]) * pb_h

        xmin = (bbox_cx - bbox_w * 0.5) * input_size
        ymin = (bbox_cy - bbox_h * 0.5) * input_size
        xmax = (bbox_cx + bbox_w * 0.5) * input_size
        ymax = (bbox_cy + bbox_h * 0.5) * input_size

        score = conf[i][1]

        if score > 0.9:
            print("score:", score)
            cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 0, 255), 3)

    # plt is matplotlib.pyplot, presumably imported elsewhere — verify.
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
    plt.show()

    print("ssd finished")
        # NOTE(review): fragment — cut off at both ends (the enclosing scope
        # above and the body of the trailing `if` are not visible here);
        # `rknn` and `INPUT_SIZE` are presumably defined earlier — verify.
        rknn.build(do_quantization=True, dataset='./dataset.txt')
        print('done')

        # Export RKNN Model
        rknn.export_rknn('./ssd_mobilenet_v1_coco.rknn')

    # Set inputs
    orig_img = cv2.imread('./road.bmp')
    # cv2 loads BGR; the network presumably expects RGB — verify.
    img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (INPUT_SIZE, INPUT_SIZE),
                     interpolation=cv2.INTER_CUBIC)

    # init runtime environment
    print('--> Init runtime environment')
    # ret = rknn.init_runtime()
    ret = rknn.init_runtime(target='rk1808')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Repeatedly run inference, apparently to check result stability against
    # the first run — the comparison body is cut off below.
    times = 0
    first_outputs = None
    while True:
        # Inference
        times += 1
        print('--> Running model: %d' % (times))
        outputs = rknn.inference(inputs=[img])
        # print('', type(outputs[0]), type(outputs[1]))
        print('inference result: ', outputs)
        if not first_outputs:
# Example #6
# 0
def main(folder="test"):
    """Run the ESRGAN super-resolution RKNN model over every image in
    *folder* and write 4x-upscaled results.

    PRESET (network input size) and OUT_DIR (output directory) are
    presumably module-level constants defined elsewhere — verify.
    """
    files = os.listdir(folder)

    # Build and initialise the model ONCE: conversion, build and runtime
    # init are loop-invariant, so there is no need to redo them per file
    # as the original did.
    rknn = RKNN()
    #rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')

    # Load TensorFlow Model
    print('--> Loading model')
    rknn.load_tensorflow(tf_pb='pretrained/SR_freeze.pb',
                         inputs=['ESRGAN_g/Conv2D'],
                         outputs=['output_image'],
                         input_size_list=[[PRESET, PRESET, 3]])
    print('done')

    # Build Model
    print('--> Building model')
    rknn.build(do_quantization=False)
    print('done')

    # Export RKNN Model, then reload it.
    rknn.export_rknn('./sr_rknn.rknn')
    rknn.load_rknn('./sr_rknn.rknn')

    # init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        # Inference would fail anyway; bail out instead of continuing.
        print('Init runtime environment failed')
        rknn.release()
        return

    for i, filename in enumerate(files):
        img = cv2.imread("{}/{}".format(folder, filename))
        if img is None:
            continue  # skip unreadable / non-image files
        # Normalise pixels to [-1, 1] as the network expects.
        img = (img - 127.5) / 127.5
        h, w = img.shape[:2]
        print("w, h = ", w, h)
        inp = cv2.resize(img, (PRESET, PRESET),
                         interpolation=cv2.INTER_CUBIC)
        inp = inp.reshape(PRESET, PRESET, 3)
        inp = np.array(inp, dtype=np.float32)

        # Inference
        print('--> Running model')
        output_image = rknn.inference(inputs=[inp])
        print('complete')
        out = np.array(output_image, dtype=np.float64)
        print("output_image = ", out.shape)
        out = np.squeeze(out)

        # The network upscales by 4x; resize back to 4x the ORIGINAL size.
        Y_ = out.reshape(PRESET * 4, PRESET * 4, 3)
        Y_ = cv2.resize(Y_, (w * 4, h * 4), interpolation=cv2.INTER_CUBIC)
        print("output shape is ", Y_.shape)

        # Post-processing: map [-1, 1] back to [0, 255].
        Y_ = (Y_ + 1) * 127.5
        cv2.imwrite("{}/{}_yval.png".format(OUT_DIR, i), Y_)

    # Evaluate Perf on Simulator
    #rknn.eval_perf()

    # Release RKNN Context
    rknn.release()
 def run(self):  # put the code to execute in run(); a thread runs run() right after it is started
     """Face-detection worker loop for an MTCNN pipeline on RKNN.

     Loads the RNet/ONet models, then repeatedly pulls frame pairs from the
     shared IMAGE_list, runs detect_face when consecutive frames differ
     enough, and publishes boxes via the shared boundingbox_list.

     NOTE(review): method of a Thread subclass whose header is not visible
     here; tic/toc, compare_image, detect_face, init_pnet, imreadLock and
     the globals are presumably defined elsewhere in this file — verify.
     """
     minsize = 20  # minimum face size passed to detect_face
     threshold = [0.8, 0.9, 0.95]  # per-stage (P/R/O-net) score thresholds
     factor = 0.709  # image-pyramid scale factor
     pnet_rknn_list = init_pnet()
     rnet_rknn = RKNN()
     onet_rknn = RKNN()
     rnet_rknn.load_rknn('./RNet.rknn')
     onet_rknn.load_rknn('./ONet.rknn')
     ret = rnet_rknn.init_runtime()
     if ret != 0:
         #print('Init rnet runtime environment failed')
         exit(ret)
     ret = onet_rknn.init_runtime()
     if ret != 0:
         #print('Init onet runtime environment failed')
         exit(ret)
     # Re-point this process's stdout/stderr at the terminal device.
     sys.stdout = open('/dev/stdout', 'w')
     sys.stderr = open('/dev/stderr', 'w')
     global proflag
     global IMAGE_list
     global boundingbox_list
     nonfacecount = 0
     #wrongimg = 1
     # Run until the producer clears proflag.
     while (proflag == 1):
         imreadLock.acquire()
         img0 = IMAGE_list[0].copy()
         img = IMAGE_list[1].copy()
         imreadLock.release()
         #tic()
         # Skip detection when consecutive frames are nearly identical.
         score_cmp = compare_image(img0, img)
         #print("score_cmp",score_cmp)
         #toc()
         if score_cmp < 0.98:
             #imreadLock.release()
             #print("detect face start")
             #cv2.imwrite("aa.jpg",img)
             tic()
             boundingboxes, points = detect_face(img, minsize,
                                                 pnet_rknn_list, rnet_rknn,
                                                 onet_rknn, threshold,
                                                 False, factor)
             #print("boundingboxes shape",boundingboxes.shape)
             print("total cost")
             toc()
             if boundingboxes.shape[0] != 0:
                 if len(boundingbox_list) != 0:
                     boundingbox_list.clear()
                 boundingbox_list.append(boundingboxes)
             else:
                 #path = str(wrongimg)+".jpg"
                 #cv2.imwrite(path,img)
                 #wrongimg += 1
                 # Debounce: only clear published boxes after 3 consecutive
                 # detection misses, to ride out transient failures.
                 nonfacecount += 1
                 if nonfacecount >= 3:
                     boundingbox_list.clear()
                     nonfacecount = 0
     for i in range(2):
         pnet_rknn_list[i].release()
     rnet_rknn.release()
     onet_rknn.release()
# Example #8
# 0
    # NOTE(review): fragment — loop body whose header is not visible here;
    # `name`, `idx`, the eval_* lists and `load_images` are presumably
    # defined earlier in the file — verify.
    name = name[:name.find('.')]  # strip the file extension
    eval_img_name.append(name)
    eval_low_im = load_images(eval_low_data_name[idx])
    eval_low_data.append(eval_low_im)
    print(eval_low_im.shape) # (400, 600, 3)
    height, width, channels = eval_low_im.shape # store the original size of the test image >> used when saving the final image
    input_low = eval_low_data[idx]
    input_low_eval = np.expand_dims(input_low, axis=0) # (1, 400, 600, 3)

# NOTE(review): top-level script; `rknn`, `input_low_eval` and
# `save_images` are presumably defined earlier in the file — verify.
sample_dir = './results/test/'
if not os.path.isdir(sample_dir):
    os.makedirs(sample_dir)

# Run DecomNet
print('-> Running model 1')
ret1 = rknn.init_runtime()
if ret1 != 0:
    print('Init runtime environment 1 failed')
    exit(ret1)
print('done')

print('-> Inference model 1')
# The model returns two tensors: reflectance and illumination maps.
decom_r_low, decom_i_low = rknn.inference(inputs=[input_low_eval]) # decom_r_low (3 channels)
#print(type(decom_r_low)) #numpy.ndarray
print('=> 1 run success')

#image save (works)
save_images(os.path.join(sample_dir, 'decom_r_low.png'), decom_r_low)
save_images(os.path.join(sample_dir, 'decom_i_low.png'), decom_i_low)

#------------------------------------------------------------------------------------------------------------