Code example #1
0
File: visu_param.py — Project: lpigou/chalearn2014
def process():
    """Preprocess gesture #3 of sample #40 and build body/hand crops.

    Returns a (1, 2, 2, 32, 64, 64) uint8 array: axis 1 is body vs. hand,
    axis 2 is the gray/depth map pair, axis 3 the frames.

    NOTE(review): relies on module globals (data, glob, GestureSample, pp,
    empty, array, cut_img, misc) and Python 2 `xrange`.
    """
    zips = glob(data + "/*.zip")
    # random.shuffle(zips)
    zips.sort()
    chosen = zips[40]
    print(chosen)
    smp = GestureSample(chosen)
    gesture = smp.getGestures()[3]

    skelet, depth, gray, user, c = pp.get_data(smp, gesture)
    user_o = user.copy()
    skelet, c = pp.proc_skelet(skelet)
    user = pp.proc_user(user)
    user_new, depth, c = pp.proc_depth(depth, user, user_o, skelet)
    gray, c = pp.proc_gray(gray, user, skelet)
    user = user_new

    # Stack the three processed streams into one uint8 tensor and move the
    # map axis next to the batch axis.
    video = empty((1, 3) + gray.shape, dtype="uint8")
    video[0, 0] = gray
    video[0, 1] = depth
    video[0, 2] = user

    vid = array(video, dtype="uint8")
    vid = vid.swapaxes(1, 2)
    # for i in xrange(gray.shape[1]):

    out_shape = (1, 2, 2, 32, 64, 64)
    out = empty(out_shape, dtype="uint8")
    edge = out_shape[-1]
    vid = vid[:, :, :out_shape[2]]

    # Hand-crop half-width is a quarter of the person height (skelet[3]);
    # fall back to 100 when the estimated height is implausibly small.
    person_h = skelet[3]
    if person_h < 10:
        person_h = 100
    half_span = person_h * 0.25
    centre = vid.shape[-1] / 2.
    sli = None
    if half_span < centre:
        lo = int(round(centre - half_span))
        hi = int(round(centre + half_span))
        sli = slice(lo, hi)

    for m in xrange(vid.shape[2]):  # maps (gray, depth)
        for f in xrange(vid.shape[3]):  # frames
            # body: trim a 5-pixel border, then resize to edge x edge
            body = cut_img(vid[0, 0, m, f], 5)
            out[0, 0, m, f] = misc.imresize(body, (edge, edge))

            # hand: crop the centred window, then resize
            hand = vid[0, 1, m, f][sli, sli]
            out[0, 1, m, f] = misc.imresize(hand, (edge, edge))
    return out
Code example #2
0
File: visu_param.py — Project: lpigou/chalearn2014
def process():
    """Build a (1, 2, 2, 32, 64, 64) uint8 tensor of body and hand crops
    for gesture #3 of the 41st sample archive under `data`.

    NOTE(review): depends on module globals (data, glob, GestureSample, pp,
    empty, array, cut_img, misc) and Python 2 `xrange`.
    """
    archives = glob(data + "/*.zip")
    # random.shuffle(archives)
    archives.sort()
    archive = archives[40]
    print(archive)
    gs = GestureSample(archive)
    gesture = gs.getGestures()[3]

    skelet, depth, gray, user, c = pp.get_data(gs, gesture)
    user_o = user.copy()
    skelet, c = pp.proc_skelet(skelet)
    user = pp.proc_user(user)
    user_new, depth, c = pp.proc_depth(depth, user, user_o, skelet)
    gray, c = pp.proc_gray(gray, user, skelet)
    user = user_new

    # One batch entry, three maps (gray, depth, user mask).
    video = empty((1, 3) + gray.shape, dtype="uint8")
    video[0, 0], video[0, 1], video[0, 2] = gray, depth, user

    stack = array(video, dtype="uint8").swapaxes(1, 2)
    # for i in xrange(gray.shape[1]):

    result_shape = (1, 2, 2, 32, 64, 64)
    result = empty(result_shape, dtype="uint8")
    side = result_shape[-1]
    stack = stack[:, :, :result_shape[2]]

    # The hand window spans half the person height (0.25 on each side of
    # centre); a tiny height estimate is treated as bogus and reset to 100.
    height = skelet[3]
    if height < 10:
        height = 100
    offset = height * 0.25
    middle = stack.shape[-1] / 2.
    window = None
    if offset < middle:
        window = slice(int(round(middle - offset)),
                       int(round(middle + offset)))

    for map_idx in xrange(stack.shape[2]):  # maps
        for frame in xrange(stack.shape[3]):  # frames
            # body crop: strip 5px border and rescale
            img = misc.imresize(cut_img(stack[0, 0, map_idx, frame], 5),
                                (side, side))
            result[0, 0, map_idx, frame] = img

            # hand crop: centred square window and rescale
            img = misc.imresize(stack[0, 1, map_idx, frame][window, window],
                                (side, side))
            result[0, 1, map_idx, frame] = img
    return result
Code example #3
0
def eval():
    """Stream every *.zip sample under `data` through a sliding-window
    preprocessing pipeline and push (v_new, cur_fr, n, fileName) work
    items onto the queue `q` for a consumer (presumably the classifier)
    to evaluate.

    NOTE(review): relies on module globals (data, step, n_f, vid_res,
    vid_shape, h, q, pp, randi, cut_img, misc, glob, empty, array, time)
    and on Python 2 syntax (print statement, xrange) — shadows the
    builtin `eval`.
    """
    # global pred_file_name

    # pred_file_name = fileName.split('/')
    # pred_file_name = pred_file_name[-1].replace(".zip","_prediction.csv")

    # print fileName

    files = glob(data + "/" + "*.zip")
    files.sort()

    print len(files), "found"

    # Resume partway through the sorted sample list (669 - 470 == 199).
    for fileName in files[669 - 470:]:
        # pred_file_name = fileName.split('/')
        # pred_file_name = pred_file_name[-1].replace(".zip","_prediction.csv")
        print fileName

        # s_time = time.time()
        smp = pp.GestureSample(fileName)
        # print "loading", (time.time()-s_time)/1000.,"ms"
        s_time = time.time()
        # total number of frames in this sample
        n = smp.data['numFrames']
        # depth / user-mask / rgb video streams
        dv, uv, gv = smp.depth, smp.user, smp.rgb

        cur_fr = 1
        new_shape = (step, 128, 128)

        # Prime the window: read the first n_f frames of each stream plus
        # the matching skeletons.
        s = []
        d, u, g = [empty((n_f, ) + vid_res + (3, ), "uint8") for _ in range(3)]
        # take first n_f frames
        for v in dv, uv, gv:
            pp.go_to_frame(v, cur_fr)
        for i, fr in enumerate(range(cur_fr, cur_fr + n_f)):
            s.append(smp.getSkeleton(fr))
            d[i], u[i], g[i] = [v.read()[1] for v in dv, uv, gv]

        d, u, g = [pp.to_grayscale(v) for v in d, u, g]
        # binarise the user (foreground) mask
        u[u < 128], u[u >= 128] = 0, 1
        depth, user, gray, skelet = d, u, g, s
        # keep unprocessed copies: they are shifted in place by the window
        # loop below
        user_o = user.copy()
        depth_o = depth.copy()
        gray_o = gray.copy()
        user_depth = depth_o[user_o == 1]
        skelet, c = pp.proc_skelet(array(skelet).copy())
        user = pp.proc_user(user)

        _, depth, c = pp.proc_depth(depth.copy(), user.copy(), user_o,
                                    array(skelet).copy())
        gray, c = pp.proc_gray(
            gray.copy(), user,
            array(skelet).copy())  #user.copy!!!!!!!!!!!!!!!!!!!
        # video = array([gray,depth],dtype="uint8")
        # video = video.swapaxes(0,1) #(body-hand,gray-depth,fr,h,w)
        # video = video[0:1].astype("uint8")
        # x_.set_value(video,borrow=True)

        # pred = evalu_model()[0][0]
        # print pred.argmax()+1,"\t", "\t",pred.max(),"\t",cur_fr, "-",cur_fr+n_f
        cur_fr += n_f

        # Slide the window forward `step` frames at a time until the
        # sample is exhausted.
        while cur_fr + step < n:
            # time_start = time.time()
            # Read the next `step` frames of each stream.
            sn = []
            dn, un, gn = [
                empty((step, ) + vid_res + (3, ), "uint8") for _ in range(3)
            ]
            # for v in dv,uv,gv: pp.go_to_frame(v, cur_fr)
            for i, fr in enumerate(range(cur_fr, cur_fr + step)):
                sn.append(smp.getSkeleton(fr))
                dn[i], un[i], gn[i] = [v.read()[1] for v in dv, uv, gv]

            dn, un, gn = [pp.to_grayscale(v) for v in dn, un, gn]
            un[un < 128], un[un >= 128] = 0, 1

            # drop the oldest `step` skeletons, append the new ones
            s = s[step:] + sn
            # s.extend(sn)
            skelet, c = pp.proc_skelet(s, _3D=False)

            len_dump = len(depth_o[:step][user_o[:step] == 1])
            un_d = dn[un == 1]

            # Shift each raw buffer left by `step` in place and write the
            # freshly read frames into the tail (order matters here).
            user_o[:-step] = user_o[step:]
            user_o[-step:] = un.copy()
            un = pp.proc_user(un, 3)

            user[:-step] = user[step:]
            user[-step:] = un.copy()

            depth_o[:-step] = depth_o[step:]
            depth_o[-step:] = dn.copy()
            gray_o[:-step] = gray_o[step:]
            gray_o[-step:] = gn.copy()

            # Re-run the depth/gray preprocessing over the full window.
            _, depth, c = pp.proc_depth(depth_o.copy(), user.copy(), user_o,
                                        skelet)
            gray, c = pp.proc_gray(gray_o.copy(), user, skelet)
            traj2D, traj3D, ori, pheight, hand, center = skelet

            # # ---------------------------- DEPTH ------------------------------------------
            # thresh_noise = 200
            # scaler = 4
            # traj2D,traj3D,ori,pheight,hand,center = skelet
            # traj2D = traj2D[:,-step:]
            # # print traj2D.shape

            # #stats
            # # user_depth = depth_o[user_o==1] #11.1%
            # user_depth = hstack((user_depth[len_dump:],un_d))
            # dn_ = dn.copy()
            # med = average(user_depth)
            # med = 255 - med
            # std = user_depth.std()

            # depth_b = pp.cut_body(dn_, center, pheight, hand,new_shape)
            # # user_b = pp.cut_body(un.copy(), center, pheight, hand,new_shape)
            # depth_h = pp.cut_hand(dn_, traj2D, hand,new_shape)
            # # user_h = pp.cut_hand(un.copy(), traj2D, hand,new_shape)

            # new_d = empty((2,step,128,128), dtype="uint8")

            # for i,d in enumerate((depth_b,depth_h)):

            #     d = 255 - d

            #     # nuser_d = d[us==0]
            #     # nuser_d[nuser_d>thresh_noise] = 0
            #     # d[us==0] = nuser_d

            #     d[d>thresh_noise] = 0
            #     #d = inpaint(d, thresh_noise)

            #     thresh_d = med-3*std
            #     # d[d<thresh_d] = thresh_d
            #     d = d-thresh_d
            #     d = clip(d*scaler, 0, 255)

            #     d = d.astype("uint8")
            #     d = pp.medianblur(d)

            #     new_d[i] = d

            # dn = new_d.astype("uint8")
            # depth[:,:-step]=depth[:,step:]
            # depth[:,-step:] = dn.copy()
            # # ---------------------------- DEPTH ------------------------------------------
            # # ---------------------------- GRAY ------------------------------------------

            # gray_b = pp.cut_body(gn.copy(), center, pheight, hand, new_shape)
            # gray_h = pp.cut_hand(gn.copy(), traj2D, hand,new_shape)

            # new_gray = empty((2,step,128,128), dtype="uint8")
            # new_gray[0] = gray_b
            # new_gray[1] = gray_h

            # gn = new_gray.astype("uint8")
            # gray[:,:-step]=gray[:,step:]
            # gray[:,-step:] = gn.copy()

            # # ---------------------------- GRAY ------------------------------------------

            # Assemble the network input: (batch, gray/depth, ...) then swap
            # so maps follow the batch axis.
            video = empty((
                1,
                2,
            ) + gray.shape, dtype="uint8")
            video[0, 0] = gray.copy()
            video[0, 1] = depth.copy()
            video = video.swapaxes(1, 2)  #(body-hand,gray-depth,fr,h,w)

            # if hand == "left":
            #     for i in range(video.shape[1]):
            #         for j in range(video.shape[2]):
            #             for k in range(video.shape[3]):
            #                 video[0,i,j,k] = cv2.flip(video[0,i,j,k],1)

            # video = video[0:1].astype("uint8")

            # ---------------------------AUGMENT--------------------------------------
            v_new = empty((1, 2, 2) + vid_shape, dtype="uint8")
            # p = pheight
            ratio = 0.25
            for i in xrange(video.shape[0]):  #batch

                # Hand-crop window: a quarter of the person height plus a
                # small random jitter; guard against bogus tiny heights.
                if pheight < 10: pheight = 100
                scale = ratio + randi(2) / 100.
                ofs = pheight * scale
                mid = video.shape[-1] / 2.
                sli = None
                if ofs < mid:
                    start = int(round(mid - ofs))
                    end = int(round(mid + ofs))
                    sli = slice(start, end)

                for j in xrange(video.shape[2]):  #maps
                    for k in xrange(video.shape[3]):  #frames
                        #body
                        img = video[i, 0, j, k]
                        img = cut_img(img, 5)
                        img = misc.imresize(img, (h, h))
                        # if j==0: img = 255-misc.imfilter(img,"contour")
                        v_new[i, 0, j, k] = img

                        #hand
                        img = video[i, 1, j, k]
                        img = img[sli, sli]
                        img = misc.imresize(img, (h, h))
                        v_new[i, 1, j, k] = img
            # ---------------------------AUGMENT--------------------------------------
            """    
            x_.set_value(v_new.astype("float32"),borrow=True)

            print "predicting"
            pred = evalu_model()[0][0]
            pred_p = pred.max()
            pred_idx = pred.argmax()+1
            fps = int(1./((time.time()-time_start)/step))
            fr_start = cur_fr+step-n_f
            fr_end = cur_fr+step
            print pred_idx,"\t", "\t",pred_p,"\t",fr_start, "-",fr_end,"\t",fps,'fps'
            predict(pred_idx,pred_p,fr_start)
            # print v_new.shape
            # for i in xrange(v_new.shape[0]):
            #     for j in xrange(v_new.shape[1]):
            #         for k in xrange(v_new.shape[2]):
            #             play_vid(v_new[i,j,k],wait=0)
            
            """
            # print "put"
            # hand the preprocessed window to the consumer queue
            q.put((v_new, cur_fr, n, fileName))

            cur_fr += step
Code example #4
0
def eval():
    """Preprocess every *.zip sample under `data` with a sliding window
    and push (v_new, cur_fr, n, fileName) work items onto the queue `q`;
    also writes a CSV-style header and per-sample id lines to `q2`.

    NOTE(review): relies on module globals (data, step, n_f, vid_res,
    vid_shape, h, q, q2, pp, randi, cut_img, misc, glob, empty, array,
    time) and Python 2 syntax (print statement, xrange) — shadows the
    builtin `eval`.
    """

    files = glob(data + "/" + "*.zip")
    files.sort()

    q2.put("Id,Sequence")

    print len(files), "found"

    for fileName in files[:]:

        # sample id = last 4 chars of the archive name (without ".zip")
        num = fileName.split('/')
        # pred_file_name = pred_file_name[-1].replace(".zip","_prediction.csv")
        num = num[-1].replace(".zip", "")[-4:]
        q2.put("\n" + str(num))
        print fileName

        smp = pp.GestureSample(fileName)
        s_time = time.time()
        # total number of frames in this sample
        n = smp.data['numFrames']
        # depth / user-mask / rgb video streams
        dv, uv, gv = smp.depth, smp.user, smp.rgb

        cur_fr = 1
        new_shape = (step, 128, 128)

        # Prime the window with the first n_f frames plus skeletons.
        s = []
        d, u, g = [empty((n_f, ) + vid_res + (3, ), "uint8") for _ in range(3)]
        for v in dv, uv, gv:
            pp.go_to_frame(v, cur_fr)
        for i, fr in enumerate(range(cur_fr, cur_fr + n_f)):
            s.append(smp.getSkeleton(fr))
            d[i], u[i], g[i] = [v.read()[1] for v in dv, uv, gv]

        d, u, g = [pp.to_grayscale(v) for v in d, u, g]
        # binarise the user (foreground) mask
        u[u < 128], u[u >= 128] = 0, 1
        depth, user, gray, skelet = d, u, g, s
        # raw copies, shifted in place by the window loop below
        user_o = user.copy()
        depth_o = depth.copy()
        gray_o = gray.copy()
        user_depth = depth_o[user_o == 1]
        skelet, c = pp.proc_skelet(array(skelet).copy())
        user = pp.proc_user(user)

        _, depth, c = pp.proc_depth(depth.copy(), user.copy(), user_o,
                                    array(skelet).copy())
        gray, c = pp.proc_gray(
            gray.copy(), user,
            array(skelet).copy())  #user.copy!!!!!!!!!!!!!!!!!!!

        cur_fr += n_f

        # Slide the window forward `step` frames at a time.
        while cur_fr + step < n:
            sn = []
            dn, un, gn = [
                empty((step, ) + vid_res + (3, ), "uint8") for _ in range(3)
            ]
            for i, fr in enumerate(range(cur_fr, cur_fr + step)):
                sn.append(smp.getSkeleton(fr))
                dn[i], un[i], gn[i] = [v.read()[1] for v in dv, uv, gv]

            dn, un, gn = [pp.to_grayscale(v) for v in dn, un, gn]
            un[un < 128], un[un >= 128] = 0, 1

            # drop oldest `step` skeletons, append new ones
            s = s[step:] + sn
            skelet, c = pp.proc_skelet(s, _3D=False)

            len_dump = len(depth_o[:step][user_o[:step] == 1])
            un_d = dn[un == 1]

            # Shift each raw buffer left by `step` in place and append the
            # new frames at the tail (statement order matters).
            user_o[:-step] = user_o[step:]
            user_o[-step:] = un.copy()
            un = pp.proc_user(un, 3)

            user[:-step] = user[step:]
            user[-step:] = un.copy()

            depth_o[:-step] = depth_o[step:]
            depth_o[-step:] = dn.copy()
            gray_o[:-step] = gray_o[step:]
            gray_o[-step:] = gn.copy()

            # Re-run preprocessing over the full shifted window.
            _, depth, c = pp.proc_depth(depth_o.copy(), user.copy(), user_o,
                                        skelet)
            gray, c = pp.proc_gray(gray_o.copy(), user, skelet)
            traj2D, traj3D, ori, pheight, hand, center = skelet

            video = empty((
                1,
                2,
            ) + gray.shape, dtype="uint8")
            video[0, 0] = gray.copy()
            video[0, 1] = depth.copy()
            video = video.swapaxes(1, 2)  #(body-hand,gray-depth,fr,h,w)

            # ---------------------------AUGMENT--------------------------------------
            v_new = empty((1, 2, 2) + vid_shape, dtype="uint8")
            # p = pheight
            ratio = 0.25
            for i in xrange(video.shape[0]):  #batch

                # Hand-crop window: quarter of person height plus small
                # random jitter; tiny heights treated as bogus.
                if pheight < 10: pheight = 100
                scale = ratio + randi(2) / 100.
                ofs = pheight * scale
                mid = video.shape[-1] / 2.
                sli = None
                if ofs < mid:
                    start = int(round(mid - ofs))
                    end = int(round(mid + ofs))
                    sli = slice(start, end)

                for j in xrange(video.shape[2]):  #maps
                    for k in xrange(video.shape[3]):  #frames
                        #body
                        img = video[i, 0, j, k]
                        img = cut_img(img, 5)
                        img = misc.imresize(img, (h, h))
                        v_new[i, 0, j, k] = img

                        #hand
                        img = video[i, 1, j, k]
                        img = img[sli, sli]
                        img = misc.imresize(img, (h, h))
                        v_new[i, 1, j, k] = img
            # ---------------------------AUGMENT--------------------------------------

            # hand the preprocessed window to the consumer queue
            q.put((v_new, cur_fr, n, fileName))

            cur_fr += step
Code example #5
0
File: eval_old.py — Project: lpigou/chalearn2014
def eval():
    """Stream every *.zip sample under `data` through a sliding-window
    preprocessing pipeline and push (v_new, cur_fr, n, fileName) items
    onto the queue `q` for a consumer to classify.

    NOTE(review): relies on module globals (data, step, n_f, vid_res,
    vid_shape, h, q, pp, randi, cut_img, misc, glob, empty, array, time)
    and Python 2 syntax (print statement, xrange) — shadows the builtin
    `eval`.
    """
    # global pred_file_name

    # pred_file_name = fileName.split('/')
    # pred_file_name = pred_file_name[-1].replace(".zip","_prediction.csv")

    # print fileName

    files = glob(data+"/"+"*.zip")
    files.sort()

    print len(files), "found"

    # Resume partway through the sorted list (669-470 == 199).
    for fileName in files[669-470:]:
        # pred_file_name = fileName.split('/')
        # pred_file_name = pred_file_name[-1].replace(".zip","_prediction.csv")
        print fileName

        # s_time = time.time()
        smp = pp.GestureSample(fileName)
        # print "loading", (time.time()-s_time)/1000.,"ms"
        s_time = time.time()
        # total frames in this sample
        n = smp.data['numFrames']
        # depth / user-mask / rgb streams
        dv,uv,gv = smp.depth, smp.user, smp.rgb

        cur_fr = 1
        new_shape = (step,128,128)

        # Prime the window with the first n_f frames plus skeletons.
        s = []
        d,u,g = [empty((n_f,)+vid_res+(3,), "uint8") for _ in range(3)]
        # take first n_f frames
        for v in dv,uv,gv: pp.go_to_frame(v, cur_fr)
        for i,fr in enumerate(range(cur_fr,cur_fr+n_f)):
            s.append(smp.getSkeleton(fr))
            d[i],u[i],g[i] = [v.read()[1] for v in dv,uv,gv]

        d,u,g = [pp.to_grayscale(v) for v in d,u,g]
        # binarise the user (foreground) mask
        u[u<128], u[u>=128] = 0, 1
        depth,user,gray,skelet = d,u,g,s
        # raw copies, shifted in place by the window loop below
        user_o = user.copy()
        depth_o = depth.copy()
        gray_o = gray.copy()
        user_depth = depth_o[user_o==1]
        skelet,c =pp.proc_skelet(array(skelet).copy())
        user = pp.proc_user(user)


        _,depth,c = pp.proc_depth(depth.copy(), user.copy(), user_o, array(skelet).copy())
        gray,c = pp.proc_gray(gray.copy(), user,  array(skelet).copy()) #user.copy!!!!!!!!!!!!!!!!!!!
        # video = array([gray,depth],dtype="uint8")
        # video = video.swapaxes(0,1) #(body-hand,gray-depth,fr,h,w)
        # video = video[0:1].astype("uint8")
        # x_.set_value(video,borrow=True)

        # pred = evalu_model()[0][0]
        # print pred.argmax()+1,"\t", "\t",pred.max(),"\t",cur_fr, "-",cur_fr+n_f
        cur_fr += n_f

        # Slide the window forward `step` frames at a time.
        while cur_fr+step<n:
            # time_start = time.time()
            sn=[]
            dn,un,gn = [empty((step,)+vid_res+(3,), "uint8") for _ in range(3)]
            # for v in dv,uv,gv: pp.go_to_frame(v, cur_fr)
            for i,fr in enumerate(range(cur_fr,cur_fr+step)):
                sn.append(smp.getSkeleton(fr))
                dn[i],un[i],gn[i] = [v.read()[1] for v in dv,uv,gv]

            dn,un,gn = [pp.to_grayscale(v) for v in dn,un,gn]
            un[un<128], un[un>=128] = 0,1

            # drop oldest `step` skeletons, append new ones
            s = s[step:] + sn
            # s.extend(sn)
            skelet,c =pp.proc_skelet(s,_3D=False)

            len_dump = len(depth_o[:step][user_o[:step]==1])
            un_d = dn[un==1]

            # Shift each raw buffer left by `step` in place and append the
            # new frames at the tail (statement order matters).
            user_o[:-step]=user_o[step:]
            user_o[-step:] = un.copy()
            un = pp.proc_user(un,3)

            user[:-step]=user[step:]
            user[-step:] = un.copy()

            depth_o[:-step]=depth_o[step:]
            depth_o[-step:] = dn.copy()
            gray_o[:-step]=gray_o[step:]
            gray_o[-step:] = gn.copy()

            # Re-run preprocessing over the full shifted window.
            _,depth,c = pp.proc_depth(depth_o.copy(), user.copy(), user_o, skelet)
            gray,c = pp.proc_gray(gray_o.copy(), user,  skelet)
            traj2D,traj3D,ori,pheight,hand,center = skelet

            # # ---------------------------- DEPTH ------------------------------------------
            # thresh_noise = 200
            # scaler = 4
            # traj2D,traj3D,ori,pheight,hand,center = skelet
            # traj2D = traj2D[:,-step:]
            # # print traj2D.shape

            # #stats
            # # user_depth = depth_o[user_o==1] #11.1%
            # user_depth = hstack((user_depth[len_dump:],un_d))
            # dn_ = dn.copy()
            # med = average(user_depth)
            # med = 255 - med
            # std = user_depth.std()

            # depth_b = pp.cut_body(dn_, center, pheight, hand,new_shape)
            # # user_b = pp.cut_body(un.copy(), center, pheight, hand,new_shape)
            # depth_h = pp.cut_hand(dn_, traj2D, hand,new_shape)
            # # user_h = pp.cut_hand(un.copy(), traj2D, hand,new_shape)

            # new_d = empty((2,step,128,128), dtype="uint8")

            # for i,d in enumerate((depth_b,depth_h)):

            #     d = 255 - d

            #     # nuser_d = d[us==0]
            #     # nuser_d[nuser_d>thresh_noise] = 0
            #     # d[us==0] = nuser_d

            #     d[d>thresh_noise] = 0
            #     #d = inpaint(d, thresh_noise)

            #     thresh_d = med-3*std
            #     # d[d<thresh_d] = thresh_d
            #     d = d-thresh_d
            #     d = clip(d*scaler, 0, 255)

            #     d = d.astype("uint8")
            #     d = pp.medianblur(d)

            #     new_d[i] = d

            # dn = new_d.astype("uint8")
            # depth[:,:-step]=depth[:,step:]
            # depth[:,-step:] = dn.copy()
            # # ---------------------------- DEPTH ------------------------------------------
            # # ---------------------------- GRAY ------------------------------------------

            # gray_b = pp.cut_body(gn.copy(), center, pheight, hand, new_shape)
            # gray_h = pp.cut_hand(gn.copy(), traj2D, hand,new_shape)

            # new_gray = empty((2,step,128,128), dtype="uint8")
            # new_gray[0] = gray_b
            # new_gray[1] = gray_h

            # gn = new_gray.astype("uint8")
            # gray[:,:-step]=gray[:,step:]
            # gray[:,-step:] = gn.copy()

            # # ---------------------------- GRAY ------------------------------------------

            # Assemble the network input tensor.
            video = empty((1,2,)+gray.shape,dtype="uint8")
            video[0,0] = gray.copy()
            video[0,1] = depth.copy()
            video = video.swapaxes(1,2) #(body-hand,gray-depth,fr,h,w)

            # if hand == "left":
            #     for i in range(video.shape[1]):
            #         for j in range(video.shape[2]):
            #             for k in range(video.shape[3]):
            #                 video[0,i,j,k] = cv2.flip(video[0,i,j,k],1)

            # video = video[0:1].astype("uint8")

            # ---------------------------AUGMENT--------------------------------------
            v_new = empty((1,2,2)+vid_shape,dtype="uint8")
            # p = pheight
            ratio = 0.25
            for i in xrange(video.shape[0]): #batch

                # Hand-crop window: quarter of person height plus small
                # random jitter; tiny heights treated as bogus.
                if pheight < 10: pheight = 100
                scale = ratio+randi(2)/100.
                ofs = pheight*scale
                mid =  video.shape[-1]/2.
                sli = None
                if ofs < mid:
                    start = int(round(mid-ofs))
                    end = int(round(mid+ofs))
                    sli = slice(start,end)

                for j in xrange(video.shape[2]): #maps
                    for k in xrange(video.shape[3]): #frames
                        #body
                        img = video[i,0,j,k]
                        img = cut_img(img,5)
                        img = misc.imresize(img,(h,h))
                        # if j==0: img = 255-misc.imfilter(img,"contour")
                        v_new[i,0,j,k] = img

                        #hand
                        img = video[i,1,j,k]
                        img = img[sli,sli]
                        img = misc.imresize(img,(h,h))
                        v_new[i,1,j,k] = img
            # ---------------------------AUGMENT--------------------------------------

            """    
            x_.set_value(v_new.astype("float32"),borrow=True)

            print "predicting"
            pred = evalu_model()[0][0]
            pred_p = pred.max()
            pred_idx = pred.argmax()+1
            fps = int(1./((time.time()-time_start)/step))
            fr_start = cur_fr+step-n_f
            fr_end = cur_fr+step
            print pred_idx,"\t", "\t",pred_p,"\t",fr_start, "-",fr_end,"\t",fps,'fps'
            predict(pred_idx,pred_p,fr_start)
            # print v_new.shape
            # for i in xrange(v_new.shape[0]):
            #     for j in xrange(v_new.shape[1]):
            #         for k in xrange(v_new.shape[2]):
            #             play_vid(v_new[i,j,k],wait=0)
            
            """
            # print "put"
            # hand the preprocessed window to the consumer queue
            q.put((v_new,cur_fr,n, fileName))

            cur_fr += step
Code example #6
0
File: eval2013.py — Project: pangyuteng/chalearn2014
def eval():
    """Preprocess every *.zip sample under `data` with a sliding window,
    push (v_new, cur_fr, n, fileName) items onto the queue `q`, and write
    CSV id lines ("Id,Sequence" header, then one id per sample) to `q2`.

    NOTE(review): relies on module globals (data, step, n_f, vid_res,
    vid_shape, h, q, q2, pp, randi, cut_img, misc, glob, empty, array,
    time) and Python 2 syntax (print statement, xrange) — shadows the
    builtin `eval`.
    """

    files = glob(data + "/" + "*.zip")
    files.sort()

    q2.put("Id,Sequence")

    print len(files), "found"

    for fileName in files[:]:

        # sample id = last 4 chars of the archive name (without ".zip")
        num = fileName.split("/")
        # pred_file_name = pred_file_name[-1].replace(".zip","_prediction.csv")
        num = num[-1].replace(".zip", "")[-4:]
        q2.put("\n" + str(num))
        print fileName

        smp = pp.GestureSample(fileName)
        s_time = time.time()
        # total frames in this sample
        n = smp.data["numFrames"]
        # depth / user-mask / rgb streams
        dv, uv, gv = smp.depth, smp.user, smp.rgb

        cur_fr = 1
        new_shape = (step, 128, 128)

        # Prime the window with the first n_f frames plus skeletons.
        s = []
        d, u, g = [empty((n_f,) + vid_res + (3,), "uint8") for _ in range(3)]
        for v in dv, uv, gv:
            pp.go_to_frame(v, cur_fr)
        for i, fr in enumerate(range(cur_fr, cur_fr + n_f)):
            s.append(smp.getSkeleton(fr))
            d[i], u[i], g[i] = [v.read()[1] for v in dv, uv, gv]

        d, u, g = [pp.to_grayscale(v) for v in d, u, g]
        # binarise the user (foreground) mask
        u[u < 128], u[u >= 128] = 0, 1
        depth, user, gray, skelet = d, u, g, s
        # raw copies, shifted in place by the window loop below
        user_o = user.copy()
        depth_o = depth.copy()
        gray_o = gray.copy()
        user_depth = depth_o[user_o == 1]
        skelet, c = pp.proc_skelet(array(skelet).copy())
        user = pp.proc_user(user)

        _, depth, c = pp.proc_depth(depth.copy(), user.copy(), user_o, array(skelet).copy())
        gray, c = pp.proc_gray(gray.copy(), user, array(skelet).copy())  # user.copy!!!!!!!!!!!!!!!!!!!

        cur_fr += n_f

        # Slide the window forward `step` frames at a time.
        while cur_fr + step < n:
            sn = []
            dn, un, gn = [empty((step,) + vid_res + (3,), "uint8") for _ in range(3)]
            for i, fr in enumerate(range(cur_fr, cur_fr + step)):
                sn.append(smp.getSkeleton(fr))
                dn[i], un[i], gn[i] = [v.read()[1] for v in dv, uv, gv]

            dn, un, gn = [pp.to_grayscale(v) for v in dn, un, gn]
            un[un < 128], un[un >= 128] = 0, 1

            # drop oldest `step` skeletons, append new ones
            s = s[step:] + sn
            skelet, c = pp.proc_skelet(s, _3D=False)

            len_dump = len(depth_o[:step][user_o[:step] == 1])
            un_d = dn[un == 1]

            # Shift each raw buffer left by `step` in place and append the
            # new frames at the tail (statement order matters).
            user_o[:-step] = user_o[step:]
            user_o[-step:] = un.copy()
            un = pp.proc_user(un, 3)

            user[:-step] = user[step:]
            user[-step:] = un.copy()

            depth_o[:-step] = depth_o[step:]
            depth_o[-step:] = dn.copy()
            gray_o[:-step] = gray_o[step:]
            gray_o[-step:] = gn.copy()

            # Re-run preprocessing over the full shifted window.
            _, depth, c = pp.proc_depth(depth_o.copy(), user.copy(), user_o, skelet)
            gray, c = pp.proc_gray(gray_o.copy(), user, skelet)
            traj2D, traj3D, ori, pheight, hand, center = skelet

            video = empty((1, 2) + gray.shape, dtype="uint8")
            video[0, 0] = gray.copy()
            video[0, 1] = depth.copy()
            video = video.swapaxes(1, 2)  # (body-hand,gray-depth,fr,h,w)

            # ---------------------------AUGMENT--------------------------------------
            v_new = empty((1, 2, 2) + vid_shape, dtype="uint8")
            # p = pheight
            ratio = 0.25
            for i in xrange(video.shape[0]):  # batch

                # Hand-crop window: quarter of person height plus small
                # random jitter; tiny heights treated as bogus.
                if pheight < 10:
                    pheight = 100
                scale = ratio + randi(2) / 100.0
                ofs = pheight * scale
                mid = video.shape[-1] / 2.0
                sli = None
                if ofs < mid:
                    start = int(round(mid - ofs))
                    end = int(round(mid + ofs))
                    sli = slice(start, end)

                for j in xrange(video.shape[2]):  # maps
                    for k in xrange(video.shape[3]):  # frames
                        # body
                        img = video[i, 0, j, k]
                        img = cut_img(img, 5)
                        img = misc.imresize(img, (h, h))
                        v_new[i, 0, j, k] = img

                        # hand
                        img = video[i, 1, j, k]
                        img = img[sli, sli]
                        img = misc.imresize(img, (h, h))
                        v_new[i, 1, j, k] = img
            # ---------------------------AUGMENT--------------------------------------

            # hand the preprocessed window to the consumer queue
            q.put((v_new, cur_fr, n, fileName))

            cur_fr += step