Example #1
def create_pred_movie_no_conf(conf, predList, moviename, outmovie, outtype):
    predLocs, predscores, predmaxscores = predList
    #     assert false, 'stop here'
    tdir = tempfile.mkdtemp()

    cap = cv2.VideoCapture(moviename)
    nframes = int(cap.get(cvc.FRAME_COUNT))

    cmap = cm.get_cmap('jet')
    rgba = cmap(np.linspace(0, 1, conf.n_classes))

    fig = mpl.figure.Figure(figsize=(9, 4))
    canvas = FigureCanvasAgg(fig)
    for curl in range(nframes):
        framein = myutils.readframe(cap, curl)
        framein = crop_images(framein, conf)

        fig.clf()
        ax1 = fig.add_subplot(1, 2, 1)
        ax1.imshow(framein[:, :, 0], cmap=cm.gray)
        ax1.scatter(predLocs[curl, :, 0, 0], predLocs[curl, :, 0, 1],  # hold=True,
                    c=cm.hsv(np.linspace(0, 1 - old_div(1., conf.n_classes), conf.n_classes)),
                    s=20, linewidths=0, edgecolors='face')
        ax1.axis('off')
        ax2 = fig.add_subplot(1, 2, 2)
        if outtype == 1:
            curpreds = predscores[curl, :, :, :, 0]
        elif outtype == 2:
            curpreds = predscores[curl, :, :, :, 0] * 2 - 1
        else:
            raise ValueError('Unknown outtype {}'.format(outtype))

        rgbim = create_pred_image(curpreds, conf.n_classes)
        ax2.imshow(rgbim)
        ax2.axis('off')

        fname = "test_{:06d}.png".format(curl)

        # to print out without X.
        # From: http://www.dalkescientific.com/writings/diary/archive/2005/04/23/matplotlib_without_gui.html
        # The size * the dpi gives the final image size,
        #   e.g. a 4"x4" figure at 80 dpi ==> 320x320 pixel image
        canvas.print_figure(os.path.join(tdir, fname), dpi=80)

        # below is the easy way.
    #         plt.savefig(os.path.join(tdir,fname))

    tfilestr = os.path.join(tdir, 'test_*.png')
    mencoder_cmd = "mencoder mf://" + tfilestr + " -frames " + "{:d}".format(
        nframes) + " -mf type=png:fps=15 -o " + outmovie + " -ovc lavc -lavcopts vcodec=mpeg4:vbitrate=2000000"
    os.system(mencoder_cmd)
    cap.release()
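The pattern above (render each frame offscreen on an Agg canvas, dump numbered PNGs into a temp directory, then stitch them with an external encoder) can be reproduced in a minimal, self-contained sketch. Here ffmpeg is assumed as a stand-in for the mencoder call, and all file names are made up:

import os
import subprocess
import tempfile

import matplotlib
matplotlib.use('Agg')  # draw without an X display
import matplotlib.pyplot as plt
import numpy as np

tdir = tempfile.mkdtemp()
fig, ax = plt.subplots(figsize=(4, 4))
for i in range(30):
    ax.clear()
    ax.imshow(np.random.rand(64, 64), cmap='gray')  # stand-in for a video frame
    ax.axis('off')
    # figure size (inches) * dpi determines the pixel size of each PNG
    fig.savefig(os.path.join(tdir, 'frame_{:06d}.png'.format(i)), dpi=80)

# stitch the numbered PNGs into a movie (assumes ffmpeg is on the PATH)
subprocess.call(['ffmpeg', '-y', '-framerate', '15',
                 '-i', os.path.join(tdir, 'frame_%06d.png'),
                 '-c:v', 'mpeg4', '-b:v', '2000k', 'out.avi'])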
Example #2
def getpatch(cap,fnum,curloc):
    # matlab sometimes can access an additional frame at the end
    # which others can't.
    curp = None
    psz = conf.sel_sz

    if fnum > cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT):
        if fnum > cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)+1:
            raise ValueError('Accessing frames beyond the length of the video')
        return curp
    framein = myutils.readframe(cap,fnum-1)
    framein = framein[:,0:(old_div(framein.shape[1],2)),:]

    testp = myutils.padgrab(framein,0,curloc[1]-old_div(psz,2),curloc[1]+old_div(psz,2),
                           curloc[0]-old_div(psz,2),curloc[0]+old_div(psz,2),0,framein.shape[2])
    curp = np.array(scalepatches(testp,conf.scale,conf.numscale,conf.rescale))
    return curp
Example #3
def getpatch(cap, fnum, curloc):
    # matlab sometimes can access an additional frame at the end
    # which others can't.
    curp = None
    psz = conf.sel_sz

    if fnum > cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT):
        if fnum > cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT) + 1:
            raise ValueError('Accessing frames beyond the length of the video')
        return curp
    framein = myutils.readframe(cap, fnum - 1)
    framein = framein[:, 0:(old_div(framein.shape[1], 2)), :]

    testp = myutils.padgrab(framein, 0, curloc[1] - old_div(psz, 2),
                            curloc[1] + old_div(psz, 2),
                            curloc[0] - old_div(psz, 2),
                            curloc[0] + old_div(psz, 2), 0, framein.shape[2])
    curp = np.array(
        scalepatches(testp, conf.scale, conf.numscale, conf.rescale))
    return curp
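myutils.padgrab and scalepatches are project-specific helpers not shown here. As a rough, hypothetical equivalent of the padded crop they perform (a fixed-size window centered on a point, zero-padded wherever it runs off the image), a NumPy-only sketch could look like this:

import numpy as np

def padded_crop(img, cy, cx, psz):
    """Return a psz x psz crop centered at (cy, cx), zero-padded at the borders."""
    half = psz // 2
    out = np.zeros((psz, psz) + img.shape[2:], dtype=img.dtype)
    y0, y1 = cy - half, cy + half
    x0, x1 = cx - half, cx + half
    sy0, sx0 = max(y0, 0), max(x0, 0)
    sy1, sx1 = min(y1, img.shape[0]), min(x1, img.shape[1])
    out[(sy0 - y0):(sy1 - y0), (sx0 - x0):(sx1 - x0)] = img[sy0:sy1, sx0:sx1]
    return out

# crop near the right edge of a dummy frame; the overhang comes back zero-padded
patch = padded_crop(np.random.rand(480, 640, 3), cy=10, cx=630, psz=64)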
Example #4
def predictMovie(model_file, inmovie, outmovie):
    pred, saver, pholders = initPredSession()
    tdir = tempfile.mkdtemp()

    cap = cv2.VideoCapture(inmovie)
    nframes = int(cap.get(cvc.FRAME_COUNT))
    plt.gray()
    # with writer.saving(fig,"test_results.mp4",4):
    fig = plt.figure()

    with tf.Session() as sess:
        saver.restore(sess, model_file)

        count = 0
        for fnum in range(nframes):
            plt.clf()
            plt.axis('off')
            framein = myutils.readframe(cap, fnum)
            framein = framein[:, 0:(old_div(framein.shape[1], 2)), 0:1]
            out = predict(copy.copy(framein), sess, pred, pholders)
            plt.imshow(framein[:, :, 0])
            maxndx = np.argmax(out[0, :, :, 0])
            loc = np.unravel_index(maxndx, out.shape[1:3])
            scalefactor = conf.rescale * conf.pool_scale
            plt.scatter(loc[1] * scalefactor, loc[0] * scalefactor)  # overlay the predicted point

            fname = "test_{:06d}.png".format(count)
            plt.savefig(os.path.join(tdir, fname))
            count += 1

#     ffmpeg_cmd = "ffmpeg -r 30 " + \
#     "-f image2 -i '/path/to/your/picName%d.png' -qscale 0 '/path/to/your/new/video.avi'

    tfilestr = os.path.join(tdir, 'test_*.png')
    mencoder_cmd = "mencoder mf://" + tfilestr + " -frames " + "{:d}".format(
        count
    ) + " -mf type=png:fps=15 -o " + outmovie + " -ovc lavc -lavcopts vcodec=mpeg4:vbitrate=2000000"
    #     print(mencoder_cmd)
    os.system(mencoder_cmd)
    cap.release()
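The localization step in predictMovie is just an argmax over the output heatmap, unravel_index to recover (row, col), and a rescale back to input-pixel coordinates. A standalone sketch with made-up sizes and a made-up scale factor:

import numpy as np

heatmap = np.random.rand(1, 64, 64, 1)        # (batch, h, w, n_classes)
maxndx = np.argmax(heatmap[0, :, :, 0])
row, col = np.unravel_index(maxndx, heatmap.shape[1:3])
scalefactor = 4                                # e.g. conf.rescale * conf.pool_scale
x, y = col * scalefactor, row * scalefactor    # x is the column, y is the row
print(x, y)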
Example #5
def predictMovie(model_file,inmovie,outmovie):
    pred,saver,pholders = initPredSession()
    tdir = tempfile.mkdtemp()

    cap = cv2.VideoCapture(inmovie)
    nframes = int(cap.get(cvc.FRAME_COUNT))
    plt.gray()
    # with writer.saving(fig,"test_results.mp4",4):
    fig = plt.figure()
    
    with tf.Session() as sess:
        saver.restore(sess, model_file)
        
        count = 0
        for fnum in range(nframes):
            plt.clf()
            plt.axis('off')
            framein = myutils.readframe(cap,fnum)
            framein = framein[:,0:(old_div(framein.shape[1],2)),0:1]
            out = predict(copy.copy(framein),sess,pred,pholders)
            plt.imshow(framein[:,:,0])
            maxndx = np.argmax(out[0,:,:,0])
            loc = np.unravel_index(maxndx,out.shape[1:3])
            scalefactor = conf.rescale*conf.pool_scale
            plt.scatter(loc[1]*scalefactor,loc[0]*scalefactor)  # overlay the predicted point

            fname = "test_{:06d}.png".format(count)
            plt.savefig(os.path.join(tdir,fname))
            count+=1

#     ffmpeg_cmd = "ffmpeg -r 30 " + \
#     "-f image2 -i '/path/to/your/picName%d.png' -qscale 0 '/path/to/your/new/video.avi'

    tfilestr = os.path.join(tdir,'test_*.png')
    mencoder_cmd = "mencoder mf://" + tfilestr + " -frames " + "{:d}".format(count) + \
        " -mf type=png:fps=15 -o " + outmovie + " -ovc lavc -lavcopts vcodec=mpeg4:vbitrate=2000000"
#     print(mencoder_cmd)
    os.system(mencoder_cmd)
    cap.release()
Example #6
def createDB():

    L = sio.loadmat(conf.labelfile)
    pts = L['pts']
    ts = L['ts']
    expid = L['expidx']
    
    count = 0; valcount = 0
    
    psz = conf.sel_sz
    map_size = 100000*conf.psz**2*3
    
    createValdata(False)
    isval,localdirs,seldirs = loadValdata()
    
    lmdbfilename = os.path.join(conf.cachedir,conf.trainfilename)
    vallmdbfilename = os.path.join(conf.cachedir,conf.valfilename)
    if os.path.isdir(lmdbfilename):
        shutil.rmtree(lmdbfilename)
    if os.path.isdir(vallmdbfilename):
        shutil.rmtree(vallmdbfilename)
    
    env = lmdb.open(lmdbfilename, map_size=map_size)
    valenv = lmdb.open(vallmdbfilename, map_size=map_size)

    
    with env.begin(write=True) as txn,valenv.begin(write=True) as valtxn:

        for ndx,dirname in enumerate(localdirs):
            if not seldirs[ndx]:
                continue

            expname = os.path.basename(dirname)
            frames = np.where(expid[0,:] == (ndx + 1))[0]
            curdir = localdirs[ndx]
            cap = cv2.VideoCapture(os.path.join(curdir,'movie_comb.avi'))
            
            curtxn = valtxn if isval.count(ndx) else txn
                
            for curl in frames:

                fnum = ts[0,curl]
                curloc = np.round(pts[0,:,curl]).astype('int')
                if fnum > cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT):
                    if fnum > cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)+1:
                        raise ValueError('Accessing frames beyond the length of the video')
                    continue
                
                framein = myutils.readframe(cap,fnum-1)
                framein = framein[:,0:(old_div(framein.shape[1],2)),0:1]

                datum = createDatum(framein,1)
                str_id = createID(expname,curloc,fnum)
                curtxn.put(str_id.encode('ascii'), datum.SerializeToString())

                if isval.count(ndx):
                    valcount+=1
                else:
                    count+=1
                    
            cap.release() # close the movie handles
            print('Done %d of %d movies' % (ndx,len(localdirs)))
    env.close() # close the database
    valenv.close()
    print('%d,%d number of pos examples added to the db and valdb' %(count,valcount))
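The LMDB side of createDB reduces to opening an environment with a sufficiently large map_size and putting serialized values under ASCII keys inside a write transaction. A minimal sketch with hypothetical keys and dummy values:

import lmdb
import numpy as np

env = lmdb.open('/tmp/example_lmdb', map_size=10 * 1024 ** 2)  # 10 MB map
with env.begin(write=True) as txn:  # commits on clean exit
    for i in range(5):
        value = np.random.rand(32, 32).astype('float32').tobytes()
        txn.put('sample_{:06d}'.format(i).encode('ascii'), value)
env.close()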
Example #7
    plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'

    curdir = '/home/mayank/Dropbox/AdamVideos/multiPoint/M122_20140828/M122_20140828_v002'
    #     FFMpegWriter = manimation.writers['ffmpeg_file']
    FFMpegWriter = manimation.writers['ffmpeg']
    writer = FFMpegWriter(fps=15, bitrate=-1)

    fig = plt.figure()

    cap = cv2.VideoCapture(os.path.join(curdir, 'movie_comb.avi'))
    plt.gray()
    with writer.saving(fig, "test_results_mencoder.mp4", 4):
        for fnum in range(0, 50):
            #             print(fnum)
            plt.clf()
            framein = myutils.readframe(cap, fnum)
            plt.imshow(framein)
            writer.grab_frame()
    cap.release()

# In[13]:

get_ipython().magic(u'matplotlib inline')
plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'

curdir = '/home/mayank/Dropbox/AdamVideos/multiPoint/M122_20140828/M122_20140828_v002'
fig = plt.figure()

cap = cv2.VideoCapture(os.path.join(curdir, 'movie_comb.avi'))
plt.gray()
for fnum in range(0, 2):
    # display the first couple of frames inline, mirroring the writer loop above
    plt.clf()
    framein = myutils.readframe(cap, fnum)
    plt.imshow(framein)
cap.release()
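One detail worth remembering when mixing cv2 and matplotlib like this: cv2.VideoCapture.read returns frames in BGR order, so a direct imshow swaps the color channels (myutils.readframe presumably handles this internally). A tiny sketch of the conversion, with a hypothetical path:

import cv2
import matplotlib.pyplot as plt

cap = cv2.VideoCapture('movie_comb.avi')  # hypothetical path
ret, frame = cap.read()
if ret:
    plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()
cap.release()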
Example #8
def createDB():

    L = sio.loadmat(conf.labelfile)
    pts = L['pts']
    ts = L['ts']
    expid = L['expidx']

    count = 0
    valcount = 0

    psz = conf.sel_sz
    map_size = 100000 * conf.psz**2 * 3

    createValdata(False)
    isval, localdirs, seldirs = loadValdata()

    lmdbfilename = os.path.join(conf.cachedir, conf.trainfilename)
    vallmdbfilename = os.path.join(conf.cachedir, conf.valfilename)
    if os.path.isdir(lmdbfilename):
        shutil.rmtree(lmdbfilename)
    if os.path.isdir(vallmdbfilename):
        shutil.rmtree(vallmdbfilename)

    env = lmdb.open(lmdbfilename, map_size=map_size)
    valenv = lmdb.open(vallmdbfilename, map_size=map_size)

    with env.begin(write=True) as txn, valenv.begin(write=True) as valtxn:

        for ndx, dirname in enumerate(localdirs):
            if not seldirs[ndx]:
                continue

            expname = os.path.basename(dirname)
            frames = np.where(expid[0, :] == (ndx + 1))[0]
            curdir = localdirs[ndx]
            cap = cv2.VideoCapture(os.path.join(curdir, 'movie_comb.avi'))

            curtxn = valtxn if isval.count(ndx) else txn

            for curl in frames:

                fnum = ts[0, curl]
                curloc = np.round(pts[0, :, curl]).astype('int')
                if fnum > cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT):
                    if fnum > cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT) + 1:
                        raise ValueError(
                            'Accessing frames beyond the length of the video')
                    continue

                framein = myutils.readframe(cap, fnum - 1)
                framein = framein[:, 0:(old_div(framein.shape[1], 2)), 0:1]

                datum = createDatum(framein, 1)
                str_id = createID(expname, curloc, fnum)
                curtxn.put(str_id.encode('ascii'), datum.SerializeToString())

                if isval.count(ndx):
                    valcount += 1
                else:
                    count += 1

            cap.release()  # close the movie handles
            print('Done %d of %d movies' % (ndx, len(localdirs)))
    env.close()  # close the database
    valenv.close()
    print('%d,%d number of pos examples added to the db and valdb' %
          (count, valcount))
Example #9
def create_full_tf_record(conf):
    lbl = h5py.File(conf.labelfile, 'r')
    pts = np.array(lbl['pts'])
    ts = np.array(lbl['ts']).squeeze().astype('int')
    exp_id = np.array(lbl['expidx']).squeeze().astype('int')
    view = conf.view
    count = 0
    val_count = 0

    create_val_data(conf)
    is_val, local_dirs, sel_dirs = load_val_data(conf)

    train_filename = os.path.join(conf.cachedir, conf.fulltrainfilename)

    env = tf.python_io.TFRecordWriter(train_filename + '.tfrecords')

    for ndx, dirname in enumerate(local_dirs):
        if not sel_dirs[ndx]:
            continue

        exp_name = conf.getexpname(dirname)
        frames = np.where(exp_id == (ndx + 1))[0]
        cap = cv2.VideoCapture(local_dirs[ndx])

        cur_env = env

        for curl in frames:

            fnum = ts[curl]
            if fnum > cap.get(cvc.FRAME_COUNT):
                if fnum > cap.get(cvc.FRAME_COUNT) + 1:
                    raise ValueError('Accessing frames beyond ' +
                                     'the length of the video for' +
                                     ' {} expid {:d} '.format(exp_name, ndx) +
                                     ' at t {:d}'.format(fnum)
                                     )
                continue
            frame_in = myutils.readframe(cap, fnum - 1)
            c_loc = conf.cropLoc[tuple(frame_in.shape[0:2])]
            frame_in = PoseTools.crop_images(frame_in, conf)
            frame_in = frame_in[:, :, 0:1]

            cur_loc = np.round(pts[curl, :, view, :]).astype('int')
            cur_loc[:, 0] = cur_loc[:, 0] - c_loc[1]  # ugh, the nasty x-y business.
            cur_loc[:, 1] = cur_loc[:, 1] - c_loc[0]
            cur_loc = cur_loc.clip(min=0.1)

            rows = frame_in.shape[0]
            cols = frame_in.shape[1]
            if np.ndim(frame_in) > 2:
                depth = frame_in.shape[2]
            else:
                depth = 1

            image_raw = frame_in.tostring()
            example = tf.train.Example(features=tf.train.Features(feature={
                'height': int64_feature(rows),
                'width': int64_feature(cols),
                'depth': int64_feature(depth),
                'locs': float_feature(cur_loc.flatten()),
                'expndx': float_feature(ndx),
                'ts': float_feature(curl),
                'image_raw': bytes_feature(image_raw)}))
            cur_env.write(example.SerializeToString())

            count += 1

        cap.release()  # close the movie handles
        print('Done %d of %d movies, count:%d val:%d' % (ndx, len(local_dirs), count, val_count))
    env.close()  # close the database
    print('%d,%d number of pos examples added to the db and val-db' % (count, val_count))
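int64_feature, float_feature and bytes_feature are not shown in the example; they are conventionally thin wrappers around the tf.train proto types. A self-contained TF1-style sketch (using the same tf.python_io API as the example) of building and writing one record with dummy data and a hypothetical output path:

import numpy as np
import tensorflow as tf

def int64_feature(v):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[int(v)]))

def float_feature(v):
    vals = np.atleast_1d(v).astype(float).tolist()
    return tf.train.Feature(float_list=tf.train.FloatList(value=vals))

def bytes_feature(v):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[v]))

img = np.zeros((4, 4, 1), dtype='uint8')
locs = np.array([[1.0, 2.0], [3.0, 4.0]])
example = tf.train.Example(features=tf.train.Features(feature={
    'height': int64_feature(img.shape[0]),
    'width': int64_feature(img.shape[1]),
    'locs': float_feature(locs.flatten()),
    'image_raw': bytes_feature(img.tobytes()),
}))
with tf.python_io.TFRecordWriter('/tmp/example.tfrecords') as writer:
    writer.write(example.SerializeToString())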
Example #10
def create_full_tf_record(conf):
    lbl = h5py.File(conf.labelfile, 'r')
    pts = np.array(lbl['pts'])
    ts = np.array(lbl['ts']).squeeze().astype('int')
    exp_id = np.array(lbl['expidx']).squeeze().astype('int')
    view = conf.view
    count = 0
    val_count = 0

    create_val_data(conf)
    is_val, local_dirs, sel_dirs = load_val_data(conf)

    train_filename = os.path.join(conf.cachedir, conf.fulltrainfilename)

    env = tf.python_io.TFRecordWriter(train_filename + '.tfrecords')

    for ndx, dirname in enumerate(local_dirs):
        if not sel_dirs[ndx]:
            continue

        exp_name = conf.getexpname(dirname)
        frames = np.where(exp_id == (ndx + 1))[0]
        cap = cv2.VideoCapture(local_dirs[ndx])

        cur_env = env

        for curl in frames:

            fnum = ts[curl]
            if fnum > cap.get(cvc.FRAME_COUNT):
                if fnum > cap.get(cvc.FRAME_COUNT) + 1:
                    raise ValueError('Accessing frames beyond ' +
                                     'the length of the video for' +
                                     ' {} expid {:d} '.format(exp_name, ndx) +
                                     ' at t {:d}'.format(fnum))
                continue
            frame_in = myutils.readframe(cap, fnum - 1)
            c_loc = conf.cropLoc[tuple(frame_in.shape[0:2])]
            frame_in = PoseTools.crop_images(frame_in, conf)
            frame_in = frame_in[:, :, 0:1]

            cur_loc = np.round(pts[curl, :, view, :]).astype('int')
            cur_loc[:, 0] = cur_loc[:, 0] - c_loc[1]  # ugh, the nasty x-y business.
            cur_loc[:, 1] = cur_loc[:, 1] - c_loc[0]
            cur_loc = cur_loc.clip(min=0.1)

            rows = frame_in.shape[0]
            cols = frame_in.shape[1]
            if np.ndim(frame_in) > 2:
                depth = frame_in.shape[2]
            else:
                depth = 1

            image_raw = frame_in.tostring()
            example = tf.train.Example(features=tf.train.Features(
                feature={
                    'height': int64_feature(rows),
                    'width': int64_feature(cols),
                    'depth': int64_feature(depth),
                    'locs': float_feature(cur_loc.flatten()),
                    'expndx': float_feature(ndx),
                    'ts': float_feature(curl),
                    'image_raw': bytes_feature(image_raw)
                }))
            cur_env.write(example.SerializeToString())

            count += 1

        cap.release()  # close the movie handles
        print('Done %d of %d movies, count:%d val:%d' %
              (ndx, len(local_dirs), count, val_count))
    env.close()  # close the database
    print('%d,%d number of pos examples added to the db and val-db' %
          (count, val_count))
Example #11
ddff = np.abs(ddff).astype('uint8')
print(stat)
if stat:
    plt.imshow(ddff)

print(ddff.max())    
cap.release()


# In[ ]:

import myutils
reload(myutils)
import cv2
cap = cv2.VideoCapture('/home/mayank/Dropbox/AdamVideos/multiPoint/M118_20140730/M118_20140730_v002/movie_comb.avi')
ff = myutils.readframe(cap,1998)
print(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT ))
cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, 1998)
print(cap.get(cv2.cv.CV_CAP_PROP_POS_FRAMES))
stat,ff = cap.read()
print(cap.get(cv2.cv.CV_CAP_PROP_POS_FRAMES))
print(stat)


# In[ ]:

import moviepy.video.io.ffmpeg_reader as freader
reader = freader.FFMPEG_VideoReader('/home/mayank/Dropbox/AdamVideos/multiPoint/M118_20140730/M118_20140730_v002/movie_comb.avi')
f1 = reader.get_frame((-2.-0.1)/reader.fps)
f2 = reader.get_frame((1.-0.1)/reader.fps)
fe = reader.get_frame((1998.-0.1)/reader.fps)
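The last cell converts frame indices to timestamps before calling get_frame. A small sketch of that index-to-time helper (hypothetical path), handy when comparing moviepy's time-based access with cv2's frame-based seeking:

import moviepy.video.io.ffmpeg_reader as freader

reader = freader.FFMPEG_VideoReader('movie_comb.avi')  # hypothetical path

def frame_at(idx, reader=reader):
    # sample slightly before the frame boundary so rounding lands on frame `idx`
    return reader.get_frame((idx - 0.1) / reader.fps)

first_frame = frame_at(1)
late_frame = frame_at(1998)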