Example #1
def run_pre_homography(outdir, matchdir, homTh, imH, imW, dmThresh, imPathList,
                       vizHomo):
    """
    Run per-frame homography filtering on match files from DeepMatch or EpicFlow
    """
    if homTh < 0:
        return
    utils.rmdir_f(outdir)
    utils.mkdir_p(outdir)
    mList = utils.read_r(matchdir, '*.txt')
    if vizHomo:
        col = np.array([255, 0, 0], dtype='int')
        utils.mkdir_p(outdir + '/viz_homo/')
    for i in range(len(mList)):
        matches = read_dmOutput(mList[i], imH, imW, dmThresh, False)
        matches = frame_homography(matches, homTh)
        # clip coordinates to the image bounds
        matches = np.minimum(matches, np.array([imW, imH, imW, imH]) - 1)
        matches = np.maximum(matches, np.array([0]))
        matchfile = outdir + 'match_%04d.txt' % i
        np.savetxt(matchfile, matches, fmt='%d')
        if matches.size > 0 and vizHomo:
            im = np.array(Image.open(imPathList[i]))
            im = Image.fromarray(
                utils.draw_point_im(im, matches[:, [1, 0]], col, sizeOut=10))
            im.save(outdir + '/viz_homo/%s' % (imPathList[i].split('/')[-1]))

        sys.stdout.write('Pairwise pre-tracking homography: [% 5.1f%%]\r' %
                         (100.0 * i / len(mList)))
        sys.stdout.flush()

    if vizHomo:
        import subprocess
        subprocess.call([
            'tar', '-zcf', outdir + '/viz_homo.tar.gz', '-C',
            outdir + '/viz_homo', '.'
        ])
    print('Pairwise pre-tracking homography completed.')
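A minimal invocation sketch for run_pre_homography, assuming the module-level imports (numpy as np, PIL.Image, sys) and the repository helpers (utils, read_dmOutput, frame_homography) are importable; the paths and the homography threshold below are illustrative placeholders, not values taken from the source.

# Hypothetical usage; directory paths and homTh are placeholders.
imPathList = utils.read_r('/data/video01/frames', '*.jpg')  # frame images (assumed location)
imW, imH = Image.open(imPathList[0]).size                   # PIL returns (width, height)
run_pre_homography(outdir='/tmp/pre_homographies/',         # note the trailing '/'
                   matchdir='/tmp/matches/',                # pairwise match_%04d.txt files
                   homTh=0.5,                               # assumed threshold; < 0 disables the step
                   imH=imH, imW=imW,
                   dmThresh=0,
                   imPathList=imPathList,
                   vizHomo=True)                            # also writes viz_homo/ images and a tar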
Example #2
def demo_images():
    """
    Input is the path of a file containing a list of directories of video images.
    """
    # Hard coded parameters
    # VSB has 121 images per video
    # FBMS has 100-250 images per video
    # DAVIS has 60-70 images per video

    # For Shot:
    maxShots = 5
    vmax = 0.6
    colBins = 40

    # For NLC:
    redirect = True  # if output is redirected to a file, don't print progress status
    frameGap = 0  # 0 means adjusted automatically per shot (not per video)
    maxSide = 650  # max length of longer side of Im
    minShot = 10  # minimum shot length
    maxShot = 110  # longer shots will be shrunk to between [maxShot/2, maxShot] frames
    binTh = 0.7  # final thresholding to obtain mask
    clearVoteBlobs = True  # remove small blobs in consensus vote; uses binTh
    relEnergy = binTh - 0.1  # relative energy in consensus vote blob removal
    clearFinalBlobs = True  # remove small blobs finally; uses binTh
    maxsp = 400
    iters = 50

    # For CRF:
    gtProb = 0.7
    posTh = binTh
    negTh = 0.4

    # For blob removal post CRF: more like salt-pepper noise removal
    bSize = 25  # 0 means not used, [0,1] relative, >=1 means absolute

    # parse commandline parameters
    args = parse_args()
    np.random.seed(args.seed)
    doload = bool(args.doload)
    dosave = bool(args.dosave)

    # read directory names
    with open(args.imdirFile) as f:
        imDirs = f.readlines()
    imDirs = [line.rstrip('\n') for line in imDirs]

    # keep only the current shard
    if args.shardId >= args.numShards:
        print('shardId must be less than numShards')
        exit(1)
    imDirs = [
        x for i, x in enumerate(imDirs) if i % args.numShards == args.shardId
    ]
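    # Round-robin sharding: e.g. with numShards=4 and shardId=1 this keeps
    # the videos at indices 1, 5, 9, ...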
    print('NUM SHARDS: %03d,  SHARD ID: %03d,  CURRENT NUM VIDEOS: %03d\n\n' %
          (args.numShards, args.shardId, len(imDirs)))

    for imdir in imDirs:
        # setup input directory
        print(
            '-------------------------------------------------------------\n')
        print('Video InputDir: ', imdir)
        numTries = 0
        totalTries = 2
        sleepTime = 60
        while numTries < totalTries:
            imPathList = utils.read_r(imdir, '*.jpg')
            # imPathList = imPathList + utils.read_r(imdir, '*.bmp')
            if len(imPathList) < 1:
                print('Failed to load! Trying again in %d seconds' %
                      sleepTime)
                numTries += 1
                time.sleep(sleepTime)  # delays for x seconds
            else:
                break
        if len(imPathList) < 2:
            print('Not enough images in image directory: \n%s' % imdir)
            # print('Continuing to next one ...')
            # continue
            assert False, 'Image directory missing or has too few images!'

        # setup output directory
        suffix = imdir.split('/')[-1]
        suffix = imdir.split('/')[-2] if suffix == '' else suffix
        outNlcIm = args.baseOutdir.split('/') + \
            ['nlcim', 'shard%03d' % args.shardId] + imdir.split('/')[3:]
        outNlcPy = args.baseOutdir.split('/') + \
            ['nlcpy'] + imdir.split('/')[3:]
        outCrf = args.baseOutdir.split('/') + \
            ['crfim', 'shard%03d' % args.shardId] + imdir.split('/')[3:]
        outIm = args.baseOutdir.split('/') + \
            ['im', 'shard%03d' % args.shardId] + imdir.split('/')[3:]

        outNlcIm = '/'.join(outNlcIm)
        outNlcPy = '/'.join(outNlcPy)
        outCrf = '/'.join(outCrf)
        outIm = '/'.join(outIm)
        outVidNlc = args.baseOutdir + '/nlcvid/'
        outVidCRF = args.baseOutdir + '/crfvid/'

        utils.mkdir_p(outNlcIm)
        utils.mkdir_p(outNlcPy)
        utils.mkdir_p(outCrf)
        utils.mkdir_p(outIm)
        utils.mkdir_p(outVidNlc)
        utils.mkdir_p(outVidCRF)
        print('Video OutputDir: ', outNlcIm)

        # resize images if needed
        h, w, c = np.array(Image.open(imPathList[0])).shape
        frac = min(min(1. * maxSide / h, 1. * maxSide / w), 1.0)
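        # e.g. a 1080x1920 frame with maxSide=650 gives
        # frac = min(650/1080, 650/1920) ~= 0.34, so the longer side is
        # downscaled to maxSide; frames already small enough keep frac = 1.0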
        if frac < 1.0:
            h, w, c = imresize(np.array(Image.open(imPathList[0])), frac).shape
        imSeq = np.zeros((len(imPathList), h, w, c), dtype=np.uint8)
        for i in range(len(imPathList)):
            if frac < 1.0:
                imSeq[i] = imresize(np.array(Image.open(imPathList[i])), frac)
            else:
                imSeq[i] = np.array(Image.open(imPathList[i]))

        # First run shot detector
        if not doload:
            shotIdx = vid2shots.vid2shots(imSeq,
                                          maxShots=maxShots,
                                          vmax=vmax,
                                          colBins=colBins)
            if dosave:
                np.save(outNlcPy + '/shotIdx_%s.npy' % suffix, shotIdx)
        else:
            shotIdx = np.load(outNlcPy + '/shotIdx_%s.npy' % suffix)
        print('Total Shots: ', shotIdx.shape, shotIdx)

        # Adjust frameGap per shot, and then run NLC per shot
        for s in range(shotIdx.shape[0]):
            suffixShot = suffix + '_shot%d' % (s + 1)

            shotS = shotIdx[s]  # 0-indexed, included
            shotE = imSeq.shape[0] if s == shotIdx.shape[0] - 1 \
                else shotIdx[s + 1]  # 0-indexed, excluded
            shotL = shotE - shotS
            if shotL < minShot:
                continue

            frameGapLocal = frameGap
            if frameGapLocal <= 0 and shotL > maxShot:
                frameGapLocal = int(shotL / maxShot)
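            # e.g. a 330-frame shot with maxShot=110 gives frameGapLocal=3,
            # so every 4th frame is kept (~83 frames, inside [maxShot/2, maxShot])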
            imPathList1 = imPathList[shotS:shotE:frameGapLocal + 1]
            imSeq1 = imSeq[shotS:shotE:frameGapLocal + 1]

            print('\nShot: %d, Shape: ' % (s + 1), imSeq1.shape)
            if not doload:
                maskSeq = nlc.nlc(imSeq1,
                                  maxsp=maxsp,
                                  iters=iters,
                                  outdir=outNlcPy,
                                  suffix=suffixShot,
                                  clearBlobs=clearVoteBlobs,
                                  binTh=binTh,
                                  relEnergy=relEnergy,
                                  redirect=redirect,
                                  doload=doload,
                                  dosave=dosave)
                if clearFinalBlobs:
                    maskSeq = nlc.remove_low_energy_blobs(maskSeq, binTh)
                if dosave:
                    np.save(outNlcPy + '/mask_%s.npy' % suffixShot, maskSeq)
            if doload:
                maskSeq = np.load(outNlcPy + '/mask_%s.npy' % suffixShot)

            # run crf, run blob removal and save as images sequences
            sTime = time.time()
            crfSeq = np.zeros(maskSeq.shape, dtype=np.uint8)
            for i in range(maskSeq.shape[0]):
                # save soft score as a png with values between 0 and 100;
                # use binTh*100 to recover the FG mask in later usage
                mask = (maskSeq[i] * 100).astype(np.uint8)
                Image.fromarray(mask).save(outNlcIm + '/' +
                                           imPathList1[i].split('/')[-1][:-4] +
                                           '.png')
                Image.fromarray(imSeq1[i]).save(outIm + '/' +
                                                imPathList1[i].split('/')[-1])
                crfSeq[i] = crf.refine_crf(imSeq1[i],
                                           maskSeq[i],
                                           gtProb=gtProb,
                                           posTh=posTh,
                                           negTh=negTh,
                                           crfParams=args.crfParams)
                crfSeq[i] = utils.refine_blobs(crfSeq[i], bSize=bSize)
                Image.fromarray(crfSeq[i]).save(
                    outCrf + '/' + imPathList1[i].split('/')[-1][:-4] + '.png')
                if not redirect:
                    sys.stdout.write(
                        'CRF, blob removal and saving: [% 5.1f%%]\r' %
                        (100.0 * (i + 1) / maskSeq.shape[0]))
                    sys.stdout.flush()
            eTime = time.time()
            print('CRF, blob removal and saving images finished: %.2f s' %
                  (eTime - sTime))

            # save as video
            sTime = time.time()
            vidName = '_'.join(imdir.split('/')[3:]) + '_shot%d.avi' % (s + 1)
            utils.im2vid(outVidNlc + vidName, imSeq1,
                         (maskSeq > binTh).astype(np.uint8))
            utils.im2vid(outVidCRF + vidName, imSeq1, crfSeq)
            eTime = time.time()
            print('Saving videos finished: %.2f s' % (eTime - sTime))

    # Tarzip the results of this shard and delete the individual files
    import subprocess
    for i in ['im', 'crfim', 'nlcim']:
        tarDir = args.baseOutdir + '/%s/shard%03d' % (i, args.shardId)
        subprocess.call(['tar', '-zcf', tarDir + '.tar.gz', '-C', tarDir, '.'])
        utils.rmdir_f(tarDir)

    return
Example #3
def demo_videos():
    """
    Input is the path of a directory containing raw videos
    """
    # Hard coded parameters
    maxSide = 600  # max length of longer side of Im
    lenSeq = 35  # longer sequences will be shrunk to between [lenSeq/2, lenSeq] frames
    binTh = 0.4  # final thresholding to obtain mask
    clearFinalBlobs = True  # remove low energy blobs; uses binTh
    vidDir = '/home/dpathak/local/data/trash/videos'

    # parse commandline parameters
    args = parse_args()
    np.random.seed(args.seed)
    print('InputDir: ', vidDir)
    print('OutputDir: ', args.outdir)

    vidPathList = utils.read_r(vidDir, '*.mp4')
    for i in range(len(vidPathList)):
        print('\nCurrent VideoPath: ', vidPathList[i])
        # load video
        imSeq = utils.vid2im(vidPathList[i])
        n, h, w, c = imSeq.shape
        # adjust frameGap
        frameGap = args.frameGap
        if frameGap <= 0 and n > lenSeq:
            frameGap = int(n / lenSeq)
        imSeq = imSeq[::frameGap + 1]
        n = imSeq.shape[0]
        # adjust size
        frac = min(min(1. * maxSide / h, 1. * maxSide / w), 1.0)
        if frac < 1.0:
            h, w, c = imresize(imSeq[0], frac).shape
            imSeq2 = np.zeros((n, h, w, c), dtype=np.uint8)
            for j in range(n):
                imSeq2[j] = imresize(imSeq[j], frac)
            imSeq = imSeq2
        print('Total Video Shape: ', imSeq.shape)
        if imSeq.shape[0] < 2:  # shape[0] is the number of frames
            print('Not enough images in this video')
            print('Continuing to next one ...')
            continue

        # setup output directory
        suffix = vidPathList[i].split('/')[-1]
        suffix = vidPathList[i].split('/')[-2] if suffix == '' else suffix
        suffix = suffix[:-4]
        outdirV = args.outdir + '/' + suffix
        utils.mkdir_p(outdirV)
        print('OutputDir for current Video: ', outdirV)

        # run the algorithm
        maskSeq = nlc(imSeq,
                      maxsp=args.maxsp,
                      iters=args.iters,
                      outdir=outdirV)
        np.save(outdirV + '/mask_%s.npy' % suffix, maskSeq)

        # save visual results
        if clearFinalBlobs:
            maskSeq = remove_low_energy_blobs(maskSeq, binTh)
        utils.rmdir_f(outdirV + '/result_%s/' % suffix)
        utils.mkdir_p(outdirV + '/result_%s/' % suffix)
        outvidfile = outdirV + '/video_%s.avi' % suffix
        utils.im2vid(outvidfile, imSeq, maskSeq)
        # use j (not i) to avoid shadowing the outer video-loop index
        for j in range(maskSeq.shape[0]):
            mask = (maskSeq[j] > binTh).astype(np.uint8)
            grayscaleimage = (color.rgb2gray(imSeq[j]) * 255.).astype(np.uint8)
            imMasked = np.zeros(imSeq[j].shape, dtype=np.uint8)
            for c in range(3):
                imMasked[:, :, c] = grayscaleimage / 2 + 127
            imMasked[mask.astype(bool), 1:] = 0  # builtin bool; np.bool is deprecated
            Image.fromarray(imMasked).save(outdirV +
                                           '/result_%s/frame_%05d.png' %
                                           (suffix, j))
        import subprocess
        subprocess.call([
            'tar', '-zcf', outdirV + '/../result_%s.tar.gz' % suffix, '-C',
            outdirV + '/result_%s/' % suffix, '.'
        ])

    return
Example #4
def demo_images():
    """
    Input is the path of a directory (imdir) containing the images of a video
    """
    # Hard coded parameters
    maxSide = 600  # max length of longer side of Im
    lenSeq = 35  # longer sequences will be shrunk to between [lenSeq/2, lenSeq] frames
    binTh = 0.4  # final thresholding to obtain mask
    clearFinalBlobs = True  # remove low energy blobs; uses binTh

    # parse commandline parameters
    args = parse_args()
    np.random.seed(args.seed)
    if args.imdir == '':
        imagenetVideoList = '/mnt/vol/gfsai-local/ai-group/users/bharathh/' + \
                            'imagenet_videos/ILSVRC2015/ImageSets/VID/' + \
                            'train_10.txt'
        imagenetRoot = '/mnt/vol/gfsai-local/ai-group/users/bharathh/' + \
                    'imagenet_videos/ILSVRC2015/Data/VID/train/'
        with open(imagenetVideoList, 'r') as f:
            lines = f.readlines()
        imdirs = [x.strip().split(' ')[0] for x in lines]
        imdirs = imdirs[np.random.randint(len(imdirs))]
        args.imdir = os.path.join(imagenetRoot, imdirs)
        args.outdir = os.path.join(args.outdir, imdirs)

    # setup input directory
    print('InputDir: ', args.imdir)
    imPathList = utils.read_r(args.imdir, '*.*')
    if len(imPathList) < 2:
        print('Not enough images in image directory: \n%s' % args.imdir)
        return

    # setup output directory
    suffix = args.imdir.split('/')[-1]
    suffix = args.imdir.split('/')[-2] if suffix == '' else suffix
    args.outdir = args.outdir + '/' + suffix
    utils.mkdir_p(args.outdir)
    print('OutputDir: ', args.outdir)

    # load image sequence after adjusting frame gap and imsize
    frameGap = args.frameGap
    if frameGap <= 0 and len(imPathList) > lenSeq:
        frameGap = int(len(imPathList) / lenSeq)
    imPathList = imPathList[0:len(imPathList):frameGap + 1]
    h, w, c = np.array(Image.open(imPathList[0])).shape
    frac = min(min(1. * maxSide / h, 1. * maxSide / w), 1.0)
    if frac < 1.0:
        h, w, c = imresize(np.array(Image.open(imPathList[0])), frac).shape
    imSeq = np.zeros((len(imPathList), h, w, c), dtype=np.uint8)
    for i in range(len(imPathList)):
        if frac < 1.0:
            imSeq[i] = imresize(np.array(Image.open(imPathList[i])), frac)
        else:
            imSeq[i] = np.array(Image.open(imPathList[i]))
    print('Total Video Shape: ', imSeq.shape)

    # run the algorithm
    maskSeq = nlc(imSeq,
                  maxsp=args.maxsp,
                  iters=args.iters,
                  outdir=args.outdir)
    np.save(args.outdir + '/mask_%s.npy' % suffix, maskSeq)

    # save visual results
    if clearFinalBlobs:
        maskSeq = remove_low_energy_blobs(maskSeq, binTh)
    utils.rmdir_f(args.outdir + '/result_%s/' % suffix)
    utils.mkdir_p(args.outdir + '/result_%s/' % suffix)
    for i in range(maskSeq.shape[0]):
        mask = (maskSeq[i] > binTh).astype(np.uint8)
        grayscaleimage = (color.rgb2gray(imSeq[i]) * 255.).astype(np.uint8)
        imMasked = np.zeros(imSeq[i].shape, dtype=np.uint8)
        for c in range(3):
            imMasked[:, :, c] = grayscaleimage / 2 + 127
        imMasked[mask.astype(bool), 1:] = 0  # builtin bool; np.bool is deprecated
        Image.fromarray(imMasked).save(args.outdir + '/result_%s/' % suffix +
                                       imPathList[i].split('/')[-1])
    import subprocess
    subprocess.call([
        'tar', '-zcf', args.outdir + '/../result_%s.tar.gz' % suffix, '-C',
        args.outdir + '/result_%s/' % suffix, '.'
    ])

    return
Example #5
def run_dm_sequence(outdir,
                    imPathList,
                    frameGap=0,
                    dmThresh=0,
                    matchNbr=10,
                    shotFrac=0,
                    postTrackHomTh=-1,
                    preTrackHomTh=-1,
                    use_epic=False,
                    vizFlow=False,
                    vizTr=False,
                    cpysrc=False):
    """
    Run DeepMatch on a sequence of video frames to obtain point tracks
    """
    print('Outdir: ', outdir)
    # adjust image list according to frame Gap
    imPathList = imPathList[0:len(imPathList):frameGap + 1]

    # compute pairwise DeepMatch between consecutive frames
    deepmatchdir = outdir + '/matches/'
    mList = []
    if os.path.isdir(deepmatchdir):
        mList = utils.read_r(deepmatchdir, '*.txt')
    if len(mList) != len(imPathList) - 1 or len(mList) == 0:
        utils.mkdir_p(deepmatchdir)
        for i in range(len(imPathList) - 1):
            matchfile = deepmatchdir + 'match_%04d.txt' % i
            run_dm_pair(imPathList[i], imPathList[i + 1], matchfile)
            sys.stdout.write('Pairwise DeepMatch: [% 5.1f%%]\r' %
                             (100.0 * i / len(imPathList)))
            sys.stdout.flush()
        print('Pairwise DeepMatch completed.')
    else:
        print('Pairwise DeepMatches already present in outdir. Using them.')

    # use epic flow densification process
    if use_epic:
        # TODO: rescore deepmatch
        flowdir = outdir + '/flow/'
        run_epicFlow_sequence(imPathList, flowdir, deepmatchdir, vizFlow)
        matchdir = flowdir
        dmThresh = -1  # deepmatch score no longer matters
    else:
        matchdir = deepmatchdir

    # run homography before matching sequences
    imW, imH = Image.open(imPathList[0]).size
    if preTrackHomTh > 0:
        preHomMatchdir = outdir + '/pre_homographies/'
        run_pre_homography(preHomMatchdir, matchdir, preTrackHomTh, imH, imW,
                           dmThresh, imPathList, True)
        matchdir = preHomMatchdir
        dmThresh = -1  # deepmatch score no longer matters

    # resolve pairwise deep-matches to obtain sequence tracks
    totalShotTracks, shotends = match_sequence(imH, imW, matchdir, dmThresh,
                                               matchNbr, shotFrac)

    # after above tracking, find foreground points using homography
    if postTrackHomTh > 0:
        startF = 0
        for endF in np.nditer(shotends):
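            # -1000 marks invalid track entries; keep only the tracks of this
            # shot that are still valid at its last frame (endF)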
            currshotTracks = totalShotTracks[
                startF:endF + 1, totalShotTracks[endF, :, 0] > -1000]
            fgPts = shot_homography(currshotTracks, postTrackHomTh)
            totalShotTracks[startF:endF + 1, :] = -1000
            totalShotTracks[startF:endF + 1, :fgPts.shape[1]] = fgPts
            startF = endF + 1

    # save matches: no longer need duplicated frame tuples
    totalShotTracks = totalShotTracks[:, :, :2]
    np.save(outdir + '/totalShotTracks.npy', totalShotTracks)
    np.save(outdir + '/shotends.npy', shotends)

    # visualize deepmatch tracks on images and save them
    if vizTr and totalShotTracks.size > 0:
        col = np.array([255, 0, 0], dtype='int')
        # note: .transpose() returns a view and is not assigned, so this
        # statement has no effect as written
        totalShotTracks.transpose()
        shotNum = 0
        utils.rmdir_f(outdir + '/viz_tracks/')
        utils.mkdir_p(outdir + '/viz_tracks/%d' % shotNum)
        for i in range(len(imPathList)):
            validPts = totalShotTracks[i, totalShotTracks[i, :, 0] > -1000]
            im = np.array(Image.open(imPathList[i]))
            im = Image.fromarray(
                utils.draw_point_im(im, validPts[:, ::-1], col, sizeOut=10))
            im.save(outdir + '/viz_tracks/%d/%s' %
                    (shotNum, imPathList[i].split('/')[-1]))
            if i == shotends[shotNum] and i < len(imPathList) - 1:
                shotNum += 1
                utils.mkdir_p(outdir + '/viz_tracks/%d' % shotNum)
        import subprocess
        subprocess.call([
            'tar', '-zcf', outdir + '/viz_tracks.tar.gz', '-C',
            outdir + '/viz_tracks', '.'
        ])
        print('Track visualization saved.')

    # copy the source images used for tracking to the output dir
    if cpysrc:
        from shutil import copy
        utils.mkdir_p(outdir + '/origImages/')
        for i in range(len(imPathList)):
            copy(imPathList[i], outdir + '/origImages/')
        print('Source images copied to outdir.')
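A minimal call sketch for run_dm_sequence, assuming the same repository helpers (utils, run_dm_pair, run_epicFlow_sequence, match_sequence, shot_homography) are on the path; the frame directory and all parameter values are illustrative placeholders rather than recommended settings.

# Hypothetical usage; the directory and parameter values are placeholders.
imPathList = utils.read_r('/data/video01/frames', '*.jpg')
run_dm_sequence(outdir='/tmp/dm_tracks',
                imPathList=imPathList,
                frameGap=0,           # keep every frame
                dmThresh=0,
                matchNbr=10,
                shotFrac=0,
                postTrackHomTh=-1,    # <= 0 skips the post-tracking homography
                preTrackHomTh=0.5,    # assumed value; > 0 enables run_pre_homography
                use_epic=False,       # True densifies the matches with EpicFlow
                vizFlow=False,
                vizTr=True,           # save per-shot track visualizations + tar
                cpysrc=False)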
Example #6
def demo_images():
    """
    Input is the path of a directory (imdir) containing the images of a video
    """
    # Hard coded parameters
    maxSide = 400  # max length of longer side of Im
    lenSeq = 1e8  # longer sequences will be shrunk to between [lenSeq/2, lenSeq] frames

    # parse commandline parameters
    args = parse_args()
    np.random.seed(args.seed)
    if args.imdir == '':
        imagenetVideoList = '/mnt/vol/gfsai-local/ai-group/users/bharathh/' + \
                            'imagenet_videos/ILSVRC2015/ImageSets/VID/' + \
                            'train_10.txt'
        imagenetRoot = '/mnt/vol/gfsai-local/ai-group/users/bharathh/' + \
                    'imagenet_videos/ILSVRC2015/Data/VID/train/'
        with open(imagenetVideoList, 'r') as f:
            lines = f.readlines()
        imdirs = [x.strip().split(' ')[0] for x in lines]
        imdirs = imdirs[np.random.randint(len(imdirs))]
        args.imdir = os.path.join(imagenetRoot, imdirs)
        args.outdir = os.path.join(args.outdir, imdirs)

    # setup input directory
    print('InputDir: ', args.imdir)
    imPathList = utils.read_r(args.imdir, '*.*')
    if len(imPathList) < 2:
        print('Not enough images in image directory: \n%s' % args.imdir)
        return

    # setup output directory
    suffix = args.imdir.split('/')[-1]
    suffix = args.imdir.split('/')[-2] if suffix == '' else suffix
    args.outdir = args.outdir + '/' + suffix
    utils.mkdir_p(args.outdir)
    print('OutputDir: ', args.outdir)

    # load image sequence after adjusting frame gap and imsize
    frameGap = args.frameGap
    if frameGap <= 0 and len(imPathList) > lenSeq:
        frameGap = int(len(imPathList) / lenSeq)
    imPathList = imPathList[0:len(imPathList):frameGap + 1]
    h, w, c = np.array(Image.open(imPathList[0])).shape
    frac = min(min(1. * maxSide / h, 1. * maxSide / w), 1.0)
    if frac < 1.0:
        h, w, c = imresize(np.array(Image.open(imPathList[0])), frac).shape
    imSeq = np.zeros((len(imPathList), h, w, c), dtype=np.uint8)
    for i in range(len(imPathList)):
        if frac < 1.0:
            imSeq[i] = imresize(np.array(Image.open(imPathList[i])), frac)
        else:
            imSeq[i] = np.array(Image.open(imPathList[i]))
    print('Total Video Shape: ', imSeq.shape)

    # run the algorithm
    shotIdx = vid2shots(imSeq,
                        maxShots=args.maxShots,
                        vmax=args.vmax,
                        colBins=args.colBins)
    print('Total Shots: ', shotIdx.shape, shotIdx)
    np.save(args.outdir + '/shotIdx_%s.npy' % suffix, shotIdx)

    # save visual results
    from PIL import ImageDraw
    utils.rmdir_f(args.outdir + '/shots_%s/' % suffix)
    utils.mkdir_p(args.outdir + '/shots_%s/' % suffix)
    frameNo = 1
    shotNo = 0
    for i in range(imSeq.shape[0]):
        img = Image.fromarray(imSeq[i])
        draw = ImageDraw.Draw(img)
        if i in shotIdx:
            draw.text((100, 100), "New Shot Begins!", (255, 255, 255))
            shotNo += 1
            frameNo = 1
        draw.text((10, 10), "Shot: %02d, Frame: %03d" % (shotNo, frameNo),
                  (255, 255, 255))
        img.save(args.outdir + '/shots_%s/' % suffix +
                 imPathList[i].split('/')[-1])
        frameNo += 1
    import subprocess
    subprocess.call([
        'tar', '-zcf', args.outdir + '/../shots_%s.tar.gz' % suffix, '-C',
        args.outdir + '/shots_%s/' % suffix, '.'
    ])

    return