Example #1
0
def main():
    """Extract deep features from every video under ``opt.input``.

    For each video file, frames are dumped into a temporary ``tmp``
    directory with ffmpeg, optionally replaced with hand-masked versions,
    pushed through a pretrained ResNet-152, and the resulting feature
    array is saved as ``<video name>.npy`` under ``opt.output``.
    """
    import shutil  # local import: the file-level import block is outside this chunk

    # Model definition: pretrained ResNet-152 used as a feature extractor.
    model = resnet.resnet152(pretrained=True)
    # Use the GPU when available, otherwise fall back to CPU.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('device is {0}'.format(device))
    model.to(device)

    # Resolve CLI paths to absolute paths.
    opt = parse_opts()
    outpath = os.path.abspath(opt.output)
    apath = os.path.abspath(opt.input)
    video_names = sorted(glob.glob(os.path.join(apath, '*')))

    # Remove any stale frame directory left behind by a previous run.
    # shutil.rmtree replaces the former `rm -rf` shell call: portable and
    # immune to shell injection / whitespace issues in paths.
    shutil.rmtree('tmp', ignore_errors=True)

    for vpath in video_names:
        vname = os.path.splitext(os.path.basename(vpath))[0]

        os.makedirs('tmp', exist_ok=True)
        # List-form argv (shell=False) so video paths containing spaces or
        # shell metacharacters cannot break or inject the command.
        subprocess.call(
            ['ffmpeg', '-loglevel', 'warning', '-i', vpath,
             'tmp/image_%05d.jpg'])
        images = sorted(glob.glob('tmp/*.jpg'))
        if opt.only_hand:
            # Replace each extracted frame in place with its hand-masked version.
            print('convert to masked images')
            for im in tqdm(images):
                frame = cv2.imread(im)
                maskedframe, _ = hd.detect(frame)
                cv2.imwrite(im, maskedframe)
            print('complete convert images')

        # Typo fix in the progress message: "DeepFeatrue" -> "DeepFeature".
        print('extract {}\'s DeepFeature'.format(vname))

        outputs = input_image(images, model)

        # Create the output directory lazily, then persist the feature array.
        os.makedirs(outpath, exist_ok=True)

        savename = os.path.join(outpath, vname + '.npy')
        np.save(savename, outputs)
        # Clean up the frame directory before processing the next video.
        shutil.rmtree('tmp', ignore_errors=True)
Example #2
0
         # NOTE(review): fragment begins mid-branch — the `if` matching the
         # `else:` below is outside this view. Python 2 print-statement syntax.
         foundRHandFullSize = RESIZE_FULL_RES_FACTOR*foundRHand # scale the right-hand box up to full-resolution coordinates
         print foundRHand
         newx,newy,neww,newh = foundRHand
         detectRight = False
 else:
     # check for the left hand in the same frame and across the next attempts
     if numLeftAttempts < MAX_NUM_LEFT_ATTMEPTS:
         print "searching for left hand attempt#",numLeftAttempts
         lhands = handDetect(hand_cascade, image, detectLeft = True, getNegSamples = False)
         print lhands
         lhValidator.feedNewHandList(lhands)
         # the validator accumulates detections before confirming a hand position
         if lhValidator.checkForDetectionAndUpdateCounter():
             foundLHand = np.array(lhValidator.foundHandPos)
             print foundLHand
             #foundLHand = HandDetection.convertLHdet_to_imgCoords(foundLHand, origSize=(160,120))
             # require the left-hand box to be plausibly placed relative to the right hand
             if HandDetection.validateLH_RelativeToRH(foundLHand, foundRHand):
                 lbeep.play()
                 fullResIm = HandDetection.getFullResImage(camera , fullResStream ,(160*RESIZE_FULL_RES_FACTOR , 120*RESIZE_FULL_RES_FACTOR))
                 foundLHandFullSize = RESIZE_FULL_RES_FACTOR*foundLHand
                 commandMenu.voiceOverImage(fullResIm,foundLHandFullSize,foundRHandFullSize)
                 # reset the full-resolution capture stream and restore low-res preview mode
                 fullResStream.seek(0)
                 fullResStream.truncate()
                 camera.resolution = (160,120)
         
         numLeftAttempts += 1
         # after the final attempt, reset and go back to searching for the right hand
         if numLeftAttempts == MAX_NUM_LEFT_ATTMEPTS:
             numLeftAttempts = 0 # reset
             detectRight = True
         
         
        
Example #3
0
                        detectRight = True
                else:
                    # check for the left hand in the same frame and across the next attempts
                    if numLeftAttempts < MAX_NUM_LEFT_ATTMEPTS:
                        print "searching for left hand attempt#", numLeftAttempts
                        lhands = handDetect(hand_cascade,
                                            image,
                                            detectLeft=True,
                                            getNegSamples=False)
                        print lhands
                        lhValidator.feedNewHandList(lhands)
                        # the validator accumulates detections before confirming a hand position
                        if lhValidator.checkForDetectionAndUpdateCounter():
                            foundLHand = np.array(lhValidator.foundHandPos)
                            print foundLHand
                            #foundLHand = HandDetection.convertLHdet_to_imgCoords(foundLHand, origSize=(160,120))
                            # require the left-hand box to be plausibly placed relative to the right hand
                            if HandDetection.validateLH_RelativeToRH(
                                    foundLHand, foundRHand):
                                lbeep.play()
                                fullResIm = HandDetection.getFullResImage(
                                    camera, fullResStream,
                                    (160 * RESIZE_FULL_RES_FACTOR,
                                     120 * RESIZE_FULL_RES_FACTOR))
                                foundLHandFullSize = RESIZE_FULL_RES_FACTOR * foundLHand
                                commandMenu.voiceOverImage(
                                    fullResIm, foundLHandFullSize,
                                    foundRHandFullSize)
                                # reset the full-resolution stream and restore low-res preview mode
                                fullResStream.seek(0)
                                fullResStream.truncate()
                                camera.resolution = (160, 120)

                        numLeftAttempts += 1
                        # NOTE(review): the fragment is truncated here — the body of
                        # this `if` lies outside the visible chunk.
                        if numLeftAttempts == MAX_NUM_LEFT_ATTMEPTS:
Example #4
0
 
 
 # NOTE(review): fragment begins mid-scope (Python 2 print-statement syntax).
 #print rhValidator.foundHandPos
 foundRHand = np.array(rhValidator.foundHandPos)
 foundRHandFullSize = RESIZE_FULL_RES_FACTOR*foundRHand # scale the right-hand box up to full-resolution coordinates
 print foundRHand
 newx,newy,neww,newh = foundRHand
 
 # check for the left hand in the same frame and in the next NUM_LEFT_ATTMEPTS frames
 for i in range(NUM_LEFT_ATTMEPTS):
     lhands = handDetect(hand_cascade, image, detectLeft = True, getNegSamples = False)
     lhValidator.feedNewHandList(lhands)
     if lhValidator.checkForDetectionAndUpdateCounter():
         
         foundLHand = np.array(lhValidator.foundHandPos)
         # map the left-hand detection into image coordinates
         # (assumes a 160x120 capture resolution — TODO confirm)
         foundLHand = HandDetection.convertLHdet_to_imgCoords(foundLHand, origSize=(160,120))
         # require the left-hand box to be plausibly placed relative to the right hand
         if HandDetection.validateLH_RelativeToRH(foundLHand, foundRHand):
             lbeep.play()
             fullResIm = HandDetection.getFullResImage(camera , fullResStream ,(160*RESIZE_FULL_RES_FACTOR , 120*RESIZE_FULL_RES_FACTOR))
             foundLHandFullSize = RESIZE_FULL_RES_FACTOR*foundLHand
             commandMenu.readCommand(fullResIm,foundLHandFullSize,foundRHandFullSize)
             # reset the full-resolution stream and restore low-res preview mode
             fullResStream.seek(0)
             fullResStream.truncate()
             camera.resolution = (160,120)
 
 
     # grab the next frame before the following attempt
     camera.capture(stream, format='bgr' , use_video_port = True)
     image = stream.array
     cv2.imshow('image',image)