Пример #1
0
        network_name=args.network_name,
        checkpoint_path=args.checkpoint,
        batch_size=args.batch_size,
        num_classes=args.num_classes,
        preproc_func_name=args.preproc_func,
        preproc_threads=args.num_preproc_threads)
    # NOTE(review): the lines above are the tail of a FeatureExtractor(...)
    # constructor call whose opening line is outside this chunk — confirm
    # against the full file.

    # Print the network summary, use these layer names for feature extraction
    #feature_extractor.print_network_summary()

    # Feature extraction example using a filename queue to feed images
    feature_dataset = feature_extraction_queue(feature_extractor,
                                               args.image_path, layer_names,
                                               args.batch_size,
                                               args.num_classes)

    #    print(type(feature_dataset))
    #   for i in feature_dataset:
    #      print(i)

    # Collapse the logits tensor to 2-D: keep dims 0 and 3, drop the two
    # middle dims (presumably spatial 1x1 — TODO confirm shape is [N,1,1,C]).
    data = feature_dataset['resnet_v2_101/logits']
    data = np.reshape(data, [data.shape[0], data.shape[3]])

    # Save the flattened logits as a NumPy array.
    np.save(args.out_file, data)
    # Write features to disk as HDF5 file
    # NOTE(review): HDF5 path is hardcoded to 'features.h5', yet the message
    # below reports args.out_file (the .npy target) — likely a mismatch.
    utils.write_hdf5('features.h5', layer_names, feature_dataset)
    print("Successfully written features to: {}".format(args.out_file))

    # Close the threads and close session.
    feature_extractor.close()
    print("Finished.")
Пример #2
0
def run_video(file_path=None):
    """Detect people in a video and score each upper body as police/not-police.

    Frames are read from ``file_path`` (or the default camera when it is
    None/empty), run through OpenPose for keypoints, each detected upper body
    is cropped to a temp file, featurized with a ResNet-v2-101 extractor, and
    scored against reference features loaded from an HDF5 file. Annotated
    frames are displayed and written to ``output.avi``.

    Args:
        file_path: Path to the input video; falls back to camera 0 when falsy.
    """
    # Recreate the scratch directory used for per-person crops.
    # ignore_errors=True prevents a crash on the very first run, when
    # 'tmp/' does not exist yet (plain rmtree raised FileNotFoundError).
    shutil.rmtree('tmp/', ignore_errors=True)
    os.mkdir('tmp/')
    batch_size = 1
    # Reference feature matrix, squeezed from [N,1,1,d] to [N,d].
    # h5py's `.value` accessor is deprecated; `[()]` reads the full dataset.
    with h5py.File('/data1/Project/TF_FeatureExtraction/features.h5', 'r') as f:
        criterion = f['resnet_v2_101']['logits'][()].squeeze(axis=1).squeeze(axis=1)
    feature_extractor = FeatureExtractor(
        network_name='resnet_v2_101',
        checkpoint_path='/data1/Project/TF_FeatureExtraction/checkpoints/resnet_v2_101.ckpt',
        batch_size=batch_size,
        num_classes=1001,
        preproc_func_name='inception',
        preproc_threads=2
    )
    FLAG = False  # pause playback when a box with out-of-frame coords appears
    if not file_path:
        file_path = 0  # cv2 treats integer 0 as the default camera
    cap = cv2.VideoCapture(file_path)
    out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 15,
                          (int(cap.get(3)), int(cap.get(4))))
    try:
        while True:
            t = cv2.getTickCount()
            ret, frame = cap.read()
            if not ret:
                break  # end of stream
            # Keypoints plus the input frame with the skeleton blended on it.
            keypoints, output_image = openpose.forward(frame, True)
            for i in range(keypoints.shape[0]):
                box = upper_body_box(keypoints[i, :, :])
                if box is None:
                    continue
                if box[0][0] <= 0 or box[0][1] <= 0 or box[1][0] <= 0 or box[1][1] <= 0:
                    # Partially out-of-frame box: dump keypoints and pause
                    # playback below so it can be inspected.
                    print(keypoints[i, :, :])
                    FLAG = True
                # Crop the upper body, round-trip through a temp JPEG, and
                # extract its logits feature.
                cv2.imwrite('tmp/p%d.jpg' % i,
                            frame[box[0][1]:box[1][1] + 1, box[0][0]:box[1][0] + 1, :])
                feature_data = feature_extraction_queue(feature_extractor, 'tmp/p%d.jpg' % i,
                                                        ['resnet_v2_101/logits'], batch_size,
                                                        num_classes=1001)
                feature = feature_data['resnet_v2_101/logits'].squeeze(axis=1).squeeze(axis=1)
                score = check_police(feature, criterion)
                # Blue box = classified as police, red = not.
                color = (255, 0, 0) if score >= 0.5 else (0, 0, 255)
                # Reset the scratch dir so stale crops never feed the next pass.
                shutil.rmtree('tmp/')
                os.mkdir('tmp/')
                cv2.rectangle(output_image, box[0], box[1], color, 2)
                cv2.putText(output_image, "%.3f" % score, box[0],
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
            # Compute FPS and draw the overlay BEFORE writing the frame, so
            # the saved video matches the on-screen display. (The original
            # wrote the frame first; the FPS text never reached output.avi.)
            t = (cv2.getTickCount() - t) / cv2.getTickFrequency()
            fps = 1.0 / t
            cv2.putText(output_image, "%.1f FPS" % fps, (5, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
            out.write(output_image)
            cv2.imshow("output", output_image)
            if FLAG:
                # Block until a key press; space resumes normal playback.
                key = cv2.waitKey(0)
                if key == ord(' '):
                    FLAG = False
                    continue
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always release resources, even if a frame raises mid-loop.
        feature_extractor.close()
        cap.release()
        out.release()
        cv2.destroyAllWindows()