Example #1
import os

import numpy as np
import pandas as pd
from deepposekit.io import VideoReader
from deepposekit.models import load_model


def analyze_video(video, model_name, skeleton_name, batch_size=16):
    """
    Analyze a video and save results to '<video>_tracking.csv' in the same directory.

    video:          full path to the video
    model_name:     full path to a saved DeepPoseKit model
    skeleton_name:  full path to the skeleton .csv
    batch_size:     number of frames per prediction batch
    """

    # load model
    model = load_model(model_name)

    # predict
    print(f'analyzing video: {video}')
    reader = VideoReader(video,
                         batch_size=batch_size,
                         gray=True,
                         frame_size=model.input_shape)
    predictions = model.predict(reader,
                                verbose=1,
                                workers=1,
                                use_multiprocessing=False)
    reader.close()

    # get column names
    features = list(pd.read_csv(skeleton_name).name)
    columns = []
    for f in features:
        columns += [f + '_x', f + '_y', f + '_confidence']

    # save to csv
    data = pd.DataFrame(columns=columns, index=np.arange(predictions.shape[0]))
    data[:] = np.reshape(predictions, (predictions.shape[0], -1))
    data.to_csv(os.path.splitext(video)[0] + '_tracking.csv')

    return predictions
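
For reference, a minimal call of the helper above might look like this (the paths are hypothetical placeholders, not from the original project):

predictions = analyze_video(
    video=r"D:\data\fly\video.avi",             # hypothetical path
    model_name=r"D:\models\best_model.h5",      # hypothetical path
    skeleton_name=r"D:\data\fly\skeleton.csv",  # hypothetical path
)
print(predictions.shape)  # (n_frames, n_keypoints, 3): x, y, confidence per keypoint
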
Example #2
def start_videoanalyser():
    print("Starting DeepLabCut")
    model = load_model(
        r"D:\DeepPoseKit-Data-master\datasets\fly\best_model_densenet.h5")

    experiment_enabled = False
    video_output = True

    if experiment_enabled:
        print("Initializing experiment")
        experiment = ExampleExperiment()
        experiment.start_experiment()

    # initialize bookkeeping variables
    all_rows = []
    index = 0

    # NOTE: `video`, `video_file`, `video_name`, `config`, `sess`, `inputs`,
    # `outputs`, ANIMALS_NUMBER and the helpers (get_pose, find_local_peaks_new,
    # calculate_skeletons, ...) are defined elsewhere in the original project;
    # this excerpt assumes they are already set up.
    while video.isOpened():
        ret, frame = video.read()
        if ret:
            scmap, locref, pose = get_pose(frame, config, sess, inputs,
                                           outputs)
            peaks = find_local_peaks_new(scmap, locref, ANIMALS_NUMBER, config)
            skeletons = calculate_skeletons(peaks, ANIMALS_NUMBER)
            if skeletons:
                for skeleton in skeletons:
                    if experiment_enabled:
                        result, response = experiment.check_skeleton(
                            frame, skeleton)
                        plot_triggers_response(frame, response)
                out_frame = plot_bodyparts(frame, skeletons)
            else:
                out_frame = frame
            cv2.imshow('stream', out_frame)
            if video_output:
                video_file.write(out_frame)
            if experiment_enabled:
                all_rows.append(
                    create_row(index, skeletons, experiment_enabled,
                               experiment.get_trial()))
            else:
                all_rows.append(
                    create_row(index, skeletons, experiment_enabled, None))
            index += 1
        else:
            break

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if experiment_enabled:
        experiment.stop_experiment()
    if video_output:
        print('Saving analyzed video for {}'.format(video_name))
        video_file.release()
    video.release()
    create_dataframes(all_rows)
Example #3
def start_videoanalyser():
    print("Starting DeepPoseKit")
    video = cv2.VideoCapture(r"D:\DeepPoseKit-Data-master\datasets\fly\video.avi")
    resolution = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))

    path_model = r"D:\DeepPoseKit-Data-master\datasets\fly\best_model_densenet.h5"
    model = load_model(path_model)


    predict_model = model.predict_model
    # predict_model.layers.pop(0)  # remove current input layer
    #
    # inputs = tf.keras.layers.Input((resolution[0], resolution[1], 3))
    # outputs = predict_model(inputs)
    # predict_model = tf.keras.Model(inputs, outputs)


    experiment_enabled = False

    index = 0
    while video.isOpened():
        ret, frame = video.read()
        if ret:  # read() returns (bool, frame)
            org_frame = frame
            # keep only the green channel, restoring a trailing channel axis
            frame = frame[..., 1][..., None]
            st_frame = np.stack([frame])  # add a batch dimension
            prediction = predict_model.predict(st_frame, batch_size=1, verbose=True)
            x, y, confidence = np.split(prediction, 3, -1)

            print(prediction.shape)
            predi = prediction[0, :, :2]  # (keypoints, 2) x/y coordinates
            print(predi)
            out_frame = plot_dlc_bodyparts(org_frame, predi)
            # out_frame = org_frame
            cv2.imshow('stream', out_frame)
            index += 1
        else:
            break

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video.release()
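
Note that this example feeds full-resolution frames straight into predict_model, whose input layer expects a fixed shape (the commented-out block above hints at rebuilding the input layer to avoid this). A sketch of resizing each frame to the model's expected input instead, assuming the standard Keras input_shape layout of (None, height, width, channels):

h, w = predict_model.input_shape[1:3]
frame_resized = cv2.resize(frame[..., 1], (w, h))[..., None]  # green channel, resized
st_frame = frame_resized[None]  # add the batch dimension
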
Example #4
def resume_training(fpath, original_model_file, new_model_file,
                    labeled_data_h5_file):
    data_generator = DataGenerator(os.path.join(fpath, labeled_data_h5_file))
    augmenter = get_augmenter(data_generator=data_generator)
    model = load_model(
        os.path.join(fpath, original_model_file),
        augmenter=augmenter,
        generator=data_generator,
    )

    callbacks = get_callbacks(fpath, model_file=new_model_file)

    model.fit(
        batch_size=8,
        validation_batch_size=2,
        callbacks=callbacks,
        epochs=500,
        n_workers=8,
        steps_per_epoch=None,
    )
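
get_augmenter and get_callbacks are project-specific helpers not shown in this example. As a rough sketch of what get_callbacks might return, one could mirror the callback setup of Example #9 below (an assumption, not the original helper):

import os

from deepposekit.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau

def get_callbacks(fpath, model_file):
    # save the best weights, stop when val_loss stalls, and decay the learning rate
    checkpoint = ModelCheckpoint(os.path.join(fpath, model_file),
                                 monitor="val_loss", verbose=1,
                                 save_best_only=True)
    early_stop = EarlyStopping(monitor="val_loss", min_delta=0.001,
                               patience=100, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.2,
                                  patience=7, verbose=1)
    return [checkpoint, early_stop, reduce_lr]
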
Example #5
# assuming the DeepPoseKit load_model used elsewhere in these examples
import argparse
import glob
import os

import yaml
from deepposekit.models import load_model

parser = argparse.ArgumentParser()
parser.add_argument('vid_type', help="'stim' or 'tank'")
parser.add_argument('-overwrite',
                    help="overwrite existing files",
                    action='store_true')
args = parser.parse_args()
vid_type = args.vid_type

# load config
with open('config.yaml', 'r') as file:
    cfg = yaml.safe_load(file)[vid_type]

# find video folders
vid_folders = [f.path for f in os.scandir(cfg['data_dir']) if f.is_dir()]

# load model
model = load_model(cfg['model'])

# analyze videos
for vid_folder in vid_folders:
    vids = glob.glob(os.path.join(vid_folder, '*.avi'))
    vids = [
        vid for vid in vids
        if 'concatenated' not in vid and 'tracking' not in vid
    ]  # don't add concatenated vids
    vids.sort()

    for idx, vid in enumerate(vids):
        if args.overwrite or not os.path.exists(
                os.path.splitext(vid)[0] + '_tracking.csv'):
            print('\n({}/{})--------- analyzing {}'.format(
                idx + 1, len(vids), vid))
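
The loop body is truncated here; given the '_tracking.csv' naming convention it checks for, it presumably ends by calling a helper like analyze_video from Example #1 (an assumption about the omitted code; 'skeleton' is an assumed config key):

            analyze_video(vid, cfg['model'], cfg['skeleton'])  # hypothetical continuation
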
Example #6
import sys

from robotpose.angle_prediction import Predictor

# GPU memory helper, presumably star-imported from robotpose.utils
setMemoryGrowth()

# Load dataset
ds = Dataset('set0','B')

# Read in Actual angles from JSONs to compare predicted angles to
S_angles = ds.angles[:,0]
L_angles = ds.angles[:,1]
U_angles = ds.angles[:,2]
B_angles = ds.angles[:,4]

# Load model, make predictions
model = load_model(os.path.join(os.getcwd(),r'models\set10__B__CutMobilenet.h5'))
reader = VideoReader(ds.seg_vid_path)
predictions = model.predict(reader)
tim = Predictor('B')
tim.load(predictions[25], ds.pointmaps[25])
print(tim.predict())

sys.exit()

# np.save('set6_output.npy',predToXYZ(predictions, ds.ply))
# print("Predictions saved")

pred_dict = predToDictList_new(predictions)
pred_dict_xyz = predToXYZdict_new(pred_dict, ds.pointmaps)

# Load video capture and make output
Example #7
def predictnewvideoDPK(dpkini, videofolder):
    configFile = str(dpkini)
    config = ConfigParser()
    config.read(configFile)
    project_folder = config.get('general DPK settings', 'project_folder')
    modelPath = config.get('predict settings', 'modelPath')
    videoFolderPath = videofolder
    print(videoFolderPath)
    batchSize = config.getint('predict settings', 'batch_size')
    outputfolder = os.path.join(project_folder, 'predictions')
    if not os.path.exists(outputfolder):
        os.makedirs(outputfolder)

    bodyPartColumnNames = []

    skeletonPath = os.path.join(project_folder, 'skeleton.csv')
    skeletonDf = pd.read_csv(skeletonPath)
    skeletonList = list(skeletonDf['name'])

    for i in skeletonList:
        x_col, y_col, p_col = (str(i) + '_x', str(i) + '_y', str(i) + '_p')
        bodyPartColumnNames.append(x_col)
        bodyPartColumnNames.append(y_col)
        bodyPartColumnNames.append(p_col)

    filesFound = glob.glob(videoFolderPath + '/*.mp4')

    # Check if the videos are greyscale by writing one frame out and re-reading it
    cap = cv2.VideoCapture(filesFound[0])
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    ret, frame = cap.read()
    fileName = '0.bmp'
    filePath = os.path.join(videoFolderPath, fileName)
    cv2.imwrite(filePath, frame)
    # IMREAD_UNCHANGED keeps the stored channel count (imread defaults to 3-channel BGR)
    img = cv2.imread(filePath, cv2.IMREAD_UNCHANGED)
    greyscaleStatus = img.ndim < 3 or img.shape[2] == 1
    os.remove(filePath)

    # This loads the trained model into memory for making predictions
    model = load_model(modelPath)
    for video in filesFound:
        print('Analyzing file: ' + str(os.path.basename(video)))
        reader = VideoReader(video, batch_size=batchSize, gray=greyscaleStatus)
        predictions = model.predict(reader, verbose=1)
        reader.close()
        outputFilename = os.path.join(
            outputfolder,
            os.path.basename(video).replace('.mp4', '.csv'))
        x, y, confidence = np.split(predictions, 3, -1)
        # build one row per frame (DataFrame.append was removed in pandas 2.0)
        rows = []
        for i in range(len(x)):
            currX, currY, currConf = (x[i], y[i], confidence[i])
            currCordList = []
            for ii in range(len(currX)):
                currCordList.extend([
                    float(currX[ii]),
                    float(currY[ii]),
                    float(currConf[ii])
                ])
            rows.append(currCordList)
        outputDataFrame = pd.DataFrame(rows, columns=bodyPartColumnNames)
        outputDataFrame.to_csv(outputFilename)
        print('Saved predictions: ' + outputFilename)
    print('All files analyzed.')
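
Because DeepPoseKit returns predictions as an (n_frames, n_keypoints, 3) array, the per-frame row building above can also be collapsed into a single reshape, exactly as Example #1 does (a sketch using the same variables):

flat = predictions.reshape(predictions.shape[0], -1)  # x, y, p interleaved per body part
outputDataFrame = pd.DataFrame(flat, columns=bodyPartColumnNames)
outputDataFrame.to_csv(outputFilename)
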
Example #8
        for larva_path in (root_path / setup_name / experiment_name).iterdir():
            larva_ID = f"{larva_path.name}_{setup_name}"

            if "_fish" not in larva_path.name:
                continue

            vid = imageio.get_reader(larva_path / "fish_roi.avi", 'ffmpeg')

            roi_movie = []

            for i, img in enumerate(vid):
                img = np.array(Image.fromarray(img[:, :, 0]).resize((96, 96)), dtype=np.float32)

                img = 255 * img / img.max()
                img = cv2.blur(img, (5, 5)).astype(np.uint8)
                img[img < 100] = 0

                roi_movie.append(img)

            # move the wrapping axis to the channel position: (n_frames, 96, 96, 1)
            roi_movie = np.moveaxis([roi_movie], 0, -1).astype(np.uint8)

            print("ROI movie has the following shape:", roi_movie.shape)
            model = load_model(r'/n/home10/abahl/engert_storage_armin/maxwell_paper/deepposekit_training/my_best_model.h5')

            roi_movie_posture = model.predict(roi_movie)

            print("Done prediction, saving....")

            np.save(larva_path / "roi_movie_posture.npy", roi_movie_posture)

Example #9
def run(dataset, skeleton, model_type, batch_size, valid_size):

    ds = Dataset(dataset, skeleton)
    print("Dataset loaded")
    data_generator = DataGenerator(ds.deepposeds_path)
    print("Data Generator loaded")

    model_path = os.path.join(p().MODELS,
                              f"{ds.name}__{ds.skele.name}__{model_type}.h5")

    if os.path.isfile(model_path):
        model = load_model(model_path, generator=data_generator)
    else:

        if model_type == "LEAP":
            ds_fac = 1
        else:
            ds_fac = 2

        train_generator = TrainingGenerator(generator=data_generator,
                                            downsample_factor=ds_fac,
                                            augmenter=None,
                                            sigma=5,
                                            validation_split=valid_size,
                                            use_graph=True,
                                            random_seed=1,
                                            graph_scale=1)
        print("Training Generator loaded")

        if model_type == "CutResnet":
            model = DeepLabCut(train_generator, backbone="resnet50")
        elif model_type == "CutMobilenet":
            model = DeepLabCut(train_generator,
                               backbone="mobilenetv2",
                               alpha=1.0)  # Increase alpha to improve accuracy
        elif model_type == "CutDensenet":
            model = DeepLabCut(train_generator, backbone="densenet121")
        elif model_type == "StackedDensenet":
            model = StackedDenseNet(train_generator,
                                    n_stacks=1,
                                    growth_rate=48)
        elif model_type == "LEAP":
            model = LEAP(train_generator)
        elif model_type == "StackedHourglass":
            model = StackedHourglass(train_generator)
        else:
            raise ValueError(f"Unknown model_type: {model_type}")

    print("Model Set")

    logger = Logger(
        validation_batch_size=batch_size,
        # filepath saves the logger data to a .h5 file
        filepath=p().LOG)
    reduce_lr = ReduceLROnPlateau(monitor="val_loss",
                                  factor=0.2,
                                  verbose=1,
                                  patience=7)

    model_checkpoint = ModelCheckpoint(
        model_path,
        monitor="val_loss",
        # monitor="loss" # use if validation_split=0
        verbose=1,
        save_best_only=True,
    )

    early_stop = EarlyStopping(
        monitor="val_loss",
        # monitor="loss" # use if validation_split=0
        min_delta=0.001,
        patience=100,
        verbose=1)

    callbacks = [logger, early_stop, reduce_lr, model_checkpoint]
    print("Callbacks set")

    import webbrowser
    webbrowser.open('https://www.youtube.com/watch?v=IkdmOVejUlI')

    model.fit(
        batch_size=batch_size,
        validation_batch_size=batch_size,
        callbacks=callbacks,
        epochs=1000,
        n_workers=workerCount(),
        steps_per_epoch=None,
    )
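
A hypothetical invocation, reusing the dataset, skeleton and model-type names that appear elsewhere in these examples (batch size and validation split are placeholder values):

run('set10', 'B', 'CutMobilenet', batch_size=16, valid_size=0.1)
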
Example #10
import json

from robotpose.angle_prediction import Predictor

setMemoryGrowth()

predict = False
save = False
skele = 'E'
ds = Dataset('set10', skele)

if predict:
    print("Predicting...")
    # Load model, make predictions
    model = load_model(
        os.path.join(os.getcwd(),
                     fr'models\set10__{skele}__StackedDensenet.h5'))
    reader = VideoReader(ds.seg_vid_path)
    predictions = model.predict(reader)
    print("Finished Predicting.")

    if save:
        np.save(f'output/predictions_{skele}.npy', np.array(predictions))
        print("Predictions saved")
else:
    predictions = np.load(f'output/predictions_{skele}.npy')

# Load video capture and make output
cap = cv2.VideoCapture(ds.seg_vid_path)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(p().VIDEO.replace(".avi", "_overlay.avi"), fourcc, 20,
Example #11
import cv2
import numpy as np
import pyrealsense2 as rs
from deepposekit.io import TrainingGenerator, DataGenerator, VideoReader
from deepposekit.models import load_model
from robotpose.utils import *


vid_path = "C:\\Users\\exley\\Desktop\\CDME\\RobotPose\\data\\video.avi"
image_path = "C:\\Users\\exley\\Desktop\\CDME\\RobotPose\\data\\2d"
skeleton_path = "C:\\Users\\exley\\Desktop\\CDME\\RobotPose\\data\\mult_skeleton.csv"
model_path = "C:\\Users\\exley\\Desktop\\CDME\\RobotPose\\data\\model_LEAP_mult.h5"

L_angles = readLinkXData(1)
U_angles = readLinkXData(2)
B_angles = readLinkXData(4)

model = load_model(model_path)
reader = VideoReader(vid_path)
predictions = model.predict(reader)
pred_dict = predToDictList(predictions)

cap = cv2.VideoCapture(vid_path)

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(vid_path.replace(".avi", "_overlay.avi"), fourcc, 12.5,
                      (640 * 2, 480))

L_pred = []
U_pred = []
B_pred = []

ret, image = cap.read()
i = 0
Example #12
# Compile PLY data if not already compiled
if not os.path.isfile(p.ply_data):
    parsePLYs()

# Read ply data
ply_data = readBinToArrs(p.ply_data)

# Read in Actual angles from JSONs to compare predicted angles to
S_angles = readLinkXData(0)
L_angles = readLinkXData(1)
U_angles = readLinkXData(2)
B_angles = readLinkXData(4)

# Load model, make predictions
model = load_model(p.model_mult)
reader = VideoReader(p.VIDEO)
predictions = model.predict(reader)
pred_dict = predToDictList(predictions)
pred_dict_xyz = predToXYZdict(pred_dict, ply_data)

# Load video capture and make output
cap = cv2.VideoCapture(p.VIDEO)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(p.VIDEO.replace(".avi", "_overlay.avi"), fourcc, 12.5,
                      (640 * 2, 480))

# Init predicted angle lists
S_pred = []
L_pred = []
U_pred = []
Example #13
def initialize_annotation(root_path, filepath):
    path = os.path.join(root_path, 'my_best_model.h5')
    model = load_model(path)
    path2 = os.path.join(root_path, 'example_annotation_set.h5')
    data_generator = DataGenerator(path2, mode='unannotated')
    image_generator = ImageGenerator(data_generator)

    predictions = model.predict(image_generator, verbose=1)

    print(predictions.shape)

    data_generator[:] = predictions

    image, keypoints = data_generator[0]

    # plt.figure(figsize=(5,5))
    # image = image[0] if image.shape[-1] == 3 else image[0, ..., 0]
    # cmap = None if image.shape[-1] == 3 else 'gray'
    # plt.imshow(image, cmap=cmap, interpolation='none')
    # for idx, jdx in enumerate(data_generator.graph):
    #     if jdx > -1:
    #         plt.plot(
    #             [keypoints[0, idx, 0], keypoints[0, jdx, 0]],
    #             [keypoints[0, idx, 1], keypoints[0, jdx, 1]],
    #             'r-'
    #         )
    # plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50, cmap=plt.cm.hsv, zorder=3)
    # path3 = os.path.join(root_path, "figure6_%s.png" % filepath)
    # plt.savefig(path3)
    # plt.show()

    path = os.path.join(root_path, 'my_best_model.h5')
    model = load_model(path)

    path2 = os.path.join(root_path, '%s_fish_roi_resized.avi' % filepath)
    reader = VideoReader(path2, batch_size=10, gray=True)
    frames = reader[0]
    print(frames.shape)
    reader.close()
    fish_name = filepath

    # plt.imshow(frames[0,...,0], cmap='gray')
    # plt.savefig('figure_15_%s.png' % filepath)
    # plt.show()

    reader = VideoReader(path2, batch_size=50, gray=True)
    predictions = model.predict(reader, verbose=1)
    reader.close()

    path3 = os.path.join(root_path, filepath)
    np.save(path3 + '_predictions.npy', predictions)

    x, y, confidence = np.split(predictions, 3, -1)
    path4 = os.path.join(root_path, 'example_annotation_set.h5')
    data_generator = DataGenerator(path4)

    image = frames[0]
    keypoints = predictions[0]

    plt.figure(figsize=(5, 5))
    image = image if image.shape[-1] == 3 else image[..., 0]
    cmap = None if image.shape[-1] == 3 else 'gray'
    plt.imshow(image, cmap=cmap, interpolation='none')
    for idx, jdx in enumerate(data_generator.graph):
        if jdx > -1:
            plt.plot([keypoints[idx, 0], keypoints[jdx, 0]],
                     [keypoints[idx, 1], keypoints[jdx, 1]], 'r-')
    plt.scatter(keypoints[:, 0],
                keypoints[:, 1],
                c=np.arange(data_generator.keypoints_shape[0]),
                s=50,
                cmap=plt.cm.hsv,
                zorder=3)
    plt.savefig(path3 + "_figure7.png")
    # plt.show()

    confidence_diff = np.abs(np.diff(confidence.mean(-1).mean(-1)))

    confidence_outlier_peaks = find_peaks(confidence_diff, height=0.1)[0]

    time_diff = np.diff(predictions[..., :2], axis=0)
    time_diff = np.abs(time_diff.reshape(time_diff.shape[0], -1))
    time_diff = time_diff.mean(-1)

    time_diff_outlier_peaks = find_peaks(time_diff, height=10)[0]

    outlier_index = np.concatenate(
        (confidence_outlier_peaks, time_diff_outlier_peaks))
    outlier_index = np.unique(outlier_index)  # make sure there are no repeats

    reader = VideoReader(path2, batch_size=1, gray=True)

    outlier_images = []
    outlier_keypoints = []
    for idx in outlier_index:
        outlier_images.append(reader[idx])
        outlier_keypoints.append(predictions[idx])

    outlier_images = np.concatenate(outlier_images)
    outlier_keypoints = np.stack(outlier_keypoints)

    reader.close()

    data_generator = DataGenerator(path4)

    for frame_idx in range(5):
        image = outlier_images[frame_idx]
        keypoints = outlier_keypoints[frame_idx]

        plt.figure(figsize=(5, 5))
        image = image if image.shape[-1] == 3 else image[..., 0]
        cmap = None if image.shape[-1] == 3 else 'gray'
        plt.imshow(image, cmap=cmap, interpolation='none')
        for idx, jdx in enumerate(data_generator.graph):
            if jdx > -1:
                plt.plot([keypoints[idx, 0], keypoints[jdx, 0]],
                         [keypoints[idx, 1], keypoints[jdx, 1]], 'r-')
        plt.scatter(keypoints[:, 0],
                    keypoints[:, 1],
                    c=np.arange(data_generator.keypoints_shape[0]),
                    s=50,
                    cmap=plt.cm.hsv,
                    zorder=3)
        plt.savefig(path3 + '_figure_12_%d.png' % frame_idx)  # one figure per outlier frame
        plt.show()

    merge_new_images(
        datapath=path4,
        merged_datapath=path3 + '_annotation_data_release_merged.h5',
        images=outlier_images,
        keypoints=outlier_keypoints,
        # overwrite=True # This overwrites the merged dataset if it already exists
    )

    merged_generator = DataGenerator(path3 +
                                     '_annotation_data_release_merged.h5',
                                     mode="unannotated")

    image, keypoints = merged_generator[0]

    plt.figure(figsize=(5, 5))
    image = image[0] if image.shape[-1] == 3 else image[0, ..., 0]
    cmap = None if image.shape[-1] == 3 else 'gray'
    plt.imshow(image, cmap=cmap, interpolation='none')
    for idx, jdx in enumerate(data_generator.graph):
        if jdx > -1:
            plt.plot([keypoints[0, idx, 0], keypoints[0, jdx, 0]],
                     [keypoints[0, idx, 1], keypoints[0, jdx, 1]], 'r-')
    plt.scatter(keypoints[0, :, 0],
                keypoints[0, :, 1],
                c=np.arange(data_generator.keypoints_shape[0]),
                s=50,
                cmap=plt.cm.hsv,
                zorder=3)
    plt.savefig(path3 + '_figure_13.png')
    plt.show()

    plt.imshow(frames[0, ..., 0], cmap='gray')
    plt.savefig(path3 + '_figure_14.png')

    plt.show()
Example #14
def load_dpk():
    model = load_model(MODEL_PATH)
    return model.predict_model
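
predict_model is the plain tf.keras.Model inside a DeepPoseKit model that maps an image batch to stacked (x, y, confidence) keypoint outputs, as Example #3 above exploits. A minimal sketch of driving it directly (the zero-filled batch is a placeholder):

import numpy as np

predict_model = load_dpk()
# dummy batch shaped (1, height, width, channels) to match the model input
frames = np.zeros((1,) + tuple(predict_model.input_shape[1:]), dtype="uint8")
keypoints = predict_model.predict(frames)  # (1, n_keypoints, 3)
x, y, confidence = np.split(keypoints, 3, -1)
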
Example #15
def load_sleap():
    model = load_model(MODEL_PATH)
    return model.inference_model
Example #16
def load_sleap():
    model = load_model(MODEL_PATH, batch_size=1)
    return model.inference_model
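
Examples #15 and #16 target SLEAP rather than DeepPoseKit: there load_model returns a predictor whose inference_model property lazily builds the underlying Keras model, and batch_size=1 at load time presumably fixes the batch dimension for single-frame inference. A hedged sketch (MODEL_PATH is a placeholder):

import sleap

predictor = sleap.load_model(MODEL_PATH, batch_size=1)
inference_model = predictor.inference_model  # built on first access
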
Example #17
# Check whether the videos are greyscale by writing one frame out and re-reading it
cap = cv2.VideoCapture(filesFound[0])
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
ret, frame = cap.read()
fileName = '0.bmp'
filePath = os.path.join(videoFolderPath, fileName)
cv2.imwrite(filePath, frame)
# IMREAD_UNCHANGED keeps the stored channel count (imread defaults to 3-channel BGR)
img = cv2.imread(filePath, cv2.IMREAD_UNCHANGED)
greyscaleStatus = img.ndim < 3 or img.shape[2] == 1
os.remove(filePath)

# This loads the trained model into memory for making predictions
model = load_model(modelPath)
for video in filesFound:
    print('Analyzing file: ' + str(os.path.basename(video)))
    reader = VideoReader(video, batch_size=batchSize, gray=greyscaleStatus)
    predictions = model.predict(reader, verbose=1)
    reader.close()
    outputFilename = os.path.join(
        outputfolder,
        os.path.basename(video).replace('.mp4', '.csv'))
    x, y, confidence = np.split(predictions, 3, -1)

# Outlier detection on the last video's predictions
# (find_peaks here is scipy.signal.find_peaks)
confidence_diff = np.abs(np.diff(confidence.mean(-1).mean(-1)))
confidence_outlier_peaks = find_peaks(confidence_diff, height=0.1)[0]
time_diff = np.diff(predictions[..., :2], axis=0)
time_diff = np.abs(time_diff.reshape(time_diff.shape[0], -1))
time_diff = time_diff.mean(-1)