Example #1
    def __init__(self, exp_folder, lims_ID):

        self.file_string = None
        for file in os.listdir(exp_folder):
            # looks for the video file for this lims ID and builds its full path
            if file.endswith(".mp4") and file.startswith(lims_ID):
                self.directory = exp_folder
                self.file_string = os.path.join(exp_folder, file)
                self.sb = sb(exp_folder)
                self.ep = ep(exp_folder, lims_ID)
                self.sv = sv(exp_folder, lims_ID)
                self.video_pointer = cv2.VideoCapture(self.file_string)

        # data is only present if a matching video file was found
        if self.file_string is not None and os.path.isfile(self.file_string):
            self.data_present = True
        else:
            self.data_present = False
Example #2
    def __init__(self, exp_folder, lims_ID):

        self.file_string = None
        for file in os.listdir(exp_folder):
            # looks for the video file for this lims ID and builds its full path
            if file.endswith(".mp4") and file.startswith(lims_ID):
                self.directory = exp_folder
                self.file_string = os.path.join(exp_folder, file)
                self.sb = sb(exp_folder)
                self.ep = ep(exp_folder, lims_ID)
                self.sv = sv(exp_folder, lims_ID)
                self.video_pointer = cv2.VideoCapture(self.file_string)

        # data is only present if a matching video file was found
        if self.file_string is not None and os.path.isfile(self.file_string):
            self.data_present = True
        else:
            self.data_present = False
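
Examples #1 and #2 share the same constructor pattern: scan the experiment folder for the .mp4 whose name starts with the LIMS ID, build its path, and wire up the helper objects. As a minimal sketch, the lookup step on its own might look like the function below; the function name and the folder/ID values in the usage comment are hypothetical, not taken from these snippets.

import os

def find_video_path(exp_folder, lims_ID):
    # return the path of the first .mp4 whose filename starts with lims_ID, or None
    for name in os.listdir(exp_folder):
        if name.endswith(".mp4") and name.startswith(lims_ID):
            return os.path.join(exp_folder, name)
    return None

# hypothetical usage:
# path = find_video_path(r"C:\path\to\experiment", "501560436")
# data_present = path is not None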
Example #3
def show_frame(frame):
    cv2.imshow('image', frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    lims_ID = ['501560436', '502741583','509904120', '500860585']
    final_data = []
    y_train = []
    feature_data = []
    y_fid = []
    wheel = []
    t = 0

    for itm in lims_ID:
        exl = ep("C:\Users\mahdir\Desktop\Mahdi files", itm)
        data = get_data(exl, itm)
        if t == 0:
            final_data = data['final_data']
            y_train = data['y_train']
            feature_data = data['fidget_data']
            y_fid = data['y_fid']
            wheel = data['wheel']
        else:
            vector = data['final_data']
            final_data = np.vstack((final_data, vector))
            y_train = np.concatenate((y_train, data['y_train']))
            feature_data = np.concatenate((feature_data, data['fidget_data']))
            y_fid = np.concatenate((y_fid, data['y_fid']))
            wheel = np.concatenate((wheel, data['wheel']))
        t += 1
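
get_data itself is not shown on this page; from the way its return value is consumed above, it is expected to return a dict of NumPy arrays keyed by 'final_data', 'y_train', 'fidget_data', 'y_fid' and 'wheel'. A hedged stub of that contract, with dummy shapes chosen only for illustration:

import numpy as np

def get_data_stub(excel_parser, lims_ID):
    # illustrative stand-in for get_data: the keys match how the loop above
    # consumes the result, but the shapes and values here are dummies
    n_frames = 10
    return {
        'final_data': np.zeros((n_frames, 4)),   # per-frame feature rows
        'y_train': np.zeros(n_frames),           # behavior labels per frame
        'fidget_data': np.zeros((n_frames, 4)),  # features for the fidget classifier
        'y_fid': np.zeros(n_frames),             # fidget labels per frame
        'wheel': np.zeros(n_frames),             # running-wheel signal per frame
    }

# the loop above stacks 'final_data' rows with np.vstack and concatenates the
# 1-D label and wheel vectors across lims IDs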
Example #4
    cv2.destroyAllWindows()


if __name__ == '__main__':
    # set limsID of video data to train on
    # '503412730', '497060401','502741583', '501004031', '500860585', '501560436'
    lims_ID = ['502741583', '501004031', '500860585', '501560436']
    # initialize training data and data label arrays, as well as a loop counter
    y_train = []
    y_track = []
    feature_data = []
    t = 0

    # for each lims_ID, get training and label data, and combine it with the previous lims IDs' training and label data
    for itm in lims_ID:
        ex = ep("C:\Users\mahdir\Documents\Allen Projects\Behavior Annotation",
                itm)
        data = get_data(itm, ex)
        if t == 0:
            y_train = data['y_train']
            y_track = data['y_track']
            feature_data = data['feature_data'][0:len(y_train)]
            print(itm + ' video done')
            print(data['number'])
        else:
            y_vector = data['y_train']
            y_train = np.concatenate((y_train, y_vector))
            y_track = np.concatenate((y_track, data['y_track']))
            vector = data['feature_data'][0:len(y_vector)]
            feature_data = np.vstack((feature_data, vector))
            print(itm + ' video done')
Example #5
def run_whole_video(exp_folder, lims_ID):
    # initializes video pointer for video of interest based on lims ID
    file_string = get_file_string(exp_folder + '\Videos\\', lims_ID)
    video_pointer = cv2.VideoCapture(file_string)

    # load feature scalers
    scale_wheel = joblib.load(exp_folder + '\Scalers\\' + 'wheel_scale_' +
                              str(lims_ID) + '.pkl')
    scale_frame = joblib.load(exp_folder + '\Scalers\\' + 'frame_scale_' +
                              str(lims_ID) + '.pkl')
    scale_optical = joblib.load(exp_folder + '\Scalers\\' + 'optical_scale_' +
                                str(lims_ID) + '.pkl')
    scale_angle = joblib.load(exp_folder + '\Scalers\\' + 'angle_scale_' +
                              str(lims_ID) + '.pkl')

    # import wheel data
    wheel_path = os.path.join(exp_folder + '\Wheel\\', 'dxds' + str(lims_ID) + '.pkl')
    wheel = joblib.load(wheel_path)
    first_non_nan = next(x for x in wheel if not isnan(x))
    first_index = np.where(wheel == first_non_nan)[0]
    k = first_index[0]
    # impute NaNs in the wheel trace with the column mean
    # (sklearn's Imputer was later removed; SimpleImputer is the modern replacement)
    imp = Imputer(missing_values='NaN', strategy='mean')
    wheel = imp.fit_transform(wheel)
    wheel = scale_wheel.transform(wheel)

    # property 1 == CV_CAP_PROP_POS_FRAMES: seek to the first frame with valid wheel data
    video_pointer.set(1, k)
    ret, frame = video_pointer.read()

    # crops and converts frame into desired format
    frame = cv2.cvtColor(frame[160:400, 100:640], cv2.COLOR_BGR2GRAY)

    prvs = frame
    nex = frame

    # initialize vectors to keep track of data
    count = 1
    mod = 0
    opticals = []
    angles = []
    frames = []

    behavior_data = ep(exp_folder, lims_ID).get_per_frame_data()

    labels = [
        "chattering", "trunk_present", "grooming", "trunk_absent", "running",
        "fidget", "tail_relaxed", "tail_tense", "flailing_present",
        "flailing_absent", "walking"
    ]

    Annotators = [
        'Mahdi Ramadan', 'Fiona Griffin', 'Nate Berbesque', 'Robert Howard',
        'Kyla Mace', 'Twee'
    ]

    # frame count and frame rate of the movie (OpenCV 2.x constants; cv2.CAP_PROP_* in OpenCV 3+)
    limit = int(video_pointer.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    fps = video_pointer.get(cv2.cv.CV_CAP_PROP_FPS)

    print('creating hf')
    # create hdf file
    hf = h5py.File(
        exp_folder + '\h5 files\\' + 'training_data_' + str(lims_ID) + '.h5',
        'w')

    while count < limit - first_index[0]:
        # get behavior info for frame

        behavior = ""
        temp = 1
        for item in labels:
            if behavior_data[temp][k] == 1:
                behavior = behavior + " " + item
            temp += 1

        prvs = nex
        frame_data = process_input(prvs)
        frames = scale_frame.transform(frame_data)

        ret, frame = video_pointer.read()
        nex = cv2.cvtColor(frame[160:400, 100:640], cv2.COLOR_BGR2GRAY)

        optical = optical_flow(prvs, nex)
        opticals = scale_optical.transform(optical['mag'])
        angles = scale_angle.transform(optical['ang'])

        vector_data = np.concatenate(
            (np.reshape(wheel[k], (1)), frames, opticals, angles))
        name = 'frame number ' + str(k)
        table = hf.create_dataset(name,
                                  data=vector_data,
                                  shape=(1, 5713),
                                  compression=9)
        table.attrs['behavior'] = behavior

        count += 1
        k += 1

        if count % 1000 == 0:
            print(count)
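
run_whole_video writes one HDF5 dataset per frame, named 'frame number <k>' with shape (1, 5713), and stores the annotated behaviors in a 'behavior' attribute. A small, hedged sketch of reading such a file back with h5py; the path in the usage comment is a placeholder:

import h5py

def read_training_file(h5_path):
    # iterate the per-frame datasets written above and collect (name, vector, behavior) tuples
    frames = []
    with h5py.File(h5_path, 'r') as hf:
        for name in hf:                      # each key is 'frame number <k>'
            ds = hf[name]
            vector = ds[()]                  # shape (1, 5713): wheel + frame + optical flow + angle features
            behavior = ds.attrs['behavior']  # space-separated behavior labels for this frame
            frames.append((name, vector, behavior))
    return frames

# hypothetical usage:
# data = read_training_file(r"C:\path\to\h5 files\training_data_501560436.h5")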
Example #6
        k += 1
    return {'final_data': data, 'y_train': y_train}

def show_frame(frame):
    cv2.imshow('image', frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    lims_ID = ['501560436', '501021421', '500860585']
    final_data = []
    y_train = []
    t = 0

    for itm in lims_ID:
        exl = ep("C:\Users\mahdir\Desktop\Mahdi files", itm)
        data = get_data(exl, itm)
        if t == 0:
            final_data = data['final_data']
            y_train = data['y_train']
        else:
            vector = data['final_data']
            final_data = np.vstack((final_data, vector))
            y_train = np.concatenate((y_train, data['y_train']))
        t += 1

    print('feature processing finished')
    # pass the callable itself as the target; calling run_svm here would run it in this process
    p = Process(target=run_svm, args=(final_data, y_train))
    p.start()
    p.join()
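
run_svm is not defined in this snippet; a hedged sketch of what such a training entry point might look like with scikit-learn, purely illustrative (the actual function may differ):

import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split

def run_svm_sketch(final_data, y_train):
    # illustrative stand-in for run_svm: fit an SVM on the stacked feature rows
    # and report held-out accuracy
    X_tr, X_te, y_tr, y_te = train_test_split(final_data, y_train, test_size=0.2, random_state=0)
    clf = SVC(kernel='rbf')
    clf.fit(X_tr, y_tr)
    print('held-out accuracy: %.3f' % clf.score(X_te, y_te))
    return clf

# with the multiprocessing call above, the target is passed uncalled:
# p = Process(target=run_svm_sketch, args=(final_data, y_train))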