def generate_abnormal_data(n_objects, generate_graph=False, show_graph=False):
    """
    Generates some abnormal trajectories, such as pedestrians crossing the road.
    Note: Refer to the image shown by show_image.py.
    It is worth mentioning that the y axis is inverted, which means that,
    when we say an object is going up, we expect its y value to decrease, and vice versa.
    :return: numpy array of abnormal trajectories with new objects
    """
    # Print a starting message
    print("=========================================================")
    print("Generating abnormal trajectories for the Sherbrooke dataset &")
    print("Exporting them to CSV files.")

    # Trajectory image viewer
    trajectory_image = tv.ImageViewer(dd.input_raw_image_frame_path)

    # Function that generates all the values of a line
    def _gen_line_values(x_1, x_2, y_1, y_2, v_x_val):
        """
        Generates the points of the line y = m * x + b defined by two endpoints.
        :param x_1: x coordinate of the starting point
        :param x_2: x coordinate of the ending point
        :param y_1: y coordinate of the starting point
        :param y_2: y coordinate of the ending point
        :param v_x_val: constant velocity along the x axis
        :return: x, y, v_x, v_y
        """
        m = (y_1 - y_2) / (x_1 - x_2)
        b = (x_1 * y_2 - x_2 * y_1) / (x_1 - x_2)
        x = range(int(x_1), int(x_2) + 1) if x_1 < x_2 else range(int(x_1), int(x_2) - 1, -1)
        x = np.array(x).reshape(-1, 1)
        y = m * x + b
        v_y_val = m * v_x_val
        v_x = np.full((x.shape[0], 1), v_x_val)
        v_y = np.full((x.shape[0], 1), v_y_val)
        return x, y, v_x, v_y
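
    # Worked example for _gen_line_values, using the first trajectory below
    # (x_1 = 350, x_2 = 608, y_1 = 650, y_2 = 150, v_x_val = 10):
    #   m = (650 - 150) / (350 - 608) = 500 / -258, i.e. about -1.94
    #   b = (350 * 150 - 608 * 650) / (350 - 608), i.e. about 1328.3
    # so y falls from 650 to 150 over 259 samples while v_y = m * 10 is about
    # -19.4, meaning the object moves right and (with the inverted y axis) up.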

    # Generate abnormal trajectory data of a car going the wrong direction
    # Left side of the road
    x_1 = 350.0
    x_2 = 608.0
    y_1 = 650.0
    y_2 = 150.0
    x, y, v_x, v_y = _gen_line_values(x_1, x_2, y_1, y_2, v_x_val=10)
    object_id = n_objects
    object_label = 1
    array_dataset = data._rearrange_data(object_id, object_label, x, y, v_x, v_y,
                                         generate_graph=generate_graph,
                                         trajectory_image=trajectory_image)

    # Left side of the road
    x_1 = 470.0
    x_2 = 630.0
    y_1 = 670.0
    y_2 = 240.0
    x, y, v_x, v_y = _gen_line_values(x_1, x_2, y_1, y_2, v_x_val=10)
    object_id += 1
    object_label = 1
    array_dataset_2 = data._rearrange_data(object_id, object_label, x, y, v_x, v_y,
                                           generate_graph=generate_graph,
                                           trajectory_image=trajectory_image)
    array_dataset = np.concatenate((array_dataset, array_dataset_2), axis=0)

    # Right side of the road
    x_1 = 690.0
    x_2 = 537.0
    y_1 = 160.0
    y_2 = 660.0
    x, y, v_x, v_y = _gen_line_values(x_1, x_2, y_1, y_2, v_x_val=-10)
    object_id += 1
    object_label = 1
    array_dataset_2 = data._rearrange_data(object_id, object_label, x, y, v_x, v_y,
                                           generate_graph=generate_graph,
                                           trajectory_image=trajectory_image)
    array_dataset = np.concatenate((array_dataset, array_dataset_2), axis=0)

    # Right side of the road
    x_1 = 710.0
    x_2 = 590.0
    y_1 = 225.0
    y_2 = 670.0
    x, y, v_x, v_y = _gen_line_values(x_1, x_2, y_1, y_2, v_x_val=-10)
    object_id += 1
    object_label = 1
    array_dataset_2 = data._rearrange_data(object_id, object_label, x, y, v_x, v_y,
                                           generate_graph=generate_graph,
                                           trajectory_image=trajectory_image)
    array_dataset = np.concatenate((array_dataset, array_dataset_2), axis=0)

    # Generate abnormal trajectory data of a bike
    x_1 = 420.0
    x_2 = 600.0
    y_1 = 640.0
    y_2 = 240.0
    x, y, v_x, v_y = _gen_line_values(x_1, x_2, y_1, y_2, v_x_val=5)
    object_id += 1
    object_label = 2
    array_dataset_2 = data._rearrange_data(object_id, object_label, x, y, v_x, v_y,
                                           generate_graph=generate_graph,
                                           trajectory_image=trajectory_image)
    array_dataset = np.concatenate((array_dataset, array_dataset_2), axis=0)

    x_1 = 650.0
    x_2 = 590.0
    y_1 = 360.0
    y_2 = 665.0
    x, y, v_x, v_y = _gen_line_values(x_1, x_2, y_1, y_2, v_x_val=-5)
    object_id += 1
    object_label = 2
    array_dataset_2 = data._rearrange_data(object_id, object_label, x, y, v_x, v_y,
                                           generate_graph=generate_graph,
                                           trajectory_image=trajectory_image)
    array_dataset = np.concatenate((array_dataset, array_dataset_2), axis=0)

    # Export it to CSV file
    dataset_file_name = 'data/'\
                        +dd.raw_input_file_all[dd.raw_input_file_all.rfind('/')+1:dd.raw_input_file_all.rfind('.')]\
                        +'_abnormal.csv'

    directory = os.path.join(data.dir_name, dataset_file_name[:dataset_file_name.rfind('/')])
    if not os.path.exists(directory):
        os.makedirs(directory)

    with open(os.path.join(data.dir_name, dataset_file_name), 'wb') as datafile:
        np.savetxt(datafile, array_dataset, fmt="%.2f", delimiter=",")

    # Save and Show the trajectory image
    if generate_graph:
        trajectory_image_name = directory + '/' + \
                                dd.raw_input_file_all[dd.raw_input_file_all.rfind('/')+1:
                                dd.raw_input_file_all.rfind('.')] + \
                                '_abnormal_trajectories.pdf'
        trajectory_image.save_image(os.path.join(data.dir_name, trajectory_image_name))
    if generate_graph and show_graph:
        trajectory_image.show_image()

    # Print finishing message
    print("                                               ---> Done!")
    print("=========================================================")

    return array_dataset
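
# Usage sketch (hypothetical value): assuming the normal Sherbrooke trajectories
# were extracted first and yielded, say, 40 tracked objects, the abnormal
# samples can be generated and exported with:
#
#     abnormal_array = generate_abnormal_data(n_objects=40,
#                                             generate_graph=True,
#                                             show_graph=False)
#
# The six synthetic trajectories are given object ids 40..45 so that they do
# not collide with the ids of the raw objects.
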
def extract_augment_and_export_data(raw_input_file_all,
                                    input_raw_image_frame_path,
                                    raw_input_file_names,
                                    video_data_fps,
                                    generate_graph=False,
                                    show_graph=False):
    """
    Generate augmented data from the dataset.
    :return: Name of the augmented dataset
    """
    # Print a starting message
    print("=========================================================")
    print("Extracting trajectory data from raw dataset,")
    print("Generating augmented trajectory data and")
    print("Exporting them to CSV file.")

    # Trajectory image viewer
    trajectory_image = tv.ImageViewer(input_raw_image_frame_path)

    # Format of the data: [Object_id, Object_label, x_0, y_0, v_x_0, v_y_0, x_1, y_1, v_x_1, v_y_1, x_2, y_2, ...]
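    # Illustrative row (hypothetical values) following the format above, for a
    # two-point trajectory of object 12 labelled 0:
    #   12, 0, 410.00, 633.50, 12.40, -3.10, 415.20, 632.10, 11.80, -2.90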

    # Extract trajectories and export to CSV file
    dataset_file_name = 'data/'+raw_input_file_all[raw_input_file_all.rfind('/')+1:raw_input_file_all.rfind('.')]\
                        +'_data_2.csv'

    directory = os.path.join(dir_name,
                             dataset_file_name[:dataset_file_name.rfind('/')])
    if not os.path.exists(directory):
        os.makedirs(directory)

    object_label = 0

    # Seed the random number generator so that the same sequence of random values is produced every time this is called
    random.seed(random_seed_number)

    first_call = True

    for raw_input_file in raw_input_file_names:
        # Skip if the string is empty
        if not raw_input_file:
            object_label += 1
            continue

        # Convert sqlite dataset to dictionary
        conn = sqlite3.connect(raw_input_file)
        raw_dataset = sql.read_sql(
            'select * from {tn}'.format(tn=raw_table_name), conn)
        conn.close()

        # Total number of objects (maximum object_id + 1)
        n_objects = int((raw_dataset.loc[raw_dataset['object_id'].idxmax()]
                         )['object_id']) + 1

        for object_id in range(0, n_objects):
            # Extract rows for the particular object_id
            df = raw_dataset.loc[raw_dataset['object_id'] == object_id]

            # Extract trajectory
            x_top_left = df['x_top_left'].values
            y_top_left = df['y_top_left'].values
            x_bottom_right = df['x_bottom_right'].values
            y_bottom_right = df['y_bottom_right'].values
            frame_number = df['frame_number'].values

            x_centered = (x_top_left + x_bottom_right) / 2
            y_centered = (y_top_left + y_bottom_right) / 2

            x_1_r = x_centered[:-1]
            x_2_r = x_centered[1:]
            y_1_r = y_centered[:-1]
            y_2_r = y_centered[1:]
            f_1 = frame_number[:-1]
            f_2 = frame_number[1:]

            v_x = video_data_fps * (x_2_r - x_1_r) / (f_2 - f_1)
            v_y = video_data_fps * (y_2_r - y_1_r) / (f_2 - f_1)
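            # For instance (hypothetical numbers): with video_data_fps = 30, a
            # centre that moves 5 pixels in x between frame 100 and frame 102
            # gives v_x = 30 * 5 / 2 = 75 pixels per second.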

            # Concatenate data
            x_1 = np.array(x_1_r).reshape(-1, 1)
            y_1 = np.array(y_1_r).reshape(-1, 1)
            v_x = np.array(v_x).reshape(-1, 1)
            v_y = np.array(v_y).reshape(-1, 1)

            array_dataset = _rearrange_data(object_id,
                                            object_label,
                                            x_1,
                                            y_1,
                                            v_x,
                                            v_y,
                                            generate_graph=generate_graph,
                                            trajectory_image=trajectory_image)

            open_file_mode = 'wb' if first_call else 'ab'

            first_call = False

            with open(os.path.join(dir_name, dataset_file_name),
                      open_file_mode) as datafile:
                np.savetxt(datafile, array_dataset, fmt="%.2f", delimiter=",")

            # Generate augmented positions
            for i in range(number_of_augmented_data_per_raw_data):
                x_a = np.array([
                    random.randint(min(x[0], x[1]), max(x[0], x[1]))
                    for x in np.array([x_1_r, x_2_r]).astype(int).T
                ])
                y_a = np.array([
                    random.randint(min(y[0], y[1]), max(y[0], y[1]))
                    for y in np.array([y_1_r, y_2_r]).astype(int).T
                ])
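                # Each augmented position is drawn uniformly between the two raw
                # positions of the corresponding step: e.g. (hypothetical values)
                # a segment going from x = 410 to x = 418 yields an integer x_a
                # in [410, 418], and y_a is sampled independently the same way.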

                x_a_1 = x_a[:-1]
                x_a_2 = x_a[1:]
                y_a_1 = y_a[:-1]
                y_a_2 = y_a[1:]

                v_x_a = video_data_fps * (x_a_2 - x_a_1) / (f_2[:-1] -
                                                            f_1[:-1])
                v_y_a = video_data_fps * (y_a_2 - y_a_1) / (f_2[:-1] -
                                                            f_1[:-1])

                # Concatenate data
                x_a_1 = np.array(x_a_1).reshape(-1, 1)
                y_a_1 = np.array(y_a_1).reshape(-1, 1)
                v_x_a = np.array(v_x_a).reshape(-1, 1)
                v_y_a = np.array(v_y_a).reshape(-1, 1)

                augmented_object_id = int(
                    str(augmented_object_id_code) + str(object_id) + str(i))
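                # The augmented id above simply concatenates the digits of
                # augmented_object_id_code, the raw object_id and the
                # augmentation index i; e.g. a hypothetical code 9 with
                # object 7 and i = 2 gives id 972.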

                array_dataset = _rearrange_data(
                    augmented_object_id,
                    object_label,
                    x_a_1,
                    y_a_1,
                    v_x_a,
                    v_y_a,
                    generate_graph=False,
                    trajectory_image=trajectory_image)

                with open(os.path.join(dir_name, dataset_file_name),
                          'ab') as datafile:
                    np.savetxt(datafile,
                               array_dataset,
                               fmt="%.2f",
                               delimiter=",")

        object_label += 1

    # Save and Show the trajectory image
    if generate_graph:
        trajectory_image_name = directory + '/' + \
                                raw_input_file_all[raw_input_file_all.rfind('/')+1:raw_input_file_all.rfind('.')] + \
                                '_normal_trajectories.pdf'
        trajectory_image.save_image(
            os.path.join(dir_name, trajectory_image_name))
    if generate_graph and show_graph:
        trajectory_image.show_image()

    # Print finishing message
    print("                                               ---> Done!")
    print("=========================================================")

    return dataset_file_name
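
# Usage sketch (hypothetical paths and values): for the Sherbrooke dataset the
# call could look like
#
#     csv_name = extract_augment_and_export_data(
#         raw_input_file_all='raw/sherbrooke_all.sqlite',
#         input_raw_image_frame_path='raw/sherbrooke_frame.jpg',
#         raw_input_file_names=['raw/sherbrooke_cars.sqlite',
#                               'raw/sherbrooke_pedestrians.sqlite'],
#         video_data_fps=30.0,
#         generate_graph=True)
#
# An empty string in raw_input_file_names skips that class while still advancing
# object_label, so the position of a file in the list encodes its label.
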
def extract_augment_and_export_data(is_normal=True,
                                    generate_graph=False,
                                    show_graph=False):
    """
    Generate augmented data from the dataset.
    :return: Name of the augmented dataset
    """
    data_type_name = 'normal' if is_normal else 'abnormal'

    # Print a starting message
    print("=========================================================")
    print("Extracting {} trajectory features data from raw dataset,".format(
        data_type_name))
    print("Generating {} augmented trajectory data and".format(data_type_name))
    print("Exporting them to CSV file.")

    # Trajectory image viewer
    trajectory_image = tv.ImageViewer(dd.input_raw_image_frame_path,
                                      dd.input_raw_image_frame_name,
                                      dd.frame_starting_number,
                                      is_caviar_data=True)

    # Format of the data: [Object_id, Object_label, x_0, y_0, v_x_0, v_y_0, x_1, y_1, v_x_1, v_y_1, x_2, y_2, ...]

    # Extract trajectories and export to CSV file
    data_file_name = 'data/' + data_type_name + '_data.csv'

    if is_normal:
        gt_names = dd.normal_data_names
        gt_pathname = dd.raw_input_normal_file_path
    else:
        gt_names = dd.abnormal_data_names
        gt_pathname = dd.raw_input_abnormal_file_path

    directory = os.path.join(data.dir_name,
                             data_file_name[:data_file_name.rfind('/')])
    if not os.path.exists(directory):
        os.makedirs(directory)

    object_label = 0

    # Seed the random number generator so that the same sequence of random values is produced every time this is called
    random.seed(data.random_seed_number)

    first_call = True

    for gt_filename in gt_names:
        xml_filename = gt_pathname + gt_filename + '.xml'
        tree = ET.parse(xml_filename)
        root = tree.getroot()

        data_bb_by_frame = []
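        # The loop below assumes a CAVIAR-style ground-truth layout, roughly
        # (illustrative values):
        #
        #   <frame number="42">
        #     <objectlist>
        #       <object id="3">
        #         <orientation>90</orientation>
        #         <box h="65" w="31" xc="160" yc="88"/>
        #       </object>
        #     </objectlist>
        #   </frame>
        #
        # Only the first object of each frame is read.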
        for frame in root.findall('frame'):
            obj = frame.find('objectlist').find('object')
            if obj is None:
                continue
            box = obj.find('box')
            if box is None:
                continue
            data_bb = dict(fn=int(frame.get('number')))
            data_bb.update(id=int(obj.get('id')))
            data_bb.update(o=int(obj.find('orientation').text))
            data_bb.update(h=int(box.get('h')))
            data_bb.update(w=int(box.get('w')))
            data_bb.update(xc=int(box.get('xc')))
            data_bb.update(yc=int(box.get('yc')))
            data_bb_by_frame.append(data_bb)

        max_id = max([item['id'] for item in data_bb_by_frame])
        for object_id in range(0, max_id + 1):
            o_data = list(
                filter(lambda item: item['id'] == object_id, data_bb_by_frame))

            if not o_data:
                continue

            x_centered = np.array([item['xc'] for item in o_data])
            y_centered = np.array([item['yc'] for item in o_data])
            frame_number = np.array([item['fn'] for item in o_data])
            bb_h = np.array([item['h'] for item in o_data])
            bb_w = np.array([item['w'] for item in o_data])
            theta = np.array([item['o'] for item in o_data])

            x_1_r = x_centered[:-1]
            x_2_r = x_centered[1:]
            y_1_r = y_centered[:-1]
            y_2_r = y_centered[1:]
            f_1 = frame_number[:-1]
            f_2 = frame_number[1:]
            h_1_r = bb_h[:-1]
            h_2_r = bb_h[1:]
            w_1_r = bb_w[:-1]
            w_2_r = bb_w[1:]
            o_1_r = theta[:-1]
            o_2_r = theta[1:]

            v_x = dd.video_data_fps * (x_2_r - x_1_r) / (f_2 - f_1)
            v_y = dd.video_data_fps * (y_2_r - y_1_r) / (f_2 - f_1)

            # Concatenate data
            x_1 = np.array(x_1_r).reshape(-1, 1)
            y_1 = np.array(y_1_r).reshape(-1, 1)
            v_x = np.array(v_x).reshape(-1, 1)
            v_y = np.array(v_y).reshape(-1, 1)
            h_1 = np.array(h_1_r).reshape(-1, 1)
            w_1 = np.array(w_1_r).reshape(-1, 1)
            o_1 = np.array(o_1_r).reshape(-1, 1)

            array_data = data._rearrange_data_v2(
                object_id,
                object_label,
                x_1,
                y_1,
                v_x,
                v_y,
                h_1,
                w_1,
                o_1,
                generate_graph=generate_graph,
                trajectory_image=trajectory_image)

            open_file_mode = 'wb' if first_call else 'ab'

            first_call = False

            with open(os.path.join(data.dir_name, data_file_name),
                      open_file_mode) as datafile:
                np.savetxt(datafile, array_data, fmt="%.2f", delimiter=",")

            # Generate augmented positions
            for i in range(data.number_of_augmented_data_per_raw_data):
                x_a = np.array([
                    random.randint(min(x[0], x[1]), max(x[0], x[1]))
                    for x in np.array([x_1_r, x_2_r]).astype(int).T
                ])
                y_a = np.array([
                    random.randint(min(y[0], y[1]), max(y[0], y[1]))
                    for y in np.array([y_1_r, y_2_r]).astype(int).T
                ])
                h_a = np.array([
                    random.randint(min(h[0], h[1]), max(h[0], h[1]))
                    for h in np.array([h_1_r, h_2_r]).astype(int).T
                ])
                w_a = np.array([
                    random.randint(min(w[0], w[1]), max(w[0], w[1]))
                    for w in np.array([w_1_r, w_2_r]).astype(int).T
                ])
                o_a = np.array([
                    random.randint(min(o[0], o[1]), max(o[0], o[1]))
                    for o in np.array([o_1_r, o_2_r]).astype(int).T
                ])

                x_a_1 = x_a[:-1]
                x_a_2 = x_a[1:]
                y_a_1 = y_a[:-1]
                y_a_2 = y_a[1:]
                h_a_1 = h_a[:-1]
                w_a_1 = w_a[:-1]
                o_a_1 = o_a[:-1]

                v_x_a = dd.video_data_fps * (x_a_2 - x_a_1) / (f_2[:-1] -
                                                               f_1[:-1])
                v_y_a = dd.video_data_fps * (y_a_2 - y_a_1) / (f_2[:-1] -
                                                               f_1[:-1])

                # Concatenate data
                x_a_1 = np.array(x_a_1).reshape(-1, 1)
                y_a_1 = np.array(y_a_1).reshape(-1, 1)
                v_x_a = np.array(v_x_a).reshape(-1, 1)
                v_y_a = np.array(v_y_a).reshape(-1, 1)
                h_a_1 = np.array(h_a_1).reshape(-1, 1)
                w_a_1 = np.array(w_a_1).reshape(-1, 1)
                o_a_1 = np.array(o_a_1).reshape(-1, 1)

                #augmented_object_id = int(str(data.augmented_object_id_code) + str(object_id) + str(i))
                augmented_object_id = object_id

                array_data = data._rearrange_data_v2(
                    augmented_object_id,
                    object_label,
                    x_a_1,
                    y_a_1,
                    v_x_a,
                    v_y_a,
                    h_a_1,
                    w_a_1,
                    o_a_1,
                    generate_graph=False,
                    trajectory_image=trajectory_image)

                with open(os.path.join(data.dir_name, data_file_name),
                          'ab') as datafile:
                    np.savetxt(datafile, array_data, fmt="%.2f", delimiter=",")

    # Save and Show the trajectory image
    if generate_graph:
        trajectory_image_name = directory + '/' + data_type_name + '_trajectories.pdf'
        trajectory_image.save_image(
            os.path.join(data.dir_name, trajectory_image_name))
    if generate_graph and show_graph:
        trajectory_image.show_image()

    # Print finishing message
    print("                                               ---> Done!")
    print("=========================================================")

    return data_file_name
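
# Usage sketch: the normal and abnormal CAVIAR trajectories would typically be
# exported back to back, e.g.
#
#     normal_csv = extract_augment_and_export_data(is_normal=True,
#                                                  generate_graph=True)
#     abnormal_csv = extract_augment_and_export_data(is_normal=False,
#                                                    generate_graph=True)
#
# which writes data/normal_data.csv and data/abnormal_data.csv under
# data.dir_name.
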
def extract_and_put_transformed_data(raw_input_file_all,
                                     input_raw_image_frame_path,
                                     raw_input_file_names,
                                     video_data_fps,
                                     changed_trajectories,
                                     generate_graph=False,
                                     show_graph=False):
    """
    Extracts the modified (transformed) trajectory positions and the related
    velocities from the raw dataset and exports them to a CSV file.
    :return: name of the exported CSV file.
    """
    # Print a starting message
    print("====================================================")
    print("Extracting modified trajectories from raw dataset and")
    print("Exporting them to CSV files.")

    # Trajectory image viewer
    trajectory_image = tv.ImageViewer(input_raw_image_frame_path)

    # Format of the data: [Object_id, Object_label, x_0, y_0, v_x_0, v_y_0, x_1, y_1, v_x_1, v_y_1, x_2, y_2, ...]

    data_filename = '_real_abnormal_2'

    # Extract trajectories and export to CSV file
    dataset_file_name = 'data/'+raw_input_file_all[raw_input_file_all.rfind('/')+1:raw_input_file_all.rfind('.')]\
                        + data_filename + '.csv'

    directory = os.path.join(dir_name,
                             dataset_file_name[:dataset_file_name.rfind('/')])
    if not os.path.exists(directory):
        os.makedirs(directory)

    object_label = 0

    first_call = True

    trajectory_index = 0

    for raw_input_file in raw_input_file_names:
        # Skip if the string is empty
        if not raw_input_file:
            object_label += 1
            continue

        # Convert sqlite dataset to dictionary
        conn = sqlite3.connect(raw_input_file)
        raw_dataset = sql.read_sql(
            'select * from {tn}'.format(tn=raw_table_name), conn)
        conn.close()

        # Total number of objects (maximum object_id + 1)
        n_objects = int((raw_dataset.loc[raw_dataset['object_id'].idxmax()]
                         )['object_id']) + 1

        for object_id in range(0, n_objects):
            # Extract rows for the particular object_id
            df = raw_dataset.loc[raw_dataset['object_id'] == object_id]

            # Extract frame numbers
            frame_number = df['frame_number'].values

            x_centered = changed_trajectories[
                trajectory_index].trajectory.get_xdata()
            y_centered = changed_trajectories[
                trajectory_index].trajectory.get_ydata()
            total_scale_factor = changed_trajectories[
                trajectory_index].total_scale_factor

            x_1 = x_centered[:-1]
            x_2 = x_centered[1:]
            y_1 = y_centered[:-1]
            y_2 = y_centered[1:]
            f_1 = frame_number[:-1]
            f_2 = frame_number[1:]

            v_x = video_data_fps * (x_2 - x_1) / (total_scale_factor *
                                                  (f_2 - f_1))
            v_y = video_data_fps * (y_2 - y_1) / (total_scale_factor *
                                                  (f_2 - f_1))
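            # For instance (hypothetical numbers): with video_data_fps = 30, a
            # 4 pixel move in x over 2 frames and total_scale_factor = 2.0, the
            # result is v_x = 30 * 4 / (2.0 * 2) = 30 pixels per second.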

            # Concatenate data
            x_1 = np.array(x_1).reshape(-1, 1)
            y_1 = np.array(y_1).reshape(-1, 1)
            v_x = np.array(v_x).reshape(-1, 1)
            v_y = np.array(v_y).reshape(-1, 1)

            trajectory_index += 1

            array_dataset = _rearrange_data(object_id,
                                            object_label,
                                            x_1,
                                            y_1,
                                            v_x,
                                            v_y,
                                            generate_graph=generate_graph,
                                            trajectory_image=trajectory_image)

            open_file_mode = 'wb' if first_call else 'ab'

            first_call = False

            with open(os.path.join(dir_name, dataset_file_name),
                      open_file_mode) as datafile:
                np.savetxt(datafile, array_dataset, fmt="%.2f", delimiter=",")

        object_label += 1

    # Save and Show the trajectory image
    if generate_graph:
        trajectory_name = '_real_abnormal_trajectories'
        trajectory_image_name = directory + '/' + \
                                raw_input_file_all[raw_input_file_all.rfind('/')+1:raw_input_file_all.rfind('.')] + \
                                trajectory_name + '.pdf'
        trajectory_image.save_image(
            os.path.join(dir_name, trajectory_image_name))
    if generate_graph and show_graph:
        trajectory_image.show_image()

    # Print finishing message
    print("                                          ---> Done!")
    print("====================================================")