Example #1
def eval_metrics(cfg, metric):
    with open(cfg) as fd:
        data_specs = json.load(fd)

    temp_size = data_specs['temp_size']

    r = get_images_classes(
        data_specs['images_path'],
        data_specs['info_dict'],
        data_specs['class_of_interest']
    )
    train_d, val_d, test_d = split_data(
        data_specs['positive_ev_path'],
        r,
        window_size=temp_size,
        future_size=0,
        shuffle=False
    )
    train_d = add_texture_and_features_info(train_d, data_specs['info_path'])
    val_d = add_texture_and_features_info(val_d, data_specs['info_path'])
    test_d = add_texture_and_features_info(test_d, data_specs['info_path'])

    full_df = pd.concat([train_d, val_d, test_d], ignore_index=True)
    ds = ConcisePixelLevelDs(full_df)
    if metric == 'AGES_frame':
        ages_per_frame(ds)
    if metric == 'pixels_AGE':
        active_pixels_per_AGE(ds)
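
A hypothetical invocation sketch; "cfg.txt" is a placeholder config whose JSON must supply the keys read above (temp_size, images_path, info_dict, class_of_interest, positive_ev_path, info_path):

# Hypothetical usage; unknown metric names fall through silently.
eval_metrics("cfg.txt", metric='AGES_frame')   # AGE counts per frame
eval_metrics("cfg.txt", metric='pixels_AGE')   # active pixels per AGE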
Example #2
def get_single_plots(cfg):
    with open(cfg) as fd:
        data_specs = json.load(fd)

    temp_size = data_specs['temp_size']
    num_of_temp_features = data_specs['temp_features']
    m = load_model(data_specs['model_path'],
                   # ToNameNet.name(),
                   DOTNetCNN.name(),
                   "model_params.json")

    r = get_images_classes(
        data_specs['images_path'],
        data_specs['info_dict'],
        data_specs['class_of_interest']
    )
    train_d, val_d, test_d = split_data(
        data_specs['positive_ev_path'],
        r,
        window_size=temp_size,
        future_size=0,
        shuffle=True
    )

    info_path = data_specs["info_path"]
    ds = PixelLevelDs(
        val_d,
        info_path=info_path,
        add_polarity="neg" if num_of_temp_features > 1 else "",
    )
    plot_4D_predictions(m, ds, max_rows=5, black_out_aps=False)
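
A minimal usage sketch under the same assumptions; load_model combines data_specs['model_path'] with "model_params.json", so that file is expected to sit next to the stored weights:

# Hypothetical usage: plot 4D DOTNetCNN predictions on validation samples.
get_single_plots("cfg.txt")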
Example #3
def plot_transforms(cfg, apply_transform, max_rows=5):
    with open(cfg) as fd:
        data_specs = json.load(fd)

    # Sizes are hardcoded for these plots instead of being read from the config:
    # temp_size = data_specs['temp_size']
    temp_size = 3
    num_of_temp_features = 1

    individual_transforms = DelayedTransformSet(
        0.7,
        transforms=[
            DelayedRotation(0.6, 20, expand=1),
            CustomResize(500),
            transforms.CenterCrop((260, 346)),
        ])

    r = get_images_classes(data_specs['images_path'], data_specs['info_dict'],
                           data_specs['class_of_interest'])
    train_d, val_d, test_d = split_data(data_specs['positive_ev_path'],
                                        r,
                                        window_size=temp_size,
                                        future_size=0,
                                        shuffle=True)

    info_path = data_specs["info_path"]
    ds = PixelLevelDs(test_d,
                      info_path=info_path,
                      add_polarity="neg" if num_of_temp_features > 1 else "",
                      transforms=apply_transform,
                      individual_transforms=individual_transforms)

    imgs_r, temp_data, ys = [], [], []
    for j, (x, y) in enumerate(ds):
        if j >= max_rows:
            break
        img_raw, temporal_data = x

        imgs_r.append(img_raw[0].unsqueeze(0).numpy())  # add leading dim: (1, H, W)
        temp_data.append(temporal_data.numpy())
        ys.append(y.numpy())

    grid_plot(max_rows, 1, imgs_r, title="Static images")
    grid_plot(max_rows,
              temp_data[0].shape[0],
              temp_data,
              title="Net temporal input")
    grid_plot(max_rows, ys[0].shape[0], ys, title="Real")
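
A sketch of a possible apply_transform argument, assuming the dataset accepts a torchvision-style callable (the per-sample DelayedTransformSet is already fixed inside the function):

from torchvision import transforms

# Hypothetical usage: one shared crop applied to every sample.
shared = transforms.Compose([transforms.CenterCrop((260, 346))])
plot_transforms("cfg.txt", apply_transform=shared, max_rows=5)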
Example #4
def load_data(cfg_list):
    temp_size = 5
    future_size = 0
    val_size, test_size = 0.15, 0.30

    train_list, val_list, test_list = [], [], []
    for cfg in cfg_list:
        with open(cfg) as fd:
            data_specs = json.load(fd)

        r = get_images_classes(
            data_specs['images_path'],
            data_specs['info_dict'],
            data_specs['class_of_interest']
        )
        train_split, val_split, test_split = split_data(
            data_specs['positive_ev_path'],
            r,
            window_size=temp_size,
            future_size=future_size,
            test_size=test_size,
            val_size=val_size
        )

        train_split = add_texture_and_features_info(train_split, data_specs['info_path'])
        val_split = add_texture_and_features_info(val_split, data_specs['info_path'])
        test_split = add_texture_and_features_info(test_split, data_specs['info_path'])

        train_list.append(train_split)
        val_list.append(val_split)
        test_list.append(test_split)
    # Merge the per-config splits, then shuffle each deterministically.
    train_d, val_d, test_d = pd.concat(train_list), pd.concat(val_list), pd.concat(test_list)
    train_d = train_d.sample(frac=1, replace=False, random_state=42).reset_index(drop=True)
    val_d = val_d.sample(frac=1, replace=False, random_state=42).reset_index(drop=True)
    test_d = test_d.sample(frac=1, replace=False, random_state=42).reset_index(drop=True)

    return train_d, val_d, test_d
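
A usage sketch with placeholder config names; the fixed random_state above makes the returned splits reproducible across calls:

# Hypothetical usage: pool several recordings into one shuffled split.
train_d, val_d, test_d = load_data(["cfg_a.txt", "cfg_b.txt"])
print(train_d.shape, val_d.shape, test_d.shape)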
Example #5
def make_sequence_video(cfg):
    with open(cfg) as fd:
        data_specs = json.load(fd)

    temp_size = data_specs['temp_size']
    num_of_temp_features = data_specs['temp_features']
    m = load_model(data_specs['model_path'],
                   DOTNetCNN.name(),
                   # ToNameNet.name(),
                   "model_params.json")
    use_gpu = torch_cuda.is_available()
    device = torch_device(torch_cuda.current_device()) if use_gpu else torch_device("cpu")
    m.to(device)

    r = get_images_classes(
        data_specs['images_path'],
        data_specs['info_dict'],
        data_specs['class_of_interest']
    )
    train_d, val_d, test_d = split_data(
        data_specs['positive_ev_path'],
        r,
        window_size=temp_size,
        future_size=0,
        shuffle=False
    )
    full_df = pd.concat([train_d, val_d, test_d], ignore_index=True)

    info_path = data_specs["info_path"]
    ds = PixelLevelDs(
        full_df,
        info_path=info_path,
        add_polarity="neg" if num_of_temp_features > 1 else "",
    )
    # make_video(m, ds)
    make_non_repeating_video(m, ds)
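
A minimal sketch of a call; device placement is decided inside the function, and shuffle=False keeps the windows in temporal order so the rendered video follows the recording chronologically:

# Hypothetical usage: render predictions over train+val+test in sequence.
make_sequence_video("cfg.txt")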
Example #6
def main():
    temp_size = 5
    future_size = 0
    net_temp_output = future_size + 1
    out_dims = (260, 346)
    batch_size = 12
    num_classes = 3
    # use_gpu = torch_cuda.is_available()
    use_gpu = False  # forced off: PyTorch is giving a memory error
    device = torch_device(torch_cuda.current_device()) if use_gpu else torch_device("cpu")

    with open("cfg.txt") as fd:
        data_specs = json.load(fd)

    r = get_images_classes(
        data_specs['images_path'],
        data_specs['info_dict'],
        data_specs['class_of_interest']
    )

    train_d, val_d, test_d = split_data(
        data_specs['positive_ev_path'],
        r,
        window_size=temp_size,
        future_size=future_size
    )

    info_path = data_specs["info_path"]
    train_ds = PixelLevelDs(
        train_d,
        info_path=info_path
    )
    val_ds = PixelLevelDs(
        val_d,
        # add_polarity="neg",
        info_path=info_path
    )
    test_ds = PixelLevelDs(
        test_d,
        info_path=info_path
    )

    dls = {
        "train": DataLoader(train_ds, batch_size=batch_size, num_workers=0, pin_memory=use_gpu),
        "val": DataLoader(val_ds, batch_size=batch_size, num_workers=0, pin_memory=use_gpu),
        "test": DataLoader(test_ds, batch_size=1)
    }

    full_forward = SaveStatsForwardCallback(heatmap_CE_accuracy(num_classes))
    # to_device = ToDeviceCallback(device)
    compose_callbacks = ComposeCallback(
        start_callbacks=[StartPrinterCallback()],
        train_pData_callbacks=[
            # to_device,
            full_forward],
        val_pData_callbacks=[
            # to_device,
            full_forward],
        nextEpoch_callbacks=[full_forward],
        phaseEnded_callbacks=[full_forward],
        allEnded_callbacks=[]
    )
    # TODO: read "A Closer Look at Spatiotemporal Convolutions for Action Recognition",
    #  https://arxiv.org/pdf/1711.11248.pdf

    model = ToNameNet(temp_size, net_temp_output, out_dims, num_classes)
    model.to(device)

    train(model, dls['train'], dls['val'], compose_callbacks,
          # criterion=heatmap_mse(num_classes),
          criterion=heatmap_cross_entropy(num_classes, weight=tensor([0.3, 4, 15], dtype=torch_float)),
          epochs=1)
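
Since the function reads "cfg.txt" from the working directory, a plain entry-point guard is enough to run it:

if __name__ == "__main__":
    main()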
Example #7
def plot_on_concise_dataset(cfg_list, apply_transform, max_rows=5):

    individual_transforms = DelayedTransformSet(
        0.7,
        transforms=[
            DelayedRotation(0.6, 20, expand=1),
            CustomResize(500),
            transforms.CenterCrop((260, 346)),
        ])

    train_list, val_list, test_list = [], [], []
    for cfg in cfg_list:
        with open(cfg) as fd:
            data_specs = json.load(fd)

        r = get_images_classes(data_specs['images_path'],
                               data_specs['info_dict'],
                               data_specs['class_of_interest'])
        train_split, val_split, test_split = split_data(
            data_specs['positive_ev_path'],
            r,
            window_size=5,
            future_size=1,
            test_size=.3,
            val_size=.2)
        train_split = add_texture_and_features_info(train_split,
                                                    data_specs['info_path'])
        val_split = add_texture_and_features_info(val_split,
                                                  data_specs['info_path'])
        test_split = add_texture_and_features_info(test_split,
                                                   data_specs['info_path'])

        train_list.append(train_split)
        val_list.append(val_split)
        test_list.append(test_split)
    train_d = pd.concat(train_list)
    val_d = pd.concat(val_list)
    test_d = pd.concat(test_list)
    train_d = train_d.sample(frac=1, replace=False).reset_index(drop=True)
    val_d = val_d.sample(frac=1, replace=False).reset_index(drop=True)
    test_d = test_d.sample(frac=1, replace=False).reset_index(drop=True)

    ds = ConcisePixelLevelDs(val_d,
                             transforms=apply_transform,
                             individual_transforms=individual_transforms)

    imgs_r, temp_data, ys = [], [], []
    for j, (x, y) in enumerate(ds):
        if j >= max_rows:
            break
        img_raw, temporal_data = x

        imgs_r.append(img_raw[0].unsqueeze(0).numpy())  # add leading dim: (1, H, W)
        temp_data.append(temporal_data.numpy())
        ys.append(y.numpy())

    grid_plot(max_rows, 1, imgs_r, title="Static images")
    grid_plot(max_rows,
              temp_data[0].shape[0],
              temp_data,
              title="Net temporal input")
    grid_plot(max_rows, ys[0].shape[0], ys, title="Real")
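
Usage mirrors Example #3 but pools several configs; a sketch with placeholder file names, again assuming a torchvision-style callable is accepted:

from torchvision import transforms

# Hypothetical usage across two recordings.
plot_on_concise_dataset(["cfg_a.txt", "cfg_b.txt"],
                        apply_transform=transforms.CenterCrop((260, 346)),
                        max_rows=5)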
Example #8
def eval_metrics(cfg_list, metric, coi=2):
    future_size = 0
    val_size, test_size = 0.15, 0.30

    # Shared settings (temp_size, temp_features, model_path) are read from
    # the first config in the list.
    with open(cfg_list[0]) as fd:
        data_specs = json.load(fd)

    temp_size = data_specs['temp_size']
    num_of_temp_features = data_specs['temp_features']
    m = load_model(data_specs['model_path'],
                   DOTNetCNN.name(),
                   # ToNameNet.name(),
                   "model_params.json")
    use_gpu = torch_cuda.is_available()
    device = torch_device(torch_cuda.current_device()) if use_gpu else torch_device("cpu")
    m.to(device)

    # Alternative: single-config evaluation over the full train+val+test set:
    # r = get_images_classes(
    #     data_specs['images_path'],
    #     data_specs['info_dict'],
    #     data_specs['class_of_interest']
    # )
    # train_d, val_d, test_d = split_data(
    #     data_specs['positive_ev_path'],
    #     r,
    #     window_size=temp_size,
    #     future_size=0,
    #     shuffle=False
    # )
    # train_d = add_texture_and_features_info(train_d, data_specs['info_path'])
    # val_d = add_texture_and_features_info(val_d, data_specs['info_path'])
    # test_d = add_texture_and_features_info(test_d, data_specs['info_path'])
    # full_df = pd.concat([train_d, val_d, test_d], ignore_index=True)
    # ds = ConcisePixelLevelDs(
    #     full_df
    # )

    train_list, val_list, test_list = [], [], []
    for cfg in cfg_list:
        with open(cfg) as fd:
            data_specs = json.load(fd)

        r = get_images_classes(
            data_specs['images_path'],
            data_specs['info_dict'],
            data_specs['class_of_interest']
        )
        train_split, val_split, test_split = split_data(
            data_specs['positive_ev_path'],
            r,
            window_size=temp_size,
            future_size=future_size,
            test_size=test_size,
            val_size=val_size
        )

        train_split = add_texture_and_features_info(train_split, data_specs['info_path'])
        val_split = add_texture_and_features_info(val_split, data_specs['info_path'])
        test_split = add_texture_and_features_info(test_split, data_specs['info_path'])

        train_list.append(train_split)
        val_list.append(val_split)
        test_list.append(test_split)
    # Merge the per-config splits, then shuffle each deterministically.
    train_d, val_d, test_d = pd.concat(train_list), pd.concat(val_list), pd.concat(test_list)
    train_d = train_d.sample(frac=1, replace=False, random_state=42).reset_index(drop=True)
    val_d = val_d.sample(frac=1, replace=False, random_state=42).reset_index(drop=True)
    test_d = test_d.sample(frac=1, replace=False, random_state=42).reset_index(drop=True)
    ds = ConcisePixelLevelDs(val_d)

    if metric == "event_acc":
        return avg_accuracy_by_events(m, ds)
    if metric == "ts":
        return T_metrics_by_events(m, ds, class_of_interest=coi)
    if metric == "yolo_IOU":
        return get_bbox_IOU(m, ds, data_specs['yolo_path'])
    if metric == "net_time":
        return get_net_processing_time(m, ds)
    if metric == "net_by_branch_time":
        return get_processing_time_by_branch_separation(m, ds)
    if metric == "pixel_T":
        return T_metrics_by_pixels(m, ds, class_of_interest=coi, pool_k=10, pool_s=1)
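
A sketch exercising every supported metric; the function returns None for unknown names, and "yolo_IOU" additionally needs a 'yolo_path' key (taken from the last config loaded, see the note above):

# Hypothetical usage with placeholder config names.
cfgs = ["cfg_a.txt", "cfg_b.txt"]
for metric in ("event_acc", "ts", "yolo_IOU",
               "net_time", "net_by_branch_time", "pixel_T"):
    print(metric, eval_metrics(cfgs, metric, coi=2))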