def process_hdf5(flags, h5_data, base_dir, mouse, input_types, views, out):
    # output directory for the extracted features
    data_dir = os.path.join(
        "/groups/branson/bransonlab/kwaki/data/features/finetune_odas_unfroze_reweight",
        mouse)

    # the flow stream is skipped in this variant; only the rgb stream below is
    # computed.

    # setup the rgb.
    print("\t\trgb")
    mouse_dir = os.path.join(base_dir, mouse, "all", input_types[0])
    opts = setup_opts(flags, mouse_dir)
    paths.setup_output_space(opts)
    compute_outputs(opts, h5_data, [input_types[0]], views, data_dir, out)
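
# Hypothetical usage sketch (the HDF5 file name, mouse ids, input types and view
# names below are placeholders; FLAGS, setup_opts and compute_outputs come from
# the surrounding module):
#
#   with h5py.File("hantman_split.hdf5", "r") as h5_data, \
#           open("feature_log.txt", "w") as out:
#       for mouse in ["M134", "M147"]:
#           process_hdf5(FLAGS, h5_data, "/path/to/frame_dir", mouse,
#                        ["flow", "rgb"], ["side", "front"], out)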
Example 2
def main(argv):
    opts = _setup_opts(argv)
    paths.setup_output_space(opts)

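    # a cuda_device of -1 means run on the CPU; otherwise pin all CUDA work to
    # the requested GPU id.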
    if opts["flags"].cuda_device != -1:
        torch.cuda.set_device(opts["flags"].cuda_device)

    with h5py.File(opts["flags"].train_file, "r") as train_data:
        with h5py.File(opts["flags"].test_file, "r") as test_data:
            with h5py.File(opts["flags"].valid_file, "r") as valid_data:
                if DEBUG:
                    train_data = valid_data
                    test_data = valid_data

                sequences_helper.copy_templates(opts, train_data, "train",
                                                g_label_names)
                sequences_helper.copy_templates(opts, test_data, "test",
                                                g_label_names)
                sequences_helper.copy_templates(opts, valid_data, "valid",
                                                g_label_names)

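                # mini-batch sampler: draws per-frame windows from the training
                # videos (the window is controlled by the frames flag).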
                sampler = HantmanVideoFrameSampler(
                    opts["rng"],
                    train_data,
                    opts["flags"].video_dir,
                    opts["flags"].hantman_mini_batch,
                    frames=opts["flags"].frames,
                    use_pool=True,
                    gpu_id=opts["flags"].cuda_device)

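                # per-class loss weights estimated from the training labels,
                # presumably to counter label imbalance.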
                label_weight = _get_label_weight(opts, train_data)
                train_eval = HantmanVideoSampler(
                    opts["rng"],
                    train_data,
                    opts["flags"].video_dir,
                    use_pool=True,
                    gpu_id=opts["flags"].cuda_device)
                test_eval = HantmanVideoSampler(
                    opts["rng"],
                    test_data,
                    opts["flags"].video_dir,
                    use_pool=True,
                    gpu_id=opts["flags"].cuda_device)
                valid_eval = HantmanVideoSampler(
                    opts["rng"],
                    valid_data,
                    opts["flags"].video_dir,
                    use_pool=True,
                    gpu_id=opts["flags"].cuda_device)

                network, optimizer, criterion = _init_network(
                    opts, label_weight)
                _train_network(opts, network, optimizer, criterion, sampler,
                               train_eval, test_eval, valid_eval)
def main(argv):
    opts = _setup_opts(argv)
    paths.setup_output_space(opts)

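    # create the output directory for extracted features on first use.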
    if not os.path.exists(opts["flags"].feature_dir):
        os.makedirs(opts["flags"].feature_dir)
        exp_dir = os.path.join(os.path.dirname(opts["flags"].train_file),
                               "exps",
                               os.path.basename(opts["flags"].feature_dir))

    if opts["flags"].cuda_device != -1:
        torch.cuda.set_device(opts["flags"].cuda_device)

    # load data
    with h5py.File(opts["flags"].train_file, "a") as train_data:
        with h5py.File(opts["flags"].test_file, "a") as test_data:
            with h5py.File(opts["flags"].valid_file, "a") as valid_data:
                if DEBUG:
                    train_data = valid_data
                    test_data = valid_data
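                # sampler stays None here; _train_network is given only the
                # full-video evaluation samplers built below.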
                sampler = None
                label_weight = _get_label_weight(opts, train_data)

                train_eval = HantmanVideoSampler(
                    None,
                    train_data,
                    opts["flags"].video_dir,
                    use_pool=True,
                    gpu_id=opts["flags"].cuda_device)
                test_eval = HantmanVideoSampler(
                    None,
                    test_data,
                    opts["flags"].video_dir,
                    use_pool=True,
                    gpu_id=opts["flags"].cuda_device)
                valid_eval = HantmanVideoSampler(
                    None,
                    valid_data,
                    opts["flags"].video_dir,
                    use_pool=True,
                    gpu_id=opts["flags"].cuda_device)

                sequences_helper.copy_templates(opts, train_data, "train",
                                                g_label_names)
                sequences_helper.copy_templates(opts, test_data, "test",
                                                g_label_names)
                sequences_helper.copy_templates(opts, valid_data, "valid",
                                                g_label_names)

                network, optimizer, criterion = _init_model(opts, label_weight)

                _train_network(opts, network, optimizer, criterion, sampler,
                               train_eval, test_eval, valid_eval)
def main(argv):
    print(argv)
    opts = _setup_opts(argv)
    paths.setup_output_space(opts)
    if opts["flags"].cuda_device != -1:
        torch.cuda.set_device(opts["flags"].cuda_device)

    full_tic = time.time()
    with h5py.File(opts["flags"].train_file, "r") as train_data:
        with h5py.File(opts["flags"].test_file, "r") as test_data:
            if opts["flags"].valid_file is not None:
                with h5py.File(opts["flags"].valid_file, "r") as valid_data:
                    run_training(opts, train_data, test_data, valid_data)
            else:
                run_training(opts, train_data, test_data, None)
    print("Training took: %d\n" % (time.time() - full_tic))
def main(argv):
    # setup options
    opts = _setup_opts(argv)
    paths.setup_output_space(opts)
    if opts["flags"].cuda_device != -1:
        torch.cuda.set_device(opts["flags"].cuda_device)

    # load data and time the end-to-end run
    tic = time.time()
    data_files = _load_hdfs(opts)
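    # _load_hdfs appears to return open h5py handles; they are closed
    # explicitly at the end of this function.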
    # setup the templates for plotting
    _setup_templates(opts, data_files, g_label_names)
    samplers = _setup_samplers(opts, data_files)
    network, optimizer = _init_network(opts, samplers)
    _train_network(opts, network, optimizer, samplers)
    print("total pipeline time: %f" % (time.time() - tic))
    # close hdf5s
    for data_file in data_files:
        data_file.close()
Example 6
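# This variant runs feature extraction three times per mouse: the flow stream
# alone, the rgb stream alone, and both streams combined ("feedforward").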
def process_hdf5(flags, h5_data, base_dir, mouse, input_types, views, out):
    # create directory
    data_dir = os.path.join(
        "/groups/branson/bransonlab/kwaki/data/features/finetune2_i3d", mouse)

    # setup the flow.
    print("\t\tflow")
    mouse_dir = os.path.join(base_dir, mouse, "all", input_types[0])
    opts = setup_opts(flags, mouse_dir)
    paths.setup_output_space(opts)
    compute_outputs(opts, h5_data, [input_types[0]], views, data_dir, out)

    # setup the rgb.
    print("\t\trgb")
    mouse_dir = os.path.join(base_dir, mouse, "all", input_types[1])
    opts = setup_opts(flags, mouse_dir)
    paths.setup_output_space(opts)
    compute_outputs(opts, h5_data, [input_types[1]], views, data_dir, out)

    # set up the combined run: both input types together, using the
    # "feedforward" output folder.
    print("\t\tboth")
    mouse_dir = os.path.join(base_dir, mouse, "all", "feedforward")
    opts = setup_opts(flags, mouse_dir)
    paths.setup_output_space(opts)
    compute_outputs(opts, h5_data, input_types, views, data_dir, out)
def main(argv):
    opts = _setup_opts(argv)
    paths.setup_output_space(opts)
    if opts["flags"].cuda_device != -1:
        torch.cuda.set_device(opts["flags"].cuda_device)

    with h5py.File(opts["flags"].train_file, "r") as train_data:
        with h5py.File(opts["flags"].test_file, "r") as test_data:
            _copy_templates(opts, train_data, test_data)
            print("done")

            print("computing means")
            tic = time.time()
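            # draw long fixed-length sequences from the training videos once to
            # estimate normalization statistics (reused by every sampler below).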
            temp_sampler = VideoSampler(opts["rng"],
                                        train_data,
                                        opts["flags"].video_dir,
                                        seq_len=2000,
                                        use_pool=False,
                                        gpu_id=opts["flags"].cuda_device)
            temp_sampler.reset()
            normalize = compute_means(opts, train_data, temp_sampler)
            print(time.time() - tic)

            sampler = VideoFrameSampler(opts["rng"],
                                        train_data,
                                        opts["flags"].video_dir,
                                        opts["flags"].hantman_mini_batch,
                                        frames=opts["flags"].frames,
                                        use_pool=False,
                                        gpu_id=opts["flags"].cuda_device,
                                        normalize=normalize)

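            # seq_len=-1 appears to request full-length sequences for the
            # evaluation samplers (assumption based on how it is used here).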
            train_eval = VideoSampler(opts["rng"],
                                      train_data,
                                      opts["flags"].video_dir,
                                      seq_len=-1,
                                      use_pool=False,
                                      gpu_id=opts["flags"].cuda_device,
                                      normalize=normalize)
            test_eval = VideoSampler(opts["rng"],
                                     test_data,
                                     opts["flags"].video_dir,
                                     seq_len=-1,
                                     use_pool=False,
                                     gpu_id=opts["flags"].cuda_device,
                                     normalize=normalize)

            network, optimizer, criterion = _init_network(opts)

            tic = time.time()

            _train_network(opts, network, optimizer, criterion, train_data,
                           test_data, sampler, train_eval, test_eval)

            print("training time: %f" % (time.time() - tic))
    else:
        for key in opts["flags"].feat_keys:
            temp_feat = train_data["exps"][exp_names[0]][key].value
            mean = np.zeros((temp_feat.shape[2], ))
            std = np.ones((temp_feat.shape[2], ))
            means.append(mean)
            stds.append(std)

    return means, stds


if __name__ == "__main__":
    print(sys.argv)

    opts = _setup_opts(sys.argv)
    paths.setup_output_space(opts)
    if opts["flags"].cuda_device != -1:
        torch.cuda.set_device(opts["flags"].cuda_device)

    # load data
    # try:
    tic = time.time()
    with h5py.File(opts["flags"].train_file, "r") as train_data:
        with h5py.File(opts["flags"].test_file, "r") as test_data:
            with h5py.File(opts["flags"].val_file, "r") as val_data:
                _copy_templates(opts, train_data, test_data, val_data)
                label_weight = _get_label_weight(train_data)
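                # tile the per-class weights to (seq_len, mini_batch, num_labels)
                # so they broadcast over every frame and sequence in a batch.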
                label_mat = np.tile(
                    label_weight,
                    (opts["flags"].seq_len,
                        opts["flags"].hantman_mini_batch, 1)).astype('float32')