Example #1
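All four snippets come from the same project and omit their imports: read_config, DataGenerator, fuse_rn, train_model, gauss and the dataset modules (UT, SBU, NTU, YMJA) are project-level helpers, while np, K and categorical_accuracy are presumably numpy, keras.backend and keras.metrics.categorical_accuracy.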
def train_fused_rn(output_path,
                   dataset_name,
                   dataset_fold,
                   config_filepaths,
                   weights_filepaths,
                   batch_size=32,
                   epochs=100,
                   checkpoint_period=5,
                   learning_rate=1e-4,
                   drop_rate=0.1,
                   freeze_g_theta=False,
                   fuse_at_fc1=False,
                   initial_epoch=0,
                   initial_weights=None,
                   use_data_gen=True,
                   subsample_ratio=None,
                   gpus=1,
                   verbose=2):

    data_kwargs, _, _ = read_config(config_filepaths[0])

    if verbose > 0:
        print("***** Training parameters *****")
        print("\t Output path:", output_path)
        print("\t Dataset:", dataset_name)
        print("\t Dataset fold:", dataset_fold)
        print("\t Fusion info")
        print("\t > config_filepaths:", config_filepaths)
        print("\t > weights_filepaths:", weights_filepaths)
        print("\t > freeze_g_theta:", freeze_g_theta)
        print("\t > fuse_at_fc1:", fuse_at_fc1)
        print("\t Training options")
        print("\t > Batch Size:", batch_size)
        print("\t > Epochs:", epochs)
        print("\t > Checkpoint Period:", checkpoint_period)
        print("\t > Learning Rate:", learning_rate)
        print("\t > Dropout rate:", drop_rate)
        print("\t > Training Subsample Ratio:", subsample_ratio)
        print("\t GPUs:", gpus)
        print("\t Skeleton info")
        for key, value in data_kwargs.items():
            print("\t > {}: {}".format(key, value))

    # Resolve the dataset module (only used when use_data_gen is False)
    if dataset_name == 'UT':
        dataset = UT
    elif dataset_name == 'SBU':
        dataset = SBU

    if verbose > 0:
        print("Reading data...")

    if use_data_gen:
        train_generator = DataGenerator(dataset_name,
                                        dataset_fold,
                                        'train',
                                        batch_size=batch_size,
                                        reshuffle=True,
                                        shuffle_indiv_order=True,
                                        **data_kwargs)
        val_generator = DataGenerator(dataset_name,
                                      dataset_fold,
                                      'validation',
                                      batch_size=batch_size,
                                      reshuffle=False,
                                      shuffle_indiv_order=False,
                                      **data_kwargs)
        X_train, Y_train = train_generator[0]  # peek at one batch to infer shapes
        X_val, Y_val = val_generator[0]
        train_data = train_generator
        val_data = val_generator
    else:
        X_train, Y_train = dataset.get_train(dataset_fold, **data_kwargs)
        X_val, Y_val = dataset.get_val(dataset_fold, **data_kwargs)
        train_data = [X_train, Y_train]
        val_data = [X_val, Y_val]

    num_joints = len(X_train) // 2  # one input per joint, for two individuals
    object_shape = (len(X_train[0][0]), )  # flattened feature length per joint
    output_size = len(Y_train[0])  # number of classes (labels are one-hot)

    models_kwargs = []
    for config_filepath in config_filepaths:
        data_kwargs, model_kwargs, train_kwargs = read_config(config_filepath)
        timesteps = data_kwargs['timesteps']
        add_joint_idx = data_kwargs['add_joint_idx']
        add_body_part = data_kwargs['add_body_part']
        overhead = add_joint_idx + add_body_part  # True/False = 1/0
        num_dim = (object_shape[0] - overhead) // timesteps
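        # Worked example with hypothetical values: timesteps=16,
        # add_joint_idx=True and add_body_part=True give overhead = 2, so an
        # object vector of length 50 yields num_dim = (50 - 2) // 16 = 3.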
        model_kwargs['num_dim'] = num_dim
        model_kwargs['overhead'] = overhead
        models_kwargs.append(model_kwargs)

    train_kwargs['drop_rate'] = drop_rate
    if verbose > 0:
        print("Creating model...")
    model = fuse_rn(num_joints,
                    object_shape,
                    output_size,
                    train_kwargs,
                    models_kwargs,
                    weights_filepaths,
                    freeze_g_theta=freeze_g_theta,
                    fuse_at_fc1=fuse_at_fc1)

    if initial_weights is not None:
        model.load_weights(initial_weights)

    fit_history = train_model(model=model,
                              verbose=verbose,
                              learning_rate=learning_rate,
                              output_path=output_path,
                              checkpoint_period=checkpoint_period,
                              batch_size=batch_size,
                              epochs=epochs,
                              use_data_gen=use_data_gen,
                              train_data=train_data,
                              val_data=val_data,
                              subsample_ratio=subsample_ratio)

    return fit_history
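
A minimal call sketch for train_fused_rn; all paths are hypothetical placeholders, and weights_filepaths is assumed to take one list of pre-trained weight files per stream config (the prediction examples pass an empty list per config when no stream weights are loaded):

fit_history = train_fused_rn(
    output_path='models/fusion/',
    dataset_name='SBU',
    dataset_fold=0,
    config_filepaths=['configs/stream_a.cfg', 'configs/stream_b.cfg'],
    weights_filepaths=[['weights/stream_a.hdf5'], ['weights/stream_b.hdf5']],
    epochs=50,
    freeze_g_theta=True,  # keep the pre-trained g_theta layers fixed
    fuse_at_fc1=False)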
Example #2
def predict_fused_rn(fusion_weights_path,
                     dataset_name,
                     dataset_fold,
                     config_filepaths,
                     freeze_g_theta=False,
                     fuse_at_fc1=False,
                     batch_size=32,
                     verbose=2,
                     gpus=1):

    if verbose > 0:
        print("***** Predicting parameters *****")
        print("\t fusion_weights_path:", fusion_weights_path)
        print("\t Dataset:", dataset_name)
        print("\t Dataset fold:", dataset_fold)
        print("\t Fusion info")
        print("\t > config_filepaths:", config_filepaths)
        print("\t > freeze_g_theta:", freeze_g_theta)
        print("\t > fuse_at_fc1:", fuse_at_fc1)
        print("\t Predicting options")
        print("\t > Batch Size:", batch_size)

    data_kwargs, _, _ = read_config(config_filepaths[0])

    val_generator = DataGenerator(dataset_name,
                                  dataset_fold,
                                  'validation',
                                  batch_size=batch_size,
                                  reshuffle=False,
                                  shuffle_indiv_order=False,
                                  **data_kwargs)
    X_val, Y_val = val_generator[0]

    num_joints = len(X_val) // 2
    object_shape = (len(X_val[0][0]), )
    output_size = len(Y_val[0])

    if verbose > 0:
        print("Reading Y_val...")
    Y_val = []  # rebuild the full ground-truth list batch by batch
    for batch_idx in range(len(val_generator)):
        _, y_val = val_generator[batch_idx]
        Y_val += y_val.tolist()

    models_kwargs = []
    for config_filepath in config_filepaths:
        data_kwargs, model_kwargs, train_kwargs = read_config(config_filepath)
        timesteps = data_kwargs['timesteps']
        add_joint_idx = data_kwargs['add_joint_idx']
        add_body_part = data_kwargs['add_body_part']
        overhead = add_joint_idx + add_body_part  # True/False = 1/0
        num_dim = (object_shape[0] - overhead) // timesteps
        model_kwargs['num_dim'] = num_dim
        model_kwargs['overhead'] = overhead
        models_kwargs.append(model_kwargs)

    train_kwargs['drop_rate'] = 0  # no dropout at inference time
    weights_filepaths = [[] for _ in config_filepaths]  # per-stream weights unused; fused weights are loaded below

    if verbose > 0:
        print("Creating model...")
    model = fuse_rn(num_joints,
                    object_shape,
                    output_size,
                    train_kwargs,
                    models_kwargs,
                    weights_filepaths,
                    freeze_g_theta=freeze_g_theta,
                    fuse_at_fc1=fuse_at_fc1)

    if verbose > 0:
        print("Loading weights...")
    model.load_weights(fusion_weights_path)

    if verbose > 0:
        print("Starting predicting...")

    Y_pred = model.predict_generator(val_generator,
                                     max_queue_size=10,
                                     workers=5,
                                     use_multiprocessing=True,
                                     verbose=verbose)

    acc_tensor = categorical_accuracy(Y_val, Y_pred)
    acc = K.eval(acc_tensor).mean()

    if verbose > 0:
        print("Validation acc: {:.2%}".format(acc))

    # Convert back from to_categorical
    Y_pred = np.argmax(Y_pred, axis=1, out=None).tolist()
    Y_val = np.argmax(Y_val, axis=1, out=None).tolist()

    return Y_pred, Y_val
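
A sketch of validating a trained fusion checkpoint (paths are placeholders). The returned lists are plain class indices, so they feed directly into e.g. sklearn.metrics.confusion_matrix:

Y_pred, Y_true = predict_fused_rn(
    fusion_weights_path='models/fusion/weights-final.hdf5',
    dataset_name='SBU',
    dataset_fold=0,
    config_filepaths=['configs/stream_a.cfg', 'configs/stream_b.cfg'])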
Example #3
def predict_fused_rn_seq(fusion_weights_path,
                         dataset_name,
                         dataset_fold,
                         config_filepaths,
                         freeze_g_theta=False,
                         fuse_at_fc1=False,
                         flat_seqs=False,
                         batch_size=32,
                         verbose=2,
                         gpus=1,
                         return_acc=False,
                         use_data_gen=True):

    if verbose > 0:
        print("***** Predicting parameters *****")
        print("\t fusion_weights_path:", fusion_weights_path)
        print("\t Dataset:", dataset_name)
        print("\t Dataset fold:", dataset_fold)
        print("\t Fusion info")
        print("\t > config_filepaths:", config_filepaths)
        print("\t > freeze_g_theta:", freeze_g_theta)
        print("\t > fuse_at_fc1:", fuse_at_fc1)
        print("\t Predicting options")
        print("\t > Batch Size:", batch_size)
        print("\t > flat_seqs:", flat_seqs)
        print("\t > Use Data Generator:", use_data_gen)

    # Resolve the dataset module (only used when use_data_gen is False)
    if dataset_name == 'UT':
        dataset = UT
    elif dataset_name == 'SBU':
        dataset = SBU
    elif dataset_name == 'NTU':
        dataset = NTU
    elif dataset_name == 'YMJA':
        dataset = YMJA

    data_kwargs, _, _ = read_config(config_filepaths[0])

    data_kwargs['sample_method'] = 'all'  # score every sub-sequence of each video
    data_kwargs['seq_step'] = data_kwargs.get('seq_step',
                                              data_kwargs['timesteps'] // 2)
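    # e.g. with timesteps=16 the default seq_step is 8, i.e. consecutive
    # windows overlap by 50% (illustrative values)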

    if verbose > 0:
        print("Reading data...")

    if use_data_gen:
        if verbose > 0:
            print("> Using DataGenerator")
        val_generator = DataGenerator(dataset_name,
                                      dataset_fold,
                                      'validation',
                                      batch_size=batch_size,
                                      reshuffle=False,
                                      shuffle_indiv_order=False,
                                      **data_kwargs)
        X_val, Y_val = val_generator[0]
        num_joints = len(X_val) // 2
        object_shape = (len(X_val[0][0]), )

        if verbose > 0:
            print("> Reading Y_val...")
        Y_val_flat = []
        for batch_idx in range(len(val_generator)):
            _, y_val = val_generator[batch_idx]
            Y_val_flat += y_val.tolist()

        # Group consecutive sequences by source video, keeping a slice of
        # indices per video so per-sequence outputs can be regrouped later
        videos_address = []
        prvs_video_idx = val_generator.seqs_mapping[0][0]
        pointer, num_seqs = 0, 0
        for video_idx, seq_idx in val_generator.seqs_mapping:
            if prvs_video_idx == video_idx:
                num_seqs += 1
            else:
                videos_address.append(slice(pointer, pointer + num_seqs))
                pointer += num_seqs
                num_seqs = 1
                prvs_video_idx = video_idx
        videos_address.append(slice(pointer, pointer + num_seqs))
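        # e.g. three videos with 4, 2 and 5 sequences respectively yield
        # videos_address = [slice(0, 4), slice(4, 6), slice(6, 11)]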

        Y_val = []
        for video_address in videos_address:
            Y_val.append(Y_val_flat[video_address][0])  # all seqs of a video share one label
    else:
        if verbose > 0:
            print("> Reading all data at once")

        X_val, Y_val = dataset.get_val(dataset_fold, **data_kwargs)

        if flat_seqs:  # Accuracy in this case will be per sequence and not per video
            num_joints = len(X_val) // 2
            object_shape = (len(X_val[0][0]), )
        else:  # Accuracy in this case will be per video, after averaging the seqs
            num_joints = len(X_val[0][0]) // 2
            object_shape = (len(X_val[0][0][0]), )
            ## Flatten X_val at axis 1 (num_seqs) and swap axes to (1, 0, 2);
            ## num_videos is replaced by the total number of sequences.
            ## Keep the "address" of each input so Y_pred can be unflattened later.
            reshaped_X_val = []  # reshaped X_val, dropping the video-seqs axis
            videos_address = []  # video address in reshaped_X_val
            pointer = 0
            for video_seqs in X_val:
                num_seqs = len(video_seqs)
                videos_address.append(slice(pointer, pointer + num_seqs))
                pointer += num_seqs
                reshaped_X_val += video_seqs
            X_val = np.array(reshaped_X_val).transpose((1, 0, 2)).tolist()

    output_size = len(Y_val[0])

    if verbose > 0:
        print("Creating model...")

    models_kwargs = []
    for config_filepath in config_filepaths:
        data_kwargs, model_kwargs, train_kwargs = read_config(config_filepath)
        timesteps = data_kwargs['timesteps']
        add_joint_idx = data_kwargs['add_joint_idx']
        add_body_part = data_kwargs['add_body_part']
        overhead = add_joint_idx + add_body_part  # True/False = 1/0
        num_dim = (object_shape[0] - overhead) // timesteps
        model_kwargs['num_dim'] = num_dim
        model_kwargs['overhead'] = overhead
        models_kwargs.append(model_kwargs)

    train_kwargs['drop_rate'] = 0  # no dropout at inference time
    weights_filepaths = [[] for _ in config_filepaths]  # fused weights are loaded below

    model = fuse_rn(num_joints,
                    object_shape,
                    output_size,
                    train_kwargs,
                    models_kwargs,
                    weights_filepaths,
                    freeze_g_theta=freeze_g_theta,
                    fuse_at_fc1=fuse_at_fc1)

    if verbose > 0:
        print("Loading weights...")
    model.load_weights(fusion_weights_path)

    if verbose > 0:
        print("Starting predicting...")

    if use_data_gen:
        reshaped_Y_pred = model.predict_generator(val_generator,
                                                  max_queue_size=10,
                                                  workers=5,
                                                  use_multiprocessing=True,
                                                  verbose=verbose)
    else:
        reshaped_Y_pred = model.predict(X_val,
                                        batch_size=batch_size,
                                        verbose=verbose)

    use_gauss_weight = True  # weight per-sequence scores with a Gaussian window
    if not flat_seqs:  # Undo and avg reshaped_Y_pred (SUM num_seqs, ...) -> (Num_videos, ...)
        Y_pred = []
        for video_address in videos_address:
            if use_gauss_weight:
                avg_scores = np.average(
                    reshaped_Y_pred[video_address],
                    axis=0,
                    weights=gauss(len(reshaped_Y_pred[video_address])))
            else:
                avg_scores = np.average(reshaped_Y_pred[video_address], axis=0)
            Y_pred.append(avg_scores.tolist())
    else:  # with flat_seqs the per-sequence scores are used directly
        Y_pred = reshaped_Y_pred

    acc_tensor = categorical_accuracy(Y_val, Y_pred)
    acc = K.eval(acc_tensor).mean()

    if verbose > 0:
        print("Validation acc: {:.2%}".format(acc))

    # Convert back from to_categorical
    Y_pred = np.argmax(Y_pred, axis=1, out=None).tolist()
    Y_val = np.argmax(Y_val, axis=1, out=None).tolist()

    if return_acc:
        return acc
    else:
        return Y_pred, Y_val
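
A hedged call sketch with placeholder paths; unless flat_seqs=True, the function averages the per-sequence scores of each video (Gaussian-weighted) into a single per-video prediction:

acc = predict_fused_rn_seq(
    fusion_weights_path='models/fusion/weights-final.hdf5',
    dataset_name='NTU',
    dataset_fold=0,  # fold index/identifier is dataset-specific
    config_filepaths=['configs/stream_a.cfg', 'configs/stream_b.cfg'],
    return_acc=True)  # return only the per-video validation accuracy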
Example #4
def train_fused_rn(output_path,
                   dataset_name,
                   dataset_fold,
                   config_filepaths,
                   weights_filepaths,
                   batch_size=32,
                   epochs=100,
                   checkpoint_period=5,
                   learning_rate=1e-4,
                   drop_rate=0.1,
                   freeze_g_theta=False,
                   fuse_at_fc1=False,
                   new_arch=False,
                   avg_at_end=False,
                   initial_epoch=0,
                   initial_weights=None,
                   use_data_gen=True,
                   subsample_ratio=None,
                   gpus=1,
                   verbose=2):

    data_kwargs, _, _ = read_config(config_filepaths[0])
    if new_arch:
        data_kwargs['arch'] = 'joint_temp_fused'

    if verbose > 0:
        print("***** Training parameters *****")
        print("\t Output path:", output_path)
        print("\t Dataset:", dataset_name)
        print("\t Dataset fold:", dataset_fold)
        print("\t Fusion info")
        print("\t > config_filepaths:", config_filepaths)
        print("\t > weights_filepaths:", weights_filepaths)
        print("\t > freeze_g_theta:", freeze_g_theta)
        print("\t > fuse_at_fc1:", fuse_at_fc1)
        print("\t > New architecture:", new_arch)
        print("\t Training options")
        print("\t > Batch Size:", batch_size)
        print("\t > Epochs:", epochs)
        print("\t > Checkpoint Period:", checkpoint_period)
        print("\t > Learning Rate:", learning_rate)
        print("\t > Dropout rate:", drop_rate)
        print("\t > Training Subsample Ratio:", subsample_ratio)
        print("\t GPUs:", gpus)
        print("\t Skeleton info")
        for key, value in data_kwargs.items():
            print("\t > {}: {}".format(key, value))

    if dataset_name == 'UT':
        dataset = UT
    elif dataset_name == 'SBU':
        dataset = SBU
    elif dataset_name == 'YMJA':
        dataset = YMJA

    if verbose > 0:
        print("Reading data...")

    if use_data_gen:
        train_generator = DataGenerator(dataset_name,
                                        dataset_fold,
                                        'train',
                                        batch_size=batch_size,
                                        reshuffle=True,
                                        shuffle_indiv_order=True,
                                        **data_kwargs)
        val_generator = DataGenerator(dataset_name,
                                      dataset_fold,
                                      'validation',
                                      batch_size=batch_size,
                                      reshuffle=False,
                                      shuffle_indiv_order=False,
                                      **data_kwargs)

        X_train, Y_train = train_generator.getSampleData(0, new_arch)
        X_val, Y_val = val_generator.getSampleData(0, new_arch)
        train_data = train_generator
        val_data = val_generator
    else:
        X_train, Y_train = dataset.get_train(dataset_fold, **data_kwargs)
        X_val, Y_val = dataset.get_val(dataset_fold, **data_kwargs)
        train_data = [X_train, Y_train]
        val_data = [X_val, Y_val]

    models_kwargs = []
    output_size = len(Y_train[0])

    if new_arch:
        check_configs = []
        for config_filepath in config_filepaths:
            data_kwargs, model_kwargs, train_kwargs = read_config(
                config_filepath)
            check_configs.append(model_kwargs)

        # Ensure that both the joint and temporal streams are included;
        # reorder if necessary so that the joint stream comes first.
        if len(check_configs) != 2:
            raise ValueError("Expecting exactly two streams: joint and temporal")

        # Reorder both weights and configs so the joint stream comes first.
        if check_configs[0]['rel_type'] == 'temp_stream':
            config_filepaths.reverse()
            weights_filepaths.reverse()

        check_configs = []
        for config_filepath in config_filepaths:
            data_kwargs, model_kwargs, train_kwargs = read_config(
                config_filepath)
            check_configs.append(model_kwargs)

        # Verify the final order: joint stream first, temporal stream second
        if (check_configs[0]['rel_type'] != 'joint_stream'
                or check_configs[1]['rel_type'] != 'temp_stream'):
            raise ValueError("Expecting Joint Stream and Temporal Stream")

        # Ensure X_train has one component per stream
        if len(X_train) != 2:
            raise ValueError(
                "Expecting X_train to consist of both joint and temporal components")

        for config_filepath, X_train_comp in zip(config_filepaths, X_train):
            num_joints = len(X_train_comp)
            object_shape = (len(X_train_comp[0][0]), )
            output_size = len(Y_train[0])

            data_kwargs, model_kwargs, train_kwargs = read_config(
                config_filepath)
            timesteps = data_kwargs['timesteps']
            add_joint_idx = data_kwargs['add_joint_idx']
            add_body_part = data_kwargs['add_body_part']
            overhead = add_joint_idx + add_body_part  # True/False = 1/0
            num_dim = (object_shape[0] - overhead) // timesteps
            model_kwargs['num_dim'] = num_dim
            model_kwargs['overhead'] = overhead
            model_kwargs['num_objs'] = num_joints
            model_kwargs['object_shape'] = object_shape

            models_kwargs.append(model_kwargs)

    else:
        num_joints = len(X_train) // 2
        object_shape = (len(X_train[0][0]), )

        for config_filepath in config_filepaths:
            data_kwargs, model_kwargs, train_kwargs = read_config(
                config_filepath)
            timesteps = data_kwargs['timesteps']
            add_joint_idx = data_kwargs['add_joint_idx']
            add_body_part = data_kwargs['add_body_part']
            overhead = add_joint_idx + add_body_part  # True/False = 1/0
            num_dim = (object_shape[0] - overhead) // timesteps
            model_kwargs['num_dim'] = num_dim
            model_kwargs['overhead'] = overhead
            model_kwargs['num_objs'] = num_joints
            model_kwargs['object_shape'] = object_shape
            models_kwargs.append(model_kwargs)

    for mod_kwargs in models_kwargs:
        mod_kwargs['return_attention'] = False  # don't return attention for fused models

    train_kwargs['drop_rate'] = drop_rate

    if verbose > 0:
        print("Creating model...")
    model = fuse_rn(output_size,
                    new_arch,
                    train_kwargs,
                    models_kwargs,
                    weights_filepaths,
                    freeze_g_theta=freeze_g_theta,
                    fuse_at_fc1=fuse_at_fc1,
                    avg_at_end=avg_at_end)

    if initial_weights is not None:
        model.load_weights(initial_weights)

    data_len = None

    fit_history = train_model(model=model,
                              verbose=verbose,
                              learning_rate=learning_rate,
                              output_path=output_path,
                              checkpoint_period=checkpoint_period,
                              batch_size=batch_size,
                              epochs=epochs,
                              use_data_gen=use_data_gen,
                              train_data=train_data,
                              val_data=val_data,
                              subsample_ratio=subsample_ratio,
                              data_len=data_len)

    return fit_history
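
For the new_arch path the two configs must describe a joint stream and a temporal stream; the function reorders them so the joint stream comes first. A sketch with placeholder paths:

fit_history = train_fused_rn(
    output_path='models/joint_temp_fused/',
    dataset_name='YMJA',
    dataset_fold=0,
    config_filepaths=['configs/joint_stream.cfg', 'configs/temp_stream.cfg'],
    weights_filepaths=[['weights/joint_stream.hdf5'],
                       ['weights/temp_stream.hdf5']],
    new_arch=True,
    freeze_g_theta=True)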