Example #1
import pickle

import numpy as np

import utils  # project-local helpers (get_AVA_classes, make_chunks, ...)
# NOTE get_AVA_set is assumed to come from the project's data-loading module (not shown in this snippet)

# File with predictions
stream = "fusion"
filter_type = "avg_fovea"
filename = "thresholds/context_fusion/predictions_" + stream + "_" + filter_type + "_1809281055.pickle"
with open(filename, 'rb') as handle:
    predictions = pickle.load(handle)

root_dir = '../../data/AVA/files/'

# Load groundtruth (test or val set)
# Load list of action classes and separate them (from utils_stream)
classes = utils.get_AVA_classes(root_dir + 'ava_action_list_custom.csv')
partition = get_AVA_set(classes=classes,
                        filename=root_dir + "AVA_Val_Custom_Corrected.csv",
                        soft_sigmoid=True)
test_splits = utils.make_chunks(original_list=partition,
                                size=len(partition),
                                chunk_size=2**11)

# Voting
thresh_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
for thresh in thresh_values:
    print("Testing threshold " + str(thresh) + " ")
    pose_votes = {}
    obj_votes = {}
    human_votes = {}

    for row in partition:
        row = row.split("@")
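        # The snippet ends here in the source. Judging from the identical
        # per-row loop in Example #5 below, it presumably continues by
        # rebuilding the detection key and allocating one vote array per
        # output head (a sketch, not the original continuation):
        i = row[0] + "@" + row[1] + "@" + str(row[2]) + "@" + str(row[3]) + "@" + str(row[4]) + "@" + str(row[5])
        pose_votes[i] = np.zeros(utils.POSE_CLASSES)
        obj_votes[i] = np.zeros(utils.OBJ_HUMAN_CLASSES)
        human_votes[i] = np.zeros(utils.HUMAN_HUMAN_CLASSES)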
Example #2
def main():
    # root_dir = '../../../AVA2.1/' # root_dir for the files
    root_dir = '../../data/AVA/files/'

    # Erase previous models from GPU memory
    K.clear_session()

    # Load list of action classes and separate them
    classes = utils.get_AVA_classes(root_dir + 'ava_action_list_custom.csv')

    # Parameters for training
    params = {
        'dim': (224, 224),
        'batch_size': 32,
        'n_classes': len(classes['label_id']),
        'n_channels': 3,
        'shuffle': False,
        'nb_epochs': 200,
        'model': 'inceptionv3',
        'email': True,
        'freeze_all': True,
        'conv_fusion': False,
        'train_chunk_size': 2**12,
        'validation_chunk_size': 2**12
    }
    soft_sigmoid = True
    minValLoss = 9999990.0

    # Get ID's and labels from the actual dataset
    partition = {}
    partition['train'] = get_AVA_set(
        classes=classes,
        filename=root_dir + "AVA_Train_Custom_Corrected.csv",
        soft_sigmoid=soft_sigmoid)  # IDs for training
    partition['validation'] = get_AVA_set(
        classes=classes,
        filename=root_dir + "AVA_Val_Custom_Corrected.csv",
        soft_sigmoid=soft_sigmoid)  # IDs for validation

    # Labels
    labels_train = get_AVA_labels(classes,
                                  partition,
                                  "train",
                                  filename=root_dir +
                                  "AVA_Train_Custom_Corrected.csv",
                                  soft_sigmoid=soft_sigmoid)
    labels_val = get_AVA_labels(classes,
                                partition,
                                "validation",
                                filename=root_dir +
                                "AVA_Val_Custom_Corrected.csv",
                                soft_sigmoid=soft_sigmoid)

    # Create + compile model, load saved weights if they exist
    saved_weights = None
    # saved_weights = "../models/rgbextra_gauss_resnet50_1807250030.hdf5"
    model, keras_layer_names = rgb_create_model(
        classes=classes['label_id'],
        soft_sigmoid=soft_sigmoid,
        model_name=params['model'],
        freeze_all=params['freeze_all'],
        conv_fusion=params['conv_fusion'])
    model = compile_model(model, soft_sigmoid=soft_sigmoid)

    # TODO Experiment: 1. no initialization, 2. UCF initialization, 3. Kinetics initialization
    initialization = True  # Set to True to use one of the initializations below
    kinetics_weights = None  # None, so the Kinetics conversion below runs (for inceptionv3)
    ucf_weights = "a"  # non-None placeholder, so the UCF MatConvNet conversion is skipped

    if saved_weights is not None:
        model.load_weights(saved_weights)
    else:
        if initialization is True:
            if ucf_weights is None:
                print("Loading MConvNet weights: ")
                if params['model'] == "resnet50":
                    ucf_weights = utils.loadmat(
                        "../models/ucf_matconvnet/ucf101-img-resnet-50-split1.mat"
                    )
                    utils.convert_resnet(model, ucf_weights)
                    model.save(
                        "../models/ucf_keras/keras-ucf101-rgb-resnet50-newsplit.hdf5"
                    )
            if kinetics_weights is None:
                if params['model'] == "inceptionv3":
                    print("Loading Keras weights: ")
                    keras_weights = [
                        "../models/kinetics_keras/tsn_rgb_params_names.pkl",
                        "../models/kinetics_keras/tsn_rgb_params.pkl"
                    ]
                    utils.convert_inceptionv3(model, keras_weights,
                                              keras_layer_names)
                    model.save(
                        "../models/kinetics_keras/keras-kinetics-rgb-inceptionv3.hdf5"
                    )
    # Try to train on more than 1 GPU if possible
    # try:
    #    print("Trying MULTI-GPU")
    #    model = multi_gpu_model(model)

    print("Training set size: " + str(len(partition['train'])))

    # Make splits
    train_splits = utils.make_chunks(original_list=partition['train'],
                                     size=len(partition['train']),
                                     chunk_size=params['train_chunk_size'])
    val_splits = utils.make_chunks(original_list=partition['validation'],
                                   size=len(partition['validation']),
                                   chunk_size=params['validation_chunk_size'])

    time_str = time.strftime("%y%m%d%H%M", time.localtime())

    # TODO Don't forget to change your names :)
    filter_type = "gauss"
    bestModelPath = "../models/rgb_kininit_" + filter_type + \
        "_" + params['model'] + "_" + time_str + ".hdf5"
    traincsvPath = "../loss_acc_plots/rgb_kininit_train_" + filter_type + \
        "_plot_" + params['model'] + "_" + time_str + ".csv"
    valcsvPath = "../loss_acc_plots/rgb_kininit_val_" + filter_type + \
        "_plot_" + params['model'] + "_" + time_str + ".csv"

    with tf.device('/gpu:0'):  # NOTE Not using multi gpu
        for epoch in range(params['nb_epochs']):
            epoch_chunks_count = 0
            for trainIDS in train_splits:
                # Load and train
                start_time = timeit.default_timer()
                # -----------------------------------------------------------
                x_val = y_val_pose = y_val_object = y_val_human = x_train = y_train_pose = y_train_object = y_train_human = None
                x_train, y_train_pose, y_train_object, y_train_human = load_split(
                    trainIDS,
                    labels_train,
                    params['dim'],
                    params['n_channels'],
                    "train",
                    filter_type,
                    soft_sigmoid=soft_sigmoid)

                # Build one target per output head: one-hot pose labels plus
                # multi-hot binary vectors for the object-human and
                # human-human interaction heads
                y_t = []
                y_t.append(
                    to_categorical(y_train_pose,
                                   num_classes=utils.POSE_CLASSES))
                y_t.append(
                    utils.to_binary_vector(y_train_object,
                                           size=utils.OBJ_HUMAN_CLASSES,
                                           labeltype='object-human'))
                y_t.append(
                    utils.to_binary_vector(y_train_human,
                                           size=utils.HUMAN_HUMAN_CLASSES,
                                           labeltype='human-human'))

                # One pass over this chunk; epochs and chunking are handled by
                # the outer loops
                history = model.fit(x_train,
                                    y_t,
                                    batch_size=params['batch_size'],
                                    epochs=1,
                                    verbose=0)
                utils.learning_rate_schedule(model, epoch, params['nb_epochs'])

                # TODO Repeat samples of unrepresented classes?

                # ------------------------------------------------------------
                elapsed = timeit.default_timer() - start_time

                print("Epoch " + str(epoch) + " chunk " +
                      str(epoch_chunks_count) + " (" + str(elapsed) +
                      ") acc[pose,obj,human] = [" +
                      str(history.history['pred_pose_categorical_accuracy']) +
                      "," +
                      str(history.
                          history['pred_obj_human_categorical_accuracy']) +
                      "," +
                      str(history.
                          history['pred_human_human_categorical_accuracy']) +
                      "] loss: " + str(history.history['loss']))
                with open(traincsvPath, 'a') as f:
                    writer = csv.writer(f)
                    avg_acc = (
                        history.history['pred_pose_categorical_accuracy'][0] +
                        history.history['pred_obj_human_categorical_accuracy']
                        [0] + history.history[
                            'pred_human_human_categorical_accuracy'][0]) / 3
                    writer.writerow([
                        str(avg_acc),
                        history.history['pred_pose_categorical_accuracy'],
                        history.history['pred_obj_human_categorical_accuracy'],
                        history.
                        history['pred_human_human_categorical_accuracy'],
                        history.history['loss']
                    ])
                epoch_chunks_count += 1
            # Load val_data
            print("Validating data: ")
            # global_loss, pose_loss, object_loss, human_loss, pose_acc, object_acc, human_acc
            loss_acc_list = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            for valIDS in val_splits:
                x_val = y_val_pose = y_val_object = y_val_human = x_train = y_train_pose = y_train_object = y_train_human = None
                x_val, y_val_pose, y_val_object, y_val_human = load_split(
                    valIDS,
                    labels_val,
                    params['dim'],
                    params['n_channels'],
                    "val",
                    filter_type,
                    soft_sigmoid=soft_sigmoid)
                y_v = []
                y_v.append(
                    to_categorical(y_val_pose, num_classes=utils.POSE_CLASSES))
                y_v.append(
                    utils.to_binary_vector(y_val_object,
                                           size=utils.OBJ_HUMAN_CLASSES,
                                           labeltype='object-human'))
                y_v.append(
                    utils.to_binary_vector(y_val_human,
                                           size=utils.HUMAN_HUMAN_CLASSES,
                                           labeltype='human-human'))

                vglobal_loss, vpose_loss, vobject_loss, vhuman_loss, vpose_acc, vobject_acc, vhuman_acc = model.evaluate(
                    x_val, y_v, batch_size=params['batch_size'])
                loss_acc_list[0] += vglobal_loss
                loss_acc_list[1] += vpose_loss
                loss_acc_list[2] += vobject_loss
                loss_acc_list[3] += vhuman_loss
                loss_acc_list[4] += vpose_acc
                loss_acc_list[5] += vobject_acc

                loss_acc_list[6] += vhuman_acc
            # Average over all validation chunks
            loss_acc_list = [x / len(val_splits) for x in loss_acc_list]
            with open(valcsvPath, 'a') as f:
                writer = csv.writer(f)
                # Overall accuracy = mean of the three per-head accuracies
                acc = (loss_acc_list[4] + loss_acc_list[5] +
                       loss_acc_list[6]) / 3
                writer.writerow([
                    str(acc), loss_acc_list[4], loss_acc_list[5],
                    loss_acc_list[6], loss_acc_list[0], loss_acc_list[1],
                    loss_acc_list[2], loss_acc_list[3]
                ])
            if loss_acc_list[0] < minValLoss:
                print("New best loss " + str(loss_acc_list[0]))
                model.save(bestModelPath)
                minValLoss = loss_acc_list[0]

    if params['email']:
        utils.sendemail(from_addr='*****@*****.**',
                        to_addr_list=['*****@*****.**'],
                        subject='Finished training RGB-stream ',
                        message='Training RGB with following params: ' +
                        str(params),
                        login='******',
                        password='******')
Example #3
def main():

    # root_dir = '../../../AVA2.1/' # root_dir for the files
    root_dir = '../../data/AVA/files/'

    # Erase previous models from GPU memory
    K.clear_session()

    # Load list of action classes and separate them
    classes = utils.get_AVA_classes(root_dir + 'ava_action_list_custom.csv')

    # Parameters for training
    params = {
        'dim': (224, 224),
        'batch_size': 32,
        'n_classes': len(classes['label_id']),
        'n_channels': 3,
        'shuffle': False,
        'nb_epochs': 200,
        'model': 'resnet50',
        'email': True,
        'freeze_all': True,
        'conv_fusion': False,
        'train_chunk_size': 2**12,
        'validation_chunk_size': 2**12
    }
    soft_sigmoid = True
    minValLoss = 9999990.0

    # Get ID's and labels from the actual dataset
    partition = {}
    partition['train'] = get_AVA_set(
        classes=classes,
        filename=root_dir + "AVA_Train_Custom_Corrected.csv",
        soft_sigmoid=soft_sigmoid)  # IDs for training
    partition['validation'] = get_AVA_set(
        classes=classes,
        filename=root_dir + "AVA_Val_Custom_Corrected.csv",
        soft_sigmoid=soft_sigmoid)  # IDs for validation

    # Labels
    labels_train = get_AVA_labels(classes,
                                  partition,
                                  "train",
                                  filename=root_dir +
                                  "AVA_Train_Custom_Corrected.csv",
                                  soft_sigmoid=soft_sigmoid)
    labels_val = get_AVA_labels(classes,
                                partition,
                                "validation",
                                filename=root_dir +
                                "AVA_Val_Custom_Corrected.csv",
                                soft_sigmoid=soft_sigmoid)

    # http://scikit-learn.org/stable/modules/generated/sklearn.utils.class_weight.compute_class_weight.html
    # Logistic Regression in Rare Events Data, Gary King, Langche Zeng
    # Keras needs a dict though
    # From documentation: class_weight: Optional dictionary mapping class indices (integers) to a weight (float) value, used for weighting the loss function (during training only).
    # This can be useful to tell the model to "pay more attention" to samples from an under-represented class.
    y = labels_to_numpy(labels_train)
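
    # For comparison only, a sketch of the sklearn helper cited above (not used
    # below): its 'balanced' heuristic is len(y) / (n_classes * count), i.e. the
    # commented-out len(y) / count formula in the loop below up to a constant
    # factor, and Keras still expects a plain dict of index -> weight.
    from sklearn.utils.class_weight import compute_class_weight
    sk_classes = np.unique(y)
    sk_weights = compute_class_weight('balanced', classes=sk_classes, y=y)
    print("sklearn balanced weights (comparison): " +
          str(dict(zip(sk_classes.tolist(), sk_weights.tolist()))))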

    penalizing_method = 'balanced'
    mu = 0.7
    # penalizing_method = 'weighted_log'
    class_weights = np.zeros(30)
    for i in y:
        class_weights[i] += 1
    max_class = max(class_weights)
    for i in range(len(class_weights)):
        if class_weights[i] != 0.0:
            if penalizing_method == 'balanced':
                print(
                    str(i) + " " + str(class_weights[i]) + " " +
                    str(len(y) / (class_weights[i])))
                #class_weights[i] = len(y) / (class_weights[i])
                class_weights[i] = max_class / (class_weights[i])
            elif penalizing_method == 'weighted_log':
                print(
                    str(i) + " " + str(class_weights[i]) + " " +
                    str(math.log(mu * len(y) / (class_weights[i]))))
                class_weights[i] = math.log(mu * len(y) / (class_weights[i]))
        else:
            print(str(i) + " " + str(class_weights[i]) + " inf ")
            class_weights[i] = 0.0
    g = sns.barplot(x=[str(i) for i in range(len(class_weights))],
                    y=class_weights)
    plt.xticks(rotation=-90)
    plt.title("Class weights " + penalizing_method)
    plt.grid(True)
    plt.show()

    class_dictionary = {}
    print(len(class_weights))
    for i in range(len(class_weights)):
        class_dictionary[i] = class_weights[i]
    print(class_dictionary)

    it = iter(class_weights)
    seclist = [
        utils.POSE_CLASSES, utils.OBJ_HUMAN_CLASSES, utils.HUMAN_HUMAN_CLASSES
    ]
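    # islice consumes the shared iterator, so the next line yields three
    # consecutive per-head weight lists: [pose, object-human, human-human]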
    class_lists = [list(islice(it, 0, i)) for i in seclist]
    print(class_lists)

    # Create + compile model, load saved weights if they exist
    saved_weights = None
    # saved_weights = "../models/rgbextra_gauss_resnet50_1807250030.hdf5"
    model, keras_layer_names = rgb_create_model(
        classes=classes['label_id'],
        soft_sigmoid=soft_sigmoid,
        model_name=params['model'],
        freeze_all=params['freeze_all'],
        conv_fusion=params['conv_fusion'])
    model = compile_model(model, soft_sigmoid=soft_sigmoid)

    # TODO Experiment: 1. no initialization, 2. UCF initialization, 3. Kinetics initialization
    initialization = True  # Set to True to use one of the initializations below
    kinetics_weights = ""  # non-None, so the Kinetics conversion below is skipped
    ucf_weights = "../models/ucf_keras/keras-ucf101-rgb-resnet50-newsplit.hdf5"  # non-None, so the MatConvNet conversion is skipped

    if saved_weights is not None:
        model.load_weights(saved_weights)
    else:
        if initialization is True:
            if ucf_weights is None:
                print("Loading MConvNet weights: ")
                if params['model'] == "resnet50":
                    ucf_weights = utils.loadmat(
                        "../models/ucf_matconvnet/ucf101-img-resnet-50-split1.mat"
                    )
                    utils.convert_resnet(model, ucf_weights)
                    model.save(
                        "../models/ucf_keras/keras-ucf101-rgb-resnet50-newsplit.hdf5"
                    )
            if kinetics_weights is None:
                if params['model'] == "inceptionv3":
                    print("Loading Keras weights: ")
                    keras_weights = [
                        "../models/kinetics_keras/tsn_rgb_params_names.pkl",
                        "../models/kinetics_keras/tsn_rgb_params.pkl"
                    ]
                    utils.convert_inceptionv3(model, keras_weights,
                                              keras_layer_names)
                    model.save(
                        "../models/kinetics_keras/keras-kinetics-rgb-inceptionv3.hdf5"
                    )
    # Try to train on more than 1 GPU if possible
    # try:
    #    print("Trying MULTI-GPU")
    #    model = multi_gpu_model(model)

    print("Training set size: " + str(len(partition['train'])))

    # Make splits
    train_splits = utils.make_chunks(original_list=partition['train'],
                                     size=len(partition['train']),
                                     chunk_size=params['train_chunk_size'])
    val_splits = utils.make_chunks(original_list=partition['validation'],
                                   size=len(partition['validation']),
                                   chunk_size=params['validation_chunk_size'])

    time_str = time.strftime("%y%m%d%H%M", time.localtime())

    # TODO Don't forget to change your names :)
    filter_type = "gauss"
    bestModelPath = "../models/rgb_weightsfinal_" + filter_type + "_" + params[
        'model'] + "_" + time_str + ".hdf5"
    traincsvPath = "../loss_acc_plots/rgb_weightsfinal_train_" + filter_type + "_plot_" + params[
        'model'] + "_" + time_str + ".csv"
    valcsvPath = "../loss_acc_plots/rgb_weightsfinal_val_" + filter_type + "_plot_" + params[
        'model'] + "_" + time_str + ".csv"

    for epoch in range(params['nb_epochs']):
        epoch_chunks_count = 0
        for trainIDS in train_splits:
            # Load and train
            start_time = timeit.default_timer()
            # -----------------------------------------------------------
            x_val = y_val_pose = y_val_object = y_val_human = x_train = y_train_pose = y_train_object = y_train_human = None
            x_train, y_train_pose, y_train_object, y_train_human = load_split(
                trainIDS,
                labels_train,
                params['dim'],
                params['n_channels'],
                "train",
                filter_type,
                soft_sigmoid=soft_sigmoid)

            y_t = []
            y_t.append(
                to_categorical(y_train_pose, num_classes=utils.POSE_CLASSES))
            y_t.append(
                utils.to_binary_vector(y_train_object,
                                       size=utils.OBJ_HUMAN_CLASSES,
                                       labeltype='object-human'))
            y_t.append(
                utils.to_binary_vector(y_train_human,
                                       size=utils.HUMAN_HUMAN_CLASSES,
                                       labeltype='human-human'))

            history = model.fit(x_train,
                                y_t,
                                class_weight=class_lists,
                                batch_size=params['batch_size'],
                                epochs=1,
                                verbose=0)
            utils.learning_rate_schedule(model, epoch, params['nb_epochs'])

            # ------------------------------------------------------------
            elapsed = timeit.default_timer() - start_time

            print(
                "Epoch " + str(epoch) + " chunk " + str(epoch_chunks_count) +
                " (" + str(elapsed) + ") acc[pose,obj,human] = [" +
                str(history.history['pred_pose_categorical_accuracy']) + "," +
                str(history.history['pred_obj_human_categorical_accuracy']) +
                "," +
                str(history.history['pred_human_human_categorical_accuracy']) +
                "] loss: " + str(history.history['loss']))
            with open(traincsvPath, 'a') as f:
                writer = csv.writer(f)
                avg_acc = (
                    history.history['pred_pose_categorical_accuracy'][0] +
                    history.history['pred_obj_human_categorical_accuracy'][0] +
                    history.history['pred_human_human_categorical_accuracy'][0]
                ) / 3
                writer.writerow([
                    str(avg_acc),
                    history.history['pred_pose_categorical_accuracy'],
                    history.history['pred_obj_human_categorical_accuracy'],
                    history.history['pred_human_human_categorical_accuracy'],
                    history.history['loss']
                ])
            epoch_chunks_count += 1
        # Load val_data
        print("Validating data: ")
        # global_loss, pose_loss, object_loss, human_loss, pose_acc, object_acc, human_acc
        loss_acc_list = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        for valIDS in val_splits:
            x_val = y_val_pose = y_val_object = y_val_human = x_train = y_train_pose = y_train_object = y_train_human = None
            x_val, y_val_pose, y_val_object, y_val_human = load_split(
                valIDS,
                labels_val,
                params['dim'],
                params['n_channels'],
                "val",
                filter_type,
                soft_sigmoid=soft_sigmoid)
            y_v = []
            y_v.append(
                to_categorical(y_val_pose, num_classes=utils.POSE_CLASSES))
            y_v.append(
                utils.to_binary_vector(y_val_object,
                                       size=utils.OBJ_HUMAN_CLASSES,
                                       labeltype='object-human'))
            y_v.append(
                utils.to_binary_vector(y_val_human,
                                       size=utils.HUMAN_HUMAN_CLASSES,
                                       labeltype='human-human'))

            # NOTE Can't have weights on validation
            vglobal_loss, vpose_loss, vobject_loss, vhuman_loss, vpose_acc, vobject_acc, vhuman_acc = model.evaluate(
                x_val, y_v, batch_size=params['batch_size'])
            loss_acc_list[0] += vglobal_loss
            loss_acc_list[1] += vpose_loss
            loss_acc_list[2] += vobject_loss
            loss_acc_list[3] += vhuman_loss
            loss_acc_list[4] += vpose_acc
            loss_acc_list[5] += vobject_acc
            loss_acc_list[6] += vhuman_acc
        # Average over all validation chunks
        loss_acc_list = [x / len(val_splits) for x in loss_acc_list]
        with open(valcsvPath, 'a') as f:
            writer = csv.writer(f)
            # Overall accuracy = mean of the three per-head accuracies
            acc = (loss_acc_list[4] + loss_acc_list[5] + loss_acc_list[6]) / 3
            writer.writerow([
                str(acc), loss_acc_list[4], loss_acc_list[5], loss_acc_list[6],
                loss_acc_list[0], loss_acc_list[1], loss_acc_list[2],
                loss_acc_list[3]
            ])
        if loss_acc_list[0] < minValLoss:
            print("New best loss " + str(loss_acc_list[0]))
            model.save(bestModelPath)
            minValLoss = loss_acc_list[0]

    if params['email']:
        utils.sendemail(
            from_addr='*****@*****.**',
            to_addr_list=['*****@*****.**'],
            subject='Finished training RGB-stream with class_weights ' +
            penalizing_method,
            message='Training RGB with following params: ' + str(params),
            login='******',
            password='******')
Example #4
def main():

    GPU = False
    CPU = True
    num_cores = 8

    if GPU:
        num_GPU = 1
        num_CPU = 1
    if CPU:
        num_CPU = 1
        num_GPU = 0

    # NOTE Left commented out, so the CPU/GPU flags above currently have no effect
    # config = tf.ConfigProto(intra_op_parallelism_threads=num_cores, inter_op_parallelism_threads=num_cores, allow_soft_placement=True,
    #                        device_count={'CPU': num_CPU, 'GPU': num_GPU})
    # session = tf.Session(config=config)
    # K.set_session(session)

    # root_dir = '../../../AVA2.1/' # root_dir for the files
    root_dir = '../../data/AVA/files/'

    # Load list of action classes and separate them
    classes = utils.get_AVA_classes(root_dir + 'ava_action_list_custom.csv')

    # Parameters for training
    params = {
        'dim': (224, 224),
        'batch_size': 32,
        'n_classes': len(classes['label_id']),
        'n_channels': 3,
        'shuffle': False,
        'nb_epochs': 200,
        'model': 'resnet50',
        'email': True,
        'freeze_all': True,
        'conv_fusion': False,
        'train_chunk_size': 2**12,
        'validation_chunk_size': 2**12
    }
    soft_sigmoid = True
    minValLoss = 9999990.0
    print(classes)

    oversampling_train, oversampling_train_classes = oversampling(
        classes, root_dir, "AVA_Train_Custom_Corrected.csv")
    oversampling_val, oversampling_val_classes = oversampling(
        classes, root_dir, "AVA_Val_Custom_Corrected.csv")

    undersampling_train, undersampling_train_classes = undersampling(
        classes, root_dir, "AVA_Train_Custom_Corrected.csv",
        oversampling_train_classes)
    undersampling_val, undersampling_val_classes = undersampling(
        classes, root_dir, "AVA_Val_Custom_Corrected.csv",
        oversampling_val_classes)

    partition = {}
    partition['train'] = get_AVA_set(
        classes=classes,
        filename=root_dir + "AVA_Train_Custom_Corrected.csv",
        soft_sigmoid=soft_sigmoid)  # IDs for training
    partition['validation'] = get_AVA_set(
        classes=classes,
        filename=root_dir + "AVA_Val_Custom_Corrected.csv",
        soft_sigmoid=soft_sigmoid)  # IDs for validation

    print(len(partition['train']))
    undersampling_train = list(set(undersampling_train))
    print(len(oversampling_train))
    print(len(undersampling_train))
    print(len(undersampling_train + oversampling_train))
    print(1.0 * len(partition['train'] + oversampling_train) /
          len(partition['train']))
    bestsample = undersampling_train + oversampling_train

    sys.exit(0)  # NOTE everything below this point is exploratory and never runs
    # Labels
    # labels_train = get_AVA_labels(classes, partition, "train", filename=root_dir + "AVA_Train_Custom_Corrected.csv", soft_sigmoid=soft_sigmoid)
    # labels_val = get_AVA_labels(classes, partition, "validation", filename=root_dir + "AVA_Val_Custom_Corrected.csv", soft_sigmoid=soft_sigmoid)
    original_train_size = len(partition['train'])
    print("Training set size pre augmentation: " + str(original_train_size))
    partition['train'] = partition['train'] + aug_train
    print("Training set size pos augmentation: " +
          str(len(partition['train'])) + " --> " +
          str(100.0 * (len(partition['train']) - original_train_size) /
              original_train_size) + " % increase")

    original_val_size = len(partition['validation'])
    print("validation set size pre augmentation: " + str(original_train_size))
    partition['validation'] = partition['validation'] + aug_train
    print("Validation set size pos augmentation: " +
          str(len(partition['validation'])) + " --> " +
          str(100.0 * (len(partition['validation']) - original_val_size) /
              original_val_size) + " % increase")

    img = cv2.imread(
        "/media/pedro/actv-ssd/gauss_train/-5KQ66BBWC4_902_0.077_0.151_0.283_0.811/frames1.jpg"
    )
    print(img.shape)
    if random.random() < 0.5:
        flip_img = np.fliplr(img)
    else:
        flip_img = img
    crop_rand_val = random.randrange(0, 5, 1) / 10.0
    scale_rand_val = random.randrange(7, 8, 1) / 10.0
    # print(crop_rand_val)
    # print(scale_rand_val)
    seq = iaa.Sequential(
        [
            iaa.Scale((scale_rand_val, 1.0)),  # random rescale
            iaa.CropAndPad(percent=(0, crop_rand_val),
                           pad_mode=["edge"])  # random crop/pad
        ],
        random_order=True)  # apply augmenters in random order

    flipped = seq.augment_image(flip_img)
    plt.imshow(flipped)
    plt.show()
Example #5
def main():
    K.clear_session()

    root_dir = '../../data/AVA/files/'

    # Load list of action classes and separate them (from utils_stream)
    classes = utils.get_AVA_classes(root_dir + 'ava_action_list_custom.csv')

    # Parameters for training (batch size 32 is supposed to be the best?)
    params = {'dim': (224, 224), 'batch_size': 32,
              'n_classes': len(classes['label_id']), 'n_channels': 3,
              'shuffle': False, 'nb_epochs': 200, 'model': 'resnet50', 'email': False,
              'freeze_all': True, 'conv_fusion': False}

    filter_type = "gauss"
    split = "test"

    # Get validation set from directory
    partition = {}
    partition['test'] = get_AVA_set(classes=classes, filename=root_dir + "AVA_" + split.title() + "_Custom_Corrected.csv", soft_sigmoid=True)

    time_str = time.strftime("%y%m%d%H%M", time.localtime())
    result_csv = "test_outputs/augmentation/output_test_weightsnew_" + filter_type + "_" + time_str + ".csv"

    # Load trained model
    # Gauss
    # rgb_weights = "../models/rgb_" + filter_type + "_resnet50_1806290918.hdf5"
    # rgb_weights = "../models/rgbextra_" + filter_type + "_resnet50_1807250030.hdf5"
    #rgb_weights = "../models/rgb_augsamplingweightsnoaug_gauss_resnet50_1809242359.hdf5"
    rgb_weights = "../models/rgb_weightsnew_gauss_resnet50_1809281516.hdf5"
    # Crop
    # rgb_weights = "../models/rgb_" + filter_type + "_resnet50_1806300210.hdf5"

    # Fovea
    # rgb_weights = "../models/rgb_" + filter_type + "_resnet50_1806301953.hdf5"

    # RGB only
    # rgb_weights = "../models/rgb_" + filter_type + "_resnet50_1807060914.hdf5"

    model, keras_layer_names = rgb_create_model(classes=classes['label_id'], soft_sigmoid=True, model_name=params['model'], freeze_all=params['freeze_all'], conv_fusion=params['conv_fusion'])
    model = compile_model(model, soft_sigmoid=True)
    model.load_weights(rgb_weights)

    print("Test set size: " + str(len(partition['test'])))

    # Load chunks
    test_splits = utils.make_chunks(original_list=partition['test'], size=len(partition['test']), chunk_size=2**11)

    # Test directories where pre-processed test files are
    rgb_dir = "/media/pedro/actv-ssd/" + filter_type + "_" + split + "/"

    test_chunks_count = 0

    pose_votes = {}
    obj_votes = {}
    human_votes = {}

    for row in partition['test']:
        row = row.split("@")
        i = row[0] + "@" + row[1] + "@" + str(row[2]) + "@" + str(row[3]) + "@" + str(row[4]) + "@" + str(row[5])
        pose_votes[i] = np.zeros(utils.POSE_CLASSES)
        obj_votes[i] = np.zeros(utils.OBJ_HUMAN_CLASSES)
        human_votes[i] = np.zeros(utils.HUMAN_HUMAN_CLASSES)

    store_predictions = True
    test_predictions = []
    with tf.device('/gpu:0'):
        for testIDS in test_splits:
            # TODO Technically it shouldn't return labels here (these are ground truth)
            x_test_rgb, y_test_pose, y_test_object, y_test_human = load_split(testIDS, None, params['dim'], params['n_channels'], split, filter_type, soft_sigmoid=True, train=False)
            print("Predicting on chunk " + str(test_chunks_count) + "/" + str(len(test_splits)))

            predictions = model.predict(x_test_rgb, batch_size=params['batch_size'], verbose=1)
            if store_predictions is True:
                # print(predictions[0][0])
                # print(predictions[1][0])
                # print(predictions[2][0])

                # tarr = np.hstack((np.vstack(predictions[0]), np.vstack(predictions[1]), np.vstack(predictions[2])))
                test_predictions.append(predictions)

            # Convert predictions to readable output and perform majority voting
            voting.pred2classes(testIDS, predictions, pose_votes, obj_votes, human_votes, thresh=0.4)
            test_chunks_count += 1

    if store_predictions is True:
        #tp = np.vstack(test_predictions)
        # print(tp.shape)
        with open("thresholds/rgb_gauss/predictions_weightsnew_" + filter_type + "_" + time_str + ".pickle", 'wb') as handle:
            pickle.dump(test_predictions, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # When you're done getting all the votes, write output csv
    with open(result_csv, "a") as output_file:
        for key in pose_votes:
            idx = key.split("@")
            actions = []
            pv = pose_votes[key]
            pose_vote = pv.argmax(axis=0) + 1
            actions.append(pose_vote)

            # Get 3 top voted object
            ov = obj_votes[key]
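            # Illustration: with object-human vote counts ov = [2, 7, 5],
            # argsort()[-3:][::-1] gives [1, 2, 0] (highest first); adding
            # utils.POSE_CLASSES + 1 shifts these per-head indices to global
            # AVA label ids, since ids 1..POSE_CLASSES are taken by pose classes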
            top_three_obj_votes = ov.argsort()[-3:][::-1] + utils.POSE_CLASSES + 1
            for t in top_three_obj_votes:
                if t != 0:  # there may be fewer than three voted classes
                    actions.append(t)
            # Get 3 top voted human
            hv = human_votes[key]
            top_three_human_votes = hv.argsort()[-3:][::-1] + utils.POSE_CLASSES + utils.OBJ_HUMAN_CLASSES + 1
            for t in top_three_human_votes:
                if t != 0:  # there may be fewer than three voted classes
                    actions.append(t)

            video_name = idx[0]
            timestamp = idx[1]
            bb_topx = idx[2]
            bb_topy = idx[3]
            bb_botx = idx[4]
            bb_boty = idx[5]
            for a in actions:
                line = video_name + "," + timestamp + "," + bb_topx + "," + bb_topy + "," + bb_botx + "," + bb_boty + "," + str(a)
                output_file.write("%s\n" % line)

    if params['email']:
        utils.sendemail(from_addr='*****@*****.**',
                        to_addr_list=['*****@*****.**'],
                        subject='Finished prediction for rgb ' + filter_type,
                        message='Testing rgb with following params: ' + str(params),
                        login='******',
                        password='******')