Example #1
import numpy as np
import scipy.stats

# train_model, load_dataset_at and DATASET_INDEX come from this repo's
# training utilities.
def exec_ensemble(models):
    # Train every model in the ensemble on the same dataset
    for m in models:
        train_model(m,
                    DATASET_INDEX,
                    dataset_prefix='lp1_',
                    epochs=1000,
                    batch_size=128)

    # Predict test-set labels with each trained model
    _, _, X_test, y_test, is_timeseries = load_dataset_at(DATASET_INDEX)
    labels = []
    for m in models:
        predicts = np.argmax(m.predict(X_test), axis=1)
        labels.append(predicts)

    # Ensemble the per-model predictions by majority vote
    labels = np.array(labels)
    labels = np.transpose(labels, (1, 0))
    labels_result = scipy.stats.mode(labels, axis=1)[0]
    labels_result = np.squeeze(labels_result)

    # Compute accuracy (%) against the ground-truth labels
    match_count = 0
    for i in range(len(labels_result)):
        p = labels_result[i]
        e = y_test[i][0]
        if p == e:
            match_count += 1

    accuracy = (match_count * 100) / len(labels_result)
    print('Accuracy: ', accuracy)

    return accuracy
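A quick way to sanity-check the majority-vote step in isolation (a minimal, self-contained sketch on hypothetical toy predictions; only NumPy and SciPy are needed):

import numpy as np
import scipy.stats

# Hypothetical predicted labels from three models for four test samples
toy_labels = np.array([[0, 1, 2, 1],
                       [0, 2, 2, 1],
                       [1, 1, 2, 0]])

votes = np.transpose(toy_labels, (1, 0))  # (samples, models)
majority = np.squeeze(scipy.stats.mode(votes, axis=1)[0])
print(majority)  # [0 1 2 1]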
Example #2
# Squeeze-and-excite block used throughout these snippets (old standalone
# Keras API, as `_keras_shape` implies).
from keras.layers import Dense, GlobalAveragePooling1D, Reshape, multiply


def squeeze_excite_block(input):
    ''' Create a squeeze-excite block

    Args:
        input: input tensor
        filters: number of output filters
        k: width factor

    Returns: a keras tensor
    '''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se
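For orientation, a minimal sketch of how this block is typically wired into a Conv1D stack (hypothetical shapes; assumes the old standalone Keras imports above):

from keras.layers import Input, Conv1D
from keras.models import Model

ip = Input(shape=(128, 8))   # hypothetical (timesteps, channels)
x = Conv1D(64, 8, padding='same', kernel_initializer='he_uniform')(ip)
x = squeeze_excite_block(x)  # recalibrate the 64 conv channels
demo_model = Model(ip, x)
demo_model.summary()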


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='hhar',
                epochs=100,
                batch_size=128)

    evaluate_model(model, DATASET_INDEX, dataset_prefix='hhar', batch_size=128)
Example #3
    model = Model(inp, output)

    model.summary()

    return model


if __name__ == "__main__":

    # Generate the GRU-FCN model
    model5 = generate_model_5()
    print("GRU-FCN")
    history = train_model(model5,
                          DATASET_INDEX,
                          dataset_prefix='adiac',
                          epochs=4000,
                          batch_size=128)
    accuracy5, loss5 = evaluate_model(model5,
                                      DATASET_INDEX,
                                      dataset_prefix='adiac',
                                      batch_size=128)

    print("--- Run Time = %s seconds ---" % ((time.time() - start_time)))
    print("--- Run Time = %s minutes ---" %
          ((time.time() - start_time) / 60.0))
    text_file = open("training_time.txt", "w")
    text_file.write("--- Run Time =" + str(((time.time() - start_time))) +
                    " seconds ---" + "\n" + "--- Run Time = " +
                    str(((time.time() - start_time) / 60.0)) + " minutes ---" +
                    "\n")
Example #4
    max_seq_lenth = max_seq_len(dataset)
    nb_class = nb_classes(dataset)
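    # Heuristic: roughly one pooling layer per power of two of sequence length,
    # minus 3 (e.g. max_seq_lenth = 512 -> round(log2(512)) - 3 = 6)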
    nb_cnn = int(round(math.log(max_seq_lenth, 2)) - 3)
    print("Number of Pooling Layers: %s" % str(nb_cnn))

    #model = lstm_fcn_model(proto_num, max_seq_lenth, nb_class)
    #model = alstm_fcn_model(proto_num, max_seq_lenth, nb_class)

    #model = cnn_raw_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_dtwfeatures_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_earlyfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_midfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_latefusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)

    #model = vgg_raw_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = vgg_dtwfeatures_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = vgg_earlyfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    model = vgg_midfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = vgg_latefusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)

    print("Number of Pooling Layers: %s" % str(nb_cnn))

    train_model(model,
                dataset,
                method,
                proto_num,
                dataset_prefix=dataset,
                nb_iterations=50000,
                batch_size=50,
                opt='Nadam',
                learning_rate=0.0001,
                early_stop=False,
                balance_classes=False,
                run_ver='vgg_')

    acc = evaluate_model(model,
                         dataset,
                         method,
                         proto_num,
                         dataset_prefix=dataset,
                         batch_size=50,
                         checkpoint_prefix="vgg_loss")
    np.savetxt("output/vgg/vgg-%s-%s-%s-loss-%s" %
               (dataset, method, str(proto_num), str(acc)), [acc])

    acc = evaluate_model(model,
                         dataset,
                         method,
                         proto_num,
                         dataset_prefix=dataset,
                         batch_size=50,
                         checkpoint_prefix="vgg_val_acc")
    np.savetxt("output/vgg/vgg-%s-%s-%s-vacc-%s" %
               (dataset, method, str(proto_num), str(acc)), [acc])
Example #5
    print("Mahis F-Score : ", averagef_mahi)
    print("Mahis W F-Score : ", weightedf_mahi)
    return averagef


if __name__ == "__main__":
    model = generate_model_2()
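    # Compiled manually so the custom f1_score metric is attached;
    # compile_model=False below presumably skips recompiling inside train_model.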
    model.compile('adam',
                  'categorical_crossentropy',
                  metrics=['accuracy', f1_score])

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='opportunity_new',
                epochs=1000,
                batch_size=128,
                monitor='val_f1_score',
                optimization_mode='max',
                compile_model=False)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='opportunity',
                   batch_size=128)
    predict_model(
        model,
        DATASET_INDEX,
        dataset_prefix='opportunity_weights_attention_9208_512_lstm_128',
        batch_size=512)
Example #6
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='ozone',
                epochs=600,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='ozone',
                   batch_size=128)
Example #7
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='japanese_vowels',
                epochs=600,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='japanese_vowels',
                   batch_size=128)
Example #8
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='activity_attention',
                epochs=1000,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='activity_attention',
                   batch_size=128)
Example #9
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='uwave_attention',
                epochs=500,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='uwave_attention',
                   batch_size=128)
Example #10
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='movement_aal',
                epochs=1000,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='movement_aal',
                   batch_size=128)
Example #11
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='gesture_phase',
                epochs=1000,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='gesture_phase',
                   batch_size=128)
Example #12
from utils.model_utils import lstm_fcn_model, alstm_fcn_model
from utils.model_utils import cnn_raw_model, cnn_dtwfeatures_model, cnn_earlyfusion_model, cnn_midfusion_model, cnn_latefusion_model

import sys


if __name__ == "__main__":
    dataset = sys.argv[1]
    method = sys.argv[2]
    proto_num = int(sys.argv[3])
    nb_cnn = int(sys.argv[4])

    max_seq_lenth = max_seq_len(dataset)
    nb_class = nb_classes(dataset)

    model = lstm_fcn_model(proto_num, max_seq_lenth, nb_class)
    #model = alstm_fcn_model(proto_num, max_seq_lenth, nb_class)

    #model = cnn_raw_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_dtwfeatures_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_earlyfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_midfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_latefusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)

    train_model(model,
                dataset,
                method,
                proto_num,
                dataset_prefix=dataset,
                nb_iterations=20000,
                batch_size=50,
                learning_rate=0.0001,
                early_stop=True)

    evaluate_model(model,
                   dataset,
                   method,
                   proto_num,
                   dataset_prefix=dataset,
                   batch_size=50,
                   checkpoint_prefix="loss")

    evaluate_model(model,
                   dataset,
                   method,
                   proto_num,
                   dataset_prefix=dataset,
                   batch_size=50,
                   checkpoint_prefix="val_acc")

Example #13
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='walk_vs_run_',
                epochs=1000,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='walk_vs_run_',
                   batch_size=128)
Example #14
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":

    model = generate_model()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='vehicle_data',
                epochs=500,
                batch_size=16)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='vehicle_data',
                   batch_size=16)

    model.save('/Users/jhk1m/J_ZOOM/test_model.h5')
Example #15
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='floodPrediction_',
                epochs=200,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='floodPrediction_',
                   batch_size=128)
Example #16
    #model = cnn_raw_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_dtwfeatures_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_earlyfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    model = cnn_midfusion_model_v2(nb_cnn, dim_num, proto_num, max_seq_lenth,
                                   nb_class)
    #model = cnn_latefusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)

    print("Number of Pooling Layers: %s" % str(nb_cnn))

    train_model(model,
                dataset,
                method,
                proto_num,
                dataset_prefix=dataset,
                nb_iterations=50000,
                batch_size=32,
                normalize_timeseries=True,
                learning_rate=0.0001,
                early_stop=False,
                balance_classes=False)

    acc = evaluate_model(model,
                         dataset,
                         method,
                         proto_num,
                         dataset_prefix=dataset,
                         batch_size=50,
                         normalize_timeseries=True,
                         checkpoint_prefix="loss")
    np.savetxt(
Example #17
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='daily_sport_no_attention',
                epochs=500,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='daily_sport_no_attention',
                   batch_size=128)
Example #18
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='synthetic_control',
                epochs=4000,
                batch_size=16)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='synthetic_control',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='synthetic_control', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='synthetic_control', class_id=0)
Example #19
                if not os.path.exists('weights/' + weights_dir):
                    os.makedirs('weights/' + weights_dir)

                dataset_name_ = weights_dir + dname

                # try:
                model = model_fn(MAX_SEQUENCE_LENGTH, NB_CLASS, cell)

                print('*' * 20, "Training model for dataset %s" % (dname),
                      '*' * 20)

                # comment out the training code to only evaluate !
                train_model(model,
                            did,
                            dataset_name_,
                            epochs=2000,
                            batch_size=128,
                            normalize_timeseries=normalize_dataset)

                acc = evaluate_model(model,
                                     did,
                                     dataset_name_,
                                     batch_size=128,
                                     normalize_timeseries=normalize_dataset)

                s = "%d,%s,%s,%0.6f\n" % (did, dname, dataset_name_, acc)

                file.write(s)
                file.flush()

                successes.append(s)
Example #20
def squeeze_excite_block(input):
    '''Create a squeeze-excite block; returns a Keras tensor.'''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    #model = generate_model_2()
    model = generate_model()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='geo',
                epochs=500,
                batch_size=16,
                normalize_timeseries=False)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='geo',
                   batch_size=16,
                   normalize_timeseries=False)