    # Min-max statistics are computed on the training split only (column 0 holds the label)
    train_max = np.max(full_train[:,1:])
    train_min = np.min(full_train[:,1:])

    train_data = 2. * (full_train[:,1:] - train_min) / (train_max - train_min) - 1.
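    # Remap the raw labels with the dataset-specific offset and scale factor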
    train_labels = (full_train[:,0] + class_modifier_add(version))*class_modifier_multi(version)

    train_number = np.shape(train_labels)[0]
    #print(np.shape(train_data))
    #print(np.shape(train_labels))

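    # Scale the test series with the *training* min/max so both splits share the same range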
    test_data = 2. * (full_test[:,1:] - train_min) / (train_max - train_min) - 1.
    test_labels = (full_test[:,0] + class_modifier_add(version))*class_modifier_multi(version)
    #print(np.shape(test_data))
    #print(np.shape(test_labels))
    test_number = np.shape(test_labels)[0]
    seq_length = max_seq_len(version)

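    # Reshape the flat vectors into (samples, seq_length, 1) for a univariate sequence model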
    train_data = train_data.reshape((-1,seq_length, 1))
    test_data = test_data.reshape((-1, seq_length, 1))

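    # Random selection only needs the size of the candidate pool;
    # otherwise load the precomputed pairwise DTW distance matrix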
    distances = train_number if selection == "random" else read_dtw_matrix(version)

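    # Class-wise selection: pick an equal share of prototypes from each class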
    if classwise == "classwise":
        proto_loc = np.zeros(0, dtype=np.int32)
        proto_factor = int(proto_number / no_classes)
        for c in range(no_classes):
            cw = np.where(train_labels == c)[0]
            if selection == "random":
                cw_distances = []
            else:
                cw_distances = distances[cw]
Example #2
from utils.generic import train_model, evaluate_model
#from utils.models import cnn_raw_model, cnn_dtwfeatures_model, cnn_earlyfusion_model, cnn_midfusion_model, cnn_latefusion_model
from utils.models import cnn_midfusion_model_v2, lstm_model
# max_seq_len, nb_classes and nb_dims are used below; the module they live in
# is not shown in this snippet, so this import path is an assumption.
from utils.constants import max_seq_len, nb_classes, nb_dims

import sys
import math
import numpy as np
import os

if __name__ == "__main__":
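    # Expected arguments: dataset name, prototype-selection method, number of prototypes, GPU id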
    dataset = sys.argv[1]
    method = sys.argv[2]
    proto_num = int(sys.argv[3])
    os.environ["CUDA_VISIBLE_DEVICES"] = sys.argv[4]

    max_seq_lenth = max_seq_len(dataset)
    nb_class = nb_classes(dataset)
    dim_num = nb_dims(dataset)
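    # Number of pooling layers scales with log2 of the maximum sequence length (minus 3)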
    nb_cnn = int(round(math.log(max_seq_lenth, 2)) - 3)

    #model = cnn_raw_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_dtwfeatures_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    #model = cnn_earlyfusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)
    model = cnn_midfusion_model_v2(nb_cnn, dim_num, proto_num, max_seq_lenth,
                                   nb_class)
    #model = cnn_latefusion_model(nb_cnn, proto_num, max_seq_lenth, nb_class)

    print("Number of Pooling Layers: %s" % str(nb_cnn))

    train_model(model,
                dataset,