Example 1
def run_classifier(mode='bilateral',
                   classifier='CNN',
                   sensor=["imu", "emg", "goin"],
                   NN_model=None):

    ########## SETTINGS  ########################

    BATCH_SIZE = 32
    LEARNING_RATE = 1e-5
    WEIGHT_DECAY = 1e-3
    NUMB_CLASS = 5
    NUB_EPOCH = 200
    numfolds = 10
    DATA_LOAD_BOOL = True
    BAND = 10
    HOP = 10
    # BAND=16,HOP=27
    SAVING_BOOL = True
    ############################################

    MODE = mode
    CLASSIFIER = classifier
    SENSOR = sensor
    sensor_str = '_'.join(SENSOR)

    MODEL_NAME = './models/Freq-Encoding/bestmodel'+ \
              '_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'_BAND'+str(BAND)+'_HOP'+str(HOP)+'_subjects.pth'

    SAVE_NAME = './checkpoints/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_' + sensor_str + '_BAND' + str(
        BAND) + '_HOP' + str(HOP) + 'subjects.pkl'
    # (NN_model or '') guards the default NN_model=None when building the file name
    RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + (NN_model or '') + '_' + MODE + '_' + sensor_str + '_BATCH_SIZE' + str(
        BATCH_SIZE) + '_LR' + str(LEARNING_RATE) + '_WD' + str(
            WEIGHT_DECAY) + '_EPOCH' + str(NUB_EPOCH) + '_BAND' + str(
                BAND) + '_HOP' + str(HOP) + '_subjects_accuracy.txt'
    # Load the dataset and train, val, test splits
    print("Loading datasets...")

    subjects = [
        '156', '185', '186', '188', '189', '190', '191', '192', '193', '194'
    ]

    spectrogramTime = 0.0
    if SAVING_BOOL:
        subject_data = []
        for subject in subjects:
            subject_data.append(
                EnableDataset(subject_list=[subject],
                              data_range=(1, 51),
                              bands=BAND,
                              hop_length=HOP,
                              model_type=CLASSIFIER,
                              sensors=SENSOR,
                              mode=MODE))
            spectrogramTime += subject_data[-1].spectrogramTime
        save_object(subject_data, SAVE_NAME)
    else:
        with open(SAVE_NAME, 'rb') as input:
            subject_data = pickle.load(input)
    spectrogramTime = spectrogramTime / len(subjects)

    INPUT_NUM = subject_data[0].input_numb

    device = "cuda" if torch.cuda.is_available() else "cpu"  # Configure device
    print('GPU USED?', torch.cuda.is_available())

    if NN_model == 'RESNET18':
        model = torch.hub.load('pytorch/vision:v0.4.2',
                               'resnet18',
                               pretrained=True)  # use resnet
        num_ftrs = model.fc.in_features
        # model.conv1 = nn.Conv2d(num_input_channel, 64, kernel_size=7, stride=2, padding=3,bias=False)
        top_layer = nn.Conv2d(INPUT_NUM, 3, kernel_size=5, stride=1, padding=2)
        model = nn.Sequential(top_layer, model)
        model.fc = nn.Linear(num_ftrs, NUMB_CLASS)

    else:
        model = Network(INPUT_NUM, NUMB_CLASS)
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(),
                           lr=LEARNING_RATE,
                           weight_decay=WEIGHT_DECAY)
    num_epoch = NUB_EPOCH

    init_state = copy.deepcopy(model.state_dict())
    init_state_opt = copy.deepcopy(optimizer.state_dict())

    accuracies = []

    ss_accuracies = []
    tr_accuracies = []

    class_accs = [0] * NUMB_CLASS

    subject_numb = []

    # skf = KFold(n_splits = numfolds, shuffle = True)
    skf = KFold(n_splits=len(subject_data), shuffle=True)
    i = 0

    train_class = trainclass(model, optimizer, DATA_LOAD_BOOL, device,
                             criterion, MODEL_NAME)

    tests = []
    preds = []
    inferenceTime = 0.0
    # for train_index, test_index in skf.split(X, y, types):
    for train_index, test_index in skf.split(subject_data):
        print(train_index, test_index)

        model.load_state_dict(init_state)
        optimizer.load_state_dict(init_state_opt)

        train_set = [subject_data[i] for i in train_index]
        test_set = [subject_data[i] for i in test_index]
        BIO_train = torch.utils.data.ConcatDataset(train_set)
        wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))

        for batch, label, dtype in tqdm(wholeloader, disable=DATA_LOAD_BOOL):
            X_train = batch
            y_train = label
            types_train = dtype
        BIO_train = None
        train_set = None

        BIO_test = torch.utils.data.ConcatDataset(test_set)
        wholeloader = DataLoader(BIO_test, batch_size=len(BIO_test))

        for batch, label, dtype in tqdm(wholeloader, disable=DATA_LOAD_BOOL):
            X_test = batch
            y_test = label
            types_test = dtype
        BIO_test = None
        test_set = None

        train_dataset = TensorDataset(X_train, y_train, types_train)
        test_dataset = TensorDataset(X_test, y_test, types_test)

        trainloader = DataLoader(train_dataset,
                                 batch_size=BATCH_SIZE,
                                 shuffle=True)
        testloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

        print("######################Fold:{}#####################".format(i +
                                                                          1))

        train_class.train(trainloader, num_epoch)

        model.load_state_dict(torch.load(MODEL_NAME))

        accs, ss_accs, tr_accs, pred, test, class_acc, inf_time = train_class.evaluate(
            testloader)
        accuracies.append(accs)
        ss_accuracies.append(ss_accs)
        tr_accuracies.append(tr_accs)

        preds.extend(pred)
        tests.extend(test)

        subject_numb.append(test_index[0])

        for j in range(len(class_accs)):
            class_accs[j] += class_acc[j]
        inferenceTime += inf_time

        del test_dataset, train_dataset, trainloader, testloader

        i += 1

    # print("average:")
    # for i in range(len(class_accs)):
    # 	if class_accs[i] == 0:
    # 		print("Class {} has no samples".format(i))
    # 	else:
    # 		print("Class {} accuracy: {}".format(i, class_accs[i]/numfolds))

    print("Accuracies")
    for item in accuracies:
        print(item)

    print("Steady state")
    for item in ss_accuracies:
        print(item)

    print("Translational")
    for item in tr_accuracies:
        print(item)

    inferenceTime = inferenceTime / len(preds)
    print("Inference Time")
    print(inferenceTime)

    print('writing...')
    with open(RESULT_NAME, 'w') as f:
        f.write('total ')
        for item in accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('subject_numb ')
        for item in subject_numb:
            f.write("%s " % item)
        f.write('\n')

        f.write('spectrogram time %s' % spectrogramTime)
        f.write('\n')
        f.write('inference time %s' % inferenceTime)


    conf = confusion_matrix(tests, preds)
    print(conf)
    print(classification_report(tests, preds, digits=3))

    return conf
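# A minimal usage sketch (an assumed entry point, not part of the original
# file): invokes the leave-one-subject-out run above with the ResNet18 front
# end and derives the overall accuracy from the returned confusion matrix.
if __name__ == '__main__':
    conf = run_classifier(mode='bilateral',
                          classifier='CNN',
                          sensor=['imu', 'emg', 'goin'],
                          NN_model='RESNET18')
    print('overall accuracy:', conf.diagonal().sum() / conf.sum())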
Example 2
def run_classifier(mode='bilateral',
                   classifier='CNN',
                   sensor=["imu", "emg", "goin"],
                   NN_model=None):

    ########## SETTINGS  ########################

    BATCH_SIZE = 32
    LEARNING_RATE = 1e-5
    WEIGHT_DECAY = 1e-3
    NUMB_CLASS = 5
    NUB_EPOCH = 200
    numfolds = 10
    DATA_LOAD_BOOL = True
    BAND = 10
    HOP = 10
    # BAND=16,HOP=27
    SAVING_BOOL = True
    ############################################

    MODE = mode
    CLASSIFIER = classifier
    SENSOR = sensor
    sensor_str = '_'.join(SENSOR)


    MODEL_NAME = './models/Freq-Encoding/bestmodel'+ \
              '_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'_BAND'+str(BAND)+'_HOP'+str(HOP)+'.pth'

    # RESULT_NAME= './results/Freq-Encoding/accuracy'+ \
    # '_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'.txt'

    # (NN_model or '') guards the default NN_model=None when building the file name
    RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + (NN_model or '') + '_' + MODE + '_' + sensor_str + '_BATCH_SIZE' + str(
        BATCH_SIZE) + '_LR' + str(LEARNING_RATE) + '_WD' + str(
            WEIGHT_DECAY) + '_EPOCH' + str(NUB_EPOCH) + '_BAND' + str(
                BAND) + '_HOP' + str(HOP) + '_accuracy.txt'

    SAVE_NAME = './checkpoints/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_' + sensor_str + '_BAND' + str(
        BAND) + '_HOP' + str(HOP) + '.pkl'

    if not os.path.exists('./models/Freq-Encoding'):
        os.makedirs('./models/Freq-Encoding')

    if not os.path.exists('./results/' + CLASSIFIER):
        os.makedirs('./results/' + CLASSIFIER)

    if not os.path.exists('./checkpoints/' + CLASSIFIER):
        os.makedirs('./checkpoints/' + CLASSIFIER)

    spectrogramTime = 0.0
    if SAVING_BOOL:

        # Load the dataset and train, val, test splits
        print("Loading datasets...")
        BIO_train = EnableDataset(subject_list=[
            '156', '185', '186', '188', '189', '190', '191', '192', '193',
            '194'
        ],
                                  data_range=(1, 51),
                                  bands=BAND,
                                  hop_length=HOP,
                                  model_type=CLASSIFIER,
                                  sensors=SENSOR,
                                  mode=MODE)
        spectrogramTime += BIO_train.spectrogramTime
        save_object(BIO_train, SAVE_NAME)
    with open(SAVE_NAME, 'rb') as input:
        BIO_train = pickle.load(input)
    INPUT_NUM = BIO_train.input_numb

    wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))

    device = "cuda" if torch.cuda.is_available() else "cpu"  # Configure device
    print('GPU USED?', torch.cuda.is_available())

    if NN_model == 'RESNET18':
        model = torch.hub.load('pytorch/vision:v0.4.2',
                               'resnet18',
                               pretrained=True)  # use resnet
        num_ftrs = model.fc.in_features
        # model.conv1 = nn.Conv2d(num_input_channel, 64, kernel_size=7, stride=2, padding=3,bias=False)
        top_layer = nn.Conv2d(INPUT_NUM, 3, kernel_size=5, stride=1, padding=2)
        model = nn.Sequential(top_layer, model)
        model.fc = nn.Linear(num_ftrs, NUMB_CLASS)

    else:
        model = Network(INPUT_NUM, NUMB_CLASS)
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(),
                           lr=LEARNING_RATE,
                           weight_decay=WEIGHT_DECAY)
    num_epoch = NUB_EPOCH

    init_state = copy.deepcopy(model.state_dict())
    init_state_opt = copy.deepcopy(optimizer.state_dict())

    for batch, label, dtype in tqdm(wholeloader, disable=DATA_LOAD_BOOL):
        X = batch
        y = label
        types = dtype

    accuracies = []

    ss_accuracies = []
    tr_accuracies = []

    # class_accs = [0] * NUMB_CLASS
    class_acc_list = []

    skf = KFold(n_splits=numfolds, shuffle=True)
    i = 0

    train_class = trainclass(model, optimizer, DATA_LOAD_BOOL, device,
                             criterion, MODEL_NAME)

    tests = []
    preds = []
    inferenceTime = 0.0
    for train_index, test_index in skf.split(X, y, types):

        model.load_state_dict(init_state)
        optimizer.load_state_dict(init_state_opt)

        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        types_train, types_test = types[train_index], types[test_index]

        train_dataset = TensorDataset(X_train, y_train, types_train)
        test_dataset = TensorDataset(X_test, y_test, types_test)

        trainloader = DataLoader(train_dataset,
                                 batch_size=BATCH_SIZE,
                                 shuffle=True)
        testloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

        print("######################Fold:{}#####################".format(i +
                                                                          1))
        train_class.train(trainloader, num_epoch)

        model.load_state_dict(torch.load(MODEL_NAME))

        # print("Evaluate on test set")

        accs, ss_accs, tr_accs, pred, test, class_acc, inf_time = train_class.evaluate(
            testloader)
        accuracies.append(accs)
        ss_accuracies.append(ss_accs)
        tr_accuracies.append(tr_accs)

        preds.extend(pred)
        tests.extend(test)

        class_acc_list.append(class_acc)

        inferenceTime += inf_time

        i += 1

    print('saving results...')

    # model.load_state_dict(torch.load('./models/bestmodel_BATCH_SIZE32_LR1e-05_WD0.001_EPOCH200_BAND10_HOP10.pth', map_location='cpu'))
    inferenceTime = inferenceTime / len(preds)
    print('writing...')
    with open(RESULT_NAME, 'w') as f:
        f.write('total ')
        for item in accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accuracies:
            f.write("%s " % item)

        for j in range(0, 5):
            f.write('\n')
            f.write('class {} '.format(j))
            for m in range(0, numfolds):
                f.write("%s " % class_acc_list[m][j])
        f.write('\n')

        f.write('spectrogram time %s' % spectrogramTime)
        f.write('\n')
        f.write('inference time %s' % inferenceTime)


    conf = confusion_matrix(tests, preds)
    print(conf)
    print(classification_report(tests, preds, digits=3))

    return conf
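# save_object() is used throughout these examples but never defined in this
# file. A minimal sketch, assuming a plain pickle dump (the matching loads
# above use pickle.load, so this mirrors them; it is a reconstruction, not
# necessarily the repository's exact helper):
import pickle


def save_object(obj, filename):
    # serialize obj to filename with the highest available pickle protocol
    with open(filename, 'wb') as output:
        pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)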
from mask_net import FigureNetwork

#plots the activation figure as seen in the paper

########## SETTINGS  ########################
BAND = 10
HOP = 10
sensors = ["imu", "emg", "gon"]
MODE = 'bilateral'
############################################

#import any subset of data
BIO_train = EnableDataset(subject_list=['156'],
                          data_range=(1, 20),
                          bands=BAND,
                          hop_length=HOP,
                          model_type="CNN",
                          sensors=sensors,
                          mode=MODE)
testloader = DataLoader(BIO_train, batch_size=1)

#initialize device
device = "cuda" if torch.cuda.is_available() else "cpu"  # Configure device
print('GPU USED?', torch.cuda.is_available())

#initialize network
model = FigureNetwork(52, 5)
#load trained state dict here if you can!
#model.load_state_dict(torch.load(<your state dict here>, map_location='cpu'))
model = model.to(device)
# numb_class is filled in from the identical definition later in this file;
# SAVING_BOOL, SENSOR, MODE and SAVE_NAME are assumed to be defined as in the
# SETTINGS blocks of the other examples (this fragment omits them).
numb_class = [5, 2, 2, 2, 2]  # output class count per mode-specific classifier
len_class = len(numb_class)
len_phase = 4
BIO_trains = []

BIO_trains_len = 0

k = 0
subjects = ['156', '185', '186', '188', '189', '190', '191', '192', '193', '194']
# subjects = ['156']

if SAVING_BOOL:
    for i in range(1, len_class + 1):
        for j in range(1, len_phase + 1):
            subject_data = []
            for subject in subjects:
                subject_data.append(
                    EnableDataset(subject_list=[subject],
                                  phaselabel=j,
                                  prevlabel=i,
                                  model_type='LDA',
                                  sensors=SENSOR,
                                  mode=MODE))
            BIO_trains.append(subject_data)
            k += 1
            print(k)
    save_object(BIO_trains, SAVE_NAME)

else:
    with open(SAVE_NAME, 'rb') as input:
        BIO_trains = pickle.load(input)


models = []
tot = 0
correct = 0
steady_state_correct = 0
def run_classifier(args):
    """
	Main function runs training and testing of Random classifier.
	This code runs subject independent configuration. 

	Input: argument passes through argparse. Each argument is described
	in the --help of each arguments.
	Output: No return, but generates a .txt file results of testing
	including accuracy of the models.
	"""
    #parameters
    numfolds = 10
    SAVING_BOOL = args.data_saving
    MODE = args.laterality
    CLASSIFIER = args.classifiers
    SENSOR = args.sensors

    subjects = [
        '156', '185', '186', '188', '189', '190', '191', '192', '193', '194'
    ]

    #save data for ease of later use
    if SAVING_BOOL:
        if not os.path.exists('./checkpoints/'):
            os.makedirs('./checkpoints/')
        subject_data = []
        for subject in subjects:
            subject_data.append(
                EnableDataset(subject_list=[subject],
                              model_type=CLASSIFIER,
                              sensors=SENSOR,
                              mode=MODE))
        save_object(subject_data, './checkpoints/count_Data_features.pkl')
    else:
        with open('./checkpoints/count_Data_features.pkl', 'rb') as input:
            subject_data = pickle.load(input)

    skf = KFold(n_splits=numfolds, shuffle=True)
    i = 0

    overall_accs = []
    ss_accs = []
    tr_accs = []
    #run leave-one-out evaluation of random guesser and mode specific classifiers
    for train_index, test_index in skf.split(subject_data):
        train_vals = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
        test_vals = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]

        print("######################Fold:{}#####################".format(i +
                                                                          1))

        #split data based on leaving one subject out
        train_set = [subject_data[i] for i in train_index]
        test_set = [subject_data[i] for i in test_index]
        BIO_train = torch.utils.data.ConcatDataset(train_set)
        wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))
        for batch, label, trigger, dtype in wholeloader:
            X_train = batch
            y_train = label
            types_train = dtype
            trigger_train = trigger

        BIO_test = torch.utils.data.ConcatDataset(test_set)
        wholeloader = DataLoader(BIO_test, batch_size=len(BIO_test))
        for batch, label, trigger, dtype in wholeloader:
            X_test = batch
            y_test = label
            types_test = dtype
            trigger_test = trigger

        train_dataset = TensorDataset(X_train, y_train, trigger_train)
        test_dataset = TensorDataset(X_test, y_test, trigger_test)

        #get dataset statistics
        for img, labels, trigger in train_dataset:
            train_vals[int(trigger) - 1][int(labels) - 1] += 1

        for img, labels, trigger in test_dataset:
            test_vals[int(trigger) - 1][int(labels) - 1] += 1

        test_vals = np.array(test_vals)
        train_vals = np.array(train_vals)

        #evaluate mode specific classifier
        if args.mode_specific:
            # each mode-specific classifier should predict its own mode most
            # often, i.e. the row-wise maxima lie on the diagonal
            if np.array_equal(np.argmax(train_vals, axis=1), np.arange(5)):

                overall_acc = np.sum(np.max(test_vals, 0)) / np.sum(test_vals)
                overall_accs.append(overall_acc)
                print(overall_acc)

                if np.array_equal(np.max(train_vals, axis=1),
                                  np.diag(train_vals)):
                    ss_acc = 1
                    tr_acc = 0
                    ss_accs.append(ss_acc)
                    tr_accs.append(tr_acc)

            else:
                overall_acc = np.nan  # Nan was undefined; np.nan is intended
                overall_accs.append(overall_acc)

        #evaluate random guesser
        else:
            if np.argmax(train_vals) == 0:
                overall_acc = np.sum(test_vals[:, 0]) / np.sum(test_vals)
                overall_accs.append(overall_acc)

                ss_acc = test_vals[0][0] / np.sum(np.diag(test_vals))
                tr_acc = np.sum(test_vals[1:, 0]) / (
                    np.sum(test_vals) - np.sum(np.diag(test_vals)))

                ss_accs.append(ss_acc)
                tr_accs.append(tr_acc)

                print('overall.{}, ss.{}, tr.{}'.format(
                    overall_acc, ss_acc, tr_acc))
            else:
                overall_acc = np.nan
                overall_accs.append(overall_acc)

        i += 1

    #save results
    print('writing...')
    RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + '_subjects_accuracy.txt'
    if not os.path.exists('./results/' + CLASSIFIER):
        os.makedirs('./results/' + CLASSIFIER)
    with open(RESULT_NAME, 'w') as f:
        f.write('total ')
        for item in overall_accs:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accs:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accs:
            f.write("%s " % item)
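# A small worked example of the accuracy bookkeeping above, with illustrative
# counts (not dataset values). Rows index the previous mode, columns the next
# mode: diagonal entries are steady-state samples, off-diagonal entries are
# transitions, and the random guesser always predicts the first class (LW).
import numpy as np

test_vals = np.array([[50, 2, 3, 1, 4],
                      [5, 20, 0, 0, 0],
                      [6, 0, 18, 0, 0],
                      [2, 0, 0, 15, 0],
                      [3, 0, 0, 0, 12]])

overall_acc = np.sum(test_vals[:, 0]) / np.sum(test_vals)  # 66/141: every LW guess that is right
ss_acc = test_vals[0][0] / np.sum(np.diag(test_vals))      # 50/115: correct share of steady-state samples
tr_acc = np.sum(test_vals[1:, 0]) / (np.sum(test_vals) - np.sum(np.diag(test_vals)))  # 16/26
print(overall_acc, ss_acc, tr_acc)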
Example 6
def run_classifier(mode='bilateral',
                   classifier='CNN',
                   sensor=["imu", "emg", "goin"],
                   NN_model=None):

    ########## SETTINGS  ########################

    BATCH_SIZE = 32
    LEARNING_RATE = 1e-5
    WEIGHT_DECAY = 1e-3
    NUMB_CLASS = 5
    NUB_EPOCH = 200
    numfolds = 10
    DATA_LOAD_BOOL = True

    SAVING_BOOL = True
    MODE_SPECIFIC_BOOL = True

    BAND = 10
    HOP = 10
    ############################################

    print('Number of folds: ', numfolds)

    MODE = mode
    CLASSIFIER = classifier
    SENSOR = sensor
    sensor_str = '_'.join(SENSOR)


    MODEL_NAME = './models/Freq-Encoding/bestmodel'+ \
              '_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'_BAND'+str(BAND)+'_HOP'+str(HOP)+'.pth'

    # RESULT_NAME= './results/Freq-Encoding/accuracy'+ \
    # '_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'.txt'

    RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_' + sensor_str + '_BAND' + str(
        BAND) + '_HOP' + str(HOP) + '_accuracy.txt'

    SAVE_NAME = './checkpoints/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_' + sensor_str + '_BAND' + str(
        BAND) + '_HOP' + str(HOP) + 'mode_specific' + '.pkl'

    if not os.path.exists('./models/Freq-Encoding'):
        os.makedirs('./models/Freq-Encoding')

    if not os.path.exists('./results/' + CLASSIFIER):
        os.makedirs('./results/' + CLASSIFIER)

    if not os.path.exists('./checkpoints/' + CLASSIFIER):
        os.makedirs('./checkpoints/' + CLASSIFIER)

    # if not os.path.exists('./results/Freq-Encoding'):
    # 	os.makedirs('./results/Freq-Encoding')

    # Load the dataset and train, val, test splits
    print("Loading datasets...")

    spectrogramTime = 0.0
    if SAVING_BOOL:

        BIO_train = EnableDataset(subject_list=[
            '156', '185', '186', '188', '189', '190', '191', '192', '193',
            '194'
        ],
                                  data_range=(1, 51),
                                  bands=BAND,
                                  hop_length=HOP,
                                  model_type=CLASSIFIER,
                                  sensors=SENSOR,
                                  mode=MODE,
                                  mode_specific=MODE_SPECIFIC_BOOL)
        spectrogramTime += BIO_train.spectrogramTime
        save_object(BIO_train, SAVE_NAME)
    else:
        with open(SAVE_NAME, 'rb') as input:
            BIO_train = pickle.load(input)
    # BIO_train= EnableDataset(subject_list= ['156'],data_range=(1, 8),bands=BAND,hop_length=HOP,model_type=CLASSIFIER,sensors=SENSOR,mode=MODE,mode_specific = MODE_SPECIFIC_BOOL)

    INPUT_NUM = BIO_train.input_numb

    wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))

    device = "cuda" if torch.cuda.is_available() else "cpu"  # Configure device
    print('GPU USED?', torch.cuda.is_available())

    if NN_model == 'RESNET18':
        model = MyResNet18()  # use resnet
        model.conv1 = nn.Conv2d(INPUT_NUM,
                                64,
                                kernel_size=5,
                                stride=1,
                                padding=2)
        model.fc = nn.Linear(517, NUMB_CLASS)
    else:
        model = Network_modespecific(INPUT_NUM, NUMB_CLASS)
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(),
                           lr=LEARNING_RATE,
                           weight_decay=WEIGHT_DECAY)
    num_epoch = NUB_EPOCH

    init_state = copy.deepcopy(model.state_dict())
    init_state_opt = copy.deepcopy(optimizer.state_dict())

    one_hot_embed = torch.eye(5)

    for batch, label, dtype, prevlabels in tqdm(wholeloader,
                                                disable=DATA_LOAD_BOOL):
        X = batch
        y = label
        types = dtype
        prevlabel = prevlabels

    accuracies = []
    ss_accuracies = []
    tr_accuracies = []
    inferenceTime = 0.0
    total_predictions = 0

    skf = KFold(n_splits=numfolds, shuffle=True)
    i = 0

    train_class = trainclass(model, optimizer, DATA_LOAD_BOOL, device,
                             criterion, MODEL_NAME)

    for train_index, test_index in skf.split(X, y):

        model.load_state_dict(init_state)
        optimizer.load_state_dict(init_state_opt)

        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        types_train, types_test = types[train_index], types[test_index]
        onehot_train, onehot_test = one_hot_embed[
            prevlabel[train_index]], one_hot_embed[prevlabel[test_index]]

        train_dataset = TensorDataset(X_train, y_train, types_train,
                                      onehot_train)
        test_dataset = TensorDataset(X_test, y_test, types_test, onehot_test)

        trainloader = DataLoader(train_dataset,
                                 batch_size=BATCH_SIZE,
                                 shuffle=True)
        testloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

        print("######################Fold:{}#####################3".format(i +
                                                                           1))
        train_class.train_modesp(trainloader, num_epoch)

        model.load_state_dict(torch.load(MODEL_NAME))

        # print("Evaluate on test set")
        accs, ss_accs, tr_accs, inf_time, num_preds = train_class.evaluate_modesp(
            testloader)
        accuracies.append(accs)
        ss_accuracies.append(ss_accs)
        tr_accuracies.append(tr_accs)

        inferenceTime += inf_time
        total_predictions += num_preds

        i += 1

    print('saving results...')

    # with open(RESULT_NAME, 'w') as f:
    # 	for item in accuracies:
    # 		f.write("%s\n" % item)
    # f.close()

    inferenceTime = inferenceTime / total_predictions

    print('writing...')
    with open(RESULT_NAME, 'w') as f:
        f.write('total ')
        for item in accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accuracies:
            f.write("%s " % item)
        f.write('\n')

        f.write('spectrogram time %s' % spectrogramTime)
        f.write('\n')
        f.write('inference time %s' % inferenceTime)
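# Sketch of the one-hot previous-mode conditioning used above: the rows of
# torch.eye(5) act as a lookup table, so indexing with a tensor of previous-
# mode labels (assumed 0-4 here) yields one one-hot vector per sample for the
# mode-specific network.
import torch

one_hot_embed = torch.eye(5)
prevlabel = torch.tensor([0, 3, 1])  # previous modes for three samples
print(one_hot_embed[prevlabel])
# tensor([[1., 0., 0., 0., 0.],
#         [0., 0., 0., 1., 0.],
#         [0., 1., 0., 0., 0.]])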
Example 7
def run_classifier(mode='bilateral',classifier='CNN',sensor=["imu","emg","goin"]):

	########## SETTINGS  ########################

	BATCH_SIZE = 32
	LEARNING_RATE = 1e-5
	WEIGHT_DECAY = 1e-3
	NUMB_CLASS = 5
	NUB_EPOCH= 200
	numfolds = 10
	DATA_LOAD_BOOL = True

	SAVING_BOOL = True
	############################################



	MODE = mode
	CLASSIFIER = classifier
	SENSOR = sensor
	sensor_str='_'.join(SENSOR)


	MODEL_NAME = './models/Freq-Encoding/bestmodel'+ \
	        		'_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'.pth'

	# RESULT_NAME= './results/Freq-Encoding/accuracy'+ \
	        		# '_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'.txt'


	RESULT_NAME= './results/'+CLASSIFIER+'/'+CLASSIFIER+'_'+MODE+'_'+sensor_str+'_accuracy.txt'

	SAVE_NAME= './checkpoints/'+CLASSIFIER+'/'+CLASSIFIER+'_'+MODE+'_'+sensor_str+'.pkl'

	if not os.path.exists('./models/Freq-Encoding'):
		os.makedirs('./models/Freq-Encoding')


	if not os.path.exists('./results/'+CLASSIFIER):
		os.makedirs('./results/'+CLASSIFIER)

	if not os.path.exists('./checkpoints/'+CLASSIFIER):
		os.makedirs('./checkpoints/'+CLASSIFIER)

	# if not os.path.exists('./results/Freq-Encoding'):
	# 	os.makedirs('./results/Freq-Encoding')


	# Load the dataset and train, val, test splits
	print("Loading datasets...")

	# BIO_train= EnableDataset(subject_list= ['156','185','186','188','189','190', '191', '192', '193', '194'],data_range=(1, 50),bands=10,hop_length=10,model_type=CLASSIFIER,sensors=SENSOR,mode=MODE)
	BIO_train= EnableDataset(subject_list= ['156'],data_range=(1, 2),bands=10,hop_length=10,model_type='CNN')

	INPUT_NUM=BIO_train.in_channels
	
	# with open('BIO_train_melspectro_500s_bands_16_hop_length_27.pkl', 'rb') as input:
	#     BIO_train = pickle.load(input)

	if SAVING_BOOL:
		save_object(BIO_train,SAVE_NAME)


	wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))


	device = "cuda" if torch.cuda.is_available() else "cpu" # Configure device
	print('GPU USED?',torch.cuda.is_available())
	model = Network(INPUT_NUM,NUMB_CLASS)
	model = model.to(device)

	criterion = nn.CrossEntropyLoss()
	optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
	num_epoch = NUB_EPOCH

	init_state = copy.deepcopy(model.state_dict())
	init_state_opt = copy.deepcopy(optimizer.state_dict())


	for batch, label, dtype in tqdm(wholeloader,disable=DATA_LOAD_BOOL):
		X = batch
		y = label
		types = dtype

	accuracies =[]
	class_accs = [0] * NUMB_CLASS



	train_class=trainclass(model,optimizer,DATA_LOAD_BOOL,device,criterion,MODEL_NAME)


	print('saving results...')
	print("average:")
	for i in range(len(class_accs)):
		if class_accs[i] == 0:
			print("Class {} has no samples".format(i))
		else:
			print("Class {} accuracy: {}".format(i, class_accs[i]/numfolds))


	model.load_state_dict(torch.load('./models/Freq-Encoding/bestmodel_BATCH_SIZE32_LR1e-05_WD0.001_EPOCH200_BAND10_HOP10.pth', map_location='cpu'))

	def normalize_output(img):
		# scale a channel to [0, 1] unless it is all zeros
		if not (img.min() == 0 and img.max() == 0):
			img = img - img.min()
			img = img / img.max()
		return img

	# Save one channel from the first datum in the dataset

	# fig1, axs = plt.subplots(3,figsize=(6,20))
	fig1, axs = plt.subplots(3)
	fontname = 'Times New Roman'
	plt.rcParams['font.family'] = fontname
	# representative image of IMU	Right_Shank_Ax
	im=axs[0].imshow(normalize_output(BIO_train[0][0][0]))
	axs[0].invert_yaxis()
	axs[0].spines['top'].set_visible(False)
	axs[0].spines['right'].set_visible(False)
	axs[0].spines['bottom'].set_visible(False)
	axs[0].spines['left'].set_visible(False)

	axs[0].get_xaxis().set_visible(False)
	# cb=fig1.colorbar(im, ax=axs[0])
	# cb.outline.set_visible(False)
	title=axs[0].set_title('IMU',fontname=fontname) 
	# title.rcParams['font.family'] = fontname
	# EMG Right_TA
	im2=axs[1].imshow(normalize_output(BIO_train[0][0][30]))
	axs[1].invert_yaxis()
	axs[1].spines['top'].set_visible(False)
	axs[1].spines['right'].set_visible(False)
	axs[1].spines['bottom'].set_visible(False)
	axs[1].spines['left'].set_visible(False)	
	axs[1].get_xaxis().set_visible(False)
	axs[1].set_title('EMG',fontname=fontname) 
	# GON Right_TA
	im3=axs[2].imshow(normalize_output(BIO_train[0][0][44]))
	axs[2].invert_yaxis()
	axs[2].spines['top'].set_visible(False)
	axs[2].spines['right'].set_visible(False)
	axs[2].spines['bottom'].set_visible(False)
	axs[2].spines['left'].set_visible(False)
	axs[2].set_title('Goniometer',fontname=fontname)
	locs, labels = plt.xticks()  
	# plt.xticks(np.array([0,25,50]), ['0','0.5','1'])
	# plt.xlabel('Time (s)')

	locs, labels = plt.yticks()  
	# plt.yticks(np.array([0,25,50]), ['0','0.5','1'])

	# plt.xlabel('Number of Pixels')
	# axs[0].set_ylabel('Number of Pixels')

	# axs[0].set_yticks(np.array([0,2,4,6,8]))



	def pix_to_hz(x):
		y=x*25
		return y
	def hz_to_pix(x):
		y=x/25
		return y




	ax2 = axs[0].secondary_yaxis('right', functions=(pix_to_hz,hz_to_pix))
	# ax2 = axs[0].twinx()
	ax2.set_yticks(np.array([0,50/25,100/25,150/25,200/25]))
	ax2.set_yticklabels(['0','50','100','150','200'])
	# ax2.yaxis.set_ticks(np.array([0,50/25,100/25,150/25,200/25]))
	# ax2.yaxis.set_tickslabels(['0','50','100','150','200'])
	ax2.spines['top'].set_visible(False)
	ax2.spines['right'].set_visible(False)
	ax2.spines['bottom'].set_visible(False)
	ax2.spines['left'].set_visible(False)
	ax2.set_ylabel('Hz')


	# plt.yticks(np.array([0,50/25,100/25,150/25,200/25,250/25]), ['0','50','100','150','200','250'])
	fig1.text(0.5, 0.04, 'Number of Time Frame', va='center', ha='center', fontsize=plt.rcParams['axes.labelsize'])
	fig1.text(0.04, 0.5, 'Number of Mel-bins', va='center', ha='center', rotation='vertical', fontsize=plt.rcParams['axes.labelsize'])
	


# Visualize feature maps
	activation = {}
	def get_activation(name):
		def hook(model, input, output):
			activation[name] = output.detach()
		return hook	


	model.sclayer1.register_forward_hook(get_activation('sclayer1'))

	data = BIO_train[0][0]
	data.unsqueeze_(0)
	output = model(data)

	act = activation['sclayer1'].squeeze()

	fig, axarr = plt.subplots(5,2)
	# idxes = random.sample(range(0, 128), 10)
	idxes = np.array([4,63,32,5,56,8,119,105,110,48])

	col =0
	for idx, idxe in enumerate(idxes):

	    rem=idx%5 
	    im5=axarr[rem,col].imshow(normalize_output(act[idxe]),interpolation='bilinear', cmap='jet')
	    axarr[rem,col].invert_yaxis()
	    axarr[rem,col].spines['top'].set_visible(False)
	    axarr[rem,col].spines['right'].set_visible(False)
	    axarr[rem,col].spines['bottom'].set_visible(False)
	    axarr[rem,col].spines['left'].set_visible(False)
	    # axarr[rem,col].get_xaxis().set_visible(False)
	    # axarr[rem,col].get_yaxis().set_visible(False)
	    if not (idx % 5 ==4):
	    	axarr[rem,col].get_xaxis().set_visible(False)
	    if idx >4:
	    	axarr[rem,col].get_yaxis().set_visible(False)
	    if idx %  5==4:
	    	col +=1
	    print(idx,idxe)

	fontname = 'Times New Roman'
	for ax in axarr.flatten():
	    labels = ax.get_xticklabels() + ax.get_yticklabels()
	    [label.set_fontname(fontname) for label in labels]

	for ax in axs.flatten():
	    labels = ax.get_xticklabels() + ax.get_yticklabels()
	    [label.set_fontname(fontname) for label in labels]


	# cbar_ax = fig1.add_axes([0.9, 0.15, 0.05, 0.7])
	# cb=fig1.colorbar(im5, cax=cbar_ax)


	fig.text(0.5, 0.04, 'Number of Pixels', va='center', ha='center', fontsize=plt.rcParams['axes.labelsize'])
	fig.text(0.04, 0.5, 'Number of Pixels', va='center', ha='center', rotation='vertical', fontsize=plt.rcParams['axes.labelsize'])

	fig1.savefig('./input.png')
	fig.savefig('./activation.png')
	plt.show()



	with open(RESULT_NAME, 'w') as f:
		for item in accuracies:
			f.write("%s\n" % item)
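# A self-contained sketch of the forward-hook pattern used for the activation
# figure above; a toy conv stack stands in for the trained Network, since only
# the hook mechanics are the point here.
import torch
import torch.nn as nn

activation = {}


def get_activation(name):
    def hook(module, input, output):
        activation[name] = output.detach()
    return hook


toy = nn.Sequential(nn.Conv2d(1, 8, kernel_size=3, padding=1), nn.ReLU())
toy[0].register_forward_hook(get_activation('conv'))
_ = toy(torch.randn(1, 1, 16, 16))
print(activation['conv'].shape)  # torch.Size([1, 8, 16, 16])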
def run_classifier(args):
    """
	Main function runs training and testing of Random classifier.
	This code runs subject dependent configuration. 

	Input: argument passes through argparse. Each argument is described
	in the --help of each arguments.
	Output: No return, but generates a .txt file results of testing
	including accuracy of the models.
	"""
    #parameters
    numfolds = 10
    MODE = args.laterality
    CLASSIFIER = args.classifiers
    SENSOR = args.sensors

    #load the whole dataset
    BIO_train = EnableDataset(subject_list=[
        '156', '185', '186', '188', '189', '190', '191', '192', '193', '194'
    ],
                              data_range=(1, 51),
                              bands=10,
                              hop_length=10,
                              mode_specific=True,
                              model_type=CLASSIFIER,
                              sensors=SENSOR,
                              mode=MODE)

    #count number of occurrences of data, based on previous walking mode and next walking mode
    vals = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0]]

    for img, labels, trigger, _ in BIO_train:

        vals[int(trigger) - 1][int(labels) - 1] += 1

    vals = np.array(vals)

    #mode specific classifier baseline: assume knowledge of previous mode
    if args.mode_specific:
        overall_acc = np.sum(np.max(vals, 0)) / np.sum(vals)
        print("Random mode specific error: ", overall_acc)
        # steady state is perfectly predicted when each row's maximum lies on the diagonal
        if np.array_equal(np.max(vals, axis=1), np.diag(vals)):
            ss_acc = 1
            tr_acc = 0

    #random guesser baseline: predict based on distribution of samples
    else:
        overall_acc = np.sum(vals[:, 0]) / np.sum(vals)
        ss_acc = vals[0][0] / np.sum(np.diag(vals))
        tr_acc = np.sum(vals[1:, 0]) / (np.sum(vals) - np.sum(np.diag(vals)))

        print('overall.{}, ss.{}, tr.{}'.format(overall_acc, ss_acc, tr_acc))

    del vals

    #load training dataset
    wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))

    #split dataset by number of folds
    for batch, label, trigger, dtype in tqdm(wholeloader, disable=True):
        X = batch
        y = label
        tri = trigger
        types = dtype

    skf = KFold(n_splits=numfolds, shuffle=True)
    i = 0

    overall_accs = []
    ss_accs = []
    tr_accs = []

    #validate classifier through k-fold cross validation
    for train_index, test_index in skf.split(X, y):
        train_vals = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
        test_vals = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]

        print("######################Fold:{}#####################".format(i +
                                                                          1))

        #split dataset
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        trigger_train, trigger_test = tri[train_index], tri[test_index]
        types_train, types_test = types[train_index], types[test_index]

        train_dataset = TensorDataset(X_train, y_train, trigger_train)
        test_dataset = TensorDataset(X_test, y_test, trigger_test)

        #get distribution based on fold
        for img, labels, trigger in train_dataset:
            train_vals[int(trigger) - 1][int(labels) - 1] += 1

        for img, labels, trigger in test_dataset:
            test_vals[int(trigger) - 1][int(labels) - 1] += 1

        #print(test_vals)
        test_vals = np.array(test_vals)
        train_vals = np.array(train_vals)

        #evaluate mode specific classifier
        if args.mode_specific:
            # each mode-specific classifier should predict its own mode most
            # often, i.e. the row-wise maxima lie on the diagonal
            if np.array_equal(np.argmax(train_vals, axis=1), np.arange(5)):

                overall_acc = np.sum(np.max(test_vals, 0)) / np.sum(test_vals)
                overall_accs.append(overall_acc)
                print(overall_acc)

                if np.array_equal(np.max(train_vals, axis=1),
                                  np.diag(train_vals)):
                    ss_acc = 1
                    tr_acc = 0
                    ss_accs.append(ss_acc)
                    tr_accs.append(tr_acc)

            else:
                overall_acc = np.nan  # Nan was undefined; np.nan is intended
                overall_accs.append(overall_acc)

        #evaluate random guesser
        else:
            if np.argmax(train_vals) == 0:
                overall_acc = np.sum(test_vals[:, 0]) / np.sum(test_vals)
                overall_accs.append(overall_acc)

                ss_acc = test_vals[0][0] / np.sum(np.diag(test_vals))
                tr_acc = np.sum(test_vals[1:, 0]) / (
                    np.sum(test_vals) - np.sum(np.diag(test_vals)))

                ss_accs.append(ss_acc)
                tr_accs.append(tr_acc)

                print('overall.{}, ss.{}, tr.{}'.format(
                    overall_acc, ss_acc, tr_acc))
            else:
                overall_acc = np.nan
                overall_accs.append(overall_acc)

        i += 1

    #write results
    if not os.path.exists('./results/' + CLASSIFIER):
        os.makedirs('./results/' + CLASSIFIER)
    RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + '_accuracy.txt'
    print('writing...')
    with open(RESULT_NAME, 'w') as f:
        f.write('total ')
        for item in overall_accs:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accs:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accs:
            f.write("%s " % item)
# SENSOR, MODE, SAVING_BOOL and SAVE_NAME are assumed to be defined as in the
# SETTINGS blocks of the other examples; this fragment omits them.
numb_class = [5, 2, 2, 2, 2]
len_class = len(numb_class)
len_phase = 4
BIO_trains = []

BIO_trains_len = 0

k = 0
for i in range(1, len_class + 1):
    for j in range(1, len_phase + 1):
        BIO_trains.append(
            EnableDataset(subject_list=[
                '156', '185', '186', '188', '189', '190', '191', '192', '193',
                '194'
            ],
                          phaselabel=j,
                          prevlabel=i,
                          model_type='LDA',
                          sensors=SENSOR,
                          mode=MODE))
        # BIO_trains_len += len(BIO_trains[k])
        k += 1

if SAVING_BOOL:
    save_object(BIO_trains, SAVE_NAME)

# with open(SAVE_NAME, 'rb') as input:
# 	 BIO_trains = pickle.load(input)

wholeloaders = []
Example 10
def run_classifier(mode='bilateral',
                   classifier='LDA',
                   sensor=["imu", "emg", "goin"]):

    MODE = mode
    CLASSIFIER = classifier
    SENSOR = sensor
    sensor_str = '_'.join(SENSOR)

    RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_' + sensor_str + '_subjects_accuracy.txt'

    if not os.path.exists('./results/' + CLASSIFIER):
        os.makedirs('./results/' + CLASSIFIER)

    subjects = [
        '156', '185', '186', '188', '189', '190', '191', '192', '193', '194'
    ]
    subject_data = []
    for subject in subjects:
        subject_data.append(
            EnableDataset(subject_list=[subject],
                          model_type=CLASSIFIER,
                          sensors=SENSOR,
                          mode=MODE))

    correct = 0
    steady_state_correct = 0
    tot_steady_state = 0
    transitional_correct = 0
    tot_transitional = 0

    # Define cross-validation parameters
    skf = KFold(n_splits=len(subject_data), shuffle=True)

    scale = preprocessing.StandardScaler()
    pca = PCA()
    scale_PCA = Pipeline([('norm', scale), ('dimred', pca)])

    if CLASSIFIER == 'LDA':
        model = LinearDiscriminantAnalysis()
    elif CLASSIFIER == 'SVM':
        model = SVC(kernel='linear', C=10)

    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
    accuracies = []
    ss_accuracies = []
    tr_accuracies = []
    subject_numb = []

    i = 0
    for train_index, test_index in skf.split(subject_data):

        print("**************FOLD {}*********".format(i + 1))

        print(train_index, test_index)

        train_set = [subject_data[i] for i in train_index]
        test_set = [subject_data[i] for i in test_index]
        BIO_train = torch.utils.data.ConcatDataset(train_set)
        wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))
        for batch, label, dtype in tqdm(wholeloader):
            X_train = batch
            y_train = label
            types_train = dtype

        BIO_test = torch.utils.data.ConcatDataset(test_set)
        wholeloader = DataLoader(BIO_test, batch_size=len(BIO_test))
        for batch, label, dtype in tqdm(wholeloader):
            X_test = batch
            y_test = label
            types_test = dtype

        if CLASSIFIER == 'LDA':

            scale_PCA.fit(X_train)
            feats_train_PCA = scale_PCA.transform(X_train)
            feats_test_PCA = scale_PCA.transform(X_test)

            pcaexplainedvar = np.cumsum(
                scale_PCA.named_steps['dimred'].explained_variance_ratio_)
            pcanumcomps = min(min(np.where(pcaexplainedvar > 0.95))) + 1
            unique_modes = np.unique(y_train)

            model.set_params(priors=np.ones(len(unique_modes)) /
                             len(unique_modes))
            model.fit(feats_train_PCA, y_train)
            y_pred = model.predict(feats_test_PCA)

        elif CLASSIFIER == 'SVM':

            scale.fit(X_train)
            feats_train_norm = scale.transform(X_train)
            feats_test_norm = scale.transform(X_test)

            model.fit(feats_train_norm, y_train)
            y_pred = model.predict(feats_test_norm)

        correct = (y_pred == np.array(y_test)).sum().item()
        tot = len(y_test)
        steady_state_correct = (np.logical_and(y_pred == np.array(y_test),
                                               types_test == 1)).sum().item()
        tot_steady_state = (types_test == 1).sum().item()
        transitional_correct = (np.logical_and(y_pred == np.array(y_test),
                                               types_test == 0)).sum().item()
        tot_transitional = (types_test == 0).sum().item()

        accuracies.append(accuracy_score(y_test, y_pred))

        tot_acc = correct / tot
        ss_acc = steady_state_correct / tot_steady_state if tot_steady_state != 0 else "No steady state samples used"
        tr_acc = transitional_correct / tot_transitional if tot_transitional != 0 else "No transitional samples used"

        if tot_steady_state != 0:
            ss_accuracies.append(ss_acc)
        if tot_transitional != 0:
            tr_accuracies.append(tr_acc)

        subject_numb.append(test_index[0])

        print("Total accuracy: {}".format(accuracy_score(y_test, y_pred)))
        print("Total correct: {}, number: {}, accuracy: {}".format(
            correct, tot, tot_acc))
        print("Steady-state correct: {}, number: {}, accuracy: {}".format(
            steady_state_correct, tot_steady_state, ss_acc))
        print("Transistional correct: {}, number: {}, accuracy: {}".format(
            transitional_correct, tot_transitional, tr_acc))
        # print(accuracy_score(y_test, y_pred))

        i += 1
    print('********************SUMMARY*****************************')
    # print('Accuracy_total:', correct/len(BIO_train))
    print('Accuracy mean:', np.mean(accuracies), 'Accuracy std:',
          np.std(accuracies))
    print('SS Accuracy mean:', np.mean(ss_accuracies), 'Accuracy std:',
          np.std(ss_accuracies))
    print('TR Accuracy mean:', np.mean(tr_accuracies), 'Accuracy std:',
          np.std(tr_accuracies))
    # model.fit(X_train, y_train)
    # total_accuracies = accuracies + ss_accuracies + tr_accuracies

    print('writing...')
    with open(RESULT_NAME, 'w') as f:
        f.write('total ')
        for item in accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('subject_numb ')
        for item in subject_numb:
            f.write("%s " % item)
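# Sketch of the 95%-variance PCA truncation used in the LDA branch above, on
# synthetic data: pcanumcomps is the smallest number of leading components
# whose cumulative explained variance exceeds 0.95 (np.argmax on the boolean
# array returns the first True index, equivalent to the min(min(np.where(...)))
# form used above).
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline

X = np.random.randn(200, 30)
scale_PCA = Pipeline([('norm', preprocessing.StandardScaler()),
                      ('dimred', PCA())])
scale_PCA.fit(X)
pcaexplainedvar = np.cumsum(
    scale_PCA.named_steps['dimred'].explained_variance_ratio_)
pcanumcomps = int(np.argmax(pcaexplainedvar > 0.95)) + 1
print(pcanumcomps)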
Example 11
MODE = 'bilateral'
CLASSIFIER = 'Random_modespecific'
SENSOR = ["imu", "emg", "goin"]
sensor_str = '_'.join(SENSOR)

RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + '_accuracy.txt'

if not os.path.exists('./results/' + CLASSIFIER):
    os.makedirs('./results/' + CLASSIFIER)

# BIO_train= EnableDataset(subject_list= ['156','185','186','188','189','190', '191', '192', '193', '194'],data_range=(1, 51),bands=10,hop_length=10,model_type=CLASSIFIER,sensors=SENSOR,mode=MODE)
BIO_train = EnableDataset(subject_list=[
    '156', '185', '186', '188', '189', '190', '191', '192', '193', '194'
],
                          data_range=(1, 4),
                          bands=10,
                          hop_length=10,
                          mode_specific=True,
                          model_type=CLASSIFIER,
                          sensors=SENSOR,
                          mode=MODE)

# save_object(BIO_train,'count_Data_features.pkl')

# with open('count_Data_features.pkl', 'rb') as input:
# 	   BIO_train = pickle.load(input)

vals = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0]]
# vals = [[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]
# vals = [[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]]
for img, labels, trigger, _ in BIO_train:
    # tally (previous mode, next mode) pairs, as in the identical loop later in this file
    vals[int(trigger) - 1][int(labels) - 1] += 1
def run_classifier(args):
    """
	Main function runs training and testing of Heuristic based machine
	learning models (SVM, LDA)

	Input: argument passes through argparse. Each argument is described
	in the --help of each arguments.
	Output: No return, but generates a .txt file results of testing
	including accuracy of the models.
	"""
    ########## PARAMETER SETTINGS  ##############
    MODE = args.laterality
    CLASSIFIER = args.classifiers
    SENSOR = args.sensors
    ############################################

    sensor_str = '_'.join(SENSOR)

    RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + '_mode_specific' + '_' + MODE + '_' + sensor_str + '_accuracy.txt'

    SAVE_NAME = './checkpoints/' + CLASSIFIER + '/mode_specific' + '_' + MODE + '_' + sensor_str + '.pkl'

    if not os.path.exists('./results/' + CLASSIFIER):
        os.makedirs('./results/' + CLASSIFIER)

    if not os.path.exists('./checkpoints/' + CLASSIFIER):
        os.makedirs('./checkpoints/' + CLASSIFIER)

    numb_class = [5, 2, 2, 2,
                  2]  # Define output class number for each mode classifiers
    len_class = len(numb_class)
    len_phase = 4
    BIO_trains = []
    BIO_trains_len = 0

    # Loading/saving the ENABL3S dataset
    print("Loading datasets...")
    if args.data_saving:
        k = 0
        for i in range(1, len_class + 1):
            for j in range(1, len_phase + 1):
                BIO_trains.append(
                    EnableDataset(subject_list=[
                        '156', '185', '186', '188', '189', '190', '191', '192',
                        '193', '194'
                    ],
                                  phaselabel=j,
                                  prevlabel=i,
                                  model_type='LDA',
                                  sensors=SENSOR,
                                  mode=MODE))
                k += 1
        save_object(BIO_trains, SAVE_NAME)
    else:
        with open(SAVE_NAME, 'rb') as input:
            BIO_trains = pickle.load(input)

    wholeloaders = []

    k = 0
    for i in range(1, len_class + 1):
        for j in range(1, len_phase + 1):
            BIO_trains_len += len(BIO_trains[k])
            wholeloaders.append(
                DataLoader(BIO_trains[k], batch_size=len(BIO_trains[k])))
            k += 1

    models = []
    tot = 0
    correct = 0
    steady_state_correct = 0
    tot_steady_state = 0
    transitional_correct = 0
    tot_transitional = 0
    for i in range(1, len_class + 1):
        for j in range(1, len_phase + 1):
            if CLASSIFIER == 'LDA':
                model = LinearDiscriminantAnalysis()
            elif CLASSIFIER == 'SVM':
                model = SVC(kernel='linear', C=10)
            models.append(model)

    k = 0

    accuracies = [[[] for x in range(len_phase)] for y in range(len_class)]
    ss_accuracies = [[[] for x in range(len_phase)] for y in range(len_class)]
    tr_accuracies = [[[] for x in range(len_phase)] for y in range(len_class)]

    tot_numb_mat = np.zeros((10, 20))
    ss_numb_mat = np.zeros((10, 20))
    tr_numb_mat = np.zeros((10, 20))

    tot_mat = np.zeros((10, 20))
    ss_mat = np.zeros((10, 20))
    tr_mat = np.zeros((10, 20))

    # Define cross-validation parameters
    numfolds = 10
    skf = StratifiedKFold(n_splits=numfolds, shuffle=True)
    """
	main testing/training loop of mode-specific classifiers. 
	Separate classifiers for each activity classes (LW,RA,RD,SA,SD) 
	and phase (Right left toe off/heel contact)
	"""
    for i in range(1, len_class + 1):
        for j in range(1, len_phase + 1):
            print("**************mode #", i, "****phase", j)

            for batch, label, dtype in tqdm(wholeloaders[k],
                                            disable=args.progressbar):
                X = batch
                y = label
                types = dtype

            scale = preprocessing.StandardScaler()
            pca = PCA()
            scale_PCA = Pipeline([('norm', scale), ('dimred', pca)])

            m = 0
            for train_index, test_index in skf.split(X, y, types):
                X_train, X_test = X[train_index], X[test_index]
                y_train, y_test = y[train_index], y[test_index]
                types_train, types_test = types[train_index], types[test_index]

                if CLASSIFIER == 'LDA':
                    scale_PCA.fit(X_train)

                    feats_train_PCA = scale_PCA.transform(X_train)
                    feats_test_PCA = scale_PCA.transform(X_test)

                    pcaexplainedvar = np.cumsum(
                        scale_PCA.named_steps['dimred'].
                        explained_variance_ratio_)
                    pcanumcomps = min(min(
                        np.where(pcaexplainedvar > 0.95))) + 1

                    unique_modes = np.unique(y_train)
                    models[k].set_params(priors=np.ones(len(unique_modes)) /
                                         len(unique_modes))

                    models[k].fit(feats_train_PCA[:, 0:pcanumcomps], y_train)
                    y_pred = models[k].predict(
                        feats_test_PCA[:, 0:pcanumcomps]).ravel()

                elif CLASSIFIER == 'SVM':
                    scale.fit(X_train)

                    feats_train_norm = scale.transform(X_train)
                    feats_test_norm = scale.transform(X_test)

                    models[k].fit(feats_train_norm, y_train)
                    y_pred = models[k].predict(feats_test_norm)

                # append model performance metrics
                correct = (y_pred == np.array(y_test)).sum().item()
                tot = len(y_test)
                steady_state_correct = (np.logical_and(
                    y_pred == np.array(y_test), types_test == 1)).sum().item()
                tot_steady_state = (types_test == 1).sum().item()
                transitional_correct = (np.logical_and(
                    y_pred == np.array(y_test), types_test == 0)).sum().item()
                tot_transitional = (types_test == 0).sum().item()

                tot_acc = correct / tot
                ss_acc = steady_state_correct / tot_steady_state if tot_steady_state != 0 else "No steady state samples used"
                tr_acc = transitional_correct / tot_transitional if tot_transitional != 0 else "No transitional samples used"

                tot_numb_mat[m, k] = tot
                ss_numb_mat[m, k] = tot_steady_state
                tr_numb_mat[m, k] = tot_transitional

                tot_mat[m, k] = correct
                ss_mat[m, k] = steady_state_correct
                tr_mat[m, k] = transitional_correct

                m += 1
            k += 1
            del pca, X_train, X_test, y_train, y_test

    tot_numbs = np.sum(tot_numb_mat, axis=1)
    ss_numbs = np.sum(ss_numb_mat, axis=1)
    tr_numbs = np.sum(tr_numb_mat, axis=1)

    accuracies = np.sum(tot_mat, axis=1) / tot_numbs
    ss_accuracies = np.sum(ss_mat, axis=1) / ss_numbs
    tr_accuracies = np.sum(tr_mat, axis=1) / tr_numbs

    print('total number of classifiers: ', k)
    print('total number of data: ', BIO_trains_len)
    print('Accuracy_total:', np.mean(accuracies))
    print('Steady-state:', np.mean(ss_accuracies))
    print('Transitional:', np.mean(tr_accuracies))

    # writing to .txt file
    print('writing...')
    with open(RESULT_NAME, 'w') as f:
        f.write('total ')
        for item in accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accuracies:
            f.write("%s " % item)
########## SETTINGS  ########################

numb_class = 7
num_epoch = 10

BATCH_SIZE = 32
LEARNING_RATE = 1e-4
WEIGHT_DECAY = 1e-4

############################################

# Load data and split into training (80%), test (10%) and validation (10%)
BIO_train = EnableDataset(subject_list=[
    '156', '185', '186', '188', '189', '190', '191', '192', '193', '194'
],
                          data_range=(1, 50),
                          time_series=True)
train_size = int(0.8 * len(BIO_train))
test_size = int((len(BIO_train) - train_size) / 2)
# the sizes passed to random_split must sum to len(BIO_train), so the
# validation split takes the remainder
val_size = len(BIO_train) - train_size - test_size
train_dataset, test_dataset, val_dataset = torch.utils.data.random_split(
    BIO_train, [train_size, test_size, val_size])

trainloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
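# count how many training samples fall into each of the 7 locomotion classes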
classes = [0, 0, 0, 0, 0, 0, 0]
for data, labels in trainloader:
    for x in range(labels.size()[0]):
        classes[labels[x]] += 1

valloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)
testloader = DataLoader(val_dataset, batch_size=BATCH_SIZE)
def run_classifier(args):
    """
	Main function runs training and testing of neural network models (LIR-Net, RESNET18).
	This code runs subject dependent configuration. 

	Input: argument passes through argparse. Each argument is described
	in the --help of each arguments.
	Output: No return, but generates a .txt file results of testing
	including accuracy of the models.
	"""
    ########## PARAMETER SETTINGS  ##############
    BATCH_SIZE = args.batch_size
    LEARNING_RATE = args.lr
    WEIGHT_DECAY = args.weight_decay
    NUMB_CLASS = 5
    NUB_EPOCH = args.num_epoch
    numfolds = args.num_folds
    BAND = args.band
    HOP = args.hop

    SENSOR = args.sensors
    MODE = args.laterality
    CLASSIFIER = args.classifiers
    NN_model = args.nn_architecture
    MODE_SPECIFIC_BOOL = args.mode_specific
    ############################################

    sensor_str = '_'.join(SENSOR)

    MODEL_NAME = './models/Freq-Encoding/bestmodel'+ \
              '_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'_BAND'+str(BAND)+'_HOP'+str(HOP)+'.pth'

    if MODE_SPECIFIC_BOOL:
        RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + NN_model + '_mode_specific' + '_' + MODE + '_' + sensor_str + '_BATCH_SIZE' + str(
            BATCH_SIZE) + '_LR' + str(LEARNING_RATE) + '_WD' + str(
                WEIGHT_DECAY) + '_EPOCH' + str(NUB_EPOCH) + '_BAND' + str(
                    BAND) + '_HOP' + str(HOP) + '_accuracy.txt'
    else:
        RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + NN_model + '_' + MODE + '_' + sensor_str + '_BATCH_SIZE' + str(
            BATCH_SIZE) + '_LR' + str(LEARNING_RATE) + '_WD' + str(
                WEIGHT_DECAY) + '_EPOCH' + str(NUB_EPOCH) + '_BAND' + str(
                    BAND) + '_HOP' + str(HOP) + '_accuracy.txt'

    if MODE_SPECIFIC_BOOL:
        SAVE_NAME = './checkpoints/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_mode_specific' + '_' + sensor_str + '_BAND' + str(
            BAND) + '_HOP' + str(HOP) + '.pkl'
    else:
        SAVE_NAME = './checkpoints/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_' + sensor_str + '_BAND' + str(
            BAND) + '_HOP' + str(HOP) + '.pkl'

    if not os.path.exists('./models/Freq-Encoding'):
        os.makedirs('./models/Freq-Encoding')

    if not os.path.exists('./results/' + CLASSIFIER):
        os.makedirs('./results/' + CLASSIFIER)

    if not os.path.exists('./checkpoints/' + CLASSIFIER):
        os.makedirs('./checkpoints/' + CLASSIFIER)

    spectrogramTime = 0.0

    # Load the dataset and train, val, test splits
    print("Loading datasets...")
    if args.data_saving:
        BIO_train = EnableDataset(subject_list=[
            '156', '185', '186', '188', '189', '190', '191', '192', '193',
            '194'
        ],
                                  data_range=(1, 51),
                                  bands=BAND,
                                  hop_length=HOP,
                                  model_type=CLASSIFIER,
                                  sensors=SENSOR,
                                  mode=MODE,
                                  mode_specific=MODE_SPECIFIC_BOOL)
        spectrogramTime += BIO_train.avgSpectrogramTime
        save_object(BIO_train, SAVE_NAME)
    else:
        with open(SAVE_NAME, 'rb') as input:
            BIO_train = pickle.load(input)

    IN_CHANNELS = BIO_train.in_channels

    wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))

    device = "cuda" if torch.cuda.is_available() else "cpu"  # Configure device
    print('GPU USED?', torch.cuda.is_available())

    # Choose NN models to train/test on.
    if MODE_SPECIFIC_BOOL:
        model = Network_modespecific(IN_CHANNELS, NUMB_CLASS)
    else:
        if NN_model == 'RESNET18':
            print("model :**** RESNET18 ****")
            model = torch.hub.load('pytorch/vision:v0.4.2',
                                   'resnet18',
                                   pretrained=True)  # use resnet
            num_ftrs = model.fc.in_features
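            # pretrained ResNet-18 expects 3-channel input, so a small
            # convolution first maps the spectrogram channels down to 3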
            top_layer = nn.Conv2d(IN_CHANNELS,
                                  3,
                                  kernel_size=5,
                                  stride=1,
                                  padding=2)
            model.fc = nn.Linear(num_ftrs, NUMB_CLASS)
            model = nn.Sequential(top_layer, model)
        else:
            model = Network(IN_CHANNELS, NUMB_CLASS)

    model = model.to(device)

    # set model training parameters
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(),
                           lr=LEARNING_RATE,
                           weight_decay=WEIGHT_DECAY)
    num_epoch = NUB_EPOCH

    # initialize weights of the NN models
    init_state = copy.deepcopy(model.state_dict())
    init_state_opt = copy.deepcopy(optimizer.state_dict())

    if MODE_SPECIFIC_BOOL:
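        # identity matrix whose rows are one-hot encodings of the previous
        # mode label (see the commented-out lookup in the k-fold loop below)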
        one_hot_embed = torch.eye(5)
        for batch, label, dtype, prevlabels in tqdm(wholeloader,
                                                    disable=args.progressbar):
            X = batch
            y = label
            types = dtype
            prevlabel = prevlabels
    else:
        for batch, label, dtype in tqdm(wholeloader, disable=args.progressbar):
            X = batch
            y = label
            types = dtype

    accuracies = []
    ss_accuracies = []
    tr_accuracies = []
    tests = []
    preds = []
    inferenceTime = 0.0
    class_acc_list = []

    # main training/testing loop
    if args.val_on:
        # 8:1:1 split for validation
        train_class = trainclass(model, optimizer, args.progressbar, device,
                                 criterion, MODEL_NAME, args)

        numfolds = 1

        # the three sizes must sum to len(BIO_train) for random_split, so
        # the validation size is computed as the remainder
        train_size = int(0.8 * len(BIO_train))
        test_size = int((len(BIO_train) - train_size) / 2)
        val_size = len(BIO_train) - train_size - test_size
        train_dataset, test_dataset, val_dataset = torch.utils.data.random_split(
            BIO_train, [train_size, test_size, val_size])

        trainloader = DataLoader(train_dataset,
                                 batch_size=BATCH_SIZE,
                                 shuffle=True)
        valloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)
        testloader = DataLoader(val_dataset, batch_size=BATCH_SIZE)

        train_class.train(trainloader, num_epoch, valloader=valloader)
        model.load_state_dict(torch.load(MODEL_NAME))

        print("Evaluate on test set")
        accs, ss_accs, tr_accs, pred, test, class_acc, inf_time = train_class.evaluate(
            testloader)
        accuracies.append(accs)
        ss_accuracies.append(ss_accs)
        tr_accuracies.append(tr_accs)

        preds.extend(pred)
        tests.extend(test)

        class_acc_list.append(class_acc)

        inferenceTime += inf_time

    else:
        # k-fold validation
        skf = KFold(n_splits=numfolds, shuffle=True)
        i = 0

        train_class = trainclass(model, optimizer, args.progressbar, device,
                                 criterion, MODEL_NAME, args)

        for train_index, test_index in skf.split(X, y, types):

            model.load_state_dict(init_state)
            optimizer.load_state_dict(init_state_opt)

            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            types_train, types_test = types[train_index], types[test_index]

            if MODE_SPECIFIC_BOOL:
                # onehot_train, onehot_test = one_hot_embed[prevlabel[train_index]], one_hot_embed[prevlabel[test_index]]
                onehot_train, onehot_test = prevlabel[train_index], prevlabel[
                    test_index]
                train_dataset = TensorDataset(X_train, y_train, types_train,
                                              onehot_train)
                test_dataset = TensorDataset(X_test, y_test, types_test,
                                             onehot_test)
            else:
                train_dataset = TensorDataset(X_train, y_train, types_train)
                test_dataset = TensorDataset(X_test, y_test, types_test)

            trainloader = DataLoader(train_dataset,
                                     batch_size=BATCH_SIZE,
                                     shuffle=True)
            testloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

            print(
                "######################Fold:{}#####################".format(i +
                                                                            1))
            train_class.train(trainloader, num_epoch)

            model.load_state_dict(torch.load(MODEL_NAME))

            print("Evaluate on test set")
            accs, ss_accs, tr_accs, pred, test, class_acc, inf_time = train_class.evaluate(
                testloader)
            accuracies.append(accs)
            ss_accuracies.append(ss_accs)
            tr_accuracies.append(tr_accs)

            preds.extend(pred)
            tests.extend(test)

            class_acc_list.append(class_acc)

            inferenceTime += inf_time

            i += 1

    print('saving the results')

    # Write results to text files
    inferenceTime = inferenceTime / len(preds)
    print('writing...')
    with open(RESULT_NAME, 'w') as f:
        f.write('total ')
        for item in accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accuracies:
            f.write("%s " % item)

        for j in range(0, 5):
            f.write('\n')
            f.write('class {} '.format(j))
            for m in range(0, numfolds):
                f.write("%s " % class_acc_list[m][j])
        f.write('\n')
        if args.data_saving:
            f.write('spectrogram time %s' % spectrogramTime)
            f.write('\n')
        f.write('inference time %s' % inferenceTime)


    confusion = confusion_matrix(tests, preds)
    print(confusion)
    print(classification_report(tests, preds, digits=3))

    with open(
            './results/' + args.classifiers + '_' + sensor_str + '_' +
            args.laterality + '_' + 'confusion.txt', 'w') as f:
        for items in confusion:
            for item in items:
                f.write("%s " % item)
            f.write('\n')
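
# A minimal sketch (not part of the original scripts) of the argparse
# interface that the run_classifier(args) variants above assume. The flag
# names are inferred from the attribute accesses (args.batch_size, args.lr,
# args.mode_specific, ...); the defaults here are illustrative only and the
# project's real CLI may differ.
import argparse


def build_parser():
    parser = argparse.ArgumentParser(
        description='gait-mode classifier runner (sketch)')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--num_epoch', type=int, default=10)
    parser.add_argument('--num_folds', type=int, default=10)
    parser.add_argument('--band', type=int, default=10)
    parser.add_argument('--hop', type=int, default=10)
    parser.add_argument('--sensors', nargs='+', default=['imu', 'emg', 'goin'])
    parser.add_argument('--laterality', default='bilateral')
    parser.add_argument('--classifiers', default='CNN')
    parser.add_argument('--nn_architecture', default='', help="e.g. 'RESNET18'")
    parser.add_argument('--mode_specific', action='store_true')
    parser.add_argument('--data_saving', action='store_true')
    parser.add_argument('--progressbar', action='store_true')
    parser.add_argument('--val_on', action='store_true')
    return parser

# usage (hypothetical): run_classifier(build_parser().parse_args())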
Example no. 15
def run_classifier(args):
    """
	Main function that runs training and testing of the neural network
	models (LIR-Net, RESNET18) in the subject-independent configuration.

	Input: arguments passed through argparse; each argument is described
	in its --help text.
	Output: no return value, but writes a .txt file with the test
	results, including model accuracies.
	"""
    ########## PARAMETER SETTINGS  ########################
    BATCH_SIZE = args.batch_size
    LEARNING_RATE = args.lr
    WEIGHT_DECAY = args.weight_decay
    NUMB_CLASS = 5
    NUB_EPOCH = args.num_epoch
    numfolds = args.num_folds
    BAND = args.band
    HOP = args.hop

    SENSOR = args.sensors
    MODE = args.laterality
    CLASSIFIER = args.classifiers
    NN_model = args.nn_architecture
    MODE_SPECIFIC_BOOL = args.mode_specific
    ############################################

    sensor_str = '_'.join(SENSOR)

    MODEL_NAME = './models/Freq-Encoding/bestmodel'+ \
              '_BATCH_SIZE'+str(BATCH_SIZE)+'_LR'+str(LEARNING_RATE)+'_WD'+str(WEIGHT_DECAY)+'_EPOCH'+str(NUB_EPOCH)+'_BAND'+str(BAND)+'_HOP'+str(HOP)+'_subjects.pth'

    if MODE_SPECIFIC_BOOL:
        SAVE_NAME = './checkpoints/' + CLASSIFIER + '/' + CLASSIFIER + '_mode_specific' + '_' + MODE + '_' + sensor_str + '_BAND' + str(
            BAND) + '_HOP' + str(HOP) + '_subjects.pkl'
    else:
        SAVE_NAME = './checkpoints/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_' + sensor_str + '_BAND' + str(
            BAND) + '_HOP' + str(HOP) + '_subjects.pkl'

    if MODE_SPECIFIC_BOOL:
        RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + NN_model + '_mode_specific' + '_' + MODE + '_' + sensor_str + '_BATCH_SIZE' + str(
            BATCH_SIZE) + '_LR' + str(LEARNING_RATE) + '_WD' + str(
                WEIGHT_DECAY) + '_EPOCH' + str(NUB_EPOCH) + '_BAND' + str(
                    BAND) + '_HOP' + str(HOP) + '_subjects_accuracy.txt'
    else:
        RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + NN_model + '_' + MODE + '_' + sensor_str + '_BATCH_SIZE' + str(
            BATCH_SIZE) + '_LR' + str(LEARNING_RATE) + '_WD' + str(
                WEIGHT_DECAY) + '_EPOCH' + str(NUB_EPOCH) + '_BAND' + str(
                    BAND) + '_HOP' + str(HOP) + '_subjects_accuracy.txt'

    subjects = [
        '156', '185', '186', '188', '189', '190', '191', '192', '193', '194'
    ]

    spectrogramTime = 0.0

    # Load the dataset and train, val, test splits
    print("Loading datasets...")
    if args.data_saving:
        subject_data = []
        for subject in subjects:
            subject_data.append(
                EnableDataset(subject_list=[subject],
                              data_range=(1, 51),
                              bands=BAND,
                              hop_length=HOP,
                              model_type=CLASSIFIER,
                              sensors=SENSOR,
                              mode=MODE,
                              mode_specific=MODE_SPECIFIC_BOOL))
            spectrogramTime += subject_data[-1].avgSpectrogramTime
            print("subject ID", subject, "loaded")
        save_object(subject_data, SAVE_NAME)
    else:
        with open(SAVE_NAME, 'rb') as input:
            subject_data = pickle.load(input)

    spectrogramTime = spectrogramTime / len(subjects)

    IN_CHANNELS = subject_data[0].in_channels

    device = "cuda" if torch.cuda.is_available() else "cpu"  # Configure device
    print('GPU USED?', torch.cuda.is_available())

    # Choose NN models to train/test on.
    if MODE_SPECIFIC_BOOL:
        model = Network_modespecific(IN_CHANNELS, NUMB_CLASS)
    else:
        if NN_model == 'RESNET18':
            model = torch.hub.load('pytorch/vision:v0.4.2',
                                   'resnet18',
                                   pretrained=True)  # use resnet
            num_ftrs = model.fc.in_features
            top_layer = nn.Conv2d(IN_CHANNELS,
                                  3,
                                  kernel_size=5,
                                  stride=1,
                                  padding=2)
            model.fc = nn.Linear(num_ftrs, NUMB_CLASS)
            model = nn.Sequential(top_layer, model)
        else:
            model = Network(IN_CHANNELS, NUMB_CLASS)

    # set NN model parameters
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(),
                           lr=LEARNING_RATE,
                           weight_decay=WEIGHT_DECAY)
    num_epoch = NUB_EPOCH

    # initialize model parameters
    init_state = copy.deepcopy(model.state_dict())
    init_state_opt = copy.deepcopy(optimizer.state_dict())

    accuracies = []
    ss_accuracies = []
    tr_accuracies = []
    class_accs = [0] * NUMB_CLASS
    subject_numb = []
    class_acc_list = []

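    # KFold with n_splits equal to the number of subjects performs
    # leave-one-subject-out cross-validation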
    skf = KFold(n_splits=len(subject_data), shuffle=True)
    i = 0

    train_class = trainclass(model, optimizer, args.progressbar, device,
                             criterion, MODEL_NAME, args)

    tests = []
    preds = []
    inferenceTime = 0.0

    # main training/testing loop
    for train_index, test_index in skf.split(subject_data):
        # k-fold validation
        print('training subject No.:', train_index, ' Testing subject No.:',
              test_index)

        model.load_state_dict(init_state)
        optimizer.load_state_dict(init_state_opt)

        train_set = [subject_data[i] for i in train_index]
        test_set = [subject_data[i] for i in test_index]
        BIO_train = torch.utils.data.ConcatDataset(train_set)
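        # a batch size equal to the dataset length makes the loader yield
        # one batch containing every sample, so each loop below runs once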
        wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))

        if MODE_SPECIFIC_BOOL:
            for batch, label, dtype, prevlabel in tqdm(
                    wholeloader, disable=args.progressbar):
                X_train = batch
                y_train = label
                types_train = dtype
                prevlabel_train = prevlabel
        else:
            for batch, label, dtype in tqdm(wholeloader,
                                            disable=args.progressbar):
                X_train = batch
                y_train = label
                types_train = dtype

        BIO_train = None
        train_set = None

        BIO_test = torch.utils.data.ConcatDataset(test_set)
        wholeloader = DataLoader(BIO_test, batch_size=len(BIO_test))

        if MODE_SPECIFIC_BOOL:
            for batch, label, dtype, prevlabel in tqdm(
                    wholeloader, disable=args.progressbar):
                X_test = batch
                y_test = label
                types_test = dtype
                prevlabel_test = prevlabel
        else:
            for batch, label, dtype in tqdm(wholeloader,
                                            disable=args.progressbar):
                X_test = batch
                y_test = label
                types_test = dtype

        BIO_test = None
        test_set = None

        if MODE_SPECIFIC_BOOL:
            onehot_train, onehot_test = prevlabel_train, prevlabel_test
            train_dataset = TensorDataset(X_train, y_train, types_train,
                                          onehot_train)
            test_dataset = TensorDataset(X_test, y_test, types_test,
                                         onehot_test)
        else:
            train_dataset = TensorDataset(X_train, y_train, types_train)
            test_dataset = TensorDataset(X_test, y_test, types_test)

        trainloader = DataLoader(train_dataset,
                                 batch_size=BATCH_SIZE,
                                 shuffle=True)
        testloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

        print("######################Fold:{}#####################".format(i +
                                                                          1))

        train_class.train(trainloader, num_epoch)

        model.load_state_dict(torch.load(MODEL_NAME))

        print("Evaluate on test set")
        accs, ss_accs, tr_accs, pred, test, class_acc, inf_time = train_class.evaluate(
            testloader)

        # append results
        accuracies.append(accs)
        ss_accuracies.append(ss_accs)
        tr_accuracies.append(tr_accs)
        preds.extend(pred)
        tests.extend(test)
        class_acc_list.append(class_acc)
        inferenceTime += inf_time
        subject_numb.append(test_index[0])

        del test_dataset, train_dataset, trainloader, testloader

        i += 1

    # Write results to text files
    print('writing...')
    with open(RESULT_NAME, 'w') as f:
        f.write('subject_numb ')
        for item in subject_numb:
            f.write("%s " % item)
        f.write('\n')
        f.write('total ')
        for item in accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accuracies:
            f.write("%s " % item)
        for j in range(0, 5):
            f.write('\n')
            f.write('class {} '.format(j))
            for m in range(0, len(class_acc_list)):
                f.write("%s " % class_acc_list[m][j])
        f.write('\n')
        if args.data_saving:
            f.write('spectrogram time %s' % spectrogramTime)
            f.write('\n')
        f.write('inference time %s' % inferenceTime)


    confusion = confusion_matrix(tests, preds)
    print(confusion)
    print(classification_report(tests, preds, digits=3))

    with open(
            './results/' + args.classifiers + '_' + sensor_str + '_' +
            args.laterality + '_' + 'confusion_subjects.txt', 'w') as f:
        for items in confusion:
            for item in items:
                f.write("%s " % item)

            f.write('\n')

    print('result saved in', RESULT_NAME)
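
# 'save_object' is used throughout these snippets but never defined in them;
# the sketch below is an assumption of what it does (pickle the object to
# disk, creating the target directory if needed), not the project's actual
# implementation.
import os
import pickle


def save_object(obj, filename):
    # make sure the checkpoint directory exists before writing
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(filename, 'wb') as output:
        # highest protocol for faster, more compact serialization
        pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)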
def run_classifier(args):
    """
	Main function runs training and testing of Heuristic based machine
	learning models (SVM, LDA)

	Input: argument passes through argparse. Each argument is described
	in the --help of each arguments.
	Output: No return, but generates a .txt file results of testing
	including accuracy of the models.
	"""
    ########## PARAMETER SETTINGS  ##############
    MODE = args.laterality
    CLASSIFIER = args.classifiers
    SENSOR = args.sensors
    ############################################

    sensor_str = '_'.join(SENSOR)

    RESULT_NAME = './results/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_' + sensor_str + '_subjects_accuracy.txt'

    SAVE_NAME = './checkpoints/' + CLASSIFIER + '/' + CLASSIFIER + '_' + MODE + '_' + sensor_str + '_subjects.pkl'

    if not os.path.exists('./results/' + CLASSIFIER):
        os.makedirs('./results/' + CLASSIFIER)

    subjects = [
        '156', '185', '186', '188', '189', '190', '191', '192', '193', '194'
    ]
    subject_data = []

    # Loading/saving the ENABL3S dataset
    if args.data_saving:
        print("Loading datasets...")
        for subject in subjects:
            subject_data.append(
                EnableDataset(subject_list=[subject],
                              model_type=CLASSIFIER,
                              sensors=SENSOR,
                              mode=MODE))
        save_object(subject_data, SAVE_NAME)
    else:
        with open(SAVE_NAME, 'rb') as input:
            subject_data = pickle.load(input)

    correct = 0
    steady_state_correct = 0
    tot_steady_state = 0
    transitional_correct = 0
    tot_transitional = 0

    # Define cross-validation: one fold per subject (leave-one-subject-out)
    skf = KFold(n_splits=len(subject_data), shuffle=True)

    # Define PCA parameters
    scale = preprocessing.StandardScaler()
    pca = PCA()
    scale_PCA = Pipeline([('norm', scale), ('dimred', pca)])
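    # standardize features before PCA so components are not dominated by
    # high-variance sensor channels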

    if CLASSIFIER == 'LDA':
        model = LinearDiscriminantAnalysis()
    elif CLASSIFIER == 'SVM':
        model = SVC(kernel='linear', C=10)

    accuracies = []
    ss_accuracies = []
    tr_accuracies = []
    subject_numb = []

    i = 0

    # main training/testing loop
    for train_index, test_index in skf.split(subject_data):

        print("**************FOLD {}*********".format(i + 1))

        print(train_index, test_index)

        train_set = [subject_data[i] for i in train_index]
        test_set = [subject_data[i] for i in test_index]
        BIO_train = torch.utils.data.ConcatDataset(train_set)
        wholeloader = DataLoader(BIO_train, batch_size=len(BIO_train))
        for batch, label, dtype in tqdm(wholeloader):
            X_train = batch
            y_train = label
            types_train = dtype

        BIO_test = torch.utils.data.ConcatDataset(test_set)
        wholeloader = DataLoader(BIO_test, batch_size=len(BIO_test))
        for batch, label, dtype in tqdm(wholeloader):
            X_test = batch
            y_test = label
            types_test = dtype

        if CLASSIFIER == 'LDA':

            scale_PCA.fit(X_train)
            feats_train_PCA = scale_PCA.transform(X_train)
            feats_test_PCA = scale_PCA.transform(X_test)

            pcaexplainedvar = np.cumsum(
                scale_PCA.named_steps['dimred'].explained_variance_ratio_)
            pcanumcomps = min(min(np.where(pcaexplainedvar > 0.95))) + 1
            unique_modes = np.unique(y_train)

            model.set_params(priors=np.ones(len(unique_modes)) /
                             len(unique_modes))
            model.fit(feats_train_PCA[:, 0:pcanumcomps], y_train)
            y_pred = model.predict(feats_test_PCA[:, 0:pcanumcomps])

        elif CLASSIFIER == 'SVM':

            scale.fit(X_train)
            feats_train_norm = scale.transform(X_train)
            feats_test_norm = scale.transform(X_test)

            model.fit(feats_train_norm, y_train)
            y_pred = model.predict(feats_test_norm)

        # append model performance metrics
        correct = (y_pred == np.array(y_test)).sum().item()
        tot = len(y_test)
        steady_state_correct = (np.logical_and(y_pred == np.array(y_test),
                                               types_test == 1)).sum().item()
        tot_steady_state = (types_test == 1).sum().item()
        transitional_correct = (np.logical_and(y_pred == np.array(y_test),
                                               types_test == 0)).sum().item()
        tot_transitional = (types_test == 0).sum().item()
        accuracies.append(accuracy_score(y_test, y_pred))

        tot_acc = correct / tot
        ss_acc = steady_state_correct / tot_steady_state if tot_steady_state != 0 else "No steady state samples used"
        tr_acc = transitional_correct / tot_transitional if tot_transitional != 0 else "No transitional samples used"

        if tot_steady_state != 0:
            ss_accuracies.append(ss_acc)
        if tot_transitional != 0:
            tr_accuracies.append(tr_acc)

        subject_numb.append(test_index[0])

        print("Total accuracy: {}".format(accuracy_score(y_test, y_pred)))
        print("Total correct: {}, number: {}, accuracy: {}".format(
            correct, tot, tot_acc))
        print("Steady-state correct: {}, number: {}, accuracy: {}".format(
            steady_state_correct, tot_steady_state, ss_acc))
        print("Transistional correct: {}, number: {}, accuracy: {}".format(
            transitional_correct, tot_transitional, tr_acc))

        i += 1
    print('********************SUMMARY*****************************')
    print('Accuracy mean:', np.mean(accuracies), 'Accuracy std:',
          np.std(accuracies))
    print('SS accuracy mean:', np.mean(ss_accuracies), 'SS accuracy std:',
          np.std(ss_accuracies))
    print('TR accuracy mean:', np.mean(tr_accuracies), 'TR accuracy std:',
          np.std(tr_accuracies))

    print('writing...')
    with open(RESULT_NAME, 'w') as f:
        f.write('total ')
        for item in accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('steadystate ')
        for item in ss_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('transitional ')
        for item in tr_accuracies:
            f.write("%s " % item)
        f.write('\n')
        f.write('subject_numb ')
        for item in subject_numb:
            f.write("%s " % item)
if not os.path.exists('./results/' + CLASSIFIER):
    os.makedirs('./results/' + CLASSIFIER)

# BIO_train= EnableDataset(subject_list= ['156','185','186','188','189','190', '191', '192', '193', '194'],data_range=(1, 10),bands=10,hop_length=10,model_type=CLASSIFIER,sensors=SENSOR,mode=MODE)
# BIO_train= EnableDataset(subject_list= ['156'],data_range=(1, 4),bands=10,hop_length=10,mode_specific = True,model_type=CLASSIFIER,sensors=SENSOR,mode=MODE)

subjects = [
    '156', '185', '186', '188', '189', '190', '191', '192', '193', '194'
]

if SAVING_BOOL:
    subject_data = []
    for subject in subjects:
        subject_data.append(
            EnableDataset(subject_list=[subject],
                          model_type=CLASSIFIER,
                          sensors=SENSOR,
                          mode=MODE))

    save_object(subject_data, './checkpoints/count_Data_features.pkl')
else:
    with open('./checkpoints/count_Data_features.pkl', 'rb') as input:
        subject_data = pickle.load(input)

skf = KFold(n_splits=numfolds, shuffle=True)
i = 0

overall_accs = []
ss_accs = []
tr_accs = []

for train_index, test_index in skf.split(subject_data):