Example #1
for c in range(len(overall_list_title)):
    overall_sheet.write(0, c, str(overall_list_title[c]))
dataset_list_title = ['activities'] + per_class_performance_index
# Go through all bosch datasets
datasets = ['b1']
for datafile in datasets:
    feature_filename = 'feature_' + datafile + '.pkl'
    # Looking for processed feature data
    if os.path.exists(feature_filename):
        feature_file = open(feature_filename, mode='rb')
        feature_dict = pickle.load(feature_file)
        feature = AlFeature()
        feature.load_from_dict(feature_dict)
    else:
        feature = load_casas_from_file(datafile,
                                       normalize=True,
                                       per_sensor=True,
                                       ignore_other=False)
        feature_file = open(feature_filename, mode='wb')
        pickle.dump(feature.export_to_dict(), feature_file, protocol=-1)
    feature_file.close()
    num_samples = feature.x.shape[0]
    train_index = []
    test_index = []
    # Wrap features and labels as Theano shared variables
    x_tensor = theano.shared(np.asarray(feature.x,
                                        dtype=theano.config.floatX),
                             borrow=True)
    y_tensor = T.cast(theano.shared(feature.y, borrow=True), 'int32')
    week_array = get_boundary(feature, period='week')
    # Number of units in each hidden layer
    hidden_layer_list = [[200, 200, 200]]
    for hidden_layer in hidden_layer_list:
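get_boundary(feature, period='week') above returns the sample indices at which new weeks begin, which later drives the weekly train/test splits. The short sketch below only illustrates that idea with plain datetimes; week_boundaries and the synthetic hourly timestamps are hypothetical stand-ins, not the actlearn implementation.

from datetime import datetime, timedelta

def week_boundaries(timestamps):
    """Indices where a new calendar week starts (hypothetical stand-in
    for get_boundary(feature, period='week'))."""
    weeks = [ts.isocalendar()[:2] for ts in timestamps]  # (ISO year, ISO week)
    boundaries = [0]
    for i in range(1, len(weeks)):
        if weeks[i] != weeks[i - 1]:
            boundaries.append(i)
    boundaries.append(len(weeks))
    return boundaries

# Example: three weeks of hourly events starting on a Wednesday
start = datetime(2011, 6, 1)
stamps = [start + timedelta(hours=h) for h in range(24 * 21)]
print(week_boundaries(stamps))  # [0, 120, 288, 456, 504]
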
Example #2
    assert isinstance(model, StackedDenoisingAutoencoder)
    x_tensor = theano.shared(np.asarray(x, dtype=theano.config.floatX),
                             borrow=True)
    result = model.classify(x_tensor)
    predicted_y = result[0]
    confusion_matrix = get_confusion_matrix(num_classes=num_classes,
                                            label=y,
                                            predicted=predicted_y)
    return confusion_matrix


if __name__ == '__main__':
    # Set current directory to local directory
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    # Go through all bosch datasets
    datasets = ['b1']
    for datafile in datasets:
        feature_filename = 'feature_' + datafile + '.pkl'
        # Looking for processed feature data
        if os.path.exists(feature_filename):
            feature_file = open(feature_filename, mode='rb')
            feature_dict = pickle.load(feature_file)
            feature = AlFeature()
            feature.load_from_dict(feature_dict)
        else:
            feature = load_casas_from_file(datafile, datafile + '.translate')
            feature_file = open(feature_filename, mode='wb')
            pickle.dump(feature.export_to_dict(), feature_file, protocol=-1)
        feature_file.close()
        run_test(feature)
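run_test here feeds the loaded features to the evaluation helper shown at the top of the example, which calls get_confusion_matrix. As a rough, self-contained illustration of what such a matrix contains (rows for true labels and columns for predictions is assumed; the function below is made up for the sketch and is not the actlearn API):

import numpy as np

def confusion_matrix(num_classes, label, predicted):
    """Count how often each true class is predicted as each class (assumed layout)."""
    matrix = np.zeros((num_classes, num_classes), dtype=int)
    for true_c, pred_c in zip(label, predicted):
        matrix[int(true_c), int(pred_c)] += 1
    return matrix

y_true = [0, 0, 1, 2, 2, 2]
y_pred = [0, 1, 1, 2, 2, 0]
print(confusion_matrix(3, y_true, y_pred))
# [[1 1 0]
#  [0 1 0]
#  [1 0 2]]
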
Example #3
for c in range(len(overall_list_title)):
    overall_sheet.write(0, c, str(overall_list_title[c]))
dataset_list_title = ['activities'] + performance_index
# Go through all bosch datasets
datasets = ['b1', 'b2', 'b3']
for datafile in datasets:
    feature_filename = 'feature_' + datafile + '.pkl'
    # Looking for processed feature data
    if os.path.exists(feature_filename):
        feature_file = open(feature_filename, mode='rb')
        feature_dict = pickle.load(feature_file)
        feature = AlFeature()
        feature.load_from_dict(feature_dict)
    else:
        feature = load_casas_from_file(datafile,
                                       datafile + '.translate',
                                       normalize=False,
                                       per_sensor=False)
        feature_file = open(feature_filename, mode='wb')
        pickle.dump(feature.export_to_dict(), feature_file, protocol=-1)
    feature_file.close()
    num_samples = feature.x.shape[0]
    train_index = []
    test_index = []
    # Alternative interleaved split (every third sample to the test set):
    # for j in range(num_samples):
    #     if j % 3 == 0:
    #         test_index.append(j)
    #     else:
    #         train_index.append(j)
    # Hold out the most recent third of the samples as the test set
    num_test = num_samples // 3
    test_index = range(num_samples - num_test, num_samples)
    train_index = range(num_samples - num_test)
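The last few lines keep the most recent third of the event stream for testing rather than interleaving every third sample as in the commented-out variant. A tiny self-contained illustration of both splits, using hypothetical helper names and Python 3 integer division:

def chronological_split(num_samples):
    """Hold out the most recent third of the stream; temporal order is preserved."""
    num_test = num_samples // 3
    train_index = list(range(num_samples - num_test))
    test_index = list(range(num_samples - num_test, num_samples))
    return train_index, test_index

def interleaved_split(num_samples):
    """The commented-out alternative: every third sample goes to the test set."""
    test_index = [j for j in range(num_samples) if j % 3 == 0]
    train_index = [j for j in range(num_samples) if j % 3 != 0]
    return train_index, test_index

print(chronological_split(10))  # ([0, 1, 2, 3, 4, 5, 6], [7, 8, 9])
print(interleaved_split(10))    # ([1, 2, 4, 5, 7, 8], [0, 3, 6, 9])
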
Example #4
import os
import pickle

# AlFeature and load_casas_from_file are also used below; they come from the
# actlearn package, whose exact module paths are not shown in this snippet.
from actlearn.utils.event_bar_plot import event_bar_plot

if __name__ == '__main__':
    # Set current directory to local directory
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    # Go through all bosch datasets
    datasets = ['b1']
    for datafile in datasets:
        feature_filename = 'feature_' + datafile + '.pkl'
        # Looking for processed feature data
        if os.path.exists(feature_filename):
            feature_file = open(feature_filename, mode='rb')
            feature_dict = pickle.load(feature_file)
            feature = AlFeature()
            feature.load_from_dict(feature_dict)
        else:
            feature = load_casas_from_file(datafile,
                                           datafile + '.translate',
                                           dataset_dir='../../datasets/bosch/')
            feature_file = open(feature_filename, mode='wb')
            pickle.dump(feature.export_to_dict(), feature_file, protocol=-1)
        feature_file.close()
        # feature.save_data_as_xls('tmp.xls', 0)
        # event_bar_plot(feature.time[0:10000], feature.y[0:10000], feature.num_enabled_activities,
        #                classified=feature.y[1:10001], ignore_activity=feature.activity_list['Other_Activity']['index'])
        event_bar_plot(
            feature.time[0:100000],
            feature.y[0:100000],
            feature.num_enabled_activities,
            ignore_activity=feature.activity_list['Other_Activity']['index'])
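event_bar_plot is an actlearn utility; only the call signature used above is known from this snippet. As a rough stand-in for the same kind of picture, activity labels over time can be drawn with a matplotlib step plot. Everything below, including the synthetic labels and the masking of the ignored class, is illustrative rather than the library's implementation.

import numpy as np
import matplotlib.pyplot as plt

# Synthetic stand-ins for feature.time and feature.y
time = np.arange(300)
labels = np.random.randint(0, 5, size=300)  # 5 activity classes, class 0 plays the role of "Other"
ignore_activity = 0

mask = labels != ignore_activity            # drop the ignored class, as in the call above
plt.step(time[mask], labels[mask], where='post')
plt.xlabel('event index / time')
plt.ylabel('activity index')
plt.title('Activity labels over time (illustrative)')
plt.show()
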
Example #5
overall_list_row = 0
for c in range(len(overall_list_title)):
    overall_sheet.write(0, c, str(overall_list_title[c]))
dataset_list_title = ['activities'] + per_class_performance_index
# Go through all bosch datasets
datasets = ['b1']
for datafile in datasets:
    feature_filename = 'feature_' + datafile + '.pkl'
    # Looking for processed feature data
    if os.path.exists(feature_filename):
        feature_file = open(feature_filename, mode='rb')
        feature_dict = pickle.load(feature_file)
        feature = AlFeature()
        feature.load_from_dict(feature_dict)
    else:
        feature = load_casas_from_file(datafile, normalize=False, per_sensor=False)
        feature_file = open(feature_filename, mode='wb')
        pickle.dump(feature.export_to_dict(), feature_file, protocol=-1)
    feature_file.close()
    num_samples = feature.x.shape[0]
    train_index = []
    test_index = []
    week_array = get_boundary(feature, period='week')
    learning_result_fname = 'dt_learning_' + datafile + '.pkl'
    learning_result = AlResult(result_name='%s decision tree' % datafile,
                               data_fname=datafile, mode='by_week')
    if os.path.exists(learning_result_fname):
        learning_result.load_from_file(learning_result_fname)
    # Train on all data before each weekly boundary and test on the week that follows
    for week_id in range(len(week_array) - 1):
        train_index = range(0, week_array[week_id])
        test_index = range(week_array[week_id], week_array[week_id + 1])
        decision_tree = DecisionTree(feature.x.shape[1], feature.num_enabled_activities)
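The weekly loop above trains on everything seen before each week boundary and tests on the week that follows, a growing-window evaluation. A sketch of that scheme, with scikit-learn's DecisionTreeClassifier standing in for the project's DecisionTree class and with synthetic data and week boundaries:

import numpy as np
from sklearn.tree import DecisionTreeClassifier

# Synthetic features, labels, and week boundaries (indices where each week starts)
rng = np.random.RandomState(0)
x = rng.rand(500, 20)
y = rng.randint(0, 4, size=500)
week_array = [0, 100, 200, 300, 400, 500]

# Start at week_id = 1 so the training window is never empty
for week_id in range(1, len(week_array) - 1):
    train_end = week_array[week_id]       # train on everything before this week
    test_end = week_array[week_id + 1]    # test on the week itself
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(x[:train_end], y[:train_end])
    accuracy = clf.score(x[train_end:test_end], y[train_end:test_end])
    print('week %d: accuracy %.3f' % (week_id, accuracy))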