Example #1
    def __init__(self):
        self.caseid = 0
        self.bayes_score = 50  # init value of bayes score

        self.numRadarElem = 6  # number of elements in radar plot
        self.radar = [50] * self.numRadarElem  # init value of radar plot
        self.radarName = [
            'Attention', 'Self Control', 'Speed', 'Sensitivity',
            'Cautiousness', 'Steadiness'
        ]

        self.percentileElem = 3  # number of elements in percentile plot
        self.percentile = [50] * self.percentileElem  # init value of percentile plot
        self.percentileName = [
            'Concentration', 'Stability', 'Focus Continuity'
        ]

        self.motion_box_numpoints = 3900  # length is not fixed at the moment
        self.motion_box = np.zeros((self.motion_box_numpoints, 2))
        self.motion_index_points = 390  # length is not fixed at the moment
        self.motion_index = [0] * self.motion_index_points

        self.roseBinNum = 360
        self.roseValue = [0] * self.roseBinNum

        self.cpt_response_min = [
            np.arange(52) * 1.5 * 10 / 60, [], []
        ]  # list0 for correct, list1 for commission, list2 for omission

        self.db = db_manager()
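The defaults above can be reproduced in a standalone sketch; the enclosing class and db_manager are assumptions not shown in the example, so the snippet below only mirrors the plot-state initialisation for illustration.

# Illustrative sketch only: mirrors the defaults set in the __init__ above.
# The enclosing class and db_manager are assumed, not shown in the example.
import numpy as np

radar_names = ['Attention', 'Self Control', 'Speed', 'Sensitivity',
               'Cautiousness', 'Steadiness']
radar = [50] * len(radar_names)            # every radar axis starts at 50

percentile_names = ['Concentration', 'Stability', 'Focus Continuity']
percentile = [50] * len(percentile_names)  # every percentile starts at 50

# time axis for correct responses, copied from the __init__ above
cpt_response_min = [np.arange(52) * 1.5 * 10 / 60, [], []]

print(dict(zip(radar_names, radar)))
print(cpt_response_min[0][:5])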
Example #2
def main(testCaseIds, testdb_name='webdarintest', write_db=False):
    """ Calculate features for incoming test cases.
	
	Args:
		testCaseIds: list. Raw data for case ids in the array must be found in testdb. 
		testdb_name: name of the testing database to fetch raw data and store features. Should contain raw data (hmd_data) for test cases.
		write_db: if true, write result to database (features for testCaseIds)
	"""

    # obtain raw data
    mydb = db_manager(testdb_name)
    data = get_hmd_data(testCaseIds, mydb)

    # compute features
    features = process_feature(testCaseIds, data)

    # insert to db
    if write_db:
        insert_features(features, mydb)

    return features
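A minimal call sketch for this entry point, with made-up case ids and write_db left off so nothing is written back, might look like:

# Hypothetical usage of the feature-calculation main() above; the case ids
# are invented for illustration and write_db stays False.
features = main([101, 102, 103], testdb_name='webdarintest', write_db=False)
print(features)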
def get_individual_data(CaseIds, modelIOStruct=None, tableNames=None,
                        fieldNames=None, whereClauses=None, primary_keys=None):
    # Note: cursor_rnd, cursor_web, traindb_name and testdb_name are free
    # variables supplied by the enclosing scope in the original source.
    isTrainingSet = CaseIds[:, 1]
    numTest = np.sum(isTrainingSet == 0)
    CaseIds = CaseIds[:, 0]

    try:
        if modelIOStruct is not None:
            tableNames, fieldNames, whereClauses, primary_keys = modelIOStruct.getInput()

        if tableNames is None and fieldNames is None:
            # default path: fetch each data source through the shared cursors
            dataCPT = get_cpt(CaseIds, isTrainingSet, cursor_rnd, cursor_web)
            dataSignal = get_signal(CaseIds, isTrainingSet, cursor_rnd, cursor_web)
            # dataSNAP = get_snap(CaseIds, db, cursor)
            dataSNAP = np.zeros((len(CaseIds), 1))
            # dataADHD = get_adhd([CaseIds[i] for i in np.where(isTrainingSet==1)[0]], db_rnd, cursor_rnd, cross_db=True)
            # testADHD = np.concatenate((np.asarray([CaseIds[i] for i in np.where(isTrainingSet==0)[0]]).reshape(numTest,1), np.zeros((numTest,1))), axis=1)
            # dataADHD = np.concatenate((dataADHD, testADHD), axis=0)
            dataADHD = get_adhd(CaseIds, isTrainingSet, cursor_rnd, cursor_web, cross_db=True)
            # one row per case, feature columns from every source side by side
            result = np.concatenate((dataADHD, dataCPT, dataSignal, dataSNAP), axis=1)
        elif len(tableNames) != len(fieldNames):
            print('tableNames and fieldNames must be the same length')
            raise Exception('tableNames and fieldNames must be the same length')
        else:
            mydb = db_manager()
            results = []
            # fetch training and test cases from their respective databases
            for dbname, isTrain in zip((traindb_name, testdb_name), [1, 0]):
                mydb.connect(dbname)
                df = get_model_input(modelIOStruct, CaseIds[isTrainingSet == isTrain], mydb)
                results.append(df)
            result = pd.concat(results, axis=0)

    except Exception as e:
        # keep the numeric error code but preserve the original cause
        raise Exception(4) from e

    return result
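The column-wise assembly at the end of the default branch can be shown in isolation; the per-source column counts below are made up, since they are not visible in the snippet.

# Sketch of the column-wise assembly in get_individual_data; shapes are illustrative.
import numpy as np

n_cases = 4
dataADHD = np.zeros((n_cases, 2))
dataCPT = np.zeros((n_cases, 6))
dataSignal = np.zeros((n_cases, 4))
dataSNAP = np.zeros((n_cases, 1))

result = np.concatenate((dataADHD, dataCPT, dataSignal, dataSNAP), axis=1)
print(result.shape)  # (4, 13): one row per case, the feature columns side by side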
def insert_bayes_probabilities_df(df, caseIds):
    """Insert the probabilities dataframe into the database for caseIds."""
    # Note: testdb_name is a free variable supplied by the enclosing scope.
    mydb = db_manager(testdb_name)
    mydb.insert_table(df, 'bayes_probabilities', index_name='CaseId', del_row_if_exist=True)
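insert_table is given a dataframe keyed by CaseId; a hedged sketch of building such a frame (the probability column names are invented for illustration) would be:

# Hypothetical probabilities frame keyed by CaseId; the column names are invented.
import pandas as pd

probs = pd.DataFrame(
    {'ProbADHD': [0.72, 0.31], 'ProbControl': [0.28, 0.69]},
    index=pd.Index([101, 102], name='CaseId'),
)
print(probs)
# probs would then be passed to insert_bayes_probabilities_df(probs, probs.index.tolist())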
def main(testCaseIds,
         traindb_name='vrclassroomdata',
         testdb_name='webtest',
         use_training_cases=False,
         write_db=False,
         train_features=None,
         test_features=None):
    """ calculate percentile for incoming test cases, based on training set
	
	Args:
		testCaseIds: array-like. Features for case ids in the array must be found in testdb. 
		traindb_name: name of the training database to fetch training set. Should contain features for training cases
		testdb_name: name of the testing database to store percentile. Should contain features for test cases.
		use_training_cases: if true, training features are the used for testing as well (append to the end). ``traindb_name`` and ``testdb_name`` must be the same for this to take effect.
		write_db: if true, write result to database (percentile for testCaseIds)
		train_features: if specified, use the input data instead of fetching from traindb_name
		test_features: if specified, use the input data instead of fetching from testdb_name
	"""

    # input check
    if use_training_cases and (traindb_name != testdb_name):
        raise ValueError(
            "``traindb_name`` and ``testdb_name`` must be the same")

    # initiate db connection
    mydb_train = db_manager(traindb_name)
    mydb_test = db_manager(testdb_name)

    # define field names for input and output
    input_tables = ['head_features', 'cpt_output_results']
    input_fields = [['PathLen', 'TimeActive', 'NumRot', 'TotalDeg'],
                    [
                        'OmissionErrors', 'CommissionErrors', 'TargetsRT',
                        'TargetsRtVariability', 'CommissionErrorsRT',
                        'CommissionErrorsRtVariability'
                    ]]
    input_where = ['', 'where block=0']
    input_primarykey = ['CaseId', 'CaseId']
    output_fields = []
    for input_field in input_fields:
        output_fields += ['Per' + x for x in input_field]
    output_fields += ['CaseId', 'BlockNum']

    # get training caseids
    trainCaseIds = get_training_ids(mydb_train)
    if len(trainCaseIds) == 0:
        raise RuntimeError('cannot get training ids from training set table')

    if use_training_cases:
        testCaseIds += trainCaseIds

    # fetch features for training & testing
    modIO = modelIO(input_tables, input_fields, input_where, input_primarykey)
    if train_features is None:
        train_features = get_model_input(modIO, trainCaseIds, mydb_train)
    if test_features is None:
        test_features = get_model_input(modIO, testCaseIds, mydb_test)

    # calculate percentile for all fields
    percentiles = []
    for field in modIO.getAllFields():
        percentiles.append(
            percentileCalc(train_features[field], test_features[field]))
    percentiles.append(testCaseIds)
    percentiles.append(np.zeros(len(testCaseIds)))
    percentiles = np.stack(percentiles, axis=1)
    result = pd.DataFrame(percentiles,
                          columns=output_fields,
                          index=testCaseIds)

    # insert to db
    if write_db:
        insert_percentile(result, mydb_test)
    return result
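percentileCalc itself is not shown above; assuming it maps each test value to its percentile rank within the training distribution, a pure-numpy sketch could look like:

# Assumed sketch of percentileCalc: rank each test value against the training
# distribution. The real implementation is not shown, so this is illustrative only.
import numpy as np


def percentile_calc_sketch(train_values, test_values):
    train = np.sort(np.asarray(train_values, dtype=float))
    test = np.asarray(test_values, dtype=float)
    # fraction of training values at or below each test value, as a percentage
    return np.searchsorted(train, test, side='right') / len(train) * 100.0


print(percentile_calc_sketch([1, 2, 3, 4, 5], [2.5, 5.0]))  # [ 40. 100.]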