Example No. 1
# argv comes from sys; Data and display come from the surrounding project module.
from sys import argv

def main():
    if len(argv) != 2:
        print('Usage: %s [data_file.csv]' % argv[0])
        exit()
    data = Data(argv[1])
    data.guess_thetas()  # fit the thetas (model parameters)
    data.save()
    display(data)
Example No. 2
# threading and time are standard library; Data, Source and the subsystem
# classes come from the surrounding project.
def main():
    # global data object shared by every subsystem
    database = Data()
    database.debug_flag = False
    # set debug_flag to True to test from keyboard input
    # database.debug_flag = True

    # sensor data
    data_source = Source(database)
    lidar_source_thread = threading.Thread(
        target=data_source.lidar_stream_main)
    left_cam_source_thread = threading.Thread(
        target=data_source.left_cam_stream_main)
    right_cam_source_thread = threading.Thread(
        target=data_source.right_cam_stream_main)
    mid_cam_source_thread = threading.Thread(
        target=data_source.mid_cam_stream_main)

    lidar_source_thread.start()
    left_cam_source_thread.start()
    right_cam_source_thread.start()
    mid_cam_source_thread.start()

    # Subroutines
    monitoring = Monitoring(data_source, database)
    platform = CarPlatform('COM5', database)  # PLEASE CHECK YOUR COMPORT
    sign_cam = SignCam(data_source, database)
    planner = MotionPlanner(data_source, database)
    control = Control(database)

    monitoring_thread = threading.Thread(target=monitoring.main)
    platform_thread = threading.Thread(target=platform.main)
    sign_cam_thread = threading.Thread(target=sign_cam.main)
    planner_thread = threading.Thread(target=planner.main)
    control_thread = threading.Thread(target=control.main)

    monitoring_thread.start()
    platform_thread.start()
    sign_cam_thread.start()
    planner_thread.start()
    control_thread.start()

    while True:
        time.sleep(0.1)
        if database.is_all_system_stop():
            break
    time.sleep(2)
    return 0
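
The shutdown logic in Example No. 2 simply polls a shared stop flag from the main thread. A minimal standalone sketch of the same pattern, with a stand-in class replacing the project's Data object:

import threading
import time

class StopFlag:
    # Stand-in for Data.is_all_system_stop(); the real logic lives in Data.
    def __init__(self):
        self.stopped = False

    def is_all_system_stop(self):
        return self.stopped

flag = StopFlag()
# Flip the flag after half a second, the way a worker thread would on shutdown.
threading.Timer(0.5, lambda: setattr(flag, 'stopped', True)).start()
while not flag.is_all_system_stop():
    time.sleep(0.1)
print('all systems stopped')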
Example No. 3
# request and jsonify come from Flask; pandas, metadata_all and Data come
# from the surrounding application module.
def data_info():
    new_variable = request.args.get("new_variable", default=None, type=str)
    variable_list_string = request.args.get("variable_list_string", default=None, type=str)

    global variable_list
    variable_list = variable_list_string.split('||')
    number_of_variables = len(variable_list)

    plottype_list = list()
    for var in variable_list:
        metadata_variable_series = metadata_all[var]
        metadata_variable = pandas.DataFrame(metadata_variable_series)

        is_plottype = metadata_variable.index == "plottype"
        plottype_df = metadata_variable[is_plottype]
        # .ix was removed from pandas; .iloc does the same positional lookup
        plottype = str(plottype_df.iloc[0, 0])

        # add each plottype to the 'plottype' list:
        plottype_list.append(plottype)

    # create a list with all the UNIQUE plottypes:
    unique_plottypes = list(set(plottype_list))

    selected_data = Data(variable_list, unique_plottypes, number_of_variables)

    d = {'all_plot_types': selected_data.all_plot_types,
         'number_of_variables': selected_data.number_of_variables,
         'new_variable': new_variable,
         'variable_list': variable_list}
    return jsonify(**d)
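
The variable list arrives as a single '||'-delimited query parameter, so the split is plain string handling; a quick standalone check with a made-up query value:

variable_list_string = "temperature||pressure||humidity"  # made-up sample value
variable_list = variable_list_string.split('||')
print(len(variable_list), variable_list)  # 3 ['temperature', 'pressure', 'humidity']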
Example No. 4
    def get_embeddings(self, use_eigenvalues=False):
        '''Assigns the trained embeddings as a property to the evaluator object'''
        if use_eigenvalues:
            print("Reading embeddings (eigenvalues used)")
        else:
            print("Reading embeddings")

        embeddings = self.embeddings
        embeddings = copy.deepcopy(embeddings)

        if use_eigenvalues:
            e_vals = self.e_vals
            embeddings = np.dot(embeddings, np.diag(np.sqrt(e_vals)))
            self.ev_embs_per_lang = Data.get_embeddings_per_lang(
                self.training_languages, self.emb_index_2_word_id_per_lang,
                embeddings)
        else:
            self.embs_per_lang = Data.get_embeddings_per_lang(
                self.training_languages, self.emb_index_2_word_id_per_lang,
                embeddings)
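
Weighting the embedding columns by the square roots of the eigenvalues is an ordinary matrix product; a minimal NumPy check of the same operation on toy values:

import numpy as np

embeddings = np.ones((3, 2))   # toy 3 x 2 embedding matrix
e_vals = np.array([4.0, 9.0])  # toy eigenvalues
weighted = np.dot(embeddings, np.diag(np.sqrt(e_vals)))
print(weighted)                # first column scaled by 2, second by 3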
Example No. 5
def create_object(item):
    new_list = item.split(",")
    if new_list[9] == "":
        return None
    try:
        # nine numeric fields followed by a string label
        line = Data(float(new_list[0]), float(new_list[1]),
                    float(new_list[2]), float(new_list[3]),
                    float(new_list[4]), float(new_list[5]),
                    float(new_list[6]), float(new_list[7]),
                    float(new_list[8]), new_list[9])
        return line
    except ValueError:
        return None
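
A hedged usage sketch, assuming the project's Data class is importable and the row format is nine numeric fields followed by a string label (the sample row is made up):

sample = "1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,label-a"  # hypothetical CSV row
obj = create_object(sample)
print(obj is None)  # False for a well-formed row; True if a field fails float()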
Example No. 6
def main():

    checkpoint_file = \
        '/data/zhanglab/afeeney/inception_resnet_v2_2016_08_30.ckpt'

    scenes_path = '/data/zhanglab/afeeney/7scenes/'

    scene = 'heads/'

    image_size = 299

    num_classes = 7

    set_data_path(scenes_path)
    train_data = load_training_data(scene, 1000)
    set_data_path(scenes_path)
    test_data = load_test_data(scene, 1000)

    data = Data(train_data, test_data)

    print('starting training')

    save_location = '/data/zhanglab/afeeney/checkpoints/'
    scene_name = scene[0:-2]

    train(checkpoint_file,
          save_location,
          scene_name,
          data,
          batch_size=40,
          num_epochs=60,
          verbose=True)

    print('starting testing')

    distance_error, angle_error = test(data, save_location)

    print('finished testing')

    print('distance: ' + str(distance_error) + ' angle: ' + str(angle_error))
Example No. 7
import tensorflow as tf

# physical_devices is assumed to come from a GPU query like this one:
physical_devices = tf.config.experimental.list_physical_devices('GPU')
try:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
except Exception:  # no GPU present, or the device is already initialized
    print('NO GPU')

import numpy as np
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
from data_class import Data
from sklearn.utils import class_weight
from tf_models import NN_models
from sklearn.metrics import recall_score, precision_score
from gensim.models import Word2Vec
import funs
###read data
data_path = './data/'
data = Data(data_path)

###extract SVO from data
max_len = 20
pos_srl_SVO = {}
neg_srl_SVO = {}

pos_srl_tag = {}
neg_srl_tag = {}

for sent in data.all_pos_sents_srl:
    for v in data.all_pos_sents_srl[sent]:
        if 'V' in data.all_pos_sents_srl[sent][v]:
            srl_sent, srl_tag = data.get_SVO_from_srl(
                data.all_pos_sents_srl[sent], v)
Example No. 8
def test_data():
    obj = Data('data/data.grib', '2t')
    obj.values = np.arange(100 * 24)
    obj.values = np.reshape(obj.values, (24, 10, 10))
    obj.lat = np.arange(9, -1, -1)
    obj.lon = np.arange(10)
    obj.name = 'test'

    assert np.mean(obj.ann_mean(1979)) == 599.5

    assert np.mean(obj.get_time_mean()) == 1199.5

    assert np.mean(obj.get_space_mean()) == 614.1830350238611

    lat, lon = obj.get_coord()
    assert np.array_equal(lat, np.arange(9, -1, -1))
    assert np.array_equal(lon, np.arange(10))

    assert obj.get_name() == 'test'

    data, lat, lon = obj.reduce_grid(obj.values, 4, 6, 4, 6)
    assert np.mean(data) == 399.5

    assert np.mean(obj.get_mean_per_season()) == 1199.5
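
Two of the assertions can be checked by hand from the synthetic grid alone: the full array holds the values 0..2399, whose mean is (0 + 2399) / 2 = 1199.5, and, assuming ann_mean(1979) averages the first twelve monthly fields, those hold 0..1199 with mean 599.5. A quick NumPy confirmation:

import numpy as np

vals = np.arange(100 * 24).reshape(24, 10, 10)
print(vals.mean())       # 1199.5, matching the get_time_mean() check
print(vals[:12].mean())  # 599.5, matching the ann_mean(1979) check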
Example No. 9
def start_from_u_turn(data: Data):
    data._mission_checklist[1] = True
Example No. 10
    def __init__(self,
                 results_obj_name,
                 languages_to_evaluate,
                 vocabulary_size,
                 test_set_flag,
                 target_concepts_suffix,
                 validation_set_file_name=None):
        '''Initialized with the name of a results_obj and a list of languages that you would like to evaluate. By default the validation object is retrieved from the results object, but another validation object can be passed optionally (assuming that it contains a validation concept set for each pair in the languages to evaluate)'''
        data_obj = Data()

        if validation_set_file_name is None:
            data_obj.load_validation(
                results_obj_dump_name=results_obj_name,
                vocabulary_size=vocabulary_size,
                test_set_flag=test_set_flag,
                target_concepts_suffix=target_concepts_suffix)
        else:
            print('Validation_set_file_name:', validation_set_file_name)
            data_obj.load_validation(
                results_obj_dump_name=results_obj_name,
                vocabulary_size=vocabulary_size,
                validation_set_file_name=validation_set_file_name,
                test_set_flag=test_set_flag,
                target_concepts_suffix=target_concepts_suffix)

        self.results_obj_name = results_obj_name
        self.embedding_types = {'baseline', 'learned', 'ev_learned'}
        # self.data_obj = data_obj
        self.embeddings = data_obj.embeddings
        self.training_languages = data_obj.training_languages
        self.e_vals = data_obj.e_vals
        self.training_params = data_obj.results_dump_obj['parameters']
        self.validation_set_file_name = data_obj.validation_set_file_name

        # languages to consider when doing evaluation
        self.lang_codes_to_evaluate = sorted(languages_to_evaluate)

        # languages for which there are embeddings available in the results object
        self.validation_lang_codes = sorted(data_obj.lang_codes)

        # checks whether all of the languages that we want to evaluate actually have trained embeddings
        assert (len(languages_to_evaluate) == len(
            set(languages_to_evaluate) & set(self.validation_lang_codes)))

        # raw data, index_2_word, index_2_concept, concept_2_index
        self.Z_per_lang = data_obj.Z_per_lang
        self.val_index_2_concept_id_per_lang = data_obj.val_index_2_concept_id_per_lang
        self.val_index_2_word_id_per_lang = data_obj.val_index_2_word_id_per_lang

        concept_id_2_index_per_lang = {}

        for lang_code in self.lang_codes_to_evaluate:
            concept_id_2_index_per_lang[lang_code] = {
                concept_id: idx
                for idx, concept_id in
                self.val_index_2_concept_id_per_lang[lang_code].items()}

        self.concept_id_2_index_per_lang = concept_id_2_index_per_lang

        self.emb_index_2_word_id_per_lang = data_obj.emb_index_2_word_id_per_lang
        self.emb_case_folding_flag = data_obj.emb_case_folding_flag

        # generating a word_id_2_idf dictionary per language
        idf_per_lang = data_obj.idf_per_lang
        word_id_2_idf_per_lang = {}

        for lang_code in self.lang_codes_to_evaluate:
            idf = idf_per_lang[lang_code]
            emb_index_2_word_id = self.emb_index_2_word_id_per_lang[lang_code]

            word_id_2_idf = {}
            for index, word_id in emb_index_2_word_id.items():
                word_id_2_idf[word_id] = idf[index]

            word_id_2_idf_per_lang[lang_code] = word_id_2_idf

        self.word_id_2_idf_per_lang = word_id_2_idf_per_lang
        self.validation_set = data_obj.validation_set

        self.doc_embs_per_lang = {}
        self.ranking_dfs = {}
        self.concept_id_2_concept_name = \
            utils.get_concept_id_concept_name_mapping()
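
The concept_id_2_index_per_lang construction above just inverts each index-to-concept-id dictionary; a minimal standalone illustration of the pattern (the keys are toy values, not real concept ids):

index_2_concept_id = {0: 'c17', 1: 'c42'}
concept_id_2_index = {concept_id: idx
                      for idx, concept_id in index_2_concept_id.items()}
print(concept_id_2_index)  # {'c17': 0, 'c42': 1}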
Example No. 11
def main():
    #variables
    temp_sum = 0
    time_value = 0.3
    #new files
    file_creation()
    pin_num = input("Enter the button pin number: ")  # physical-computing prototype only
    print("Only for physical computing purposes ^^")  # remove later
    print(" ")
    #Button_Creation(pin_num)
    init_data = Data(init_temp1, init_gsr, init_heart)
    x = 30
    xx = 50
    y = 26
    z = 60
    therm_control.main()
    s_temp1 = therm_control.read()
    while True:
        # all values below will be read from sensors to create a new class object
        #s_temp1 = therm_control.read()
        #s_gsr = GSR.read() #still need to setup GSR sensor code
        #s_hbeat = PHOTORESIST.read()
        #values meant for testing code:
        #s_temp1 = x
        s_gsr = y
        s_hbeat = z #info will come from heart_rate_control.py
        
        new_sensor_data = Data(s_temp1, s_gsr, s_hbeat)
        returned_avg = temp_average(new_sensor_data, 0)
        temp_warning = temp_analysis(returned_avg, new_sensor_data)
        gsr_warning = gsr_analysis(new_sensor_data)
        heart_warning = heart_analysis(new_sensor_data)
        
        #printing data if it meets warning thresholds:
        if None not in (temp_warning, gsr_warning, heart_warning):  # all three analyses flagged a warning
            print("You may be experiencing an episode of autonomic dysreflexia")
            time.sleep(time_value)
            print("Please notify a physician or caretaker immediately")
            time.sleep(time_value)
            print("Remove tight clothing, sit upright, & empty your bladder if possible")
            time.sleep(time_value)
            #avg = (new_sensor_data.get_temp1() + new_sensor_data.get_temp2())/2
            print("Your current approximate skin temperature is: ", returned_avg, "degrees C")
            time.sleep(time_value)
            print("Your current heartrate is approximately: ", new_sensor_data.get_heart(), "BPM")
            time.sleep(time_value)
            print("You current GSR reading is approximately: ", new_sensor_data.get_gsr())
            time.sleep(time_value)
            print("To accept this information, press the button.")
            input() #simulation of pressing button
        else:
            #printing measured data, regardless of value:
            #avg_temp = (new_sensor_data.get_temp1()+new_sensor_data.get_temp2()/2)
            avg_temp = temp_average(new_sensor_data, 0)
            current_heart = new_sensor_data.get_heart()
            current_gsr = new_sensor_data.get_gsr()
            print("Your current temperature is: ", avg_temp, "C")
            time.sleep(time_value)
            print("your current heart rate is: ", current_heart, "BPM")
            time.sleep(time_value)
            print("Your current GSR reading is: ", current_gsr) #must add GSr unit - not sure atm
            print(" ")
            time.sleep(3.5)
 
            
        s_temp1 += 1  # simulated changes in sensor readings
        y -= 1        # simulated
        z += 5        # simulated
        '''button.wait_for_press #not sure if necessary - will need to keep collecting data
        print("The device will continue collecting data.")'''
        time.sleep(0.3)
Example No. 12
                           (500 + int(car_width / 2), 0), (0, 0, 255), 2)
            img = cv2.putText(img, "%d" % distance, (
                int(500 + distance * math.cos(min_theta * math.pi / 360)),
                500 - distance,
            ), cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 0))
            img = cv2.resize(img, (684, 342))
            return img, distance


if __name__ == "__main__":
    import threading
    from control import Control
    from car_platform import CarPlatform
    from monitoring import Monitoring

    testDT = Data()
    """
    test code
    특정 미션 번호에서 시작하도록 함
    """
    testDT.current_mode = 1

    testDS = Source(testDT)
    car = CarPlatform('COM5', testDT)
    testMP = MotionPlanner(testDS, testDT)
    test_control = Control(testDT)
    monitor = Monitoring(testDS, testDT)

    lidar_source_thread = threading.Thread(target=testDS.lidar_stream_main)
    left_cam_source_thread = threading.Thread(
        target=testDS.left_cam_stream_main)
Example No. 13
def steer_right_test(test_data: Data):
    test_data.set_control_value(gear=SerialPacket.GEAR_NEUTRAL,
                                speed=SerialPacket.SPEED_MIN,
                                steer=SerialPacket.STEER_MAXRIGHT,
                                brake=SerialPacket.BRAKE_NOBRAKE)
Example No. 14
                                    steer=SerialPacket.STEER_MAXLEFT,
                                    brake=SerialPacket.BRAKE_NOBRAKE)

    def steer_straight_test(test_data: Data):
        test_data.set_control_value(gear=SerialPacket.GEAR_NEUTRAL,
                                    speed=SerialPacket.SPEED_MIN,
                                    steer=SerialPacket.STEER_STRAIGHT,
                                    brake=SerialPacket.BRAKE_NOBRAKE)

    def steer_right_test(test_data: Data):
        test_data.set_control_value(gear=SerialPacket.GEAR_NEUTRAL,
                                    speed=SerialPacket.SPEED_MIN,
                                    steer=SerialPacket.STEER_MAXRIGHT,
                                    brake=SerialPacket.BRAKE_NOBRAKE)

    test_data = Data()
    test_platform = CarPlatform('COM5', test_data)  # PLEASE CHECK YOUR COMPORT
    platform_thread = threading.Thread(target=test_platform.main)
    platform_thread.start()

    if test_data.read_packet.aorm == SerialPacket.AORM_AUTO:
        test_data.detected_mission_number = 1
        test_data.current_mode = 1
        print("mission num: ", test_data.detected_mission_number,
              test_data.current_mode)
        t = time.time()
        i = 1
        while True:
            print("read: ", test_data.car_platform_status())
            print("WRITE: ", test_data.write_packet.steer)
            if time.time() - t < 2:
Example No. 15
    def run_experiment(self):
        '''Completes an actual run of the full pipeline, with the parameters corresponding to the arguments passed in the constructor'''

        params = self.params
        print(params)

        cg_max_iter = 500
        eigs_max_iter = 250

        training_concepts_file_name = params['training_concepts_file_name']
        validation_set_file_name = params['validation_set_file_name']
        case_folding_flag = params['case_folding_flag']

        _lambda = params['lambda']

        cg_tol_1 = 10**(-1 * params['cg_tol_1'])
        eigs_tol_1 = 10**(-1 * params['eigs_tol_1'])

        # Same for now
        cg_tol_2 = 10**(-1 * params['cg_tol_2'])
        eigs_tol_2 = 10**(-1 * params['eigs_tol_2'])

        dims = params['dimensions']
        vocabulary_size = params['vocabulary_size']

        data_obj = Data()
        data_obj.load_training(training_concepts_file_name,
                               validation_set_file_name, case_folding_flag,
                               vocabulary_size)

        operations_obj = Operations(data_obj, _lambda, cg_max_iter, cg_tol_1)

        start = default_timer()

        try:
            vals, vecs = operations_obj.decompose_M_eigsh(
                dims, eigs_max_iter, eigs_tol_1)
        except ArpackError as e:
            try:
                print("ERROR occured!")
                print(e)
                vals, vecs = operations_obj.decompose_M_eigsh(
                    dims, eigs_max_iter, eigs_tol_1, True)
            except ArpackError as e:
                print("FAIL! Can't complete the decomposition!")
                return

        end = default_timer()
        time_elapsed = end - start
        print("Finished decomposition one: ", time_elapsed)

        training_outcome = {}

        training_outcome['e_vals'] = vals
        training_outcome['e_vecs'] = vecs

        results_obj = {}
        results_obj['training_outcome'] = training_outcome
        results_obj['parameters'] = params
        results_obj['data'] = data_obj.final_dataset_dump_name

        with open(self.results_dump_path, 'wb') as f:
            pickle.dump(results_obj, f, protocol=4)

        start = default_timer()

        try:
            vals_m2, vecs_m2 = operations_obj.decompose_M2_eigsh(
                dims, eigs_max_iter, eigs_tol_1)
            print(vals_m2)  # Visual sanity check
        except ArpackError as e:
            try:
                print("ERROR occured!")
                print(e)
                vals_m2, vecs_m2 = operations_obj.decompose_M2_eigsh(
                    dims, eigs_max_iter, eigs_tol_1, True)
            except ArpackError as e:
                print("FAIL! Can't complete the decomposition!")
                return

        end = default_timer()
        time_elapsed = end - start
        print("Finished decomposition two: ", time_elapsed)

        training_outcome['M2_e_vals'] = vals_m2
        training_outcome['M2_e_vecs'] = vecs_m2

        # training_outcome['cg_residuals'] = operations_obj.cg_residuals
        training_outcome['num_iter'] = operations_obj.num_iter
        # training_outcome['cg_residuals2'] = operations_obj.cg_residuals2
        training_outcome['num_iter2'] = operations_obj.num_iter2
        training_outcome['time_consumed'] = operations_obj.time_consumed

        results_obj['training_outcome'] = training_outcome

        with open(self.results_dump_path, 'wb') as f:
            pickle.dump(results_obj, f, protocol=4)

        self.logger.revert_standard_output()
        self.logger.log_run()
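
The tolerance parameters above are stored as positive integer exponents and converted with 10 ** (-1 * ...); a quick check of the conversion (the exponent 5 is a made-up example):

cg_tol_1 = 5                 # exponent as stored in params
tol = 10 ** (-1 * cg_tol_1)
print(tol)                   # 1e-05, the tolerance handed to the solver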
Example No. 16
        else:
            self.right_coefficients = None

        filtered_both = np.vstack((filtered_R, filtered_L))
        final = cv2.flip(cv2.transpose(filtered_both), 1)

        return cv2.cvtColor(final, cv2.COLOR_GRAY2BGR)


if __name__ == "__main__":
    import threading
    import time
    from dummy_data_source import DummySource

    testData = Data()
    # ------------------- Activate the code below when using dummy data ----------------------
    testDDS = DummySource('2018-11-17-08-14-08')
    testLC = LaneCam(testDDS)  # DummySource for test
    dummy_thread = threading.Thread(target=testDDS.main)
    dummy_thread.start()

    # ------------------- Activate the code below when using sensor data ----------------------
    # testDS = Source(testData)
    # lidar_source_thread = threading.Thread(target=testDS.lidar_stream_main)
    # left_cam_source_thread = threading.Thread(target=testDS.left_cam_stream_main)
    # right_cam_source_thread = threading.Thread(target=testDS.right_cam_stream_main)
    # mid_cam_source_thread = threading.Thread(target=testDS.mid_cam_stream_main)

    # lidar_source_thread.start()
    # left_cam_source_thread.start()
Example No. 17
        steer_now = (theta_1 + theta_2)

        adjust = 0.3

        steer_final = ((adjust * self.park_steer_past) +
                       ((1 - adjust) * steer_now))
        self.park_steer_past = steer_final

        steer = steer_final * 71
        if steer > 1970:
            steer = 1970
            self.park_steer_past = 27.746
        elif steer < -1970:
            steer = -1970
            self.park_steer_past = -27.746

        self.park_steer = steer
        return self.park_steer


# Test code; change the section below however you like to experiment.
if __name__ == '__main__':
    import threading

    test_data = Data()
    test_control = Control(test_data)
    control_thread = threading.Thread(target=test_control.main)

    control_thread.start()
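
The steering command above is a first-order low-pass filter (an exponential moving average) in which adjust weights the previous command; the blended angle is scaled by 71 and clamped to +/-1970, with the carried value reset to +/-27.746 (that is, 1970 / 71). A standalone sketch of the same filter:

def smooth_steer(past_deg, now_deg, adjust=0.3, scale=71, limit=1970):
    # Blend the previous and current steering angles, scale, then clamp.
    blended = adjust * past_deg + (1 - adjust) * now_deg
    command = max(-limit, min(limit, blended * scale))
    return command, command / scale  # command plus the angle to carry forward

cmd, past = smooth_steer(0.0, 40.0)
print(cmd, past)  # 1970 and about 27.7465 (clamped at the right limit)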
Example No. 18
def start_from_crosswalk(data: Data):
    for i in [1, 2]:
        data._mission_checklist[i] = True