Example #1
def ground_truth_callback(msg):
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y

    theta = get_rotation(msg.pose.pose.orientation)  # radians

    csv_writer('ground_truth', x, y, theta)
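For context, a minimal sketch of the two helpers this callback assumes, get_rotation and csv_writer; these are illustrative stand-ins, not the original project's code.
import csv

from tf.transformations import euler_from_quaternion

def get_rotation(orientation):
    # Convert a geometry_msgs/Quaternion into a yaw angle in radians.
    _, _, yaw = euler_from_quaternion(
        [orientation.x, orientation.y, orientation.z, orientation.w])
    return yaw

def csv_writer(label, x, y, theta):
    # Append one (x, y, theta) row to a per-label CSV file.
    with open(label + '.csv', 'a') as f:
        csv.writer(f).writerow([x, y, theta])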
Example #2
def odom_callback(msg):
    timestamp = msg.header.stamp
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y

    theta = get_rotation(msg.pose.pose.orientation)  # radians

    v = msg.twist.twist.linear.x
    w = msg.twist.twist.angular.z

    # Calculate elapsed time
    global previous_timestamp
    if previous_timestamp is None:
        dt = 1
    else:
        dt = timestamp.to_sec() - previous_timestamp.to_sec()

    previous_timestamp = timestamp

    # Predict robot state based on its velocities
    ekf.set_params(v, w, dt)
    ekf.predict()

    # Write data to CSV, note that room center is at 5,5
    csv_writer('odom', 5. + x, 5. + y, theta)
    csv_writer('ekf', (ekf.state_vector[0])[0], (ekf.state_vector[1])[0],
               (ekf.state_vector[2])[0])
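A minimal way to wire this callback into a node, assuming the standard rospy API; the node and topic names are illustrative, and ekf is whatever filter object the original script constructs.
import rospy
from nav_msgs.msg import Odometry

previous_timestamp = None  # read and updated by odom_callback via `global`

if __name__ == '__main__':
    rospy.init_node('ekf_odom_logger')
    rospy.Subscriber('/odom', Odometry, odom_callback)
    rospy.spin()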
Example #3
def ground_truth_callback(msg):
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y

    theta = get_rotation(msg.pose.pose.orientation)  # radians

    # Write data to CSV, note that room center is at 5,5
    csv_writer('ground_truth', 5. + x, 5. + y, theta)
Example #4
    def test_008_time_tuple(self):
        emitter = pdu_utils.message_emitter()
        writer = csv_writer('/tmp/file.csv',
                            True,
                            'time(time_tuple)',
                            'uint8',
                            precision=4)

        # generate time pair pdu
        time_tuple = pmt.make_tuple(pmt.from_uint64(1), pmt.from_double(0.0))
        metadata = pmt.dict_add(pmt.make_dict(), pmt.intern('time'),
                                time_tuple)
        expected = pmt.cons(metadata, pmt.PMT_NIL)

        # run
        tb = gr.top_block()
        tb.msg_connect((emitter, 'msg'), (writer, 'in'))
        tb.start()
        emitter.emit(expected)
        time.sleep(.5)
        tb.stop()
        tb.wait()

        # read in csv
        self.assertTrue(
            self.check_file('/tmp/file.csv',
                            expected,
                            data_type='uint8',
                            has_header=True))
Example #5
    def test_006_precision(self):
        emitter = pdu_utils.message_emitter()
        writer = csv_writer('/tmp/file.csv', False, '', 'float', precision=4)

        # generate pdu
        metadata = pmt.PMT_NIL
        data = pmt.init_f32vector(2, [1.111111] * 2)
        sent = pmt.cons(metadata, data)
        expected = pmt.cons(pmt.PMT_NIL, pmt.init_f32vector(2, [1.1111] * 2))

        # run
        tb = gr.top_block()
        tb.msg_connect((emitter, 'msg'), (writer, 'in'))
        tb.start()
        emitter.emit(sent)
        time.sleep(.5)
        tb.stop()
        tb.wait()

        # read in csv
        self.assertTrue(
            self.check_file('/tmp/file.csv',
                            expected,
                            data_type='float',
                            has_header=False))
Example #6
    def test_002_uint8_metadata_header(self):
        emitter = pdu_utils.message_emitter()
        writer = csv_writer('/tmp/file.csv', True, '', 'uint8')

        # generate pdu
        metadata = pmt.dict_add(pmt.make_dict(), pmt.intern('a'),
                                pmt.intern('a'))
        metadata = pmt.dict_add(metadata, pmt.intern('b'), pmt.from_long((0)))
        data = pmt.init_u8vector(5, [11, 12, 13, 14, 15])
        pdu = pmt.cons(metadata, data)

        # expected will only have intern strings
        metadata = pmt.dict_add(pmt.make_dict(), pmt.intern('a'),
                                pmt.intern('a'))
        metadata = pmt.dict_add(metadata, pmt.intern('b'), pmt.intern('0'))
        expected = pmt.cons(metadata, data)

        # run
        tb = gr.top_block()
        tb.msg_connect((emitter, 'msg'), (writer, 'in'))
        tb.start()
        emitter.emit(pdu)
        time.sleep(.5)
        tb.stop()
        tb.wait()

        # read in csv
        self.assertTrue(
            self.check_file('/tmp/file.csv', expected, has_header=True))
Example #7
def marker_callback(msg):
    """
    In SLAM only the range and heading are known, not beacon location
    """
    markersList = []
    # get beacons info
    for marker in msg.markers:
        markersList.append((
            np.sqrt(np.square(marker.pose.position.x) + np.square(marker.pose.position.y)),  # range
            get_rotation(marker.pose.orientation),  # theta in radians
            marker.ids[0]))

    # Posterior (update) step
    ekf.set_observed_features(markersList)
    ekf.predict()
    ekf.update()
    csv_writer('ekf_slam', (ekf.mean[0])[0], (ekf.mean[1])[0], (ekf.mean[2])[0])
Example #8
def marker_callback(msg):
    # rospy.loginfo("Marker message")
    timestamp = msg.header.stamp
    markersList = []
    # get beacons info
    for marker in msg.markers:
        markersList.append((
            marker.ids[0],
            (beacons[marker.ids[0] - 1])[0],
            (beacons[marker.ids[0] - 1])[1],
            marker.pose.position.x,
            marker.pose.position.y,
            get_rotation(marker.pose.orientation),  # theta in radians
            timestamp))

    # Propagate robot state
    pf.set_beacons(markersList)
    pf.propagate_state()
    csv_writer('pf', (pf.estimated_state_vector[0])[0],
               (pf.estimated_state_vector[1])[0],
               (pf.estimated_state_vector[2])[0])
Example #9
def odom_callback(msg):
    timestamp = msg.header.stamp
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y
    theta = get_rotation(msg.pose.pose.orientation)
    v = msg.twist.twist.linear.x
    w = msg.twist.twist.angular.z

    # Calculate elapsed time
    global previous_timestamp
    if previous_timestamp is None:
        dt = 1
    else:
        dt = timestamp.to_sec() - previous_timestamp.to_sec()

    previous_timestamp = timestamp

    # Prediction step
    ekf.set_params(v, w, dt)
    # ekf.predict()

    # Write results to csv
    csv_writer('odom', 5. + x, 5. + y, theta)
Example #10
def datasets():
    path = p.datasets_location
    datasets = []
    datasets_found = []
    for dirpath, dirs, files in os.walk(path):
        for filename in files:
            fname = os.path.join(dirpath, filename)
            dataset_name = [os.path.basename(fname)]
            datasets.append(fname)
            datasets_found.append(dataset_name)
    found_datasets = cw.csv_writer('datasets_found', '', '', '', '', '', '',
                                   p.datasets_found_save_path, datasets_found,
                                   'w', ['Datasets'], '', '', '', '', '', '',
                                   '', '')
    found_datasets.csv_writer_result()
    return datasets
Example #11
def get_data(alpha_zero, delta_alpha, alpha_value, seed, n, cache_size, type_cache, window_size, data, no_of_win, win_ind):
    random_query_save_path = ''
    if v.args.alpha is not None:
        random_query_save_path = p.random_query_save_path + 'perf_eval_input_queries_a.' + str(v.args.alpha) + '_with_full_path/'
    elif v.args.alpha is None:
        random_query_save_path = p.random_query_save_path + 'perf_eval_input_queries_a.' + str(v.args.astart)+'-'+str(v.args.astop) + '_with_full_path/'
    random_query_csv = []
    data = data
    b = np.ravel(data)
    iter = n
    i = 0
    alpha = alpha_value   # skewness parameter; the higher, the less skewed the distribution
    alpha_zero = alpha_zero
    delta_alpha = delta_alpha
    
    if alpha == 1.0:
        alpha = alpha + 0.1
    seed = seed
    
    n = len(b)
    if n == 0:
        print("No datasets found")
        sys.exit()
    f = lambda alpha,k,n: \
    (1 - alpha) / (1 - np.power(alpha,n)) * np.power(alpha,k)
    g = lambda k: f(alpha,k,n)
    k = range(0, n)
    ks = []
    for ki in k:
        ks.append(ki)
    dfks = pd.DataFrame(ks)
    dfp = dfks.apply(g)
    c = np.asanyarray(dfp)
    np.random.seed(seed)
    
    while i < iter:
        random_choice = [np.random.choice(data, p=np.ravel(c), replace=True)]
        random_query_csv.append(random_choice)
        i += 1
    if v.args.verbose >= 3:
        random_query = cw.csv_writer('random_queries_', 'az.'+str(round(alpha_zero, 1)) if alpha_zero else '', '_da.'+str(round(delta_alpha, 1)) if delta_alpha else '', '_a.'+str(round(alpha, 1)) if alpha else '', '_cs'+str(cache_size), '_'+ '_ct'+str(type_cache), '_s'+str(seed), random_query_save_path, random_query_csv, 'w', ['random_queries_with_full_path'], '_n'+str(iter), '', '_w'+str(window_size), '_nbw'+str(no_of_win), '', '', '', '_wi'+str(win_ind))
        random_query.csv_writer_result()
    return random_query_csv
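The weights passed to np.random.choice above follow a truncated geometric distribution; a standalone sketch of just that idea, with illustrative names:
import numpy as np

def skewed_probabilities(alpha, n):
    # (1 - alpha) / (1 - alpha**n) normalizes the geometric weights alpha**k
    # so the n probabilities sum to 1 (requires alpha != 1).
    k = np.arange(n)
    return (1 - alpha) / (1 - alpha ** n) * alpha ** k

probs = skewed_probabilities(0.8, 5)
print(probs, probs.sum())  # heavily weighted toward small k; sums to 1 (up to rounding)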
Example #12
    def test_001_uint8_no_header(self):
        emitter = pdu_utils.message_emitter()
        writer = csv_writer('/tmp/file.csv', False, '', 'uint8')

        # generate pdu
        metadata = pmt.PMT_NIL
        data = pmt.init_u8vector(5, [11, 12, 13, 14, 15])
        expected = pmt.cons(metadata, data)

        # run
        tb = gr.top_block()
        tb.msg_connect((emitter, 'msg'), (writer, 'in'))
        tb.start()
        emitter.emit(expected)
        time.sleep(.5)
        tb.stop()
        tb.wait()
        # read in csv
        self.assertTrue(
            self.check_file('/tmp/file.csv', expected, has_header=False))
Example #13
    def test_004_all_data_types(self):
        # data types and their constructors
        data_types = {
            'uint8': pmt.init_u8vector,
            'int8': pmt.init_s8vector,
            'uint16': pmt.init_u16vector,
            'int16': pmt.init_s16vector,
            'uint32': pmt.init_u32vector,
            'int32': pmt.init_s32vector,
            'float': pmt.init_f32vector,
            'complex float': pmt.init_c32vector,
            'double': pmt.init_f64vector,
            'complex double': pmt.init_c64vector
        }

        passed = True
        for data_type, init_func in data_types.items():
            emitter = pdu_utils.message_emitter()
            writer = csv_writer('/tmp/file.csv', False, '', data_type)

            # expected pdu
            data = init_func(5, [1, 2, 3, 4, 5])
            expected = pmt.cons(pmt.PMT_NIL, data)

            # run
            tb = gr.top_block()
            tb.msg_connect((emitter, 'msg'), (writer, 'in'))
            tb.start()
            emitter.emit(expected)
            time.sleep(.5)
            tb.stop()
            tb.wait()

            passed &= self.check_file('/tmp/file.csv',
                                      expected,
                                      has_header=False,
                                      data_type=data_type)

        self.assertTrue(passed)
Example #14
    def test_005_all_header_fields(self):
        emitter = pdu_utils.message_emitter()
        fields = 'field0(string),field1(bool),field2(long),field3(uint64)' + \
            ',field4(float),field5(double),field6(complex)'
        writer = csv_writer('/tmp/file.csv', True, fields, 'uint8')

        # generate pdu
        metadata = pmt.dict_add(pmt.make_dict(), pmt.intern('field0'),
                                pmt.intern('field0'))
        metadata = pmt.dict_add(metadata, pmt.intern('field1'),
                                pmt.from_bool(True))
        metadata = pmt.dict_add(metadata, pmt.intern('field2'),
                                pmt.from_long(0))
        metadata = pmt.dict_add(metadata, pmt.intern('field3'),
                                pmt.from_uint64(0))
        metadata = pmt.dict_add(metadata, pmt.intern('field4'),
                                pmt.from_float(0.0))
        metadata = pmt.dict_add(metadata, pmt.intern('field5'),
                                pmt.from_double(0.0))
        metadata = pmt.dict_add(metadata, pmt.intern('field6'),
                                pmt.from_complex(1.0 + 1.0j))

        data = pmt.init_u8vector(5, [11, 12, 13, 14, 15])
        expected = pmt.cons(metadata, data)

        # run
        tb = gr.top_block()
        tb.msg_connect((emitter, 'msg'), (writer, 'in'))
        tb.start()
        emitter.emit(expected)
        time.sleep(.5)
        tb.stop()
        tb.wait()

        # read in csv
        self.assertTrue(
            self.check_file('/tmp/file.csv', expected, has_header=True))
Example #15
    def test_009_float_precision(self):
        emitter = pdu_utils.message_emitter()
        fields = 'field0(long:03d),field1(uint64:03d),field2(float:.9f),field3(double:.9f),field4(complex:.9f)'
        writer = csv_writer('/tmp/file.csv',
                            True,
                            fields,
                            'uint8',
                            precision=4)

        # generate time pair pdu
        metadata = pmt.dict_add(pmt.make_dict(), pmt.intern('field0'),
                                pmt.from_long(1))
        metadata = pmt.dict_add(metadata, pmt.intern('field1'),
                                pmt.from_uint64(1))
        metadata = pmt.dict_add(metadata, pmt.intern('field2'),
                                pmt.from_float(1.234567890))
        metadata = pmt.dict_add(metadata, pmt.intern('field3'),
                                pmt.from_double(1.234567890))
        metadata = pmt.dict_add(metadata, pmt.intern('field4'),
                                pmt.from_complex(1.234567890 + 1.234567890j))
        expected = pmt.cons(metadata, pmt.PMT_NIL)

        # run
        tb = gr.top_block()
        tb.msg_connect((emitter, 'msg'), (writer, 'in'))
        tb.start()
        emitter.emit(expected)
        time.sleep(.5)
        tb.stop()
        tb.wait()

        # read in csv
        self.assertTrue(
            self.check_file('/tmp/file.csv',
                            expected,
                            data_type='uint8',
                            has_header=True))
Example #16
def comp_decomp(exp_ind, alpha_zero, delta_alpha, alpha, del_max, cache_size,
                type_cache, seed, n, result_location, window_size,
                cache_obj_lfu, cache_obj_fifo, dat, no_of_win, win_ind, mark):
    if v.args.verbose >= 1:  # Parsing data
        print('value of alpha is set to: ' + str(alpha))

    header = [
        'exp_ind', 'seed', 'cache_type', 'alpha_zero', 'delta_alpha', 'alpha',
        'da_amp', 'cache_size', 'n', 'comp_time', 'decomp_time', 'total_time',
        'comp_ratio', 'ds_size', 'ds_name', 'cached', 'win_size', 'nb_win',
        'win_ind'
    ]
    results_csv = []
    query_csv = []
    cache_type = []
    result_location = result_location
    window_size = window_size
    no_of_win = no_of_win
    exp_ind = exp_ind
    del_max = del_max
    mark = mark
    data = []

    # print ("Before data")
    # print (dat)
    # input ("enter to continue")

    if mark == 'normal':
        data = ul.get_data(alpha_zero, delta_alpha, alpha, seed, n, cache_size,
                           type_cache, window_size, dat, no_of_win, win_ind)
    elif mark == 'scss' or mark == 'rd':
        for d in dat:
            if mark == 'scss':
                data.append([d])
            elif mark == 'rd':
                data.append(d)
    # print ("After data")
    # print (data)
    # input ("enter to continue")
    if (type_cache == "lfu"):
        if v.args.verbose >= 3:
            print('Cache type is LFU')
            print('Creating LFU object')

        if v.args.cm is True:
            cache_obj = cache_obj_lfu
        elif v.args.cm is False:
            cache_obj = cache_lfu.cache_lfu()

        cache_obj.set_cache_size(cache_size)
        ctype = 'ctlfu'  # tag used in the result file name
        cache_type = type_cache
        result_location = result_location + '/lfu/'
    if (type_cache == "fifo"):
        if v.args.verbose >= 3:
            print('Cache type is FIFO')
            print('Creating FIFO object')

        if v.args.cm is True:
            cache_obj = cache_obj_fifo
        elif v.args.cm is False:
            cache_obj = cache_fifo.cache_fifo()

        cache_obj.set_cache_size(cache_size)
        ctype = 'ctfifo'  # tag used in the result file name
        cache_type = type_cache
        result_location = result_location + '/fifo/'

    if v.args.n is None:
        if v.args.rd == False:
            n = len(data)
        elif v.args.rd == True:
            n = n
    else:
        n = n

    cdc_obj = cdc.comp_decomp()  # Object of comp_decomp class
    cdc_obj.set_seed_alpha(exp_ind, seed, alpha_zero, delta_alpha, alpha,
                           del_max, cache_type, cache_size, n, window_size,
                           no_of_win, win_ind)

    cdcrd_obj = cdcrd.comp_decomp()  # Object of comp_decomp class
    cdcrd_obj.set_seed_alpha(exp_ind, cache_type, cache_size, n, window_size,
                             no_of_win, win_ind)

    if v.args.rd == True:
        header = [
            'exp_ind', 'cache_type', 'cache_size', 'n', 'comp_time',
            'decomp_time', 'total_time', 'comp_ratio', 'ds_size', 'ds_name',
            'cached', 'win_size', 'nb_win', 'win_ind'
        ]
        # print ("Data found is ")
        # print (data)
        # print ("N is ")
        # print (n)
        # print ("Length of data")
        # print (len(data))
        for i in range(0, n):  # loop over the n queries
            fname = data[i]  # Reading the names of datasets with full path
            query_csv.append([
                fname
            ])  # query_csv is a list of all the datasets after randomization
            if cache_obj.if_in_cache(str(
                    fname)) == True:  # check if dataset is in the cache or not
                cache_results = cdcrd_obj.in_cache(fname)
                # Increment dataset frequency of occurrence in cache
                results_csv.append(cache_results)
                cache_obj.increment_cache(str(fname))
            if cache_obj.if_in_cache(str(fname)) == False:
                cache_obj.push_cache(
                    str(fname)
                )  # Compress and Decompress dataset and set frequency of occurrence of dataset in cache
                results = cdcrd_obj.not_in_cache(fname)
                results_csv.append(results)
            if v.args.verbose >= 3:
                print('Cache view for iteration no: ' + str(i))
                cache_df = pd.DataFrame.from_dict(
                    cache_obj.cache, orient='index',
                    columns=['Frequency'])  # TODO call object passed cache
                print(cache_df)
                print(
                    '---------------------------------------------------------------'
                )
            cache_obj.check_size()
            #i += 1

        c = cw.csv_writer(
            'results_', '', '', '', 'cs' + str(cache_size), '_' + str(ctype),
            '', result_location, results_csv, 'w', header, '_n' + str(n), '',
            '_w' + str(window_size), '_nbw' + str(no_of_win), '', '', '',
            '_wi' + str(win_ind))  # writing results in the csv file
        c.csv_writer_result()

    if v.args.rd == False:
        header = [
            'exp_ind', 'seed', 'cache_type', 'alpha_zero', 'delta_alpha',
            'alpha', 'da_amp', 'cache_size', 'n', 'comp_time', 'decomp_time',
            'total_time', 'comp_ratio', 'ds_size', 'ds_name', 'cached',
            'win_size', 'nb_win', 'win_ind'
        ]
        for i in range(0, n):  # loop over the n queries
            fname = ', '.join(
                data[i])  # Reading the names of datasets with full path
            dataset_name = os.path.basename(
                fname
            )  # dataset_name has only name of the datasets without full path
            query_csv.append([dataset_name] + [
                fname
            ])  # query_csv is a list of all the datasets after randomization
            if cache_obj.if_in_cache(
                    str(dataset_name
                        )) == True:  # check if dataset is in the cache or not
                cache_results = cdc_obj.in_cache(fname)
                # Increment dataset frequency of occurrence in cache
                results_csv.append(cache_results)
                cache_obj.increment_cache(str(dataset_name))
            if cache_obj.if_in_cache(str(dataset_name)) == False:
                cache_obj.push_cache(
                    str(dataset_name)
                )  # Compress and Decompress dataset and set frequency of occurrence of dataset in cache
                results = cdc_obj.not_in_cache(fname)
                results_csv.append(results)
            if v.args.verbose >= 3:
                print('Cache view for iteration no: ' + str(i))
                cache_df = pd.DataFrame.from_dict(
                    cache_obj.cache, orient='index',
                    columns=['Frequency'])  # TODO call object passed cache
                print(cache_df)
                print(
                    '---------------------------------------------------------------'
                )
            cache_obj.check_size()
            #i += 1

        c = cw.csv_writer(
            'results_', 'az.' + str(alpha_zero), '_da.' + str(delta_alpha),
            '_a.' + str(alpha), '_cs' + str(cache_size), '_' + str(ctype),
            '_s' + str(seed), result_location, results_csv, 'w', header,
            '_n' + str(n), '', '_w' + str(window_size),
            '_nbw' + str(no_of_win), '', '_damp' + str(del_max), '',
            '_wi' + str(win_ind))  # writing results in the csv file
        c.csv_writer_result()

    if v.args.verbose >= 2:
        cache_df = pd.DataFrame.from_dict(cache_obj.cache,
                                          orient='index',
                                          columns=['Frequency'])
        print(cache_df)
        print('\n')
        print(pd.DataFrame.from_dict(results_csv))
        print('Minimum value in cache: ' +
              min(cache_obj.cache, key=cache_obj.cache.get))
        print('Maximum value in cache: ' +
              max(cache_obj.cache, key=cache_obj.cache.get))
    if v.args.verbose >= 3:
        random_query_save_path = ''
        if v.args.alpha is not None:
            random_query_save_path = p.random_query_save_path + 'perf_eval_input_queries_a.' + str(
                v.args.alpha) + '_without_full_path/'
        elif v.args.alpha is None:
            random_query_save_path = p.random_query_save_path + 'perf_eval_input_queries_a.' + str(
                v.args.astart) + '-' + str(
                    v.args.astop) + '_without_full_path/'

        if v.args.rd == False:
            queries = cw.csv_writer(
                'results_', 'az.' + str(alpha_zero), '_da.' + str(delta_alpha),
                '_a.' + str(alpha), '_cs' + str(cache_size), '_' + str(ctype),
                '_s' + str(seed), random_query_save_path, query_csv, 'w',
                ['dataset_name', 'path'], '_n' + str(n), '',
                '_w' + str(window_size), '_nbw' + str(no_of_win), '',
                '_damp' + str(del_max), '', '_wi' + str(win_ind))
            queries.csv_writer_result()
        if v.args.rd == True:
            queries = cw.csv_writer('results_', '', '', '',
                                    'cs' + str(cache_size), '_' + str(ctype),
                                    '', random_query_save_path, query_csv, 'w',
                                    ['dataset_name', 'path'], '_n' + str(n),
                                    '', '_w' + str(window_size),
                                    '_nbw' + str(no_of_win), '', '', '',
                                    '_wi' + str(win_ind))
            queries.csv_writer_result()
    return results_csv
Example #17
from bs4 import BeautifulSoup as BSoup
import requests as req

from csv_writer import csv_writer

outfile = 'output.csv'

url = 'https://amdm.ru/akkordi/viktor_coi/'
resp = req.get(url)

if resp.status_code == 200:
    soup = BSoup(resp.text, 'lxml')
    links = soup.select_one('table#tablesort').select('a.g-link')
    links_text = (a.text for a in links)
    links_href = (f"https:{a['href']}" if a.get('href') else None for a in links)
    fieldnames = ['song_name', 'link']
    csv_writer(fieldnames, zip(links_text, links_href), outfile)
    print('Success')
else:
    print(f'Error {resp.status_code}')
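The csv_writer imported here is a small project-local module; one plausible implementation, shown only as an assumption:
import csv

def csv_writer(fieldnames, rows, outfile):
    # Write a header row followed by the (song_name, link) pairs.
    with open(outfile, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(fieldnames)
        writer.writerows(rows)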

Example #18
def action(request):
    #Keep track of which checkboxes are selected, initialized to 0, convert to 1 if selected
    #Case and Layer names initialized to empty strings, converted to the actual names entered
    w, h = 5, 5
    cases = [0 for x in range(w)]
    caseNames = ["" for x in range(w)]
    layers = [[0 for x in range(w)] for y in range(h)]
    layerNames = [["" for x in range(w)] for y in range(h)]
    for caseNum in range(5):
        boxName = "box" + str(caseNum + 1)
        caseName = "boxInput" + str(caseNum + 1)
        try:
            cases[caseNum] = request.POST[boxName]
            caseNames[caseNum] = request.POST[caseName]
            if (cases[caseNum] == "on"):
                cases[caseNum] = 1
        except:
            pass
        for layerNum in range(5):
            if (cases[caseNum]):
                layerboxName = "layerbox" + str(layerNum +
                                                1) + "_" + str(caseNum + 1)
                layerName = "layerboxInput" + str(layerNum +
                                                  1) + "_" + str(caseNum + 1)
                try:
                    layerNames[caseNum][layerNum] = request.POST[layerName]
                    layers[caseNum][layerNum] = request.POST[layerboxName]
                    if (layers[caseNum][layerNum] == "on"):
                        layers[caseNum][layerNum] = 1
                except:
                    pass

    #Initialize variables for data
    epsilon_p_input = [0 for x in range(w)]
    n_input = [0 for x in range(w)]
    T_input = [0 for x in range(w)]
    p_input = [0 for x in range(w)]
    U_0_input = [0 for x in range(w)]
    alpha_input = [[0 for x in range(w)] for y in range(h)]
    d_f_input = [[0 for x in range(w)] for y in range(h)]
    d_f_R_input = [[0 for x in range(w)] for y in range(h)]
    epsilon_f_input = [[0 for x in range(w)] for y in range(h)]
    h_input = [[0 for x in range(w)] for y in range(h)]
    h_input_units = [[0 for x in range(w)] for y in range(h)]
    sigma_0_input = [[0 for x in range(w)] for y in range(h)]
    n_input = [0 for x in range(w)]

    d_p = [0 for x in range(w)]
    E = [0 for x in range(w)]
    FOM = [0 for x in range(w)]
    delta_p = [[0 for x in range(w)] for y in range(h)]
    delta_p_units = ["" for x in range(w)]
    Re_f = [[0 for x in range(w)] for y in range(h)]
    Kn_f = [[0 for x in range(w)] for y in range(h)]
    warning = [["" for x in range(w)] for y in range(h)]
    warning_report = [["" for x in range(w)] for y in range(h)]

    #UNITS:
    T_input_units = [0 for x in range(w)]
    p_input_units = [0 for x in range(w)]
    U_0_input_units = [0 for x in range(w)]

    notes_input = ["" for x in range(w)]
    #Data Calculation:
    #result = [0 for x in range(w)]
    for caseNum in range(5):
        if (cases[caseNum]):
            try:
                if (request.POST['particlePerm_' +
                                 str(caseNum + 1)] == 'Infinity'):
                    epsilon_p_input[caseNum] = 99999999
                else:
                    epsilon_p_input[caseNum] = float(
                        request.POST['particlePerm_' + str(caseNum + 1)])
                if (request.POST['chargeInput_' +
                                 str(caseNum + 1)] == 'neutralized'):
                    n_input[caseNum] = 'neutralized'
                else:
                    n_input[caseNum] = float(request.POST['chargeInput_' +
                                                          str(caseNum + 1)])
                T_input[caseNum] = float(request.POST['TemperatureConstant_' +
                                                      str(caseNum + 1)])
                T_input_units[caseNum] = request.POST['tempUnits_' +
                                                      str(caseNum + 1)]
                p_input[caseNum] = float(request.POST['pressure_' +
                                                      str(caseNum + 1)])
                p_input_units[caseNum] = request.POST['pressureUnits_' +
                                                      str(caseNum + 1)]
                U_0_input[caseNum] = float(request.POST['velocity_' +
                                                        str(caseNum + 1)])
                U_0_input_units[caseNum] = request.POST['velocityUnits_' +
                                                        str(caseNum + 1)]
                P = 1
                for layerNum in range(5):
                    if (layers[caseNum][layerNum]):
                        try:
                            alpha_input[caseNum][layerNum] = float(
                                request.POST['solidity_' + str(caseNum + 1) +
                                             '_' + str(layerNum + 1)])
                            d_f_R_input[caseNum][layerNum] = float(
                                request.POST['fiberDiameter_' +
                                             str(caseNum + 1) + '_' +
                                             str(layerNum + 1)])
                            if request.POST['geometricDiameter_' +
                                            str(caseNum + 1) + '_' +
                                            str(layerNum + 1)]:
                                d_f_input[caseNum][layerNum] = float(
                                    request.POST['geometricDiameter_' +
                                                 str(caseNum + 1) + '_' +
                                                 str(layerNum + 1)])
                            else:
                                d_f_input[caseNum][layerNum] = d_f_R_input[
                                    caseNum][layerNum]
                            epsilon_f_input[caseNum][layerNum] = float(
                                request.POST['fiberPerm_' + str(caseNum + 1) +
                                             '_' + str(layerNum + 1)])
                            h_input[caseNum][layerNum] = float(
                                request.POST['fiberThickness_' +
                                             str(caseNum + 1) + '_' +
                                             str(layerNum + 1)])
                            h_input_units[caseNum][layerNum] = request.POST[
                                'thicknessUnits_' + str(caseNum + 1) + '_' +
                                str(layerNum + 1)]
                            sigma_0_input[caseNum][layerNum] = float(
                                request.POST['chargeDensity_' +
                                             str(caseNum + 1) + '_' +
                                             str(layerNum + 1)])
                            results = electret_eval.electret_plotting_function(
                                epsilon_p_input[caseNum], n_input[caseNum],
                                T_input[caseNum], p_input[caseNum],
                                U_0_input[caseNum],
                                alpha_input[caseNum][layerNum],
                                d_f_input[caseNum][layerNum],
                                d_f_R_input[caseNum][layerNum],
                                epsilon_f_input[caseNum][layerNum],
                                h_input[caseNum][layerNum],
                                sigma_0_input[caseNum][layerNum],
                                T_input_units[caseNum], p_input_units[caseNum],
                                U_0_input_units[caseNum],
                                h_input_units[caseNum][layerNum])
                            d_p[caseNum] = results['d_p']
                            P = results['P'] * P
                            delta_p[caseNum][layerNum] = results['delta_p']
                            Re_f[caseNum][layerNum] = results['Re_f']
                            Kn_f[caseNum][layerNum] = results['Kn_f']
                            warning_str = results['warning']
                            warning_report[caseNum][layerNum] = warning_str
                            if warning_str == '':
                                warning[caseNum][layerNum] = ''
                            else:
                                warning[caseNum][layerNum] = mark_safe(
                                    'For ' + caseNames[caseNum] + ', ' +
                                    layerNames[caseNum][layerNum] + ': ' +
                                    warning_str)
                            if U_0_input_units[caseNum] == 'cm/s':
                                delta_p_units[caseNum] = 'Pa'
                                delta_p[caseNum][layerNum] = results['delta_p']
                            elif U_0_input_units[caseNum] == 'ft/min':
                                delta_p_units[caseNum] = 'in H2O'
                                delta_p[caseNum][
                                    layerNum] = results['delta_p'] / 249.0889
                        except:
                            return HttpResponse("Check values in layer" +
                                                str(layerNum + 1) + "_" +
                                                str(caseNum + 1))
                E[caseNum] = np.transpose(np.atleast_2d(1 - np.sum(P, axis=1)))
                FOM[caseNum] = np.transpose(
                    np.atleast_2d(-np.log(np.sum(P, axis=1)) /
                                  sum(delta_p[caseNum][:])))
                #E[caseNum] = np.transpose(np.atleast_2d(1-P))
                #FOM[caseNum] = np.transpose(np.atleast_2d(-np.log(P)/sum(delta_p[caseNum][:])))
                try:
                    notes_input[caseNum] = request.POST['notes_' +
                                                        str(caseNum + 1)]
                except:
                    pass

            except:
                return HttpResponse(
                    "Error! Please go back and check your values on case " +
                    str(caseNum + 1) + ".")
    values = {}
    #Draw Graph based on 'E' or 'FOM':
    if request.method == 'POST' and 'Penetration' in request.POST:
        #form = ValueForm(request.POST)
        #if form.is_valid():
        #return HttpResponse("TRUE")
        plt.gcf().clear()
        #plt.plot([0, 1, 2, 3], [value1, value2, value3 , value4])
        fig, ax = plt.subplots()
        for caseNum in range(5):
            if (cases[caseNum]):
                plt.semilogx(d_p[caseNum] / sc.nano,
                             1 - E[caseNum],
                             '-',
                             linewidth=2,
                             label=caseNames[caseNum] + ' (' +
                             r'$\Delta p = $' +
                             str(round(sum(delta_p[caseNum][:]), 2)) + ' ' +
                             delta_p_units[caseNum] + ')')
        ax.grid(True, which='both', zorder=0)
        plt.ylabel('Penetration (-)')
        plt.xlabel('Particle diameter (nm)')
        plt.legend(loc=2)
        #ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.3e'))

        #plt.tight_layout()
        #plt.savefig(os.path.join(BASE_DIR,'static','personal','images','test.png'))

    elif request.method == 'POST' and 'Efficiency' in request.POST:
        #form = ValueForm(request.POST)
        #if form.is_valid():
        #return HttpResponse("TRUE")
        plt.gcf().clear()
        #plt.plot([0, 1, 2, 3], [value1, value2, value3 , value4])
        fig, ax = plt.subplots()
        for caseNum in range(5):
            if (cases[caseNum]):
                plt.semilogx(d_p[caseNum] / sc.nano,
                             E[caseNum] * 100,
                             '-',
                             linewidth=2,
                             label=caseNames[caseNum] + ' (' +
                             r'$\Delta p = $' +
                             str(round(sum(delta_p[caseNum][:]), 2)) + ' ' +
                             delta_p_units[caseNum] + ')')
        ax.grid(True, which='both', zorder=0)
        plt.ylabel('Filtration efficiency (%)')
        plt.xlabel('Particle diameter (nm)')
        plt.legend(loc=3)
        ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.3e'))
        #plt.tight_layout()
        #plt.savefig(os.path.join(BASE_DIR,'static','personal','images','test.png'))

    elif request.method == 'POST' and 'FOM' in request.POST:
        #form = ValueForm(request.POST)
        plt.gcf().clear()
        #plt.plot([0, 1, 2, 3], [filevalue1, filevalue2, filevalue3 , filevalue4])
        fig, ax = plt.subplots()
        for caseNum in range(5):
            if (cases[caseNum]):
                plt.semilogx(d_p[caseNum] / sc.nano,
                             FOM[caseNum],
                             '-',
                             linewidth=2,
                             label=caseNames[caseNum] + ' (' +
                             r'$\Delta p = $' +
                             str(round(sum(delta_p[caseNum][:]), 2)) + ' ' +
                             delta_p_units[caseNum] + ')')
                plt.ylabel('Figure of merit (' + delta_p_units[caseNum] +
                           '$^{-1}$)')
        ax.grid(True, which='both', zorder=0)
        plt.xlabel('Particle diameter (nm)')
        plt.legend(loc=3)
        #ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.3e'))
        #plt.tight_layout()
        #plt.savefig(os.path.join(BASE_DIR,'static','personal','images','test.png'))

    #f= open("personal/static/personal/file.txt","w+")
    #f.write("Just a file")
    #f.close()
    elif request.method == 'POST' and 'Download' in request.POST:
        #return HttpResponseRedirect(request.META.get('HTTP_REFERER'),{'download':'yes'})
        f = csv_writer(layers, caseNames, epsilon_p_input, n_input,
                       T_input_units, T_input, p_input_units, p_input,
                       U_0_input_units, U_0_input, layerNames, alpha_input,
                       d_f_input, d_f_R_input, epsilon_f_input, h_input_units,
                       h_input, sigma_0_input, notes_input, Re_f, Kn_f,
                       delta_p_units, delta_p, warning_report, d_p, E, FOM)
        response = HttpResponse(f, content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=report.csv'
        return response
        #return render(request, 'personal/electret.html',{'download':1})
    #values = {}

    for caseNum in range(5):
        values['case_' + str(caseNum + 1)] = cases[caseNum]
        values['caseName_' + str(caseNum + 1)] = caseNames[caseNum]
        values['particlePerm_' + str(caseNum + 1)] = epsilon_p_input[caseNum]
        values['chargeInput_' + str(caseNum + 1)] = n_input[caseNum]
        values['TemperatureConstant_' + str(caseNum + 1)] = T_input[caseNum]
        values['pressure_' + str(caseNum + 1)] = p_input[caseNum]
        values['velocity_' + str(caseNum + 1)] = U_0_input[caseNum]
        values['tempUnits_' + str(caseNum + 1)] = T_input_units[caseNum]
        values['pressureUnits_' + str(caseNum + 1)] = p_input_units[caseNum]
        values['velocityUnits_' + str(caseNum + 1)] = U_0_input_units[caseNum]
        for layerNum in range(5):
            values['layer_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = layers[caseNum][layerNum]
            values['layerName_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = layerNames[caseNum][layerNum]
            values['solidity_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = alpha_input[caseNum][layerNum]
            values['fiberDiameter_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = d_f_R_input[caseNum][layerNum]
            values['geometricDiameter_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = d_f_input[caseNum][layerNum]
            values['fiberPerm_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = epsilon_f_input[caseNum][layerNum]
            values['fiberThickness_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = h_input[caseNum][layerNum]
            values['thicknessUnits_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = h_input_units[caseNum][layerNum]
            values['chargeDensity_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = sigma_0_input[caseNum][layerNum]
            values['warning_' + str(caseNum + 1) + '_' +
                   str(layerNum + 1)] = warning[caseNum][layerNum]
    plt.tight_layout()
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    string = base64.b64encode(buf.read())
    uri = 'data:image/png;base64,' + urllib.parse.quote(string)
    html = '<img src = "%s"/>' % uri
    values['figure'] = urllib.parse.quote(string)
    return render(request, 'personal/action.html', values)
Example #19
import argparse
import logging

from csv_writer import csv_writer
import csv
import synonyms

logging.basicConfig(level=logging.DEBUG)
path = '/Users/charilie/Desktop/PMI/l1.csv'
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dict", dest="dict", action="store_true", default=False)
    parser.add_argument("--enlarge", dest="enlarge", action="store_true", default=False)
    args = parser.parse_args()
    if args.dict:
        labelset = read_csv(path)
        logging.debug(labelset)
        dict_all = get_all_frequency(path)  # dictionary of all words
        for label in labelset:
            res_list = get_frequency(path, label, dict_all)
            csv_writer('pmi_dict_l1.csv', res_list, label)
    if args.enlarge:
        dict_path="pmi_dict_l1.csv"
        output_path="l1_enlarged_dict.csv"
        with open(dict_path, 'r', encoding='utf-8-sig') as csvfile:  # utf-8-sig strips the \ufeff BOM cleanly
            reader=csv.DictReader(csvfile)
            for line in reader:
                word=line['word']
                word_label=line['label']
                word_list=synonyms.nearby(word)[0]
                csv_writer(output_path, word_list, word_label)  # write every synonym to the output file
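Both call sites above pass (path, words, label), so the imported csv_writer helper plausibly looks like the following; this is an assumption, not the original module.
import csv

def csv_writer(path, words, label):
    # Append one (word, label) row per word to the given CSV file.
    with open(path, 'a', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        for word in words:
            writer.writerow([word, label])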



Example #20
def main(exp_ind, seed, itr, data, cs, az, da, a, del_max, window_size,
         cache_obj_lfu, cache_obj_fifo, no_of_win, exp_folder_name, win_ind,
         marker):

    cache_size = cs
    seed = seed
    alpha_zero = az
    delta_alpha = da
    alpha = a
    type_cache = 'undefined'
    data = data
    n = itr
    window_size = window_size
    no_of_win = no_of_win
    exp_ind = exp_ind
    exp_folder_name = exp_folder_name
    win_ind = win_ind
    del_max = del_max
    result_fifo = []
    result_lfu = []

    if v.args.lfu == False and v.args.fifo == False:
        print('\nCache type missing. Cache is undefined.')
        print('Use -lfu for LFU cache')
        print('Use -fifo for FIFO cache. \n')
        sys.exit()

    if v.args.lfu or v.args.fifo:
        if v.args.scss == False:
            if v.args.rd == False:
                print('Value of alpha is: ' + str(a))
        else:
            print('Value of alpha is: ' + str(a))
        print('Cache size is set to: ' + str(cache_size))
        print('Seed for random value is set to: ' + str(seed))
        print('No of iterations is set to: ' + str(n) + '\n')
        if v.args.alpha == True:
            print('Window size not used.')
        if v.args.alpha == False:
            print('Window size: ' + str(window_size))

    result_location = p.results_save_path + exp_folder_name + '/'
    fname = 'agg_result_' + exp_folder_name  # File name for results for one type of cache

    if v.args.verbose >= 3:
        print('Result location')
        print(result_location)

    if v.args.lfu is True:
        type_cache = 'LFU'
        print('Running experiment using cache', type_cache)
        print('---------------------------------------------')
        type_cache = 'lfu'
        result_lfu = cd.comp_decomp(exp_ind + '_' + type_cache,
                                    alpha_zero, delta_alpha, alpha, del_max,
                                    int(cache_size), type_cache, seed, n,
                                    result_location, window_size,
                                    cache_obj_lfu, '', data, no_of_win,
                                    win_ind, marker)
        # input ("LFU done ")
        if v.args.rd == False:
            cm = cw.csv_writer(fname, '', '', '', '', '', '', result_location,
                               '', '', '', '', 'lfu', '', '', '', '', '',
                               win_ind)
            cm.csv_merge()
        if v.args.rd == True:
            cm = cw.csv_writer(fname, '', '', '', '', '', '', result_location,
                               '', '', '', '', 'lfu', '', '', '', '', '',
                               win_ind)
            cm.csv_merge()
            # input ("I wrote the result of LFU with real data ")
    if v.args.fifo is True:
        type_cache = 'FIFO'
        print('---------------------------------------------')
        print('Running experiment using cache', type_cache)
        print('---------------------------------------------')
        type_cache = 'fifo'
        result_fifo = cd.comp_decomp(exp_ind + '_' + type_cache, alpha_zero,
                                     delta_alpha, alpha, del_max,
                                     int(cache_size), type_cache, seed, n,
                                     result_location, window_size, '',
                                     cache_obj_fifo, data, no_of_win, win_ind,
                                     marker)
        if v.args.rd == False:
            cm = cw.csv_writer(fname, '', '', '', '', '', '', result_location,
                               '', '', '', '', 'fifo', '', '', '', '', '',
                               win_ind)
            cm.csv_merge()
        if v.args.rd == True:
            cm = cw.csv_writer(fname, '', '', '', '', '', '', result_location,
                               '', '', '', '', 'fifo', '', '', '', '', '',
                               win_ind)
            cm.csv_merge()
    # File name for combined results LFU and FIFO
    if v.args.lfu != False and v.args.fifo != False:
        cmer = cw.csv_writer('comb_agg_results_' + exp_folder_name, '', '', '',
                             '', '', '', result_location, '', '', '', '',
                             'result', '', '', '', '', '', '')
        cmer.csv_merge()

    return result_fifo, result_lfu
Example #21
    for delta_al in rand_alpha_list:
        delta_alpha.append(round(float(delta_al[1]), 1))

    alphas = []
    for al_rand in rand_alpha_list:
        alphas.append(round(float(al_rand[2]), 1))

    for ws in window_size:
        for n in itr:
            nbw.append(n // ws)

    exp_des = 'Experiment with random alphas and window size'
    exp_folder_name = 'of_experiments'
    mr = cw.csv_writer(
        exp_folder_name, alpha_zero, delta_alpha, alphas, cache_size,
        str('LFU' if v.args.lfu == True else '') + " " +
        str('FIFO' if v.args.fifo == True else ''), seed, p.agg_results + '/',
        '', 'w', '', itr, '', window_size, nbw, del_min, del_max, del_step, '')
    mr.log_write(exp_des)
    header_syn = [
        'exp_ind', 'seed', 'cache_type', 'alpha_zero', 'delta_alpha', 'alpha',
        'da_amp', 'cache_size', 'n', 'comp_time', 'decomp_time', 'total_time',
        'comp_ratio', 'ds_size', 'ds_name', 'cached', 'win_size', 'nb_win',
        'win_ind'
    ]
    result_syn = pd.DataFrame()

    # Experiment
    print(exp_des)
    for win_size in window_size:
        i = 0