Example #1
def th11decodedata(file, buffer, flength):
    length = unsigned_int(buffer, 0x1c)
    dlength = unsigned_int(buffer, 0x20)
    decodedata = bytearray(dlength)
    rawdata = bytearray(buffer[0x24:])

    decode(rawdata, length, 0x800, 0xaa, 0xe1)
    decode(rawdata, length, 0x40, 0x3d, 0x7a)
    decompress(rawdata, decodedata, length)

    return decodedata
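
unsigned_int is not shown in this snippet. A minimal sketch, assuming it reads a little-endian 32-bit unsigned integer at a byte offset (the fixed 0x1c/0x20 offsets suggest a small binary header):

import struct

def unsigned_int(buffer, offset):
    # read a little-endian uint32 at `offset` (assumed layout)
    return struct.unpack_from('<I', buffer, offset)[0]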
Example #2
 def __init__(self, name="ioT Device"):
     # read today's weather once at construction time
     with open("TodayWeather.json", "r") as f:
         todayWeather = common.decode(f.read())
     self.weather = json.loads(todayWeather)
     self.name = name
     print(self.weather)
Example #3
def test_create_sound_authenticated(logged_in_client):
    """
    Logged-in users should be able to upload new sounds if the data provided
    is complete.
    """
    response = logged_in_client.post('/sounds/', data=sound_upload('a.ogg'))
    assert status(response) == 'created'
    assert is_subdict(sound_details('a.ogg'), decode(response))
Example #4
def test_list_sounds_empty_database(client):
    """
    Listing sounds from an empty database should result in an empty list.
    Any client can make this request (no authentication needed).
    """
    response = client.get('/sounds/')
    assert status(response) == 'ok'
    assert decode(response) == []
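
These tests lean on helpers (status, decode, is_subdict, sound_upload) that are not shown. A minimal sketch of three of them, assuming a Flask-style test client returning JSON bodies; everything here is inferred from how the tests use the names, not taken from the real project:

import json

STATUS_NAMES = {200: 'ok', 201: 'created'}  # assumed mapping

def status(response):
    # map the numeric HTTP status code to the symbolic name asserted above
    return STATUS_NAMES.get(response.status_code, response.status_code)

def decode(response):
    # parse the JSON body of a test-client response
    return json.loads(response.data)

def is_subdict(small, big):
    # every key/value pair of `small` must appear in `big`
    return all(big.get(k) == v for k, v in small.items())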
Example #5
def read_posting(term_id):
  # provide implementation for posting list lookup for a given term
  # a useful function to use is index_f.seek(file_pos), which does a disk seek to
  # a position offset 'file_pos' from the beginning of the file
  file_pos = file_pos_dict[term_id]
  index_f.seek(file_pos)
  posting_list = index_f.read( posting_size_dict[term_id] )
  posting_list = common.decode( posting_list )
  return posting_list
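Neither common.decode nor the on-disk posting format is shown. One plausible sketch, assuming each posting list is stored as comma-separated, gap-encoded doc IDs (a common inverted-index layout) and that index_f is opened in text mode:

def decode(raw):
    # hypothetical: turn 'gap1,gap2,...' back into absolute doc IDs
    doc_ids, current = [], 0
    for gap in raw.split(','):
        current += int(gap)
        doc_ids.append(current)
    return doc_ids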
Example #7
def load_user_map():
    with open(USER_MAP_FILE, "r") as jsonFile:
        data = json.load(jsonFile)

    if data:
        global USER_NAME_MAP
        for k, v in data.items():
            dk = decode(k)
            USER_NAME_MAP[dk] = v
            print('[LOAD-USER]: %s %s' % (dk, v))
Example #8
 def listen_to_client(self, client, addr):
     size = 1024
     while True:
         try:
             data = client.recv(size)
             if data:
                 self.responses.put(decode(data))
             else:
                 # an empty read means the peer closed the connection
                 client.close()
                 return False
         except OSError:
             client.close()
             return False
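
For context, listen_to_client is the kind of handler a threaded server spawns once per accepted connection. A minimal, hypothetical accept loop around it (serve, host and port are illustrative names; assumes the socket and threading modules are imported):

 def serve(self, host='0.0.0.0', port=9000):
     # hypothetical: one daemon thread per client connection
     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     sock.bind((host, port))
     sock.listen()
     while True:
         client, addr = sock.accept()
         threading.Thread(target=self.listen_to_client,
                          args=(client, addr), daemon=True).start()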
Example #9
def test_sound_details(client, logged_in_client):
    """
    Any user can request details about an uploaded sound. These details
    should contain complete information about the sound description and
    properties.
    """
    # Upload a sound while authenticated
    response = logged_in_client.post('/sounds/', data=sound_upload('a.ogg'))
    # Request details while not authenticated
    response = client.get('/sounds/1/')
    assert status(response) == 'ok'
    assert is_subdict(sound_details('a.ogg'), decode(response))
Example #10
def start(ip, port):
    max_petitions = 1

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((ip, port))
    print("Started server on " + ip + ":" + str(port))
    s.listen(max_petitions)
    print("Listening")
    while True:
        conn, cl_address = s.accept()
        params = common.recv_file(conn)
        decoded_params = common.decode(params, True)
        generate_song(decoded_params)
Example #11
def callMeStart(filePaths, columnNames):
    '''
    :type filePaths: [str]
    :type columnNames: [[str]]
    :rtype: {str: set(str)}
    '''

    ks = KeysStorage()
    dic = {}  # path -> keys

    for i, path in enumerate(filePaths):

        keys = ks.findCompositeKeys(columnNames[i])
        if len(keys) == 0:
            # build prefix tree
            root, depth = buildPrefixTree(path)
            if root == -1:
                dic[path] = []
            else:
                # initialize NonKeyFinder and find non-keys
                nonKFinder = NonKeyFinder(depth)
                nonKFinder.find(root, 0)

                # initialize KeyFinder and find keys from non-keys
                kFinder = KeyFinder(depth)
                keys = kFinder.find(nonKFinder.NonKeySet)  # e.g. {29318, 21938, 1121}

                translatedKeys = []  # e.g. [{'col1', 'col2', 'col3'}, {'col2', 'col4'}]
                # decode() yields one fixed-width bit string per key,
                # e.g. '000000010110010000'
                for key in decode(keys, depth):
                    # translate '000000010110010000' into {'col1', 'col2', 'col3'}
                    translatedKey = set(columnNames[i][j]
                                        for j, digit in enumerate(key)
                                        if digit == '1')

                    # format [{'col1', 'col2', 'col3'}, {'col2', 'col4'}]
                    translatedKeys.append(translatedKey)

                    # record the key
                    ks.insert(translatedKey)
                dic[path] = translatedKeys
        else:
            dic[path] = keys

    return dic
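
The decode used here turns the integer bitmask keys into fixed-width bit strings (see the format comments above). A sketch under that assumption:

def decode(keys, depth):
    # hypothetical: render each integer key as a depth-wide binary string,
    # e.g. decode({22}, 5) -> ['10110']
    return [format(key, '0{}b'.format(depth)) for key in keys]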
Example #12
def test_list_sounds_filled_database(client, logged_in_client):
    """
    Listing sounds from a filled database should result in a non-empty list.
    Any client can make this request (no authentication needed).
    """
    # Upload a sound while authenticated
    logged_in_client.post('/sounds/', data=sound_upload('a.ogg'))
    logged_in_client.post('/sounds/', data=sound_upload('b.ogg'))
    # Request details while not authenticated
    response = client.get('/sounds/')
    assert status(response) == 'ok'
    response = decode(response)
    assert len(response) == 2
    assert is_subdict(sound_details('a.ogg'), response[0])
    assert is_subdict(sound_details('b.ogg'), response[1])
Example #13
def recvThreadFun():
    print("start recvThread")
    while continue_flag:
        for sock in recvSockSet:
            data = ''
            try:
                data = recv_msg_queues[sock].get_nowait()
            except queue.Empty:
                continue
            if data == '':
                continue

            # ret,msg_len,msg_code,msg_no,result,userName,pwd,heartBeatInt
            data_set = common.decode(data)
            ret = data_set[0]
            msg_len = data_set[1]
            msg_code = data_set[2]
            msg_no = data_set[3]
            print("recvThread msg_code=%s"%msg_code)
            for case in switch(msg_code):
                if case('S101'):     # login request
                    result = data_set[4]
                    userName = data_set[5]
                    pwd = data_set[6]
                    heartBeatInt = data_set[7]
                    if ret == 0:
                        print("RecvMsg[%s,%i,%s,%s,%s,%s]"% (msg_code,msg_no,result,userName,pwd,heartBeatInt))
                        flag = ''
                        if result == 1:
                            flag = SUCCESS
                        else:
                            flag = FAILED
                        utcStamp = time.time()
                        retData = (str(result).encode(), str(utcStamp).encode(), flag.encode())
                        #send_msg_queues[sock].put(retData)
                        msg = common.encode('A101',msg_no,retData)
                        send_msg_queues[sock].put(msg)
                        break
                        #sock.send(msg)
                    else:
                        print("Error: upack failed")
                if case('S201'):    # used to test serialization
                    result = data_set[4]
                    if ret == 0:
                        print(result)
                        rebuild = json.loads(result, object_hook=lambda d: common.Student(d['name'], d['age'], d['score']))
                        print(rebuild)
                if case('S301'): pass
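
The "for case in switch(...)" construct is not built into Python; the snippet presumably relies on a helper in the spirit of the well-known ActiveState switch recipe, which is not shown. A Python 3-safe sketch:

class switch:
    """Minimal switch/case helper: iterate once, match against values."""

    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        # yield the match method exactly once
        yield self.match

    def match(self, *args):
        # no args acts as the default case; once a case has matched,
        # keep returning True so fall-through across cases works
        if self.fall or not args:
            return True
        if self.value in args:
            self.fall = True
            return True
        return False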
Example #14
 def process(self, socket, msg):
     """"""
     id, value = decode(msg)
     if id == common.ID_ROVER:
         self.move(value)
     elif id == common.ID_MAP:
         self.traverse_map(value)
     elif id == common.ID_LIGHTS:
         if value:
             self.set_led1(True)
             self.set_led2(True)
         else:
             self.set_led1(False)
             self.set_led2(False)
     else:
         logger.info("%s %s", id, value)
Example #15
def test(file):
    from common import entry
    file, buffer, flength = entry(file)

    length = unsigned_int(buffer, 0x1c)
    dlength = unsigned_int(buffer, 0x20)
    decodedata = bytearray(dlength)
    rawdata = bytearray(buffer[0x24:])

    def assert_file_eq(obj, path):
        with open(path, 'rb') as f:
            cont = f.read()
        cut = min(len(obj), len(cont))
        assert obj[:cut] == cont[:cut]

    #assert_file_eq(rawdata, '{}.raw.rawdata1'.format(file))

    decode(rawdata, length, 0x400, 0xaa, 0xe1)

    #assert_file_eq(rawdata, '{}.raw.rawdata2'.format(file))

    decode(rawdata, length, 0x80, 0x3d, 0x7a)

    #assert_file_eq(rawdata, '{}.raw.rawdata3'.format(file))

    decompress(rawdata, decodedata, length)

    try:
        assert_file_eq(decodedata, '{}.raw'.format(file))
    except AssertionError:
        rawdata = bytearray(buffer[0x24:])

        assert_file_eq(rawdata, '{}.raw.rawdata1'.format(file))
        decode(rawdata, length, 0x400, 0xaa, 0xe1)
        assert_file_eq(rawdata, '{}.raw.rawdata2'.format(file))
        decode(rawdata, length, 0x80, 0x3d, 0x7a)
        assert_file_eq(rawdata, '{}.raw.rawdata3'.format(file))

    print('test pass')
Example #16
    def run(self):
        while True:
            # receive messages and put them into the queue
            conn, addr = self._sock.accept()
            conn.settimeout(1)
            data = conn.recv(1024)

            msg = decode(data)
            if not msg:
                # nothing usable was decoded; drop the connection
                conn.close()
                continue

            if msg['type'] not in ['ping', 'start']:
                logging.debug(repr(msg))

            if msg['type'] == 'ping':
                # respond with pong to heartbeat messages
                # no need to put it into queue
                conn.sendall(encode('{"type": "pong"}'))
            else:
                incoming_queue.put(msg)
            conn.close()
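
decode and encode are not shown; given the msg['type'] lookups and the JSON string passed to encode, a plausible pair is:

import json

def decode(data):
    # hypothetical: bytes off the wire -> dict, empty dict on bad input
    try:
        return json.loads(data.decode('utf-8'))
    except (UnicodeDecodeError, ValueError):
        return {}

def encode(text):
    # hypothetical: JSON string -> bytes for sendall()
    return text.encode('utf-8')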
Example #17
 def on_reception(self, obj, params):
     """"""
     id, value = decode(obj.get_property(params.name))
     if id == common.ID_BUMPER:
         self.bumper_img.set_from_pixbuf(BUMPER_PIXBUFS[value])
     elif id == common.ID_ROVER:
         self.rover_img.set_from_pixbuf(ROVER_PIXBUFS[value])
     elif id == common.ID_MAP:
         if value == common.MOVE_END:
             self.play_button.set_active(False)
         else:
             self.map.move(value)
     elif id == common.ID_WLAN:
         self.wlan_img.set_from_pixbuf(WLAN_PIXBUFS[(int(value) // 26) + 1])
     elif id == common.ID_TELEMETRY:
         iter = self.store.get_iter_first()
         for i in value:
             self.store.set_value(iter, 1, i)
             iter = self.store.iter_next(iter)
     else:
         logger.warning("%s %s", id, value)
Example #18
    def recv(self):
        buf = ''
        self.socket.setblocking(0)
        while 0xf0f0 != common.stop_flag:
            try:
                rlist, wlist, xlist = select.select([self.socket], [], [],
                                                    0.05)
                if [] == rlist:
                    time.sleep(0.05)
                    continue
                while 1:
                    data = ''
                    try:
                        data = bytes.decode(self.socket.recv(300))
                    except socket.error as e:
                        if e.errno == 11:
                            pass
                        else:
                            print("socket error, Error code:", e)
                            self.socket.close()
                            #stop_flag = 0xf0f0
                            break

                    buf += data
                    if len(buf) < common.headerLen:
                        time.sleep(0.05)
                        continue

                    ret, msg_len, msg_code, msg_no, result, utcStamp, desc = common.decode(
                        buf)
                    if ret == 0:  # ret == 0 means the message is complete
                        curDate = datetime.fromtimestamp(float(utcStamp))
                        print("%s,%i,%s,%s,%s" %
                              (msg_code, msg_no, result, curDate, desc))
                        buf = buf[msg_len:]
            except socket.error as msg:
                print("except:%s, socket is closed by peer" % msg)
                break

        self.socket.close()
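
common.decode here acts as a stream parser: it returns ret == 0 only once a complete frame is buffered, plus msg_len so the caller can trim buf. A heavily simplified, hypothetical text-framing version (the real wire format is not shown):

def decode(buf):
    # hypothetical frame: 'msg_code|msg_no|result|utc_stamp|desc\n'
    if '\n' not in buf:
        return 1, 0, '', 0, '', '', ''  # ret != 0: frame incomplete
    frame, _ = buf.split('\n', 1)
    msg_code, msg_no, result, utc_stamp, desc = frame.split('|')
    msg_len = len(frame) + 1            # caller slices buf = buf[msg_len:]
    return 0, msg_len, msg_code, int(msg_no), result, utc_stamp, desc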
Example #20
    # ground truth
    val = result[0]
    # bit array of our prediction
    raw_prediction = result[1].coordinates[0]

    if len(raw_prediction) > 0:
        # make blank SDR big enough for one number
        prediction = SDR(numSize)
        # remove prompt blank space from answer
        for value in raw_prediction:
            prediction.dense[value - numSize * 2] = 1
        # reassigning .dense notifies the SDR that its dense buffer was
        # modified in place (idiom from the SP tutorial)
        prediction.dense = prediction.dense
        # convert prediction into a number!
        prediction = common.decode(numDecoder, prediction)
    else:
        prediction = None  # no prediction
    # is prediction correct?
    agreement = (val == prediction)
    print("truth:", val, "prediction:", prediction, "agree?", agreement)
    if not agreement:
        errors += 1
        err_list.append((val, prediction))

print(err_list)

# should be 0 at default settings.
print("errors:", errors, "/", int(tries / 2))

### misc info ###
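
common.decode(numDecoder, prediction) maps a predicted SDR back to a number. A plausible sketch, assuming numDecoder maps each value to its reference SDR encoding (names and structure are inferred, not from the original project):

def decode(decoder, prediction):
    # hypothetical: return the value whose reference encoding shares the
    # most active bits with the predicted SDR
    best_value, best_overlap = None, 0
    for value, encoding in decoder.items():
        overlap = len(set(encoding.sparse) & set(prediction.sparse))
        if overlap > best_overlap:
            best_value, best_overlap = value, overlap
    return best_value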
Example #21
#!/usr/bin/python

import sys, common

for a in sys.argv[1:]:
    print(common.decode(a))
Example #22
filePath1 = 'd:\\toytest.csv'
filePath2 = 'd:\\open-10000-1.csv'
filePath3 = 'd:\\parking-10000.csv'
filePath4 = 'd:\\open-100.csv' 
filePath5 = 'd:\\5.csv' 
filePath6 = 'd:\\6.csv' 
filePath7 = 'd:\\7.csv' 
filePath8 = 'd:\\8.csv' 


a = datetime.datetime.now()

root, depth = buildPrefixTree(filePath4)
if root == -1:
    print('no composite key')
else:
    finder = NonKeyFinder(depth)
    finder.find(root, 0)
    kFinder = KeyFinder(depth)
    keys = kFinder.find(finder.NonKeySet)

    print(decode(finder.NonKeySet, depth))
    print(decode(keys, depth))

    print(0)

b = datetime.datetime.now()
delta = b - a
print(int(delta.total_seconds() * 1000))

def process_data2(DEBUG):
    pd.set_option('display.width', 1000)
    pd.set_option('display.max_colwidth', 99999)
    pd.set_option('display.float_format', lambda x: '%.6f' % x)  # suppresses scientific notation of floats, e.g. 1.298e+06

    """### Dataset Path Binding"""

    path_json_directions_file = './data/100directions.json'
    path_bikesharing_data = './data/bikesharing_data.csv'

    """### Binding to Dataframes"""

    df_bikesharing = pd.read_csv(path_bikesharing_data)

    # Step 2.2: Load JSON file
    # since the dataset is in a JSON file, then we need
    # to open it using a JSON function.
    with open(path_json_directions_file, 'r') as f:
        google_directions_json = json.load(f)

    """### We take the first n numbers so that they could be matched with their respective directions"""

    df_bikesharing = df_bikesharing.head(len(google_directions_json))

    # We have 100 trips (one per direction response in the JSON file)

    """### Obtain trip duration from Google Directions"""

    df_bikesharing['g_directions_duration'] = 0

    """### Obtain duration from JSON reponses and save them into a new column"""

    for x in range(len(google_directions_json)):
        df_bikesharing['g_directions_duration'].at[x] = \
            google_directions_json[str(x + 1)]['routes'][0]['legs'][0]['duration']['value']

    """### Convert start/end time in the CSV to datetime objects"""

    df_bikesharing['start_datetime'] = pd.to_datetime(
        df_bikesharing['start_datetime'])
    df_bikesharing['end_datetime'] = pd.to_datetime(
        df_bikesharing['end_datetime'])

    """### Change Datetime columns to Epoch"""

    df_bikesharing['start_seconds'] = (
        (df_bikesharing['start_datetime'].astype(np.int64) // 10**9))
    df_bikesharing['end_seconds'] = (
        (df_bikesharing['end_datetime'].astype(np.int64) // 10**9))

    """## Step 5: Add trip start time from ODK to Google directions trip duration

    ### We calculate the end trip time by adding the start_seconds and the duration from Google Directions together in a new column
    """

    df_bikesharing['g_end_seconds'] = df_bikesharing['g_directions_duration'] + \
                                      df_bikesharing['start_seconds']

    """## Prepare to decode the polyline and save its mercato coords in a cell

    ### We create a new column for the polyline_code from Google Directions
    """

    df_bikesharing['polyline_code'] = 'a'

    """### We fill the cells of the column from the JSON response for each trip"""

    for x in range(len(google_directions_json)):
        df_bikesharing['polyline_code'].at[x] = \
            google_directions_json[str(x + 1)]['routes'][0]['overview_polyline']['points']

    """## Step 7: We decode the lng/lat coords from the polyline and put them into a separate dataframe for further processing.

    ### Step 7.1: Define lists that will be populated in the loop
    """

    lat_coords = list()
    lng_coords = list()
    trip_no = list()
    tick_list = list()
    step_list = list()

    """### Step 7.2: Populate our lists by decoding the polylines of each trip and by appending the trip number from a simple iterator "i"."""
    for x in range(len(google_directions_json)):
        decodedCoordsList = decode(
            google_directions_json[str(x+1)]['routes'][0]['overview_polyline']['points'])

        gDirectionsDuration = df_bikesharing['g_directions_duration'].at[x]
        startSecondsinEpoch = df_bikesharing['start_seconds'].at[x]
        global step
        step = round(gDirectionsDuration/len(decodedCoordsList))

        for i in range(len(decodedCoordsList)):
            lat_coords.append(decodedCoordsList[i][1])
            lng_coords.append(decodedCoordsList[i][0])
            trip_no.append(x)
            tick_list.append((step*i)+startSecondsinEpoch)
            step_list.append(step)
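
    # Note: `decode` above is assumed to be a Google-polyline decoder that
    # yields (lng, lat) pairs -- the ([i][1] -> lat, [i][0] -> lng) indexing
    # matches, e.g., polyline.decode(code, geojson=True) from the third-party
    # `polyline` package (an assumption; the import is not shown).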

    """### Step 7.3: Create a dataframe object to hold the decoded coordinates and other data in step 7.2"""

    df_decoded_polylines = pd.DataFrame(
        {'tick': tick_list, 'lat': lat_coords, 'lng': lng_coords, 'trip_no': trip_no, 'step_to_next': step_list})

    """### Step 7.4: Calculate the mercato x,y coords from the lng,lat lists and save them into columns"""

    df_decoded_polylines['mercato_x'], df_decoded_polylines['mercato_y'] = calculateMercatoProjectionFromLongAndLat(
        df_decoded_polylines['lng'].to_list(), df_decoded_polylines['lat'].to_list())

    df_decoded_polylines

    """### We remove the e+09 values from the tick by setting the column 'tick' into int

    [Convert from epoch to datetime](https://stackoverflow.com/questions/16517240/pandas-using-unix-epoch-timestamp-as-datetime-index)
    """

    df_decoded_polylines = df_decoded_polylines.astype({'tick': 'datetime64[s]'})

    """### Create a multi-index to have trip_ids inside seconds"""

    #df_decoded_polylines.set_index(['tick', 'trip_no'], inplace=True)

    double_index = df_decoded_polylines.set_index(['tick', 'trip_no'])
    double_index.sort_index(inplace=True)

    #df_decoded_polylines.reindex(pd.date_range(df_bikesharing['start_seconds'].head(1).values[0],end=df_bikesharing['g_end_seconds'].tail(1).values[0], periods=1))
    double_index

    """### We sort the index for the hierarchy to take effect
    [Section: The Multi-index of a pandas DataFrame](https://www.datacamp.com/community/tutorials/pandas-multi-index)
    * Finds out duplicate values: ```df_decoded_polylines[df_decoded_polylines.index.duplicated()]```

    * [Reindex Multi-Index](https://stackoverflow.com/questions/53286882/pandas-reindex-a-multiindex-dataframe)
    """

    df_decoded_polylines.sort_index(inplace=True)

    double_index.to_html('./debug/double_index.html')
    double_index.loc['2019-07-01 02:06:30']

    """### Fill the missing seconds

    *Journal*: Trying the question on Github
    * [Source](https://github.com/pandas-dev/pandas/issues/28313)

    > This works, checkout the output

    > Bug: does not interp my columns

    * [Resampling and Interpolating](https://machinelearningmastery.com/resample-interpolate-time-series-data-python/)

    * [How to select rows in a DataFrame between two values, in Python Pandas?](https://stackoverflow.com/questions/31617845/how-to-select-rows-in-a-dataframe-between-two-values-in-python-pandas)
    """

    # Fill the missing seconds
    print('[+] 198: Filling the missing seconds')
    github_interpd = (double_index.reset_index('trip_no')
                      .groupby('trip_no', group_keys=False)
                      .resample('S').pad()
                      .reset_index()
                      .set_index(['tick', 'trip_no']))
    print('[-] Filling the missing seconds')
    # github_interpd['mercato_x'].loc[:,0]

    # Sort the index
    github_interpd.sort_index(inplace=True)
    # github_interpd.to_html('github_i.html')

    # Output the dataframe
    github_interpd

    """### Remove duplicate coordinates because the function padded them.

    #### Create a column to detect duplicates later on and set it to NaN
    """

    # Create a new column for detecting duplicates and set it to NaN
    github_interpd['duplicate'] = np.nan

    """#### [Unstack](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.unstack.html) the dataframe

    > We used ```-1``` for the level because we want to unstack the ```trip_ids``` ***not*** the ```timestamp```.
    """
    print('[+] 222: Unstacking..')
    g = github_interpd.unstack(level=-1, fill_value=None)
    print('[-] Finished unstacking')

    """#### Convert the index to a UNIX Timestamp"""

    # Convert index to UNIX Timestamp
    print('[+] 222: Converting to Unix Timestamp..')
    g.index = g.index.map(np.int64) // 10**9  # Pandarallel
    print('[-] Done with conversion')

    # Checkout our progress so far
    if DEBUG is True: g.to_html('./debug/unstack_w_epoch.html')

    """#### Create child columns inside our ```duplicate``` column"""

    # Create a column for detecting duplicate values and set its type to bool
    print('[+] 239: Creating a boolean column')
    g['duplicate'] = g['duplicate'].astype(bool)
    print('[-] Creating a boolean column - Done')

    """#### We ascertain which coordinates are duplicates for all trips
    > We only need to do this for one coordinate because the coordinates are in pairs.

    > *Journal: I tried in many ways to find an alternative that doesn't use a for-loop but I couldn't.*
    """

    # Output the result of the duplicated() function into each trip
    for i in range(len(g['duplicate'].columns)):
        g['duplicate', i] = g['lat', i].duplicated().values

    """#### Set the duplicate values in each coordinate to NaN if they were a duplicate"""

    # Set the duplicate values in each coordinate to NaN if they were a duplicate
    g['lat'] = g['lat'].where((g['duplicate'] == False), np.NaN)
    g['lng'] = g['lng'].where((g['duplicate'] == False), np.NaN)
    g['mercato_x'] = g['mercato_x'].where((g['duplicate'] == False), np.NaN)
    g['mercato_y'] = g['mercato_y'].where((g['duplicate'] == False), np.NaN)

    """#### Interpolate the value between those two sets of coordinates linearly
    > *Journal: I limited the interpolation to 1 minute because if we didn't specify it then the trip would just remain on the map until the end of the slider.*

    >> Hint: if you are dealing with nested columns, then you need to talk to it via
    ```df['COL_NAME', <INDEX OR CHILD COLUMN NAME>]``` e.g. ```df['parent', 0:10]``` _**not**_ using ```df['parent'][0]``` as this would be understood as row number.
    """

    # Interpolate the empty values between each coordinate (accurate up to 1 minute)
    print('[+] 264: Interpolating...')
    g['lat'] = g['lat'].interpolate(limit=60)
    g['lng'] = g['lng'].interpolate(limit=60)
    g['mercato_x'] = g['mercato_x'].interpolate(limit=60)
    g['mercato_y'] = g['mercato_y'].interpolate(limit=60)
    print('[-] Interpolation finished')

    """#### Checkout our result!"""

    # Output the dataframe into an HTML file.
    if (DEBUG is True): g.to_html('./debug/interpolated.html')

    """# TEST: TO DICTIONARY AND DATASOURCE

    * [Convert A Level in a Multi-index to a another datatype](https://stackoverflow.com/questions/34417970/pandas-convert-index-type-in-multiindex-dataframe)

    * [Dataframe with multi-index to_dict()](https://stackoverflow.com/questions/39067831/dataframe-with-multiindex-to-dict)

    * [Multi-index from List of Lists with irregular length](https://stackoverflow.com/questions/58940018/multiindex-from-lists-of-lists-with-irregular-length)
    * [DataFrame with MultiIndex to dict](https://stackoverflow.com/questions/24988131/nested-dictionary-to-multiindex-dataframe-where-dictionary-keys-are-column-label)
    * [Nested dictionary to multiindex dataframe where dictionary keys are column labels](https://stackoverflow.com/questions/24988131/nested-dictionary-to-multiindex-dataframe-where-dictionary-keys-are-column-label)
    * [Python Dictionary Comprehension Tutorial](https://www.datacamp.com/community/tutorials/python-dictionary-comprehension)


    >> Hint: If you want to join two dataframes horizontally, then use ```result = pd.concat([g_duplicated, g_lat], axis=1)```

    ### Merge all of the columns into a list
    Resource: [Merge multiple column values into one column in python pandas
    ](https://stackoverflow.com/questions/33098383/merge-multiple-column-values-into-one-column-in-python-pandas)
    """

    # Merge all columns into a list
    # https://stackoverflow.com/questions/33098383/merge-multiple-column-values-into-one-column-in-python-pandas

    """#### Create A New Dataframe From The Coordinates For Each Trip"""

    # create a new dataframe from the coordinates for each trip
    tmp_mrx = g['mercato_x']
    tmp_mry = g['mercato_y']
    tmp_lat = g['lat']
    tmp_lng = g['lng']

    """#### Drop NA Values & Save Them As Lists In Their Cells"""

    # DropNA values and join the coordinates for each trip in each second as a string
    # TODO: try to implement this by using only lists.
    print('[+] 310: Applying...')
    tmp_mrx['merged_mrx'] = tmp_mrx[tmp_mrx.columns[0:]].apply(lambda x:','.join(x.dropna().astype(str)), axis=1)
    tmp_mry['merged_mry'] = tmp_mry[tmp_mry.columns[0:]].apply(lambda x:','.join(x.dropna().astype(str)), axis=1)
    tmp_lat['merged_lat'] = tmp_lat[tmp_lat.columns[0:]].apply(lambda x:','.join(x.dropna().astype(str)), axis=1)
    tmp_lng['merged_lng'] = tmp_lng[tmp_lng.columns[0:]].apply(lambda x:','.join(x.dropna().astype(str)), axis=1)

    # split the resulting string into a list of floats
    tmp_mrx['merged_mrx'] = tmp_mrx.merged_mrx.apply(lambda s: [float(x.strip(' []')) for x in s.split(',')])
    tmp_mry['merged_mry'] = tmp_mry.merged_mry.apply(lambda s: [float(x.strip(' []')) for x in s.split(',')])
    tmp_lat['merged_lat'] = tmp_lat.merged_lat.apply(lambda s: [float(x.strip(' []')) for x in s.split(',')])
    tmp_lng['merged_lng'] = tmp_lng.merged_lng.apply(lambda s: [float(x.strip(' []')) for x in s.split(',')])
    print('[-]: Finished applying...')
    print('----------------------------------------------------------------------')


    """#### Checkout Split Values For 1 Coordinate"""

    # checkout our progress for one of the variables so far
    if (DEBUG is True): tmp_lat.to_html('./debug/merged.html')

    """#### Merge Columns Into The Master Dataframe"""

    # Merge those columns into our master dataframe
    g['merged_mrx'] = tmp_mrx['merged_mrx']
    g['merged_mry'] = tmp_mry['merged_mry']
    g['merged_lat'] = tmp_lat['merged_lat']
    g['merged_lng'] = tmp_lng['merged_lng']

    #print(type(tmp_lat['merged_lat'])) #series

    """### Prepare for Visualization"""

    # We prepare a new dataframe for the visualization
    visualization = g
    visualization

    """#### We Drop Extra Columns That We Don't Need"""

    # We drop the extra columns as we only need the coords for each trip as a list
    print('[+] 353: Removing Unnecessary Columns...')
    visualization = visualization.drop(['lat', 'lng', 'mercato_x', 'mercato_y', 'duplicate', 'step_to_next'], axis=1)
    visualization

    """#### We drop the child level from the multi-level column index
    Resource: [Pandas: drop a level from a multi-level column index?](https://stackoverflow.com/questions/22233488/pandas-drop-a-level-from-a-multi-level-column-index)
    """

    # We drop the child level from the multi-level column index
    # https://stackoverflow.com/questions/22233488/pandas-drop-a-level-from-a-multi-level-column-index
    visualization['new_index'] = visualization.index
    visualization.columns = visualization.columns.droplevel(1)
    visualization.set_index('new_index', inplace=True)
    if DEBUG is True: visualization.to_html('visualization.html')

    """#### We create a dict with the key as the index of our dataframe"""

    # We create a dict with the key as the index of our dataframe
    ready_dict = visualization.to_dict('index')

    print("[+] Done with the upscaling and interpolation function")
    if DEBUG is True:
        visualization.to_parquet('./data/demo.parquet', engine='pyarrow')
        print('[+] Outputted the result of the function into >> ./data/demo.parquet for later demonstration purposes')

    print("[+][+][+][+][+] We Rowdy! [+][+][+][+][+][+]")
    return ready_dict
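
calculateMercatoProjectionFromLongAndLat is never shown above. A minimal sketch, assuming it is a plain spherical web-Mercator (EPSG:3857) projection over parallel lists of degrees:

import math

EARTH_RADIUS = 6378137.0  # web-Mercator sphere radius in metres

def calculateMercatoProjectionFromLongAndLat(lngs, lats):
    # hypothetical re-implementation: x = R * lon_rad,
    # y = R * ln(tan(pi/4 + lat_rad / 2))
    xs = [EARTH_RADIUS * math.radians(lng) for lng in lngs]
    ys = [EARTH_RADIUS * math.log(math.tan(math.pi / 4 + math.radians(lat) / 2))
          for lat in lats]
    return xs, ys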