Example #1
def user_login():
    """ Handles password login """
    try:
        hashfunc = hashlib.sha256()
        hashfunc.update(request.form["password"].encode("utf-8"))

        stored_hash = storage.retrieve("config", "password_hash")
        if stored_hash is not None:
            salt = storage.retrieve("config", "password_salt")["value"]
            hashfunc.update(salt.encode('ascii'))
            calc_hash = base64.b64encode(hashfunc.digest()).decode('ascii')
            if calc_hash != stored_hash["value"]:
                return render_template("message.html", msgtype="danger", msg=\
                    'Invalid password! - To try again, '\
                    '<a href="/">click here</a>')
        else:
            # first-time login, so store the salt and password hash
            salt = secrets.token_urlsafe(32)
            hashfunc.update(salt.encode('ascii'))
            calc_hash = base64.b64encode(hashfunc.digest()).decode('ascii')
            storage.store("config", "password_salt", {"value": salt})
            storage.store("config", "password_hash", {"value": calc_hash})

        session = secrets.token_urlsafe(32)
        util.save_session_cookie(session)

        resp = make_response(redirect('/'))
        resp.set_cookie("session", session)
        return resp

    except Exception:
        traceback.print_exc()
        return render_template("message.html", msgtype="danger", msg=\
            'An unknown exception occurred. See the logs. <br><br>'\
            '<a href="/">Click here to return home</a>')
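
For reference, a minimal standalone sketch of the hash scheme used above (SHA-256 over the password bytes followed by the salt bytes, base64-encoded digest); the function name is illustrative and not part of the original module. Note the original compares hashes with !=; hmac.compare_digest would avoid timing side channels.

import base64
import hashlib
import secrets

def make_password_hash(password, salt=None):
    # Mirrors user_login(): base64(sha256(password_utf8 + salt_ascii))
    if salt is None:
        salt = secrets.token_urlsafe(32)  # same salt size as above
    hashfunc = hashlib.sha256()
    hashfunc.update(password.encode("utf-8"))
    hashfunc.update(salt.encode("ascii"))
    return salt, base64.b64encode(hashfunc.digest()).decode("ascii")

salt, stored = make_password_hash("correct horse battery staple")
assert make_password_hash("correct horse battery staple", salt)[1] == stored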
Example #2
def process_bunq_accounts_callback(accounts):
    """ Process bunq accounts with the callback dataset of accounts """
    global _BUNQ_ACCOUNTS_CALLBACK
    get_bunq_accounts_callback()

    for acc2 in accounts:
        found = False
        for acc in _BUNQ_ACCOUNTS_CALLBACK:
            if acc["iban"] == acc2["iban"]:
                # update account
                found = True
                acc["id"] = acc2["id"]
                acc["name"] = acc2["name"]
                acc["type"] = acc2["type"]
                acc["description"] = acc2["description"]
                acc["enableMutation"] = acc2["enableMutation"]
                acc["enableRequest"] = acc2["enableRequest"]
                acc["callbackMutation"] = acc2["callbackMutation"]
                acc["callbackRequest"] = acc2["callbackRequest"]
                acc["callbackOther"] = acc2["callbackOther"]
                storage.store("account_callback", acc["iban"], {"value": acc})
        if not found:
            # new account
            storage.store("account_callback", acc2["iban"], {"value": acc2})
            _BUNQ_ACCOUNTS_CALLBACK.append(acc2)

    # remove deleted
    newaccs = []
    ibans = [x["iban"] for x in accounts]
    for acc in _BUNQ_ACCOUNTS_CALLBACK:
        if acc["iban"] in ibans:
            newaccs.append(acc)
        else:
            storage.remove("account_callback", acc["iban"])
    _BUNQ_ACCOUNTS_CALLBACK = sorted(newaccs, key=lambda k: k["description"])
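
The update/insert/remove pattern above recurs in Example #17; a minimal in-memory sketch of the same merge logic, with illustrative names and no storage backend:

def sync_accounts(current, incoming, key="iban"):
    # Update matches in place, append new entries, and drop entries
    # whose key no longer appears in the incoming dataset.
    by_key = {acc[key]: acc for acc in current}
    merged = []
    for acc in incoming:
        if acc[key] in by_key:
            by_key[acc[key]].update(acc)   # update existing account
            merged.append(by_key[acc[key]])
        else:
            merged.append(acc)             # new account
    return sorted(merged, key=lambda a: a["description"])

old = [{"iban": "NL01", "description": "main"}, {"iban": "NL02", "description": "old"}]
new = [{"iban": "NL01", "description": "main renamed"}]
assert sync_accounts(old, new) == [{"iban": "NL01", "description": "main renamed"}]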
Example #3
def save_app_mode(value):
    """ Save the app mode """
    global _APP_MODE
    _APP_MODE = value
    if value is None:
        storage.remove("config", "app_mode")
    else:
        storage.store("config", "app_mode", {"value": value})
Example #4
def save_bunq_security_mode(value):
    """ Save the bunq security mode """
    global _BUNQ_SECURITY_MODE
    _BUNQ_SECURITY_MODE = value
    if value is None:
        storage.remove("config", "bunq_security_mode")
    else:
        storage.store("config", "bunq_security_mode", {"value": value})
Example #5
def retrieve_and_save_bunq_userid():
    """ Retrieve the bunq userid from bunq and save it """
    global _BUNQ_USERID
    result = bunq.get("v1/user")
    for user in result["Response"]:
        for typ in user:
            _BUNQ_USERID = user[typ]["id"]
    storage.store("config", "bunq_userid", {"value": _BUNQ_USERID})
Example #6
    def _mock_insert_document(self, url):
        """A function that inserts a url into a document db table
        and then returns that newly inserted document's id."""
        ret_id = self._mock_next_doc_id
        self._mock_next_doc_id += 1
        dic = {'url': url, 'id': ret_id, 'title': "", 'elem': "", 'img': ""}
        store('documet_index', dic)
        return ret_id
Example #7
    def _mock_insert_word(self, word):
        """A function that inserts a word into the lexicon db table
        and then returns that newly inserted word's id."""
        ret_id = self._mock_next_word_id
        self._mock_next_word_id += 1
        dic = {'word': word, 'id': ret_id}
        store('lexicon', dic)
        return ret_id
Example #8
def save_app_master_url(value):
    """ Save the URL of the master instance """
    global _APP_MASTER_URL
    _APP_MASTER_URL = value
    if value is None:
        storage.remove("config", "app_master_url")
    else:
        storage.store("config", "app_master_url", {"value": value})
Example #9
def refresh_session_token():
    """ Refresh an expired session token """
    global _SESSION_TOKEN
    print("[bunq] Refreshing session token...")
    data = {"secret": get_access_token()}
    result = post("v1/session-server", data)
    if "Response" in result:
        _SESSION_TOKEN = result["Response"][1]["Token"]["token"]
        storage.store("config", "bunq_session_token", {"value": \
            _SESSION_TOKEN})
Example #10
    def crawl(self, depth=2, timeout=3):
        """Crawl the web!"""
        seen = set()
        while len(self._url_queue):

            url, depth_ = self._url_queue.pop()

            # skip this url; it's too deep
            if depth_ > depth:
                continue

            doc_id = self.document_id(url)

            # we've already seen this document
            if doc_id in seen:
                continue

            seen.add(doc_id)  # mark this document as visited

            socket = None
            try:
                socket = urllib2.urlopen(url, timeout=timeout)
                soup = BeautifulSoup(socket.read())

                self._curr_depth = depth_ + 1
                self._curr_url = url
                self._curr_doc_id = doc_id
                self._font_size = 0
                self._curr_words = []
                self._index_document(soup)
                self._add_words_to_document()
                #print ("    url=" + repr(self._curr_url))

            except Exception as e:
                print(e)
            finally:
                if socket:
                    socket.close()

        # NEW in lab3: add the inverted index to the db
        for i in self._inverted_index:
            dic = {'words_id': i, 'doc_id': list(self._inverted_index[i])}
            store('inverted_index', dic)

        # NEW for lab3: add the pagerank
        self._rank_page = page_rank(
            list(zip(self._from_doc_list, self._to_doc_list)))
        for i in self._rank_page:
            dic = {'doc_id': i, 'score': self._rank_page[i]}
            store('page_rank', dic)
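
The final loops above persist an inverted index and pagerank scores row by row; a small in-memory illustration of what the inverted index holds (word id mapped to the ids of documents containing that word), using made-up data:

from collections import defaultdict

documents = {1: ["the", "quick", "fox"], 2: ["the", "lazy", "dog"]}
word_ids = {w: i for i, w in
            enumerate(sorted({w for ws in documents.values() for w in ws}))}

inverted_index = defaultdict(set)
for doc_id, words in documents.items():
    for word in words:
        inverted_index[word_ids[word]].add(doc_id)

assert inverted_index[word_ids["the"]] == {1, 2}  # "the" occurs in both documents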
Example #11
    def word_id(self, word):  # word_id_cache is the same with lexicon
        """Get the word id of some specific word."""
        if word in self._word_id_cache:
            return self._word_id_cache[word]

        word_id = self._mock_insert_word(word)
        self._word_id_cache[word] = word_id

        # NEW for lab3: add into the database
        dic = {'word': word, 'id': word_id}
        store('lexicon', dic)
        return word_id
Example #12
    def check(self):
        """Throws an exception if the throttle rate has been exhausted.
        """
        if self._rate is not None:
            if self._rate.limit > 0:
                new_limit = self._rate.limit - 1
                storage.store(self._configuration, {
                    'isbndb': {
                        'rate': Rate(limit=new_limit, date=date.today())
                    }
                })
            else:
                raise Exception("Calls to ISBNDB are throttled. "
                                "Check the configuration.")
Example #13
    def document_id(self, url):
        """Get the document id for some url."""
        if url in self._doc_id_cache:
            return self._doc_id_cache[url]

        # TODO: just like word id cache, but for documents. if the document
        #       doesn't exist in the db then only insert the url and leave
        #       the rest to their defaults.

        doc_id = self._mock_insert_document(url)
        self._doc_id_cache[url] = doc_id
        # NEW in lab3: add doc id and doc title to the database
        dic = {'url': url, 'id': doc_id, 'title': "", 'elem': ""}
        store('documet_index', dic)
        return doc_id
Example #14
    def check(self):
        """Throws an exception if the throttle rate has been exhausted.
        """
        if self._rate is not None:
            if self._rate.limit > 0:
                new_limit = self._rate.limit - 1
                storage.store(
                    self._configuration, {
                        'isbndb': {
                            'rate': Rate(limit=new_limit,
                                         date=date.today())
                        }
                    })
            else:
                raise Exception("Calls to ISBNDB are throttled. "
                                "Check the configuration.")
Example #15
    def execute(self):
        """Updates the library.
        """
        books = []
        directory = self._configuration['directory']
        if not exists(directory):
            return None, Error('Cannot open library: %s' % directory)
        moves, books = files.find_moves(self._configuration, directory)
        moved = 0
        if self._configuration['import']['move']:
            moved = files.move_to_library(self._configuration, moves)
            if self._configuration['import']['prune']:
                files.prune(self._configuration)
        found = len(books)
        if found > 0:
            storage.store(self._configuration, {'library': books})

        self.out('Updated %d %s, moved %d.' %
                 (found, found != 1 and 'books' or 'book', moved))
        return None, None
Example #16
    def execute(self):
        """Updates the library.
        """
        books = []
        directory = self._configuration['directory']
        if not exists(directory):
            return None, Error('Cannot open library: %s' % directory)
        moves, books = files.find_moves(self._configuration, directory)
        moved = 0
        if self._configuration['import']['move']:
            moved = files.move_to_library(self._configuration, moves)
            if self._configuration['import']['prune']:
                files.prune(self._configuration)
        found = len(books)
        if found > 0:
            storage.store(self._configuration, {'library': books})

        self.out('Updated %d %s, moved %d.' % (
            found, found != 1 and 'books' or 'book', moved
        ))
        return None, None
Example #17
def process_bunq_accounts_local(accounts):
    """ Process retrieved bunq accounts with the local dataset of accounts """
    global _BUNQ_ACCOUNTS_LOCAL
    get_bunq_accounts_local()

    default_internal = True
    default_draft = True
    for acc in _BUNQ_ACCOUNTS_LOCAL:
        # leave defaults True only if all existing accounts have True
        default_internal &= acc["enableInternal"]
        default_draft &= acc["enableDraft"]

    for acc2 in accounts:
        found = False
        for acc in _BUNQ_ACCOUNTS_LOCAL:
            if acc["iban"] == acc2["iban"]:
                # update account
                found = True
                acc["id"] = acc2["id"]
                acc["name"] = acc2["name"]
                acc["type"] = acc2["type"]
                acc["description"] = acc2["description"]
                storage.store("account_local", acc["iban"], {"value": acc})
        if not found:
            # new account
            acc2["enableInternal"] = default_internal
            acc2["enableDraft"] = default_draft
            acc2["enableExternal"] = False
            storage.store("account_local", acc2["iban"], {"value": acc2})
            _BUNQ_ACCOUNTS_LOCAL.append(acc2)

    # remove deleted
    newaccs = []
    ibans = [x["iban"] for x in accounts]
    for acc in _BUNQ_ACCOUNTS_LOCAL:
        if acc["iban"] in ibans:
            newaccs.append(acc)
        else:
            storage.remove("account_local", acc["iban"])
    _BUNQ_ACCOUNTS_LOCAL = sorted(newaccs, key=lambda k: k["description"])
Example #18
def store():
    ''' Stores the posted data to the mongo '''
    jsonAsDict = getDictFromPost(request)
    listName = jsonAsDict.get('listName')
    name = jsonAsDict.get('name')
    amount = Decimal(jsonAsDict.get('amount')) * Decimal('100')
    comment = jsonAsDict.get('comment')
    if (listName is not None and listName != '' and listName != 'undefined'
            and name is not None and name != '' and name != 'undefined'
            and amount is not None and amount >= 0):
        storedObjectDict = storage.store(listName, current_identity, name, int(amount), comment)
        if storedObjectDict:
            storedObjectDict['listName'] = listName
            return json.dumps(storedObjectDict)
        return Response('List is locked, cannot store', 409)
    return Response('Wrong format, will not store.', 400)
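
Example #18 converts the posted amount to integer cents before storing; a two-line demonstration of why the conversion goes through Decimal rather than float arithmetic:

from decimal import Decimal

print(int(Decimal("19.99") * Decimal("100")))  # 1999, exact
print(int(19.99 * 100))                        # 1998, float error truncates down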
Example #19
def set_bunq_oauth_api_key():
    """ Handles bunq OAuth id/secret submission or API key submission """
    try:
        key = request.form["bunqkey"]
        allips = False
        if "allips" in request.form and request.form["allips"] == 'on':
            allips = True
        print("allips:", allips)
        tokens = re.split("[:, \r\n\t]+", key.strip())
        if len(tokens) == 6 and len(tokens[2]) == 64 and len(tokens[5]) == 64:
            # OAuth client id/secret submitted
            storage.store("config", "bunq_client_id", {"value": tokens[2]})
            storage.store("config", "bunq_client_secret", {"value": tokens[5]})
            storage.store("config", "bunq_allips", {"value": allips})
            redirect_url = request.url_root + "auth"
            url = "https://oauth.bunq.com/auth?response_type=code"\
                  "&client_id=" + tokens[2] + \
                  "&redirect_uri=" + redirect_url
            return render_template("message.html", msgtype="primary", msg=\
                "Make sure the following URL is included as a redirect url:"\
                "<br><br><b>" + redirect_url + "</b><br><br>"\
                'Then click <a href="' + url + '">this link</a>')
        if len(tokens) == 1 and len(tokens[0]) == 64:
            # API key submitted
            try:
                bunq.install(key, allips=allips)
                util.save_bunq_security_mode("API key")
                util.retrieve_and_save_bunq_userid()
                util.update_bunq_accounts()
                return render_template("message.html", msgtype="success", msg=\
                    'API key successfully installed <br><br>'\
                    '<a href="/">Click here to return home</a>')
            except Exception:
                traceback.print_exc()
                return render_template("message.html", msgtype="danger", msg=\
                    'An exception occurred while installing the API key. '\
                    'See the logs. <br><br>'\
                    '<a href="/">Click here to try again</a>')
        print("Invalid key: ", key)
        return render_template("message.html", msgtype="danger", msg=\
            'No valid API key or OAuth client id/secret found!<br><br>'\
            '<a href="/">Click here to return home</a>')
    except Exception:
        traceback.print_exc()
        return render_template("message.html", msgtype="danger", msg=\
            'An unknown exception occurred. See the logs. <br><br>'\
            '<a href="/">Click here to return home</a>')
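
The re.split above accepts either a bare 64-character API key or an OAuth id/secret pasted as labelled values; a quick illustration of how the six-token case lines up (the pasted text below is a made-up shape, not real credentials):

import re

pasted = "Client ID: " + "a" * 64 + "\nClient Secret: " + "b" * 64
tokens = re.split("[:, \r\n\t]+", pasted.strip())
assert len(tokens) == 6
assert tokens[2] == "a" * 64 and tokens[5] == "b" * 64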
Example #20
def spotfi(csi, calibration_possibility, lookupfiles, storagefiles):

    eprint("Running spotfi algorithm")
    startTime = time.time()

    scaled_csi = []
    for csi_info in csi:
        csi_matrix = [[[complex(sub['real'], sub['imag']) for sub in rx]
                       for rx in tx] for tx in csi_info['csi_matrix']]
        csi_matrix = np.array(csi_matrix)
        #scaled_csi.append(get_scaled_csi(csi_info['csi_status'], csi_matrix)[:, 0, 0:SUBCARRIERS_USED])
        # TODO: compare
        scaled_csi.append(csi_matrix[:, 0, 0:SUBCARRIERS_USED]
                          )  # cut to 1st TX antenna, SUBCARRIERS_USED carriers

    # Sanitize ToF (remove sampling time offset (STO))
    sanitized_csi = list(map(sanitizeToF, scaled_csi))

    # Construct virtual antenna array matrix based on actual csi to be used for music
    smoothed_csi = list(map(smoothCSI, sanitized_csi))

    # computed paths: D, the number of multipaths, E_n is the matrix of eigenvectors corresponding to the M-D smallest eigenvalues
    E_n = []
    computed_paths = []
    for s in smoothed_csi:
        # Construct noise space matrix of covariance matrix X*X_H
        p, E = constructNoiseSubspaceMatrix(s)
        computed_paths.append(p)
        E_n.append(E)

    E_nH = list(map(lambda e: e.conj().T,
                    E_n))  # precompute conjugate transpose

    # The spectrum repeats roughly every pi across theta; small deviations mean
    # exact repetition only appears after 2 pi
    theta = np.linspace(-1 * np.pi, 1 * np.pi,
                        92)  # -90° to 90° # TODO: -0.5 pi -> 0.5 pi
    tau = np.linspace(-20 / speed_of_light, 20 / speed_of_light,
                      100)  # TODO: choose better

    assert len(theta) % 4 == 0, "theta samples need to be divisible by 4"

    # Load lookup data if present
    lookup_spectrum_db = []
    if len(lookupfiles) == len(E_n):
        eprint("Loading music spectrums from lookup file...")
        for l_file in lookupfiles:
            lookup_spectrum_db.append(lookup(l_file))

    # Calculate music spectrum
    music_spec_db = []
    peaks = []
    batch_count = 4
    batch_size = len(theta) // batch_count
    for packet in range(len(E_n)):

        pool = mp.Pool(batch_count)

        if packet < len(
                lookup_spectrum_db) and lookup_spectrum_db[packet] is not None:
            eprint("Found lookup music spectrum for packet: ", packet)
            music_spec_db.append(lookup_spectrum_db[packet])
        else:
            eprint("Calculating music spectrum for packet: ", packet)
            music_spec = np.zeros((len(theta), len(tau)))
            music_spec = np.concatenate(
                tuple([
                    pool.apply(musicSpectrumFuncRange,
                               args=(theta[(i * batch_size):((i + 1) *
                                                             batch_size)], tau,
                                     E_n[packet], E_nH[packet]))
                    for i in range(batch_count)
                ]))
            music_spec_db.append(10.0 * np.log10(abs(music_spec)))

            if len(storagefiles) == len(E_n):
                store(music_spec_db[len(music_spec_db) - 1],
                      storagefiles[packet])
                eprint("Storing into: ", storagefiles[packet])

        pool.close()

        # Search for peaks in the spectrum and obtain their AoA and ToF
        packet_peaks = searchPeaks(music_spec_db[packet], theta, tau,
                                   computed_paths[packet])
        peaks.append(packet_peaks)

        if len(storagefiles) == len(E_n):
            save_peaks(packet_peaks, storagefiles[packet])

    # Cluster peaks
    flat_peaks = reduce(lambda x, y: x + y, peaks)
    filtered_peaks, removed_peaks = filterIndividualPoints(flat_peaks)
    filtered_peaks, rp = filterSpread(filtered_peaks)
    removed_peaks.extend(rp)
    cluster_means, cluster_covariances, weight_per_mean, height_per_mean = clusterGMM(
        filtered_peaks)  # weight is the sum of probabilities, not normalized!

    likelihood_per_mean = []
    for i in range(len(cluster_means)):
        likelihood = 0

        likelihood += CLUSTER_WEIGHT_POINTS * (weight_per_mean[i] /
                                               len(filtered_peaks))
        likelihood -= CLUSTER_WEIGHT_THETA_VAR * (
            cluster_covariances[i][0][0]
        )  # the diagonal entries of the covariance matrix are the variances
        likelihood -= CLUSTER_WEIGHT_TAU_VAR * (cluster_covariances[i][1][1])
        likelihood -= CLUSTER_WEIGHT_SMALL_TOF * (cluster_means[i][1])

        likelihood_per_mean.append(np.exp(likelihood))
        eprint("theta: %.2f" % np.degrees(cluster_means[i][0]))
        eprint("points: %.2f, theta_var: %.2f, tau_var: %.2f, tof: %.2f" %
               (CLUSTER_WEIGHT_POINTS *
                (weight_per_mean[i] / len(filtered_peaks)),
                CLUSTER_WEIGHT_THETA_VAR *
                (cluster_covariances[i][0][0]), CLUSTER_WEIGHT_TAU_VAR *
                (cluster_covariances[i][1][1]), CLUSTER_WEIGHT_SMALL_TOF *
                (cluster_means[i][1])))
        eprint("likelihood: %.2f, exp: %f\n" %
               (likelihood, np.exp(likelihood)))

    # TODO: filter likelihoods
    # TODO: combine likelihoods (180 degree, similar angles)

    sys.stderr.flush()

    # ---------------------
    # MUSIC Spectrum Plot:
    # ---------------------
    figure = plt.figure()
    mplot3d  # the sole purpose of this is to prevent the mplot3d import from being optimized away by my IDE
    axes = plt.axes(projection='3d')
    # axes.set_title('calibration %d' % calibration_possibility)
    axes.set_xlabel('theta (degree)')
    axes.set_ylabel('tau (meter)')
    music_spec_db = music_spec_db[0].swapaxes(0,
                                              1)  # Yep, this library is weird
    x_grid, y_grid = np.meshgrid(theta / np.pi * 180.0, tau * speed_of_light)
    # axes.view_init(elev=45, azim=-45)
    axes.view_init(elev=85, azim=270)

    # Plot spectrum surface:
    # axes.set_xlim([-180, 180])
    # axes.set_ylim([-20, 20])
    # axes.plot_surface(x_grid, y_grid, music_spec_db, cmap=cm.get_cmap('winter'), cstride=1, rstride=1, linewidth=0)

    # Plot peaks
    axes.set_xlim([-180, 180])
    axes.set_ylim([-20, 20])  # TODO: <=12m
    filtered_peaks = np.array(filtered_peaks)
    removed_peaks = np.array(removed_peaks)
    axes.scatter(removed_peaks[:, 0] * 180.0 / np.pi,
                 removed_peaks[:, 1] * speed_of_light,
                 removed_peaks[:, 2] + 0.15,
                 c='#AAAAAA',
                 s=4.0)
    axes.scatter(filtered_peaks[:, 0] * 180.0 / np.pi,
                 filtered_peaks[:, 1] * speed_of_light,
                 filtered_peaks[:, 2] + 0.15,
                 c='#FF0000')

    # Plot clusters
    # average_height = sum(np.array(filtered_peaks)[:, 2]) / len(filtered_peaks)
    # cluster_means = np.array(cluster_means)
    # for i in range(len(cluster_means)):
    #     axes.scatter(cluster_means[i, 0] * 180.0 / np.pi, cluster_means[i, 1] * speed_of_light, height_per_mean[i], c='#00FF00', s=(max(3.0, np.log(likelihood_per_mean[i])) * 4.0))

    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    plt.show()
Example #21
def trigger_request():
    """ Callback for IFTTT trigger bunq_request """
    try:
        data = request.get_json()
        print("[trigger_request] input: {}".format(json.dumps(data)))

        if "triggerFields" not in data or \
                "account" not in data["triggerFields"]:
            print("[trigger_request] ERROR: account field missing!")
            return json.dumps({"errors": [{"message": "Invalid data"}]}), 400
        account = data["triggerFields"]["account"]
        fields = data["triggerFields"]
        fieldsstr = json.dumps(fields)

        if "trigger_identity" not in data:
            print("[trigger_request] ERROR: trigger_identity field missing!")
            return json.dumps({"errors": [{"message": "Invalid data"}]}), 400
        identity = data["trigger_identity"]

        limit = 50
        if "limit" in data:
            limit = data["limit"]

        if account == "NL42BUNQ0123456789":
            return trigger_request_test(limit)

        timezone = "UTC"
        if "user" in data and "timezone" in data["user"]:
            timezone = data["user"]["timezone"]

        entity = storage.retrieve("trigger_request", identity)
        if entity is not None:
            if entity["account"] != account or \
                    json.dumps(entity["fields"]) != fieldsstr:
                storage.store("trigger_request", identity, {
                    "account": account,
                    "identity": identity,
                    "fields": fields
                })
                print("[trigger_request] updating trigger {} {}".format(
                    account, fieldsstr))
        else:
            storage.store("trigger_request", identity, {
                "account": account,
                "identity": identity,
                "fields": fields
            })
            print("[trigger_request] storing new trigger {} {}".format(
                account, fieldsstr))

        transactions = storage.get_value("trigger_request", identity + "_t")
        if transactions is None:
            transactions = []
        for trans in transactions:
            trans["created_at"] = arrow.get(trans["created_at"])\
                                  .to(timezone).isoformat()

        print("[trigger_request] Found {} transactions".format(
            len(transactions)))
        return json.dumps({"data": transactions[:limit]})
    except Exception:
        traceback.print_exc()
        print("[trigger_request] ERROR: cannot retrieve requests")
        return json.dumps({"errors": [{"message": \
                           "Cannot retrieve requests"}]}), 400
Example #22
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, request, jsonify
import sys
import json
import socket
from storage import kv_storage as store

reload(sys)  # Python 2 only: reload() and sys.setdefaultencoding() do not exist in Python 3
sys.setdefaultencoding('utf8')

app = Flask(__name__)
store = store()

@app.route('/')
def index():
    return "welcome to json-tore, a python based in memory json kv store"

@app.route('/index/<index>', methods=['POST'])
def add_index(index):
    if not request.headers['Content-Type'] == 'application/json':
        return jsonify({"msg": "Only accept json format body"}), 403
    if (store.create_index(index, request.get_json(force=True))):
        return jsonify({"msg": "created index {0}".format(index)}), 201
    return jsonify({"msg": "failed created index {0}".format(index)}), 409

@app.route('/index/<index>', methods=['PUT'])
def update_index(index):
    if not request.headers['Content-Type'] == 'application/json':
        return jsonify({"msg": "Only accept json format body"}), 403
    if (store.update_index(index, request.get_json(force=True))):
Example #23
def trigger_request():
    """ Callback for IFTTT trigger bunq_request """
    try:
        data = request.get_json()
        print("[trigger_request] input: {}".format(json.dumps(data)))

        if "triggerFields" not in data or \
                "account" not in data["triggerFields"]:
            print("[trigger_request] ERROR: account field missing!")
            return json.dumps({"errors": [{"message": "Invalid data"}]}), 400
        account = data["triggerFields"]["account"]
        fields = data["triggerFields"]
        fieldsstr = json.dumps(fields)

        if "trigger_identity" not in data:
            print("[trigger_request] ERROR: trigger_identity field missing!")
            return json.dumps({"errors": [{"message": "Invalid data"}]}), 400
        identity = data["trigger_identity"]

        limit = 50
        if "limit" in data:
            limit = data["limit"]

        if account == "NL42BUNQ0123456789":
            return trigger_request_test(limit)

        timezone = "UTC"
        if "user" in data and "timezone" in data["user"]:
            timezone = data["user"]["timezone"]

        entity = storage.retrieve("trigger_request", identity)
        if entity is not None:
            if entity["account"] != account or \
                    json.dumps(entity["fields"]) != fieldsstr:
                storage.store("trigger_request", identity, {
                    "account": account,
                    "identity": identity,
                    "fields": fields
                })
                print("[trigger_request] updating trigger {} {}".format(
                    account, fieldsstr))
        else:
            storage.store("trigger_request", identity, {
                "account": account,
                "identity": identity,
                "fields": fields
            })
            storage.store(
                "request_" + identity, "0", {
                    "value": {
                        "created_at": "2018-01-05T11:25:15+00:00",
                        "date": "2018-01-05",
                        "amount": "0.00",
                        "account": account,
                        "counterparty_account": "NL11BANK1111111111",
                        "counterparty_name": "Dummy Transaction",
                        "description": "This is a dummy transaction",
                        "request_id": "123e4567-e89b-12d3-a456-426655440001",
                        "meta": {
                            "id": "0",
                            "timestamp": "1515151515"
                        }
                    }
                })
            print("[trigger_request] storing new trigger {} {}".format(
                account, fieldsstr))

        transactions = []
        for entity in storage.query_all("request_" + identity):
            entity["value"]["created_at"] = arrow.get(\
                entity["value"]["created_at"]).to(timezone).isoformat()
            transactions.append(entity["value"])
        transactions = sorted(transactions,
                              key=lambda k: -int(k["meta"]["timestamp"]))

        if len(transactions) > 50:
            for trans in transactions[50:]:
                storage.remove("request_" + identity, str(trans["meta"]["id"]))

        print("[trigger_request] Found {} transactions".format(
            len(transactions)))
        return json.dumps({"data": transactions[:limit]})
    except Exception:
        traceback.print_exc()
        print("[trigger_request] ERROR: cannot retrieve requests")
        return json.dumps({"errors": [{"message": \
                           "Cannot retrieve requests"}]}), 400
Example #24
def bunq_callback_request():
    """ Handle bunq callbacks of type REQUEST """
    try:
        data = request.get_json()
        print("[bunqcb_request] input: {}".format(json.dumps(data)))
        if data["NotificationUrl"]["event_type"] != "REQUEST_RESPONSE_CREATED":
            print("[bunqcb_request] ignoring {} event".format(
                data["NotificationUrl"]["event_type"]))
            return ""

        obj = data["NotificationUrl"]["object"]["RequestResponse"]
        metaid = obj["id"]
        if storage.seen("seen_request", metaid):
            print("[bunqcb_request] duplicate transaction")
            return ""

        iban = obj["alias"]["iban"]
        item = {
            "created_at": obj["created"],
            "date": arrow.get(obj["created"]).format("YYYY-MM-DD"),
            "amount": obj["amount_inquired"]["value"],
            "account": iban,
            "counterparty_account": counterparty_account(obj),
            "counterparty_name": obj["counterparty_alias"]["display_name"],
            "description": obj["description"],
            "request_id": metaid,
            "meta": {
                "id": metaid,
                "timestamp": arrow.get(obj["created"]).timestamp
            }
        }

        print("[bunqcb_request] translated: {}".format(json.dumps(item)))

        triggerids = []
        for account in ["ANY", iban]:
            for trigger in storage.query("trigger_request", "account", "=",
                                         account):
                ident = trigger["identity"]
                if check_fields("request", ident, item, trigger["fields"]):
                    triggerids.append(ident)
                    storage.store("request_" + ident, metaid, {"value": item})
        print("[bunqcb_request] Matched triggers:", json.dumps(triggerids))
        if triggerids:
            data = {"data": []}
            for triggerid in triggerids:
                data["data"].append({"trigger_identity": triggerid})
            headers = {
                "IFTTT-Channel-Key": util.get_ifttt_service_key(),
                "IFTTT-Service-Key": util.get_ifttt_service_key(),
                "X-Request-ID": uuid.uuid4().hex,
                "Content-Type": "application/json"
            }
            print("[bunqcb_request] to ifttt: {}".format(json.dumps(data)))
            res = requests.post("https://realtime.ifttt.com/v1/notifications",
                                headers=headers,
                                data=json.dumps(data))
            print("[bunqcb_request] result: {} {}".format(
                res.status_code, res.text))

    except Exception:
        traceback.print_exc()
        print("[bunqcb_request] ERROR during handling bunq callback")
    return ""
Example #25
def install(token, name=NAME, allips=False):
    """ Handles the installation and registration of the API key

    Args:
        token (str): the API key as provided by the app or the token returned
                     from the OAuth token exchange (by calling the v1/token)
    """
    global _ACCESS_TOKEN, _INSTALL_TOKEN, _SESSION_TOKEN, \
           _SERVER_KEY, _PRIVATE_KEY
    try:
        _ACCESS_TOKEN = token
        print("[bunq] Generating new private key...")
        _PRIVATE_KEY = rsa.generate_private_key(public_exponent=65537,
                                                key_size=2048,
                                                backend=default_backend())
        print("[bunq] Installing key...")
        data = {"client_public_key": get_public_key()}
        result = post("v1/installation", data)
        _INSTALL_TOKEN = result["Response"][1]["Token"]["token"]
        _server_bytes = result["Response"][2]["ServerPublicKey"] \
            ["server_public_key"].encode("ascii")
        _SERVER_KEY = serialization.load_pem_public_key(
            _server_bytes, backend=default_backend())

        print("[bunq] Registering token...")
        if allips:
            ips = ["*"]
        else:
            ips = [requests.get("https://api.ipify.org").text]
        data = {
            "description": name,
            "secret": _ACCESS_TOKEN,
            "permitted_ips": ips
        }
        result = post("v1/device-server", data)

        _private_bytes = _PRIVATE_KEY.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption())
        # split to fit within 1500 byte maximum
        private_1 = _private_bytes[:1000]
        private_2 = _private_bytes[1000:]

        storage.store("config", "bunq_private_key_1", {"value": \
            base64.a85encode(private_1).decode("ascii")})
        storage.store("config", "bunq_private_key_2", {"value": \
            base64.a85encode(private_2).decode("ascii")})
        storage.store("config", "bunq_server_key", {"value": \
            base64.a85encode(_server_bytes).decode("ascii")})
        storage.store("config", "bunq_access_token", {"value": \
            _ACCESS_TOKEN})
        storage.store("config", "bunq_install_token", {"value": \
            _INSTALL_TOKEN})

        _SESSION_TOKEN = None

    except:
        traceback.print_exc()
        _ACCESS_TOKEN = None
        _INSTALL_TOKEN = None
        _SERVER_KEY = None
        _PRIVATE_KEY = None
        _SESSION_TOKEN = None
        raise
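
Example #25 splits the PEM-encoded private key into two chunks and stores each Ascii85-encoded to fit the 1500-byte value limit; a minimal round-trip sketch of that encode/decode step (the key bytes below are a placeholder, not real key material):

import base64

private_bytes = b"-----BEGIN PRIVATE KEY-----\n" + b"x" * 1600  # placeholder
part_1, part_2 = private_bytes[:1000], private_bytes[1000:]
encoded = [base64.a85encode(p).decode("ascii") for p in (part_1, part_2)]
restored = b"".join(base64.a85decode(e) for e in encoded)
assert restored == private_bytes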
Example #26
def download(url, paper=None):
    """
    Main entry point for executing paperbot's primary function, paper fetching.
    The given url may be to a pdf file, which should be archived, or it may be
    to an academic publisher's website which points to a paper. The paper needs
    to be downloaded and the metadata should be stored.

    Returns a tuple of (paper, json_path, pdf_path, logpath).

    :param url: url to fetch and examine
    :type url: str
    """
    # store logs in tempfile
    (templogpath, loghandler) = loghijack()

    if paper is None:
        paper = Paper.create({})

    # clean up url if necessary
    url = run_url_fixers(url)

    # whether or not metadata has already been populated
    populated_metadata = False

    for (url2, response) in iterdownload(url, paper=paper):
        if is_response_pdf(response):
            log.debug("Got pdf.")
            pdfcontent = remove_watermarks(response.content)
            paper.pdf = pdfcontent
            store(paper)
            break

        paper.html = response.content

        # Was not pdf. Attempt to parse the HTML based on normal expected
        # HTML elements. The HTML elements may say that the actual pdf url
        # is something else. If this happens, then attempt to download that
        # pdf url instead and then break out of this loop.

        # no reason to get same metadata on every iteration of loop
        if not populated_metadata:
            tree = parse_html(response.content)

            # most publishers show paper metadata in html in same way because ?
            populate_metadata_from_tree(tree, paper)

            # TODO: better way to check if populate_metadata_from_tree did
            # anything useful?
            if paper.title in [None, ""]:
                log.debug(
                    "# TODO: parse metadata from html using plugins here")
            else:
                populated_metadata = True

        # can't try anything else if the url is still bad
        if paper.pdf_url in [None, ""]:
            continue

        # Normalize the two urls. The url from the metadata on the page
        # might be different from the url that was originally passed in,
        # even though both urls might still refer to the same resource.
        if is_same_url(url, paper.pdf_url):
            # pdf_url is same as original url, no pdf found yet. This
            # happens when the pdf url is correct, but the publisher is
            # returning html instead. And the html happens to reference the
            # url that was originally requested in the first place. Argh.
            continue

        log.debug("Switching activity to pdf_url {}".format(paper.pdf_url))

        # paper pdf is stored at a different url. Attempt to fetch that
        # url now. Only do this if pdf_url != url because otherwise
        # this will be an endless loop.
        for (url3, response2) in iterdownload(paper.pdf_url, paper=paper):
            if is_response_pdf(response2):
                log.debug("Got pdf on second-level page.")
                pdfcontent = remove_watermarks(response2.content)
                paper.pdf = pdfcontent
                store(paper)
                break
        else:
            log.debug("Couldn't download pdf from {}".format(paper.pdf_url))

        break

    # was pdf downloaded?
    if (hasattr(paper, "pdf") and paper.pdf not in [None, ""]) or \
       os.path.exists(paper.file_path_pdf):
        fetched = True
    else:
        fetched = False

    hasdoi = (paper.doi not in [None, ""])

    if hasdoi:
        # check if libgen has this paper already
        libgenhas = check_libgen_has_paper(paper.doi)

        if fetched and not libgenhas:
            # upload if libgen doesn't already have it
            upload_to_libgen(paper.file_path_pdf, paper.doi)
        elif not fetched and libgenhas:
            urldoi = make_libgen_doi_url(paper.doi)

            # get from libgen
            log.debug("Haven't yet fetched paper. Have doi. Also, libgenhas.")
            log.debug("HTTP GET {}".format(urldoi))
            response = requests.get(urldoi, headers=DEFAULT_HEADERS)

            if is_response_pdf(response):
                log.debug("Got pdf from libgen.")

                # skip pdfparanoia because it's from libgen
                pdfcontent = response.content
                paper.pdf = pdfcontent

                store(paper)

                fetched = True
            else:
                log.debug("libgen lied about haspdf :(")
    else:
        log.debug("Don't know doi, can't check if libgen has this paper.")
        libgenhas = None

    # store(paper) usually handles json but in case of failure there needs to
    # be an explicit save of paper metadata.
    if not fetched:
        store_json(paper)

    # move logs into position
    logpath = store_logs(paper, templogpath)

    # remove loghandler from logger
    mainlogger = logging.getLogger("paperbot")
    mainlogger.handlers.remove(loghandler)

    return (paper, paper.file_path_json, paper.file_path_pdf, logpath)
Example #27
def download(url, paper=None):
    """
    Main entry point for executing paperbot's primary function, paper fetching.
    The given url may be to a pdf file, which should be archived, or it may be
    to an academic publisher's website which points to a paper. The paper needs
    to be downloaded and the metadata should be stored.

    Returns a tuple of (paper, json_path, pdf_path, logpath).

    :param url: url to fetch and examine
    :type url: str
    """
    # store logs in tempfile
    (templogpath, loghandler) = loghijack()

    if paper is None:
        paper = Paper.create({})

    # clean up url if necessary
    url = run_url_fixers(url)

    # whether or not metadata has already been populated
    populated_metadata = False

    for (url2, response) in iterdownload(url, paper=paper):
        if is_response_pdf(response):
            log.debug("Got pdf.")
            pdfcontent = remove_watermarks(response.content)
            paper.pdf = pdfcontent
            store(paper)
            break

        paper.html = response.content

        # Was not pdf. Attempt to parse the HTML based on normal expected
        # HTML elements. The HTML elements may say that the actual pdf url
        # is something else. If this happens, then attempt to download that
        # pdf url instead and then break out of this loop.

        # no reason to get same metadata on every iteration of loop
        if not populated_metadata:
            tree = parse_html(response.content)

            # most publishers show paper metadata in html in same way because ?
            populate_metadata_from_tree(tree, paper)

            # TODO: better way to check if populate_metadata_from_tree did
            # anything useful?
            if paper.title in [None, ""]:
                log.debug("# TODO: parse metadata from html using plugins here")
            else:
                populated_metadata = True

        # can't try anything else if the url is still bad
        if paper.pdf_url in [None, ""]:
            continue

        # Normalize the two urls. The url from the metadata on the page
        # might be different from the url that was originally passed in,
        # even though both urls might still refer to the same resource.
        if is_same_url(url, paper.pdf_url):
            # pdf_url is same as original url, no pdf found yet. This
            # happens when the pdf url is correct, but the publisher is
            # returning html instead. And the html happens to reference the
            # url that was originally requested in the first place. Argh.
            continue

        log.debug("Switching activity to pdf_url {}".format(paper.pdf_url))

        # paper pdf is stored at a different url. Attempt to fetch that
        # url now. Only do this if pdf_url != url because otherwise
        # this will be an endless loop.
        for (url3, response2) in iterdownload(paper.pdf_url, paper=paper):
            if is_response_pdf(response2):
                log.debug("Got pdf on second-level page.")
                pdfcontent = remove_watermarks(response2.content)
                paper.pdf = pdfcontent
                store(paper)
                break
        else:
            log.debug("Couldn't download pdf from {}".format(paper.pdf_url))

        break

    # was pdf downloaded?
    if (hasattr(paper, "pdf") and paper.pdf not in [None, ""]) or os.path.exists(paper.file_path_pdf):
        fetched = True
    else:
        fetched = False

    hasdoi = (paper.doi not in [None, ""])

    if hasdoi:
        # check if libgen has this paper already
        libgenhas = check_libgen_has_paper(paper.doi)

        if fetched and not libgenhas:
            # upload if libgen doesn't already have it
            upload_to_libgen(paper.file_path_pdf, paper.doi)
        elif not fetched and libgenhas:
            urldoi = make_libgen_doi_url(paper.doi)

            # get from libgen
            log.debug("Haven't yet fetched paper. Have doi. Also, libgenhas.")
            log.debug("HTTP GET {}".format(urldoi))
            response = requests.get(urldoi, headers=DEFAULT_HEADERS)

            if is_response_pdf(response):
                log.debug("Got pdf from libgen.")

                # skip pdfparanoia because it's from libgen
                pdfcontent = response.content
                paper.pdf = pdfcontent

                store(paper)

                fetched = True
            else:
                log.debug("libgen lied about haspdf :(")
    else:
        log.debug("Don't know doi, can't check if libgen has this paper.")
        libgenhas = None

    # store(paper) usually handles json but in case of failure there needs to
    # be an explicit save of paper metadata.
    if not fetched:
        store_json(paper)

    # move logs into position
    logpath = store_logs(paper, templogpath)

    # remove loghandler from logger
    mainlogger = logging.getLogger("paperbot")
    mainlogger.handlers.remove(loghandler)

    return (paper, paper.file_path_json, paper.file_path_pdf, logpath)
Beispiel #28
0
def save_session_cookie(value):
    """ Save the users session cookie """
    storage.store("config", "session_cookie", {"value": value})
Example #29
def bunq_callback_mutation():
    """ Handle bunq callbacks of type MUTATION """
    try:
        data = request.get_json()
        print("[bunqcb_mutation] input: {}".format(json.dumps(data)))
        payment = data["NotificationUrl"]["object"]["Payment"]
        metaid = payment["id"]
        if storage.seen("seen_mutation", metaid):
            print("[bunqcb_mutation] duplicate transaction")
            return ""

        iban = payment["alias"]["iban"]
        item = {
            "created_at": payment["created"],
            "date": arrow.get(payment["created"]).format("YYYY-MM-DD"),
            "type": mutation_type(payment),
            "amount": payment["amount"]["value"],
            "balance": payment["balance_after_mutation"]["value"],
            "account": iban,
            "counterparty_account": counterparty_account(payment),
            "counterparty_name": payment["counterparty_alias"]["display_name"],
            "description": payment["description"],
            "payment_id": metaid,
            "meta": {
                "id": metaid,
                "timestamp": arrow.get(payment["created"]).timestamp
            }
        }

        print("[bunqcb_mutation] translated: {}".format(json.dumps(item)))
        triggerids_1 = []
        triggerids_2 = []
        for account in ["ANY", iban]:
            for trigger in storage.query("trigger_mutation", "account", "=",
                                         account):
                ident = trigger["identity"]
                if check_fields("mutation", ident, item, trigger["fields"]):
                    triggerids_1.append(ident)
                    storage.store("mutation_" + ident, metaid, {"value": item})
            for trigger in storage.query("trigger_balance", "account", "=",
                                         account):
                ident = trigger["identity"]
                if check_fields("balance", ident, item, trigger["fields"]):
                    if not trigger["last"]:
                        triggerids_2.append(ident)
                        storage.store("balance_" + ident, metaid,
                                      {"value": item})
                        trigger["last"] = True
                        storage.store("trigger_balance", ident, trigger)
                elif trigger["last"]:
                    trigger["last"] = False
                    storage.store("trigger_balance", ident, trigger)
        print("Matched mutation triggers:", json.dumps(triggerids_1))
        print("Matched balance triggers:", json.dumps(triggerids_2))
        data = {"data": []}
        for triggerids in [triggerids_1, triggerids_2]:
            for triggerid in triggerids:
                data["data"].append({"trigger_identity": triggerid})
        if data["data"]:
            headers = {
                "IFTTT-Channel-Key": util.get_ifttt_service_key(),
                "IFTTT-Service-Key": util.get_ifttt_service_key(),
                "X-Request-ID": uuid.uuid4().hex,
                "Content-Type": "application/json"
            }
            print("[bunqcb_mutation] to ifttt: {}".format(json.dumps(data)))
            res = requests.post("https://realtime.ifttt.com/v1/notifications",
                                headers=headers,
                                data=json.dumps(data))
            print("[bunqcb_mutation] result: {} {}".format(
                res.status_code, res.text))

    except Exception:
        traceback.print_exc()
        print("[bunqcb_mutation] ERROR during handling bunq callback")

    return ""
Example #30
def save_ifttt_service_key(value):
    """ Save the IFTTT service key, used to secure IFTTT calls """
    global _IFTTT_SERVICE_KEY
    _IFTTT_SERVICE_KEY = value
    storage.store("bunq2IFTTT", "ifttt_service_key", {"value": value})
Example #31
articles = [
    "https://www.bbc.com/news/world-europe-50013048",
    "https://www.bbc.com/news/world-europe-50361886",
    "https://www.bbc.com/news/world-asia-50359435",
    "https://www.bbc.com/news/business-50342714",
    "https://www.cnn.com/2019/11/07/opinions/pence-trump-ukraine-scandal-scapegoat-dantonio/index.html",
    "https://www.cnn.com/2019/11/08/politics/michael-bloomberg-analysis/index.html",
    "https://www.cnn.com/2019/10/14/business/target-cutting-hours-wage-increase/index.html",
    "https://www.cnn.com/2019/11/08/tech/facebook-whistleblower-name/index.html",
    "https://www.foxnews.com/opinion/david-bossie-trump-impeachment-witch-hunt-must-end-whistleblower-lawyer-is-trump-hater-who-forecast-coup",
    "https://www.foxnews.com/media/doug-schoen-michael-bloomberg-substantive",
    "https://www.foxnews.com/us/missing-california-hiker-found-dead-at-top-glacier-weeks-before-wife-gives-birth",
    "https://www.foxnews.com/world/berlin-wall-east-germans-stasi-answers",
    "https://www.foxnews.com/politics/aoc-bloomberg-purchase-election",
    "https://www.nytimes.com/2019/11/09/world/berlin-wall-photos-30-year-anniversary.html",
    "https://www.nytimes.com/2019/11/09/upshot/bloomberg-new-york-prosperity-inequality.html",
    "https://www.nytimes.com/2019/11/09/us/a-slave-rebellion-rises-again.html",
    "https://www.nytimes.com/2019/09/28/opinion/sunday/millennial-dining-car-amtrak.html",
    "https://www.nytimes.com/2019/11/09/science/seals-distemper.html"
]

for url in articles:
    article = get_text(url)  # change to array of article texts
    sentences = text_to_sentences.split(
        article)  # split article into individual sentences
    analyzed = sentiment_analyzer.anaylze(sentences)  # analyze each sentence

    analyzed[:] = [x.__dict__ for x in analyzed]
    db_obj = storage_data(url=url, sentences=analyzed).__dict__
    storage.store(url=url, db_obj=db_obj)

print("Boom! I'm done.")
Example #32
data = []
msg = connection.recv(BUFFER_SIZE)
received = 1024
data.append(msg)

while (received < filesize):
    received += BUFFER_SIZE
    msg = connection.recv(BUFFER_SIZE)
    data.append(msg)
    print("receiving")
print("done receiving ...")

response = inference_handler.predict(data)
#response = 'dog'

response_size = pack('!I', len(response))
connection.send(response_size)

connection.send(response.encode())

connection.send("do we have your consent to store the data?".encode())

consent = connection.recv(BUFFER_SIZE)
print(consent)

if (consent):
    storage.store(response, data)

connection.close()
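
The server above prefixes its response with a 4-byte big-endian length (pack('!I', ...)); a client-side sketch of reading that framing back, assuming an already-connected socket (setup omitted):

from struct import unpack

def recv_exactly(sock, n):
    # recv() may return fewer bytes than requested, so loop until n arrive
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("socket closed before message completed")
        buf += chunk
    return buf

def read_response(sock):
    (size,) = unpack('!I', recv_exactly(sock, 4))
    return recv_exactly(sock, size).decode()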
Example #33
def save_session_cookie(value):
    """ Save the users session cookie """
    global _SESSION_COOKIE
    _SESSION_COOKIE = value
    storage.store("config", "session_cookie", {"value": value})
Example #34
def add():
    key = str(uuid())
    data = json.loads(request.get_data())
    print(request.get_data())
    store(key, data)
    return '', 200
Example #35
def handle(comingFrom, goingTo, data, forward, reply):
	try:
		if len(data)==0:
			forward(data)
			return

		bundle = Bundle(data)
		bundle.display()
		forward(data)
		return  # NOTE: debug short-circuit; the routing logic below is currently unreachable

		if bundle.destination != LOCAL_EID[4:]:
			forward(data)
			return

		if bundle.isstatusreport():
			forward(data)

			print "Status Report!"
			#TODO: do it better, instead of instantiating bundle AND statusreport
			statusReport = statusreport(data)
			statusReport.display()
			storage.report(statusReport)
		elif bundle.getBlock(200) != None:
		#elif bundle.destination == LOCAL_EID[4:]:#"//village.bytewalla.com/":
		#	forward(data)

			print "From: ", comingFrom
			print "To: ", goingTo

			serviceBlock = ServiceBlock(bundle.getBlock(200).payload)

			#if we are authorized to handle the request
			if serviceBlock.destination == "" or serviceBlock.destination == LOCAL_EID[4:]:
				try:#try to handle the request. If we can't, forward it.
					#TODO: store and confirm delivery?
					service.handle(serviceBlock.service, bundle.payloadBlock.payload)
					forward(data)
					return
				except:
					pass

			#the request could not be handled
			storage.store(bundle)

			if serviceBlock.destination == "":
				destination = service.getDestination()
				if destination == None:
					raise Exception("No destination for this service")
				serviceBlock.destination = destination
			else:
				destination = serviceBlock.destination
			bundle.primaryBlock.destination = destination

			priority = service.getPriority(serviceBlock.service)
			if priority != None:
				bundle.setPriority(priority)

			#bundle.getBlock(200).controlFlags = bundle.getBlock(200).controlFlags | (1<<2)#set processed
			bundle.invokeReports()

			#TODO: maybe its not necessary to do that
			bundle.getBlock(200).payload = serviceBlock.serialize()
			bundle.display()

			#if Caching.caching(bundle):

			#bundle2 = Bundle(bundle.serialize())
			#bundle2.display()

			forward(bundle.serialize())
		else:
			forward(data)
	except NotSegment:
		forward(data)
	except Exception, e:
		print "Bundle handle error: "+str(e)
		traceback.print_exc()
		forward(data)