Example #1
 def open_new_monthly_expense(self, event):
     transaction.Transaction(self, transaction_type=transaction.EXPENSE)
Example #2
    def mineBlock(self):
        print("Mining...")
        time.sleep(1)
        # wait till tx_pool is not empty
        if self.blockchain is not None:
            while not self.tx_pool:
                time.sleep(1)

        # Keep mining until complete, unless interrupted because another
        # miner broadcast a chain longer than this miner's blockchain.
        interruptQueue = Queue(1)
        nonceQueue = Queue(1)
        yield interruptQueue

        # if this is ever invoked, it must be the first block
        # of the first miner
        if self.blockchain is None:
            tx = transaction.Transaction(
                _sender_public_key=self.client.publickey,
                _receiver_public_key=self.client.publickey,
                _comment="Hello world",
                _amount=100,
                _reward=True)
            tx.sign(self.client.privatekey)

            first_block = block.Block(
                _transaction_list=[tx],
                _difficulty=1,
            )

            p = first_block.build(_found=nonceQueue, _interrupt=interruptQueue)
            p.start()
            p.join()
            nonce_found = nonceQueue.get()
            if nonce_found == "":
                print("interrupted!")
                return

            first_block.completeBlockWithNonce(_nonce=nonce_found)
            first_block.tx_list = [tx.data]
            first_block.executeChange()

            self.blockchain = blockChain.Blockchain(_block=first_block)

        else:
            # validate the transactions
            temp_pool = []

            print("Getting from tx_pool...-->", self.tx_pool)
            print("Current:", self.blockchain.current_block.state)

            while len(self.tx_pool) > 0 and len(temp_pool) <= 10:
                item = self.tx_pool.pop(0)

                # create transaction
                t = transaction.Transaction(item["Sender"], item["Receiver"],
                                            item["Amount"], item["Comment"],
                                            item["Reward"], item["Signature"])
                temp_pool.append(t)

            temp_pool.append(
                self.createRewardTransaction(self.client.privatekey))

            print("----> TEMP POOL:", temp_pool)
            newBlock = block.Block(
                _transaction_list=temp_pool,
                _prev_header=self.blockchain.current_block.header,
                _prev_block=self.blockchain.current_block,
                _difficulty=4,
            )

            p = newBlock.build(_found=nonceQueue, _interrupt=interruptQueue)
            p.start()
            p.join()

            nonce_found = nonceQueue.get()
            if nonce_found == "":
                return  # stop here
            newBlock.completeBlockWithNonce(_nonce=nonce_found)
            newBlock.executeChange()
            print("newBlock--->", newBlock.state, newBlock.tx_list)

            self.blockchain.addBlock(_incoming_block=newBlock,
                                     _prev_block_header=newBlock.prev_header)

        to_broadcast = self.blockchain.current_block.getData()

        yield to_broadcast
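
A hedged driver sketch for the generator above: mineBlock() first yields its interrupt queue, so a caller can cancel mining when a longer chain arrives, then yields the block data to broadcast, or returns early if interrupted. Here `miner` is a hypothetical instance.

gen = miner.mineBlock()           # 'miner' is a hypothetical instance
interrupt_queue = next(gen)       # first yield: handle used to cancel mining
try:
    to_broadcast = next(gen)      # second yield: data of the completed block
except StopIteration:
    to_broadcast = None           # mining was interrupted before completion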
Example #3
    def checkCrossConnectionIsolation(self):
        # Verify MVCC isolates connections.
        # This will fail if Connection doesn't poll for changes.
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = PersistentMapping()
            r1['gamma'] = PersistentMapping()
            transaction.commit()

            # Open a second connection but don't load root['alpha'] yet
            c2 = db.open()
            r2 = c2.root()

            r1['alpha']['beta'] = 'yes'

            storage = c1._storage
            t = transaction.Transaction()
            t.description = u'isolation test 1'
            c1.tpc_begin(t)
            c1.commit(t)
            storage.tpc_vote(t.data(c1))
            storage.tpc_finish(t.data(c1))

            # The second connection will now load root['alpha'], but due to
            # MVCC, it should continue to see the old state.
            self.assertTrue(r2['alpha']._p_changed is None)  # A ghost
            self.assertTrue(not r2['alpha'])
            self.assertTrue(r2['alpha']._p_changed == 0)

            # make root['alpha'] visible to the second connection
            c2.sync()

            # Now it should be in sync
            self.assertTrue(r2['alpha']._p_changed is None)  # A ghost
            self.assertTrue(r2['alpha'])
            self.assertTrue(r2['alpha']._p_changed == 0)
            self.assertTrue(r2['alpha']['beta'] == 'yes')

            # Repeat the test with root['gamma']
            r1['gamma']['delta'] = 'yes'

            storage = c1._storage
            t = transaction.Transaction()
            t.description = u'isolation test 2'
            c1.tpc_begin(t)
            c1.commit(t)
            storage.tpc_vote(t.data(c1))
            storage.tpc_finish(t.data(c1))

            # The second connection will now load root['gamma'], but due to
            # MVCC, it should continue to see the old state.
            self.assertTrue(r2['gamma']._p_changed is None)  # A ghost
            self.assertTrue(not r2['gamma'])
            self.assertTrue(r2['gamma']._p_changed == 0)

            # make root['gamma'] visible to the second connection
            c2.sync()

            # Now it should be in sync
            self.assertTrue(r2['gamma']._p_changed is None)  # A ghost
            self.assertTrue(r2['gamma'])
            self.assertTrue(r2['gamma']._p_changed == 0)
            self.assertTrue(r2['gamma']['delta'] == 'yes')
        finally:
            db.close()
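
The same isolation can be demonstrated with the high-level API; a minimal sketch, assuming an in-memory database and per-connection transaction managers instead of the low-level tpc_* calls used above.

import transaction
from persistent.mapping import PersistentMapping
from ZODB import DB

db = DB(None)                       # no storage given -> in-memory storage
tm1 = transaction.TransactionManager()
tm2 = transaction.TransactionManager()
c1 = db.open(transaction_manager=tm1)

c1.root()['alpha'] = PersistentMapping()
tm1.commit()

c2 = db.open(transaction_manager=tm2)    # snapshot now includes 'alpha'
c1.root()['alpha']['beta'] = 'yes'
tm1.commit()

assert 'beta' not in c2.root()['alpha']  # old snapshot, per MVCC
c2.sync()                                # poll for changes
assert c2.root()['alpha']['beta'] == 'yes'
db.close()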
Example #4
    def testBackupNormalCase(self):
        np = 7
        nr = 2
        check_dict = dict.fromkeys(xrange(np))
        with NEOCluster(partitions=np, replicas=nr - 1,
                        storage_count=3) as upstream:
            upstream.start()
            importZODB = upstream.importZODB()
            importZODB(3)

            def delaySecondary(conn, packet):
                if isinstance(packet, Packets.Replicate):
                    tid, upstream_name, source_dict = packet._args
                    return not upstream_name and all(source_dict.itervalues())

            with NEOCluster(partitions=np,
                            replicas=nr - 1,
                            storage_count=5,
                            upstream=upstream) as backup:
                backup.start()
                # Initialize & catch up.
                backup.neoctl.setClusterState(ClusterStates.STARTING_BACKUP)
                self.tic()
                self.assertEqual(np * nr, self.checkBackup(backup))
                # Normal case, following upstream cluster closely.
                importZODB(17)
                self.tic()
                self.assertEqual(np * nr, self.checkBackup(backup))

                # Check that a backup cluster can be restarted.
                backup.stop()
                backup.start()
                self.assertEqual(backup.neoctl.getClusterState(),
                                 ClusterStates.BACKINGUP)
                importZODB(17)
                self.tic()
                self.assertEqual(np * nr, self.checkBackup(backup))
                backup.neoctl.checkReplicas(check_dict, ZERO_TID, None)
                self.tic()
                # Stop backing up, nothing truncated.
                backup.neoctl.setClusterState(ClusterStates.STOPPING_BACKUP)
                self.tic()
                self.assertEqual(np * nr, self.checkBackup(backup))
                self.assertEqual(backup.neoctl.getClusterState(),
                                 ClusterStates.RUNNING)

                # Restart and switch to BACKINGUP mode again.
                backup.stop()
                backup.start()
                backup.neoctl.setClusterState(ClusterStates.STARTING_BACKUP)
                self.tic()

                # Leave BACKINGUP mode when 1 replica is late. The cluster
                # remains in STOPPING_BACKUP state until it catches up.
                with backup.master.filterConnection(*backup.storage_list) as f:
                    f.add(delaySecondary)
                    while not f.filtered_count:
                        importZODB(1)
                    self.tic()
                    backup.neoctl.setClusterState(
                        ClusterStates.STOPPING_BACKUP)
                    self.tic()
                self.tic()
                self.assertEqual(
                    np * nr, self.checkBackup(backup, max_tid=backup.last_tid))

                # Again but leave BACKINGUP mode when a storage node is
                # receiving data from the upstream cluster.
                backup.stop()
                backup.start()
                backup.neoctl.setClusterState(ClusterStates.STARTING_BACKUP)
                self.tic()
                with ConnectionFilter() as f:
                    f.delayAddObject(lambda conn: conn.getUUID() is None)
                    while not f.filtered_count:
                        importZODB(1)
                    self.tic()
                    backup.neoctl.setClusterState(
                        ClusterStates.STOPPING_BACKUP)
                    self.tic()
                self.tic()
                self.assertEqual(
                    np * nr, self.checkBackup(backup, max_tid=backup.last_tid))

                storage = upstream.getZODBStorage()

                # Check that replication from upstream is resumed even if
                # upstream is idle.
                backup.neoctl.setClusterState(ClusterStates.STARTING_BACKUP)
                self.tic()
                x = backup.master.backup_app.primary_partition_dict
                new_oid_storage = x[0]
                with upstream.moduloTID(next(p for p, n in x.iteritems()
                                               if n is not new_oid_storage)), \
                     ConnectionFilter() as f:
                    f.delayAddObject()
                    # Transaction that touches 2 primary cells on 2 different
                    # nodes.
                    txn = transaction.Transaction()
                    tid = storage.load(ZERO_OID)[1]
                    storage.tpc_begin(txn)
                    storage.store(ZERO_OID, tid, '', '', txn)
                    storage.tpc_vote(txn)
                    storage.tpc_finish(txn)
                    self.tic()
                    # Stop when exactly 1 of the 2 cells is synced with
                    # upstream.
                    backup.stop()
                backup.start()
                self.assertEqual(
                    np * nr, self.checkBackup(backup, max_tid=backup.last_tid))

                # Check that replication to secondary cells is resumed even if
                # upstream is idle.
                with backup.master.filterConnection(*backup.storage_list) as f:
                    f.add(delaySecondary)
                    txn = transaction.Transaction()
                    storage.tpc_begin(txn)
                    storage.tpc_finish(txn)
                    self.tic()
                    backup.stop()
                backup.start()
                self.assertEqual(
                    np * nr, self.checkBackup(backup, max_tid=backup.last_tid))
Example #5
    def _importFromFileStorage(self,
                               multi=(),
                               root_filter=None,
                               sub_filter=None):
        beforeCheck, before, finalCheck, after = self.getData()
        iter_list = []
        db_list = []
        # Setup several FileStorage databases.
        for i, db in enumerate(('root', ) + multi):
            fs_path, cfg = self.getFS(db)
            c = ZODB.DB(FileStorage(fs_path)).open()
            r = c.root()['tree'] = random_tree.Node()
            transaction.commit()
            iter_list.append(before(r, sub_filter(db) if i else root_filter))
            db_list.append((db, r, cfg))
        # Populate FileStorage databases.
        for i, iter_list in enumerate(izip_longest(*iter_list)):
            for r in iter_list:
                if r:
                    transaction.commit()
        # Get oids of mount points and close.
        zodb = []
        importer = {'zodb': zodb}
        for db, r, cfg in db_list:
            if db == 'root':
                if multi:
                    for x in multi:
                        cfg['_%s' % x] = str(u64(r[x]._p_oid))
                else:
                    beforeCheck(random_tree.hashTree(r))
                    importer['writeback'] = 'true'
            else:
                cfg["oid"] = str(u64(r[db]._p_oid))
                db = '_%s' % db
            r._p_jar.db().close()
            zodb.append((db, cfg))
        del db_list, iter_list
        #del zodb[0][1][zodb.pop()[0]]
        # Start NEO cluster with transparent import.
        with NEOCluster(importer=importer, partitions=2) as cluster:
            # Suspend import for a while, so that import
            # is finished in the middle of the below 'for' loop.
            # Use a slightly different main loop for storage so that it
            # does not import data too fast and we test read/write access
            # by the client during the import.
            dm = cluster.storage.dm

            def doOperation(app):
                del dm.doOperation
                try:
                    while True:
                        if app.task_queue:
                            app.task_queue[-1].next()
                        app._poll()
                except StopIteration:
                    app.task_queue.pop()

            dm.doOperation = doOperation
            cluster.start()
            t, c = cluster.getTransaction()
            r = c.root()['tree']
            # Test retrieving of an object from ZODB when next serial is in NEO.
            r._p_changed = 1
            t.commit()
            t.begin()
            storage = c.db().storage
            storage._cache.clear()
            storage.loadBefore(r._p_oid, r._p_serial)
            ##
            self.assertRaisesRegexp(NotImplementedError, " getObjectHistory$",
                                    c.db().history, r._p_oid)
            h = random_tree.hashTree(r)
            h(30)
            logging.info("start migration")
            dm.doOperation(cluster.storage)
            # Adjust if needed. Must remain > 0.
            beforeCheck(h, 22)
            # New writes after the switch to NEO.
            last_import = -1
            for i, r in enumerate(after(r)):
                c.readCurrent(r)
                t.commit()
                if cluster.storage.dm._import:
                    last_import = i
            for x in 0, 1:
                undo = TransactionalUndo(c.db(), [storage.lastTransaction()])
                txn = transaction.Transaction()
                undo.tpc_begin(txn)
                undo.commit(txn)
                undo.tpc_vote(txn)
                undo.tpc_finish(txn)
            self.tic()
            # Same as above: last_import must fall in the middle of the
            # loop (between i/3 and 2*i/3, per the assert below).
            assert i < last_import * 3 < 2 * i, (last_import, i)
            self.assertFalse(cluster.storage.dm._import)
            storage._cache.clear()
            finalCheck(r)
            if dm._writeback:
                dm.commit()
                dm._writeback.wait()
        if dm._writeback:
            db = ZODB.DB(FileStorage(fs_path, read_only=True))
            finalCheck(db.open().root()['tree'])
            db.close()
Example #6
File: DB.py Project: e42s/ZODB
    def __init__(self, storage,
                 pool_size=7,
                 pool_timeout=1<<31,
                 cache_size=400,
                 cache_size_bytes=0,
                 historical_pool_size=3,
                 historical_cache_size=1000,
                 historical_cache_size_bytes=0,
                 historical_timeout=300,
                 database_name='unnamed',
                 databases=None,
                 xrefs=True,
                 large_record_size=1<<24,
                 **storage_args):
        """Create an object database.

        :Parameters:
          - `storage`: the storage used by the database, e.g. FileStorage
          - `pool_size`: expected maximum number of open connections
          - `cache_size`: target size of Connection object cache
          - `cache_size_bytes`: target size measured in total estimated size
               of objects in the Connection object cache.
               "0" means unlimited.
          - `historical_pool_size`: expected maximum number of total
            historical connections
          - `historical_cache_size`: target size of Connection object cache for
            historical (`at` or `before`) connections
          - `historical_cache_size_bytes`: similar to `cache_size_bytes` for
            the historical connection.
          - `historical_timeout`: minimum number of seconds that
            an unused historical connection will be kept, or None.
          - `xrefs`: Boolean flag indicating whether implicit cross-database
            references are allowed
        """
        if isinstance(storage, six.string_types):
            from ZODB import FileStorage
            storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
        elif storage is None:
            from ZODB import MappingStorage
            storage = ZODB.MappingStorage.MappingStorage(**storage_args)

        # Allocate lock.
        x = threading.RLock()
        self._a = x.acquire
        self._r = x.release

        # pools and cache sizes
        self.pool = ConnectionPool(pool_size, pool_timeout)
        self.historical_pool = KeyedConnectionPool(historical_pool_size,
                                                   historical_timeout)
        self._cache_size = cache_size
        self._cache_size_bytes = cache_size_bytes
        self._historical_cache_size = historical_cache_size
        self._historical_cache_size_bytes = historical_cache_size_bytes

        # Setup storage
        self.storage = storage
        self.references = ZODB.serialize.referencesf
        try:
            storage.registerDB(self)
        except TypeError:
            storage.registerDB(self, None) # Backward compat

        if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly():
            warnings.warn(
                "Storage doesn't have a tpc_vote and this violates "
                "the storage API. Violently monkeypatching in a do-nothing "
                "tpc_vote.",
                DeprecationWarning, 2)
            storage.tpc_vote = lambda *args: None

        if IMVCCStorage.providedBy(storage):
            temp_storage = storage.new_instance()
        else:
            temp_storage = storage
        try:
            try:
                temp_storage.load(z64, '')
            except KeyError:
                # Create the database's root in the storage if it doesn't exist
                from persistent.mapping import PersistentMapping
                root = PersistentMapping()
                # Manually create a pickle for the root to put in the storage.
                # The pickle must be in the special ZODB format.
                file = BytesIO()
                p = Pickler(file, _protocol)
                p.dump((root.__class__, None))
                p.dump(root.__getstate__())
                t = transaction.Transaction()
                t.description = 'initial database creation'
                temp_storage.tpc_begin(t)
                temp_storage.store(z64, None, file.getvalue(), '', t)
                temp_storage.tpc_vote(t)
                temp_storage.tpc_finish(t)
        finally:
            if IMVCCStorage.providedBy(temp_storage):
                temp_storage.release()

        # Multi-database setup.
        if databases is None:
            databases = {}
        self.databases = databases
        self.database_name = database_name
        if database_name in databases:
            raise ValueError("database_name %r already in databases" %
                             database_name)
        databases[database_name] = self
        self.xrefs = xrefs

        self.large_record_size = large_record_size
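
A hedged usage sketch for this constructor; per the code above, a string argument is turned into a FileStorage ('data.fs' is a hypothetical path).

import transaction
from ZODB import DB

db = DB('data.fs')       # string -> FileStorage, per __init__ above
conn = db.open()
conn.root()['answer'] = 42
transaction.commit()
db.close()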
Example #7
 def deserialize(to_des):
     des_tx = tx.Transaction(to_des[4:38], to_des[38:72],
                             int(to_des[:4], 16))
     des_tx.public_key = to_des[72:202]
     des_tx.signature = to_des[202:]
     return des_tx
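
A hedged sketch of the inverse operation, mirroring the fixed-width layout deserialize() slices: 4 hex digits of amount, two 34-character address fields, a 130-character public key, then the signature. The attribute names are assumptions.

def serialize(tx_obj):
    # Attribute names (amount, sender, receiver, ...) are assumed.
    return ("{:04x}".format(tx_obj.amount)
            + tx_obj.sender
            + tx_obj.receiver
            + tx_obj.public_key
            + tx_obj.signature)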
Example #8
 def _start_txn(self):
     txn = transaction.Transaction()
     self._storage.tpc_begin(txn)
     oid = self._storage.new_oid()
     self._storage.store(oid, ZERO, zodb_pickle(MinPO(1)), '', txn)
     return oid, txn
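
For symmetry, a hedged companion helper (an assumption, not part of the original fixture) that completes the two-phase commit started by _start_txn:

 def _finish_txn(self, txn):
     # Assumed counterpart to _start_txn: vote, then finish the 2PC.
     self._storage.tpc_vote(txn)
     self._storage.tpc_finish(txn)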
Example #9
File: DB.py Project: bendavis78/zope
    def __init__(
        self,
        storage,
        pool_size=7,
        cache_size=400,
        cache_size_bytes=0,
        version_pool_size=3,
        version_cache_size=100,
        database_name='unnamed',
        databases=None,
    ):
        """Create an object database.

        :Parameters:
          - `storage`: the storage used by the database, e.g. FileStorage
          - `pool_size`: expected maximum number of open connections
          - `cache_size`: target size of Connection object cache
          - `cache_size_bytes`: target size measured in total estimated size
               of objects in the Connection object cache.
               "0" means unlimited.
          - `version_pool_size`: expected maximum number of connections (per
            version)
          - `version_cache_size`: target size of Connection object cache for
            version connections
        """
        # Allocate lock.
        x = threading.RLock()
        self._a = x.acquire
        self._r = x.release

        # Setup connection pools and cache info
        # _pools maps a version string to a _ConnectionPool object.
        self._pools = {}
        self._pool_size = pool_size
        self._cache_size = cache_size
        self._version_pool_size = version_pool_size
        self._version_cache_size = version_cache_size
        self._cache_size_bytes = cache_size_bytes

        # Setup storage
        self._storage = storage
        self.references = ZODB.serialize.referencesf
        try:
            storage.registerDB(self)
        except TypeError:
            storage.registerDB(self, None)  # Backward compat

        if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly():
            warnings.warn(
                "Storage doesn't have a tpc_vote and this violates "
                "the storage API. Violently monkeypatching in a do-nothing "
                "tpc_vote.", DeprecationWarning, 2)
            storage.tpc_vote = lambda *args: None

        try:
            storage.load(z64, '')
        except KeyError:
            # Create the database's root in the storage if it doesn't exist
            from persistent.mapping import PersistentMapping
            root = PersistentMapping()
            # Manually create a pickle for the root to put in the storage.
            # The pickle must be in the special ZODB format.
            file = cStringIO.StringIO()
            p = cPickle.Pickler(file, 1)
            p.dump((root.__class__, None))
            p.dump(root.__getstate__())
            t = transaction.Transaction()
            t.description = 'initial database creation'
            storage.tpc_begin(t)
            storage.store(z64, None, file.getvalue(), '', t)
            storage.tpc_vote(t)
            storage.tpc_finish(t)

        # Multi-database setup.
        if databases is None:
            databases = {}
        self.databases = databases
        self.database_name = database_name
        if database_name in databases:
            raise ValueError("database_name %r already in databases" %
                             database_name)
        databases[database_name] = self

        self._setupUndoMethods()
        self._setupVersionMethods()
        self.history = storage.history
Example #10
fields = []
rows = []
transactions = []

# reading csv file
with open(filename, "r") as csvfile:
    csvreader = csv.reader(csvfile)
    fields = next(csvreader)
    for row in csvreader:
        rows.append([row[0], row[2], row[3], float(row[4])])

# move transactions into local list
print(fields)
for x in rows:
    # Column indices are hard-coded to this CSV layout; mapping columns by
    # header name would be more robust (see the sketch after this example).
    trans = transaction.Transaction(x[0], x[1], x[2], x[3])
    transactions.append(trans)

# this is to test that the transactions are in the list properly
for x in transactions:
    x.printInfo()

with open("transaction-deposit.csv", "w") as csvfile:
    csvwriter = csv.writer(csvfile)
    rows = []
    for row in transactions:
        # Wrap each repr in a list: writerows() treats a bare string as a
        # sequence of one-character fields.
        rows.append([repr(row)])
    csvwriter.writerows(rows)
# Save contents of account objects to json file
with open("account-data.json", "w") as f:
    json.dump(accounts, f, default=account.transform)
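
If the CSV layout ever changes, csv.DictReader maps columns by header name instead of by position; a sketch, with the header names as assumptions.

import csv

with open(filename, "r") as csvfile:
    for row in csv.DictReader(csvfile):
        # Header names here are assumptions; adjust them to the real file.
        rows.append([row["id"], row["payee"], row["type"],
                     float(row["amount"])])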
Example #11
 def more(self, event):
     transaction.Transaction(self.GetParent(),
                             transaction_type=self.data.type,
                             data=self.data,
                             editable=False)
     self.exit(None)
Example #12
def trading():
    if not validateLogin():
        return render_template('login.html')

    if request.method == "POST":
        try:
            cursor, conn = mysqlConfig.mysql_connection()
            conn.autocommit = False

            # show ask price and bid price
            cursor.execute(
                "SELECT crypto_id FROM crypto WHERE crypto_name = %s",
                (request.form['crypto_name'], ))
            crypto_id = cursor.fetchone()[0]
            side = request.form['side']
            # strftime returns the formatted string; the original call
            # discarded its result.
            time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            if side == 'Buy':
                # create transaction
                trans = ts.Transaction(crypto_id, request.form['crypto_name'],
                                       side, float(request.form['buy_amount']),
                                       float(request.form['buy_price']), time)

                # check if valid price
                coin_name = trans.name.lower() + 'usdt'
                compare_price = float(api.marketData(coin_name)['askPrice'])
                if trans.price < compare_price * 0.95:
                    error_message = "The price you set is 5% lower than the current ask price. Please reset a valid one."
                    return render_template('error.html',
                                           error_message=error_message)
                if trans.price > compare_price * 1.1:
                    error_message = 'The price you set is 10% higher than the current ask price. Please reset a valid one.'
                    return render_template('error.html',
                                           error_message=error_message)

                # check if enough cash
                remaining_cash = ts.get_remaining_cash(cursor, conn,
                                                       session['userId'])
                if not ts.enough_cash_to_buy(remaining_cash,
                                             trans.get_amount()):
                    error_message = "You don't have enough cash to buy. Please go to Account-Deposit to refill your account."
                    return render_template('error.html',
                                           error_message=error_message)

                # update vwap
                remaining_coin = ts.get_remaining_coin(cursor, conn, trans.id,
                                                       session['userId'])
                VWAP = ts.get_VWAP(cursor, conn, trans.id, session['userId'])
                updated_VWAP = (VWAP * remaining_coin + decimal.Decimal(
                    trans.get_amount())) / (remaining_coin +
                                            decimal.Decimal(trans.amount))
                ts.update_VWAP(cursor, conn, trans.id, session['userId'],
                               updated_VWAP)

            if side == 'Sell':
                # create transaction
                trans = ts.Transaction(crypto_id,
                                       request.form['crypto_name'], side,
                                       float(request.form['sell_amount']),
                                       float(request.form['sell_price']), time)
                trans.adjust_side()

                # check if valid price
                coin_name = trans.name.lower() + 'usdt'
                if trans.price > float(
                        api.marketData(coin_name)['bidPrice']) * 1.05:
                    error_message = 'The price you set is 5% higher than the current bid price. Please reset a valid one.'
                    return render_template('error.html',
                                           error_message=error_message)
                elif trans.price < float(
                        api.marketData(coin_name)['bidPrice']) * 0.9:
                    error_message = 'The price you set is 10% lower than the current bid price. Please reset a valid one.'
                    return render_template('error.html',
                                           error_message=error_message)

                # check if we have enough coin
                remaining_coin = ts.get_remaining_coin(cursor, conn, trans.id,
                                                       session['userId'])
                if remaining_coin < abs(trans.amount):
                    error_message = "You don't have enough coin to sell. Please try again."
                    return render_template('error.html',
                                           error_message=error_message)

                # update RPL
                ts.update_RPL(cursor, conn, crypto_id, trans,
                              session['userId'], trans.price)

            # update cash
            remaining_cash = ts.get_remaining_cash(cursor, conn,
                                                   session['userId'])
            updated_cash = remaining_cash - decimal.Decimal(trans.get_amount())
            ts.update_cash(cursor, conn, updated_cash, session['userId'])

            # update blotter
            ts.update_blotter(cursor, conn, trans.id, trans.amount,
                              trans.price, trans.time, trans.side,
                              session['userId'])

            # update crypto_bank
            remaining_coin += decimal.Decimal(trans.amount)
            ts.update_crypto_bank(cursor, conn, crypto_id, remaining_coin,
                                  session['userId'])

            conn.commit()

        except conn.Error as error:
            print("Failed to update record to database rollback: {}".format(
                error))
            # reverting changes because of exception
            conn.rollback()

        finally:
            if (conn.is_connected()):
                cursor.close()
                conn.close()
                print("connection is closed")

        return redirect(url_for('trading'))

    else:
        Bitcoin = api.candlestick('btcusdt')
        Ethrium = api.candlestick('ethusdt')
        Litecoin = api.candlestick('ltcusdt')

        BitcoinWeekData = api.weekData(Bitcoin)
        EthriumWeekData = api.weekData(Ethrium)
        LitecoinWeekData = api.weekData(Litecoin)

        class currencyData:
            def __init__(self, name, marketData, weekdata, imageName):
                self.name = name
                self.marketData = marketData
                self.weekdata = weekdata
                self.imageName = imageName

        BitcoinData = currencyData('Bitcoin', api.marketData('btcusdt'),
                                   BitcoinWeekData, 'btc')
        EthriumData = currencyData('Ethrium', api.marketData('ethusdt'),
                                   EthriumWeekData, 'eth')
        LitecoinData = currencyData('Litecoin', api.marketData('ltcusdt'),
                                    LitecoinWeekData, 'ltc')
        currencies = [BitcoinData, EthriumData, LitecoinData]

        cursor, conn = mysqlConfig.mysql_connection()
        # update UPL
        coins = ['btcusdt', 'ethusdt', 'ltcusdt']
        bid_price = []
        userId = session['userId']
        remaining_coin_q = """SELECT amount FROM crypto_bank WHERE user_id = %s"""
        cursor.execute(remaining_coin_q, (userId, ))
        results = cursor.fetchall()

        VWAP_list = []
        crypto_id_list = [1, 2, 3]
        for crypto_id in crypto_id_list:
            VWAP = ts.get_VWAP(cursor, conn, crypto_id, session['userId'])
            VWAP_list.append(VWAP)

        for i in range(len(coins)):
            result = results[i]
            # results[i] is a 1-tuple holding a Decimal; convert it directly
            # instead of stripping characters from its repr.
            amount = float(result[0])
            upl = (float(api.marketData(coins[i])['bidPrice']) -
                   float(VWAP_list[i])) * amount
            bid_price.append(upl)
            cursor.execute(
                "UPDATE PnL SET UPL = %s WHERE crypto_id = %s AND user_id = %s",
                (
                    bid_price[i],
                    i + 1,
                    userId,
                ))
        conn.commit()

        # display blotter the pnl & Cash
        cursor, conn = mysqlConfig.mysql_connection()
        blotter_list, PnL_list = ts.display_blotter_PnL(
            cursor, conn, session['userId'])
        Cash_quantity = ts.get_remaining_cash(cursor, conn, session['userId'])
        cursor.close()
        conn.close()

    return render_template('trading.html',
                           Cash_quantity=Cash_quantity,
                           blotter_list=blotter_list,
                           PnL_list=PnL_list,
                           len=len(currencies),
                           currencies=currencies)
Example #13
File: testZEO.py Project: bendavis78/zope
    def checkStoreAndLoadBlob(self):
        from ZODB.utils import oid_repr, tid_repr
        from ZODB.blob import Blob, BLOB_SUFFIX
        from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
             handle_serials
        import transaction

        somedata_path = os.path.join(self.blob_cache_dir, 'somedata')
        somedata = open(somedata_path, 'w+b')
        for i in range(1000000):
            somedata.write("%s\n" % i)
        somedata.seek(0)

        blob = Blob()
        bd_fh = blob.open('w')
        ZODB.utils.cp(somedata, bd_fh)
        bd_fh.close()
        tfname = bd_fh.name
        oid = self._storage.new_oid()
        data = zodb_pickle(blob)
        self.assert_(os.path.exists(tfname))

        t = transaction.Transaction()
        try:
            self._storage.tpc_begin(t)
            r1 = self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
            r2 = self._storage.tpc_vote(t)
            revid = handle_serials(oid, r1, r2)
            self._storage.tpc_finish(t)
        except:
            self._storage.tpc_abort(t)
            raise

        # The uncommitted data file should have been removed
        self.assert_(not os.path.exists(tfname))

        def check_data(path):
            self.assert_(os.path.exists(path))
            f = open(path, 'rb')
            somedata.seek(0)
            d1 = d2 = 1
            while d1 or d2:
                d1 = f.read(8096)
                d2 = somedata.read(8096)
                self.assertEqual(d1, d2)

        # The file should be in the cache ...
        filename = self._storage.fshelper.getBlobFilename(oid, revid)
        check_data(filename)

        # ... and on the server
        server_filename = filename.replace(self.blob_cache_dir, self.blobdir)
        self.assert_(server_filename.startswith(self.blobdir))
        check_data(server_filename)

        # If we remove it from the cache and call loadBlob, it should
        # come back. We can do this in many threads.  We'll instrument
        # the method that is used to request data from the server to
        # verify that it is only called once.

        sendBlob_org = ZEO.ServerStub.StorageServer.sendBlob
        calls = []

        def sendBlob(self, oid, serial):
            calls.append((oid, serial))
            sendBlob_org(self, oid, serial)

        ZODB.blob.remove_committed(filename)
        returns = []
        threads = [
            threading.Thread(target=lambda: returns.append(
                self._storage.loadBlob(oid, revid))) for i in range(10)
        ]
        [thread.start() for thread in threads]
        [thread.join() for thread in threads]
        [self.assertEqual(r, filename) for r in returns]
        check_data(filename)
Example #14
    def checkTransactionalUndoIterator(self):
        # this test overrides the broken version in TransactionalUndoStorage.

        s = self._storage

        BATCHES = 4
        OBJECTS = 4

        orig = []
        for i in range(BATCHES):
            t = transaction.Transaction()
            tid = p64(i + 1)
            s.tpc_begin(t, tid)
            for j in range(OBJECTS):
                oid = s.new_oid()
                obj = MinPO(i * OBJECTS + j)
                s.store(oid, None, zodb_pickle(obj), '', t)
                orig.append((tid, oid))
            s.tpc_vote(t)
            s.tpc_finish(t)

        # pylint:disable=unnecessary-comprehension
        orig = [(tid, oid, s.getTid(oid)) for tid, oid in orig]

        i = 0
        for tid, oid, revid in orig:
            self._dostore(oid,
                          revid=revid,
                          data=MinPO(revid),
                          description="update %s" % i)

        # Undo the OBJECTS transactions that modified objects created
        # in the ith original transaction.

        def undo(i):
            info = s.undoInfo()
            t = transaction.Transaction()
            s.tpc_begin(t)
            base = i * OBJECTS + i
            for j in range(OBJECTS):
                tid = info[base + j]['id']
                s.undo(tid, t)
            s.tpc_vote(t)
            s.tpc_finish(t)

        for i in range(BATCHES):
            undo(i)

        # There are now (2 + OBJECTS) * BATCHES transactions:
        #     BATCHES original transactions, followed by
        #     OBJECTS * BATCHES modifications, followed by
        #     BATCHES undos

        iter = s.iterator()
        offset = 0

        eq = self.assertEqual

        for i in range(BATCHES):
            txn = iter[offset]
            offset += 1

            tid = p64(i + 1)
            eq(txn.tid, tid)

            L1 = [(rec.oid, rec.tid, rec.data_txn) for rec in txn]
            L2 = [(oid, revid, None) for _tid, oid, revid in orig
                  if _tid == tid]

            eq(L1, L2)

        for i in range(BATCHES * OBJECTS):
            txn = iter[offset]
            offset += 1
            eq(len([rec for rec in txn if rec.data_txn is None]), 1)

        for i in range(BATCHES):
            txn = iter[offset]
            offset += 1

            # The undos are performed in reverse order.
            otid = p64(BATCHES - i)
            L1 = [rec.oid for rec in txn]
            L2 = [oid for _tid, oid, revid in orig if _tid == otid]
            L1.sort()
            L2.sort()
            eq(L1, L2)

        self.assertRaises(IndexError, iter.__getitem__, offset)
Example #15
    def startListening(self):
        # Listen (1 connection at a time)
        self.sock.listen(1)
        # Any received transactions
        transactions = []
        done_loopin = False

        # Loop
        while not done_loopin:
            # Wait for connection
            print("Waiting for a connection...\n")
            connection, client_address = self.sock.accept()

            try:
                print("Connection from {}".format(client_address))

                t_list = b''
                # Receive Data
                while True:
                    data = connection.recv(4096)
                    # Current UNIX timestamp
                    data_time = time.time()
                    # print("Received '{}'".format(data))

                    if data:
                        # print("Sending data back to client for confirmation.")
                        connection.sendall(data)

                        # Test
                        # test_data1 = "Give Sarah 10n from Logan"
                        # test_data2 = "Give Sarah 10n from Logan; What a gal!"
                        t_list += data
                        # print("Added data to t_list")

                    else:
                        print(
                            "No more data from {}, the filthy animal!".format(
                                client_address))
                        break

                # Split Transactions on bytes
                split_data = t_list.split(b'\x80\x03')
                # Add each Transaction object to transactions array
                for tx_data in split_data:
                    if tx_data:
                        try:
                            received_tx = transaction.Transaction(
                                None, tx_data)
                            transactions.append(received_tx)
                        except Exception:
                            print(
                                "Received malformed transaction in byte format, skipping."
                            )

            finally:
                # Clean Up!
                connection.close()
                done_loopin = True

        # Check Transactions for Validity, and only accept the good ones
        check = self.checkTransactions(transactions)
        if check:
            print("The following transactions were invalid:\n")
            print(check)
            return False

        # Return Transactions array
        if transactions:
            count = len(transactions)
            print("Submitting " + str(count) + " transactions to NutChain")
            return transactions
        else:
            print("No transactions received")
            return False
Example #16
 def __new_transaction(self):
     t = transaction.Transaction()
     self.storage.tpc_begin(t)
     t.note(six.u('Updated factory references using `zodbupdate`.'))
     return t
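
A hedged companion (assumed, not taken from zodbupdate itself) that completes the two-phase commit begun by __new_transaction:

 def __commit_transaction(self, t):
     # Assumed counterpart to __new_transaction: vote, then finish.
     self.storage.tpc_vote(t)
     self.storage.tpc_finish(t)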
Example #17
import socket
import transaction  # the project's transaction module

# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Connect the socket to the port where the server is listening
server_address = ('localhost', 10000)
host, port = server_address
print("Connecting to {}:{}...".format(host, port))
sock.connect(server_address)

try:
    txs = []
    # Get and Send Data
    print("Please enter your transactions, one line at a time:\n")
    while True:
        line = input()
        if line:
            # Transaction
            tx = transaction.Transaction(line)

            txs.append(tx)
        else:
            break

    total_length = 0
    # Send Transactions
    for tx in txs:
        print("Sending {} to {} from {}".format(tx.amount, tx.receiver,
                                                tx.sender))
        # print(tx)

        # Send raw data (in bytes)
        tx_bytes = tx.byte_representation()
        sock.sendall(tx_bytes)
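
The server in Example #15 recovers transactions by splitting the byte stream on the pickle magic b'\x80\x03', which can also occur inside a payload. A length-prefixed frame is more robust; a minimal sketch, not part of the original protocol.

import struct

def send_framed(sock, payload):
    # Prefix each message with its length as a 4-byte big-endian integer.
    sock.sendall(struct.pack(">I", len(payload)) + payload)

def recv_framed(sock):
    def read_exact(n):
        buf = b""
        while len(buf) < n:
            chunk = sock.recv(n - len(buf))
            if not chunk:
                raise ConnectionError("peer closed mid-frame")
            buf += chunk
        return buf
    (length,) = struct.unpack(">I", read_exact(4))
    return read_exact(length)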
Example #18
 def retrieve_transaction(self, tx_hash, tx_height=0):
     import transaction
     r = self.synchronous_get([('blockchain.transaction.get',
                                [tx_hash, tx_height])])[0]
     if r:
         return transaction.Transaction(r)
Example #19
        sha256_hex = codecs.encode(sha256_2_nbpk_digest, 'hex')

        checksum = sha256_hex[:8]

        address_hex = (network_bitcoin_public_key + checksum).decode('utf-8')

        blockchain_address = base58.b58encode(address_hex).decode('utf-8')

        return blockchain_address


if __name__ == '__main__':
    wallet_A = Wallet()
    wallet_B = Wallet()
    value = 100
    print(f'wallet_A\'s blockchain_address : {wallet_A.blockchain_address}')
    print(f'wallet_B\'s blockchain_address : {wallet_B.blockchain_address}')
    print(f'value : {value}')
    t = transaction.Transaction(wallet_A.blockchain_address,
                                wallet_B.blockchain_address, 100,
                                wallet_A.private_key, wallet_A.public_key)
    blockchain = blockchain.BlockChain()
    blockchain.add_transaction(
        sender_blockchain_address=wallet_A.blockchain_address,
        recipient_blockchain_address=wallet_B.blockchain_address,
        value=value,
        sender_public_key=wallet_A.public_key,
        signature=t.generate_signature())
    blockchain.mining()
    print(blockchain.print_chain())
Example #20
def create_transaction(inputik, output):
    trans = transaction.Transaction(1, inputik, output, 0)
    trans = trans.get_full_transaction()
    return trans
def attack():
    global internal_storage, saved_block, attackQueue
    tx = request.get_json(force=True)["TX"]
    print("Entering attacker mode with:", tx)
    attackQueue.put(1)
    print("Current tx in memory:", waiting_tx)
    a = internal_storage["Miner"]
    starting_block = saved_block

    # make sure attacker has money
    t_attack = transaction.Transaction(
        _sender_public_key=internal_storage["Public_key"],
        _receiver_public_key=internal_storage["Public_key"],
        _amount=100,
        _comment="Ensure enough",
        _reward=True)
    t_attack.sign(a.client.privatekey)
    print("!====> 1:", starting_block)
    newBlock = selfMine(t_attack.data, starting_block)
    starting_block = newBlock.getData()
    print("!====> 2:", starting_block)

    # create tx for merchant 2
    t = transaction.Transaction(
        _sender_public_key=internal_storage["Public_key"],
        _receiver_public_key=tx["Receiver"],
        _amount=tx["Amount"],
        _comment=tx["Comment"])
    t.sign(a.client.privatekey)
    newBlock = selfMine(t.data, starting_block)
    starting_block = newBlock.getData()
    print("!====> 3:", starting_block)

    # give miner back his money
    for addr, balance in a.blockchain.current_block.state["Balance"].items():
        if addr != "merchant1" and addr != internal_storage["Public_key"]:
            print("<== !! ==>")
            print("Giving {} to {}".format(balance, addr))
            t = transaction.Transaction(
                _sender_public_key=internal_storage["Public_key"],
                _receiver_public_key=addr,
                _amount=balance,
                _comment="generated",
                _reward=True)
            t.sign(a.client.privatekey)
            newBlock = selfMine(t.data, starting_block)
            starting_block = newBlock.getData()

    # start mining blocks and adding to starting block
    for i in range(len(waiting_tx)):
        # create tx and manually mine
        t = transaction.Transaction(
            _sender_public_key=internal_storage["Public_key"],
            _receiver_public_key=internal_storage["Public_key"],
            _amount=0,
            _comment="generated",
            _reward=True)
        t.sign(a.client.privatekey)
        newBlock = selfMine(t.data, starting_block)
        starting_block = newBlock.getData()

    return "Attack Complete!"
Example #22
from portfolio import Portfolio
import pandas as pd
import datetime as dt
from dateutil import parser
import csv
import copy
import transaction  # needed for transaction.Transaction below

# if qty of any stock is negative (check in calculate value function) throw error

if __name__ == '__main__':
    # read in transactions file and construct list of transactions
    transactions = []
    with open('transactions.csv', 'r') as f:
        for line in f:
            transactions.append(transaction.Transaction(line))

    # turn transaction objects into portfolio objects
    portfolios = []  # list of portfolio objects for each date
    init_port = Portfolio()
    portfolios.append(init_port)
    for t in transactions:
        portfolios.append(Portfolio(t, portfolios[-1]))

    # get all trading days as a list
    bizdates = pd.bdate_range(portfolios[0].start_date,
                              parser.parse('1/1/2016'))
    print(bizdates)

    # get portfolio values
    portfolios_length = len(portfolios)
Example #23
    def _wait_for_connections(self):
        # Temp for testing
        b1 = block.Block()
        while True:
            try:
                print("Awaiting new connection")
                self.socket.listen(3)

                conn, addr = self.socket.accept()
                print("Got connection from:", addr)

                data = conn.recv(1024)
                string = bytes.decode(data)

                split_message = string.split('$?$')
                message_type = split_message[0]

                if message_type == 'ping':
                    message_type, name, pubkey_n, pubkey_e = split_message
                    user_pubkey = {'pubkey_n': int(pubkey_n),
                                   'pubkey_e': int(pubkey_e)}
                    self.phonebook.add_client_to_phonebook(name, user_pubkey)

                    ping_response = "ack {0} {1} {2} {3}".format(
                        self.uid, self.port, self.publickey.n,
                        self.publickey.e)
                    conn.send(bytes(ping_response, 'utf-8'))

                elif message_type == 't':
                    (message_type, sender, receiver, message, value, fee,
                     timestamp, verification) = split_message

                    verification_message = bytes(
                        "$?$".join([receiver, message, str(value), fee,
                                    str(timestamp)]), 'utf-8')

                    sender_key = self.phonebook.get_pubkey_from_UID(sender)
                    try:
                        if rsa.verify(verification_message,
                                      literal_eval(verification), sender_key):
                            print("Sender: " + sender + " - Receiver: " +
                                  receiver + " - Message: " + message)
                            transaction = ts.Transaction(sender, sender,
                                                         receiver, message)
                            b1.add_transaction(transaction)
                            if b1.has_enough_transactions():
                                b1.mine()
                                self.broadcast_block_to_network(b1)
                                b1.empty_transactions()
                    except VerificationError:
                        print('Not verified')

                elif message_type == 'b':
                    message_type, message = split_message
                    self.chain.add_block_to_ledger(message)

                elif message_type == 'book':
                    full_book = self.phonebook.get_all_peers()

                    peers_serialized = json.dumps(full_book[0])
                    clients_serialized = json.dumps(full_book[1])

                    book_response = ("bookack$" + peers_serialized + "$" +
                                     clients_serialized)
                    conn.send(bytes(book_response, 'utf-8'))

            finally:
                conn.close()
Example #24
	def msg_makeRoute(self, msg):
		log.log('Processing MakeRoute message')

		sourceLink = self.__getLinkObject(msg.ID)
		ret = sourceLink.makeRouteIncoming(msg)

		payerID, payeeID = \
		{
		True: (msg.ID, None),
		False: (None, msg.ID)
		}[msg.isPayerSide]

		#Possible routes we can take
		if msg.routingContext is None:
			#Order is important: try meeting points first
			possibleLinks = self.meetingPoints.keys() + self.links.keys()
		else:
			possibleLinks = [msg.routingContext]

		def tryRemove(ID):
			try:
				possibleLinks.remove(ID)
			except ValueError:
				pass #it's OK if the source link wasn't present already
			
		#Remove the source link:
		tryRemove(msg.ID)

		#Remove source link and possible routes of earlier instances of
		#this route:
		#for these, the route should be made by the earlier instance.
		#Allowing them to be selected by later instances would allow
		#infinite routing loops.
		#Note: generally, this will remove ALL routes, if earlier instances of
		#the same route exist. The only situation where this is not the case
		#is when an earlier instance was restricted in its routing choices,
		#and, theoretically, when a new route was created in-between.
		earlierTransactions = self.findMultipleTransactions(
			transactionID=msg.transactionID, isPayerSide=msg.isPayerSide)
		for earlierTx in earlierTransactions:
			earlierSourceLinkID = earlierTx.payerID if msg.isPayerSide else earlierTx.payeeID
			tryRemove(earlierSourceLinkID)

			for ID in earlierTx.initialLinkIDs:
				tryRemove(ID)

		#Increment end time on the payee side:
		#On the payer side, this will be done in haveRoute.
		if not msg.isPayerSide:
			#TODO: check sanity (and data type) of startTime, endTime
			msg.endTime += self.settings.timeoutIncrement

		#Create new transaction
		newTx = transaction.Transaction(
			state=transaction.Transaction.states.makingRoute,
			isPayerSide=msg.isPayerSide,
			payeeID=payeeID,
			payerID=payerID,

			initialLinkIDs=possibleLinks[:],
			remainingLinkIDs=possibleLinks[:],

			meetingPointID=msg.meetingPointID,
			amount=msg.amount,

			transactionID=msg.transactionID,
			startTime=msg.startTime,
			endTime=msg.endTime
			)
		self.transactions.append(newTx)

		nextRoute = newTx.tryNextRoute()
		if nextRoute is None:
			log.log('  No route found')
			#Delete the tx we just created:
			self.transactions.remove(newTx)
			#Send back haveNoRoute:
			ret += sourceLink.haveNoRouteOutgoing(
				msg.transactionID, msg.isPayerSide)
			return ret

		log.log('  Forwarding MakeRoute to the first route')

		ret += self.__getLinkObject(nextRoute).makeRouteOutgoing(msg)

		#route time-out:
		#TODO: configurable time-out value?
		ret.append(messages.TimeoutMessage(timestamp=time.time()+5.0, message=\
			messages.NodeStateTimeout_Route(
				transactionID=msg.transactionID, isPayerSide=msg.isPayerSide,
				payerID=newTx.payerID
				)))

		return ret
Example #25
import block, blockchain, transaction, time

blockchain = blockchain.Blockchain()

blockchain.construct_genesis()
print(blockchain)


# time.time must be called; `time - time` (module minus module) would
# raise a TypeError, so pass a numeric timestamp instead.
b = block.Block(1, transaction.Transaction("me", "you", 100, time.time()),
                time.time())
blockchain.addBlock(b)

print(blockchain)
Example #26
    def testBackupReadOnlyAccess(self, backup):
        """Check backup cluster can be used in read-only mode by ZODB clients"""
        B = backup
        U = B.upstream
        Z = U.getZODBStorage()
        #Zb = B.getZODBStorage()    # XXX see below about invalidations

        oid_list = []
        tid_list = []

        # S -> Sb link stops working during [cutoff, recover) test iterations
        cutoff = 4
        recover = 7

        def delayReplication(conn, packet):
            return isinstance(packet, Packets.AnswerFetchTransactions)

        with ConnectionFilter() as f:
            for i in xrange(10):
                if i == cutoff:
                    f.add(delayReplication)
                if i == recover:
                    # .remove() removes the filter and retransmits all packets
                    # that were queued once the first filtered packet was
                    # detected on a connection.
                    # on a connection.
                    f.remove(delayReplication)

                # commit new data to U
                txn = transaction.Transaction()
                txn.note(u'test transaction %s' % i)
                Z.tpc_begin(txn)
                oid = Z.new_oid()
                Z.store(oid, None, '%s-%i' % (oid, i), '', txn)
                Z.tpc_vote(txn)
                tid = Z.tpc_finish(txn)
                oid_list.append(oid)
                tid_list.append(tid)

                # make sure data propagated to B  (depending on cutoff)
                self.tic()
                if cutoff <= i < recover:
                    self.assertLess(B.backup_tid, U.last_tid)
                else:
                    self.assertEqual(B.backup_tid, U.last_tid)
                self.assertEqual(B.last_tid, U.last_tid)
                self.assertEqual(1, self.checkBackup(B, max_tid=B.backup_tid))

                # read data from B and verify it is what it should be
                # XXX we open new ZODB storage every time because invalidations
                # are not yet implemented in read-only mode.
                Zb = B.getZODBStorage()
                for j, oid in enumerate(oid_list):
                    if cutoff <= i < recover and j >= cutoff:
                        self.assertRaises(POSKeyError, Zb.load, oid, '')
                    else:
                        data, serial = Zb.load(oid, '')
                        self.assertEqual(data, '%s-%s' % (oid, j))
                        self.assertEqual(serial, tid_list[j])

                # verify how transaction log & friends behave under potentially
                # not-yet-fully fetched backup state (transactions committed at
                # [cutoff, recover) should not be there; otherwise transactions
                # should be fully there)
                Zb = B.getZODBStorage()
                Btxn_list = list(Zb.iterator())
                self.assertEqual(len(Btxn_list),
                                 cutoff if cutoff <= i < recover else i + 1)
                for j, txn in enumerate(Btxn_list):
                    self.assertEqual(txn.tid, tid_list[j])
                    self.assertEqual(txn.description,
                                     'test transaction %i' % j)
                    obj, = txn
                    self.assertEqual(obj.oid, oid_list[j])
                    self.assertEqual(obj.data, '%s-%s' % (obj.oid, j))

                # TODO test askObjectHistory once it is fixed

                # try to commit something to backup storage and make sure it is
                # really read-only
                Zb._cache.max_size = 0  # make store() do work in sync way
                txn = transaction.Transaction()
                self.assertRaises(ReadOnlyError, Zb.tpc_begin, txn)
                self.assertRaises(ReadOnlyError, Zb.new_oid)
                self.assertRaises(ReadOnlyError, Zb.store, oid_list[-1],
                                  tid_list[-1], 'somedata', '', txn)
                # tpc_vote first checks whether there were store replies -
                # thus not ReadOnlyError
                self.assertRaises(NEOStorageError, Zb.tpc_vote, txn)

                # close storage because client app is otherwise shared in
                # threaded tests and we need to refresh last_tid on next run
                # (XXX see above about invalidations not working)
                Zb.close()
Example #27
import client as c
import transaction as t
import block as b
import utilities as u

last_block_hash = ""

Dinesh = c.Client()

t0 = t.Transaction(
   "Genesis",
   Dinesh.identity,
   500.0
)

# create block instance
block0 = b.Block()

# init block values
block0.previous_block_hash = None
Nonce = None

# append to verified_transactions
block0.verified_transactions.append(t0)

# hash the block and digest the value
digest = hash(block0)
last_block_hash = digest

#
# creating blockchain
Example #28
 def open_new_monthly_income(self, event):
     transaction.Transaction(self, transaction_type=transaction.INCOME)
Example #29
 def hydrate(self, row):
     date = datetime.datetime.strptime(row[6], '%d/%m/%Y')
     return transaction.Transaction(date, numutil.parsefloat(row[2]),
                                    row[1])