def process_rawblock(raw):
    """Process one raw serialized block: bump the dashboard block counter,
    count the block's transactions and log the result.

    raw -- full raw block bytes; the first 80 bytes are the block header.
    Returns None; all output goes through the global counters and log_block().
    """
    global DASHBOARD_BLOCK_COUNTER, DASHBOARD_TX_COUNTER, DASHBOARD_FAILED_COUNTER
    DASHBOARD_BLOCK_COUNTER += 1
    # Header layout: bytes 0-3 version, 4-35 previous block hash,
    # 36-67 merkle root.  None of those fields are used here, so the
    # unused locals the original kept for them have been dropped.
    block_header = raw[:80]
    blockhash = calculate_id(block_header)
    # Count transactions via the existing iterator helper.  (A leftover
    # set_trace() breakpoint inside this loop has been removed, along with
    # a commented-out manual varint-parsing implementation that the helper
    # supersedes.)
    tx_count = sum(1 for _ in get_block_transactions(raw))
    log_block(blockhash, tx_count, DASHBOARD_BLOCK_COUNTER, DASHBOARD_TX_COUNTER, DASHBOARD_FAILED_COUNTER)
def divide(denominator=2, other_val=students_list):
    """Divide 10 by a running product of sample factors.

    For each factor in [3, 2, 1, 0] the denominator is multiplied by the
    factor and 10/denominator is appended to the result.  Zero factors are
    skipped: the original multiplied by 0 and then raised ZeroDivisionError
    on every run.

    denominator -- starting denominator (default 2)
    other_val   -- unused; kept for interface compatibility.  NOTE(review):
                   its default binds module-level `students_list` at
                   definition time -- confirm that is intended.
    Returns a list of the successive quotients.
    """
    list_num = []
    for value in [3, 2, 1, 0]:
        if value == 0:
            # Multiplying by zero would make this and every later
            # division raise ZeroDivisionError; skip zero factors.
            continue
        denominator *= value
        list_num.append(10 / denominator)
    return list_num
def fibo(n):
    """Return the n-th Fibonacci number (fibo(0) == 0, fibo(1) == 1).

    Raises ValueError for negative n; the original recursed forever in
    that case.  A leftover set_trace() breakpoint at n == 10 was removed.
    Note: naive recursion is exponential -- fine for small n only.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fibo(n - 1) + fibo(n - 2)
def validate():
    """Score the model on the validation set and print the accuracy.

    NOTE(review): relies on Xv (validation samples), yv (labels), w
    (weights), np and sigmoid from the enclosing scope -- confirm they are
    defined before this is called.  A leftover set_trace() breakpoint was
    removed.  Prints the fraction of correct predictions; returns None.
    """
    # Threshold the sigmoid of each sample's score at 0.5.
    predictions = [1 if sigmoid(np.dot(x, w)) > 0.5 else 0 for x in Xv]
    score = 0
    for i, p in enumerate(predictions):
        if p == yv[i]:
            score += 1
    print("score is %.2f"%(score/len(yv)))
def kernel_gradient_descent_TEST(X, y, epsilon):
    """Kernelized logistic-regression training by stochastic coordinate
    descent on (at most) the first 500 samples.

    X, y    -- training samples and 0/1 labels
    epsilon -- learning rate for the randomly chosen dual coordinate
    Returns the (n, 1) dual coefficient vector `a`.

    Two leftover set_trace() breakpoints were removed, and the progress
    print no longer divides by zero when n < 10.
    """
    X = X[:500]
    y = y[:500]
    n = len(X)
    a = np.array(np.ones(n), ndmin=2).T
    lamb = 1e-3   # L2 regularization strength
    rho = 12      # kernel parameter passed through to kernel()
    count = 0

    def calculate_kernel_matrix():
        # Precompute the full n x n Gram matrix.  NOTE(review): np.matrix
        # is kept because the update loop relies on its 2-D row indexing,
        # but it is deprecated in NumPy -- consider migrating to ndarray.
        kernel_matrix = [[0 for _ in range(n)] for _ in range(n)]
        step = max(1, n // 10)  # avoid ZeroDivisionError when n < 10
        for i in range(n):
            if i % step == 0:
                print("calculating kernel matrix...")
            for j in range(n):
                kernel_matrix[i][j] = kernel(X[i], X[j], rho)
        return np.matrix(kernel_matrix)

    kernel_matrix = calculate_kernel_matrix()
    R = math.inf
    while R > 0:
        # Gradient step on one randomly chosen dual coordinate ...
        i = random.randrange(0, n)
        a[i] = a[i] - (lamb * a[i]) + epsilon * (y[i] - sigmoid(np.dot(kernel_matrix[i], a)))
        # ... and pure regularizer shrinkage on every other coordinate.
        for h in range(n):
            if h != i:
                a[h] = a[h] - lamb * a[h]
        # Regularized negative log-likelihood as the stopping criterion.
        inside_sigmoid = sum([a[j] * kernel_matrix[j, i] for j in range(n)])
        R = -sum([y[j] * safelog(sigmoid(inside_sigmoid))
                  + (1 - y[j]) * safelog(1 - sigmoid(inside_sigmoid))
                  for j in range(n)]) + lamb * np.dot(a.T, a)
        count += 1
        if count % 100 == 0:
            print(R, count)
        if count == 10000:
            # Hard cap so a non-converging run still terminates.
            break
    return a
def test_service(self):
    """Exercise the full service lifecycle: create a service, attach an
    app, switch the primary app, then delete and verify cleanup.

    A leftover `import ptpdb; ptpdb.set_trace()` breakpoint was removed.
    """
    spec = self.spec('push7-test-service-uc')
    ServiceUseCase.create(spec)
    names = [service.name for service in ServiceUseCase.list()]
    assert 'push7-test-service-uc' in names

    def search_app(name='push7-test-service-uc'):
        # Return every app whose name contains `name`, or None when none match.
        apps = [app for app in AppUseCase.list() if name in app.name]
        if len(apps) == 0:
            return None
        return apps

    assert search_app() is not None
    AppUseCase.create(spec)
    # Pick a non-primary app as the switch target.
    target = None
    for app in search_app():
        if not app.primary:
            target = app
    # Fail with a clear assertion instead of an AttributeError on
    # target.name below when no non-primary app exists.
    assert target is not None
    ServiceUseCase.switch('push7-test-service-uc', target.name)
    # After the switch, the chosen app must have become primary.
    target2 = None
    for app in search_app():
        if app.primary:
            target2 = app
    assert target2 == target
    ServiceUseCase.delete('push7-test-service-uc')
    names = [service.name for service in ServiceUseCase.list()]
    assert 'push7-test-service-uc' not in names
    assert search_app() is None
def logistic_reg(option, epsilon, st, kern=False):
    """Train and validate a logistic-regression spam classifier.

    option  -- feature preprocessing: 0 standardize, 1 log-transform,
               2 binarize (anything else: raw features)
    epsilon -- learning rate passed to the descent routine
    st      -- whether gradient_descent runs stochastically
    kern    -- use the kernelized trainer instead of plain gradient descent

    Prints the validation accuracy; returns None.  Three leftover
    set_trace() breakpoints were removed.
    """
    data = sio.loadmat('spam_dataset/spam_data.mat')
    # silence warnings
    X = data['training_data'].astype('float64', copy=False)
    y = data['training_labels'].T
    if option == 0:
        X = tr_standardize(X)
    elif option == 1:
        X = tr_log(X)
    elif option == 2:
        X = tr_binarize(X)
    X, y = shuffle(X, y)
    # Fixed train/validation split: first 3418 rows train, rest validate.
    X, Xv = X[:3418], X[3418:5172]
    y, yv = y[:3418], y[3418:5172]
    if kern:
        # Kernel trainer returns dual coefficients; recover primal weights.
        a = kernel_gradient_descent(X, y, epsilon)
        w = np.dot(X.T, a)
    else:
        w = gradient_descent(X, y, epsilon, stochastic=st)

    def validate():
        # Fraction of validation samples classified correctly at the
        # sigmoid > 0.5 threshold.
        predictions = []
        for x in Xv:
            y_hat = 1 if sigmoid(np.dot(x, w)) > 0.5 else 0
            predictions.append(y_hat)
        score = 0
        for i, p in enumerate(predictions):
            if p == yv[i]:
                score += 1
        print("score is %.2f"%(score/len(yv)))

    validate()
def load_tv_show_data(self):
    """
    Load the data, creating a dictionary structure with all
    seasons, episodes and magnet links.

    Posts the show id to the site's search endpoint, scrapes every
    magnet anchor from the response, and records each (season, episode,
    magnet) triple via add_season_and_episode().  Returns self._instance.
    A leftover set_trace() breakpoint was removed.
    """
    url = "{}/search/".format(URL)
    payload = {'SearchString': self._id_tv_show, 'SearchString1': '', 'search': 'Search'}
    # SECURITY NOTE(review): verify=False disables TLS certificate
    # validation, exposing the request to MITM -- kept as-is (presumably
    # the site's certificate is broken), but confirm and remove if possible.
    req = requests.post(url, data=payload, timeout=15, verify=False)
    soup = BeautifulSoup(req.content, 'html.parser')
    self._season_and_episode = {}
    # Crude anchor split: stringify all magnet anchors, then cut on the
    # closing tag so each chunk holds one episode's markup.
    episodes = str(soup('a', {'class': 'magnet'})).split('</a>')
    for epi in episodes:
        for pat in self._patterns:
            data = self._match_pattern(pat, epi)
            if data is None:
                continue
            # data = (season, episode, magnet)
            self.add_season_and_episode(data[0], data[1], data[2])
    return self._instance
# Build, verify and broadcast the transaction that redeems the funded
# output back to the sender after the locktime.  A leftover
# `from ptpdb import set_trace; set_trace()` debug breakpoint was removed.
txinfo = proxy.gettransaction(fund_tx)
txin = CMutableTxIn(COutPoint(fund_tx, txinfo['details'][0]['vout']))
# By default, python-bitcoinlib disables locktime via nSequence, so we must enable
txin.nSequence = 0xfffffffe
# Use an integer fee: 0.001*COIN is a float (100000.0), and a float
# output value can make the transaction amount non-integral.
default_fee = int(0.001 * COIN)
txout = CMutableTxOut(amount - default_fee, senderpubkey.to_scriptPubKey())
tx = CMutableTransaction([txin], [txout])
tx.nLockTime = redeemblocknum
# Sign the redeem script with Alice's private key ( for whose address the first payment path
# is set up exclusively for )
sighash = SignatureHash(txin_redeemScript, tx, 0, SIGHASH_ALL)
sig = sender_seckey.sign(sighash) + bytes([SIGHASH_ALL])
# Load the script sig of Bob's redemption transaction with the appropriate values
txin.scriptSig = CScript([sig, sender_seckey.pub, 0, txin_redeemScript])
# Verify
VerifyScript(txin.scriptSig, txin_scriptPubKey, tx, 0, (SCRIPT_VERIFY_P2SH,))
# Fast forward time
proxy.generatetoaddress(lockduration, proxy.getnewaddress())
# Send
txid = proxy.sendrawtransaction(tx)
# Confirm
proxy.generatetoaddress(1, proxy.getnewaddress())
def keyPressEvent(self, event):
    """Dispatch keyboard shortcuts for the player window.

    Two tiers: application-level keys (quit, debugger, open output,
    info/help) always work; playback keys below the guard only work once
    a file is loaded and playback position is known.
    """
    k = event.key()
    # Exact-equality modifier checks: each flag is True only when that
    # modifier is held ALONE (e.g. Ctrl+Shift sets none of them).
    ctrl = event.modifiers() == Qt.ControlModifier
    alt = event.modifiers() == Qt.AltModifier
    shift = event.modifiers() == Qt.ShiftModifier
    if ctrl and k in (Qt.Key_Q, Qt.Key_W):
        # Ctrl+Q / Ctrl+W: quit the application.
        QtWidgets.QApplication.quit()
    elif k == Qt.Key_D and ctrl:
        # Ctrl+D: drop into a debugger on purpose (developer hotkey);
        # prefer ptpdb, fall back to the stdlib pdb.
        try:
            import ptpdb
            ptpdb.set_trace()
        except ImportError:
            import pdb
            pdb.set_trace()
    elif k == Qt.Key_O:
        # O: open the output file; Ctrl+O: open its containing directory
        # ('.' when the path has no directory component).
        outfile = self.get_user_ffmpeg_args()[0]
        try:
            if not ctrl:
                default_open(outfile)
            else:
                default_open(os.path.split(outfile)[0] or '.')
        except Exception as e:
            # Best-effort open: report failure instead of crashing the UI.
            self.print_error(e)
    elif k == Qt.Key_Escape:
        self.setFocus(True)
    elif k == Qt.Key_I:
        self.print_video_info()
    elif k == Qt.Key_H:
        self.print(doc)
    ################################
    # Playback keys require a loaded file with a known position.
    if self.playback_pos is None or not self.state_loaded:
        return
    ################################
    if k == Qt.Key_Space:
        # Toggle pause.
        self.player.pause = not self.player.pause
    elif k == Qt.Key_BracketRight:
        self.player.command('add', 'chapter', 1)
    elif k == Qt.Key_BracketLeft:
        self.player.command('add', 'chapter', -1)
    elif k == Qt.Key_Up:
        self.player.seek(5, 'relative-percent')
    elif k == Qt.Key_Down:
        self.player.seek(-5, 'relative-percent')
    elif k == Qt.Key_Left:
        # Left arrow, by modifier: Ctrl = 1s exact seek back, Alt =
        # previous anchor, Shift = previous keyframe, plain = one frame back.
        if ctrl:
            self.player.seek(-1, 'relative', 'exact')
        elif alt:
            self.to_next_anchor(True)
        elif shift:
            self.keyframe_jumped = True
            self.to_next_keyframe(True)
        else:
            self.player.frame_back_step()
    elif k == Qt.Key_Right:
        # Right arrow: mirror of Left, moving forward.
        if ctrl:
            self.player.seek(1, 'relative', 'exact')
        elif alt:
            self.to_next_anchor()
        elif shift:
            self.keyframe_jumped = True
            self.to_next_keyframe()
        else:
            self.player.frame_step()
    elif k == Qt.Key_Z:
        self.put_anchor()
    elif k == Qt.Key_X:
        self.del_anchor()
    elif k == Qt.Key_K:
        # K: toggle keyframe display on the seekbar; warn when no
        # keyframe information is available to show.
        if not self.show_keyframes and not self.ipts:
            self.print('No keyframes information.')
        self.show_keyframes = not self.show_keyframes
        self.ui.seekbar.update()
    elif k == Qt.Key_F:
        # F: open the ffmpeg shift dialog pre-filled with current values.
        self.shifts_dialog_ui.a.setValue(self.ffmpeg_shift_a)
        self.shifts_dialog_ui.b.setValue(self.ffmpeg_shift_b)
        self.shifts_dialog.show()
    # NOTE(review): the collapsed source makes the indentation of this
    # call ambiguous; it is placed at method level (refresh after any
    # playback key) -- confirm it should not be inside the Key_F branch.
    self.update_statusbar()