def search():
    """Flask endpoint: store the posted search keys and run the search.

    Reads the JSON body as the search keys, records them in the
    module-level ``data`` dict, then runs the search synchronously
    (a threaded variant is kept commented out below).

    Returns a ``(json, status)`` tuple.
    """
    global data
    keys = request.get_json()
    data['keys'] = keys
    # Thread(target=run, args=[data]).start()
    run(data)
    # BUG FIX: corrected the typo "succes" in the response message.
    return {'message': 'success search'}, 200
def whitelist(params, remote_ip):
    """Insert iptables ACCEPT rules for remote_ip unless already whitelisted.

    Returns True when new rules were inserted, False when the address was
    already whitelisted.
    """
    if is_whitelist(params, remote_ip):
        return False
    tools.run("iptables -I %s --source %s -j ACCEPT" % (params.chain_name, remote_ip))
    tools.run("iptables -t nat -I %s --source %s -j ACCEPT" % (params.chain_name, remote_ip))
    return True
def main():
    """Capture the javac invocations of a build command and hand them to tools."""
    parsed_args, build_cmd, capturer = arg.parse_args()
    log.configure_logging(parsed_args.output_directory, parsed_args.log_to_stderr)
    log.log_header()
    javac_commands, jars = capturer.gen_instance(build_cmd).capture()
    log.info('Results: %s', pprint.pformat(javac_commands))
    tools.run(parsed_args, javac_commands, jars)
def main():
    """Benchmark every count_doubles implementation against the C result."""
    implementations = (
        'count_doubles_rust',
        'count_doubles_c',
        'count_doubles_numba',
        'count_doubles_pythran',
        'count_doubles_python',
    )
    reference = count_doubles_c(*ARGS)
    run('count_doubles', implementations, expected=reference, repeat=10)
def main():
    """Benchmark the Mandelbrot implementations against the numba output."""
    implementations = ('fractal_numba', 'fractal_pythran', 'fractal_python')
    expected = image.copy()
    call_args = list(ARGS)
    # The numba implementation writes its result into the second-to-last
    # argument, which we substitute with our reference buffer.
    call_args[-2] = expected
    fractal_numba(*call_args)
    run('mandelbrot', implementations, expected=expected, repeat=1)
def main():
    """Benchmark the sum2d implementations against the numpy result."""
    implementations = (
        # 'sum2d_rust',
        # 'sum2d_c',
        'sum2d_numba',
        'sum2d_pythran',
        'sum2d_python',
        'sum2d_numpy',
    )
    reference = sum2d_numpy(*ARGS)
    run('sum2d', implementations, expected=reference, repeat=100)
def install_vim(config_path='/usr/lib/python2.7/config-x86_64-linux-gnu',
                remove_build_dir=True, tag=None):
    """Clone, build and install Vim from source, then link it into /usr/bin.

    config_path: Python config directory to compile the python interp against.
    remove_build_dir: delete the temporary build tree when done.
    tag: optional hg tag to check out (e.g. 'v7-3-1034'); default is tip.
    """
    with tempdir(remove_build_dir) as build_dir:
        print('Building Vim at folder {}'.format(build_dir))
        vim_src_dir = os.path.join(build_dir, 'vim_src')
        logging.info('Cloning Vim repo')
        run(['hg', 'clone', 'https://vim.googlecode.com/hg/', vim_src_dir])
        with cd(vim_src_dir):
            if tag:
                run(['hg', 'update', tag])  # e.g. -rv7-3-1034, -rv7-4b-022
            run(['./configure', '--enable-multibyte', '--with-tlib=ncurses',
                 '--enable-pythoninterp=yes', '--enable-rubyinterp=yes',
                 '--with-features=huge',
                 '--with-python-config-dir={}'.format(config_path)])
            # BUG FIX: vim_src_dir was being passed to make as a *target*
            # name; we are already chdir'ed into the source tree, so just
            # build and install.
            run(['make', '-j', '3'])
            run(['make', 'install'])
        logging.info('Vim compiled and installed. Linking to /usr/bin/vim')
        create_symlink('/usr/local/bin/vim', '/usr/bin/vim', backup=False)
def _get_gdrive_credentials(self, wkv):
    """Obtain OAuth2 credentials for the Google Drive API and store them
    on ``self._credentials``.

    wkv: passed through to the oauth2client ``run()`` flow — presumably a
    window/keyring-style handle; TODO confirm against the caller.
    """
    # Set up a Flow object to be used for authentication. Add one
    # or more of the following scopes. PLEASE ONLY ADD THE SCOPES
    # YOU NEED. For more information on using scopes please see
    # <https://developers.google.com/+/best-practices>.
    FLOW = flow_from_clientsecrets(CLIENT_SECRETS, scope=[
        'https://www.googleapis.com/auth/drive',
        'https://www.googleapis.com/auth/drive.apps.readonly',
        'https://www.googleapis.com/auth/drive.metadata.readonly',
        'https://www.googleapis.com/auth/drive.file',
        'https://www.googleapis.com/auth/drive.scripts',
        'https://www.googleapis.com/auth/drive.readonly',
    ], message=MISSING_CLIENT_SECRETS_MESSAGE)

    # If the Credentials don't exist or are invalid, run through
    # the native client flow. The Storage object will ensure that
    # if successful the good Credentials will get written back to
    # gconf.
    storage = Storage(None, use_gconf=True)
    self._credentials = storage.get()
    # Commented out to force exercising the web authentification
    # code during debugging.
    # if self._credentials is None or self._credentials.invalid:
    # NOTE(review): with the condition above disabled, the web auth flow
    # runs unconditionally and overwrites any stored credentials.
    self._credentials = run(FLOW, storage, wkv)
def update_hosts(ip):
    """Sync the kernel hostname, /etc/hostname and /etc/hosts with the
    module-level ``hostname``/``fqdn`` configuration.

    ip: the address to map to the fqdn/hostname entry in /etc/hosts.
    """
    current_hostname = os.uname().nodename
    if current_hostname != hostname:
        print('Setting hostname {}'.format(hostname))
        # hostname is initialized early on boot from initramfs,
        # hostnamectl is not available when unit is started
        run('sysctl -w kernel.hostname="{}"'.format(hostname))
        with open('/etc/hostname', 'w') as f:
            f.write(hostname)
    with open('/etc/hosts') as f:
        hosts_content = f.read()
    # BUG FIX: escape the current hostname before embedding it in the
    # pattern — an fqdn contains dots, which are regex metacharacters.
    pattern = r'^[0-9.]{{7,15}}.*{}$'.format(re.escape(current_hostname))
    hosts_content = re.sub(pattern, '{}\t{}\t{}'.format(ip, fqdn, hostname),
                           hosts_content, flags=re.MULTILINE)
    print('Updating /etc/hosts')
    with open('/etc/hosts', 'w') as f:
        f.write(hosts_content)
def main():
    """Run a cached build capture and emit javac/jars/stats JSON outputs,
    then hand the captured javac commands to tools."""
    args, cmd, capturer = arg.parse_args()
    log.configure_logging(args.output_directory, args.log_to_stderr)
    log.log_header()
    result = cache.retrieve(cmd, args, capturer)
    if not result:
        # BUG FIX: this was a Python 2 print *statement*; the parenthesized
        # form below works under both Python 2 and 3.
        print("DLJC: Build command failed.")
        sys.exit(1)
    javac_commands, jars, stats = result
    log.info('Results: %s', pprint.pformat(javac_commands))
    output_json(os.path.join(args.output_directory, 'javac.json'), javac_commands)
    output_json(os.path.join(args.output_directory, 'jars.json'), jars)
    output_json(os.path.join(args.output_directory, 'stats.json'), stats)
    tools.run(args, javac_commands, jars)
def install_chrome(download_folder='/tmp'):
    """Download the stable Google Chrome .deb into download_folder and
    install it, resolving missing dependencies via apt when dpkg fails."""
    url = 'https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb'
    deb_path = os.path.join(download_folder, 'google-chrome-stable_current_amd64.deb')
    run(['wget', '-O', deb_path, url])
    try:
        run(['dpkg', '-i', deb_path])
    except CalledProcessError:
        # dpkg failed on unmet dependencies; let apt pull them in.
        run(['apt-get', 'install', '-f'])
def click_statistics_unused_files():
    # UI callback: recompute which files under files_path are not referenced
    # by the Remnote JSON export, storing the result in the module-level
    # variable file_not_in_Remnote.
    global json_file, files_path, file_not_in_Remnote
    file_not_in_Remnote = run(json_file, files_path)
        # NOTE(review): this fragment starts mid-method — the enclosing def
        # for these two lines is outside this view.
        self.b = b
        return self.b

    def update(self, b, data, eps, C):
        """
        Update portfolio weights to satisfy constraint b * x <= eps
        and minimize distance to previous weights.
        """
        T, N = data.shape
        x = data[-1, :]
        x_mean = np.mean(x)
        # Hinge loss: how far the current projection exceeds eps.
        le = np.maximum(0., np.dot(b, x) - eps)
        denominator = np.square(np.linalg.norm(x - x_mean))
        # Step size depends on the configured algorithm variant.
        if self.variant == 0:
            tau = le / denominator
        elif self.variant == 1:
            tau = np.minimum(C, le / denominator)
        elif self.variant == 2:
            tau = le / (denominator + 0.5 / C)
        # limit lambda to avoid numerical problems
        tau = np.minimum(100000, tau)
        # update portfolio
        b = b - tau * (data[T - 1, :] - x_mean)
        # project it onto simplex
        return tools.simplex_proj(b.ravel())


if __name__ == "__main__":
    tools.run(pamr_old())
# cloud_config
from tools import run
import os
import glob

print('Deleting host SSH keys')
# BUG FIX: `map(os.remove, ...)` is lazy under Python 3, so the removals
# never executed; iterate explicitly instead.
for key_file in glob.glob('/etc/ssh/ssh_host_*'):
    os.remove(key_file)
print('Reconfiguring openssh-server')
run('dpkg-reconfigure openssh-server')
        # NOTE(review): fragment starts mid-method; nesting of the block
        # below is reconstructed from a collapsed source line — verify
        # against the original file.
        # Expert weights: one uniform portfolio per window offset.
        self.exp_w = np.ones((self.window - 1, m)) / m
        for t in range(n):
            if t >= min_period:
                b = self.get_b(data[:t, :], re_b)
                # double check (Normalize the constraint)
                b = b / np.sum(b)
            daily_portfolio[t, :] = b.reshape((1, m))
            # Daily return net of transaction costs (tc) on the rebalance.
            daily_ret[t] = np.dot(
                data[t, :], b) * (1 - tc / 2 * np.sum(np.absolute(b - re_b)))
            cum_ret = cum_ret * daily_ret[t]
            cumprod_ret[t] = cum_ret
            # Portfolio drift: weights after the day's price moves.
            re_b = b * data[t, :][:, None] / daily_ret[t]
            # Re-weight each expert by its realized return, then normalize.
            for k in np.arange(1, self.window):
                self.exp_ret[k - 1, 0] = np.dot(
                    self.exp_ret[k - 1, 0] * data[k, :], self.exp_w[k - 1, :].T)
            self.exp_ret[:, 0] /= np.sum(self.exp_ret[:, 0])
            logging.info('%d\t%f\t%f\n' % (t + 1, daily_ret[t], cumprod_ret[t]))
        logging.info('tc=%f, Final Return: %.2f\n' % (tc, cum_ret))


if __name__ == "__main__":
    tools.run(anticort())
def install_heroku_toolbelt():
    """Install the Heroku toolbelt via the official Ubuntu install script."""
    # The pipe requires a shell; the command string is a trusted constant.
    install_cmd = 'wget -qO- https://toolbelt.heroku.com/install-ubuntu.sh | sh'
    run(install_cmd, shell=True)
# cloud_config = {} # groups = run('groups', fail=True) groups = [g.gr_name for g in grp.getgrall()] users = [p.pw_name for p in pwd.getpwall()] for user in cloud_config.get('Users', []): for group in user.get('Groups', []): # if group not in groups.stdout.split(): if group not in groups: groupadd_cmd = 'groupadd ' if user.get('System'): groupadd_cmd += '--system ' print('Creating missing group: {}'.format(group)) run(groupadd_cmd + group, fail=True) if user['Name'] not in users: adduser_cmd = 'adduser --disabled-password --gecos "" ' if user.get('System'): adduser_cmd += '--system ' print('Creating user {}'.format(user['Name'])) run(adduser_cmd + user['Name'], fail=True) for group in user.get('Groups', []): print('Adding user to group {}'.format(group)) run('adduser {} {}'.format(user['Name'], group)) if user.get('Password'): print('Setting password') submitted_password = ''.join(user['Password']) if isinstance(user['Password'], list) else user['Password']
def main():
    """Time the functional fibonacci implementations against the pure-Python result."""
    six.print_("Functional fibonacci tests:")
    reference = fib_py(N)
    r1 = run('fib', TESTS, expected=reference, repeat=100000)
# NOTE(review): machine-specific absolute path — adjust per installation.
path = '/Users/ashbake/Documents/Research/Projects/PSG/psg_main/'

import sys
sys.path.append(path + 'psg/')
from tools import pick_site, run

if __name__ == '__main__':
    # define everything
    output_path = './outputs/'  # where to dump final spectrum
    config_path = './configs/'  # where to dump PSG intermediate config files
    plot_path = output_path     # where to dump PSG final plot
    obs_time = '2021/04/20 10:50:49.661'  # time of observation, must be in this format
    site = pick_site(sitename='palomar')
    l0, l1, res = 1100, 1900, 0.005  # wavelength range and resolving power

    # run psg, save telluric spectra to file
    outfile = run(
        l0, l1, res, obs_time, site, 'HIT',
        output_path=path + output_path,  # path to save final spectrum
        config_path=path + config_path,  # path to save intermediate config files
        plot_path=path + plot_path,      # path to save plot
        extension='fits',  # save as fits (currently only option)
        cleanup=True,      # deletes intermediate files
        run_atm=True)      # do or don't regenerate atm
# NOTE(review): fragment — the final `if` below is cut off mid-block.
import numpy as np
from DataClass import data_io
from KalmanFilterEq import KalmanFilterEq
import tools
import matplotlib.pyplot as plt

# State dimension — presumably [px, py, vx, vy]; TODO confirm.
n = 4
sensor_data, true_data, LIDARFalse = data_io()
kalmanFilter = KalmanFilterEq(n)
flag = 1
# Run the filter over the measurement stream and score against ground truth.
state_est = tools.run(sensor_data, kalmanFilter, flag)
p_x, p_y, Vel_x, Vel_y = tools.get_RMSE(state_est, true_data)

# Accumulators for the plots below.
lidar_x_sensor = []
lidar_y_sensor = []
x_pred = []
y_pred = []
x_true = []
y_true = []
radar_x_sensor = []
radar_y_sensor = []

i = 0
for s, p, t in zip(sensor_data, state_est, true_data):
    i += 1
    # Thin the data: skip every fourth sample.
    if i % 4 == 0:
        continue
    if s.get_name() == 'LIDAR':
def up(params):
    """Create and wire up the protection iptables chains.

    Builds a filter chain that REJECTs everything and a nat chain that
    DNATs traffic to the blackhole port, inserts ACCEPT/RETURN exceptions
    for each address in params.allow, then hooks the chains into INPUT /
    PREROUTING for every protected port.

    Raises Exception when not running as root.
    """
    # Probe the chain; tools.run apparently returns the iptables exit
    # status (3 == permission denied).
    rv = tools.run("iptables -L %s -n" % params.chain_name)
    if rv == 3:
        raise Exception("Permission denied: you need to be root")
    # if rv != 0:
    # NOTE(review): the existence check above is disabled — the chains are
    # always (re)created.
    if True:
        tools.run("iptables -N %s" % params.chain_name)
        # tools.run("iptables -A %s -j LOG --log-prefix 'protected '" % params.chain_name)
        tools.run("iptables -A %s -j REJECT" % params.chain_name)
        tools.run("iptables -t nat -N %s" % params.chain_name)
        # tools.run("iptables -t nat -A %s -j LOG --log-prefix 'protected-nat '" % params.chain_name)
        tools.run("iptables -t nat -A %s -p tcp -j DNAT --to-destination :%d" % (params.chain_name, params.blackhole_port))
        tools.run("iptables -t nat -A %s -j RETURN" % params.chain_name)
    # Whitelisted sources bypass the chain (inserted at the top via -I).
    if len(params.allow):
        for addr in params.allow:
            tools.run("iptables -I %s --source %s -j ACCEPT" % (params.chain_name, addr))
            tools.run("iptables -t nat -I %s --source %s -j RETURN" % (params.chain_name, addr))
    for port in params.protected_port:
        tools.run("iptables -I INPUT -p tcp --dport %d -j %s" % (port, params.chain_name))
        tools.run("iptables -t nat -I PREROUTING -p tcp --dport %d -j %s" % (port, params.chain_name))
    # at top: tcp-reject after prerouting (ie. when managed by docker-proxy)
    tools.run(
        "iptables -I INPUT -p tcp --dport %d -j REJECT --reject-with tcp-reset"
        % params.blackhole_port)
    tools.run(
        "iptables -I INPUT -p tcp --dport %d -j LOG --log-prefix 'refused! '"
        % params.blackhole_port)
def down(params):
    """Tear down everything up() created: unhook the chains from INPUT /
    PREROUTING, delete the blackhole INPUT rules, then flush and remove
    both chains."""
    chain = params.chain_name
    for port in params.protected_port:
        tools.run("iptables -D INPUT -p tcp --dport %d -j %s" % (port, chain))
        tools.run("iptables -t nat -D PREROUTING -p tcp --dport %d -j %s" % (port, chain))
    blackhole = params.blackhole_port
    tools.run(
        "iptables -D INPUT -p tcp --dport %d -j REJECT --reject-with tcp-reset"
        % blackhole)
    tools.run(
        "iptables -D INPUT -p tcp --dport %d -j LOG --log-prefix 'refused! '"
        % blackhole)
    # Flush then delete each chain, filter table first, then nat.
    for template in ("iptables -F %s", "iptables -X %s",
                     "iptables -t nat -F %s", "iptables -t nat -X %s"):
        tools.run(template % chain)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
A block cipher transforms a fixed-sized block (usually 8 or 16 bytes) of
plaintext into ciphertext. But we almost never want to transform a single
block; we encrypt irregularly-sized messages.

One way we account for irregularly-sized messages is by padding, creating
a plaintext that is an even multiple of the blocksize. The most popular
padding scheme is called PKCS#7.

So: pad any block to a specific block length, by appending the number of
bytes of padding to the end of the block. For instance,

  "YELLOW SUBMARINE"

... padded to 20 bytes would be:

  "YELLOW SUBMARINE\x04\x04\x04\x04"
'''
import sys
sys.path.append("../toolbox")
import tools

TEXT = "YELLOW SUBMARINE"


def main():
    # Pad the 16-byte text up to a 20-byte block and show the result.
    print(tools.addPadding(TEXT, 20))


if __name__ == '__main__':
    tools.run(main)
def run(self, img):
    """Run the wrapped model on an image and return its predictions.

    NOTE: this method intentionally delegates to the module-level run(),
    which it shadows by name.
    """
    return run(self.model, img)
# Partition, format and label each configured virtual disk that is not
# already partitioned. vm_disk is a (scsi_disk, device) pair; config_disk
# is the matching config dict.
for vm_disk, config_disk in zip(vm_disks, config_disks):
    scsi_disk, device = vm_disk
    mount_point = config_disk.get('MountPoint')
    if not mount_point:
        print('No mount point specified for {}. Skipping formatting..'.format(
            config_disk['DeviceNode']))
        continue
    # Existing partitions show up as child nodes under the disk's sysfs entry.
    if glob.glob(
            '/sys/class/scsi_disk/{0}/device/block/{1}/{1}*'.format(*vm_disk)):
        print('Disk {} contains partitions. Skipping..'.format(
            config_disk['DeviceNode']))
    else:
        print('Creating partition on /dev/{}'.format(device))
        run('parted --script /dev/{} mklabel gpt mkpart primary 2048KiB 100%'.
            format(device), fail=True)
        # Give udev a moment to create the new partition device node.
        time.sleep(1)
        file_system = config_disk.get('FileSystem', DEFAULT_FILE_SYSTEM)
        label = config_disk.get('Label', os.path.basename(mount_point))
        print('Creating and mounting file system on partition /dev/{}1'.format(
            device))
        if file_system == 'ext4':
            # if config_disk.get('LVM'):
            #     run('mkfs.ext4 -L {0} /dev/{1}/{2}'.format(label, vg_name, lv_name), fail=True)
            # else:
            run('mkfs.ext4 -L {0} /dev/{1}1'.format(label, device), fail=True)
        else:
            sys.exit('Not supported file system')
Easiest way: use OpenSSL::Cipher and give it AES-128-ECB as the cipher.
'''
import sys
sys.path.append("../toolbox")
import tools

F = '8.txt'


def main():
    # Each input line is one hex-encoded ciphertext; ECB leaks structure,
    # so any repeated 16-byte block betrays ECB mode.
    with open(F, 'r') as f:
        encList = f.readlines()
    for (idx, encHex) in enumerate(encList, 1):
        # print(['*' * 16, idx, '*'*16])
        enc = tools.fromHex(encHex.strip())
        dupes = tools.getDuplicateBlocks(enc, 16)
        if dupes:
            print('Found on line #%i' % idx)
            print(dupes)
            print(tools.toHex(enc, blockSize=16))


if __name__ == '__main__':
    tools.run(main)
#decMsg=decryptCBC(key, cipherMsg) #print(decMsg) print("*** Mitm") if a==1: print('attack header') receivedMsg = attackHeader(cipherMsg) else: print('attack tail') receivedMsg = attackTail(cipherMsg) #print(receivedMsg) print("*** Bob") decMsg=crypto.decryptCBC(key, receivedMsg) if adminCheck(decMsg): print("You are ADMIN") else: print("You are regular user") print(decMsg) def bothAttacks(): main(1) print('*' * 72) main(2) if __name__ =='__main__': tools.run(bothAttacks)
# NOTE(review): fragment — the start (re.sub call) and the end (the
# Domains format call) are both cut off; nesting is reconstructed from a
# collapsed source line.
                           hosts_content, flags=re.MULTILINE)
print('Updating /etc/hosts')
with open('/etc/hosts', 'w') as f:
    f.write(hosts_content)

# Build a MAC -> interface-name map from sysfs.
print('Reading interfaces from /sys/class/net/')
interfaces = {}
for interface in os.listdir('/sys/class/net/'):
    with open('/sys/class/net/{}/address'.format(interface)) as f:
        mac = f.read().rstrip()
    interfaces.update({mac: interface})

print('Checking if network is managed by systemd')
# NOTE(review): run() apparently returns an object with a bytes .stdout —
# confirm against tools.run's definition.
networkd_status = run('systemctl is-enabled systemd-networkd')
if networkd_status.stdout.rstrip() in [b'enabled', b'enabled-runtime']:
    print('Generating networkd config files')
    # Only the first NIC with a gateway gets the Gateway= line.
    main_ip = True
    for nic in cloud_config['NIC']:
        try:
            name = interfaces[nic['Mac'].lower()]
            nic_config = '[Match]\nName={}\n\n[Network]\n'.format(name)
            for ip in nic['Ip']:
                nic_config += 'Address={}\n'.format(ip)
            if nic.get('Gw') and main_ip:
                main_ip = False
                nic_config += 'Gateway={}\n'.format(nic['Gw'])
            for dns_server in cloud_config['DNS']['Servers']:
                nic_config += 'DNS={}\n'.format(dns_server)
            nic_config += 'Domains={}\n'.format(
    # NOTE(review): fragment — these lines are the tail of attackMessage(),
    # whose header is outside this view.
    aMessage = b''.join(msgBlocks) + injectedData
    tools.d(['attack: append blocks', blocks])
    # Continue the SHA-1 state over the appended blocks (length extension).
    for block in blocks:
        msgHash = sha1.compute_sha1block(msgHash, block)
    printsha1(msgHash)
    return (aMessage[keylen:], msgHash)


def main():
    # Forge a valid MAC for an extended message without knowing the key
    # (SHA-1 length-extension attack).
    baseMessageHash = sha1mac(MESSAGE)
    printsha1(baseMessageHash)
    ### FIXME?: NEED TO KNOW LENGTH OF _KEY
    ### GUESS?
    (aMessage, aHash) = attackMessage(len(_KEY), MESSAGE, baseMessageHash)
    if not verify_sha1mac(aMessage, aHash):
        print("Invalid hash")
        return
    if checkAdmin(aMessage):
        print("You are ADMIN")
    else:
        print("You are regular user")


if __name__ == '__main__':
    tools.run(main, True)
from tools import run
import strategies

if __name__ == '__main__':
    runs = 1000000

    # Adjust values for any heist
    challenge_level = 12  # obtainable from the wiki: https://fallenlondon.fandom.com/wiki/Fallen_London_Wiki
    suspicion_gain = 2    # how many CP of suspicion are gained if the heist fails
    casing_loss = 10      # how many CP of Casing... are lost if the heist fails

    # run(casing level to aim for, etc..., heisting strategy)
    # You may want to start on higher or stop on lower levels of Casing...
    # depending on the heist.
    for target_casing in range(9, 17):
        run(target_casing, challenge_level, suspicion_gain, casing_loss, runs,
            strategies.heist_suboptimal_high_level)
    # NOTE(review): fragment — these lines are the tail of attackMessage(),
    # whose header is outside this view.
    # Build padded blocks for the injected data at the forged total length.
    blocks = sha1.buildBlocks(prevLen, injectedData)
    aMessage = b''.join(msgBlocks) + injectedData
    tools.d(['attack: append blocks', blocks])
    # Continue the SHA-1 state over the appended blocks (length extension).
    for block in blocks:
        msgHash = sha1.compute_sha1block(msgHash, block)
    printsha1(msgHash)
    return (aMessage[keylen:], msgHash)


def main():
    # Forge a valid MAC for an extended message without knowing the key
    # (SHA-1 length-extension attack).
    baseMessageHash = sha1mac(MESSAGE)
    printsha1(baseMessageHash)
    ### FIXME?: NEED TO KNOW LENGTH OF _KEY
    ### GUESS?
    (aMessage, aHash) = attackMessage(len(_KEY), MESSAGE, baseMessageHash)
    if not verify_sha1mac(aMessage, aHash):
        print("Invalid hash")
        return
    if checkAdmin(aMessage):
        print("You are ADMIN")
    else:
        print("You are regular user")


if __name__ == '__main__':
    tools.run(main, True)
def main(): six.print_("Recursive fibonacci tests:") r2 = run('rfib', RTESTS, expected=rfib_c(N), repeat=10)
        # NOTE(review): fragment starts mid-method; `x` and `history` below
        # are not visible here — verify they are defined upstream.
        # NOTE(review): np.float is removed in NumPy 1.24+; the original
        # uses it, so this code requires an older NumPy (or s/np.float/float/).
        cumprod_ret = np.ones((n, 1), np.float)
        daily_ret = np.ones((n, 1), np.float)
        b = np.ones((m, 1), np.float) / m
        re_b = np.zeros((m, 1), np.float)
        daily_portfolio = np.zeros((n, m))
        for t in range(n):
            b = self.update(x, history)
            daily_portfolio[t, :] = b.reshape((1, m))
            # Daily return net of transaction costs (tc) on the rebalance.
            daily_ret[t] = np.dot(data[t, :], b) * (1 - tc / 2 * np.sum(np.absolute(b - re_b)))
            cum_ret *= daily_ret[t]
            cumprod_ret[t] = cum_ret
            # Portfolio drift: weights after the day's price moves.
            re_b = b * data[t, :][:, None] / daily_ret[t]
            logging.info('%d\t%f\t%f\n' % (t + 1, daily_ret[t], cumprod_ret[t]))
        logging.info('tc=%f, Final Return: %.2f\n' % (tc, cum_ret))
        self.pDiff = daily_ret


def norm(x):
    # Row-wise Euclidean norm for 2-D input, scalar norm for 1-D input.
    if len(x.shape) == 1:
        axis = 0
    else:
        axis = 1
    return np.sqrt((x ** 2).sum(axis=axis))


if __name__ == "__main__":
    tools.run(rmr2())