def get_json(url):
    logger.info("GET: %s" % url)
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-gpu')
    options.add_argument('--window-size=1280x1696')
    options.add_argument('--user-data-dir=/tmp/user-data')
    options.add_argument('--hide-scrollbars')
    options.add_argument('--enable-logging')
    options.add_argument('--log-level=0')
    options.add_argument('--v=99')
    options.add_argument('--single-process')
    options.add_argument('--data-path=/tmp/data-path')
    options.add_argument('--ignore-certificate-errors')
    options.add_argument('--homedir=/tmp')
    options.add_argument('--disk-cache-dir=/tmp/cache-dir')
    options.add_argument(
        'user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
    )
    options.binary_location = os.getcwd() + "/bin/headless-chromium"
    driver = webdriver.Chrome(chrome_options=options)
    driver.get(url)
    # The JSON endpoints render the payload inside a <pre> tag.
    pre = driver.find_element_by_tag_name("pre").text
    try:
        return json.loads(pre)
    except Exception as e:
        logger.err(e)
        return None
    finally:
        driver.close()
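# Hypothetical usage sketch (not part of the original scraper): _get_json_example
# and the URL below are placeholders; any endpoint that returns a JSON document
# rendered inside a <pre> tag should work with get_json().
def _get_json_example():
    data = get_json("https://www.example.com/products.json")
    if data is not None:
        logger.info("Fetched %d top-level keys" % len(data))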
def lambda_handler(event, context):
    file = scrape_d.get_content()
    # upload the csv to s3
    session = boto3.Session(aws_access_key_id=AWS_KEY,
                            aws_secret_access_key=AWS_SECRET,
                            region_name=REGION_NAME)
    s3 = session.resource('s3')
    bucket = s3.Bucket(S3_BUCKET)
    try:
        s3.meta.client.head_bucket(Bucket=S3_BUCKET)
        logger.info('Uploading result of scrape to S3')
        bucket.upload_file('/tmp/%s.csv' % file, '%s.csv' % file,
                           ExtraArgs={'ACL': 'public-read'})
        logger.success('Uploaded %s.csv to S3' % file)
    except ClientError as e:
        # 404 error, bucket does not exist.
        error_code = e.response['Error']['Code']
        if error_code == '404':
            logger.err('Bucket(%s) does not exist' % S3_BUCKET)
        else:
            logger.err(e)
def __init__(self, username, password=None, key_filename=None,
             host=DEFAULT_SSH_HOST, port=DEFAULT_SSH_PORT):
    pkey = os.path.expanduser(key_filename) if key_filename is not None else None
    self.send_lock = threading.Lock()
    self.ssh_client = paramiko.SSHClient()
    self.ssh_client.load_system_host_keys()
    self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    connect_kw = {"username": username, "compress": True, "look_for_keys": True}
    if password is not None:
        connect_kw["password"] = password
        connect_kw["look_for_keys"] = False
    if key_filename is not None:
        connect_kw["key_filename"] = key_filename
        connect_kw["look_for_keys"] = False
    try:
        self.ssh_client.connect(host, port, **connect_kw)
    except paramiko.ssh_exception.PasswordRequiredException as e:
        if e.message == "Private key file is encrypted":
            raise SshRpcKeyEncrypted()
        raise SshRpcError()
    except paramiko.ssh_exception.SSHException as e:
        if e.message == "No authentication methods available":
            raise SshRpcKeyNoAuthMethod()
        if e.message == "Authentication failed.":
            raise SshRpcKeyAuthFailed()
        raise SshRpcError(e.message)
    except socket.error as e:
        log.err("Unable to establish connection, please try again later ({})".format(e))
        os._exit(1)
    self.ssh_transport = self.ssh_client.get_transport()
    self.ssh_transport.set_keepalive(30)
    self.ssh_channel = None
    self._server_update()
def image(file):
    try:
        img = pygame.image.load(file)
        log.info(f"Loading texture: {file}")
        return img
    except (pygame.error, FileNotFoundError):
        log.err(f"Failed to load texture: {file}")
        # Fall back to the placeholder "unknown" texture.
        return unknown
def do_check_grub(self):
    self.update_progress(_("Checking bootloader"), True)
    log(" --> Checking Grub configuration")
    if os.path.exists("/target/boot/grub/grub.cfg"):
        return True
    else:
        err("No /target/boot/grub/grub.cfg file found!")
        return False
def __remove__(file):
    try:
        os.remove(file)
    except PermissionError:
        # The file may still be locked; wait briefly and retry once.
        time.sleep(3)
        try:
            os.remove(file)
        except Exception as e:
            log.err(f"Failed to remove swap file \"{file}\": {e}")
def create_nfs_maps(cluster_fileserver, cluster_name):
    logger = logging.getLogger("cluster-creation")
    cmd = 'ssh batfs0101 /hub/sysadmin/bin/getauto.devel2'
    ret, out = do_cmd(cmd)
    if ret != 0:
        logger.error('Checkout failed')
        # logger.error(out)
    else:
        logger.info(out)
        logger.info('Checkout passed')
    sfs_ah = '/usr/bin/echo -e "%s\tbatfs-%s-nfs:/vmgr/&" >> /var/tmp/auto.sfs.template.devel.bat' % (
        cluster_name, cluster_name)
    cmd = 'ssh batfs0101\t' "'%s'" % (sfs_ah)
    ret, out = do_cmd(cmd)
    logger.info(out)
    sfs_blr = '/usr/bin/echo -e "%s\tbatfs-%s-nfs:/vmgr/&" >> /var/tmp/sfs.worldwide.US-Billerica.devel' % (
        cluster_name, cluster_name)
    cmd = 'ssh batfs0101\t' "'%s'" % (sfs_blr)
    ret, out = do_cmd(cmd)
    logger.info(out)
    proc = subprocess.run(
        ['ssh', 'batfs0101', '/hub/sysadmin/bin/putauto.devel2'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        input='Y\n',
        encoding='ascii')
    if proc.returncode == 0:
        logger.info(proc.stdout)
    else:
        logger.error(proc.stderr)
    dfs_maps_batdfs = "ssh dc-00 'dfsutil link add \\\mathworks\\batdfsroot\\{} \\\\batfs-{}-cifs\\vmgr\\{}'".format(
        cluster_name, cluster_name, cluster_name)
    print(dfs_maps_batdfs)
    ret, out = do_cmd(dfs_maps_batdfs)
    logger.info(out)
    dfs_maps_blrbat = "ssh dc-00 'dfsutil link add \\\mathworks\\blrbatroot\\{} \\\\batfs-{}-cifs\\vmgr\{}'".format(
        cluster_name, cluster_name, cluster_name)
    print(dfs_maps_blrbat)
    ret, out = do_cmd(dfs_maps_blrbat)
    logger.info(out)
    dfs_pro_ttl = "ssh dc-00 'dfsutil property ttl set \\\mathworks\\batdfsroot\\{} 86400'".format(
        cluster_name)
    ret, out = do_cmd(dfs_pro_ttl)
    logger.info(out)
    dfs_pro_bat_ttl = "ssh dc-00 'dfsutil property ttl set \\\mathworks\\blrbatroot\{} 86400'".format(
        cluster_name)
    ret, out = do_cmd(dfs_pro_bat_ttl)
    logger.info(out)
def reset(self):
    "Resets/restarts server uncleanly. No params."
    if self.reactor:
        args = sys.argv  # take the original arguments
        args.insert(0, sys.executable)  # add python
        os.execv(sys.executable, args)  # replace self with new python.
        logger.err("Reset failed:", sys.executable, str(args))
        return  # This will not return. os.execv should overwrite us.
    else:
        logger.err("<Not resetting: no reactor>")
        pass
def get_op(x: str):
    """
    Get the opcode of a given number
    :param x: number
    :return: none, print opcode
    """
    to_plug = int(x) if x.find("0x") == -1 else int(x, 16)
    value = get_opcode(to_plug)
    if value is not None:
        excite(f'opcode for {x}: {value}')
    else:
        err(f'could not get opcode for {x}!')
def __init__(self, setup):
    self.setup = setup
    # find the squashfs..
    self.media = config.get("loop_directory", "/dev/loop0")
    if (not os.path.exists(self.media)):
        err("Critical Error: Live medium (%s) not found!" % self.media)
        # sys.exit(1)
    inf("Using live medium: " + self.media)
    self.our_total = 0
    self.our_current = 0
def get_instr(args):
    """
    Get instructions for file with given arguments
    :param args: arguments
    :return: none, prints respectively
    """
    # Weird list parsing
    _len = calculate_real_len(args)
    if _len == 3:
        if args[1] is not None and args[2] is not None:
            get_instr_s(*args)
    elif _len == 1:
        get_instr_a(args[0])
    else:
        err(f'Invalid arguments! Given: {args}')
def parse(hexdump, from_loc: str, to_loc: str):
    """
    Parse a hex dump from a location to another location
    :param hexdump: dump contents
    :param from_loc: starting location
    :param to_loc: end location
    :return: lines of hex between
    """
    dump_as_arr = str(hexdump).split("\n")
    from_index = find(hexdump, from_loc)
    to_index = find(hexdump, to_loc)
    if int(from_loc, 16) > int(to_loc, 16):
        err(f'Cannot parse locations backwards! Asked to find {from_loc} to {to_loc}')
        return
    return dump_as_arr[from_index:to_index]
def load_config(config_path):
    """read config file"""
    if os.path.isfile(config_path):
        with open(config_path, "r") as file:
            content = file.read()
        inf("#Reading yaml file:" + config_path)
        log(content)
    else:
        err("{} doesn't exist. Please create config file!".format(config_path))
        return {}
    try:
        return yaml.load(content, Loader=yaml.FullLoader) or {}
    except AttributeError:
        # PyYAML < 5.1 has no FullLoader
        return yaml.load(content) or {}
def run(cmd, vital=True):
    inf("Running: " + cmd)
    if "||" in cmd:
        mode = cmd.split("||")[0].strip()
        cmd = cmd.split("||")[1].strip()
        if "{distro_codename}" in cmd:
            cmd = cmd.replace("{distro_codename}", config.get("distro_codename", "linux"))
        if mode == "chroot":
            i = do_run_in_chroot(cmd)
        else:
            i = os.system(cmd)
    else:
        i = os.system(cmd)
    if vital and i != 0:
        err("Failed to run command (Exited with {}): {}".format(str(int(i / 512)), cmd))
    return i
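# Usage sketch (illustrative, not part of the original installer): _run_examples is
# a hypothetical helper. The text before "||" selects the mode, so "chroot||<cmd>"
# is executed inside the target via do_run_in_chroot(), while a plain command
# string goes straight to os.system() on the live system.
def _run_examples():
    run("chroot||update-grub")            # runs inside the chroot (command is illustrative)
    run("echo {distro_codename}")         # "{distro_codename}" is replaced from config
    run("ls /nonexistent", vital=False)   # non-vital: a failure is logged but not fatal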
def populateEmptyChunkInfo(self, dbName):
    """Populate self.emptyChunkInfo[dbName] with a set() containing chunkIds
    for the empty chunks of the particular db. If no empty chunk information
    can be found and loaded for the db, a default empty chunks file is used."""
    sanitizedDbName = filter(
        lambda c: (c in string.letters) or (c in string.digits) or (c in ["_"]),
        dbName)
    if sanitizedDbName != dbName:
        logger.wrn("WARNING, dbName=", dbName,
                   "contains questionable characters. sanitized=", sanitizedDbName)
    name = "empty_%s.txt" % sanitizedDbName
    info = self.loadIntsFromFile(name)
    if not info and not isinstance(info, list):
        logger.err("Couldn't find %s, using %s." % (name, self.defaultEmptyChunks))
        self.emptyChunkInfo[dbName] = self.emptyChunkInfo[""]
    else:
        self.emptyChunkInfo[dbName] = info
    pass
def answer(self, text, attachment=None, domain=None, title=None):
    receiver = 'chat_id' if self.dialog_id < 0 else 'user_id'
    receiver_id = abs(self.dialog_id)
    pars = {
        'message': text,
        receiver: unicode(receiver_id),
        # 'access_token': tokens.get_token(),
        'guid': random.randint(0, 1000000),
    }
    if attachment is not None:
        pars['attachment'] = attachment
    if title is not None:
        pars['title'] = title
    if domain is not None:
        del pars[receiver]
        pars['domain'] = domain
    response = api.query(u'messages.send', args=pars, bot=self.bot)
    info = vk_utils.get_bot_info()
    if info is None:
        logger.err(u'Could not fetch bot info, the reply was not sent.')
        return None
    if response is None:
        logger.log(u'Nothing came back from VK in classes.py@answer')
        return response
    if response.get('error') is None:
        if info['status'] != 'ok':
            # api.query(u'account.setOnline')
            vk_utils.set_bot_status(info, 'ok')
    else:
        logger.log(u'VK API returned an error while sending the message. '
                   + unicode(response['error']['error_msg']))
        if response['error']['error_code'] == 14:
            vk_utils.set_bot_status(info, 'error', response['error']['captcha_sid'])
    return response
def do_api(path, account_id):
    if os.path.isfile(API_KEY_FILE):
        with open(API_KEY_FILE) as f:
            key = f.read().strip()
        r = requests.get(API_ENDPOINT + path, auth=(account_id, key))
        if r.status_code == 200:
            log.ok("Success!")
            return 0
        else:
            if r.status_code == 401:
                log.err("Invalid account_id and/or api_key given")
            elif r.status_code == 403:
                log.err("You do not have permission to use that resource")
            elif r.status_code == 404:
                log.err("The endpoint could not be found")
            return 1
    else:
        log.err("Could not find API key file [{key_file}]".format(key_file=API_KEY_FILE))
        return 1
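# Hypothetical usage sketch: _do_api_example, the endpoint path and the account id
# are placeholders; API_ENDPOINT and API_KEY_FILE come from the surrounding module.
def _do_api_example():
    exit_code = do_api("account/info", "acc-12345")
    if exit_code != 0:
        log.err("API smoke test failed")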
def run(rpc, recipe, is_dev):
    lc = 0
    state = {"software_list": {}, "name": None, "is_dev": is_dev}
    try:
        for statement in rparser.parse(recipe):
            # Everything after # is comment.
            lc += 1
            command = statement[0]
            args = statement[1:]
            state = rpc.call("rc_" + command, {"args": args, "state": state})
            # recipe_execute_cmd(rpc, state, clean_line)
        return state
    except DocoptExit as e:
        log.err('Recipe contains an invalid command at Line {line}.'.format(line=lc))
        log.err(e)
    except RecipeRuntimeError as e:
        log.err('Recipe command on line {line} failed with error: {e}'.format(line=lc, e=e))
def start_installation(self):
    # mount the media location.
    log(" --> Installation started")
    if (not os.path.exists("/target")):
        os.mkdir("/target")
    if (not os.path.exists("/source")):
        os.mkdir("/source")
    # Custom commands
    self.do_hook_commands("pre_install_hook")
    run("umount -lf /target/dev/shm")
    run("umount -lf /target/dev/pts")
    run("umount -lf /target/dev/")
    run("umount -lf /target/sys/")
    run("umount -lf /target/proc/")
    run("umount -lf /target/run/")
    self.mount_source()
    if self.setup.automated:
        self.create_partitions()
    else:
        self.format_partitions()
        self.mount_partitions()
    # Custom commands
    self.do_hook_commands("pre_rsync_hook")
    # Transfer the files
    SOURCE = "/source/"
    DEST = "/target/"
    self.our_current = 0
    # (Valid) assumption: num-of-files-to-copy ~= num-of-used-inodes-on-/
    self.our_total = int(subprocess.getoutput(
        "df --inodes /{src} | awk 'END{{ print $3 }}'".format(src=SOURCE.strip('/'))))
    log(" --> Copying {} files".format(self.our_total))
    if config.get("netinstall", False):
        self.run_and_update(config.package_manager("create_rootfs"))
        pkgs = open("branding/netinstall_packages.txt").read().split("\n")
    else:
        if config.get("use_rsync", True) and 0 == os.system("which rsync &>/dev/null"):
            EXCLUDE_DIRS = "dev/* proc/* sys/* tmp/* run/* mnt/* media/* lost+found source target".split()
            # Add optional entries to EXCLUDE_DIRS
            for dirvar in config.get("exclude_dirs", ["home/*", "data/user/*"]):
                EXCLUDE_DIRS.append(dirvar)
            rsync_filter = ' '.join('--exclude=' + SOURCE + d for d in EXCLUDE_DIRS)
            rsync = subprocess.Popen("rsync --verbose --archive --no-D --acls "
                                     "--hard-links --xattrs {rsync_filter} "
                                     "{src}* {dst}".format(src=SOURCE, dst=DEST,
                                                           rsync_filter=rsync_filter),
                                     shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
            while rsync.poll() is None:
                line = str(rsync.stdout.readline().decode("utf-8").replace("\n", ""))
                if not line:
                    # still copying the previous file, just wait
                    time.sleep(0.1)
                else:
                    self.our_current = min(self.our_current + 1, self.our_total)
                    self.update_progress(_("Copying /%s") % line)
            log(_("rsync exited with return code: %s") % str(rsync.poll()))
        elif config.get("use_unsquashfs", True) and 0 == os.system("which unsquashfs &>/dev/null"):
            pwd = os.getcwd()
            os.chdir("/target")
            self.update_progress(_("Extracting rootfs."), pulse=True)
            run("unsquashfs /dev/loop0")
            run("mv /target/squashfs-root/* /target")
            run("rm -rf /target/squashfs-root")
            os.chdir(pwd)
        else:
            cp = subprocess.Popen("cp -prvf {src}* {dst}".format(src=SOURCE, dst=DEST),
                                  shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)
            while cp.poll() is None:
                line = str(cp.stdout.readline().decode("utf-8")).split("'")[1]
                if not line:
                    # still copying the previous file, just wait
                    time.sleep(0.1)
                else:
                    self.our_current = min(self.our_current + 1, self.our_total)
                    self.update_progress(_("Copying /%s") % line)
    # Custom commands
    self.do_hook_commands("post_rsync_hook")
    # Steps:
    self.our_total = 12
    self.our_current = 0
    # chroot
    log(" --> Chrooting")
    self.update_progress(_("Entering the system ..."))
    run("mount --bind /dev/ /target/dev/")
    run("mount --bind /dev/shm /target/dev/shm")
    run("mount --bind /dev/pts /target/dev/pts")
    run("mount --bind /sys/ /target/sys/")
    run("mount --bind /proc/ /target/proc/")
    run("mount --bind /run/ /target/run/")
    if os.path.exists("/sys/firmware/efi"):
        run("mount --bind /sys/firmware/efi/efivars /target/sys/firmware/efi/efivars")
    run("mv /target/etc/resolv.conf /target/etc/resolv.conf.bk")
    run("cp -f /etc/resolv.conf /target/etc/resolv.conf")
    if config.get("netinstall", False):
        cmd = config.package_manager("install_package", pkgs)
        self.run_and_update("chroot /target {}".format(cmd))
    kernelversion = subprocess.getoutput("uname -r")
    if os.path.exists("/lib/modules/{0}/vmlinuz".format(kernelversion)):
        run("cp /lib/modules/{0}/vmlinuz /target/boot/vmlinuz-{0}".format(kernelversion))
    # add new user
    log(" --> Adding new user")
    self.our_current += 1
    try:
        for cmd in config.distro["run_before_user_creation"]:
            run("chroot||" + cmd)
    except Exception:
        err("This action is not supported for your distribution.")
    self.update_progress(_("Adding new user to the system"))
    # TODO: support encryption
    run('chroot||useradd -m -s {shell} -c \"{realname}\" {username}'.format(
        shell=config.get("using_shell", "/bin/bash"),
        realname=self.setup.real_name,
        username=self.setup.username))
    # Add user to additional groups
    for group in config.get("additional_user_groups", ["audio", "video", "netdev"]):
        run("chroot||usermod -aG {} {}".format(group, self.setup.username))
    if (run("which chpasswd &>/dev/null") == 0) and config.get("use_chpasswd", True):
        fp = open("/target/tmp/.passwd", "w")
        fp.write(self.setup.username + ":" + self.setup.password1 + "\n")
        if config.get("set_root_password", True):
            fp.write("root:" + self.setup.password1 + "\n")
        fp.close()
        run("chroot||cat /tmp/.passwd | chpasswd")
        run("chroot||rm -f /tmp/.passwd")
    else:
        run("chroot||echo -e \"{0}\\n{0}\\n\" | passwd {1}".format(
            self.setup.password1, self.setup.username))
        if config.get("set_root_password", True):
            run("chroot||echo -e \"{0}\\n{0}\\n\" | passwd".format(self.setup.password1))
    self.our_current += 1
    # Set autologin for user if they so elected
    if self.setup.autologin:
        # Auto Login Groups
        for i in config.display_manager["set_autologin"]:
            run(i.replace("{user}", self.setup.username))
    # /etc/fstab, mtab and crypttab
    self.our_current += 1
    self.update_progress(_("Writing filesystem mount information to /etc/fstab"))
    self.write_fstab()
def finish_installation(self):
    # Steps:
    self.our_total = 12
    self.our_current = 4
    # write host+hostname infos
    log(" --> Writing hostname")
    self.our_current += 1
    self.update_progress(_("Setting hostname"))
    hostnamefh = open("/target/etc/hostname", "w")
    hostnamefh.write("%s\n" % self.setup.hostname)
    hostnamefh.close()
    hostsfh = open("/target/etc/hosts", "w")
    hostsfh.write("127.0.0.1\tlocalhost\n")
    hostsfh.write("127.0.1.1\t%s\n" % self.setup.hostname)
    hostsfh.write("# The following lines are desirable for IPv6 capable hosts\n")
    hostsfh.write("::1 localhost ip6-localhost ip6-loopback\n")
    hostsfh.write("fe00::0 ip6-localnet\n")
    hostsfh.write("ff00::0 ip6-mcastprefix\n")
    hostsfh.write("ff02::1 ip6-allnodes\n")
    hostsfh.write("ff02::2 ip6-allrouters\n")
    hostsfh.write("ff02::3 ip6-allhosts\n")
    # Append hosts file from branding
    if os.path.isfile("./branding/hosts"):
        f = open("./branding/hosts", "r").readlines()
        for line in f:
            hostsfh.write(line)
    hostsfh.close()
    # set the locale
    log(" --> Setting the locale")
    self.our_current += 1
    self.update_progress(_("Setting locale"))
    run("echo \"%s.UTF-8 UTF-8\" >> /target/etc/locale.gen" % self.setup.language)
    run("chroot||locale-gen")
    run("echo \"\" > /target/etc/default/locale")
    run("chroot||localectl set-locale LANG=\"%s.UTF-8\"" % self.setup.language)
    run("chroot||localectl set-locale LANG=%s.UTF-8" % self.setup.language)
    open("/target/etc/locale.conf", "w").write("LANG=%s.UTF-8" % self.setup.language)
    # set the locale for gentoo / sulin
    if os.path.exists("/target/etc/env.d"):
        l = open("/target/etc/env.d/20language", "w")
        l.write("LANG={}.UTF-8".format(self.setup.language))
        l.write("LC_ALL={}.UTF-8".format(self.setup.language))
        l.flush()
        l.close()
        run("chroot||env-update")
    # set the timezone
    log(" --> Setting the timezone")
    self.our_current += 1
    self.update_progress(_("Setting timezone"))
    run("echo \"%s\" > /target/etc/timezone" % self.setup.timezone)
    run("rm -f /target/etc/localtime")
    run("ln -s /usr/share/zoneinfo/%s /target/etc/localtime" % self.setup.timezone)
    # Keyboard settings X11
    if not self.setup.keyboard_variant:
        self.setup.keyboard_variant = ""
    self.update_progress(_("Setting X11 keyboard options"))
    newconsolefh = None
    if os.path.exists("/target/etc/X11/xorg.conf.d"):
        newconsolefh = open("/target/etc/X11/xorg.conf.d/10-keyboard.conf", "w")
    elif os.path.exists("/target/usr/share/X11/xorg.conf.d/"):
        newconsolefh = open("/target/usr/share/X11/xorg.conf.d/10-keyboard.conf", "w")
    if newconsolefh:
        newconsolefh.write('Section "InputClass"\n')
        newconsolefh.write('Identifier "system-keyboard"\n')
        newconsolefh.write('MatchIsKeyboard "on"\n')
        newconsolefh.write('Option "XkbLayout" "{}"\n'.format(self.setup.keyboard_layout))
        newconsolefh.write('Option "XkbModel" "{}"\n'.format(self.setup.keyboard_model))
        newconsolefh.write('Option "XkbVariant" "{}"\n'.format(self.setup.keyboard_variant))
        if "," in self.setup.keyboard_layout:
            newconsolefh.write('Option "XkbOptions" "grp:ctrls_toggle"\n')
        newconsolefh.write('EndSection\n')
        newconsolefh.close()
    # set the keyboard options..
log(" --> Setting the keyboard") self.our_current += 1 self.update_progress(_("Setting keyboard options")) if os.path.exists("/target/etc/default/console-setup"): consolefh = open("/target/etc/default/console-setup", "r") newconsolefh = open("/target/etc/default/console-setup.new", "w") for line in consolefh: line = line.rstrip("\r\n") if(line.startswith("XKBMODEL=")): newconsolefh.write("XKBMODEL=\"%s\"\n" % self.setup.keyboard_model) elif(line.startswith("XKBLAYOUT=")): newconsolefh.write("XKBLAYOUT=\"%s\"\n" % self.setup.keyboard_layout) elif(line.startswith("XKBVARIANT=") and self.setup.keyboard_variant != ""): newconsolefh.write("XKBVARIANT=\"%s\"\n" % self.setup.keyboard_variant) else: newconsolefh.write("%s\n" % line) consolefh.close() newconsolefh.close() run("chroot||rm /etc/default/console-setup") run("chroot||mv /etc/default/console-setup.new /etc/default/console-setup") # lfs like systems uses vconsole.conf (systemd) if os.path.exists("/target/etc/vconsole.conf"): consolefh = open("/target/etc/vconsole.conf", "r") newconsolefh = open("/target/etc/vconsole.conf.new", "w") for line in consolefh: line = line.rstrip("\r\n") if(line.startswith("KEYMAP=")): if(self.setup.keyboard_variant != ""): newconsolefh.write( "KEYMAP=\"{0}-{1}\"\n".format(self.setup.keyboard_layout, self.setup.keyboard_variant)) else: newconsolefh.write("KEYMAP=\"{0}\"\n".format( self.setup.keyboard_layout)) else: newconsolefh.write("%s\n" % line) consolefh.close() newconsolefh.close() run("chroot||rm /etc/vconsole.conf") run("chroot||mv /etc/vconsole.conf.new /etc/vconsole.conf") # debian like systems uses this (systemd) if os.path.exists("/target/etc/default/keyboard"): consolefh = open("/target/etc/default/keyboard", "r") newconsolefh = open("/target/etc/default/keyboard.new", "w") for line in consolefh: line = line.rstrip("\r\n") if(line.startswith("XKBMODEL=")): newconsolefh.write("XKBMODEL=\"%s\"\n" % self.setup.keyboard_model) elif(line.startswith("XKBLAYOUT=")): newconsolefh.write("XKBLAYOUT=\"%s\"\n" % self.setup.keyboard_layout) elif(line.startswith("XKBVARIANT=") and self.setup.keyboard_variant != ""): newconsolefh.write("XKBVARIANT=\"%s\"\n" % self.setup.keyboard_variant) elif(line.startswith("XKBOPTIONS=")): newconsolefh.write("XKBOPTIONS=grp:ctrls_toggle") else: newconsolefh.write("%s\n" % line) consolefh.close() newconsolefh.close() run("chroot||rm /etc/default/keyboard") run("chroot||mv /etc/default/keyboard.new /etc/default/keyboard") # Keyboard settings openrc if os.path.exists("/target/etc/conf.d/keymaps"): newconsolefh = open("/target/etc/conf.d/keymaps", "w") if not self.setup.keyboard_layout: self.setup.keyboard_layout = "en" newconsolefh.write("keymap=\"{}{}\"\n".format( self.setup.keyboard_layout, self.setup.keyboard_variant)) newconsolefh.close() # remove pacman self.update_progress(_("Clearing package manager"),True) log(" --> Clearing package manager") log(config.get("remove_packages", ["17g-installer"])) run("chroot||yes | {}".format(config.package_manager( "remove_package_with_unusing_deps", config.get("remove_packages", ["17g-installer"])))) if self.setup.luks: with open("/target/etc/default/grub.d/61_live-installer.cfg", "w") as f: f.write("#! 
/bin/sh\n") f.write("set -e\n\n") f.write('GRUB_CMDLINE_LINUX="cryptdevice=%s:lvmlmde root=/dev/mapper/lvmlmde-root resume=/dev/mapper/lvmlmde-swap"\n' % self.auto_root_physical_partition) run("chroot||echo \"power/disk = shutdown\" >> /etc/sysfs.d/local.conf") # recreate initramfs (needed in case of skip_mount also, to include things like mdadm/dm-crypt/etc in case its needed to boot a custom install) log(" --> Configuring Initramfs") self.our_current += 1 self.update_progress(_("Generating initramfs"),pulse=True) for command in config.update_initramfs(): run("chroot||"+command) self.update_progress(_("Preparing bootloader installation"),pulse=True) try: grub_prepare_commands = config.distro["grub_prepare"] for command in grub_prepare_commands: run(command) except: err("Grub prepare process not available for your distribution!") # install GRUB bootloader (EFI & Legacy) log(" --> Configuring Grub") self.our_current += 1 if(self.setup.grub_device is not None): self.update_progress(_("Installing bootloader"),pulse=True) log(" --> Running grub-install") if os.path.exists("/sys/firmware/efi"): grub_cmd = config.distro["grub_installation_efi"] run(grub_cmd.replace("{disk}", self.setup.grub_device)) else: grub_cmd = config.distro["grub_installation_legacy"] run(grub_cmd.replace("{disk}", self.setup.grub_device)) # fix not add windows grub entry run("chroot||grub-mkconfig -o /boot/grub/grub.cfg") self.update_progress(_("Configuring bootloader"),pulse=True) self.do_configure_grub() grub_retries = 0 while (not self.do_check_grub()): self.do_configure_grub() grub_retries = grub_retries + 1 if grub_retries >= 5: self.error_message(message=_( "WARNING: The grub bootloader was not configured properly! You need to configure it manually.")) break # Custom commands self.do_hook_commands("post_install_hook") # now unmount it log(" --> Unmounting partitions") run("umount -lf /target/dev/shm") run("umount -lf /target/dev/pts") if os.path.exists("/sys/firmware/efi"): run("umount -lf /target/sys/firmware/efi/") if self.setup.gptonefi: run("umount -lf /target/boot/efi") run("umount -lf /target/media/cdrom") run("umount -lf /target/boot") run("umount -lf /target/dev/") run("umount -lf /target/sys/") run("umount -lf /target/proc/") run("umount -lf /target/run/") run("rm -f /target/etc/resolv.conf") run("mv /target/etc/resolv.conf.bk /target/etc/resolv.conf") for partition in self.setup.partitions: if(partition.mount_as is not None and partition.mount_as != "" and partition.mount_as != "/" and partition.mount_as != "swap"): self.do_unmount("/target" + partition.mount_as) self.do_unmount("/target") self.do_unmount("/source") self.update_progress(_("Installation finished"),done=True) log(" --> All done")
def main():
    args = runner_arguments()
    logger.account_info(args)
    start_time = datetime.datetime.now().replace(microsecond=0)
    logger.info('Template runner start time: [{}]'.format(start_time))

    # Create the blob client, for use in obtaining references to
    # blob storage containers and uploading files to containers.
    blob_client = azureblob.BlockBlobService(
        account_name=args.StorageAccountName,
        account_key=args.StorageAccountKey)

    # Create a batch account using AAD
    batch_client = create_batch_client(args)

    # Create a keyvault client using AAD
    keyvault_client_with_url = create_keyvault_client(args)

    # Clean up any storage container that is older than 7 days.
    utils.cleanup_old_resources(blob_client)

    repository_branch_name = args.RepositoryBranchName
    if repository_branch_name == "current":
        repository_branch_name = Repository('../').head.shorthand
    logger.info('Pulling resource files from the branch: {}'.format(repository_branch_name))

    try:
        images_refs = []  # type: List[utils.ImageReference]
        with open(args.TestConfig) as f:
            try:
                template = json.load(f)
            except ValueError as e:
                logger.err("Failed to read test config file due to the following error", e)
                raise e
            for jobSetting in template["tests"]:
                application_licenses = None
                if 'applicationLicense' in jobSetting:
                    application_licenses = jobSetting["applicationLicense"]
                _job_managers.append(
                    job_manager.JobManager(
                        jobSetting["template"],
                        jobSetting["poolTemplate"],
                        jobSetting["parameters"],
                        keyvault_client_with_url,
                        jobSetting["expectedOutput"],
                        application_licenses,
                        repository_branch_name))
            for image in template["images"]:
                images_refs.append(
                    utils.ImageReference(image["osType"], image["offer"], image["version"]))
        run_job_manager_tests(blob_client, batch_client, images_refs, args.VMImageURL)
    except batchmodels.BatchErrorException as err:
        utils.print_batch_exception(err)
        raise
    finally:
        # Delete all the jobs and containers needed for the job.
        # Retries any jobs that failed.
        if args.CleanUpResources:
            utils.execute_parallel_jobmanagers("retry", _job_managers, batch_client,
                                               blob_client, _timeout / 2)
            utils.execute_parallel_jobmanagers("delete_resources", _job_managers,
                                               batch_client, blob_client)
            utils.execute_parallel_jobmanagers("delete_pool", _job_managers, batch_client)
    end_time = datetime.datetime.now().replace(microsecond=0)
    logger.print_result(_job_managers)
    logger.export_result(_job_managers, (end_time - start_time))
    logger.info('Sample end: {}'.format(end_time))
    logger.info('Elapsed time: {}'.format(end_time - start_time))
def get_content():
    filename = datetime.datetime.today().strftime("%Y-%m-%d")
    product_urls = []
    errors = []
    for brand in BRANDS:
        target_url = 'https://www.ssense.com/en-ca/men/designers/%s.json' % brand
        src = requestwrap.get_json(target_url)
        src_page = int(src['meta']['page'])
        max_page = int(src['meta']['total_pages'])
        # walk every page of the brand listing
        while src_page <= max_page:
            for obj in src['products']:
                product_urls.append(obj['url'])
            # get the next page if paginated
            src_page += 1
            src = requestwrap.get_json(target_url + "?page=%s" % src_page)
    with open("/tmp/%s.csv" % filename, 'w') as csvfile:
        fieldnames = ['isSaleEnabled', 'isSaleSoon', 'isCaptchaEnabled',
                      'isSkuCaptchaProtected', 'productSku', 'productName',
                      'productGender', 'productComposition', 'productCategory',
                      'productOrigin', 'productInStock', 'productBrand',
                      'productRegPrice', 'productSalePrice', 'productDiscPrice',
                      'productCurrency', 'productIsUniSize', 'size', 'sizeSku',
                      'sizeInStock', 'dateRun']
        writer = csv.writer(csvfile, dialect='excel')
        # Write header
        writer.writerow(fieldnames)
        for url in product_urls:
            src = requestwrap.get_json("https://www.ssense.com/en-ca%s.json" % url)
            try:
                isSaleEnabled = src['context']['isSaleEnabled']
                isSaleSoon = src['context']['isSaleSoon']
                isCaptchaEnabled = src['context']['isCaptchaEnabled']
                isSkuCaptchaProtected = src['context']['isSkuCaptchaProtected']
                productSku = src['product']['sku']
                productName = src['product']['name']
                productGender = src['product']['gender']
                productComposition = src['product']['composition']
                productCategory = src['product']['category']['name']
                productOrigin = src['product']['countryOfOrigin']
                productInStock = src['product']['inStock']
                productBrand = src['product']['brand']['name']
                productRegPrice = src['product']['price']['regular']
                productSalePrice = src['product']['price']['sale']
                productDiscPrice = src['product']['price']['discount']
                productCurrency = src['product']['price']['currency']
                productIsUniSize = src['product']['isUniSize']
                productSizes = []
                productSizeSkus = []
                productSizeInStock = []
                for sizeObj in src['product']['sizes']:
                    productSizes.append(sizeObj['name'])
                    productSizeSkus.append(sizeObj['sku'])
                    productSizeInStock.append(str(sizeObj['inStock']))
                # convert product(Sizes|SizeSkus|SizeInStock) to strings delimited by spaces
                productSizes = " ".join(productSizes)
                productSizeSkus = " ".join(productSizeSkus)
                productSizeInStock = " ".join(productSizeInStock)
                writer.writerow([isSaleEnabled, isSaleSoon, isCaptchaEnabled,
                                 isSkuCaptchaProtected, productSku, productName,
                                 productGender, productComposition, productCategory,
                                 productOrigin, productInStock, productBrand,
                                 productRegPrice, productSalePrice, productDiscPrice,
                                 productCurrency, productIsUniSize, productSizes,
                                 productSizeSkus, productSizeInStock, filename])
            except KeyError:
                errors.append("ERROR: could not retrieve product details for %s" % url)
                continue
    for e in errors:
        logger.err(e)
    return filename
def font(file):
    try:
        pyglet.font.add_file(file)
    except FileNotFoundError:
        log.err(f"Failed to load font: {file}")
            return kernel_vars[key]
        if key in live:
            return live[key]
        if key in main:
            return main[key]
    except:
        return default
    return default


# Distribution
if (get("distribution", "auto") == "auto"):
    for distro_system in glob("configs/distribution/*"):
        distro = load_config(distro_system)
        if not distro:
            err("Failed to load: " + distro_system)
        elif "check_this_dir" in distro and os.path.exists(
                distro["check_this_dir"]):
            break
else:
    distro = load_config("configs/distribution/{}.yaml".format(
        main["distribution"]))

# Initramfs system
if (get("initramfs_system", "auto") == "auto"):
    for initramfs_system in glob("configs/initramfs_systems/*"):
        initramfs = load_config(initramfs_system)
        if not initramfs:
            err("Failed to load: " + initramfs_system)
        elif "check_this_dir" in initramfs and os.path.exists(
                initramfs["check_this_dir"]):
def start_installation(self):
    # mount the media location.
    log(" --> Installation started")
    if (not os.path.exists("/target")):
        os.mkdir("/target")
    if (not os.path.exists("/source")):
        os.mkdir("/source")
    run("umount -lf /target/dev/shm")
    run("umount -lf /target/dev/pts")
    run("umount -lf /target/dev/")
    run("umount -lf /target/sys/")
    run("umount -lf /target/proc/")
    run("umount -lf /target/run/")
    self.mount_source()
    if self.setup.automated:
        self.create_partitions()
    else:
        self.format_partitions()
        self.mount_partitions()
    # Custom commands
    self.do_pre_install_commands()
    # Transfer the files
    SOURCE = "/source/"
    DEST = "/target/"
    EXCLUDE_DIRS = "home/* dev/* proc/* sys/* tmp/* run/* mnt/* media/* lost+found source target".split()
    # Add optional entries to EXCLUDE_DIRS
    for dirvar in config.get("exclude_dirs", ["/home"]):
        EXCLUDE_DIRS.append(dirvar)
    self.our_current = 0
    # (Valid) assumption: num-of-files-to-copy ~= num-of-used-inodes-on-/
    self.our_total = int(subprocess.getoutput(
        "df --inodes /{src} | awk 'END{{ print $3 }}'".format(src=SOURCE.strip('/'))))
    log(" --> Copying {} files".format(self.our_total))
    rsync_filter = ' '.join('--exclude=' + SOURCE + d for d in EXCLUDE_DIRS)
    rsync = subprocess.Popen("rsync --verbose --archive --no-D --acls "
                             "--hard-links --xattrs {rsync_filter} "
                             "{src}* {dst}".format(src=SOURCE, dst=DEST,
                                                   rsync_filter=rsync_filter),
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    while rsync.poll() is None:
        line = str(rsync.stdout.readline().decode("utf-8").replace("\n", ""))
        if not line:
            # still copying the previous file, just wait
            time.sleep(0.1)
        else:
            self.our_current = min(self.our_current + 1, self.our_total)
            self.update_progress(_("Copying /%s") % line)
    log(_("rsync exited with return code: %s") % str(rsync.poll()))
    # Steps:
    self.our_total = 12
    self.our_current = 0
    # chroot
    log(" --> Chrooting")
    self.update_progress(_("Entering the system ..."))
    run("mount --bind /dev/ /target/dev/")
    run("mount --bind /dev/shm /target/dev/shm")
    run("mount --bind /dev/pts /target/dev/pts")
    run("mount --bind /sys/ /target/sys/")
    run("mount --bind /proc/ /target/proc/")
    run("mount --bind /run/ /target/run/")
    if os.path.exists("/sys/firmware/efi"):
        run("mount --bind /sys/firmware/efi/ /target/sys/firmware/efi/")
    run("mv /target/etc/resolv.conf /target/etc/resolv.conf.bk")
    run("cp -f /etc/resolv.conf /target/etc/resolv.conf")
    kernelversion = subprocess.getoutput("uname -r")
    if os.path.exists("/lib/modules/{0}/vmlinuz".format(kernelversion)):
        run("cp /lib/modules/{0}/vmlinuz /target/boot/vmlinuz-{0}".format(kernelversion))
    # add new user
    log(" --> Adding new user")
    self.our_current += 1
    try:
        for cmd in config.distro["run_before_user_creation"]:
            run("chroot||" + cmd)
    except Exception:
        err("This action is not supported for your distribution.")
    self.update_progress(_("Adding new user to the system"))
    # TODO: support encryption
    run('chroot||useradd -m -s {shell} -c \"{realname}\" {username}'.format(
        shell=config.get("using_shell", "/bin/bash"),
        realname=self.setup.real_name,
        username=self.setup.username))
    # Add user to additional groups
    for group in config.get("additional_user_groups", ["audio", "video", "netdev"]):
        run("chroot||usermod -aG {} {}".format(group, self.setup.username))
    if (run("which chpasswd &>/dev/null") == 0) and config.get("use_chpasswd", True):
        fp = open("/target/tmp/.passwd", "w")
        fp.write(self.setup.username + ":" + self.setup.password1 + "\n")
        if config.get("set_root_password", True):
            fp.write("root:" + self.setup.password1 + "\n")
        fp.close()
        run("chroot||cat /tmp/.passwd | chpasswd")
        run("chroot||rm -f /tmp/.passwd")
    else:
        run("chroot||echo -e \"{0}\\n{0}\\n\" | passwd {1}".format(
            self.setup.password1, self.setup.username))
        if config.get("set_root_password", True):
            run("chroot||echo -e \"{0}\\n{0}\\n\" | passwd".format(self.setup.password1))
    self.our_current += 1
    self.update_progress(_("Applying login settings"))
    # Set LightDM to show user list by default
    if config.get("list_users_when_auto_login", True):
        run(r"chroot||sed -i -r 's/^#?(greeter-hide-users)\s*=.*/\1=false/' /etc/lightdm/lightdm.conf")
    else:
        run(r"chroot||sed -i -r 's/^#?(greeter-hide-users)\s*=.*/\1=true/' /etc/lightdm/lightdm.conf")
    # Set autologin for user if they so elected
    if self.setup.autologin:
        # LightDM and Auto Login Groups
        run("chroot||groupadd -r autologin && gpasswd -a {user} autologin && "
            "groupadd -r nopasswdlogin && gpasswd -a {user} nopasswdlogin".format(
                user=self.setup.username))
        run(r"chroot||sed -i -r 's/^#?(autologin-user)\s*=.*/\1={user}/' /etc/lightdm/lightdm.conf".format(
            user=self.setup.username))
    # /etc/fstab, mtab and crypttab
    self.our_current += 1
    self.update_progress(_("Writing filesystem mount information to /etc/fstab"))
    self.write_fstab()
if __name__ == '__main__':
    # Initialization
    pyglet.resource.path.append('.')
    load.font("assets/minecraft/Minecraftia.ttf")
    # window = Game(width=856, height=482, caption=f"Minecraft 2D v{version}", resizable=True)
    pyglet.clock.schedule_interval(window.update, window.frame_rate)
    pyglet.clock.schedule_interval(window.cal_fps, 0.5)
    from Scenes.Initialization import Initialization
    window.set_minimum_size(128, 128)
    window.set_icon(
        load.image("assets/minecraft/textures/blocks/crafting_table_side.png"))
    window.scene = Initialization(window)
    try:
        pyglet.app.run()
    except Exception as e:
        log.err(f"Fatal Error: {e}")
        raise e
    log.info("Stopping Game")
    window.run = False
    log.info("Cleaning swap folder")
    for name in os.listdir("swap"):
        os.remove(f"swap/{name}")
    log.info("Game is stopped")
#!/usr/bin/env python3
from cmd import call_command, set_up_arguments, parser
from logger import err

if __name__ == '__main__':
    set_up_arguments()
    args = parser.parse_args()
    to_format = args.INT_TO_BC
    to_dump = args.FILE
    to_op = args.INT_TO_OP
    from_loc = args.FROM
    to_loc = args.TO
    no_args = not to_format and not to_dump and not to_op
    if no_args:
        err("Missing arguments! Use [-h] for more information.")
    if to_format:
        call_command('-b', to_format)
    elif to_op:
        call_command('-op', to_op)
    elif to_dump:
        call_command('-f', [(to_dump, from_loc, to_loc)])