def test_parse_name(self):
    """parse_name() splits both remote (ssh) and local dataset names.

    Remote form: ``ssh:<port>:<user>@<host>:<filesystem>``; local form is
    just the filesystem path, with all ssh fields reported as None.
    """
    # Remote name: every component is extracted, port as an int.
    _type, fsname, user, host, port = parse_name('ssh:23:user@hostname:rpool/data')
    assert _type == 'ssh'
    assert fsname == 'rpool/data'
    assert user == 'user'
    assert host == 'hostname'
    assert port == 23
    # Local name: type falls back to 'local', ssh fields are absent.
    # PEP 8: compare to None with `is`, not `==`.
    _type, fsname, user, host, port = parse_name('rpool/data')
    assert _type == 'local'
    assert fsname == 'rpool/data'
    assert user is None
    assert host is None
    assert port is None
def grab_contact_data(current, itr):
    """Read one labelled field from the listing and store it on contact_info.

    current -- element holding the field label (e.g. "Name", "Phone")
    itr     -- iterator over sibling elements; the value element is next(itr)
    """
    field = current.text.strip()
    # Handle special parsing requirements
    if field == "Name":
        contact_info.update_fields(parse_name(next(itr).text.strip()))
    elif field == "Phone":
        # BUG FIX: was `next(iter)` -- calling next() on the builtin iter()
        # function raises TypeError; the sibling iterator is `itr`.
        contact_info.update_field(
            tare_provided_fields[field],
            validators["phone"](next(itr).text.strip()))
    elif field == "Address":
        # Collapse whitespace runs so the address validator sees one line.
        # Raw string avoids the invalid-escape warning for "\s".
        info = re.sub(r"\s+", " ", next(itr).text.strip())
        try:
            address = validators["address"](info.strip())
            # Parsed components may be a plain string or a list of parts.
            # `unicode` only exists on Python 2; fall back cleanly on Py3
            # instead of letting a NameError be silently swallowed.
            try:
                str_types = (str, unicode)  # noqa: F821 -- Python 2
            except NameError:
                str_types = (str,)  # Python 3: all text is str

            def _as_text(part):
                # Normalize one parsed component to a single string.
                return part if isinstance(part, str_types) else " ".join(part)

            contact_info.update_fields({
                "MailingStreet": _as_text(address.street),
                "MailingCity": _as_text(address.city),
                "MailingState": _as_text(address.state),
                "MailingPostalCode": _as_text(address.zip),
            })
        except Exception:
            # Best effort: a malformed address is deliberately skipped.
            pass
    elif field in tare_provided_fields:
        value = next(itr).text.strip()
        contact_info.update_field(tare_provided_fields[field], value)
def parse_case_worker_details(souped):
    """
    Using the CASE_WORKER_SELECTOR grab essential data. This includes:
    TareId, Name, Email, Address, Region, County
    """
    cw_data = {}
    for div in souped.select("> div"):
        label = div.text
        if "TARE Coord" in label:
            # Second inner div carries the coordinator's full name.
            raw = div.select_one("div:nth-of-type(2)").text.strip()
            cw_data.update(parse_name(raw))
        elif "Phone" in label:
            raw = div.select_one("div:nth-of-type(2)").text.strip()
            phone = valid_phone(raw)
            if phone:
                cw_data["Phone"] = phone
        elif "Email" in label:
            raw = div.select_one("div:nth-of-type(2)").text.strip()
            email = valid_email(raw)
            if email:
                cw_data["Email"] = email
    return cw_data
def main_batch(scan_no):
    """Solve every single-pulse file of a scan and summarize ra/dec statistics.

    Writes one line per valid solution to ``solve_No%04d.txt`` and prints
    mean/std of the fitted ra and dec.
    """
    cfg = utils.gen_cfg_el060(scan_no)
    # Calibration is stored under the following scan number, hence +1.
    cal = np.load('cal_fine_No%04d.npy' % (scan_no + 1),
                  allow_pickle=True).item()
    names = utils.gen_file_list('nsums', scan_no, 'npy')
    res = []
    # Context manager guarantees the log file is closed even if a solve raises
    # (the original leaked the handle on any exception before f.close()).
    with open('solve_No%04d.txt' % (scan_no), 'w') as f:
        for name in names:
            _, dm, time = utils.parse_name(name)
            x, xe = make_sp_solve(cfg, cal, name)
            if len(x) == 0:
                # Solver produced nothing usable for this file; skip it.
                continue
            f.write('%11.6f\t\t%.6f\t%.6f\t%.6f\t%.6f\t%.3f\n' %
                    (time, x[0], x[1], xe[0], xe[1], dm))
            res.append(x)
    if not res:
        # Guard: indexing res[:, 0] on an empty array would raise IndexError.
        print('SP: total %d, valid 0' % len(names))
        return
    res = np.array(res)
    ra = res[:, 0]
    dec = res[:, 1]
    print('SP: total %d, valid %d' % (len(names), res.shape[0]))
    print('ra: %f (%f)' % (np.mean(ra), np.std(ra)))
    print('dec: %f (%f)' % (np.mean(dec), np.std(dec)))
def main_single():
    """Fit the single-pulse file named on the command line."""
    args = sys.argv
    if len(args) < 2:
        # No file given: print the usage hint and bail out.
        print('./sp_fit.py sp_file')
        return
    fname = args[1]
    # Only the scan number is needed to build the configuration here.
    scan_no, _dm, _time = utils.parse_name(fname)
    make_fit_sp(utils.gen_cfg(scan_no), fname)
def clean_config(config):
    """Deletes old snapshots according to strategy given in config.

    For every config entry with a truthy 'clean' key, opens the dataset
    (locally or over ssh), finds it and all of its children, and runs
    clean_snap on each child that has no more specific config entry of
    its own.
    """
    # Timestamp helper for log lines, e.g. "Jan 01 12:00:00".
    logtime = lambda: datetime.now().strftime('%b %d %H:%M:%S')
    print('{:s} INFO: Cleaning snapshots...'.format(logtime()))
    for conf in config:
        # Entries without a clean strategy are skipped entirely.
        if not conf.get('clean', None):
            continue
        name = conf['name']
        try:
            # Name may be local ('rpool/data') or remote
            # ('ssh:<port>:<user>@<host>:<fs>'); parse_name splits it.
            _type, fsname, user, host, port = parse_name(name)
        except ValueError as err:
            print('{:s} ERROR: Could not parse {:s}: {}...'.format(
                logtime(), name, err))
            continue
        if _type == 'ssh':
            try:
                # NOTE(review): assumes conf always has a 'key' entry for
                # ssh names -- a missing key would raise KeyError here.
                ssh = open_ssh(user, host, port=port, key=conf['key'])
            except (FileNotFoundError, SSHException):
                # Connection problems: silently move to the next entry.
                continue
        else:
            ssh = None
        try:
            # Children includes the base filesystem (filesystem)
            children = zfs.find(path=fsname, types=['filesystem', 'volume'], ssh=ssh)
        except (ValueError, DatasetNotFoundError, CalledProcessError) as err:
            print('{:s} ERROR: {}'.format(logtime(), err))
            continue
        # Clean snapshots of parent filesystem
        clean_snap(children[0], conf)
        # Clean snapshots of all children that don't have a separate config entry
        for child in children[1:]:
            # Check if any of the parents (but child of base filesystem) have a config entry
            for parent in children[1:]:
                if ssh:
                    # Rebuild the full ssh-style name so it can be compared
                    # against the raw 'name' strings in the config.
                    parent_name = 'ssh:{:d}:{:s}@{:s}:{:s}'.format(
                        port, user, host, parent.name)
                else:
                    parent_name = parent.name
                # Skip if any parent entry already in config
                if (child.name.startswith(parent.name) and
                        parent_name in [entry['name'] for entry in config]):
                    break
            else:
                # for/else: no parent (including the child itself) has its
                # own config entry, so this child is cleaned under `conf`.
                clean_snap(child, conf)
        if ssh:
            ssh.close()
def main_single():
    """Run the fit calculation for one .sp.npy file from the command line."""
    args = sys.argv
    if len(args) < 2:
        # Missing argument: show expected usage and return.
        print('./sp_calc.py .sp.npy')
        return
    name = args[1]
    # The scan number embedded in the filename selects the configuration.
    scan_no, _dm, _time = utils.parse_name(name)
    cfg = utils.gen_cfg(scan_no)
    make_fit_calc(cfg, prep_din(cfg), name)
def main_single():
    """Solve a single .sp.npy file and print the fitted position."""
    if len(sys.argv) < 2:
        # Missing argument: show expected usage and return.
        print('./sp_calc.py .sp.npy')
        return
    name = sys.argv[1]
    scan_no, _dm, _time = utils.parse_name(name)
    cfg = utils.gen_cfg(scan_no)
    # Restrict the solve to the parallel-hand polarizations.
    cfg.pols = [b'LL', b'RR']
    # Calibration is stored under the following scan number, hence +1.
    cal_file = 'cal_fine_No%04d.npy' % (scan_no + 1)
    cal = np.load(cal_file, allow_pickle=True).item()
    x, _xe = make_sp_solve(cfg, cal, name)
    print(x)
def send_config(config):
    """Tries to sync all entries in the config to their dest. Finds all
    children of the filesystem and calls send_snap on each of them."""
    # Timestamp helper for log lines, e.g. "Jan 01 12:00:00".
    logtime = lambda: datetime.now().strftime('%b %d %H:%M:%S')
    print('{:s} INFO: Sending snapshots...'.format(logtime()))
    for conf in config:
        # Only entries with at least one destination are processed.
        if not conf.get('dest', None):
            continue
        source_fs_name = conf['name']
        # Sending is only supported from the local machine.
        if source_fs_name.startswith('ssh'):
            print('{:s} ERROR: Cannot send from remote location...'.format(logtime()))
            continue
        try:
            # Children includes the base filesystem (source_fs)
            source_children = zfs.find(path=source_fs_name,
                                       types=['filesystem', 'volume'])
        except (ValueError, DatasetNotFoundError, CalledProcessError) as err:
            print('{:s} ERROR: {}'.format(logtime(), err))
            continue
        for backup_dest in conf['dest']:
            try:
                # Destination may be local or 'ssh:<port>:<user>@<host>:<fs>'.
                _type, dest_name, user, host, port = parse_name(backup_dest)
            except ValueError as err:
                print('{:s} ERROR: Could not parse {:s}: {}...'
                      .format(logtime(), backup_dest, err))
                continue
            if _type == 'ssh':
                # NOTE(review): pop(0) consumes conf['dest_keys'] in place,
                # pairing keys with destinations by position -- this mutates
                # the caller's config; confirm that is intended.
                dest_key = conf['dest_keys'].pop(0) if conf['dest_keys'] else None
                try:
                    ssh = open_ssh(user, host, port=port, key=dest_key)
                except (FileNotFoundError, SSHException):
                    # Connection failed: skip this destination silently.
                    continue
                dest_name_log = '{:s}@{:s}:{:s}'.format(user, host, dest_name)
            else:
                ssh = None
                dest_name_log = dest_name
            # Check if base destination filesystem exists
            try:
                zfs.open(dest_name, ssh=ssh)
            except DatasetNotFoundError:
                print('{:s} ERROR: Destination {:s} does not exist...'
                      .format(logtime(), dest_name_log))
                continue
            except (ValueError, CalledProcessError) as err:
                print('{:s} ERROR: {}'.format(logtime(), err))
                continue
            # Match children on source to children on dest
            dest_children_names = [child.name.replace(source_fs_name, dest_name)
                                   for child in source_children]
            # Send all children to corresponding children on dest
            for source, dest in zip(source_children, dest_children_names):
                send_snap(source, dest, ssh=ssh)
            if ssh:
                ssh.close()
for file in os.listdir(subdir_path): file_path = os.path.join(subdir_path, file) images.append(file_path) labels.append(role_type) package = np.vstack([images, labels]).transpose() np.random.shuffle(package) xs, ys = [], [] for i, (x, y) in enumerate(package): if file_num is not None and i >= file_num: break im = Image.open(x).resize((im_width, im_height)) im = np.array(im) / 255. xs.append(im) ys.append([(1 if j == int(y) else 0) for j in range(depth)]) xs = np.array(xs) ys = np.array(ys) return xs, ys PATH_ROLES = 'roles' if __name__ == '__main__': names, index2name, name2index = utils.parse_name() print(names, index2name) x_data, y_data = get_data(PATH_ROLES, name2index, file_num=1000) print(len(x_data))