def move_attempt(self, coordinate_orig_dest):
    """Attempts to make a move.

    Coordinates are given in the format frfr, where 'f' is the file and
    'r' is the rank; the first two characters refer to the origin square
    and the last two of them to the destination square.

    Returns a list: [False] when the move is rejected, or
    [True, origin_piece] when the move was performed.
    """
    print("Deep copy a lo loco")
    # NOTE(review): buffer_game is never used in this function --
    # presumably intended for rollback / check detection; confirm
    # before removing.
    buffer_game = deepcopy(self)
    origin = coordinate_to_number(coordinate_orig_dest[:2])
    print(f"Origin coordinate: {origin}")
    destination = coordinate_to_number(coordinate_orig_dest[2:])
    vprint(f"Destination coordinate: {destination}")
    if origin == destination:
        print("Origin and destination are the same")
        return [False]
    else:
        # Proceed to evaluate whether there is a piece, and whether that
        # piece is of color of current turn.
        origin_piece = self.chessboard[origin[0]][origin[1]].get_piece()
        # BUGFIX: identity comparison with None ('is None', not '== None').
        if origin_piece is None:
            print("there's no piece in that position")
            return [False]
        elif origin_piece.color != self.turn:
            print(f"it's {self.turn.name} to move!")
            return [False]
        else:
            # NOTE(review): the results of is_legal_move /
            # is_pseudolegal_move are effectively discarded (inner body is
            # 'pass') -- this looks like unfinished scaffolding; the calls
            # are kept in case they have side effects.
            if self.is_legal_move():
                if self.is_pseudolegal_move(origin_piece):
                    pass
            # move_eval returns a list shaped [False] or
            # [True, special condition, special condition arguments].
            pseudo_evaluation = origin_piece.move_eval(destination)
            if pseudo_evaluation[0]:  # pseudo-legal move
                special = pseudo_evaluation[1:]
                Piece.do_move(origin_piece, destination, special)
                return [True, origin_piece]
            else:
                return [False]
def are_pieces_on_the_way():
    # Rook-style (straight-line) obstruction check.
    # Closure over enclosing-scope names: delta (signed move length),
    # direction ("H" or "V"), ox/oy (origin square), a_game (the board).
    # Returns True when any intermediate square holds a piece.
    if abs(delta) == 1:
        # Adjacent square: nothing can be in between.
        vprint("Rook is only moving by one square")
        return False
    if delta < 0:
        orientation = -1
    else:
        orientation = 1
    print(f"orientation is {orientation}")
    print(direction)
    if direction == "H":
        # Walk the squares strictly between origin and destination.
        for i in range(1, abs(delta)):
            print(f"evaluating an horizontal move of {i} squares")
            piece = a_game.chessboard[ox + (i * orientation)][oy].get_piece()
            if piece != None:
                return True
        return False
    elif direction == "V":
        for i in range(1, abs(delta)):
            print(f"evaluating a vertical move of {i} squares")
            piece = a_game.chessboard[ox][oy + (i * orientation)].get_piece()
            if piece != None:
                return True
        return False
    # NOTE(review): any other 'direction' value falls through and returns
    # None (falsy) -- confirm callers only ever pass "H" or "V".
def are_pieces_on_the_way(): delta = abs(dx - ox) #getting move directions if ox < dx: orientationH = 1 else: orientationH = -1 if oy < dy: orientationV = 1 else: orientationV = -1 vprint( f"Delta movement: {delta}. Orientation H: {orientationH}. Orientation V: {orientationV}" ) if abs(delta) == 1: print("Bishop is only moving by one square") return False else: for i in range(1, delta): #print(f"i value is {i}") if a_game.chessboard[ox + (i * orientationH)][ oy + (i * orientationV)].get_piece() != None: print("Found piece on the way") return True return False
def test(self):
    """For every condition in ``self.conditions``, check that the query
    compiler reports the expected usable indexes, index expressions and
    string expressions (compared against the ``self.*`` expectations)."""
    for condition in self.conditions:
        # Which of the indexed columns would this query actually use?
        c_usable_idxs = self.willQueryUseIndexing(condition, {})
        self.assert_(
            c_usable_idxs == self.usable_idxs,
            "\nQuery with condition: ``%s``\n"
            "Computed usable indexes are: ``%s``\n"
            "and should be: ``%s``" %
            (condition, c_usable_idxs, self.usable_idxs)
        )
        # Compile the condition and inspect the compiler's output.
        condvars = self.requiredExprVars(condition, None)
        compiled = self.compileCondition(condition, condvars)
        c_idx_expr = compiled.index_expressions
        self.assert_(
            c_idx_expr == self.idx_expr,
            "\nWrong index expression in condition:\n``%s``\n"
            "Compiled index expression is:\n``%s``\n"
            "and should be:\n``%s``" % (condition, c_idx_expr, self.idx_expr)
        )
        c_str_expr = compiled.string_expression
        self.assert_(
            c_str_expr == self.str_expr,
            "\nWrong index operations in condition:\n``%s``\n"
            "Computed index operations are:\n``%s``\n"
            "and should be:\n``%s``" % (condition, c_str_expr, self.str_expr)
        )
        vprint(
            "* Query with condition ``%s`` will use "
            "variables ``%s`` for indexing." %
            (condition, compiled.index_variables)
        )
def check_docker_volume(config):
    """Verify the Datera Docker volume plugin is installed and enabled,
    and that a test volume can be created, reaches the Datera backend,
    and can be deleted again.

    Reports failures/warnings through ff()/wf(); returns early on fatal
    problems.
    """
    vprint("Checking docker volume driver")
    if not exe_check("docker ps >/dev/null 2>&1"):
        return ff("Docker is not installed", "42BAAC76")
    # BUGFIX: the original tested ``if exe_check(...)`` here, reporting
    # the plugin as *not installed* exactly when grep DID find it.
    if not exe_check("docker plugin ls | grep {}".format(PLUGIN)):
        return ff("Datera Docker plugin is not installed", "6C531C5D")
    # BUGFIX: 'grev' was a typo for 'grep' (the command always failed).
    plugin = exe("docker plugin ls | grep -v DESCRIPTION | "
                 "grep {}".format(PLUGIN))
    if len(plugin.strip().split('\n')) > 1:
        wf("More than one version of Datera docker driver installed",
           "B3BF691D")
    # NOTE(review): this ff() call lacks the usual error code argument --
    # every sibling call passes one; confirm intended.
    if 'enabled' not in plugin or 'disabled' in plugin:
        ff("Datera docker plugin is not enabled")
    test_name = "ddct-test1"
    if not exe_check(
            "docker volume create -d {} --name {} --opt replica=1 --opt "
            "size=1".format(PLUGIN, test_name)):
        return ff("Could not create a volume with the Datera Docker plugin",
                  "621A6F51")
    api = config['api']
    try:
        # The volume must also show up as an app_instance on the backend.
        api.app_instances.get(test_name)
    except ApiNotFoundError:
        return ff("Docker volume {} did not create on the Datera backend"
                  "".format(test_name), "B106D1CD")
    if not exe_check("docker volume rm {}".format(test_name)):
        ff("Could not delete Docker volume {}".format(test_name), "AF3DB8B3")
def test(self):
    """For every condition in ``self.conditions``, check that the query
    compiler reports the expected usable indexes, index expressions and
    string expressions (compared against the ``self.*`` expectations)."""
    for condition in self.conditions:
        # Which of the indexed columns would this query actually use?
        c_usable_idxs = self.willQueryUseIndexing(condition, {})
        self.assert_(
            c_usable_idxs == self.usable_idxs,
            "\nQuery with condition: ``%s``\n"
            "Computed usable indexes are: ``%s``\n"
            "and should be: ``%s``" %
            (condition, c_usable_idxs, self.usable_idxs))
        # Compile the condition and inspect the compiler's output.
        condvars = self.requiredExprVars(condition, None)
        compiled = self.compileCondition(condition, condvars)
        c_idx_expr = compiled.index_expressions
        self.assert_(
            c_idx_expr == self.idx_expr,
            "\nWrong index expression in condition:\n``%s``\n"
            "Compiled index expression is:\n``%s``\n"
            "and should be:\n``%s``" % (condition, c_idx_expr, self.idx_expr))
        c_str_expr = compiled.string_expression
        self.assert_(
            c_str_expr == self.str_expr,
            "\nWrong index operations in condition:\n``%s``\n"
            "Computed index operations are:\n``%s``\n"
            "and should be:\n``%s``" % (condition, c_str_expr, self.str_expr))
        vprint("* Query with condition ``%s`` will use "
               "variables ``%s`` for indexing." %
               (condition, compiled.index_variables))
def fix_multipath_1():
    """Installs multipath tooling packages"""
    vprint("Fixing multipath settings")
    # Package name differs per distro family.
    if get_os() != UBUNTU:
        exe("yum install device-mapper-multipath -y")
    else:
        exe("apt-get install multipath-tools -y")
def check_sysctl(config):
    """Compare live sysctl values against the recommended tuning values
    and raise a failure (with its error code) for every mismatch."""
    vprint("Checking various sysctl settings")
    settings = sorted([
        ("net.ipv4.tcp_timestamps", "0", "F0D7A1AD"),
        ("net.ipv4.tcp_sack", "1", "7A9AB850"),
        ("net.core.netdev_max_backlog", "250000", "7656C46C"),
        ("net.core.somaxconn", "1024", "34A7B822"),
        ("net.core.rmem_max", "16777216", "4C4B3F0B"),
        ("net.core.wmem_max", "16777216", "7F8479C2"),
        ("net.core.rmem_default", "8388608", "FBCA17D5"),
        ("net.core.wmem_default", "8388608", "68191DE5"),
        ("net.core.optmem_max", "8388608", "8FA26A66"),
        ("net.ipv4.tcp_rmem", "\"4096 87380 16777216\"", "2A6057BD"),
        ("net.ipv4.tcp_wmem", "\"4096 65536 16777216\"", "CD37F436"),
        ("net.ipv4.tcp_low_latency", "1", "6BE2899E"),
        ("net.ipv4.tcp_fin_timeout", "15", "59FD5DF7"),
        ("net.ipv4.tcp_syncookies", "1", "01C594E7"),
        ("net.ipv4.tcp_adv_win_scale", "1", "1F523B04"),
        ("net.ipv4.tcp_window_scaling", "1", "A8A6F381"),
        ("net.ipv4.tcp_max_syn_backlog", "8192", "2862CB28"),
        ("net.ipv4.tcp_tw_reuse", "1", "989229FC"),
        ("net.ipv4.tcp_synack_retries", "2", "55EF997B")
    ])

    def _normalize(raw):
        # Strip surrounding quotes; multi-value settings (e.g. tcp_rmem)
        # compare as a list of fields, single values as a bare string.
        fields = raw.strip().strip("\"").split()
        return fields[0] if len(fields) == 1 else fields

    for setting, value, code in settings:
        found = _normalize(exe("sysctl --values {}".format(setting)).strip())
        value = _normalize(value)
        if found != value:
            ff("{}={} is not set. Found: {}".format(setting, value, found),
               code)
def check_mgmt(config):
    """Check the management interface MTU against the cluster, picking
    the L3 or normal (L2) strategy based on the global is_l3 flag."""
    vprint("Checking mgmt interface mtu match")
    mgmt_ip = config['mgmt_ip']
    if not is_l3:
        check_mtu_normal("MGMT", mgmt_ip, config)
    else:
        check_mtu_l3(mgmt_ip, config)
def check_vip1(config):
    """Check the VIP1 interface MTU against the cluster, picking the L3
    or normal (L2) strategy based on the global is_l3 flag."""
    vprint("Checking vip1 interface mtu match")
    vip1_ip = config['vip1_ip']
    if not is_l3:
        check_mtu_normal("VIP1", vip1_ip, config)
    else:
        check_mtu_l3(vip1_ip, config)
def get_install_script():
    """Download the iscsi-recv installation script and mark it
    executable.  Exits the process if the download fails; a chmod
    failure is only reported."""
    vprint("Retrieving installation script")
    downloaded = exe_check("wget {}".format(INSTALL_SCRIPT))
    if not downloaded:
        print("Failed to get iscsi-recv installation script {}".format(
            INSTALL_SCRIPT))
        sys.exit(1)
    made_executable = exe_check("chmod +x {}".format(SCRIPT_NAME))
    if not made_executable:
        # Non-fatal: report and keep going (no exit here).
        print("Failed to set iscsi-recv install script to executable: {}"
              "".format(SCRIPT_NAME))
def check_vip2(config):
    """Check the (optional) VIP2 interface MTU against the cluster.
    VIP2 may be absent from the config; that only raises a warning."""
    vprint("Checking vip2 interface mtu match")
    vip2_ip = config.get('vip2_ip')
    if not vip2_ip:
        wf("No vip2_ip found", "416B534D")
        return
    if not is_l3:
        check_mtu_normal("VIP2", vip2_ip, config)
    else:
        check_mtu_l3(vip2_ip, config)
def check_irq(config):
    """Check that the irqbalance service is not running (it should be
    turned off on Datera client hosts)."""
    vprint("Checking irqbalance settings, (should be turned off)")
    if not exe_check("which systemctl"):
        # Legacy (non-systemd) host: query via the 'service' tool.
        # NOTE(review): with err=True the sense of exe_check appears to be
        # inverted relative to the err=False calls elsewhere (compare
        # check_multipath) -- confirm against the exe_check implementation
        # before changing this condition.
        if not exe_check(
                "service irqbalance status | "
                "grep 'Active: active'",
                err=True):
            fix = "service irqbalance stop"
            return ff("irqbalance is active", "B19D9FF1", fix=fix)
    else:
        if not exe_check(
                "systemctl status irqbalance | "
                "grep 'Active: active'",
                err=True):
            fix = "systemctl stop irqbalance && systemctl disable irqbalance"
            return ff("irqbalance is active", "B19D9FF1", fix=fix)
def check_multipath(config):
    """Check that the multipath binary exists and the multipathd service
    is running, reporting a fix command appropriate to the init system."""
    vprint("Checking multipath settings")
    if not exe_check("which multipath", err=False):
        ff("Multipath binary could not be found, is it installed?",
           "2D18685C")
    # Choose the status command and fix for systemd vs legacy init.
    if exe_check("which systemctl"):
        status_cmd = "systemctl status multipathd | grep 'Active: active'"
        fix = "systemctl start multipathd"
    else:
        status_cmd = "service multipathd status | grep 'Active: active'"
        fix = "service multipathd start"
    if not exe_check(status_cmd, err=False):
        ff("multipathd not enabled", "541C10BF", fix=fix)
def check_udev(config):
    """Check that the Datera udev rules file and the device serial
    number helper script are both installed."""
    vprint("Checking udev rules config")
    rules_path = "/etc/udev/rules.d/99-iscsi-luns.rules"
    if not os.path.exists(rules_path):
        fix = "A copy of the udev rules are available from: {}".format(
            UDEV_URL)
        ff("Datera udev rules are not installed", "1C8F2E07", fix=fix)
    helper_path = "/sbin/fetch_device_serial_no.sh"
    if not os.path.exists(helper_path):
        fix = ("A copy of fetch_device_serial_no.sh is available at: "
               "{}".format(FETCH_SO_URL))
        ff("fetch_device_serial_no.sh is missing from /sbin", "6D03F50B",
           fix=fix)
def detect_glance_install():
    """Locate the Glance installation directory, or None if not found."""
    # Try the well-known installation directories first.
    for path in LOCATIONS:
        if os.path.isdir(path):
            return path
    else:
        # for/else: no standard location matched -- fall back to a
        # filesystem search for the Datera driver file.
        result = None
        try:
            vprint("Normal cinder install not found, searching for driver")
            result = exe("sudo find / -name datera.py")
            # Reject empty results and hits inside the glance-driver repo
            # checkout itself.
            if not result or result.isspace() or "glance-driver" in result:
                return None
            # Strip the driver-file suffix to get the install root.
            return result.strip().replace("/_drivers/datera.py", "")
        except (subprocess.CalledProcessError, ValueError):
            return None
def check_iscsi(config):
    """Validate the open-iscsi installation: iscsiadm availability, a
    running iscsid daemon, and the noop_out timeout/interval values in
    /etc/iscsi/iscsid.conf (both must be 2, each present exactly once)."""
    vprint("Checking ISCSI settings")
    if not exe_check("which iscsiadm"):
        # BUGFIX: 'fix' was unbound (NameError at the ff() call) when the
        # package manager was neither APT nor YUM.
        fix = None
        if get_pkg_manager() == APT:
            fix = "apt-get install open-iscsi"
        elif get_pkg_manager() == YUM:
            fix = "yum install iscsi-initiator-utils"
        ff("iscsiadm is not available, has open-iscsi been installed?",
           "EFBB085C", fix=fix)
    if not exe_check("ps -ef | grep iscsid | grep -v grep"):
        fix = "service iscsi start || systemctl start iscsid.service"
        ff("iscsid is not running. Is the iscsid service running?",
           "EB22737E", fix=fix)
    ifile = "/etc/iscsi/iscsid.conf"
    if not os.path.exists(ifile):
        ff("iscsid configuration file does not exist", "C6F2B356")
        return
    with io.open(ifile, 'r') as f:
        iconf = f.readlines()
    noopt = "node.session.timeo.noop_out_timeout"
    noopt_found = False
    noopi = "node.session.timeo.noop_out_interval"
    noopi_found = False
    for index, line in enumerate(iconf):
        # First occurrence: validate the value; later occurrences: warn
        # about the duplicate.
        if not noopt_found and noopt in line:
            noopt_found = True
            if "2" not in line:
                ff("{} is not set to '2' in iscsid.conf".format(noopt),
                   "F6A49337")
        elif noopt_found and noopt in line:
            wf(
                "{} duplicate found in iscsid.conf, line {}".format(
                    noopt, index), "D3E55910")
        if noopi in line:
            noopi_found = True
            if "2" not in line:
                ff("{} is not set to '2' in iscsid.conf".format(noopi),
                   "E48C1907")
        elif noopi_found and noopi in line:
            wf(
                "{} duplicate found in iscsid.conf, line {}".format(
                    noopi, index), "CA9AA865")
    if not noopt_found:
        ff("'{} = 2' is not present in iscsid.conf".format(noopt),
           "E29BF18A")
    if not noopi_found:
        ff("'{} = 2' is not present in iscsid.conf".format(noopi),
           "A2EED511")
def fix_multipath_conf_1():
    """Writes completely new multipath.conf based off of template"""
    mfile = "/etc/multipath.conf"
    bfile = "/etc/multipath.conf.bak.{}".format(str(uuid.uuid4())[:4])
    if os.path.exists(mfile):
        vprint("Found existing multipath.conf, moving to {}".format(bfile))
        shutil.copyfile(mfile, bfile)
    # Ubuntu's multipath wants 'timer' where RHEL-family wants 'timeout'.
    if get_os() == UBUNTU:
        contents = MULTIPATH_CONF.replace("REPLACEME", "timer")
    else:
        contents = MULTIPATH_CONF.replace("REPLACEME", "timeout")
        # filter out getuid line which is deprecated in RHEL
        contents = "\n".join(l for l in contents.split("\n")
                             if "getuid" not in l)
    with io.open("/etc/multipath.conf", "w+") as f:
        f.write(contents)
def createIndexes(self, colname, ncolname, extracolname):
    """Create indexes on the three given columns (no-op unless this test
    case is an indexed variant); skip the test if the column type can
    not be indexed."""
    if not self.indexed:
        return
    try:
        kind = self.kind
        vprint("* Indexing ``%s`` columns. Type: %s." % (colname, kind))
        for acolname in [colname, ncolname, extracolname]:
            acolumn = self.table.colinstances[acolname]
            # Small block sizes + test mode keep the index tiny and fast.
            acolumn.createIndex(
                kind=self.kind, optlevel=self.optlevel,
                _blocksizes=small_blocksizes, _testmode=True)
    # Python 2 "except ..., name" syntax (pre-"as"); kept as-is.
    except TypeError, te:
        if self.colNotIndexable_re.search(str(te)):
            raise common.SkipTest(
                "Columns of this type can not be indexed."
            )
        raise
def detect_cinder_install():
    """Locate the Cinder installation directory.

    Raises EnvironmentError when no installation can be found."""
    # Try the well-known installation directories first.
    for path in LOCATIONS:
        if os.path.isdir(path):
            return path
    else:
        # for/else: no standard location matched -- fall back to a
        # filesystem search for the Datera iSCSI driver file.
        result = None
        try:
            vprint("Normal cinder install not found, searching for driver")
            result = exe("sudo find / -name datera_iscsi.py")
            # Reject empty results and hits inside the cinder-driver repo
            # checkout itself.
            if not result or result.isspace() or "cinder-driver" in result:
                raise ValueError("Cinder installation not found")
            # Strip the driver-file suffix to get the install root.
            return result.strip().replace(
                "/volume/drivers/datera/datera_iscsi.py", "")
        except (subprocess.CalledProcessError, ValueError):
            raise EnvironmentError(
                "Cinder installation not found. Usual locations: {}"
                "".format(LOCATIONS))
def check_cpufreq(config):
    """Check that cpupower is installed and a 'performance' frequency
    governor is available on the system."""
    vprint("Checking cpufreq settings")
    if not exe_check("which cpupower"):
        if get_os() != UBUNTU:
            # RHEL puts this stuff in kernel-tools
            fix = "yum install kernel-tools"
        else:
            version = exe("uname -r").strip()
            fix = "apt-get install linux-tools-{}".format(version)
        return ff("cpupower is not installed", "20CEE732", fix=fix)
    has_performance = exe_check(
        "cpupower frequency-info --governors | "
        "grep performance",
        err=False)
    if not has_performance:
        fix = ("No-fix -- if this system is a VM governors might not be "
               "available and this check can be ignored")
        return ff("No 'performance' governor found for system", "333FBD45",
                  fix=fix)
def check_mtu_l3(ip, config):
    """
    We just care that the packet gets there.  Fragmentation is gonna
    happen with L3 routing.

    Pings ``ip`` with a large (32k) payload, falling back to a plain
    ping to distinguish "unreachable" from "fragmentation broken".
    """
    vprint("Performing L3 MTU check")
    sif = get_interface_for_ip(ip)
    if not sif:
        return ff(
            "Couldn't find interface with network matching ip {}"
            "".format(ip), "710BFC7E")
    # Ping check
    if not exe_check("ping -s 32000 -c 2 -W 1 {}".format(ip)):
        # Large ping failed; a plain ping tells us whether the host is
        # reachable at all.
        if not exe_check("ping -c 2 -W 1 {}".format(ip)):
            return ff("Could not ping interface [{}]".format(ip),
                      "EC2D3621")
        # BUGFIX: the original implicit string concatenation produced
        # "packetfragmentation" (missing space); sibling check_mtu_normal
        # has the correct spacing.
        ff(
            "Could not ping interface [{}] with large (32k) packet size, "
            "packet fragmentation may not be working correctly".format(ip),
            "A4CA0D72")
def check_mtu_normal(name, ip, config):
    """Compare the local interface MTU for ``ip`` with the corresponding
    cluster-side interface MTU (name is "MGMT", "VIP1" or "VIP2"), then
    verify large packets actually get through."""
    vprint("Performing MTU check")
    # Map the client-side name to the cluster-side interface label.
    cname = iface_dict[name]
    sif = get_interface_for_ip(ip)
    if not sif:
        return ff(
            "Couldn't find interface with network matching ip {}"
            "".format(ip), "710BFC7E")
    try:
        # Parse the MTU out of `ip addr show` output.
        match = MTU_RE.match(exe("ip ad show {} | grep mtu".format(sif)))
    except subprocess.CalledProcessError:
        return ff("Couldn't find client {} interface MTU".format(name),
                  "CBF8CC4C")
    if not match:
        return ff("Couldn't find client {} interface MTU".format(name),
                  "CBF8CC4C")
    local_mtu = match.groups(1)[0]
    cluster_mtu = None
    api = config['api']
    # Fetch the cluster-side MTU for the matching interface.
    # NOTE(review): VIP1 and VIP2 read the identical API path
    # (access_vip, network_paths[0]) -- VIP2 probably should read
    # network_paths[1]; confirm against the Datera API before changing.
    if name == "MGMT":
        cluster_mtu = api.system.network.mgmt_vip.get(
        )['network_paths'][0]['mtu']
    elif name == "VIP1":
        cluster_mtu = api.system.network.get(
        )['access_vip']['network_paths'][0]['mtu']
    elif name == "VIP2":
        cluster_mtu = api.system.network.get(
        )['access_vip']['network_paths'][0]['mtu']
    if not cluster_mtu:
        return ff("Couldn't find cluster {} interface MTU".format(cname),
                  "057AF23D")
    if str(local_mtu) != str(cluster_mtu):
        ff(
            "Local interface {} MTU does not match cluster {} interface MTU "
            "[{} != {}]".format(sif, cname, local_mtu, cluster_mtu),
            "D7F667BC")
    # Ping check
    if not exe_check("ping -s 32000 -c 2 -W 1 {}".format(ip)):
        ff(
            "Could not ping interface with large (32k) packet size, packet "
            "fragmentation may not be working correctly", "A4CA0D72")
def check_block_devices(config):
    """Check that the 'noop' elevator is configured in the default GRUB
    file (either GRUB_CMDLINE_LINUX_DEFAULT or GRUB_CMDLINE_LINUX)."""
    vprint("Checking block device settings")
    grub = "/etc/default/grub"
    if not os.path.exists(grub):
        return ff("Could not find default grub file at {}".format(grub),
                  "6F7B6A25")
    with io.open(grub, "r") as f:
        data = f.readlines()
    # BUGFIX: materialize the matches in lists -- under Python 3 ``filter``
    # returns a lazy iterator, so the ``len()`` and ``[0]`` uses below
    # raised TypeError.
    line = [x for x in data
            if x.startswith("GRUB_CMDLINE_LINUX_DEFAULT=")]
    line2 = [x for x in data if x.startswith("GRUB_CMDLINE_LINUX=")]
    if len(line) != 1 and len(line2) != 1:
        return ff(
            "GRUB_CMDLINE_LINUX_DEFAULT and GRUB_CMDLINE_LINUX are"
            " missing from GRUB file", "A65B6D97")
    # Fail only when neither present line carries elevator=noop.
    if ((len(line) > 0 and "elevator=noop" not in line[0]) and
            (len(line2) > 0 and "elevator=noop" not in line2[0])):
        fix = ("Add 'elevator=noop' to /etc/default/grub in the "
               "'GRUB_CMDLINE_LINUX_DEFAULT' line")
        return ff("Scheduler is not set to noop", "47BB5083", fix=fix)
def check_arp(config):
    """Check the ARP announce/ignore sysctls and the route garbage
    collection interval recommended for Datera clients."""
    vprint("Checking ARP settings")
    # (expected sysctl line, fix command, failure message, error code)
    arp_checks = (
        ("net.ipv4.conf.all.arp_announce = 2",
         "sysctl net.ipv4.conf.all.arp_announce=2",
         "net.ipv4.conf.all.arp_announce != 2 in sysctl", "9000C3B6"),
        ("net.ipv4.conf.all.arp_ignore = 1",
         "sysctl net.ipv4.conf.all.arp_ignore=1",
         "net.ipv4.conf.all.arp_ignore != 1 in sysctl", "BDB4D5D8"),
    )
    for expected, fix, msg, code in arp_checks:
        if not exe_check(
                "sysctl --all 2>/dev/null | "
                "grep '{}'".format(expected),
                err=False):
            ff(msg, code, fix=fix)
    gcf = "/proc/sys/net/ipv4/route/gc_interval"
    gc = int(exe("cat {}".format(gcf)))
    if gc != 5:
        fix = "echo 5 > {}".format(gcf)
        ff("{} is currently set to {}".format(gcf, gc), "A06CD19F", fix=fix)
def fix_block_devices_1():
    """Updates GRUB with noop scheduling, requires restart"""
    vprint("Fixing block device settings")
    grub = "/etc/default/grub"
    bgrub = "/etc/default/grub.bak.{}".format(str(uuid.uuid4())[:4])
    vprint("Backing up grub default file to {}".format(bgrub))
    shutil.copyfile(grub, bgrub)
    vprint("Writing new grub default file")
    data = []
    with io.open(grub, "r+") as f:
        for line in f.readlines():
            if ("GRUB_CMDLINE_LINUX_DEFAULT=" in line
                    and "elevator=noop" not in line):
                # Splice 'elevator=noop' just before the closing quote of
                # the option string; prepend a space only when other
                # options already exist (i.e. the quotes aren't adjacent).
                i = line.rindex("\"")
                lline = list(line)
                if lline[i-1] == "\"":
                    lline.insert(i, "elevator=noop")
                else:
                    lline.insert(i, " elevator=noop")
                line = "".join(lline)
            data.append(line)
    with io.open(grub, "w+") as f:
        print(data)
        f.writelines(data)
    # Regenerate the live grub config from the updated defaults file.
    if get_os() == UBUNTU:
        exe("update-grub2")
    else:
        exe("grub2-mkconfig -o /boot/grub2/grub.cfg")
def install_volume_driver(cinder_driver, ip, username, password, d_version):
    """Installs Cinder volume driver"""
    # Copy files to install location
    repo, loc = clone_driver(cinder_driver, d_version)
    dloc = os.path.join(loc, "volume/drivers")
    exe("cp -r {}/src/datera/ {}".format(repo, dloc))
    # Modify etc file
    with io.open(ETC, 'r') as f:
        data = f.readlines()
    # BUGFIX: the original called ``del data[index]`` while iterating
    # ``enumerate(data)``, which skips the line after every deletion and
    # leaves a stale [DEFAULT] index.  Build a filtered copy instead.
    filtered = []
    insert = 0
    for line in data:
        # Drop any pre-existing copies of the settings we re-add below.
        if any((elem in line
                for elem in ("enabled_backends", "verbose", "debug"))):
            continue
        if "DEFAULT" in line:
            insert = len(filtered)  # remember where [DEFAULT] ended up
        filtered.append(line)
    data = filtered
    # Place lines under [DEFAULT]
    data.insert(insert + 1, "enabled_backends = datera")
    data.insert(insert + 1, "verbose = True")
    data.insert(insert + 1, "debug = True")
    # Write [datera] section
    tdata = ETC_TEMPLATE.format(ip=ip, login=username, password=password)
    data.extend(tdata.splitlines())
    shutil.copyfile(ETC, ETC + ".bak.{}".format(str(uuid.uuid4())[:4]))
    with io.open(ETC, 'w') as f:
        for line in data:
            line = line.strip()
            f.write(line)
            f.write("\n")
    # Restart cinder-volume service
    restart = detect_service_restart_cmd("cinder-volume")
    vprint("Restarting the cinder-volume service")
    if loc == DEVSTACK_INSTALL:
        vprint("Detected devstack")
    else:
        vprint("Detected non-devstack")
        # NOTE(review): reconstructed placement -- the restart appears to
        # run only for non-devstack installs; confirm devstack handling.
        exe(restart)
def test_method(self):
    """Evaluate the enclosing query ``cond`` with a pure-Python reference
    pass and via PyTables (in-kernel/indexed), over both a simple and a
    nested column, and check the PyTables side against the reference."""
    vprint("* Condition is ``%s``." % cond)
    # Replace bitwise operators with their logical counterparts.
    pycond = cond
    for (ptop, pyop) in [('&', 'and'), ('|', 'or'), ('~', 'not')]:
        pycond = pycond.replace(ptop, pyop)
    pycond = compile(pycond, '<string>', 'eval')
    table = self.table
    self.createIndexes(colname, ncolname, 'c_idxextra')
    table_slice = dict(start=1, stop=table.nrows - 5, step=3)
    rownos, fvalues = None, None
    # Test that both simple and nested columns work as expected.
    # Knowing how the table is filled, results must be the same.
    for acolname in [colname, ncolname]:
        # First the reference Python version.
        pyrownos, pyfvalues, pyvars = [], [], condvars.copy()
        for row in table.iterrows(**table_slice):
            pyvars[colname] = row[acolname]
            pyvars['c_extra'] = row['c_extra']
            pyvars['c_idxextra'] = row['c_idxextra']
            try:
                isvalidrow = eval(pycond, {}, pyvars)
            except TypeError:
                raise common.SkipTest(
                    "The Python type does not support the operation."
                )
            if isvalidrow:
                pyrownos.append(row.nrow)
                pyfvalues.append(row[acolname])
        pyrownos = numpy.array(pyrownos)  # row numbers already sorted
        pyfvalues = numpy.array(pyfvalues, dtype=sctype)
        pyfvalues.sort()
        vprint(
            "* %d rows selected by Python from ``%s``."
            % (len(pyrownos), acolname)
        )
        if rownos is None:
            rownos = pyrownos  # initialise reference results
            fvalues = pyfvalues
        else:
            self.assert_(numpy.all(pyrownos == rownos))  # check
            self.assert_(numpy.all(pyfvalues == fvalues))
        # Then the in-kernel or indexed version.
        ptvars = condvars.copy()
        ptvars[colname] = table.colinstances[acolname]
        ptvars['c_extra'] = table.colinstances['c_extra']
        ptvars['c_idxextra'] = table.colinstances['c_idxextra']
        try:
            isidxq = table.willQueryUseIndexing(cond, ptvars)
            # Query twice to trigger possible query result caching.
            ptrownos = [
                table.getWhereList(
                    cond, condvars, sort=True, **table_slice
                )
                for _ in range(2)
            ]
            ptfvalues = [
                table.readWhere(
                    cond, condvars, field=acolname, **table_slice
                )
                for _ in range(2)
            ]
        # Python 2 "except ..., name" syntax (pre-"as"); kept as-is.
        except TypeError, te:
            if self.condNotBoolean_re.search(str(te)):
                raise common.SkipTest("The condition is not boolean.")
            raise
        except NotImplementedError:
            raise common.SkipTest(
                "The PyTables type does not support the operation."
            )
def check_single_volume_performance_fio_4k(config):
    """Run the single-volume FIO performance check (requires fio)."""
    vprint("Checking FIO performance, single volume")
    fio_present = exe_check("which fio")
    if not fio_present:
        ff("FIO is not installed", "0BB2848F")
    # Datera API client; NOTE(review): unused in the visible portion of
    # this function -- presumably consumed by the benchmark body.
    api = config['api']
def create_test_method(type_, op, extracond):
    """Build a test method that checks querying columns of ``type_`` with
    operator ``op``, optionally extended with ``extracond``."""
    sctype = sctype_from_type[type_]
    # Compute the value of bounds.
    condvars = {
        'bound': right_bound,
        'lbound': left_bound,
        'rbound': right_bound
    }
    for (bname, bvalue) in condvars.items():
        if type_ == 'string':
            bvalue = str_format % bvalue
        bvalue = nxtype_from_type[type_](bvalue)
        condvars[bname] = bvalue
    # Compute the name of columns.
    colname = 'c_%s' % type_
    ncolname = 'c_nested/%s' % colname
    # Compute the query condition.
    if not op:  # as is
        cond = colname
    elif op == '~':  # unary
        cond = '~(%s)' % colname
    elif op == '<':  # binary variable-constant
        cond = '%s %s %s' % (colname, op, repr(condvars['bound']))
    elif type(op) is tuple:  # double binary variable-constant
        cond = ('(lbound %s %s) & (%s %s rbound)'
                % (op[0], colname, colname, op[1]))
    else:  # binary variable-variable
        cond = '%s %s bound' % (colname, op)
    if extracond:
        cond = '(%s) %s' % (cond, extracond)

    def test_method(self):
        # Compare a pure-Python reference evaluation of ``cond`` against
        # the PyTables in-kernel/indexed query, on simple + nested cols.
        vprint("* Condition is ``%s``." % cond)
        # Replace bitwise operators with their logical counterparts.
        pycond = cond
        for (ptop, pyop) in [('&', 'and'), ('|', 'or'), ('~', 'not')]:
            pycond = pycond.replace(ptop, pyop)
        pycond = compile(pycond, '<string>', 'eval')
        table = self.table
        self.createIndexes(colname, ncolname, 'c_idxextra')
        table_slice = dict(start=1, stop=table.nrows - 5, step=3)
        rownos, fvalues = None, None
        # Test that both simple and nested columns work as expected.
        # Knowing how the table is filled, results must be the same.
        for acolname in [colname, ncolname]:
            # First the reference Python version.
            pyrownos, pyfvalues, pyvars = [], [], condvars.copy()
            for row in table.iterrows(**table_slice):
                pyvars[colname] = row[acolname]
                pyvars['c_extra'] = row['c_extra']
                pyvars['c_idxextra'] = row['c_idxextra']
                try:
                    isvalidrow = eval(pycond, {}, pyvars)
                except TypeError:
                    raise common.SkipTest(
                        "The Python type does not support the operation.")
                if isvalidrow:
                    pyrownos.append(row.nrow)
                    pyfvalues.append(row[acolname])
            pyrownos = numpy.array(pyrownos)  # row numbers already sorted
            pyfvalues = numpy.array(pyfvalues, dtype=sctype)
            pyfvalues.sort()
            vprint("* %d rows selected by Python from ``%s``."
                   % (len(pyrownos), acolname))
            if rownos is None:
                rownos = pyrownos  # initialise reference results
                fvalues = pyfvalues
            else:
                self.assert_(numpy.all(pyrownos == rownos))  # check
                self.assert_(numpy.all(pyfvalues == fvalues))
            # Then the in-kernel or indexed version.
            ptvars = condvars.copy()
            ptvars[colname] = table.colinstances[acolname]
            ptvars['c_extra'] = table.colinstances['c_extra']
            ptvars['c_idxextra'] = table.colinstances['c_idxextra']
            try:
                isidxq = table.willQueryUseIndexing(cond, ptvars)
                # Query twice to trigger possible query result caching.
                ptrownos = [
                    table.getWhereList(cond, condvars, sort=True,
                                       **table_slice)
                    for _ in range(2)
                ]
                ptfvalues = [
                    table.readWhere(cond, condvars, field=acolname,
                                    **table_slice)
                    for _ in range(2)
                ]
            # Python 2 "except ..., name" syntax (pre-"as"); kept as-is.
            except TypeError, te:
                if self.condNotBoolean_re.search(str(te)):
                    raise common.SkipTest("The condition is not boolean.")
                raise
            except NotImplementedError:
                raise common.SkipTest(
                    "The PyTables type does not support the operation.")
            for ptfvals in ptfvalues:  # row numbers already sorted
                ptfvals.sort()
            vprint("* %d rows selected by PyTables from ``%s``"
                   % (len(ptrownos[0]), acolname), nonl=True)
            vprint("(indexing: %s)." % ["no", "yes"][bool(isidxq)])
            self.assert_(numpy.all(ptrownos[0] == rownos))
            self.assert_(numpy.all(ptfvalues[0] == fvalues))
            # The following test possible caching of query results.
            self.assert_(numpy.all(ptrownos[0] == ptrownos[1]))
            self.assert_(numpy.all(ptfvalues[0] == ptfvalues[1]))
def test_method(self):
    """Evaluate the enclosing query ``cond`` with a pure-Python reference
    pass and via PyTables (in-kernel/indexed), over both a simple and a
    nested column, and check the PyTables side against the reference."""
    vprint("* Condition is ``%s``." % cond)
    # Replace bitwise operators with their logical counterparts.
    pycond = cond
    for (ptop, pyop) in [('&', 'and'), ('|', 'or'), ('~', 'not')]:
        pycond = pycond.replace(ptop, pyop)
    pycond = compile(pycond, '<string>', 'eval')
    table = self.table
    self.createIndexes(colname, ncolname, 'c_idxextra')
    table_slice = dict(start=1, stop=table.nrows - 5, step=3)
    rownos, fvalues = None, None
    # Test that both simple and nested columns work as expected.
    # Knowing how the table is filled, results must be the same.
    for acolname in [colname, ncolname]:
        # First the reference Python version.
        pyrownos, pyfvalues, pyvars = [], [], condvars.copy()
        for row in table.iterrows(**table_slice):
            pyvars[colname] = row[acolname]
            pyvars['c_extra'] = row['c_extra']
            pyvars['c_idxextra'] = row['c_idxextra']
            try:
                isvalidrow = eval(pycond, {}, pyvars)
            except TypeError:
                raise common.SkipTest(
                    "The Python type does not support the operation.")
            if isvalidrow:
                pyrownos.append(row.nrow)
                pyfvalues.append(row[acolname])
        pyrownos = numpy.array(pyrownos)  # row numbers already sorted
        pyfvalues = numpy.array(pyfvalues, dtype=sctype)
        pyfvalues.sort()
        vprint("* %d rows selected by Python from ``%s``."
               % (len(pyrownos), acolname))
        if rownos is None:
            rownos = pyrownos  # initialise reference results
            fvalues = pyfvalues
        else:
            self.assert_(numpy.all(pyrownos == rownos))  # check
            self.assert_(numpy.all(pyfvalues == fvalues))
        # Then the in-kernel or indexed version.
        ptvars = condvars.copy()
        ptvars[colname] = table.colinstances[acolname]
        ptvars['c_extra'] = table.colinstances['c_extra']
        ptvars['c_idxextra'] = table.colinstances['c_idxextra']
        try:
            isidxq = table.willQueryUseIndexing(cond, ptvars)
            # Query twice to trigger possible query result caching.
            ptrownos = [
                table.getWhereList(cond, condvars, sort=True,
                                   **table_slice)
                for _ in range(2)
            ]
            ptfvalues = [
                table.readWhere(cond, condvars, field=acolname,
                                **table_slice)
                for _ in range(2)
            ]
        # Python 2 "except ..., name" syntax (pre-"as"); kept as-is.
        except TypeError, te:
            if self.condNotBoolean_re.search(str(te)):
                raise common.SkipTest("The condition is not boolean.")
            raise
        except NotImplementedError:
            raise common.SkipTest(
                "The PyTables type does not support the operation.")
def create_test_method(type_, op, extracond):
    """Build a test method that checks querying columns of ``type_`` with
    operator ``op``, optionally extended with ``extracond``."""
    sctype = sctype_from_type[type_]
    # Compute the value of bounds.
    condvars = {
        'bound': right_bound,
        'lbound': left_bound,
        'rbound': right_bound
    }
    for (bname, bvalue) in condvars.items():
        if type_ == 'string':
            bvalue = str_format % bvalue
        bvalue = nxtype_from_type[type_](bvalue)
        condvars[bname] = bvalue
    # Compute the name of columns.
    colname = 'c_%s' % type_
    ncolname = 'c_nested/%s' % colname
    # Compute the query condition.
    if not op:  # as is
        cond = colname
    elif op == '~':  # unary
        cond = '~(%s)' % colname
    elif op == '<':  # binary variable-constant
        cond = '%s %s %s' % (colname, op, repr(condvars['bound']))
    elif type(op) is tuple:  # double binary variable-constant
        cond = (
            '(lbound %s %s) & (%s %s rbound)'
            % (op[0], colname, colname, op[1])
        )
    else:  # binary variable-variable
        cond = '%s %s bound' % (colname, op)
    if extracond:
        cond = '(%s) %s' % (cond, extracond)

    def test_method(self):
        # Compare a pure-Python reference evaluation of ``cond`` against
        # the PyTables in-kernel/indexed query, on simple + nested cols.
        vprint("* Condition is ``%s``." % cond)
        # Replace bitwise operators with their logical counterparts.
        pycond = cond
        for (ptop, pyop) in [('&', 'and'), ('|', 'or'), ('~', 'not')]:
            pycond = pycond.replace(ptop, pyop)
        pycond = compile(pycond, '<string>', 'eval')
        table = self.table
        self.createIndexes(colname, ncolname, 'c_idxextra')
        table_slice = dict(start=1, stop=table.nrows - 5, step=3)
        rownos, fvalues = None, None
        # Test that both simple and nested columns work as expected.
        # Knowing how the table is filled, results must be the same.
        for acolname in [colname, ncolname]:
            # First the reference Python version.
            pyrownos, pyfvalues, pyvars = [], [], condvars.copy()
            for row in table.iterrows(**table_slice):
                pyvars[colname] = row[acolname]
                pyvars['c_extra'] = row['c_extra']
                pyvars['c_idxextra'] = row['c_idxextra']
                try:
                    isvalidrow = eval(pycond, {}, pyvars)
                except TypeError:
                    raise common.SkipTest(
                        "The Python type does not support the operation."
                    )
                if isvalidrow:
                    pyrownos.append(row.nrow)
                    pyfvalues.append(row[acolname])
            pyrownos = numpy.array(pyrownos)  # row numbers already sorted
            pyfvalues = numpy.array(pyfvalues, dtype=sctype)
            pyfvalues.sort()
            vprint(
                "* %d rows selected by Python from ``%s``."
                % (len(pyrownos), acolname)
            )
            if rownos is None:
                rownos = pyrownos  # initialise reference results
                fvalues = pyfvalues
            else:
                self.assert_(numpy.all(pyrownos == rownos))  # check
                self.assert_(numpy.all(pyfvalues == fvalues))
            # Then the in-kernel or indexed version.
            ptvars = condvars.copy()
            ptvars[colname] = table.colinstances[acolname]
            ptvars['c_extra'] = table.colinstances['c_extra']
            ptvars['c_idxextra'] = table.colinstances['c_idxextra']
            try:
                isidxq = table.willQueryUseIndexing(cond, ptvars)
                # Query twice to trigger possible query result caching.
                ptrownos = [
                    table.getWhereList(
                        cond, condvars, sort=True, **table_slice
                    )
                    for _ in range(2)
                ]
                ptfvalues = [
                    table.readWhere(
                        cond, condvars, field=acolname, **table_slice
                    )
                    for _ in range(2)
                ]
            # Python 2 "except ..., name" syntax (pre-"as"); kept as-is.
            except TypeError, te:
                if self.condNotBoolean_re.search(str(te)):
                    raise common.SkipTest("The condition is not boolean.")
                raise
            except NotImplementedError:
                raise common.SkipTest(
                    "The PyTables type does not support the operation."
                )
            for ptfvals in ptfvalues:  # row numbers already sorted
                ptfvals.sort()
            vprint(
                "* %d rows selected by PyTables from ``%s``"
                % (len(ptrownos[0]), acolname), nonl=True
            )
            vprint("(indexing: %s)." % ["no", "yes"][bool(isidxq)])
            self.assert_(numpy.all(ptrownos[0] == rownos))
            self.assert_(numpy.all(ptfvalues[0] == fvalues))
            # The following test possible caching of query results.
            self.assert_(numpy.all(ptrownos[0] == ptrownos[1]))
            self.assert_(numpy.all(ptfvalues[0] == ptfvalues[1]))
def check_requirements():
    """Check that every binary in REQUIREMENTS is on PATH.

    Returns a 'missing <binary>' message for the first absent binary,
    or None when all requirements are satisfied.
    """
    vprint("Checking Requirements")
    for binary in REQUIREMENTS:
        if not exe_check("which {}".format(binary), err=False):
            # BUGFIX: include a separator -- the original returned e.g.
            # 'missingfio' instead of 'missing fio'.
            return "missing " + binary