def test_isnan(self):
    import cmath
    assert not cmath.isnan(2+3j)
    assert cmath.isnan(float("nan"))
    assert cmath.isnan(complex("nanj"))
    assert cmath.isnan(complex("inf+nanj"))
    assert cmath.isnan(complex("nan+infj"))
def assertPreciseEqual(self, first, second, prec='exact', msg=None):
    """
    Test that two scalars have similar types and are equal up to
    a computed precision.
    If the scalars are instances of exact types or if *prec* is
    'exact', they are compared exactly.
    If the scalars are instances of inexact types (float, complex)
    and *prec* is not 'exact', then the number of significant bits
    is computed according to the value of *prec*: 53 bits if *prec*
    is 'double', 24 bits if *prec* is 'single'.  Any value of *prec*
    other than 'exact', 'single' or 'double' will raise an error.
    """
    for tp in self._exact_typesets:
        # One or another could be the expected, the other the actual;
        # test both.
        if isinstance(first, tp) or isinstance(second, tp):
            self.assertIsInstance(first, tp)
            self.assertIsInstance(second, tp)
            exact_comparison = True
            break
    else:
        for tp in self._approx_typesets:
            if isinstance(first, tp) or isinstance(second, tp):
                self.assertIsInstance(first, tp)
                self.assertIsInstance(second, tp)
                exact_comparison = False
                break
        else:
            # Assume these are non-numeric types: we will fall back
            # on regular unittest comparison.
            self.assertIs(first.__class__, second.__class__)
            exact_comparison = True

    # If a Numpy scalar, check the dtype is exactly the same too
    # (required for datetime64 and timedelta64).
    if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
        self.assertEqual(first.dtype, second.dtype)

    try:
        if cmath.isnan(first) and cmath.isnan(second):
            # The NaNs will compare unequal, skip regular comparison
            return
    except TypeError:
        # Not floats.
        pass
    if not exact_comparison and prec != 'exact':
        if prec == 'single':
            k = 2**-24
        elif prec == 'double':
            k = 2**-53
        else:
            raise ValueError("unsupported precision %r" % (prec,))
        delta = k * (abs(first) + abs(second))
        self.assertAlmostEqual(first, second, delta=delta, msg=msg)
    else:
        self.assertEqual(first, second, msg=msg)
def test_complex():
    class X(meta.Entity):
        p = meta.Complex()

    success = [False, True, 0, 1, long_type(1), 1.0, 0.1,
               MAX_SAFE_INTEGER + 1, -MAX_SAFE_INTEGER - 1,
               1 + 1j, 1j, [1, 1], (1, 1)]
    failure = ['한', u'한', b'\xed\x95\x9c', [1], (1,), ['a'], ('b',),
               {'a': 1}, entity, float('nan'), float('inf'), float('-inf'),
               [], [1], [1, 2, 3], [1, 'a']]

    x = X()
    for value in success:
        x.p = value
        assert isinstance(x.p, complex)
        if isinstance(value, (tuple, list)):
            assert x.p == complex(*value)
        else:
            assert x.p == value
        x.validate()
        encoded = X.p.dump(x.p)
        decoded = X.p.load(encoded)
        check_json(encoded)
        if isinstance(value, (tuple, list)):
            assert decoded == complex(*value)
        else:
            assert decoded == value
    for value in failure:
        with pytest.raises(ValueError):
            x.p = value

    #
    # allow_nan
    #

    class X(meta.Entity):
        p = meta.Complex(allow_nan=True)

    x = X()
    nans = (float('nan'), float('inf'), float('-inf'))
    values = list(nans)
    values.extend(complex(1, x) for x in nans)
    values.extend(complex(x, 1) for x in nans)
    values.extend(complex(x, y) for x in nans for y in nans)
    for value in values:
        x.p = value
        if cmath.isnan(value):
            assert cmath.isnan(x.p)
        else:
            assert x.p == value
def test_isnan(self):
    self.failIf(cmath.isnan(1))
    self.failIf(cmath.isnan(1j))
    self.failIf(cmath.isnan(INF))
    self.assert_(cmath.isnan(NAN))
    self.assert_(cmath.isnan(complex(NAN, 0)))
    self.assert_(cmath.isnan(complex(0, NAN)))
    self.assert_(cmath.isnan(complex(NAN, NAN)))
    self.assert_(cmath.isnan(complex(NAN, INF)))
    self.assert_(cmath.isnan(complex(INF, NAN)))
def test_isnan(self):
    self.assertFalse(cmath.isnan(1))
    self.assertFalse(cmath.isnan(1j))
    self.assertFalse(cmath.isnan(INF))
    self.assertTrue(cmath.isnan(NAN))
    self.assertTrue(cmath.isnan(complex(NAN, 0)))
    self.assertTrue(cmath.isnan(complex(0, NAN)))
    self.assertTrue(cmath.isnan(complex(NAN, NAN)))
    self.assertTrue(cmath.isnan(complex(NAN, INF)))
    self.assertTrue(cmath.isnan(complex(INF, NAN)))
def interpolation_rule(idx, value):
    """
    Compute and return an interpolated value for a detected NaN,
    following a fixed rule.
    :param idx: index of the NaN - type: integer
    :param value: value data - type: np.array - shape: (length, 1)
    :return: the computed interpolation value - type: float
    """
    denominator = 1
    numerator = 0.0  # stays 0 if every remaining value is also NaN
    for i in range(idx, len(value)):
        if math.isnan(value[i]):
            denominator += 1
        else:
            numerator = value[i] - value[idx - 1]
            break
    interpolated_value = value[idx - 1] + numerator / denominator
    return interpolated_value
def nan_score_filter(file_1, file_2, score):
    if math.isnan(score):
        print('nan covariance occurred between')
        print('\t' + file_1)
        print('\t' + file_2)
    return score
def converge(self, z):
    self.total_queries += 1
    go = self.func.eval_NR
    epsilon = self.epsilon
    steps = 0
    while steps < self.max_steps:
        if cmath.isnan(z):
            break
        for i, t in enumerate(self.targets):
            if abs(z - t) < epsilon:
                c = self.target_c[i]
                fsteps = steps - log(log(c * abs(z - t)) / log(c * epsilon)) / log(2)
                self.total_steps += steps
                self.most_steps = max(self.most_steps, steps)
                return (i, fsteps)
        z = z - go(z)
        steps += 1
    self.total_steps += steps
    self.num_failed += 1
    return None
def is_integral(x):
    """Return whether the argument is equal to its integer part."""
    if isinstance(x, complex):
        if x.imag:
            return False
        x = x.real
    return not cmath.isinf(x) and not cmath.isnan(x) and x == int(x)
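# A minimal usage sketch for is_integral() above (not from the original source);
# it assumes the snippet's cmath import is present. The values are illustrative.
def demo_is_integral():
    assert is_integral(3.0)               # equal to its integer part
    assert is_integral(2 + 0j)            # complex with zero imaginary part
    assert not is_integral(3.5)           # fractional part present
    assert not is_integral(2 + 1j)        # nonzero imaginary part
    assert not is_integral(float("inf"))  # infinities are rejected
    assert not is_integral(float("nan"))  # NaNs are rejected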
def scaleZ_inv(T):
    try:
        zx = Z(1./T)
    except:
        return MAXV
    if isnan(zx) or isinf(zx):
        return MAXV
    vx = abs(zx)
    s = log(vx)/10
    return zx * (s/vx)
def assertEqualWithNaN(
        self,
        v1: object,
        v2: object,
) -> None:
    if ((isinstance(v1, float) or isinstance(v1, np.floating))
            and np.isnan(v1)
            and (isinstance(v2, float) or isinstance(v2, np.floating))
            and np.isnan(v2)):
        return
    if ((isinstance(v1, complex) or isinstance(v1, np.complexfloating))
            and cmath.isnan(v1)
            and (isinstance(v2, complex) or isinstance(v2, np.complexfloating))
            and cmath.isnan(v2)  # type: ignore
            ):
        return
    return self.assertEqual(v1, v2)
def computeMean(dataset, sexname, column='Title'):
    sum = 0
    count = 0
    for i in range(len(dataset[column])):
        if dataset[column][i] == sexname:
            if not cmath.isnan(dataset['Age'][i]):
                sum += float(dataset['Age'][i])
                count += 1
    aver = sum / count
    return aver
def scaleZ_inv(beta):
    try:
        zx = Z(1./beta)
    except:
        return MAXV
    if isnan(zx) or isinf(zx):
        return MAXV
    vx = abs(zx)
    s = log(vx)/5
    #s = vx
    return zx * (s/vx)
def convert(self, value):
    if isinstance(value, numbers.Rational):
        return self._searchvalue(float(value))
    else:
        if cmath.isinf(value):
            return self.inf()
        elif cmath.isnan(value):
            raise Exception("cannot cast nan to pnum")
        else:
            return self._searchvalue(value)
def float32_to_bfloat16(fval: float, truncate: bool = False) -> int:
    ival = int.from_bytes(struct.pack('<f', fval), 'little')
    if truncate:
        return ival >> 16
    # NaN requires at least 1 significand bit set
    if isnan(fval):
        return 0x7FC0  # sign=0, exp=all-ones, sig=0b1000000
    # drop bottom 16-bits
    # round remaining bits using round-to-nearest-even
    round = ((ival >> 16) & 1) + 0x7fff
    return (ival + round) >> 16
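# Minimal sketch exercising float32_to_bfloat16() above (assumes the snippet's
# `struct` import and an `isnan` from math are in scope); the expected constants
# below were worked out by hand from the IEEE-754 bit layout.
def demo_float32_to_bfloat16():
    assert float32_to_bfloat16(1.0) == 0x3F80           # upper 16 bits of 0x3F800000
    assert float32_to_bfloat16(float("nan")) == 0x7FC0  # canonical quiet NaN
    # 1 + 1/256 sits exactly halfway between two bfloat16 values;
    # round-to-nearest-even keeps the even (lower) result.
    assert float32_to_bfloat16(1.00390625) == 0x3F80
    # truncation simply drops the low 16 bits without rounding
    assert float32_to_bfloat16(1.00390625, truncate=True) == 0x3F80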
def current_is_defined(self):
    if self.current == 0:
        return True
    elif isinstance(self.current, float) and math.isnan(self.current):
        return False
    elif isinstance(self.current, complex) and cmath.isnan(self.current):
        return False
    elif self.current:
        return True
    else:
        return False
def changeWeight(self):
    '''
    print "R: " + str(self.R)
    print "A: " + str(self.a)
    print self.bestActionSet
    #2.8e+290
    '''
    #print self.bestActionSet
    for d in range(0, self.rank):
        if self.a[d] == self.bestActionSet[0]:
            #print self.R[d],self.a[d]
            r1 = self.R[d]
            #r1 = max(self.R)
            xj = [0.0 for col in range(0, self.numActions)]
            xj[self.a[d]] = r1 / self.prob[0][self.a[d]]
            for col in range(0, self.numActions):
                #self.weights[0][col] = min(self.weights[0][col]*math.exp(self.gamma*xj[col]/self.numActions),2.8e+290)
                self.weights[0][col] = self.weights[0][col] * math.exp(
                    self.gamma * xj[col] / self.numActions)
            '''
            sum1 = sum(self.weights[0])
            for col in range(0,self.numActions):
                self.weights[0][col] = self.weights[0][col]/sum1
            '''
        #if self.a[0] = self.a[1]:
        elif self.a[d] == self.bestActionSet[1]:
            #min_index = max(range(0,self.rank), key=lambda col: self.R[col])
            #r2 = max(self.R) - self.R[min_index]
            #r2 = self.R[min_index]
            r2 = self.R[d]
            xj = [0.0 for col in range(0, self.numActions)]
            xj[self.a[d]] = r2 / self.prob[1][self.a[d]]
            for col in range(0, self.numActions):
                #self.weights[1][col] = min(self.weights[1][col]*math.exp(self.gamma*xj[col]/self.numActions),2.8e+290)
                self.weights[1][col] = self.weights[1][col] * math.exp(
                    self.gamma * xj[col] / self.numActions)
            '''
            sum1 = sum(self.weights[1])
            for col in range(0,self.numActions):
                self.weights[1][col] = self.weights[1][col]/sum1
            '''
    if isnan(sum(self.prob[0])) == True:
        print(self.R)
        print(self.a)
        print(self.bestActionSet)
        print(self.prob)
        print(self.weights)
        sys.exit(0)
def PassFail(RMSE_pp, RMSE_tt):
    check = 'Failed'
    if ((cmath.isnan(RMSE_pp)) or (cmath.isnan(RMSE_tt))):  # Catch nan errors
        print('NaN Encountered.')
        check = 'Failed'
    elif ((abs(RMSE_pp) >= THRESHOLD) or (abs(RMSE_tt) >= THRESHOLD)):  # Catch if greater than threshold
        print('At least one of the RMSE values is incorrect')
        print('RMSE_pp=%7.5e' % RMSE_pp.real + '+%7.5e' % RMSE_pp.imag + 'j')
        print('RMSE_tt=%7.5e' % RMSE_tt.real + '+%7.5e' % RMSE_tt.imag + 'j')
        check = 'Failed'
    elif ((abs(RMSE_pp) < THRESHOLD) or (abs(RMSE_tt) < THRESHOLD)):  # Check values
        print('RMSE_pp=%7.5e' % RMSE_pp.real + '+%7.5e' % RMSE_pp.imag + 'j')
        print('RMSE_tt=%7.5e' % RMSE_tt.real + '+%7.5e' % RMSE_tt.imag + 'j')
        check = 'Passed'
    else:
        print('Unexpected Error. Review Log Files.')
        check = 'Failed'
    return check
def voltage_is_defined(self):
    # TODO check that Voltage class is compatible with this. otherwise consider modifying it.
    if self.voltage == 0:
        return True
    elif isinstance(self.voltage, float) and math.isnan(self.voltage):
        return False
    elif isinstance(self.voltage, complex) and cmath.isnan(self.voltage):
        return False
    elif self.voltage:
        return True
    else:
        return False
def test_divergence_discharge(self):
    """Test divergence discharge."""
    zo = complex(10, 10)
    we = Well(zo, 2 * cmath.pi, 1)
    z = complex(10, 20)
    div = Well.divergence_discharge(we, z)
    self.assertAlmostEqual(div, float(0))
    z = zo
    div = Well.divergence_discharge(we, z)
    self.assertTrue(cmath.isnan(div))
def call(cb):
    for a in angles:
        res = cb(a)
        aStr = str(a)
        if isinf(res):
            print('angle: ' + aStr + ', value: Infinity')
            return
        elif isnan(res):
            print('angle: ' + aStr + ', value: NaN')
            return
        print('angle: ' + aStr + ', value: ' + str(res))
def PassFail(RMSE_pp, RMSE_tt, RMSE_pp_rel, RMSE_tt_rel, freqs):
    check = 'Passed'
    i = 0
    # Catch nan errors
    for item in freqs:
        i = i + 1
        if ((cmath.isnan(RMSE_pp[item])) or (cmath.isnan(RMSE_tt[item]))):
            print('NaN Encountered.')
            check = 'Failed'
        # Catch if greater than threshold
        elif ((abs(RMSE_pp[item]) >= THRESHOLD) or (abs(RMSE_tt[item]) >= THRESHOLD)):
            print('At least one of the RMSE values is unexpectedly high')
            print(' +------------------------------')
            print('    Phi-Phi Error (Rel. Error) = {0:.4e}'.format(
                RMSE_pp[item].real), '( {0:.4e}'.format(
                RMSE_pp_rel[item].real), ') | RMS Error at', item, 'GHz')
            print('Theta-Theta Error (Rel. Error) = {0:.4e}'.format(
                RMSE_tt[item].real), '( {0:.4e}'.format(
                RMSE_tt_rel[item].real), ') | 100% inc/scatter coverage')
            if (i == len(freqs)):
                print(' +------------------------------')
            check = 'Failed'
        # Check values
        elif ((abs(RMSE_pp[item]) < THRESHOLD) or (abs(RMSE_tt[item]) < THRESHOLD)):
            print(' +------------------------------')
            print('    Phi-Phi Error (Rel. Error) = {0:.4e}'.format(
                RMSE_pp[item].real), '( {0:.4e}'.format(
                RMSE_pp_rel[item].real), ') | RMS Error at', item, 'GHz')
            print('Theta-Theta Error (Rel. Error) = {0:.4e}'.format(
                RMSE_tt[item].real), '( {0:.4e}'.format(
                RMSE_tt_rel[item].real), ') | 100% inc/scatter coverage')
            if (i == len(freqs)):
                print(' +------------------------------')
        else:
            print('Unexpected Error. Review Log Files.')
            check = 'Failed'
    return check
def isnan(x):
    """
    Return True if the real or imaginary part of x is not a number (NaN).
    """
    # try:
    #     return [isnan(xi) for xi in x]
    # except TypeError:
    if isinstance(x, ADF):
        return isnan(x.x)
    else:
        if x.imag:
            return cmath.isnan(x)
        else:
            return math.isnan(x.real)
def sum_window(shared, history_len, future_len, out_arr, window_size,
               forward_window_size, arr_len, offset, min_size):
    """
    This function is to compute the sum for the window
    See `window_kernel` for detailed arguments
    """
    first = False
    s = 0.0
    average_size = 0
    for i in range(arr_len):
        if i + history_len < window_size - 1:
            out_arr[i] = np.nan
        elif future_len - i < forward_window_size + 1:
            out_arr[i] = np.nan
        else:
            if not first:
                for j in range(0, window_size + forward_window_size):
                    if not (cmath.isnan(
                            shared[offset + i - j + forward_window_size])):
                        s += shared[offset + i - j + forward_window_size]
                        average_size += 1
                if average_size >= min_size:
                    out_arr[i] = s
                else:
                    out_arr[i] = np.nan
                first = True
            else:
                if not (cmath.isnan(shared[offset + i + forward_window_size])):
                    s += shared[offset + i + forward_window_size]
                    average_size += 1
                if not (cmath.isnan(shared[offset + i - window_size])):
                    s -= shared[offset + i - window_size]
                    average_size -= 1
                if average_size >= min_size:
                    out_arr[i] = s
                else:
                    out_arr[i] = np.nan
def tt_subscheck(subs):
    """Check whether the given list of subscripts are valid. Used for sptensor"""
    if subs.size == 0:
        return True
    if subs.ndim != 2:
        raise ValueError("Subscript dimensions is incorrect")
    for i in range(0, subs.size // subs[0].size):
        for j in range(0, subs[0].size):
            val = subs[i][j]
            # print(subs[i][j], val)
            if (cmath.isnan(val) or cmath.isinf(val)
                    or val < 0 or val != round(val)):
                raise ValueError("Subscripts must be a matrix of non-negative integers")
    return True
def _load_(self, value, context):
    if isinstance(value, complex):
        pass
    elif isinstance(value, (integer_types, float)):
        value = complex(value)
    elif isinstance(value, (tuple, list)):
        if len(value) != 2:
            raise ValueError()
        if not isinstance(value[0], (integer_types, float)) or not isinstance(value[1], (integer_types, float)):
            raise ValueError()
        value = complex(value[0], value[1])
    else:
        raise ValueError()
    if not self.get_options().allow_nan and (cmath.isnan(value) or cmath.isinf(value)):
        raise ValueError()
    return value
def forward_shift_window(shared, history_len, future_len, out_arr, window_size,
                         forward_window_size, arr_len, offset, min_size):
    """
    This function is to compute the forward element difference.
    See `window_kernel` for detailed arguments
    """
    for i in range(arr_len):
        if i + history_len < window_size - 1:
            out_arr[i] = np.nan
        elif future_len - i < forward_window_size + 1:
            out_arr[i] = np.nan
        else:
            if (cmath.isnan(shared[offset + i + forward_window_size])):
                out_arr[i] = np.nan
            else:
                out_arr[i] = shared[offset + i + forward_window_size]
def tt_sizecheck(size):
    """Check whether the given size is valid. Used for Sptensor"""
    size = np.array(size)
    isOk = True
    if size.ndim != 1:
        isOk = False
    else:
        for i in range(0, len(size)):
            val = size[i]
            if cmath.isnan(val) or cmath.isinf(val) or val <= 0 or val != round(val):
                isOk = False
    if not isOk:
        raise ValueError("size must be a row vector of real positive integers")
    return isOk
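# Small usage sketch for tt_sizecheck() above (assumes its numpy-as-np and
# cmath imports are in scope): valid sizes return True, anything else raises.
def demo_tt_sizecheck():
    assert tt_sizecheck([2, 3, 4])      # positive integers are accepted
    try:
        tt_sizecheck([2, -1, 4])        # non-positive entry -> ValueError
    except ValueError:
        pass
    try:
        tt_sizecheck([2.5, 3])          # non-integer entry -> ValueError
    except ValueError:
        pass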
def interpolation(data_dictionary):
    """
    Interpolate the data entries that contain NaN values.
    :param data_dictionary: data_dictionary containing NaN - type: dictionary
    :return: data_dictionary with the NaN values interpolated - type: dictionary
    """
    for i in range(0, len(data_dictionary['value'])):
        if math.isnan(data_dictionary['value'][i]):
            data_dictionary['value'][i] = interpolation_rule(i, data_dictionary['value'])
    return data_dictionary
def tt_sizecheck(size):
    """Check whether the given size is valid. Used for sptensor"""
    size = numpy.array(size)
    isOk = True
    if size.ndim != 1:
        isOk = False
    else:
        for i in range(0, len(size)):
            val = size[i]
            if cmath.isnan(val) or cmath.isinf(val) or val <= 0 or val != round(val):
                isOk = False
    if not isOk:
        raise ValueError("size must be a row vector of real positive integers")
    return isOk
def tt_subscheck(subs):
    """Check whether the given list of subscripts are valid. Used for sptensor"""
    if subs.size == 0:
        return True
    if subs.ndim != 2:
        raise ValueError("Subscript dimensions is incorrect")
    for i in range(0, subs.size // subs[0].size):
        for j in range(0, subs[0].size):
            val = subs[i][j]
            # print(subs[i][j], val)
            if cmath.isnan(val) or cmath.isinf(val) or val < 0 or val != round(val):
                raise ValueError("Subscripts must be a matrix of non-negative integers")
    return True
def logLiklihood_jit(P_TopicGivenDocument, Co_OccurenceTable,
                     P_TopicGivenDocumentWord, P_WordGivenTopic) -> float:
    ll = 0.0
    NTopics = P_TopicGivenDocumentWord.shape[0]
    NWords = P_TopicGivenDocumentWord.shape[2]
    NDocs = P_TopicGivenDocumentWord.shape[1]
    for N in prange(NDocs):
        for M in range(NWords):
            partial_sum = 0.0
            for K in range(NTopics):
                Nan_test = P_TopicGivenDocumentWord[K, N, M] * np.log(
                    P_WordGivenTopic[M, K] * P_TopicGivenDocument[K, N])
                if not cmath.isnan(Nan_test):
                    partial_sum += Nan_test
            ll += partial_sum * Co_OccurenceTable[N, M]
    return ll
def backward_shift_window(shared, history_len, future_len, out_arr, window_size,
                          forward_window_size, arr_len, offset, min_size):
    """
    This function is to shift elements backward
    See `window_kernel` for detailed arguments
    """
    for i in range(arr_len):
        if i + history_len < window_size - 1:
            out_arr[i] = np.nan
        elif future_len - i < forward_window_size + 1:
            out_arr[i] = np.nan
        else:
            if (cmath.isnan(shared[offset + i - window_size + 1])):
                out_arr[i] = np.nan
            else:
                out_arr[i] = shared[offset + i - window_size + 1]
def conv_window(shared, history_len, out_arr, window_size, arr_len,
                offset, offset2, min_size):
    """
    This function is to do convolution for one thread

    Arguments:
    ------
    shared: numba.cuda.DeviceNDArray
        3 chunks of data are stored in the shared memory
        the first [0, window_size) elements is the chunk of data that is
        necessary to compute the first convolution element.
        then [window_size, window_size + thread_tile * blockDim) elements
        are the inputs allocated for this block of threads
        the last [window_size + thread_tile,
                  window_size + thread_tile + window_size) is to store the
        kernel values
    history_len: int
        total number of historical elements available for this chunk of data
    out_arr: numba.cuda.DeviceNDArray
        output gpu_array of size of `thread_tile`
    window_size: int
        the number of elements in the kernel
    arr_len: int
        the chunk array length, same as `thread_tile`
    offset: int
        indicate the starting index of the chunk array in the shared for
        this thread.
    offset2: int
        indicate the starting position of the weights/kernel array
    min_size: int
        the minimum number of non-na elements
    """
    for i in range(arr_len):
        if i + history_len < window_size - 1:
            out_arr[i] = np.nan
        else:
            s = 0.0
            average_size = 0
            for j in range(0, window_size):
                if not (cmath.isnan(shared[offset + i - j])):
                    s += (shared[offset + i - j] *
                          shared[offset2 + window_size - 1 - j])
                    average_size += 1
            if average_size >= min_size:
                out_arr[i] = s
            else:
                out_arr[i] = np.nan
def changeWeight(self):
    '''
    print self.R
    print self.a
    print self.bestActionSet
    '''
    for d in range(0, self.rank):
        if self.a[d] == self.bestActionSet[0]:
            #print self.R[d],self.a[d]
            r1 = self.R[d]
            xj = [0.0 for col in range(0, self.numActions)]
            xj[self.a[d]] = r1 / self.prob[0][self.a[d]]
            for col in range(0, self.numActions):
                self.weights[0][col] = self.weights[0][col] * math.exp(
                    self.gamma * xj[col] / self.numActions)
        #if self.a[0] = self.a[1]:
        elif self.a[d] == self.bestActionSet[1]:
            r2 = max(self.R) - self.R[d]
            xj = [0.0 for col in range(0, self.numActions)]
            xj[self.a[d]] = r2 / self.prob[1][self.a[d]]
            for col in range(0, self.numActions):
                self.weights[1][col] = self.weights[1][col] * math.exp(
                    self.gamma * xj[col] / self.numActions)
        '''
        else:
            #r2 = max(self.R) - self.R[d]
            r2 = self.R[d]
            xj = [0.0 for col in range(0,self.numActions)]
            xj[self.a[d]] = r2/self.prob[0][self.a[d]]
            for col in range(0,self.numActions):
                self.weights[0][col] = self.weights[0][col]*math.exp(self.gamma*xj[col]/self.numActions)
            xj = [0.0 for col in range(0,self.numActions)]
            xj[self.a[d]] = r2/self.prob[1][self.a[d]]
            for col in range(0,self.numActions):
                self.weights[1][col] = self.weights[1][col]*math.exp(self.gamma*xj[col]/self.numActions)
        '''
    if isnan(sum(self.prob[0])) == True:
        print(self.R)
        print(self.a)
        print(self.bestActionSet)
        sys.exit(0)
def tt_sizecheck(size):
    """Check whether the given size is valid. Used for sptensor"""
    size = numpy.array(size)
    isOk = True
    if (size.ndim != 1):
        isOk = False
    else:
        for i in range(0, len(size)):
            val = size[i]
            if (cmath.isnan(val) or cmath.isinf(val) or val <= 0 or val != round(val)):
                isOk = False
    if (not isOk):
        raise ValueError("size must be a row vector of real positive integers")
    return isOk
def filter_volume(self, events: list) -> list:
    result = []
    for event in events:
        bar = self.get_day_data(event.ticker, self.date_current)
        if cmath.isnan(bar.vol_avg):
            self.log_warn('Skipped event for %s, no volume data' % event.ticker)
            continue
        if bar.vol_avg < self.min_avg_volume:
            self.small_avg_volume_skipped += 1
            self.log_warn(
                'Skipped event for %s, average volume %d < %d' %
                (event.ticker, bar.vol_avg, self.min_avg_volume))
            continue
        result.append(event)
    return result
def isnan(x, /) -> bool:
    '''Return True if x is nan in any way'''
    # C methods
    try:
        return _math.isnan(x)
    except Exception:
        pass
    try:
        return _cmath.isnan(x)
    except Exception:
        pass
    # allow for customized types
    try:
        return bool(x.isnan())
    except Exception:
        pass
    raise TypeError(f'invalid type, type {type(x).__name__}')
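# Hedged usage sketch for the generic isnan() above; it assumes the snippet's
# module aliases `_math` and `_cmath` refer to the math and cmath modules.
def demo_generic_isnan():
    assert isnan(float("nan"))            # handled by _math.isnan
    assert isnan(complex("nan+1j"))       # falls through to _cmath.isnan
    assert not isnan(1.0)

    class HasIsnan:
        def isnan(self):                  # customized-type hook
            return True

    assert isnan(HasIsnan())
    try:
        isnan("not a number")             # no handler -> TypeError
    except TypeError:
        pass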
def _formatter(self, v):
    """Format a complex into a string, showing up to ``precision``
    decimal digits. This function is partly extracted from the
    open_source "FFC: the FEniCS Form Compiler", freely accessible at
    https://bitbucket.org/fenics-project/ffc."""
    f = "%%.%dg" % self.precision
    f_int = "%%.%df" % 1
    eps = 10.0**(-self.precision)
    if not isinstance(v, numbers.Number):
        return v.gencode(not_scope=True)
    elif isnan(v):
        return "NAN"
    elif abs(v.real - round(v.real, 1)) < eps and abs(
            v.imag - round(v.imag, 1)) < eps:
        formatter = f_int
    else:
        formatter = f
    re, im, zero = map(lambda arg: formatter % arg, (v.real, v.imag, 0))
    return re if im == zero else re + ' + ' + im + ' * I'
def _print_nodes(self, nodes, node_id, img, overlaps):
    n = nodes[node_id]
    x = floor(n["x"] + n["width"] / 2)
    val = (n["key_cnt"] * 2 + len(n["children"]) + len(n["next_layer"])) / (2 * 8 + 9 + 8)
    y = n["y"]
    prev_val = img[y][x]
    if isnan(prev_val):
        img[y][x] = val
    else:
        img[y][x] += val
        overlaps[y][x] += 1
    for c in n["children"]:
        self._print_nodes(nodes, c, img, overlaps)
    for c in n["next_layer"]:
        self._print_nodes(nodes, c, img, overlaps)
def _expression_scalar(expr, parameters):
    assert not expr.shape
    if isnan(expr.value):
        return coffee.Symbol("NAN")
    else:
        vr = expr.value.real
        rr = round(vr, 1)
        if rr and abs(vr - rr) < parameters.epsilon:
            vr = rr  # round to nonzero
        vi = expr.value.imag
        # also checks if v is purely real
        if vi == 0.0:
            return coffee.Symbol(("%%.%dg" % parameters.precision) % vr)
        ri = round(vi, 1)
        if ri and abs(vi - ri) < parameters.epsilon:
            vi = ri
        return coffee.Symbol("({real:.{prec}g} + {imag:.{prec}g} * I)".format(
            real=vr, imag=vi, prec=parameters.precision))
def calculate_metabolites_score(self, feature):
    """
    feature: peakel instance with several isotopes
    metabolites: list of metabolites
    """
    for annot in feature.annotations:
        m = annot.metabolite
        # worst case
        if m.isotopic_pattern_neg is None:
            annot.score_isos = 'NA'
            continue
        ip = m.isotopic_pattern_pos if feature.polarity == 1 else m.isotopic_pattern_neg
        isotopic_pattern = [(float(a), float(b)) for a, b in eval(ip)]
        worst_rmsd, worst_mass_diff, peakel_index = self._calculate_worst_cases(
            feature, isotopic_pattern)
        # interpol_worst_rmsd, interpol_worst_mass_diff = 1.0, 1.0
        # as we interpolate al line y = x we use directly the result and pass
        # it to the model
        mass_diff = calculate_mass_diff_da(feature, m.mono_mass)
        interpol_mass_diff = mass_diff / worst_mass_diff
        ponderated_mass_diff = self.transform_score(
            self.model(interpol_mass_diff)) * self.metrics[1][1]
        rmsd = self._calculate_rmsd_2(feature, peakel_index, isotopic_pattern)
        if isnan(rmsd) or rmsd == 0.0 or worst_rmsd == 0.0:
            # metab_with_score.append((m, ponderated_mass_diff))
            annot.score_isos = ponderated_mass_diff
            continue
        interpol_rmsd = rmsd / worst_rmsd
        ponderated_rmsd = self.transform_score(
            self.model(interpol_rmsd)) * self.metrics[0][1]
        final_score = (ponderated_mass_diff + ponderated_rmsd) / (
            self.metrics[0][1] + self.metrics[1][1])
        annot.score_isos = final_score
def tt_subscheck(subs):
    """Check whether the given list of subscripts are valid. Used for sptensor"""
    isOk = True
    if subs.size == 0:
        isOk = True
    elif subs.ndim != 2:
        isOk = False
    else:
        for i in range(0, subs.size // subs[0].size):
            for j in range(0, subs[0].size):
                val = subs[i][j]
                if (cmath.isnan(val) or cmath.isinf(val)
                        or val < 0 or val != round(val)):
                    isOk = False
    if not isOk:
        raise ValueError("Subscripts must be a matrix of non-negative integers")
    return isOk
def tt_subscheck(my_subs):
    """Check whether the given list of subscripts are valid. Used for Sptensor"""
    isOk = True
    if my_subs.size == 0:
        isOk = True
    elif my_subs.ndim != 2:
        isOk = False
    else:
        for i in range(0, my_subs.size // my_subs[0].size):
            for j in range(0, my_subs[0].size):
                val = my_subs[i][j]
                if cmath.isnan(val) or cmath.isinf(val) or val < 0 or val != round(val):
                    isOk = False
    if not isOk:
        raise ValueError("Subscripts must be a matrix of non-negative integers")
    return isOk
def minDeep(arg, exclude=None, no_nan=False):
    """
    Calculate min recursively for nested iterables, at any depth
    (arrays, matrices, tensors...) and for any type of iterable
    (list of tuples, tuple of sets, list of tuples of dictionaries...)
    """
    inf = float("+Inf")
    if exclude is None:
        exclude = ()
    if not isinstance(exclude, Iterable):
        exclude = (exclude, )
    if isinstance(arg, tuple(exclude)):
        return inf
    try:
        if next(iter(arg)) is arg:  # avoid infinite loops
            return min(arg)
    except TypeError:
        return arg
    try:
        mins = map(lambda x: minDeep(x, exclude), arg.keys())
    except AttributeError:
        try:
            mins = map(lambda x: minDeep(x, exclude), arg)
        except TypeError:
            return inf
    try:
        if no_nan:
            res = min((x for x in mins if not cmath.isnan(x)))
        else:
            res = min(mins)
    except ValueError:
        res = inf
    return res
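# Illustrative sketch for minDeep() above (assumes its Iterable and cmath
# imports are in scope); the nested structures here are made up for the example.
def demo_minDeep():
    nested = [(3, 4), [10, (2, 8)], {7: 'a'}]
    assert minDeep(nested) == 2                    # dives into tuples, lists and dict keys
    with_nan = [1.5, float('nan'), (0.5, 2.0)]
    assert minDeep(with_nan, no_nan=True) == 0.5   # NaNs skipped at the top level when requested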
def test_eval_curry_hyp(name_a, name_b, value_a, value_b, op_math_function):
    try:
        expected = op_math_function(value_a, value_b)
    except Exception:
        assume(False)
    try:
        is_nan_value = isnan(expected)
    except OverflowError:
        is_nan_value = False
    assume(not is_nan_value)
    assume(name_a != name_b)
    a = Step(name_a)
    b = Step(name_b)
    expr = Step(op_math_function, a, b)
    a_dict = {name_a: value_a}
    b_dict = {name_b: value_b}
    curried = do_eval(expr, **a_dict)
    assert isinstance(curried, Step)
    observed = do_eval(curried, **b_dict)
    assert isinstance(observed, type(expected))
    assert observed == expected
def compare_with_tolerance(student_complex, instructor_complex,
                           tolerance=default_tolerance, relative_tolerance=False):
    """
    Compare student_complex to instructor_complex with maximum tolerance tolerance.

     - student_complex    : student result (float complex number)
     - instructor_complex : instructor result (float complex number)
     - tolerance          : float, or string (representing a float or a percentage)
     - relative_tolerance : bool, to explicitly use passed tolerance as relative

    Note: when a tolerance is a percentage (i.e. '10%'), it will compute that
    percentage of the instructor result and yield a number.

    If relative_tolerance is set to False, it will use that value and the
    instructor result to define the bounds of valid student result:
    instructor_complex = 10, tolerance = '10%' will give [9.0, 11.0].

    If relative_tolerance is set to True, it will use that value and both
    instructor result and student result to define the bounds of valid student
    result:
    instructor_complex = 10, student_complex = 20, tolerance = '10%' will give
    [8.0, 12.0].
    This is typically used internally to compare float, with a
    default_tolerance = '0.001%'.

    Default tolerance of 1e-3% is added to compare two floats for
    near-equality (to handle machine representation errors).
    Default tolerance is relative, as the acceptable difference between two
    floats depends on the magnitude of the floats.
    (http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/)

    Examples:
        In [183]: 0.000016 - 1.6*10**-5
        Out[183]: -3.3881317890172014e-21
        In [212]: 1.9e24 - 1.9*10**24
        Out[212]: 268435456.0
    """
    if isinstance(tolerance, str):
        if tolerance == default_tolerance:
            relative_tolerance = True
        if tolerance.endswith('%'):
            tolerance = evaluator(dict(), dict(), tolerance[:-1]) * 0.01
            if not relative_tolerance:
                tolerance = tolerance * abs(instructor_complex)
        else:
            tolerance = evaluator(dict(), dict(), tolerance)

    if relative_tolerance:
        tolerance = tolerance * max(abs(student_complex), abs(instructor_complex))

    if isinf(student_complex) or isinf(instructor_complex):
        # If an input is infinite, we can end up with `abs(student_complex-instructor_complex)` and
        # `tolerance` both equal to infinity. Then, below we would have
        # `inf <= inf` which is a fail. Instead, compare directly.
        return student_complex == instructor_complex

    # because student_complex and instructor_complex are not necessarily
    # complex here, we enforce it here:
    student_complex = complex(student_complex)
    instructor_complex = complex(instructor_complex)

    # if both the instructor and student input are real,
    # compare them as Decimals to avoid rounding errors
    if not (instructor_complex.imag or student_complex.imag):
        # if either of these are not a number, short circuit and return False
        if isnan(instructor_complex.real) or isnan(student_complex.real):
            return False
        student_decimal = Decimal(str(student_complex.real))
        instructor_decimal = Decimal(str(instructor_complex.real))
        tolerance_decimal = Decimal(str(tolerance))
        return abs(student_decimal - instructor_decimal) <= tolerance_decimal
    else:
        # v1 and v2 are, in general, complex numbers:
        # there are some notes about backward compatibility issue: see responsetypes.get_staff_ans()).
        return abs(student_complex - instructor_complex) <= tolerance
print('sine =', cmath.sin(c))
print('cosine =', cmath.cos(c))
print('tangent =', cmath.tan(c))

# hyperbolic functions
c = 2 + 2j
print('inverse hyperbolic sine =', cmath.asinh(c))
print('inverse hyperbolic cosine =', cmath.acosh(c))
print('inverse hyperbolic tangent =', cmath.atanh(c))
print('hyperbolic sine =', cmath.sinh(c))
print('hyperbolic cosine =', cmath.cosh(c))
print('hyperbolic tangent =', cmath.tanh(c))

# classification functions
print(cmath.isfinite(2 + 2j))  # True
print(cmath.isfinite(cmath.inf + 2j))  # False
print(cmath.isinf(2 + 2j))  # False
print(cmath.isinf(cmath.inf + 2j))  # True
print(cmath.isinf(cmath.nan + 2j))  # False
print(cmath.isnan(2 + 2j))  # False
print(cmath.isnan(cmath.inf + 2j))  # False
print(cmath.isnan(cmath.nan + 2j))  # True
print(cmath.isclose(2+2j, 2.01+1.9j, rel_tol=0.05))  # True
print(cmath.isclose(2+2j, 2.01+1.9j, abs_tol=0.005))  # False
def nice_float(x):
    """
    Return a short string representation of a floating point number.

    Taken from the python-nicefloat module
    <http://labix.org/python-nicefloat>, which is based on the paper
    "Printing Floating-Point Numbers Quickly and Accurately" by
    Robert G. Burger and R. Kent Dybvig.
    """
    # Special cases for 0, infinity, and NaN
    if not x:
        return '0.'
    if cmath.isinf(x):
        if x < 0:
            return '-Inf'
        return 'Inf'
    if cmath.isnan(x):
        return 'Nan'

    # Copied from http://labix.org/python-nicefloat
    f, e = math.frexp(x)
    if x < 0:
        f = -f
    f = int(f * 2**53)
    e -= 53
    if e >= 0:
        be = 2**e
        if f != 2**52:
            r, s, mp, mm = f*be*2, 2, be, be
        else:
            be1 = be*2
            r, s, mp, mm = f*be1*2, 4, be1, be
    elif e == -1074 or f != 2**52:
        r, s, mp, mm = f*2, 2**(1-e), 1, 1
    else:
        r, s, mp, mm = f*4, 2**(2-e), 2, 1
    k = 0
    round = f % 2 == 0
    while not round and r+mp*10 <= s or r+mp*10 < s:
        r *= 10
        mp *= 10
        mm *= 10
        k -= 1
    while round and r+mp >= s or r+mp > s:
        s *= 10
        k += 1
    l = []
    while True:
        d, r = divmod(r*10, s)
        d = int(d)
        mp *= 10
        mm *= 10
        tc1 = round and r == mm or r < mm
        tc2 = round and r+mp == s or r+mp > s
        if not tc1:
            if not tc2:
                l.append(d)
                continue
            l.append(d+1)
        elif not tc2 or r*2 < s:
            l.append(d)
        else:
            l.append(d+1)
        break
    if k <= 0:
        l.insert(0, '0' * abs(k))
        l.insert(0, '.')
    elif k < len(l):
        l.insert(k, '.')
    else:
        l.append('0' * (k - len(l)))
        l.append('.')
    n = ''.join(map(str, l))

    # Further shorten the string using scientific notation
    if n.startswith('.000'):
        n = n[1:]
        p = 0
        while n.startswith('0'):
            n = n[1:]
            p -= 1
        n1 = (n[0] + '.' + n[1:] + 'e' + str(p-1)).replace('.e', 'e')
        n2 = n + 'e' + str(p-len(n))
        n = n2 if len(n2) < len(n1) else n1
    elif n.endswith('00.'):
        n = n[:-1]
        p = 0
        while n.endswith('0'):
            n = n[:-1]
            p += 1
        n = n + 'e' + str(p)
    if x < 0:
        n = '-' + n
    return n
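# Quick sketch of nice_float() above, limited to the special cases that can be
# read directly off the code (assumes its math/cmath imports are present).
def demo_nice_float():
    assert nice_float(0.0) == '0.'
    assert nice_float(float('inf')) == 'Inf'
    assert nice_float(float('-inf')) == '-Inf'
    assert nice_float(float('nan')) == 'Nan'
    print(nice_float(0.1), nice_float(1.9e24))   # shortest-digit forms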
def _is_nan(x):
    if isinstance(x, complex):
        return cmath.isnan(x)
    if isinstance(x, Decimal):
        return x.is_nan()
    return False
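# Tiny usage sketch for _is_nan() above (assumes `import cmath` and
# `from decimal import Decimal` as in the surrounding module).
def demo_is_nan():
    assert _is_nan(complex('nan+1j'))   # complex NaN via cmath.isnan
    assert _is_nan(Decimal('NaN'))      # Decimal NaN via Decimal.is_nan
    assert not _is_nan(5)               # ints fall through to False
    assert not _is_nan(float('nan'))    # plain floats are not handled here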
def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
                        msg=None):
    """Recursive workhorse for assertPreciseEqual()."""

    def _assertNumberEqual(first, second, delta=None):
        if (delta is None or first == second == 0.0
                or math.isinf(first) or math.isinf(second)):
            self.assertEqual(first, second, msg=msg)
            # For signed zeros
            try:
                if math.copysign(1, first) != math.copysign(1, second):
                    self.fail(
                        self._formatMessage(msg, "%s != %s" % (first, second)))
            except TypeError:
                pass
        else:
            self.assertAlmostEqual(first, second, delta=delta, msg=msg)

    first_family = self._detect_family(first)
    second_family = self._detect_family(second)

    assertion_message = "Type Family mismatch. (%s != %s)" % (first_family,
                                                              second_family)
    if msg:
        assertion_message += ': %s' % (msg,)
    self.assertEqual(first_family, second_family, msg=assertion_message)

    # We now know they are in the same comparison family
    compare_family = first_family

    # For recognized sequences, recurse
    if compare_family == "ndarray":
        dtype = self._fix_dtype(first.dtype)
        self.assertEqual(dtype, self._fix_dtype(second.dtype))
        self.assertEqual(first.ndim, second.ndim)
        self.assertEqual(first.shape, second.shape)
        self.assertEqual(first.itemsize, second.itemsize)
        self.assertEqual(self._fix_strides(first), self._fix_strides(second))
        if first.dtype != dtype:
            first = first.astype(dtype)
        if second.dtype != dtype:
            second = second.astype(dtype)
        for a, b in zip(first.flat, second.flat):
            self._assertPreciseEqual(a, b, prec, ulps, msg)
        return

    if compare_family == "sequence":
        self.assertEqual(len(first), len(second), msg=msg)
        for a, b in zip(first, second):
            self._assertPreciseEqual(a, b, prec, ulps, msg)
        return

    if compare_family == "exact":
        exact_comparison = True

    if compare_family in ["complex", "approximate"]:
        exact_comparison = False

    if compare_family == "unknown":
        # Assume these are non-numeric types: we will fall back
        # on regular unittest comparison.
        self.assertIs(first.__class__, second.__class__)
        exact_comparison = True

    # If a Numpy scalar, check the dtype is exactly the same too
    # (required for datetime64 and timedelta64).
    if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
        self.assertEqual(first.dtype, second.dtype)

    try:
        if cmath.isnan(first) and cmath.isnan(second):
            # The NaNs will compare unequal, skip regular comparison
            return
    except TypeError:
        # Not floats.
        pass

    exact_comparison = exact_comparison or prec == 'exact'

    if not exact_comparison and prec != 'exact':
        if prec == 'single':
            bits = 24
        elif prec == 'double':
            bits = 53
        else:
            raise ValueError("unsupported precision %r" % (prec,))
        k = 2 ** (ulps - bits - 1)
        delta = k * (abs(first) + abs(second))
    else:
        delta = None
    if isinstance(first, self._complex_types):
        _assertNumberEqual(first.real, second.real, delta)
        _assertNumberEqual(first.imag, second.imag, delta)
    else:
        _assertNumberEqual(first, second, delta)
def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
                        msg=None):
    """Recursive workhorse for assertPreciseEqual()."""

    def _assertNumberEqual(first, second, delta=None):
        if (delta is None or first == second == 0.0
                or math.isinf(first) or math.isinf(second)):
            self.assertEqual(first, second, msg=msg)
            # For signed zeros
            try:
                if math.copysign(1, first) != math.copysign(1, second):
                    self.fail(
                        self._formatMessage(msg, "%s != %s" % (first, second)))
            except TypeError:
                pass
        else:
            self.assertAlmostEqual(first, second, delta=delta, msg=msg)

    for tp in self._sequence_typesets:
        # For recognized sequences, recurse
        if isinstance(first, tp) or isinstance(second, tp):
            self.assertIsInstance(first, tp)
            self.assertIsInstance(second, tp)
            self.assertEqual(len(first), len(second), msg=msg)
            for a, b in zip(first, second):
                self._assertPreciseEqual(a, b, prec, ulps, msg)
            return

    for tp in self._exact_typesets:
        # One or another could be the expected, the other the actual;
        # test both.
        if isinstance(first, tp) or isinstance(second, tp):
            self.assertIsInstance(first, tp)
            self.assertIsInstance(second, tp)
            exact_comparison = True
            break
    else:
        for tp in self._approx_typesets:
            if isinstance(first, tp) or isinstance(second, tp):
                self.assertIsInstance(first, tp)
                self.assertIsInstance(second, tp)
                exact_comparison = False
                break
        else:
            # Assume these are non-numeric types: we will fall back
            # on regular unittest comparison.
            self.assertIs(first.__class__, second.__class__)
            exact_comparison = True

    # If a Numpy scalar, check the dtype is exactly the same too
    # (required for datetime64 and timedelta64).
    if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
        self.assertEqual(first.dtype, second.dtype)

    try:
        if cmath.isnan(first) and cmath.isnan(second):
            # The NaNs will compare unequal, skip regular comparison
            return
    except TypeError:
        # Not floats.
        pass

    exact_comparison = exact_comparison or prec == 'exact'

    if not exact_comparison and prec != 'exact':
        if prec == 'single':
            bits = 24
        elif prec == 'double':
            bits = 53
        else:
            raise ValueError("unsupported precision %r" % (prec,))
        k = 2 ** (ulps - bits - 1)
        delta = k * (abs(first) + abs(second))
    else:
        delta = None
    if isinstance(first, self._complex_types):
        _assertNumberEqual(first.real, second.real, delta)
        _assertNumberEqual(first.imag, second.imag, delta)
    else:
        _assertNumberEqual(first, second, delta)
def isnan_usecase(x):
    return cmath.isnan(x)
def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
                        msg=None, ignore_sign_on_zero=False,
                        abs_tol=None):
    """Recursive workhorse for assertPreciseEqual()."""

    def _assertNumberEqual(first, second, delta=None):
        if (delta is None or first == second == 0.0
                or math.isinf(first) or math.isinf(second)):
            self.assertEqual(first, second, msg=msg)
            # For signed zeros
            if not ignore_sign_on_zero:
                try:
                    if math.copysign(1, first) != math.copysign(1, second):
                        self.fail(
                            self._formatMessage(msg, "%s != %s" % (first, second)))
                except TypeError:
                    pass
        else:
            self.assertAlmostEqual(first, second, delta=delta, msg=msg)

    first_family = self._detect_family(first)
    second_family = self._detect_family(second)

    assertion_message = "Type Family mismatch. (%s != %s)" % (first_family,
                                                              second_family)
    if msg:
        assertion_message += ': %s' % (msg,)
    self.assertEqual(first_family, second_family, msg=assertion_message)

    # We now know they are in the same comparison family
    compare_family = first_family

    # For recognized sequences, recurse
    if compare_family == "ndarray":
        dtype = self._fix_dtype(first.dtype)
        self.assertEqual(dtype, self._fix_dtype(second.dtype))
        self.assertEqual(first.ndim, second.ndim,
                         "different number of dimensions")
        self.assertEqual(first.shape, second.shape,
                         "different shapes")
        self.assertEqual(first.flags.writeable, second.flags.writeable,
                         "different mutability")
        # itemsize is already checked by the dtype test above
        self.assertEqual(self._fix_strides(first),
                         self._fix_strides(second), "different strides")
        if first.dtype != dtype:
            first = first.astype(dtype)
        if second.dtype != dtype:
            second = second.astype(dtype)
        for a, b in zip(first.flat, second.flat):
            self._assertPreciseEqual(a, b, prec, ulps, msg,
                                     ignore_sign_on_zero, abs_tol)
        return

    elif compare_family == "sequence":
        self.assertEqual(len(first), len(second), msg=msg)
        for a, b in zip(first, second):
            self._assertPreciseEqual(a, b, prec, ulps, msg,
                                     ignore_sign_on_zero, abs_tol)
        return

    elif compare_family == "exact":
        exact_comparison = True

    elif compare_family in ["complex", "approximate"]:
        exact_comparison = False

    elif compare_family == "enum":
        self.assertIs(first.__class__, second.__class__)
        self._assertPreciseEqual(first.value, second.value,
                                 prec, ulps, msg,
                                 ignore_sign_on_zero, abs_tol)
        return

    elif compare_family == "unknown":
        # Assume these are non-numeric types: we will fall back
        # on regular unittest comparison.
        self.assertIs(first.__class__, second.__class__)
        exact_comparison = True

    else:
        assert 0, "unexpected family"

    # If a Numpy scalar, check the dtype is exactly the same too
    # (required for datetime64 and timedelta64).
    if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
        self.assertEqual(first.dtype, second.dtype)

    # Mixing bools and non-bools should always fail
    if (isinstance(first, self._bool_types) !=
            isinstance(second, self._bool_types)):
        assertion_message = ("Mismatching return types (%s vs. %s)"
                             % (first.__class__, second.__class__))
        if msg:
            assertion_message += ': %s' % (msg,)
        self.fail(assertion_message)

    try:
        if cmath.isnan(first) and cmath.isnan(second):
            # The NaNs will compare unequal, skip regular comparison
            return
    except TypeError:
        # Not floats.
        pass

    # if absolute comparison is set, use it
    if abs_tol is not None:
        if abs_tol == "eps":
            rtol = np.finfo(type(first)).eps
        elif isinstance(abs_tol, float):
            rtol = abs_tol
        else:
            raise ValueError("abs_tol is not \"eps\" or a float, found %s"
                             % abs_tol)
        if abs(first - second) < rtol:
            return

    exact_comparison = exact_comparison or prec == 'exact'

    if not exact_comparison and prec != 'exact':
        if prec == 'single':
            bits = 24
        elif prec == 'double':
            bits = 53
        else:
            raise ValueError("unsupported precision %r" % (prec,))
        k = 2 ** (ulps - bits - 1)
        delta = k * (abs(first) + abs(second))
    else:
        delta = None
    if isinstance(first, self._complex_types):
        _assertNumberEqual(first.real, second.real, delta)
        _assertNumberEqual(first.imag, second.imag, delta)
    else:
        _assertNumberEqual(first, second, delta)
def measure_pearson(datafname, labelsfile, outfname, maskfname='',
                    exclufname='', exclude_idx=-1):
    # reading label file
    labels = np.loadtxt(labelsfile, dtype=int)

    if exclufname:
        exclus = np.loadtxt(exclufname, dtype=int)

    # reading input volume
    vol = nib.load(datafname)
    n = vol.get_shape()[3]

    if n != len(labels):
        err = 'Numbers do not match: ' + datafname + ' and ' + labelsfile
        raise IOError(err)
    elif exclufname:
        if n != len(exclus):
            err = 'Numbers do not match: ' + datafname + ' and ' + exclufname
            raise IOError(err)

    exclude_log = ''
    if exclude_idx > -1:
        exclude_log = ' excluding subject ' + str(exclude_idx)

    au.log.debug('Pearson correlation of ' + os.path.basename(datafname) + exclude_log)

    # reading volume
    data = vol.get_data()

    # excluding subjects
    if exclufname and exclude_idx > -1:
        exclus[exclude_idx] = 1

    if exclufname:
        data = data[:, :, :, exclus == 0]
        labels = labels[exclus == 0]
    elif exclude_idx > -1:
        exclus = np.zeros(n, dtype=int)
        exclus[exclude_idx] = 1
        data = data[:, :, :, exclus == 0]
        labels = labels[exclus == 0]

    subsno = data.shape[3]

    # preprocessing data
    shape = data.shape[0:3]
    siz = np.prod(shape)
    temp = data.reshape(siz, subsno)
    ind = range(len(temp))

    if maskfname:
        mask = nib.load(maskfname)
        mskdat = mask.get_data()
        mskdat = mskdat.reshape(siz)
        ind = np.where(mskdat != 0)[0]

    # creating output volume file
    odat = np.zeros(shape, dtype=vol.get_data_dtype())

    for i in range(len(ind)):
        idx = ind[i]
        x = temp[idx, :]
        p = stats.pearsonr(labels, x)[0]
        # ldemean = labels - np.mean(labels)
        # xdemean = x - np.mean(x)
        # p = np.sum(ldemean * xdemean) / (np.sqrt(np.sum(np.square(ldemean))) * np.sqrt(np.sum(np.square(xdemean))))
        if math.isnan(p):
            p = 0
        odat[np.unravel_index(idx, shape)] = p

    au.save_nibabel(outfname, odat, vol.get_affine())
    return outfname
def test_isnan():
    assert cmath.isnan(float('inf')) == isnan(float('inf'))
    assert cmath.isnan(-float('inf')) == isnan(-float('inf'))
    assert cmath.isnan(float('nan')) == isnan(float('nan'))
    assert not isnan(sympy.oo)
    assert not isnan(-sympy.oo)
    assert isnan(sympy.nan)
    assert cmath.isnan(1.0) == isnan(1.0)
    assert cmath.isnan(0) == isnan(0)

    inf = float('inf')
    nan = float('nan')
    assert cmath.isnan(complex(inf)) == isnan(complex(inf))
    assert cmath.isnan(complex(1, inf)) == isnan(complex(1, inf))
    assert cmath.isnan(complex(1, -inf)) == isnan(complex(1, -inf))
    assert cmath.isnan(complex(inf, 1)) == isnan(complex(inf, 1))
    assert cmath.isnan(complex(inf, inf)) == isnan(complex(inf, inf))
    assert cmath.isnan(complex(1, nan)) == isnan(complex(1, nan))
    assert cmath.isnan(complex(nan, 1)) == isnan(complex(nan, 1))
    assert cmath.isnan(complex(nan, nan)) == isnan(complex(nan, nan))
    assert cmath.isnan(complex(inf, nan)) == isnan(complex(inf, nan))
    assert cmath.isnan(complex(nan, inf)) == isnan(complex(nan, inf))