class BaseAction(implements(Action)):
    def __init__(self, command):
        self.command = command

    def execute(self):
        return self.command
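# A minimal sketch of the Interface/implements pattern that every class in this
# file relies on, assuming the `python-interface` package (imported as
# `interface`). The real Action interface is not shown in the source; the shape
# below is inferred from BaseAction, and EchoAction is purely hypothetical.
from interface import Interface, implements


class Action(Interface):
    def execute(self):
        pass


class EchoAction(implements(Action)):
    def __init__(self, command):
        self.command = command

    def execute(self):
        return self.command


print(EchoAction("status").execute())  # prints "status"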
class ThreeDGraphs(implements(GraphInterface)): def __init__(self): self def line(self): m = float(input("Enter the value of m: ")) b = float(input("Enter the value of b: ")) start = float(input("Enter the starting value of x: ")) end = float(input("Enter the end value of x: ")) xValues = np.arange(start, (end + 1)) yValues = m * xValues + b fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(m) + "x + " + str(b)) plt.show() def quadratic(self): a = float(input("Enter the value of a: ")) b = float(input("Enter the value of b: ")) c = float(input("Enter the value of c: ")) start = float(input("Enter the starting value of x: ")) end = float(input("Enter the end value of x: ")) xValues = np.arange(start, (end + 1), 0.001) yValues = a * (xValues**2) + b * xValues + c fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(a) + "x^2 + " + str(b) + "x + " + str(c)) plt.show() def cubic(self): a = float(input("Enter the value of a: ")) b = float(input("Enter the value of b: ")) c = float(input("Enter the value of c: ")) d = float(input("Enter the value of d: ")) start = float(input("Enter the starting value of x: ")) end = float(input("Enter the end value of x: ")) xValues = np.arange(start, (end + 1), 0.001) yValues = a * (xValues**3) + b * (xValues**2) + c * xValues + d fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(a) + "x^3 + " + str(b) + "x^2 + " + str(c) + "x + " + str(d)) plt.show() def quartic(self): a = float(input("Enter the value of a: ")) b = float(input("Enter the value of b: ")) c = float(input("Enter the value of c: ")) d = float(input("Enter the value of d: ")) e = float(input("Enter the value of e: ")) start = float(input("Enter the starting value of x: ")) end = float(input("Enter the end value of x: ")) xValues = np.arange(start, (end + 1), 0.001) yValues = (a * (xValues**4)) + (b * (xValues**3)) + ( c * (xValues**2)) + (d * xValues) + e fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(a) + "x^4 + " + str(b) + "x^3 + " + str(c) + "x^2 + " + str(d) + "x + " + str(e)) plt.show() def sin(self): a = float(input("Enter the value of a: ")) b = float(input("Enter the value of b: ")) c = float(input("Enter the value of c: ")) d = float(input("Enter the value of d: ")) print( "The graph of this trigonometric function is going to be from -2pi to 2pi" ) xValues = np.arange(-2 * math.pi, 2 * math.pi, 0.001) yValues = a * np.sin(b * xValues + c) + d fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xticklabels([ '$-2\pi$', '$-3\pi/2$', '$-\pi$', '$-\pi/2$', '$0$', '$\pi/2$', '$\pi$', '$3\pi/2$', '$2\pi$' ]) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(a) + "sin( " + 
str(b) + "x + " + str(c) + ") " + str(d)) plt.show() def cos(self): a = float(input("Enter the value of a: ")) b = float(input("Enter the value of b: ")) c = float(input("Enter the value of c: ")) d = float(input("Enter the value of d: ")) print( "The graph of this trigonometric function is going to be from -2pi to 2pi" ) xValues = np.arange(-2 * math.pi, 2 * math.pi, 0.001) yValues = a * np.cos(b * xValues + c) + d fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xticklabels([ '$-2\pi$', '$-3\pi/2$', '$-\pi$', '$-\pi/2$', '$0$', '$\pi/2$', '$\pi$', '$3\pi/2$', '$2\pi$' ]) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(a) + "cos( " + str(b) + "x + " + str(c) + ") " + str(d)) plt.show() def tan(self): a = float(input("Enter the value of a: ")) b = float(input("Enter the value of b: ")) c = float(input("Enter the value of c: ")) d = float(input("Enter the value of d: ")) print( "The graph of this trigonometric function is going to be from -2pi to 2pi" ) xValues = np.arange(-2 * math.pi, 2 * math.pi, 0.001) yValues = a * np.tan(b * xValues + c) + d tol = 20 negTol = -20 yValues[yValues > tol] = np.inf yValues[yValues < negTol] = -np.inf fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xticklabels([ '$-2\pi$', '$-3\pi/2$', '$-\pi$', '$-\pi/2$', '$0$', '$\pi/2$', '$\pi$', '$3\pi/2$', '$2\pi$' ]) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(a) + "tan( " + str(b) + "x + " + str(c) + ") " + str(d)) plt.show() def sec(self): a = float(input("Enter the value of a: ")) b = float(input("Enter the value of b: ")) c = float(input("Enter the value of c: ")) d = float(input("Enter the value of d: ")) print( "The graph of this trigonometric function is going to be from -2pi to 2pi" ) xValues = np.arange(-2 * math.pi, 2 * math.pi, 0.001) yValues = 1 / (a * np.cos(b * xValues + c) + d) tol = 20 negTol = -20 yValues[yValues > tol] = np.inf yValues[yValues < negTol] = -np.inf fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xticklabels([ '$-2\pi$', '$-3\pi/2$', '$-\pi$', '$-\pi/2$', '$0$', '$\pi/2$', '$\pi$', '$3\pi/2$', '$2\pi$' ]) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(a) + "sec( " + str(b) + "x + " + str(c) + ") " + str(d)) plt.show() def csc(self): a = float(input("Enter the value of a: ")) b = float(input("Enter the value of b: ")) c = float(input("Enter the value of c: ")) d = float(input("Enter the value of d: ")) print( "The graph of this trigonometric function is going to be from -2pi to 2pi" ) xValues = np.arange(-2 * math.pi, 2 * math.pi, 0.001) yValues = 1 / (a * np.sin(b * xValues + c) + d) tol = 20 negTol = -20 yValues[yValues > tol] = np.inf yValues[yValues < negTol] = -np.inf fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xticklabels([ '$-2\pi$', '$-3\pi/2$', '$-\pi$', '$-\pi/2$', '$0$', '$\pi/2$', '$\pi$', '$3\pi/2$', '$2\pi$' ]) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(a) + "csc( " + str(b) + "x + " + str(c) + ") " + str(d)) plt.show() def cot(self): a = 
float(input("Enter the value of a: ")) b = float(input("Enter the value of b: ")) c = float(input("Enter the value of c: ")) d = float(input("Enter the value of d: ")) print( "The graph of this trigonometric function is going to be from -2pi to 2pi" ) xValues = np.arange(-2 * math.pi, 2 * math.pi, 0.001) yValues = 1 / (a * np.tan(b * xValues + c) + d) tol = 20 negTol = -20 yValues[yValues > tol] = np.inf yValues[yValues < negTol] = -np.inf fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.set_zlim(0, 1) ax.view_init(elev=57., azim=-77) ax.plot(xValues, yValues) ax.set_xticklabels([ '$-2\pi$', '$-3\pi/2$', '$-\pi$', '$-\pi/2$', '$0$', '$\pi/2$', '$\pi$', '$3\pi/2$', '$2\pi$' ]) ax.set_xlabel("x-Axis") ax.set_ylabel("y-Axis") ax.set_zlabel("z-Axis") ax.set_title("Graph of y = " + str(a) + "cot( " + str(b) + "x + " + str(c) + ") " + str(d)) plt.show()
class ChainMDP(implements(MDP)):
    # Basic MDP class that has two actions (left, right), no terminal states,
    # and is a chain MDP with deterministic transitions.
    def __init__(self, num_states, r_sa, gamma, init_dist):
        self.num_actions = 2
        self.num_rows = 1
        self.num_cols = num_states
        self.num_states = num_states
        self.gamma = gamma
        self.init_dist = init_dist
        self.terminals = []
        self.r_sa = r_sa

        self.init_states = []
        for s in range(self.num_states):
            if self.init_dist[s] > 0:
                self.init_states.append(s)

        self.P_left = self.get_transitions(policy="left")
        self.P_right = self.get_transitions(policy="right")
        self.Ps = [self.P_left, self.P_right]

    def get_transition_prob_matrices(self):
        return self.Ps

    def get_num_actions(self):
        return self.num_actions

    def transform_to_R_sa(self, reward_weights):
        # Don't do anything; reward_weights should already be r_sa.
        assert len(reward_weights) == len(self.r_sa)
        return reward_weights

    def get_readable_actions(self, action_num):
        if action_num == 0:
            return "<"
        elif action_num == 1:
            return ">"
        else:
            print("error, only two possible actions")
            sys.exit()

    def get_num_states(self):
        return self.num_states

    def get_reward_dimensionality(self):
        return len(self.r_sa)

    def set_reward_fn(self, new_reward):
        self.r_sa = new_reward

    def get_state_action_rewards(self):
        return self.r_sa

    def get_transition_prob(self, s1, a, s2):
        return self.Ps[a][s1][s2]

    def get_transitions(self, policy):
        P_pi = np.zeros((self.num_states, self.num_states))
        if policy == "left":  # action 0
            # Always transition one to the left unless already at the left border.
            cnt = 0
            for r in range(self.num_rows):
                for c in range(self.num_cols):
                    if c > 0:
                        P_pi[cnt, cnt - 1] = 1.0
                    else:
                        P_pi[cnt, cnt] = 1.0
                    # increment state count
                    cnt += 1
        elif policy == "right":  # action 1
            # Always transition one to the right unless already at the right border.
            cnt = 0
            for r in range(self.num_rows):
                for c in range(self.num_cols):
                    if c < self.num_cols - 1:
                        # transition to the next state to the right
                        P_pi[cnt, cnt + 1] = 1.0
                    else:
                        # self transition
                        P_pi[cnt, cnt] = 1.0
                    # increment state count
                    cnt += 1
        return P_pi
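# Hedged usage sketch for ChainMDP above: a four-state chain with deterministic
# "left"/"right" dynamics and a uniform initial distribution. The reward layout
# (one entry per state-action pair, here 2 * num_states) is an assumption
# inferred from transform_to_R_sa and get_reward_dimensionality.
import numpy as np

num_states = 4
r_sa = np.zeros(2 * num_states)
r_sa[-1] = 1.0  # assumed: reward for taking an action in the right-most state
init_dist = np.ones(num_states) / num_states

mdp = ChainMDP(num_states, r_sa, gamma=0.95, init_dist=init_dist)
print(mdp.get_readable_actions(1))       # ">"
print(mdp.get_transition_prob(0, 1, 1))  # 1.0 -- action "right" moves state 0 to 1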
class CertificateDetailsImpl(implements(CertificateDetails)): def __init__(self, name: str, issuer: CertificateDetails, certificate_type: CertificateType): self._name = name self._issuer = issuer self._certificate_type = certificate_type @property def name(self) -> str: return self._name @property def issuer(self) -> 'CertificateDetails': return self._issuer @property def certificate_type(self) -> CertificateType: return self._certificate_type @classmethod def determine_certificate_details(cls, cert_path: str) -> CertificateDetails: def _parse_certificate_path(elements: List[str]) -> Dict[str, str]: if len(elements) == 1: # This is a shortcut and assumes a root cert is provided result = {'root': elements[0]} elif len(elements) == 2 and elements[0] in ['server', 'client']: # This is a cert that's being "self issued" result = {elements[0]: elements[1]} else: del elements[0] result = {k: v for k, v in zip(*[iter(elements)] * 2)} return result elements = cert_path.split('/') path_elements = _parse_certificate_path(elements) if 'server' in path_elements: cert_name = path_elements['server'] cert_type = X509_CERTIFICATE_TYPES['server'] elif 'client' in path_elements: cert_name = path_elements['client'] cert_type = X509_CERTIFICATE_TYPES['client'] elif 'intermediate' in path_elements: cert_name = path_elements['intermediate'] cert_type = X509_CERTIFICATE_TYPES['intermediate'] elif 'root' in path_elements: cert_name = path_elements['root'] cert_type = X509_CERTIFICATE_TYPES['root'] else: raise ValueError( "The certificate type could not be determined ({})".format( path_elements.keys())) if len(elements) > 2: cert_issuer = cls.determine_certificate_details( cert_path.rsplit('/', 2)[0]) else: cert_issuer = None return CertificateDetailsImpl(name=cert_name, certificate_type=cert_type, issuer=cert_issuer)
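# Hedged usage sketch for CertificateDetailsImpl.determine_certificate_details
# above. The path layout (issuing CA first, then alternating type/name
# segments) is inferred from _parse_certificate_path; the concrete certificate
# names are hypothetical.
details = CertificateDetailsImpl.determine_certificate_details(
    'example-root/intermediate/example-ica/server/web01')
print(details.name)              # 'web01'
print(details.certificate_type)  # X509_CERTIFICATE_TYPES['server']
print(details.issuer.name)       # 'example-ica'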
class Fisioterapeuta(implements(Especialista)):
    def operacion(self):
        # "Physiotherapist's recommendation"
        print("Recomendacion de fisioterapeuta")
class TripletDetectionSample(implements(Sample)): def __init__(self, data, target): self.data = data self.target = target self.output = None def visualize(self, parameters={}): # image_frame = PTImage.from_cwh_torch(self.data[0]) if parameters.get('mode', 'train') == 'train': image_pos = PTImage.from_cwh_torch(self.data[0]) image_neg = PTImage.from_cwh_torch(self.data[1]) image_anchor = PTImage.from_cwh_torch(self.output[0]) image_pos_map = PTImage.from_2d_wh_torch( F.sigmoid(self.output[1]).data) image_neg_map = PTImage.from_2d_wh_torch( F.sigmoid(self.output[2]).data) image_pos_tar = PTImage.from_2d_wh_torch(self.target[0]) image_neg_tar = PTImage.from_2d_wh_torch(self.target[1]) # target_box = Box.tensor_to_box(self.target[0].cpu(),image_pos.get_wh()) # objs = [Object(target_box,0,obj_type='T')] # pos_frame = Frame.from_image_and_objects(image_pos,objs) # ImageVisualizer().set_image(image_frame,parameters.get('title','') + ' : Frame') ImageVisualizer().set_image( image_anchor, parameters.get('title', '') + ' : anchor') ImageVisualizer().set_image( image_pos, parameters.get('title', '') + ' : pos_frame') ImageVisualizer().set_image( image_neg, parameters.get('title', '') + ' : neg_frame') ImageVisualizer().set_image( image_pos_tar, parameters.get('title', '') + ' : pos_target') ImageVisualizer().set_image( image_neg_tar, parameters.get('title', '') + ' : neg_target') ImageVisualizer().set_image( image_pos_map, parameters.get('title', '') + ' : pos_res') ImageVisualizer().set_image( image_neg_map, parameters.get('title', '') + ' : neg_res') else: img_frame = PTImage.from_cwh_torch(self.data[0]) img_frame_xcor = PTImage.from_2d_wh_torch( F.sigmoid(self.output[0]).data) # img_pos = PTImage.from_cwh_torch(self.data[1]) # img_neg = PTImage.from_cwh_torch(self.data[2]) # image_pos_map = PTImage.from_2d_wh_torch(F.sigmoid(self.output[1]).data) # image_neg_map = PTImage.from_2d_wh_torch(F.sigmoid(self.output[2]).data) ImageVisualizer().set_image( img_frame, parameters.get('title', '') + ' : Frame') ImageVisualizer().set_image( img_frame_xcor, parameters.get('title', '') + ' : Frame xcor') # ImageVisualizer().set_image(img_pos,parameters.get('title','') + ' : pos') # ImageVisualizer().set_image(image_pos_map,parameters.get('title','') + ' : pos xcor') # ImageVisualizer().set_image(img_neg,parameters.get('title','') + ' : neg') # ImageVisualizer().set_image(image_neg_map,parameters.get('title','') + ' : neg xcor') def set_output(self, output): self.output = output def get_data(self): return self.data def get_target(self): return self.target
class EquityPricingLoader(implements(PipelineLoader)): """A PipelineLoader for loading daily OHLCV data. Parameters ---------- raw_price_reader : zipline.data.session_bars.SessionBarReader Reader providing raw prices. adjustments_reader : zipline.data.adjustments.SQLiteAdjustmentReader Reader providing price/volume adjustments. fx_reader : zipline.data.fx.FXRateReader Reader providing currency conversions. """ def __init__(self, raw_price_reader, adjustments_reader, fx_reader): self.raw_price_reader = raw_price_reader self.adjustments_reader = adjustments_reader self.fx_reader = fx_reader @classmethod def without_fx(cls, raw_price_reader, adjustments_reader): """ Construct an EquityPricingLoader without support for fx rates. The returned loader will raise an error if requested to load currency-converted columns. Parameters ---------- raw_price_reader : zipline.data.session_bars.SessionBarReader Reader providing raw prices. adjustments_reader : zipline.data.adjustments.SQLiteAdjustmentReader Reader providing price/volume adjustments. Returns ------- loader : EquityPricingLoader A loader that can only provide currency-naive data. """ return cls( raw_price_reader=raw_price_reader, adjustments_reader=adjustments_reader, fx_reader=ExplodingFXRateReader(), ) def load_adjusted_array(self, domain, columns, dates, sids, mask): # load_adjusted_array is called with dates on which the user's algo # will be shown data, which means we need to return the data that would # be known at the **start** of each date. We assume that the latest # data known on day N is the data from day (N - 1), so we shift all # query dates back by a trading session. sessions = domain.all_sessions() shifted_dates = shift_dates(sessions, dates[0], dates[-1], shift=1) ohlcv_cols, currency_cols = self._split_column_types(columns) del columns # From here on we should use ohlcv_cols or currency_cols. ohlcv_colnames = [c.name for c in ohlcv_cols] raw_ohlcv_arrays = self.raw_price_reader.load_raw_arrays( ohlcv_colnames, shifted_dates[0], shifted_dates[-1], sids, ) # Currency convert raw_arrays in place if necessary. We use shifted # dates to load currency conversion rates to make them line up with # dates used to fetch prices. self._inplace_currency_convert( ohlcv_cols, raw_ohlcv_arrays, shifted_dates, sids, ) adjustments = self.adjustments_reader.load_pricing_adjustments( ohlcv_colnames, dates, sids, ) out = {} for c, c_raw, c_adjs in zip(ohlcv_cols, raw_ohlcv_arrays, adjustments): out[c] = AdjustedArray( c_raw.astype(c.dtype), c_adjs, c.missing_value, ) for c in currency_cols: codes_1d = self.raw_price_reader.currency_codes(sids) codes = repeat_first_axis(codes_1d, len(dates)) out[c] = AdjustedArray( codes, adjustments={}, missing_value=None, ) return out @property def currency_aware(self): # Tell the pipeline engine that this loader supports currency # conversion if we have a non-dummy fx rates reader. return not isinstance(self.fx_reader, ExplodingFXRateReader) def _inplace_currency_convert(self, columns, arrays, dates, sids): """ Currency convert raw data loaded for ``column``. Parameters ---------- columns : list[zipline.pipeline.data.BoundColumn] List of columns whose raw data has been loaded. arrays : list[np.array] List of arrays, parallel to ``columns`` containing data for the column. dates : pd.DatetimeIndex Labels for rows of ``arrays``. These are the dates that should be used to fetch fx rates for conversion. sids : np.array[int64] Labels for columns of ``arrays``. 
Returns ------- None Side Effects ------------ Modifies ``arrays`` in place by applying currency conversions. """ # Group columns by currency conversion spec. by_spec = defaultdict(list) for column, array in zip(columns, arrays): by_spec[column.currency_conversion].append(array) # Nothing to do for terms with no currency conversion. by_spec.pop(None, None) if not by_spec: return fx_reader = self.fx_reader base_currencies = self.raw_price_reader.currency_codes(sids) # Columns with the same conversion spec will use the same multipliers. for spec, arrays in by_spec.items(): rates = fx_reader.get_rates( rate=spec.field, quote=spec.currency.code, bases=base_currencies, dts=dates, ) for arr in arrays: multiply(arr, rates, out=arr) def _split_column_types(self, columns): """Split out currency columns from OHLCV columns. Parameters ---------- columns : list[zipline.pipeline.data.BoundColumn] Columns to be loaded by ``load_adjusted_array``. Returns ------- ohlcv_columns : list[zipline.pipeline.data.BoundColumn] Price and volume columns from ``columns``. currency_columns : list[zipline.pipeline.data.BoundColumn] Currency code column from ``columns``, if present. """ currency_name = EquityPricing.currency.name ohlcv = [] currency = [] for c in columns: if c.name == currency_name: currency.append(c) else: ohlcv.append(c) return ohlcv, currency
class HDF5FXRateReader(implements(FXRateReader)): """An FXRateReader backed by HDF5. Parameters ---------- group : h5py.Group Top-level group written by an :class:`HDF5FXRateWriter`. default_rate : str Rate to use when ``get_rates`` is called requesting the default rate. """ def __init__(self, group, default_rate): self._group = group self._default_rate = default_rate @classmethod def from_path(cls, path): """Construct from a file path. Parameters ---------- path : str Path to an HDF5 fx rates file. """ return cls(h5py.File(path)) @lazyval def dts(self): """Row labels for rate groups. """ return pd.DatetimeIndex( self._group[INDEX][DTS][:].astype('M8[ns]'), tz='UTC', ) @lazyval def currencies(self): """Column labels for rate groups. """ # Currencies are stored as fixed-length bytes in the file, but we want # `str` objects in memory. byte_strings = self._group[INDEX][CURRENCIES][:] if six.PY3: values = [c.decode('ascii') for c in byte_strings] else: values = byte_strings.astype(object) return pd.Index(values) def get_rates(self, rate, quote, bases, dts): """Get rates to convert ``bases`` into ``quote``. See :class:`zipline.data.fx.base.FXRateReader` for details. """ if rate == DEFAULT_FX_RATE: rate = self._default_rate self._check_dts(self.dts, dts) date_ixs = self.dts.searchsorted(dts, side='right') - 1 currency_ixs = self.currencies.get_indexer(bases) return self._read_rate_block( rate, quote, row_ixs=date_ixs, col_ixs=currency_ixs, ) def _read_rate_block(self, rate, quote, row_ixs, col_ixs): try: dataset = self._group[DATA][rate][quote][RATES] except KeyError: raise ValueError( "FX rates not available for rate={}, quote_currency={}." .format(rate, quote) ) # OPTIMIZATION: Row indices correspond to dates, which must be in # sorted order. Rather than reading the entire dataset from h5, we can # read just the interval from min_row to max_row inclusive. # # We don't bother with a similar optimization for columns because in # expectation we're going to load most of the # array, so it's easier to pull all columns and reindex in memory. For # rows, however, a quick and easy optimization is to pull just the # slice from min(row_ixs) to max(row_ixs). min_row = row_ixs[0] max_row = row_ixs[-1] rows = dataset[min_row:max_row + 1] # +1 to be inclusive of end return rows[row_ixs - min_row][:, col_ixs] def _check_dts(self, stored, requested): """Validate that requested dates are in bounds for what we have stored. """ request_start, request_end = requested[[0, -1]] data_start, data_end = stored[[0, -1]] if request_start < data_start: raise ValueError( "Requested fx rates starting at {}, but data starts at {}" .format(request_start, data_start) ) if request_end > data_end: raise ValueError( "Requested fx rates ending at {}, but data ends at {}" .format(request_end, data_end) )
def save(self, repertoire=None, ext=None, path=os.getcwd(), delim="/", returnOK=False, **xargs): ID = self.ID cvOpti = self._cvOpti dataOpti = self._dataOpti # repertoire = ifelse(repertoire is None,StudyProject.DEFAULT_REP,repertoire) # ext=ifelse(ext is None,StudyProject.DEFAULT_EXT,ext) # repo=path+delim+repertoire # if not os.path.exists(repo): # os.makedirs(repo) # filo=repo+delim+ID+ext # sl=StudyProject.clone(self,deep=False) # sl=StudyProject.clone(self,deep=True) ff = {} for k, v_ in self._studies.items(): v = v_.clone() # print(isinstance(v_,implements(IProject))) if isinstance(v_, implements(IProject)): # v__=v.project # v=v_.clone(withoutProject=True) # li2=v._idCvBased li = v.getIdData() # v._project=v__ if dataOpti and v.getProprocessDataFromProjectFn() is not None: v._datas = {} #v._cv={}# #\____/=\____/# if cvOpti: ddd = list(vizGet(v._cv.keys())) if isinstance( v._cv, dict) else v._cv # for k2,v2 in v._cv.items(): v._cv = ddd # v.setDataTrainTest(id_="_ZERO") # v.setIdData(li) v.setProject(v.ID) # print(v) ff[k] = vizGet(v) # print(ff) stu = self._studies cvv = self._cv self._studies = ff # copye=StudyProject.clone(sl,deep=True) # copye._studies=ff # sl=copye def reloadS(self, stu, cvv): self._studies = stu self._cv = cvv if returnOK: return StudyClass( obj=self, fin=lambda obj=self, stu=stu, cvv=cvv: reloadS(obj, stu, cvv)) else: self.__class__.Save(self, ID, repertoire=repertoire, ext=ext, path=path, delim=delim, **xargs) self._studies = stu self._cv = cvv
class BaseSuperviseProject(BaseSupervise, implements(IProject)): EXPORTABLE = [ "project", "idDataProject", "proprocessDataFromProjectFn", "proprocessDataFromProjectFnOpts", "isProcessedDataFromProject", "cv" ] EXPORTABLE_ARGS = dict(underscore=True) cvrCls = CvResultats cviCls = CrossValidItemProject @abstractmethod def __init__(self, ID=None, datas: DatasSupervise = None, models: Models = None, metric: Metric = None, cv: Dict[str, CrossValidItemProject] = studyDico({}), project: StudyProject = None, *args, **xargs): super().__init__(ID, datas, models, metric) self._project = project self._cv = cv def init(self): super().init() self._idDataProject = None # self._idCvBased=None # self._cv=StudyDict() self._proprocessDataFromProjectFn = None self._proprocessDataFromProjectFnOpts = {} self.begin() def begin(self): self._isProcessedDataFromProject = False def setProject(self, p): self._project = p def getProject(self): return self.project def getProprocessDataFromProjectFn(self): return self.proprocessDataFromProjectFn def getIdData(self): return self._idDataProject def setIdData(self, i): self._idDataProject = i def setDataTrainTest(self, X_train=None, y_train=None, X_test=None, y_test=None, namesY=None, id_=None, force=False): if self.isProcessedDataFromProject and not force: raise Exception( "[BaseSuperviseProject setDataTrainTest] processing deja fait pour les données du projet (et force est à False)" ) if id_ is None and np.any( mapl(lambda a: a is None, [X_train, X_test, y_train, y_test])): raise KeyError( "if id_ is None, all of [X_train,X_test,y_train,y_test] must be specified " ) if id_ is not None and self.project is None: raise KeyError("if id_ is specified, project must be set") if id_ is not None and id_ not in self.project.data: raise KeyError("id_ not in global") if id_ is not None: # y=self.project.data[id_] classif = self.isClassif super().setDataTrainTest(*self.project.data[id_].get(), classif=classif) # self._datas=self.project.data[id_] self._idDataProject = id_ else: classif = self.isClassif super().setDataTrainTest(X_train, y_train, X_test, y_test, classif=classif) def proprocessDataFromProject(self, fn=None, force=False, pipelineX=None, pipelineY=None): classif = self.isClassif if self.isProcessedDataFromProject and not force: raise Exception( "[BaseSuperviseProject proprocessDataFromProject] processing deja fait pour les données du projet (et force est à False)" ) if fn is not None: self._proprocessDataFromProjectFn = fn # self._proprocessDataFromProjectFnOpts=dict(classif=classif) super().setDataTrainTest(*fn( *self._datas.get(deep=True, optsTrain=dict(withNamesY=False))), classif=classif) self._isProcessedDataFromProject = True if pipelineX is not None: self.pipelineX = pipelineX if pipelineY is not None: self.pipelineY = pipelineY elif fn is None: X_train, y_train, X_test, y_test = self.datas.get() if pipelineX is not None: X_train = pipelineX.fit_transform(X_train) X_test = pipelineX.fit_transform(X_test) if pipelineY is not None: y_train = pipelineY.fit_transform(y_train) y_test = pipelineY.fit_transform(y_test) return self.proprocessDataFromProject(fnReturn( (X_train, y_train, X_test, y_test)), pipelineX=pipelineX, pipelineY=pipelineY, force=force) def check(self): if not self.isProcessedDataFromProject and self.proprocessDataFromProjectFn is not None: warnings.warn( "Attention vous devez appeler impérativement la méthode proprocessDataFromProject de l'object '{}' reçu pour que les données soit les bonnes" .format(getClassName(self))) def __repr__(self, ind=1, 
orig=False): if orig: return object.__repr__(self) txt = super().__repr__(ind=ind) nt = "\n" + "\t" * ind stri = txt[: -1] + nt + "project : {}," + nt + "idDataProject : {}," + nt + "proprocessDataFromProjectFn : {}," + nt + "isProcessedDataFromProject : {}]" # print(securerRepr(self.project,ind+2,onlyID=True)) # print(self) # print(stri) return stri.format(securerRepr(self.project, ind + 2, onlyID=True), self.idDataProject, self.proprocessDataFromProjectFn, self.isProcessedDataFromProject) def clone(self, ID=None, withoutProjects=True, newIDS=False, *args, **xargs): p = self._project self._project = ( p.ID if not isStr(p) else p) if p is not None else None r = super().clone(ID=ID, newIDS=newIDS, *args, **xargs) self._project = p r._project = p return r @classmethod def Export(cls, obj, save=True, saveArgs={}, me="BaseSuperviseProject", *args, **xargs): # print("ici")# TODO: TWO LOOP ON wHY ? po = obj._project if po is not None and not isStr(po): obj._project = po.ID oo = cls.Export__(cls, obj, save=save, saveArgs=saveArgs, *args, **xargs) try: oo._project = po except: pass return oo @classmethod def import__(cls, ol, loaded, me="BaseSuperviseProject", *args, **xargs): # print("ici")# TODO: TWO LOOP ON wHY ? # print(loaded) if loaded is None: return cls.import___(cls, ol, loaded, *args, **xargs) # print("p",loaded["ID"]) po = loaded["_project"] if po is not None and isStr(po): loaded["_project"] = None # print(loaded["_project"]) rep = cls.import___(cls, ol, loaded, *args, **xargs) # if isStr(po): rep._project = po return rep def addModelsToCurrCV(self, models: List, names=None, nameCV=None, *args, **xargs): cvCurr = self.currCV li = cvCurr.ID cloneE = self.clone() cloneE.setModels(self.models + models, force=True) # return cloneE namesM = cloneE.namesModels[-len(models):] cloneE.setModels(models, names=namesM if names is None else names, force=True) params = cvCurr.args params["cv"] = cvCurr.cv # params["nameCV"]=randomString() if "namesMod" in params: del params["namesMod"] params["noAddCv"] = True params["recreate"] = True cloneE.computeCV(*args, **params, **xargs) res = cloneE.currCV.resultats cvCurr.ID = randomString() if nameCV is None else nameCV for k, v in res.items(): cvCurr.resultats[k] = v cvCurr._based = li self.setModels(self.models + models, force=True) self._nameCvCurr = cvCurr.ID self._cv[cvCurr.ID] = self._cv[li] del self._cv[li] # self.idCvBased=li self._project.addCV(cvCurr.ID, self._cv[cvCurr.ID]) def computeCV(self, cv=5, random_state=42, shuffle=True, classifier=True, nameCV=None, recreate=False, parallel=True, metric=None, models=None, noAddCv=False, **xargs): rep = super().computeCV(cv=cv, random_state=random_state, shuffle=shuffle, classifier=classifier, nameCV=nameCV, recreate=recreate, parallel=parallel, metric=metric, models=models, **xargs) # print(rep) classif = self.isClassif D = self.cviCls self._cv[self._nameCvCurr] = D.fromCVItem(self.currCV) # self._nameCvCurr=resu[0] # rep._based = None if not noAddCv: self._project.addCV(self._nameCvCurr, self.currCV) return rep def duplicateToProject(self, nameDuplicate, force=False): if nameDuplicate in self._project.data and not recreate: warnings.warn( f'study "{nameDuplicate}" already in project, set force=True to force' ) return rep = self._project.addOrGetStudy(nameDuplicate, self.clone, recreate=force) print(f"\tDuplicated From {self.ID}") return rep # from ..study.studyClassif import CvResultatsClassif # from ..study.studyClassif import CrossValidItemClassifProject # from ..study.studyClassif import 
DatasSuperviseClassif
def getOrCreate(cls, ID, repertoire=None, ext=None, path=os.getcwd(), delim="/", imported=True, noDefaults=False, recreate=False, clone=False, deep=True, chut=True, save_load_load={}, save_load_get_path={}, import_kwargs={}): from . import IProject # repertoire = ifelse(repertoire is None,StudyProject.DEFAULT_REP,repertoire) # ext=ifelse(ext is None,StudyProject.DEFAULT_EXT,ext) def clonee(rrt): return getStaticMethodFromObj(rrt, "clone")(rrt, deep=deep) # repo=path+delim+repertoire # print(repo) if recreate: rep = StudyProject(ID) print(f"Project '{ID}' created") return rep # if imported: # repo = cls.get_repertoire(repertoire) repertoire, ext = cls.get_rep_ext(repertoire, ext, chut=chut) # repos=cls.build_repertoire(repertoire,path=os.getcwd(),dp=cls.DEFAULT_PATH if not noDefaults else "",delim="/", # fn=lambda: StudyProject(ID),returnFn=True) repos, (ok, filo) = cls.build_rep_ext( repertoire, ext, ID, dp=cls.DEFAULT_PATH if not noDefaults else "", chut=chut, recreate=recreate, returnFn=True, fn=lambda repo: StudyProject(ID)) # print(repos) # print(ok) # print(filo) # raise Exception("kd") if len(repos) > 1: return repos[1] # print(repo) # if not os.path.exists(repo): # return StudyProject(ID) # filo,ok=cls.build_ext(repo,cls.get_ext(ext),ID, # delim=delim,recreate=recreate,chut=chut) # print(filo) filo = SaveLoad.getPath(filo, addExtension=True, **save_load_get_path) if not os.path.isfile(filo): rep = StudyProject(ID) print(f"Project '{ID}' created") return rep if imported: rep = cls.Import(filo, addExtension=False, chut=chut, noDefaults=True, path="", **import_kwargs) print(f"Project '{ID}' Imported") return rep sl = SaveLoad.load(filo, addExtension=False, chut=chut, **save_load_load) # sl=cls.Load(filo,noDefaults=True,addExtension=False,path="",**xargs) sf = {} for k, v_ in sl._studies.items(): v = ifelse(clone, lambda: clonee(v_), lambda: v_)() if isinstance(v, implements(IProject)): v.begin() v.setProject(sl) #print(v.idData) if sl.dataOpti and v.getProprocessDataFromProjectFn( ) is not None: v.setDataTrainTest(id_=v.getIdData(), force=True) try: v.proprocessDataFromProject( v.getProprocessDataFromProjectFn(), **v._proprocessDataFromProjectFnOpts) except Exception as e: raise e warnings.warn( "[StudyProject getOrCreate] pb with {} when proprocessDataFromProject" .format(k)) pass #print("Error") #print(inst) #print(v.isProcess) v.check() sf[k] = v sl._studies = sf print(f"Project '{ID}' Loaded") return sl
class HTTPClient(implements(Sink)):
    """
    An HTTPClient is a Sink that sends data written to it to the specified
    server
    """

    def __init__(self, **defaults):
        """
        Creates an HTTPClient

        Arguments:
            The keyword arguments provided act as defaults for the fields
            read from records written to this client. For more info see
            HTTPClient.write()
        """
        self.defaults = defaults

    def write(self, records):
        """
        Sends records to an HTTP server

        Each record that is not a dictionary is ignored. Each dictionary
        record is overlayed onto the defaults provided to the client. The
        following fields must be defined in either the record or the overlay.

        * method - the HTTP method to use
        * url - the URL to send to

        The following additional fields may also be provided

        * params - a dictionary of query string parameters
        * data - data to be sent in the body of the request
        * headers - a dictionary of HTTP headers
        """
        for record in records:
            self._write_one(record)

    def _write_one(self, record):
        optionals = {"params": {}, "data": {}, "headers": {}}
        record = ChainMap(record, self.defaults, optionals)
        if "method" not in record:
            raise ValueError("Must provide 'method'")
        if "url" not in record:
            raise ValueError("Must provide 'url'")

        time_sent = time.time()
        response = request(record["method"],
                           record["url"],
                           params=record["params"],
                           data=record["data"],
                           headers=record["headers"])
        response_time = time.time() - time_sent

        if response.ok:
            events.request_success.fire(
                request_type=record["method"],
                name=record["url"],
                response_time=response_time,
                response_length=len(response.content),
            )
        else:
            try:
                response.raise_for_status()
                events.request_failure.fire(
                    request_type=record["method"],
                    name=record["url"],
                    response_time=response_time,
                    response_length=len(response.content),
                    exception=RuntimeError(
                        f"Request failed with {response.content}"))
            except HTTPError as error:
                events.request_failure.fire(
                    request_type=record["method"],
                    name=record["url"],
                    response_time=response_time,
                    response_length=len(response.content),
                    exception=error)
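# Hedged usage sketch for HTTPClient above, based on its docstrings: keyword
# arguments passed to the constructor act as defaults, and each record written
# must supply (or inherit) "method" and "url". The URLs below are hypothetical,
# and the client assumes a Locust-style `events` hook plus `requests.request`.
client = HTTPClient(method="GET", headers={"Accept": "application/json"})
client.write([
    {"url": "http://localhost:8000/health"},
    {"method": "POST", "url": "http://localhost:8000/items", "data": {"name": "x"}},
])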
class HashBlockTestWithBitcoinCore(implements(ICommand)): def __init__(self) -> None: super().__init__() self.name = 'Test hash transaction with Bitcoin core RPC' self.rpcCommand = RPCBitcoinCoreCommand('vincent', 'vincent', 8332) def do_command(self, data_dir): logging.debug('Command %s run with data directory in this path %s', self.name, data_dir) files_blk = os.listdir(data_dir) dao_json = DAOJson() length = len([iq for iq in os.scandir(data_dir)]) start = 0 self.printProgressBar(start, length, length=50) for file_blk in files_blk: logging.debug('****** Files BLK.json ******') logging.debug(file_blk) if os.path.isfile(os.path.join(data_dir, file_blk)): path_file = os.path.join(data_dir, file_blk) object_json = dao_json.load(path_file) logging.debug('***** JSON Object *****') logging.debug('Num blocks %i', len(object_json['blocks'])) blockCount = 1 for block in object_json['blocks']: result = self.rpcCommand.callCommand( 'getblock', block['hashBlock']) if result != "OK": logging.warning(len(object_json['blocks'])) logging.warning( 'At the file %s inside the the block with hash %s is not valid', file_blk, block['hashBlock']) logging.warning(blockCount) else: logging.debug(block['hashBlock']) blockCount += 1 start += 1 self.printProgressBar(start, length, length=50) # Print iterations progress create an submodule separate def printProgressBar(self, iteration, total, prefix='Progress', suffix='Complete', decimals=1, length=100, fill='█', printEnd="\r"): """ Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) bar_length - Optional : character length of bar (Int) """ percent = ("{0:." + str(decimals) + "f}").format( 100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end=printEnd) # Print New Line on Complete if iteration == total: print()
class HDF5FXRateReader(implements(FXRateReader)): """An FXRateReader backed by HDF5. Parameters ---------- group : h5py.Group Top-level group written by an :class:`HDF5FXRateWriter`. default_rate : str Rate to use when ``get_rates`` is called requesting the default rate. """ def __init__(self, group, default_rate): self._group = group self._default_rate = default_rate if self.version != HDF5_FX_VERSION: raise ValueError( "FX Reader version ({}) != File Version ({})".format( HDF5_FX_VERSION, self.version, )) @classmethod def from_path(cls, path, default_rate): """ Construct from a file path. Parameters ---------- path : str Path to an HDF5 fx rates file. default_rate : str Rate to use when ``get_rates`` is called requesting the default rate. """ return cls(h5py.File(path), default_rate=default_rate) @lazyval def version(self): try: return self._group.attrs['version'] except KeyError: # TODO: Remove this. return 0 @lazyval def dts(self): """Column labels for rate groups. """ raw_dts = self._group[INDEX][DTS][:].astype('M8[ns]') if not is_sorted_ascending(raw_dts): raise ValueError(f"dts are not sorted for {self._group}!") return pd.DatetimeIndex(raw_dts, tz='UTC') @lazyval def currencies(self): """Row labels for rate groups. """ # Currencies are stored as fixed-length bytes in the file, but we want # `str` objects in memory. bytes_array = self._group[INDEX][CURRENCIES][:] objects = bytes_array_to_native_str_object_array(bytes_array) return pd.Index(objects) def get_rates(self, rate, quote, bases, dts): """Get rates to convert ``bases`` into ``quote``. See :class:`zipline.data.fx.base.FXRateReader` for details. """ if rate == DEFAULT_FX_RATE: rate = self._default_rate check_dts(dts) col_ixs = self.dts.searchsorted(dts, side='right') - 1 row_ixs = self.currencies.get_indexer(bases) try: dataset = self._group[DATA][rate][quote][RATES] except KeyError: raise ValueError( "FX rates not available for rate={}, quote_currency={}.". format(rate, quote)) # OPTIMIZATION: Column indices correspond to dates, which must be in # sorted order. Rather than reading the entire dataset from h5, we can # read just the interval from min_col to max_col inclusive # # However, we also need to handle two important edge cases: # # 1. row_ixs contains -1 for any currencies we don't know about. # 2. col_ixs contains -1 for dts before the start of self.dts. # # If either of the above cases obtains, we want to return NaN for the # corresponding output locations. # We handle each of these cases by reading raw data into a buffer with # one extra column and one extra row. When we then permute the raw data # into the correct order, any row or column indices with values of -1 # will pull from the extra row/column, which will always contain NaN. slice_begin = max(col_ixs[0], 0) slice_end = max(col_ixs[-1], 0) + 1 # +1 to be inclusive of end date. # Allocate a buffer full of NaNs with one extra column and row. See # OPTIMIZATION notes above. buf = np.full( (len(self.currencies) + 1, slice_end - slice_begin + 1), np.nan, ) buf[:-1, :-1] = dataset[:, slice_begin:slice_end] # Permute the rows into place, pulling from the empty NaN locations for # row and column indices of -1. out = buf[:, col_ixs - slice_begin][row_ixs] # Transpose everything to maintain dts as row labels, currencies as col # labels which is expected everywhere else. return out.transpose()
class QFunctionSGD(implements(IAgent)): def __init__(self, player, numFeatures, batchSize=100, gamma=1, decay=0.99, alpha=0.1, minWeight=0, maxWeight=0): self.player = player self.numFeatures = numFeatures self.weights = np.random.uniform(minWeight, maxWeight, size=numFeatures) self.s, self.a, self.r = None, None, None self.batch = [] self.batches = 0 self.batchSize = batchSize self.gamma = gamma self.alpha = alpha self.q = None # Momentum self.velocity = np.zeros(numFeatures) self.mu = 0.999 # Cache for ADAGRAD and RMSprop self.g = np.zeros(numFeatures) # RMSprop self.decay = decay random.seed(0) def Q(self, state, action): return np.sum(np.dot(state.getFeatures(self.player, action), self.weights)) def getMove(self, state): reward = state.getReward(self.player) actions = state.getActions(self.player) self.updateBatch(state, reward, actions) self.s = copy.deepcopy(state) self.a = actions[argmax([self.Q(state, aP) for aP in actions])] return self.a def getTrainedMove(self, state): actions = state.getActions(self.player) return actions[argmax([self.Q(state, aP) for aP in actions])] def finalize(self, state): self.updateBatch(state, state.getReward(self.player), None) def getInfo(self): return self.weights def updateBatch(self, state, reward, actions): if self.s is not None: if actions is None: q = (1 - self.alpha) * self.Q(self.s, self.a) + self.alpha * reward else: q = (1 - self.alpha) * self.Q(self.s, self.a) + self.alpha * (reward + self.gamma * max([self.Q(state, aP) for aP in actions])) self.batch.append((self.s, self.a, q)) if len(self.batch) == self.batchSize: # Update weights newWeights = self.weights differences = np.array([data[2] - self.Q(data[0], data[1]) for data in self.batch]) calculatedFeatures = [-data[0].getFeatures(self.player, data[1]) for data in self.batch] gradients = np.dot(differences, calculatedFeatures) for j in range(self.numFeatures): gradient = gradients[j] / self.batchSize # Vanilla # newWeights[j] -= self.alpha * gradient # Momentum #self.velocity[j] = self.mu * self.velocity[j] + gradient #newWeights[j] -= self.alpha * self.velocity[j] # ADAGRAD # self.g[j] += gradient ** 2 # newWeights[j] -= self.alpha * gradient / (np.sqrt(self.g[j]) + 0.0000001) # RMSProp self.g[j] = (self.decay * self.g[j]) + ((1 - self.decay) * gradient ** 2) newWeights[j] -= self.alpha * gradient / (np.sqrt(self.g[j]) + 0.000000001) self.weights = newWeights self.batch = []
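# The per-feature loop in QFunctionSGD.updateBatch above applies an
# RMSprop-style update. A vectorized, standalone sketch of the same rule is
# shown below for clarity; `cache` plays the role of self.g and alpha/decay
# mirror the constructor arguments. This is illustrative, not a drop-in
# replacement.
import numpy as np


def rmsprop_step(weights, gradient, cache, alpha=0.1, decay=0.99, eps=1e-9):
    cache = decay * cache + (1 - decay) * gradient ** 2
    weights = weights - alpha * gradient / (np.sqrt(cache) + eps)
    return weights, cache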
class EnemyWeakPropertiesFactory(implements(EnemyPropertiesFactory)):
    def create_move_behavior(self):
        return MoveFast()
class BlazeEventsLoader(implements(PipelineLoader)): """An abstract pipeline loader for the events datasets that loads data from a blaze expression. Parameters ---------- expr : Expr The expression representing the data to load. next_value_columns : dict[BoundColumn -> raw column name] A dict mapping 'next' BoundColumns to their column names in `expr`. previous_value_columns : dict[BoundColumn -> raw column name] A dict mapping 'previous' BoundColumns to their column names in `expr`. resources : dict, optional Mapping from the loadable terms of ``expr`` to actual data resources. odo_kwargs : dict, optional Extra keyword arguments to pass to odo when executing the expression. data_query_time : time, optional The time to use for the data query cutoff. data_query_tz : tzinfo or str The timezone to use for the data query cutoff. Notes ----- The expression should have a tabular dshape of:: Dim * {{ {SID_FIELD_NAME}: int64, {TS_FIELD_NAME}: datetime, {EVENT_DATE_FIELD_NAME}: datetime, }} And other dataset-specific fields, where each row of the table is a record including the sid to identify the company, the timestamp where we learned about the announcement, and the event date. If the '{TS_FIELD_NAME}' field is not included it is assumed that we start the backtest with knowledge of all announcements. """ __doc__ = __doc__.format(SID_FIELD_NAME=SID_FIELD_NAME, TS_FIELD_NAME=TS_FIELD_NAME, EVENT_DATE_FIELD_NAME=EVENT_DATE_FIELD_NAME) @preprocess(data_query_tz=optionally(ensure_timezone)) def __init__(self, expr, next_value_columns, previous_value_columns, resources=None, odo_kwargs=None, data_query_time=None, data_query_tz=None): dshape = expr.dshape if not istabular(dshape): raise ValueError( 'expression dshape must be tabular, got: %s' % dshape, ) required_cols = list( required_event_fields(next_value_columns, previous_value_columns)) self._expr = bind_expression_to_resources( expr[required_cols], resources, ) self._next_value_columns = next_value_columns self._previous_value_columns = previous_value_columns self._odo_kwargs = odo_kwargs if odo_kwargs is not None else {} check_data_query_args(data_query_time, data_query_tz) self._data_query_time = data_query_time self._data_query_tz = data_query_tz def load_adjusted_array(self, domain, columns, dates, sids, mask): raw = load_raw_data(sids, dates, self._data_query_time, self._data_query_tz, self._expr, self._odo_kwargs) return EventsLoader( events=raw, next_value_columns=self._next_value_columns, previous_value_columns=self._previous_value_columns, ).load_adjusted_array( domain, columns, dates, sids, mask, )
class MyInterface(implements(SquareInterface)):
    def calc_square(self, arg1):
        self.__result = arg1**2  # square of arg1

    def show_square(self):
        print('Square = ' + str(self.__result))
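# Hedged sketch of the SquareInterface that MyInterface above presumably
# implements (the interface itself is not shown in the source), followed by a
# usage example. Again assumes the `python-interface` package.
from interface import Interface


class SquareInterface(Interface):
    def calc_square(self, arg1):
        pass

    def show_square(self):
        pass


sq = MyInterface()
sq.calc_square(7)
sq.show_square()  # prints "Square = 49"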
class ActionDecorator(implements(Action)):
    def __init__(self, action: Action):
        self.action = action

    def execute(self):
        pass
class DataLoader(implements(LoadInterface)):
    def load_datum(self, full_path):
        with open(os.path.join(full_path, '__dump__.pickle'), 'rb') as f:
            doc = pickle.load(f)
        return doc
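# Hedged usage sketch for DataLoader above: load_datum expects a directory
# containing a pickle named "__dump__.pickle". The directory path here is
# hypothetical.
doc = DataLoader().load_datum('/data/documents/doc-0001')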
class Node(implements(GenericNode), TransMethods): ''' ''' #Difficulty: #in seconds BLOCK_GENERATION_INTERVAL = 10 #in blocks DIFFICULTY_ADJUSTMENT_INTERVAL = 10 #How much do you get for finding a block COINBASE_AMOUNT = 50 pubfirst_path = os.path.abspath( os.path.join(__file__, '..', '..', 'rsa_keys/FirstPUB.pub')) def __init__(self, wallet, transpool): self.wallet = wallet self.transpoll = transpool self.__firstpublicFileKey = open(self.pubfirst_path, "r").read() self.__firstpublicKey = RSA.importKey(self.__firstpublicFileKey) self.first_transaction = self.getCoinbaseTransaction( self.__firstpublicKey.exportKey(), 0) self.first_block = Block( BlockHeader( 0, "May your spirit be always backed by enough firepower.", 00000000, 0, 0), BlockPayload([self.first_transaction])) self.first_block.currentHash = self.calculateHash( self.first_block.blockHeader, self.first_block.blockPayload) self.blockchain = [self.first_block] self.unspentTransOuts = self.processTransactions( self.blockchain[0].blockPayload.data, [], 0) def getBlockchain(self): """Returns the whole blockchain.""" return self.blockchain def getCurrentTimestamp(self): """Gets the current timestamp in a proper POSIX format (double).""" return datetime.datetime.now().replace(tzinfo=timezone.utc).timestamp() def getLatestBlock(self): """Gets the last block from the Blockchain.""" return self.blockchain[-1] def getUnspentTransOuts(self): """Returns a deepcopy of the unspent Transaction Outs.""" return deepcopy(self.unspentTransOuts) def setUnspentTransOuts(self, newUnspentTransOuts): """Replaces unspentTransOuts and prints that it happened.""" print("Replacing UnspentTransOuts with new ones.") self.unspentTransOuts = newUnspentTransOuts def calculateHash( self, BlockHeader, BlockPayload): #TODO VERIFY the consistency of hash generation """Calculates the hash for the supplied BlockHeader and BlockPayload.""" h = hashlib.sha256( (str(BlockHeader) + '' + str(BlockPayload)).encode("utf-8")) return h.hexdigest() def generateNextBlockHeader(self): """Creates the NextBlockHeader based on the current chain and fills it with the appropriate values.""" previousBlock = self.getLatestBlock() difficulty = self.getDifficulty() nonce = 0 nextIndex = previousBlock.blockHeader.index + 1 nextTimestamp = self.getCurrentTimestamp() newBlockHeader = BlockHeader(nextIndex, previousBlock.currentHash, nextTimestamp, difficulty, nonce) return newBlockHeader def generateNextBlockPayload(self, transactions): """Creates a BlockPayload filled with supplied transactions.""" return BlockPayload(transactions) def generateNextBlockWithTransaction(self, receiverAddress, ammountToSend): """Creates a new block for the purpose of including a new transaction. This method generates the coinbase transaction and the outgoing transaction and calls generateRawNextBlock to add the transactions to the blockchain.""" if not self.isValidAddress(receiverAddress): print( "Receivers address is not valid. We do not support interdimensional transactions." ) if not isinstance(ammountToSend, float): print( "ammountToSend is not a float. 
That is not how numbers work.") coinbaseTrans = self.getCoinbaseTransaction( self.wallet.getPublicFromWallet(), self.getLatestBlock().index + 1) transaction = self.wallet.createTransaction( receiverAddress, ammountToSend, self.wallet.getPrivateFromWallet(), self.getUnspentTxOuts(), self.transpoll.getTransactionPool()) transactionList = [coinbaseTrans, transaction] return self.generateRawNextBlock(transactionList) def findNextBlock(self, block): """Proof of work, finds the hash that matches the given difficulty for a block.""" while True: nonce = 0 hash = self.calculateHash(block.blockHeader, block.blockPayload) if self.hashDifficultyCheck(hash, block.blockHeader.difficulty): block.blockHeader.nonce = nonce block.currentHash = hash return block nonce += 1 def hashDifficultyCheck(self, hash, difficulty): """Checks if the hash when written in binary starts with enough zeroes.""" hashinbinary = bin(int(hash, 16))[2:].zfill(len(hash) * 4) return hashinbinary.startswith('0' * difficulty) def isValidBlockStructure(self, block): """Checks the Block fields if they contain the right types.""" return isinstance(block.blockHeader,BlockHeader) & \ isinstance(block.blockPayload, BlockPayload) & \ isinstance(block.blockHeader.index, int) & \ isinstance(block.blockHeader.previousHash, str) & \ isinstance(block.blockHeader.timestamp, float) & \ isinstance(block.blockHeader.difficulty, int) & \ isinstance(block.blockHeader.nonce, int) & \ isinstance(block.currentHash, str) & \ isinstance(block.blockPayload.data, object) def isTimestampValid(self, newBlock, previousBlock): """Checks if the Timestamp is within the specified time""" return previousBlock.blockHeader.timestamp - 60 < newBlock.blockHeader.timestamp and\ newBlock.blockHeader.timestamp - 60 < self.getCurrentTimestamp() def hashMatchesBlockContent(self, block): """Validation of the block hash.""" return self.calculateHash(block.blockHeader, block.blockPayload) == block.currentHash def hasValidHash(self, block): """Checks the if the hash is correctly calculated, including difficulty.""" if not self.hashMatchesBlockContent(block): print("invalid hash got:" + block.currentHash) return False if not self.hashDifficultyCheck(block.currentHash, block.blockHeader.difficulty): print("hash difficulty is not valid, should be:" + block.blockHeader.difficulty + "got" + bin(int(block.currentHash, 16))[2:].zfill( len(block.currentHash) * 4)) return False return True def isNewBlockValid(self, newBlock, previousBlock): """Checks the validity of any new block.""" if not self.isValidBlockStructure(newBlock): print("invalid block structure") return False if previousBlock.blockHeader.index + 1 != newBlock.blockHeader.index: print("invalid index") return False if previousBlock.currentHash != newBlock.blockHeader.previousHash: print("invalid previous hash") return False if not self.isTimestampValid(newBlock, previousBlock): print("invalid timestamp") if not self.hasValidHash(newBlock): print("invalid hash") return False return True def addBlockToChain(self, newBlock): """Attempts to add a supplied block to the chain. 
Checks the necessary requirements, processes transactions, sets UnspentTXOuts and updates the Pool.""" if self.isNewBlockValid(newBlock, self.getLatestBlock()): unspentOuts = self.processTransactions(newBlock.blockPayload.data, self.getUnspentTransOuts(), newBlock.blockHeader.index) if unspentOuts == None: print("Block transactions are not valid.") return False else: self.blockchain.append(newBlock) self.setUnspentTransOuts(unspentOuts) self.transpoll.updateTransactionPool(self.unspentTransOuts) return True return False def generateRawNextBlock(self, transactions): """Creates the block, fills it with supplied transactions and attempts to add it to the chain and broadcast the success.""" newBlock = self.findNextBlock( Block(self.generateNextBlockHeader(), self.generateNextBlockPayload(transactions))) if self.addBlockToChain(newBlock): #self.broadcastLatest() #TODO not implemented return newBlock else: return None def getDifficulty(self): """Calculates the current difficulty.""" latestBlock = self.blockchain[-1] if len(self.blockchain ) - 1 % self.DIFFICULTY_ADJUSTMENT_INTERVAL == 0 and len( self.blockchain) - 1 != 0: return self.getAdjustedDifficulty(latestBlock) else: return latestBlock.blockHeader.difficulty def getAdjustedDifficulty(self, latestBlock): """Adjusts the difficulty if necessary based on the hashrate calculated from previous blocks.""" prevAdjustmentBlock = self.blockchain[ len(self.blockchain) - self.DIFFICULTY_ADJUSTMENT_INTERVAL] timeExpected = self.BLOCK_GENERATION_INTERVAL * self.DIFFICULTY_ADJUSTMENT_INTERVAL timeTaken = latestBlock.timestamp - prevAdjustmentBlock.timestamp if timeTaken < timeExpected / 2: return prevAdjustmentBlock.difficulty + 1 elif timeTaken > timeExpected * 2: return prevAdjustmentBlock.difficulty - 1 else: return prevAdjustmentBlock.difficulty def getSumDifficulty(self, aBlockchain): """Calculates the sum difficulty of a given chain.""" return reduce((lambda x, y: x + y), list( map(lambda block: 2**block.blockHeader.difficulty, aBlockchain))) def generateNextBlock(self): """Creates a Coinbase transaction then adds the transactions awaiting in the Transaction Pool, lastly it uses generateRawNextBlock to create the actual block.""" coinbaseTrans = self.getCoinbaseTransaction( self.wallet.getPublicFromWallet(), len(self.getBlockchain())) blockData = [coinbaseTrans] + self.transpoll.getTransactionPool() return self.generateRawNextBlock(blockData) def validateBlockChain(self, blockchaintovalidate: []) -> []: """Checks the validity of a given blockchain, return unspent transOuts if it is valid.""" if not str(blockchaintovalidate[0]) == str(self.first_block): print( "This is not even the correct blockchain are you even trying?") return None #Block is valid if the structure is valid and the transactions are valid. 
aUnspentTransOuts = [] for previousBlock, currentBlock, nxtBlock in Utilities.previous_and_next( blockchaintovalidate): if not previousBlock == None and not self.isNewBlockValid( currentBlock, previousBlock): return None aUnspentTransOuts = self.processTransactions( currentBlock.data, aUnspentTransOuts, currentBlock.index) if aUnspentTransOuts == None: print("Invalid transactions") return None return aUnspentTransOuts def replaceChain(self, newBlocks): """If the Node receives a new blockchain this is used to verify and replace it if necessary.""" aUnspentTransOuts = self.isValidChain(newBlocks) if aUnspentTransOuts is not None: validChain = True if validChain and self.getSumDifficulty( newBlocks) > self.getSumDifficulty(self.getBlockchain()): print( "Received a better blockchain, exchanging it for your old one for free!" ) self.blockchain = newBlocks self.setUnspentTransOuts(aUnspentTransOuts) self.transpoll.updateTransactionPool(self.unspentTransOuts) #broadcastLatest() #TODO else: print( "Received a new blockchain but it doesn't look good. In to the trash it goes" ) def getOwnersUnspentTransactionOutputs(self): return self.wallet.findUnspentTransOuts( self.wallet.getPublicFromWallet(), self.getUnspentTransOuts( )) #TODO import findUnspentTransOuts from wallet
class TripletDetectionLoader(implements(Loader)):
    def __init__(self, source, crop_size, anchor_size, obj_types=None, mode='train'):
        self.source = source
        self.crop_size = crop_size
        self.obj_types = obj_types
        self.anchor_size = anchor_size
        self.frame_ids = []
        self.perturbations = {
            'translation_range': [-0.0, 0.0],
            'scaling_range': [2.0, 2.0]
        }
        self.mode = mode
        # index all the frames that have at least one item we want
        # TODO turn this into a re-usable filter module
        if self.mode == 'train':
            for i, frame in enumerate(self.source):
                # materialize the filter so it can be counted (filter is lazy in Python 3)
                crop_objs = list(
                    filter(lambda x: x.obj_type in self.obj_types,
                           frame.get_objects()))
                if len(crop_objs) > 0:
                    self.frame_ids.append(i)
            print('The source has {0} items'.format(len(self.source)))
            if len(self.frame_ids) == 0:
                raise NoFramesException('No Valid Frames Found!')
            print('{0} frames found'.format(len(self.frame_ids)))

    # find a negative crop in a frame, must not contain an object of interest
    def find_negative_crop(self, frame, objects):
        # pick a random crop, check that it does not overlap with an existing target
        # TODO, this is inefficient, fix this algorithm later
        frame_size = frame.image.get_wh()
        max_attempts = 10
        for i in range(0, max_attempts):
            # integer division so randrange receives ints
            randcx = random.randrange(self.crop_size[0] // 2,
                                      frame_size[0] - self.crop_size[0] // 2)
            randcy = random.randrange(self.crop_size[1] // 2,
                                      frame_size[1] - self.crop_size[1] // 2)
            new_box = Box(randcx - self.crop_size[0] / 2,
                          randcy - self.crop_size[1] / 2,
                          randcx + self.crop_size[0] / 2,
                          randcy + self.crop_size[1] / 2)
            box_found = all(
                Box.intersection(x.box, new_box) is None for x in objects)
            if box_found:
                return new_box
        return None

    def load_train(self):
        frame1, frame2, neg_box, pos_box, anchor_box = None, None, None, None, None
        # TODO, this should probably break if never find anything for a while
        while neg_box is None:
            indices = random.sample(self.frame_ids, 2)
            frame1, frame2 = [self.source[x] for x in indices]
            # materialized so random.choice can index into them
            frame1_objs = list(
                filter(lambda x: x.obj_type in self.obj_types,
                       frame1.get_objects()))
            frame2_objs = list(
                filter(lambda x: x.obj_type in self.obj_types,
                       frame2.get_objects()))
            # get random pos boxes
            pos_box = random.choice(frame1_objs).box
            anchor_box = random.choice(frame2_objs).box
            # find random neg crop
            neg_box = self.find_negative_crop(frame1, frame1_objs)

        perturbed_pos_box = RandomPerturber.perturb_crop_box(pos_box, self.perturbations)
        affine_crop0 = crop_image_resize(frame1.image, perturbed_pos_box, self.crop_size)
        pos_crop = affine_crop0.apply_to_image(frame1.image, self.crop_size)
        affine_crop1 = crop_image_resize(frame2.image, anchor_box, self.anchor_size)
        anchor_crop = affine_crop1.apply_to_image(frame2.image, self.anchor_size)
        affine_crop2 = crop_image_resize(frame1.image, neg_box, self.crop_size)
        neg_crop = affine_crop2.apply_to_image(frame1.image, self.crop_size)
        # neg_crop.visualize(display=True,title='neg')

        # now find all the boxes that intersect with the perturbed_pos_box
        intersected_boxes = []
        for obj in filter(lambda x: x.obj_type in self.obj_types, frame1.get_objects()):
            if Box.intersection(obj.box, perturbed_pos_box) is not None:
                intersected_boxes.append(obj.box)
        intersected_boxes = list(
            map(lambda x: affine_crop0.apply_to_box(x), intersected_boxes))

        # test display
        # disp_frame = Frame.from_image_and_objects(pos_crop,[Object(box_crop)])
        # disp_frame.visualize(display=True,title='pos frame')
        # pos_crop.visualize(display=True,title='pos crop')
        pos = torch.Tensor(
            pos_crop.to_order_and_class(
                Ordering.CHW, ValueClass.FLOAT01).get_data().astype(float))
        neg = torch.Tensor(
            neg_crop.to_order_and_class(
                Ordering.CHW, ValueClass.FLOAT01).get_data().astype(float))
        anchor = torch.Tensor(
            anchor_crop.to_order_and_class(
                Ordering.CHW, ValueClass.FLOAT01).get_data().astype(float))
        # pos_map = generate_response_map_from_boxes(pos_crop.get_hw(),intersected_boxes)
        # PTImage.from_2d_numpy(pos_map).visualize(display=True,title='pos frame')
        pos_map = torch.Tensor(
            generate_response_map_from_boxes(pos_crop.get_hw(), intersected_boxes))
        neg_map = torch.Tensor(
            generate_response_map_from_boxes(pos_crop.get_hw()))
        data = [pos, neg, anchor]
        target = [pos_map, neg_map, anchor]
        return TripletDetectionSample(data, target)

    def load_test(self):
        frame = random.choice(self.source)
        random_t = torch.Tensor(3, self.anchor_size[0], self.anchor_size[1])
        frame_t = torch.Tensor(
            frame.image.to_order_and_class(
                Ordering.CHW, ValueClass.FLOAT01).get_data().astype(float))
        data = [frame_t, random_t]
        target = [torch.Tensor(1)]  # dummy target
        return TripletDetectionSample(data, target)

    # pick a frame to generate positive and negative crop
    def next(self):
        return self.load_train() if self.mode == 'train' else self.load_test()
class SQLAlchemyDataProvider(implements(DataProviderInterface)):
    """ A SQLAlchemy implementation of the DataProviderInterface. """

    def __init__(self, app):
        self.app = app
        db.init_app(app)

    def get_all_active_game_ids(self):
        with self.app.app_context():
            games = Game.query.all()
            # compare by value, not identity
            return [
                game.id for game in games
                if game.state == GameDAO.GAME_STATE_IN_PROGRESS
            ]

    def create_game(self, game_id, columns, rows, players):
        with self.app.app_context():
            db.session.add(
                Game(id=game_id,
                     columns=columns,
                     rows=rows,
                     initial_players=dumps(players),
                     active_players=dumps(players)))
            db.session.commit()

    def get_game_by_id(self, game_id, player_id=None, serialize_players=False):
        with self.app.app_context():
            game = Game.query.filter_by(id=game_id).first()
            if not game:
                return None
            else:
                game_dao = GameDAO(game.id, game.columns, game.rows,
                                   state=game.state,
                                   winner=game.winner,
                                   moves=game.moves)
                if player_id or serialize_players:
                    game_dao.active_players_list = loads(game.active_players)
                    game_dao.initial_players_list = loads(game.initial_players)
                if player_id and not any(
                        player_id in player
                        for player in game_dao.active_players_list):
                    return None
                return game_dao

    def get_game_for_player_with_board(self, game_id, player_id):
        with self.app.app_context():
            game = self.get_game_by_id(game_id, player_id=player_id)
            game.board = [['' for x in range(game.rows)]
                          for y in range(game.columns)]
            move_type_gen = (move for move in game.moves
                             if move.move_type == MoveDAO.TYPE_MOVE)
            for move in move_type_gen:
                # drop the piece into the lowest empty row of the chosen column
                row_index = game.rows - 1
                while row_index >= 0:
                    if game.board[move.column][row_index] == '':
                        game.board[move.column][row_index] = move.player_id
                        break
                    row_index -= 1
            return game

    def persist_new_move_and_game_state(self, game_dao, player_id, move_type, column=None):
        with self.app.app_context():
            game = Game.query.filter_by(id=game_dao.id).first()
            game.active_players = dumps(game_dao.active_players_list)
            game.current_active_player_index = game_dao.current_active_player_index
            game.winner = game_dao.winner
            game.state = game_dao.state
            game.moves.append(
                Move(player_id=player_id, move_type=move_type, column=column))
            db.session.commit()
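# Hedged usage sketch for the provider above. The Flask app setup, database URI,
# game id and player names are illustrative assumptions, not part of the original
# module; it also assumes the Game/Move tables already exist (e.g. via db.create_all()).
from flask import Flask
from uuid import uuid4

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'

provider = SQLAlchemyDataProvider(app)
game_id = str(uuid4())
provider.create_game(game_id, columns=7, rows=6, players=['alice', 'bob'])
game = provider.get_game_by_id(game_id, player_id='alice')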
class BasicGridMDP(implements(MDP)):
    def __init__(self, num_rows, num_cols, r_s, gamma, init_dist, terminals=[], debug=False):
        self.num_actions = 4
        self.num_rows = num_rows
        self.num_cols = num_cols
        self.num_states = num_rows * num_cols
        self.gamma = gamma
        self.init_dist = init_dist
        self.terminals = terminals
        self.debug = debug
        self.r_s = r_s
        self.r_sa = self.transform_to_R_sa(self.r_s)
        print("transformed R(s,a)", self.r_sa)
        self.init_states = []
        for s in range(self.num_states):
            if self.init_dist[s] > 0:
                self.init_states.append(s)

        self.P_left = self.get_transitions(policy="left")
        if self.debug:
            print("P_left\n", self.P_left)
        self.P_right = self.get_transitions(policy="right")
        if self.debug:
            print("P_right\n", self.P_right)
        self.P_up = self.get_transitions(policy="up")
        if self.debug:
            print("P_up\n", self.P_up)
        self.P_down = self.get_transitions(policy="down")
        if self.debug:
            print("P_down\n", self.P_down)
        self.Ps = [self.P_left, self.P_right, self.P_up, self.P_down]  #actions:0,1,2,3

    def get_transition_prob_matrices(self):
        return self.Ps

    def get_num_actions(self):
        return self.num_actions

    def get_num_states(self):
        return self.num_states

    def get_readable_actions(self, action_num):
        if action_num == 0:
            return "<"
        elif action_num == 1:
            return ">"
        elif action_num == 2:
            return "^"
        elif action_num == 3:
            return "v"
        else:
            print("error, only four possible actions")
            sys.exit()

    def get_transition_prob(self, s1, a, s2):
        return self.Ps[a][s1][s2]

    #Note that I'm using r_s as the reward dim, not r_sa!
    def get_reward_dimensionality(self):
        return len(self.r_s)

    #NOTE: the dimensionality still needs to be checked.
    def uses_linear_approximation(self):
        return False

    def get_state_action_rewards(self):
        return self.r_sa

    #assume new reward is of the form r_s
    def set_reward_fn(self, new_reward):
        self.r_s = new_reward
        #also update r_sa
        self.r_sa = self.transform_to_R_sa(self.r_s)

    def transform_to_R_sa(self, reward_weights):
        assert (len(reward_weights) == self.num_states)
        return np.tile(reward_weights, self.num_actions)

    def get_transitions(self, policy):
        P_pi = np.zeros((self.num_states, self.num_states))
        if policy == "left":  #action 0
            #always transition one to the left unless already at the left border
            cnt = 0
            for r in range(self.num_rows):
                for c in range(self.num_cols):
                    if cnt not in self.terminals:  #no transitions out of terminal
                        if c > 0:
                            P_pi[cnt, cnt - 1] = 1.0
                        else:
                            P_pi[cnt, cnt] = 1.0
                    #increment state count
                    cnt += 1
        elif policy == "right":  #action 1
            #always transition one to the right unless already at the right border
            cnt = 0
            for r in range(self.num_rows):
                for c in range(self.num_cols):
                    if cnt not in self.terminals:  #no transitions out of terminal
                        if c < self.num_cols - 1:
                            #transition to next state to the right
                            P_pi[cnt, cnt + 1] = 1.0
                        else:
                            #self transition
                            P_pi[cnt, cnt] = 1.0
                    #increment state count
                    cnt += 1
        elif policy == "up":  #action 2
            #always transition one row up unless already at the top border
            cnt = 0
            for r in range(self.num_rows):
                for c in range(self.num_cols):
                    if cnt not in self.terminals:  #no transitions out of terminal
                        if r > 0:
                            P_pi[cnt, cnt - self.num_cols] = 1.0
                        else:
                            P_pi[cnt, cnt] = 1.0
                    #increment state count
                    cnt += 1
        elif policy == "down":  #action 3
            #always transition one row down unless already at the bottom border
            cnt = 0
            for r in range(self.num_rows):
                for c in range(self.num_cols):
                    if cnt not in self.terminals:  #no transitions out of terminal
                        if r < self.num_rows - 1:
                            P_pi[cnt, cnt + self.num_cols] = 1.0
                        else:
                            P_pi[cnt, cnt] = 1.0
                    #increment state count
                    cnt += 1
        return P_pi
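# A small, hedged usage sketch for BasicGridMDP: a 2x3 grid whose bottom-right cell
# (state 5) is terminal and carries all of the reward. The concrete numbers are
# illustrative only.
import numpy as np

num_rows, num_cols = 2, 3
r_s = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0])        # one reward entry per state
init_dist = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0])  # always start in state 0
mdp = BasicGridMDP(num_rows, num_cols, r_s, gamma=0.95,
                   init_dist=init_dist, terminals=[5])

print(mdp.get_num_states())              # 6
print(mdp.get_readable_actions(1))       # ">"
print(mdp.get_transition_prob(0, 1, 1))  # moving right from state 0 lands in state 1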
class RotatingLogsLogger(implements(LoggerInterface)):
    """ The name of this logger """
    LOGGER_NAME = 'Rotating Log'

    """ Logs will contain data worth this time-frame
    @see https://docs.python.org/3/library/logging.handlers.html#logging.handlers.TimedRotatingFileHandler
    and https://www.blog.pythonlibrary.org/2014/02/11/python-how-to-create-rotating-logs/
    """
    ROTATION_DURATION = 'h'

    """ How much time to wait before creating a new log file
    @see https://docs.python.org/3/library/logging.handlers.html#logging.handlers.TimedRotatingFileHandler
    and https://www.blog.pythonlibrary.org/2014/02/11/python-how-to-create-rotating-logs/
    """
    ROTATION_INTERVAL = 12

    """ How many log files to store in addition to the current log file.
    @see https://docs.python.org/3/library/logging.handlers.html#logging.handlers.TimedRotatingFileHandler
    and https://www.blog.pythonlibrary.org/2014/02/11/python-how-to-create-rotating-logs/
    """
    LOGS_BACKUP_COUNT = 60

    def __init__(self):
        self.logger = None

    def debug(self, message: str, context: Dict):
        """ Log a debug level message
        :param message:
        :param context:
        :return:
        """
        logger = self.get_logger()
        logger.debug(message, extra=context)

    def error(self, message: str, context: Dict):
        """ Log an error level message
        :param message:
        :param context:
        :return:
        """
        logger = self.get_logger()
        logger.error(message, extra=context)

    def info(self, message: str, context: Dict):
        """ Log an info level message
        :param message:
        :param context:
        :return:
        """
        logger = self.get_logger()
        logger.info(message, extra=context)

    def warning(self, message: str, context: Dict):
        """ Log a warning level message
        :param message:
        :param context:
        :return:
        """
        logger = self.get_logger()
        logger.warning(message, extra=context)

    def get_logger(self):
        """ Instantiate and return the logger instance
        :return: logger instance
        """
        # TODO:: Implement async logging so that
        # logging does not affect performance as we scale
        if self.logger is not None:
            return self.logger

        logger = CoreLoggerWrapper(self.LOGGER_NAME)
        logger.setLevel(logging.DEBUG)

        # Setup configuration for the rotating logs storage
        handler = TimedRotatingFileHandler(LOGS_LOCATION_PATH,
                                           when=self.ROTATION_DURATION,
                                           interval=self.ROTATION_INTERVAL,
                                           backupCount=self.LOGS_BACKUP_COUNT)
        formatter = HelpersContainer.json_formatter()
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        self.logger = logger
        return self.logger

    def get_log_id(self) -> str:
        """ Get a unique identifier of each log record
        :return:
        """
        return self.logger.get_log_id()
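# Hedged usage sketch for the logger above. The context dict keys are illustrative;
# CoreLoggerWrapper, the JSON formatter and LOGS_LOCATION_PATH come from the
# surrounding project and are assumed to be importable and configured.
logger = RotatingLogsLogger()
logger.info("payment captured", {"order_id": 1234, "amount_cents": 999})
logger.error("payment gateway timeout", {"order_id": 1234, "retry": True})
print(logger.get_log_id())  # unique id of the latest record, as exposed by the wrapper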
class ProgressHooks(implements(PipelineHooks)):
    """
    Hooks implementation for displaying progress.

    Parameters
    ----------
    publisher_factory : callable
        Function producing a new object with a ``publish()`` method that takes
        a ``ProgressModel`` and publishes progress to a consumer.
    """
    def __init__(self, publisher_factory):
        self._publisher_factory = publisher_factory
        self._reset_transient_state()

    def _reset_transient_state(self):
        self._start_date = None
        self._end_date = None
        self._model = None
        self._publisher = None

    @classmethod
    def with_widget_publisher(cls):
        """
        Construct a ProgressHooks that publishes to Jupyter via
        ``IPython.display``.
        """
        return cls(publisher_factory=IPythonWidgetProgressPublisher)

    @classmethod
    def with_static_publisher(cls, publisher):
        """Construct a ProgressHooks that uses an already-constructed publisher.
        """
        return cls(publisher_factory=lambda: publisher)

    def _publish(self):
        self._publisher.publish(self._model)

    @contextmanager
    def running_pipeline(self, pipeline, start_date, end_date):
        self._start_date = start_date
        self._end_date = end_date
        try:
            yield
        except Exception:
            if self._model is None:
                # This will only happen if an error happens in the Pipeline
                # Engine between entering `running_pipeline` and the first
                # `computing_chunk` call. If that happens, just propagate the
                # exception.
                raise
            self._model.finish(success=False)
            self._publish()
            raise
        else:
            self._model.finish(success=True)
            self._publish()
        finally:
            self._reset_transient_state()

    @contextmanager
    def computing_chunk(self, terms, start_date, end_date):
        # Set up model on first compute_chunk call.
        if self._model is None:
            self._publisher = self._publisher_factory()
            self._model = ProgressModel(
                start_date=self._start_date,
                end_date=self._end_date,
            )
        try:
            self._model.start_chunk(terms, start_date, end_date)
            self._publish()
            yield
        finally:
            self._model.finish_chunk(terms, start_date, end_date)
            self._publish()

    @contextmanager
    def loading_terms(self, terms):
        try:
            self._model.start_load_terms(terms)
            self._publish()
            yield
        finally:
            self._model.finish_load_terms(terms)
            self._publish()

    @contextmanager
    def computing_term(self, term):
        try:
            self._model.start_compute_term(term)
            self._publish()
            yield
        finally:
            self._model.finish_compute_term(term)
            self._publish()
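# A minimal, hypothetical publisher for ProgressHooks.with_static_publisher(): any
# object with a publish(model) method works. Printing the ProgressModel is purely
# for illustration; the Jupyter publisher renders a progress widget instead.
class PrintingPublisher:
    def publish(self, model):
        # model is the ProgressModel maintained by the hooks
        print("pipeline progress:", model)

hooks = ProgressHooks.with_static_publisher(PrintingPublisher())
# The hooks object would then be handed to the pipeline engine, e.g.
# engine.run_pipeline(pipeline, start_date, end_date, hooks=[hooks])  # assumed call site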
class RaftServer(implements(ConnectionListner)):
    def __init__(self, name, rpcServer, consensusModule):
        self.id = None
        self.name = name
        self.consensusModule = consensusModule
        self.rpcServer = rpcServer
        self.peerinfo = []

    def start_rpc(self):
        print(f"{self.name} : listening on {self.rpcServer.address}")
        for item in [self.request_vote, self.append_entries]:
            self.rpcServer.register_function(item)
        self.rpcServer.register_listener(self)
        t = Thread(name="Rpcthread", target=self.rpcServer.serve_forever)
        t.daemon = True
        t.start()

    def connected(self, name, client):
        # print(f"{self._name} : received connection from {cluster_info_by_name[name]}")
        # self.peerinfo.append((name, client))
        pass

    def disconnected(self, name, client):
        # self.peerinfo.remove((name, client))
        pass

    def connect_rpc_clients(self):
        while True:
            for server, address in cluster_info_by_name.items():
                if server != self.name:
                    if self.consensusModule.peers.get(server, None) is None:
                        print(f"{self.name} : trying to connect to {server} at {address}")
                        try:
                            remote = RPCProxy(cluster_info_by_name[server],
                                              authkey=b"peekaboo",
                                              timeout=2)
                        except Exception as e:
                            print(f"{self.name} : [ {server} ] generated an exception: {e}")
                        else:
                            self.consensusModule.peers[server] = remote
            time.sleep(1)

    def connect_all_peers(self):
        Thread(name="PeerThread", target=self.connect_rpc_clients, daemon=True).start()

    def request_vote(self, RequestVote):
        print(f"{self.name} : Vote Request : Received vote request from Candidate {RequestVote}")
        return self.consensusModule.handle_request_vote_response(RequestVote)

    def append_entries(self, AppendEntry):
        print(f"{self.name} : AppendEntry Request : Received appendEntry request from Candidate {AppendEntry}")
        return self.consensusModule.handle_append_entry_response(AppendEntry)
class MoveNormal(implements(MoveBehavior)):
    def move(self, enemy):
        enemy.update_x(enemy.get_direction_x())
        enemy.update_y(enemy.get_direction_y())
class Cell(implements(Subject)):
    TYPE2COL = {'path': 'brown', 'tower': 'black', 'other': 'white'}

    def __init__(self, canvas, x, y, size, type='other'):
        self._canv = canvas
        self._x = x
        self._y = y
        self._size = size
        self._ulx = x * size            # upper-left x
        self._lrx = self._ulx + size    # lower-right x
        self._uly = y * size            # upper-left y
        self._lry = self._uly + size    # lower-right y
        self._tag = "cell" + str(x) + str(y)
        self._id = None
        self._type = None  # use subclassing?
        # True when the mouse is in this cell.
        self._mouseIn = False
        self._tower_placement_mode = False
        self._tower_being_placed = None
        self.set_type(type)
        self._id = self._canv.create_rectangle(self._ulx, self._uly,
                                               self._lrx, self._lry,
                                               fill=Cell.TYPE2COL[self._type],
                                               tag=self._tag)
        self._canv.tag_bind(self._id, "<Enter>", self.highlight)
        self._canv.tag_bind(self._id, "<Leave>", self.clear)
        self._canv.tag_bind(self._id, "<Button-1>", self.handleClick)
        # Needed for Subject implementation
        self._observers = []

    def clear(self, event=None):
        self._mouseIn = False
        if self._tower_placement_mode:
            self._canv.delete(self._rangeCircleId)
        self._canv.itemconfig(self._id, fill=Cell.TYPE2COL[self._type])

    def highlight(self, event=None):
        # Show green where the mouse is.
        self._canv.itemconfig(self._id, fill='green')
        self._mouseIn = True
        if self._tower_placement_mode:
            range_size = self._tower_being_placed.get_range()
            self._rangeCircleId = \
                self._canv.create_oval(self.get_center_x() - range_size,
                                       self.get_center_y() - range_size,
                                       self.get_center_x() + range_size,
                                       self.get_center_y() + range_size,
                                       outline="black")

    def registerObserver(self, observer):
        self._observers.append(observer)

    def removeObserver(self, observer):
        self._observers.remove(observer)

    def notifyObservers(self):
        for o in self._observers:
            o.update(self)

    def handleClick(self, event=None):
        self.notifyObservers()

    def __contains__(self, xy):
        '''Return True if the given x,y tuple is in the rectangle, False otherwise.'''
        x, y = xy
        return self._ulx < x < self._lrx and self._uly < y < self._lry

    def get_x(self):
        return self._x

    def get_y(self):
        return self._y

    def get_center(self):
        return self.get_center_x(), self.get_center_y()

    def get_center_x(self):
        return self._ulx + (self._size / 2)

    def get_center_y(self):
        return self._uly + (self._size / 2)

    def set_type(self, type):
        assert type in ('path', 'tower', 'other')
        self._type = type  # should use sub-class?
        if self._id is not None:
            self._canv.itemconfig(self._id, fill=Cell.TYPE2COL[self._type])

    def enable_tower_placement_mode(self, tower):
        self._tower_placement_mode = True
        self._tower_being_placed = tower

    def disable_tower_placement_mode(self):
        self._tower_placement_mode = False
        # clear the same attribute that enable_tower_placement_mode sets
        self._tower_being_placed = None
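# Hedged sketch of wiring an observer to a Cell. notifyObservers() calls update(cell)
# on every registered observer, so any object with that method works; ClickLogger is
# illustrative and not part of the original code, and the canvas setup is left as a
# comment because it requires a running tkinter main loop.
class ClickLogger:
    def update(self, cell):
        print("cell clicked at grid position", cell.get_x(), cell.get_y())

# canvas = tkinter.Canvas(...)                      # assumed to exist already
# cell = Cell(canvas, x=3, y=4, size=32, type='other')
# cell.registerObserver(ClickLogger())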
        trading_days = self.all_sessions()
        try:
            return trading_days[trading_days.searchsorted(dt)]
        except IndexError:
            raise ValueError(
                "Date {} was past the last session for domain {}. "
                "The last session for this domain is {}.".format(
                    dt.date(),
                    self,
                    trading_days[-1].date()
                )
            )


Domain = implements(IDomain)
Domain.__doc__ = """
A domain represents a set of labels for the arrays computed by a Pipeline.

A domain defines two things:

1. A calendar defining the dates to which the pipeline's inputs and outputs
   should be aligned. The calendar is represented concretely by a pandas
   DatetimeIndex.

2. The set of assets that the pipeline should compute over. Right now, the
   only supported way of representing this set is with a two-character
   country code describing the country of assets over which the pipeline
   should compute. In the future, we expect to expand this functionality to
   include more general concepts.
"""
class Car(implements(Moveable)):
    '''
    Instantiation for real cars.
    '''

    def __init__(self, x, v, lane, model: IDM, lane_change: LaneChange, length):
        self.pos, self.vel, self.lane, self.model, self.lane_change, \
            self.length = x, v, lane, model, lane_change, length
        self.acc = 0                  # current acceleration
        self.acc_history = 0          # acc 1 calculation before
        self.tdelay = 0               # cumulative waiting time
        self.Tdelay = T_DELAY_CHANGE  # time to check whether to change lane or not

    def __copy__(self):
        return Car(self.pos, self.vel, self.lane, self.model,
                   self.lane_change, self.length)

    @property
    def vel(self):
        return self.__vel

    @vel.setter
    def vel(self, v):
        if v > SPEED_LIMIT_KMH / 3.6:
            self.__vel = SPEED_LIMIT_KMH / 3.6
        else:
            self.__vel = v

    def time_to_change(self, dt):
        '''
        Count the wait time to check whether to change lane or not.
        :param dt: update time interval
        :return: bool, whether the cumulative time exceeds the threshold
        '''
        self.tdelay += dt
        if self.tdelay > self.Tdelay:
            self.Tdelay -= self.tdelay
            return True
        return False

    def translate(self, dt):
        self.pos += dt * self.vel

    def accelerate(self, dt, fwd=None):
        '''
        The acceleration needs to be calculated before calling this.
        '''
        assert fwd is None
        if fwd is not None:
            # unreachable while the assert above holds; kept from the original code
            self.acceleration(fwd)
        self.vel += self.acc * dt
        if self.vel < 0.:
            self.vel = 0.

    def acceleration(self, fwd=None):
        if fwd is None:
            return self.acc
        else:
            return self.model.calc_acc(self, fwd)

    def distance_to(self, fwd):
        return fwd.pos - self.pos - self.length

    def change(self, f_old, b_old, f_new, b_new):
        return self.lane_change.change_ok(self, f_old, b_old, f_new, b_new)
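# Hedged usage sketch for Car. IDM and LaneChange are defined elsewhere in the project,
# so a trivial stand-in model is used here purely for illustration; only its
# calc_acc(car, fwd) signature is taken from the code above, and all numbers are made up.
class ConstantGapModel:
    def calc_acc(self, car, fwd):
        # brake when closer than 10 m to the car in front, otherwise speed up gently
        return -2.0 if car.distance_to(fwd) < 10.0 else 0.5

leader = Car(x=30.0, v=0.0, lane=0, model=None, lane_change=None, length=5.0)
follower = Car(x=0.0, v=20.0, lane=0, model=ConstantGapModel(), lane_change=None, length=5.0)

dt = 0.5
follower.acc = follower.acceleration(fwd=leader)  # precompute, as accelerate() expects
follower.accelerate(dt)
follower.translate(dt)
print(follower.pos, follower.vel)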