def domains():
    """ List the domains that have errors """
    errors_obj = Data(args)
    domains = errors_obj.domains()
    json_domains = []
    for d in domains:
        json_domains.append(d.__dict__)
    # md5 requires bytes in Python 3, hence the encode()
    return jsonify({'domains': json_domains,
                    'hash': hashlib.md5(str(json_domains).encode()).hexdigest()})
def error_list(self):
    self.screen_offsets[1] = self.write_lines(
        self.screen2,
        'Errors ({0} of {1})'.format(self.error_index + 1, self.error_max),
        1 if self.screens.ERRORS != self.current_screen else 3
    )
    if self.current_domain:
        errors = self.data.errors(self.current_domain.host, self.error_mode)
        self.error_max = len(errors)
        cnt = 0
        self.current_error = None
        for i in errors:
            try:
                wide = curses.COLS > 120
                if self.error_index == cnt:
                    self.current_error = i
                if self.should_skip(self.error_index, cnt, self.window_height,
                                    1 if wide else 2):
                    if wide:
                        mid_width = curses.COLS - (30 + 5 + 10 + 5 + 10 + 1)
                        self.screen2.addstr(
                            '{0: <{width1}}:{1: <{width2}}{2: <{width3}}{3: <{width4}}{4: <{width5}}{5: <{width6}}'.format(
                                i.file[-30:],
                                i.line,
                                i.message.replace("\n", "|")[-mid_width:],
                                i.level,
                                i.count,
                                Data.format_datetime(i.time),
                                width1=30, width2=5, width3=mid_width,
                                width4=10, width5=5, width6=10),
                            curses.color_pair(2 if self.error_index == cnt else 0))
                    else:
                        self.screen2.addstr(
                            '{0: <{width2}}{1: <{width3}}{2: <{width4}}{3: <{width5}}\n{4}\n'.format(
                                "{0}:{1}".format(i.file[-30:], i.line),
                                i.level,
                                i.count,
                                Data.format_datetime(i.time),
                                i.message.replace("\n", "|"),
                                width2=(curses.COLS - 10 - 5 - 10 - 1),
                                width3=10, width4=5, width5=10),
                            curses.color_pair(2 if self.error_index == cnt else 0))
            except curses.error:  # writing past the window edge raises; safe to ignore
                pass
            cnt += 1
    else:
        self.write_lines(self.screen2, 'No domain', 3)
def errors(host, mode=1):
    """ List the errors for a domain """
    errors_obj = Data(args)
    errs = errors_obj.errors(host, mode)
    json_errs = []
    for e in errs:
        err_dict = e.__dict__
        err_dict['ptime'] = pretty_date(err_dict['time'])
        err_dict['iso_time'] = datetime.fromtimestamp(err_dict['time']).isoformat()
        json_errs.append(err_dict)
    # md5 requires bytes in Python 3, hence the encode()
    return jsonify({'errors': json_errs,
                    'hash': hashlib.md5(str(json_errs).encode()).hexdigest()})
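# Both endpoints above ship an md5 digest of the serialized payload next to the
# data, so a polling client can skip re-rendering when nothing changed. A minimal
# client sketch of that idea, assuming a hypothetical server URL and route layout:
import requests

def poll_errors(host, last_hash=None):
    """Fetch errors for a domain; return (errors, hash), or (None, last_hash) if unchanged."""
    resp = requests.get('http://localhost:5000/errors/{0}'.format(host))
    body = resp.json()
    if body['hash'] == last_hash:
        return None, last_hash  # payload unchanged; caller can skip redrawing
    return body['errors'], body['hash']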
def delete_error(id, group=False):
    """ Delete an error based on its ID """
    errors_obj = Data(args)
    try:
        error = errors_obj.get_error(id)
    except LookupError:
        return jsonify({'success': False})
    if group:
        errors_obj.delete_type(error)
    else:
        errors_obj.delete_entry(error)
    return jsonify({'success': True})
# Check for led configuration arguments
matrixOptions = led_matrix_options(args)

# Initialize the matrix
matrix = RGBMatrix(options=matrixOptions)

# Print some basic info on startup
debug.info("{} - v{} ({}x{})".format(SCRIPT_NAME, SCRIPT_VERSION, matrix.width, matrix.height))

# Read scoreboard options from config.json if it exists
config = ScoreboardConfig("config.json", matrix.width, matrix.height)
debug.set_debug_status(config)

# Create a new data object to manage the MLB data
# This will fetch initial data from MLB
data = Data(config)

# Render the standings or an off day screen
def display_standings(matrix, data):
    try:
        StandingsRenderer(matrix, matrix.CreateFrameCanvas(), data).render()
    except:
        # Out-of-season off days don't always return standings, so fall back on the offday renderer
        OffdayRenderer(matrix, matrix.CreateFrameCanvas(),
                       datetime(data.year, data.month, data.day)).render()

# Check if we should just display the standings
if config.display_standings:
    display_standings(matrix, data)
# Otherwise, we'll start displaying games depending on config settings
else:
def estimate_t_and_predict(path_dataset_history, batch_type, batch_sizes,
                           num_predictions, estimated, predicted):
    # `model` and `directory` come from module scope in the original file.
    # Only do the work if at least one batch size still needs estimation or prediction.
    flag = False
    for batch_size in batch_sizes:
        key = path_dataset_history + '/' + batch_type + '/size_' + str(batch_size)
        if key not in estimated or key not in predicted:
            flag = True
    if flag:
        # Load the Data object from a pickle cache, building it from the raw
        # edge list and event log on a cache miss.
        if os.path.isfile(path_dataset_history + '/data_obj.pickle'):
            with open(path_dataset_history + '/data_obj.pickle', 'rb') as f:
                d = pickle.load(f)
        else:
            edges = pd.read_csv(os.path.dirname(path_dataset_history) + '/edges', header=None)
            event_log = pd.read_csv(path_dataset_history + '/event_log', header=None)
            d = Data()
            d.load_data_data_frame(event_log, edges)
            with open(path_dataset_history + '/data_obj.pickle', 'wb') as f:
                pickle.dump(d, f)
        with open(path_dataset_history + '/contagion.pickle', 'rb') as file:
            cc = pickle.load(file)
        with open(path_dataset_history + '/adjacency.pickle', 'rb') as file:
            a = pickle.load(file)
        new_path_dataset_history = path_dataset_history.split('/')
        new_path_dataset_history[4] = 'paper/' + model
        new_path_dataset_history = '/' + os.path.join(*new_path_dataset_history)
        for batch_size in batch_sizes:
            key = path_dataset_history + '/' + batch_type + '/size_' + str(batch_size)
            new_key = new_path_dataset_history + '/' + batch_type + '/size_' + str(batch_size)
            if key not in estimated:
                # Fit thresholds from scratch, save them, then predict.
                m = MCDOI()
                m.assign_contagions_correlation_matrix(cc)
                m.assign_adjacency_matrix(a)
                m.fit_only_thresholds_states(d, batch_type=batch_type, batch_size=batch_size)
                file_name = new_key + '/threshold.pickle'
                os.makedirs(os.path.dirname(file_name), exist_ok=True)
                with open(file_name, 'wb') as threshold_file:
                    pickle.dump(m.thresholds.matrix, threshold_file)
                with open(directory + 'estimated_thresholds', 'a+', encoding='utf-8') as handle:
                    handle.write(key + '\n')
                result = m.predict(num_predictions)
                save_results(result, new_key, num_predictions)
                with open(directory + 'predicted', 'a+', encoding='utf-8') as handle:
                    handle.write(key + '\n')
            elif key not in predicted:
                # Thresholds were already estimated: reload them and only predict.
                with open(new_key + '/threshold.pickle', 'rb') as file:
                    t = pickle.load(file)
                m = MCDOI()
                m.assign_contagions_correlation_matrix(cc)
                m.assign_adjacency_matrix(a)
                m.assign_thresholds_matrix(t)
                m.fill_state_matrix(d)
                result = m.predict(num_predictions)
                save_results(result, new_key, num_predictions)
                with open(directory + 'predicted', 'a+', encoding='utf-8') as handle:
                    handle.write(key + '\n')
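# The data_obj.pickle handling above is a load-or-build cache: deserialize when
# the file exists, otherwise construct the object and persist it. A reusable
# sketch of that pattern (`load_or_build` and the build callback are illustrative
# names, not part of the source):
import os
import pickle

def load_or_build(cache_path, build):
    """Return the pickled object at cache_path, building and caching it on a miss."""
    if os.path.isfile(cache_path):
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    obj = build()  # construct the expensive object only once
    with open(cache_path, 'wb') as f:
        pickle.dump(obj, f)
    return obj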
def run():
    # Get supplied command line arguments
    commandArgs = args()

    if commandArgs.terminal_mode:
        height, width = os.popen('stty size', 'r').read().split()
        termMatrix = TermMatrix()
        termMatrix.width = int(width)
        termMatrix.height = int(height)
        matrix = Matrix(termMatrix)
    else:
        # Check for led configuration arguments
        matrixOptions = led_matrix_options(commandArgs)
        matrixOptions.drop_privileges = False
        # Initialize the matrix
        matrix = Matrix(RGBMatrix(options=matrixOptions))

    # Print some basic info on startup
    debug.info("{} - v{} ({}x{})".format(SCRIPT_NAME, SCRIPT_VERSION,
                                         matrix.width, matrix.height))

    # Read scoreboard options from config.json if it exists
    config = ScoreboardConfig("config", commandArgs, (matrix.width, matrix.height))
    debug.set_debug_status(config)

    data = Data(config)

    # Event used to sleep when rendering
    # Allows the Web API (coming in V2) and pushbutton to cancel the sleep
    # Will also allow a weather alert to interrupt the display board if you want
    sleepEvent = threading.Event()

    if data.config.dimmer_enabled:
        dimmer = Dimmer(data, matrix)
        dimmerThread = threading.Thread(target=dimmer.run, args=())
        dimmerThread.daemon = True
        dimmerThread.start()

    if data.config.pushbutton_enabled:
        pushbutton = PushButton(data, matrix, sleepEvent)
        pushbuttonThread = threading.Thread(target=pushbutton.run, args=())
        pushbuttonThread.daemon = True
        pushbuttonThread.start()

    if data.config.weather_enabled:
        if data.config.weather_data_feed.lower() == "owm":
            owmweather = owmWxWorker(data, sleepEvent)
            owmweatherThread = threading.Thread(target=owmweather.run, args=())
            owmweatherThread.daemon = True
            owmweatherThread.start()
        elif data.config.weather_data_feed.lower() == "ec":
            ecweather = ecWxWorker(data, sleepEvent)
            ecweatherThread = threading.Thread(target=ecweather.run, args=())
            ecweatherThread.daemon = True
            ecweatherThread.start()
        else:
            debug.error("No valid weather providers selected, skipping weather feed")
            data.config.weather_enabled = False

    if data.config.weather_show_alerts and data.config.weather_enabled:
        if data.config.weather_alert_feed.lower() == "ec":
            ecalert = ecWxAlerts(data, sleepEvent)
            ecalertThread = threading.Thread(target=ecalert.run, args=())
            ecalertThread.daemon = True
            ecalertThread.start()
        elif data.config.weather_alert_feed.lower() == "nws":
            nwsalert = nwsWxAlerts(data, sleepEvent)
            nwsalertThread = threading.Thread(target=nwsalert.run, args=())
            nwsalertThread.daemon = True
            nwsalertThread.start()
        else:
            debug.error("No valid weather alerts providers selected, skipping alerts feed")
            data.config.weather_show_alerts = False

    #
    # Run a check for updates against GitHub on a background thread on a scheduler
    #
    updateCheck = True
    if updateCheck:
        scheduler = BackgroundScheduler()
        checkupdate = UpdateChecker(data, scheduler)
        scheduler.start()

    MainRenderer(matrix, data, sleepEvent).render()
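# Each optional feature above follows the same recipe: build the worker, wrap its
# run method in a daemon thread, start it. A minimal sketch of that pattern (the
# helper name `start_daemon` is illustrative, not part of the scoreboard code):
import threading

def start_daemon(target):
    """Run target on a background thread that won't block interpreter exit."""
    t = threading.Thread(target=target, args=())
    t.daemon = True  # dies with the main program
    t.start()
    return t

# usage sketch: start_daemon(dimmer.run); start_daemon(pushbutton.run)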
from data.data import Data
import renderers.standings
import mlbgame
import debug

SCRIPT_NAME = "MLB LED Scoreboard"
SCRIPT_VERSION = "3.0.1"

# Get supplied command line arguments
args = args()

# Check for led configuration arguments
matrixOptions = led_matrix_options(args)

# Initialize the matrix
matrix = RGBMatrix(options=matrixOptions)

# Print some basic info on startup
debug.info("{} - v{} ({}x{})".format(SCRIPT_NAME, SCRIPT_VERSION, matrix.width, matrix.height))

# Read scoreboard options from config.json if it exists
config = ScoreboardConfig("config", matrix.width, matrix.height)
debug.set_debug_status(config)

# Create a new data object to manage the MLB data
# This will fetch initial data from MLB
data = Data(config)

MainRenderer(matrix, data).render()
def notify_realtime_earnings(self):
    # Real-time position profit/loss monitoring and notification
    while True:
        df_position = DataFrame()
        if os.path.exists(self.__file_path_position):
            try:
                df_position = Utils.read_data(self.__file_path_position)
            except:
                # The file may be mid-write; wait a minute and retry
                sleep(60)
                continue
        else:
            ERROR('file {0} not exists.'.format(self.__file_path_position))
            return
        cur_time = Utils.cur_time()
        hour = int(cur_time.split(':')[0])
        minute = int(cur_time.split(':')[1])
        if hour < 9 or (hour == 9 and minute < 30):
            LOG('notify_realtime_earnings: morning\n{0} hours {1} minutes later market open'
                .format(int(Utils.now2market_morning_time() / 3600),
                        int(Utils.now2market_morning_time() % 3600 / 60)))
            sleep(Utils.now2market_morning_time())
        elif (hour == 11 and minute >= 30) or hour == 12:
            LOG('notify_realtime_earnings: nooning\n{0} hours {1} minutes later market open'
                .format(int(Utils.now2market_nooning_time() / 3600),
                        int(Utils.now2market_nooning_time() % 3600 / 60)))
            sleep(Utils.now2market_nooning_time())
        elif hour >= 15:
            LOG('notify_realtime_earnings: market close')
            break
        content_notify = '{0}\n'.format(cur_time)
        total_earn = 0
        for index in df_position.index:
            code = '%06d' % df_position.loc[index]['code']
            name = df_position.loc[index]['name']
            try:
                df_realtime_quotes = Data().get_realtime_quotes(code)
                buy_price = float(df_position.loc[index]['buy_price'])
                cur_price = float(df_realtime_quotes['price'])
                position = df_position.loc[index]['position']
                earn = (cur_price - buy_price) * position
                total_earn += earn
                content_notify += '-{0} {1} cur:{2:.2f} cost:{3:.2f} sell:{4:.2f} position:{5} earn:{6:.2f}\n'\
                    .format(code, name, cur_price, buy_price,
                            float(df_position.loc[index]['sell_price']),
                            position, earn)
            except:
                pass
        content_notify += 'total_earn:{0:.2f}'.format(total_earn)
        if SEND_EMAIL:
            Utils.send_email(content_notify, 'position notification')
            sleep(60 * 10)
        else:
            LOG(content_notify)
            sleep(60 * 10)
    analyse_class = model.spill_wave.Analyse()
    list_process = []
    list_process.append(Process(target=Notify().notify_investment_opportunity))
    # list_process.append(Process(target=Notify().notify_realtime_earnings))
    list_process.append(Process(target=Notify().serve_query_request))
    for process in list_process:
        process.start()
    for process in list_process:
        process.join()
else:
    # Data can only be updated after 18:00 on the current day
    file_date = Utils.cur_date()
    Data(file_date).update_all()
    analyse_class = model.spill_wave.Analyse(file_date)
    # analyse_class.statistics()
    list_process = []
    list_process.append(Process(target=analyse_class.find_spill_wave_stock))
    list_process.append(Process(target=Profit().calc_profit_grow))
    list_process.append(Process(target=Pe().calc_pe))
    for process in list_process:
        process.start()
    for process in list_process:
        process.join()
    Basics().create_basics_table()
# These columns are treated as independent by the model, which means that
# the dependence of ``G`` and ``F`` can not be learned, but allows more
# efficient batch processing.
#
batch_size = 20
# bptt = 35
bptt = 59

# dataset = 'data/mini_train_set.txt'
# dataset = 'data/full_train_set.txt'
# dataFile = 'data/half_train_set.txt'
dataFile = 'data/1k_train_set.txt'

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

dataset = Data(dataFile, device, BPTT=bptt, BATCH_SIZE=batch_size)
train_data = dataset.train
val_data = dataset.train
test_data = dataset.train
print(train_data.shape)

# val_data = batchify(val_txt, eval_batch_size)
# test_data = batchify(test_txt, eval_batch_size)

print("train_data", train_data.shape)
# print("val_data", val_data.shape)
# print("test_data", test_data.shape)
# print("train_data 0,0", train_data[0][0])

######################################################################
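# The comment above paraphrases the PyTorch language-model tutorial's batching
# scheme: the token stream is split into batch_size independent columns, trading
# cross-column context for parallelism. A minimal sketch of that reshaping,
# assuming a 1-D tensor of token ids (`batchify` here is illustrative):
import torch

def batchify(data, batch_size):
    """Reshape a 1-D token tensor into (seq_len, batch_size) independent columns."""
    n_batch = data.size(0) // batch_size
    data = data[:n_batch * batch_size]  # drop the ragged tail
    return data.view(batch_size, -1).t().contiguous()

print(batchify(torch.arange(26), 4).shape)  # torch.Size([6, 4])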
import sys
sys.path.append("..")
import json
import time
import io
from data.data import Data, fileLink

data = Data()


class Model:
    # Get data from the database
    def getDataFromDatabase(self):
        try:
            fileOpen = io.open(fileLink, mode='r', encoding='utf-8')
            for line in fileOpen:
                item = json.loads(line)
                inputTime = item.get('Input date')
                item['Input date'] = time.strftime("%d/%m/%Y", time.localtime(inputTime))
                print(item)
        except IOError:
            print(fileLink)

    def getDataFromDatabasePassExam(self):
        try:
            fileOpen = io.open(fileLink, mode='r', encoding='utf-8')
            for line in fileOpen:
'''
Created on 02/12/2015

@author: Alexandre Yukio Yamashita
'''
from data.data import Data
from sklearn.decomposition import PCA

if __name__ == '__main__':
    path = "../homesite_data/resources/parsed_data.bin"
    homesite = Data()
    homesite.load_parsed_data(path)
    print(homesite.train_x.shape)

    # Keep enough components to explain 99% of the variance
    pca = PCA(n_components=0.99)
    pca.fit(homesite.train_x)
    print(pca.transform(homesite.train_x).shape)
               x_test[y_test_label == label], \
               y_test[y_test_label == label]

    def get_specific_label_poison_data(self, label):
        x_train, y_train, x_test, y_test = self.get_poison_data()
        y_train_label = np.argmax(y_train, axis=1)
        y_test_label = np.argmax(y_test, axis=1)
        return x_train[y_train_label == label], \
               y_train[y_train_label == label], \
               x_test[y_test_label == label], \
               y_test[y_test_label == label]

    def get_specific_label_data(self, label):
        y_train = self.y_train.argmax(axis=1)
        y_test = self.y_test.argmax(axis=1)
        return self.x_train[y_train == label], \
               self.y_train[y_train == label], \
               self.x_test[y_test == label], \
               self.y_test[y_test == label], \
               self.is_poison_train[y_train == label], \
               self.is_poison_test[y_test == label]


if __name__ == '__main__':
    json_name = sys.argv[1]
    param = Param(json_name)
    param.load_json()
    data = Data(param)
    data.load_data()
    data.gen_backdoor()
import numpy as np
import pandas as pd
import seaborn as sns
from data.data import Data
from feature_engineering import feat_utils
import pickle

# with open(r'../data/training_df.pkl', 'rb') as f:
#     df = pickle.load(f)
# df = Data('full').df

''' before feature engineering '''
# sns_plt = sns.countplot(y=df["attack_type"])
# sns_plt = sns.distplot(df["duration"], kde=False)
# sns_plt = sns.distplot(df["same_srv_rate"], kde=False)
# sns_plt = sns.countplot(x=df["is_host_login"])
# sns_plt = sns.countplot(df["protocol_type"])
# sns_plt = sns.countplot(y=df["service"])

''' during feature engineering '''
# df = feat_utils.merge_sparse_feature(df)
# sns_plt = sns.countplot(y=df["service"])
# sns_plt = sns.countplot(y=df["attack_type"])
import os
from flask import *
from data.data import Data
from services.Cheltuieli import Cheltuieli

service = Cheltuieli()
cheltuiala1 = Data("1", "13", "haine", "pantofi")
service.add_cheltuiala(cheltuiala1)
cheltuiala1 = Data("2", "12", "haine", "sapca")
service.add_cheltuiala(cheltuiala1)
cheltuiala1 = Data("3", "11", "haine", "blug")
service.add_cheltuiala(cheltuiala1)

app = Flask(__name__)
app._static_folder = os.path.abspath("templates/static")
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0


@app.route('/', methods=['GET'])
def index():
    return render_template('/index.html')


@app.route('/afiseaza', methods=['GET', 'PUT'])
def afiseaza():
    cheltuieli = service.get_all_cheltuieli()
    total_cheltuieli = 0
    code = 200
class BitcoinEnv(Environment):
    EPISODE_LEN = 1000

    def __init__(self, hypers, cli_args={}):
        """Initialize hyperparameters (done here instead of __init__ since OpenAI-Gym controls instantiation)"""
        self.hypers = h = Box(hypers)
        self.cli_args = cli_args

        # cash/val start @ about $3.5k each. You should increase/decrease depending on how much you'll put into
        # your exchange accounts to trade with. Presumably the agent will learn to work with what you've got
        # (cash/value are state inputs), but starting capital does affect the learning process.
        self.start_cash, self.start_value = 5., 5.  # .4, .4

        # We have these "accumulator" objects, which collect values over steps, over episodes, etc. Easier to
        # keep same-named variables separate this way.
        acc = dict(
            ep=dict(
                i=-1,  # +1 in reset, makes 0
                returns=[],
                uniques=[],
            ),
            step=dict(),  # setup in reset()
        )
        self.acc = Box(train=copy.deepcopy(acc), test=copy.deepcopy(acc))
        self.data = Data(ep_len=self.EPISODE_LEN, arbitrage=h.custom.arbitrage, indicators={})

        # gdax min order size = .01btc; kraken = .002btc
        self.min_trade = {Exchange.GDAX: .01, Exchange.KRAKEN: .002}[EXCHANGE]
        self.update_btc_price()

        # Action space
        # See {last_good_commit} for action_types other than 'single_discrete'.
        # In single_discrete, we allow buy 2%, sell 2%, hold (and nothing else).
        self.actions_ = dict(type='int', shape=(), num_actions=3)

        # Observation space
        # width = step-window (150 time-steps)
        # height = nothing (1)
        # channels = features/inputs (price actions, OHLCV, etc.)
        self.cols_ = self.data.df.shape[1]
        shape = (h.custom.net.step_window, 1, self.cols_)
        self.states_ = dict(type='float', shape=shape)

    def __str__(self):
        return 'BitcoinEnv'

    def close(self):
        pass

    @property
    def states(self):
        return self.states_

    @property
    def actions(self):
        return self.actions_

    # We don't want random-seeding for reproducibility! We _want_ two runs to give different results, because
    # we only trust the hyper combo which consistently gives positive results.
    def seed(self, seed=None):
        return

    def update_btc_price(self):
        self.btc_price = 8000
        # try:
        #     self.btc_price = int(requests.get(f"https://api.cryptowat.ch/markets/{EXCHANGE.value}/btcusd/price").json()['result']['price'])
        # except:
        #     self.btc_price = self.btc_price or 8000

    def xform_data(self, df):
        # TODO here was autoencoder, talib indicators, price-anchoring
        raise_refactor()

    def get_next_state(self):
        acc = self.acc[self.mode.value]
        X, _ = self.data.get_data(acc.ep.i, acc.step.i)
        return X.values[:, np.newaxis, :]  # height, width(nothing), depth

    def reset(self):
        acc = self.acc[self.mode.value]
        acc.step.i = 0
        acc.step.cash, acc.step.value = self.start_cash, self.start_value
        acc.step.hold_value = self.start_value
        acc.step.totals = Box(
            trade=[self.start_cash + self.start_value],
            hold=[self.start_cash + self.start_value])
        acc.step.signals = []
        if self.mode == Mode.TEST:
            acc.ep.i = self.acc.train.ep.i + 1
        elif self.mode == Mode.TRAIN:
            acc.ep.i += 1
        self.data.reset_cash_val()
        self.data.set_cash_val(acc.ep.i, acc.step.i, 0., 0.)
        return self.get_next_state()

    def execute(self, action):
        acc = self.acc[self.mode.value]
        totals = acc.step.totals
        h = self.hypers
        act_pct = {0: -.02, 1: 0, 2: .02}[action]
        act_btc = act_pct * (acc.step.cash if act_pct > 0 else acc.step.value)

        fee = {
            Exchange.GDAX: 0.0025,  # https://support.gdax.com/customer/en/portal/articles/2425097-what-are-the-fees-on-gdax-
            Exchange.KRAKEN: 0.0026  # https://www.kraken.com/en-us/help/fees
        }[EXCHANGE]

        # Perform the trade.
        # In training mode, we'll let it dip into negative here, but then kill and punish below.
        # In testing/live, we'll just block the trade if they can't afford it.
        if act_pct > 0:
            if acc.step.cash < self.min_trade:
                act_btc = -(self.start_cash + self.start_value)
            elif act_btc < self.min_trade:
                act_btc = 0
            else:
                acc.step.value += act_btc - act_btc * fee
                acc.step.cash -= act_btc
        elif act_pct < 0:
            if acc.step.value < self.min_trade:
                act_btc = -(self.start_cash + self.start_value)
            elif abs(act_btc) < self.min_trade:
                act_btc = 0
            else:
                acc.step.cash += abs(act_btc) - abs(act_btc) * fee
                acc.step.value -= abs(act_btc)

        acc.step.signals.append(float(act_btc))  # clipped signal
        # acc.step.signals.append(np.sign(act_pct))  # indicates an attempted trade

        # next delta. [1,2,2].pct_change() == [NaN, 1, 0]
        # pct_change = self.prices_diff[acc.step.i + 1]
        _, y = self.data.get_data(acc.ep.i, acc.step.i)  # TODO verify
        pct_change = y[self.data.target]

        acc.step.value += pct_change * acc.step.value
        total_now = acc.step.value + acc.step.cash
        totals.trade.append(total_now)

        # Calculate what the reward would be "if I held", to compute the actual reward's _advantage_ over holding
        hold_before = acc.step.hold_value
        acc.step.hold_value += pct_change * hold_before
        totals.hold.append(acc.step.hold_value + self.start_cash)

        reward = 0

        acc.step.i += 1
        self.data.set_cash_val(acc.ep.i, acc.step.i,
                               acc.step.cash / self.start_cash,
                               acc.step.value / self.start_value)
        next_state = self.get_next_state()

        terminal = int(acc.step.i + 1 >= self.EPISODE_LEN)
        if acc.step.value < 0 or acc.step.cash < 0:
            terminal = True
        if terminal and self.mode in (Mode.TRAIN, Mode.TEST):
            # We're done.
            acc.step.signals.append(0)  # Add one last signal (to match length)
            reward = self.get_return()
            if np.unique(acc.step.signals).shape[0] == 1:
                reward = -(self.start_cash + self.start_value)  # slam if you don't do anything
        if terminal and self.mode in (Mode.LIVE, Mode.TEST_LIVE):
            raise_refactor()

        # if acc.step.value <= 0 or acc.step.cash <= 0: terminal = 1
        return next_state, terminal, reward

    def get_return(self, adv=True):
        acc = self.acc[self.mode.value]
        totals = acc.step.totals
        trade = (totals.trade[-1] / totals.trade[0] - 1)
        hold = (totals.hold[-1] / totals.hold[0] - 1)
        return trade - hold if adv else trade

    def episode_finished(self, runner):
        if self.mode == Mode.TRAIN:
            return True
        acc = self.acc.test
        totals = acc.step.totals
        signals = np.array(acc.step.signals)
        n_uniques = np.unique(signals).shape[0]
        ret = self.get_return()
        hold_ret = totals.hold[-1] / totals.hold[0] - 1

        acc.ep.returns.append(float(ret))
        acc.ep.uniques.append(n_uniques)

        # Print (limit to note-worthy)
        lt_0 = (signals < 0).sum()
        eq_0 = (signals == 0).sum()
        gt_0 = (signals > 0).sum()
        completion = int(acc.ep.i * self.data.ep_stride / self.data.df.shape[0] * 100)
        steps = f"\tSteps: {acc.step.i}"
        fm = '%.3f'
        print(f"{completion}%{steps}\tTrade: {fm%ret}\tHold: {fm%hold_ret}\tTrades:\t{lt_0}[<0]\t{eq_0}[=0]\t{gt_0}[>0]")
        return True

    def run_deterministic(self, runner, print_results=True):
        next_state, terminal = self.reset(), False
        while not terminal:
            next_state, terminal, reward = self.execute(
                runner.agent.act(next_state, deterministic=True, independent=True))
        if print_results:
            self.episode_finished(None)

    def train_and_test(self, agent):
        runner = Runner(agent=agent, environment=self)
        train_steps = 20000  # TODO something self.data.df.shape[0]... self.EPISODE_LEN...
        try:
            while self.data.has_more(self.acc.train.ep.i):
                self.mode = Mode.TRAIN
                # max_episode_timesteps not required, since we kill on (cash|value)<0 or max_repeats
                runner.run(timesteps=train_steps)
                self.mode = Mode.TEST
                self.run_deterministic(runner, print_results=True)
        except IndexError:
            # FIXME data.has_more() issues
            pass
        except KeyboardInterrupt:
            # Lets us kill training with Ctrl-C and skip straight to the final test. This is useful in case
            # you're keeping an eye on the terminal and see "there! right there, stop, you found it!" (where
            # early_stop & n_steps are the more methodical approaches).
            print('Keyboard interrupt, killing training')
            pass

    def run_live(self, agent, test=True):
        raise_refactor()
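# get_return above rewards the trading return's *advantage* over buy-and-hold
# rather than the raw return. A worked illustration of that arithmetic, with
# made-up equity curves (only the first and last totals matter):
trade_totals = [10.0, 10.8]  # portfolio following the agent's trades
hold_totals = [10.0, 10.5]   # same starting capital, never traded

trade_ret = trade_totals[-1] / trade_totals[0] - 1  # 0.08
hold_ret = hold_totals[-1] / hold_totals[0] - 1     # 0.05
print(round(trade_ret - hold_ret, 2))  # 0.03: the agent is rewarded only for its edge over holding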
from data.data import Data
from data.numpy_file import save_np_array, load_np_array
from data.plot import plot
import numpy as np
from statistics.confusion_matrix import confusion_matrix
from statistics.performance import compute_performance_metrics, compute_auc
# assumed import: the snippet uses AdaBoostClassifier without showing its import
from sklearn.ensemble import AdaBoostClassifier

if __name__ == '__main__':
    '''
    Classify data changing balancing ratio.
    '''
    # Train and test random forests.
    # load_path = "../homesite_data/resources/oversampled_normalized_data_ratio_2.5.bin"
    load_path = "../homesite_data/resources/oversampled_normalized_data_ratio_2.bin"
    homesite = Data()
    homesite.load_sliptted_data(load_path)
    del homesite.test_x  # Deleted to save memory.

    clf_ann = NeuralNetwork(path="../homesite_data/ann_weights.bin", lr=0.00005, lamb=0)
    train_output_ann = clf_ann.get_hidden_output(homesite.train_x)
    validation_output_ann = clf_ann.get_hidden_output(homesite.validation_x)

    # train_output_ann = np.hstack((train_output_ann, homesite.train_x))
    # validation_output_ann = np.hstack((validation_output_ann, homesite.validation_x))

    for c in range(1, 10):
        # Train classifier.
        print("Training classifier.")
        clf = AdaBoostClassifier(n_estimators=1 + 100 * c)
        clf.fit(train_output_ann, homesite.train_y)
from controller import Controller
from data.data import Data

# Raw string so the backslash in the Windows path isn't treated as an escape
c = Controller(Data(filename=r'data\data.csv'), learning_rate=0.0001)
c.gradient_descent(iterations=500)
c.plot()
import tensorflow as tf
from data.data import Data
from data.metadata import get_metadata
from monitor import Monitor
from train import Train
from model import Model
from config import init_config

opts = init_config()

# initialization of datasets
meta_data = get_metadata()
train_set = Data(meta_data['train'])
val_set = Data(meta_data['val'])

# initialization of control config
monitor = Monitor(opts)

# construction of the model
model = Model(opts)

# training
trainer = Train(monitor, model)
trainer.train(opts, monitor, train_set, val_set, model)

# testing
# tester = Test(model)
# tester.test(monitor, train_set, model)
                if index < 3:
                    content += ts.notice_content(df_news['url'][index])
                    content += '\n\n'
            content += '\n'
        except:
            pass
        LOG(content)
        dict_stock_info[code] = content
    return dict_stock_info


if __name__ == '__main__':
    df_model_basics = Basics().get_basics().set_index('code')
    ls_all_stock_data = Data().get_all_stock_data()
    df_latest_news = ts.get_latest_news(top=1000)
    while True:
        content = ''
        str_input = input('input stock code or \'news\':\n')
        try:
            int(str_input)
            pattern = re.compile(
                '[●┊\-■:∶%;!?;&.,:?!.‘’“”"\'、,。><(())\[\]\{\}【】―《》『』//・…_——\s]+')
            ls_code = re.split(pattern, str_input.strip())
            Query.query_stock_info(ls_code, ls_all_stock_data, df_model_basics)
        except:
            if len(str_input.split()) == 1:
                for index in range(0, 20):
        self.w_h = np.load(f)
        self.w_o = np.load(f)
        f.close()
        self.w_h = theano.shared(self._floatX(self.w_h))
        self.w_o = theano.shared(self._floatX(self.w_o))


if __name__ == '__main__':
    '''
    Train neural network.
    '''
    # oversampled_path = "../../homesite_data/resources/oversampled_normalized_data_ratio_2.5.bin"
    oversampled_path = "../../homesite_data/resources/oversampled_normalized_data_ratio_2.bin"
    homesite_data = Data()
    homesite_data.load_sliptted_data(oversampled_path, one_hot=True)

    # Train neural network.
    clf = NeuralNetwork(input_units=644, hidden_units=50, output_units=2,
                        lr=0.00005, lamb=0.)
    # clf.fit(homesite_data, batch_size=128,
    #         max_iterations=100, save_interval=10,
    #         path="../homesite_data/ann_weights.bin")

    # Test neural network.
    # clf = NeuralNetwork(path="../../homesite_data/ann_weights.bin", lr=0.05, lamb=0.000005)

    # Test classifier.
    print('Testing classifier.')
    predicted_labels = clf.predict_proba(homesite_data.validation_x)[:, 1]
def query_stock_info(ls_code, ls_all_stock_data, df_model_basics):
    [df_stock_basics, df_quarter_report_data, df_profit_data, df_operation_data,
     df_growth_data, df_debtpaying_data, df_cashflow_data, df_divi_data,
     df_forcast_quarter_report_data, df_restrict_stock_data,
     df_concept_classified] = ls_all_stock_data
    space = lambda x: ' ' * x  # indent so different quarters are easy to tell apart
    pd.options.mode.chained_assignment = None  # suppress chained-assignment warnings; default='warn'
    dict_stock_info = {}
    for code in ls_code:
        try:
            basics = df_stock_basics.loc[int(code)]
            content = '\n{0} {1}\n'.format(code, basics['name'])
            try:
                cur_price = float(Data().get_k_line_data(code).iloc[-1]['close'])
            except:
                cur_price = float(Data().get_realtime_quotes(code)['price'])
            content += ('\nbasics:\nlisting date:{0}\nindustry:{1}\tindustry PE rank:{6}\n'
                        'area:{2}\nPE (dynamic):{3}\nPE (static):{4:.2f}\nPB:{5}\n').format(
                basics['timeToMarket'], basics['industry'], basics['area'],
                basics['pe'], cur_price / float(basics['esp']), float(basics['pb']),
                df_model_basics['rank_pe'][int(code)])
            content += 'capital reserve per share:{0}\nundistributed profit per share:{1}\n'.format(
                basics['reservedPerShare'], basics['perundp'])
            content += 'total market cap:{0:.2f} (100M CNY)\nfloat market cap:{1:.2f} (100M CNY)\n'.format(
                cur_price * float(basics['totals']), cur_price * float(basics['outstanding']))
            content += ('total assets:{0:.2f} (100M CNY)\nfixed assets:{1:.2f} (100M CNY)\n'
                        'liquid assets:{2:.2f} (100M CNY)\n').format(
                float(basics['totalAssets']) / 10000, float(basics['fixedAssets']) / 10000,
                float(basics['liquidAssets']) / 10000)
        except:
            content = '\n{0}\n'.format(code)
        try:
            content += '\nconcept:\n'
            id_concept = 1
            id_rank = 1
            name_concept = '_'.join(['concept', str(id_concept)])
            name_rank = '_'.join(['rank_pe', str(id_rank)])
            while df_model_basics[name_concept][int(code)] is not np.nan:
                content += '{0} PE rank:{1}\n'.format(
                    df_model_basics[name_concept][int(code)],
                    df_model_basics[name_rank][int(code)])
                id_concept += 1
                id_rank += 1
                if id_concept > 20:
                    break
                name_concept = '_'.join(['concept', str(id_concept)])
                name_rank = '_'.join(['rank_pe', str(id_rank)])
            content += '\n'
        except:
            pass
        try:
            profit = df_profit_data.loc[int(code)].sort_values(
                by=['year', 'quarter'], axis=0, ascending=True).drop_duplicates()
            content += ('\nprofit:\nrank:{0}\nyear quarter ROE net profit (M) EPS (CNY) '
                        'revenue per share (CNY)\n').format(
                df_model_basics['rank_profit_grow'][int(code)])
            for id in range(profit.index.size):
                content += '{5}{0} {1} {2:-10.2f} {3:-12.2f} {4:-15.2f} {6:-20.2f}\n'.format(
                    profit.iloc[id]['year'], profit.iloc[id]['quarter'],
                    profit.iloc[id]['roe'], profit.iloc[id]['net_profits'],
                    profit.iloc[id]['eps'],
                    space(int(profit.iloc[id]['quarter']) - 1),
                    profit.iloc[id]['bips'])
        except:
            pass
        try:
            operation = df_operation_data.loc[int(code)].sort_values(
                by=['year', 'quarter'], axis=0, ascending=True).drop_duplicates()
            content += ('\noperation:\nyear quarter receivables turnover (days) '
                        'inventory turnover (days) current assets turnover (days)\n')
            for id in range(operation.index.size):
                content += '{5}{0} {1} {2:-16.2f} {3:-8.2f} {4:-15.2f}\n'.format(
                    operation.iloc[id]['year'], operation.iloc[id]['quarter'],
                    operation.iloc[id]['arturndays'], operation.iloc[id]['inventory_days'],
                    operation.iloc[id]['currentasset_days'],
                    space(int(operation.iloc[id]['quarter']) - 1))
        except:
            pass
        try:
            debtpaying = df_debtpaying_data.loc[int(code)].sort_values(
                by=['year', 'quarter'], axis=0, ascending=True).drop_duplicates()
            content += ('\ndebtpaying:\nyear quarter current ratio interest coverage '
                        'equity ratio equity growth rate\n')
            for col in ['currentratio', 'icratio', 'sheqratio', 'adratio']:
                for id in range(debtpaying.index.size):
                    try:
                        float(debtpaying[col].iloc[id])
                    except:
                        debtpaying[col].iloc[id] = np.nan
            for id in range(debtpaying.index.size):
                content += '{5}{0} {1} {2:-8.2f} {3:-12.2f} {4:-10.2f} {6:-14.2f}\n'.format(
                    debtpaying.iloc[id]['year'], debtpaying.iloc[id]['quarter'],
                    float(debtpaying.iloc[id]['currentratio']),
                    float(debtpaying.iloc[id]['icratio']),
                    float(debtpaying.iloc[id]['sheqratio']),
                    space(int(debtpaying.iloc[id]['quarter']) - 1),
                    float(debtpaying.iloc[id]['adratio']))
        except:
            pass
        try:
            divi = df_divi_data.loc[int(code)]
            content += '\ndivision:\nyear announce date dividend (per 10 shares) bonus shares (per 10 shares)\n'
            if type(divi) == pd.Series:
                divi = divi.to_frame().T
            if type(divi) == pd.DataFrame:
                divi = divi.sort_values(by=['year', 'report_date'], axis=0, ascending=True)
                for id in range(divi.index.size):
                    content += '{0} {1} {2:-12d} {3:-16d}\n'.format(
                        divi.iloc[id]['year'], divi.iloc[id]['report_date'],
                        int(divi.iloc[id]['divi']), int(divi.iloc[id]['shares']))
            else:
                ERROR('divi type error.')
        except:
            pass
        try:
            forcast_quarter_data = df_forcast_quarter_report_data.loc[int(code)]
            content += '\nforcast quarter report:\nrelease date change type prior-year EPS change range\n'
            if type(forcast_quarter_data) == pd.Series:
                forcast_quarter_data = forcast_quarter_data.to_frame().T
            if type(forcast_quarter_data) == pd.DataFrame:
                forcast_quarter_data = forcast_quarter_data.sort_values(
                    by='report_date', axis=0, ascending=True)
                for id in range(forcast_quarter_data.index.size):
                    content += '{0} {1:>8s} {2:-14.2f} {3:>12s}\n'.format(
                        forcast_quarter_data.iloc[id]['report_date'],
                        forcast_quarter_data.iloc[id]['type'],
                        float(forcast_quarter_data.iloc[id]['pre_eps']),
                        forcast_quarter_data.iloc[id]['range'])
            else:
                ERROR('forcast_quarter_data type error.')
        except:
            pass
        try:
            restrict = df_restrict_stock_data.loc[int(code)]
            content += '\nrestrict:\nunlock date unlocked shares (10K) share of total\n'
            if type(restrict) == pd.Series:
                restrict = restrict.to_frame().T
            if type(restrict) == pd.DataFrame:
                restrict = restrict.sort_values(by='date', axis=0, ascending=True)
                for id in range(restrict.index.size):
                    content += '{0} {1:-12.2f} {2:-10.2f}\n'.format(
                        restrict.iloc[id]['date'],
                        float(restrict.iloc[id]['count']),
                        float(restrict.iloc[id]['ratio']))
            else:
                ERROR('restrict type error.')
        except:
            pass
        try:
            df_news = ts.get_notices(code)
            content += '\nnotice:\n'
            for index in range(0, 10):  # df_news.index:
                content += '{3}. {0}\t{1}\tdate:{2}\n'.format(
                    df_news['title'][index], df_news['type'][index],
                    df_news['date'][index], index + 1)
                if index < 3:
                    content += ts.notice_content(df_news['url'][index])
                    content += '\n\n'
            content += '\n'
        except:
            pass
        LOG(content)
        dict_stock_info[code] = content
    return dict_stock_info
def notify_investment_opportunity(self):
    df_spill_wave_stock = Utils.read_data(
        model.spill_wave.Analyse().spill_wave_stock_file)
    df_model_basics = Basics().get_basics().set_index('code')
    while True:
        cur_time = Utils.cur_time()
        hour = int(cur_time.split(':')[0])
        minute = int(cur_time.split(':')[1])
        if hour < 9 or (hour == 9 and minute < 30):
            LOG('notify_investment_opportunity: morning\n{0} hours {1} minutes later market open'
                .format(int(Utils.now2market_morning_time() / 3600),
                        int(Utils.now2market_morning_time() % 3600 / 60)))
            sleep(Utils.now2market_morning_time())
        elif (hour == 11 and minute >= 30) or hour == 12:
            LOG('notify_investment_opportunity: nooning\n{0} hours {1} minutes later market open'
                .format(int(Utils.now2market_nooning_time() / 3600),
                        int(Utils.now2market_nooning_time() % 3600 / 60)))
            sleep(Utils.now2market_nooning_time())
        elif hour >= 15:
            LOG('notify_investment_opportunity: market close')
            break
        content_notify = '{0}\n'.format(cur_time)
        for index in df_spill_wave_stock.index:
            code = '%06d' % df_spill_wave_stock.loc[index]['code']
            name = df_spill_wave_stock.loc[index]['name']
            try:
                df_realtime_quotes = Data().get_realtime_quotes(code)
                if float(df_realtime_quotes['price']) >= (
                        float(df_spill_wave_stock.loc[index]['buy_price']) * 0.99):
                    content_notify += '-{0} {1} cur price:{2:.2f} buy price:{3:.2f} sell price:{4:.2f} expect earn:{5:.2f}\n'\
                        .format(code, name,
                                float(df_realtime_quotes['price']),
                                float(df_spill_wave_stock.loc[index]['buy_price']),
                                float(df_spill_wave_stock.loc[index]['sell_price']),
                                float(df_spill_wave_stock.loc[index]['expect_earn_rate']),
                                float(df_spill_wave_stock.loc[index]['min_earn_rate']))
                    content_notify += '\tprofit rank:{0}\n \tindustry:{1} pe rank:{2}\n'.format(
                        df_model_basics['rank_profit_grow'][int(code)],
                        df_model_basics['industry'][int(code)],
                        df_model_basics['rank_pe'][int(code)])
                    id_concept = 1
                    id_rank = 1
                    name_concept = '_'.join(['concept', str(id_concept)])
                    name_rank = '_'.join(['rank_pe', str(id_rank)])
                    while df_model_basics[name_concept][int(code)] is not np.nan:
                        content_notify += '\tconcept:{0} pe rank:{1}\n'.format(
                            df_model_basics[name_concept][int(code)],
                            df_model_basics[name_rank][int(code)])
                        id_concept += 1
                        id_rank += 1
                        if id_concept > 20:
                            break
                        name_concept = '_'.join(['concept', str(id_concept)])
                        name_rank = '_'.join(['rank_pe', str(id_rank)])
                    content_notify += '\n'
            except:
                pass
        if SEND_EMAIL:
            # When emailing, send once every 10 minutes
            Utils.send_email(content_notify, 'opportunity notification')
            sleep(10 * 60)
        else:
            LOG('*********************************')
            LOG(content_notify)
            LOG('*********************************')
            sleep(60 * 5)
from crawler.download import Download
from crawler.parse import Parser
from stock.stock import Stock
from data.data import Data
import stock.stock_real_time as srt
import stock.stock_minutes as sm
import stock.stock_day as sd
import stock.stock_update as sup
from tools import log

log = log.get_logger()
stock_manager = Stock()
stock_data = Data('./data_csv/file.csv')

# Tencent stock list info
tencent_url = 'https://stock.gtimg.cn/data/index.php?'
# Stock data website
data_url = 'http://www.sse.com.cn/js/common/ssesuggestdata.js'
# Sina real-time stock quote endpoint
sina_get_stock_url = 'http://hq.sinajs.cn/list='


def get_stock_data(code):
    url = sina_get_stock_url + code
    sk_data = dict()
    req = Download(url)
    text = req.get_html_text()
    if text == '':
def calc_data(self):
    self.data = Data(patients=self.patients)
def run():
    # Kill the splash screen if active
    stop_splash_service()

    # Get supplied command line arguments
    commandArgs = args()

    if commandArgs.terminal_mode and sys.stdin.isatty():
        height, width = os.popen('stty size', 'r').read().split()
        termMatrix = TermMatrix()
        termMatrix.width = int(width)
        termMatrix.height = int(height)
        matrix = Matrix(termMatrix)
    else:
        # Check for led configuration arguments
        matrixOptions = led_matrix_options(commandArgs)
        matrixOptions.drop_privileges = False
        # Initialize the matrix
        matrix = Matrix(RGBMatrix(options=matrixOptions))

    # Riff to add loading screen here
    loading = Loading(matrix)
    loading.render()

    # Read scoreboard options from config.json if it exists
    config = ScoreboardConfig("config", commandArgs, (matrix.width, matrix.height))

    data = Data(config)

    # If the logging arguments are passed on the command line, override what's in config.json;
    # otherwise use what's in config.json (color will always be false in config.json)
    if commandArgs.logcolor and commandArgs.loglevel is not None:
        debug.set_debug_status(config, logcolor=commandArgs.logcolor, loglevel=commandArgs.loglevel)
    elif not commandArgs.logcolor and commandArgs.loglevel is not None:
        debug.set_debug_status(config, loglevel=commandArgs.loglevel)
    elif commandArgs.logcolor and commandArgs.loglevel is None:
        debug.set_debug_status(config, logcolor=commandArgs.logcolor, loglevel=config.loglevel)
    else:
        debug.set_debug_status(config, loglevel=config.loglevel)

    # Print some basic info on startup
    debug.info("{} - v{} ({}x{})".format(SCRIPT_NAME, SCRIPT_VERSION, matrix.width, matrix.height))

    if data.latlng is not None:
        debug.info(data.latlng_msg)
    else:
        debug.error("Unable to find your location.")

    # Event used to sleep when rendering
    # Allows the Web API (coming in V2) and pushbutton to cancel the sleep
    # Will also allow a weather alert to interrupt the display board if you want
    sleepEvent = threading.Event()

    # Start the task scheduler, used for UpdateChecker, screensaver, forecast, dimmer and weather
    scheduler = BackgroundScheduler()
    scheduler.start()

    # Any tasks that are scheduled go below this line

    # Make sure we have a valid location for data.latlng, as the geocode can return None.
    # If there is no valid location, skip the weather boards.

    # Create EC data feed handler
    if data.config.weather_enabled or data.config.wxalert_show_alerts:
        if data.config.weather_data_feed.lower() == "ec" or data.config.wxalert_alert_feed.lower() == "ec":
            try:
                data.ecData = ECData(coordinates=(data.latlng))
            except Exception as e:
                debug.error("Unable to connect to EC, try running again in a few minutes")
                sys.exit(0)

    if data.config.weather_enabled:
        if data.config.weather_data_feed.lower() == "ec":
            ecWxWorker(data, scheduler)
        elif data.config.weather_data_feed.lower() == "owm":
            owmweather = owmWxWorker(data, scheduler)
        else:
            debug.error("No valid weather providers selected, skipping weather feed")
            data.config.weather_enabled = False

    if data.config.wxalert_show_alerts:
        if data.config.wxalert_alert_feed.lower() == "ec":
            ecalert = ecWxAlerts(data, scheduler, sleepEvent)
        elif data.config.wxalert_alert_feed.lower() == "nws":
            nwsalert = nwsWxAlerts(data, scheduler, sleepEvent)
        else:
            debug.error("No valid weather alerts providers selected, skipping alerts feed")
            data.config.weather_show_alerts = False

    if data.config.weather_forecast_enabled and data.config.weather_enabled:
        wxForecast(data, scheduler)

    #
    # Run a check for updates against GitHub on a background thread on a scheduler
    #
    if commandArgs.updatecheck:
        data.UpdateRepo = commandArgs.updaterepo
        checkupdate = UpdateChecker(data, scheduler, commandArgs.ghtoken)

    if data.config.dimmer_enabled:
        dimmer = Dimmer(data, matrix, scheduler)

    screensaver = None
    if data.config.screensaver_enabled:
        screensaver = screenSaver(data, matrix, sleepEvent, scheduler)
        if data.config.screensaver_motionsensor:
            motionsensor = Motion(data, matrix, sleepEvent, scheduler, screensaver)
            motionsensorThread = threading.Thread(target=motionsensor.run, args=())
            motionsensorThread.daemon = True
            motionsensorThread.start()

    if data.config.pushbutton_enabled:
        pushbutton = PushButton(data, matrix, sleepEvent)
        pushbuttonThread = threading.Thread(target=pushbutton.run, args=())
        pushbuttonThread.daemon = True
        pushbuttonThread.start()

    MainRenderer(matrix, data, sleepEvent).render()
class CarlaEnvironment(Environment):
    def __init__(self):
        self.actors = []
        self.data = Data()
        self.vehicle = None
        self.path = None

        logger.warning('Halting threads')
        self.data.put(DataKey.THREAD_HALT, True)

        self.c = ControllerThread(self.data)
        self.p = PollerThread(self.data)
        self.s = SpectatorFollowThread(self.data)

        self.connection = Connection()

    def setup(self):
        logger.debug('Environment setup')
        self.connection.connect()
        self.__set_conditions()
        # self.__update_path()
        self.__spawn()
        self.reset()

    def start(self):
        self.c.start()
        self.p.start()
        self.s.start()

    def reset(self):
        logger.debug('Resetting actors')
        self.clear()
        logger.warning('Halting threads')
        self.data.put(DataKey.THREAD_HALT, True)
        self.vehicle.apply_control(icarla.vehicle_control(throttle=0, steer=0))
        icarla.set_velocity(self.vehicle, icarla.vector3d())
        logger.debug('Environment reset successful')

    def clear(self):
        # Clears all data -> removes thread_halt -> threads can resume
        logger.debug('Clearing data')
        self.data.clear()

    def pull(self):
        # data, path and starting direction
        return self.data.get(), Path(self.path.points), self.path.direction()

    def put(self, key, data):
        self.data.put(key, data)

    def check(self):
        s = Status()
        s.check(self)
        return s

    def __set_conditions(self):
        current_map_name = self.connection.world.get_map().name
        # Loading correct map
        if current_map_name != MAP_NAME:
            logger.info(f'Loading map: {MAP_NAME} <- {current_map_name}')
            try:
                self.connection.world = self.connection.client.load_world(MAP_NAME)
            except RuntimeError as r:
                logger.critical(f'{r}')
                raise r
        else:
            # Destroying old actors
            actors = self.connection.world.get_actors()
            for actor in actors.filter('vehicle.*.*'):
                actor.destroy()
            for actor in actors.filter('sensor.*.*'):
                actor.destroy()
            if len(actors.filter('vehicle.*.*')) > 0 and len(actors.filter('sensor.*.*')) > 0:
                logger.debug('Cleaned up old actors')
            else:
                logger.warning('Issues while cleaning up old actors')

        # Setting nice weather
        self.__set_weather()

    def __set_weather(self):
        weather = self.connection.world.get_weather()
        weather.precipitation = 0.0
        weather.precipitation_deposits = 0.0
        weather.wetness = 0.0
        self.connection.world.set_weather(weather)
        logger.debug('Applied nice weather')

    def __spawn(self):
        logger.debug('Spawning actors, sensors')
        if self.path is None:
            spawn_vehicle(self, [0.0, 0.0], [0.0, 0.0, 0.0])
        else:
            spawn_vehicle(self, self.path.start, self.path.direction())
        spawn_camera(self)
        spawn_radar(self)
        spawn_collision(self)
        spawn_obstacle(self)

    def __update_path(self, i):
        self.path = get_path()
        # TODO (3) make prettier
        # if r < 1/6:
        #     logger.info('Environment: normal short')
        #     self.path.slice(None, 20)
        # elif r < 2/6:
        #     logger.info('Environment: backwards short')
        #     self.path.slice(70, None)
        #     self.path.invert()
        # elif
        # `is` replaced with `==`: identity checks against int literals are unreliable
        if i % 4 == 0:
            logger.info('Environment: normal full')
        elif i % 4 == 1:
            logger.info('Environment: backwards full')
            self.path.invert()
        elif i % 4 == 2:
            logger.info('Environment: normal turn (left)')
            self.path.slice(30, 60)
        elif i % 4 == 3:
            logger.info('Environment: backwards turn (right)')
            self.path.slice(40, 70)
            self.path.invert()

    def load_path(self, i):
        self.__update_path(i)
        icarla.move(self.vehicle,
                    icarla.transform(self.path.start[0], self.path.start[1], 0.25).location)
        icarla.rotate(self.vehicle, icarla.rotation_from_radian(self.path.direction()))
        self.s.set_knowledge(self.connection.world.get_spectator(), self.vehicle, self.path)
from program.data.data import Data
from program.mapping.mapping import Mapping
from program.mapping.log import Logging
from pathlib import Path
from json import load as json_load
from os.path import join as path_join
from os import getpid
from psutil import Process

BASEPATH = Path(__file__).parent.absolute()
logging = Logging(BASEPATH)  # Create an instance of the logging
data_instance = Data(BASEPATH, logging)


def create_list_coordinates(interval, instances):
    x_interval, y_interval = int(360 / interval), int(180 / interval)
    coordinates_list = []
    for x in range(x_interval):
        new_list = []
        for y in range(y_interval):
            new_list.append(
                Coordinates(
                    coords := (x * interval, y * interval),
                    interval,
                    (1731000 / 36, 1731000 / 18),
                    data_instance.get_climate_with_coordinates(coords, (interval, interval)),
                    instances))
            # stdout.write(f"\rCreated coordinate ({x},{y}). {x_interval * y_interval}")
from data.data import Data
from data.timer import Timer
import signal
import sys
import time


def signal_handler(_signal, _frame):
    print("Goodbye!\n")
    stop()
    sys.exit(0)


if __name__ == '__main__':
    data = Data.get_instance()
    state = Alert.get_instance()
    data.update(.016)
    # note: time.clock() was removed in Python 3.8; time.perf_counter() is the modern replacement
    last_time = time.clock()
    timer = Timer()
    signal.signal(signal.SIGINT, signal_handler)
    fwd()
    while True:
        try:
            print(state)
            dt = timer.check_dt()
            if dt < Constants.CYCLE_MIN_PERIOD:
                time.sleep(Constants.CYCLE_MIN_PERIOD - dt)
'''
Classify data
'''
results_f = open("ada_results.csv", 'w')
wr = csv.writer(results_f)

accuracy_history = []
precision_history = []
recall_history = []
auc_history = []
confusion_matrix_history = np.array([])
confusion_matrix_history.shape = (2, 2, 0)

path = "resources/parsed_data.bin"
homesite = Data()
homesite.load_parsed_data(path)
homesite.z_norm_train_test_by_feature()
del homesite.test_x  # Deleted to save memory.

# reduced_range = range(0, 100)
# homesite.train_x = homesite.train_x[reduced_range]
# homesite.train_y = homesite.train_y[reduced_range]

C = [256, 512]

for c in C:
    # Creating classifier.
    mean_acc = 0.0
    mean_recall = 0.0
    mean_precision = 0.0
# The old private module paths (sklearn.ensemble.forest, sklearn.tree.tree) were
# removed in scikit-learn 0.24; the public paths below are equivalent.
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from data.data import Data
from statistics.confusion_matrix import confusion_matrix
from statistics.performance import compute_performance_metrics, compute_auc
import numpy as np

if __name__ == '__main__':
    '''
    Classify data.
    '''
    oversampled_path = "resources/oversampled_normalized_data_ratio_2.bin"
    homesite = Data()
    homesite.load_sliptted_data(oversampled_path)
    del homesite.test_x  # Deleted to save memory.
    print(homesite.train_x.shape)

    # Creating classifier.
    # clf = DecisionTreeClassifier()
    clf = RandomForestClassifier(max_features=100)
    # clf = AdaBoostClassifier(n_estimators=10)
    # clf = svm.SVC(gamma=0.00005)
    # clf = RandomForestClassifier()
    # clf = MultiplePLS(n_classifiers=10, n_samples=5000, n_positive_samples=2500, threshold=0.9, acc=0.999)
    # clf = svm.LinearSVC()

    # Train classifier.
    print("Training classifier.")
import tensorflow as tf
import threading as th
from data.data import Data
from data.metadata import get_metadata
from config import init_config
import numpy as np
import time

opts = init_config()

# initialization of datasets
meta_data = get_metadata()
train_set = Data(meta_data['train'])

n_thread = 1
nBatch = 128
batch_size = 253 * 256

sess = tf.Session()

data_q = tf.FIFOQueue(10000, [tf.float32], shapes=[batch_size, 1])
compt_q = tf.FIFOQueue(1, tf.int64)

compteur = tf.placeholder(tf.int64, [])
data = tf.placeholder(tf.float32, [batch_size, 1])

data_inc = data_q.enqueue(data)
data_dec = data_q.dequeue()
compt_inc = compt_q.enqueue(compteur)
compt_dec = compt_q.dequeue()

sess.run(compt_inc, feed_dict={compteur: 0})
run_options = tf.RunOptions(timeout_in_ms=100)
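# A sketch of how the queue ops above are typically driven in TF 1.x: a producer
# thread repeatedly runs the enqueue op while the training loop dequeues. The
# `make_batch` callback and loop bound here are illustrative, not from the source:
def producer(sess, enqueue_op, data_placeholder, make_batch, n_batches):
    """Push n_batches batches into the FIFO queue via the enqueue op."""
    for _ in range(n_batches):
        sess.run(enqueue_op, feed_dict={data_placeholder: make_batch()})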
    def work(self, entry):
        if self.watching(entry['domain']):
            self.data.add_entry(entry)

    def watching(self, domain):
        if 'all' in self.domains or (domain is None and 'none' in self.domains):
            return True
        return domain in [s.lower() for s in self.domains]


if __name__ == "__main__":
    domains = args.domains.split(' ')

    bcolors.print_colour("S8: Mysql Stasher\n", bcolors.OKGREEN, bcolors.UNDERLINE)
    bcolors.print_colour("Stashing: %s domain(s)\n" % ', '.join(domains), bcolors.OKGREEN)
    bcolors.print_colour("Press Ctrl-C to exit\n", bcolors.OKGREEN)

    try:
        data = Data(args)
        c = Processor(data, domains)
        s = Subscriber(args.subscription)
        s.work(c.work)
    except (KeyboardInterrupt, SystemExit, StopIteration):
        pass
    except ConnectionError as err:
        Data.invalid_connection(err, args)

"""
TODO
----
Need to be able to filter for event severity and/or event contents
"""
    def update_cheltuiala(self, id_c, pret, tip, descriere):
        if self.cheltuieli[id_c] == "":
            return False
        data = Data(id_c, pret, tip, descriere)
        self.cheltuieli[id_c] = data
        return True