Example #1
 def __init__(self):
     self._db = DataHandler()
     self._pointspreads = PointSpreads()
     self._team_idxs = {}
     self._team_ids = {}
     self._pagerank = {}
     self._team_data = {}
Example #2
def pipeline(city, slackbot, export, sitedata, sitedata_format, cdrdata,
             cdrdata_format, net_analysis, vis, sampling, sample_size):

    logging.config.dictConfig(logger_config)

    # ---------------------------------------
    # Get Params and Constants, Load data
    # ---------------------------------------
    nest = DataHandler(city, slackbot, export, sitedata, sitedata_format,
                       cdrdata, cdrdata_format, net_analysis, vis, sampling,
                       sample_size)

    if nest.click_params.sitedata:
        # -----------------------
        # Preprocess Museum data
        # -----------------------
        site_data = MuseumDataPreProcess(click_params=nest.click_params,
                                         params=nest.params,
                                         site_raw_data=nest.site_raw_data)

        # ---------------------------------------
        # Site Data Analysis
        # ---------------------------------------
        SiteAnalysis(click_params=nest.click_params,
                     params=nest.params,
                     data_feature_extracted=site_data.data_feature_extracted)

    if nest.click_params.net_analysis:
        # ---------------------------------------
        # Network Analysis
        # ---------------------------------------
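        # note: NetworkAnalysis reuses site_data, so this branch assumes
        # the sitedata option was also enabled (site_data is defined above)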
        network_analysis = NetworkAnalysis(
            params=nest.params, data=site_data.data_feature_extracted)

    if nest.click_params.vis:
        # --------------------------------------------
        # Create Fountain Visualization of museum data
        # --------------------------------------------
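        # note: this branch relies on site_data and network_analysis
        # produced by the branches above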
        FountainViz(network_analysis=network_analysis, museum_data=site_data)

    if nest.click_params.cdrdata:
        # --------------------
        # Preprocess Cdr data
        # --------------------
        cdr_data = CDRPreProcess(click_params=nest.click_params,
                                 params=nest.params,
                                 cdr_raw_data=nest.cdr_raw_data)

        # ---------------------------------------
        # CDR Data Analysis
        # ---------------------------------------
        CDRAnalysis(params=nest.params,
                    data_feature_extracted=cdr_data.data_feature_extracted)

        if slackbot:
            SlackBot(nest, network_analysis, channel='city-flows-bot')
Example #3
def main():
    sess = tf.Session()
    config = get_config(is_train=False)
    mkdir(config.result_dir)

    reg = RegNet(sess, config, "RegNet", is_train=False)
    reg.restore(config.ckpt_dir)
    dh = DataHandler("Curious_data", is_train=False)

    for i in range(10):
        result_i_dir = config.result_dir + "/{}".format(i)
        mkdir(result_i_dir)

        batch_x, batch_y = dh.sample_pair(config.batch_size, i)
        reg.deploy(result_i_dir, batch_x, batch_y)
Example #4
 def __init__(self, team_id):
     self.id = team_id
     self._db = DataHandler()
     self._data = None
     self._ranks = None
     self._name = None
     self._features = None
     self._start_rank = {}
     self.aggregator = AggregatorCollector(
         [Aggregator(stat, stat_agg(stat, False)) for stat in STATS] +
         [Aggregator('fgpct', pct_agg('fga', 'fgm')),
          Aggregator('fgpct3', pct_agg('fga3', 'fgm3')),
          Aggregator('ftpct', pct_agg('fta', 'ftm')),
          Aggregator('fgpct', pct_agg('fga', 'fgm', True)),
          Aggregator('fgpct3', pct_agg('fga3', 'fgm3', True)),
          Aggregator('ftpct', pct_agg('fta', 'ftm', True))] +
         [Aggregator('wpct', lambda g, t, p, tv: int(p == 'w'))])
Example #5
    def setup_daq(self):
        max_hist_size = 50
        self.spectrumPlotWidget = SpectrumPlotWidget(self.noisePlot, 0)
        self.timetracePlotWidget = TimetracePlotWidget(self.timetracePlot, 0)
        self.timeNoisePlotWidget = WaterfallPlotWidget(
            self.timeNoisePlot,
            self.histogramPlotLayout,
            max_history_size=max_hist_size)
        self.sample_rate = 500000
        self.points_per_shot = 50000

        self.configuration = Configuration()
        self.meas_data_writer = NoiseExperimentWriter(
            "D:\\TestData", timetrace_buffer_size=self.points_per_shot * 10)
        self.meas_data_writer.open_experiment("test_experiment")
        self.meas_data_writer.open_measurement("meas1")

        self.data_storage = DataHandler(sample_rate=self.sample_rate,
                                        points_per_shot=self.points_per_shot,
                                        max_history_size=max_hist_size,
                                        writer=self.meas_data_writer)
        self.data_storage.data_updated.connect(
            self.spectrumPlotWidget.update_plot)
        self.data_storage.average_updated.connect(
            self.spectrumPlotWidget.update_average)
        self.data_storage.data_updated.connect(
            self.timetracePlotWidget.update_plot)
        self.data_storage.history_updated.connect(
            self.timeNoisePlotWidget.update_plot)
        #self.data_storage.peak_hold_max_updated.connect(self.spectrumPlotWidget.update_peak_hold_max)
        #self.data_storage.peak_hold_min_updated.connect(self.spectrumPlotWidget.update_peak_hold_min)

        self.fans_controller = FANS_controller(
            "ADC", self.data_storage, configuration=self.configuration)

        for channel in AI_CHANNELS.indexes:
            self.fans_controller._set_fans_ai_channel_params(
                AI_MODES.AC, CS_HOLD.OFF, FILTER_CUTOFF_FREQUENCIES.f150,
                FILTER_GAINS.x15, PGA_GAINS.x100, channel)

        self.fans_controller.init_acquisition(
            self.sample_rate, self.points_per_shot, [AI_CHANNELS.AI_104]
        )  #[AI_CHANNELS.AI_101,AI_CHANNELS.AI_102,AI_CHANNELS.AI_103,AI_CHANNELS.AI_104])
Example #6
    def __init__(self, platform):
        # setup configuration
        self.config = Config()
        term = self.config.types[platform]['term']
        extension = self.config.types[platform]['extension']

        self.language = self.config.types[platform]['language']
        self.platform = self.config.types[platform]['platform']

        # setup request handler
        self.requester = RequestHandler(term, extension)

        # setup data handler
        self.data = DataHandler()

        # configure crawler specifics
        self.size_range = SortedSet()
        self.size_range.update([0, 384001])  # stick to GitHub size restrictions
        self.initial_items = []
        print "Started GitHub crawler at {}".format(asctime(localtime(time())))
Example #7
    def download_season(self, years):
        """ Downloads the statcast data for a season as a csv. It adds a unique id (uid) for each pitch and then saves the csv in the folder statcast_data/[year].csv.
        
        Parameters
        ----------
        years (str, int, list of strings, list of ints) : the season(s) to download

        Returns
        -------
        None
        """
        dh = DataHandler()
        if not isinstance(years, list):
            years = [years]
        for year in years:
            data = statcast(start_dt=self.season_dates[year]["start"],
                            end_dt=self.season_dates[year]["end"])
            data["index"] = data.apply(lambda row: self._create_uid(row),
                                       axis=1)
            data = data.set_index("index")
            data = data.sort_index().reset_index(drop=False)
            data = dh.set_data_types(data)
            data.to_feather(f"statcast_data/{year}")
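
A minimal usage sketch for download_season (the class name StatcastDownloader and its pre-filled season_dates are assumptions for illustration, not part of the example):

downloader = StatcastDownloader()           # hypothetical class defining download_season
downloader.download_season(2019)            # a single season
downloader.download_season([2018, 2019])    # several seasons at once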
Example #8
def main():
  sess = tf.Session()

  reg = RegNet(sess, config, "RegNet", is_train=True)
  dh = DataHandler("Curious_data", is_train=True)

  # Start training
  for step in range(config.iteration):
    batch_x, batch_y, mri_affine, us_affine, mri_shape, us_shape, mri_mark, us_mark = dh.sample_pair(config.batch_size)
    loss, grid_x, grid_y, grid_z =\
        reg.fit(batch_x, batch_y, mri_mark, us_mark, get_initial_map(mri_affine, us_affine, mri_shape, us_shape))
    grid_x = np.rint(grid_x).astype(int)
    grid_y = np.rint(grid_y).astype(int)
    grid_z = np.rint(grid_z).astype(int)

    # Calculate the mean target registration error
    mTREs = 0.
    for j in range(len(batch_x)):
      for i in range(len(mri_mark)):
          x_coord = mri_mark[j, i, :].astype(int)
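          # flat (row-major) index of this landmark's voxel within sample j of the batched grid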
          item = j * config.im_size[0] * config.im_size[1] * config.im_size[2] +\
              x_coord[0] * config.im_size[1] * config.im_size[2] +\
              x_coord[1] * config.im_size[2] + x_coord[2]
          y_coord = np.array([grid_x[item],grid_y[item], grid_z[item]]) + 1
          y_coord = y_coord * (us_shape[j,:3,0].astype(float)/config.im_size) 
          y_coord = nibabel.affines.apply_affine(us_affine[j, :], y_coord)
          y_mark = us_mark[j, i, :] * (us_shape[j,:3,0].astype(float)/config.im_size) 
          y_mark = nibabel.affines.apply_affine(us_affine[j, :], y_mark)
          mTREs += ((y_coord - y_mark) ** 2 ).sum() ** 0.5

    mTREs = mTREs / (len(mri_mark)*len(batch_x))
    print("iter {:>6d} : {} loss: {:>6f} mTREs: {:>6f}".format(step+1, loss, mTREs))

    # Save checkpoint
    if (step+1) % 100 == 0:
      reg.save(config.ckpt_dir, step+1)
Example #9
	def __init__(self):
		self.min_x = 100000000000
		self.min_y = 100000000000
		self.max_x = 0
		self.max_y = 0
		self.height = 480
		self.width = 640

		self.graph = Graph()
		self.graph.load_data("path/path.data")

		# thread = threading.Thread(target=self.graph.load_data, args=["path/path.data"])
		# thread.start()
		# threads.append(thread)

		# process = multiprocessing.Process(target=self.graph.load_data, args=["path/path.data"])
		# process.start()

		self.data_handler = DataHandler("data")
		self.tree = QuadTree(3)
		self.active_nodes = []
		self.zoom_level = 1
		self.fill_metadata()
		self.active_nodes.append(0)
Example #10
                     bins=100,
                     label='Generated data')
        #axs[i].set_ylim(0, 0.43)
        axs[i].title.set_text(ftrs[i])
        axs[i].legend()
    wandb.log({'epoch': e, 'hist': wandb.Image(plt)})
    plt.show()


if __name__ == '__main__':
    CONFIG_PATH = "./config.yaml"

    config = load_config()

    config = DotDict(config)
    fulldata = DataHandler(config=config)

    bashCommand = "wandb login <key>"

    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    wandb.init(project='...', entity='...')

    teacher = RICHGAN(config).to(config.utils.device)

    student = StudentRICHGAN(config).to(config.utils.device)

    teacher.load(config.experiment.checkpoint_path)

    g_update_freq = 1
Example #11
from data import DataHandler
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from itertools import product

from cfg import freqs, npulsess, amps, durs, mttxss, mn1p8s, mn1p9s

# label
# 'socnrn(mn1p7:%.3fx)(mn1p8:%.3fx)(%.3fnAx%.3fms)(%.3fHz)' % (mttxs, mn1p8, amp, dur, freq)
dh = DataHandler()
dh('data/n1p7.json', 'n1p7')

#                mul ,    amp
ixsre = 'mn1p7:(.....).*(.....)nAx.*'
data = dh.return_arr(ixsre, lambda x: x.max())


def arr(start, end, incr):
    return np.array(
        [float("%.3f" % (x)) for x in np.arange(start, end + incr / 2, incr)])


muls = mn1p8s


def create_surface(muls, amps):
    surface = np.zeros((len(amps), len(muls)))
    for i, mul in enumerate(muls):
        for j, amp in enumerate(amps):
Example #12
from data import DataHandler
from portfolio import Portfolio
from stratergy import Stratergy
from execution import ExecutionHandler
import queue

if __name__ == "__main__":

    csv_dir = "E:\\Code\\EventDrivenTrading\\stock_dfs"
    symbol_list = ["AAPL", "ABBV"]
    events = queue.Queue()
    historicData = DataHandler(events, csv_dir, symbol_list)
    portfolio = Portfolio(historicData, events)
    stratergys = [
        Stratergy("AAPL", historicData, events),
        Stratergy("ABBV", historicData, events),
    ]
    simpleExecution = ExecutionHandler(events)

    count = 0
    while True:
        count += 1
        # Get market event/data
        if not historicData.finished:
            historicData.update_bars()
        else:
            break

        # handle events in queue
        while True:
Example #13
 def __init__(self):
     self._dh = DataHandler()
Example #14
import plotly.express as px
import plotly.graph_objects as go

import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate

import pandas as pd
import secrets
from data import DataHandler
from main_scraper import Conector

d = DataHandler(Conector(secrets.API_KEY, secrets.PROJECT_TOKEN))

df_localization = pd.read_csv(
    'geo_data.csv',
    sep=';',
    encoding=DataHandler.find_encoding('geo_data.csv'))

# merge with ISO codes
df = pd.merge(df_localization,
              d.country_data,
              how='left',
              left_on='Country',
              right_on='name')
df = DataHandler.conversion(df)
df_data = DataHandler.rename(d.country_data)
Example #15
#   File:        train.py
#   Author:      YuLianghua
#   Created:     2019-12-03
#   Description:
#
#================================================================
# import os
# import sys
# sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))

import tensorflow as tf

from data import DataHandler
from DSSM import DSSM

data_handler = DataHandler('./data/vocab.txt', 
                           max_char_length=20)
# train data
train_path = './data/train.csv'
train_p, train_h, train_y = data_handler.load_data(train_path)

# dev data
dev_path = './data/dev.csv'
dev_p, dev_h, dev_y = data_handler.load_data(dev_path)

# test data
test_path = './data/test.csv'
test_p, test_h, test_y = data_handler.load_data(test_path)

# print(len(train_p), len(dev_p), len(test_p))
# print(train_p[0], dev_p[0], test_p[0])
Example #16
 def __init__(self):
     self.label_pickle = os.path.join(PICKLE_DIR, '{:d}_labels.pkl')
     self.feature_pickle = os.path.join(PICKLE_DIR, '{:d}_features.pkl')
     self._db = DataHandler()
     self.league = League()
Example #17
 def __init__(self, season):
     self._db = DataHandler()
     self.season = season
     self.league = League()
     self.pred_path = os.path.join(self.pred_dir, '{:d}.csv'.format(season))
Example #18
 def __init__(self):
     self._data = None
     self._db = DataHandler()
     self._seasons = {}
Example #19
    network = tflearn.regression(resnet1(x),
                                 optimizer=mom,
                                 loss='categorical_crossentropy')

    print(np.shape(X))
    print(np.shape(Y))
    print(network)

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(X,
              Y,
              n_epoch=50,
              shuffle=True,
              validation_set=(X_test, Y_test),
              show_metric=True,
              batch_size=data_handler.mini_batch_size,
              run_id='cifar10_cnn')


if __name__ == '__main__':
    import sys

    bl = sys.argv[1]
    nb = int(sys.argv[2])
    mbs = int(sys.argv[3])
    nep = int(sys.argv[4])

    handler = DataHandler(bl, nb, mbs)
    #train_nn(0,handler)
    train_nn_tflearn(handler, nep)
Example #20
import time
import http.server as http
from pprint import pprint
from data import DataHandler
from parse import Parser
import json

HOST_NAME = '127.0.0.1'
PORT_NUMBER = 8080

dataHandler = DataHandler()
parser = Parser()


class MyHandler(http.BaseHTTPRequestHandler):
    def do_GET(self):
        """Respond to a GET request."""
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        res = ""
        if self.path.startswith("/sid"):
            res = str(dataHandler.getSid())
        elif self.path.startswith("/res"):
            sid = int(self.path[5:])
            texts = dataHandler.getTexts(sid)
            res = json.dumps(texts[-min(len(texts), 5):])
        elif self.path.startswith("/stats"):
            sid = int(self.path[7:])
            res = json.dumps(dataHandler.getStats(sid))
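
The example above is cut off inside do_GET; a minimal sketch of how such a handler is typically served with the standard library (not part of the original snippet):

# sketch: serve MyHandler on the configured host and port
if __name__ == '__main__':
    server = http.HTTPServer((HOST_NAME, PORT_NUMBER), MyHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    finally:
        server.server_close()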
Example #21
def train(cfg) -> None:
	device = torch.device(cfg.device)
	print(f"Using device {device}")

	runs = None
	if cfg.use_run_setup:
		runs = RunBuilder.get_runs(cfg.run_setup)
	else:
		runs = RunBuilder.get_runs(
			OrderedDict({
				"lr": [cfg.lr],
				"num_epochs": [cfg.num_epochs]
			})
		)
	assert runs is not None


	data_handler = DataHandler(cfg)
	train_dataset, validation_dataset = data_handler.get_datasets()
	train_loader, validation_loader = data_handler.get_data_loaders()
	training_dataset_size, validation_dataset_size = data_handler.get_datasets_sizes()


	best_model_wts = None
	best_acc = 0.0
	best_config = None

	for run in runs:
		comment = f"Run setup -- {run}"
		print(comment)

		model = model_rcnn.create_model(num_classes=3)
		model.to(device)
		params = [p for p in model.parameters() if p.requires_grad]
		optimizer = optim.Adam(params, lr=0.0001, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4)
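		# note: lr is fixed at 0.0001 here; run.lr from the run setup is not passed to the optimizer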

		# Check if resume
		if not cfg.use_run_setup and cfg.resume:
			checkpoint = torch.load("./checkpoints/ckp.pt")
			model.load_state_dict(checkpoint["model_state_dict"])
			optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
			run.num_epochs -= checkpoint["epoch"]

		# loss_criterion = ...

		log_dir = "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
		writer = SummaryWriter(log_dir)

		since = time.time()
		for epoch in range(run.num_epochs):
			print('Epoch {}/{}'.format(epoch, run.num_epochs))
			print('-' * 10)

			# train for one epoch, printing every 10 iterations
			train_one_epoch(model, optimizer, train_loader, device, epoch, print_freq=10)
			evaluate(model, validation_loader, device)

		time_elapsed = time.time() - since
		print('Training complete in {:.0f}m {:.0f}s'.format(
			time_elapsed // 60, time_elapsed % 60
		))

	print(f"Best configuration: {best_config}")
	return model