Example #1
0
def load_data(args):
    """Build the data source described by ``args``.

    Args:
        args: mapping with at least 'type_learning' (a string whose first
            two characters are 'RL' or 'SL') and 'name_data'.

    Returns:
        For 'RL*' learning, the environment produced by the DataManager;
        for 'SL*' learning, the DataManager itself.

    Raises:
        ValueError: if 'type_learning' starts with neither 'RL' nor 'SL'
            (the original silently fell through and returned None).
    """
    dm = DataManager(args['type_learning'], args['name_data'])

    prefix = args['type_learning'][:2]
    if prefix == 'RL':
        return dm.get_env()
    if prefix == 'SL':
        return dm
    raise ValueError(
        'Unsupported type_learning: {!r}'.format(args['type_learning']))
def run_binary_classification(datasource, tag1, tag2, verbose=True):
    """Train a binary classifier separating two phrase tags.

    Builds a DataManager restricted to phrases tagged with ``tag1`` or
    ``tag2``, trains a small feed-forward classifier on it, and reports
    the classifier's accuracy on the test partition.

    Returns:
        Test-set accuracy of the trained classifier.
    """
    def embed(phrase):
        return w2v.get_word_vector(phrase)

    def is_embeddable(phrase):
        return embed(phrase) is not None

    data_mgr = DataManager(datasource, [tag1, tag2], embed, is_embeddable)
    model = SimpleClassifier(300, 100, 2)
    trained = train_net(model, data_mgr, batch_size=32, n_epochs=30,
                        learning_rate=0.001, verbose=False)
    accuracy, errors = evaluate(trained, data_mgr, 'test')
    if verbose:
        for tag in sorted(data_mgr.tags):
            print('{} phrases are tagged with "{}".'.format(
                data_mgr.num_phrases[tag], tag))
        print('\nERRORS:')
        for phrase, guessed, actual in sorted(errors):
            print('"{}" classified as "{}"\n  actually: "{}".'.format(
                phrase, guessed, actual))
        print("\nOverall test accuracy = {:.2f}".format(accuracy))
    return accuracy
 def test_tags(self):
     """Tag names and indices should round-trip through the DataManager."""
     tags = ['edible_fruit.n.01', 'wheeled_vehicle.n.01']
     manager = DataManager(WordnetDataSource(), tags)
     for index, tag in enumerate(tags):
         assert manager.tag_index(tag) == index
         assert manager.tag(index) == tag
Example #4
0
 def __init__(self, _stock_num, _target_month, _budget=50000000):
     # NOTE(review): self.account is assigned twice — the _AccountEnv built
     # from _budget here is discarded when ForeignAccount() overwrites it on
     # the last line, so the _budget argument currently has no effect.
     # Confirm which account type is intended.
     self.account = _AccountEnv(_budget)
     self.stock_num = _stock_num
     # Per-stock data helper (DataManager module exposes a class of the
     # same name).
     self.DataManager = DataManager.DataManager(self.stock_num)
     # Trading days within the requested month; day_idx walks through them.
     self.day_list = self.DataManager.get_daylist_in_month(_target_month)
     self.day_idx = 0
     self.account = ForeignAccount()
Example #5
0
 def __init__(self, _stock_num):
     """Cache per-stock data, display name, and trading state."""
     # DataManager module exposes a DataManager class of the same name.
     self.DataManager = DataManager.DataManager(_stock_num)
     self.name = DataUtils.get_stock_num_to_name(_stock_num)
     self.num = _stock_num
     # Pending sell orders, consumed asynchronously elsewhere.
     self.queue_sell = asyncio.Queue()
     self.stock_df = self.DataManager.get_dataframe()
     self.count = 0
     # Running totals of foreign / agency trade amounts.
     self.amount_foreign = 0
     self.amount_agency = 0
Example #6
0
 def build_all(
     self,
     resource_limit=None
 ) -> Tuple[List[Process], ResourceManager, DataManager]:
     """Assemble the process models plus resource and data managers.

     Args:
         resource_limit: optional cap forwarded to create_resources().

     Returns:
         (process models, ResourceManager, DataManager) tuple.
     """
     process_models = self.create_process_model()
     ids = [model.id for model in process_models]
     resources = self.create_resources(resource_limit=resource_limit)
     resource_mgr = ResourceManager(resources)
     data_mgr = DataManager(self.create_data(), process_list=ids)
     return process_models, resource_mgr, data_mgr
Example #7
0
 def _setup_data_mgr(self):
     """Create and initialize a DataManager from the 'data' config section.

     Returns:
         The initialized DataManager, or None when the config has no
         'data' section or when initialization fails.
     """
     log.info("_setup_data_mgr")
     #
     # Fixed: idiomatic membership test (was `not "data" in self.config`).
     if "data" not in self.config:
         log.error("no 'data' section in config")
         return None
     config_data = self.config["data"]
     #
     data_mgr = DataManager()
     if not data_mgr.initialize(self.directory, config_data,
                                self.size_total, self.size_chunk):
         return None
     return data_mgr
Example #8
0
 def __init__(self, _stock_num, _target_month, _global_actor,
              _global_critic):
     """Set up a local actor/critic pair synchronized with the global models."""
     self.global_actor = _global_actor
     self.global_critic = _global_critic
     # Fresh local networks with the same architecture.
     self.local_actor, self.local_critic = BaseModel.BaseModel(
         64, 1).build_model()
     # _make_predict_function pre-builds the Keras predict graph so the
     # models can be called from worker threads.
     self.local_actor._make_predict_function()
     self.local_critic._make_predict_function()
     # Start the local models from the current global weights.
     self.local_actor.set_weights(self.global_actor.get_weights())
     self.local_critic.set_weights(self.global_critic.get_weights())
     self.EnvForeign = EnvForeign.ForeignEnv(_stock_num, _target_month)
     # Only the derived lists are kept; the manager itself is discarded.
     dataManager = DataManager.DataManager(_stock_num)
     self.day_list = dataManager.get_daylist_in_month(_target_month)
     self.dataset_list = dataManager.get_dataset_in_month(_target_month)
Example #9
0
 def __init__(self, app, WSGI_PATH_PREFIX):
     """Register the application managers on the app's config.

     Args:
         app: application object whose ``config['Managers']`` is populated.
         WSGI_PATH_PREFIX: accepted for interface compatibility; unused here.
     """
     self.app = app
     # self.cmpmgr = CompressionManager( app, compresslevel=9 )
     self.DataManager = DataManager()
     mngrs = {
         # 'Compression': self.cmpmgr,
         'DataManager': self.DataManager
     }
     # Fixed: the original called setdefault() and then immediately
     # overwrote the same key with the same value — the setdefault was
     # dead code; the plain assignment alone has the identical effect.
     self.app.config['Managers'] = mngrs
     print('             Registered Application Managers')
     print(
         '------------------------------------------------------------------'
     )
Example #10
0
    def __init__( self, nets=None, parent=None, width=3 ):
        """Build the main Tk frame: managers, window chrome, and layout."""
        Frame.__init__( self, parent )

        # Data structure: managers shared by the UI pieces created below.
        self.netManager = NetManager()
        self.dataManager = DataManager()
        self.scheduler = Scheduler()
        self.hosts = []
        # UI
        self.top = self.winfo_toplevel()
        self.top.title( 'Mininet节点调度子系统' )
        self.createMenuBar()
        self.menubar = self.createFramBar()
        self.createCfram()
        cleanUpScreens()
        self.pack( expand=True, fill='both' )
Example #11
0
 def __init__(self, app, WSGI_PATH_PREFIX):
     """Register the data manager and DB engine on the app's config.

     Args:
         app: application object whose ``config['Managers']`` is populated.
         WSGI_PATH_PREFIX: accepted for interface compatibility; unused here.
     """
     self.app = app
     # self.cmpmgr = CompressionManager( app, compresslevel=9 )
     self.DataManager = DataManager(app)
     self.Engine = Engine(
         {"postgres_db": self.DataManager.get_pg_connection_string()})
     mngrs = {
         # 'Compression': self.cmpmgr,
         'DataManager': self.DataManager,
         'Engine': self.Engine
     }
     # Fixed: setdefault() followed by a direct assignment of the same key
     # was redundant — the assignment alone has the identical effect.
     self.app.config['Managers'] = mngrs
     print('             Registered Application Managers')
     print(
         '------------------------------------------------------------------'
     )
import socket
from main import Gesture
from data import DataManager
import controller
import Xlib.display
import win32gui

# Train the gesture classifier from the DataManager's labelled folders.
app_id = 1
app = Gesture(app_id)
manager = DataManager()
app.train(manager.folders, manager.names, manager.labels)
app.initClassifier()
last_msg = "0.00"
value_to_controller = -1

# Listen for incoming gesture messages on a local TCP socket.
s = socket.socket()
host = socket.gethostname()
port = 8000
s.bind((host, port))
s.listen(5)

# NOTE(review): the bare `print x` statements below are Python 2 syntax;
# this script will not run under Python 3 as written. It also imports
# both Xlib (Linux) and win32gui (Windows) — confirm the target platform.
while True:
    c, addr = s.accept()
    print '\nGot connection from', addr

    # Read everything the client sends until it closes the connection.
    msg = []
    msg = c.makefile().read(-1)
    w = win32gui
    window_in_focus_name = w.GetWindowText(w.GetForegroundWindow())
    print window_in_focus_name
    print(len(msg))
Example #13
0
 def next_image(self):
     """Advance the view to the next image, notifying when none remain."""
     try:
         self.change_image(DataManager().next_img())
     except StopIteration:
         # Image iterator exhausted: tell the user and signal listeners
         # that the image stream has ended.
         QMessageBox.question(self, '', 'All job done.', QMessageBox.Yes)
         self.image_ended.emit()
Example #14
0
#
#
################################################################

import tensorflow as tf
import argparse
import sys
import os
import random
import numpy as np
from data import DataManager
from network import TextClassification

# Convolution filter widths and embedding size for the text CNN.
filters = [3, 4, 5]
emb_size = 128
db = DataManager()
# Checkpoint interval (steps) — presumably consumed by the training loop
# below this excerpt; confirm against main().
checkpoint = 100
FLAGS = None
# Number of output classes (binary classification).
classes = 2


def generate_indices(l, nr_batch):
    """Return up to ``nr_batch`` distinct random indices from range(l).

    Args:
        l: size of the index space (indices run 0..l-1).
        nr_batch: number of indices wanted; when larger than ``l`` all
            indices are returned.

    Returns:
        A list of unique indices in random order.
    """
    # Fixed: range() objects are immutable in Python 3 and cannot be
    # shuffled in place — materialize a list first. A single shuffle
    # produces a uniform permutation; the original's 20 repeated shuffles
    # added nothing.
    indices = list(range(l))
    random.shuffle(indices)
    return indices[0:nr_batch]


def main(_):
    if not os.path.exists("models"):
        os.makedirs("models")
Example #15
0
from tqdm import tqdm

tf.logging.set_verbosity(tf.logging.INFO)

# Command-line configuration for training / sampling.
flags = tf.app.flags
flags.DEFINE_string('mode', 'train', 'Mode of application : train (or) sample')
flags.DEFINE_integer('max_steps', 10, 'Numer of epochs')
flags.DEFINE_string('log_path', 'log_dir', 'Directory to log summaries')
flags.DEFINE_string('save_path', 'chkpt', 'Directory to save model')

FLAGS = flags.FLAGS

from model import Model
from data import DataManager

dm = DataManager()

# Graph inputs: 20-dim feature vectors mapped to 10-dim targets.
inputs = tf.placeholder(tf.float32, shape=[None, 20], name="inputs")
target = tf.placeholder(tf.float32, shape=[None, 10], name="target")

with tf.variable_scope('Model'):
  model = Model(inputs, target)

# TensorBoard summaries for the model's error and loss.
tf.summary.scalar('error', model.error)
tf.summary.scalar('loss', model.loss)

merged = tf.summary.merge_all()

def main(_):
  
  saver = tf.train.Saver()
Example #16
0
def main(args):
    """Run contextual-bandit regret experiments over a parameter grid.

    For each requested data size, Trainer.execute is run in parallel over
    every (seed, L, alpha) combination; the resulting regret curves, the
    arguments, and per-alpha plots are saved into a timestamped folder.
    """
    if args['seed'] >= 0:
        np.random.seed(args['seed'])

    d = args['d']
    K = args['K']
    # Single horizon of 10**N steps (kept as a list for the loop below).
    T_vals = [10**args['N']]

    # Random non-negative arm parameters, scaled by sqrt(d).
    w = np.abs(np.random.rand(K, d)) / np.sqrt(d)

    env = LinearContextualBandit(w,
                                 sigma=args['sigma'],
                                 x_norm=args['x_normalization'])
    data_env = env

    # data = load_h5_dataset('data_train.h5')
    # idx = (data['labels'] < K).flatten()
    # env = CifarBandit(features=data['features'][idx], labels=data['labels'][idx], sigma=args['sigma'])
    # data_env = env
    # # test_data = load_h5_dataset('data_test.h5')
    # # test_idx = (test_data['labels'] < K).flatten()
    # # data_env = CifarBandit(features=test_data['features'][test_idx], labels=test_data['labels'][test_idx], sigma=args['sigma'])

    for data_size in args["data_sizes"]:
        if args['perturbations']:
            print(f'Creating Dataset of size N={data_size}')
            data_manager = DataManager(data_env, d, K, data_size)
        else:
            data_manager = None

        trainer = Trainer(env=env, w=w, **args)

        # Axes of the experiment grid: seeds x L values x alpha values.
        iters = [
            range(args['n_seeds']), args['L_values'], args['alpha_values']
        ]

        n_jobs = min(
            args['n_seeds'] * len(args['L_values']) *
            len(args['alpha_values']), args['max_jobs'])
        regret_tmp = []
        for t in T_vals:
            print(f'Starting {n_jobs} jobs for t={t}')
            regret_tmp.append(
                Parallel(n_jobs=n_jobs)(
                    delayed(trainer.execute)(
                        t, L, alpha_l, data_manager=data_manager)
                    for seed, L, alpha_l in product(*iters)))

        # Gather results and save
        folder_name = datetime.datetime.now().__str__().replace(' ', '_')
        os.mkdir(folder_name)

        # Reassemble the flat list of parallel results into a
        # (seed, step, L, alpha) array; the iteration order of
        # product(...) here must match the job-submission order above.
        regret = np.zeros((args['n_seeds'], T_vals[0], len(args['L_values']),
                           len(args['alpha_values'])))
        i = 0
        for seed, ll, gg in product(*[range(len(x)) for x in iters]):
            regret[seed, :, ll, gg] = regret_tmp[0][i]
            i += 1

        args['data_size'] = data_size
        save_data = {'data': regret, 'args': args}
        np.save(f'{folder_name}/data.npy', save_data)
        with open(f'{folder_name}/args.yml', 'w') as outfile:
            yaml.dump(args, outfile, default_flow_style=False)

        # Plot mean regret per L value, one figure per alpha.
        for aa, alpha_l in enumerate(args['alpha_values']):
            fig, ax = plt.subplots(1)
            x_axis = np.repeat(np.array(range(T_vals[0]))[np.newaxis, :],
                               len(args['L_values']),
                               axis=0)
            ax.plot(x_axis.T, np.mean(regret, axis=0)[:, :, aa])
            # ax.fill_between(t_vals, mean-std/2, mean+std/2, facecolor=colors[ll], alpha=0.1)
            plt.title(f'Regret, d={d}, alpha_l={alpha_l}')
            plt.legend([f'L={L}' for L in args['L_values']])
            # plt.show()
            plt.savefig(f'{folder_name}/regret_gamma_{alpha_l}.png')
Example #17
0
    def __init__(self, config, mode, seed):
        """Prepare datasets, network, and stats for a spiking-net experiment.

        Args:
            config: a C.Configuration with all hyper-parameters.
            mode: run mode, stored as-is.
            seed: RNG seed used for torch reproducibility.
        """
        self.config: C.Configuration = config
        self.mode = mode
        self.seed = seed

        # For reproducibility
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.manual_seed(self.seed)

        # Load MNIST dataset
        print("Preparing datasets")
        self.data_manager = DataManager(
            n_train=self.config.N_TRAIN,
            n_eval=self.config.N_EVAL,
            inpt_shape=self.config.INPT_SHAPE,
            grid_shape=self.config.GRID_SHAPE,
            label_shape=self.config.LABEL_SHAPE,
            assignments=self.config.ASSIGNMENTS,
            inpt_norm=self.config.INPT_NORM,
            intensity=self.config.INTENSITY,
            label_intensity=self.config.LABEL_INTENSITY)
        # Train / train-for-eval / validation / test splits, all encoded
        # with the same Poisson spike encoder.
        self.trn_set = self.data_manager.get_train(
            self.config.CLASSES,
            PoissonEncoder(time=self.config.TIME, dt=self.config.DT),
            self.config.BATCH_SIZE)
        self.trn_set4eval = self.data_manager.get_train4eval(
            self.config.CLASSES,
            PoissonEncoder(time=self.config.TIME, dt=self.config.DT),
            self.config.EVAL_BATCH_SIZE)
        self.val_set = self.data_manager.get_val(
            self.config.CLASSES,
            PoissonEncoder(time=self.config.TIME, dt=self.config.DT),
            self.config.EVAL_BATCH_SIZE)
        self.tst_set = self.data_manager.get_test(
            self.config.CLASSES,
            PoissonEncoder(time=self.config.TIME, dt=self.config.DT),
            self.config.EVAL_BATCH_SIZE)

        # Build network
        print("Preparing network")
        self.network: Net = Net(inpt_shape=self.config.GRID_SHAPE,
                                neuron_shape=self.config.NEURON_SHAPE,
                                lbound=self.config.V_LB,
                                vrest=self.config.V_REST,
                                vreset=self.config.V_RESET,
                                vth=self.config.V_TH,
                                theta_w=self.config.THETA_W,
                                sigma=self.config.SIGMA,
                                conn_strength=self.config.CONN_STR,
                                sigma_lateral_exc=self.config.SIGMA_EXC,
                                exc_strength=self.config.EXC_STR,
                                sigma_lateral_inh=self.config.SIGMA_INH,
                                inh_strength=self.config.INH_STR,
                                dt=self.config.DT,
                                refrac=self.config.REFR,
                                tc_decay=self.config.V_DECAY,
                                tc_trace=self.config.TR_DECAY,
                                nu=self.config.LR)
        # Direct network to GPU
        if P.GPU: self.network.to_gpu()

        # Object for network monitoring
        print("Preparing stats manager")
        self.stats_manager = utils.StatsManager(self.network,
                                                self.config.CLASSES,
                                                self.config.ASSIGNMENTS)
Example #18
0
import plotly.graph_objects as go
import numpy as np
import pandas as pd

from data import DataManager
from time_accessor import WeekTimeAccessor
from itertools import product
from dash_table import DataTable

from selenium.webdriver.support import expected_conditions as EC

app = dash.Dash(
    __name__,
    external_stylesheets=["https://codepen.io/chriddyp/pen/bWLwgP.css"])
app.title = "Allocate++"
data = DataManager().get_data()

# Grid container that is filled with one dropdown per (unit, group)
# by the loop below this block.
selection_div = html.Div(children=[],
                         style={
                             "display": "grid",
                             "grid-template-columns": "repeat(4, 10fr)",
                             "grid-gap": "10px"
                         })

for unit, content in data.groupby("Unit"):
    unit_div = html.Div(children=[html.H5(children=unit)])
    for group, choices in content.groupby("Group"):
        unit_div.children.append(html.Label(group))
        unit_div.children.append(
            dcc.Dropdown(id=f"{unit}:{group}",
                         options=choices[["Day", "Time"]].apply(
Example #19
0
    # Training hyper-parameters for the causal-RL experiment.
    bs = 32
    buffer_size = int(1e4)
    maximal_eps_duration_eval = 200
    maximal_eps_duration_train = 200
    num_episodes = 1000
    num_epidodes_shift = 1000

    # set up comet.ml
    # NOTE(review): hard-coded API key committed to source — move it to an
    # environment variable or config file.
    comet_exp = Experiment(api_key="go2q9NSgpaDoutQIk5IFAWEOz",
                           project_name='Causality-RL',
                           auto_param_logging=True,
                           auto_metric_logging=False,
                           parse_args=True,
                           auto_output_logging=None)

    # Replay buffer; the 5 and 2 presumably size the state/action
    # dimensions — confirm against DataManager's signature.
    data_buffer = DataManager(buffer_size, 5, 2, remove_random=False)

    # One joint model plus two causal models with correct / incorrect
    # causal direction.
    model_joint = ModelJoint(1, 20)
    model_correct = ModelCausal(1, 20, correct=True)
    model_incorrect = ModelCausal(1, 20, correct=False)

    optimizer_joint = torch.optim.RMSprop(model_joint.parameters(), lr=0.001)
    optimizer_correct = torch.optim.RMSprop(model_correct.parameters(),
                                            lr=0.001)
    optimizer_incorrect = torch.optim.RMSprop(model_incorrect.parameters(),
                                              lr=0.001)

    env = get_modified_MountainCarEnv()
    #import ipdb; ipdb.set_trace(context=5)

    # create dataset for evaluation
Example #20
0
import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
from datetime import date, time, datetime, timedelta
from data import DataManager
from os.path import isfile

app = dash.Dash(__name__)
app.title = 'Mood Tracker'
# Expose the underlying server object for WSGI deployment.
server = app.server

datam = DataManager()
# User ids known to the data manager.
UIDs = datam.uids


def fill_table(df):
    """Split a dataframe's actions into positive/negative table columns."""
    if df is None:
        return []

    # Partition actions by their recorded type ('Pos' vs anything else).
    pos, neg = [], []
    for i, val in enumerate(df['types']):
        if val == 'Pos':
            pos.append(df['actions'][i])
        else:
            neg.append(df['actions'][i])

    # NOTE(review): despite the name, this is the smaller of the two counts.
    maxlen = min(len(pos), len(neg))
Example #21
0
 def save_sample(self):
     """Persist the current image, its joints, and category, then advance."""
     # sum(..., []) flattens the per-row joint lists into a single list.
     DataManager().save_sample(self.img_view.current_image,
                               sum(self.joints_table.joints, []),
                               self.category_box.currentIndex())
     self.img_view.next_image()
Example #22
0
    # Remaining key layers (left0..right3 are defined above this excerpt).
    right4 = ["Vol+", "Vol-", "VolMute", "Win", "PrtScr", "BACK"]
    left5 = list(r"!?\"'@#~¬")
    right5 = list(r"`.,&\|") + ["BACK"]

    # Map each layer's left/right hand character sets by name.
    options = {"left_0": left0, "right_0": right0,
                "left_1": left1, "right_1": right1,
                "left_2": left2, "right_2": right2,
                "left_3": left3, "right_3": right3,
                "left_4": left4, "right_4": right4,
                "left_5": left5, "right_5": right5}

    dimensions = {"screenwidth": 1024, "screenheight": 576, 
                "overlay_width": 1024, "overlay_height": 576}

    # Initialise DataManager
    points = DataManager(dimensions, options, FPS, hub_radius=200)

    # Initialise InputManager, get idle_axis, update points with idle_axis.
    inputs = InputManager(joystick_num=0, points=points, rumble_ms=40)
    points.update_idle_points(inputs.idle_axis)
    
    # Initialise the display.
    display = DisplayManager(points=points, no_frame=False)

    # Used to manage how fast it updates
    clock = pygame.time.Clock()

    # Loop variables.
    done = False
    variant = 0
    wait_time = 220
0
from data import DataManager

if __name__ == '__main__':
    # Continuously pull and process frames until interrupted.
    data_manager = DataManager()
    while True:
        data_manager.get_and_process_frame()
        # data_manager.get_and_process_marker_frame()
Example #24
0
 def __init__(self, bot: commands.Bot):
     """Attach the bot and create this cog's DataManager instance."""
     self.bot = bot
     self.instance = DataManager()
Example #25
0
        self.main_layout.addWidget(self.img_view)
        self.main_layout.addLayout(self.detail_layout)

        # Keep the hover overlay above the widgets just added.
        self.joints_hover.raise_()

    @pyqtSlot(name='enable_confirm_button')
    def enable_confirm_button(self):
        """Enable the confirm button once joints and a category are both set."""
        if not self.joints_table.filled():
            return
        if not self.category_box.is_chosen():
            return
        self.confirmButton.setEnabled(True)

    @pyqtSlot(name='disable_confirm_button')
    def disable_confirm_button(self):
        """Grey out the confirm button."""
        self.confirmButton.setEnabled(False)

    @pyqtSlot(name='save_sample')
    def save_sample(self):
        """Persist the current sample (image, flattened joints, category)."""
        # sum(..., []) flattens the per-row joint lists into a single list.
        DataManager().save_sample(self.img_view.current_image,
                                  sum(self.joints_table.joints, []),
                                  self.category_box.currentIndex())
        self.img_view.next_image()


if __name__ == '__main__':
    app = QApplication(sys.argv)

    window = MainWindow(size=QSize(WINDOW_W, WINDOW_H), title=WINDOW_TITLE)
    # Seed the view with the first image from the data set.
    window.img_view.change_image(next(DataManager().image_loader))
    window.show()

    app.exec_()