def setup(self, bottom, top):

        params = yaml.load(self.param_str)
        self.check_params(params)

        caffe_root = params['caffe_root']
        db_dir = params['db_dir']
        self.group_cnt = params['group_cnt']

        self.batch_size = params['batch_size']
        lmdb_db_path_q, lmdb_db_path_t, meta_db_path_q, meta_db_path_t = self.define_db_paths(
            db_dir)

        self.im_shape = params['im_shape']

        self.q_loader = loader.Loader(lmdb_db_path_q, meta_db_path_q,
                                      caffe_root, self.im_shape[1])
        self.t_loader = loader.Loader(lmdb_db_path_t, meta_db_path_t,
                                      caffe_root, self.im_shape[1])

        self.load_from_q_base = True
        self.is_finished = False
        self.group_id = 0
        self.item_id = 0
        #data
        top[0].reshape(self.batch_size, self.im_shape[0], self.im_shape[1],
                       self.im_shape[2])
        #labels
        top[1].reshape(self.batch_size)
        #group
        top[2].reshape(1)
        #database (query or test)
        top[3].reshape(1)
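Note on the yaml.load(self.param_str) calls used throughout these Caffe layer setups: PyYAML 5.1+ warns on (and newer releases reject) yaml.load without an explicit Loader. A minimal sketch of a safer equivalent, assuming param_str holds plain YAML with no Python-specific tags (parameter values below are illustrative):

import yaml

param_str = "batch_size: 32\nim_shape: [3, 227, 227]"  # illustrative layer parameters
params = yaml.safe_load(param_str)  # same result as yaml.load for plain-YAML input
print(params['batch_size'])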
Example #2
    def create_dataset(cfg, image_data):
        if image_data:
            imgs = image_to_array(image_data)
            imgs_str = image_to_string(image_data)
            masks = []
            names = image_data
        else:
            files_csv = dataset_to_path(cfg.test_data,
                            os.path.join(cfg.data_dir, 'test'), cfg.dim)
            imgs_str = []
            for file_csv in files_csv:
                df = pd.read_csv(file_csv, header=None)
                img_paths = df[0]
                imgs_str.extend(image_to_string(img_paths))

            l = loader.Loader(cfg)
            imgs, masks, names = l.create_eval_data(dataset_to_path(cfg.test_data,
                                os.path.join(cfg.data_dir, 'test'), cfg.dim))

        # Build the result container; namedtuple() returns a class, so instantiate it.
        DataCollection = namedtuple('data_collection',
                                    ['images', 'images_str', 'masks', 'names'])
        dataset = DataCollection(images=imgs, images_str=imgs_str,
                                 masks=masks, names=names)

        return dataset
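A standalone sketch of the namedtuple pattern above, for reference: namedtuple() returns a new class, so passing the fields to the constructor keeps the values on the instance rather than mutating the class object (field values here are illustrative):

from collections import namedtuple

DataCollection = namedtuple('DataCollection', ['images', 'images_str', 'masks', 'names'])
sample = DataCollection(images=[], images_str=[], masks=[], names=['img_0.png'])
print(sample.names)  # ['img_0.png']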
Example #3
    def __init__(self, master=None, logger=None):
        super().__init__(master)
        self.master = master
        self.logger = logger

        self.configure(bg=DARKCOLOR)
        self.pack()
        self.master.title("MAIA - Maine AI Arena")

        # Create the msgr objs
        self.msg_queue = queue.Queue()
        self.imsgr = msgs.IMsgr(self.msg_queue)
        self.omsgr = msgs.OMsgr(self.msg_queue)

        self.ldr = loader.Loader(self.logger)
        self.sim = sim.Sim(self.imsgr)

        # log.LogInit()
        # log_setting = self.ldr.getMainConfigData('debug')
        # if type(log_setting)==bool:
        #     log.LogSetDebug(log_setting)
        #     log.LogDebug("DEBUG IS ON")

        self.combat_log = []

        self.BuildUI()

        self.UIMap = None
Example #4
def main():
    parser = argparse.ArgumentParser(description='Generate sample tree data under the protracted speciation model')
    parser.add_argument('--output', '-o')
    parser.add_argument('--schema', '-s', choices=['newick', 'nexus'], required=True,
                        help='Tree schema: Newick, Nexus')
    parser.add_argument('--config', '-c', default="default", required=True, help='')
    args = parser.parse_args()
    args.parser = parser

    config = cl.Loader(args.output)
    headers = config.load_headers()

    # load run parameters
    generated_sample_parameters = config.get_generate_sample_values()
    generated_protracted_speciation_process_parameters = config.generate_protracted_speciation_process_values()

    try:
        # getting trees
        get_trees = call_sample_tree(generated_sample_parameters, generated_protracted_speciation_process_parameters)
        # generating Sequences & saving trees, creating file names
        a = rng_file_name()
        for i in range(len(get_trees)):
            names = ["lineage", "orthospecies"]
            file_name = names[i][:3] + '_' + a + "." + str(args.schema)
            file_output(get_trees[i], args, file_name)
            parameters_to_json(generated_sample_parameters, generated_protracted_speciation_process_parameters, headers,
                              file_name)
    except Exception:
        # Skip runs whose tree generation or file output fails.
        pass
Example #5
def main(argv=None):
    parser = argparse.ArgumentParser(description="Training driver")
    parser.add_argument("-v", "--verbose", default=False, action="store_true")
    parser.add_argument("-c", "--config_file", default="config.json")

    parsed_arguments = parser.parse_args()
    arguments = vars(parsed_arguments)

    is_verbose = arguments['verbose']
    config_file = arguments['config_file']

    if is_verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    with open(config_file) as fid:
        config = json.load(fid)
        initialize_defaults(config)

    data_dir = config.get("data_dir")
    output_path = config.get("output_path")
    ldr = loader.Loader(data_dir, config)
    model = Model(is_verbose)

    if tf.gfile.Exists(output_path):
        tf.gfile.DeleteRecursively(output_path)
    tf.gfile.MakeDirs(output_path)

    run_training(model, ldr, config)
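The tf.gfile calls above are TensorFlow 1.x API. A minimal sketch of the same output-directory reset under TensorFlow 2.x using tf.io.gfile (the path below is a placeholder for the config value):

import tensorflow as tf

output_path = "/tmp/training_output"   # placeholder for config.get("output_path")
if tf.io.gfile.exists(output_path):
    tf.io.gfile.rmtree(output_path)    # delete the previous run's outputs
tf.io.gfile.makedirs(output_path)      # recreate an empty output directory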
Example #6
 def __init__(self, size_x, size_y, yaml, max_e=1000, fps=20):
     self.x = size_x
     self.y = size_y
     self.max_entities = max_e
     self.entities = []
     self.fps = fps
     self.loader = loader.Loader(yaml)
Example #7
    def __init__(self):
        self.score = 0

        self.loader = loader.Loader()
        self.gamemode = fsm([('Playing', 'GameOver', lambda x: x == 'GameOver'),
                             ('MainMenu', 'Playing', lambda x: x == 'NewGame', lambda x, y: self.newgame()),
                             ('MainMenu', 'Playing', lambda x: x == 'Resume'),
                             ('Playing', 'MainMenu', lambda x: x == 'MainMenu'),
                             ('GameOver', 'Playing', lambda x: x == 'GameOver')])
        self.gamemode.start('MainMenu')

        self.menu = kezmenu.KezMenu(['SPACE ACE!', lambda: True],
                                    ['New Game', lambda: self.gamemode.event('NewGame')],
                                    ['Exit', lambda: pygame.event.post(pygame.event.Event(pygame.QUIT))])
        self.menu.center = True  # enable horizontal menu item centering
        self.menu.color = WHITE
        self.menu.focus_color = GREEN
        self.menu.options[0]['focus_color'] = RED
        self.menu.options[0]['color'] = RED
        self.menu.options[0]['font'] = pygame.font.Font("fonts/OverdriveInline.ttf", 75)
        self.menu._fixSize()
        self.menu.center_at(SCREEN_CENTER)  # center entire menu to screen

        self.enemy_list = pygame.sprite.Group()
        self.player_list = pygame.sprite.Group()
        self.all_sprites_list = pygame.sprite.Group()

        self.events = None

        # Create the player
        self.player = sprites.Player(self.loader.PLAYER_IMAGE)
        self.all_sprites_list.add(self.player)
        self.player_list.add(self.player)
Example #8
def main():
    parser = argparse.ArgumentParser(
        description=
        'Generate sample tree data under the protracted speciation model')
    parser.add_argument('--t',
                        metavar='N',
                        nargs='+',
                        help='Incoming tree parameter')
    parser.add_argument('--s',
                        metavar='N',
                        nargs='+',
                        help='Incoming tree parameter')
    parser.add_argument('--output')
    args = parser.parse_args()
    args.parser = parser

    config = cl.Loader(args.output)
    headers = config.load_headers()

    for i in args.s:
        file = i[:-14] + '_t_params.json'
        with open(file, 'r') as f:
            d = json.load(f)
        with open(i, 'r') as r:
            d2 = json.load(r)
        d.update(d2)
        dtc.write_params_to_txt(d, headers)
Example #9
File: player.py Project: scottjrodgers/sc
def main():
    import os
    import time
    import tempoclock
    import loader
    import server

    ctl = server.connect(spew=True)
    ctl.sendMsg('/dumpOSC', 1)

    SYNTHDEF_PATH = os.path.join(os.path.expanduser('~'), '.pksampler',
                                 'synthdefs')
    SYNTHDEFS = (
        'JASStereoSamplePlayer.scsyndef',
        'JASSine.scsyndef',
    )
    for fname in SYNTHDEFS:
        ctl.sendMsg('/d_load', os.path.join(SYNTHDEF_PATH, fname))

    player = Player(ctl)
    ldr = loader.Loader(ctl)
    bid = ldr.load('/Users/patrick/.pksampler/clicks/click_1.wav')

    clock = tempoclock.TempoClock(140.0)

    beats = [clock.spb() * i for i in range(100)]
    now = time.time() + 1
    freqs = [440, 550, 220, 200, 460]
    synth = Synth()
    synth.name = 'JASSine'
    for seconds in beats:
        abs = now + seconds
        freqs = freqs[1:] + freqs[:1]
        synth['freq'] = freqs[0]
        player.play(synth, abs, abs + 1)
Example #10
    def __init__(self):
        load = loader.Loader("TETRIS")
        load.read()

        memory = mmu.MMU(load.data)
        processor = cpu.CPU(memory)
        display = video.Video()

        display.mainloop(processor.step)
Example #11
def load_tests():
    '''
    Create a TestLoader and load tests for the directory given by the config.
    '''
    testloader = loader_mod.Loader()
    log.test_log.message(terminal.separator())
    log.test_log.message('Loading Tests', bold=True)
    testloader.load_root(config.config.directory)
    return testloader
Example #12
    def setup(self):
        self.usage = [
            'usage:', 'start process_name [waittime]',
            'stop process_name stop_dep?', 'status process_name collect_dep?',
            'restart process_name restart_dep?', 'ls [user]'
        ]
        self.usage = '\n    '.join(self.usage)

        super(LauncherServer, self).setup()
        self.loader = loader.Loader(self.server.dconn)
        self.load()
Example #13
 def __init__(self, cfg, info):
     self.cfg = cfg
     self.info = info
     # set seed before calling loader ?
     tf.set_random_seed(cfg.seed)
     self.dataloader = loader.Loader(cfg)
     self.model = model.Model(cfg)
     self.update_csv_path()
     self.make_paths()
     self.make_dirs()
     self.n_train, self.n_val, self.n_test = self.get_data_info()
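Note that tf.set_random_seed is TensorFlow 1.x API; under TensorFlow 2.x the equivalent call is tf.random.set_seed. A one-line sketch (the seed value is illustrative):

import tensorflow as tf
tf.random.set_seed(1234)  # TF 2.x equivalent of tf.set_random_seed(cfg.seed)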
Example #14
    def setup(self, bottom, top):
        # params is a python dictionary with layer parameters.
        params = yaml.load(self.param_str)
        # Check the paramameters for validity.
        self.check_params(params)
        db_dir = params['db_dir']
        lmdb_db_path_q, lmdb_db_path_t, meta_db_path_q, meta_db_path_t = self.define_db_names(
            db_dir)

        self.max_query_size = params['max_query_size']
        self.im_shape = params['im_shape']
        self.pos_test_num = params['pos_test_num']
        self.neg_test_num = params['neg_test_num']
        caffe_root = params['caffe_root']
        self.group_cnt = params['group_cnt']

        self.filterQFile = ''
        if (os.path.exists(db_dir + '/dup')):
            self.filterQFile = db_dir + '/dup'

        self.q_loader = loader.Loader(lmdb_db_path_q,
                                      meta_db_path_q,
                                      caffe_root,
                                      self.im_shape[1],
                                      filterQFile=self.filterQFile)
        self.q_loader.set_group_cnt(self.group_cnt)

        self.t_loader = loader.Loader(lmdb_db_path_t, meta_db_path_t,
                                      caffe_root, self.im_shape[1])

        self.t_loader.set_group_cnt(self.group_cnt)

        self.batch_size = self.pos_test_num + self.neg_test_num + self.max_query_size

        #data
        top[0].reshape(self.batch_size, self.im_shape[0], self.im_shape[1],
                       self.im_shape[2])
        #label
        top[1].reshape(self.batch_size)
        #current group id
        top[2].reshape(1)
Example #15
 def loadText(self, e):
     dlg = wx.FileDialog(self,
                         "Wybierz plik do otwarcia",
                         "",
                         "*.*",
                         style=wx.FD_OPEN)
     if dlg.ShowModal() == wx.ID_OK:
         self.textFile = dlg.GetFilename()
         dir = dlg.GetDirectory()
         # Build the loader only when a file was actually chosen;
         # otherwise dir and self.textFile would be undefined here.
         self.sub = loader.Loader(os.path.join(dir, self.textFile))
         self.text = self.sub.getText()
     dlg.Destroy()
Example #16
    def setUp(self):
        self.repo = rubiks_repository.RubiksRepository()
        modules = [x.get_module_path() for x in self.repo.get_modules()]
        kube_loader.load(*modules)
        self.collection = output.OutputCollection(loader.Loader(self.repo), self.repo)
        self.collection.confidential = output.ConfidentialOutput
        self.collection.written = []

        self.collection.clusterless['testns'] = {'namespace-testns': FakeOutputMember(self.collection, 'namespace-testns')}
        self.collection.clusterless['testns']['namespace-testns'].kobj = kube_objs.Namespace('testns')
        self.collection.clusterless['testns']['namespace-testns'].is_namespace = True
        self.collection.clusterless['testns']['namespace-testns'].cluster = None
Example #17
 def __init__(self, dataDir):
     """init the internal instances of stuff"""
     self.dataDir = dataDir
     self.logger = logging.getLogger("util")
     self.logger.debug("Data dir is: " + dataDir)
     self.problemSet = loader.Loader(dataDir).problemsDictionary
     self.evaluator = CodeRunner.Evaluate(dataDir)
     self.langs = self.evaluator.getLangs()
     self.logger.info("Supporting the following languages: " +
                      str(self.langs))
     self.passwd = json.load(open(os.path.join(dataDir, "passwd.json")))
     self.scoreboard = scoring.ScoreBoard(dataDir)
Example #18
def run_test(distance, angle, start, end, plot_):
    if angle < 10:
        angle_str = "0" + str(int(angle))
    else:
        angle_str = str(int(angle))
    if distance < 10:
        distance_str = "0" + str(int(distance))
    else:
        distance_str = str(int(distance))

    stri = "A2_CH_" + angle_str + '_' + distance_str
    loaded = loader.Loader("../measurements/measurement_1/", stri)

    signals = loaded.get_measurements()

    # cut them to the beginning of a signal
    if end < 0:
        end = len(signals[0])

    for i in range(0, len(signals)):
        signals[i] = signals[i][start:end]

    #also plot them
    if plot_:
        plt.figure()

        x = range(0, len(signals[0]))
        for i in range(0, len(signals)):
            plt.subplot(8, 1, i + 1)

            plt.plot(x, signals[i])

        plt.show()

    #main calculation
    array = GeometryLibrary.calculateMicrophoneArray_2(
        0.35, GeometryLibrary.getPoint(0, 0))
    source_pos = GeometryLibrary.getPoint(0, distance)

    distances = []  #TODO calculate them
    for i in range(0, len(signals)):
        distances.append(GeometryLibrary.distance(source_pos, array[i]))
        #print(distances[i])

    speed_of_sound = 343.3
    sample_rate = loaded.get_meta_data()["sampling_rate"]
    calib = calibrator.Calibrator(signals, distances, speed_of_sound,
                                  sample_rate)

    ks = calib.run_calibration()

    print(ks)
Example #19
File: main.py Project: wangyu-ustc/deepagg
 def __init__(self, num_p, num_q, k_ability, k_difficulty, num_classes):
     """  
     Create blocks and initialize members
     """
     self.loader = loader.Loader()
     self.num_classes = num_classes
     self.num_participants = num_p
     self.num_questions = num_q
     self.k_ability = k_ability
     self.k_difficulty = k_difficulty
     self.feature = feature.FeatureRepresenter(self.num_participants,
                                               self.num_questions,
                                               k_ability, k_difficulty, 2)
     self.block1 = block1.Block1(k_ability + k_difficulty + 2)
     self.block2 = block2.Block2(self.num_participants, self.num_classes)
Example #20
    def setup(self, bottom, top):
        params = yaml.load(self.param_str)
        self.check_params(params)
        caffe_root = params['caffe_root']
        db_dir = params['db_dir']
        self.group_cnt = params['group_cnt']
        self.batch_size = params['batch_size']
        self.set_triplet_size = params['set_triplet_size']
        self.im_shape = params['im_shape']

        lmdb_db_path = db_dir + '/' + 'test_all'
        meta_db_path = db_dir + '/' + 'meta_test_all'
        self.loader = loader.Loader(lmdb_db_path, meta_db_path, caffe_root,
                                    self.im_shape[1])
        self.group_id = 0
Example #21
def do_rerun():
    # Init early parts of log
    with RunLogHandler() as log_handler:
        # Load previous results
        results = result.InternalSavedResults.load(
            os.path.join(config.config.result_path,
                         config.constants.pickle_filename))

        rerun_suites = (suite.uid for suite in results if suite.unsucessful)

        # Use loader to load suites
        loader = loader_mod.Loader()
        test_schedule = loader.load_schedule_for_suites(*rerun_suites)

        # Execute the tests
        run_schedule(test_schedule, log_handler)
Example #22
File: score.py Project: statX/cinc17
def load_model(model_path, is_verbose, batch_size):

    # TODO, (awni), would be good to simplify loading and
    # not rely on random seed for validation set.
    config_file = os.path.join(model_path, "config.json")
    with open(config_file, 'r') as fid:
        config = json.load(fid)
    data_conf = config['data']
    ldr = loader.Loader(data_conf['path'], batch_size, seed=data_conf['seed'])

    evl = evaler.Evaler(model_path,
                        is_verbose,
                        batch_size=batch_size,
                        class_counts=None)

    return evl, ldr
Example #23
File: main.py Project: hirox/capsule
def make_train_dataset():
    global vals

    __loader = loader.Loader()
    __loader.load()

    vals = __loader.to_percentage_from_start()
    #vals = __loader.to_percentage_from_last()

    # Normalize to (n_samples, n_features)
    #scaler = MinMaxScaler(feature_range=(0, 1))
    #vals = scaler.fit_transform(vals)

    __loader.save_data()

    df = pd.DataFrame(vals)
    df.plot(figsize=(15, 5)).get_figure().savefig("graph_orig.png")
Example #24
    def setup(self, bottom, top):
        params = yaml.load(self.param_str)
        self.check_params(params)
        caffe_root = params['caffe_root']
        db_dir = params['db_dir']
        self.group_cnt = params['group_cnt']
        self.class_num = params['class_num']
        self.train_per_class = params['train_per_class']
        self.test_per_class = params['test_per_class']
        self.batch_len = params['batch_len']
        self.im_shape = params['im_shape']

        lmdb_db_path = db_dir + '/' + 'test_all'
        meta_db_path = db_dir + '/' + 'meta_test_all'
        self.loader = loader.Loader(lmdb_db_path, meta_db_path, caffe_root,
                                    self.im_shape[1])
        self.group_size = self.class_num * self.train_per_class + self.class_num * self.test_per_class
Example #25
File: game.py Project: Obsttube/pyMARS
    def set_up(self, window_width_and_height):
        self.__core = core.Core(self.core_size)
        self.gui = window.GUI(window_width_and_height, self.__core)
        self.__core.set_gui_callback(self.gui)
        main_loader = loader.Loader(self.__core)

        # load instructions

        first_warior = warior.Warior("first_warior", "#ff0000")  # todo index
        first_warior.set_gui_callback(self.gui)
        self.__core.add_warior(first_warior)
        start_index = main_loader.load_file_at(first_warior, "dwarf.txt", 0,
                                               self.core_size)
        first_warior.add_process(start_index)

        second_warior = warior.Warior("second_warior", "#00ff00")
        second_warior.set_gui_callback(self.gui)
        self.__core.add_warior(second_warior)
        start_index = main_loader.load_file_at(second_warior, "agony3.1.txt",
                                               100, self.core_size)
        second_warior.add_process(start_index)
Example #26
def do_run():
    # Initialize early parts of the log.
    with RunLogHandler() as log_handler:
        if config.config.uid:
            uid_ = uid.UID.from_uid(config.config.uid)
            if isinstance(uid_, uid.TestUID):
                log.test_log.error(
                    'Unable to run a standalone test.\n'
                    'Gem5 expects test suites to be the smallest unit '
                    ' of test.\n\n'
                    'Pass a SuiteUID instead.')
                return
            test_schedule = loader_mod.Loader().load_schedule_for_suites(uid_)
            if get_config_tags():
                log.test_log.warn(
                    "The '--uid' flag was supplied,"
                    " '--include-tags' and '--exclude-tags' will be ignored.")
        else:
            test_schedule = load_tests().schedule
            # Filter tests based on tags
            filter_with_config_tags(test_schedule)
        # Execute the tests
        run_schedule(test_schedule, log_handler)
Example #27
    def get_command_line_input(self, argv):
        '''Parse the command line input that specifies data file paths.'''
        data_file_path = ""

        try:
            opts, _ = getopt.getopt(argv, "", ["path=", "start=", "stop="])
        except getopt.GetoptError:
            print(
                "article_analytics.py --path <pattern_of_path_to_data_files> --start \
                <start_of_file_index_range> --stop <end_of_file_index_range>")
            sys.exit(2)

        for option, value in opts:
            if option == "--path":
                data_file_path = value
            elif option == "--start":
                self.range_start = int(value)
            else:
                self.range_end = int(value)

        data_loader = loader.Loader()
        _, file_extension = os.path.splitext(data_file_path)

        if file_extension == ".json":
            data_loader.load_json(data_file_path, self.range_start,
                                  self.range_end)
            self.df = data_loader.get_data()
        elif file_extension == ".csv":
            data_loader.load_csv(data_file_path, self.range_start,
                                 self.range_end)
            self.df = data_loader.get_data()
        else:
            print(
                "Unrecognizable data file format. Data file must be in .csv or .json format!"
            )
            sys.exit()
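For comparison, a minimal argparse sketch of the same command-line interface as get_command_line_input (option names mirror the getopt example; the sample arguments are illustrative):

import argparse

parser = argparse.ArgumentParser(description="Load article data from .json or .csv files")
parser.add_argument("--path", required=True, help="pattern of path to data files")
parser.add_argument("--start", type=int, default=0, help="start of file index range")
parser.add_argument("--stop", type=int, default=0, help="end of file index range")
args = parser.parse_args(["--path", "data/articles.json", "--start", "1", "--stop", "5"])
print(args.path, args.start, args.stop)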
Example #28
    def setup(self, bottom, top):
        params = yaml.load(self.param_str)
        self.check_params(params)
        caffe_root = params['caffe_root']
        db_dir = params['db_dir']
        self.group_cnt = params['group_cnt']
        self.batch_size = params['batch_size']
        self.set_triplet_size = params['set_triplet_size']
        self.im_shape = params['im_shape']
        for key in params:
            print '|' + key + '|'
        if ('train_set_size' in params):
            self.train_set_size = params['train_set_size']
            self.test_set_size = params['test_set_size']
            self.test_pos_size = params['test_pos_size']
            self.test_neg_size = self.test_set_size - self.test_pos_size
        else:
            self.train_set_size = -1
            self.test_set_size = -1
            self.test_pos_size = -1
            self.test_neg_size = -1

        if ('meta_mode' in params):
            self.meta_mode = True
            print 'META MODE ENABLED'
        else:
            self.meta_mode = False
            print 'META MODE DISABLED'

        lmdb_db_path = db_dir + '/' + 'test_all'
        meta_db_path = db_dir + '/' + 'meta_test_all'
        self.loader = loader.Loader(lmdb_db_path, meta_db_path, caffe_root,
                                    self.im_shape[1])
        self.update_group_ids()

        self.group_id = 0
Example #29
def main():
    parser = argparse.ArgumentParser(
        description=
        'Generate sample tree data under the protracted speciation model')
    parser.add_argument('--ts', metavar='N', nargs='+', help='Incoming trees')
    parser.add_argument('--output_dir')
    parser.add_argument('--params',
                        metavar='N',
                        nargs='+',
                        help='Incoming parameter files')
    args = parser.parse_args()
    args.parser = parser

    config = cl.Loader(args.output_dir)

    # get seqgen parameters
    get_seqgen_param = config.get_seq_gen_values()
    # write seqgen params to file
    for i in range(len(args.ts)):
        id = args.ts[i].split('.')[0]
        seqgen_params_with_id = {'id': id, **get_seqgen_param}
        parameters_to_json(id, seqgen_params_with_id)
    # generate seqs
    seqgen_to_file(args.ts, get_seqgen_param)
Example #30
    def main_screen(self):
        self.screen = Tk()

        self.code_loader = loader.Loader(password)

        self.entry_test = autocomplete.AutocompleteEntryBox(lista, self.screen)

        self.screen.geometry("600x500+100+100")

        self.screen.title("LoaderGui")

        Label(text="LoaderGui V0.01",
              bg="grey",
              width="300",
              height="2",
              font=("Calibri", 13)).pack()
        Label(text="").pack()
        Label(self.screen, text="Uni ID").pack()
        self.entry_test.pack()
        Label(text="").pack()

        Button(text="Upload code",
               height="1",
               width="30",
               command=self.load_screen).pack()
        Label(text="").pack()
        Button(text="Stop robot", height="1", width="30",
               command=self.stop).pack()
        Label(text="").pack()
        Button(text="Fetch output",
               height="1",
               width="30",
               command=self.load_screen).pack()
        Label(text="").pack()

        self.screen.mainloop()