Example No. 1
def check_data_integrity(bot):
    '''Checks and fixes the file integrity of Data.

    Replaces files which are corrupt, missing, etc., and also
    replaces values that have been set incorrectly.'''

    for server in bot.servers:
        server_path = "Data/" + server.id

        #This is for the main server folder.
        if not os.path.exists(server_path):
            os.makedirs(server_path)

        #And this is for the json files contained within.
        check_json(server_path + "/RolesConfig.json", {})
        check_json(server_path + "/CommandConfig.json", {})
        check_json(server_path + "/EventConfig.json", {})
        check_json(server_path + "/FunctionConfig.json", {})
        check_json(server_path + "/UserConfig.json", {})
        if check_json(server_path + "/ServerConfig.json", server_config):
            set_default_config_values(server)
        
        update_config_keys(server)
        replace_invalid_values(server)
        remove_absent_servers(bot)
        
        for member in server.members:
            main_channel_id = configs.get_config(configs.server_config, server.id)["MainChannel"]
            main_channel = member.server.get_channel(main_channel_id)
            if main_channel.permissions_for(member).administrator:
                if member.id not in configs.get_config(configs.user_config, server.id):
                    configs.set_config(server.id, "Users", member.id)
                configs.set_config(server.id, "Users", member.id + " / GodMode / true")
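The check_json helper is not shown in this example. A minimal sketch of what it appears to assume, based only on how it is called above (return truthy when the file had to be replaced), might be:

import json

def check_json(path, default):
    # Ensure ``path`` holds valid JSON; write ``default`` when the file is
    # missing or corrupt. Return True if the file had to be replaced.
    try:
        with open(path) as f:
            json.load(f)
        return False
    except (IOError, ValueError):
        with open(path, "w") as f:
            json.dump(default, f)
        return True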
Example No. 2
    def __call__(self, cls):
        # establish clsfile, pkg_root for cls:
        clsfile = inspect.getfile(cls)
        mod = inspect.getmodule(cls)
        pkg = mod.__name__.split('.')[0]
        pkg_root = os.path.abspath(pr.resource_filename(pkg, '.'))

        # try to get default values from default config and section:
        defaults = {
            '__class_file': clsfile,
            '__module': cls.__module__,
            '__pkg_root': pkg_root,
        }

        if self.defaults_fn is not None and self.def_sections is not None:
            if not os.path.isabs(self.defaults_fn):
                # locate defaults_fn in same directory as class file
                self.defaults_fn = os.path.join(os.path.dirname(clsfile),
                                                self.defaults_fn)
            else:
                # locate defaults_fn in package root, unless it already points to a real file:
                if not os.path.exists(self.defaults_fn):
                    self.defaults_fn = os.path.join(pkg_root,
                                                    self.defaults_fn[1:])
            def_config = get_config(self.defaults_fn)
            for dsect in self.def_sections:
                defaults.update(to_dict(def_config, dsect))

        # get class config and inject values:
        config_fn = os.path.join(os.path.dirname(clsfile),
                                 replace_ext(os.path.basename(clsfile), 'ini'))
        cls_config = get_config(config_fn, defaults=defaults)
        load_attributes_from_config(cls, cls_config, cls.__name__)
        return cls
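The helpers used here (get_config, load_attributes_from_config) are not shown. As a self-contained sketch of the attribute-injection step only, assuming an INI-style file read with the standard library:

import configparser

def load_attributes_from_config(cls, config, section):
    # Set every option of ``section`` as an attribute on ``cls``.
    for name, value in config.items(section):
        setattr(cls, name, value)

# Demo with an in-memory config instead of a real <ClassName>.ini file.
demo_config = configparser.ConfigParser()
demo_config.read_string("[Service]\nhost = localhost\nport = 8080\n")

class Service:
    pass

load_attributes_from_config(Service, demo_config, 'Service')
assert Service.host == 'localhost'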
Example No. 3
def main():
    config = get_config(mode='train')
    val_config = get_config(mode='valid')
    with open(os.path.join(config.save_path, 'config.json'), 'w') as json_f:
        config.to_json(json_f)

    raw_data = load_json(config.all_path)
    train_data_loader = get_loader(raw_data=raw_data,
                                   max_len=config.max_len,
                                   batch_size=config.batch_size,
                                   shuffle=True,
                                   user_map_dict=config.user_map_dict,
                                   max_users=config.max_users)

    raw_data = load_json(val_config.all_path)
    eval_data_loader = get_loader(raw_data=raw_data,
                                  max_len=val_config.max_len,
                                  batch_size=val_config.eval_batch_size,
                                  shuffle=False,
                                  user_map_dict=config.user_map_dict,
                                  max_users=config.max_users)

    model_solver = getattr(solvers, "Solver{}".format(config.model))
    solver = model_solver(config,
                          train_data_loader,
                          eval_data_loader,
                          is_train=True)

    solver.build()
    solver.train()
    solver.writer.close()

    return config
Example No. 4
 def test_register_and_retrieve_config(self):
     form_builder = self.instance.get_form_builder()
     lazydictionary_post = get_config('test')
     form = form_builder({'setting1':'wooot', 'setting2':'2', 'site':'1'}, {})
     self.assertTrue(form.is_valid(), form.errors)
     form.save()
     self.assertNotEqual(0, len(get_config('test').items()))
     self.assertNotEqual(0, len(lazydictionary_post.items()))
Example No. 5
def get_dataset(dataset_name, params):
    if dataset_name == "quickdraw":
        ds_config = get_config(dataset_name)().parse(
            "split={}".format("c300_msl65_scaled_48"))
        dataset_proto = get_dataset_sketch(dataset_name)(SKETCH_DATA_DIR,
                                                         ds_config)

        dataset = dataset_proto.load(repeat=True)[0]

        dataset = dataset.map(lambda im, lab: tf.py_func(
            lambda x, y: (x[0], quickdraw_lookup[y[0].decode('utf-8').rstrip(
                '\x00')]), (im, lab), (tf.float32, tf.int64)))
        dataset = dataset.map(lambda im, lab:
                              (im, tf.one_hot(lab, len(quickdraw_classes))))
        train, height, width, colors = input_fn_dataset(
            dataset, params['batch_size'])
        return DataSet(dataset_name, train, None, None, height, width, colors,
                       len(quickdraw_classes))
    elif dataset_name == "fs_omniglot":
        ds_config = get_config(params['fso_config'])().parse("mode=batch")
        dataset_proto = get_dataset_sketch(dataset_name)(SKETCH_DATA_DIR,
                                                         ds_config)

        (dataset, _), class_list = dataset_proto.load(repeat=True)
        fso_lookup = {
            class_string: i
            for i, class_string in enumerate(class_list)
        }

        dataset = dataset.map(lambda im, lab: tf.py_func(
            lambda x, y: (x[0], fso_lookup[y[0].decode('utf-8').rstrip('\x00')]
                          ), (im, lab), (tf.float32, tf.int64)))
        dataset = dataset.map(lambda im, lab:
                              (im, tf.one_hot(lab, len(class_list))))

        train, height, width, colors = input_fn_dataset(
            dataset, params['batch_size'])
        return DataSet(dataset_name, train, None, None, height, width, colors,
                       len(class_list))
    else:
        train, height, width, colors = _DATASETS[dataset_name + '_train'](
            batch_size=params['batch_size'])
        test = _DATASETS[dataset_name + '_test'](batch_size=1)[0]
        train = train.map(lambda v: dict(
            x=v['x'], label=tf.one_hot(v['label'], _NCLASS[dataset_name])))
        test = test.map(lambda v: dict(
            x=v['x'], label=tf.one_hot(v['label'], _NCLASS[dataset_name])))
        if dataset_name + '_train_once' in _DATASETS:
            train_once = _DATASETS[dataset_name +
                                   '_train_once'](batch_size=1)[0]
            train_once = train_once.map(lambda v: dict(
                x=v['x'], label=tf.one_hot(v['label'], _NCLASS[dataset_name])))
        else:
            train_once = None
        return DataSet(dataset_name, train, test, train_once, height, width,
                       colors, _NCLASS[dataset_name])
Example No. 6
 def test_is_not_encrypted(self):
     form_builder = self.complex_instance.get_form_builder()
     lazydictionary_post = get_config('testcomplex')
     test_user = User.objects.get_or_create(username='******')[0]
     form = form_builder({'amount': '5.00', 'user': test_user.pk, 'site': '1'}, {})
     self.assertTrue(form.is_valid(), form.errors)
     form.save()
     self.assertTrue(form.is_valid(), form.errors)
     self.assertNotEqual(0, len(get_config('testcomplex').items()))
     self.assertNotEqual(0, len(lazydictionary_post.items()))
     conf = Configuration.objects.get(key='testcomplex')
     self.assertTrue('5.00' in conf.data)
Example No. 7
def sms_request(mob_number, vcode):
    data = urllib.parse.urlencode({
        'appid': get_config('appid'),
        'to': mob_number,
        'content': get_config(key='content') + vcode,
        'signature': get_config('app_key')
    })
    data = data.encode('utf-8')
    request = urllib.request.Request(
        "https://api.submail.cn/message/send.json", method='POST')
    request.add_header("Content-Type",
                       "application/x-www-form-urlencoded;charset=utf-8")
    f = urllib.request.urlopen(request, data)
    return f.read().decode('utf-8')
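A hypothetical invocation (the number and code are made up; the real 'appid', 'content', and 'app_key' values must exist in whatever backend get_config reads):

print(sms_request('13800138000', '123456'))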
Example No. 8
 def test_complex_config(self):
     form_builder = self.complex_instance.get_form_builder()
     lazydictionary_post = get_config('testcomplex')
     test_user = User.objects.get_or_create(username='******')[0]
     form = form_builder({'amount':'5.00', 'user':test_user.pk, 'site':'1'}, {})
     self.assertTrue(form.is_valid(), form.errors)
     form.save()
     self.assertNotEqual(0, len(get_config('testcomplex').items()))
     self.assertNotEqual(0, len(lazydictionary_post.items()))
     config = get_config('testcomplex')
     self.assertTrue(isinstance(config['amount'], Decimal))
     self.assertEqual(Decimal('5.00'), config['amount'])
     self.assertTrue(isinstance(config['user'], User))
     self.assertEqual(test_user.pk, config['user'].pk)
Example No. 9
 def test_complex_config(self):
     form_builder = self.complex_instance.get_form_builder()
     lazydictionary_post = get_config('testcomplex')
     test_user = User.objects.get_or_create(username='******')[0]
     form = form_builder({'amount':'5.00', 'user':test_user.pk, 'site':'1'}, {})
     self.assertTrue(form.is_valid(), form.errors)
     form.save()
     self.assertNotEqual(0, len(get_config('testcomplex').items()))
     self.assertNotEqual(0, len(lazydictionary_post.items()))
     nuke_cache()
     config = get_config('testcomplex')
     self.assertTrue(isinstance(config['amount'], Decimal))
     self.assertEqual(Decimal('5.00'), config['amount'])
     self.assertTrue(isinstance(config['user'], User))
     self.assertEqual(test_user.pk, config['user'].pk)
Example No. 10
File: test.py Project: NoSyu/CDMM-B
def main():
    config = get_config(mode='test')

    with open(os.path.join(config.save_path, 'config.json'), 'r') as json_f:
        temp_config_str = json_f.read()
        config.max_users = int(
            re.findall(r"'max_users': ([0-9]+?),", temp_config_str)[0])
        config.max_len = int(
            re.findall(r"'max_len': ([0-9]+?),", temp_config_str)[0])
        config.rnn_hidden_size = int(
            re.findall(r"'rnn_hidden_size': ([0-9]+?),", temp_config_str)[0])

    raw_data = load_json(config.all_path)
    test_data_loader = get_loader(raw_data=raw_data,
                                  max_len=config.max_len,
                                  batch_size=config.batch_size,
                                  shuffle=False,
                                  user_map_dict=config.user_map_dict,
                                  max_users=config.max_users)

    model_solver = getattr(solvers, "Solver{}".format(config.model))
    solver = model_solver(config, None, test_data_loader, is_train=False)

    solver.build()
    solver.test()

    return config
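The single-quoted keys in the regexes suggest config.json actually holds a Python repr rather than strict JSON, which is presumably why regex extraction is used. If the file were valid JSON, this would reduce to json.load; a sketch, assuming the keys are present:

import json

with open(os.path.join(config.save_path, 'config.json')) as json_f:
    saved = json.load(json_f)
config.max_users = int(saved['max_users'])
config.max_len = int(saved['max_len'])
config.rnn_hidden_size = int(saved['rnn_hidden_size'])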
Example No. 11
def get_racy_default(opt, config,
        default=NotUsed, prj=NotUsed, option_value=Undefined):

    if config is None:
        config = get_option('CONFIG', config=configs.DEFAULT_CONFIG)

    defaults_source = configs.get_config(config, raise_on_not_found=False)
    if not defaults_source:
        defaults_source = configs.get_config(configs.DEFAULT_CONFIG)

    res = defaults_source.get(opt)

    allowedvalues.check_value_with_msg(opt, res, config, exceptionnal_case)

    return res
Example No. 12
def get_user_options(opt, config, default, prj=NotUsed, option_value=Undefined):
    """Lookup for a user.options file in RACY_CONFIG_DIR. Get the user 
    option 'opt' if exists. A user option is overrided by a user config option
    """
    import racy.renv as renv

    files = [
            ( renv.dirs.config, 'user.options'),
            ]

    file_opts = get_file_options(None, files)
    res = file_opts.get(opt, default)

    options_file = rutils.get_first_existing_file(files)
    ckconfig = config
    if options_file is not None:
        ckconfig = ":".join([options_file, config])
    allowedvalues.check_value_with_msg(opt, res, ckconfig, exceptionnal_case)

    old_res = res

    loc = renv.dirs.user_configs
    source = configs.get_config(config, path=loc,
            include_defaults = False, raise_on_not_found = False)

    if source:
        res = source.get(opt, res)

        if old_res != res:
            ckconfig = ":".join([loc,config])
            allowedvalues.check_value_with_msg(opt, res, ckconfig, 
                                            exceptionnal_case)

    return res
Example No. 13
    def make(self, key):
        # Find path with sorted data
        config_type = (Experiment() & key).fetch1('config_type')
        cfg = configs.get_config(config_type)
        sorted_path = os.path.join(data_path, cfg['sorted_path'].format(**key))
        if not os.path.isdir(sorted_path):
            print('Neural recordings for {session_id} in {experiment_id} are not found'.format(**key))
            return
        possible_paths = [sorted_path]
        possible_paths.extend(
            os.path.join(sorted_path, path) for path in os.listdir(sorted_path)
            if os.path.isdir(os.path.join(sorted_path, path)))
        print(possible_paths)
        sorted_path = [path for path in possible_paths
                       if os.path.isfile(os.path.join(path, 'phy.log'))]
        print(sorted_path)
        sorted_path = sorted_path[0]
        try:
            key['spike_times'] = np.load(os.path.join(sorted_path, 'spike_times.npy'))
            key['spike_clusters'] = np.load(os.path.join(sorted_path, 'spike_clusters.npy'))
            key['spike_templates'] = np.load(os.path.join(sorted_path, 'spike_templates.npy'))
            key['amplitudes'] = np.load(os.path.join(sorted_path, 'amplitudes.npy'))
            key['channel_positions'] = np.load(os.path.join(sorted_path, 'channel_positions.npy'))
            key['channel_map'] = np.load(os.path.join(sorted_path, 'channel_map.npy'))
            key['cluster_info'] = pd.read_csv(os.path.join(sorted_path, 'cluster_info.tsv'), sep='\t', header=0, index_col=0).to_dict()

            self.insert1(key)

            print('Populated sorted recordings for {session_id} in {experiment_id}'.format(**key))
        except Exception as e:
            print("Error populating sorted recording for {session_id} in {experiment_id}: ".format(**key), e)
Example No. 14
   def make(self, key):
       import h5py
       config_type = (Experiment() & key).fetch1('config_type')
       cfg = configs.get_config(config_type)

       wavesurfer_path = os.path.join(data_path, cfg['wavesurfer_path'].format(**key))
       print("Wavesurfer path", wavesurfer_path)
       if not os.path.isdir(wavesurfer_path):
           print('Ball recordings for {session_id} in {experiment_id} are not found'.format(**key))
           return
       wavesurfer_files = [f for f in os.listdir(wavesurfer_path) if f.endswith('.h5') and key['subsession_id'] in f] # Assumes subsession_ids are not overlapping!!!
       print(wavesurfer_files)
       # speedV0 = 16800 # analog signal when the animal is not moving
       # sweep = '0001'
       with h5py.File(os.path.join(wavesurfer_path, wavesurfer_files[0]), "r") as ws_file:
           sweep = [sub for sub in ws_file.keys() if 'sweep' in sub][0]
           trigger_traces = np.array(ws_file.get(sweep + '/analogScans'))
           # ball_speed = (trigger_traces[2, :]-speedV0)/speedV0 # recorded with 20kHz
           key['ball_readout'] = trigger_traces  # ball_speed

           sync_trace = (EphysRaw() & key).fetch1('sync_trace')
           try:
               key['pxi_scaling_factor'], key['pxi_offset'] = self._get_pxi_matching(trigger_traces, sync_trace)

               self.insert1(key)
           except Exception as e:
               print(e)
Example No. 15
 def test_nuke_cache(self):
     my_config = get_config('test')
     my_config._load()
     nuke_cache()
     self.assertFalse(hasattr(my_config.data, 'config'))
     my_config._load()
     self.assertTrue(hasattr(my_config.data, 'config'))
Example No. 16
def send_email(data):
    NOREPLY_EMAIL = '*****@*****.**'

    mailgun_request_data = {
        "from": "%s <%s>" % (data.get('sender_name', 'Team Trotto'),
                             data.get('reply_to', NOREPLY_EMAIL)),
        "to": data['recipient_email'],
        "subject": data['subject'],
        "text": data['plaintext'],
    }

    if data.get('html') is not None:
        mailgun_request_data['html'] = data['html']

    response = requests.post(
        "https://api.mailgun.net/v3/mg.trot.to/messages",
        auth=("api", configs.get_config()['mailgun']['general_use_api_key']),
        data=mailgun_request_data)

    response.raise_for_status()
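A hypothetical payload, with field names taken from the dict accesses above:

send_email({
    'recipient_email': 'user@example.com',
    'subject': 'Welcome',
    'plaintext': 'Hello!',
    'html': '<p>Hello!</p>',
})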
Example No. 17
def main():
    """Main function to run model."""
    config = get_config(os.environ)

    sys.path.append(os.path.join('tasks', config.task_folder))
    # pylint: disable=import-error
    from trainer import train
    from tester import test
    # pylint: enable=import-error

    if config.is_distributed:
        torch.cuda.set_device(config.local_rank)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')

    logger = setup_logger(config.work_dir, distributed_rank=get_rank())
    logger.info(f'Using {config.num_gpus} GPUs.')
    logger.info(f'Collecting environment info:{get_env_info()}')
    logger.info('------------------------------')
    logger.info('Running configurations:')
    for key, val in config.__dict__.items():
        logger.info(f'  {key}: {val}')
    logger.info('------------------------------')

    if config.run_mode == 'train':
        train(config, logger)
    elif config.run_mode == 'test':
        test(config, logger)
Example No. 18
def main():
    seed_everything()
    args = get_config()
    current_time = str(datetime.datetime.now()).replace(" ", "_")
    exp_name = f"{current_time}_{args.model_name}"
    print(args,
          file=codecs.open(f"{args.log_dir}/{exp_name}.log", "w", "utf-8"))
    logger = create_logger(f"{args.log_dir}", exp_name)
    device = torch.device(
        f"cuda:{args.device}" if torch.cuda.is_available() else "cpu")
    pad_token_id = 0
    data_dir = args.data_dir
    train_ds = IWSLTDataset(f"{data_dir}/train.de",
                            f"{data_dir}/train.en",
                            max_length=128)
    src_wd2id, tgt_wd2id = train_ds.src_wd2id, train_ds.tgt_wd2id
    tgt_id2wd = {j: i for i, j in tgt_wd2id.items()}
    val_ds = IWSLTDataset(f"{data_dir}/valid.de",
                          f"{data_dir}/valid.en",
                          src_wd2id=src_wd2id,
                          tgt_wd2id=tgt_wd2id)
    collate_fn = CollateFn(0, True)
    if args.debug:
        train_ds = train_ds[:400]
    train_dl = DataLoader(train_ds,
                          batch_size=args.batch_size,
                          num_workers=2,
                          shuffle=args.shuffle_data,
                          collate_fn=collate_fn)
    val_dl = DataLoader(val_ds,
                        batch_size=args.batch_size,
                        num_workers=2,
                        collate_fn=collate_fn,
                        shuffle=False)
    train_step = math.ceil(len(train_ds) / args.batch_size)
    src_vocab_size = len(src_wd2id)
    tgt_vocab_size = len(tgt_wd2id)
    model = get_model(args, src_vocab_size, tgt_vocab_size)
    if args.init_param_width != 0:
        init_fn = functools.partial(init_uni_weights,
                                    width=args.init_param_width)
        model.apply(init_fn)
    count_parameters(model)
    model = model.to(device)
    optimizer = get_optimizer(
        args, model)  # optim.Adam(model.parameters(), lr=args.lr)
    if args.scheduler == "mutli_step":
        # range() rejects float steps, so build the half-epoch milestones
        # (5, 5.5, ..., n_epoch - 0.5) explicitly.
        milestones = [int(train_step * (5 + 0.5 * k))
                      for k in range(max(0, (args.n_epoch - 5) * 2))]
    else:
        milestones = None
    scheduler = get_scheduler(args, optimizer, milestones)
    train_fn(train_dl,
             val_dl,
             model,
             optimizer,
             scheduler,
             device,
             logger,
             tgt_id2wd,
             args=args)
Example No. 19
def runserver(config_path=None):
    config = get_config(config_path)
    uvicorn.run(
        app='scheduleapp:app',
        reload=config.FAST_API.RELOAD,
        port=config.FAST_API.PORT,
        host=str(config.FAST_API.HOST),
    )
Example No. 20
def main():
    """
    Main training function
    """
    cfg = get_config(is_train=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu_num)
    print(cfg)
    run(cfg)
Example No. 21
 def test_nuke_cache(self):
     my_config = get_config('test')
     my_config._load()
     nuke_cache()
     for key in CONFIGS.keys():
         self.assertFalse(hasattr(CONFIG_CACHE, key))
     self.assertFalse(my_config.loaded)
     my_config._load()
     self.assertTrue(my_config.loaded)
Example No. 22
def prepare():
    dataset_config: HParams = configs.get_config(FLAGS.dataset_cfgset)().parse(
        FLAGS.dataset_cfgs)
    log_hparams(dataset_config)

    logging.info("Getting and preparing dataset: %s", FLAGS.dataset)
    dataset = datasets.get_dataset(FLAGS.dataset)(FLAGS.data_dir,
                                                  dataset_config)
    dataset.prepare(FLAGS)
Example No. 23
 def test_register_and_retrieve_config(self):
     form_builder = self.instance.get_form_builder()
     lazydictionary_post = get_config('test')
     form = form_builder({'setting1':'wooot', 'setting2':'2', 'site':'1'}, {})
     self.assertTrue(form.is_valid(), form.errors)
     form.save()
     lazydictionary_post._reset()
     self.assertNotEqual(0, len(lazydictionary_post.items()))
Example No. 24
def experiment():
    model_config: HParams = configs.get_config(FLAGS.model_cfgset)().parse(
        FLAGS.model_cfgs)
    model = models.get_model(FLAGS.model)(FLAGS.dir, FLAGS.id, model_config)

    train_dataset_config: HParams = configs.get_config(
        FLAGS.train_dataset_cfgset)().parse(FLAGS.train_dataset_cfgs)
    train_dataset = datasets.get_dataset(FLAGS.train_dataset)(
        FLAGS.data_dir, train_dataset_config)
    train_tf_dataset = train_dataset.load(repeat=True)

    if FLAGS.eval_dataset:
        eval_dataset_config: HParams = configs.get_config(
            FLAGS.eval_dataset_cfgset)().parse(FLAGS.eval_dataset_cfgs)
        eval_dataset = datasets.get_dataset(FLAGS.eval_dataset)(
            FLAGS.data_dir, eval_dataset_config)
        eval_tf_dataset = eval_dataset.load(repeat=False)
    else:
        eval_dataset_config = None
        eval_tf_dataset = None

    if (not FLAGS.distributed) or (hvd.rank() == 0):
        logging.info(
            "Creating Model: %s | Loading Train Dataset: %s | Loading Eval Dataset: %s",
            FLAGS.model, FLAGS.train_dataset, FLAGS.eval_dataset)
        log_hparams(model_config, train_dataset_config, eval_dataset_config)
        logging.info("Beginning training loop")

    # Debugging NaN errors.
    if FLAGS.check_numerics:
        tf.debugging.enable_check_numerics()

    while True:
        try:
            model.train(train_tf_dataset, FLAGS.train_steps, FLAGS.print_freq,
                        FLAGS.save_freq, eval_tf_dataset, FLAGS.eval_freq)
        except tf.errors.AbortedError:
            logging.info(
                "AbortedError received from training function. Restarting training."
            )
            continue
        else:
            break
Example No. 25
def replace_invalid_values(server):
    "Replaces invalid values in a server's configs."
    # Reset MainChannel if the configured channel no longer exists.
    main_channel = configs.get_config(configs.server_config, server.id)["MainChannel"]
    reset = not any(channel.id == main_channel for channel in server.channels)
    if reset:
        configs.set_config(server.id, "Server", "MainChannel / " + get_default_config(server)["MainChannel"])
Example No. 26
def main(
    dataset='mnist',
    seed=0,
    num_shards=250,
    num_slices=1,
  ):
  rng = random.PRNGKey(seed)

  X, y, X_test, y_test = get_dataset(dataset)
  config = get_config(dataset)

  temp, rng = random.split(rng)
  X, y = shuffle(temp, X, y)

  print('X: {}, y: {}'.format(X.shape, y.shape))
  print('X_test: {}, y_test: {}'.format(X_test.shape, y_test.shape))

  # X[0<=i<num_shards][0<=j<num_slices] refers to the j'th slice of the i'th shard
  X, y = shard_and_slice(num_shards, num_slices, X, y)

  init_params, predict = config['clf']

  try:
    with open('private_aggregation.pkl', 'rb') as f:
      params = pickle.load(f)
  except (OSError, pickle.UnpicklingError):
    print('Training full model (Shards={}, Slices={})...'.format(num_shards, num_slices))
    # params[0 <= i < num_shards][0 <= j <= num_slices] refers to the params trained on the first j slices of the i'th shard,
    # i.e., j == 0 yields randomly initialized params trained on no data, j == 1 yields params trained on the first slice, etc.
    params = get_trained_sharded_and_sliced_params(rng, init_params, predict, X, y, train)
    with open('private_aggregation.pkl', 'wb') as f:
      pickle.dump(params, f)

  targets = np.argmax(y_test, axis=1)
  predictions = sharded_and_sliced_predict(params, predict, X_test)
  nonprivate_accuracy = np.mean(predictions == targets)
  print('Accuracy (nonprivate): {:.4}\n'.format(nonprivate_accuracy))

  print('Example votes:')
  print(get_votes(params, predict, X_test)[:20])

  mechanism_names = ['Exp. Mech.', 'LNMax']
  mechanisms = [exponential_mechanism, lnmax]
  mechanism_accs = []
  per_example_epsilons = [0.001, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, 0.1]

  for mechanism_name, mechanism in zip(mechanism_names, mechanisms):
    epsilon_accs = []
    print('Mechanism: {}'.format(mechanism_name))
    for per_example_epsilon in per_example_epsilons:
      temp, rng = random.split(rng)
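      # NOTE: ``temp`` is unused here; the aggregation lambda closes over the
      # freshly split ``rng`` instead.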
      agg = lambda votes: mechanism(rng, votes, per_example_epsilon)
      predictions = sharded_and_sliced_predict(params, predict, X_test, agg)
      accuracy = np.mean(predictions == targets)
      print('Accuracy (eps={:.4}): {:.4}\n'.format(per_example_epsilon, accuracy))
      epsilon_accs.append(accuracy)
    mechanism_accs.append(epsilon_accs)
Example No. 27
def check_permisson(bot, command_name, member):
    "Checks whether the member has permission to use the command."
    has_perm = False
    user_config = configs.get_config(configs.user_config, member.server.id)
    if member.id in user_config:
        if user_config[member.id]["GodMode"]:
            has_perm = True

        # User-specific perms
        for role in user_config[member.id]["Permissions"]:
            if role == command_name:
                has_perm = True

    # Role perms
    role_config = configs.get_config(configs.role_config, member.server.id)
    for role in member.roles:
        if role.name in role_config:
            if command_name in role_config[role.name]["Permissions"]:
                has_perm = True

    return has_perm
Example No. 28
def main():
    # Init config.
    config = get_config()
    logger.info(config)
    # Get device.
    device = get_device(config.use_cpu, gpu_id=config.gpu_id)
    # Set maximum number of threads.
    torch.set_num_threads(config.cpu_nums)
    # Train or test.
    if config.action == 'train':
        train(config, device, config.ReinforcementOrSupervised)
    elif config.action == 'test':
        test(config, device, config.ReinforcementOrSupervised)
Example No. 29
def main(argv):
    """Create directories and configure python settings"""

    # Setup Directory
    experiment_dir = os.path.join(FLAGS.dir, FLAGS.id)
    if not os.path.exists(experiment_dir):
        os.makedirs(os.path.join(experiment_dir, "logs"), exist_ok=True)

    # Setup Logging
    FLAGS.alsologtostderr = True
    logging.get_absl_handler().use_absl_log_file(FLAGS.logfile, os.path.join(experiment_dir, "logs"))

    # Setup seeds
    if FLAGS.random_seed:
        np.random.seed(FLAGS.random_seed)
        tf.random.set_seed(FLAGS.random_seed)

    # Log Flags
    log_flags(FLAGS)

    drawer_id = "05-22_quickdraw_sweep_pixel_weight_28/drawer_enc_tadam_huge_interval0.05_step10000_maxweight0.5-quickdraw_ST1_msl64_28"
    drawer_config: HParams = configs.get_config("drawer/huge")().parse("")
    drawer: DrawerModel = models.get_model("drawer_enc_tadam")(FLAGS.dir, drawer_id, drawer_config, training=False)

    dataset_config: HParams = configs.get_config("quickdraw")().parse("split=T2_msl64_28,shuffle=True,batch_size={}".format(16))
    dataset_proto = datasets.get_dataset('quickdraw')(FLAGS.data_dir, dataset_config)
    dataset = dataset_proto.load(repeat=False)[0]

    # clustering_methods = [sklearn.manifold.TSNE(n_components=2), sklearn.decomposition.PCA(n_components=2), umap.UMAP()]
    clustering_method = DBSCAN(eps=4.2)
    try:
        hyper_embed(drawer, dataset, clustering_method, min_samples=6)
    except Exception:
        exception = traceback.format_exc()
        logging.info(exception)

    logging.info("Complete")
Example No. 30
def seed_database():
    app.config.from_object(get_config())
    db.init_app(app)
    user_datastore = SQLAlchemyUserDatastore(db, User, Role)
    security = Security(app, user_datastore)
    user_datastore.create_user(email='*****@*****.**', password='******')
    user = db.session.query(User).first()
    post = Post()
    post.user = user
    post.title = "Test Title"
    post.content = 'This is the post content.'
    post.post_image = 'https://source.unsplash.com/random'
    post.post_image_alt = 'Unsplashed Image'
    db.session.add(post)
    db.session.commit()
Example No. 31
def evaluate_df(candidate_df: pd.DataFrame) -> pd.DataFrame:
    evaluation_df = copy.copy(candidate_df)
    evaluation_df = pd.concat(
        [evaluation_df,
         pd.DataFrame(columns=["score", "key_frame_labels"])])

    test_config = get_config(mode='test')
    solver = Solver(test_config)
    solver.build()

    for indx, row in evaluation_df.iterrows():
        video_name = row["unique_clip_name"]
        frames, score = solver.test(indx, video_name)
        evaluation_df.at[indx, "key_frame_labels"] = frames
        evaluation_df.at[indx, "score"] = score
    return evaluation_df
Example No. 32
def mongo_db():
    global current_db
    if current_db is None:
        cfg = configs.get_config().no_sql
        client = MongoClient(cfg.host, cfg.port)
        if cfg.user != "":
            client[cfg.name].authenticate(cfg.user, cfg.password)
        current_db = client[cfg.name]

    return current_db
Example No. 33
    def make(self, key):
        # ap_meta_path: varchar(512)
        # lf_path: varchar(512)
        # lf_meta_path: varchar(512)
        # TODO: add file length as metadata
        config_type = (Experiment() & key).fetch1('config_type')
        cfg = configs.get_config(config_type)

        base_path = cfg['ephys_path'].format(**key)
        path = os.path.join(data_path, base_path)
        print(path)
        if not os.path.isdir(path):
            print('Neural recordings for {session_id} in {experiment_id} are not found'.format(**key))
            return

        subsession_type, subsession_iter = (Subsession() & key).fetch1('type', 'iteration')
        # Flip f.split('_') naming depending on Arnau's or Ania's data.
        ap_files = [
            f for f in os.listdir(path) if f.endswith('.ap.bin') and
            ((f.split('_')[1] == subsession_type and f.split('_')[0] == f"{subsession_iter:04d}") or
             (f.split('_')[0] == subsession_type and f.split('_')[1] == f"{subsession_iter:04d}"))
        ]
        ap_files.sort()
        print(ap_files)
        file = ap_files[0]

        # starts, lengths = (EphysRaw() & {'experiment_id': key['experiment_id'], 'mouse_id': key['mouse_id'], 'session_id': key['session_id']}).fetch('start', 'length')
        # print("Starts, lengths:", starts, lengths)
        # if len(starts) == 0:
        #     start = 0
        # else:
        #     last_idx = np.argmax(starts)
        #     start = starts[last_idx] + lengths[last_idx]


        rel_file_path = os.path.join(base_path, file)
        # id, stimulus_type = file.split('_')[:2]
        key['ap_path'] = rel_file_path
        key['meta_path'] = rel_file_path[:-3] + 'meta'
        key['subsession_type'] = subsession_type
        key['subsession_iter'] = subsession_iter
        sync_trace = neuropixels_utils.extract_sync(Path(os.path.join(path, file)))
        key['sync_trace'] = sync_trace
        key['length'] = sync_trace.shape[1]

        # key['start'] = start

        self.insert1(key)

        print('Populated neural recordings for {session_id} in {experiment_id}'.format(**key))
Example No. 34
    def make(self, key):
        stimulus_type, iteration = (Subsession() & key).fetch1('stimulus_type', 'iteration')
        print("Importing stimulus", stimulus_type, str(iteration))
        config_type = (Experiment() & key).fetch1('config_type')

        cfg = configs.get_config(config_type)

        recs_path = os.path.join(data_path, cfg['facecam_path'].format(**key), stimulus_type)
        if not os.path.exists(recs_path):
            print('Face recording for {session_id} {experiment_id}, {stimulus_type} {iteration} NOT found!'.format(stimulus_type=stimulus_type, iteration=iteration, **key))
            return
        runs = [os.path.splitext(f.split('_')[1])[0] for f in os.listdir(recs_path) if f.endswith('.camlog')]
        session_date = (Session() & key).fetch1('session_date')
        trial_run = sorted(runs)[0]  # Iterations start from index 1
        print("{session_date:%Y%m%d}_{trial_run}".format(trial_run=trial_run, session_date=session_date))

        tiffs = [f for f in os.listdir(recs_path) if
                 f.startswith("{session_date:%Y%m%d}_{trial_run}".format(trial_run=trial_run, session_date=session_date)) and f.endswith('.tif')]
        tiffs = sorted(tiffs)
        print(tiffs)
        max_merged_tiffs = 15
        i = 0
        while i*max_merged_tiffs < len(tiffs):
            data_tiff = DataTiff(os.path.join(recs_path, tiffs[i*max_merged_tiffs]))
            for tiff in tiffs[i*max_merged_tiffs+1:(i+1)*max_merged_tiffs]:
                print(f"Loading {tiff}")
                temp = DataTiff(os.path.join(recs_path, tiff))
                try:
                    data_tiff.merge_tiff(temp)
                except ValueError as err:
                    print(f"Was not able to load {tiff} with error: {err}")
            print("Merged tiffs")
            key['part'] = i
            key['recording'] = data_tiff.data
            n_frames, height, width = data_tiff.data.shape
            key['n_frames'] = n_frames
            key['width'] = width
            key['height'] = height
            print(f"Inserting part {i} for {stimulus_type}, {iteration}")
            self.insert1(key)
            i += 1

        print('Populated a face recording for {session_id} in {experiment_id}'.format(**key))
Example No. 35
 def make(self, key):
     config_type = (Experiment() & key).fetch1('config_type')
     cfg = configs.get_config(config_type)
     base_path = os.path.join(data_path, cfg['wavesurfer_path'].format(
         **key))  # could also be experiment_id/neuropixels/session_id
     subsession_files = [
         os.path.splitext(f)[0] for f in os.listdir(base_path)
         if f.endswith('.h5')
     ]
     for subsession in subsession_files:
         key['subsession_id'] = subsession
         iteration, sub_type = subsession.split('_')[:2]
         try:
             key['type'] = sub_type
             key['iteration'] = int(iteration)
         except ValueError:
             # Some files are named type_iteration rather than iteration_type.
             key['type'] = iteration
             key['iteration'] = int(sub_type)
         self.insert1(key)
Example No. 36
def run_cli(user, gpu, config, checkpoint=None):
    """Get config + env setup."""
    if checkpoint:
        assert os.path.isfile(checkpoint), \
            f"Checkpoint doesn't exist ({checkpoint})"
    cfg = configs.get_config(config)

    # experiment setup
    _set_seed(cfg['experiment']['seed'])

    if user == 'yzhang46' and gpu >= 0:
        device = f'cuda:{int(gpu)}' if torch.cuda.is_available() else 'cpu'
    else:
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    cfg['experiment']['device'] = device
    print(f" > Using device: {device}.")

    experiment = experiments.get_module(cfg['experiment']['name'])
    experiment.run(cfg, checkpoint=checkpoint)
Example No. 37
#
"""
	Module for communicating with the servo controller.
	"""

# Imports #
# Paths
import sys

sys.path.append("../Libraries")
# Configs
import configs
import configobj

global config, enabled
config = configs.get_config("Servo Controller")
enabled = config["enabled"]
mc_name = config["micro_controller"]
# Logging
import logging

log = logging.getLogger(config["logger_name"])
# micro_controller_network
import micro_controller_network

mc = micro_controller_network.get_object(mc_name)

# Static Functions #
def init():
    global config, initialized, servos
    servos = {}
Example No. 38
 def test_nuke_cache(self):
     get_config('test').items()
     nuke_cache()
     for key in CONFIGS.keys():
         self.assertFalse(hasattr(CONFIG_CACHE, key))
Example No. 39
#
#  laser.py
#  
#
#  Created by William Woodall on 2/24/09.
#  Copyright (c) 2009 Auburn University. All rights reserved.
#
'''
Module for controlling the Hokuyo URG-04LX Laser Range Finder.
'''
# Imports #
# Configs
import configs
try:
	config = configs.get_config('Laser Range Finder')
	enabled = config['enabled']
except NameError:
	enabled = True
# Logging
import logging
datalog = logging.getLogger("Dashboard")
datalog.propagate = False
try:
	log = logging.getLogger(config['logger_name'])
except NameError:
	pass
# Networking for dashboard
from Networking import *
# Events
import events
# Threading
Example No. 40
#  Created by William Woodall on 2/19/09.
#  Copyright (c) 2009 Auburn University. All rights reserved.
#
'''
	Module for controlling and managing object detection.
	'''

# Imports #
# Paths
import sys
sys.path.append("../Libraries")
# Configs
import configs
import configobj
global config, enabled
config = configs.get_config('Obj Detection')
enabled = config['enabled']
mc_name = config['micro_controller']
# Logging
import logging
log = logging.getLogger(config['logger_name'])
# Events
import events
# Conditions
from threading import Event
# micro_controller_network
import micro_controller_network
mc = micro_controller_network.get_object(mc_name)

# Static Variables
return_codes = {'\x70':'Micro Switch Triggered',
Example No. 41
#  
#  Created by William Woodall on 12/31/08.
#  Copyright (c) 2008 Auburn University. All rights reserved.
#

'''
	Module for controlling a sabertooth2x10 motor controller.
	TODO: 
	* Documentation...
	'''

# Imports #
# Configs
import configs
global config, enabled
config = configs.get_config('Sabertooth2x10')
enabled = config['enabled']
# Logging
import logging
log = logging.getLogger(config['logger_name'])
# PySerial
if enabled:
	try:
		from serial import Serial
	except Exception as e:
		log.error("Serial cannot be imported, you may need to install it: %s" % e)
else:
	log.warning("The Motor Controller is disabled!  This means the motors will quietly do nothing!")

# Static Functions #
def init():
Example No. 42
#  antenna_array.py
#  
#
#  Created by William Woodall on 2/20/09.
#  Copyright (c) 2009 Auburn University. All rights reserved.
#

# Imports #
# Paths
import sys
sys.path.append("../Libraries")
# Configs
import configs
import configobj
global config
config = configs.get_config('Antenna Array')
enabled = config['enabled']
mc_name = config['micro_controller']
# Logging
import logging
log = logging.getLogger(config['logger_name'])
# Events
import events
# Event
from threading import Event
# Sleep
from time import sleep
# micro_controller_network
import micro_controller_network
mc = micro_controller_network.get_object(mc_name)
Example No. 43
 def test_empty_config(self):
     lazydictionary_pre = get_config('test')
     self.assertEqual(0, len(lazydictionary_pre.items()))
Example No. 44
dash_log.propagate = False
serverHandler = LoggingServerHandler('', PORT)
dash_log.addHandler(serverHandler)
dash_log.critical("Loaded Dash Log.")
log.debug("Using logging config file %s" % loggingConfigFile.name)

# Start Configurations
try:
	configFile = open('Configurations/robot.cfg')
except IOError:
	try:
		configFile = open('Configurations/robot.cfg.default')
	except IOError:
		log.error("No config file could be opened.")
configs.init(configFile)
config = configs.get_config()
log.debug("Using config file %s" % configFile.name)

# Initialize modules #

# Sabertooth2x10
import sabertooth2x10 as saber
saber.init()
mc = saber.get_object()
# Laser Range Finder
import laser
laser.init()
lrf = laser.get_object()
lrf.clear()
# MCN
import micro_controller_network as mcn
Example No. 45
#
#
#  Created by William Woodall on 1/29/09.
#  Copyright (c) 2009 Auburn University. All rights reserved.
#
'''
	Module for interfacing with the Micro Controller Network.
	TODO:
	'''

# Imports #
# Configs
import configs
import configobj
global config, enabled
config = configs.get_config('Micro Controller Network')
enabled = config['enabled']
# Logging
import logging
log = logging.getLogger(config['logger_name'])
# Threading
from threading import Thread, Lock, Timer
# Sleep
from time import sleep
# PySerial
if enabled:
	try:
		from serial import Serial
	except Exception as e:
		log.error("Serial cannot be imported, you may need to install it: %s" % e)
else:
Example No. 46
#  angular_accelerometer.py
#  
#
#  Created by William Woodall on 2/21/09.
#  Copyright (c) 2009 Auburn University. All rights reserved.
#

# Imports #
# Paths
import sys
sys.path.append("../Libraries")
# Configs
import configs
import configobj
global config
config = configs.get_config('Angular Accelerometer')
mc_name = config['micro_controller']
# Logging
import logging
log = logging.getLogger(config['logger_name'])
# Events
import events
# Event
from threading import Event
# Sleep
from time import sleep
# micro_controller_network
import micro_controller_network
mc = micro_controller_network.get_object(mc_name)

# Static Variables