Code Example #1
File: test_dotmap.py Project: mmarkov/dotmap
 def test_view_values(self):
     d = DotMap(self._get_dict())
     dd = d.to_dict()
     for k, v in d.viewitems():
         self.assertTrue(set(dd).issuperset({k: v}))
     for k in d.viewkeys():
         self.assertTrue(k in dd)
     for v in d.viewvalues():
         self.assertTrue(v in dd.values())
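The `_get_dict()` helper these dotmap tests build on is not shown in the excerpts; judging from the assertions in test_clear, test_eq, test_values, and test_get below, it presumably returns a small nested dict. A minimal sketch consistent with those assertions:

 def _get_dict(self):
     # three top-level keys (see test_values), with a == 1 and b == 2,
     # plus a nested subD holding c == 3 and d == 4 (see test_eq)
     return {'a': 1, 'b': 2, 'subD': {'c': 3, 'd': 4}}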
Code Example #2
File: sensed_tests.py Project: sli/sensed
def test_test_mode() -> None:
    '''
    Tests server and client by starting the server in a thread, then
    requesting meta and sensor data. Asserts that this data is in the
    correct format and returns None on success.
    '''
    print('Finding default configs...')
    if os.path.isdir('docs'):
        server_config_file = 'docs/sensed-config.sample.toml'
        client_config_file = 'docs/senselog-config.sample.toml'
    elif os.path.isdir('../docs'):
        server_config_file = '../docs/sensed-config.sample.toml'
        client_config_file = '../docs/senselog-config.sample.toml'
    else:
        # fail fast instead of hitting a NameError at open() below
        raise FileNotFoundError('could not locate the sample config files')

    print('Running sensed test mode test...')

    with open(server_config_file) as f:
        server_config = DotMap(toml.load(f))
    dt_sensor = datetime.Sensor(server_config)
    ni_sensor = netifaces.Sensor(server_config)
    cam_sensor = camera.Sensor(server_config)
    hat_sensor = hat.Sensor(server_config)
    server_config.sensors = DotMap({'datetime': dt_sensor,
                                    'netifaces': ni_sensor,
                                    'camera': cam_sensor,
                                    'hat': hat_sensor})
    server = socketserver.UDPServer((server_config.sensed.host,
                                    server_config.sensed.port),
                                    SensedServer)
    server.sensors = server_config.sensors
    server.config = server_config

    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()

    with open(client_config_file) as f:
        client_config = DotMap(toml.load(f))
    client = SensedClient(client_config)

    meta = client.get_all_meta()
    sensors = client.get_all_sensors()

    assert isinstance(meta, list)
    assert isinstance(meta[0], dict)
    assert isinstance(sensors, list)
    assert isinstance(sensors[0], dict)

    print('Packet test passed. Shutting down...')

    server.shutdown()

    print('Test complete.')
Code Example #3
File: test_dotmap.py Project: mmarkov/dotmap
 def test_popitem(self):
     m = DotMap()
     m.people.john.age = 32
     m.people.john.job = 'programmer'
     m.people.mary.age = 24
     m.people.mary.job = 'designer'
     m.people.dave.age = 55
     m.people.dave.job = 'manager'
     p = m.people.popitem()
     self.assertEqual(p[0], 'dave')
     i = m.popitem()
     self.assertEqual(i[0], 'people')
     self.assertEqual(0, len(m))
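Note that test_popitem leans on DotMap's automatic creation of intermediate maps: assigning to `m.people.john.age` builds the `people` and `john` levels on the fly. A minimal illustration:

 m = DotMap()
 m.people.john.age = 32       # 'people' and 'john' DotMaps are created implicitly
 print(m.toDict())            # {'people': {'john': {'age': 32}}}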
Code Example #4
File: test_dotmap.py Project: mmarkov/dotmap
 def test_update(self):
     d = DotMap(self._get_dict())
     d.update([('rat', 5), ('bum', 4)], dog=7, cat=9)
     self.assertEqual(d.rat, 5)
     self.assertEqual(d['bum'], 4)
     self.assertEqual(d.dog, 7)
     self.assertEqual(d['cat'], 9)
     d.update({'lol': 1, 'ba': 2})
     self.assertEqual(d.rat, 5)
     self.assertEqual(d['bum'], 4)
     self.assertEqual(d.dog, 7)
     self.assertEqual(d['cat'], 9)
     self.assertEqual(d.lol, 1)
     self.assertEqual(d['ba'], 2)
Code Example #5
    def load_deploy(self):
        """
        Loads the deployment map
        :raises IOError: if can't read map
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        if not self.deploy_file:
            raise AssertionError('deploy_file is not set')

        try:
            self.update_template_vars()
            self.deploy_dataset = DataSet(self.deploy_file,
                                          additional_template_vars=self.template_vars)
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
            logobj(self.deploy_dataset.content)
            self.deploy = DotMap(self.deploy_dataset.content)
            logobj(self.deploy)
            self.update_template_vars()

        except IOError as e:
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(e))
            # must be a new install or else we don't have permissions.

        autonames_dataset = DataSet(self.autonames_file, additional_template_vars=self.template_vars)
        autonames = DotMap(autonames_dataset.content)
        self.config.update(autonames)

        caches_dataset = DataSet(self.caches_file, additional_template_vars=self.template_vars)
        caches = DotMap(caches_dataset.content)
        self.config.update(caches)
Code Example #6
File: test_dotmap.py Project: mmarkov/dotmap
 def test_eq(self):
     d = DotMap(self._get_dict())
     m = DotMap()
     m.people.john.age = 32
     m.people.john.job = 'programmer'
     m.people.mary.age = 24
     m.people.mary.job = 'designer'
     m.people.dave.age = 55
     m.people.dave.job = 'manager'
     self.assertNotEqual(d, m)
     m.clear()
     m.a = 1
     m.b = 2
     m.subD.c = 3
     m.subD.d = 4
     self.assertEqual(d, m)
Code Example #7
File: test.py Project: soixantecircuits/pyying
    def test_shoot(self):
        settings = DotMap(pyStandardSettings.getSettings())
        spacebroSettings = settings.service.spacebro
        media = DotMap({
            'albumId': 'myAlbumId',
        })
        spacebroSettings.clientName = "pyying-test"
        spacebroClient = SpacebroClient(spacebroSettings.toDict())

        # Listen
        spacebroClient.on(str(spacebroSettings.client['out'].outMedia.eventName), self.on_inMedia)
        spacebroClient.wait(seconds=timeout)
        spacebroClient.emit(str(spacebroSettings.client['in'].shoot.eventName), media.toDict())
        spacebroClient.wait(seconds=shoot_timeout)

        self.assertEqual(DotMap(self.inMediaValue).albumId, media.albumId)
        self.assertTrue(path.exists(str(self.inMediaValue['path'])))
        spacebroClient.disconnect()
Code Example #8
def Logger(name):
	logger = logging.getLogger(name)
	logger.setLevel(logging.DEBUG)

	ch = logging.StreamHandler()
	ch.setLevel(logging.DEBUG)

	if color:
		formatter = coloredlogs.ColoredFormatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
	else:
		formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

	ch.setFormatter(formatter)

	logger.addHandler(ch)

	log_functions = DotMap()
	log_functions.log = logger.debug 
	log_functions.error = logger.error 

	return log_functions
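A hypothetical call site for this factory (the `color` flag and the `logging`/`coloredlogs` imports live in the surrounding module, which is not shown):

 log = Logger('worker')
 log.log('started')             # routed to logger.debug
 log.error('something broke')   # routed to logger.error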
Code Example #9
    def load_state(self):
        """
        Load current state (may be empty)
        :raises IOError: if filesystem exceptions prevent config file read or initial write
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        if self.state_file is None:
            pass
        else:

            try:
                self.update_template_vars()
                self.state_dataset = DataSet(self.state_file,
                                             additional_template_vars=self.template_vars)
                logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
                logobj(self.state_dataset.content)
                self.state = DotMap(self.state_dataset.content)
                logobj(self.state)
                self.update_template_vars()
            except IOError as e:
                logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(e))
                # must be a new install or else we don't have permissions.
                # Try to create an empty config and see what happens.
                try:
                    self.state_dataset = DataSet(self.state_file, create_if_missing=True,
                                                 additional_template_vars=self.template_vars)
                    logobj(self.state_dataset.content)
                    self.state = DotMap(self.state_dataset.content)
                    logobj(self.state)
                    self.update_template_vars()
                except IOError as cf:
                    logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(cf))
                    print("Unable to create a new state file: " + repr(cf))
                    # and presumably crash, though at some point we should tell
                    # the user to make sure they're mounting /opt correctly in Docker
                    raise
Code Example #10
    def from_html(html, url=None, download_date=None):
        """
        Extracts relevant information from an HTML page given as a string. This function does not
        invoke scrapy; it only uses the article extractor. If you have the original URL, make sure
        to provide it, as it helps NewsPlease extract the publishing date and title.
        :param html: the HTML page as a string
        :param url: the original URL of the page, if known
        :param download_date: the date the page was downloaded, if known
        :return: the extracted article
        """
        extractor = article_extractor.Extractor(
            ['newspaper_extractor', 'readability_extractor', 'date_extractor', 'lang_detect_extractor'])

        title_encoded = ''.encode()
        if not url:
            url = ''

        # if a URL was given, we can use that as the filename
        filename = urllib.parse.quote_plus(url) + '.json'

        item = NewscrawlerItem()
        item['spider_response'] = DotMap()
        item['spider_response'].body = html
        item['url'] = url
        item['source_domain'] = urllib.parse.urlparse(url).hostname.encode() if url != '' else ''.encode()
        item['html_title'] = title_encoded
        item['rss_title'] = title_encoded
        item['local_path'] = None
        item['filename'] = filename
        item['download_date'] = download_date
        item['modified_date'] = None
        item = extractor.extract(item)

        tmp_article = ExtractedInformationStorage.extract_relevant_info(item)
        final_article = ExtractedInformationStorage.convert_to_class(tmp_article)
        # final_article = DotMap(tmp_article)
        return final_article
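A typical call, assuming this staticmethod is exposed on the NewsPlease class as in the upstream news-please library (the URL is illustrative):

article = NewsPlease.from_html(html, url='https://example.com/news/item')
print(article.title)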
Code Example #11
 def load_script(self):
     """
     Loads the install script
     :raises IOError: if can't read script
     """
     logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
     if self.script_file is None:
         pass
     else:
         try:
             self.update_template_vars()
             self.script_dataset = DataSet(self.script_file, additional_template_vars=self.config.toDict(),
                                           additional_template_yaml=self.config.yaml)
             self.script = DotMap(self.script_dataset.content)
             self.update_template_vars()
         except IOError as e:
             # If this fails, there's nothing more we can do.
             # Something is broken in the install container.
             logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(e))
             print("Unable to locate script: " + repr(e))
             raise
Code Example #12
    def load_config(self):
        """
        Loads the config
        :raises IOError: if can't read config
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        try:
            self.update_template_vars()
            self.config_dataset = DataSet(self.config_file)
            self.config = DotMap(self.config_dataset.content)
            logobj(self.config)
            self.update_template_vars()
        except IOError as e:
            # If this fails, there's nothing more we can do.
            # Something is broken in the install container.
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(e))
            print("Unable to locate config file: " + repr(e))
            raise

        if len(self.config.ui.script_file) != 0:
            self.script_file = self.config.ui.script_file
            logging.debug(self.__class__.__name__ + ': ' +
                          sys._getframe().f_code.co_name +
                          ': will look for script in: ' + repr(self.script_file))

        if len(self.config.ui.state_file) != 0:
            self.state_file = self.config.ui.state_file
            logging.debug(self.__class__.__name__ + ': ' +
                          sys._getframe().f_code.co_name +
                          ': will look for state in: ' + repr(self.state_file))

        if len(self.config.ui.deploy_file) != 0:
            self.deploy_file = self.config.ui.deploy_file
            logging.debug(self.__class__.__name__ + ': ' +
                          sys._getframe().f_code.co_name +
                          ': will look for deploy in: ' + repr(self.deploy_file))
Code Example #13
File: test_dotmap.py Project: mmarkov/dotmap
 def test_clear(self):
     d = DotMap(self._get_dict())
     self.assertEqual(1, d.a)
     d.clear()
     self.assertEqual(0, len(d))
     self.assertEqual(d.a, DotMap())
Code Example #14
File: incidents.py Project: martindstone/martbot
    def slack_action(self, team, user, req):
        if req.actions[0].name == 'incidents':
            incident_id = req.actions[0].selected_options[0].value
            response_url = req.response_url

            incident = pd.request(oauth_token=user.pd_token,
                                  endpoint="/incidents/{}".format(incident_id))

            r = requests.post(response_url,
                              headers={"Content-type": "application/json"},
                              json={
                                  "text": "",
                                  "attachments": slack_formatters.make_incident_attachments(
                                      incident.get('incident')),
                                  "replace_original": True
                              })

        elif req.actions[0].name in ('acknowledge', 'resolve'):
            incident_id = req.actions[0].value
            response_url = req.response_url
            me = DotMap(
                pd.request(oauth_token=user["pd_token"], endpoint="users/me"))
            headers = {"From": me.user.email}
            body = {
                "incidents": [{
                    "id": incident_id,
                    "type": "incident_reference",
                    "status": "{}d".format(req.actions[0].name)
                }]
            }
            incident = pd.request(oauth_token=user["pd_token"],
                                  endpoint="incidents",
                                  method="PUT",
                                  addheaders=headers,
                                  data=body)
            r = requests.post(response_url,
                              headers={"Content-type": "application/json"},
                              json={
                                  "text": "",
                                  "attachments": slack_formatters.make_incident_attachments(
                                      incident.get('incidents')[0]),
                                  "replace_original": True
                              })
        elif req.actions[0].name == 'annotate':
            incident_id = req.actions[0].value
            sc = SlackClient(team["slack_app_token"])
            trigger_id = req.trigger_id

            call = sc.api_call("dialog.open",
                               trigger_id=trigger_id,
                               dialog={
                                   "callback_id":
                                   "incidents {}".format(incident_id),
                                   "title":
                                   "Add a note",
                                   "submit_label":
                                   "Annotate",
                                   "elements": [{
                                       "name":
                                       "note",
                                       "label":
                                       "Note",
                                       "type":
                                       "textarea",
                                       "optional":
                                       False,
                                       "placeholder":
                                       "Add your note text here"
                                   }]
                               })
        elif req.submission.note:
            me = DotMap(
                pd.request(oauth_token=user["pd_token"], endpoint="users/me"))
            headers = {
                "Content-type": "application/json",
                "From": me.user.email
            }
            body = {"note": {"content": req.submission.note}}
            incident_id = req.callback_id.split()[1]
            incident = pd.request(oauth_token=user["pd_token"],
                                  endpoint="incidents/{}/notes",
                                  method="POST",
                                  addheaders=headers,
                                  data=body)
            print(incident)
        else:
            req.pprint()
Code Example #15
    def build_data(
        self,
        dataset_name=None,
        shared_vocab_fields=None,
        max_len=None,
    ):
        _fields = {
            field.name: (
                field.field if field.field is not None else field.name,
                Field(
                    sequential=field.sequential,
                    tokenize=word_tokenize
                    if field.tokenize is None else field.tokenize,
                    lower=field.lowercase,
                    fix_length=field.fix_length,
                    max_len=max_len,
                    init_token=bos_token if
                    (field.sos and field.sequential) else None,
                    eos_token=eos_token if
                    (field.eos and field.sequential) else None,
                    include_lengths=field.include_lengths,
                    batch_first=True,
                    tokenizer=field.tokenizer,
                    use_vocab=field.use_vocab,
                    pad_token=field.pad_token,
                    postprocessing=field.postprocess,
                ),
            )
            for field in self.dataset_fields
        }

        if shared_vocab_fields:
            if not isinstance(shared_vocab_fields[0], list):
                shared_vocab_fields = [shared_vocab_fields]
            self.share_vocab_fields(_fields, shared_vocab_fields)
            assert (_fields[shared_vocab_fields[0][0]][1] ==
                    _fields[shared_vocab_fields[0][1]][1])

        separate_fields = removeDuplicates([_fields[i][1] for i in _fields])

        print("Loading", dataset_name)
        self.train, self.valid, self.test = data.TabularDataset.splits(
            path=os.path.join(self.dataset_path, dataset_name),
            train="_train.json",
            validation="_valid.json",
            test="_test.json",
            format="json",
            fields=_fields,
        )
        print("Dataset:", dataset_name, "loaded")

        for f in separate_fields:
            try:
                if f.use_vocab:
                    f.build_vocab(
                        self.train,
                        self.valid,
                        self.test,
                        specials=special_tokens_list,
                        min_freq=1,
                        max_size=10000
                        if f.tokenize == word_tokenize else 100000,
                    )

            except Exception:
                # fields that cannot build a vocab fall back to their tokenizer below
                pass

        self.vocab = {}

        for key in _fields:
            try:
                self.vocab[_fields[key][0]] = _fields[key][1].vocab
            except AttributeError:
                # the field never built a vocab; expose its tokenizer instead
                self.vocab[_fields[key][0]] = _fields[key][1].tokenizer
        self.vocab = DotMap(self.vocab)
Code Example #16
class ControllerBase(object):

    def __init__(self, parent):
        self.parent = parent

        self.state = DotMap(
            keepActive=False,
            needs_render=True
        )
        self.persistentState = DotMap()
        self.settings = DotMap()
        self.widgets = DotMap()
        self.dialog = None
        
    def get_state(self):
        return self.state

    def set_state(self, state):
        self.state = state

    def init_states(self, **kwargs):
        for name, value in kwargs.items():
            self.state[name] = value

    def get_persistent_state(self):
        return self.persistentState

    def set_persistent_state(self, persistent_state):
        self.persistentState = persistent_state

    def init_persistent_states(self, **kwargs):
        for name, value in kwargs.items():
            self.persistentState[name] = value

    def init_settings(self, **kwargs):
        for name, value in kwargs.items():
            self.settings[name] = value

    def keep_active(self):
        return self.state.keepActive or (self.has_dialog() is True and self.dialog.keep_active() is True)
        
    def background(self):
        pass

    def foreground(self):
        pass
    
    def render(self):
        print('render ' + str(time()) + ' - ' + self.__class__.__name__)

        self.state.needs_render = False

    def update(self, execution_type='fg'):
        updated = False
        if execution_type == 'fg':
            self.foreground()
            if self.needs_update() is True:
                self.pre_render()
                self.render()
                if self.has_dialog() is True and self.dialog.state.needs_render is True:
                    self.dialog.render()
                updated = True
        else:
            self.background()

        return updated

    def needs_update(self):
        return self.state.needs_render is True or (self.has_dialog() is True and self.dialog.state.needs_render is True)

    def pre_render(self):
        pass

    def create_widget(self, widget_type, name, callback=None, parent=None, **kwargs):
        if parent is None:
            parent = self
        
        def noop():
            pass

        if callback is None:
            callback = noop

        if name in self.widgets.toDict():
            raise Exception('Widget ' + name + ' already defined in ' + self.__class__.__name__)
            
        if widget_type == 'button':
            widget = Button(parent, name, **kwargs)

            def default_action():
                noop()
        elif widget_type == 'checkbox':
            widget = Checkbox(parent, name, **kwargs)

            def default_action():
                widget.toggle()
        elif widget_type == 'scrollarea':
            widget = ScrollArea(parent, name, **kwargs)
            
            def default_action():
                noop()
        else:
            raise Exception('unknown control of type ' + widget_type)
            
        def action():
            call_with_optional_arguments(default_action, name=name)
            call_with_optional_arguments(callback, name=name)
            
        widget.action = action

        parent.widgets[name] = DotMap(
            widget=widget,
            type=widget_type
        )

        return parent.widgets[name].widget
            
    def get_widget(self, name):
        if name not in self.widgets.toDict():
            raise Exception('Widget ' + name + ' not found in ' + self.__class__.__name__)
        return self.widgets[name].widget

    def disable_all_widgets(self):
        for k, v in self.widgets.items():
            v.widget.enabled = False

    def on_touch(self, xy, action):
        if self.has_dialog() is False:
            for k, v in self.widgets.items():
                v.widget.on_touch(xy, action)
        else:
            self.dialog.on_touch(xy, action)

    def open_dialog(self, dialog):
        if self.dialog is not None:
            self.dialog.close()
            
        self.dialog = dialog
        self.state.needs_render = True
        self.on_show()

    def on_show(self):
        pass

    def on_hide(self):
        pass

    def close_dialog(self):
        if self.dialog is not None:
            self.dialog.cleanup()
            
        self.dialog = None
        self.state.needs_render = True
        self.on_hide()

    def has_dialog(self):
        return self.dialog is not None
Code Example #17
 def get_all_matches(self):
     all_matches = self._match_table.all()
     return [DotMap(match) for match in all_matches]
Code Example #18
    lower_bound = [(sample[i] - decayed_width / 2)
                   for i in range(num_variables)]
    upper_bound = [(sample[i] + decayed_width / 2)
                   for i in range(num_variables)]

    # check whether updated range for each parameter is within original ranges
    for index in range(num_variables):
        if lower_bound[index] < 0:
            lower_bound[index] = 0
        if upper_bound[index] > 1:
            upper_bound[index] = 1

    return np.random.uniform(lower_bound, upper_bound)


# Basic set of parameters
sa_params = DotMap()
sa_params.T = 10
sa_params.decay_rate = 0.9
sa_params.iterations = 3
sa_params.reset_temp = 5
sa_params.num_epoch = 10
sa_params.temp_f = lambda t: 0.91 * t
sa_params.iter_f = lambda length: int(math.ceil(0.5 * length))
sa_params.proposal_f = proposal_func

## Tests


def test_simulatedAnnealing():
    carDomain = Struct({
        'position': Box([-10, 10], [-10, 10], [0, 1]),
Code Example #19
from verifai.samplers.scenic_sampler import ScenicSampler
from dotmap import DotMap
from verifai.falsifier import generic_falsifier


path_to_scenic_file = 'intersection_crash.sc'
sampler = ScenicSampler.fromScenario(path_to_scenic_file)

MAX_ITERS = 20
PORT = 8888
MAXREQS = 5
BUFSIZE = 4096

falsifier_params = DotMap()
falsifier_params.n_iters = MAX_ITERS
falsifier_params.save_error_table = False
falsifier_params.save_good_samples = False

server_options = DotMap(port=PORT, bufsize=BUFSIZE, maxreqs=MAXREQS)

falsifier = generic_falsifier(sampler=sampler, sampler_type='scenic',
                              falsifier_params=falsifier_params,
                              server_options=server_options)

falsifier.run_falsifier()

print("Scenic Samples")
for i in falsifier.samples.keys():
    print("Sample: ", i)
    print(falsifier.samples[i])
Code Example #20
def get_dm_params(variables):
    dm_params = DotMap()

    dm_params.height = variables.height
    dm_params.width = variables.width
    dm_params.ft = variables.ft
    dm_params.next_folder = variables.next_folder
    dm_params.suffix = variables.suffix
    dm_params.with_frame = variables.with_frame
    dm_params.return_pix_array = variables.return_pix_array
    dm_params.save_data = variables.save_data
    dm_params.save_pictures = variables.save_pictures
    dm_params.save_gif = variables.save_gif
    dm_params.functions_str_lst = variables.functions_str_lst
    dm_params.width_append_frame = variables.width_append_frame
    dm_params.with_resize_image = variables.with_resize_image
    dm_params.resize_factor = variables.resize_factor
    dm_params.lambdas_in_picture = variables.lambdas_in_picture
    dm_params.max_it = variables.max_it
    dm_params.bits = variables.bits
    dm_params.temp_path_lambda_file = variables.temp_path_lambda_file
    dm_params.image_by_str = variables.image_by_str
    dm_params.func_by_name = variables.func_by_name

    dm_params.path_dir = variables.path_dir
    dm_params.file_name_dm = variables.file_name_dm
    dm_params.file_name_txt = variables.file_name_txt

    return dm_params
Code Example #21
 def __init__(self, path):
     super(core, self).__init__()
     self.path = path
     self.r2 = r2pipe.open(path, ["-2"])
     self.regs = DotMap(self.r2.cmdj("drj"))
     self.stack = self.r2.cmdj("pxwj @rsp")
Code Example #22
    def __data_to_lines(self, tcx_data) -> list[ExerciseEvent]:
        """
        Method for transforming TCXFile/GPXFile data into [TrackSegment] list.
        Args:
            tcx_data: TCXData/GPXData generated dictionary
        Returns: list of TrackSegments
        """
        lines: list[ExerciseEvent] = []
        for i in range(len(tcx_data['positions'])):
            if (i != 0):
                point_a = DotMap()
                point_b = DotMap()
                point_a.latitude = tcx_data['positions'][i - 1][0]
                point_a.longitude = tcx_data['positions'][i - 1][1]
                point_a.time = tcx_data['timestamps'][i - 1]

                point_b.latitude = tcx_data['positions'][i][0]
                point_b.longitude = tcx_data['positions'][i][1]
                point_b.time = tcx_data['timestamps'][i]

                if len(tcx_data['altitudes']) == len(tcx_data['positions']):
                    point_a.elevation = tcx_data['altitudes'][i - 1]
                    point_b.elevation = tcx_data['altitudes'][i]

                if len(tcx_data['heartrates']) == len(tcx_data['positions']):
                    point_a.heartrate = tcx_data['heartrates'][i - 1]
                    point_b.heartrate = tcx_data['heartrates'][i]

                if len(tcx_data['distances']) == len(tcx_data['positions']):
                    point_a.distance = tcx_data['distances'][i - 1]
                    point_b.distance = tcx_data['distances'][i]

                if len(tcx_data['speeds']) == len(tcx_data['positions']):
                    point_a.speed = tcx_data['speeds'][i - 1]
                    point_b.speed = tcx_data['speeds'][i]

                prev_speed = None
                if (i > 1):
                    prev_speed = lines[-1].speed
                ts = TrackSegment(point_a, point_b, prev_speed)
                lines.append(ts)

        return lines
Code Example #23
File: external_params.py Project: JayShenoy/scenic
    def __init__(self, params, globalParams):
        super().__init__(params, globalParams)
        import verifai.features
        import verifai.server

        # construct FeatureSpace
        usingProbs = False
        self.params = tuple(params)
        for index, param in enumerate(self.params):
            if not isinstance(param, VerifaiParameter):
                raise RuntimeError(
                    f'VerifaiSampler given parameter of wrong type: {param}')
            param.sampler = self
            param.index = index
            if param.probs is not None:
                usingProbs = True
        space = verifai.features.FeatureSpace({
            f'param{index}': verifai.features.Feature(param.domain)
            for index, param in enumerate(self.params)
        })

        # set up VerifAI sampler
        samplerType = globalParams.get('verifaiSamplerType', 'halton')
        samplerParams = globalParams.get('verifaiSamplerParams', None)
        if usingProbs and samplerType == 'ce':
            if samplerParams is None:
                samplerParams = DotMap()
            if 'cont' in samplerParams or 'disc' in samplerParams:
                raise RuntimeError(
                    'CE distributions specified in both VerifaiParameters '
                    'and verifaiSamplerParams')
            cont_buckets = []
            cont_dists = []
            disc_dists = []
            for param in self.params:
                if isinstance(param, VerifaiRange):
                    if param.probs is None:
                        buckets = 5
                        dist = numpy.ones(buckets) / buckets
                    else:
                        dist = numpy.array(param.probs)
                        buckets = len(dist)
                    cont_buckets.append(buckets)
                    cont_dists.append(dist)
                elif isinstance(param, VerifaiDiscreteRange):
                    n = param.high - param.low + 1
                    if param.probs is None:
                        dist = numpy.ones(n) / n
                    else:
                        dist = numpy.array(param.probs)
                    disc_dists.append(dist)
                else:
                    raise RuntimeError(
                        f'Parameter {param} not supported by CE sampler')
            samplerParams.cont.buckets = cont_buckets
            samplerParams.cont.dist = numpy.array(cont_dists)
            samplerParams.disc.dist = numpy.array(disc_dists)
        _, sampler = verifai.server.choose_sampler(
            space, samplerType, sampler_params=samplerParams)
        self.sampler = sampler

        # default rejection feedback is positive so cross-entropy sampler won't update;
        # for other active samplers an appropriate value should be set manually
        if self.rejectionFeedback is None:
            self.rejectionFeedback = 1
Code Example #24
def _create_ctrl_config(ctrl_cfg, cfg_module, ctrl_type, ctrl_args, type_map):
    """Creates controller configuration.

    """
    if ctrl_type == "MPC":
        ctrl_cfg.env = cfg_module.ENV
        if hasattr(cfg_module, "UPDATE_FNS"):
            ctrl_cfg.update_fns = cfg_module.UPDATE_FNS
        if hasattr(cfg_module, "obs_preproc"):
            ctrl_cfg.prop_cfg.obs_preproc = cfg_module.obs_preproc
        if hasattr(cfg_module, "obs_postproc"):
            ctrl_cfg.prop_cfg.obs_postproc = cfg_module.obs_postproc
        if hasattr(cfg_module, "obs_postproc2"):
            ctrl_cfg.prop_cfg.obs_postproc2 = cfg_module.obs_postproc2
        if hasattr(cfg_module, "targ_proc"):
            ctrl_cfg.prop_cfg.targ_proc = cfg_module.targ_proc

        ctrl_cfg.opt_cfg.plan_hor = cfg_module.PLAN_HOR
        ctrl_cfg.opt_cfg.obs_cost_fn = cfg_module.obs_cost_fn
        ctrl_cfg.opt_cfg.ac_cost_fn = cfg_module.ac_cost_fn

        # Process arguments here.
        model_init_cfg = ctrl_cfg.prop_cfg.model_init_cfg
        if ctrl_args.get("model-type", "PE") in ["P", "PE", "D", "DE"]:
            ctrl_args["model-type"] = ctrl_args.get("model-type", "PE")
            if ctrl_args["model-type"][0] == "P":
                model_init_cfg.model_class = BNN
            else:
                model_init_cfg.model_class = NN

            if len(ctrl_args["model-type"]) == 1:
                model_init_cfg.num_nets = 1
                type_map.ctrl_cfg.prop_cfg.model_init_cfg.num_nets = \
                    create_read_only("Number of nets for non-ensembled nets must be one, do not modify.")
            else:
                model_init_cfg.num_nets = 5
                type_map.ctrl_cfg.prop_cfg.model_init_cfg.num_nets = create_conditional(
                    int, lambda string: int(string) > 1, "Ensembled models must have more than one net."
                )
            ctrl_cfg.prop_cfg.model_train_cfg = cfg_module.NN_TRAIN_CFG
            model_init_cfg.model_constructor = cfg_module.nn_constructor

            # Add possible overrides
            type_map.ctrl_cfg.prop_cfg.model_init_cfg.model_dir = str
            type_map.ctrl_cfg.prop_cfg.model_init_cfg.load_model = make_bool

            type_map.ctrl_cfg.prop_cfg.model_train_cfg = DotMap(
                batch_size=int, epochs=int,
                holdout_ratio=float, max_logging=int
            )
        elif ctrl_args["model-type"] == "GP":
            model_init_cfg.model_class = TFGP
            model_init_cfg.kernel_class = gpflow.kernels.RBF
            model_init_cfg.kernel_args = {
                "input_dim": cfg_module.MODEL_IN,
                "output_dim": cfg_module.MODEL_OUT,
                "ARD": True
            }
            model_init_cfg.num_inducing_points = cfg_module.GP_NINDUCING_POINTS
            model_init_cfg.model_constructor = cfg_module.gp_constructor
        else:
            raise NotImplementedError("Unknown model type.")

        ctrl_cfg.prop_cfg.mode = ctrl_args.get("prop-type", "TSinf")
        ctrl_cfg.prop_cfg.npart = 20
        # Handle special cases
        if ctrl_cfg.prop_cfg.mode[:2] == "TS":
            if ctrl_args["model-type"] not in ["PE", "DE"]:
                raise RuntimeError("Cannot perform TS with non-ensembled models.")
            if ctrl_args["model-type"] == "DE":
                ctrl_cfg.prop_cfg.ign_var = True
                type_map.ctrl_cfg.prop_cfg.ign_var = \
                    create_read_only("DE-TS* methods must ignore variance, do not modify.")
        if ctrl_cfg.prop_cfg.mode == "E":
            ctrl_cfg.prop_cfg.npart = 1
            type_map.ctrl_cfg.prop_cfg.npart = \
                create_read_only("Only need one particle for deterministic propagation, do not modify.")
        if ctrl_args["model-type"] == "D" and ctrl_cfg.prop_cfg.mode != "E":
            raise ValueError("Can only use deterministic propagation for deterministic models.")

        ctrl_cfg.opt_cfg.mode = ctrl_args.get("opt-type", "CEM")
        if ctrl_cfg.opt_cfg.mode == "CEM":
            type_map.ctrl_cfg.opt_cfg.cfg = DotMap(
                max_iters=int,
                popsize=int,
                num_elites=int,
                epsilon=float,
                alpha=float
            )
        elif ctrl_cfg.opt_cfg.mode == "Random":
            type_map.ctrl_cfg.opt_cfg.cfg = DotMap(
                popsize=int
            )
        else:
            raise NotImplementedError("Unknown optimizer.")
        ctrl_cfg.opt_cfg.cfg = cfg_module.OPT_CFG[ctrl_cfg.opt_cfg.mode]
    else:
        raise NotImplementedError("Unknown controller class.")
Code Example #25
from dotmap import DotMap

Topics = DotMap({
    'PlayerPro': {
        'Incoming': {
            'Event': 'playerpro-incoming-event',
        },
        'Jobs': {
            'Analytics': 'playerpro-jobs-analytics',
            'Feeds': 'playerpro-jobs-feeds',
            'Notification': 'playerpro-jobs-notification'
        }
    },
    'Topic': {
        'test': 'topic3'
    }
})
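Because the nested dict is wrapped in a DotMap, topic names can then be read with chained attribute access:

Topics.PlayerPro.Jobs.Feeds   # -> 'playerpro-jobs-feeds'
Topics.Topic.test             # -> 'topic3'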
Code Example #26
def create_params():
    p = DotMap()

    # Load the dependencies
    p.system_dynamics_params = create_system_dynamics_params()
    p.waypoint_params = create_waypoint_params()

    p.pipeline = ControlPipelineV0

    # The directory for saving the control pipeline files
    p.dir = os.path.join(base_data_dir(), 'control_pipelines')

    # Spline parameters
    p.spline_params = DotMap(spline=Spline3rdOrder,
                             max_final_time=6.0,
                             epsilon=1e-5)
    p.minimum_spline_horizon = 1.5

    # LQR setting parameters
    p.lqr_params = DotMap(cost_fn=QuadraticRegulatorRef,
                          quad_coeffs=np.array([1.0, 1.0, 1.0, 1.0, 1.0],
                                               dtype=np.float32),
                          linear_coeffs=np.zeros((5), dtype=np.float32))

    # Velocity binning parameters
    p.binning_parameters = DotMap(
        num_bins=61,
        min_speed=p.system_dynamics_params.v_bounds[0],
        max_speed=p.system_dynamics_params.v_bounds[1])

    # Converting K to world coordinates is slow
    # so only set this to true when LQR data is needed
    p.convert_K_to_world_coordinates = False

    # When not needed, LQR controllers can be discarded
    # to save memory
    p.discard_LQR_controller_data = True

    # Set this to True to ignore precomputed
    # LQR trajectories
    p.discard_precomputed_lqr_trajectories = False

    # Set this to True if you want trajectory objects to track
    # linear and angular acceleration; if not needed, set it to
    # False to save memory
    p.track_trajectory_acceleration = True

    p.verbose = False
    return p
Code Example #27
File: input_features.py Project: vumichien/FARM
def sample_to_features_squadOLD(
    sample,
    tokenizer,
    max_seq_len,
    doc_stride,
    max_query_length,
    tasks,
):
    sample.clear_text = DotMap(sample.clear_text, _dynamic=False)
    is_training = sample.clear_text.is_training

    unique_id = 1000000000
    features = []

    query_tokens = tokenizer.tokenize(sample.clear_text.question_text)

    if len(query_tokens) > max_query_length:
        query_tokens = query_tokens[0:max_query_length]

    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for (i, token) in enumerate(sample.clear_text.doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)

    tok_start_position = None
    tok_end_position = None
    if is_training and sample.clear_text.is_impossible:
        tok_start_position = -1
        tok_end_position = -1
    if is_training and not sample.clear_text.is_impossible:
        tok_start_position = orig_to_tok_index[
            sample.clear_text.start_position]
        if sample.clear_text.end_position < len(
                sample.clear_text.doc_tokens) - 1:
            tok_end_position = orig_to_tok_index[sample.clear_text.end_position
                                                 + 1] - 1
        else:
            tok_end_position = len(all_doc_tokens) - 1
        (tok_start_position, tok_end_position) = _SQUAD_improve_answer_span(
            all_doc_tokens,
            tok_start_position,
            tok_end_position,
            tokenizer,
            sample.clear_text.orig_answer_text,
        )

    # The -3 accounts for [CLS], [SEP] and [SEP]
    max_tokens_for_doc = max_seq_len - len(query_tokens) - 3

    # We can have documents that are longer than the maximum sequence length.
    # To deal with this we do a sliding window approach, where we take chunks
    # of the up to our max length with a stride of `doc_stride`.
    _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
        "DocSpan", ["start", "length"])
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
        length = len(all_doc_tokens) - start_offset
        if length > max_tokens_for_doc:
            length = max_tokens_for_doc
        doc_spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == len(all_doc_tokens):
            break
        start_offset += min(length, doc_stride)

    for (doc_span_index, doc_span) in enumerate(doc_spans):
        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in query_tokens:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(0)

        for i in range(doc_span.length):
            split_token_index = doc_span.start + i
            tokens.append(all_doc_tokens[split_token_index])
            segment_ids.append(1)
        tokens.append("[SEP]")
        segment_ids.append(1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        padding_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_len:
            input_ids.append(0)
            padding_mask.append(0)
            segment_ids.append(0)

        assert len(input_ids) == max_seq_len
        assert len(padding_mask) == max_seq_len
        assert len(segment_ids) == max_seq_len

        start_position = 0
        end_position = 0
        if is_training and not sample.clear_text.is_impossible:
            # For training, if our document chunk does not contain an annotation
            # we keep it but set the start and end position to unanswerable
            doc_start = doc_span.start
            doc_end = doc_span.start + doc_span.length - 1
            out_of_span = False
            if not (tok_start_position >= doc_start
                    and tok_end_position <= doc_end):
                out_of_span = True
            if out_of_span:
                start_position = 0
                end_position = 0
            else:
                doc_offset = len(query_tokens) + 2
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset
        if is_training and sample.clear_text.is_impossible:
            start_position = 0
            end_position = 0

        inp_feat = {}
        inp_feat["input_ids"] = input_ids
        inp_feat["padding_mask"] = padding_mask  # attention_mask
        inp_feat["segment_ids"] = segment_ids  # token_type_ids
        inp_feat["start_position"] = start_position
        inp_feat["end_position"] = end_position
        inp_feat["is_impossible"] = sample.clear_text.is_impossible
        inp_feat["sample_id"] = sample.id
        inp_feat["passage_shift"] = doc_span.start
        features.append(inp_feat)
        unique_id += 1

    return features
Code Example #28
File: test_dotmap.py Project: mmarkov/dotmap
 def test_values(self):
     d = DotMap(self._get_dict())
     v = d.values()
     self.assertEqual(len(v), 3)
Code Example #29
with open("dependency-artifacts/uniswap/UniswapV2Factory.json") as f:
    UniswapV2Factory = json.load(f)

with open("dependency-artifacts/uniswap/UniswapV2Router02.json") as f:
    UniswapV2Router = json.load(f) 


aragon_registry = DotMap(
    addresses=DotMap(
        agentImpl="0x3a93c17fc82cc33420d1809dda9fb715cc89dd37",
        companyTemplate="0xd737632caC4d039C9B0EEcc94C12267407a271b5",
    ),
    artifacts=DotMap(
        Agent=Agent,
        CompanyTemplate=CompanyTemplate,
        Vault=Vault,
        Voting=Voting,
        Finance=Finance,
        TokenManager=TokenManager,
        MiniMeToken=MiniMeToken,
    ),
)

gnosis_safe_registry = DotMap(
    addresses=DotMap(
        proxyFactory="0x76E2cFc1F5Fa8F6a5b3fC4c8F4788F0116861F9B",
        masterCopy="0x34CfAC646f301356fAa8B21e94227e3583Fe3F5F",
    ),
    artifacts=DotMap(
        MasterCopy=MasterCopy, ProxyFactory=ProxyFactory, GnosisSafe=GnosisSafe
    ),
)
Code Example #30
    def _simulate_experiments(self,
                              dataset_initial: rdata.dataset,
                              n: int = 5) -> tuple:
        """
        sample n times from S^k_pi, the set of k experiments resulting from
           running a sequential policy, pi, k iterations

        :param dataset_initial: dataset used to seed each simulated run
        :param n: number of samples of S^k_pi. A hyperparameter
        :return: (parameters, objectives, models) collected for each sample
        """

        # build independent copies per sample; [x] * n would alias one object n times
        datasets = [dataset_initial.copy() for _ in range(n)]
        acq_funcs = [EI(model=None, logs=None) for _ in range(n)]
        parameters = [[] for _ in range(n)]
        objectives = [[] for _ in range(n)]
        models = []
        k = self.batch_size

        for sample in range(n):
            for iteration in range(k):
                p = DotMap()
                p.verbosity = 0

                # Instantiate the model with given parameters
                self._model = self.model(parameters=p)
                # Train the model with provided dataset
                self._model.train(train_set=datasets[sample])
                # Update acquisition function with the posterior
                acq_funcs[sample].update(model=self._model, logs=self._logs)

                # Optimize acquisition function
                logging.info('Optimizing the acquisition function')
                task = OptTask(f=self.acq_func.evaluate,
                               n_parameters=self.task.get_n_parameters(),
                               n_objectives=1,
                               order=0,
                               bounds=self.task.get_bounds(),
                               name='Acquisition Function',
                               task={'minimize'},
                               labels_param=None,
                               labels_obj=None,
                               vectorized=True)
                stop_criteria = StopCriteria(maxEvals=self.optimizer.maxEvals)

                p = DotMap()
                p.verbosity = 1

                # Calculate the optimizer
                optimizer = self.optimizer.optimizer(
                    parameters=p, task=task, stopCriteria=stop_criteria)
                x = np.matrix(optimizer.optimize())
                fx = self._model.predict(dataset=x.T)
                dataset_new = rdata.dataset(data_input=x, data_output=fx)
                datasets[sample] = datasets[sample].merge(dataset_new)
                parameters[sample].append(x)
                objectives[sample].append(fx)
            models.append(self._model)

        return parameters, objectives, models
Code Example #31
File: app.py Project: yyjinlong/lanus
def SSHBootstrap(client, rhost, rport):
    # NOTE(Define some global attributes for each socket process; shared by every channel thread.)
    context = DotMap()
    context.client = client
    context.channel_list = []
    context.remote_host = rhost

    transport = paramiko.Transport(client, gss_kex=False)
    try:
        transport.load_server_moduli()
    except:
        LOG.error('*** Failed to load moduli -- gex will be unsupported.')
        client.close()
        sys.exit(1)

    context.transport = transport
    transport.add_server_key(SSHKeyGen.rsa_key())

    ssh_server = SSHServerInterface(context)
    try:
        transport.start_server(server=ssh_server)
    except paramiko.SSHException as _ex:
        LOG.error('*** Bootstrap ssh start server failed: %s' % str(_ex))
        LOG.error(traceback.format_exc())
        client.close()
        sys.exit(1)

    while transport.is_active():
        client_channel = transport.accept(timeout=CONF.SSH.timeout)
        if client_channel is None:
            if not context.channel_list:
                LOG.error('*** Client channel timeout from host: %s.' % rhost)
                LOG.error('*** First login timeout > %s, so close client.' %
                          CONF.SSH.timeout)
                try:
                    client.send(('Connect from %s timeout.' % rhost).encode())
                    close_data = client.recv(1024)
                    LOG.info(
                        '*** Login timeout receive client close data: %s' %
                        close_data)
                    client.close()
                    transport.atfork()
                except:
                    pass
                sys.exit(1)
            continue

        ssh_server.shell_request_event.wait(10)
        if not ssh_server.shell_request_event.is_set():
            LOG.error('*** Client never asked for a shell.')
            try:
                client.send(b'Must be shell request.')
                close_data = client.recv(1024)
                LOG.info('*** Client not use shell receive close data: %s' %
                         close_data)
                client.close()
                transport.atfork()
            except:
                pass
            sys.exit(1)
        LOG.info('*** Client asking for a shell.')

        if (len(context.channel_list) + 1) > CONF.SERVER.session_limit:
            tip = u'Session limit exceeded! Please reuse an already-open window and close this one.'
            client_channel.sendall(cm.ws(tip, 1))
            close_data = client_channel.recv(1024)
            LOG.info('*** Session over limit, receive data: %s' % close_data)
            client_channel.close()
            continue

        # NOTE(The channel list is shared by multiple threads: if a session
        # thread exits on its own, it must remove itself from this list.)
        context.channel_list.append(client_channel)
        context[client_channel] = queue.Queue()

        pid = os.getpid()
        LOG.info('*** Login user: %s from (%s:%s) on pid: %s.' %
                 (context.username, rhost, rport, pid))

        # NOTE(A client channel must not be shared across threads as a global,
        # or sessions would get mixed up.)
        try:
            ssh_interactive = SSHInteractive(context, client_channel)
            ssh_interactive.start()
        except:
            LOG.error(traceback.format_exc())

    try:
        client.close()
    except:
        pass
    LOG.info('*** Client from %s transport.is_active() is false.' % rhost)
    sys.exit(1)
Code Example #32
 def get_all_players(self):
     all_players = self._player_table.all()
     return [DotMap(player) for player in all_players]
Code Example #33
import json
import base64
import sys
import os
from dotmap import DotMap

url = sys.argv[1]
print('Url:', url)

ign = DotMap()
config = DotMap()
ign.ignition.version = "2.2.0"
config.replace.source = url
ign.ignition.config = config

ignstr = json.dumps(ign.toDict())

sshpath = os.path.expanduser('~/.ssh/id_rsa.pub')
with open(sshpath, "r") as sshFile:
    sshkey = sshFile.read()
with open("gw/master.ign", "r") as ignFile:
    master_ignition = json.load(ignFile)
with open("gw/worker.ign", "r") as ignFile:
    worker_ignition = json.load(ignFile)
with open("azuredeploy.parameters.json", "r") as jsonFile:
    data = DotMap(json.load(jsonFile))

data.parameters.BootstrapIgnition.value = base64.b64encode(ignstr.encode()).decode()
data.parameters.MasterIgnition.value = base64.b64encode(
    json.dumps(master_ignition).encode()).decode()
data.parameters.WorkerIgnition.value = base64.b64encode(
    json.dumps(worker_ignition).encode()).decode()
Code Example #34
dm_conf = DotMap({
    "transformer_base": {
        "name": None,
        "field": "src",
        "tokenize": None,  # tokenizer.encode
        "tokenizer": None,  # tokenizer
        "sequential": True,
        "eos": False,
        "sos": False,
        "use_vocab": False,
        "pad_token": None,
        "postprocess": None,
        "include_lengths": True,
        "lowercase": None,
        "fix_length": None,
    },
    "elmo": {
        "name": None,
        "field": "src",
        "tokenize": elmo_batch_to_ids,
        "tokenizer": None,
        "sequential": False,
        "eos": False,
        "sos": False,
        "use_vocab": False,
        "pad_token": 261,
        "postprocess": None,
        "include_lengths": True,
        "fix_length": None,
        "lowercase": True,
    },
    "normal": {
        "name": None,
        "field": "src",
        "tokenize": None,
        "tokenizer": None,
        "sequential": True,
        "eos": True,
        "sos": True,
        "use_vocab": True,
        "pad_token": "<pad>",
        "postprocess": None,
        "include_lengths": True,
        "fix_length": None,
        "lowercase": True,
    },
    "char": {
        "name": None,
        "field": "src",
        "tokenize": lambda x: list(x),
        "tokenizer": None,
        "sequential": True,
        "eos": False,
        "sos": False,
        "use_vocab": True,
        "pad_token": "<pad>",
        "postprocess": None,
        "include_lengths": True,
        "fix_length": 15,
        "lowercase": True,
    },
})
Code Example #35
File: test_dotmap.py Project: mmarkov/dotmap
 def test_pop(self):
     d = DotMap(self._get_dict())
     v = d.pop('a', 4)
     self.assertEqual(1, v)
     v = d.pop('a', 4)
     self.assertEqual(4, v)
Code Example #36
def deploy_sett_common_logic(deployer):
    return DotMap(
        Controller=Controller.deploy({"from": deployer}),
        Sett=Sett.deploy({"from": deployer}),
        StakingRewards=StakingRewards.deploy({"from": deployer}),
    )
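A hedged usage sketch, assuming a Brownie test context where `accounts[0]` serves as the deployer:

logic = deploy_sett_common_logic(accounts[0])
controller = logic.Controller      # deployed Controller contract, via dot access
sett_address = logic.Sett.address  # standard Brownie contract attribute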
Code Example #37
File: test_dotmap.py Project: mmarkov/dotmap
 def test_copy(self):
     d = DotMap(self._get_dict())
     c = d.copy()
     self.assertEqual(d, c)
Code Example #38
File: test_dotmap.py Project: mmarkov/dotmap
 def test_setdefault(self):
     d = DotMap()
     d.a = 'c'
     self.assertEqual('c', d.setdefault('a', 'd'))
     self.assertEqual('d', d.setdefault('b', 'd'))
Code Example #39
File: test_dotmap.py Project: mmarkov/dotmap
 def test_get(self):
     d = DotMap(self._get_dict())
     self.assertEqual(d.get('a'), 1)
     self.assertEqual(d.get('f', 33), 33)
     self.assertEqual(d.get('f'), None)
Code Example #40
def test_evaluation():
    ##########################
    ########## Settings
    ##########################
    lang_model = "deepset/roberta-base-squad2"
    do_lower_case = False

    test_assertions = True

    data_dir = Path("testsave/data/squad20")
    evaluation_filename = "dev-v2.0.json"

    device, n_gpu = initialize_device_settings(use_cuda=True)

    # loading models and evals
    model = AdaptiveModel.convert_from_transformers(lang_model, device=device, task_type="question_answering")
    model.prediction_heads[0].no_ans_boost = 0
    model.prediction_heads[0].n_best = 1

    tokenizer = Tokenizer.load(pretrained_model_name_or_path=lang_model, do_lower_case=do_lower_case)
    processor = SquadProcessor(
        tokenizer=tokenizer,
        max_seq_len=256,
        label_list=["start_token", "end_token"],
        metric="squad",
        train_filename=None,
        dev_filename=None,
        dev_split=0,
        test_filename=evaluation_filename,
        data_dir=data_dir,
        doc_stride=128,
    )

    starttime = time()

    data_silo = DataSilo(processor=processor, batch_size=50)
    model.connect_heads_with_processor(data_silo.processor.tasks, require_labels=True)
    evaluator = Evaluator(data_loader=data_silo.get_data_loader("test"), tasks=data_silo.processor.tasks, device=device)

    # 1. Test FARM internal evaluation
    results = evaluator.eval(model)
    f1_score = results[0]["f1"]*100
    em_score = results[0]["EM"]*100
    tnacc = results[0]["top_n_accuracy"]*100
    elapsed = time() - starttime
    print(results)
    print(elapsed)

    gold_EM = 77.7478
    gold_f1 = 82.1557
    gold_tnacc = 84.0646 # top 1 recall
    gold_elapsed = 70 # 4x V100
    if test_assertions:
        np.testing.assert_allclose(em_score, gold_EM, rtol=0.001, err_msg=f"FARM Eval changed for EM by: {em_score-gold_EM}")
        np.testing.assert_allclose(f1_score, gold_f1, rtol=0.001, err_msg=f"FARM Eval changed for f1 score by: {f1_score-gold_f1}")
        np.testing.assert_allclose(tnacc, gold_tnacc, rtol=0.001, err_msg=f"FARM Eval changed for top 1 accuracy by: {tnacc-gold_tnacc}")
        # np.testing.assert_allclose(elapsed, gold_elapsed, rtol=0.1, err_msg=f"FARM Eval speed changed significantly by: {elapsed - gold_elapsed} seconds")


    # 2. Test FARM predictions with outside eval script
    starttime = time()
    model = Inferencer(model=model, processor=processor, task_type="question_answering", batch_size=50, gpu=(device.type == "cuda"))
    filename = data_dir / evaluation_filename
    result = model.inference_from_file(file=filename, return_json=False)
    results_squad = [x.to_squad_eval() for x in result]

    elapsed = time() - starttime

    os.makedirs("testsave", exist_ok=True)
    write_squad_predictions(
        predictions=results_squad,
        predictions_filename=filename,
        out_filename="testsave/predictions.json"
    )
    script_params = {"data_file": filename,
              "pred_file": "testsave/predictions.json",
              "na_prob_thresh" : 1,
              "na_prob_file": False,
              "out_file": False}
    results_official = squad_evaluation.main(OPTS=DotMap(script_params))
    f1_score = results_official["f1"]
    em_score = results_official["exact"]

    gold_EM = 78.4890
    gold_f1 = 81.7104
    gold_elapsed = 66 # 4x V100
    print(elapsed)
    if test_assertions:
        np.testing.assert_allclose(em_score, gold_EM, rtol=0.001,
                                   err_msg=f"Eval with official script changed for EM by: {em_score - gold_EM}")
        np.testing.assert_allclose(f1_score, gold_f1, rtol=0.001,
                                   err_msg=f"Eval with official script changed for f1 score by: {f1_score - gold_f1}")
        np.testing.assert_allclose(elapsed, gold_elapsed, rtol=0.1,
                                   err_msg=f"Inference speed changed significantly by: {elapsed - gold_elapsed} seconds")
Code example #41
File: utils.py Project: SHBLK/Bernoulli-VAE-Hashing
def Load_Dataset(filename):
    dataset = scipy.io.loadmat(filename)
    x_train = dataset['train']
    x_test = dataset['test']
    x_cv = dataset['cv']
    y_train = dataset['gnd_train']
    y_test = dataset['gnd_test']
    y_cv = dataset['gnd_cv']

    data = DotMap()
    data.n_trains = y_train.shape[0]
    data.n_tests = y_test.shape[0]
    data.n_cv = y_cv.shape[0]
    data.n_tags = y_train.shape[1]
    data.n_feas = x_train.shape[1]

    ## Convert sparse to dense matrices
    train = x_train.toarray()
    nz_indices = np.where(np.sum(train, axis=1) > 0)[0]
    train = train[nz_indices, :]
    train_len = np.sum(train > 0, axis=1)

    test = x_test.toarray()
    test_len = np.sum(test > 0, axis=1)

    cv = x_cv.toarray()
    cv_len = np.sum(cv > 0, axis=1)

    gnd_train = y_train[nz_indices, :]
    gnd_test = y_test
    gnd_cv = y_cv

    data.train = train
    data.test = test
    data.cv = cv
    data.train_len = train_len
    data.test_len = test_len
    data.cv_len = cv_len
    data.gnd_train = gnd_train
    data.gnd_test = gnd_test
    data.gnd_cv = gnd_cv

    return data
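For orientation, a minimal usage sketch; the filename is hypothetical, and any .mat file exposing the sparse 'train'/'test'/'cv' matrices and 'gnd_*' label arrays read above would work:

# Illustrative only: load a split dataset and inspect its dense arrays.
data = Load_Dataset('dataset.mat')
print(data.n_trains, data.n_tests, data.n_cv)
print(data.train.shape, data.train_len[:5])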
Code example #42
File: test_dotmap.py Project: mmarkov/dotmap
 def test_pprint(self, mock_pretty_printer):
     d = DotMap(self._get_dict())
     d.pprint()
     self.assertTrue(mock_pretty_printer.called)
Code example #43
def get_default_variables():
    variables = DotMap()

    variables.save_data = True
    variables.path_dir = 'images/'
    variables.file_name_dm = "dm.pkl.gz"
    variables.file_name_txt = "lambdas.txt"
    variables.min_or = 4
    variables.max_or = 4
    variables.min_and = 3
    variables.max_and = 3
    variables.min_n = 2
    variables.max_n = 2

    variables.height = 64
    variables.width = variables.height+20

    variables.ft = 1

    dt = datetime.datetime.now()
    variables.next_folder = "{year:04}_{month:02}_{day:02}".format(year=dt.year, month=dt.month, day=dt.day)

    variables.with_frame = True
    variables.width_append_frame = 5

    variables.return_pix_array = True
    variables.save_pictures = True
    variables.save_gif = True

    variables.functions_str_lst = []

    variables.with_resize_image = False
    variables.resize_factor = 3
    variables.bits = 1
    variables.func_by_name = ''
    variables.lambdas_in_picture = True
    variables.max_it = 100

    variables.height_resize = variables.height * variables.resize_factor
    variables.width_resize = variables.width * variables.resize_factor

    variables.temp_path_lambda_file = ''
    variables.image_by_str = None

    variables.suffix = None

    return variables
Code example #44
File: test_dotmap.py Project: mmarkov/dotmap
 def test_fromkeys(self):
     d = DotMap.fromkeys(['a', 'b', 'c'], 'd')
     self.assertEqual(d.a, 'd')
     self.assertEqual(d.c, 'd')
Code example #45
class Director(object):
    """
    Directs traffic and tabs
    """


    # Defaults
    config_file = os.path.join(ui_etc, 'config.yml')
    caches_file = os.path.join(ui_etc, 'caches.yml')
    autonames_file = os.path.join(ui_etc, 'autonames.yml')
    schema_file = os.path.join(ui_etc, 'schema.yml')
    schema_functions_file = os.path.join(ui_tui, "schema_functions.py")
    deploy_file_host = os.path.join(host_root, 'deploy.yml')

    # state_file = '{0}/state.yml'.format(cache_root)
    state_file = None
    script_file = None
    deploy_file = None

    # TUI config, install script, state dicts and shortcuts
    tui_config = None
    config_dataset = None
    script_dataset = None
    state_dataset = None
    deploy_dataset = None

    template_vars = {}

    config = None
    script = None
    state = None
    deploy = None
    # state.step holds current deployment step
    # state.tab holds current tab within the step
    # state.license_accepted holds the obvious

    # Keybinds
    hotkeys = None

    # TabView
    tabs = None
    current_tab = None

    def __init__(self, config_file=None, script_file=None, state_file=None, deploy_file=None, init_tui=False):
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)

        if config_file is not None:
            self.config_file = config_file

        if script_file is not None:
            self.script_file = '{0}/{1}'.format(ui_etc, script_file)

        if state_file is not None:
            self.state_file = state_file

        if deploy_file is not None:
            self.deploy_file = deploy_file

        self.load_config()

        if init_tui is True:
            self.tui_config = TuiConfig()
            self.palette = self.tui_config.render_palette()

        if self.script_file is not None:
            self.load_script()

        if self.state_file is not None:
            self.load_state()

        if self.deploy_file is not None:
            try:
                self.validate_deploy()
            except RuntimeError as e:
                print(e.msg)
                sys.exit(1)
            self.load_deploy()

        if self.script_file is not None:
            self.bind_hotkeys()
            self.validate_script_step()

    def load_state(self):
        """
        Load current state (may be empty)
        :raises IOError: if filesystem exceptions prevent config file read or initial write
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        if self.state_file is None:
            pass
        else:

            try:
                self.update_template_vars()
                self.state_dataset = DataSet(self.state_file,
                                             additional_template_vars=self.template_vars)
                logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
                logobj(self.state_dataset.content)
                self.state = DotMap(self.state_dataset.content)
                logobj(self.state)
                self.update_template_vars()
            except IOError as e:
                logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(e))
                # must be a new install or else we don't have permissions.
                # Try to create an empty config and see what happens.
                try:
                    self.state_dataset = DataSet(self.state_file, create_if_missing=True,
                                                 additional_template_vars=self.template_vars)
                    logobj(self.state_dataset.content)
                    self.state = DotMap(self.state_dataset.content)
                    logobj(self.state)
                    self.update_template_vars()
                except IOError as cf:
                    logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(cf))
                    print("Unable to create a new state file: " + repr(cf))
                    # and presumably crash, though at some point we should tell
                    # the user to make sure they're mounting /opt correctly in Docker
                    raise

    def load_config(self):
        """
        Loads the config
        :raises IOError: if can't read config
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        try:
            self.update_template_vars()
            self.config_dataset = DataSet(self.config_file)
            self.config = DotMap(self.config_dataset.content)
            logobj(self.config)
            self.update_template_vars()
        except IOError as e:
            # If this fails, there's nothing more we can do.
            # Something is broken in the install container.
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(e))
            print("Unable to locate config file: " + repr(e))
            raise

        if len(self.config.ui.script_file) != 0:
            self.script_file = self.config.ui.script_file
            logging.debug(self.__class__.__name__ + ': ' +
                          sys._getframe().f_code.co_name +
                          ': will look for script in: ' + repr(self.script_file))

        if len(self.config.ui.state_file) != 0:
            self.state_file = self.config.ui.state_file
            logging.debug(self.__class__.__name__ + ': ' +
                          sys._getframe().f_code.co_name +
                          ': will look for state in: ' + repr(self.state_file))

        if len(self.config.ui.deploy_file) != 0:
            self.deploy_file = self.config.ui.deploy_file
            logging.debug(self.__class__.__name__ + ': ' +
                          sys._getframe().f_code.co_name +
                          ': will look for deploy in: ' + repr(self.deploy_file))

    def load_script(self):
        """
        Loads the install script
        :raises IOError: if can't read script
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        if self.script_file is None:
            pass
        else:
            try:
                self.update_template_vars()
                self.script_dataset = DataSet(self.script_file, additional_template_vars=self.config.toDict(),
                                              additional_template_yaml=self.config.yaml)
                self.script = DotMap(self.script_dataset.content)
                self.update_template_vars()
            except IOError as e:
                # If this fails, there's nothing more we can do.
                # Something is broken in the install container.
                logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(e))
                print("Unable to locate script: " + repr(e))
                raise

    def validate_deploy(self):
        """
        Validates the deployment yaml file with the schema
        :raises pykwalify.errors.SchemaError: if validation fails
        :raises pykwalify.errors.CoreError: for other type of errors
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        if not self.deploy_file:
            raise AssertionError

        try:
            c = Core(source_file=self.deploy_file,
                     schema_files=[self.schema_file],
                     extensions=[self.schema_functions_file])
            c.validate()
        except CoreError as e:
            # Most probably there is something wrong with the source files
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + e.msg)
            raise
        except SchemaError as e:
            # The deploy file is not valid
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + e.msg)
            print("The deployment file at '%s' is not valid." % (self.deploy_file_host,))
            raise

    def load_deploy(self):
        """
        Loads the deployment map
        :raises IOError: if can't read map
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        if not self.deploy_file:
            raise AssertionError

        try:
            self.update_template_vars()
            self.deploy_dataset = DataSet(self.deploy_file,
                                          additional_template_vars=self.template_vars)
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
            logobj(self.deploy_dataset.content)
            self.deploy = DotMap(self.deploy_dataset.content)
            logobj(self.deploy)
            self.update_template_vars()

        except IOError as e:
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + repr(e))
            # must be a new install or else we don't have permissions.

        autonames_dataset = DataSet(self.autonames_file, additional_template_vars=self.template_vars)
        autonames = DotMap(autonames_dataset.content)
        self.config.update(autonames)

        caches_dataset = DataSet(self.caches_file, additional_template_vars=self.template_vars)
        caches = DotMap(caches_dataset.content)
        self.config.update(caches)

    def update_template_vars(self):
        """
        update template vars with new data
        :return: null
        """
        if self.config is not None:
            self.template_vars.update(self.config.toDict())
        if self.deploy is not None:
            self.template_vars.update(self.deploy.toDict())
        if self.state is not None:
            self.template_vars.update(self.state.toDict())
        if self.script is not None:
            self.template_vars.update(self.script.toDict())

    def reset_config_step(self):
        """
        Resets config to a known state if it's not populated or is broken.
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.state.step = 0
        self.save()

    def validate_script_step(self):
        """
        Makes sure we have an operational setlist
        :return: True once a valid step index is ensured
        :raises ValueError:
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        # Where are we? Previous engagement?
        if self.state.state:
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': state.state')
            if not self.state.state.step and not self.state.state.step == 0:
                logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': !state.state.step')
                self.reset_config_step()
                return True
            elif self.state.state.step > len(self.script.setlist) - 1 or self.state.state.step < 0:
                logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': OOB state.state.step')
                self.reset_config_step()
                return True
            else:
                logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': OK state.state.step')
                return True
        elif not self.state.state:
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': !state.state')
            self.reset_config_step()
            return True
        else:
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': wtf')
            # also wtf
            raise ValueError('Unexpected config state, delete config and start over.')

    def bind_hotkeys(self):
        """
        Creates keybind objects in self
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.hotkeys = {}
        # do keys
        for key, keyconf in self.script.hotkeys.items():
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name +
                          ': keybind: ' + repr(key) + ':' + repr(keyconf))
            self.hotkeys[key] = KeyBind(key, keyconf.key, keyconf.call, keyconf.text)

    def reconfigure_hotkeys(self, keylist):
        """
        Reconfigures keybinds, activating keys in keylist and deactivating all others.
        :param keylist: list of keys to activate
        :return: pipe | separated string of hotkey quick-help texts
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        keytexts = []
        for hotkey, obj in self.hotkeys.items():
            if obj.name in keylist:
                obj.activate()
                keytexts.append(obj.text)
            else:
                obj.deactivate()
        keytexts = " | ".join(map(str, keytexts))
        return keytexts

    def play_setlist(self, start_at=0):
        """
        Plays the setlist to the screen
        :param start_at: Set in setlist to start playing
        :returns: A complete TabView widget
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)

        # We should eventually be returning an object
        namespace = list(self.script.setlist[start_at].keys())[0]
        tabset = self.script.setlist[start_at].get(namespace)

        return self.play_set(namespace, tabset)

    def play_set(self, namespace, tabset):
        """
        :param namespace: namespace for input values from this tabset saved in this deployment config
        :param tabset: tabset config DotMap
        :return: A complete TabView widget
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)

        # Reconfigure hotkeys and get keytexts
        # For now we're just going to turn them all on
        # TODO: Fix this so it's per-tab
        # keylist = []
        # for name, hotkey in self.hotkeys:
        #     keylist.append(hotkey.name)
        keytexts = self.reconfigure_hotkeys(self.hotkeys.keys())

        # Build tabs
        factory = widgets.WidgetFactory()
        tabview_tabs = []
        for blueprint in tabset:
            blueprint['namespace'] = namespace
            blueprint['keytexts'] = keytexts
            blueprint['director'] = self
            # logobj(blueprint)
            tabview_tabs.append([blueprint['name'], factory.create_widget(DotMap(blueprint)), False])

        # Build TabView
        return TabView(self, 'tab_active', 'tab_inactive', tabview_tabs, focus=0)

    def save(self):
        """
        shortcut to doing the juggling for writing the deployment config
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.state_dataset.content = self.state.toDict()
        self.state_dataset.save()

    def add_tab(self, newtab):
        """
        Adds a tab to the TabView
        :param newtab: a tuple of ('name', widget_object, boolClosable)
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.tabs.add_tab(newtab)

    def del_tab(self, tabname):
        """
        Deletes a tab from the TabView (semi-dangerous)
        :param tabname: String name of the tab to close
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.tabs.close_tab_by_name(tabname, force=True)

    def accept_license(self, button=None):
        """
        Called when the "Accept" button of a LicenseConfirm widget instance
        is selected. Used only once.
        Rearranges tabs to continue the installer process.
        :param button: Button widget boilerplate
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.state.state.license_accepted = True
        self.save()
        self.next_step()
        # self.add_tab(('Environment', page1, False))
        # self.del_tab('Overview')
        # self.forward()

    def forward(self, button=None):
        """
        Flips to the next tab to the right
        :param button: Button widget boilerplate
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.tabs.set_active_next()

    def backward(self, button=None):
        """
        Flips to the previous tab on the left
        :param button: Button widget boilerplate
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.save()
        self.tabs.set_active_prev()

    def next_step(self, button=None):
        pass

    def prev_step(self, button=None):
        pass

    def add_fake(self):
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.save()
        self.add_tab(('fake', None, False))

    def del_fake(self):
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.del_tab('fake')

    def run(self):
        """
        Starts the director
        :return: initial widget object for urwid mainloop
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        self.tabs = self.play_setlist(self.state.state.step)
        return self.tabs

        # Why do we zero index? http://exple.tive.org/blarg/2013/10/22/citation-needed/

    def handle_unhandled_keys(self, key):
        """
        Global function to handle otherwise unhandled keypresses.
        :param key: keypress key
        :raises: Main loop exit (quits the Urwid loop, not necessarily other loops)
        :returns: Always True
        """
        # Keypress
        # logging.debug(sys._getframe().f_code.co_name + ': unhandled: ' + repr(type(key)) + ' ' + str(key))
        if not isinstance(key, tuple):
            if key == 'ctrl x':
                logging.debug(sys._getframe().f_code.co_name + ': handled_key: ' + repr(key))
                raise urwid.ExitMainLoop()
            if key in ('right',):
                logging.debug(sys._getframe().f_code.co_name + ': handled_key: ' + str(key))
                self.forward()
                return True
            if key in ('left',):
                logging.debug(sys._getframe().f_code.co_name + ': handled_key: ' + str(key))
                self.backward()
                return True
            else:
                logging.debug(sys._getframe().f_code.co_name + ': unhandled: ' + repr(type(key)) + ' ' + str(key))
                return True
        # Must be a mouse action
        elif isinstance(key, tuple):
            logging.debug(sys._getframe().f_code.co_name + ': mouse: ' + str(key))
            return True
        #     action, button, x, y = key
        #     if action is 'mouse press':
        #         if button is 4:
        else:
            logging.debug(sys._getframe().f_code.co_name + ': unhandled: ' + repr(type(key)) + ' ' + str(key))
            return True
Code example #46
File: test_dotmap.py Project: mmarkov/dotmap
 def test_has_key(self):
     d = DotMap(self._get_dict())
     self.assertTrue(d.has_key('a'))
     self.assertTrue('a' in d)
     self.assertFalse(d.has_key('f'))
     self.assertFalse('f' in d)
Code example #47
File: eBooksLN_prev.py Project: Loreton/eBooks
    def _displayResults(self, book, data):
        # data=' '.join(book['content'])
        # occurrencies=regEx.FindIter(pattern, data=data, fPRINT=False)
        # result=self._color_words_in_text(data, occurrencies)
        ''' Sample
            {
                "word1": {"counter": 1 },
                "word2": {"counter": 1 },
                "data": {
                            "1": ["Lee Child"],
                            "2": ["Child pippo"],
                        }
            }
        '''
        dis_data = data.pop('data', [])
        words = data.keys()
        choice = ''
        _max = len(dis_data)
        _min = 0
        _step = 4
        _from = _min

        # - prepare book info display data
        dmBook=DotMap(book, _dynamic=False) # di comodo
        dis_line=[]
        dis_line.append('')
        dis_line.append('book: {dmBook.title} - [{dmBook.author}]'.format(**locals()))
        dis_line.append('    - id: {dmBook._id}'.format(**locals()))
        dis_line.append('    - tags: {dmBook.tags}'.format(**locals()))
        for word in words:
            counter = data[word]['counter']
            dis_line.append('        - word: {word} - instances: {counter}'.format(**locals()))

        while True:
            if choice=='b': break # return to book_list

            # - display book metadata
            for line in dis_line:
                C.pYellowH(text=line, tab=8)


            ''' Display data.
                Rotate within the list, showing
                [_step] results at a time.'''

            # - set range to display menu
            if _from>=_max: _from=_max-_step
            if _from<0:     _from=0
            _to = _from+_step
            if _to>_max:    _to=_max

            # - display data
            for index in range(_from, _to):
                item = dis_data[index+1]
                print('{0:5} - {1}'.format(index+1, item[0]))
                for line in item[1:]:
                    print(' '*7, line)
                print()

            # - Get keyboard input
            choice=prompt('[n]ext [p]rev [b]ooks_list [t]ag', validKeys='n|p|b|t')
            if   choice in ['b']: break
            elif choice in ['n']: _from+=_step
            elif choice in ['p']: _from-=_step
            elif choice in ['t']:
                if self._execute:
                    tags=prompt('Please enter TAGs (BLANK separator)')
                    book['tags'] = tags.split()
                    result = self._ePubs.updateField(rec=book, fld_name='tags')
                    if result.matched_count:
                        C.pCyanH(text='tags {0} have been added'.format(book['tags']), tab=4)
                        self._book_indexing(book, fields=['tags'])
                        print()
                else:
                    C.pCyanH(text='in DRY-RUN mode, tag setting not available', tab=4)
                    prompt()
Code example #48
File: test_dotmap.py Project: mmarkov/dotmap
 def test_to_dict(self):
     d = DotMap(self._get_dict())
     o = d.to_dict()
     self.assertEqual(o['a'], 1)
Code example #49
class BatchBO(IterativeOptimizer):
    def __init__(self, task, stopCriteria, parameters=DotMap()):
        """
        Bayesian optimization
        :param task:
        :param parameters:
        """
        super(BatchBO, self).__init__(task=task,
                                      stopCriteria=stopCriteria,
                                      parameters=parameters)
        self.name = 'Bayesian Optimization'
        self.order = 0
        # ---
        self.batch_size = 5
        self.acq_func = parameters.get('acq_funcs',
                                       default=EI(model=None, logs=None))
        self.optimizer = DotMap()
        self.optimizer.optimizer = parameters.get('optimizer', default=CMAES)
        self.optimizer.maxEvals = 20000
        self.model = parameters.get('model', default=rregression.GP)
        self.past_evals = parameters.get('past_evals', default=None)
        self.n_initial_evals = parameters.get('n_initial_evals', default=10)
        self.log_best_mean = False  # optimize mean acq_func at each step

        self.store_model = True  # Should we store all models for logging?
        self._model = None  # Current model
        self._logs.data.m = None
        self._logs.data.v = None
        self._logs.data.model = None

    def _simulate_experiments(self,
                              dataset_initial: rdata.dataset,
                              n: int = 5) -> tuple:
        """
        sample n times from S^k_pi, the set of k experiments resulting from
           running a sequential policy, pi, k iterations

        :param dataset_initial:
        :param n: number of samples of S^k_pi. A hyperparameter
        :return:
        """

        # Build independent copies: `[obj] * n` would alias one object n times
        datasets = [dataset_initial.copy() for _ in range(n)]
        acq_funcs = [EI(model=None, logs=None) for _ in range(n)]
        parameters = []
        objectives = []
        models = []
        k = self.batch_size

        for sample in range(n):
            parameters.append([])
            objectives.append([])
            for iteration in range(k):
                p = DotMap()
                p.verbosity = 0

                # Instantiate the model with given parameters
                self._model = self.model(parameters=p)
                # Train the model with provided dataset
                self._model.train(train_set=datasets[sample])
                # Update acquisition function with the posterior
                acq_funcs[sample].update(model=self._model, logs=self._logs)

                # Optimize acquisition function
                logging.info('Optimizing the acquisition function')
                task = OptTask(f=self.acq_func.evaluate,
                               n_parameters=self.task.get_n_parameters(),
                               n_objectives=1,
                               order=0,
                               bounds=self.task.get_bounds(),
                               name='Acquisition Function',
                               task={'minimize'},
                               labels_param=None,
                               labels_obj=None,
                               vectorized=True)
                stop_criteria = StopCriteria(maxEvals=self.optimizer.maxEvals)

                p = DotMap()
                p.verbosity = 1

                # Calculate the optimizer
                optimizer = self.optimizer.optimizer(
                    parameters=p, task=task, stopCriteria=stop_criteria)
                x = np.matrix(optimizer.optimize())
                fx = self._model.predict(dataset=x.T)
                dataset_new = rdata.dataset(data_input=x, data_output=fx)
                datasets[sample] = datasets[sample].merge(dataset_new)
                parameters[sample].append(x)
                objectives[sample].append(fx)
            models.append(self._model)

        return np.asarray(parameters), np.asarray(objectives), models

    def _weigh_data_points(self, X: np.array, model) -> np.array:
        assert X.shape[0] == model.shape[0], "Mismatching number of " \
                                             "Xs and models"

        n = X.shape[0]
        k = self.batch_size
        # TODO: check that this k matches Xs
        Ws = np.array([])

        A_master = np.full(X[0].shape, 1)
        np.fill_diagonal(A_master, -1)

        m, v = model._predict(X)

        def prob_a_geq_b(m_normalized):
            def helper(a):
                return 1 - norm.cdf(-a)

            helper_vect = np.vectorize(helper)
            return helper_vect(m_normalized)

        for iteration in range(k):
            A_mask = np.delete(A_master, iteration, axis=0)
            # computing (A \Sigma_y A.T)^{-1\2}(A\mu_y)
            m_normalized = np.matmul(
                np.sqrt(
                    np.reciprocal(np.matmul(np.matmul(A_mask, v), A_mask.T))),
                np.matmul(A_mask, m))
            Ws = np.append(Ws, np.prod(prob_a_geq_b(m_normalized)))

        return Ws

    def _match_experiments(self, X: np.array, W: np.array, k: int) -> np.array:
        """
        given n experiments weighted by the probability they are the
            maximizer, return k of them that are most representative
        approximates to k_medoids
        """
        B = X

        def obj(X, W, B):
            # weighted distance from each X[i] to its nearest neighbour in B
            nbrs = NearestNeighbors(n_neighbors=1).fit(B)
            dists, _ = nbrs.kneighbors(X)
            total = 0
            for i in range(X.shape[0]):
                total += W[i] * dists[i]
            return total

        # greedily remove elements until we have k of them left
        for _ in range(X.shape[0] - k):
            # objective value with each candidate element removed in turn
            objs = [obj(X, W, np.delete(B, i, axis=0)) for i in range(X.shape[0])]
            worst = int(np.argmin(objs))
            X = np.delete(X, worst, axis=0)
            W = np.delete(W, worst)

        return X

    def _select_parameters(self):
        """
        Select the next set of parameters to evaluate on the objective function
        :return: parameters: np.matrix
        """

        # If we don't have any data to start with, randomly pick points
        k = self.batch_size

        if (self._iter == 0) and (self.past_evals is None):
            logging.info('Initializing with %d random evaluations' %
                         self.n_initial_evals)
            self._logs.data.model = [None]
            return self.task.get_bounds() \
                .sample_uniform((self.n_initial_evals,
                                 self.task.get_n_parameters()))
        else:
            # TODO: use past_evals
            logging.info('Fitting response surface')

            dataset = rdata.dataset(data_input=self._logs.get_parameters(),
                                    data_output=self._logs.get_objectives())

            Xs, FXs, GPs = self._simulate_experiments(dataset)
            # flatten the samples axis so Xs is one pool of candidate points
            Xs = Xs.reshape((-1,) + Xs.shape[2:])
            Ws = np.array([])
            for i in range(k):
                Ws = np.append(Ws, self._weigh_data_points(Xs[i], GPs[i]))
            # now Xs and Ws should both be flattened w.r.t samples axis
            Xs = self._match_experiments(Xs, Ws, k)

            # TODO: integrate the different acquisition functions to form one GP
            # # Log the mean and variance
            # if self._logs.data.m is None:
            #     self._logs.data.m = np.matrix(fx[0])
            #     self._logs.data.v = np.matrix(fx[1])
            # else:
            #     self._logs.data.m = np.concatenate((self._logs.data.m, fx[0]),
            #                                        axis=0)
            #     self._logs.data.v = np.concatenate((self._logs.data.v, fx[1]),
            #                                        axis=0)
            #
            # # Store the model
            # if self.store_model:
            #     if self._logs.data.model is None:
            #         self._logs.data.model = [self._model]
            #     else:
            #         self._logs.data.model.append(self._model)
            #
            # # Optimize mean function (for logging purposes)
            # if self.log_best_mean:
            #     logging.info('Optimizing the mean function')
            #     task = OptTask(f=self._model.predict_mean,
            #                    n_parameters=self.task.get_n_parameters(),
            #                    n_objectives=1,
            #                    order=0,
            #                    bounds=self.task.get_bounds(),
            #                    name='Mean Function',
            #                    task={'minimize'},
            #                    labels_param=None, labels_obj=None,
            #                    vectorized=True)
            #     stop_criteria = StopCriteria(maxEvals=self.optimizer.maxEvals)
            #     p = DotMap()
            #     p.verbosity = 1
            #     mean_opt = self.optimizer.optimizer(parameters=p,
            #                                         task=task,
            #                                         stopCriteria=stop_criteria)
            #
            #     best_x = np.matrix(optimizer.optimize())
            #     best_fx = self._model.predict(dataset=best_x.T)
            #
            #     if self._iter == 1:
            #         self._logs.data.best_m = np.matrix(best_fx[0])
            #         self._logs.data.best_v = np.matrix(best_fx[1])
            #     else:
            #         self._logs.data.best_m = np.concatenate(
            #             (self._logs.data.best_m, best_fx[0]), axis=0)
            #         self._logs.data.best_v = np.concatenate(
            #             (self._logs.data.best_v, best_fx[1]), axis=0)

            return Xs

    def f_visualize(self):
        # TODO: plot also model (see plot_optimization_curve)
        if self._iter == 0:
            self._fig = plt.figure()
            self._objectives_curve, _ = plt.plot(
                self.get_logs().get_objectives().T, linewidth=2, color='blue')
            plt.ylabel('Obj.Func.')
            plt.xlabel('N. Evaluations')
        else:
            self._objectives_curve.set_data(
                np.arange(self.get_logs().get_n_evals()),
                self.get_logs().get_objectives().T)
            self._fig.canvas.draw()
            plt.xlim([0, self.get_logs().get_n_evals()])
            plt.ylim([
                np.min(self.get_logs().get_objectives()),
                np.max(self.get_logs().get_objectives())
            ])

    def plot_optimization_curve(self, scale='log', plotDelta=True):
        import scipyplot as spp

        logs = self.get_logs()
        plt.figure()
        # logs.plot_optimization_curve()

        if (self.task.opt_obj is None) and (plotDelta is True):
            plt.plot(logs.get_objectives().T, c='red', linewidth=2)
            plt.ylabel('Obj.Func.')
            n_evals = logs.data.m.shape[0]
            x = np.arange(start=logs.get_n_evals() - n_evals,
                          stop=logs.get_n_evals())
            spp.gauss_1D(y=logs.data.m,
                         variance=logs.data.v,
                         x=x,
                         color='blue')
            if self.log_best_mean:
                spp.gauss_1D(y=logs.data.best_m,
                             variance=logs.data.best_v,
                             x=x,
                             color='green')
        else:
            plt.plot(logs.get_objectives().T - self.task.opt_obj,
                     c='red',
                     linewidth=2)
            plt.ylabel('Optimality gap')
            n_evals = logs.data.m.shape[0]
            x = np.arange(start=logs.get_n_evals() - n_evals,
                          stop=logs.get_n_evals())
            spp.gauss_1D(y=logs.data.m - self.task.opt_obj,
                         variance=logs.data.v,
                         x=x,
                         color='blue')
            if self.log_best_mean:
                spp.gauss_1D(y=logs.data.best_m - self.task.opt_obj,
                             variance=logs.data.best_v,
                             x=x,
                             color='green')

        plt.xlabel('N. Evaluations')
        if scale == 'log':
            ax = plt.gca()
            ax.set_yscale('log')

        # TODO: best performance expected
        # if self.log_best_mean:
        #     plt.legend(['Performance evaluated', 'performance expected', 'Best performance expected'])
        # else:
        #     plt.legend(['Performance evaluated', 'performance expected'])
        plt.show()
Code example #50
File: timeline.py Project: bigrayhicks/STreifen
def stix2timeline(stix):
    if not "objects" in stix:
        return False
    groups = []
    items = []
    for obj in stix["objects"]:
        if obj["type"] == "sighting":
            sight = DotMap(obj)
            sor = sight.sighting_of_ref
            a = {}
            if sor.split("--")[0] == "threat-actor":
                actor = DotMap(find_ref(sor, stix))
                act = {
                    "id": actor.id,
                    "content": actor.name,
                }
                if act not in groups:
                    groups.append(act)
            wsr = sight.where_sighted_refs
            for w in wsr:
                if w.split("--")[0] == "identity":
                    tgt = DotMap(find_ref(w, stix))
                    item = {
                        "id": sight.id,
                        "content": tgt.name,
                        "group": act["id"],
                        "start": sight.first_seen,
                        #"end": sight.last_seen,
                        "className": sight.type,
                    }
                    if sight.last_seen:
                        item["end"] = sight.last_seen
                    if tgt.sectors:
                        item["subgroup"] = tgt.sectors[0]
                    #if tgt.sectors.all():
                    #    item["className"] = tgt.sectors.all()[0]
                    if item not in items:
                        items.append(item)
        elif obj["type"] == "report":
            report = DotMap(obj)
            start = report.published
            if not start:
                start = report.created
            item = {
                "id": report.id,
                "content": report.name,
                "group": None,
                "className": report.type,
                "start": start
            }
            for ref in report.object_refs:
                if ref.split("--")[0] == "threat-actor":
                    actor = DotMap(find_ref(ref, stix))
                    a = {
                        "id": actor.id,
                        "content": actor.name,
                    }
                    if a not in groups:
                        groups.append(a)
                    if not item["group"]:
                        item["group"] = a["id"]
            if item not in items:
                items.append(item)
    dataset = {
        "items": items,
        "groups": groups,
    }
    return dataset
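A hedged sketch of feeding stix2timeline a minimal bundle; the objects below are invented for illustration, and find_ref is assumed to resolve an id inside the same bundle:

# Illustrative only: one report referencing one threat actor.
bundle = {
    "objects": [
        {"type": "threat-actor", "id": "threat-actor--1", "name": "APT Example"},
        {"type": "report", "id": "report--1", "name": "Q1 Report",
         "published": "2020-01-01", "object_refs": ["threat-actor--1"]},
    ]
}
timeline = stix2timeline(bundle)
print(timeline["groups"])  # one group entry for the actor
print(timeline["items"])   # one timeline item for the report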
Code example #51
def get_dm_params_lambda(variables):
    dm_params_lambda = DotMap()

    dm_params_lambda.ft = variables.ft
    dm_params_lambda.path_dir = None
    dm_params_lambda.save_data = False
    dm_params_lambda.file_name_dm = variables.file_name_dm
    dm_params_lambda.file_name_txt = variables.file_name_txt
    dm_params_lambda.min_or = variables.min_or
    dm_params_lambda.max_or = variables.max_or
    dm_params_lambda.min_and = variables.min_and
    dm_params_lambda.max_and = variables.max_and
    dm_params_lambda.min_n = variables.min_n
    dm_params_lambda.max_n = variables.max_n

    return dm_params_lambda
Code example #52
File: main.py Project: thegismar/auc
import json
import os

import requests
from dotenv import load_dotenv
from dotmap import DotMap
from rich.status import Status

# Auto-injected in a Brownie console; imported explicitly so the script
# also runs standalone.
from brownie import Contract, accounts, network
from brownie.network import gas_price

load_dotenv()
wallet = os.getenv('wallet')
brownie = os.getenv('network')

network.connect(brownie)
me = accounts.default = accounts.load(wallet)

with open('accounts.json', 'r') as f:
    accounts = DotMap(json.load(f))

c = Contract.from_explorer('0xA39d1e9CBecdb17901bFa87a5B306D67f15A2391')


def claim(r):
    d = DotMap()
    d.id = r['id']
    d.account = r['account']
    d.amount = int('0' + r['amount'])
    d.r = int('0' + r['r'])
    d.s = int('0' + r['s'])
    d.v = int('0' + r['v'])

    c.claim(d.id, d.account, d.amount, d.v, d.r, d.s)
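A hedged driver sketch; it assumes accounts.json decodes to a mapping whose values carry the id/account/amount/r/s/v fields consumed by claim above:

# Illustrative only: submit a claim transaction for every stored record.
for record in accounts.values():
    claim(record)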
Code example #53
File: eBooksLN_prev.py Project: Loreton/eBooks
    def load_eBooks(self, dir_path, file_pattern, target_dir=None):
        # - read list of files
        '''
        https://rednafi.github.io/digressions/python/2020/04/13/python-pathlib.html#pathglob
            top_level_py_files = Path("src").glob("*.py")
            all_py_files = Path("src").rglob("*.py")
            print(list(top_level_py_files))
            print(list(all_py_files))
            reading an iterator automatically exhausts it,
            so either consume it directly or save the data with list()
        '''
        all_files = list(Path(dir_path).rglob(file_pattern))
        if not all_files:
            logger.critical('no files found on {dir_path}/{file_pattern}'.format(**locals()))

        nFiles = len(all_files)
        loaded_books=0  # counter
        indexed_books=0 # counter
        # - insert each file
        for index, epub_file in enumerate(sorted(all_files), start=1):
            _dir = epub_file.parent
            book = self._readEbook(file=epub_file)
            if not book: continue # book not valid
            if self._inp_args.indexing and indexed_books >= self._inp_args.max_books:
                return
            elif loaded_books >= self._inp_args.max_books:
                return


            self._ePubs.set_id(book)

            # C.pYellowH(text='[{index:06}/{nFiles:06}] - {0} - [{1}]'.format(book['title'], book['author'], **locals()), tab=4)
            curr_book = self._ePubs.exists(rec=book)

            if curr_book:
                _msg='already catalogued'
                book = curr_book
                # printColor=None
                printColor=C.pWhite
                # if self._inp_args.verbose:
            else:   # - insert book into eBooks_collection
                printColor=C.pYellowH
                book['content'] = self._readContent(filename=epub_file)
                try:
                    if self._execute:
                        self._ePubs.insert_one(book, replace=False)
                        _msg='inserted as new book'
                    else:
                        _msg='[DRY-RUN] - inserted as new book'

                    loaded_books += 1
                except Exception as why:
                    C.pError(text=str(why))
                    C.pYellowH(text=epub_file, tab=8)
                    if self._inp_args.move_file:
                        epub_file.rename(epub_file.with_suffix('.err.zip'))
                    continue


            dmBook = DotMap(book, _dynamic=False)
            if self._inp_args.verbose:
                print()
                printColor(text='''
                [{index:05}/{nFiles:05}]
                    _id:       {dmBook._id}
                    book:      {dmBook.title} - [{dmBook.author}]
                    {_msg}
                    indexed:   {dmBook.indexed_fields}\
                '''.format(**locals()), tab=4)
            else:
                printColor(text='''[{index:05}/{nFiles:05}] book: {dmBook.title} - [{dmBook.author}] '''.format(**locals()), tab=4)


            # - forcing dictionary update
            if self._inp_args.indexing and not dmBook.indexed_fields:
                _fields = ['content', 'title', 'tags', 'author']
                if self._execute: self._book_indexing(book, fields=_fields )
                indexed_books += 1



            # move file if required
            if target_dir:
                target_file='{target_dir}/{dmBook.title}.epub'.format(**locals())
                C.pYellowH(text='... moving to:', tab=16)
                C.pYellowH(text='dir:   {target_dir}'.format(**locals()), tab=18)
                C.pYellowH(text='fname: {dmBook.title}'.format(**locals()), tab=18)

                if self._execute and self._inp_args.move_file:
                    moved, reason = epub_file.moveTo(target_file, replace=False)
                    if not moved:
                        # - rename source
                        epub_file.rename(epub_file.parent / '{dmBook.title}{epub_file.suffix}_{reason}'.format(**locals()) )
Code example #54
File: config.py Project: bausk/metabook
from dotmap import DotMap

ServerConfig = dict(
    port="8585",
    templates="templates",
    static="static"
)

metabook_config = DotMap(ServerConfig)

metabook_config.locals.newfile = "New metabook{}.ipynb"
metabook_config.extension = [".graph", ".ipynb"]
metabook_config.routes.tree = "tree"
metabook_config.routes.graph = "graph"
metabook_config.routes.files.default_template = "template/default.json"
metabook_config.routes.api.sessions = "api/sessions"
metabook_config.routes.api.file = "api/file"
metabook_config.routes.api.solvers = "api/solvers"
metabook_config.routes.api.template = "api/templates"
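Worth noting in this example: DotMap auto-creates intermediate maps on attribute assignment, which is why metabook_config.routes.api.sessions can be set without declaring routes or routes.api first. A quick self-contained demonstration:

from dotmap import DotMap

cfg = DotMap()
cfg.routes.api.sessions = "api/sessions"  # nested maps created on the fly
print(cfg.routes.api.sessions)            # api/sessions
print(cfg.toDict())  # {'routes': {'api': {'sessions': 'api/sessions'}}}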